[XDR - IR] Logs for OOTB integration #37352

Draft · wants to merge 7 commits into base: master
45 changes: 28 additions & 17 deletions Packs/CortexXDR/Integrations/CortexXDRIR/CortexXDRIR.py
@@ -432,14 +432,15 @@
})
if len(filters) > 0:
request_data['filters'] = filters

demisto.debug(f'{request_data=}')
reply = self._http_request(
method='POST',
url_suffix='/incidents/get_multiple_incidents_extra_data/',
json_data={'request_data': request_data},
headers=self.headers,
timeout=self.timeout,
)
demisto.debug(f'{reply=}')
if ALERTS_LIMIT_PER_INCIDENTS < 0:
ALERTS_LIMIT_PER_INCIDENTS = arg_to_number(reply.get('reply', {}).get('alerts_limit_per_incident')) or 50
demisto.debug(f'Setting alerts limit per incident to {ALERTS_LIMIT_PER_INCIDENTS}')
@@ -615,9 +616,9 @@
if isinstance(raw_incident, list):
raw_incident = raw_incident[0]
if raw_incident.get('incident', {}).get('alert_count') > ALERTS_LIMIT_PER_INCIDENTS:
demisto.debug(f'for incident:{incident_id} using the old call since "\
"alert_count:{raw_incident.get("incident", {}).get("alert_count")} >" \
"limit:{ALERTS_LIMIT_PER_INCIDENTS}')
demisto.debug(f"for incident:{incident_id} using the old call since "
f"alert_count:{raw_incident.get('incident', {}).get('alert_count')} >"
"limit:{ALERTS_LIMIT_PER_INCIDENTS}")
raw_incident = client.get_incident_extra_data(incident_id, alerts_limit)
readable_output = [tableToMarkdown(f'Incident {incident_id}', raw_incident.get('incident'), removeNull=True)]

@@ -1102,9 +1103,7 @@
# Get the last fetch time, if exists
last_fetch = last_run.get('time') if isinstance(last_run, dict) else None
demisto.debug(f"{last_fetch=}")
Review comment (Contributor Author): Add log for last_run

incidents_from_previous_run = last_run.get('incidents_from_previous_run', []) if isinstance(last_run,
dict) else []
demisto.debug(f"{incidents_from_previous_run=}")
incidents_from_previous_run = last_run.get('incidents_from_previous_run', []) if isinstance(last_run, dict) else []
# Handle first time fetch, fetch incidents retroactively
if last_fetch is None:
last_fetch, _ = parse_date_range(first_fetch_time, to_timestamp=True)
@@ -1117,12 +1116,17 @@

incidents = []
if incidents_from_previous_run:
demisto.debug('using incidents from previous run')
raw_incidents = incidents_from_previous_run
demisto.debug(f'Before update: {ALERTS_LIMIT_PER_INCIDENTS=}')
ALERTS_LIMIT_PER_INCIDENTS = last_run.get('alerts_limit_per_incident', -1) if isinstance(last_run, dict) else -1
demisto.debug(f'After update: {ALERTS_LIMIT_PER_INCIDENTS=}')
else:
if statuses:
demisto.debug('no incidents from previous run, fetching')
if statuses:

Check failure on line 1126 (GitHub Actions / pre-commit): Ruff (W291) Packs/CortexXDR/Integrations/CortexXDRIR/CortexXDRIR.py:1126:21: Trailing whitespace
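The flagged line appears to be the blank line that follows if statuses: above; stripping its trailing spaces would be the likely fix.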
raw_incidents = []
for status in statuses:
demisto.debug(f'fetching for {status=}')
raw_incident_status = client.get_multiple_incidents_extra_data(
gte_creation_time_milliseconds=last_fetch,
status=status,
@@ -1132,6 +1136,7 @@
raw_incidents.extend(raw_incident_status)
raw_incidents = sorted(raw_incidents, key=lambda inc: inc.get('incident', {}).get('creation_time'))
else:
demisto.debug(f'fetching for all statuses')

Check failure on line 1139 (GitHub Actions / pre-commit): Ruff (F541) Packs/CortexXDR/Integrations/CortexXDRIR/CortexXDRIR.py:1139:27: f-string without any placeholders
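A minimal fix, assuming the message is meant to stay a plain literal, would be to drop the stray f prefix: demisto.debug('fetching for all statuses').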
raw_incidents = client.get_multiple_incidents_extra_data(
gte_creation_time_milliseconds=last_fetch, limit=max_fetch,
starred=starred,
@@ -1148,19 +1153,19 @@
next_run = {}
try:
count_incidents = 0

incident_ids = []
for raw_incident in raw_incidents:
incident_data: dict[str, Any] = sort_incident_data(raw_incident) if raw_incident.get('incident') else raw_incident
incident_id = incident_data.get('incident_id')
demisto.debug(f'XDR Incident {incident_id=}')
alert_count = arg_to_number(incident_data.get('alert_count')) or 0
if alert_count > ALERTS_LIMIT_PER_INCIDENTS:
demisto.debug(f'for incident:{incident_id} using the old call since alert_count:{alert_count} >" \
"limit:{ALERTS_LIMIT_PER_INCIDENTS}')
demisto.debug(f"for incident:{incident_id} using the old call since alert_count:{alert_count} >"
"limit:{ALERTS_LIMIT_PER_INCIDENTS}")
raw_incident_ = client.get_incident_extra_data(incident_id=incident_id)
incident_data = sort_incident_data(raw_incident_)
sort_all_list_incident_fields(incident_data)
incident_data['mirror_direction'] = MIRROR_DIRECTION.get(demisto.params().get('mirror_direction', 'None'),
None)
incident_data['mirror_direction'] = MIRROR_DIRECTION.get(demisto.params().get('mirror_direction', 'None'), None)
incident_data['mirror_instance'] = integration_instance
incident_data['last_mirrored_in'] = int(datetime.now().timestamp() * 1000)
description = incident_data.get('description')
@@ -1171,15 +1176,17 @@
'rawJSON': json.dumps(incident_data),
}
if demisto.params().get('sync_owners') and incident_data.get('assigned_user_mail'):
demisto.debug(f'assigning user {incident_id=}')
incident['owner'] = demisto.findUser(email=incident_data.get('assigned_user_mail')).get('username')
# Update last run and add incident if the incident is newer than last fetch
if incident_data.get('creation_time', 0) > last_fetch:
last_fetch = incident_data['creation_time']
incident_ids.append(incident_id)
incidents.append(incident)
non_created_incidents.remove(raw_incident)

count_incidents += 1
if count_incidents == max_fetch:
demisto.debug(f'Reached {max_fetch=} incidents, breaking at {incident_id=}')
break

except Exception as e:
@@ -1188,6 +1195,8 @@
f"'{len(non_created_incidents)}'.\n The incidents will be created in the next fetch")
else:
raise
finally:
demisto.debug(f'Incidents fetched in this run: {incident_ids=}')

if non_created_incidents:
next_run['incidents_from_previous_run'] = non_created_incidents
@@ -1196,7 +1205,7 @@
next_run['incidents_from_previous_run'] = []

next_run['time'] = last_fetch + 1

demisto.debug(f'New next run: {next_run=}')
return next_run, incidents


@@ -1368,9 +1377,10 @@

elif command == 'fetch-incidents':
integration_instance = demisto.integrationInstance()
last_run = demisto.getLastRun().get('next_run')
full_last_run = demisto.getLastRun()
demisto.debug(
f"Before starting a new cycle of fetch incidents\n{last_run=}\n{integration_instance=}")
f"Before fetch incidents\n{full_last_run=}\n{integration_instance=}")
last_run = full_last_run.get('next_run')
next_run, incidents = fetch_incidents(client=client,
first_fetch_time=first_fetch_time,
integration_instance=integration_instance,
@@ -1387,6 +1397,7 @@

last_run_obj = demisto.getLastRun()
last_run_obj['next_run'] = next_run
demisto.debug(f'saving last run {last_run_obj}')
demisto.setLastRun(last_run_obj)
demisto.incidents(incidents)

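For reference, a minimal sketch of the last-run state that the new debug calls log, assembled only from the keys visible in this diff (treat the exact shape of next_run, including whether alerts_limit_per_incident is stored on it, as an assumption):

# Sketch only, not the integration's exact code; keys are taken from the diff above.
next_run = {
    'time': last_fetch + 1,                                # next cycle fetches incidents newer than this
    'incidents_from_previous_run': non_created_incidents,  # raw incidents deferred to the next cycle
    # assumption: alerts_limit_per_incident is stored here as well, since the next run
    # reads it back via last_run.get('alerts_limit_per_incident', -1)
}
last_run_obj = demisto.getLastRun()
last_run_obj['next_run'] = next_run
demisto.debug(f'saving last run {last_run_obj}')
demisto.setLastRun(last_run_obj)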