|
120 | 120 | }
|
121 | 121 | ],
|
122 | 122 | "globalCustomCode": "\n\n\nfrom math import log",
|
123 | | - "hash": "c62a83f2b885857ecb0ed931a92f20293e4829a0", |
| 123 | + "hash": "94337eb6a3725d1ec82b66a9f16e3f0dd4fbe956", |
124 | 124 | "nodes": {
|
125 | 125 | "0": {
|
126 | 126 | "data": {
|
|
136 | 136 | "type": "start",
|
137 | 137 | "warnings": {},
|
138 | 138 | "x": 180,
|
139 | | - "y": 0 |
| 139 | + "y": -6.394884621840902e-14 |
140 | 140 | },
|
141 | 141 | "1": {
|
142 | 142 | "data": {
|
|
159 | 159 | "advanced": {
|
160 | 160 | "customName": "file detonate filter",
|
161 | 161 | "customNameId": 0,
|
| 162 | + "delimiter": ",", |
| 163 | + "delimiter_enabled": true, |
162 | 164 | "description": "Filters successful file detonation results.",
|
163 | 165 | "join": [],
|
164 | 166 | "note": "Filters successful file detonation results."
|
|
214 | 216 | "errors": {},
|
215 | 217 | "id": "11",
|
216 | 218 | "type": "code",
|
217 |
| - "userCode": " # Reference for scores: https://schema.ocsf.io/objects/reputation\n #phantom.debug(\"filtered_result_0_summary: {}\".format(filtered_result_0_summary))\n #phantom.debug(\"filtered_result_0_data: {}\".format(filtered_result_0_data))\n #phantom.debug(\"filtered_result_0_data___scans: {}\".format(filtered_result_0_data___scans))\n #phantom.debug(\"filtered_result_0_data___attributes___category: {}\".format(filtered_result_0_data___attributes___category))\n #phantom.debug(\"vault_id_detonation_result_item_0: {}\".format(vault_id_detonation_result_item_0))\n \n\n score_table = {\n \"0\":\"Unknown\",\n \"1\":\"Very_Safe\",\n \"2\":\"Safe\",\n \"3\":\"Probably_Safe\",\n \"4\":\"Leans_Safe\",\n \"5\":\"May_not_be_Safe\",\n \"6\":\"Exercise_Caution\",\n \"7\":\"Suspicious_or_Risky\",\n \"8\":\"Possibly_Malicious\",\n \"9\":\"Probably_Malicious\",\n \"10\":\"Malicious\"\n }\n \n file_summary_list = filtered_result_0_summary\n normalize_score_file__file_score_object = []\n normalize_score_file__scores = []\n normalize_score_file__categories = []\n \n for summary_data in file_summary_list:\n # Set confidence based on percentage of vendors undetected\n # Reduce the confidence by percentage of vendors undetected.\n vendors = summary_data['harmless'] + summary_data['undetected'] + summary_data['malicious'] + summary_data['suspicious']\n confidence = 100 - int((summary_data['undetected']/vendors) * 100)\n\n # Normalize reputation on a 10 point scale based on number of malicious and suspicious divided by harmless vendors\n # This can be adjusted to include whatever logic is desired.\n suspect = summary_data['malicious'] + summary_data['suspicious']\n # If there are only harmless verdicts and no suspicious entries, set score_id to 1.\n if summary_data['harmless'] and not suspect:\n score_id = 1\n # If there are only harmless verdicts and no suspicious entries, set score_id to 1.\n elif not summary_data['harmless'] and not suspect:\n score_id = 0\n else:\n # customize score calculation as desired\n log_result = log((suspect/vendors) * 100, 100) # log imported from math in global code block\n score_id = int(log_result * 10) + 3\n\n phantom.debug(\"log_result: {}\".format(log_result))\n if score_id > 10:\n score_id = 10\n\n score = score_table[str(score_id)]\n\n normalize_score_file__file_score_object.append({'score': score, 'score_id': score_id, 'confidence': confidence})\n normalize_score_file__scores.append(score)\n #phantom.debug(\"normalize_score_file__file_score_object: {}\".format(normalize_score_file__file_score_object))\n #phantom.debug(\"normalize_score_file__scores: {}\".format(normalize_score_file__scores))\n\n", |
| 219 | + "userCode": " # Reference for scores: https://schema.ocsf.io/objects/reputation\n #phantom.debug(\"filtered_result_0_summary: {}\".format(filtered_result_0_summary))\n #phantom.debug(\"filtered_result_0_data: {}\".format(filtered_result_0_data))\n #phantom.debug(\"filtered_result_0_data___scans: {}\".format(filtered_result_0_data___scans))\n #phantom.debug(\"filtered_result_0_data___attributes___category: {}\".format(filtered_result_0_data___attributes___category))\n #phantom.debug(\"vault_id_detonation_result_item_0: {}\".format(vault_id_detonation_result_item_0))\n \n\n score_table = {\n \"0\":\"Unknown\",\n \"1\":\"Very_Safe\",\n \"2\":\"Safe\",\n \"3\":\"Probably_Safe\",\n \"4\":\"Leans_Safe\",\n \"5\":\"May_not_be_Safe\",\n \"6\":\"Exercise_Caution\",\n \"7\":\"Suspicious_or_Risky\",\n \"8\":\"Possibly_Malicious\",\n \"9\":\"Probably_Malicious\",\n \"10\":\"Malicious\"\n }\n \n file_summary_list = filtered_result_0_summary\n normalize_score_file__file_score_object = []\n normalize_score_file__scores = []\n normalize_score_file__categories = []\n \n for summary_data in file_summary_list:\n # Set confidence based on percentage of vendors undetected\n # Reduce the confidence by percentage of vendors undetected.\n vendors = summary_data['harmless'] + summary_data['undetected'] + summary_data['malicious'] + summary_data['suspicious']\n confidence = 100 - int((summary_data['undetected']/vendors) * 100)\n\n # Normalize reputation on a 10 point scale based on number of malicious and suspicious divided by harmless vendors\n # This can be adjusted to include whatever logic is desired.\n suspect = summary_data['malicious'] + summary_data['suspicious']\n # If there are only harmless verdicts and no suspicious entries, set score_id to 1.\n if summary_data['harmless'] and not suspect:\n score_id = 1\n # If there are only harmless verdicts and no suspicious entries, set score_id to 1.\n elif not summary_data['harmless'] and not suspect:\n score_id = 0\n else:\n if suspect and vendors:\n # customize score calculation as desired\n log_result = log((suspect/vendors) * 100, 100) # log imported from math in global code block\n score_id = int(log_result * 10) + 3\n \n if score_id > 10:\n score_id = 10\n \n elif suspect == 0:\n score_id = 0\n\n score = score_table[str(score_id)]\n\n normalize_score_file__file_score_object.append({'score': score, 'score_id': score_id, 'confidence': confidence})\n normalize_score_file__scores.append(score)\n #phantom.debug(\"normalize_score_file__file_score_object: {}\".format(normalize_score_file__file_score_object))\n #phantom.debug(\"normalize_score_file__scores: {}\".format(normalize_score_file__scores))\n\n", |
218 | 220 | "warnings": {},
|
219 | 221 | "x": 340,
|
220 | 222 | "y": 686
|
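The userCode change above wraps the log() call in a guard so it only runs when at least one vendor returned a malicious or suspicious verdict, which avoids a math domain error from log(0) when every verdict is harmless or undetected. Below is a minimal, standalone sketch of that normalization logic; the function and variable names are illustrative, not the playbook's own identifiers.

from math import log

SCORE_TABLE = {
    0: "Unknown", 1: "Very_Safe", 2: "Safe", 3: "Probably_Safe",
    4: "Leans_Safe", 5: "May_not_be_Safe", 6: "Exercise_Caution",
    7: "Suspicious_or_Risky", 8: "Possibly_Malicious",
    9: "Probably_Malicious", 10: "Malicious",
}

def normalize_score(summary):
    # summary holds vendor verdict counts: harmless, undetected, malicious, suspicious
    vendors = (summary["harmless"] + summary["undetected"]
               + summary["malicious"] + summary["suspicious"])
    # confidence drops with the share of vendors that returned no verdict
    confidence = 100 - int((summary["undetected"] / vendors) * 100)

    suspect = summary["malicious"] + summary["suspicious"]
    if summary["harmless"] and not suspect:
        score_id = 1          # only clean verdicts -> Very_Safe
    elif not suspect:
        score_id = 0          # nothing but undetected -> Unknown
    else:
        # guarded log: only reached when suspect > 0, so log() never sees 0
        score_id = min(int(log((suspect / vendors) * 100, 100) * 10) + 3, 10)

    return {"score": SCORE_TABLE[score_id], "score_id": score_id, "confidence": confidence}

# example: 2 malicious + 1 suspicious verdicts out of 60 vendors
print(normalize_score({"harmless": 40, "undetected": 17, "malicious": 2, "suspicious": 1}))

This mirrors the scoring flow in the diff (log base 100 of the suspect percentage, scaled to a 0-10 OCSF-style reputation id), but it is a sketch under those assumptions, not the playbook's literal block.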
|
282 | 284 | "advanced": {
|
283 | 285 | "customName": "url detonate filter",
|
284 | 286 | "customNameId": 0,
|
| 287 | + "delimiter": ",", |
| 288 | + "delimiter_enabled": true, |
285 | 289 | "description": "Filters successful url reputation results.",
|
286 | 290 | "join": [],
|
287 | 291 | "note": "Filters successful url reputation results."
|
|
318 | 322 | "advanced": {
|
319 | 323 | "customName": "input filter",
|
320 | 324 | "customNameId": 0,
|
| 325 | + "delimiter": ",", |
| 326 | + "delimiter_enabled": true, |
321 | 327 | "description": "Determine branches based on provided inputs.",
|
322 | 328 | "join": [],
|
323 | 329 | "note": "Determine branches based on provided inputs."
|
|
358 | 364 | "errors": {},
|
359 | 365 | "id": "2",
|
360 | 366 | "type": "filter",
|
361 | | - "warnings": { |
362 | | - "config": [ |
363 | | - "Reconfigure invalid datapath." |
364 | | - ] |
365 | | - }, |
| 367 | + "warnings": {}, |
366 | 368 | "x": 220,
|
367 | 369 | "y": 140
|
368 | 370 | },
|
|
471 | 473 | "errors": {},
|
472 | 474 | "id": "6",
|
473 | 475 | "type": "code",
|
474 |
| - "userCode": "\n # Write your custom code here...\n #phantom.debug(\"filtered_result_0_data___attributes_categories: {}\".format(filtered_result_0_data___data_attributes_results___category))\n #phantom.debug(\"filtered_result_0_summary: {}\".format(filtered_result_0_summary))\n #phantom.debug(\"filtered_result_1_data___scans: {}\".format(filtered_result_1_data___scans))\n #phantom.debug(\"url_detonation_result_item_0: {}\".format(url_detonation_result_item_0))\n score_table = {\n \"0\":\"Unknown\",\n \"1\":\"Very_Safe\",\n \"2\":\"Safe\",\n \"3\":\"Probably_Safe\",\n \"4\":\"Leans_Safe\",\n \"5\":\"May_not_be_Safe\",\n \"6\":\"Exercise_Caution\",\n \"7\":\"Suspicious_or_Risky\",\n \"8\":\"Possibly_Malicious\",\n \"9\":\"Probably_Malicious\",\n \"10\":\"Malicious\"\n }\n \n url_categories_list = filtered_result_0_data___attributes_categories\n url_summary_list = filtered_result_0_summary\n normalize_score_url__url_score_object = []\n normalize_score_url__score = []\n normalize_score_url__categories = []\n\n #for category, summary_data in zip(url_categories_list, url_summary_list):\n for category, summary_data in zip(url_categories_list, url_summary_list):\n \n # Set confidence based on percentage of vendors undetected\n # Reduce the confidence by percentage of vendors undetected.\n vendors = summary_data['harmless'] + summary_data['undetected'] + summary_data['malicious'] + summary_data['suspicious']\n confidence = 100 - int((summary_data['undetected']/vendors) * 100)\n \n #phantom.debug(\"vendors: {}\".format(vendors))\n #phantom.debug(\"confidence: {}\".format(confidence))\n\n # Normalize reputation on a 10 point scale based on number of malicious and suspicious divided by harmless vendors\n # This can be adjusted to include whatever logic is desired.\n suspect = summary_data['malicious'] + summary_data['suspicious']\n # If there are only harmless verdicts and no suspicious entries, set score_id to 1.\n if summary_data['harmless'] and not suspect:\n score_id = 1\n else:\n # customize score calculation as desired\n log_result = log((suspect/vendors) * 100, 100) # log imported from math in global code block\n score_id = int(log_result * 10) + 3\n #log_result = (suspect/vendors) # log imported from math in global code block\n #score_id = int(log_result * 100)\n #phantom.debug(\"log_result: {}\".format(log_result))\n \n if score_id > 10:\n score_id = 10\n \n if category != None:\n categories = [cat.lower() for cat in category.values()]\n categories = list(set(categories))\n else:\n categories = []\n \n score = score_table[str(score_id)]\n\n # Attach final object\n normalize_score_url__url_score_object.append({'score': score, 'score_id': score_id, 'confidence': confidence, 'categories': categories})\n normalize_score_url__score.append(score)\n normalize_score_url__categories.append(categories)\n #phantom.debug(\"normalize_score_url__url_score_object: {}\".format(normalize_score_url__url_score_object))\n #phantom.debug(\"normalize_score_url__score: {}\".format(normalize_score_url__score))\n #phantom.debug(\"normalize_score_url__categories: {}\".format(normalize_score_url__categories))\n\n\n", |
| 476 | + "userCode": "\n # Write your custom code here...\n #phantom.debug(\"filtered_result_0_data___attributes_categories: {}\".format(filtered_result_0_data___data_attributes_results___category))\n #phantom.debug(\"filtered_result_0_summary: {}\".format(filtered_result_0_summary))\n #phantom.debug(\"filtered_result_1_data___scans: {}\".format(filtered_result_1_data___scans))\n #phantom.debug(\"url_detonation_result_item_0: {}\".format(url_detonation_result_item_0))\n score_table = {\n \"0\":\"Unknown\",\n \"1\":\"Very_Safe\",\n \"2\":\"Safe\",\n \"3\":\"Probably_Safe\",\n \"4\":\"Leans_Safe\",\n \"5\":\"May_not_be_Safe\",\n \"6\":\"Exercise_Caution\",\n \"7\":\"Suspicious_or_Risky\",\n \"8\":\"Possibly_Malicious\",\n \"9\":\"Probably_Malicious\",\n \"10\":\"Malicious\"\n }\n \n url_categories_list = filtered_result_0_data___attributes_categories\n url_summary_list = filtered_result_0_summary\n normalize_score_url__url_score_object = []\n normalize_score_url__score = []\n normalize_score_url__categories = []\n\n #for category, summary_data in zip(url_categories_list, url_summary_list):\n for category, summary_data in zip(url_categories_list, url_summary_list):\n \n # Set confidence based on percentage of vendors undetected\n # Reduce the confidence by percentage of vendors undetected.\n vendors = summary_data['harmless'] + summary_data['undetected'] + summary_data['malicious'] + summary_data['suspicious']\n confidence = 100 - int((summary_data['undetected']/vendors) * 100)\n \n #phantom.debug(\"vendors: {}\".format(vendors))\n #phantom.debug(\"confidence: {}\".format(confidence))\n\n # Normalize reputation on a 10 point scale based on number of malicious and suspicious divided by harmless vendors\n # This can be adjusted to include whatever logic is desired.\n suspect = summary_data['malicious'] + summary_data['suspicious']\n # If there are only harmless verdicts and no suspicious entries, set score_id to 1.\n if summary_data['harmless'] and not suspect:\n score_id = 1\n else:\n if suspect and vendors:\n # customize score calculation as desired\n log_result = log((suspect/vendors) * 100, 100) # log imported from math in global code block\n score_id = int(log_result * 10) + 3\n \n if score_id > 10:\n score_id = 10\n \n elif suspect == 0:\n score_id = 0\n \n if category != None:\n categories = [cat.lower() for cat in category.values()]\n categories = list(set(categories))\n else:\n categories = []\n \n score = score_table[str(score_id)]\n\n # Attach final object\n normalize_score_url__url_score_object.append({'score': score, 'score_id': score_id, 'confidence': confidence, 'categories': categories})\n normalize_score_url__score.append(score)\n normalize_score_url__categories.append(categories)\n #phantom.debug(\"normalize_score_url__url_score_object: {}\".format(normalize_score_url__url_score_object))\n #phantom.debug(\"normalize_score_url__score: {}\".format(normalize_score_url__score))\n #phantom.debug(\"normalize_score_url__categories: {}\".format(normalize_score_url__categories))\n\n\n", |
475 | 477 | "warnings": {},
|
476 | 478 | "x": 0,
|
477 | 479 | "y": 686
|
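The URL branch above applies the same log() guard and additionally normalizes the per-vendor categories before attaching them to the score object. A hedged sketch of that category handling follows; the input shape is assumed to be a {vendor: category_name} dict or None, as implied by the diff, and sorted() is used here only for deterministic output where the playbook uses list(set(...)).

def url_categories(category):
    # category is assumed to be a {vendor: category_name} dict, or None when absent
    if category is not None:
        # lower-case and de-duplicate before attaching to the score object
        return sorted({cat.lower() for cat in category.values()})
    return []

print(url_categories({"VendorA": "Phishing", "VendorB": "phishing", "VendorC": "News media"}))
# -> ['news media', 'phishing']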
|
580 | 582 | ],
|
581 | 583 | "playbook_type": "data",
|
582 | 584 | "python_version": "3",
|
583 | | - "schema": "5.0.9", |
584 | | - "version": "6.0.0.114895" |
| 585 | + "schema": "5.0.10", |
| 586 | + "version": "6.0.1.123902" |
585 | 587 | },
|
586 | | - "create_time": "2023-04-12T11:31:47.902551+00:00", |
| 588 | + "create_time": "2023-06-07T18:52:45.928084+00:00", |
587 | 589 | "draft_mode": false,
|
588 | 590 | "labels": [
|
589 | 591 | "*"
|
|