#!/usr/bin/python3
# Repository: DasPrombelBread/laneBOT-Whatsapp
# PLEASE READ EVERY LINE WHERE I COMMENTED "TODO"! YOU CAN SEARCH FOR IT WITH THE SEARCH TOOL OF YOUR TEXT EDITOR OR IDE!
# INFO
# This script was made by "DasPrombelBread" (Lane)!
# This program is licensed under the MIT license,
# so you can copy it and do whatever you want with it.
# Last updated: November 16, 2021
edition = "laneBOT aka Guenni aka Mika : WHATSAPP EDITION" # define which edition of the program it is
version = "5.3.08" # define the version
author = "Lane" # define the name of the creator
import sys  # installed by default
import os  # also installed by default
import subprocess  # installed by default
import random  # installed by default
from time import sleep  # time is also installed by default
from datetime import date, datetime  # installed by default
import pyautogui  # must be manually installed on most systems
import pyperclip  # must be manually installed (on most systems)
from pynput.keyboard import Key, Controller  # must be manually installed

pt = pyautogui  # short alias; the script uses both names interchangeably
keyboard = Controller()  # used by pynput to press single keys

conf_file = "/home/pi/timetable/conf.txt"  # set the location of the config file
with open(conf_file, 'r') as f:
    config = f.read()
print(config)
if "location=home" in config:
    location = "home"
elif "location=grandpa" in config:
    location = "grandpa"
else:
    print("LOCATION: Could not find correct arguments in " + conf_file + ", using `home` configuration...")
    location = "home"  # fall back to the default so the checks below never see an undefined name
    sleep(5)

pyperclip.copy("")  # empty the clipboard
automatic = "no"
lang = "de"  # set language to German by default
if location == "home":
print("at home")
chat_beginning = 775, 960
move_rel = 820, 1
logs = 450, 300
group1 = 450, 230
group2 = 450, 370
elif location == "grandpa":
print("at grandpa's")
chat_beginning = 505, 668
move_rel = 780, 1
logs = 300, 280
group1 = 300, 200
group2 = 300, 363
else:
print("could not find location, using home")
chat_beginning = 775, 960
move_rel = 820, 2
logs = 450, 300
group1 = 450, 230
group2 = 450, 370
def start_reloader():
    subprocess.Popen("bot_reloader")

start_reloader()

def start_whatsapp():
    subprocess.Popen("whatsapp")
    sleep(40)
    # write logs
    pt.moveTo(logs)
    sleep(.4)
    pt.click()
    sleep(2)
    pyperclip.copy("WhatsApp has been Started!")
    sleep(.2)
    pt.keyDown("ctrl")  # paste
    sleep(.08)
    pt.press("v")
    sleep(.08)
    pt.keyUp("ctrl")
    sleep(.14)
    pyautogui.press("enter")  # send the message
    sleep(.2)
    pt.press("enter")

start_whatsapp()
while True:  # a basic while loop; repeat the code below forever
    def bot_main():
        # pt.press("pagedown")  # press the "pagedown" key to go to the end of the chat
        time_now = datetime.now()
        date_now = datetime.now().strftime("%w")
        current_time = time_now.strftime("%H:%M:%S")  # set the time format

        def restart_wa():
            sleep(0.2)
            pt.keyDown("alt")
            sleep(0.2)
            pt.press("f4")
            sleep(0.2)
            pt.keyUp("alt")
            sleep(0.4)
            pyautogui.press("enter")
            sleep(0.5)
            sys.stdout.flush()
            os.execv(sys.argv[0], sys.argv)
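        # Note on restart_wa above: os.execv replaces the running interpreter
        # with a fresh copy of this script (same argv), so the whole bot
        # restarts in place. This relies on the script file being executable
        # with a shebang line; otherwise something like
        # os.execv(sys.executable, [sys.executable] + sys.argv) would be
        # needed -- that variant is a suggestion, not part of the original.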
        def timetable():
            # date_now says which day of the week it is: 1 = Monday, 2 = Tuesday, ...
            weekdays = {"1": "monday", "2": "tuesday", "3": "wednesday", "4": "thursday", "5": "friday"}
            day = weekdays.get(date_now, "weekend")
            if day == "weekend":
                print("Weekend!")
                print("Lesgo! (weekend)")
                path = '/home/pi/timetable/weekend.txt'  # define the path of the timetable file
            else:
                print("Lesgo!")
                path = '/home/pi/timetable/' + day + '.txt'  # define the path of the timetable file
            print("Path: " + path)  # print the path to the terminal
            with open(path, 'r') as f:
                timetablecontent = f.read()  # open the file
            pyperclip.copy(timetablecontent)  # copy the file contents to the clipboard
            sleep(.5)
            if automatic == "yes":
                print("starting automatically")
                pt.moveTo(group1)
                sleep(.3)
                pt.click()
                sleep(1.6)
            else:
                print("starting by command")
            pt.keyDown("ctrl")  # paste
            sleep(.08)
            pt.press("v")
            sleep(.08)
            pt.keyUp("ctrl")
            sleep(.2)
            pyautogui.press("enter")  # send the message
            pyperclip.copy("")  # empty the clipboard
if current_time.startswith("06:15:1"): # if current time starts with 06:30:1... am, start "timetable" function, else use the bot
if config.__contains__("holidays=true"):
random_count = random.randint(1,6)
if random_count == 4:
print("Timetable!")
sleep(10)
automatic = "yes"
timetable()
automatic = "no"
else:
print("Ferien!")
elif config.__contains__("holidays=false"):
print("Timetable!")
sleep(10)
automatic = "yes"
timetable()
automatic = "no"
else:
print("TIMETABLE: Could not find correct arguments in " + conf_file + " skipping posting timetable...")
sleep(10)
elif current_time.startswith("00:00:0"):
print("Restarting...")
sleep(10)
restart_wa()
elif current_time.startswith("05:30:0"):
print("Restarting...")
sleep(10)
restart_wa()
elif current_time.startswith("09:00:0"):
print("Restarting...")
sleep(10)
restart_wa()
elif current_time.startswith("11:00:0"):
print("Restarting...")
sleep(10)
restart_wa()
elif current_time.startswith("14:30:0"):
print("Restarting...")
sleep(10)
restart_wa()
elif current_time.startswith("17:00:0"):
print("Restarting...")
sleep(10)
restart_wa()
elif current_time.startswith("20:00:0"):
print("Restarting...")
sleep(10)
restart_wa()
else:
print("No timetable!")
        def copy():
            pt.moveTo(chat_beginning)
            sleep(0.6)
            pt.mouseDown()  # press down the left mouse button
            sleep(0.3)
            pt.moveRel(move_rel, duration=.22)
            sleep(0.2)
            pt.mouseUp()  # release the left mouse button
            sleep(0.2)
            pt.keyDown("ctrl")  # copy the message
            sleep(0.15)
            pt.press("c")
            sleep(0.15)
            pt.keyUp("ctrl")
            sleep(0.3)
            pt.click()  # click into the void to deselect the last message

        copy()
        whatmsg = pyperclip.paste()  # "whatmsg" is whatever is in the clipboard now (the last message)
        if whatmsg.startswith("."):
            lang = "de"
            print("German command input")
        elif whatmsg.startswith("!"):
            lang = "en"
            print("English command input")
        else:
            lang = "de"  # set back to the default
            print("Last message isn't a command, so no language could be set")
        def paste():  # paste the message
            sleep(.17)
            pt.keyDown("ctrl")  # paste
            sleep(.08)
            pt.press("v")
            sleep(.08)
            pt.keyUp("ctrl")
            sleep(.14)

        def bot_message():  # type the robot-emoji prefix into the text field
            sleep(.1)
            pt.press("a")  # press "a" to automatically get into the text field
            sleep(.1)
            pt.keyDown("backspace")  # delete the "a" and any other content that may be in the text field
            sleep(.1)
            pt.keyUp("backspace")
            sleep(.1)
            pt.keyDown("backspace")
            sleep(.1)
            pt.keyUp("backspace")
            sleep(.2)
            pyautogui.keyDown("shift")  # hold the "shift" key down
            sleep(.1)
            keyboard.press(":")  # press the ":" key (only works while shift is down)
            sleep(.1)
            pyautogui.keyUp("shift")  # release the "shift" key
            sleep(.1)
            pyautogui.typewrite("robot")  # write "robot" to call up the "robot" emoji
            sleep(.1)
            pyautogui.press("enter")  # press enter to insert the "robot" emoji
            sleep(.1)
            pyautogui.keyDown("shift")  # type a ":" again
            keyboard.press(":")
            pyautogui.keyUp("shift")

        def send():  # paste and send the message
            bot_message()
            sleep(.17)
            pt.keyDown("ctrl")  # paste
            sleep(.08)
            pt.press("v")
            sleep(.08)
            pt.keyUp("ctrl")
            sleep(.14)
            pyautogui.press("enter")  # send the message
if whatmsg == ".help" or whatmsg == ".hilfe" or whatmsg == "!help" : # if whatsapp-message (remember whatmsg is the content in the clipboard) equals to ".help" move to next line, else go to the next "if" ("elif") statement
print("Revieved message:", whatmsg) # print the recieved message to the console (terminal)
if lang == "de":
pyperclip.copy(" Hilfe: http://www.lane-bot.tk/ oder https://is.gd/guenni \n \nInfo: " + edition + ". Version: " + version + ". Made by " + author) # write the info-message (with an interval of 0.1 seconds between every key)
elif lang == "en":
pyperclip.copy(" Help: http://www.lane-bot.tk/ or https://is.gd/guenni \n \nInfo: " + edition + ". Version: " + version + ". Made by " + author) # write the info-message (with an interval of 0.1 seconds between every key)
else:
print("No correct language found, ignoring command...")
send()
elif whatmsg == ".about" or whatmsg == "!about": # if whatsapp-message (remember whatmsg is the content in the clipboard) equals to ".help" move to next line, else go to the next "if" ("elif") statement
print("Revieved message:", whatmsg) # print the recieved message to the console (terminal)
if lang == "de":
pyperclip.copy(" Der laneBOT ist aktuell ein WhatsApp- und Discord-Bot, welcher hauptsächlich dazu gedacht ist, diese Klassengruppe Informieren und Unterhalten zu können. Die Discord-Version kann aber auch auf jedem beliebigen anderen Server hinzugefügt werden (Invite-Link auf meiner Webseite). Die WhatsApp-Version richtet sich fast nur auf den Einsatz in meiner Schulklasse. \n\nEin kleiner Nachteil an dem WhatsApp-Bot ist, dass dieser denselben Account benutzt wie ich (Lane), allerdings sind die Nachrichten des Bots erkennbar durch einen Roboter-Emoji am Anfang jeder Nachricht. Weitere Informationen gibt es unter http://www.lane-bot.tk oder https://is.gd/guenni")
elif lang == "en":
pyperclip.copy(" Currently, the laneBOT a WhatsApp- and Discord-bot, which is mainly intendet to inform and entertain this group. The Discord-version can also be added to any other server (invite-link on my webseite). The WhatsApp-version is directed almost only for the use in my school class. \n\nA small disatvantage of the WhatsApp-bot is, that it uses the same account as I (Lane), but the messages which come from the bot are recognizable through a robot-emoji on the start of each message. More informations at http://www.lane-bot.tk or https://is.gd/guenni")
else:
print("No correct language found, ignoring command...")
send()
elif whatmsg == ".info" or whatmsg == "!info":
print("Revieved message:", whatmsg)
if lang == "de":
pyperclip.copy(" Info : " + edition + ". Version : " + version + ". Made by " + author + " Web: http://www.lane-bot.tk/ oder https://is.gd/guenni")
elif lang == "en":
pyperclip.copy(" Info : " + edition + ". Version : " + version + ". Made by " + author + " Web: http://www.lane-bot.tk/ oder https://is.gd/guenni")
else:
print("No correct language found, ignoring command...")
send()
elif whatmsg.startswith(".oracle") or whatmsg.startswith("!oracle"): # if whatsapp message starts with ".oracle" open this function
if whatmsg.__contains__("wann") or whatmsg.__contains__("when"): # if the message contains "wann" (german word for "when") do this, else move to next "elif"-statement
print("Revieved message:", whatmsg) # print the recieved message to the console (terminal)
sleep(.3)
if lang == "de":
answer = random.choice(["Immer", "Nie", "Gestern", "Vorgestern", "Übermorgen", "Morgen", "Wird nicht vorkommen", "Ist schon passiert"]) # choose a random word as answer
pyperclip.copy(' Deine antwort lautet > ' + answer) # type the answer
elif lang == "en":
answer = random.choice(["ever", "never", "yesterday", "the day before yesterday", "the day after tomorrow", "tomorrow", "will not happen", "already happened"]) # choose a random word as answer
pyperclip.copy(' Your answer is > ' + answer) # type the answer
else:
print("No correct language found, ignoring command...")
send()
elif whatmsg.startswith(".oracle wer") or whatmsg.startswith("!oracle who"): # if the message starts with ".oracle wer" ("wer" = german word for "who") do this, else move to "else"-statement
print("Recieved message:", whatmsg) # print the revieved messahe to the console (terminal)
sleep(.3)
if lang == "de":
answer = random.choice(["Lane", "Jonas", "Isabel", "Nora", "Nicole", "Anna", "Anna-Lena", "Cecile", "Dea", "Elias", "<NAME>.", "<NAME>.", "<NAME>.", "Lena", "Lina", "Louisa", "Luis", "Melissa", "Nico", "Noah", "Noel", "Sabrina", "Simon", "Sunny", "Toni", "Vincent", "Korbinian", "Paul", "Tobias"]) # choose a random name from my school class
pyperclip.copy(' Deine antwort lautet > ' + answer) # type the answer
elif lang == "en":
answer = random.choice(["Lane", "Jonas", "Isabel", "Nora", "Nicole", "Anna", "Anna-Lena", "Cecile", "Dea", "Elias", "<NAME>.", "<NAME>.", "<NAME>.", "Lena", "Lina", "Louisa", "Luis", "Melissa", "Nico", "Noah", "Noel", "Sabrina", "Simon", "Sunny", "Toni", "Vincent", "Korbinian", "Paul", "Tobias"]) # choose a random name from my school class
pyperclip.copy(' Your answer is > ' + answer) # type the answer
else:
print("No correct language found, ignoring command...")
send()
else:
print("Revieved message:", whatmsg) # if the message ditn't include "wann" or "wer", type one of these random answers
sleep(.3)
if lang == "de":
answer = random.choice(["Ja", "Nein", "Vielleicht", "Immer", "Nie", "Gestern", "Vorgestern", "Uebermorgen", "Wird nicht vorkommen", "Ist schon passiert", "Ja", "Nein", "Ich habe dich leider nicht verstanden.", "Ja", "Nein"]) # choose a random answer
pyperclip.copy(' Deine Antwort lautet > ' + answer) # type the answer
elif lang == "en":
answer = random.choice(["yes", "no", "maybe", "ever", "never", "yesterday", "the day before yesterday", "the day after tomorrow", "tomorrow", "will not happen", "has already happened", "yes", "no", "sorry, I didn't understand you", "yes", "no"]) # choose a random answer
pyperclip.copy(' Your answer is > ' + answer) # type the answer
else:
print("No correct language found, ignoring command...")
send()
elif whatmsg == ".spruch" or whatmsg == "!saying":
print("Recieved message:", whatmsg)
spruch = random.choice(["joke1", "joke2", "joke3", "joke4", "joke5", "joke6", "joke7", "joke8", "joke9", "joke10", "joke11", "joke12", "joke13", "joke14", "joke15", "joke16", "joke17", "joke18", "joke19", "joke20", "joke21", "joke22", "joke23", "joke24", "joke25", "joke26", "joke27", "joke28", "joke29", "joke30"])
if lang == "de":
if spruch == "joke1":
pyperclip.copy(" *Anmachspruch*: 'If she leaves for another, there always her mother'.") # copy the joke to clipboard
elif spruch == "joke2":
pyperclip.copy(" *Anmachspruch*: 'Ich bin neidisch auf dein Herz, weil es in dir pulsiert und ich nicht'")
elif spruch == "joke3":
pyperclip.copy(" *Anmachspruch*: Ich glaube deine Beine streiten sich, soll ich dazwischen gehen?")
elif spruch == "joke4":
pyperclip.copy(" *Anmachspruch*: Na Schnitte, schon belegt?")
elif spruch == "joke5":
pyperclip.copy(" *Anmachspruch*: Ey Praline schon ne Füllung?")
elif spruch == "joke6":
pyperclip.copy(" *Anmachspruch*: Ich bin neu in der Stadt kannst du mir den Weg zu dir nach hause zeigen?")
elif spruch == "joke7":
pyperclip.copy(" *Anmachspruch*: Sind deine Eltern Architekten, oder wieso bist du so gut gebaut?")
elif spruch == "joke8":
pyperclip.copy(" *Anmachspruch*: Wenn du eine Frucht wärst, wärst du ein Pfirsich weil du so süß bist.")
elif spruch == "joke9":
pyperclip.copy(" *Anmachspruch*: Bist du Gärtnerin? Weil immer wenn ich dich sehe wächst meine Gurke.")
elif spruch == "joke10":
pyperclip.copy(" *Anmachspruch:* Deine Lippen sehen sehr alleine aus maybe sollten die sich mal treffen?")
elif spruch == "joke11":
pyperclip.copy(" *Anmachspruch:* Deine Stimme ist echt geil aber noch geiler wäre es, wenn sie meinen namen stöhnen würde")
elif spruch == "joke12":
pyperclip.copy(" *Anmachspruch:* Lass es so machen: du gibst mir deinen Pulli, aber du musst ihn mir später auch wieder ausziehen")
elif spruch == "joke13":
pyperclip.copy(" *Anmachspruch:* Du siehst echt traurig aus... Aber mach dir keine Sorgen, meine Schultern sind immer frei für deine Beine")
elif spruch == "joke14":
pyperclip.copy(" *Anmachspruch:* Ich kenn mich zwar nicht so gut mit Gewürzen aus, aber du bist scharf")
elif spruch == "joke15":
pyperclip.copy(" *Anmachspruch:* Du magst schlafen, ich mag schlafen. Vielleicht sollten wir mal zusammen schlafen?")
elif spruch == "joke16":
pyperclip.copy(" *Anmachspruch:* Ich hab irgendwie mein Kissen verloren, darf ich zwischen deinen Beinen liegen?")
elif spruch == "joke17":
pyperclip.copy(" *Anmachspruch:* Ich heiße zwar nicht Manuel aber vielleicht bin ich ja dein Neuer-")
elif spruch == "joke18":
pyperclip.copy(" *Anmachspruch:* Ist irgendwo ein Atomkraftwerk explodiert oder warum hast du so eine krasse Ausstrahlung?")
elif spruch == "joke19":
pyperclip.copy(" *Anmachspruch:* Gold schmilzt bei 1064 Grad, Titan schmilzt bei 1668 Grad und ich schmelze bei deinem Anblick.")
elif spruch == "joke20":
pyperclip.copy(" *Anmachspruch:* Ich kann nicht sagen ob du schön bist. Wahre Schönheit kommt von Innen und in dir war ich noch nicht.")
elif spruch == "joke21":
pyperclip.copy(" *Anmachspruch:* Ich hab Spotify verklagt, weil du nicht unter den heißesten singles bist.")
elif spruch == "joke22":
pyperclip.copy(" *Anmachspruch:* Ich will dich jetzt nicht dumm anmachen, aber ich hätte nichts dagegen wenn du es machst.")
elif spruch == "joke23":
pyperclip.copy(" *Anmachspruch:* Wenn Corona dich noch nicht flachgelegt hat, kann ich es auch gerne tun.")
elif spruch == "joke24":
pyperclip.copy(" *Anmachspruch:* Hast du einen Namen oder kann ich dich direkt `Meins` nennen?")
elif spruch == "joke25":
pyperclip.copy(" *Anmachspruch:* Auch wenn ich in keiner Gewürzfabrik arbeite, erkenne ich, dass du scharf bist.")
elif spruch == "joke26":
pyperclip.copy(" *Anmachspruch:* Ich bin zwar kein Magier aber ich zerleg dich trotzdem in der Kiste")
elif spruch == "joke27":
pyperclip.copy(" *Anmachspruch:* Ich hab meine Decke verloren- darf ich mich heute Nacht an dich kuscheln?")
elif spruch == "joke28":
pyperclip.copy(" *Anmachspruch:* Du bist so heiß da braucht man sich nicht wundern warum die Gletscher schmelzen")
elif spruch == "joke29":
pyperclip.copy(" *Anmachspruch:* Wollen wir zusammen heute Nacht die Sterne anschauen? - Ich hol dir dann auch einen runter")
elif spruch == "joke30":
pyperclip.copy(" *Anmachspruch:* Wir sind wie eine Achterbahn, je schneller du wirst desto lauter werd ich")
elif lang == "en":
pyperclip.copy(" Sayings are not available in english.")
else:
print("No correct language found, ignoring command...")
send()
elif whatmsg == ".cat" or whatmsg == "!cat":
print("Recieved message:", whatmsg)
cat1 = "https://fh-site.github.io/else/laneBOT/images/cats/kadse1.jpg"
cat2 = "https://fh-site.github.io/else/laneBOT/images/cats/kadse2.jpg"
cat3 = "https://fh-site.github.io/else/laneBOT/images/cats/kadse3.jpg"
cat4 = "https://fh-site.github.io/else/laneBOT/images/cats/kadse4.jpg"
cat5 = "https://fh-site.github.io/else/laneBOT/images/cats/kadse5.jpg"
cat6 = "https://fh-site.github.io/else/laneBOT/images/cats/kadse6.jpg"
cat7 = "https://fh-site.github.io/else/laneBOT/images/cats/kadse7.jpg"
cat8 = "https://fh-site.github.io/else/laneBOT/images/cats/kadse8.jpg"
cat9 = "https://fh-site.github.io/else/laneBOT/images/cats/kadse9.jpg"
cat10 = "https://fh-site.github.io/else/laneBOT/images/cats/kadse10.jpg"
cat11 = "https://fh-site.github.io/else/laneBOT/images/cats/kadse11.jpg"
cat12 = "https://fh-site.github.io/else/laneBOT/images/cats/kadse12.jpg"
cat13 = "https://fh-site.github.io/else/laneBOT/images/cats/kadse13.jpg"
cat14 = "https://fh-site.github.io/else/laneBOT/images/cats/kadse14.jpg"
cat_list = random.choice([cat1, cat2, cat3, cat4, cat5, cat6, cat7, cat8, cat9, cat10, cat11, cat12, cat13, cat14])
if lang == "de":
pyperclip.copy(" Kadse: " + cat_list)
elif lang == "en":
pyperclip.copy(" Cet: " + cat_list)
else:
print("No correct language found, ignoring command...")
send()
elif whatmsg == ".dog" or whatmsg == "!dog":
print("Recieved message:", whatmsg)
dog1 = "https://fh-site.github.io/else/laneBOT/images/dogs/wuffwau1.jpg"
dog2 = "https://fh-site.github.io/else/laneBOT/images/dogs/wuffwau2.jpg"
dog3 = "https://fh-site.github.io/else/laneBOT/images/dogs/wuffwau3.jpg"
dog4 = "https://fh-site.github.io/else/laneBOT/images/dogs/wuffwau4.jpg"
dog5 = "https://fh-site.github.io/else/laneBOT/images/dogs/wuffwau5.jpg"
dog6 = "https://fh-site.github.io/else/laneBOT/images/dogs/wuffwau6.jpg"
dog7 = "https://fh-site.github.io/else/laneBOT/images/dogs/wuffwau7.jpg"
dog8 = "https://fh-site.github.io/else/laneBOT/images/dogs/wuffwau8.jpg"
dog9 = "https://fh-site.github.io/else/laneBOT/images/dogs/wuffwau9.jpg"
dog10 = "https://fh-site.github.io/else/laneBOT/images/dogs/wuffwau10.jpg"
dog11 = "https://fh-site.github.io/else/laneBOT/images/dogs/wuffwau11.jpg"
dog12 = "https://fh-site.github.io/else/laneBOT/images/dogs/wuffwau12.jpg"
dog13 = "https://fh-site.github.io/else/laneBOT/images/dogs/wuffwau13.jpg"
dog14 = "https://fh-site.github.io/else/laneBOT/images/dogs/wuffwau14.jpg"
dog_list = random.choice([dog1, dog2, dog3, dog4, dog5, dog6, dog7, dog8, dog9, dog10, dog11, dog12, dog13, dog14])
if lang == "de":
pyperclip.copy(" Hühn(d)chen: " + dog_list)
elif lang == "en":
pyperclip.copy(" Doggo: " + dog_list)
else:
print("No correct language found, ignoring command...")
send()
elif whatmsg.startswith(".wahrheit-oder-pflicht") or whatmsg.startswith(".truth-or-dare") or whatmsg.startswith(".wop") or whatmsg.startswith(".w-o-p") or whatmsg.startswith(".tod") or whatmsg.startswith(".t-o-d") or whatmsg.startswith("!truth-or-dare") or whatmsg.startswith("!wop") or whatmsg.startswith("!w-o-p") or whatmsg.startswith("!tod") or whatmsg.startswith("!t-o-d"):
print("Recieved message:", whatmsg)
random_name = random.choice(["Lane", "Jonas", "Isabel", "Nora", "Nicole", "Anna", "Anna-Lena", "Cecile", "Dea", "Elias", "<NAME>.", "<NAME>.", "<NAME>.", "Lena", "Lina", "Louisa", "Luis", "Melissa", "Nico", "Noah", "Noel", "Sabrina", "Simon", "Sunny", "Toni", "Vincent", "Korbinian", "Paul", "Tobias"])
if lang == "de":
truth1 = "*Wahrheit*: Was war als Kind deine Lieblingsserie?"
truth2 = "*Wahrheit*: Hast du jemals erfolgreich jemanden verführt?"
truth3 = "*Wahrheit*: Falls dein Partner 30kg zu nehmen würde, würdest du ihn/sie verlassen?"
truth4 = "*Wahrheit*: Würdest du mit jemandem für Geld Sex haben? Und wenn ja, wie viel würdest du verlangen?"
truth5 = "*Wahrheit*: Wie viel geld würdest du verlangen, um nackt für eine Kunst-Klasse zu posen?"
truth6 = "*Wahrheit*: Beschreibe detailliert welche Unterhose du gerade trägst"
truth7 = "*Wahrheit*: Glaubst du, du kannst mit " + random_name + " vier mal nachts Sex haben?"
truth8 = "*Wahrheit*: Würdest du in einem Porno mit machen wenn niemand dein Gesicht erkennen kann?"
truth9 = "*Wahrheit*: Hast du schon mal die ganze Nacht lange gefeiert und musstest am nächsten Tag in die Schule?"
truth10 = "*Wahrheit*: Würdest du mit " + random_name + " aus gehen?"
truth11 = "*Wahrheit*: Hast du dich schon einmal geprügelt? Und wenn ja, hast du gewonnen?"
truth12 = "*Wahrheit*: Was ist die gefährlichste Sache, die du je getan hast?"
truth13 = "*Wahrheit*: Falls du in einer Woche ein Kind bekommen würdest, wie würdest du es nennen?"
truth14 = "*Wahrheit*: Was hasst du am meisten an deiner am besten befreundeten Person?"
truth15 = "*Wahrheit*: Was hasst du am meisten an dir selbst?"
truth16 = "*Wahrheit*: Was hasst du am meisten an " + random_name + "?"
truth17 = "*Wahrheit*: Würdest du " + random_name + " für 100 Euro küssen?"
truth18 = "*Wahrheit*: Würdest du für eine Nacht lieber eine Person haben, die hot und dumm oder nicht-so-hot und schlau ist? Und was würdest du mit der Person machen?"
truth19 = "*Wahrheit*: Was ist dein dunkelstes Geheimnis?"
truth20 = "*Wahrheit*: Hast du schon einmal Hentai gesehen oder erotische Geschichten gelesen?"
truth21 = "*Wahrheit:* Wo rasierst du dich überall?"
truth22 = "*Wahrheit:* Schaust du vor dem Spülen in die Kloschüssel?"
truth23 = "*Wahrheit:* Wen hast du zuletzt geküsst?"
truth24 = "*Wahrheit:* Hast du schon einmal jemanden gemobbt?"
truth25 = "*Wahrheit:* Was war die letzte Lüge, die du jemandem erzählt hast?"
truth26 = "*Wahrheit:* Pinkelst du in der Dusche?"
truth27 = "*Wahrheit:* Wen aus der Klasse würdest du am ehesten küssen?"
truth28 = "*Wahrheit:* Was und mit wem war dein schlechtestes Date?"
truth29 = "*Wahrheit:* Was magst du am liebsten an dir?"
truth30 = "*Wahrheit:* Was war dein schlimmster Korb?"
truth31 = "*Wahrheit:* Was war das schlimmste, das du jemals jemandem angetan hast?"
truth32 = "*Wahrheit:* Was ist die schmutzigste Nachricht, die du je verschickt hast?"
truth33 = "*Wahrheit:* Was ist deine dunkelste Fantasie?"
truth34 = "*Wahrheit:* Schaust du Pornos?"
truth35 = "*Wahrheit:* Besitzt du ein Sexspielzeug?"
truth36 = "*Wahrheit:* Hattest du schonmal Sex-Fantasien mit jemandem aus unserer Klasse?"
truth37 = "*Wahrheit:* Vor was hast du am meisten Angst?"
truth38 = "*Wahrheit:* Welches Talent hättest du gerne?"
truth39 = "*Wahrheit:* Was isst du am liebsten?"
truth40 = "*Wahrheit:* Hast du schon einmal geraucht?"
truth41 = "*Wahrheit:* Was ist dein Lieblingsfilm?"
truth42 = "*Wahrheit:* Hast du schon einmal etwas gestohlen?"
truth43 = "*Wahrheit:* Wann hast du das letzte mal geweint?"
truth44 = "*Wahrheit:* Was ist dein Lieblings-Schulfach?"
truth45 = "*Wahrheit:* Welches Buch hast du als letztes gelesen?"
dare1 = "*Pflicht:* Wickle die nächste Person zu dir in Toilettenpapier ein"
dare2 = "*Pflicht:* Versuche, eine eigene Nase abzulecken"
dare3 = "*Pflicht:* Küsse die nächste Person rechts von dir auf den Mund"
dare4 = "*Pflicht:* Du musst drei Minuten lange alles mit 'Ja' beantworten"
dare5 = "*Pflicht:* Gehe zu" + random_name + " und gib der Person eine Ohrfeige"
dare6 = "*Pflicht:* Du wirst " + random_name + " von jetzt an so lange 'Schatz' nennen, bis die Person dir sagt dass du aufhören sollst."
dare7 = "*Pflicht:* Kratze dein linkes Ohr mit einem deiner Füße wie eine Katze."
dare8 = "*Pflicht:* Lege deine Hände für 2 Minuten auf den Kopf und bewege dich nicht."
dare9 = "*Pflicht:* Schreibe in den nächsten Chat unter dieser Gruppe 'es ist aus mit uns'"
dare10 = "*Pflicht:* Versuche deinen Ellenbogen abzulecken"
dare11 = "*Pflicht:* Hüpfe im Kreis um den nächsten Tisch, der genug Platz außenrum bietet."
dare12 = "*Pflicht:* Sei kreativ: male etwas lustiges auf den Unterarm von " + random_name + "!"
dare13 = "*Pflicht:* Setze für die nächsten 2 Wochen an deinem Handy einen Wecker auf 8 Uhr (morgens)"
dare14 = "*Pflicht:* Für die nächsten zwei Minuten musst du jeden satz mit '" + random.choice(["ich liebe dich", "Bratwurst"]) + "' beenden!"
dare15 = "*Pflicht:* Schreibe deiner Mutter, dass du eine Sechs in Mathe geschrieben hast."
dare16 = "*Pflicht:* "
dare17 = "*Pflicht:* "
truth_list = random.choice([truth1, truth2, truth3, truth4, truth5, truth6, truth7, truth8, truth9, truth10, truth11, truth12, truth13, truth14, truth15, truth16, truth17, truth18, truth19, truth20, truth21, truth22, truth23, truth24, truth25, truth26, truth27, truth28, truth29, truth30, truth31, truth32, truth33, truth34, truth35, truth36, truth37, truth38, truth39, truth40, truth41, truth42, truth43, truth44, truth45])
dare_list = random.choice([dare1, dare2, dare3, dare4, dare5, dare6, dare7, dare8, dare9, dare10, dare11, dare12, dare13, dare14, dare15])
if whatmsg.endswith(" truth") or whatmsg.endswith(" wahrheit") or whatmsg.endswith(" w"):
pyperclip.copy(" " + truth_list)
send()
elif whatmsg.endswith(" dare") or whatmsg.endswith(" pflicht") or whatmsg.endswith(" p"):
pyperclip.copy(" " + dare_list)
send()
elif whatmsg == ".truth-or-dare" or whatmsg == ".wahrheit-oder-pflicht" or whatmsg == ".w-o-p" or whatmsg == ".wop":
pyperclip.copy(" " + random.choice([truth_list, dare_list]))
send()
else:
pyperclip.copy(" truth-or-dare: *Unknown argument!*")
send()
elif lang == "en":
pyperclip.copy(" Truth or dare is not available in english jet.")
send()
else:
print("No correct language found, ignoring command...")
elif whatmsg == ".illegal" or whatmsg == "!illegal":
print("Recieved message:", whatmsg)
if lang == "de":
illegal = random.choice(["Toete einen Menschen", "Kauf ne Waffe auf einem Darknet Markt", "Sag dass jemand aus der Klasse tiere fckt"])
pyperclip.copy(" Richtig illegal : " + illegal)
elif lang == "en":
illegal = random.choice(["kill someone", "buy a weapon on a darknet market", "say that someone in the class is fcking animals"])
pyperclip.copy(" Very illegal : " + illegal)
else:
print("No correct language found, ignoring command...")
send()
elif whatmsg == ".restart_bot" or whatmsg == "!restart_bot":
print("Recieved message:", whatmsg)
if lang == "de":
pyperclip.copy(" Starte bot neu... (dieser Prozess kann bis zu zwei Minuten dauern)")
elif lang == "en":
pyperclip.copy(" Restarting bot... (this process may last up to two minutes)")
else:
print("No correct language found, ignoring command...")
send()
restart_wa()
elif whatmsg == ".online" or whatmsg == "!online":
print("Recieved message:", whatmsg)
if lang == "de":
pyperclip.copy(" Offensichtlich online, oder?")
elif lang == "en":
pyperclip.copy(" Obviously online, right?")
else:
print("No correct language found, ignoring command...")
send()
elif whatmsg == ".restartbot" or whatmsg == "!restartbot":
print("Recieved message:", whatmsg)
if lang == "de":
pyperclip.copy(" Du darfst diesen Befehl nicht verwenden!")
elif lang == "en":
pyperclip.copy(" You are not allowed to use this command!")
else:
print("No correct language found, ignoring command...")
send()
elif whatmsg == ".würfel" or whatmsg == "!roll" or whatmsg == "!cube" or whatmsg == "!dice":
print("Recieved message:", whatmsg)
if lang == "de":
pyperclip.copy(" Ich Würfle...")
send()
sleep(1)
cube_res = random.choice([" Ergebnis: 1", " Ergebnis: 2", " Ergebnis: 3", " Ergebnis: 4", "Ergebnis: 5", " Ergebnis: 6"])
pyperclip.copy(cube_res)
pyperclip.copy(cube_res)
elif lang == "en":
pyperclip.copy(" I roll...")
send()
sleep(1)
cube_res = random.choice([" Result: 1", " Result: 2", " Result: 3", " Result: 4", " Result: 5", " Result: 6"])
pyperclip.copy(cube_res)
pyperclip.copy(cube_res)
else:
print("No correct language found, ignoring command...")
send()
elif whatmsg.startswith(".say") or whatmsg.startswith("!say"):
print("Recieved message:", whatmsg)
str = whatmsg
print ("Original string: " + str)
if lang == "de":
res_str = str.replace('.say', '')
elif lang == "en":
res_str = str.replace('!say', '')
else:
print("No correct language found, ignoring command...")
print ("The string after removal of character: " + res_str)
sleep(.1)
pyperclip.copy(res_str)
send()
elif whatmsg.startswith(".love") or whatmsg.startswith("!love"):
str = whatmsg
print ("Original string: " + str)
if lang == "de":
res_str = str.replace('.love', '')
lovemeter_res = random.choice([" Zwischen 0 und 20 Prozent... eher nich so", " Zwischen 20 und 30 Prozent. Das koennte noch was werden", " 30 bis 50 Prozent, nicht super aber schon recht gut!", " 50 bis 60 Prozent. Ein Date koennte helfen!", " 60 bis 80 Prozent!! Ab zum Date!", " 80 bis 99 Prozent! Bist du dir sicher, dass die Personen nicht schon zusammen sind?", " FUCKING 100 PROZENT! AB AUF DIE HOCHZEIT!"])
elif lang == "en":
res_str = str.replace('!love', '')
lovemeter_res = random.choice([" Between 0 and 20 percent... not that good", " Between 20 and 30 percent. This could end up in something", " 30 up to 50 percent, not the best but already good!", " 50 up to 60 percent. A date may help!", " 60 up to 80 percent!! Go to your date!", " 80 up to 99 percent! Are you shure that those persons are not together already?", " FUCKING 100 PERCENT! LET'S GO TO THE WEDDING!"])
else:
print("No correct language found, ignoring command...")
print ("The string after removal of character: " + res_str)
if whatmsg == ".love":
print("Revieved message:", whatmsg)
sleep(.3)
pyperclip.copy(lovemeter_res)
send()
else:
print("two")
sleep(.1)
if lang == "de":
pyperclip.copy(" Ergebnis von" + res_str + " :" + lovemeter_res)
elif lang == "en":
pyperclip.copy(" Result of" + res_str + " :" + lovemeter_res)
else:
print("No correct language found, ignoring command...")
send()
elif whatmsg.startswith(".queer") or whatmsg.startswith("!queer"):
print("Revieved message:", whatmsg)
sleep(.2)
queer = random.randint(1,100)
#queer = random.choice(["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "70", "71", "72", "73", "74", "75", "76", "77", "78", "79", "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "90", "91", "92", "93", "94", "95", "96", "97", "98", "99", "100!!"])
sleep(.1)
if whatmsg == ".queer":
print("one")
print("Revieved message:", whatmsg)
sleep(.3)
if lang == "de":
pyperclip.copy(" Zu ", queer, " Prozent queer! ")
elif lang == "en":
pyperclip.copy(" To ", queer, " percent queer! ")
else:
print("No correct language found, ignoring command...")
paste()
pyautogui.keyDown("shift")
sleep(.1)
keyboard.press(":")
sleep(.1)
pyautogui.keyUp("shift")
sleep(.1)
pyautogui.typewrite("regenbogenflagge")
sleep(.4)
pyautogui.press("enter")
sleep(.7)
pyautogui.press("enter")
elif whatmsg == ".queer Simon" or whatmsg == ".queer simon" or whatmsg == "!queer Simon" or whatmsg == "!queer simon":
print("one")
print("Revieved message:", whatmsg)
sleep(.3)
pyperclip.copy(" Simon is fuckin homophobic!")
send()
elif whatmsg == ".queer Lane" or whatmsg == ".queer lane" or whatmsg == ".queer Iane" or whatmsg == "!queer Lane"or whatmsg == "!queer lane":
print("one")
print("Revieved message:", whatmsg)
sleep(.3)
if lang == "de":
pyperclip.copy(" Lane hat ganz klar 100 Prozent!")
elif lang == "en":
pyperclip.copy(" Lane has 100 percent, clearly!")
else:
print("No correct language found, ignoring command...")
paste()
pyautogui.keyUp("shift")
sleep(.1)
pyautogui.typewrite("regenbogenflagge")
sleep(.4)
pyautogui.press("enter")
sleep(.3)
pyautogui.press("enter")
else:
print("two")
str = whatmsg
print ("Original string: " + str)
if lang == "de":
res_str = str.replace('.queer', '')
sleep(.1)
pyperclip.copy(res_str + " ist zu " + queer + " Prozent queer! ")
elif lang == "en":
res_str = str.replace('!queer', '')
sleep(.1)
pyperclip.copy(res_str + " is to " + queer + " percent queer! ")
else:
print("No correct language found, ignoring command...")
print ("The string after removal of character: " + res_str)
paste()
pyautogui.keyDown("shift")
sleep(.1)
keyboard.press(":")
sleep(.1)
pyautogui.keyUp("shift")
sleep(.1)
pyautogui.typewrite("regenbogenflagge")
sleep(.4)
pyautogui.press("enter")
sleep(.7)
pyautogui.press("enter")
elif whatmsg.startswith(".paar") or whatmsg.startswith("!couple"):
lovemeter1 = random.choice(["Lane", "Jonas", "Isabel", "Nora", "Nicole", "Anna", "Anna-Lena", "Cecile", "Dea", "Elias", "<NAME>.", "<NAME>.", "<NAME>.", "Lena", "Lina", "Louisa", "Luis", "Melissa", "Nico", "Noah", "Noel", "Sabrina", "Simon", "Sunny", "Toni", "Vincent", "Korbinian", "Paul", "Tobias"])
lovemeter2 = random.choice(["Lane", "Jonas", "Isabel", "Nora", "Nicole", "Anna", "Anna-Lena", "Cecile", "Dea", "Elias", "<NAME>.", "<NAME>.", "<NAME>.", "Lena", "Lina", "Louisa", "Luis", "Melissa", "Nico", "Noah", "Noel", "Sabrina", "Simon", "Sunny", "Toni", "Vincent", "Korbinian", "Paul", "Tobias"])
if whatmsg == ".paar" or whatmsg == "!couple":
print("one")
print("Revieved message:", whatmsg)
sleep(.3)
if lang == "de":
pyperclip.copy(" Ein super Paar > " + lovemeter1 + ' und ' + lovemeter2)
elif lang == "en":
pyperclip.copy(" A perfect couple > " + lovemeter1 + ' and ' + lovemeter2)
else:
print("No correct language found, ignoring command...")
send()
else:
print("two")
str = whatmsg
print ("Original string: " + str)
if lang == "de":
res_str = str.replace('.paar', '')
print ("The string after removal of character: " + res_str)
sleep(.1)
pyperclip.copy(res_str + " wäre ein super paar mir " + lovemeter2)
elif lang == "en":
res_str = str.replace('!couple', '')
print ("The string after removal of character: " + res_str)
sleep(.1)
pyperclip.copy(res_str + " whould be a perfect couple with " + lovemeter2)
else:
print("No correct language found, ignoring command...")
send()
elif whatmsg.startswith(".stundenplan") or whatmsg.startswith(".st") or whatmsg.startswith(".sp") or whatmsg.startswith(".timetable") or whatmsg.startswith(".tt"):
def send_tt():
path = '/home/pi/timetable/' + day_individual + '.txt' # define the path of the timetable-file
print("Path: " + path) # print the path to terminal
timetablecontent = open(path, 'r').read() # open file
pyperclip.copy(timetablecontent) # copy file to clipboard
sleep(.8)
pt.keyDown("ctrl") # paste
sleep(.08) # paste
pt.press("v") # paste
sleep(.08) # paste
pt.keyUp("ctrl") # paste
sleep(.2)
pyautogui.press("enter") # send the message
pyperclip.copy("") # empty the clipboard
if whatmsg.endswith("mo") or whatmsg.endswith("montag") or whatmsg.endswith("Montag") or whatmsg.endswith("monday"):
day_individual = "monday"
send_tt()
elif whatmsg.endswith("di") or whatmsg.endswith("dienstag") or whatmsg.endswith("Dienstag") or whatmsg.endswith("tuesday"):
day_individual = "tuesday"
send_tt()
elif whatmsg.endswith("mi") or whatmsg.endswith("mittwoch") or whatmsg.endswith("Mittwoch") or whatmsg.endswith("wednesday"):
day_individual = "wednesday"
send_tt()
elif whatmsg.endswith("do") or whatmsg.endswith("donnerstag") or whatmsg.endswith("Donnerstag") or whatmsg.endswith("thursday"):
day_individual = "thursday"
send_tt()
elif whatmsg.endswith("fr") or whatmsg.endswith("freitag") or whatmsg.endswith("Freitag") or whatmsg.endswith("friday"):
day_individual = "friday"
send_tt()
elif whatmsg.endswith("sa") or whatmsg.endswith("samstag") or whatmsg.endswith("Samstag") or whatmsg.endswith("saturday") or whatmsg.endswith("so") or whatmsg.endswith("sonntag") or whatmsg.endswith("Sonntag") or whatmsg.endswith("sunday") or whatmsg.endswith("wo"):
day_individual = "weekend"
send_tt()
else:
automatic = "no"
timetable()
elif whatmsg.startswith(".cool") or whatmsg.startswith("!cool"): # if messgae starts with cool
sleep(.1)
if lang == "de":
cool = random.choice([" Nah, nich so cool nur 0 bis 10 Prozent!", " Nicht wirklich. Gerade mal 10 bis 20 Prozent!", " Ca. 20 bis 30 Prozent... koennte besser sein...", " Eig. relativ okay, 40 bis 50 Prozent", " Recht nice du liegst Zwischen 50 und 60 Prozent", " Ganz cool, 60 bis 70 Prozent.", " Ich will mit dir befreundet sein! Zu 70 bis 80 Prozent cool!", " Super duper über mega cool! 80 bis 90 Prozent!", " Ich will dich heiraten! 90 bis 99 Prozent!", " Du bist ein Gott. Du hast naehmlich 100 Prozent!"]) # choose a random answer (in german, obviously)
elif lang == "en":
cool = random.choice([" No, not so cool. Only 0 up to 10 percent!", " Not really. Only 10 up to 20 percent!", " Between 20 and 30 percent... could be better...", " Actually not that bad, 40 up to 50 percent", " You are okay, you are between 50 and 60 percent!", " Very nice, 60 up to 70 Prozent.", " I want to be friends with you! You are to 70 up to 80 percent cool!", " Super mega cool! 80 up to 90 percent!", " I want to marry you! 90 up to 99 percent!", " You are a god. You have the 100 percent!"]) # choose a random answer (in english)
else:
print("No correct language found, ignoring command...")
if whatmsg == ".cool" or whatmsg == "!cool": # if it exactly equals to ".cool" move on with the code down here
print("Revieved message:", whatmsg)
sleep(.3)
pyperclip.copy(cool) # write the random answer (defined above)
paste()
sleep(.2)
pyautogui.keyDown("shift") # add "sunglasses"-emoji
sleep(.1) # add "sunglasses"-emoji
keyboard.press(":") # add "sunglasses"-emoji
sleep(.1) # add "sunglasses"-emoji
pyautogui.keyUp("shift") # add "sunglasses"-emoji
sleep(.1) # add "sunglasses"-emoji
pyautogui.typewrite("sunglasses") # add "sunglasses"-emoji
sleep(.4) # add "sunglasses"-emoji
pyautogui.press("enter") # add "sunglasses"-emoji
sleep(.7)
pyautogui.press("enter")
elif whatmsg == ".cool Simon" or whatmsg == ".cool simon" or whatmsg == "!cool Simon" or whatmsg == "!cool simon": # else if the message ".cool" equals to ".cool Simon" move on with this code down here
print("Revieved message:", whatmsg)
sleep(.3)
pyperclip.copy(" Simon cant be cool, he is fuckin homophobic!") # say that simon is homophobic (that is actually true)
send()
else: # else write the message how cool somebody is with the name given in the message (if there is no name it will use the first method at this command)
str = whatmsg
print ("Original string: " + str) # print the original string to the console
if lang == "de":
res_str = str.replace('.cool', '') # remove the ".cool" in the string, so there will only stay the given name in it
print ("The string after removal of character: " + res_str) # print the result string (the name)
sleep(.1)
pyperclip.copy(" Ergebnis von" + res_str + " :" + cool) # type the result
elif lang == "en":
res_str = str.replace('!cool', '') # remove the ".cool" in the string, so there will only stay the given name in it
print ("The string after removal of character: " + res_str) # print the result string (the name)
sleep(.1)
pyperclip.copy(" Result of" + res_str + " :" + cool) # type the result
else:
print("No correct language found, ignoring command...")
paste()
sleep(.2)
pyautogui.keyDown("shift")
sleep(.1)
pt.press("space")
sleep(.1)
keyboard.press(":")
sleep(.1)
pyautogui.keyUp("shift")
sleep(.1)
pyautogui.typewrite("sunglasses")
sleep(.4)
pyautogui.press("enter")
sleep(.7)
pyautogui.press("enter")
elif whatmsg.startswith(".witz") or whatmsg == "!joke":
print("Recieved message:", whatmsg)
spruch = random.choice(["joke1", "joke2", "joke3", "joke4", "joke5", "joke6", "joke7", "joke8", "joke9", "joke10", "joke11"])
if lang == "de":
if spruch == "joke1":
pyperclip.copy(" Ich fuehle mich so haesslich, so fett. Ich brauche ein Kompliment. > Du hast eine gute Beobachtungsgabe.")
elif spruch == "joke2":
pyperclip.copy(" Der Gefaengnisdirektor: Was wird nur Ihr armer Vater dazu sagen, dass sie schon wieder hier sind? > Fragen Sie ihn doch selbst. Er sitzt nur drei Zellen weiter!")
elif spruch == "joke3":
pyperclip.copy(" Warum ist ein Iglu rund? > Damit die Huskys nicht in die Ecken scheißen.")
elif spruch == "joke4":
pyperclip.copy(" Polizeikontrolle! Alkohol, Drogen? > Verkaufen Sie auch Pommes?")
elif spruch == "joke5":
pyperclip.copy(" Alle Kinder sitzen auf der Walze außer Gunther der liegt drunter.")
elif spruch == "joke6":
pyperclip.copy(" Alle Kinder sitzen ums Lagerfeuer außer Brigitte die sitzt in der Mitte.")
elif spruch == "joke7":
pyperclip.copy(" Alle kinder schlafen tief und munter, außer Gunther, der holt sich einen runter.")
elif spruch == "joke8":
pyperclip.copy(" Alle rennen aus dem brennenden Kino, nur nicht Abdul, der klemmt im Klappstuhl.")
elif spruch == "joke9":
pyperclip.copy(" Alle Kinder laufen über den Friedhof außer Hagen der wird begraben.")
elif spruch == "joke10":
pyperclip.copy(" Alle Kinder blieben vor der Klippe stehen außer Peter der ging noch nen Meter.")
elif spruch == "joke11":
pyperclip.copy(" Alle Kinder haben Haare außer Thorsten der hat Borsten.")
elif lang == "en":
pyperclip.copy(" Jokes are not available in english jet!")
else:
print("No correct language found, ignoring command...")
send()
elif whatmsg == ".schwarzer-humor" or whatmsg == ".sh" or whatmsg == ".black-humor" or whatmsg == "!black-humor" or whatmsg == "!bh" or whatmsg == "!b-h":
print("Recieved message:", whatmsg)
spruch = random.choice(["joke1", "joke2", "joke3", "joke4", "joke5", "joke6", "joke7", "joke8", "joke9", "joke10", "joke11", "joke12", "joke13"])
if lang == "de":
if spruch == "joke1":
pyperclip.copy(" *ACHTUNG SCHWARZER HUMOR*: Schwarzer Humor ist Wasser fuer meine Seele. > Aber nicht fuer Afrika.") # copy the joke to clipboard
elif spruch == "joke2":
pyperclip.copy(" *ACHTUNG SCHWARZER HUMOR*: Wie nennt man es, wenn sich zwei obdachlose mit steinen abwerfen? > Kissenschlacht!")
elif spruch == "joke3":
pyperclip.copy(" *ACHTUNG SCHWARZER HUMOR*: Was sagt ein einarmiger, der Karten mischen soll? > Mischen Impossible")
elif spruch == "joke4":
pyperclip.copy(" *ACHTUNG SCHWARZER HUMOR*: Was macht ein Rollstuhlfahrer, wenn er sauer ist? > Am Rad drehen.")
elif spruch == "joke6":
pyperclip.copy(" *ACHTUNG SCHWARZER HUMOR*: Was hat ein Mann ohne Beine? > Eier aus Bodenhaltung")
elif spruch == "joke7":
pyperclip.copy(" *ACHTUNG SCHWARZER HUMOR:* Auf dem Mars wurde Wasser gefunden. > Mars 1 - Afrika 0")
elif spruch == "joke8":
pyperclip.copy(" *ACHTUNG SCHWARZER HUMOR:* Wie bekommt man am besten Kaugummi aus den Haaren? > Mit Krebs.")
elif spruch == "joke9":
pyperclip.copy(" *ACHTUNG SCHWARZER HUMOR:* Wieso kaufen sich Waisenkinder ein IPhone 11? > Weil es beim IPhone 11 keinen Home-Button gibt.")
elif spruch == "joke10":
pyperclip.copy(" *ACHTUNG SCHWARZER HUMOR:* Was ist der Unterschied zwischen einer Pizza und einem Emo? > Die Pizza schneidet sich nicht selbst.")
elif spruch == "joke11":
pyperclip.copy(" *ACHTUNG SCHWARZER HUMOR:* Wieso gibt es in Afrika keine Medikamente? > Weil man sie nach dem Essen einnehmen muss.")
elif spruch == "joke12":
pyperclip.copy(" *ACHTUNG SCHWARZER HUMOR:* Was steht auf dem Grabstein einer Putzfrau? > Sie kehrt nie wieder.")
elif spruch == "joke13":
pyperclip.copy(" *ACHTUNG SCHWARZER HUMOR:* Warum schlagen Frauen immer mit links zu? > Weil sie keine Rechte haben.")
elif lang == "en":
pyperclip.copy(" Black humor not available in english jet!")
else:
print("No correct language found, ignoring command...")
send()
elif whatmsg.startswith(".randomname") or whatmsg.startswith(".rn") or whatmsg.startswith("!randomname") or whatmsg.startswith("!rn"):
name1 = random.choice(["Lane", "Jonas", "Isabel", "Nora", "Nicole", "Anna", "Anna-Lena", "Cecile", "Dea", "Elias", "<NAME>.", "<NAME>.", "<NAME>.", "Lena", "Lina", "Louisa", "Luis", "Melissa", "Nico", "Noah", "Noel", "Sabrina", "Simon", "Sunny", "Toni", "Vincent", "Korbinian", "Paul", "Tobias"])
name2 = random.choice(["Lane", "Jonas", "Isabel", "Nora", "Nicole", "Anna", "Anna-Lena", "Cecile", "Dea", "Elias", "<NAME>.", "<NAME>.", "<NAME>.", "Lena", "Lina", "Louisa", "Luis", "Melissa", "Nico", "Noah", "Noel", "Sabrina", "Simon", "Sunny", "Toni", "Vincent", "Korbinian", "Paul", "Tobias"])
name3 = random.choice(["Lane", "Jonas", "Isabel", "Nora", "Nicole", "Anna", "Anna-Lena", "Cecile", "Dea", "Elias", "Elias L.", "Julian R.", "Julian S.", "Lena", "Lina", "Louisa", "Luis", "Melissa", "Nico", "Noah", "Noel", "Sabrina", "Simon", "Sunny", "Toni", "Vincent", "Korbinian", "Paul", "Tobias"])
name4 = random.choice(["Lane", "Jonas", "Isabel", "Nora", "Nicole", "Anna", "Anna-Lena", "Cecile", "Dea", "Elias", "Elias L.", "Julian R.", "<NAME>.", "Lena", "Lina", "Louisa", "Luis", "Melissa", "Nico", "Noah", "Noel", "Sabrina", "Simon", "Sunny", "Toni", "Vincent", "Korbinian", "Paul", "Tobias"])
name5 = random.choice(["Lane", "Jonas", "Isabel", "Nora", "Nicole", "Anna", "Anna-Lena", "Cecile", "Dea", "Elias", "Elias L.", "Julian R.", "Julian S.", "Lena", "Lina", "Louisa", "Luis", "Melissa", "Nico", "Noah", "Noel", "Sabrina", "Simon", "Sunny", "Toni", "Vincent", "Korbinian", "Paul", "Tobias"])
name6 = random.choice(["Lane", "Jonas", "Isabel", "Nora", "Nicole", "Anna", "Anna-Lena", "Cecile", "Dea", "Elias", "<NAME>.", "Julian R.", "<NAME>.", "Lena", "Lina", "Louisa", "Luis", "Melissa", "Nico", "Noah", "Noel", "Sabrina", "Simon", "Sunny", "Toni", "Vincent", "Korbinian", "Paul", "Tobias"])
name7 = random.choice(["Lane", "Jonas", "Isabel", "Nora", "Nicole", "Anna", "Anna-Lena", "Cecile", "Dea", "Elias", "<NAME>.", "Julian R.", "<NAME>.", "Lena", "Lina", "Louisa", "Luis", "Melissa", "Nico", "Noah", "Noel", "Sabrina", "Simon", "Sunny", "Toni", "Vincent", "Korbinian", "Paul", "Tobias"])
name8 = random.choice(["Lane", "Jonas", "Isabel", "Nora", "Nicole", "Anna", "Anna-Lena", "Cecile", "Dea", "Elias", "<NAME>.", "Julian R.", "<NAME>.", "Lena", "Lina", "Louisa", "Luis", "Melissa", "Nico", "Noah", "Noel", "Sabrina", "Simon", "Sunny", "Toni", "Vincent", "Korbinian", "Paul", "Tobias"])
name9 = random.choice(["Lane", "Jonas", "Isabel", "Nora", "Nicole", "Anna", "Anna-Lena", "Cecile", "Dea", "Elias", "<NAME>.", "Julian R.", "<NAME>.", "Lena", "Lina", "Louisa", "Luis", "Melissa", "Nico", "Noah", "Noel", "Sabrina", "Simon", "Sunny", "Toni", "Vincent", "Korbinian", "Paul", "Tobias"])
if lang == "de":
name_lang1 = "Name"
name_lang = "Namen"
elif lang == "en":
name_lang1 = "name"
name_lang = "names"
else:
print("No correct language found, ignoring command...")
if whatmsg == ".randomname" or whatmsg == ".rn" or whatmsg == "!randomname" or whatmsg == "!rn":
namelist = " 0 Arguments given. Expected 1. : Bitte gib eine Zahl an!"
elif whatmsg.endswith("1"):
namelist = " 1 " + name_lang1 + " : " + name1
elif whatmsg.endswith("2"):
namelist = " 2 " + name_lang + " : " + name1 + ", " + name2
elif whatmsg.endswith("3"):
namelist = " 3 " + name_lang + " : " + name1 + ", " + name2 + ", " + name3
elif whatmsg.endswith("4"):
namelist = " 4 " + name_lang + " : " + name1 + ", " + name2 + ", " + name3 + ", " + name4
elif whatmsg.endswith("5"):
namelist = " 5 " + name_lang + " : " + name1 + ", " + name2 + ", " + name3 + ", " + name4 + ", " + name5
elif whatmsg.endswith("6"):
namelist = " 6 " + name_lang + " : " + name1 + ", " + name2 + ", " + name3 + ", " + name4 + ", " + name5 + ", " + name6
elif whatmsg.endswith("7"):
namelist = " 7 " + name_lang + " : " + name1 + ", " + name2 + ", " + name3 + ", " + name4 + ", " + name5 + ", " + name6 + ", " + name7
elif whatmsg.endswith("8"):
namelist = " 8 " + name_lang + " : " + name1 + ", " + name2 + ", " + name3 + ", " + name4 + ", " + name5 + ", " + name6 + ", " + name7 + ", " + name8
elif whatmsg.endswith("9"):
namelist = " 9 " + name_lang + " : " + name1 + ", " + name2 + ", " + name3 + ", " + name4 + ", " + name5 + ", " + name6 + ", " + name7 + ", " + name8 + ", " + name9
print("Revieved message:", whatmsg)
sleep(.3)
pyperclip.copy(namelist)
send()
elif whatmsg.startswith(".ship") or whatmsg.startswith("!ship"):
lovemeter1 = random.choice(["Lane", "Jonas", "Isabel", "Nora", "Nicole", "Anna", "Anna-Lena", "Cecile", "Dea", "Isabel", "Nora", "Nicole", "Anna", "Anna-Lena", "Cecile", "Dea", "Elias", "<NAME>.", "<NAME>.", "<NAME>.", "Lena", "Lina", "Louisa", "Luis", "Melissa", "Lena", "Lina", "Louisa", "Luis", "Melissa", "Nico", "Noah", "Noel", "Sabrina", "Sabrina", "Simon", "Sunny", "Sunny", "Toni", "Vincent", "Korbinian", "Paul", "Tobias"])
lovemeter2 = random.choice(["Lane", "Jonas", "Isabel", "Nora", "Nicole", "Anna", "Anna-Lena", "Cecile", "Dea", "Isabel", "Nora", "Nicole", "Anna", "Anna-Lena", "Cecile", "Dea", "Elias", "<NAME>.", "<NAME>.", "<NAME>.", "Lena", "Lina", "Louisa", "Luis", "Melissa", "Lena", "Lina", "Louisa", "Luis", "Melissa", "Nico", "Noah", "Noel", "Sabrina", "Sabrina", "Simon", "Sunny", "Sunny", "Toni", "Vincent", "Korbinian", "Paul", "Tobias"])
if whatmsg == ".ship" or whatmsg == "!ship":
print("one")
print("Revieved message:", whatmsg)
sleep(.3)
if lang == "de":
pyperclip.copy(" Ich shippe " + lovemeter1 + ' mit ' + lovemeter2)
elif lang == "en":
pyperclip.copy(" I ship " + lovemeter1 + ' with ' + lovemeter2)
else:
print("No correct language found, ignoring command...")
send()
else:
print("two")
str = whatmsg
print ("Original string: " + str)
if lang == "de":
res_str = str.replace('.ship', '')
print ("The string after removal of character: " + res_str)
sleep(.1)
pyperclip.copy(res_str + " shippe ich mit " + lovemeter2)
elif lang == "en":
res_str = str.replace('!ship', '')
print ("The string after removal of character: " + res_str)
sleep(.1)
pyperclip.copy(" I ship" + res_str + " with " + lovemeter2)
else:
print("No correct language found, ignoring command...")
send()
def lovemeter_persons():
print("Revieved message:", whatmsg)
sleep(.3)
pyperclip.copy(" " + lovemeter_name + " passt super zu: " + lovemeter1)
send()
else:
print("No new messages!")
    def class1():
        pt.moveTo(group1)
        sleep(0.5)
        pt.click()
        sleep(.8)
        bot_main()

    def class2():
        pt.moveTo(group2)
        sleep(0.5)
        pt.click()
        sleep(.8)
        bot_main()

    class1()
    class2()
#!/usr/bin/env python
import unittest

from flattery import flatten, unflatten


class FunctionTestCase(unittest.TestCase):
    def __init__(self, **keywords):
        unittest.TestCase.__init__(self)
        self.args = []
        self.kwargs = {}
        for k, v in keywords.items():
            setattr(self, k, v)

    def runTest(self):
        self.assertEqual(self.func(*self.args, **self.kwargs), self.expected)

    def shortDescription(self):
        return self.name


# TODO add more coverage
# TODO add bi-directional tests of flatten()

TEST_UNFLATTEN = 0x1
TEST_FLATTEN = 0x2
TEST_BOTH = TEST_UNFLATTEN | TEST_FLATTEN

TRUTH = [
    ("empty", TEST_BOTH, {}, {}),
    ("empty key", TEST_BOTH, {'': 1}, {'': 1}),
    ("depth 2 path x 1", TEST_BOTH, {"a.b": 42}, {"a": {"b": 42}}),
    ("depth 2 path x 2", TEST_BOTH, {"a.b": 42, "c.d": "x"}, {"a": {"b": 42}, "c": {"d": "x"}}),
    ("depth 2 path x 2, overlap", TEST_BOTH, {"a.b": 42, "a.c": "x"}, {"a": {"b": 42, "c": "x"}}),
    ("simple list", TEST_BOTH, {"a.0": 1, "a.1": 2}, {"a": [1, 2]}),
    ("sparse list", TEST_BOTH, {"a.0": 1, "a.9": 10}, {"a": [1, None, None, None, None, None, None, None, None, 10]}),
    ("deep dict", TEST_BOTH, {"a.b.c.d.e": 1}, {"a": {"b": {"c": {"d": {"e": 1}}}}}),
    ("list under dict", TEST_BOTH, {"a.b.0": 1, "a.b.1": 2}, {"a": {"b": [1, 2]}}),
    ("dict under list x 1", TEST_BOTH, {"a.0.b": 1}, {"a": [{"b": 1}]}),
    ("dict under list x 2, overlap", TEST_BOTH, {"a.0.b": 1, "a.0.c": 2}, {"a": [{"b": 1, "c": 2}]}),
    ("deep list", TEST_BOTH, {"a.0.0.0.0": 42}, {"a": [[[[42]]]]}),
]
def main():
    suite = unittest.TestSuite()
    i = 0
    for name, mode, flat, unflat in TRUTH:
        if mode & TEST_FLATTEN:
            suite.addTest(FunctionTestCase(
                name="test %d flatten %s" % (i, name),
                func=flatten,
                args=[unflat],
                expected=flat,
            ))
            i += 1
        if mode & TEST_UNFLATTEN:
            suite.addTest(FunctionTestCase(
                name="test %d unflatten %s" % (i, name),
                func=unflatten,
                args=[flat],
                expected=unflat,
            ))
            i += 1
    runner = unittest.TextTestRunner(verbosity=2)
    results = runner.run(suite)
    if len(results.failures) or len(results.errors):
        return 1
    else:
        return 0


if __name__ == '__main__':
    import sys
    sys.exit(main())
from md_condition.extension import ConditionExtension


def makeExtension(**kwargs):
    return ConditionExtension(**kwargs)
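
# makeExtension is the hook Python-Markdown uses to load an extension by
# dotted module name. A minimal usage sketch, assuming the standard
# Python-Markdown extension API (the extension name string is inferred from
# the package layout above, so treat it as an assumption):
#
#     import markdown
#     md = markdown.Markdown(extensions=["md_condition.extension"])
#     html = md.convert(source_text)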
1727255 | """Hello world plugin."""
from phy import IPlugin, connect
class ExampleHelloPlugin(IPlugin):
def attach_to_controller(self, controller):
@connect(sender=controller)
def on_gui_ready(sender, gui):
"""This is called when the GUI and all objects are fully loaded.
This is to make sure that controller.supervisor is properly defined.
"""
@connect(sender=controller.supervisor)
def on_cluster(sender, up):
"""This is called every time a cluster assignment or cluster group/label
changes."""
print("Clusters update: %s" % up)
| StarcoderdataPython |
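
# To activate a plugin like this one, phy's customization docs describe
# listing it in the user config. A sketch -- the paths and attribute names
# follow the phy documentation but may differ between phy versions, so treat
# them as assumptions:
#
#     # in ~/.phy/phy_config.py
#     c = get_config()
#     c.Plugins.dirs = [r'/path/to/this/plugin/dir']
#     c.TemplateGUI.plugins = ['ExampleHelloPlugin']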
# File: pyvalidator/alpha.py
alpha = {
'en-US': '^[A-Z]+$',
'az-AZ': '^[A-VXYZÇƏĞİıÖŞÜ]+$',
'bg-BG': '^[А-Я]+$',
'cs-CZ': '^[A-ZÁČĎÉĚÍŇÓŘŠŤÚŮÝŽ]+$',
'da-DK': '^[A-ZÆØÅ]+$',
'de-DE': '^[A-ZÄÖÜß]+$',
'el-GR': '^[Α-ώ]+$',
'es-ES': '^[A-ZÁÉÍÑÓÚÜ]+$',
'fa-IR': '^[ابپتثجچحخدذرزژسشصضطظعغفقکگلمنوهی]+$',
'fi-FI': '^[A-ZÅÄÖ]+$',
'fr-FR': '^[A-ZÀÂÆÇÉÈÊËÏÎÔŒÙÛÜŸ]+$',
'it-IT': '^[A-ZÀÉÈÌÎÓÒÙ]+$',
'nb-NO': '^[A-ZÆØÅ]+$',
'nl-NL': '^[A-ZÁÉËÏÓÖÜÚ]+$',
'nn-NO': '^[A-ZÆØÅ]+$',
'hu-HU': '^[A-ZÁÉÍÓÖŐÚÜŰ]+$',
'pl-PL': '^[A-ZĄĆĘŚŁŃÓŻŹ]+$',
'pt-PT': '^[A-ZÃÁÀÂÄÇÉÊËÍÏÕÓÔÖÚÜ]+$',
'ru-RU': '^[А-ЯЁ]+$',
'sl-SI': '^[A-ZČĆĐŠŽ]+$',
'sk-SK': '^[A-ZÁČĎÉÍŇÓŠŤÚÝŽĹŔĽÄÔ]+$',
'sr-RS@latin': '^[A-ZČĆŽŠĐ]+$',
'sr-RS': '^[А-ЯЂЈЉЊЋЏ]+$',
'sv-SE': '^[A-ZÅÄÖ]+$',
'th-TH': r'^[ก-๐\s]+$',
'tr-TR': '^[A-ZÇĞİıÖŞÜ]+$',
'uk-UA': '^[А-ЩЬЮЯЄIЇҐі]+$',
'vi-VN': '^[A-ZÀÁẠẢÃÂẦẤẬẨẪĂẰẮẶẲẴĐÈÉẸẺẼÊỀẾỆỂỄÌÍỊỈĨÒÓỌỎÕÔỒỐỘỔỖƠỜỚỢỞỠÙÚỤỦŨƯỪỨỰỬỮỲÝỴỶỸ]+$',
'ku-IQ': '^[ئابپتجچحخدرڕزژسشعغفڤقکگلڵمنوۆھەیێيطؤثآإأكضصةظذ]+$',
'ar': '^[ءآأؤإئابةتثجحخدذرزسشصضطظعغفقكلمنهوىيًٌٍَُِّْٰ]+',
'he': '^[א-ת]+',
'fa': r'^[\'آاءأؤئبپتثجچحخدذرزژسشصضطظعغفقکگلمنوهةی\']+$',
'hi-IN': r'^[\u0900-\u0961]+[\u0972-\u097F]*$',
}
alphanumeric = {
'en-US': '^[0-9A-Z]+$',
'az-AZ': '^[0-9A-VXYZÇƏĞİıÖŞÜ]+$',
'bg-BG': '^[0-9А-Я]+$',
'cs-CZ': '^[0-9A-ZÁČĎÉĚÍŇÓŘŠŤÚŮÝŽ]+$',
'da-DK': '^[0-9A-ZÆØÅ]+$',
'de-DE': '^[0-9A-ZÄÖÜß]+$',
'el-GR': '^[0-9Α-ω]+$',
'es-ES': '^[0-9A-ZÁÉÍÑÓÚÜ]+$',
'fr-FR': '^[0-9A-ZÀÂÆÇÉÈÊËÏÎÔŒÙÛÜŸ]+$',
'fi-FI': '^[0-9A-ZÅÄÖ]+$',
'it-IT': '^[0-9A-ZÀÉÈÌÎÓÒÙ]+$',
'hu-HU': '^[0-9A-ZÁÉÍÓÖŐÚÜŰ]+$',
'nb-NO': '^[0-9A-ZÆØÅ]+$',
'nl-NL': '^[0-9A-ZÁÉËÏÓÖÜÚ]+$',
'nn-NO': '^[0-9A-ZÆØÅ]+$',
'pl-PL': '^[0-9A-ZĄĆĘŚŁŃÓŻŹ]+$',
'pt-PT': '^[0-9A-ZÃÁÀÂÄÇÉÊËÍÏÕÓÔÖÚÜ]+$',
'ru-RU': '^[0-9А-ЯЁ]+$',
'sl-SI': '^[0-9A-ZČĆĐŠŽ]+$',
'sk-SK': '^[0-9A-ZÁČĎÉÍŇÓŠŤÚÝŽĹŔĽÄÔ]+$',
'sr-RS@latin': '^[0-9A-ZČĆŽŠĐ]+$',
'sr-RS': '^[0-9А-ЯЂЈЉЊЋЏ]+$',
'sv-SE': '^[0-9A-ZÅÄÖ]+$',
'th-TH': r'^[ก-๙\s]+$',
'tr-TR': '^[0-9A-ZÇĞİıÖŞÜ]+$',
'uk-UA': '^[0-9А-ЩЬЮЯЄIЇҐі]+$',
'ku-IQ': '^[٠١٢٣٤٥٦٧٨٩0-9ئابپتجچحخدرڕزژسشعغفڤقکگلڵمنوۆھەیێيطؤثآإأكضصةظذ]+$',
'vi-VN': '^[0-9A-ZÀÁẠẢÃÂẦẤẬẨẪĂẰẮẶẲẴĐÈÉẸẺẼÊỀẾỆỂỄÌÍỊỈĨÒÓỌỎÕÔỒỐỘỔỖƠỜỚỢỞỠÙÚỤỦŨƯỪỨỰỬỮỲÝỴỶỸ]+$',
    'ar': '^[٠١٢٣٤٥٦٧٨٩0-9ءآأؤإئابةتثجحخدذرزسشصضطظعغفقكلمنهوىيًٌٍَُِّْٰ]+$',
    'he': '^[0-9א-ת]+$',
'fa': r'^[\'0-9آاءأؤئبپتثجچحخدذرزژسشصضطظعغفقکگلمنوهةی۱۲۳۴۵۶۷۸۹۰\']+$',
'hi-IN': r'^[\u0900-\u0963]+[\u0966-\u097F]*$',
}
decimal = {
'en-US': '.',
'ar': '٫',
}
english_locales = ['AU', 'GB', 'HK', 'IN', 'NZ', 'ZA', 'ZM']
for code in english_locales:
    locale = "en-{}".format(code)
    alpha[locale] = alpha['en-US']
    alphanumeric[locale] = alphanumeric['en-US']
    decimal[locale] = decimal['en-US']
arabic_locales = [
'AE', 'BH', 'DZ', 'EG', 'IQ', 'JO', 'KW', 'LB', 'LY',
'MA', 'QM', 'QA', 'SA', 'SD', 'SY', 'TN', 'YE',
]
for code in arabic_locales:
    locale = "ar-{}".format(code)
    alpha[locale] = alpha['ar']
    alphanumeric[locale] = alphanumeric['ar']
    decimal[locale] = decimal['ar']
farsi_locales = [
'IR', 'AF',
]
for code in farsi_locales:
    locale = "fa-{}".format(code)
    alpha[locale] = alpha['fa']
    alphanumeric[locale] = alphanumeric['fa']
    decimal[locale] = decimal['ar']  # Persian shares the Arabic decimal mark
dot_decimal = ['ar-EG', 'ar-LB', 'ar-LY']
for loc in dot_decimal:  # these Arabic locales use '.' despite the 'ar' default
    decimal[loc] = decimal['en-US']
comma_decimal = [
'bg-BG', 'cs-CZ', 'da-DK', 'de-DE', 'el-GR', 'en-ZM', 'es-ES', 'fr-CA', 'fr-FR', 'fi-FI',
'id-ID', 'it-IT', 'ku-IQ', 'hi-IN', 'hu-HU', 'nb-NO', 'nn-NO', 'nl-NL', 'pl-PL', 'pt-PT',
'ru-RU', 'sl-SI', 'sr-RS@latin', 'sr-RS', 'sv-SE', 'tr-TR', 'uk-UA', 'vi-VN',
]
for loc in comma_decimal:  # comma-decimal locales use ',' as the decimal separator
    decimal[loc] = ','
alpha['fr-CA'] = alpha['fr-FR']
alphanumeric['fr-CA'] = alphanumeric['fr-FR']
alpha['pt-BR'] = alpha['pt-PT']
alphanumeric['pt-BR'] = alphanumeric['pt-PT']
decimal['pt-BR'] = decimal['pt-PT']
alpha['pl-Pl'] = alpha['pl-PL']
alphanumeric['pl-Pl'] = alphanumeric['pl-PL']
decimal['pl-Pl'] = decimal['pl-PL']
alpha['fa-AF'] = alpha['fa']
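# Minimal self-check of the tables above (sketch; the real validators in this
# package add their own normalisation and case handling around these patterns):
if __name__ == '__main__':
    import re
    assert re.match(alpha['de-DE'], 'STRASSE')          # umlaut-aware Latin letters
    assert not re.match(alpha['de-DE'], 'STRASSE1')     # alpha rejects digits
    assert re.match(alphanumeric['ru-RU'], '0МОСКВА9')  # Cyrillic plus digits
    assert decimal['de-DE'] == ','                      # comma-decimal locale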
| StarcoderdataPython |
3344343 | <reponame>bruxy70/garbage_collection
"""Define constants used in garbage_collection."""
# Constants for garbage_collection.
# Base component constants
DOMAIN = "garbage_collection"
CALENDAR_NAME = "Garbage Collection"
SENSOR_PLATFORM = "sensor"
CALENDAR_PLATFORM = "calendar"
ATTRIBUTION = "Data is provided by garbage_collection."
CONFIG_VERSION = 5
ATTR_NEXT_DATE = "next_date"
ATTR_DAYS = "days"
ATTR_LAST_COLLECTION = "last_collection"
ATTR_LAST_UPDATED = "last_updated"
# Device classes
BINARY_SENSOR_DEVICE_CLASS = "connectivity"
DEVICE_CLASS = "garbage_collection__schedule"
# Configuration
CONF_SENSOR = "sensor"
CONF_ENABLED = "enabled"
CONF_FREQUENCY = "frequency"
CONF_MANUAL = "manual_update"
CONF_ICON_NORMAL = "icon_normal"
CONF_ICON_TODAY = "icon_today"
CONF_ICON_TOMORROW = "icon_tomorrow"
CONF_OFFSET = "offset"
CONF_EXPIRE_AFTER = "expire_after"
CONF_VERBOSE_STATE = "verbose_state"
CONF_FIRST_MONTH = "first_month"
CONF_LAST_MONTH = "last_month"
CONF_COLLECTION_DAYS = "collection_days"
CONF_WEEKDAY_ORDER_NUMBER = "weekday_order_number"
CONF_FORCE_WEEK_NUMBERS = "force_week_order_numbers"
CONF_WEEK_ORDER_NUMBER = "week_order_number" # Obsolete
CONF_DATE = "date"
CONF_PERIOD = "period"
CONF_FIRST_WEEK = "first_week"
CONF_FIRST_DATE = "first_date"
CONF_SENSORS = "sensors"
CONF_VERBOSE_FORMAT = "verbose_format"
CONF_DATE_FORMAT = "date_format"
# Defaults
DEFAULT_NAME = DOMAIN
DEFAULT_FIRST_MONTH = "jan"
DEFAULT_LAST_MONTH = "dec"
DEFAULT_FREQUENCY = "weekly"
DEFAULT_PERIOD = 1
DEFAULT_FIRST_WEEK = 1
DEFAULT_VERBOSE_STATE = False
DEFAULT_HOLIDAY_IN_WEEK_MOVE = False
DEFAULT_DATE_FORMAT = "%d-%b-%Y"
DEFAULT_VERBOSE_FORMAT = "on {date}, in {days} days"
# Icons
DEFAULT_ICON_NORMAL = "mdi:trash-can"
DEFAULT_ICON_TODAY = "mdi:delete-restore"
DEFAULT_ICON_TOMORROW = "mdi:delete-circle"
ICON = DEFAULT_ICON_NORMAL
# States
STATE_TODAY = "today"
STATE_TOMORROW = "tomorrow"
FREQUENCY_OPTIONS = [
"weekly",
"even-weeks",
"odd-weeks",
"every-n-weeks",
"every-n-days",
"monthly",
"annual",
"blank",
"group",
]
WEEKLY_FREQUENCY = ["weekly", "even-weeks", "odd-weeks"]
EXCEPT_ANNUAL_GROUP = [
"weekly",
"even-weeks",
"odd-weeks",
"every-n-weeks",
"every-n-days",
"monthly",
"blank",
]
EXCEPT_ANNUAL_GROUP_BLANK = [
"weekly",
"even-weeks",
"odd-weeks",
"every-n-weeks",
"every-n-days",
"monthly",
]
WEEKLY_DAILY_MONTHLY = ["every-n-weeks", "every-n-days", "monthly"]
WEEKLY_FREQUENCY_X = ["every-n-weeks"]
DAILY_FREQUENCY = ["every-n-days"]
DAILY_BLANK_FREQUENCY = ["blank", "every-n-days"]
MONTHLY_FREQUENCY = ["monthly"]
ANNUAL_GROUP_FREQUENCY = ["annual", "group"]
ANNUAL_FREQUENCY = ["annual"]
GROUP_FREQUENCY = ["group"]
BLANK_FREQUENCY = ["blank"]
MONTH_OPTIONS = [
"jan",
"feb",
"mar",
"apr",
"may",
"jun",
"jul",
"aug",
"sep",
"oct",
"nov",
"dec",
]
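# Sanity sketch of how the frequency buckets above relate (illustrative only):
#   set(WEEKLY_FREQUENCY) <= set(EXCEPT_ANNUAL_GROUP_BLANK) <= set(EXCEPT_ANNUAL_GROUP)
#   set(EXCEPT_ANNUAL_GROUP) | set(ANNUAL_GROUP_FREQUENCY) == set(FREQUENCY_OPTIONS)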
| StarcoderdataPython |
29945 | class SendResult:
    def __init__(self, result=None):
        # avoid the shared-mutable-default pitfall; treat a missing result as empty
        result = result if result is not None else {}
        self.successful = result.get('code') == '200'
        self.message_id = result.get('message_id')
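# Minimal usage sketch (payload shape inferred from the accessors above):
if __name__ == '__main__':
    ok = SendResult({'code': '200', 'message_id': 'msg-42'})
    assert ok.successful and ok.message_id == 'msg-42'
    assert not SendResult().successful  # missing/empty result -> not successful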
| StarcoderdataPython |
3366821 | <filename>Demo/liaoxf/do_listcompr.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
# List comprehensions
# Rule of thumb: only use comprehensions to create new lists, and keep them short
list_comp1 = [x * x - 1 for x in range(1, 21)]
print(list_comp1)
# Cartesian product, variant 1
list_comp2 = [m + n for m in 'ABC' for n in 'XYZ']
print(list_comp2)
# Cartesian product, variant 2 (Python ignores line breaks inside [], {} and ())
colors = ['black', 'white']
sizes = ['S', 'M', 'L']
t_shirts = [(color, size) for size in sizes
for color in colors]
print(t_shirts)
list_comp3 = [d for d in os.listdir('.')]
print(list_comp3)
dict1 = {'x': 'A', 'y': 'B', 'z': 'C'}
list_comp4 = [k + '=' + v for k, v in dict1.items()]
print(list_comp4)
list1 = ['Hello', 'World', 'IBM', 'Apple']
list_comp5 = [s.lower() for s in list1]
print(list_comp5)
list2 = ['Hello', 'World', 18, 'Apple', None]
list_comp6 = [s.lower() for s in list2 if isinstance(s, str)]
print(list_comp6)
| StarcoderdataPython |
3330824 | import logging
import os
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.layers as kl
from keras_radam import RAdam
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.utils import multi_gpu_model
# address some interface discrepancies when using tensorflow.keras
# hack to use the load_from_json in tf otherwise we get an exception
# adapted/modified hack from here:
# https://github.com/keras-team/keras-contrib/issues/488
if "slice" not in keras.backend.__dict__:
# this is a good indicator that we are using tensorflow.keras
print('using tensorflow, need to monkey patch')
try:
# at first try to monkey patch what we need
try:
tf.python.keras.backend.__dict__.update(
is_tensor=tf.is_tensor,
slice=tf.slice,
)
finally:
print('tf.python.backend.slice overwritten by monkey patch')
except Exception:
print('monkey patch failed, override methods')
# if that doesn't work we do a dirty copy of the code required
import tensorflow as tf
from tensorflow.python.framework import ops as tf_ops
def is_tensor(x):
return isinstance(x, tf_ops._TensorLike) or tf_ops.is_dense_tensor_like(x)
def slice(x, start, size):
            x_shape = keras.backend.int_shape(x)
            if (x_shape is not None) and (x_shape[0] is not None):
                len_start = keras.backend.int_shape(start)[0] if is_tensor(start) else len(start)
                len_size = keras.backend.int_shape(size)[0] if is_tensor(size) else len(size)
                if not (len(keras.backend.int_shape(x)) == len_start == len_size):
                    raise ValueError('The dimension and the size of indices should match.')
            return tf.slice(x, start, size)
def conv_layer(inputs, filters=16, f_size=(3, 3, 3), activation='elu', batch_norm=True, kernel_init='he_normal',
pad='same', bn_first=False, ndims=2):
"""
Wrapper for a 2/3D-conv layer + batchnormalisation
Either with Conv,BN,activation or Conv,activation,BN
:param inputs: numpy or tensor object batchsize,z,x,y,channels
:param filters: int, number of filters
:param f_size: tuple of int, filterzise per axis
:param activation: string, which activation function should be used
:param batch_norm: bool, use batch norm or not
:param kernel_init: string, keras enums for kernel initialisation
:param pad: string, keras enum how to pad, the conv
:param bn_first: bool, decide if we want to apply the BN before the conv. operation or afterwards
:param ndims: int, define the conv dimension
:return: a functional tf.keras conv block
"""
Conv = getattr(kl, 'Conv{}D'.format(ndims))
f_size = f_size[:ndims]
if bn_first:
conv1 = Conv(filters, f_size, kernel_initializer=kernel_init, padding=pad)(inputs)
conv1 = BatchNormalization(axis=-1)(conv1) if batch_norm else conv1
conv1 = Activation(activation)(conv1)
else:
conv1 = Conv(filters, f_size, activation=activation, kernel_initializer=kernel_init, padding=pad)(inputs)
conv1 = BatchNormalization(axis=-1)(conv1) if batch_norm else conv1
return conv1
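# Shape sketch for conv_layer (hypothetical 2D sizes): with pad='same',
#   x = tf.keras.Input((224, 224, 1)); y = conv_layer(x, filters=16, f_size=(3, 3), ndims=2)
# yields y of shape (None, 224, 224, 16); bn_first only reorders Conv/BN/activation.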
def downsampling_block(inputs, filters=16, f_size=(3, 3, 3), activation='elu', drop=0.3, batch_norm=True,
kernel_init='he_normal', pad='same', m_pool=(2, 2), bn_first=False, ndims=2):
"""
Create an 2D/3D-downsampling block for the u-net architecture
:param inputs: numpy or tensor input with batchsize,z,x,y,channels
:param filters: int, number of filters per conv-layer
:param f_size: tuple of int, filtersize per axis
:param activation: string, which activation function should be used
:param drop: float, define the dropout rate between the conv layers of this block
:param batch_norm: bool, use batch norm or not
:param kernel_init: string, keras enums for kernel initialisation
:param pad: string, keras enum how to pad, the conv
:param up_size: tuple of int, size of the upsampling filters, either by transpose layers or upsampling layers
:param bn_first: bool, decide if we want to apply the BN before the conv. operation or afterwards
:param ndims: int, define the conv dimension
:return: a functional tf.keras upsampling block
"""
m_pool = m_pool[:ndims]
pool = getattr(kl, 'MaxPooling{}D'.format(ndims))
conv1 = conv_layer(inputs=inputs, filters=filters, f_size=f_size, activation=activation, batch_norm=batch_norm,
kernel_init=kernel_init, pad=pad, bn_first=bn_first, ndims=ndims)
conv1 = Dropout(drop)(conv1)
conv1 = conv_layer(inputs=conv1, filters=filters, f_size=f_size, activation=activation, batch_norm=batch_norm,
kernel_init=kernel_init, pad=pad, bn_first=bn_first, ndims=ndims)
p1 = pool(m_pool)(conv1)
return (conv1, p1)
def upsampling_block(lower_input, conv_input, use_upsample=True, filters=16, f_size=(3, 3, 3), activation='elu',
drop=0.3, batch_norm=True, kernel_init='he_normal', pad='same', up_size=(2, 2), bn_first=False,
ndims=2):
"""
Create an upsampling block for the u-net architecture
    Each block consists of these layers: upsampling/transpose, concat, conv, dropout, conv
Either with "upsampling,conv" or "transpose" upsampling
:param lower_input: numpy input from the lower block: batchsize,z,x,y,channels
:param conv_input: numpy input from the skip layers: batchsize,z,x,y,channels
:param use_upsample: bool, whether to use upsampling or not
:param filters: int, number of filters per conv-layer
:param f_size: tuple of int, filtersize per axis
:param activation: string, which activation function should be used
:param drop: float, define the dropout rate between the conv layers of this block
:param batch_norm: bool, use batch norm or not
:param kernel_init: string, keras enums for kernel initialisation
:param pad: string, keras enum how to pad, the conv
:param up_size: tuple of int, size of the upsampling filters, either by transpose layers or upsampling layers
:param bn_first: bool, decide if we want to apply the BN before the conv. operation or afterwards
:param ndims: int, define the conv dimension
:return: a functional tf.keras upsampling block
"""
Conv = getattr(kl, 'Conv{}D'.format(ndims))
f_size = f_size[:ndims]
# use upsample&conv or transpose layer
if use_upsample:
UpSampling = getattr(kl, 'UpSampling{}D'.format(ndims))
deconv1 = UpSampling(size=up_size)(lower_input)
deconv1 = Conv(filters, f_size, padding=pad, kernel_initializer=kernel_init, activation=activation)(deconv1)
else:
ConvTranspose = getattr(kl, 'Conv{}DTranspose'.format(ndims))
deconv1 = ConvTranspose(filters, up_size, strides=up_size, padding=pad, kernel_initializer=kernel_init,
activation=activation)(lower_input)
deconv1 = concatenate([deconv1, conv_input])
deconv1 = conv_layer(inputs=deconv1, filters=filters, f_size=f_size, activation=activation, batch_norm=batch_norm,
kernel_init=kernel_init, pad=pad, bn_first=bn_first, ndims=ndims)
deconv1 = Dropout(drop)(deconv1)
deconv1 = conv_layer(inputs=deconv1, filters=filters, f_size=f_size, activation=activation, batch_norm=batch_norm,
kernel_init=kernel_init, pad=pad, bn_first=bn_first, ndims=ndims)
return deconv1
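# Skip-connection shape sketch for upsampling_block (hypothetical 2D sizes, filters=128):
#   lower_input (None, 14, 14, 256) --up x2 + conv--> (None, 28, 28, 128)
#   concat with conv_input (None, 28, 28, 128)   -->  (None, 28, 28, 256)
#   two conv layers reduce back to (None, 28, 28, 128)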
# Build U-Net model
def create_unet(config, metrics=None):
"""
create a 2D/3D u-net for image segmentation
:param config: Key value pairs for image size and other network parameters
:param metrics: list of tensorflow or keras compatible metrics
:returns compiled keras model
"""
inputs = Input((*config.get('DIM', [224, 224]), config.get('IMG_CHANNELS', 1)))
# define standard values according to convention over configuration paradigm
metrics = [keras.metrics.binary_accuracy] if metrics is None else metrics
activation = config.get('ACTIVATION', 'elu')
loss_f = config.get('LOSS_FUNCTION', keras.losses.categorical_crossentropy)
batch_norm = config.get('BATCH_NORMALISATION', False)
use_upsample = config.get('USE_UPSAMPLE', 'False') # use upsampling + conv3D or transpose layer
gpu_ids = config.get('GPU_IDS', '1').split(',') # used in previous tf versions
pad = config.get('PAD', 'same')
kernel_init = config.get('KERNEL_INIT', 'he_normal')
mask_channels = config.get("MAKS_Values", 4)
m_pool = config.get('M_POOL', (2, 2, 2))
f_size = config.get('F_SIZE', (3, 3, 3))
filters = config.get('FILTERS', 16)
drop_1 = config.get('DROPOUT_L1_L2', 0.3)
drop_3 = config.get('DROPOUT_L5', 0.5)
bn_first = config.get('BN_FIRST', False)
ndims = len(config.get('DIM', [224, 224]))
depth = config.get('DEPTH', 4)
# define two layers for the middle part and the final 1 by 1 layer
Conv = getattr(kl, 'Conv{}D'.format(ndims))
one_by_one = (1, 1, 1)[:ndims]
encoder = list()
decoder = list()
# increase the dropout through the layer depth
dropouts = list(np.linspace(drop_1, drop_3, depth))
# distribute the training with the mirrored data paradigm across multiple gpus if available, if not use gpu 0
strategy = tf.distribute.MirroredStrategy(devices=config.get('GPUS', ["/gpu:0"]))
tf.print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
# strategy for multi-GPU usage not necessary for single GPU usage
with strategy.scope():
# build the encoder
for l in range(depth):
if len(encoder) == 0:
# first block
input_tensor = inputs
else:
# all other blocks, use the max-pooled output of the previous encoder block
# remember the max-pooled output from the previous layer
input_tensor = encoder[-1][1]
encoder.append(
downsampling_block(inputs=input_tensor,
filters=filters,
f_size=f_size,
activation=activation,
drop=dropouts[l],
batch_norm=batch_norm,
kernel_init=kernel_init,
pad=pad,
m_pool=m_pool,
bn_first=bn_first,
ndims=ndims))
filters *= 2
# middle part
input_tensor = encoder[-1][1]
fully = conv_layer(inputs=input_tensor, filters=filters, f_size=f_size,
activation=activation, batch_norm=batch_norm, kernel_init=kernel_init,
pad=pad, bn_first=bn_first, ndims=ndims)
fully = Dropout(drop_3)(fully)
fully = conv_layer(inputs=fully, filters=filters, f_size=f_size,
activation=activation, batch_norm=batch_norm, kernel_init=kernel_init,
pad=pad, bn_first=bn_first, ndims=ndims)
# build the decoder
decoder.append(fully)
for l in range(depth):
# take the output of the previous decoder block and the output of the corresponding
# encoder block
input_lower = decoder[-1]
input_skip = encoder.pop()[0]
filters //= 2
decoder.append(
upsampling_block(lower_input=input_lower,
conv_input=input_skip,
use_upsample=use_upsample,
filters=filters,
f_size=f_size,
activation=activation,
drop=dropouts.pop(),
batch_norm=batch_norm,
up_size=m_pool,
bn_first=bn_first,
ndims=ndims))
outputs = Conv(config.get('MASK_CLASSES', mask_channels), one_by_one, activation='softmax')(decoder[-1])
outputs = tf.keras.layers.Activation('linear', dtype='float32')(outputs)
print('Outputs dtype: %s' % outputs.dtype.name)
model = Model(inputs=[inputs], outputs=[outputs])
model.compile(optimizer=get_optimizer(config), loss=loss_f, metrics=metrics)
return model
def create_unet_new(config, metrics=None):
"""
Does not work so far!!!
:param config:
:param metrics:
:return:
"""
unet = Unet(config, metrics)
loss_f = config.get('LOSS_FUNCTION', keras.losses.categorical_crossentropy)
unet.compile(optimizer=get_optimizer(config), loss=loss_f, metrics=metrics)
input_shape = config.get('DIM', [224, 224])
unet.build(input_shape)
return unet
class Unet(tf.keras.Model):
# tf.keras subclassing version of the u-net
# DOES NOT WORK SO FAR !!!!
def __init__(self, config, metrics=None):
name = "Unet"
super(Unet, self).__init__(name=name)
self.inputs = Input((*config.get('DIM', [224, 224]), config.get('IMG_CHANNELS', 1)))
self.config = config
# self.metrics = [keras.metrics.binary_accuracy] if metrics is None else metrics
self.activation = config.get('ACTIVATION', 'elu')
self.loss_f = config.get('LOSS_FUNCTION', keras.losses.categorical_crossentropy)
self.batch_norm = config.get('BATCH_NORMALISATION', False)
self.use_upsample = config.get('USE_UPSAMPLE', 'False') # use upsampling + conv3D or transpose layer
self.gpu_ids = config.get('GPU_IDS', '1').split(',')
self.pad = config.get('PAD', 'same')
self.kernel_init = config.get('KERNEL_INIT', 'he_normal')
self.m_pool = config.get('M_POOL', (2, 2, 2))
self.f_size = config.get('F_SIZE', (3, 3, 3))
self.filters = config.get('FILTERS', 16)
self.drop_1 = config.get('DROPOUT_L1_L2', 0.3)
self.drop_2 = config.get('DROPOUT_L3_L4', 0.4)
self.drop_3 = config.get('DROPOUT_L5', 0.5)
self.bn_first = config.get('BN_FIRST', False)
self.ndims = len(config.get('DIM', [224, 224]))
self.depth = config.get('DEPTH', 4)
self.Conv = getattr(kl, 'Conv{}D'.format(self.ndims))
self.one_by_one = (1, 1, 1)[:self.ndims]
self.encoder = list()
self.decoder = list()
# increase the dropout through the layer depth
self.dropouts = list(np.linspace(self.drop_1, self.drop_3, self.depth))
self.downsampling = downsampling_block
self.upsampling = upsampling_block
def __call__(self, inputs):
strategy = tf.distribute.MirroredStrategy(devices=self.config.get('GPUS', ["/gpu:0"]))
tf.print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
with strategy.scope():
# build the encoder
for l in range(self.depth):
if len(self.encoder) == 0:
input_tensor = inputs
else:
# take the max-pooled output from the previous layer
input_tensor = self.encoder[-1][1]
self.encoder.append(
downsampling_block(inputs=input_tensor, filters=self.filters, f_size=self.f_size,
activation=self.activation,
drop=self.dropouts[l],
batch_norm=self.batch_norm, kernel_init=self.kernel_init, pad=self.pad,
m_pool=self.m_pool,
bn_first=self.bn_first,
ndims=self.ndims))
self.filters *= 2
# middle part of the U-net
input_tensor = self.encoder[-1][1]
fully = conv_layer(inputs=input_tensor, filters=self.filters, f_size=self.f_size,
activation=self.activation,
batch_norm=self.batch_norm,
kernel_init=self.kernel_init, pad=self.pad, bn_first=self.bn_first, ndims=self.ndims)
fully = Dropout(self.drop_3)(fully)
fully = conv_layer(inputs=fully, filters=self.filters, f_size=self.f_size, activation=self.activation,
batch_norm=self.batch_norm,
kernel_init=self.kernel_init, pad=self.pad, bn_first=self.bn_first, ndims=self.ndims)
# build the decoder
self.decoder.append(fully)
for l in range(self.depth):
input_lower = self.decoder[-1]
input_skip = self.encoder.pop()[0]
self.filters //= 2
self.decoder.append(
upsampling_block(lower_input=input_lower, conv_input=input_skip, use_upsample=self.use_upsample,
filters=self.filters,
f_size=self.f_size, activation=self.activation, drop=self.dropouts.pop(),
batch_norm=self.batch_norm,
up_size=self.m_pool, bn_first=self.bn_first, ndims=self.ndims))
outputs = self.Conv(self.config.get('MASK_CLASSES', 4), self.one_by_one, activation='softmax')(
self.decoder[-1])
return outputs
def get_optimizer(config):
"""
Returns a keras.optimizer
default is an Adam optimizer
:param config: Key, value dict, Keys in upper letters
:return: tf.keras.optimizer
"""
opt = config.get('OPTIMIZER', 'Adam')
lr = config.get('LEARNING_RATE', 0.001)
ep = config.get('EPSILON', 1e-08)
de = config.get('DECAY', 0.0)
optimizer = None
if opt == 'Adagrad':
optimizer = keras.optimizers.Adagrad(lr=lr, epsilon=ep, decay=de)
elif opt == 'RMSprop':
optimizer = keras.optimizers.RMSprop(lr=lr, rho=0.9, epsilon=ep, decay=de)
elif opt == 'Adadelta':
optimizer = keras.optimizers.Adadelta(lr=lr, rho=0.95, epsilon=ep, decay=de)
elif opt == 'Radam':
optimizer = RAdam()
elif opt == 'Adam':
optimizer = keras.optimizers.Adam(lr=lr)
else:
optimizer = keras.optimizers.Adam(lr=lr)
logging.info('Optimizer: {}'.format(opt))
return optimizer
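# e.g. get_optimizer({'OPTIMIZER': 'RMSprop', 'LEARNING_RATE': 1e-4}) returns a
# keras RMSprop with lr=1e-4; unrecognised names fall back to Adam.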
def get_model(config=dict(), metrics=None):
"""
create a new model or load a pre-trained model
:param config: json file
:param metrics: list of tensorflow or keras metrics with gt,pred
:return: returns a compiled keras model
"""
# load a pre-trained model with config
if config.get('LOAD', False):
return load_pretrained_model(config, metrics)
# create a new 2D or 3D model with given config params
return create_unet(config, metrics)
def load_pretrained_model(config=None, metrics=None, comp=True, multigpu=False):
"""
Load a pre-trained keras model
for a given model.json file and the weights as h5 file
:param config: dict
:param metrics: keras or tensorflow loss function in a list
:param comp: bool, compile the model or not
    :param multigpu: bool, unwrap a multi-gpu-wrapped model and re-wrap it
:return:
"""
if config is None:
config = {}
if metrics is None:
metrics = [keras.metrics.binary_accuracy]
gpu_ids = config.get('GPU_IDS', '1').split(',')
loss_f = config.get('LOSS_FUNCTION', keras.losses.categorical_crossentropy)
# load model
logging.info('loading model from: {} .'.format(os.path.join(config.get('MODEL_PATH', './'), 'model.json')))
json = open(os.path.join(config.get('MODEL_PATH', './'), 'model.json')).read()
model = tf.keras.models.model_from_json(json)
#model = model_from_json(open(os.path.join(config.get('MODEL_PATH', './'), 'model.json')).read())
logging.info('loading model description')
try:
model.load_weights(os.path.join(config.get('MODEL_PATH', './'), 'checkpoint.h5'))
# make sure to work with wrapped multi-gpu models
if multigpu:
logging.info('multi GPU model, try to unpack the model and load weights again')
model = model.layers[-2]
model = multi_gpu_model(model, gpus=len(gpu_ids), cpu_merge=False) if (len(gpu_ids) > 1) else model
except Exception as e:
# some models are wrapped two times into a keras multi-gpu model, so we need to unpack it - hack
logging.info(str(e))
logging.info('multi GPU model, try to unpack the model and load weights again')
model = model.layers[-2]
model.load_weights(os.path.join(config.get('MODEL_PATH', './'), 'checkpoint.h5'))
logging.info('loading model weights')
if comp:
try:
# try to compile with given params, else use fallback parameters
model.compile(optimizer=get_optimizer(config), loss=loss_f, metrics=metrics)
except Exception as e:
            logging.error('Failed to compile with given parameters, use default values: {}'.format(str(e)))
model.compile(optimizer='adam', loss=loss_f, metrics=metrics)
logging.info('model {} loaded'.format(os.path.join(config.get('MODEL_PATH', './'), 'model.json')))
return model
def test_unet():
"""
Create a keras unet with a pre-configured config
:return: prints model summary file
"""
try:
from src.utils.utils_io import Console_and_file_logger
Console_and_file_logger('test 2d network')
except Exception as e:
print("no logger defined, use print")
config = {'GPU_IDS': '0', 'GPUS': ['/gpu:0'], 'EXPERIMENT': '2D/tf2/temp', 'ARCHITECTURE': '2D',
'DIM': [224, 224], 'DEPTH': 4, 'SPACING': [1.0, 1.0], 'M_POOL': [2, 2], 'F_SIZE': [3, 3],
'IMG_CHANNELS': 1, 'MASK_VALUES': [0, 1, 2, 3], 'MASK_CLASSES': 4, 'AUGMENT': False, 'SHUFFLE': True,
'AUGMENT_GRID': True, 'RESAMPLE': False, 'DATASET': 'GCN_2nd', 'TRAIN_PATH': 'data/raw/GCN_2nd/2D/train/',
'VAL_PATH': 'data/raw/GCN_2nd/2D/val/', 'TEST_PATH': 'data/raw/GCN_2nd/2D/val/',
'DF_DATA_PATH': 'data/raw/GCN_2nd/2D/df_kfold.csv', 'MODEL_PATH': 'models/2D/tf2/gcn/2020-03-26_17_25',
'TENSORBOARD_LOG_DIR': 'reports/tensorboard_logs/2D/tf2/gcn/2020-03-26_17_25',
'CONFIG_PATH': 'reports/configs/2D/tf2/gcn/2020-03-26_17_25',
'HISTORY_PATH': 'reports/history/2D/tf2/gcn/2020-03-26_17_25', 'GENERATOR_WORKER': 32, 'BATCHSIZE': 32,
'INITIAL_EPOCH': 0, 'EPOCHS': 150, 'EPOCHS_BETWEEN_CHECKPOINTS': 5, 'MONITOR_FUNCTION': 'val_loss',
'MONITOR_MODE': 'min', 'SAVE_MODEL_FUNCTION': 'val_loss', 'SAVE_MODEL_MODE': 'min', 'BN_FIRST': False,
'OPTIMIZER': 'Adam', 'ACTIVATION': 'elu', 'LEARNING_RATE': 0.001, 'DECAY_FACTOR': 0.5, 'MIN_LR': 1e-10,
'DROPOUT_L1_L2': 0.3, 'DROPOUT_L3_L4': 0.4, 'DROPOUT_L5': 0.5, 'BATCH_NORMALISATION': True,
'USE_UPSAMPLE': True, 'LOSS_FUNCTION': keras.losses.binary_crossentropy}
metrics = [tf.keras.losses.categorical_crossentropy]
model = get_model(config, metrics)
model.summary()
if __name__ == '__main__':
test_unet()
| StarcoderdataPython |
83072 | <reponame>hackthevalley/hack-the-back
from django.core.exceptions import ValidationError
from django.utils.translation import gettext as _
from hacktheback.messenger.utils import render_mjml
def validate_mjml(value):
rendered = render_mjml(value)
if rendered is None:
raise ValidationError(_("The value is not MJML."))
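# Usage sketch (relies on render_mjml returning None for invalid markup, as
# checked above; the sample strings are illustrative):
#   validate_mjml('<mjml><mj-body></mj-body></mjml>')  # passes silently
#   validate_mjml('not mjml')                          # raises ValidationError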
| StarcoderdataPython |
66381 | """
A :class:`~glue.core.subset_group.SubsetGroup` unites a group of
:class:`~glue.core.subset.Subset` instances together with a consistent state,
label, and style.
While subsets are internally associated with particular datasets, it's
confusing for the user to juggle multiple similar or identical
subsets, applied to different datasets. Because of this, the GUI
manages SubsetGroups, and presents each group to the user as a single
entity. The individual subsets are held in-sync by the SubsetGroup.
Client code should *only* create Subset Groups via
DataCollection.new_subset_group. It should *not* call Data.add_subset
or Data.new_subset directly
"""
from __future__ import absolute_import, division, print_function
from warnings import warn
from glue.external import six
from glue.core.contracts import contract
from glue.core.message import (DataCollectionAddMessage,
DataCollectionDeleteMessage)
from glue.core.visual import VisualAttributes
from glue.core.hub import HubListener
from glue.utils import Pointer
from glue.core.subset import SubsetState
from glue.core import Subset
from glue.config import settings
__all__ = ['GroupedSubset', 'SubsetGroup']
class GroupedSubset(Subset):
"""
A member of a SubsetGroup, whose internal representation
is shared with other group members
"""
subset_state = Pointer('group.subset_state')
label = Pointer('group.label')
def __init__(self, data, group):
"""
:param data: :class:`~glue.core.data.Data` instance to bind to
:param group: :class:`~glue.core.subset_group.SubsetGroup`
"""
# We deliberately don't call Subset.__init__ here because we don't want
# to set e.g. the subset state, color, transparency, etc. Instead we
# just want to defer to the SubsetGroup for these.
self._broadcasting = False # must be first def
self.group = group
self.data = data
self.label = group.label # trigger disambiguation
@property
def style(self):
return self.group.style
@property
def subset_group(self):
        return self.group
@property
def verbose_label(self):
return "%s (%s)" % (self.label, self.data.label)
def __eq__(self, other):
return other is self
# In Python 3, if __eq__ is defined, then __hash__ has to be re-defined
if six.PY3:
__hash__ = object.__hash__
def __gluestate__(self, context):
return dict(group=context.id(self.group),
style=context.do(self.style))
@classmethod
def __setgluestate__(cls, rec, context):
dummy_grp = SubsetGroup() # __init__ needs group.label
self = cls(None, dummy_grp)
yield self
self.group = context.object(rec['group'])
class SubsetGroup(HubListener):
def __init__(self, color=settings.SUBSET_COLORS[0], alpha=0.5, label=None, subset_state=None):
"""
Create a new empty SubsetGroup
Note: By convention, SubsetGroups should be created via
        DataCollection.new_subset_group.
"""
self.subsets = []
if subset_state is None:
self.subset_state = SubsetState()
else:
self.subset_state = subset_state
self.label = label
self.style = VisualAttributes(parent=self)
self.style.markersize *= 2.5
self.style.linewidth *= 2.5
self.style.color = color
self.style.alpha = alpha
@contract(data='isinstance(DataCollection)')
def register(self, data):
"""
Register to a :class:`~glue.core.data_collection.DataCollection`
This is called automatically by
:meth:`glue.core.data_collection.DataCollection.new_subset_group`
"""
self.register_to_hub(data.hub)
# add to self, then register, so fully populated by first
# broadcast
for d in data:
s = GroupedSubset(d, self)
self.subsets.append(s)
for d, s in zip(data, self.subsets):
d.add_subset(s)
def paste(self, other_subset):
"""paste subset state from other_subset onto self """
state = other_subset.subset_state.copy()
self.subset_state = state
def _add_data(self, data):
# add a new data object to group
s = GroupedSubset(data, self)
data.add_subset(s)
self.subsets.append(s)
def _remove_data(self, data):
# remove a data object from group
for s in list(self.subsets):
if s.data is data:
self.subsets.remove(s)
def register_to_hub(self, hub):
hub.subscribe(self, DataCollectionAddMessage,
lambda x: self._add_data(x.data))
hub.subscribe(self, DataCollectionDeleteMessage,
lambda x: self._remove_data(x.data))
@property
def style(self):
return self._style
@style.setter
def style(self, value):
self._style = value
@contract(item='string')
def broadcast(self, item):
for s in self.subsets:
s.broadcast(item)
def __setattr__(self, attr, value):
# We terminate early here if the value is not None but hasn't changed
# to avoid broadcasting a message after.
if value is not None and value == getattr(self, attr, None):
return
object.__setattr__(self, attr, value)
if attr in ['subset_state', 'label', 'style']:
self.broadcast(attr)
def __gluestate__(self, context):
return dict(label=self.label,
state=context.id(self.subset_state),
style=context.do(self.style),
subsets=list(map(context.id, self.subsets)))
@classmethod
def __setgluestate__(cls, rec, context):
result = cls()
yield result
result.subset_state = context.object(rec['state'])
result.label = rec['label']
result.style = context.object(rec['style'])
result.style.parent = result
result.subsets = list(map(context.object, rec['subsets']))
def __and__(self, other):
return self.subset_state & other.subset_state
def __or__(self, other):
return self.subset_state | other.subset_state
def __xor__(self, other):
return self.subset_state ^ other.subset_state
def __invert__(self):
return ~self.subset_state
def coerce_subset_groups(collect):
"""
If necessary, reassign non-grouped subsets in a DataCollection
into SubsetGroups.
This is used to support DataCollections saved with
version 1 of glue.core.state.save_data_collection
"""
for data in collect:
for subset in data.subsets:
if not isinstance(subset, GroupedSubset):
warn("DataCollection has subsets outside of "
"subset groups, which are no longer supported. "
"Moving to subset groups")
subset.delete()
grp = collect.new_subset_group()
grp.subset_state = subset.subset_state
grp.style = subset.style
grp.label = subset.label
| StarcoderdataPython |
1776826 | # encoding: utf-8
# author: BrikerMan
# contact: <EMAIL>
# blog: https://eliyar.biz
# file: test_processor.py
# time: 2019-05-23 17:02
import os
import time
import logging
import tempfile
import unittest
import numpy as np
import random
from kashgari import utils
from kashgari.processors import ClassificationProcessor, LabelingProcessor, ScoringProcessor
from kashgari.corpus import SMP2018ECDTCorpus, ChineseDailyNerCorpus
from kashgari.tasks.classification import BiGRU_Model
ner_train_x, ner_train_y = ChineseDailyNerCorpus.load_data('valid')
class_train_x, class_train_y = SMP2018ECDTCorpus.load_data('valid')
sample_train_x = [
list('语言学(英语:linguistics)是一门关于人类语言的科学研究'),
list('语言学(英语:linguistics)是一门关于人类语言的科学研究'),
list('语言学(英语:linguistics)是一门关于人类语言的科学研究'),
list('语言学包含了几种分支领域。'),
list('在语言结构(语法)研究与意义(语义与语用)研究之间存在一个重要的主题划分'),
]
sample_train_y = [['b', 'c'], ['a'], ['a', 'c'], ['a', 'b'], ['c']]
sample_eval_x = [
list('语言学是一门关于人类语言的科学研究。'),
list('语言学包含了几种分支领域。'),
list('在语言结构研究与意义研究之间存在一个重要的主题划分。'),
list('语法中包含了词法,句法以及语音。'),
list('语音学是语言学的一个相关分支,它涉及到语音与非语音声音的实际属性,以及它们是如何发出与被接收到的。'),
list('与学习语言不同,语言学是研究所有人类语文发展有关的一门学术科目。'),
list('在语言结构(语法)研究与意义(语义与语用)研究之间存在一个重要的主题划分'),
]
sample_eval_y = [['b', 'c'], ['a'], ['a', 'c'], ['a', 'b'], ['c'], ['b'], ['a']]
class TestLabelingProcessor(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.processor = LabelingProcessor()
cls.processor.analyze_corpus(ner_train_x, ner_train_y)
def test_process_data(self):
for i in range(3):
vector_x = self.processor.process_x_dataset(ner_train_x[:20], max_len=12)
assert vector_x.shape == (20, 12)
subset_index = np.random.randint(0, len(ner_train_x), 30)
vector_x = self.processor.process_x_dataset(ner_train_x, max_len=12, subset=subset_index)
assert vector_x.shape == (30, 12)
vector_y = self.processor.process_y_dataset(ner_train_y[:15], max_len=15)
assert vector_y.shape == (15, 15, len(self.processor.label2idx))
target_y = [seq[:15] for seq in ner_train_y[:15]]
res_y = self.processor.reverse_numerize_label_sequences(vector_y.argmax(-1), lengths=np.full(15, 15))
logging.info(f"target_y: {target_y}")
logging.info(f"res_y: {res_y}")
assert target_y == res_y
self.processor.process_x_dataset(ner_train_x[:9], subset=[1, 2, 3])
self.processor.process_y_dataset(ner_train_y[:9], subset=[1, 2, 3])
class TestClassificationProcessor(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.processor = ClassificationProcessor()
cls.processor.analyze_corpus(class_train_x, class_train_y)
def test_process_data(self):
for i in range(3):
vector_x = self.processor.process_x_dataset(class_train_x[:20], max_len=12)
assert vector_x.shape == (20, 12)
subset_index = np.random.randint(0, len(class_train_x), 30)
vector_x = self.processor.process_x_dataset(class_train_x, max_len=12, subset=subset_index)
assert vector_x.shape == (30, 12)
vector_y = self.processor.process_y_dataset(class_train_y[:15], max_len=15)
assert vector_y.shape == (15, len(self.processor.label2idx))
res_y = self.processor.reverse_numerize_label_sequences(vector_y.argmax(-1))
assert class_train_y[:15] == res_y
def test_multi_label_processor(self):
p = ClassificationProcessor(multi_label=True)
p.analyze_corpus(sample_train_x, sample_train_y)
assert len(p.label2idx) == 3
print(p.process_x_dataset(sample_train_x))
print(p.process_y_dataset(sample_train_y))
def test_load(self):
model_path = os.path.join(tempfile.gettempdir(), str(time.time()))
model = BiGRU_Model()
model.fit(class_train_x, class_train_y, epochs=1)
model.save(model_path)
processor = utils.load_processor(model_path)
assert processor.token2idx == model.embedding.processor.token2idx
assert processor.label2idx == model.embedding.processor.label2idx
assert processor.__class__ == model.embedding.processor.__class__
process_x_0 = processor.process_x_dataset(class_train_x[:10])
process_x_1 = model.embedding.process_x_dataset(class_train_x[:10])
assert np.array_equal(process_x_0, process_x_1)
class TestScoringProcessor(unittest.TestCase):
def test_build_dict(self):
x = sample_train_x
y = [random.random() for _ in range(len(x))]
p = ScoringProcessor()
p.analyze_corpus(x, y)
assert p.output_dim == 1
y = [[random.random(), random.random(), random.random()] for _ in range(len(x))]
p = ScoringProcessor()
p.analyze_corpus(x, y)
assert p.output_dim == 3
y = np.random.random((len(x), 3))
p = ScoringProcessor()
p.analyze_corpus(x, y)
print(p.output_dim)
assert p.output_dim == 3
if __name__ == "__main__":
print("Hello world")
| StarcoderdataPython |
1701956 |
class StockItem:
def __init__(self, date, open, high, low, close, volume, transation):
self.date = date
self.open = (int)(open*100)
self.high = (int)(high*100)
self.low = (int)(low*100)
self.close = (int)(close*100)
self.volume = (int)(volume)
self.transation = (int)(transation)
self.ma5 = None
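        # Prices arrive as floats (e.g. 10.5) and are stored as integer
        # hundredths (1050) to avoid float rounding; volume and 'transation'
        # (spelling kept from the data source) are stored as plain ints.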
def __repr__(self):
return self.__get_str()
def __str__(self):
return self.__get_str()
    def __get_str(self):
        return repr(self.date) + " : " + repr(self.open) + "\t" + repr(self.close)
| StarcoderdataPython |
1685057 | <reponame>isc-projects/forge
"""Kea leases manipulation commands"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_msg
import misc
import srv_control
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.controlchannel
@pytest.mark.hook
@pytest.mark.lease_cmds
def test_hook_v6_lease_cmds_list():
misc.test_setup()
    srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::1')
    srv_control.config_srv_another_subnet_no_interface('1000::/32', '1000::5-1000::5')
    srv_control.config_srv_another_subnet_no_interface('3000::/100', '3000::5-3000::5')
srv_control.open_control_channel()
srv_control.add_hooks('libdhcp_lease_cmds.so')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_msg.send_ctrl_cmd_via_socket('{"command":"list-commands","arguments":{}}')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.controlchannel
@pytest.mark.hook
@pytest.mark.lease_cmds
def test_hook_v6_lease_cmds_add_notvalid_id():
misc.test_setup()
    srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::1')
srv_control.config_srv_opt('preference', '123')
srv_control.config_srv_opt('domain-search', 'domain1.example.com,domain2.isc.org')
srv_control.open_control_channel()
srv_control.add_hooks('libdhcp_lease_cmds.so')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::1')
response = srv_msg.send_ctrl_cmd_via_socket({"command": "lease6-add",
"arguments": {"subnet-id": 11,
"ip-address": "2001:db8:1::1",
"duid": "1a:1b:1c:1d:1e:1f:20:21:22:23:24",
"iaid": 1234}},
exp_result=1)
assert response['text'] == 'Invalid subnet-id: No IPv6 subnet with subnet-id=11 currently configured.'
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::1')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.controlchannel
@pytest.mark.hook
@pytest.mark.lease_cmds
def test_hook_v6_lease_cmds_add_address_from_different_subnet():
misc.test_setup()
srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::1')
srv_control.config_srv_opt('preference', '123')
srv_control.config_srv_opt('domain-search', 'domain1.example.com,domain2.isc.org')
srv_control.open_control_channel()
srv_control.add_hooks('libdhcp_lease_cmds.so')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::1')
response = srv_msg.send_ctrl_cmd_via_socket({"command": "lease6-add",
"arguments": {"subnet-id": 1,
"ip-address": "2001:db8:2::1",
"duid": "1a:1b:1c:1d:1e:1f:20:21:22:23:24",
"iaid": 1234}},
exp_result=1)
assert response['text'] == 'The address 2001:db8:2::1 does not belong to subnet 2001:db8:1::/64, subnet-id=1'
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::1')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.controlchannel
@pytest.mark.hook
@pytest.mark.lease_cmds
def test_hook_v6_lease_cmds_add_valid():
misc.test_setup()
srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::1')
srv_control.config_srv_opt('preference', '123')
srv_control.config_srv_opt('domain-search', 'domain1.example.com,domain2.isc.org')
srv_control.open_control_channel()
srv_control.add_hooks('libdhcp_lease_cmds.so')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::1')
srv_msg.send_ctrl_cmd_via_socket('{"command": "lease6-add","arguments": {"subnet-id": 1,"ip-address": "2001:db8:1::1","duid": "1a:1b:1c:1d:1e:1f:20:21:22:23:24","iaid": 1234}}')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 13)
srv_msg.response_check_suboption_content(13, 3, 'statuscode', 2)
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.controlchannel
@pytest.mark.hook
@pytest.mark.lease_cmds
def test_hook_v6_lease_cmds_add_expired_with_options():
misc.test_setup()
srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::1')
srv_control.config_srv_opt('preference', '123')
srv_control.config_srv_opt('domain-search', 'domain1.example.com,domain2.isc.org')
srv_control.open_control_channel()
srv_control.add_hooks('libdhcp_lease_cmds.so')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::1')
srv_msg.send_ctrl_cmd_via_socket('{"command": "lease6-add","arguments": {"subnet-id": 1,"ip-address": "2001:db8:1::1","duid": "1a:1b:1c:1d:1e:1f:20:21:22:23:24","iaid": 1234,"hw-address": "1a:2b:3c:4d:5e:6f","preferred-lft": 500,"valid-lft": 11111,"expire": 123456789,"fqdn-fwd": true,"fqdn-rev": true,"hostname": "urania.example.org"}}')
# Using UNIX socket on server in path control_socket send {"command":"lease6-get","arguments":{"ip-address": "2001:db8:1::1"}}
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::1')
srv_msg.lease_file_contains('1a:2b:3c:4d:5e:6f')
srv_msg.lease_file_contains('1a:1b:1c:1d:1e:1f:20:21:22:23:24')
srv_msg.lease_file_contains('11111')
srv_msg.lease_file_contains('123456789')
srv_msg.lease_file_contains('urania.example.org')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.controlchannel
@pytest.mark.hook
@pytest.mark.lease_cmds
def test_hook_v6_lease_cmds_add_valid_with_options():
misc.test_setup()
    srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::1')
srv_control.config_srv_opt('preference', '123')
srv_control.config_srv_opt('domain-search', 'domain1.example.com,domain2.isc.org')
srv_control.open_control_channel()
srv_control.add_hooks('libdhcp_lease_cmds.so')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::1')
srv_msg.send_ctrl_cmd_via_socket('{"command": "lease6-add","arguments": {"subnet-id": 1,"ip-address": "2001:db8:1::1","duid": "1a:1b:1c:1d:1e:1f:20:21:22:23:24","iaid": 1234,"hw-address": "1a:2b:3c:4d:5e:6f","preferred-lft": 500,"valid-lft": 11111,"fqdn-fwd": true,"fqdn-rev": true,"hostname": "urania.example.org"}}')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 13)
srv_msg.response_check_suboption_content(13, 3, 'statuscode', 2)
srv_msg.lease_file_contains('1a:2b:3c:4d:5e:6f')
    srv_msg.lease_file_contains('1a:1b:1c:1d:1e:1f:20:21:22:23:24')
srv_msg.lease_file_contains('11111')
srv_msg.lease_file_contains('urania.example.org')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.controlchannel
@pytest.mark.hook
@pytest.mark.lease_cmds
def test_hook_v6_lease_cmds_del_using_address():
misc.test_setup()
srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::1')
srv_control.open_control_channel()
srv_control.add_hooks('libdhcp_lease_cmds.so')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::1')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:11:22:33:44:55:66')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 13)
srv_msg.response_check_suboption_content(13, 3, 'statuscode', 2)
srv_msg.send_ctrl_cmd_via_socket('{"command":"lease6-del","arguments":{"ip-address": "2001:db8:1::1"}}')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:11:22:33:44:55:66')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::1')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.controlchannel
@pytest.mark.hook
@pytest.mark.lease_cmds
def test_hook_v6_lease_cmds_del_using_duid():
misc.test_setup()
srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::1')
srv_control.open_control_channel()
srv_control.add_hooks('libdhcp_lease_cmds.so')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::1')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:11:22:33:44:55:66')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 13)
srv_msg.response_check_suboption_content(13, 3, 'statuscode', 2)
srv_msg.send_ctrl_cmd_via_socket('{"command":"lease6-del","arguments":{"subnet-id":1,"identifier": "00:03:00:01:66:55:44:33:22:11","identifier-type": "duid","iaid":666}}')
# Using UNIX socket on server in path control_socket send {"command":"lease6-get","arguments":{"ip-address": "2001:db8:1::1"}}
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:11:22:33:44:55:66')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::1')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.controlchannel
@pytest.mark.hook
@pytest.mark.lease_cmds
def test_hook_v6_lease_cmds_get_using_address():
misc.test_setup()
srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::1')
srv_control.open_control_channel()
srv_control.add_hooks('libdhcp_lease_cmds.so')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::1')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:11:22:33:44:55:66')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 13)
srv_msg.response_check_suboption_content(13, 3, 'statuscode', 2)
srv_msg.send_ctrl_cmd_via_socket('{"command":"lease6-get","arguments":{"ip-address": "2001:db8:1::1"}}')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.controlchannel
@pytest.mark.hook
@pytest.mark.lease_cmds
def test_hook_v6_lease_cmds_get_using_duid():
misc.test_setup()
srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::1')
srv_control.open_control_channel()
srv_control.add_hooks('libdhcp_lease_cmds.so')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::1')
srv_msg.send_ctrl_cmd_via_socket('{"command":"lease6-get","arguments":{"subnet-id":1,"identifier": "00:03:00:01:66:55:44:33:22:11","identifier-type": "duid","iaid":666}}')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.controlchannel
@pytest.mark.hook
@pytest.mark.lease_cmds
def test_hook_v6_lease_cmds_wipe():
misc.test_setup()
srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::2')
srv_control.open_control_channel()
srv_control.add_hooks('libdhcp_lease_cmds.so')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
# Client sets ia_id value to 666.
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
# Response sub-option 5 from option 3 MUST contain address 2001:db8:1::1.
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:11:22:33:44:55:66')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:11:22:33:44:55:66')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:11:11:11:11:11:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 13)
srv_msg.response_check_suboption_content(13, 3, 'statuscode', 2)
srv_msg.send_ctrl_cmd_via_socket('{"command":"lease6-wipe", "arguments": {"subnet-id":1}}')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:11:11:11:11:11:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
# Response sub-option 5 from option 3 MUST contain address 2001:db8:1::1.
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:11:22:33:44:55:66')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:11:22:33:44:55:66')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:11:11:11:11:11:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 13)
srv_msg.response_check_suboption_content(13, 3, 'statuscode', 2)
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.controlchannel
@pytest.mark.hook
@pytest.mark.lease_cmds
def test_hook_v6_lease_cmds_update():
misc.test_setup()
srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::1')
srv_control.open_control_channel()
srv_control.add_hooks('libdhcp_lease_cmds.so')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.lease_file_contains('2001:db8:1::1,00:03:00:01:66:55:44:33:22:11,4000,')
srv_msg.lease_file_contains(',1,3000,0,666,128,0,0,,66:55:44:33:22:11,0')
srv_msg.lease_file_doesnt_contain('2001:db8:1::1,01:02:03:04:05:06:07:08')
srv_msg.lease_file_doesnt_contain(',urania.example.org,1a:1b:1c:1d:1e:1f,')
srv_msg.send_ctrl_cmd_via_socket('{"command":"lease6-update", "arguments":{"subnet-id": 1,"ip-address": "2001:db8:1::1","duid": "01:02:03:04:05:06:07:08","iaid": 1234,"hw-address": "1a:1b:1c:1d:1e:1f","preferred-lft": 500,"valid-lft": 1000,"hostname": "urania.example.org"}}')
srv_msg.lease_file_contains(',1,500,0,1234,128,0,0,urania.example.org,1a:1b:1c:1d:1e:1f,0')
srv_msg.lease_file_contains('2001:db8:1::1,01:02:03:04:05:06:07:08,1000')
| StarcoderdataPython |
3305247 | <reponame>cardosofede/hummingbot<gh_stars>100-1000
from aiounittest import async_test
from aiohttp import ClientSession
import asyncio
from contextlib import ExitStack
from decimal import Decimal
from os.path import join, realpath
from typing import List, Dict, Any
import unittest
from unittest.mock import patch
from hummingbot.core.event.events import TradeType
from hummingbot.core.gateway.gateway_http_client import GatewayHttpClient
from test.mock.http_recorder import HttpPlayer
ev_loop: asyncio.AbstractEventLoop = asyncio.get_event_loop()
class GatewayHttpClientUnitTest(unittest.TestCase):
_db_path: str
_http_player: HttpPlayer
_patch_stack: ExitStack
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls._db_path = realpath(join(__file__, "../fixtures/gateway_http_client_fixture.db"))
cls._http_player = HttpPlayer(cls._db_path)
cls._patch_stack = ExitStack()
cls._patch_stack.enter_context(cls._http_player.patch_aiohttp_client())
cls._patch_stack.enter_context(
patch("hummingbot.core.gateway.gateway_http_client.GatewayHttpClient._http_client", return_value=ClientSession())
)
GatewayHttpClient.get_instance().base_url = "https://localhost:5000"
@classmethod
def tearDownClass(cls) -> None:
cls._patch_stack.close()
@async_test(loop=ev_loop)
async def test_ping_gateway(self):
result: bool = await GatewayHttpClient.get_instance().ping_gateway()
self.assertTrue(result)
@async_test(loop=ev_loop)
async def test_get_gateway_status(self):
result: List[Dict[str, Any]] = await GatewayHttpClient.get_instance().get_gateway_status()
self.assertIsInstance(result, list)
self.assertEqual(1, len(result))
first_entry: Dict[str, Any] = result[0]
self.assertEqual(3, first_entry["chainId"])
@async_test(loop=ev_loop)
async def test_add_wallet(self):
result: Dict[str, Any] = await GatewayHttpClient.get_instance().add_wallet(
"ethereum",
"ropsten",
"0000000000000000000000000000000000000000000000000000000000000001" # noqa: mock
)
self.assertTrue(isinstance(result, dict))
self.assertEqual("0x7E5F4552091A69125d5DfCb7b8C2659029395Bdf", result["address"])
@async_test(loop=ev_loop)
async def test_get_wallets(self):
result: List[Dict[str, Any]] = await GatewayHttpClient.get_instance().get_wallets()
self.assertIsInstance(result, list)
first_entry: Dict[str, Any] = result[0]
self.assertIn("chain", first_entry)
self.assertIn("walletAddresses", first_entry)
@async_test(loop=ev_loop)
async def test_get_connectors(self):
result: Dict[str, Any] = await GatewayHttpClient.get_instance().get_connectors()
self.assertIn("connectors", result)
uniswap: Dict[str, Any] = result["connectors"][0]
self.assertEqual("uniswap", uniswap["name"])
self.assertEqual(["EVM_AMM"], uniswap["trading_type"])
@async_test(loop=ev_loop)
async def test_get_configuration(self):
result: Dict[str, Any] = await GatewayHttpClient.get_instance().get_configuration()
self.assertIn("avalanche", result)
self.assertIn("ethereum", result)
@async_test(loop=ev_loop)
async def test_update_configuration(self):
result: Dict[str, Any] = await GatewayHttpClient.get_instance().update_config("telemetry.enabled", False)
self.assertIn("message", result)
@async_test(loop=ev_loop)
async def test_get_tokens(self):
result: Dict[str, Any] = await GatewayHttpClient.get_instance().get_tokens("ethereum", "ropsten")
self.assertIn("tokens", result)
self.assertIsInstance(result["tokens"], list)
self.assertEqual("WETH", result["tokens"][0]["symbol"])
self.assertEqual("DAI", result["tokens"][1]["symbol"])
@async_test(loop=ev_loop)
async def test_get_network_status(self):
result: Dict[str, Any] = await GatewayHttpClient.get_instance().get_network_status("ethereum", "ropsten")
self.assertEqual(3, result["chainId"])
self.assertEqual(12067035, result["currentBlockNumber"])
@async_test(loop=ev_loop)
async def test_get_price(self):
result: Dict[str, Any] = await GatewayHttpClient.get_instance().get_price(
"ethereum",
"ropsten",
"uniswap",
"DAI",
"WETH",
Decimal(1000),
TradeType.BUY
)
self.assertEqual("1000.000000000000000000", result["amount"])
self.assertEqual("1000000000000000000000", result["rawAmount"])
self.assertEqual("0.00262343", result["price"])
@async_test(loop=ev_loop)
async def test_get_balances(self):
result: Dict[str, Any] = await GatewayHttpClient.get_instance().get_balances(
"ethereum",
"ropsten",
"0x5821715133bB451bDE2d5BC6a4cE3430a4fdAF92",
["WETH", "DAI"],
)
self.assertIn("balances", result)
self.assertEqual("21.000000000000000000", result["balances"]["WETH"])
self.assertEqual("0.000000000000000000", result["balances"]["DAI"])
@async_test(loop=ev_loop)
async def test_successful_get_transaction(self):
result: Dict[str, Any] = await GatewayHttpClient.get_instance().get_transaction_status(
"ethereum",
"ropsten",
"0xa8d428627dc7f453be79a32129dc18ea29d1a715249a4a5762ca6273da5d96e3" # noqa: mock
)
self.assertEqual(1, result["txStatus"])
self.assertIsNotNone(result["txData"])
@async_test(loop=ev_loop)
async def test_failed_get_transaction(self):
result: Dict[str, Any] = await GatewayHttpClient.get_instance().get_transaction_status(
"ethereum",
"ropsten",
"0xa8d428627dc7f453be79a32129dc18ea29d1a715249a4a5762ca6273da5d96e1" # noqa: mock
)
self.assertEqual(-1, result["txStatus"])
self.assertIsNone(result["txData"])
@async_test(loop=ev_loop)
async def test_get_evm_nonce(self):
result: Dict[str, Any] = await GatewayHttpClient.get_instance().get_evm_nonce(
"ethereum",
"ropsten",
"0x5821715133bB451bDE2d5BC6a4cE3430a4fdAF92"
)
self.assertEqual(2, result["nonce"])
@async_test(loop=ev_loop)
async def test_approve_token(self):
result: Dict[str, Any] = await GatewayHttpClient.get_instance().approve_token(
"ethereum",
"ropsten",
"0x5821715133bB451bDE2d5BC6a4cE3430a4fdAF92",
"WETH",
"uniswap",
2
)
self.assertEqual("0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D", result["spender"])
self.assertEqual("0x66b533792f45780fc38573bfd60d6043ab266471607848fb71284cd0d9eecff9", # noqa: mock
result["approval"]["hash"])
@async_test(loop=ev_loop)
async def test_get_allowances(self):
result: Dict[str, Any] = await GatewayHttpClient.get_instance().get_allowances(
"ethereum",
"ropsten",
"0x5821715133bB451bDE2d5BC6a4cE3430a4fdAF92",
["WETH", "DAI"],
"uniswap"
)
self.assertIn("approvals", result)
self.assertEqual("115792089237316195423570985008687907853269984665640564039457.584007913129639935",
result["approvals"]["DAI"])
self.assertEqual("115792089237316195423570985008687907853269984665640564039457.584007913129639935",
result["approvals"]["WETH"])
@async_test(loop=ev_loop)
async def test_amm_trade(self):
result: Dict[str, Any] = await GatewayHttpClient.get_instance().amm_trade(
"ethereum",
"ropsten",
"uniswap",
"0x5821715133bB451bDE2d5BC6a4cE3430a4fdAF92",
"DAI",
"WETH",
TradeType.BUY,
Decimal(1000),
Decimal("0.00266"),
4
)
self.assertEqual("1000.000000000000000000", result["amount"])
self.assertEqual("1000000000000000000000", result["rawAmount"])
self.assertEqual("0xc7287236f64484b476cfbec0fd21bc49d85f8850c8885665003928a122041e18", # noqa: mock
result["txHash"])
| StarcoderdataPython |
184403 | <filename>glypy/io/linear_code.py
'''
A module for operating on GlycoMinds Linear Code
Assumes that the structure's root is the right-most residue, as shown in
:title-reference:`A Novel Linear Code Nomenclature for Complex Carbohydrates, Banin et al.`.
Currently does not handle the sigils indicating deviation from the common forms.
'''
import re
from collections import OrderedDict, deque
from six import string_types as basestring
from glypy.io import format_constants_map
from glypy.io.nomenclature import identity
from glypy.structure import constants, named_structures, Monosaccharide, Glycan, Substituent
from glypy.utils import invert_dict
from glypy.io.file_utils import ParserInterface, ParserError
Stem = constants.Stem
Configuration = constants.Configuration
# A static copy of monosaccharide names to structures for copy-free comparison
monosaccharide_reference = {k: v.clone() for k, v in named_structures.monosaccharides.items()}
# Unset the anomericity as this does not influence monosaccharide resolution
for k, v in monosaccharide_reference.items():
v.anomer = None
#: A mapping from common monosaccharide names to their symbol, ordered by priority
monosaccharides_to = OrderedDict((
("Glc", 'G'),
("Gal", 'A'),
("GlcNAc", 'GN'),
("GalNAc", 'AN'),
("Man", "M"),
("Neu", "N"),
("NeuAc", "NN"),
("NeuGc", "NJ"),
("KDN", "K"),
("Kdo", "W"),
("GalA", "L"),
("IdoA", "I"),
("Rha", "H"),
("Fuc", "F"),
("Xyl", "X"),
("Rib", "B"),
("Ara", "R"),
("GlcA", "U"),
("All", 'O'),
# ("Api", 'P'),
("Fru", "E")
))
#: A mapping from symbol to common monosaccharide name
monosaccharides_from = invert_dict(dict(monosaccharides_to))
#: A mapping from common substituent names to symbol
substituents_to = {
'amino': 'Q',
'ethanolominephosphate': 'PE',
'inositol': "IN",
'methyl': "ME",
'n_acetyl': 'N',
'o_acetyl': 'T',
'phosphate': 'P',
'phosphocholine': "PC",
'pyruvate': 'PYR',
'sulfate': 'S',
'sulfide': 'SH',
'2-aminoethylphosphonic acid': 'EP'
}
#: A mapping from symbol to common substituent names
substituents_from = invert_dict(substituents_to)
anomer_map_from = dict(format_constants_map.anomer_map)
anomer_map_from['?'] = anomer_map_from.pop('x')
anomer_map_to = invert_dict(anomer_map_from)
def get_relevant_substituents(residue):
'''
Retrieve the set of substituents not implicitly included
in the base type's symbol name.
'''
positions = [p for p, sub in residue.substituents()]
substituents = [sub.name for p, sub in residue.substituents()]
if identity.is_a(residue, monosaccharide_reference["HexNAc"], exact=False) or\
identity.is_a(residue, monosaccharide_reference["NeuAc"], exact=False):
i = substituents.index("n_acetyl")
substituents.pop(i)
positions.pop(i)
elif identity.is_a(residue, monosaccharide_reference["NeuGc"], exact=False):
i = substituents.index("n_glycolyl")
substituents.pop(i)
positions.pop(i)
return zip(substituents, positions)
def substituent_to_linear_code(substituent, position=None):
'''
    Translate a |Substituent| to Linear Code. Includes the substituent's
    position if it is known.
Parameters
----------
    substituent: Substituent or str
The structure or the name to translate
position:
The position of the structure to try including
Returns
-------
str
Raises
------
KeyError:
When an unknown symbol is encountered
'''
symbol = None
try:
symbol = substituents_to[substituent.name]
    except (AttributeError, KeyError):  # fall back to treating the argument as a plain name
symbol = substituents_to[substituent]
if position is not None:
if position == -1:
position = ''
return "{}{}".format(position, symbol)
else:
return symbol
def monosaccharide_to_linear_code(monosaccharide, max_tolerance=3):
'''
Perform iteratively permissive attempts to translate `monosaccharide` into
a nomenclature symbol.
.. note::
Uses a multi-pass approach. Could alternatively do a single pass
and keep the best match.
Parameters
----------
monosaccharide: Monosaccharide
The residue to be translated
max_tolerance: int
The maximum error tolerance to allow while looking for a match
Returns
-------
str
Raises
------
ValueError:
When no suitable translation can be found
KeyError:
When an unknown symbol is encountered
'''
tolerance = 0
if identity.is_generic_monosaccharide(monosaccharide):
raise LinearCodeError("Linear Code does not support generic monosaccharide %s" % str(monosaccharide))
while tolerance <= max_tolerance:
for k, v in monosaccharides_to.items():
if k not in monosaccharide_reference:
continue
if identity.is_a(monosaccharide, monosaccharide_reference[k], tolerance=tolerance):
residue_sym = v
substituents_sym = [(
substituent_to_linear_code(
*s)) for s in get_relevant_substituents(monosaccharide)]
if len(substituents_sym) > 0:
residue_sym = residue_sym + '[{}]'.format(', '.join(substituents_sym))
residue_sym = residue_sym + anomer_map_to[monosaccharide.anomer]
return residue_sym
tolerance += 1
raise LinearCodeError("Cannot map {} to Linear Code".format(monosaccharide))
def priority(sym):
'''
Calculate the branching priority for a given symbol or |Monosaccharide|.
Used when deciding when a residue is considered branch or backbone.
Parameters
----------
sym: str or Monosaccharide
Returns
-------
int
'''
i = 0
is_str = isinstance(sym, basestring)
for key, value in monosaccharides_to.items():
i += 1
if (identity.is_a(sym, monosaccharide_reference[key])) if not is_str else value == sym:
return i
return -1
def glycan_to_linear_code(structure=None, max_tolerance=3):
'''
Translate a |Glycan| structure into Linear Code. Called from :func:`to_linear_code`.
Recursively operates on branches.
Parameters
----------
structure: Glycan or Monosaccharide
The glycan to be translated. Translation starts from `glycan.root` if `structure`
is a |Glycan|.
max_tolerance: int
The maximum amount of deviance to allow when translating |Monosaccharide| objects
into nomenclature symbols
Returns
-------
deque
'''
base = structure.root if isinstance(structure, Glycan) else structure
stack = [(1, base)]
outstack = deque()
    while len(stack) > 0:
outedge_pos, node = stack.pop()
if outedge_pos in {1, -1}:
outedge_pos = ''
outstack.appendleft(monosaccharide_to_linear_code(node, max_tolerance=max_tolerance) + str(outedge_pos))
children = []
for pos, child in node.children():
rank = priority(child)
children.append((pos, child, rank))
if len(children) > 1:
ordered_children = sorted(children, key=lambda x: x[2])
for pos, child, rank in ordered_children[:-1]:
branch = '({branch}{attach_pos})'.format(
branch=''.join(glycan_to_linear_code(child, max_tolerance=max_tolerance)),
attach_pos=pos
)
outstack.appendleft(branch)
pos, child, rank = ordered_children[-1]
stack.append((pos, child))
elif len(children) == 1:
pos, child, rank = children[0]
stack.append((pos, child))
return outstack
def to_linear_code(structure):
'''
Translates `structure` to Linear Code.
Parameters
----------
structure: Monosaccharide or Glycan
Returns
-------
str
'''
if isinstance(structure, Monosaccharide):
return monosaccharide_to_linear_code(structure)
else:
return ''.join(list(glycan_to_linear_code(structure)))
class LinearCodeError(ParserError):
pass
def monosaccharide_from_linear_code(residue_str, parent=None):
'''
Helper function for :func:`parse_linear_code`. Given a residue string
of the form "(base_type)(anomer)(outedge?)([substituents]?)", construct
a |Monosaccharide| object. If `parent` is not |None|, connect the
resulting |Monosaccharide| to `parent` at `outedge`.
'''
base_type, substituents, anomer, outedge = re.search(r"([A-Z]+)(\[.*?\])?([abo\?]?)(.)?", residue_str).groups()
base = named_structures.monosaccharides[monosaccharides_from[base_type]]
base.anomer = anomer_map_from.get(anomer, None)
if substituents is not None:
for subst_str in substituents[1:-1].split(','):
pos, name = re.search(r'(\d*)(.+)', subst_str).groups()
subst_object = Substituent(substituents_from[name])
try:
pos = int(pos)
except (ValueError, TypeError):
pos = -1
base.add_substituent(subst_object, position=pos)
try:
outedge = int(outedge)
except (ValueError, TypeError):
outedge = -1
if parent is not None:
parent.add_monosaccharide(base, position=outedge, child_position=min(base.open_attachment_sites()[0]))
return base, outedge
def parse_linear_code(text, structure_class=Glycan):
'''
Parse the character string `text`, extracting GlycoMinds Linear Code-format
carbohydrate structures, converting them into a |Glycan| object.
.. note:: Assumes that the structure's root is the right-most residue
Supports only *concrete* structures.
The resulting structure will be canonicalized.
Parameters
----------
text: str
The string to be parsed
Returns
-------
Glycan
Raises
------
LinearCodeError:
When an error is encountered while parsing resulting from malformed syntax
or structure
ValueError:
When a symbol is encountered for which no suitable translation could be found
KeyError:
When an unknown symbol is encountered
'''
last_outedge = None
root = None
last_residue = None
branch_stack = []
while len(text) > 0:
# If starting a new branch
if text[-1] == ')':
branch_stack.append((last_residue, root, last_outedge))
root = None
last_residue = None
last_outedge = None
text = text[:-1]
# If ending a branch
elif text[-1] == '(':
try:
branch_parent, old_root, old_last_outedge = branch_stack.pop()
branch_parent.add_monosaccharide(root, position=last_outedge,
child_position=min(root.open_attachment_sites()[0]))
root = old_root
last_residue = branch_parent
last_outedge = old_last_outedge
text = text[:-1]
except IndexError:
raise LinearCodeError("Bad branching at {}".format(len(text)))
# Parsing a residue
else:
match = re.search(r"([A-Z]+?)(\[[^\]]*?\])?(.)(.)?$", text)
if match:
next_residue, outedge = monosaccharide_from_linear_code(text[match.start(): match.end()], last_residue)
if root is None:
last_outedge = outedge
root = next_residue
last_residue = next_residue
text = text[:match.start()]
else:
raise LinearCodeError("Could not identify residue '...{}' at {}".format(text[-10:], len(text)))
res = structure_class(root=root).reindex()
res.canonicalize()
if len(res) > 1:
return res
else:
return res.root
#: Common alias for :func:`to_linear_code`
dumps = to_linear_code
#: Common alias for :func:`parse_linear_code`
loads = parse_linear_code
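# Illustrative round trip (the Linear Code string below is an assumption
# chosen to fit the grammar handled here, not an example from the library docs):
# >>> structure = loads('Ab4GNb')   # Gal(b1-4)GlcNAc, root on the right
# >>> dumps(structure)
# 'Ab4GNb'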
class LinearCodeParser(ParserInterface):
def process_result(self, line):
structure = loads(line)
return structure
Monosaccharide.register_serializer('linear_code', dumps)
Glycan.register_serializer('linear_code', dumps)
| StarcoderdataPython |
194554 | #!/usr/bin/env python3
# Copyright (c) 2018 Aiven, Helsinki, Finland. https://aiven.io/
import argparse
import os
import sys
from consumer_example import consumer_example
from producer_example import producer_example
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--service-uri', help="Service URI in the form host:port",
required=True)
parser.add_argument('--ca-path', help="Path to project CA certificate",
required=True)
parser.add_argument('--key-path', help="Path to the Kafka Access Key (obtained from Aiven Console)",
required=True)
parser.add_argument('--cert-path', help="Path to the Kafka Certificate Key (obtained from Aiven Console)",
required=True)
parser.add_argument('--consumer', action='store_true', default=False, help="Run Kafka consumer example")
parser.add_argument('--producer', action='store_true', default=False, help="Run Kafka producer example")
args = parser.parse_args()
validate_args(args)
kwargs = {k: v for k, v in vars(args).items() if k not in ("producer", "consumer")}
if args.producer:
producer_example(**kwargs)
elif args.consumer:
consumer_example(**kwargs)
def validate_args(args):
for path_option in ("ca_path", "key_path", "cert_path"):
path = getattr(args, path_option)
if not os.path.isfile(path):
fail(f"Failed to open --{path_option.replace('_', '-')} at path: {path}.\n"
f"You can retrieve these details from Overview tab in the Aiven Console")
if args.producer and args.consumer:
fail("--producer and --consumer are mutually exclusive")
elif not args.producer and not args.consumer:
fail("--producer or --consumer are required")
def fail(message):
print(message, file=sys.stderr)
    sys.exit(1)  # exit() is meant for interactive sessions; sys.exit is the idiomatic choice
if __name__ == '__main__':
main()
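# Example invocation (host, port and file paths are placeholders; run with
# --help for the full option list):
#
#   python main.py --service-uri myhost:12345 \
#       --ca-path ca.pem --key-path service.key --cert-path service.cert \
#       --producer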
| StarcoderdataPython |
1610136 | <reponame>AlexNilsson/python-remove-duplicate-files
from .Files import Files
def removeDuplicates(directory):
Files(directory).removeDuplicates()
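# Usage sketch (the path is a placeholder):
# removeDuplicates("/path/to/folder")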
| StarcoderdataPython |
3377984 | """
Example Train Annex 4 v1
========================
A train with three routes each composed of two sections.
Given this infrastructure:
.. uml:: ../uml/tom-06-example-annex-4-infrastructure.puml
The following routing specification describes the initial planned routes for the train with
`ID1`. As you can see, no route section mentions Station `M`. This station plays no role in
the route planning process because it is neither an origin, nor a destination, nor a handover.
"""
from networkx import DiGraph
from tom.plot import *
from tom.tom import make_train_from_yml, RouteSection, Route, TrainRun
from tom.util import example, dump_routing_info_as_xml
# %%
# Load example annex 4 from yaml specification
#
_, t_spec_file = example('../tests/data', 'annex-4.yml')
print(t_spec_file.read_text())
# %%
# Notice the route sections which have a departure time.
# These are considered as *route construction starts*.
# Only one section in a route may be a construction start.
#
# Now create train object and show its train id.
t = make_train_from_yml(t_spec_file)
t.train_id()
# %%
# Timetable
# ^^^^^^^^^
#
# This is the timetable of version 1 of TR-ID1. Notice the two train runs with ID
# `TR/8350/ID1/10/2021/2021-02-07` and `TR/8350/ID1/20/2021/2021-02-07`. They both start on
# `07/02`. To make the daily train ID unique on this operating day, we propose to make the
# `section_id` part of `TrainRun.train_id()`: here `10` for the train starting at `00:10` and `20` for the
# train departing at `23:50` at station `S`.
df = t.to_dataframe()
df
# %%
# Bildfahrplan
# ^^^^^^^^^^^^
# Show timetable as `Bildfahrplan <https://de.wikipedia.org/wiki/Bildfahrplan>`_.
plot_train(t)
# %%
# Route Sections
# ^^^^^^^^^^^^^^
# Which sections is the train composed of?
section: RouteSection
for section in t.sections:
print(section.description(), "\n")
# %%
# Section graph
# ^^^^^^^^^^^^^
# The section graph is computed using the successor relation.
sg: DiGraph = t.section_graph()
plot_graph(sg)
# %%
# Routes
# ^^^^^^
# Print all possible routes.
# Routes are calculated from all possible paths in the section graph.
route: Route
for route in t.routes():
print(route.description(), "\n")
# %%
# Section runs
# ^^^^^^^^^^^^
# For each day of the calendar of a section a `SectionRun` is created.
# The section runs are the rows of RouteSection.to_dataframe:
for section in t.sections:
print(f"{section.section_id}: {section}")
print(section.to_dataframe(), "\n")
# %%
# TrainRuns
# ^^^^^^^^^
# Each `TrainRun` defines a row in the timetable of the train above
tr: TrainRun
for tr in t.train_run_iterator():
print(tr)
for sr in tr.sections_runs:
print(sr)
print("\n")
# %%
# RoutingInformation as TrainInformation
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# An XML dump of the routing information of this example according to a new version of the TSI XSD.
#
# See `Routing planning <../routing-planning-process.html#routininformation-as-traininformation>`_
# for more details.
print(dump_routing_info_as_xml(t))
| StarcoderdataPython |
3370540 | <filename>src/migrations/023_create_endpoint_history.py
"""Creates the endpoint history tables. Not storing the history of tables is a
recipe for disaster, even with backups. Furthermore if we do not store a history
we essentially can never grant access to people we only sort of trust. With
these tables it's way easier for us to revert than for people to break things
via the website.
These tables are particularly nice for maintaining a history since we have
stable identifiers for everything which will be interpretable even when the
original records are lost.
This is true for users as well but we only delete user records when we
essentially _have_ to legally speaking, so we probably would have to zero out
usernames anyway, and user ids are faster.
"""
def up(conn, cursor):
cursor.execute(
'''
CREATE TABLE endpoint_history (
id SERIAL PRIMARY KEY,
user_id INTEGER NULL REFERENCES users(id) ON DELETE SET NULL,
slug TEXT NOT NULL,
old_path TEXT NULL DEFAULT NULL,
new_path TEXT NOT NULL,
old_description_markdown TEXT NULL DEFAULT NULL,
new_description_markdown TEXT NOT NULL,
old_deprecation_reason_markdown TEXT NULL DEFAULT NULL,
new_deprecation_reason_markdown TEXT NULL DEFAULT NULL,
old_deprecated_on DATE NULL DEFAULT NULL,
new_deprecated_on DATE NULL DEFAULT NULL,
old_sunsets_on DATE NULL DEFAULT NULL,
new_sunsets_on DATE NULL DEFAULT NULL,
old_in_endpoints BOOLEAN NOT NULL,
new_in_endpoints BOOLEAN NOT NULL,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
)
'''
)
print(cursor.query.decode('utf-8'))
cursor.execute(
'''
CREATE INDEX index_endpoint_history_on_endpoint_slug_and_created_at
ON endpoint_history(slug, created_at)
'''
)
print(cursor.query.decode('utf-8'))
cursor.execute(
'''
CREATE INDEX index_endpoint_history_on_user_id_and_created_at
ON endpoint_history(user_id, created_at)
WHERE user_id IS NOT NULL
'''
)
print(cursor.query.decode('utf-8'))
cursor.execute(
'''
CREATE INDEX index_endpoint_history_on_created_at
ON endpoint_history(created_at)
'''
)
print(cursor.query.decode('utf-8'))
cursor.execute(
'''
CREATE TABLE endpoint_param_history (
id SERIAL PRIMARY KEY,
user_id INTEGER NULL REFERENCES users(id) ON DELETE SET NULL,
endpoint_slug TEXT NOT NULL,
location TEXT NOT NULL,
path TEXT NOT NULL,
name TEXT NOT NULL,
old_var_type TEXT NULL DEFAULT NULL,
new_var_type TEXT NOT NULL,
old_description_markdown TEXT NULL DEFAULT NULL,
new_description_markdown TEXT NOT NULL,
old_in_endpoint_params BOOLEAN NOT NULL,
new_in_endpoint_params BOOLEAN NOT NULL,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
)
'''
)
print(cursor.query.decode('utf-8'))
cursor.execute(
'''
CREATE INDEX index_endpoint_param_history_on_endpoint_slug_and_created_at
ON endpoint_param_history(endpoint_slug, created_at)
'''
)
print(cursor.query.decode('utf-8'))
cursor.execute(
'''
CREATE INDEX index_endpoint_param_history_on_logical_id_and_created_at
ON endpoint_param_history(endpoint_slug, location, path, name, created_at)
'''
)
print(cursor.query.decode('utf-8'))
cursor.execute(
'''
CREATE INDEX index_endpoint_param_history_on_user_id_and_created_at
ON endpoint_param_history(user_id, created_at)
WHERE user_id IS NOT NULL
'''
)
print(cursor.query.decode('utf-8'))
cursor.execute(
'''
CREATE INDEX index_endpoint_param_history_on_created_at
ON endpoint_param_history(created_at)
'''
)
print(cursor.query.decode('utf-8'))
cursor.execute(
'''
CREATE TABLE endpoint_alternative_history (
id SERIAL PRIMARY KEY,
user_id INTEGER NULL REFERENCES users(id) ON DELETE SET NULL,
old_endpoint_slug TEXT NOT NULL,
new_endpoint_slug TEXT NOT NULL,
old_explanation_markdown TEXT NULL DEFAULT NULL,
new_explanation_markdown TEXT NOT NULL,
old_in_endpoint_alternatives BOOLEAN NOT NULL,
new_in_endpoint_alternatives BOOLEAN NOT NULL,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
)
'''
)
print(cursor.query.decode('utf-8'))
cursor.execute(
'''
CREATE INDEX index_endpoint_alt_history_on_logical_id_and_created_at
ON endpoint_alternative_history(old_endpoint_slug, new_endpoint_slug, created_at)
'''
)
print(cursor.query.decode('utf-8'))
cursor.execute(
'''
CREATE INDEX index_endpoint_alt_history_on_user_id_and_created_at
ON endpoint_alternative_history(user_id, created_at)
WHERE user_id IS NOT NULL
'''
)
print(cursor.query.decode('utf-8'))
cursor.execute(
'''
CREATE INDEX index_endpoint_alt_history_on_created_at
ON endpoint_alternative_history(created_at)
'''
)
print(cursor.query.decode('utf-8'))
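# Illustrative use of the new tables (not part of this migration): with the
# user_id/created_at partial index above, auditing a user's recent endpoint
# edits is a cheap query. `cursor` and `user_id` are assumed to be supplied
# by the caller.
#
# cursor.execute(
#     '''
#     SELECT slug, old_path, new_path, created_at
#     FROM endpoint_history
#     WHERE user_id = %s
#     ORDER BY created_at DESC
#     LIMIT 20
#     ''',
#     (user_id,)
# )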
def down(conn, cursor):
cursor.execute(
'''
DROP TABLE endpoint_alternative_history CASCADE
'''
)
print(cursor.query.decode('utf-8'))
cursor.execute(
'''
DROP TABLE endpoint_param_history CASCADE
'''
)
print(cursor.query.decode('utf-8'))
cursor.execute(
'''
DROP TABLE endpoint_history CASCADE
'''
)
print(cursor.query.decode('utf-8'))
| StarcoderdataPython |
92905 | from . import wrapper
@wrapper.parse
def _encode(b, file_parsed):
    macs = []
    for entry in file_parsed:
        # each entry is a sequence of address groups (1 byte each);
        # to_bytes(1, 'big') turns a group into a byte, so .hex() yields two hex digits
        groups = [entry[o].to_bytes(1, 'big').hex() for o in range(len(entry))]
        macs.append(':'.join(groups))
return macs | StarcoderdataPython |
1629477 | import shutil
import torch
CHECKPOINT_SIZE_MB = 333
BATCH_SIZE_PER_GB = 2.5
LEARNING_RATE_PER_BATCH = 3.125e-5
def get_available_memory():
"""
Get available GPU memory in GB.
Returns
-------
int
Available GPU memory in GB
"""
gpu_memory = torch.cuda.get_device_properties(0).total_memory
memory_in_use = torch.cuda.memory_allocated(0)
available_memory = gpu_memory - memory_in_use
available_memory_gb = available_memory // 1024 // 1024 // 1024
return available_memory_gb
def get_batch_size(available_memory_gb):
"""
    Calculate batch size.
Parameters
----------
available_memory_gb : int
Available GPU memory in GB
Returns
-------
int
Batch size
"""
return int(available_memory_gb * BATCH_SIZE_PER_GB)
def get_learning_rate(batch_size):
"""
    Calculate learning rate.
Parameters
----------
batch_size : int
Batch size
Returns
-------
float
Learning rate
"""
return batch_size * LEARNING_RATE_PER_BATCH
def check_space(num_checkpoints):
"""
Check if system has enough available storage to save all checkpoints.
Parameters
----------
num_checkpoints : int
Number of checkpoints that will be generated in training
Raises
-------
AssertionError
        If the system does not have sufficient storage space
"""
_, _, free = shutil.disk_usage("/")
free_mb = free // (2 ** 20)
required_mb = CHECKPOINT_SIZE_MB * num_checkpoints
assert (
free_mb >= required_mb
), f"Insufficent storage space (requires {required_mb}mb). Reduce checkpoint frequency or free up space"
| StarcoderdataPython |
3301028 | <gh_stars>1-10
from pvector import PVector
import pgzrun
from pgzero.actor import Actor  # pgzrun injects Actor at runtime; the explicit import keeps the module importable on its own
WIDTH = 400
HEIGHT = 400
class Ball(Actor):
    def __init__(self, x, y, v_x, v_y, radius, image):
        super().__init__(image)  # Actor.__init__ takes the image name; passing self twice was a bug
        self.radius = radius
        self.position = PVector(x, y)
        self.velocity = PVector(v_x, v_y)
    def show(self):
        self.draw()
    def move(self):
        self.position.add(self.velocity)
        # bounce off the edges, using the instance radius (RADIUS was undefined)
        if (self.position.x > WIDTH - self.radius) or (self.position.x < self.radius):
            self.velocity.x *= -1
        if (self.position.y > HEIGHT - self.radius) or (self.position.y < self.radius):
            self.velocity.y *= -1 | StarcoderdataPython |
3288345 | <gh_stars>1-10
import os
import numpy as np
import tensorflow as tf
SEQUENCE_LENGTH = 1.0 # seconds
STEP_TIME = 0.2 # seconds
CHOOSE_STEP = int(SEQUENCE_LENGTH / STEP_TIME)
def cosserat_rods_sim_pc(path, content_file, batch_size, load_train=True, load_val=True, load_test=True):
df = np.load(os.path.join(path, content_file), allow_pickle=True).item()
datasets = list()
def crop_seq_x(x):
return np.asarray([s[:-CHOOSE_STEP:CHOOSE_STEP] for s in x])
def crop_params(params):
return np.asarray([[p["density"], p["nu"], p["youngs_modulus"], p["poisson_ratio"]] for p in params])
def crop_seq_y(y):
return np.asarray([s[CHOOSE_STEP::CHOOSE_STEP] for s in y])
if load_train:
x1 = crop_seq_x(df["train"]["sequences"])
x2 = crop_params(df["train"]["sequences_params"])
y = crop_seq_y(df["train"]["sequences"])
train_ds = tf.data.Dataset.from_tensor_slices((x1, x2, y)).shuffle(1000).batch(batch_size)
datasets.append(train_ds)
datasets.append(len(y))
if load_val:
x1 = crop_seq_x(df["validation"]["sequences"])
x2 = crop_params(df["validation"]["sequences_params"])
y = crop_seq_y(df["validation"]["sequences"])
val_ds = tf.data.Dataset.from_tensor_slices((x1, x2, y)).shuffle(1000).batch(batch_size)
datasets.append(val_ds)
datasets.append(len(y))
if load_test:
x1 = crop_seq_x(df["test"]["sequences"])
x2 = crop_params(df["test"]["sequences_params"])
y = crop_seq_y(df["test"]["sequences"])
test_ds = tf.data.Dataset.from_tensor_slices((x1, x2, y)).shuffle(1000).batch(batch_size)
datasets.append(test_ds)
datasets.append(len(y))
return datasets
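# Usage sketch (directory and file names are placeholders): with all three
# splits enabled, the returned list alternates datasets and their sizes in
# train, validation, test order.
# train_ds, n_train, val_ds, n_val, test_ds, n_test = cosserat_rods_sim_pc(
#     "data", "rods.npy", batch_size=32)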
| StarcoderdataPython |
196634 | # encoding: utf-8
from __future__ import print_function
import sys
template = """
from distutils.core import setup, Extension
import sys
import pprint
from Cython.Distutils import build_ext
ext = Extension("%(name)s", sources = %(source_files)s, language="c++",
include_dirs = %(include_dirs)r,
extra_compile_args = [%(compile_args)s],
extra_link_args = [],
)
setup(cmdclass = {'build_ext' : build_ext},
name="%(name)s",
version="0.0.1",
ext_modules = [ext]
)
"""
def compile_and_import(name, source_files, include_dirs=None, **kws):
if include_dirs is None:
include_dirs = []
debug = kws.get("debug")
import os.path
import shutil
import tempfile
import subprocess
import sys
tempdir = tempfile.mkdtemp()
if debug:
print("\n")
print("tempdir=", tempdir)
print("\n")
for source_file in source_files:
shutil.copy(source_file, tempdir)
if sys.platform != "win32":
compile_args = "'-Wno-unused-but-set-variable'"
else:
compile_args = ""
include_dirs = [os.path.abspath(d) for d in include_dirs]
source_files = [os.path.basename(f) for f in source_files]
setup_code = template % locals()
if debug:
print("\n")
print("-" * 70)
print(setup_code)
print("-" * 70)
print("\n")
now = os.getcwd()
os.chdir(tempdir)
with open("setup.py", "w") as fp:
fp.write(setup_code)
sys.path.insert(0, tempdir)
if debug:
print("\n")
print("-" * 70)
import pprint
pprint.pprint(sys.path)
print("-" * 70)
print("\n")
assert subprocess.Popen("%s setup.py build_ext --force --inplace" % sys.executable, shell=True).wait() == 0
print("BUILT")
result = __import__(name)
print("imported")
if debug:
print("imported", result)
sys.path = sys.path[1:]
os.chdir(now)
print(result)
return result
def remove_labels(graph):
_remove_labels = lambda succ_list: [s for s, label in succ_list]
pure_graph = dict((n0, _remove_labels(ni)) for n0, ni in graph.items())
return pure_graph
def find_cycle(graph_as_dict):
""" modified version of
http://neopythonic.blogspot.de/2009/01/detecting-cycles-in-directed-graph.html
"""
nodes = list(graph_as_dict.keys())
for n in graph_as_dict.values():
nodes.extend(n)
todo = list(set(nodes))
while todo:
node = todo.pop()
stack = [node]
while stack:
top = stack[-1]
for node in graph_as_dict.get(top, []):
if node in stack:
return stack[stack.index(node):]
if node in todo:
stack.append(node)
todo.remove(node)
break
else:
node = stack.pop()
return None
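# Example: a -> b -> c -> a contains a cycle; the returned rotation of the
# cycle depends on set iteration order.
# >>> find_cycle({"a": ["b"], "b": ["c"], "c": ["a"]})
# ['a', 'b', 'c']  # some rotation of the cycle
# >>> find_cycle({"a": ["b"], "b": []}) is None
# True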
def _check_for_cycles_in_mapping(mapping):
    # detect cycles in typedefs
graph = dict()
for (alias, type_) in mapping.items():
successors = type_.all_occuring_base_types()
graph[alias] = successors
cycle = find_cycle(graph)
if cycle is not None:
info = " -> ".join(map(str, cycle))
raise Exception("mapping contains cycle: " + info)
def print_map(mapping):
for k, v in mapping.items():
print("%8s -> %s" % (k, v))
def flatten(mapping):
""" resolves nested mappings, eg:
A -> B
B -> C[X,D]
C -> Z
D -> Y
is resolved to:
A -> Z[X,Y]
B -> Z[X,Y]
C -> Z
D -> Y
"""
_check_for_cycles_in_mapping(mapping)
    # this loop only terminates for cycle-free mappings:
while True:
for name, type_ in mapping.items():
transformed = type_.transformed(mapping)
if transformed != type_:
mapping[name] = transformed
break
else:
break
| StarcoderdataPython |
3202631 | <gh_stars>1-10
import redis
r = redis.Redis(host='10.10.14.245', port=6379)
count = 0
with open(r'E:\Python_work\SpiderWork\SpiderPro\SC\city.txt', mode='r', encoding='utf-8') as fp:
city_list = fp.readlines()
with open(r'E:\Python_work\SpiderWork\SpiderPro\SC\pinpai.txt', mode='r', encoding='utf-8') as fp:
pinpai_list = fp.readlines()
city_list = list(set(city_list))
pinpai_list = list(set(pinpai_list))
for city in city_list:
for pinpai in pinpai_list:
url = 'https://{}.taoche.com{}'.format(city.strip(), pinpai.strip())
r.lpush('taoche:start_urls', url)
count += 1
r.set('count', count) | StarcoderdataPython |
3288277 | """
Module responsible for translating sequence annotation data
into GA4GH native objects.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import random
import ga4gh.protocol as protocol
import ga4gh.datamodel as datamodel
import ga4gh.sqlite_backend as sqlite_backend
import ga4gh.exceptions as exceptions
import ga4gh.pb as pb
# Note to self: There's the Feature ID as understood in a GFF3 file,
# the Feature ID that is its server-assigned compoundId, and the
# ID of the feature's row in the DB FEATURE table.
# I need to be careful about which one is which.
"""
For this implementation, `featureSetId` is required, while `parentId`
is optional, and filters the features within the requested `featureSetId`
by their parent.
Only return features on the reference with this name. Genomic positions
are non-negative integers less than reference length.
Requests spanning the join of circular genomes are represented as two
requests one on each side of the join (position 0) end is also required
If specified, this query matches only annotations which match one of the
provided feature types.
For now do not use the features array in search
GFF3 data is represented by rows in a single table, named FEATURE.
The columns of the FEATURE table correspond to the columns of a GFF3,
with three additional columns prepended representing the ID
of this feature, the ID of its parent (if any), and a whitespace
separated array of its child IDs.
_featureColumns pairs represent the ordered (column_name, column_type).
"""
_featureColumns = [
('id', 'TEXT'), # a synthetic principal key generated on ETL
('parent_id', 'TEXT'),
('child_ids', 'TEXT'),
('reference_name', 'TEXT'),
('source', 'TEXT'),
('type', 'TEXT'), # corresponds to featureType, an ontology term
('start', 'INT'),
('end', 'INT'),
('score', 'REAL'),
('strand', 'TEXT'), # limited to one of '+'/'-' or none
('name', 'TEXT'), # the "ID" as found in GFF3, or '' if none
('gene_name', 'TEXT'), # as found in GFF3 attributes
('transcript_name', 'TEXT'), # as found in GFF3 attributes
('attributes', 'TEXT')] # JSON encoding of attributes dict
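# Sketch (an assumption, not code from this module): at ETL time the FEATURE
# table implied by _featureColumns could be created from a statement like the
# one this helper builds. The helper name is hypothetical.
def _createFeatureTableSql():
    cols = ", ".join("{} {}".format(name, ctype) for name, ctype in _featureColumns)
    return "CREATE TABLE FEATURE ({})".format(cols)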
class Gff3DbBackend(sqlite_backend.SqliteBackedDataSource):
"""
Notes about the current implementation:
For this implementation, `featureSetId` is required, while `parentId`
is optional, and filters the features within the requested `featureSetId`
by their parent.
Genomic positions are non-negative integers less than reference length.
Requests spanning the join of circular genomes are represented as two
requests one on each side of the join (position 0)
"""
def __init__(self, dbFile):
super(Gff3DbBackend, self).__init__(dbFile)
self.featureColumnNames = [f[0] for f in _featureColumns]
self.featureColumnTypes = [f[1] for f in _featureColumns]
def featuresQuery(self, **kwargs):
"""
Converts a dictionary of keyword arguments into a tuple
of SQL select statements and the list of SQL arguments
"""
# TODO: Optimize by refactoring out string concatenation
sql = ""
sql_rows = "SELECT * FROM FEATURE WHERE id > 1 "
sql_args = ()
if 'name' in kwargs and kwargs['name']:
sql += "AND name = ? "
sql_args += (kwargs.get('name'),)
if 'geneSymbol' in kwargs and kwargs['geneSymbol']:
sql += "AND gene_name = ? "
sql_args += (kwargs.get('geneSymbol'),)
if 'start' in kwargs and kwargs['start'] is not None:
sql += "AND end > ? "
sql_args += (kwargs.get('start'),)
if 'end' in kwargs and kwargs['end'] is not None:
sql += "AND start < ? "
sql_args += (kwargs.get('end'),)
if 'referenceName' in kwargs and kwargs['referenceName']:
sql += "AND reference_name = ?"
sql_args += (kwargs.get('referenceName'),)
if 'parentId' in kwargs and kwargs['parentId']:
sql += "AND parent_id = ? "
sql_args += (kwargs['parentId'],)
if kwargs.get('featureTypes') is not None \
and len(kwargs['featureTypes']) > 0:
sql += "AND type IN ("
sql += ", ".join(["?", ] * len(kwargs.get('featureTypes')))
sql += ") "
sql_args += tuple(kwargs.get('featureTypes'))
sql_rows += sql
sql_rows += " ORDER BY reference_name, start, end ASC "
return sql_rows, sql_args
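    # For example (illustrative), featuresQuery(referenceName='chr1', start=100,
    # end=200) produces roughly:
    #   SELECT * FROM FEATURE WHERE id > 1 AND end > ? AND start < ?
    #   AND reference_name = ? ORDER BY reference_name, start, end ASC
    # with sql_args == (100, 200, 'chr1').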
def searchFeaturesInDb(
self, startIndex=0, maxResults=None,
referenceName=None, start=None, end=None,
parentId=None, featureTypes=None,
name=None, geneSymbol=None):
"""
Perform a full features query in database.
:param startIndex: int representing first record to return
:param maxResults: int representing number of records to return
:param referenceName: string representing reference name, ex 'chr1'
:param start: int position on reference to start search
:param end: int position on reference to end search >= start
:param parentId: string restrict search by id of parent node.
:param name: match features by name
:param geneSymbol: match features by gene symbol
:return an array of dictionaries, representing the returned data.
"""
# TODO: Refactor out common bits of this and the above count query.
sql, sql_args = self.featuresQuery(
startIndex=startIndex, maxResults=maxResults,
referenceName=referenceName, start=start, end=end,
parentId=parentId, featureTypes=featureTypes,
name=name, geneSymbol=geneSymbol)
sql += sqlite_backend.limitsSql(startIndex, maxResults)
query = self._dbconn.execute(sql, sql_args)
return sqlite_backend.sqliteRowsToDicts(query.fetchall())
def getFeatureById(self, featureId):
"""
Fetch feature by featureID.
:param featureId: the FeatureID as found in GFF3 records
:return: dictionary representing a feature object,
or None if no match is found.
"""
sql = "SELECT * FROM FEATURE WHERE id = ?"
query = self._dbconn.execute(sql, (featureId,))
ret = query.fetchone()
if ret is None:
return None
return sqlite_backend.sqliteRowToDict(ret)
class AbstractFeatureSet(datamodel.DatamodelObject):
"""
A set of sequence features annotations
"""
compoundIdClass = datamodel.FeatureSetCompoundId
def __init__(self, parentContainer, localId):
super(AbstractFeatureSet, self).__init__(parentContainer, localId)
self._name = localId
self._sourceUri = ""
self._referenceSet = None
self._info = {}
def getReferenceSet(self):
"""
Returns the reference set associated with this FeatureSet.
"""
return self._referenceSet
def setReferenceSet(self, referenceSet):
"""
Sets the reference set associated with this FeatureSet to the
specified value.
"""
self._referenceSet = referenceSet
def toProtocolElement(self):
"""
Returns the representation of this FeatureSet as the corresponding
ProtocolElement.
"""
gaFeatureSet = protocol.FeatureSet()
gaFeatureSet.id = self.getId()
gaFeatureSet.dataset_id = self.getParentContainer().getId()
gaFeatureSet.reference_set_id = pb.string(self._referenceSet.getId())
gaFeatureSet.name = self._name
gaFeatureSet.source_uri = self._sourceUri
for key in self._info:
gaFeatureSet.info[key].values.extend(self._info[key])
return gaFeatureSet
def getCompoundIdForFeatureId(self, featureId):
"""
Returns server-style compound ID for an internal featureId.
:param long featureId: id of feature in database
:return: string representing ID for the specified GA4GH protocol
Feature object in this FeatureSet.
"""
if featureId is not None and featureId != "":
compoundId = datamodel.FeatureCompoundId(
self.getCompoundId(), str(featureId))
else:
compoundId = ""
return str(compoundId)
class SimulatedFeatureSet(AbstractFeatureSet):
"""
Simulated data backend for FeatureSet, used for internal testing.
"""
def __init__(self, parentContainer, localId, randomSeed=1):
self._randomSeed = randomSeed
super(SimulatedFeatureSet, self).__init__(parentContainer, localId)
def _getRandomfeatureType(self, randomNumberGenerator):
ontologyTuples = [
("gene", "SO:0000704"),
("exon", "SO:0000147")]
term = protocol.OntologyTerm()
ontologyTuple = randomNumberGenerator.choice(ontologyTuples)
term.term, term.id = ontologyTuple[0], ontologyTuple[1]
term.source_name = "sequenceOntology"
term.source_version = "0"
return term
def _generateSimulatedFeature(self, randomNumberGenerator):
feature = protocol.Feature()
feature.feature_set_id = self.getId()
feature.start = randomNumberGenerator.randint(1000, 2000)
feature.end = feature.start + randomNumberGenerator.randint(1, 100)
feature.feature_type.CopyFrom(self._getRandomfeatureType(
randomNumberGenerator))
references = ["chr1", "chr2", "chrX"]
feature.reference_name = randomNumberGenerator.choice(references)
strands = [protocol.POS_STRAND, protocol.NEG_STRAND]
feature.strand = randomNumberGenerator.choice(strands)
attributes = {
"gene_name": "Frances",
"gene_type": "mRNA",
"gene_status": "UNKNOWN"}
for key, value in attributes.items():
feature.attributes.vals[key].values.add().string_value = value
return feature
def getFeature(self, compoundId):
"""
Fetches a simulated feature by ID.
:param compoundId: any non-null string
:return: A simulated feature with id set to the same value as the
passed-in compoundId.
":raises: exceptions.ObjectWithIdNotFoundException if None is passed
in for the compoundId.
"""
if compoundId is None:
raise exceptions.ObjectWithIdNotFoundException(compoundId)
randomNumberGenerator = random.Random()
randomNumberGenerator.seed(self._randomSeed)
feature = self._generateSimulatedFeature(randomNumberGenerator)
feature.id = str(compoundId)
feature.parent_id = "" # TODO: Test with nonempty parentIDs?
return feature
def getFeatures(self, referenceName=None, start=None, end=None,
startIndex=None, maxResults=None,
featureTypes=None, parentId=None,
name=None, geneSymbol=None, numFeatures=10):
"""
Returns a set number of simulated features.
:param referenceName: name of reference to "search" on
:param start: start coordinate of query
:param end: end coordinate of query
:param startIndex: None or int
:param maxResults: None or int
:param featureTypes: optional list of ontology terms to limit query
:param parentId: optional parentId to limit query.
:param name: the name of the feature
:param geneSymbol: the symbol for the gene the features are on
:param numFeatures: number of features to generate in the return.
10 is a reasonable (if arbitrary) default.
:return: Yields feature list
"""
randomNumberGenerator = random.Random()
randomNumberGenerator.seed(self._randomSeed)
for featureId in range(numFeatures):
gaFeature = self._generateSimulatedFeature(randomNumberGenerator)
gaFeature.id = self.getCompoundIdForFeatureId(featureId)
match = (
gaFeature.start < end and
gaFeature.end > start and
gaFeature.reference_name == referenceName and (
featureTypes is None or len(featureTypes) == 0 or
gaFeature.feature_type in featureTypes))
if match:
gaFeature.parent_id = "" # TODO: Test nonempty parentIDs?
yield gaFeature
class Gff3DbFeatureSet(AbstractFeatureSet):
"""
Stub class to directly read sequence annotation features from GFF3 files.
Tests basic access, not to be used in production.
"""
def __init__(self, parentContainer, localId):
super(Gff3DbFeatureSet, self).__init__(parentContainer, localId)
self._ontology = None
self._dbFilePath = None
self._db = None
def setOntology(self, ontology):
"""
Sets the Ontology instance used by this FeatureSet to the
specified value.
"""
self._ontology = ontology
def getOntology(self):
"""
Returns the ontology term map used to translate ontology term names
to IDs.
"""
return self._ontology
def populateFromFile(self, dataUrl):
"""
Populates the instance variables of this FeatureSet from the specified
data URL.
"""
self._dbFilePath = dataUrl
self._db = Gff3DbBackend(self._dbFilePath)
def populateFromRow(self, row):
"""
Populates the instance variables of this FeatureSet from the specified
DB row.
"""
self._dbFilePath = row[b'dataUrl']
self._db = Gff3DbBackend(self._dbFilePath)
def getDataUrl(self):
"""
Returns the URL providing the data source for this FeatureSet.
"""
return self._dbFilePath
def getFeature(self, compoundId):
"""
Returns a protocol.Feature object corresponding to a compoundId
:param compoundId: a datamodel.FeatureCompoundId object
:return: a Feature object.
:raises: exceptions.ObjectWithIdNotFoundException if invalid
compoundId is provided.
"""
featureId = long(compoundId.featureId)
with self._db as dataSource:
featureReturned = dataSource.getFeatureById(featureId)
if featureReturned is None:
raise exceptions.ObjectWithIdNotFoundException(compoundId)
else:
gaFeature = self._gaFeatureForFeatureDbRecord(featureReturned)
return gaFeature
def _gaFeatureForFeatureDbRecord(self, feature):
"""
:param feature: The DB Row representing a feature
:return: the corresponding GA4GH protocol.Feature object
"""
gaFeature = protocol.Feature()
gaFeature.id = self.getCompoundIdForFeatureId(feature['id'])
if feature.get('parent_id'):
gaFeature.parent_id = self.getCompoundIdForFeatureId(
feature['parent_id'])
else:
gaFeature.parent_id = ""
gaFeature.feature_set_id = self.getId()
gaFeature.reference_name = pb.string(feature.get('reference_name'))
gaFeature.start = pb.int(feature.get('start'))
gaFeature.end = pb.int(feature.get('end'))
gaFeature.name = pb.string(feature.get('name'))
if feature.get('strand', '') == '-':
gaFeature.strand = protocol.NEG_STRAND
else:
# default to positive strand
gaFeature.strand = protocol.POS_STRAND
gaFeature.child_ids.extend(map(
self.getCompoundIdForFeatureId,
json.loads(feature['child_ids'])))
gaFeature.feature_type.CopyFrom(
self._ontology.getGaTermByName(feature['type']))
attributes = json.loads(feature['attributes'])
# TODO: Identify which values are ExternalIdentifiers and OntologyTerms
for key in attributes:
for v in attributes[key]:
gaFeature.attributes.vals[key].values.add().string_value = v
if 'gene_name' in attributes and len(attributes['gene_name']) > 0:
gaFeature.gene_symbol = pb.string(attributes['gene_name'][0])
return gaFeature
def getFeatures(self, referenceName=None, start=None, end=None,
startIndex=None, maxResults=None,
featureTypes=None, parentId=None,
name=None, geneSymbol=None):
"""
method passed to runSearchRequest to fulfill the request
:param str referenceName: name of reference (ex: "chr1")
:param start: castable to int, start position on reference
:param end: castable to int, end position on reference
:param startIndex: none or castable to int
:param maxResults: none or castable to int
:param featureTypes: array of str
:param parentId: none or featureID of parent
:param name: the name of the feature
:param geneSymbol: the symbol for the gene the features are on
:return: yields a protocol.Feature at a time
"""
with self._db as dataSource:
features = dataSource.searchFeaturesInDb(
startIndex, maxResults,
referenceName=referenceName,
start=start, end=end,
parentId=parentId, featureTypes=featureTypes,
name=name, geneSymbol=geneSymbol)
for feature in features:
gaFeature = self._gaFeatureForFeatureDbRecord(feature)
yield gaFeature
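# Usage sketch (hypothetical container/ontology objects; mirrors how the class is wired):
#   featureSet = Gff3DbFeatureSet(parentContainer, 'gencode')
#   featureSet.setOntology(sequenceOntology)
#   featureSet.populateFromFile('gencode.db')
#   for feature in featureSet.getFeatures(referenceName='chr1', start=0, end=10**8):
#       ...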
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 27 14:07:01 2018
@author: gbaechle
"""
import numpy as np
from scipy import misc
import matplotlib.pyplot as plt
from os import listdir
from os.path import isfile, join, exists
import warnings
gdal_available = True
try:
from osgeo import gdal
except ImportError as gdal_import_error:
gdal_available = False
warnings.warn(gdal_import_error.msg)
import sys
sys.path.append("../")
from spectrum import Spectrum3D
# from spectrum_recovery import *  # left commented out in the original, but spectrum_rec() below needs spectrum_recovery()
from lippmann import *
# from hyperspectral_display import GuiManager
def read_file(path):
f = open(path, 'r')
lines = f.readlines()
f.close()
wavelengths = []
data = []
i_time = lines[6].split()[-1]
for line in lines[14:]:
s = line.split()
wavelengths.append(float(s[0]))
data.append(float(s[1]))
return np.array(wavelengths) * 1E-9, np.array(data), float(i_time)
def normalize_data(wavelengths, data, i_time, white_path, black_path):
_, white, i_white = read_file(white_path)
_, black, i_black = read_file(black_path)
# return (data - black)/(white - black)
return (data / i_time - black / i_black) / (white / i_time - black / i_black)
def read_ximea(path, white_ref='', plot=True):
if white_ref != '':
white = read_ximea(white_ref, plot=False)
white = white.intensities[450, 1300, :]
if not gdal_available:
raise ImportError("to use PURDUE image module osgeo is required (need gdal.Open)")
gtif = gdal.Open(path)
# define wavelengths
wavelengths = np.array([464, 476, 487, 498, 510, 522, 536, 546, 553, 565, 577, 589, 599, 608, 620, 630]) * 1E-9
shape = gtif.GetRasterBand(1).GetDataset().ReadAsArray()[0].shape
cube = np.zeros(shape + (len(wavelengths),))
for idx in range(gtif.RasterCount - 1):
print("[ GETTING BAND ]: ", idx)
band = gtif.GetRasterBand(idx + 2)
data = band.GetDataset().ReadAsArray()[idx + 1]
cube[:, :, idx] = data
if plot:
plt.figure()
plt.imshow(data, cmap='gray')
if white_ref != '':
cube /= white[None, None, :]
# cube /= np.max(cube)
return Spectrum3D(wavelengths, cube)
def demosaic_ximea(path):
bands = np.array([464, 476, 487, 498, 510, 522, 536, 546, 553, 565, 577, 589, 599, 608, 620, 630]) * 1E-9
indices = [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (2, 1), (1, 1), (0, 1), (3, 2), (2, 2), (1, 2), (0, 2), (3, 3),
(2, 3), (1, 3), (0, 3)]
img = misc.imread(path)  # note: scipy.misc.imread was removed in SciPy >= 1.2; imageio.imread is the usual replacement
plt.figure()
plt.imshow(img, cmap='gray')
cube = np.zeros((img.shape[0] // 4, img.shape[1] // 4, 16))
for i, idx in enumerate(indices):
cube[:, :, i] = img[idx[1]::4, idx[0]::4]
plt.figure()
plt.imshow(img[idx[1]::4, idx[0]::4], cmap='gray')
return Spectrum3D(bands, cube)
# r defaults to a complex reflection coefficient (magnitude 0.7, phase -148 degrees);
# the 1j appears to have been dropped from the original default expression
def spectrum_rec(lambdas_nu, spectrum_nu, visible=True, N=200, r=0.7 * np.exp(1j * np.deg2rad(-148))):
omegas_nu = 2 * np.pi * c / lambdas_nu
if visible:
omegas = np.linspace(2 * np.pi * c / 400E-9, 2 * np.pi * c / 700E-9, N)
else:
omegas = np.linspace(np.max(omegas_nu), np.min(omegas_nu), N)
lambdas = 2 * np.pi * c / omegas
spectrum = sp.interpolate.interp1d(omegas_nu, spectrum_nu, kind='cubic', bounds_error=False,
fill_value='extrapolate')(omegas)
spectrum_est, Z_est, k0_est = spectrum_recovery(omegas, spectrum, r=r, Z=0, k0=0, n_iter=200, plot=False)
depths = np.linspace(0, Z_est, N)
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(3.45 / 0.6 * 1.5, 3.45 / 4 / 0.6 * 1.5))
show_spectrum(lambdas, spectrum, ax=ax1, show_background=True, short_display=True)
show_spectrum(lambdas, np.abs(np.real(fda.apply_h(spectrum_est, Z_est, lambdas, lambdas, r=r, k0=k0_est, mode=2))),
ax=ax2, show_background=True, short_display=True)
show_lippmann_transform(depths, lippmann_transform(lambdas, spectrum_est, depths, r=r, k0=k0_est)[0], ax=ax3,
short_display=True)
show_spectrum(lambdas, np.abs(fda.apply_h(spectrum_est, Z_est, lambdas, lambdas, r=r, k0=k0_est, mode=1)) ** 2,
ax=ax4, short_display=True, show_background=True)
ax1.set_title('(a) Measured spectrum')
ax2.set_title('(b) Estimated spectrum')
ax3.set_title('(c) Estimated silver density')
ax4.set_title('(d) Estimated replayed spectrum')
# f.tight_layout()
# f.subplots_adjust(hspace=-1.0)
return spectrum_est, Z_est, k0_est
def dye_profile(path):
files = sorted([f for f in listdir(path) if isfile(join(path, f))])
wavelengths = []
intensity = []
# discard the last one (light reference), and the first one (noisy data)
for file in sorted(files)[2:-1]:
print(file)
w, spectrum, _ = read_file(join(path, file))
# select only visible light
w, spectrum = select_visible(w, spectrum)
show_spectrum(w, spectrum)
peak_idx = np.argmax(spectrum)
wavelengths.append(w[peak_idx])
intensity.append(spectrum[peak_idx])
print(w[peak_idx], spectrum[peak_idx])
# remove the edge values that are not so reliable...
wavelengths = np.array(wavelengths[:-7])
intensity = np.array(intensity[:-7])
show_spectrum(wavelengths, intensity)
plt.title('Without correction')
w, light, _ = read_file(join(path, files[-1]))
w, light = select_visible(w, light)
show_spectrum(w, light)
plt.title('Light source')
intensity /= np.interp(wavelengths, w, light)
show_spectrum(wavelengths, intensity)
plt.title('After correction')
spread = np.abs(np.diff(wavelengths))
print(intensity)
spread[spread == 0] = 10E-10
intensity = intensity[1:] / spread
print(intensity)
show_spectrum(wavelengths[1:], intensity)
plt.title('After correction 2')
def select_visible(wavelengths, spectrum):
idx = np.where((wavelengths <= 700E-9) & (wavelengths >= 402E-9))
wavelengths = wavelengths[idx]
spectrum = spectrum[idx]
return wavelengths, spectrum
def load_specim_data(file_prefix, ds, cut=False):
"""Load binary data from .dat file and wavelengths from header .hdr file
:param file_prefix: file name to read, without extension
:param ds: down-sampling parameter (how many times to reduce image size)
:param cut: if True, use data from file_prefix_cut.txt to cut edges of the image
:returns:
a pair of np. arrays, 2D cropped image and list of wavelengths (in meters)"""
data = np.fromfile(file_prefix + ".dat", dtype=np.float32)
data = np.swapaxes(data.reshape((512, -1, 512)), 1, 2)
if cut:
cut_file_path = file_prefix + "_cut.txt"
if exists(cut_file_path):
cut_idx = np.loadtxt(cut_file_path).astype(int)  # reuse the path computed above; np.int is removed in modern NumPy
data = data[cut_idx[0, 0]:cut_idx[0, 1], cut_idx[1, 0]:cut_idx[1, 1]]
else:
print("The file specifying the crop does not exist for this data")
downsampled = data[::ds, ::ds, :]
header_file = open(file_prefix + ".hdr", "r")
header = header_file.read()
wavelengths = np.array([float(nr[:-1]) for nr in header.split("{")[-1].split()[:-1]])
return downsampled, 1e-9 * wavelengths
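# Usage sketch (hypothetical file prefix; a matching .dat/.hdr pair must exist side by side):
#   cube, wavelengths = load_specim_data('scans/plate_01', ds=4, cut=True)
#   cube has shape (512//ds, 512//ds, n_bands); wavelengths are returned in meters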
# if __name__ == '__main__':
# path = '/Users/gbaechle/EPFL/PhD/BRDF Data and Rendering Engines/Spectrometer/2018-09-18 Wiener'
# dye_profile(path)
# path = '/Users/gbaechle/EPFL/PhD/BRDF Data and Rendering Engines/Lippmann Elysée/spectrometer_data/Parrot 1.txt'
#
# wavelengths, data, i_time = read_file(path)
#
# print('integration time', i_time)
#
# white_path = '/Users/gbaechle/EPFL/PhD/BRDF Data and Rendering Engines/Lippmann Elysée/spectrometer_data/Bright frame ' + str(int(i_time)) + '.txt'
# black_path = '/Users/gbaechle/EPFL/PhD/BRDF Data and Rendering Engines/Lippmann Elysée/spectrometer_data/Dark frame ' + str(int(i_time)) + '.txt'
#
#
# show_spectrum(wavelengths, data, visible=True, show_background=True)
#
# data_norm = normalize_data(wavelengths, data, i_time, white_path, black_path)
# show_spectrum(wavelengths, data_norm, visible=True, show_background=True)
# spectrum_est, Z_est, k0_est = spectrum_rec(wavelengths, data_norm)
# show_spectrum(wavelengths, data_norm, visible=True, show_background=True)
# wavelengths, data, i_time = read_file(white_path)
# show_spectrum(wavelengths, data, visible=True)
# wavelengths, data, i_time = read_file(black_path)
# show_spectrum(wavelengths, data, visible=True)
# for i_time in [10, 15, 25, 30, 40, 60]:
# white_path = '/Users/gbaechle/EPFL/PhD/BRDF Data and Rendering Engines/Lippmann Elysée/spectrometer_data/Bright frame ' + str(int(i_time)) + '.txt'
# black_path = '/Users/gbaechle/EPFL/PhD/BRDF Data and Rendering Engines/Lippmann Elysée/spectrometer_data/Dark frame ' + str(int(i_time)) + '.txt'
#
# wavelengths, data_w, i_time = read_file(white_path)
# wavelengths, data_b, i_time = read_file(black_path)
# show_spectrum(wavelengths, data_w-data_b, visible=True, vmax=38837)
# plt.gca().set_title('white' + str(i_time))
# show_spectrum(wavelengths, data_b, visible=True, vmax=20582)
# plt.gca().set_title('dark' + str(i_time))
# path = '/Users/gbaechle/EPFL/PhD/BRDF Data and Rendering Engines/Lippmann Elysée/Ximea/colorchecker_2_8.tif'
# spectrum = demosaic_ximea(path)
# print(spectrum.intensities.shape)
# spectrum.compute_rgb()
# print(spectrum.intensities.shape, spectrum.rgb_colors.shape)
# path = '/Volumes/Gilles EPFL/Datasets/Ximea hyperspectral/Demixed_parrot_200_8.tif'
# path_white = '/Volumes/Gilles EPFL/Datasets/Ximea hyperspectral/Demixed_white_50_8.tif'
# spectrum = read_ximea(path, white_ref=path_white)
# spectrum.intensities = np.transpose(spectrum.intensities, (2,0,1))
# spectrum.compute_rgb(integrate_nu=True)
# spectrum.intensities = np.transpose(spectrum.intensities, (1,2,0))
#
# gui_manager = GuiManager(spectrum, normalize_spectrums=True, gamma_correct=False)
# gui_manager.show()
# Repo: victorers1/anotacoes_curso_python | File: excript/aulas/aula96_funcao_aninhada.py
def func():
print("func")
def func_interna():
print("func_interna")
func_interna()
func()
# -*- coding: utf-8 -*-
# @File : google_im2txt/get_uni_caption.py
# @Info : @ TSMC-SIGGRAPH, 2018/7/9
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import re
from utils.emb_json import load_json_file, store_json_file
data = load_json_file("google_im2txt_cap.json")
pattern = re.compile(r'COCO_val2014_(\d+).jpg') # find integer number
# json_file = list()
# for item in data:
# img_cap_dict = {}
# image_id = pattern.findall(item['filename'])
# img_cap_dict['image_id'] = int(image_id[0])
# img_cap_dict['caption'] = item['caption']
# json_file.append(img_cap_dict)
#
# store_json_file("val_filter_version.json", json_file)
json_file_uni = list()
for i, item in enumerate(data):
if i % 3 == 0:  # keep every third entry: each image apparently carries three consecutive captions
img_cap_dict = {}
image_id = pattern.findall(item['filename'])
img_cap_dict['image_id'] = int(image_id[0])
img_cap_dict['caption'] = item['caption']
json_file_uni.append(img_cap_dict)
store_json_file("google_im2txt_test_uni_version.json", json_file_uni)
# Repo: Lifeistrange/WeiboSpider
# coding:utf-8
import datetime
import json
import redis
from config.conf import get_redis_args
redis_args = get_redis_args()
class Cookies(object):
rd_con = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('cookies'))
rd_con_broker = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('broker'))
@classmethod
def store_cookies(cls, name, cookies):
pickled_cookies = json.dumps(
{'cookies': cookies, 'loginTime': datetime.datetime.now().timestamp()})
cls.rd_con.hset('account', name, pickled_cookies)
cls.rd_con.lpush('account_queue', name)
@classmethod
def fetch_cookies(cls):
for i in range(cls.rd_con.llen('account_queue')):
name = cls.rd_con.rpop('account_queue').decode('utf-8')
if name:
j_account = cls.rd_con.hget('account', name).decode('utf-8')
if j_account:
cls.rd_con.lpush('account_queue', name)  # if the account no longer exists, this name gets cleaned up and the next one is tried
account = json.loads(j_account)
login_time = datetime.datetime.fromtimestamp(account['loginTime'])
if datetime.datetime.now() - login_time > datetime.timedelta(hours=20):
cls.rd_con.hdel('account', name)
continue  # drop this expired account; account_queue is cleaned on the next access (not here, because of the distributed setup)
return name, account['cookies']
else:
return None
@classmethod
def delete_cookies(cls, name):
cls.rd_con.hdel('account', name)
return True
@classmethod
def check_login_task(cls):
if cls.rd_con_broker.llen('login_queue') > 0:
cls.rd_con_broker.delete('login_queue')
class Urls(object):
rd_con = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('urls'))
@classmethod
def store_crawl_url(cls, url, result):
cls.rd_con.set(url, result)
class IdNames(object):
rd_con = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('id_name'))
@classmethod
def store_id_name(cls, user_name, user_id):
cls.rd_con.set(user_name, user_id)
@classmethod
def fetch_uid_by_name(cls, user_name):
user_id = cls.rd_con.get(user_name)
if user_id:
return user_id.decode('utf-8')
return ''
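# Usage sketch (hypothetical account name and cookie payload; everything lives in Redis):
#   Cookies.store_cookies('weibo_account_1', {'SUB': '...'})
#   fetched = Cookies.fetch_cookies()  # -> (name, cookies) or None once all have expired
#   IdNames.store_id_name('some_user', '1234567890')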
# Repo: kollieartwolf/pc-rudn-course-2020-21
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('lectures.urls', namespace='lectures')),
]
from typing import List
def insert_sort(lst: List[int]):
for i in range(1, len(lst)):
j = i - 1
key = lst[i]
while j >= 0 and lst[j] > key:  # check j first, otherwise lst[-1] wraps around to the end of the list
lst[j + 1] = lst[j]
j -= 1
lst[j + 1] = key
return lst
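# Minimal self-check (illustrative): insertion sort is O(n^2) but stable and in-place.
if __name__ == '__main__':
    assert insert_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
    assert insert_sort([]) == []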
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import copy
import numpy as np
import pandas as pd
from scipy import signal
from scipy.stats import linregress
# from collections import namedtuple
from raman_fitting.processing.spectrum_template import (
SpecTemplate,
SpectrumWindowLimits,
SpectrumWindows,
)
# if __name__ == "__main__":
# pass
# else:
# from .spectrum_template import SpecTemplate, SpectrumWindowLimits, SpectrumWindows
class SpectrumMethodException(ValueError):
pass
class SpectrumMethods:
"""
Parent class to hold several Spectrum Methods as children
"""
data = SpecTemplate
def __init__(self, ramanshift, intensity, label="", **kwargs):
"""
Parameters
----------
ramanshift : array or list
collection of the ramanshift values
intensity : array or list
collection of the intensity values
label : str, optional
    Label attached to derived data. The default is "".
**kwargs :
    Extra keyword arguments, stored on the instance.
Returns
-------
None.
"""
self.ramanshift = ramanshift
self.intensity = intensity
self.label = label
self.kwargs = kwargs
@staticmethod
def filtered_int(intensity=None):
try:
int_savgol_fltr = signal.savgol_filter(intensity, 13, 3, mode="nearest")
except Exception as e:
raise SpectrumMethodException(f"no intensity given to filter, {e}")
return int_savgol_fltr
class SpectrumSplitter(SpectrumMethods):
"""
For splitting of spectra into the several SpectrumWindows,
the names of the windows are taken from SpectrumWindows
and set as attributes to the instance.
"""
def __init__(self, *args, **kws):
super().__init__(*args, **kws)
def split_data(self, spec_windows=SpectrumWindows()):
_r, _int = self.ramanshift, self.intensity
# self.register.get(on_lbl)
_d = {}
for windowname, limits in spec_windows.items():
ind = (_r >= np.min(limits)) & (_r <= np.max(limits))
# _intslice = _int[ind]
label = f"window_{windowname}"
if self.label:
label = f"{self.label}_{label}"
_data = self.data(_r[ind], _int[ind], label)
setattr(self, label, _data)
_d.update(**{windowname: _data})
self.windows_data = _d
# class Filter(SpectrumMethods):
# '''
# For filtering the intensity of a spectrum
# '''
# def get_filtered(self, intensity):
class BaselineSubtractorNormalizer(SpectrumSplitter):
"""
For baseline subtraction as well as normalization of a spectrum
"""
def __init__(self, *args, **kws):
super().__init__(*args, **kws)
self.split_data()
self.windowlimits = SpectrumWindowLimits()
self.subtract_loop()
self.get_normalization_factor()
self.set_normalized_data()
def subtract_loop(self):
_blcorr = {}
_info = {}
for windowname, spec in self.windows_data.items():
blcorr_int, blcorr_lin = self.subtract_baseline_per_window(windowname, spec)
label = f"blcorr_{windowname}"
if self.label:
label = f"{self.label}_{label}"
_data = self.data(spec.ramanshift, blcorr_int, label)
_blcorr.update(**{windowname: _data})
_info.update(**{windowname: blcorr_lin})
self.blcorr_data = _blcorr
self.blcorr_info = _info
def subtract_baseline_per_window(self, windowname, spec):
rs = spec.ramanshift
if windowname[0:4] in ("full", "norm"):
i_fltrd_dspkd_fit = self.windows_data.get("1st_order").intensity
else:
i_fltrd_dspkd_fit = spec.intensity
_limits = self.windowlimits.get(windowname)
# assert bool(_limits),f'no limits for {windowname}'
bl_linear = linregress(
rs[[0, -1]],
[
np.mean(i_fltrd_dspkd_fit[0 : _limits[0]]),
np.mean(i_fltrd_dspkd_fit[_limits[1] : :]),
],
)
i_blcor = spec.intensity - (bl_linear[0] * rs + bl_linear[1])
# blcor = pd.DataFrame({'Raman-shift' : w, 'I_bl_corrected' :i_blcor, 'I_raw_data' : i})
return i_blcor, bl_linear
def get_normalization_factor(self, norm_method="simple"):
if norm_method == "simple":
normalization_intensity = np.nanmax(
self.blcorr_data["normalization"].intensity
)
if 0:
# TODO not implemented
if norm_method == "fit":
normalization = NormalizeFit(
self.blcorr_data["1st_order"], plotprint=False
) # TODO still implement this NormalizeFit
normalization_intensity = normalization["IG"]
self.norm_factor = 1 / normalization_intensity
norm_dict = {
"norm_factor": self.norm_factor,
"norm_method": norm_method,
"norm_int": normalization_intensity,
}
self.norm_dict = norm_dict
def set_normalized_data(self):
_normd = {}
for windowname, spec in self.blcorr_data.items():
label = f"norm_blcorr_{windowname}"
if self.label:
label = f"{self.label}_{label}"
_data = self.data(spec.ramanshift, spec.intensity * self.norm_factor, label)
_normd.update(**{windowname: _data})
self.norm_data = _normd
def NormalizeFit(spec, plotprint=False):
pass # TODO placeholder
def array_nan_checker(array):
_nans = [n for n, i in enumerate(array) if np.isnan(i)]
return _nans
class Despiker(SpectrumMethods):
"""
A Despiking algorithm from reference literature: https://doi.org/10.1016/j.chemolab.2018.06.009
Parameters
----------
input_intensity : np.ndarray
The intensity array of which the despiked intensity will be calculated.
info : dict, optional
Extra information for despiking settings are added to this dict.
Attributes
---------
despiked_intensity : np.ndarray
The resulting array of the despiked intensity of same length as input_intensity.
Notes
--------
Let Y1;...;Yn represent the values of a single Raman spectrum recorded at equally spaced wavenumbers.
From this series, form the detrended differenced series dY(t) = Y(t+1) - Y(t). This simple
data processing step has the effect of annihilating linear and slow-moving curvilinear trends; however,
sharp localised spikes will be preserved. Denote the median and the median absolute deviation of the
differenced series by M and MAD; the modified Z-score used below is Z(t) = 0.6745 * (dY(t) - M) / MAD,
and points with |Z(t)| above Z_threshold are treated as spikes and replaced by a local moving average.
Whitaker, D.A. and Hayes, K. Chemometrics and Intelligent Laboratory Systems 179 (2018) 82-84
"""
keys = ["input_intensity", "Zt", "Z_t_filtered", "despiked_intensity"]
def __init__(
self, intensity: np.ndarray, Z_threshold=4, moving_window_size=1, info={}
):
self.info = info
self.Z_threshold = Z_threshold
self.moving_window_size = moving_window_size
self.info.update(
{"Z_threshold": Z_threshold, "Z_filter_ma": moving_window_size}
)
# these get populated by the run_despike call in the setter
self.result = {}
self.df = pd.DataFrame()
# setter calls to run_despike
self._int = intensity
# _new_int = copy.deepcopy(intensity)
self.input_intensity = intensity
@property
def input_intensity(self):
return self._input_intensity
@input_intensity.setter
def input_intensity(self, value):
"""sanitizes the input argument value for an array"""
type_test = str(type(value))
if "__main__" in type_test:
if "intensity" in value._fields:
val_intensity = value.intensity
elif "numpy.ndarray" in type_test:
val_intensity = value
elif "dict" in type_test:
val_intensity = value.get([i for i in value.keys() if "intensity" in i][0])
else:
raise ValueError(f"Despike input error {type_test} for {value}")
self.info.update({"input_type": type_test})
self._input_intensity = val_intensity
self.despiked_intensity = val_intensity
@property
def despiked_intensity(self):
return self._despiked_intensity
@despiked_intensity.setter
def despiked_intensity(self, value):
result = self.run_despike_steps(value, self.Z_threshold)
self._despiked_intensity = result["despiked_intensity"]
self.result = result
self.df = pd.DataFrame(result)
def run_despike_steps(self, intensity, Z_threshold):
Z_t = self.calc_Z(intensity)
Z_t_filtered = self.calc_Z_filtered(Z_t, Z_threshold)
i_despiked = self.despike_filter(
intensity, Z_t_filtered, self.moving_window_size
)
result_values = [intensity, Z_t, Z_t_filtered, i_despiked]
result = dict(zip(self.keys, result_values))
return result
@staticmethod
def calc_Z(intensity):
dYt = np.append(np.diff(intensity), 0)
# dYt = intensity.diff()
dYt_Median = np.median(dYt)
# M = dYt.median()
# dYt_M = dYt-M
dYt_MAD = np.median(abs(dYt - dYt_Median))
# MAD = np.mad(dYt)
Z_t = (0.6745 * (dYt - dYt_Median)) / dYt_MAD
# intensity = blcor.assign(**{'abs_Z_t': Z_t.abs()})
return Z_t
@staticmethod
def calc_Z_filtered(Z_t, Z_threshold):
Z_t_filtered = copy.deepcopy(Z_t)
Z_t_filtered[np.abs(Z_t) > Z_threshold] = np.nan
Z_t_filtered[0] = Z_t_filtered[-1] = 0
return Z_t_filtered
# self.Z_t_filtered = Z_t_filtered
# Z_threshold = 3.5
# Z_t_filtered = [Z_t
# Z_t_filtered[Z_filter_indx] = np.nan
# y_out,n = intensity,len(intensity)
@staticmethod
def despike_filter(
intensity, Z_t_filtered, moving_window_size, ignore_lims=(20, 46)
):
n = len(intensity)
i_despiked = copy.deepcopy(intensity)
spikes = np.where(np.isnan(Z_t_filtered))
for i in list(spikes[0]):
if i < ignore_lims[0] or i > ignore_lims[1]:
w = np.arange(
max(0, i - moving_window_size), min(n, i + moving_window_size)
)
w = w[~np.isnan(Z_t_filtered[w])]
if intensity[w].any():
i_despiked[i] = np.mean(intensity[w])
else:
i_despiked[i] = intensity[i]
# else:
# pass # ignored
return i_despiked
def plot_Z(self):
# fig,ax = plt.subplots(2)
self.df.plot(y=["Zt", "Z_t_filtered"], alpha=0.5)
self.df.plot(y=["input_intensity", "despiked_intensity"], alpha=0.8)
# plt.show()
# plt.close()
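# Minimal usage sketch (illustrative data, not part of the original module):
if __name__ == '__main__':
    _clean = np.sin(np.linspace(0, 2 * np.pi, 100))
    _spiked = _clean.copy()
    _spiked[60] += 50.0  # inject a cosmic-ray-like spike outside the default ignore_lims
    _despiker = Despiker(_spiked, Z_threshold=4, moving_window_size=2)
    assert abs(_despiker.despiked_intensity[60] - _clean[60]) < 1.0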
# Repo: nikhiljha/kubernetes-typed
# Code generated by `typeddictgen`. DO NOT EDIT.
"""V1AWSElasticBlockStoreVolumeSourceDict generated type."""
from typing import TypedDict
V1AWSElasticBlockStoreVolumeSourceDict = TypedDict(
"V1AWSElasticBlockStoreVolumeSourceDict",
{
"fsType": str,
"partition": int,
"readOnly": bool,
"volumeID": str,
},
total=False,
)
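# Usage sketch (illustrative values): total=False makes every key optional, so a
# partial dict still type-checks.
# volume: V1AWSElasticBlockStoreVolumeSourceDict = {"volumeID": "vol-0abc123", "readOnly": True}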
print("Hello")
username = "Joe"
print(username)
class BaseMeta(type):
def __new__(cls, name, bases, body):
    # enforce the check only on subclasses: Base itself (created with no bases)
    # defines foo() but not bar(), and would otherwise fail its own check
    if bases and 'bar' not in body:
        raise TypeError("Bad user class")
# print('BaseMeta.__new__',cls,name,bases,body)
return super().__new__(cls,name,bases,body)
class Base(metaclass=BaseMeta):
def foo(self):
return self.bar()
def __init_subclass__(cls,*a,**kw):
print('init_subclass',a,kw)
return super().__init_subclass__(*a,**kw)
# old_bc = __build_class__
# def my_bc(*a,**kw):
# print("my buildclass-->",a,kw)
# return old_bc(*a,**kw)
# import builtins
# builtins.__build_class__ = my_bc
| StarcoderdataPython |
1727386 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix, roc_curve, auc
def multiple_histograms_plot(data, x, hue, density=False, bins=10,
alpha=0.5, colors=None, hue_order=None,
probability_hist=False, xticks=None,
title=None, xlabel=None, ylabel=None,
figsize=(15, 8), xticklabels=None):
hue_order = hue_order if hue_order is not None else sorted(data[hue].unique())
colors = colors if colors is not None else sns.color_palette(n_colors=len(hue_order))
colors_dict = dict(zip(hue_order, colors))
plt.figure(figsize=figsize)
for current_hue in hue_order:
current_hue_mask = data[hue] == current_hue
data.loc[current_hue_mask, x].hist(bins=bins, density=density,
alpha=alpha, label=str(current_hue),
color=colors_dict[current_hue])
xlabel = x if xlabel is None else xlabel
ylabel = (ylabel if ylabel is not None
else 'Density' if density
else 'Frequency')
if title is None:  # respect a caller-supplied title instead of overwriting it
    _title_postfix = ' (normalized)' if density else ''
    title = f'{xlabel} by {hue}{_title_postfix}'
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.legend()
ax = plt.gca()
if probability_hist:
plt.xlim(-0.0001, 1.0001)
ax.set_xticks(np.arange(0, 1.1, 0.1))
ax.set_xticks(np.arange(0.05, 1, 0.1), minor=True)
elif xticks is not None:
ax.set_xticks(xticks)
if xticklabels is not None:
ax.set_xticklabels(xticklabels)
def bar_plot_with_categorical(df, x, hue, order=None, figsize=(16, 8),
plot_average=True, xticklabels=None,
**sns_kwargs):
if order is None:
order = (pd.pivot_table(data=df, values=hue, index=[x], aggfunc='mean')
.sort_values(by=hue, ascending=False)
.index.values)
plt.subplots(figsize=figsize)
sns.barplot(x=x, y=hue, data=df, order=order, **sns_kwargs)
if plot_average:
hue_average = df[hue].mean()
plt.axhline(y=hue_average, linewidth=2, linestyle='--',
color='gray', label='{} average'.format(hue))
if xticklabels is not None:
ax = plt.gca()
ax.set_xticklabels(xticklabels)
plt.legend()
plt.show()
def plot_confusion_matrix(y_true, y_pred,
index_labels=('False (truth)', 'True (truth)'),
columns_labels=('False (pred)', 'True (pred)')):
conf_matrix = confusion_matrix(y_true, y_pred)
conf_matrix_df = pd.DataFrame(conf_matrix, index=index_labels,
columns=columns_labels)
_, ax = plt.subplots(figsize=(8, 8))
ax.set_title('Confusion Matrix')
sns.heatmap(conf_matrix_df, annot=True, fmt="d", linewidths=10,
cmap='Blues', ax=ax)
def plot_confusion_matrix_2(y_true, y_pred,
normalize=False,
title=None,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
# binary class labels are hard-coded here; adapt for multi-class problems
classes = ['0', '1']
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
return fig
def plot_roc(y_true, y_score, figsize=(8, 8)):
fpr, tpr, _ = roc_curve(y_true, y_score)
roc_auc = auc(fpr, tpr)
plt.figure(figsize=figsize)
plt.plot(fpr, tpr, color='darkorange',
lw=2, label=f'ROC curve (AUC = {100*roc_auc:.2f}%)')
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc="lower right")
plt.show()
return roc_auc
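# Usage sketch (illustrative arrays, not part of the original module):
#   y_true = np.array([0, 0, 1, 1])
#   y_score = np.array([0.1, 0.4, 0.35, 0.8])
#   auc_value = plot_roc(y_true, y_score)  # draws the ROC curve and returns the AUC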
# File: TF/logistic_regression.py (despite the name, this fits a nonlinear curve y = x^2 - 0.5 + noise)
# coding=utf-8
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def get_weight(shape, lamda):
    pass  # stub: seemingly meant to build a weight variable with L2 regularization; never implemented
# add a layer with an activation function (weight regularization is not considered)
def add_layer(inputs, in_size, out_size, activation_function=None):
"""添加层"""
# add one more layer and return the output of this layer
weights = tf.Variable(tf.random_normal([in_size, out_size]))
biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
wx_plus_b = tf.matmul(inputs, weights) + biases
if activation_function is None:
outputs = wx_plus_b
else:
outputs = activation_function(wx_plus_b)
return outputs
'''1. Training data'''
# Make up some real data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
'''2. Define placeholder nodes to receive the data'''
# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 1])  # unknown sample count, 1 feature; fed via feed_dict at run time
ys = tf.placeholder(tf.float32, [None, 1])
'''3. Define the network layers: hidden layers and a prediction layer'''
# add hidden layer: the input is xs, with 10 neurons in the hidden layer
# the input layer has a single feature, so in_size=1
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)  # ReLU is one kind of activation function
l2 = add_layer(l1, 10, 20, activation_function=tf.nn.relu)
l3 = add_layer(l2, 20, 30, activation_function=tf.nn.relu)
# the output layer also has a single attribute, out_size=1
# add output layer: the input is the last hidden layer; the prediction layer outputs 1 value
p1 = add_layer(l3, 30, 10, activation_function=tf.nn.relu6)
prediction = add_layer(p1, 10, 1, activation_function=None)
'''4. Define the loss expression'''
# the error between prediction and real data
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))  # reduction_indices picks the axis to sum over
'''5. Choose an optimizer to minimize the loss'''
# this line defines how the loss is reduced; the learning rate is 0.0001
optimizer = tf.train.RMSPropOptimizer(0.0001)
train = optimizer.minimize(loss)  # gradient-based minimization step
# important step: initialize all variables
# (tf.initialize_all_variables is the deprecated pre-TF1.0 name of tf.global_variables_initializer)
init = tf.initialize_all_variables()
sess = tf.Session()
# nothing defined above is computed until sess.run is called
sess.run(init)
# plot the real data
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(x_data, y_data, "-")
plt.ion()
plt.show()
for i in range(2000):
# training
sess.run(train, feed_dict={xs: x_data, ys: y_data})
if i % 10 == 0:
# to visualize the result and improvement
try:
ax.lines.remove(lines[0])
except Exception:
pass
# predict of the training data
prediction_value = sess.run(prediction, feed_dict={xs: x_data})
# plot the prediction
lines = ax.plot(x_data, prediction_value, 'r-', lw=2)
plt.pause(0.1)
# File: setup.py
import re
from setuptools import setup, find_packages
def get_long_description():
with open('README.md') as f:
return f.read()
def get_version():
with open('monobank_client/__init__.py') as f:
version_match = re.search(r"^__version__\s+=\s+['\"]([^'\"]*)['\"]", f.read(), re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.')
setup(name='monobank_client',
version=get_version(),
description='Monobank API client',
long_description=get_long_description(),
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/ortymid/python-monobank-client',
packages=find_packages(),
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
install_requires=[
'requests',
],
zip_safe=False)
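# Packaging sketch (assumption: standard setuptools workflow, not stated in this file):
#   python setup.py sdist bdist_wheel
#   twine upload dist/*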
from flask import Flask, jsonify, redirect, request
from flask_caching import Cache
import data_fetch
app = Flask(__name__)
cache = Cache(app, config={'CACHE_TYPE': 'simple'})
cache.init_app(app)
cache_time = 120 # 2 Minutes
@app.route('/')
@cache.cached(timeout=cache_time)
def index():
fetcher = data_fetch.VelocityVersionFetcher()
return jsonify(
versions=fetcher.versions,
latest_version=fetcher.latest_version,
latest_release=fetcher.latest_release,
stable_versions=fetcher.stable_versions,
error=fetcher.errored
)
@app.route('/version/<version>/')
@cache.cached(timeout=cache_time)
def specific_version_meta(version: str):
version_object = data_fetch.get_version(version)
return jsonify({
"version": version_object.version,
"exists": version_object.real,
"snapshot": version_object.snapshot,
"download": {
"url": version_object.url,
"name": version_object.relative_url,
"checksum": {
"sha512": version_object.velocity_file_sha512,
"sha256": version_object.velocity_file_sha256,
"sha1": version_object.velocity_file_sha1,
"md5": version_object.velocity_file_md5
}
}
})
@app.route('/version/<version>/download/')
@cache.cached(timeout=cache_time)
def specific_version_download(version: str):
version_object = data_fetch.get_version(version)
return redirect(version_object.url)
if __name__ == '__main__':
app.run()
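# Example requests against a local dev server (the version number is hypothetical):
#   GET /                           -> JSON listing versions / latest_version / stable_versions
#   GET /version/1.1.0/             -> metadata and checksums for that version
#   GET /version/1.1.0/download/    -> 302 redirect to the actual download URL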
from ..util import slice_
def csrot(N, CX, INCX, CY, INCY, C, S):
"""Applies a Givens rotation to a pair of vectors x and y
Parameters
----------
N : int
Number of elements in input vector
CX : numpy.ndarray
A single precision complex array, dimension (1 + (`N` - 1)*abs(`INCX`))
INCX : int
Storage spacing between elements of `CX`
CY : numpy.ndarray
A single precision complex array, dimension (1 + (`N` - 1)*abs(`INCY`))
INCY : int
Storage spacing between elements of `CY`
C : numpy.single
The Givens parameter c, with value cos(theta)
S : numpy.single
The Givens parameter s, with value sin(theta)
Returns
-------
None
See Also
--------
srot : Single-precision real Givens rotation
crot : Single-precision complex Givens rotation
zdrot : Double-precision complex Givens rotation
Notes
-----
Online PyBLAS documentation: https://nbviewer.jupyter.org/github/timleslie/pyblas/blob/main/docs/csrot.ipynb
Reference BLAS documentation: https://github.com/Reference-LAPACK/lapack/blob/v3.9.0/BLAS/SRC/csrot.f
Examples
--------
>>> x = np.array([1+2j, 2+3j, 3+4j], dtype=np.complex64)
>>> y = np.array([6+7j, 7+8j, 8+9j], dtype=np.complex64)
>>> N = len(x)
>>> incx = 1
>>> incy = 1
>>> theta = np.pi/2
>>> csrot(N, x, incx, y, incy, np.cos(theta), np.sin(theta))
>>> print(x)
[6.+7.j 7.+8.j 8.+9.j]
>>> print(y)
[-1.-2.j -2.-3.j -3.-4.j]
"""
if N <= 0:
return
x_slice = slice_(N, INCX)
y_slice = slice_(N, INCY)
X_TEMP = C * CX[x_slice] + S * CY[y_slice]
CY[y_slice] = -S * CX[x_slice] + C * CY[y_slice]
CX[x_slice] = X_TEMP
import os
import numpy as np
from astropy.table import Table
from astropy.modeling.models import custom_model
from scipy.ndimage.filters import gaussian_filter1d
from astropy.modeling.core import Fittable1DModel
from astropy.modeling.parameters import Parameter
__all__ = ['IronTemplate']
pathList = os.path.abspath(__file__).split("/")
package_path = '/'.join(pathList[:-1])
irontemp = Table.read('{0}/data/irontemplate.ipac'.format(package_path),
format='ipac')
wave_temp = irontemp['Spectral_axis'].data
flux_temp = irontemp['Intensity'].data
flux_temp /= np.max(flux_temp)
class IronTemplate(Fittable1DModel):
'''
This is a Fe template of AGN from I Zw 1 (Boroson & Green 1992).
Parameters
----------
x : array like
Wavelength, units: Angstrom.
amplitude : float
Amplitude of the template, units: arbitrary.
stddev : float
Velocity dispersion of the AGN, units: km/s. Lower limit about 390 km/s.
z : float
Redshift of the AGN.
Returns
-------
flux_intp : array like
The interpolated flux of iron emission.
'''
amplitude = Parameter(default=1, bounds=(0, None))
stddev = Parameter(default=910/2.3548, bounds=(910/2.3548, None))
z = Parameter(default=0, bounds=(0, 10))
@staticmethod
def evaluate(x, amplitude, stddev, z):
"""
Gaussian model function.
"""
stddev_intr = 900 / 2.3548 # Velocity dispersion of I Zw 1.
if stddev < stddev_intr:
stddev = 910 / 2.3548
# Get Gaussian kernel width (channel width 103.6 km/s)
sig = np.sqrt(stddev**2 - stddev_intr**2) / 103.6
flux_conv = gaussian_filter1d(flux_temp, sig)
f = amplitude * np.interp(x, wave_temp * (1 + z), flux_conv)
return f
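# Usage sketch (illustrative values; wavelengths in Angstrom, as the docstring states):
# model = IronTemplate(amplitude=1.0, stddev=1200 / 2.3548, z=0.05)
# iron_flux = model(np.linspace(4000, 5500, 2000))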
# Repo: MSD-99/telegram_statistics
import json
def read_json(file_path: str) -> dict:
"""Reads a json file and returns the dict
"""
with open(file_path) as f:
return json.load(f)
def read_file(file_path: str) -> str:
"""Reads a file and returns the conent
"""
with open(file_path) as f:
return f.read()
import numpy
import struct
import warnings
from .compat import structured_cast
# from .logger import logger
from ..lib.arraybase import set_or_add_to_structured, to_structured
from ..lib.iterable import split
try:
import xxhash
_HAS_XXHASH = True
except ImportError:
_HAS_XXHASH = False
import hashlib
# Pure numpy implementation of hashmaps
# This file implements low level classes, but as a user you should try to use:
# unique, unique_count, search, get_dense_labels_map, factorize or Hashmap
UINT64 = numpy.uint64
INV_PHI = UINT64(11400714819323198485) # 1<<64/phi
STEP_MULT = UINT64(11223344556677889999) # arbitrary, could be optimized
_DEFAULT = object()
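# Quick usage sketch of the public helpers defined below (illustrative arrays):
#   keys = numpy.array([3, 1, 3, 7], dtype='uint64')
#   uniques = unique(keys)                       # -> unique keys, O(n)
#   hm = Hashmap(keys, numpy.arange(keys.size))  # map keys -> values
#   values, found = hm.get_many(numpy.array([3, 5], dtype='uint64'))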
class _BaseHashmap(object):
"""
template for HashMap keys => values
* hashmaps gives efficient O(n) search in un-sorted set of keys or map keys => values
* hashmaps without values are called hashtables
* this implementation relies on Fibonacci hashing
* we support any dtype for the keys and values, by implementing these sub-classes:
* - vanilla hash for uint64 keys
* - python's tuple-hash for struct of uint64 fields
* - xxhash or sha1 for bytes objects (or encoded str objects), or struct of them
* - python's object hash for any object
* for the first two, the keys are casted into uint64 or uint64-tuple
* when possible for struct of a few tiny field, we view them as a single uint64
"""
@classmethod
def new(cls, keys, values=None, cast_dtype=None, view_dtype=None, empty=_DEFAULT):
"""
:param array keys: (n,) key-dtype array
:param array? values: (n,) val-dtype array
:param dtype? cast_dtype: dtype to cast ``keys``
:param dtype? view_dtype: dtype to view ``keys``
:param int? empty: empty value (default: 0)
"""
original_dtype = keys.dtype
cast_dtype = numpy.dtype(cast_dtype or keys.dtype)
view_dtype = numpy.dtype(view_dtype or keys.dtype)
_keys = cls._cast(keys, cast_dtype, view_dtype)
# keys = structured_cast(keys, dtype)
empty = cls._choose_empty_value(_keys, view_dtype, empty)
n, = _keys.shape
log_size = (cls.init_space_ratio(n) * n - 1).bit_length()
size = 1 << log_size
table = numpy.full(size, empty, dtype=view_dtype)
values_table = numpy.zeros(
size, dtype=values.dtype) if values is not None else None
hashmap = cls(table, values_table, empty,
log_size, original_dtype, cast_dtype)
hashmap._set_initial(_keys, values)
return hashmap
def __init__(self, table, values, empty, log_size, original_dtype, cast_dtype, can_resize=True):
""" low-level constructor, use .new instead """
self._table = table
self.values = values
self._empty = empty
self.log_size = log_size
self.can_resize = can_resize
self.shift = UINT64(64 - self.log_size)
self.n_used = (self._table != self._empty).sum()
self.original_dtype = original_dtype
self.cast_dtype = cast_dtype
@property
def size(self):
return self._table.size
@property
def nbytes(self):
summed = self._table.nbytes
if self.values is not None:
summed += self.values.nbytes
return summed
def set_many(self, keys, values=None):
"""
:param array keys: (n,) key-dtype array
:param array? values: (n,) val-dtype array
"""
_keys = self._cast(keys, self.cast_dtype, self._table.dtype)
if values is not None and self.values.dtype.names:
# align fields
values = values[[k for k in self.values.dtype.names]]
if _keys.size > 0 and (_keys == self._empty).any():
self._change_empty(_keys)
n, = _keys.shape
if self.min_space_ratio(n) * (self.n_used + n) > self.size:
self._resize(self.n_used + n)
# step=0
step = UINT64(0)
indexes = self._shifted_hash(_keys, step)
done = False
max_steps = self.max_steps(n)
for _ in range(max_steps):
available = self._table[indexes] == self._empty
available_indexes = indexes[available]
self._table[available_indexes] = _keys[available]
collisions = self._table[indexes] != _keys
if values is not None:
self.values[indexes[~collisions]] = values[~collisions]
if not collisions.any():
done = True
break
# next step: work only in `collisions`
step += UINT64(1)
_keys = _keys[collisions]
if values is not None:
values = values[collisions]
indexes = self._shifted_hash(_keys, step)
if not done:
raise RuntimeError(f'could not set_many within {max_steps} steps')
self.n_used = (self._table != self._empty).sum()
def lookup(self, keys):
"""
Search keys in hashtable (do not confuse with ``get_many`` of hashmap)
:param array keys: (n,) key-dtype array
:returns: tuple(
indexes: (n,) uint64 array,
found: (n,) bool array,
)
"""
_keys = self._cast(keys, self.cast_dtype, self._table.dtype)
if _keys.size > 0 and (_keys == self._empty).any():
self._change_empty(_keys)
n, = _keys.shape
# working idx in all (lazy build at first collisions)
idx_in_all = None
# step=0
step = UINT64(0)
all_indexes = self._shifted_hash(_keys, step)
indexes = all_indexes
table_values = self._table[indexes]
all_found = table_values != self._empty
found = all_found
done = False
max_steps = self.max_steps(n)
for _ in range(max_steps):
collisions = found & (table_values != _keys)
if not collisions.any():
done = True
break
# next step: work only in `collisions`
step += UINT64(1)
_keys = _keys[collisions]
if idx_in_all is None:
idx_in_all = numpy.where(collisions)[0]
else:
idx_in_all = idx_in_all[collisions]
indexes = self._shifted_hash(_keys, step)
all_indexes[idx_in_all] = indexes
table_values = self._table[indexes]
found = table_values != self._empty
all_found[idx_in_all] = found
if not done:
raise RuntimeError(f'could not lookup within {max_steps} steps')
return all_indexes, all_found
def contains(self, keys):
"""
:param array keys: (n,) key-dtype array
:returns: (n,) bool array
"""
_, found = self.lookup(keys)
return found
def get_many(self, keys):
"""
:param array keys: (n,) key-dtype array
:returns: tuple(
values: (n,) val-dtype array,
found: (n,) bool array,
)
"""
if self.values is None:
raise ValueError(
'`get_many` is only available when values is not None, use `lookup`')
indexes, found = self.lookup(keys)
values = self.values[indexes]
return values, found
def unique_keys(self, return_table_mask=False, return_values=False):
""" :returns: (
(n,) key-dtype array,
[if return_table_mask] (m,) bool array with n "1s"
[if return_values] (n,) val-dtype array,
)
"""
has_key = self._table != self._empty
_keys = self._table[has_key]
keys = self._cast_back(_keys)
if not return_table_mask and not return_values:
return keys
out = (keys,)
if return_table_mask:
out = out + (has_key,)
if return_values:
out = out + (self.values[has_key],)
return out
def keys_hash(self):
""" returns order-invarient hash of keys (not __hash__ because we don't look at values) """
# combine raw hash (pre-shift) by global sum
has_key = self._table != self._empty
_keys = self._table[has_key]
# compute raw uint64 hash
_keys_hsh = self._hash(_keys, UINT64(0))
# aggregate
hsh_uint64 = numpy.bitwise_xor.reduce(_keys_hsh)
return int(hsh_uint64.view(numpy.int64)) # return as int
@classmethod
def init_space_ratio(cls, n):
""" multiplier to set table size """
return 4 if n < (1<<26) else 2
@classmethod
def min_space_ratio(cls, n):
""" when to trigger resize """
return 3 if n < (1<<26) else 1.5
@classmethod
def max_steps(cls, n):
""" prevent infinite loop by a cap on the nb of steps (heuristic but very large) """
return max(64, n // (32 * cls.init_space_ratio(n)))
def _resize(self, n):
log_size = int(self.init_space_ratio(n) * n - 1).bit_length()
has_value = self._table != self._empty
_keys = self._table[has_value]
if self.values is not None:
values = self.values[has_value]
else:
values = None
if values is not None:
self.values = numpy.zeros(self.size, dtype=values.dtype)
# re-allocate tables
self.log_size = log_size
self.shift = UINT64(64 - self.log_size)
new_size = 1 << log_size
self._table = numpy.full(
new_size, self._empty, dtype=self._table.dtype)
if values is not None:
self.values = numpy.zeros(new_size, dtype=values.dtype)
self._set_initial(_keys, values)
def _set_initial(self, _keys, values):
n, = _keys.shape
# step=0
step = UINT64(0)
indexes = self._shifted_hash(_keys, step)
self._table[indexes] = _keys
if values is not None:
self.values[indexes] = values
done = False
max_steps = self.max_steps(n)
for _ in range(max_steps):
collisions = self._table[indexes] != _keys
if not collisions.any():
done = True
break
# next step: work only in `collisions`
step += UINT64(1)
_keys = _keys[collisions]
if values is not None:
values = values[collisions]
# TOOPTIMIZE re-use computed hashes
indexes = self._shifted_hash(_keys, step)
available = self._table[indexes] == self._empty
available_indexes = indexes[available]
self._table[available_indexes] = _keys[available]
if values is not None:
self.values[available_indexes] = values[available]
if not done:
raise RuntimeError(f'could not _set_initial within {max_steps} steps')
self.n_used = (self._table != self._empty).sum()
def _change_empty(self, new_keys):
# edge case: the empty value we set clashes with a new key
_uniq_keys = self._table[self._table != self._empty]
all_keys = numpy.r_[_uniq_keys, new_keys]
new_empty = self._choose_empty_value(all_keys, self._table.dtype)
self._table[self._table == self._empty] = new_empty
self._empty = new_empty
@classmethod
def _hash(cls, _keys, step):
raise NotImplementedError()
def _shifted_hash(self, _keys, step):
_hash = self._hash(_keys, step)
_hash >>= self.shift
return _hash
@classmethod
def _fibonacci_hash_uint64(cls, _keys, step, copy=True):
if copy:
_keys = _keys.copy()
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', r'overflow encountered in ulong(long)?_scalars')
_keys += STEP_MULT * step
_keys *= INV_PHI
return _keys
@classmethod
def _choose_empty_value(cls, _keys, dtype, empty=_DEFAULT):
raise NotImplementedError()
@classmethod
def _cast(cls, keys, cast_dtype, view_dtype):
if keys.dtype != cast_dtype:
keys = structured_cast(keys, cast_dtype)
if keys.dtype != view_dtype:
if not keys.dtype.hasobject and not view_dtype.hasobject:
keys = keys.view(view_dtype)
else:
# HACK! numpy doesn't allow views with object, so we use a workaround
# warning: SegFault if `keys.dtype` has offsets, but we clean in structured_cast
keys = numpy.ndarray(keys.shape, view_dtype, keys.data)
return keys
def _cast_back(self, keys):
if keys.dtype != self.cast_dtype:
if not keys.dtype.hasobject and not self.cast_dtype.hasobject:
keys = keys.view(self.cast_dtype)
else:
# HACK! numpy doesn't allow views with object, so we use a workaround
# warning: SegFault if `keys.dtype` has offsets, but we clean in structured_cast
keys = numpy.ndarray(keys.shape, self.cast_dtype, keys.data)
if keys.dtype != self.original_dtype:
keys = structured_cast(keys, self.original_dtype)
return keys
class UInt64Hashmap(_BaseHashmap):
"""
a mapping from uint64 to arbitrary values in a numpy array
consider using the higher-level ``Hashmap`` instead
"""
@classmethod
def new(cls, keys, values=None, cast_dtype=None, view_dtype=None, empty=_DEFAULT):
"""
:param array keys: (n,) uint64 array
:param array? values: (n,) val-dtype array
:param dtype? cast_dtype: dtype to cast ``keys`` (default: uint64)
:param dtype? view_dtype: dtype to view ``keys`` (default: uint64)
:param object? empty: empty value (default: row of 0)
"""
cast_dtype = cast_dtype or UINT64
view_dtype = view_dtype or UINT64
return super().new(keys, values, cast_dtype, view_dtype, empty)
@classmethod
def _hash(cls, _keys, step):
# (_keys << 21) ^ (_keys >> 33)
_keys_cpy = _keys << 21
_keys_cpy ^= _keys >> 33
return cls._fibonacci_hash_uint64(_keys_cpy, step, copy=False)
@classmethod
def _choose_empty_value(cls, _keys, dtype, empty=_DEFAULT):
# empty defined by user
if empty is not _DEFAULT:
return empty
# use zero if keys are strictly positive
zero = UINT64(0)
if zero not in _keys:
return zero
# otherwise pick a random number
while True:
empty = numpy.random.randint(
low=1<<62, high=1<<63, dtype='uint64')
if empty not in _keys:
return empty
class UInt64StructHashmap(_BaseHashmap):
"""
a mapping from uint64-struct to arbitrary values in a numpy array
can be used on any structured dtypes without ``'O'`` by using views
consider using the higher-level ``Hashmap`` instead
"""
# almost INV_PHI but different one (used in xxhash.c)
_PRIME_1 = UINT64(11400714785074694791)
_PRIME_2 = UINT64(14029467366897019727)
_PRIME_5 = UINT64(2870177450012600261)
@classmethod
def new(cls, keys, values=None, cast_dtype=None, view_dtype=None, empty=_DEFAULT):
"""
:param array keys: (n,) uint64-struct array
:param array? values: (n,) val-dtype array
:param dtype? cast_dtype: dtype to cast ``keys`` (default: uint64 for each field)
:param dtype? view_dtype: dtype to view ``keys`` (default: uint64 for each field)
:param object? empty: empty value (default: row of 0)
"""
cast_dtype = cast_dtype or [(name, 'uint64')
for name in keys.dtype.names]
view_dtype = view_dtype or [(name, 'uint64')
for name in keys.dtype.names]
return super().new(keys, values, cast_dtype, view_dtype, empty)
@classmethod
def _hash(cls, _keys, step):
""" use Python's algorithm for tuple to get consistent values """
n, = _keys.shape
n_cols = len(_keys.dtype)
acc = numpy.full(n, cls._PRIME_5)
buf = numpy.empty_like(acc)
for col in sorted(_keys.dtype.names):
# acc += _keys[col] * cls._PRIME_2
buf[:] = _keys[col]
buf *= cls._PRIME_2
acc += buf
# acc = (acc << 31) | (acc >> 33)
buf[:] = acc
buf >>= 33
acc <<= 31
acc |= buf
#
acc *= cls._PRIME_1
acc += UINT64(n_cols) ^ (cls._PRIME_5 ^ UINT64(3527539))
return cls._fibonacci_hash_uint64(acc, step, copy=False)
@classmethod
def _choose_empty_value(cls, _keys, dtype, empty=_DEFAULT):
# empty defined by user
if empty is not _DEFAULT:
return empty
# use zeros if keys are strictly positive
wrapper = numpy.zeros(1, dtype=dtype)
empty = wrapper[0]
if empty not in _keys:
return empty
# otherwise pick random numbers
d = len(dtype) or None
while True:
rdm = numpy.random.randint(low=1<<62, high=1<<63, size=d, dtype='uint64')
if d:
rdm = tuple(rdm)
wrapper[0] = rdm
empty = wrapper[0]
if empty not in _keys:
return empty
class ObjectHashmap(_BaseHashmap):
"""
a mapping from arbitrary keys to arbitrary values in a numpy array
internally uses python ``hash``, so hashes are not consistent (not even for string or bytes)
consider using the higher-level ``Hashmap`` instead
"""
@classmethod
def new(cls, keys, values=None, cast_dtype=None, view_dtype=None, empty=_DEFAULT):
"""
:param array keys: (n,) object array
:param array? values: (n,) val-dtype array
:param dtype? cast_dtype: dtype to cast ``keys`` (default: keys.type)
:param dtype? view_dtype: dtype to view ``keys`` (default: keys.type)
:param object? empty: empty value (default: row of 0)
"""
cast_dtype = cast_dtype or keys.dtype
view_dtype = view_dtype or cast_dtype
return super().new(keys, values, cast_dtype, view_dtype, empty)
@classmethod
def _hash(cls, _keys, step):
n = _keys.shape[0]
hashes = numpy.fromiter((cls._hash_single_obj(obj) for obj in _keys),
count=n, dtype=UINT64)
return cls._fibonacci_hash_uint64(hashes, step, copy=False)
@classmethod
def _hash_single_obj(cls, obj):
try:
return hash(obj)
except TypeError:
# cast single numpy array to bytes
if isinstance(obj, numpy.ndarray):
return hash(obj.tobytes())
# cast all numpy arrays in tuple/void to bytes
if isinstance(obj, (tuple, numpy.void)):
obj_ = tuple((a.tobytes() if isinstance(a, numpy.ndarray) else a)
for a in tuple(obj))
return hash(obj_)
raise
@classmethod
def _choose_empty_value(cls, _keys, dtype, empty=_DEFAULT):
return UInt64StructHashmap._choose_empty_value(_keys, dtype, empty)
class BytesObjectHashmap(ObjectHashmap):
"""
hashmap from bytes strings keys encoded as object
internally uses xxhash or hashlib to get consistent hashes
consider using the higher-level ``Hashmap`` instead
"""
if _HAS_XXHASH:
@classmethod
def _hash_single_obj(cls, obj):
return xxhash.xxh3_64_intdigest(obj)
else:
@classmethod
def _hash_single_obj(cls, obj):
sha1 = hashlib.sha1()
sha1.update(obj)
return struct.unpack('<Q', sha1.digest()[:8])[0]
class StrObjectHashmap(BytesObjectHashmap):
"""
hashmap from unicode strings keys encoded as object
internally uses xxhash or hashlib to get consistent hashes
consider using the higher-level ``Hashmap`` instead
"""
@classmethod
def _hash_single_obj(cls, obj):
return super()._hash_single_obj(obj.encode(errors='ignore'))
class BytesObjectTupleHashmap(BytesObjectHashmap):
"""
hashmap from tuple of either non-object, or bytes/unicode strings keys encoded as object
internally uses xxhash or hashlib to get consistent hashes
consider using the higher-level ``Hashmap`` instead
"""
@classmethod
def _hash_single_obj(cls, obj_tuple):
h = xxhash.xxh3_64() if _HAS_XXHASH else hashlib.sha1()
for obj in obj_tuple:
if isinstance(obj, str):
obj = obj.encode(errors='ignore')
h.update(obj)
return struct.unpack('<Q', h.digest()[:8])[0]
def Hashmap(keys, values=None):
"""
fake class to select between uint64/struct/object from dtype of arguments
:param array keys: (n,) key-dtype array
:param array? values: (n,) val-dtype array
"""
# switch type from keys
cls, cast_dtype, view_dtype = _get_optimal_cast(keys)
# build hashmap
return cls.new(keys, values, cast_dtype, view_dtype)
def _get_optimal_cast(keys, allow_object_hashmap=False):
"""
select best hashmap type to fit ``dtype``
:param array keys:
:param bool? allow_object_hashmap:
:returns: cls, cast_dtype, view_dtype
"""
dtype = keys.dtype
kind = dtype.kind
names = dtype.names
# scalar input (or strings of less than 8 bytes) we can view as uint64
if kind in 'buifcSUV' and dtype.itemsize <= 8 and not names:
if kind == 'b':
kind = 'u'
# how many units of `kind` we need for get 8 bytes, e.g. 2 for 'U'
inner_dtype_len = 8 // numpy.dtype(f'{kind}1').itemsize
cast_dtype = f'{kind}{inner_dtype_len}'
view_dtype = UINT64
return UInt64Hashmap, numpy.dtype(cast_dtype), numpy.dtype(view_dtype)
# cast string of more than 8 bytes to tuple of uint64
elif kind in 'SUV' and not names:
# number of uint64 (8 bytes) we need to view original dtype, e.g. 5 for 'U9'
n_uint64 = int(numpy.ceil(float(dtype.itemsize) / 8))
# how many 'S1' or 'U1' we need for get 8 bytes, e.g. 2 for 'U'
inner_dtype_len = 8 / numpy.dtype(f'{kind}1').itemsize
# first cast to bigger string to fit exactly a multiple of 8 bytes, e.g. 'U10'
cast_dtype = f'{kind}{int(n_uint64 * inner_dtype_len)}'
# then view as a tuple of uint64, e.g. 'u8,u8,u8,u8,u8'
view_dtype = [(f'f{i}', 'u8') for i in range(n_uint64)]
return UInt64StructHashmap, numpy.dtype(cast_dtype), numpy.dtype(view_dtype)
# struct input
if names and all(dtype[n].kind in 'buifcSUV' for n in names):
dtypes = [(n, dtype[n]) for n in names]
# check if we need padding to fit in a multiple of 8 bytes
nbytes = sum(dt.itemsize for n, dt in dtypes)
npad = 8 * int(numpy.ceil(nbytes / 8)) - nbytes
if npad == 0:
cast_dtype = dtypes # simply remove offsets
else:
# add 'S{npad}' padding field
cast_dtype = dtypes + [('__pad__', f'S{npad}')]
# if all fields fit inside 8 bytes, use uint64 hashmap
if nbytes <= 8:
view_dtype = UINT64
return UInt64Hashmap, numpy.dtype(cast_dtype), numpy.dtype(view_dtype)
# otherwise view as a struct of multiple uint64
n_uint64 = (nbytes + npad) // 8
view_dtype = [(f'f{i}', 'u8') for i in range(n_uint64)]
return UInt64StructHashmap, numpy.dtype(cast_dtype), numpy.dtype(view_dtype)
# bytes/str objects
if keys.size and kind == 'O':
if all(isinstance(k, bytes) for k in keys):
return BytesObjectHashmap, numpy.dtype('O'), numpy.dtype('O')
if all(isinstance(k, str) for k in keys):
return StrObjectHashmap, numpy.dtype('O'), numpy.dtype('O')
# struct with bytes/str objects
if keys.size and names and all(dtype[n].kind in 'buifcSUVO' for n in names):
dtypes = [(n, dtype[n]) for n in names]
obj_dtypes, nonobj_dtypes = split(dtypes, lambda ndt: ndt[1] == 'O')
if all(isinstance(k, (str, bytes)) for n, _ in obj_dtypes for k in keys[n]):
# view all non-object as a single byte string
cast_dtype = nonobj_dtypes + obj_dtypes # move all non-obj first
nonobj_size = sum(dt.itemsize for _, dt in nonobj_dtypes)
view_dtype = [('__nonobj__', f'V{nonobj_size}')] + obj_dtypes
return BytesObjectTupleHashmap, numpy.dtype(cast_dtype), numpy.dtype(view_dtype)
    # using arbitrary objects is dangerous, so we raise unless explicitly allowed
if allow_object_hashmap:
return ObjectHashmap, dtype, dtype
raise NotImplementedError(dtype)
def unique(values, return_inverse=False, return_index=False):
"""
:param array values: (n,) dtype array
:param bool? return_inverse:
:param bool? return_index:
:returns: (
uniques: (n2,) dtype array with n2<=n,
[if return_inverse=1] idx_in_uniques: (n,) uint32 array of indexes <= n2,
[if return_index=1] unique_idx: (n2,) uint32 array of indexes <= n,
)
"""
_vals = numpy.arange(
values.size, dtype='uint32') if return_index or return_inverse else None
hashmap = Hashmap(keys=values, values=_vals)
unique_values, table_mask = hashmap.unique_keys(return_table_mask=True)
if not return_inverse and not return_index:
return unique_values
out = (unique_values,)
if return_index:
unique_idx = hashmap.values[table_mask]
if return_inverse:
# for return_inverse, we change hashmap values to be indexes in `unique_values`
uniq_idx_in_uniq = numpy.arange(unique_values.size, dtype='uint32')
hashmap.values[table_mask] = uniq_idx_in_uniq
idx_in_uniques, found = hashmap.get_many(values)
assert found.all()
out = out + (idx_in_uniques,)
if return_index:
out = out + (unique_idx,)
return out
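# Example (illustrative sketch):
#     values = numpy.array([b'a', b'b', b'a'], dtype='S1')
#     uniques, idx_in_uniques, unique_idx = unique(
#         values, return_inverse=True, return_index=True)
#     # uniques[idx_in_uniques] reconstructs `values`;
#     # values[unique_idx] equals `uniques`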
def get_dense_labels_map(values, idx_dtype='uint32'):
"""
convert unique values into dense int labels [0..n_uniques]
:param array values: (n,) dtype array
:param dtype? idx_dtype: (default: 'uint32')
:returns: tuple(
labels2values: (n_uniques,) dtype array,
values2labels: HashMap(dtype->int),
)
"""
# get unique values
unique_values = unique(values)
# build labels from 0 to n_uniques
labels = numpy.arange(unique_values.shape[0], dtype=idx_dtype)
# build small hashmap with just the unique items
values2labels = Hashmap(unique_values, labels)
return unique_values, values2labels
def factorize(values):
"""
Build dense int labels maps and return labels
:param array values: (n,) dtype array
:returns: tuple(
labels: (n,) int array,
labels2values: (n_uniques,) dtype array,
values2labels: HashMap(dtype->int),
)
"""
labels2values, values2labels = get_dense_labels_map(values)
values_labels, found = values2labels.get_many(values)
assert found.all()
return values_labels, labels2values, values2labels
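# Example (illustrative sketch): dense labels for a string column.
#     labels, labels2values, values2labels = factorize(
#         numpy.array(['x', 'y', 'x'], dtype='U1'))
#     # labels is a (3,) uint32 array; labels2values[labels] == original values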
def update_dense_labels_map(hashmap, values):
"""
update hashmap values -> dense labels, and return mapped values
:param HashMap hashmap: HashMap(dtype->int)
:param array values: (n,) dtype array
:returns: (n,) int array
:changes: update ``hashmap`` in-place
"""
# get current values
labels, found = hashmap.get_many(values)
new_values = values[~found]
if not new_values.size:
return labels
# check unique new values
unique_new_values, new_values_idx_in_uniques = unique(
new_values, return_inverse=True)
# build new labels
idx_dtype = hashmap.values.dtype
_, current_labels = hashmap.unique_keys(return_values=True)
if current_labels.size == 0:
start_at = 0
else:
start_at = current_labels.max() + 1
new_labels = numpy.arange(
start_at, start_at + unique_new_values.shape[0], dtype=idx_dtype)
hashmap.set_many(unique_new_values, new_labels)
# return all labels
labels, found = hashmap.get_many(values)
assert found.all()
return labels
def empty_hashmap(key_dtype, val_dtype='uint32'):
"""
Build empty Hashmap
:param dtype key_dtype:
:param dtype? val_dtype: (default: uint32)
:returns: Hashmap
"""
key_dtype = numpy.dtype(key_dtype)
val_dtype = numpy.dtype(val_dtype)
return Hashmap(numpy.empty(0, dtype=key_dtype), numpy.empty(0, dtype=val_dtype))
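# Example (illustrative sketch): streaming label assignment over batches.
#     hm = empty_hashmap('U8')
#     labels_a = update_dense_labels_map(hm, numpy.array(['a', 'b'], dtype='U8'))
#     labels_b = update_dense_labels_map(hm, numpy.array(['b', 'c'], dtype='U8'))
#     # 'b' keeps the same label in both batches; 'c' gets a fresh one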
def reverse_values2labels(values2labels, dtype=None):
"""
Reverse a hashmap values2labels.
Normally you should not call this function but use the values provided in ``factorize``
:param Hashmap values2labels: labels hashmap with n items
:param dtype? dtype:
:returns: (n,) dtype array
"""
values = values2labels.unique_keys()
labels, found = values2labels.get_many(values)
assert found.all()
out = numpy.empty_like(values, dtype=dtype)
out[labels] = values
return out
def array_hash(array, order_matters):
"""
    Return a consistent hash of an array that may or may not be order-invariant
:param array array: array with or without structure
:param bool order_matters:
:returns: int
"""
if order_matters:
order_num = STEP_MULT * numpy.arange(array.size, dtype=UINT64)
if array.dtype.names is None:
array = to_structured([('f0', array), ('__index__', order_num)])
else:
array = set_or_add_to_structured(array, [('__index__', order_num)])
hashtable = Hashmap(array)
return hashtable.keys_hash()
def values_hash(array, step=0):
"""
Return consistent hash of array values
:param array array: (n,) array with or without structure
:param uint64 step: optional step number to modify hash values
:returns: (n,) uint64 array
"""
cls, cast_dtype, view_dtype = _get_optimal_cast(array)
array = cls._cast(array, cast_dtype, view_dtype)
return cls._hash(array, UINT64(step))
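# Example (illustrative sketch):
#     a = numpy.array([1, 2, 3], dtype='uint64')
#     values_hash(a)                     # (3,) uint64 array, one hash per value
#     array_hash(a, order_matters=True)  # single int for the whole array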
| StarcoderdataPython |
5725 | import numpy as np
if __name__ == '__main__':
    # read the matrix dimensions, then one row of integers per line
    h, w = map(int, input().split())
    row_list = []
    for i in range(h):
        single_row = list(map(int, input().split()))
        np_row = np.array(single_row)
        row_list.append(np_row)
    # the answer is the maximum, over all rows, of each row's minimum
    min_of_each_row = np.min(row_list, axis=1)
    max_of_min = np.max(min_of_each_row)
    print(max_of_min)
| StarcoderdataPython |
130823 | <reponame>Armorless-Visage/blacklistparser
#!/usr/bin/env python3
# <NAME> (c) 2019 ISC
# Full licence terms located in LICENCE file
from os import path
'''
some types for argparse type argument
'''
def base_path_type(pathname):
'''
custom type for checking that a file path is valid
returns input if the base dir exists, None otherwise
'''
if path.isdir(path.dirname(pathname)) is True:
return pathname
return None
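# Example (illustrative sketch): wiring the type into argparse; note the
# function returns None (rather than raising) when the base dir is invalid.
#     import argparse
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--output', type=base_path_type)
#     args = parser.parse_args(['--output', '/tmp/blacklist.txt'])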
| StarcoderdataPython |
3275860 | __all__ = ('APIHooker', 'form', 'Config')
import functools
import json
import os
import re
import textwrap
import uuid
from typing import Iterable
from flask import render_template, request, redirect
class APIHooker:
def __init__(self, app, upload='data'):
self._app = app
self._upload = upload
self._apis = dict()
os.makedirs(upload, exist_ok=True)
def run(self, **kwargs):
@self._app.route('/')
def index():
return render_template('index.html', data=self._apis)
self._app.run(**kwargs)
def add(self, api, name, description, arguments, post=str):
'''
Argument:
api: Callable
name: str, api name
description: str, api description
arguments: dict, {argument: (function, kwargs)}
post: Callable, post-processing
'''
self._apis[name] = textwrap.dedent(description)
@self._app.route(f'/api/{name}', endpoint=f'_{name}')
def _():
return redirect(f'/api/{name}/x')
@self._app.route(f'/api/{name}/x', endpoint=f'_{name}_x')
def x():
return render_template(
'x.html', data=arguments, name=name, description=self._apis[name],
)
@self._app.route(f'/api/{name}/y', methods=['POST', 'GET'], endpoint=f'_{name}_y')
def y():
if request.method == 'POST':
xs = dict()
for name, file in request.files.items():
filename = str(uuid.uuid5(uuid.NAMESPACE_OID, file.filename))
path = os.path.join(self._upload, filename)
file.save(path)
xs[name] = (file.filename, path)
return post(api(**form.post(request.form, arguments, xs)))
else:
return redirect(f'/api/{name}/x')
return self
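# Example (illustrative sketch, assuming a Flask app plus `index.html` and
# `x.html` templates are in place):
#     from flask import Flask
#     hooker = APIHooker(Flask(__name__))
#     hooker.add(
#         api=lambda text: text.upper(),
#         name='upper',
#         description='Upper-case the given text.',
#         arguments={'text': (form.text, {})},
#     ).run(port=8080)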
def indent(string, prefix=' '*4):
return textwrap.indent(string, prefix)
def p(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
return '<p>\n{}\n</p>'.format(indent(func(*args, **kwargs)))
return wrapper
class form:
@classmethod
@p
def text(cls, name:str) -> str:
return f'{name}: <input type="text" name="{name}" />'
@classmethod
@p
def date(cls, name:str) -> str:
return f'{name}: <input type="date" name="{name}" />'
@classmethod
@p
def month(cls, name:str) -> str:
return f'{name}: <input type="month" name="{name}" />'
@classmethod
@p
def range(cls, name:str, min:int, max:int, step:[int, float]=1, default:int=None) -> str:
default = default or min
return f'{name}: \n<input type="range" name="{name}" ' \
f'value="{default}" min="{min}" max="{max}" step="{step}" ' \
'oninput="this.nextElementSibling.value=this.value" />\n' \
f'<output>{default}</output>'
@classmethod
@p
def datalist(cls, name:str, options:[dict, Iterable]) -> str:
string = '\n'.join(
f'<option value="{value}">{text}</option>'
for value, text in (
options.items() if isinstance(options, dict) else
map(lambda x: (x, x), options)
)
)
return f'{name}: \n<input list="{name}-datalist" name="{name}">\n' \
f'<datalist id="{name}-datalist">\n' \
f'{indent(string)}\n' \
'</datalist>'
@classmethod
@p
def group(cls, name:str, options:dict) -> str:
'''
Argument:
- options: {group: {name: description} or (name, )}
'''
string = '\n'.join(
f'<optgroup label="{group}">\n' + indent('\n'.join(
f'<option value="{value}">{text}</option>'
for value, text in (
values.items() if isinstance(values, dict) else
map(lambda x: (x, x), values)
)
)) + '\n</optgroup>'
for group, values in options.items()
)
return f'{name}: \n<select name="{name}">\n{indent(string)}\n</select>'
@classmethod
@p
def checkbox(cls, name:str, options:[dict, Iterable]) -> str:
return f'{name}: \n' + '\n'.join(
f'<input type="checkbox" name="{name}.{ith}" value="{value}">{text}</input>'
for ith, (value, text) in enumerate(
options.items() if isinstance(options, dict) else
map(lambda x: (x, x), options)
)
)
@classmethod
@p
def multiple(cls, name:str, options:[dict, Iterable]) -> str:
string = '\n'.join(
f'<option value="{value}">{text}</option>'
for value, text in (
options.items() if isinstance(options, dict) else
map(lambda x: (x, x), options)
)
)
return f'{name}: \n<select name="{name}" multiple>\n{indent(string)}\n</select>'
@classmethod
@p
def radio(cls, name:str, options:[dict, Iterable]) -> str:
return f'{name}: ' + '\n'.join(
f'<input type="radio" name="{name}" value="{value}">{text}</input>'
for value, text in (
options.items() if isinstance(options, dict) else
map(lambda x: (x, x), options)
)
)
@classmethod
def boolean(cls, name:str) -> str:
return cls.radio(name, {'_': 'True', '': 'False'})
@classmethod
@p
def file(cls, name:str) -> str:
return f'{name}: \n<input type=file name="{name}" />'
@classmethod
def post(cls, x, arguments, xs=dict()):
'''Post-processing
Argument:
- x: dict
- arguments: dict, {name: (function, kwargs)}
- xs: dict, extended x
'''
xs.update(x)
y = dict()
for name, (function, kwargs) in arguments.items():
if function in (cls.text, cls.datalist, cls.radio, cls.date, cls.month, cls.group):
y[name] = xs[name]
elif function in (cls.range, cls.boolean):
Ts = {
cls.range: type(kwargs.get('step', 1)), cls.boolean: bool,
}
y[name] = Ts[function](xs[name])
elif function == cls.multiple:
y[name] = x.getlist(name)
elif function == cls.checkbox:
pattern = re.compile(fr'^{name}\.\d+$')
y[name] = tuple(
value for key, value in xs.items()
if pattern.match(key)
)
elif function == cls.file:
y[name] = xs[name]
return y
class Config:
def __init__(self, filename='config.json', encoding='utf-8'):
with open(filename, 'r', encoding=encoding) as f:
self._data = json.load(f)
def __getitem__(self, key):
return self._data.get(key, None)
@property
def data(self):
return self._data
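# Example (illustrative sketch, assuming a config.json such as
# {"host": "0.0.0.0", "port": 8080}):
#     config = Config('config.json')
#     config['port']   # -> 8080
#     config['nope']   # -> None (missing keys fall back to None)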
| StarcoderdataPython |
3338431 | # Copyright (c) 2018 <NAME>
# This code is available under the "Apache License 2.0"
# Please see the file COPYING in this distribution for license terms.
import tensorflow as tf
import math
from multiprocessing import cpu_count
files = tf.gfile.Glob('/data/tf-records/batches/*.tfrecords')
train_test_split = math.ceil(len(files) * 0.7)
train_files = files[:train_test_split]
test_files = files[train_test_split:]
num_parallel_calls = cpu_count()
def decode_example(example):
parsed_example = tf.parse_single_example(example, {
'image': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64)
})
image = tf.image.decode_png(parsed_example['image'])
label = parsed_example['label']
return image / 255, tf.reshape(label, (1,))
def dataset(files):
return tf.data.TFRecordDataset(files) \
.map(decode_example, num_parallel_calls)
def train_dataset():
return dataset(train_files)
def test_dataset():
return dataset(test_files)
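# Example (illustrative sketch, TF1 graph-mode usage):
#     iterator = train_dataset().shuffle(1024).batch(32).make_one_shot_iterator()
#     images, labels = iterator.get_next()
#     with tf.Session() as sess:
#         first_images, first_labels = sess.run([images, labels])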
| StarcoderdataPython |
3318165 | #!/usr/bin/python
# vim: ts=4 sw=4 et
"""Class for handling argparse parsers. Methods are configured as subparsers."""
import argparse
import os
import subprocess
import sys
import textwrap
import yaml
from vctools import Logger
# pylint: disable=too-many-instance-attributes
class ArgParser(Logger):
"""Argparser class. It handles the user inputs and config files."""
def __init__(self):
self.syspath = sys.path[0]
self.gitrev = subprocess.check_output(
[
'git', '--git-dir', self.syspath + '/.git', 'rev-parse', '--short', 'HEAD'
]
)
self.__version__ = self.gitrev
self.help = None
self.opts = None
self.dotrc = None
self.parser = argparse.ArgumentParser(
description='vCenter Tools CLI'
)
self.parser.add_argument(
'--version', '-v', action='version',
version=self.__version__,
help='version number'
)
# subparser
self.subparsers = self.parser.add_subparsers(metavar='')
# override options with defaults in dotfiles
rootdir = os.path.dirname(os.path.abspath(__file__ + '/../'))
rc_files = [rootdir + '/vctoolsrc.yaml', '~/.vctoolsrc.yaml']
for rc_file in rc_files:
try:
dotrc_yaml = open(os.path.expanduser(rc_file))
self.dotrc = yaml.load(dotrc_yaml)
except IOError:
pass
if not self.dotrc:
raise ValueError('Cannot load dotrc file.')
@staticmethod
def _mkdict(args):
"""
Internal method for converting an argparse string key=value into dict.
It passes each value through a for loop to correctly set its type,
otherwise it returns it as a string.
Example:
key1=val1,key2=val2,key3=val3
"""
params = dict(x.split('=') for x in args.split(','))
for key, value in params.iteritems():
if params[key].isdigit():
params[key] = int(value)
else:
if params[key] == 'True':
params[key] = True
elif params[key] == 'False':
params[key] = False
return params
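    # Example (illustrative sketch):
    #     ArgParser._mkdict('memoryMB=2048,numCPUs=2,hotAdd=True')
    #     # -> {'memoryMB': 2048, 'numCPUs': 2, 'hotAdd': True}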
@classmethod
def general(cls, **defaults):
"""General Parser."""
# general (parent)
general_parser = argparse.ArgumentParser(add_help=False)
# positional argument
general_parser.add_argument(
'host',
help='vCenter host'
)
genopts = general_parser.add_argument_group('general options')
genopts.add_argument(
'--passwd-file', metavar='',
help='GPG encrypted passwd file'
)
genopts.add_argument(
'--user', metavar='',
help='username'
)
genopts.add_argument(
'--domain', metavar='',
help='domain'
)
genopts.add_argument(
'--passwd', metavar='',
help='password'
)
if defaults:
general_parser.set_defaults(**defaults)
return general_parser
@classmethod
def logging(cls, **defaults):
""" Logging Parser """
# logging (parent)
logging_parser = argparse.ArgumentParser(add_help=False)
logging_opts = logging_parser.add_argument_group('logging options')
logging_opts.add_argument(
'--level', metavar='', choices=['info', 'debug'], default='info',
help='set logging level choices=[%(choices)s] default: %(default)s'
)
logging_opts.add_argument(
'--console-level', metavar='', choices=['info', 'error', 'debug'], default='error',
help='set console log level choices=[%(choices)s] default: %(default)s'
)
logging_opts.add_argument(
'--console-stream', metavar='', choices=['stdout', 'stderr'], default='stderr',
help='set console logging stream output choices=[%(choices)s] default: %(default)s'
)
logging_opts.add_argument(
'--logfile', metavar='', default='/var/log/vctools.log',
help='set logging path: %(default)s'
)
if defaults:
logging_parser.set_defaults(**defaults)
return logging_parser
def add(self, *parents, **defaults):
""" Add Hardware to Virtual Machines """
# add
usage = """
help: vctools add -h
vctools add <vc> <name> --device <options>
# add a network card
vctools add <vc> <name> --device nic --network <network>
"""
add_parser = self.subparsers.add_parser(
'add',
parents=list(parents),
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=textwrap.dedent(usage),
description=textwrap.dedent(self.add.__doc__),
help='Add Hardware to Virtual Machines.'
)
add_parser.set_defaults(cmd='add')
add_parser.add_argument(
'--datacenter', metavar='', default='Linux',
help='vCenter Datacenter. default: %(default)s'
)
add_type_opts = add_parser.add_argument_group('type options')
add_parser.add_argument(
'name',
help='Name attribute of Virtual Machine object, i.e. hostname'
)
add_type_opts.add_argument(
'--device', metavar='', choices=['nic'],
help='Add hardware devices on Virtual Machines. choices=[%(choices)s]',
)
add_nic_opts = add_parser.add_argument_group('nic options')
add_nic_opts.add_argument(
'--network', metavar='',
help='The network of the interface, i.e. vlan_1234_network'
)
add_nic_opts.add_argument(
'--driver', metavar='', choices=['vmxnet3', 'e1000'],
help='The network driver, default: vmxnet3'
)
if defaults:
add_parser.set_defaults(**defaults)
def create(self, *parents, **defaults):
"""Create Parser."""
# create
create_parser = self.subparsers.add_parser(
'create', parents=list(parents),
description='Example: vctools create <vc> <config> <configN>',
help='Create Virtual Machines'
)
create_parser.set_defaults(cmd='create')
create_parser.add_argument(
'config', nargs='+', type=file,
help='YaML config for creating new Virtual Machines.'
)
create_parser.add_argument(
'--datacenter', metavar='', default='Linux',
help='vCenter Datacenter. default: %(default)s'
)
create_parser.add_argument(
'--power', action='store_true', default=True,
help='Power on the VM after creation. default: %(default)s'
)
if defaults:
create_parser.set_defaults(**defaults)
def mount(self, *parents, **defaults):
"""Mount Parser."""
# mount
mount_parser = self.subparsers.add_parser(
'mount', parents=list(parents),
help='Mount ISO to CD-Rom device'
)
mount_parser.set_defaults(cmd='mount')
mount_parser.add_argument(
'--datastore', metavar='',
help='Name of datastore where the ISO is located.'
)
mount_parser.add_argument(
'--path', metavar='',
help='Path inside datastore where the ISO is located.'
)
mount_parser.add_argument(
'--name', nargs='+', metavar='',
help='name attribute of Virtual Machine object.'
)
if defaults:
mount_parser.set_defaults(**defaults)
def power(self, *parents, **defaults):
"""Power Parser."""
# power
power_parser = self.subparsers.add_parser(
'power', parents=list(parents),
help='Power Management for Virtual Machines'
)
power_parser.set_defaults(cmd='power')
power_parser.add_argument(
'power', choices=['on', 'off', 'reset', 'reboot', 'shutdown'],
help='change power state of VM'
)
power_parser.add_argument(
'--name', nargs='+', metavar='',
help='name attribute of Virtual Machine object.'
)
if defaults:
power_parser.set_defaults(**defaults)
def query(self, *parents, **defaults):
"""Query Parser."""
# query
query_parser = self.subparsers.add_parser(
'query', parents=list(parents),
help='Query Info'
)
query_parser.set_defaults(cmd='query')
query_opts = query_parser.add_argument_group('query options')
query_opts.add_argument(
'--anti-affinity-rules', action='store_true',
help='Returns information about AntiAffinityRules.'
)
query_opts.add_argument(
'--datastores', action='store_true',
help='Returns information about Datastores.'
)
query_opts.add_argument(
'--datastore', metavar='',
help='vCenter Datastore.'
)
query_opts.add_argument(
'--vms', action='store_true',
help='Returns information about Virtual Machines.'
)
query_opts.add_argument(
'--folders', action='store_true',
help='Returns information about Folders.'
)
query_opts.add_argument(
'--networks', action='store_true',
help='Returns information about Networks.'
)
query_opts.add_argument(
'--clusters', action='store_true',
help='Returns information about ComputeResources.'
)
query_opts.add_argument(
'--cluster', metavar='',
help='vCenter ComputeResource.'
)
query_opts.add_argument(
'--datacenter', metavar='', default='Linux',
help='vCenter Datacenter. default: %(default)s'
)
query_opts.add_argument(
'--vmconfig', nargs='+', metavar='',
help='Virtual machine config'
)
query_opts.add_argument(
'--vm-by-datastore', action='store_true',
help='List the VMs associated with datastore.'
)
query_opts.add_argument(
'--vm-guest-ids', action='store_true',
help='Show all vm guest ids.'
)
query_vmcfg_opts = query_parser.add_argument_group('vmconfig options')
query_vmcfg_opts.add_argument(
'--createcfg', metavar='',
help='Create a build config from --vmconfig spec.'
)
if defaults:
query_parser.set_defaults(**defaults)
def reconfig(self, *parents, **defaults):
""" Reconfig VM Attributes and Hardware """
# reconfig
usage = """
help: vctools reconfig -h
vctools reconfig <vc> <name> [--cfgs|--device] <options>
# reconfigure config settings
# lookup vmware sdk configspec for all options
vctools reconfig <vc> <name> --cfgs memoryMB=<int>,numCPUs=<int>
# move vm to another folder
vctools reconfig <vc> <name> --folder <str>
# reconfigure a disk
vctools reconfig <vc> <name> --device disk --disk-id <int> --sizeGB <int>
# reconfigure a network card
vctools reconfig <vc> <name> --device nic --nic-id <int> --network <network>
"""
reconfig_parser = self.subparsers.add_parser(
'reconfig',
parents=list(parents),
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=textwrap.dedent(usage),
description=textwrap.dedent(self.reconfig.__doc__),
help='Reconfigure Attributes for Virtual Machines.'
)
reconfig_parser.set_defaults(cmd='reconfig')
reconfig_parser.add_argument(
'--datacenter', metavar='', default='Linux',
help='vCenter Datacenter. default: %(default)s'
)
reconfig_type_opts = reconfig_parser.add_argument_group('type options')
reconfig_parser.add_argument(
'name',
help='Name attribute of Virtual Machine object, i.e. hostname'
)
reconfig_type_opts.add_argument(
'--device', metavar='', choices=['disk', 'nic'],
help='Reconfigure hardware devices on Virtual Machines. choices=[%(choices)s]',
)
reconfig_type_opts.add_argument(
'--cfgs', metavar='', type=self._mkdict,
help='A comma separated list of key values that represent config '
'settings such as memory or cpu. format: key=val,keyN=valN',
)
reconfig_type_opts.add_argument(
'--folder', metavar='', type=str,
help='Move the VM to another folder. It must exist. '
)
reconfig_disk_opts = reconfig_parser.add_argument_group('disk options')
reconfig_disk_opts.add_argument(
'--disk-id', metavar='', type=int,
help='The number that represents the disk'
)
reconfig_disk_opts.add_argument(
'--disk-prefix', metavar='', default='Hard disk',
help='The disk label prefix: default: \"%(default)s\"'
)
reconfig_disk_opts.add_argument(
'--sizeGB', type=int, metavar='',
help='New size hard disk in GB'
)
reconfig_nic_opts = reconfig_parser.add_argument_group('nic options')
reconfig_nic_opts.add_argument(
'--nic-id', metavar='', type=int,
help='The number that represents the network card.'
)
reconfig_nic_opts.add_argument(
'--nic-prefix', metavar='', default='Network adapter',
help='The network label prefix: default: \"%(default)s\"'
)
reconfig_nic_opts.add_argument(
'--network', metavar='',
help='The network of the interface, i.e. vlan_1234_network'
)
reconfig_nic_opts.add_argument(
'--driver', metavar='', choices=['vmxnet3', 'e1000'],
help='The network driver, default: vmxnet3'
)
if defaults:
reconfig_parser.set_defaults(**defaults)
def umount(self, *parents, **defaults):
""" Umount Parser """
# umount
umount_parser = self.subparsers.add_parser(
'umount', parents=list(parents),
help='Unmount ISO from CD-Rom device'
)
umount_parser.set_defaults(cmd='umount')
umount_parser.add_argument(
'--name', nargs='+',
help='name attribute of Virtual Machine object.'
)
if defaults:
umount_parser.set_defaults(**defaults)
def upload(self, *parents, **defaults):
""" Upload Parser """
# upload
upload_parser = self.subparsers.add_parser(
'upload', parents=list(parents),
help='Upload File'
)
upload_parser.set_defaults(cmd='upload')
upload_parser.add_argument(
'--iso', nargs='+', metavar='',
help='iso file that needs to be uploaded to vCenter.'
)
upload_parser.add_argument(
'--dest', metavar='',
help='destination folder where the iso will reside.'
)
upload_parser.add_argument(
'--datastore', metavar='', default='ISO_Templates',
help='datastore where the iso will reside. default: %(default)s'
)
upload_parser.add_argument(
'--verify-ssl', metavar='', default=False,
help='verify SSL certificate. default: %(default)s'
)
upload_parser.add_argument(
'--datacenter', metavar='', default='Linux',
help='vCenter Datacenter. default: %(default)s'
)
if defaults:
upload_parser.set_defaults(**defaults)
def drs(self, *parents):
"""Distributed Resource Scheduler rules, currently only anti-affinity"""
usage = """
Cluster DRS Rules
currently only anti-affinity rules are supported
help: vctools drs -h
vctools drs <vc> anti-affinity add <name> --vms <vm1 vm2...>
vctools drs <vc> anti-affinity delete <name>
"""
drs_parser = self.subparsers.add_parser(
'drs', parents=list(parents),
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=textwrap.dedent(usage),
description=textwrap.dedent(self.drs.__doc__),
help='Cluster DRS rules'
)
drs_parser.set_defaults(cmd='drs')
drs_parser.add_argument(
'drs_type', choices=['anti-affinity'],
help='options: anti-affinity (other options may come later)'
)
drs_parser.add_argument(
'function', choices=['add', 'delete'],
help='options: add|delete'
)
drs_parser.add_argument(
'name', metavar='', type=str,
help='Name of the DRS Rule'
)
drs_parser.add_argument(
'--vms', nargs='+', metavar='', type=str,
help='VMs to be added to the DRS rule'
)
drs_parser.add_argument(
'--cluster', metavar='',
help='vCenter ComputeResource'
)
drs_parser.add_argument(
'--prefix', metavar='', type=str,
help='Cluster DRS rule name prefix'
)
def sanitize(self, opts):
"""
Sanitize arguments. This will override the user / config input to a supported state.
Examples:
- rename files
- force booleans
- absolute path checks
Args:
opts (obj): argparse namespace parsed args
"""
# DRS rule names should always begin with the prefix
if opts.cmd == 'drs':
if not opts.name.startswith(opts.prefix):
opts.name = opts.prefix + opts.name
# mount path needs to point to an iso, and it doesn't make sense to add
# to the dotrc file, so this will append the self.opts.name value to it
if opts.cmd == 'mount':
for host in opts.name:
if not opts.path.endswith('.iso'):
if opts.path.endswith('/'):
opts.path = opts.path + host + '.iso'
else:
                        opts.path = opts.path + '/' + host + '.iso'
# path is relative in vsphere, so we strip off the first char.
if opts.path.startswith('/'):
opts.path = opts.path.lstrip('/')
if opts.cmd == 'upload':
# trailing slash is in upload method, so we strip it out here.
if opts.dest.endswith('/'):
opts.dest = opts.dest.rstrip('/')
# path is relative in vsphere, so we strip off the first character.
if opts.dest.startswith('/'):
opts.dest = opts.dest.lstrip('/')
# verify_ssl needs to be a boolean value.
if opts.verify_ssl:
opts.verify_ssl = bool(self.dotrc['upload']['verify_ssl'])
return opts
def setup_args(self, **dotrc):
"""
Method loads all the argparse parsers.
Args:
dotrc (dict): A config file of overrides
"""
parent_parsers = ['general', 'logging']
parents = []
subparsers = ['add', 'create', 'drs', 'mount', 'power', 'query', 'reconfig',
'umount', 'upload']
try:
# load parsers using defaults
for parent in parent_parsers:
if dotrc:
if parent in dotrc.keys():
parents.append(getattr(self, str(parent))(**dotrc[str(parent)]))
else:
parents.append(getattr(self, str(parent))())
else:
parents.append(getattr(self, str(parent))())
for parser in subparsers:
if dotrc:
if parser in dotrc.keys():
getattr(self, str(parser))(*parents, **dotrc[str(parser)])
else:
getattr(self, str(parser))(*parents)
else:
getattr(self, str(parser))(*parents)
except AttributeError:
raise
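# Example (illustrative sketch): building the CLI, then parsing and
# sanitizing user input.
#     argparser = ArgParser()
#     argparser.setup_args(**argparser.dotrc)
#     opts = argparser.sanitize(argparser.parser.parse_args())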
| StarcoderdataPython |
3326227 | from typing import List, Optional
import pytest
from cdv.test import CoinWrapper
from cdv.test import setup as setup_test
from chia.consensus.default_constants import DEFAULT_CONSTANTS
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.program import Program
from chia.types.spend_bundle import SpendBundle
from chia.util.ints import uint64
from chia.wallet.lineage_proof import LineageProof
from chia.wallet.puzzles import singleton_top_layer
from chia.wallet.sign_coin_spends import sign_coin_spends
from ownable_singleton.drivers.ownable_singleton_driver import (
create_ownable_singleton, create_inner_puzzle
)
SINGLETON_AMOUNT: uint64 = 1023
class TestOwnableSingleton:
@pytest.fixture(scope="function")
async def setup(self):
network, alice, bob = await setup_test()
await network.farm_block()
yield network, alice, bob
@pytest.mark.asyncio
    async def test_singleton_ownership_transfer(self, setup):
network, alice, bob = setup
try:
await network.farm_block(farmer=alice)
            # This retrieves a coin worth at least SINGLETON_AMOUNT mojos.
contribution_coin: Optional[CoinWrapper] = await alice.choose_coin(SINGLETON_AMOUNT)
conditions, launcher_coinsol = create_ownable_singleton(contribution_coin, alice.pk_)
            # This spends the contribution coin using the conditions built by the driver code above.
singleton_spend: SpendBundle = await alice.spend_coin(
contribution_coin,
pushtx=False,
custom_conditions=conditions,
remain=alice, amt=SINGLETON_AMOUNT
)
launcher_spend: SpendBundle = await sign_coin_spends(
[launcher_coinsol],
alice.pk_to_sk,
DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
)
# Aggregate them to make sure they are spent together
combined_spend = SpendBundle.aggregate(
[singleton_spend, launcher_spend])
result = await network.push_tx(combined_spend)
assert "error" not in result
# Make sure there is a singleton owned by alice
launcher_coin: Coin = singleton_top_layer.generate_launcher_coin(
contribution_coin,
SINGLETON_AMOUNT,
)
launcher_id = launcher_coin.name()
alice_singleton_puzzle = singleton_top_layer.puzzle_for_singleton(launcher_id,
create_inner_puzzle(alice.pk_))
filtered_result: List[Coin] = list(
filter(
lambda addition: (addition.amount == SINGLETON_AMOUNT)
and (
addition.puzzle_hash == alice_singleton_puzzle.get_tree_hash()
),
result["additions"],
)
)
assert len(filtered_result) == 1
# Eve Spend
singleton_coin: Coin = next(
x for x in result['additions'] if x.puzzle_hash == alice_singleton_puzzle.get_tree_hash())
lineage_proof: LineageProof = singleton_top_layer.lineage_proof_for_coinsol(launcher_coinsol) # noqa
new_owner_pubkey = bob.pk_
inner_solution: Program = Program.to([new_owner_pubkey])
full_solution: Program = singleton_top_layer.solution_for_singleton(
lineage_proof,
singleton_coin.amount,
inner_solution,
)
owner_change_spend = await alice.spend_coin(
CoinWrapper.from_coin(singleton_coin, puzzle=alice_singleton_puzzle), pushtx=False, args=full_solution)
result = await network.push_tx(owner_change_spend)
singleton_eve_coinsol = owner_change_spend.coin_spends[0]
assert "error" not in result
# Make sure there is a singleton owned by bob
bob_singleton_puzzle = singleton_top_layer.puzzle_for_singleton(launcher_id, create_inner_puzzle(bob.pk_))
filtered_result: List[Coin] = list(
filter(
lambda addition: (addition.amount == SINGLETON_AMOUNT)
and (
addition.puzzle_hash == bob_singleton_puzzle.get_tree_hash()
),
result["additions"],
)
)
assert len(filtered_result) == 1
# POST-EVE
singleton_coin: Coin = next(
x for x in result['additions'] if x.puzzle_hash == bob_singleton_puzzle.get_tree_hash())
lineage_proof: LineageProof = singleton_top_layer.lineage_proof_for_coinsol(singleton_eve_coinsol) # noqa
new_owner_pubkey = alice.pk_
inner_solution: Program = Program.to([new_owner_pubkey])
full_solution: Program = singleton_top_layer.solution_for_singleton(
lineage_proof,
singleton_coin.amount,
inner_solution,
)
owner_change_spend = await bob.spend_coin(
CoinWrapper.from_coin(singleton_coin, puzzle=bob_singleton_puzzle), pushtx=False, args=full_solution)
result = await network.push_tx(owner_change_spend)
assert "error" not in result
# Make sure there is a singleton owned by alice
filtered_result: List[Coin] = list(
filter(
lambda addition: (addition.amount == SINGLETON_AMOUNT)
and (
addition.puzzle_hash == alice_singleton_puzzle.get_tree_hash()
),
result["additions"],
)
)
assert len(filtered_result) == 1
finally:
await network.close()
| StarcoderdataPython |
1773357 | import os
import cv2
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
import numpy as np
import pickle
import matplotlib.pyplot as plt
NUMBER_IMAGES = 50
DIR = 'images/'
files = []
for root, directories, filenames in os.walk(DIR):
for filename in filenames:
files.append(os.path.join(root,filename))
def computa_descritores(img):
surf = cv2.xfeatures2d.SURF_create(400)
kp, des = surf.detectAndCompute(img,None)
return des
def cria_vocabulario(descritores, sz):
scaler = StandardScaler()
descs = []
for line in descritores:
for d in line[1]:
descs.append(d)
norm_data = scaler.fit_transform(descs)
kmeans = KMeans(n_clusters=sz)
kmeans.fit(norm_data)
return (kmeans.cluster_centers_, kmeans, scaler)
def representa_histograma(img, vocab):
scaler = vocab[2]
descs = computa_descritores(img)
norm_descs = scaler.transform(descs)
kmeans = vocab[1]
cluster = vocab[0]
pred = kmeans.predict(norm_descs)
# Create hist
hist = [0] * len(cluster)
for i in pred:
hist[i] += 1 / len(pred)
return hist
def cria_histogramas(files, size_vocab = 300):
result_descs = []
for i in files:
img = cv2.imread(i,0)
descs = computa_descritores(img)
result_descs.append((i, descs))
vocab = cria_vocabulario(result_descs, size_vocab)
hists = []
for i in files:
img = cv2.imread(i,0)
hist = representa_histograma(img, vocab)
hists.append((i, hist))
return (hists, vocab)
hists, vocab = cria_histogramas(files[:NUMBER_IMAGES])
with open('hists.pkl', 'wb') as f:
pickle.dump(hists, f)
with open('vocab.pkl', 'wb') as f:
pickle.dump(vocab, f)
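# Example (illustrative sketch): reloading the saved vocabulary later to
# describe a new image with the same visual words.
#     with open('vocab.pkl', 'rb') as f:
#         vocab = pickle.load(f)
#     hist = representa_histograma(cv2.imread('new.jpg', 0), vocab)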
| StarcoderdataPython |
3279360 | """Support for Overkiz locks."""
from __future__ import annotations
from typing import Any
from pyoverkiz.enums import OverkizCommand, OverkizCommandParam, OverkizState
from homeassistant.components.lock import LockEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import HomeAssistantOverkizData
from .const import DOMAIN
from .entity import OverkizEntity
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Overkiz locks from a config entry."""
data: HomeAssistantOverkizData = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
OverkizLock(device.device_url, data.coordinator)
for device in data.platforms[Platform.LOCK]
)
class OverkizLock(OverkizEntity, LockEntity):
"""Representation of an Overkiz Lock."""
async def async_lock(self, **kwargs: Any) -> None:
"""Lock method."""
await self.executor.async_execute_command(OverkizCommand.LOCK)
async def async_unlock(self, **kwargs: Any) -> None:
"""Unlock method."""
await self.executor.async_execute_command(OverkizCommand.UNLOCK)
@property
def is_locked(self) -> bool | None:
"""Return a boolean for the state of the lock."""
return (
self.executor.select_state(OverkizState.CORE_LOCKED_UNLOCKED)
== OverkizCommandParam.LOCKED
)
| StarcoderdataPython |
3252903 | <reponame>Vaibhav9119/MBA749A-Assignments
# Nothing is here
| StarcoderdataPython |
113676 | <filename>scraper/core/classifiers/BaseClassifier.py
import math
import re
from collections import Counter, OrderedDict
class BaseClassifier:
WORD = re.compile(r'\w+')
def get_cosine(self, vec1, vec2):
# print vec1, vec2
intersection = set(vec1.keys()) & set(vec2.keys())
numerator = sum([vec1[x] * vec2[x] for x in intersection])
sum1 = sum([vec1[x] ** 2 for x in vec1.keys()])
sum2 = sum([vec2[x] ** 2 for x in vec2.keys()])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
def text_to_vector(self, text):
return Counter(self.WORD.findall(text))
def get_similarity(self, a, b):
a = self.text_to_vector(a.strip().lower())
b = self.text_to_vector(b.strip().lower())
        return int((self.get_cosine(a, b) + 0.001) * 100)
    # Scorer and classifier
def classify_items_slug(self, items, score_min):
classify_items = {}
for key, item in enumerate(items):
scores = {}
for nex_key, next_item in enumerate(items):
if key == nex_key:
continue
slug_item = self.get_heuristic_slug(item['clean']['slug'])
slug_next_item = self.get_heuristic_slug(next_item['clean']['slug'])
score = self.get_similarity(slug_item, slug_next_item)
# Classify
if score >= score_min:
scores[nex_key] = score
# Sort by value
scores_sorted = sorted(scores.items(), key=lambda kv: kv[1], reverse=True)
classify_items[key] = scores_sorted
return self.order_and_clean(classify_items)
def order_and_clean(self, classify_items):
count_items = {}
for key, item in classify_items.items():
count_items[key] = len(item)
# Order asc
sort_count_items = {k: v for k, v in sorted(count_items.items(), key=lambda item: item[1])}
# Clean repeated
clean_items = {}
items_ids = []
for key, item in sort_count_items.items():
if key not in items_ids:
items_ids.append(key)
clean_items[key] = []
for classify_item in classify_items[key]:
id = classify_item[0]
if id not in items_ids:
clean_items[key].append(classify_item)
items_ids.append(id)
return clean_items
    # Heuristic: a slug should not contain two numeric tokens; truncate at the second number
@staticmethod
def get_heuristic_slug(slug):
names = slug.split('-')
count_numbers = 0
result_names = []
for name in names:
if BaseClassifier.has_number(name):
count_numbers += 1
if count_numbers > 1:
break
result_names.append(name)
return '-'.join(result_names)
    @staticmethod
    def has_number(str):
        return any(char in '0123456789' for char in str)
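    # Example (illustrative sketch): the similarity helpers need no subclass
    # state, so they can be exercised directly.
    #     clf = BaseClassifier()
    #     clf.get_similarity('nike-air-max-90', 'nike air max 90')  # -> 100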
| StarcoderdataPython |
93737 | <reponame>acstarr/clean_ipynb
from setuptools import setup
name = "clean_ipynb"
setup(
name=name,
version="1.1.1",
python_requires=">=3.6",
install_requires=("autoflake", "black", "click", "isort"),
packages=(name,),
entry_points={"console_scripts": ("{0}={0}.{1}:{1}".format(name, "cli"),)},
)
| StarcoderdataPython |
3271386 | # Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from fuel_agent.utils import utils
# Please take a look at the linux kernel documentation
# https://github.com/torvalds/linux/blob/master/Documentation/devices.txt.
# KVM virtio volumes have major number 252 in CentOS, but 253 in Ubuntu.
VALID_MAJORS = (3, 8, 65, 66, 67, 68, 69, 70, 71, 104, 105, 106, 107, 108, 109,
110, 111, 202, 252, 253)
# We are only interested in getting these
# properties from udevadm report
# MAJOR major device number
# MINOR minor device number
# DEVNAME e.g. /dev/sda
# DEVTYPE e.g. disk or partition for block devices
# DEVPATH path to a device directory relative to /sys
# ID_BUS e.g. ata, scsi
# ID_MODEL e.g. MATSHITADVD-RAM_UJ890
# ID_SERIAL_SHORT e.g. UH00_296679
# ID_WWN e.g. 0x50000392e9804d4b (optional)
# ID_CDROM e.g. 1 for cdrom device (optional)
UDEV_PROPERTIES = set(['MAJOR', 'MINOR', 'DEVNAME', 'DEVTYPE', 'DEVPATH',
'ID_BUS', 'ID_MODEL', 'ID_SERIAL_SHORT', 'ID_WWN',
'ID_CDROM'])
# more details about types you can find in dmidecode's manual
SMBIOS_TYPES = {'bios': '0',
'base_board': '2',
'processor': '4',
'memory_array': '16',
'memory_device': '17'}
def parse_dmidecode(type):
"""Parses `dmidecode` output.
:param type: A string with type of entity to display.
:returns: A list with dictionaries of entities for specified type.
"""
output = utils.execute('dmidecode', '-q', '--type', type)
lines = output[0].split('\n')
info = []
multiline_values = None
section = 0
for line in lines:
if len(line) != 0 and len(line.strip()) == len(line):
info.append({})
section = len(info) - 1
try:
k, v = (l.strip() for l in line.split(':', 1))
except ValueError:
k = line.strip()
if not k:
multiline_values = None
if multiline_values:
info[section][multiline_values].append(k)
else:
if not v:
multiline_values = k.lower()
info[section][multiline_values] = []
else:
info[section][k.lower()] = v
return info
def parse_lspci():
"""Parses `lspci` output.
:returns: A list of dicts containing PCI devices information
"""
output = utils.execute('lspci', '-vmm', '-D')
lines = output[0].split('\n')
info = [{}]
section = 0
for line in lines[:-2]:
try:
k, v = (l.strip() for l in line.split(':', 1))
except ValueError:
info.append({})
section += 1
else:
info[section][k.lower()] = v
return info
def parse_simple_kv(*command):
"""Parses simple key:value output from specified command.
:param command: A command to execute
:returns: A dict of parsed key-value data
"""
output = utils.execute(*command)
lines = output[0].split('\n')
info = {}
for line in lines[:-1]:
try:
k, v = (l.strip() for l in line.split(':', 1))
except ValueError:
break
else:
info[k.lower()] = v
return info
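# Example (illustrative sketch): any command that prints "Key: Value" lines
# can be parsed, e.g. the SMBIOS system section:
#     parse_simple_kv('dmidecode', '-q', '--type', '1')
#     # -> {'manufacturer': 'Dell Inc.', 'product name': '...', ...}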
def is_disk(dev, bspec=None, uspec=None):
"""Checks if given device is a disk.
:param dev: A device file, e.g. /dev/sda.
:param bspec: A dict of properties which we get from blockdev.
:param uspec: A dict of properties which we get from udevadm.
:returns: True if device is disk else False.
"""
# Filtering by udevspec
if uspec is None:
uspec = udevreport(dev)
if uspec.get('ID_CDROM') == '1':
return False
if uspec.get('DEVTYPE') == 'partition':
return False
if 'MAJOR' in uspec and int(uspec['MAJOR']) not in VALID_MAJORS:
return False
# Filtering by blockdev spec
if bspec is None:
bspec = blockdevreport(dev)
if bspec.get('ro') == '1':
return False
return True
def udevreport(dev):
"""Builds device udevadm report.
:param dev: A device file, e.g. /dev/sda.
:returns: A dict of udev device properties.
"""
report = utils.execute('udevadm',
'info',
'--query=property',
'--export',
'--name={0}'.format(dev),
check_exit_code=[0])[0]
spec = {}
for line in [l for l in report.splitlines() if l]:
key, value = line.split('=', 1)
value = value.strip('\'')
# This is a list of symbolic links which were created for this
# block device (e.g. /dev/disk/by-id/foobar)
if key == 'DEVLINKS':
spec['DEVLINKS'] = value.split()
if key in UDEV_PROPERTIES:
spec[key] = value
return spec
def blockdevreport(blockdev):
"""Builds device blockdev report.
:param blockdev: A block device file, e.g. /dev/sda.
:returns: A dict of blockdev properties.
"""
cmd = [
'blockdev',
'--getsz', # get size in 512-byte sectors
'--getro', # get read-only
'--getss', # get logical block (sector) size
'--getpbsz', # get physical block (sector) size
'--getsize64', # get size in bytes
'--getiomin', # get minimum I/O size
'--getioopt', # get optimal I/O size
'--getra', # get readahead
'--getalignoff', # get alignment offset in bytes
'--getmaxsect', # get max sectors per request
blockdev
]
opts = [o[5:] for o in cmd if o.startswith('--get')]
report = utils.execute(*cmd, check_exit_code=[0])[0]
return dict(zip(opts, report.splitlines()))
def extrareport(dev):
"""Builds device report using some additional sources.
:param dev: A device file, e.g. /dev/sda.
:returns: A dict of properties.
"""
spec = {}
name = os.path.basename(dev)
# Finding out if block device is removable or not
# actually, some disks are marked as removable
# while they are actually not e.g. Adaptec RAID volumes
try:
with open('/sys/block/{0}/removable'.format(name)) as file:
spec['removable'] = file.read().strip()
except Exception:
pass
for key in ('state', 'timeout'):
try:
with open('/sys/block/{0}/device/{1}'.format(name, key)) as file:
spec[key] = file.read().strip()
except Exception:
pass
return spec
def list_block_devices(disks=True):
"""Gets list of block devices, tries to guess which of them are disks
and returns list of dicts representing those disks.
:returns: A list of dict representing disks available on a node.
"""
bdevs = []
report = utils.execute('blockdev', '--report', check_exit_code=[0])[0]
lines = [line.split() for line in report.splitlines() if line]
startsec_idx = lines[0].index('StartSec')
device_idx = lines[0].index('Device')
size_idx = lines[0].index('Size')
for line in lines[1:]:
device = line[device_idx]
uspec = udevreport(device)
bspec = blockdevreport(device)
espec = extrareport(device)
        # if the device is not a disk, skip it
if disks and not is_disk(device, bspec=bspec, uspec=uspec):
continue
bdev = {
'device': device,
'startsec': line[startsec_idx],
'size': int(line[size_idx]),
'uspec': uspec,
'bspec': bspec,
'espec': espec
}
bdevs.append(bdev)
return bdevs
def match_device(uspec1, uspec2):
"""Tries to find out if uspec1 and uspec2 are uspecs from the same device.
It compares only some fields in uspecs (not all of them) which, we believe,
is enough to say exactly whether uspecs belong to the same device or not.
:param uspec1: A dict of properties which we get from udevadm.
    :param uspec2: A dict of properties which we get from udevadm.
:returns: True if uspecs match each other else False.
"""
# False if ID_WWN is given and does not match each other
if ('ID_WWN' in uspec1 and 'ID_WWN' in uspec2
and uspec1['ID_WWN'] != uspec2['ID_WWN']):
return False
# False if ID_SERIAL_SHORT is given and does not match each other
if ('ID_SERIAL_SHORT' in uspec1 and 'ID_SERIAL_SHORT' in uspec2
and uspec1['ID_SERIAL_SHORT'] != uspec2['ID_SERIAL_SHORT']):
return False
# True if at least one by-id link is the same for both uspecs
if ('DEVLINKS' in uspec1 and 'DEVLINKS' in uspec2
and any(x.startswith('/dev/disk/by-id') for x in
set(uspec1['DEVLINKS']) & set(uspec2['DEVLINKS']))):
return True
# True if ID_WWN is given and matches each other
# and DEVTYPE is given and is 'disk'
if (uspec1.get('ID_WWN') == uspec2.get('ID_WWN') is not None
and uspec1.get('DEVTYPE') == uspec2.get('DEVTYPE') == 'disk'):
return True
# True if ID_WWN is given and matches each other
# and DEVTYPE is given and is 'partition'
# and MINOR is given and matches each other
if (uspec1.get('ID_WWN') == uspec2.get('ID_WWN') is not None
and uspec1.get('DEVTYPE') == uspec2.get('DEVTYPE') == 'partition'
and uspec1.get('MINOR') == uspec2.get('MINOR') is not None):
return True
# True if ID_SERIAL_SHORT is given and matches each other
# and DEVTYPE is given and is 'disk'
if (uspec1.get('ID_SERIAL_SHORT') == uspec2.get('ID_SERIAL_SHORT')
is not None
and uspec1.get('DEVTYPE') == uspec2.get('DEVTYPE') == 'disk'):
return True
# True if DEVPATH is given and matches each other
if uspec1.get('DEVPATH') == uspec2.get('DEVPATH') is not None:
return True
return False
| StarcoderdataPython |
3363279 | <filename>src/oci/license_manager/models/bulk_upload_validation_error_info.py
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class BulkUploadValidationErrorInfo(object):
"""
Detailed error information corresponding to each column for a particular supported license record that could not be uploaded.
"""
def __init__(self, **kwargs):
"""
Initializes a new BulkUploadValidationErrorInfo object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param row_number:
The value to assign to the row_number property of this BulkUploadValidationErrorInfo.
:type row_number: int
:param product_name:
The value to assign to the product_name property of this BulkUploadValidationErrorInfo.
:type product_name: str
:param row_error:
The value to assign to the row_error property of this BulkUploadValidationErrorInfo.
:type row_error: list[oci.license_manager.models.BulkUploadCellInfo]
"""
self.swagger_types = {
'row_number': 'int',
'product_name': 'str',
'row_error': 'list[BulkUploadCellInfo]'
}
self.attribute_map = {
'row_number': 'rowNumber',
'product_name': 'productName',
'row_error': 'rowError'
}
self._row_number = None
self._product_name = None
self._row_error = None
@property
def row_number(self):
"""
**[Required]** Gets the row_number of this BulkUploadValidationErrorInfo.
Refers to the license record number as provided in the bulk upload file.
:return: The row_number of this BulkUploadValidationErrorInfo.
:rtype: int
"""
return self._row_number
@row_number.setter
def row_number(self, row_number):
"""
Sets the row_number of this BulkUploadValidationErrorInfo.
Refers to the license record number as provided in the bulk upload file.
:param row_number: The row_number of this BulkUploadValidationErrorInfo.
:type: int
"""
self._row_number = row_number
@property
def product_name(self):
"""
**[Required]** Gets the product_name of this BulkUploadValidationErrorInfo.
Product name of invalid row.
:return: The product_name of this BulkUploadValidationErrorInfo.
:rtype: str
"""
return self._product_name
@product_name.setter
def product_name(self, product_name):
"""
Sets the product_name of this BulkUploadValidationErrorInfo.
Product name of invalid row.
:param product_name: The product_name of this BulkUploadValidationErrorInfo.
:type: str
"""
self._product_name = product_name
@property
def row_error(self):
"""
**[Required]** Gets the row_error of this BulkUploadValidationErrorInfo.
Error information corresponding to each column.
:return: The row_error of this BulkUploadValidationErrorInfo.
:rtype: list[oci.license_manager.models.BulkUploadCellInfo]
"""
return self._row_error
@row_error.setter
def row_error(self, row_error):
"""
Sets the row_error of this BulkUploadValidationErrorInfo.
Error information corresponding to each column.
:param row_error: The row_error of this BulkUploadValidationErrorInfo.
:type: list[oci.license_manager.models.BulkUploadCellInfo]
"""
self._row_error = row_error
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| StarcoderdataPython |
1767582 | <filename>aorn/methods/antest.py
# -*- coding: utf-8 -*-
'''Generic test method module
'''
from __future__ import with_statement, division, absolute_import, print_function
from abc import ABCMeta, abstractmethod
import six
@six.add_metaclass(ABCMeta)
class ANTest(object):
'''Generic test method class
'''
__doc__ = ''
def __init__(self, *args, **kwargs):
self._is_audio = None
self._isnt_audio = None
def get_doc(self):
'''Returns method documentation (info)
'''
return self.__doc__
def is_audio(self):
'''Returns analyze result of audio-test
'''
if self._is_audio is not None:
return self._is_audio
if self._isnt_audio is not None:
return not self._isnt_audio
def isnt_audio(self):
'''Returns analyze result of non-audio-test
'''
is_audio = self.is_audio()
if is_audio is not None:
return not is_audio
def set_is_audio(self):
'''Sets analyze result of audio-test
'''
self._is_audio = True
def set_isnt_audio(self):
'''Sets analyze result of non-audio-test
'''
self._isnt_audio = True
@abstractmethod
def analyze(self, samples_store):
'''Analyzes samples
'''
raise NotImplementedError('Abstract method not overridden!')
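# Example (illustrative sketch): a minimal concrete test method.
#     class SilenceTest(ANTest):
#         '''Flags every sample store as non-audio.'''
#         def analyze(self, samples_store):
#             self.set_isnt_audio()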
# vim: ts=4:sw=4:et:fdm=indent:ff=unix
| StarcoderdataPython |
113902 | #!/usr/bin/env python3
# ========================================================================
#
# Imports
#
# ========================================================================
import os
import shutil
import argparse
import subprocess as sp
import numpy as np
import time
from datetime import timedelta
# ========================================================================
#
# Main
#
# ========================================================================
if __name__ == "__main__":
# Timer
start = time.time()
# Parse arguments
parser = argparse.ArgumentParser(description="Run cases")
parser.add_argument(
"-np", "--num-procs", dest="np", help="Number of MPI ranks", type=int, default=1
)
args = parser.parse_args()
# Setup
ncells = np.arange(10, 66, 2)
cfls = np.linspace(1e-2, 0.999, 50)
# cfls = [0.056, 0.17, 0.28, 0.46, 0.86]
workdir = os.getcwd()
pelecbin = os.path.abspath("PeleC3d.gnu.MPI.ex")
casedir = os.path.abspath("cases")
iname = "inputs_3d"
pname = "probin"
if os.path.exists(casedir):
shutil.rmtree(casedir)
os.makedirs(casedir)
# Maximum velocity in domain, max(u+c)
umax = 41662.30355
    L = 2  # domain length
# Loop over number of cells
for i, ncell in enumerate(ncells):
for j, cfl in enumerate(cfls):
# Prep run directory
rundir = os.path.join(casedir, "{0:d}cells_{1:f}".format(ncell, cfl))
os.makedirs(rundir)
shutil.copy2(os.path.join(workdir, iname), rundir)
shutil.copy2(os.path.join(workdir, pname), rundir)
log = open(os.path.join(rundir, "out"), "w")
# Calculate fixed time step
            dt = cfl * L / (ncell * umax)
status = "Running {0:d} cells at CFL = {1:f} DT = {2:e}".format(
ncell, cfl, dt
)
print(status)
log.write(status + "\n")
log.flush()
# Run Pele
os.chdir(rundir)
cmd = "mpirun -np {0:d} {1:s} {2:s} pelec.fixed_dt={3:e} amr.n_cell={4:d} {4:d} {4:d}".format(
args.np, pelecbin, iname, dt, ncell
)
proc = sp.Popen(cmd, shell=True, stdout=log, stderr=sp.PIPE)
retcode = proc.wait()
proc = sp.Popen(
"ls -1v plt*/Header | tee movie.visit",
shell=True,
stdout=log,
stderr=sp.PIPE,
)
retcode = proc.wait()
log.flush()
os.chdir(workdir)
# output timer
end = time.time() - start
print(
"Elapsed time "
+ str(timedelta(seconds=end))
+ " (or {0:f} seconds)".format(end)
)
| StarcoderdataPython |
1725957 | <reponame>MrFunBarn/ADRL
from astropy.io import fits
class dataSet():
    def __init__(self):
        pass
| StarcoderdataPython |
3201149 | # -*- coding: utf-8 -*-
"""OpenCTI CrowdStrike connector module."""
from crowdstrike.core import CrowdStrike
__all__ = ["CrowdStrike"]
| StarcoderdataPython |
1635480 | <filename>code/python/echomesh/sound/Sound.py
from __future__ import absolute_import, division, print_function, unicode_literals
from echomesh.base import Config
from echomesh.util import ImportIf
pyaudio = ImportIf.imp('pyaudio')
_PYAUDIO = None
_LIST_FORMAT = ('{name:24}: {maxInputChannels} in, ' +
'{maxOutputChannels} out at {defaultSampleRate}Hz')
def PYAUDIO():
global _PYAUDIO
if not _PYAUDIO:
_PYAUDIO = pyaudio.PyAudio()
return _PYAUDIO
INPUT, OUTPUT = True, False
def devices():
for i in range(PYAUDIO().get_device_count()):
yield PYAUDIO().get_device_info_by_index(i)
def get_default_device(is_input):
if is_input:
return PYAUDIO().get_default_input_device_info()
else:
return PYAUDIO().get_default_output_device_info()
def get_index_field_from_name(is_input, name, field):
name = name.lower()
for d in devices():
if d['name'].lower().startswith(name) and d[field]:
return d['index']
raise Exception("Don't understand name %s" % name)
def get_field_names(is_input):
inout = 'input' if is_input else 'output'
field = 'max%sChannels' % inout.capitalize()
return inout, field
def _resolve_index(is_input, name, index, inout, field):
if name:
return get_index_field_from_name(is_input, name, field)
if index < 0:
index = get_default_device(is_input)['index']
return index
def get_index_from_config(is_input, get=None):
get = get or Config.get
inout, field = get_field_names(is_input)
name = get('audio', inout, 'device_name')
index = get('audio', inout, 'device_index')
return _resolve_index(is_input, name, index, inout, field)
def get_index(is_input, name, index):
inout, field = get_field_names(is_input)
return _resolve_index(is_input, name, index, inout, field)
def get_device_info(index):
return PYAUDIO().get_device_info_by_index(index)
def info():
return dict((d['index'], _LIST_FORMAT.format(**d)) for d in devices())
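# Example (illustrative sketch): resolve the default input device and
# inspect it.
#     index = get_index(INPUT, name=None, index=-1)
#     get_device_info(index)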
| StarcoderdataPython |
151691 | # MIT License
#
# Copyright (c) 2018 Image & Vision Computing Lab, Institute of Information Science, Academia Sinica
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ====================================================================================================
import tensorflow as tf
import numpy as np
import sys
import os
def embedding_idx(index,k_num):
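    # Offset each row's indices by k_num * i so they index into a codebook
    # flattened from shape (rows, k_num, dim) to (rows * k_num, dim)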
for i in range(index.shape[0]):
index[i] = index[i] + k_num*i
return index
class lenetsound_lenetfashion:
def __init__(self,c,w1,b1,i1,outlayer1,w2,b2,i2,outlayer2):
with tf.variable_scope("lenet_lenet"):
self.c = []
codebook = []
for i in range(3):
codebook.append(tf.Variable(c[i],dtype=tf.float32))
self.c.append(tf.reshape(codebook[i],[c[i].shape[0]*c[i].shape[1],c[i].shape[2]]))
self.w1=[]
self.b1=[]
for i in range(4):
self.w1.append(tf.constant(w1[i],tf.float32))
self.b1.append(tf.constant(b1[i],tf.float32))
self.i1 =[]
for i in range(len(i1)):
self.i1.append(tf.constant(embedding_idx(i1[i],c[i].shape[1]),dtype=tf.int32))
self.w2=[]
self.b2=[]
for i in range(4):
self.w2.append(tf.constant(w2[i],tf.float32))
self.b2.append(tf.constant(b2[i],tf.float32))
self.i2=[]
for i in range(len(i2)):
self.i2.append(tf.constant(embedding_idx(i2[i],c[i].shape[1]),dtype=tf.int32))
self.outlayer1 = outlayer1
self.outlayer2 = outlayer2
def model1_target(self,image):
output=[]
output.append(max_pool_2x2(tf.nn.relu(conv2d(image, self.w1[0]) + self.b1[0])))
output.append(max_pool_2x2(tf.nn.relu(conv2d(output[0], self.w1[1]) + self.b1[1])))
output.append(tf.nn.relu(tf.matmul(tf.contrib.layers.flatten(output[1]), self.w1[2]) + self.b1[2]))
output.append(tf.matmul(output[2], self.w1[3]) + self.b1[3])
return output
def model1(self,image):
w1 = []
for i in range(2):
w1.append(tf.transpose(tf.reshape(tf.transpose(tf.nn.embedding_lookup(self.c[i],self.i1[i]),[0,2,1]),[self.w1[i].shape[2],self.w1[i].shape[0] ,self.w1[i].shape[1] ,self.w1[i].shape[3]]),(1,2,0,3)))
for i in range(2,3):
w1.append(tf.reshape(tf.transpose(tf.nn.embedding_lookup(self.c[i],self.i1[i]),[0,2,1]),[self.w1[i].shape[0],self.w1[i].shape[1]]))
w1.append(tf.Variable(self.outlayer1,dtype=tf.float32))
output=[]
output.append(max_pool_2x2(tf.nn.relu(conv2d(image, w1[0]) + self.b1[0])))
output.append(max_pool_2x2(tf.nn.relu(conv2d(output[0], w1[1]) + self.b1[1])))
output.append(tf.nn.relu(tf.matmul(tf.contrib.layers.flatten(output[1]),w1[2]) + self.b1[2]))
output.append(tf.nn.relu(tf.matmul(output[2], w1[3]) + self.b1[3]))
return output
def model2_target(self,image):
output=[]
output.append(max_pool_2x2(tf.nn.relu(conv2d(image, self.w2[0]) + self.b2[0])))
output.append(max_pool_2x2(tf.nn.relu(conv2d(output[0], self.w2[1]) + self.b2[1])))
output.append(tf.nn.relu(tf.matmul(tf.contrib.layers.flatten(output[1]), self.w2[2]) + self.b2[2]))
output.append(tf.matmul(output[2], self.w2[3]) + self.b2[3])
return output
def model2(self,image):
w2 = []
for i in range(2):
w2.append(tf.transpose(tf.reshape(tf.transpose(tf.nn.embedding_lookup(self.c[i],self.i2[i]),[0,2,1]),[self.w2[i].shape[2],self.w2[i].shape[0] ,self.w2[i].shape[1] ,self.w2[i].shape[3]]),(1,2,0,3)))
for i in range(2,3):
w2.append(tf.reshape(tf.transpose(tf.nn.embedding_lookup(self.c[i],self.i2[i]),[0,2,1]),[self.w2[i].shape[0],self.w2[i].shape[1]]))
w2.append(tf.Variable(self.outlayer2,dtype=tf.float32))
output=[]
output.append(max_pool_2x2(tf.nn.relu(conv2d(image, w2[0]) + self.b2[0])))
output.append(max_pool_2x2(tf.nn.relu(conv2d(output[0], w2[1]) + self.b2[1])))
output.append(tf.nn.relu(tf.matmul(tf.contrib.layers.flatten(output[1]),w2[2]) + self.b2[2]))
output.append(tf.nn.relu(tf.matmul(output[2], w2[3]) + self.b2[3]))
return output
def get_imshape(self):
imshape={
'model1':[32,32,1],
'model2':[32,32,1],
}
return imshape['model1'],imshape['model2']
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1], padding='SAME')
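# Construction sketch (assumes TF 1.x and codebooks/weights/indices loaded from
# checkpoints; all variable names below are placeholders):
#   net = lenetsound_lenetfashion(c, w1, b1, i1, outlayer1, w2, b2, i2, outlayer2)
#   images = tf.placeholder(tf.float32, [None] + net.get_imshape()[0])
#   logits = net.model1(images)[-1]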
| StarcoderdataPython |
3227743 | <gh_stars>1-10
import os
class Config():
SECRET_KEY = 'ilovetofly'
SQLALCHEMY_DATABASE_URI = 'sqlite:////var/tmp/bapa.db'
SQLALCHEMY_TRACK_MODIFICATIONS = False
MAIL_SERVER = 'smtp.dummy.com'
MAIL_PORT = '123'
MAIL_USERNAME = 'uname'
MAIL_PASSWORD = '<PASSWORD>'
MAIL_DEFAULT_SENDER = '<EMAIL>'
USHPA_HOST = 'https://old.ushpa.aero'
USHPA_CHAPTER = '1000'
USHPA_PIN = '2000'
HOST = 'http://localhost:5000'
class Debug(Config):
DEBUG = True
class Develop(Debug):
SECRET_KEY = os.environ.get('SECRET_KEY', Config.SECRET_KEY)
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL', Config.SQLALCHEMY_DATABASE_URI)
MAIL_SERVER = os.environ.get('SENDGRID_SMTP_SERVER')
MAIL_PORT = os.environ.get('MAIL_PORT')
MAIL_USERNAME = "apikey"
MAIL_PASSWORD = os.environ.get('SENDGRID_API_KEY')
MAIL_DEFAULT_SENDER = os.environ.get('MAIL_DEFAULT_SENDER')
USHPA_CHAPTER = os.environ.get('USHPA_CHAPTER')
USHPA_PIN = os.environ.get('USHPA_PIN')
HOST = os.environ.get('HOST', Config.HOST)
PAYPAL = {
'domain': 'https://www.sandbox.paypal.com',
'endpoint': 'https://www.sandbox.paypal.com/cgi-bin/webscr',
'button_id': os.environ.get('PAYPAL_BUTTON_ID'),
'donate_button_id': os.environ.get('PAYPAL_DONATE_BUTTON_ID')
}
#If RECAPTCHA is True, the app will implement google's reCAPTCHA
RECAPTCHA = True
RECAPTCHA_ENDPOINT = 'https://www.google.com/recaptcha/api/siteverify'
RECAPTCHA_SECRET = os.environ.get('RECAPTCHA_SECRET')
RECAPTCHA_SITEKEY = os.environ.get('RECAPTCHA_SITEKEY')
#https://developers.google.com/recaptcha/intro
#For integrating Google Calendar with fullcalendar
GOOGLE_API_KEY=os.environ.get('GOOGLE_API_KEY')
GOOGLE_CAL_ID=os.environ.get('GOOGLE_CAL_ID')
#Google Analytics
GA_TRACKING_ID = os.environ.get('GA_TRACKING_ID')
#profile pictures
CLOUDINARY_URL = os.environ.get('CLOUDINARY_URL')
class Testing(Develop):
TESTING = True #to avoid sending emails
RECAPTCHA = False
SQLALCHEMY_DATABASE_URI = 'sqlite:////var/tmp/test-bapa.db'
class Production(Develop):
DEBUG = False
PAYPAL = {
'domain': 'https://www.paypal.com',
'endpoint': 'https://www.paypal.com/cgi-bin/webscr',
'button_id': os.environ.get('PAYPAL_BUTTON_ID')
}
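# Selection sketch: a Flask app factory would typically load one of these, e.g.
#   app.config.from_object('config.Develop')  # module path is an assumption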
| StarcoderdataPython |
4801004 | <reponame>marc-haddad/web-scraping-challenge<filename>Missions_to_Mars/app.py<gh_stars>0
from flask import Flask, render_template, redirect
import pymongo
from flask_pymongo import PyMongo
import scrape_mars
# Configure app
app = Flask(__name__)
mongo = PyMongo(app, uri="mongodb://localhost:27017/scrape_mars_app")
@app.route("/scrape")
def scrape_route():
scrape_collection = mongo.db.scrape_collection
scrape_data = scrape_mars.scrape()
    # Collection.update() is deprecated in PyMongo; replace_one() keeps the
    # same single-document upsert behaviour
    scrape_collection.replace_one({}, scrape_data, upsert=True)
return redirect("/", code=302)
@app.route("/")
def index():
scrape_stuff = mongo.db.scrape_collection.find_one()
return render_template("index.html", scrape_stuff=scrape_stuff)
if __name__ == "__main__":
    app.run(debug=True)
 | StarcoderdataPython |
16802 | <filename>tools/modules/verify.py
# ------------------------------------------------------------------------
# Copyright 2020, 2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
""" Verify settings in configuration YAML file (helper functions) """
# Global modules
# None
# Local modules
from modules.command import (
CmdShell,
CmdSsh
)
from modules.constants import getConstants
from modules.exceptions import RpmFileNotFoundException
from modules.ocp import ocLogin
from modules.tools import (
refSystemIsStandard,
areContainerMemResourcesValid,
getRpmFileForPackage,
strBold,
getHdbCopySshCommand
)
# Functions for formatting the output
def showMsgOk(text):
""" print text with header """
print("[Ok ] " + text)
def showMsgErr(text):
""" print text with header """
print('[' + strBold('Error') + '] ' + text)
def showMsgInd(text):
""" print text with header """
print("[.....] " + text)
# Classes
class Verify():
""" Verify various configuration settings """
def __init__(self, ctx):
self._ctx = ctx
self._cmdSshNfs = CmdSsh(ctx, ctx.cf.nfs.host.name, ctx.cr.nfs.user,
reuseCon=False)
self._cmdSshNws4 = CmdSsh(ctx, ctx.cf.refsys.nws4.host.name, ctx.cr.refsys.nws4.sidadm,
reuseCon=False)
self._cmdSshHdb = CmdSsh(ctx, ctx.cf.refsys.hdb.host.name, ctx.cr.refsys.hdb.sidadm,
reuseCon=False)
# Public methods
def verify(self):
""" Verify various configuration settings """
success = True
success = self._verifyOcp() and success
success = self._verifyImages() and success
success = self._verifyNws4() and success
success = self._verifyHdb() and success
success = self._verifyNfs() and success
success = self._verifySapSystem() and success
success = self.verifyNfsToHdbSshConnection() and success
return success
def verifyNfsToHdbSshConnection(self, doPrint=True):
""" Verify SSH connection from NFS host to HDB host """
hdbUser = self._ctx.cr.refsys.hdb.sidadm
hdbHost = self._ctx.cf.refsys.hdb.host
testSsh, testSshSecrets = getHdbCopySshCommand(self._ctx, withLogin=True, reuseCon=False)
# set dummy command
testSsh = testSsh + " true"
result = self._cmdSshNfs.run(testSsh, testSshSecrets)
success = result.rc == 0
if doPrint:
nfsUser = self._ctx.cr.nfs.user
nfsHost = self._ctx.cf.nfs.host.name
if success:
showMsgOk(f"SSH connection to HDB host '{hdbHost.name}' "
f"from NFS host '{nfsHost}' was successful.")
else:
showMsgErr(f"Cannot establish ssh connection '{nfsUser.name}@{nfsHost}"
f" → '{hdbUser.name}@{hdbHost.ip}' ('{hdbUser.name}@{hdbHost.name}').")
showMsgInd(f"Error message: '{result.out}'")
showMsgInd("Check the ssh connection"
f" '{nfsUser.name}@{nfsHost}' → '{hdbUser.name}@{hdbHost.ip}'.")
return success
# Private methods
def _verifyOcp(self):
""" Verify OCP settings """
# pylint: disable=too-many-statements
def isDomainNameValid(loginAnsw):
return 'no such host' not in loginAnsw
def isCredentialsValid(loginAnsw):
condFail1 = (loginAnsw.startswith('Login failed')
and 'Verify you have provided correct credentials' in loginAnsw)
condFail2 = not (loginAnsw.startswith('Logged into')
or loginAnsw.startswith('Login successful'))
return not (condFail1 or condFail2)
def isProjectValid(project):
# Assumes that an 'oc login' has been performed beforehand
cmd = f'oc get project {project} -o custom-columns=NAME:.metadata.name --no-headers'
# The command behaves as follows:
# - If the project exists in the OpenShift cluster its name is printed to stdout.
# - If it does not exist nothing is printed to stdout and an error message is printed
# to stderr
return project in CmdShell().run(cmd).out
def areResourcesValid(ocp, containerType):
return areContainerMemResourcesValid(ocp, containerType)
def isSecretExisting(secret):
# Assumes that an 'oc login' has been performed beforehand
cmd = f'oc describe secret {secret}'
out = CmdShell().run(cmd).err
return not out.startswith('Error from server')
def verifySetup(ocp, loginAnsw):
success = True
if isDomainNameValid(loginAnsw):
showMsgOk("OCP domain name is valid.")
if isCredentialsValid(loginAnsw):
showMsgOk("OCP user and password are valid.")
if isProjectValid(ocp.project):
showMsgOk("OCP project is valid.")
else:
showMsgErr(f"OCP project '{ocp.project}' does not exist.")
success = False
else:
showMsgErr(f"OCP user '{user.name}' and/or password are invalid.")
success = False
else:
showMsgErr(f"OCP domain name '{ocp.domain}' is invalid.")
success = False
return success
def verifyResources(ocp):
success = True
for containerType in self._ctx.config.getContainerFlavors():
if containerType == 'init':
continue
if areResourcesValid(ocp, containerType):
showMsgOk("OCP memory resources for container type "
f"'{containerType}' are valid.")
else:
showMsgErr(f"OCP memory limit for container type '{containerType}' "
f"is less than the value specified for requested memory.")
success = False
return success
def verifySecret(ocp):
success = True
if not refSystemIsStandard(self._ctx):
secret = ocp.containers.di.secret
if secret:
if isSecretExisting(secret):
showMsgOk(f"OCP secret '{secret}' exists.")
else:
showMsgErr(f"Specified OCP secret '{secret}' "
"was not found in OCP cluster.")
showMsgInd("Make sure the secret exists and is "
"created in the right project.")
success = False
else:
showMsgErr("Reference system is a distributed system.")
showMsgInd("You must specify the name of an OCP secret in the config.yaml file")
showMsgInd("containing the information about the "
"SAP HANA DB user and password.")
success = False
return success
ocp = self._ctx.cf.ocp
user = self._ctx.cr.ocp.user
success = verifySetup(ocp, ocLogin(self._ctx, user))
success = success and verifyResources(ocp)
success = success and verifySecret(ocp)
return success
def _verifyImages(self):
""" verify Settings for images """
def _isRpmFileForPackageAvailable(packageName, path):
try:
getRpmFileForPackage(packageName, path)
return True
except RpmFileNotFoundException as exp:
print(exp.errorText)
return False
def _getImageTypes(ctx):
return list(ctx.cf.images.__dict__)
success = True
defaultPackagesDir = getConstants().defaultPackagesDir
for flavor in _getImageTypes(self._ctx):
if flavor == "init":
continue
packages = getattr(self._ctx.cf.images, flavor).packages
for package in packages:
if package.dnfInstallable:
showMsgOk(f"Package {package.packageName} installable via dnf install.")
else:
if _isRpmFileForPackageAvailable(package.packageName, defaultPackagesDir):
showMsgOk(f"Package {package.packageName} installable via rpm.")
else:
showMsgErr(f"Package {package.packageName} not found "
"in {defaultPackagesDir}.")
success = False
return success
def _verifyNfs(self):
""" Verify NFS settings """
nfs = self._ctx.cf.nfs
user = self._ctx.cr.nfs.user
success = True
if self._isHostNameValid(self._cmdSshNfs):
showMsgOk("NFS host is valid.")
if self._isUserValid(self._cmdSshNfs):
showMsgOk("NFS user is valid.")
else:
showMsgErr(f"NFS user '{user.name}' is invalid "
f"or ssh is not set up correctly.")
showMsgInd(f"Check first the existence of '{user.name}' on '{nfs.host.name}'.")
showMsgInd(f"If exists, check the ssh connection by executing: "
f"ssh {user.name}@{nfs.host.name}")
success = False
else:
showMsgErr(f"NFS host '{nfs.host.name}' is invalid.")
success = False
return success
def _verifyNws4(self):
""" Verify settings for reference system component 'nws4' """
return self._verifyRefSys('nws4', self._cmdSshNws4)
def _verifyHdb(self):
""" Verify settings for reference system component 'hdb' """
success = self._verifyRefSys('hdb', self._cmdSshNws4)
if success:
if self._isHdbBaseDirValid():
showMsgOk("HDB base directory is valid.")
else:
showMsgErr(f"HDB base directory '{self._ctx.cf.refsys.hdb.base}' is invalid.")
success = False
return success
def _verifyRefSys(self, component, cmdSsh):
""" Verify settings for given component' """
compUp = component.upper()
sidU = getattr(self._ctx.cf.refsys, component).sidU
hostname = getattr(self._ctx.cf.refsys, component).host.name
user = getattr(self._ctx.cr.refsys, component).sidadm
success = True
if self._isHostNameValid(cmdSsh):
showMsgOk(f"{compUp} host is valid.")
if self._isUserValid(cmdSsh):
showMsgOk(f"{compUp} user is valid.")
if self._isSidInUsrSapServices(cmdSsh, sidU):
showMsgOk(f"{compUp} SAP system ID is valid.")
else:
showMsgErr(f"{compUp} SAP system ID is invalid.")
success = False
else:
showMsgErr(f"{compUp} user '{user.name}' is invalid "
f"or ssh is not set up correctly.")
showMsgInd(f"Check first the existence of '{user.name}' on '{hostname}'.")
showMsgInd(f"If exists, check the ssh connection by executing: "
f"ssh {user.name}@{hostname}")
success = False
else:
showMsgErr(f"{compUp} host '{hostname}' is invalid.")
success = False
return success
def _verifySapSystem(self):
""" Verify SAP system setup """
success = True
if refSystemIsStandard(self._ctx):
if not self._ctx.cf.refsys.nws4.host.name == self._ctx.cf.refsys.hdb.host.name:
success = False
showMsgErr(f"The HANADB database '{self._ctx.cf.refsys.hdb.sidU}' "
"must run on the same host as the NWS4 SAP System.")
if not self._isHdbSidInDefaultPfl():
showMsgErr("You must not use a different HANADB SAP System "
f"than specified for the NWS4 SAP System '{self._ctx.cf.refsys.nws4.sidU}'.")
success = False
return success
def _isHostNameValid(self, cmdSsh):
out = self._checkSshLogin(cmdSsh)
return 'Could not resolve hostname' not in out
def _isUserValid(self, cmdSsh):
out = self._checkSshLogin(cmdSsh)
return 'Permission denied' not in out and 'Connection reset' not in out
def _checkSshLogin(self, cmdSsh):
return cmdSsh.run('true').err
    def _isSidInUsrSapServices(self, cmdSsh, sidU):
        # the grep | wc -l line count is written to stdout, not stderr
        out = cmdSsh.run(f' grep {sidU} /usr/sap/sapservices | wc -l').out
        return not out.startswith('0')
def _isDirValid(self, cmdSsh, directory):
out = cmdSsh.run(f' ls {directory}').err
return 'No such file or directory' not in out
def _isHdbBaseDirValid(self):
out = self._cmdSshHdb.run(f' ls {self._ctx.cf.refsys.hdb.base}').out
return 'data' in out and 'log' in out and 'shared' in out
def _isHdbSidInDefaultPfl(self):
defaultPfl = f'/usr/sap/{self._ctx.cf.refsys.nws4.sidU}/SYS/profile/DEFAULT.PFL'
out = self._cmdSshNws4.run(f' grep dbs/hdb/dbname {defaultPfl}').out
return self._ctx.cf.refsys.hdb.sidU in out
class VerifyOcp():
""" Verify various ocp settings """
def __init__(self, ctx):
self._ctx = ctx
ocLogin(ctx, ctx.cr.ocp.admin)
self._workerNodes = CmdShell().run(
'oc get nodes'
+ ' --selector="node-role.kubernetes.io/worker"'
+ " -o template --template"
+ " '{{range .items}}{{.metadata.name}}{{"+r'"\n"'+"}}{{end}}'"
).out.split()
# Public methods
def verify(self):
""" Verify various ocp settings """
success = True
success = self._verifySccForProject() and success
success = self._verifyOcpServiceAccount() and success
if not self._workerNodes:
showMsgErr("Could not retrieve list of worker nodes.")
showMsgInd("SELinux and pid limit settings cannot be verified!")
success = False
else:
success = self._verifySeLinux() and success
success = self._verifyPidLimit() and success
return success
# Private methods
def _runSshJumpCmd(self, worker, cmd):
ctx = self._ctx
innerSshCmd = 'ssh'
if ctx.cr.ocp.helper.user.sshid:
            innerSshCmd += f' -i {ctx.cr.ocp.helper.user.sshid}'
innerSshCmd += ' -o StrictHostKeyChecking=no'
innerSshCmd += f' core@{worker} {cmd}'
helperHost = ctx.cf.ocp.helper.host
helperUser = ctx.cr.ocp.helper.user
res = CmdSsh(ctx, helperHost.name, helperUser, reuseCon=False).run(innerSshCmd)
rval = res.out
if res.rc != 0:
showMsgErr(f"Could not execute SSH command on worker node '{worker}'"
f" as user '{helperUser.name}' on helper node '{helperHost.name}'")
showMsgInd(f"({res.err})")
rval = 'SSH CONNECT ERROR'
return rval
def _verifySccForProject(self):
ocp = self._ctx.cf.ocp
out = CmdShell().run(
'oc adm policy who-can use scc anyuid'
" -o template --template='{{range .groups}}{{.}}{{"+r'"\n"'+"}}{{end}}'"
).out.split()
if f'system:serviceaccounts:{ocp.project}' in out:
showMsgOk("Security Context Constraint 'anyuid' is valid.")
return True
showMsgErr(f"Project '{ocp.project}' does not have "
"the 'anyuid' Security Context Constraint permission.")
showMsgInd("Logon as kube:admin and execute:")
showMsgInd(" oc adm policy add-scc-to-group anyuid"
f' "system:serviceaccounts:{ocp.project}"\n')
return False
def _verifyOcpServiceAccount(self):
ocp = self._ctx.cf.ocp
out = CmdShell().run(
'oc adm policy who-can use scc hostmount-anyuid'
" -o template --template='{{range .users}}{{.}}{{"+r'"\n"'+"}}{{end}}'"
).out.split()
if f'system:serviceaccount:{ocp.project}:{ocp.project}-sa' in out:
showMsgOk("Security Context Constraint 'hostmount-anyuid' is valid.")
return True
showMsgErr(f"Service account {ocp.project}-sa does not have "
"the 'hostmount-anyuid' Security Context Constraint.")
showMsgInd("Logon as kube:admin, create the service account and execute:")
showMsgInd(" oc adm policy add-scc-to-user hostmount-anyuid"
f' "system:serviceaccount:{ocp.project}:{ocp.project}-sa"\n')
return False
def _verifySeLinux(self):
success = True
for worker in self._workerNodes:
enforceState = self._runSshJumpCmd(worker, 'getenforce')
if enforceState in ('Permissive', 'Disabled'):
showMsgOk(f"SELinux setting for worker {worker} is valid.")
else:
showMsgErr(f"Invalid SELinux setting '{enforceState}' for worker {worker}.")
success = False
return success
def _verifyPidLimit(self):
success = True
for worker in self._workerNodes:
pidsLimit = self._runSshJumpCmd(worker, 'crio config | grep pids_limit')
            if pidsLimit == 'SSH CONNECT ERROR':
                success = False
                continue
            pidsLimit = int(pidsLimit.split('=')[1])
if pidsLimit >= 8192:
showMsgOk(f"CRI-O pids_limit setting for worker {worker} is valid.")
else:
showMsgErr(f"CRI-O pids_limit setting for worker {worker} "
"is too low, must be >= 8192.")
success = False
return success
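# Usage sketch (assumes a fully populated context object `ctx` as used above):
#   ok = Verify(ctx).verify() and VerifyOcp(ctx).verify()
#   sys.exit(0 if ok else 1)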
| StarcoderdataPython |
47024 | import os
import sys
NAME = 'multipla'
PACKAGE = __import__(NAME)
AUTHOR, EMAIL = PACKAGE.__author__.rsplit(' ', 1)
with open('docs/index.rst', 'r') as INDEX:
    DESCRIPTION = INDEX.readline().strip()  # drop the trailing newline
with open('README.rst', 'r') as README:
LONG_DESCRIPTION = README.read()
URL = 'https://github.com/monkeython/%s' % NAME
EGG = {
'name': NAME,
'version': PACKAGE.__version__,
'author': AUTHOR,
'author_email': EMAIL.strip('<>'),
'url': URL,
'description': DESCRIPTION,
'long_description': LONG_DESCRIPTION,
'classifiers': PACKAGE.__classifiers__,
'license': 'BSD',
'keywords': PACKAGE.__keywords__,
'py_modules': [NAME],
'tests_require': ['genty'],
'test_suite': 'test_{}'.format(NAME)
}
if __name__ == '__main__':
import setuptools
setuptools.setup(**EGG)
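# Typical invocations once this setup.py is in place:
#   python setup.py sdist bdist_wheel
#   python setup.py test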
| StarcoderdataPython |
3220160 | <gh_stars>1-10
import rospy
import smach
import mission_plan.srv
class PopItemState(smach.State):
""" This state should only be used when in PICKING mode, to remove items from the mission plan after
the picking was UNSUCCESSFUL and should not be attempted again!
If picking was SUCCESSFUL, use the new PickSuccessfulState instead.
"""
def __init__(self):
smach.State.__init__(self, outcomes=['succeeded', 'aborted'],
output_keys=['next_item_to_pick']
)
# wait for the service to appear
rospy.loginfo('Waiting for services of mission_plan/pop_item to come online ...')
        try:
            rospy.wait_for_service('/mission_plan/pop_item', timeout=2)
        except rospy.ROSException:
            rospy.logerr('Services of /mission_plan/pop_item not available. Restart and try again.')
# rospy.signal_shutdown('')
        self.service = rospy.ServiceProxy('/mission_plan/pop_item', mission_plan.srv.PopItem, persistent=True)
# ==========================================================
def execute(self, userdata):
# try to call the service
# !! warning, this could potentially block forever
try:
response = self.service.call()
userdata['next_item_to_pick'] = {'bin':'', 'item':''}
return 'succeeded'
        except Exception as e:
rospy.logerr('Service call to mission_plan failed. Reason: \n %s' % e)
return 'aborted'
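# Registration sketch (state machine and transition targets are hypothetical):
#   sm = smach.StateMachine(outcomes=['done', 'failed'])
#   with sm:
#       smach.StateMachine.add('POP_ITEM', PopItemState(),
#                              transitions={'succeeded': 'done', 'aborted': 'failed'})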
| StarcoderdataPython |
4838998 | """ Python 3.6+
Data import from Excel file and addition to CTVdb
<NAME> 2020-2022
"""
import argparse
import os
import sys
import pandas as pd
from Database_tools.db_functions import searchexact, session_maker
from Database_tools.sqlalchemydeclarative import Serotype, SerotypeVariants, Group, Variants, Genes, \
VariantGroup
from run_scripts.tools import check_db_path
def parse_args():
"""
:return: args
"""
parser = argparse.ArgumentParser()
parser.add_argument('--infile', '-i', help="Input excel file path", required=True)
parser.add_argument('--serotype', '-s', help="Import only SEROTYPE sheet (ONLY for TYPES not in a group)"
"- see documentation for help")
    try:
        args = parser.parse_args()
    except SystemExit:
        # argparse exits via SystemExit on bad arguments; show the full help first
        parser.print_help()
        sys.exit(1)
return args
def sort_sheets(args):
"""
Parse in the input excel file process sheets specified in args.
:param import_file: Excel file in standardised format
:param tables: list with sheet numbers (eg. [1,3,4])
:return: dict of dataframes
"""
table_dict = {"1": "Serotype", "2": "SerotypeVariants", "3": "Group", "4": "Variants"}
out = {}
if args.serotype:
tables = ["1"]
else:
tables = ["1", "2", "3", "4"]
try:
for table in tables:
# get table name for data formatting
table_name = table_dict[table]
sys.stdout.write(f"Reading {table_name} data.\n")
# read in correct sheet (compensate for 0 indexing) specify datatypes for those that may be ambiguous
# Although position is used as an int in the script, it must be specified as a float due to presence
# of NaN rows it is impossible to set datatype = int when NaN's are present (numpy compatibility)
df = pd.read_excel(args.infile, sheet_name=int(table) - 1,
dtype={"predicted_pheno": str, "subtypes": bool,
"alt_vars": str, "var1": str, "serotype_1": str,
"position": float},engine='openpyxl')
# drop any empty rows
df = df.dropna(how="all")
# flag error if empty sheet
if df.empty:
sys.stderr.write(f"No data in {table_name} sheet. Please check format of file\n")
sys.exit(1)
# remove leading/trailing whitespaces
df = df.apply(lambda x: x.str.strip() if x.dtype == object else x)
out[table_name] = df
return out
except IOError:
sys.stderr.write('ERROR: error occurred with reading Excel file\n')
sys.exit(1)
except IndexError:
sys.stderr.write('ERROR: error occurred with reading columns check format of file\n')
sys.exit(1)
except KeyError:
sys.stderr.write('ERROR: error occurred with reading column names check names and'
'input args\n')
sys.exit(1)
def add_group(df, dbpath):
"""
Checks data integrity in Group table, searches database and adds if not present.
:param df: input dataframe
:param dbpath: path to database folder
"""
try:
# drop where predicted pheno or serotype 1 is empty
df = df.dropna()
# iterate through dataframe checking for existing data and adding if new.
for row, column in df.iterrows():
session = session_maker(dbpath)
query_group = searchexact(df["group_name"][row], Group, Group.group_name, session)
# if it's found skip if not add to DB
if query_group == ['No results found']:
new_group = Group(group_name=df["group_name"][row])
session.add(new_group)
# commit session changes to database
session.commit()
sys.stdout.write(f"Added {df['group_name'][row]} to genogroup table.\n")
else:
sys.stdout.write(f"{df['group_name'][row]} already exists in genogroup table.\n")
session.close()
# KeyError if any headers are not as expected
except KeyError:
sys.stderr.write('ERROR: error occurred while checking input check format of file.\n')
sys.exit(1)
def add_serotype(df, dbpath):
"""
Checks data integrity in Serotype table and prepares for addition to database
Serotype table has one record PER INITIAL HIT SEQUENCE. This function breaks down
input data rows to separate records for each alternative hit per phenotype
and inputs each to Serotype table.
:param df: input dataframe
:param dbpath: Path to database
"""
try:
# drop where predicted pheno or serotype_1 is empty
df = df.dropna(subset=["predicted_pheno", "serotype_1"])
# find number of columns in dataframe (allow flex for adding stage 1 refs)
# remove whitespace
df["predicted_pheno"] = df["predicted_pheno"].str.strip()
sys.stdout.write("Adding serotypes.... \n")
# iterate through dataframe checking for existing data and adding if new.
for row, column in df.iterrows():
# query for each serotype hits - from all serotype_hit columns in input file
for col_idx in range(3, df.shape[1]):
session = session_maker(dbpath)
# ignore nan columns
if df.isnull().iloc[row][col_idx]:
session.close()
break
# Check for existence of predicted phenotype in column (MUST BE FILLED)
elif not df.iloc[row]["predicted_pheno"]:
sys.stderr.write(f"Predicted phenotype column CANNOT be empty -"
f" data for {df.iloc[row]['serotype_1']} NOT added")
break
else:
# Check if entered sero is in group if it is check db for existance of group
if df.isnull().iloc[row]["stage2_group"]:
grp_id = None
else:
                        # search database for the group id; guard against a
                        # group that was never added to the genogroup table
                        group_info = session.query(Group).filter(
                            Group.group_name == df.iloc[row]["stage2_group"]).all()
                        if not group_info:
                            sys.stderr.write(f"Group {df.iloc[row]['stage2_group']} not found"
                                             " in genogroup table - run the Group import first\n")
                            session.close()
                            break
                        grp_id = group_info[0].id
# query for exact matches for row with each serotype_hit
query_sero = session.query(Serotype).filter(Serotype.serotype_hit == df.iloc[row][col_idx],
Serotype.predicted_pheno == df.iloc[row][
"predicted_pheno"],
Serotype.group_id == grp_id,
Serotype.subtype == df.iloc[row]["subtypes"]).all()
# if it's not found add to DB
if not query_sero:
# initialise new serotype class object
new_sero = Serotype(
predicted_pheno=df.iloc[row]["predicted_pheno"],
serotype_hit=df.iloc[row][col_idx],
subtype=df.iloc[row]["subtypes"],
group_id=grp_id
)
# add new serotype to database
session.add(new_sero)
# commit session changes to database
session.commit()
sys.stdout.write(f"Added {df.iloc[row][col_idx]} information to serotype table.\n")
else:
sys.stdout.write(f"{df.iloc[row][col_idx]} information already exists in serotype table.\n")
session.close()
# KeyError if any headers are not as expected
except KeyError:
sys.stderr.write('ERROR: error occurred while checking input, check format of file.\n')
sys.exit(1)
def add_variant(df, dbpath):
"""
Checks data integrity in Variant sheet and prepares for addition to database
:param df: input dataframe
:param dbpath: path to database
:return: checked dataframe
"""
# try:
# drop where anything except position is empty
df = df.dropna(subset=["var_type", "gene", "variant", "group_id"])
# remove extra white space
df["gene"] = df["gene"].str.strip()
df["var_type"] = df["var_type"].str.strip()
df["variant"] = df["variant"].str.strip()
sys.stdout.write("Adding variants...\n")
# iterate through rows
for row, column in df.iterrows():
session = session_maker(dbpath)
# check if positions present:
if df.isnull().iloc[row]["position"]:
gene_pos = None
# if position present convert to integer
else:
# convert to integer (numpy compatibility requires storage as float when NaN in column)
gene_pos = int(df.iloc[row]["position"])
# query for exact matches in gene table for row
query_gene = session.query(Genes).filter(Genes.gene_name == df.iloc[row]["gene"]).all()
# if gene not found in genes table, add to DB
if not query_gene:
# Insert a gene in the gene table
new_gene = Genes(gene_name=df.iloc[row]["gene"])
session.add(new_gene)
sys.stdout.write(f"Adding {df.iloc[row]['gene']} to genes table\n")
# flush session to allow retrieval of gene.id before committing.
session.flush()
# get gene id from DB for use later
gene_id = new_gene.id
else:
# get gene_id from DB for use later
gene_id = query_gene[0].id
sys.stdout.write(f"Gene {df.iloc[row]['gene']} already in genes table\n")
# query for exact matches in variants table for row
query_var = session.query(Variants).filter(
Variants.var_type == df.iloc[row]["var_type"],
Variants.gene == gene_id,
Variants.position == gene_pos,
Variants.variant == str(df.iloc[row]["variant"]),
).all()
if not query_var:
# Insert a variant in the variants table
new_var = Variants(var_type=df.iloc[row]["var_type"],
position=gene_pos,
variant=str(df.iloc[row]["variant"]),
gene=gene_id
)
session.add(new_var)
# flush session to allow retrieval of var.id before committing.
session.flush()
# get variant id from session
var_id = new_var.id
else:
# get variant id from database
var_id = query_var[0].id
sys.stdout.write(f"Variant: {df.iloc[row]['var_type']} already in variants table\n")
        # Find group ID for the group name; check the query result is non-empty
        # before indexing into it
        grp_rows = session.query(Group.id).filter(
            Group.group_name == df.iloc[row]["group_id"]).all()
        if not grp_rows:
            sys.stderr.write(f"Error: Check group_id for {df.iloc[row]['group_id']}"
                             " - MUST match group in Serotype import sheet or in genogroup table")
            break
        grp_id = grp_rows[0][0]
# Check VariantGroup table for existence.
query_vargrp = session.query(VariantGroup).filter(
VariantGroup.grp_id == grp_id, VariantGroup.var_id == var_id
).all()
# if doesn't exist already insert a variant group into variant groups table
if not query_vargrp:
new_variantgroup = VariantGroup(
grp_id=grp_id,
var_id=var_id
)
# add new variant. commit and close session
session.add(new_variantgroup)
session.commit()
session.close()
sys.stdout.write("Variant added to variant_group table.\n")
else:
# commit and close session
session.commit()
session.close()
def add_serotypevariants(df, dbpath):
"""
Checks data integrity in SerotypeVariant sheet and adds to database
:param df: input dataframe
:param dbpath: path to database
"""
try:
# drop where anything except position is empty
df = df.dropna(subset=["predicted_pheno", "var_type", "gene", "variant"])
for row, column in df.iterrows():
session = session_maker(dbpath)
# get gene id
query_gene = session.query(Genes).filter(
Genes.gene_name == df.iloc[row]["gene"],
).all()
# check if positions present:
if df.isnull().iloc[row]["position"]:
# get variant ids that match
query_var2 = session.query(Variants).filter(
Variants.var_type == df.iloc[row]["var_type"],
Variants.gene == query_gene[0].id,
Variants.variant == df.iloc[row]["variant"]
).all()
else:
# convert to integer (numpy compatibility requires storage as float when NaN in column)
gene_pos = int(df.iloc[row]["position"])
# get variant ids that match
query_var2 = session.query(Variants).filter(
Variants.var_type == df.iloc[row]["var_type"],
Variants.gene == query_gene[0].id,
Variants.position == gene_pos,
Variants.variant == df.iloc[row]["variant"]
).all()
# get serotype ids that match
query_sero = session.query(Serotype).filter(
Serotype.predicted_pheno == df.iloc[row]["predicted_pheno"]).all()
            # if sero or variant is missing, report and skip this row,
            # closing the session first
            if not query_sero:
                sys.stderr.write(f"Phenotypical serotype {df.iloc[row]['predicted_pheno']} not found"
                                 " in database, check match to serotype data!\n")
                session.close()
                continue
            elif not query_var2:
                sys.stderr.write(f"Variant {df.iloc[row]['variant']} not found"
                                 " in database, check match to variant data!\n")
                session.close()
                continue
else:
# Iterate through sero ids and var ids checking if in serovariants table.
for sero in query_sero:
for var in query_var2:
# query for exact matches in serotype_variants table for row
query_serovar = session.query(SerotypeVariants).filter(
SerotypeVariants.sero_id == sero.id,
SerotypeVariants.variant_id == var.id,
).all()
# if doesn't exist already insert a serotypevariant into Serotype variants table
if not query_serovar:
new_serovariant = SerotypeVariants(
sero_id=sero.id,
variant_id=var.id
)
                            # add new serotype-variant link and commit
                            session.add(new_serovariant)
                            session.commit()
                            sys.stdout.write("Variant added to serotype_variants table.\n")
                        else:
                            session.commit()
            # close the per-row session once all (sero, var) pairs are handled
            session.close()
# KeyError if any headers are not as expected
except KeyError:
sys.stderr.write('ERROR: Checking input data integrity - see DOCS.\n')
sys.exit(1)
if __name__ == "__main__":
# collect arguments from commandline
args = parse_args()
#change directory to PneumoKITy home
# find root path for database
path_parent = os.path.dirname(os.getcwd())
db_path = os.path.join(path_parent, "ctvdb")
# check integrity of given path
check_db_path(db_path)
# read in dataframes
dfs = sort_sheets(args)
# run functions to add info
# if all sheets added
if not args.serotype:
# Database attributes must be updated in the following order.
# Groups must be added first due to order of dependencies on primary keys in database
add_group(dfs['Group'], db_path)
add_serotype(dfs['Serotype'], db_path)
add_variant(dfs['Variants'], db_path)
add_serotypevariants(dfs['SerotypeVariants'], db_path)
# if only serotype sheet added.
else:
        add_serotype(dfs['Serotype'], db_path)
    sys.stdout.write("Database updated\n")
 | StarcoderdataPython |
3383690 | <gh_stars>0
import datetime
import message_strings as loginfo
import json
class Logger:
def __init__(self, logType='default'):
self.logType = logType
def log(self, message):
"""
Logs the a message into logfile.txt along with a timestamp & log type.
"""
timestamp = datetime.datetime.now().isoformat()
        # Append mode creates the file if it does not already exist, so no
        # FileNotFoundError fallback is needed
        with open('logfile.txt', 'a') as logfile:
            logfile.write(f"{timestamp} - {self.logType} : {message}\n")
def log_json(self, json_dict):
"""
Writes json from a python dict inside serverlog.json
"""
with open('serverlog.json', 'w', encoding='utf8') as jsonlog:
jsonlog.write(json.dumps(json_dict, ensure_ascii=False, indent=4))
self.log(loginfo.JSON_SAVE)
def get_json(self):
"""
Returns json data from serverlog.json in a python dict
"""
try:
with open('serverlog.json', 'r', encoding='utf8') as serverlog:
jsondata = json.load(serverlog)
self.log(loginfo.JSON_GET)
return jsondata
except FileNotFoundError:
self.log(loginfo.JSON_GET_FAIL)
return False
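# Usage sketch:
#   log = Logger('server')
#   log.log('Server started')
#   log.log_json({'clients': []})
#   state = log.get_json()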
| StarcoderdataPython |
1736541 | <gh_stars>1-10
"""
Left-Right Circle Shift
@author Rafael
"""
#!/bin/python3
def getShiftedString(s, leftShifts, rightShifts):
"""
Generate the string after the following operations
1. Left Circle Shift
2. Right Circle Shift
:type s: string
:type leftShifts: int
:type rightShifts: int
Examples:
>>> getShiftedString("abc",1,0)
'bca'
>>> getShiftedString("abc",1,7)
'abc'
"""
    # Guard against empty input (avoids a modulo-by-zero below)
    if not s:
        return s
    # Limit huge shifts to within one full rotation
    sLen = len(s)
leftShifts = leftShifts % sLen
rightShifts = rightShifts % sLen
# Generate Left shifted string
leftS = s[leftShifts:] + s[:leftShifts]
# Generate Right shifted string
rightS = leftS[-rightShifts:] + leftS[0:-rightShifts]
return rightS
if __name__ == '__main__':
# Test Trivial Cases with doctest
import doctest
doctest.testmod()
| StarcoderdataPython |
149181 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
def calculate_effect_of_credit_drop(model, X, credit_factors):
probs = model.predict_proba(X)[:, 1]
all_probs_changes = {}
all_credit_amount_changes = {}
all_expected_costs_in_credit = {}
for factor in credit_factors:
probs_modified = model.predict_proba(
X.assign(credit_given=X["credit_given"] * factor)
)[:, 1]
probs_change = (probs - probs_modified) * 100
probs_change = np.where(probs_change <= 0, np.nan, probs_change)
credit_change = np.array(np.round(X.credit_given * (1 - factor), 0))
cost_in_credit = np.divide(credit_change, probs_change)
all_probs_changes[factor] = probs_change
all_credit_amount_changes[factor] = credit_change
all_expected_costs_in_credit[factor] = cost_in_credit
return all_probs_changes, all_credit_amount_changes, all_expected_costs_in_credit
def order_effects_within_customers(
X,
credit_factors,
all_probs_changes,
all_credit_amount_changes,
all_expected_costs_in_credit,
):
all_processed_costs = []
all_processed_factors = []
all_processed_credit_changes = []
all_processed_probs_changes = []
for customer in range(len(X)):
costs = []
factors = []
credit_change = []
probs_change = []
for factor in credit_factors:
costs.append(all_expected_costs_in_credit[factor][customer])
factors.append(factor)
credit_change.append(all_credit_amount_changes[factor][customer])
probs_change.append(all_probs_changes[factor][customer])
sorted_costs = sorted(costs)
sorted_factors = [x for _, x in sorted(zip(costs, factors))]
sorted_credit_change = [x for _, x in sorted(zip(costs, credit_change))]
sorted_probs_change = [x for _, x in sorted(zip(costs, probs_change))]
# assign na to costs and credit change if factor of the next
# best change is not bigger than in the previous drop
smallest_factor = None
for i, factor in enumerate(sorted_factors):
if (not smallest_factor or factor < smallest_factor) and (
not np.isnan(sorted_costs[i])
):
smallest_factor = factor
else:
sorted_costs[i] = np.nan
# removing indices from list where costs is nan
sorted_factors = [
factor
for factor, cost in zip(sorted_factors, sorted_costs)
if not np.isnan(cost)
]
sorted_credit_change = [
factor
for factor, cost in zip(sorted_credit_change, sorted_costs)
if not np.isnan(cost)
]
sorted_probs_change = [
probs
for probs, cost in zip(sorted_probs_change, sorted_costs)
if not np.isnan(cost)
]
sorted_costs = [cost for cost in sorted_costs if not np.isnan(cost)]
if len(sorted_costs) > 1:
# change in probs is the current value minus previous except for
# the first one that stays the same
sorted_probs_change = [
current_change - previous_change
for current_change, previous_change in zip(
sorted_probs_change, [0] + sorted_probs_change[:-1]
)
]
# keeping only values where the default risk is actually lessened
sorted_credit_change = [
change
for change, probs in zip(sorted_credit_change, sorted_probs_change)
if probs > 0
]
sorted_factors = [
factor
for factor, probs in zip(sorted_factors, sorted_probs_change)
if probs > 0
]
sorted_probs_change = [probs for probs in sorted_probs_change if probs > 0]
# calculating the change in credit for each viable option
sorted_credit_change = [
current_change - previous_change
for current_change, previous_change in zip(
sorted_credit_change, [0] + sorted_credit_change[:-1]
)
]
# calculating the cost (percent default drop per dollar) for
# each viable option for credit limit drop
sorted_costs = [
credit_change / probs_change
for credit_change, probs_change in zip(
sorted_credit_change, sorted_probs_change
)
]
all_processed_costs.append(sorted_costs)
all_processed_factors.append(sorted_factors)
all_processed_credit_changes.append(sorted_credit_change)
all_processed_probs_changes.append(sorted_probs_change)
return (
all_processed_costs,
all_processed_factors,
all_processed_credit_changes,
all_processed_probs_changes,
)
def form_results_to_ordered_df(
y,
X,
probs,
all_processed_costs,
all_processed_factors,
all_processed_credit_changes,
all_processed_probs_changes,
):
costs_df = pd.DataFrame(
{
"defaulted": y,
"credit_given": X["credit_given"],
"prob": probs,
"factors": all_processed_factors,
"costs": all_processed_costs,
"credit_losses": all_processed_credit_changes,
"probs_changes": all_processed_probs_changes,
}
)
# unpacking the list of options and then sorting by it
# the within customer drops are already sorted (we start from the best one)
# the order within customers is in right order (smaller credit drops first)
factors = np.array(costs_df[["factors"]].explode("factors"))
costs = np.array(costs_df[["costs"]].explode("costs"))
credit_losses = np.array(costs_df[["credit_losses"]].explode("credit_losses"))
probs_changes = np.array(costs_df[["probs_changes"]].explode("probs_changes"))
# df to same size as exploded columns
costs_df = costs_df.explode("factors")
# overwriting old columns
costs_df = costs_df.assign(
factors=factors,
costs=costs,
credit_losses=credit_losses,
probs_changes=probs_changes,
)
costs_df.sort_values(by="costs", inplace=True)
first_instance_of_customer = ~costs_df.index.duplicated()
costs_df = costs_df.assign(first_instance_of_customer=first_instance_of_customer)
costs_df = costs_df.assign(
defaults_prevented_perc=(
costs_df["probs_changes"].cumsum()
/ costs_df["first_instance_of_customer"].sum()
),
credit_cost_perc=(
costs_df["credit_losses"].cumsum()
* 100
/ costs_df["credit_given"]
.multiply(costs_df["first_instance_of_customer"])
.sum()
),
customers_affected_perc=costs_df["first_instance_of_customer"].cumsum()
* 100
/ costs_df["first_instance_of_customer"].sum(),
defaulters_affected_perc=costs_df["first_instance_of_customer"]
.multiply(costs_df["defaulted"])
.cumsum()
* 100
/ costs_df["first_instance_of_customer"].multiply(costs_df["defaulted"]).sum(),
non_defaulters_affected_perc=costs_df["first_instance_of_customer"]
.multiply(costs_df["defaulted"].subtract(1).abs())
.cumsum()
* 100
/ costs_df["first_instance_of_customer"]
.multiply(costs_df["defaulted"].subtract(1).abs())
.sum(),
)
costs_df.reset_index(inplace=True)
return costs_df
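# End-to-end sketch (assumes a fitted sklearn-style classifier `model`, a feature
# frame X containing a 'credit_given' column, labels y, and illustrative factors):
#   factors = [0.9, 0.8, 0.5]
#   probs = model.predict_proba(X)[:, 1]
#   pc, cc, ec = calculate_effect_of_credit_drop(model, X, factors)
#   costs, facs, credit, dprob = order_effects_within_customers(X, factors, pc, cc, ec)
#   results = form_results_to_ordered_df(y, X, probs, costs, facs, credit, dprob)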
| StarcoderdataPython |
1615990 | """
:Copyright: 2006-2021 <NAME>
:License: Revised BSD (see `LICENSE` file for details)
"""
from byceps.database import db
from byceps.services.authentication.password.dbmodels import (
Credential as DbCredential,
)
from byceps.services.authentication.password import service as password_service
from byceps.services.user import event_service
def test_update_password_hash(site_app, admin_user, make_user):
user = make_user('PasswordHashUpdater', password='<PASSWORD>')
admin_id = admin_user.id
user_id = user.id
password_hash_before = get_password_hash(user_id)
assert password_hash_before is not None
events_before = event_service.get_events_for_user(user_id)
assert len(events_before) == 0
# -------------------------------- #
password_service.update_password_hash(
user_id, '<PASSWORD>', admin_id
)
# -------------------------------- #
password_hash_after = get_password_hash(user_id)
assert password_hash_after is not None
assert password_hash_after != password_hash_before
events_after = event_service.get_events_for_user(user_id)
assert len(events_after) == 1
password_updated_event = events_after[0]
assert password_updated_event.event_type == 'password-updated'
assert password_updated_event.data == {
'initiator_id': str(admin_id),
}
# Clean up.
password_service.delete_password_hash(user_id)
# helpers
def get_password_hash(user_id):
credential = db.session.query(DbCredential).get(user_id)
return credential.password_hash
| StarcoderdataPython |
1741256 | with open('output.txt', 'a') as append_file:
    append_file.write('Hello Atom!\n')
| StarcoderdataPython |
58135 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains utilities for source code tokenization."""
import abc
import keyword
import re
import tokenize
import typing
from typing import Any, Dict, List, Sequence, Text, Tuple, Union
from absl import logging
import six
from cubert import unified_tokenizer
# Quote string for special tokens. Must make the resulting string a valid Python
# token.
SPECIAL_QUOTE = '___'
def quote_special(content):
return '{q}{t}{q}'.format(q=SPECIAL_QUOTE, t=content)
ENDMARKER = 'ENDMARKER'
NEWLINE = quote_special('NEWLINE')
# After all splitting, the longest a token is of the following length.
MAX_OUTPUT_TOKEN_LENGTH = 15
@six.add_metaclass(abc.ABCMeta)
class Tokenizer(object):
"""A tokenizer that implements a language-agnostic tokenization.
The tokenizer implements a language-agnostic tokenization. This is available
as `tokenize_and_abstract()`.
"""
def __init__(self, max_output_token_length = MAX_OUTPUT_TOKEN_LENGTH,
reserved = ()):
self.types_to_skip = []
self.reserved = reserved
self.mappings = dict()
self.max_output_token_length = max_output_token_length
@abc.abstractmethod
def tokenize_and_abstract(
self,
source_code):
"""Produces a language-agnostic tokenization of the input code.
Args:
source_code: Source code stored in a string.
Returns:
A list of pairs of a token (string) and a token kind in the given source
code. It always includes an end of sequence token. That is, an empty
input always returns a list of size 1.
Raises:
ValueError: if `source_code` cannot be tokenized.
"""
@abc.abstractmethod
def untokenize_abstract(self, whole_tokens):
"""Applies language-specific rules to an abstract untokenized list.
Args:
whole_tokens: Abstract tokens, reconstituted and unsanitized by
`untokenize` before passed to this language-specific logic.
Returns:
A string representing the untokenized text.
"""
def update_types_to_skip(
self, types_to_skip):
"""Replaces the set of token types that are ignored.
Each tokenizer may provide different semantics with respect to this list,
and may ignore it altogether.
Args:
types_to_skip: List of types (from the constants in the `token` module) or
`unified_tokenizer.TokenKind`. Note that some of those constants are
actually defined in the `tokenize` module.
"""
self.types_to_skip = types_to_skip
def replace_reserved_keywords(self, reserved):
"""Replaces the reserved keywords with the supplied list of strings.
Each tokenizer may provide different semantics with respect to the list
of reserved keywords, or ignore them altogether.
Args:
reserved: List of strings.
"""
self.reserved = reserved # Replace the old one entirely.
def update_mappings(self, mappings):
"""Replaces the character mappings with the supplied dictionary.
The intent for character mappings is to enable tokenizers that support them
to sanitize dangerous characters, such as newline and carriage return,
with a nicer symbol.
Each tokenizer may provide different semantics with respect to the
mappings, or ignore them altogether.
Args:
mappings: Dictionary of original to sanitized strings. Keys are expected
to have length 1.
Raises:
ValueError: if a key has length different from 1.
"""
unified_tokenizer.check_mappings(mappings)
self.mappings = mappings
def condition_full_tokens(
self, agnostic
):
"""Applies reserved keywords and character sanitization."""
filtered = [(spelling, kind) for spelling, kind in agnostic
if kind not in self.types_to_skip]
# Now turn all reserved words, regardless of kind, into keywords.
with_reserved = [(spelling, unified_tokenizer.TokenKind.KEYWORD
if spelling in self.reserved else kind)
for spelling, kind in filtered]
return with_reserved
def subtokenize_full_tokens(
self, agnostic
):
"""Performs heuristic splitting of full tokens."""
subtoken_lists = unified_tokenizer.subtokenize_agnostic_tokens_in_place(
agnostic_tokens=agnostic,
max_output_token_length=self.max_output_token_length,
sanitization_mapping=self.mappings,
sentinel=unified_tokenizer.SENTINEL)
return subtoken_lists
def tokenize(self, source_code):
"""Tokenizes via `tokenize_and_abstract`."""
try:
agnostic = self.tokenize_and_abstract(source_code)
except Exception as e:
raise ValueError('While trying to do language-specific tokenization for '
'the string:\n\n\n%r\n\n\n%s\n\n\n'
'we received error %r.' % (source_code, source_code, e))
conditioned = self.condition_full_tokens(agnostic)
subtoken_lists = self.subtokenize_full_tokens(conditioned)
subtokens = unified_tokenizer.flatten_subtoken_lists(subtoken_lists)
return subtokens
def untokenize(self, token_list):
"""Untokenizes via `untokenize_abstract`."""
# Untokenize agnostic.
if (not token_list or token_list[-1] != quote_special(
unified_tokenizer.TokenKind.EOS.name)):
raise ValueError(
'Token list %r should end with the EOS token %r.' %
(token_list, quote_special(unified_tokenizer.TokenKind.EOS.name)))
whole_tokens = unified_tokenizer.reconstitute_full_unsanitary_tokens(
token_list,
sanitization_mapping=self.mappings,
sentinel=unified_tokenizer.SENTINEL)
return self.untokenize_abstract(whole_tokens)
def _token_from_token_type(token_type):
"""Turns a token type into a reserved token string."""
# We use the tok_name dict from tokenize, not token. The former has
# NL and COMMENT and such, whereas the latter doesn't.
return quote_special(tokenize.tok_name[token_type])
class CuBertTokenizer(Tokenizer):
"""Tokenizer that extracts Python's lexical elements preserving strings."""
_TOKEN_TYPE_MAP = {
tokenize.COMMENT: unified_tokenizer.TokenKind.COMMENT,
tokenize.DEDENT: unified_tokenizer.TokenKind.KEYWORD,
tokenize.ENDMARKER: unified_tokenizer.TokenKind.EOS,
tokenize.ERRORTOKEN: unified_tokenizer.TokenKind.ERROR,
tokenize.INDENT: unified_tokenizer.TokenKind.KEYWORD,
tokenize.NEWLINE: unified_tokenizer.TokenKind.NEWLINE,
tokenize.NL: unified_tokenizer.TokenKind.PUNCTUATION,
tokenize.NUMBER: unified_tokenizer.TokenKind.NUMBER,
tokenize.OP: unified_tokenizer.TokenKind.PUNCTUATION,
tokenize.STRING: unified_tokenizer.TokenKind.STRING,
}
_REVERSE_TOKEN_MAP = {
_token_from_token_type(tokenize.INDENT): tokenize.INDENT,
_token_from_token_type(tokenize.DEDENT): tokenize.DEDENT,
quote_special(
unified_tokenizer.TokenKind.EOS.name): tokenize.ENDMARKER,
quote_special(
unified_tokenizer.TokenKind.ERROR.name): tokenize.ERRORTOKEN,
quote_special(
unified_tokenizer.TokenKind.NEWLINE.name): tokenize.NEWLINE,
_token_from_token_type(tokenize.NL): tokenize.NL,
}
# Adding the end-of-string anchor \Z below, since re.fullmatch wasn't
# available in Python2.
_NUMBERS = re.compile('(' + tokenize.Number + r')\Z') # pytype: disable=module-attr
_SINGLE_STRINGS = re.compile('(' + tokenize.String + r')\Z') # pytype: disable=module-attr
_TRIPLE_STRING_BEGINNINGS = re.compile(tokenize.Triple) # pytype: disable=module-attr
_COMMENTS = re.compile('(' + tokenize.Comment + r')\Z') # pytype: disable=module-attr
_EXACT_TOKEN_TYPES = tokenize.EXACT_TOKEN_TYPES.keys() # pytype: disable=module-attr
# Token types that CubertTokenizer will tokenize by their type and not
# content.
_TOKEN_TYPES_TO_TOKENIZE_BY_TYPE = [
tokenize.NEWLINE, tokenize.DEDENT, tokenize.NL
]
def __init__(self, *args, **kwargs):
super(CuBertTokenizer, self).__init__(*args, **kwargs)
# By default, we drop COMMENT tokens.
self.update_types_to_skip([unified_tokenizer.TokenKind.COMMENT])
self.update_mappings({
# By default, replace \n and \r. We choose special names that are
# different from the Python token types (i.e., NL).
'\n':
quote_special('NLCHAR'),
'\r':
quote_special('CR'),
unified_tokenizer.SENTINEL:
quote_special(unified_tokenizer.SENTINEL_ESCAPE),
})
def tokenize_and_abstract(
self,
source_code):
"""Produces a language-agnostic tokenization of the input code."""
token_pairs = [] # type: List[Tuple[Text, int]]
try:
token_tuples = unified_tokenizer.code_to_tokens(source_code)
token_pairs = [(six.ensure_text(token_name), token_type)
for token_type, token_name, _, _, _ in token_tuples]
except (tokenize.TokenError, IndentationError) as e:
logging.warning('The tokenizer raised exception `%s` while parsing %s',
e, source_code)
token_pairs = [
(quote_special(unified_tokenizer.TokenKind.ERROR.name),
tokenize.ERRORTOKEN),
('',
tokenize.ENDMARKER),
]
agnostic_tokens = [] # type: List[Tuple[Text, unified_tokenizer.TokenKind]]
for spelling, kind in token_pairs:
adjusted_spelling = spelling
token_kind = unified_tokenizer.TokenKind.NONE
if kind == tokenize.NAME:
# Disambiguate identifiers from keywords.
if keyword.iskeyword(spelling):
token_kind = unified_tokenizer.TokenKind.KEYWORD
else:
token_kind = unified_tokenizer.TokenKind.IDENTIFIER
else:
if kind in CuBertTokenizer._TOKEN_TYPES_TO_TOKENIZE_BY_TYPE:
# Replace spelling with type.
adjusted_spelling = _token_from_token_type(kind)
elif kind is tokenize.INDENT:
# For INDENT, in particular, we also record the actual spelling too.
adjusted_spelling = '{indent}{spelling}'.format(
indent=_token_from_token_type(kind),
spelling=spelling)
elif kind == tokenize.ENDMARKER:
adjusted_spelling = quote_special(
unified_tokenizer.TokenKind.EOS.name)
# Map everything according to table.
try:
token_kind = CuBertTokenizer._TOKEN_TYPE_MAP[kind]
except KeyError as ke:
# It's possible we're here because of async/await. Those kept being
# turned into keywords and then removed from keywords, so we can't
# rely on knowing which they are. We'll check by spelling.
# See: https://bugs.python.org/issue30406
# and https://bugs.python.org/issue33260
# and https://bugs.python.org/issue35975
if spelling in ('async', 'await'):
token_kind = unified_tokenizer.TokenKind.KEYWORD
else:
raise ValueError('While trying to turn Python token %r into an '
'agnostic one, raised %r.' % ((spelling, kind),
ke))
agnostic_tokens.append((adjusted_spelling, token_kind))
return agnostic_tokens
def untokenize_abstract(self, whole_tokens):
# Reconstruct Python tokenizer tuples, so that Python's untokenize can be
# invoked.
token_tuples = [] # type: List[Tuple[int, Text]]
for whole_token in whole_tokens:
if whole_token in CuBertTokenizer._EXACT_TOKEN_TYPES:
token_tuples.append((tokenize.OP, whole_token))
elif _token_from_token_type(tokenize.INDENT) in whole_token:
# We baked the type and spelling into one token. Break them up.
spelling = whole_token.replace(
_token_from_token_type(tokenize.INDENT), '')
token_tuples.append((tokenize.INDENT, spelling))
elif whole_token in CuBertTokenizer._REVERSE_TOKEN_MAP:
python_kind = CuBertTokenizer._REVERSE_TOKEN_MAP[whole_token]
if python_kind in (tokenize.DEDENT, tokenize.ENDMARKER,
tokenize.ERRORTOKEN):
spelling = ''
else: # python_kind in (tokenize.NEWLINE, tokenize.NL)
spelling = '\n'
token_tuples.append((python_kind, spelling))
elif keyword.iskeyword(whole_token):
token_tuples.append((tokenize.NAME, whole_token))
elif CuBertTokenizer._NUMBERS.match(whole_token):
token_tuples.append((tokenize.NUMBER, whole_token))
elif CuBertTokenizer._SINGLE_STRINGS.match(whole_token):
token_tuples.append((tokenize.STRING, whole_token))
elif CuBertTokenizer._TRIPLE_STRING_BEGINNINGS.match(whole_token):
token_tuples.append((tokenize.STRING, whole_token))
elif CuBertTokenizer._COMMENTS.match(whole_token):
token_tuples.append((tokenize.COMMENT, whole_token))
else:
# Everything else we map back to NAME.
token_tuples.append((tokenize.NAME, whole_token))
reconstructed = tokenize.untokenize(typing.cast(Any, token_tuples))
return reconstructed
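
# Added note (not part of the original file): untokenize_abstract builds the
# 2-tuple (token_type, token_string) form that the stdlib accepts in
# "compatibility mode". A minimal stdlib-only round trip looks like:
#
#   import io, tokenize
#   src = "x = 1\n"
#   pairs = [(t.type, t.string)
#            for t in tokenize.generate_tokens(io.StringIO(src).readline)]
#   tokenize.untokenize(pairs)  # reconstructs equivalent source (spacing may differ)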

# ---- next file ----
import os
from ats.easypy import run
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--device', dest='device')
parser.add_argument('--nbr-count', dest='expected_nbr_count')
args, unknown = parser.parse_known_args()
pwd = os.path.dirname(__file__)
eigrp = os.path.join(pwd, 'EIGRP_TestCases.py')
run(testscript=eigrp, **vars(args))
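
# Hypothetical invocation through the pyATS easypy runner (device name and
# neighbor count are made-up values):
#   easypy eigrp_job.py --device R1 --nbr-count 2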

# ---- next file ----
def fatorial(n):
    return 1 if n <= 1 else n * fatorial(n - 1)  # n <= 1 also guards n == 0 against infinite recursion
print(fatorial(int(input())))
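# Worked example: fatorial(5) == 5 * 4 * 3 * 2 * 1 == 120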

# ---- next file ----
# Import required libraries
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Reading file ('path' is expected to be provided by the execution environment)
data = np.genfromtxt(path, delimiter=",", skip_header=1)
#print(np.shape(data))
#Code starts here
census=np.concatenate((data,new_record),axis=0)
##print(np.shape(census))
age=census[:,0]
#print(age)
max_age=age.max()
min_age=age.min()
age_mean=np.round(age.mean(),2)
age_std=np.round(np.std(age),2)
#print(max_age)
#print(min_age)
#print(age_mean)
#print(age_std)
race_0=census[:,2]
race_0=race_0[race_0==0]  # race codes are non-negative, so == is the intended test
#print(race_0)
race_1=census[:,2]
race_1=race_1[race_1==1]
#print(race_1)
race_2=census[:,2]
race_2=race_2[race_2==2]
#print(race_2)
race_3=census[:,2]
race_3=race_3[race_3==3]
#print(race_3)
race_4=census[:,2]
race_4=race_4[race_4==4]
#print(race_4)
len_0=len(race_0)
print(len_0)
len_1=len(race_1)
print(len_1)
len_2=len(race_2)
print(len_2)
len_3=len(race_3)
print(len_3)
len_4=len(race_4)
print(len_4)
len_all=[len_0,len_1,len_2,len_3,len_4]
minority_race=len_all.index(min(len_all))
print(minority_race)
################
senior_citizens=census[census[:,0]>60]
#Calculating the sum of all the values of array
working_hours_sum=senior_citizens.sum(axis=0)[6]
print(working_hours_sum)
#Finding the length of the array
senior_citizens_len=len(senior_citizens)
#Finding the average working hours
avg_working_hours=working_hours_sum/senior_citizens_len
#Printing the average working hours
print(avg_working_hours)
#Creating an array based on 'education' column
high=census[census[:,1]>10]
#Finding the average pay
avg_pay_high=high[:,7].mean()
#Printing the average pay
print(avg_pay_high)
#Creating an array based on 'education' column
low=census[census[:,1]<=10]
#Finding the average pay
avg_pay_low=low[:,7].mean()
#Printing the average pay
print(avg_pay_low)
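# Added note: the selections above all use NumPy boolean masking, e.g.
# census[census[:,0]>60] keeps only the rows whose first column (age)
# is greater than 60.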

# ---- next file: elementary/python/ej11.py ----
print("""\
A program that computes the Leibniz series for pi (equation on OneNote)
Created By <NAME> <<EMAIL>>
""")
sumatoria = 0
for k in range(1, 10 ** 6):
sumatoria += ((-1) ** (k + 1)) / ((2 * k) - 1)
print('Result:', 4 * sumatoria)
print("It's pi hahaha... the further you take the summation, the more accurate pi gets")
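# Sanity check (added sketch, not in the original script): compare the same
# partial sum against math.pi.
import math
approx = 4 * sum((-1) ** (k + 1) / (2 * k - 1) for k in range(1, 10 ** 6))
print('absolute error:', abs(math.pi - approx))  # Leibniz error shrinks roughly like 1/N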

# ---- next file: src/esamarathon/donation_parser/donation_parser.py ----
from bs4 import BeautifulSoup
import requests
import datetime
def main():
print("Hekathon 2021 donations")
donations = get_esamarathon_donations("hekathon", "hek21")
for donation in donations:
print(donation)
def get_esamarathon_donations(marathon_prefix, marathon_uri):
url = "https://{0}.esamarathon.com/donations/{1}".format(
marathon_prefix, marathon_uri
)
response = requests.get(url, timeout=5)
soup = BeautifulSoup(response.content, "html.parser")
return parse_donations(soup)
def parse_donations(soup):
donations = []
# donation = {
# "amount": raw_donation_amount,
# "author": raw_donation_author,
# "message": raw_donation_message,
# "timestamp": raw_donation_timestamp,
# }
# donations.append(donation)
donations_raw = list(
soup.body.findChildren("div", {"class": "container-fluid"}, recursive=False)[
0
].table.children
)[3::2]
for row in donations_raw:
fields = list(row.children)[1::2]
timestamp = fields[1].text.strip()
pattern = "%m/%d/%Y %H:%M:%S %z"
parsed_time = datetime.datetime.strptime(timestamp, pattern)
epoch = int(parsed_time.timestamp())
donation = {
"author": fields[0].text.strip(),
"timestamp": epoch,
"amount": fields[2].text.strip(),
# last field has Yes if there is a comment, and not the actual comment text??
}
donations.append(donation)
return donations
if __name__ == "__main__":
main()
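# Added note: the "%m/%d/%Y %H:%M:%S %z" pattern above requires a UTC offset
# in the scraped text (e.g. "12/31/2021 23:59:59 +0000"); datetime.strptime
# raises ValueError if the offset is missing.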

# ---- next file ----
from pal.transform.abstract_transform import AbstractTransform
from pal.logger import logger
class SpecialToUnderscore(AbstractTransform):
def __init__(self):
self.special_chars = " !@#$%^&*()[]{};:,./<>?\|`~-=+"
@property
def description(self):
d = "replacing special characters ({chars}) with \"_\"".format(
chars = self.special_chars
)
return d
def do_transform(self, reg):
new_name = reg.name.translate({ord(c): "_" for c in self.special_chars})
if new_name != reg.name:
logger.debug("Replaced special characters in register: {name} -> {new_name}".format(
name = reg.name,
new_name = new_name
))
reg.name = new_name
for fs in reg.fieldsets:
for f in fs.fields:
new_name = f.name.translate({ord(c): "_" for c in self.special_chars})
if new_name != f.name:
logger.debug("Replaced special characters in field: {name} -> {new_name}".format(
name = f.name,
new_name = new_name
))
f.name = new_name
return reg
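
# Added illustration (stdlib only) of the translate() idiom used above:
# a {codepoint: "_"} table maps every special character to an underscore.
#
#   table = {ord(c): "_" for c in " !@#$%^&*()[]{};:,./<>?\\|`~-=+"}
#   "my reg.name!".translate(table)   # -> 'my_reg_name_'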

# ---- next file ----
# Developed by Momalekiii
import moviepy.editor as mp
import speech_recognition as sr
def main():
clip = mp.VideoFileClip(r"video.mp4") # add video name here
clip.audio.write_audiofile(r"converted.wav")
recognizer = sr.Recognizer()
audio = sr.AudioFile("converted.wav")
with audio as source:
audio_file = recognizer.record(source)
result = recognizer.recognize_google(audio_file)
with open("recognized.txt", mode="w") as file:
file.write("Recognized Speech:")
file.write("\n")
file.write(result)
print("Done!")
if __name__ == '__main__':
main()
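
# Added note: both third-party dependencies are assumed installed, e.g.
#   pip install moviepy SpeechRecognition
# and recognize_google() needs network access to Google's free speech API.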

# ---- next file ----
import pytest
from mock import Mock, patch
from AndroidRunner.BrowserFactory import BrowserFactory
from AndroidRunner.Browsers import Browser, Chrome, Firefox, Opera
class TestBrowsers(object):
@pytest.fixture()
def browser(self):
return Browser.Browser(None)
def test_get_browser_chrome(self):
assert isinstance(BrowserFactory.get_browser('chrome'), Chrome.Chrome.__class__)
def test_get_browser_opera(self):
assert isinstance(BrowserFactory.get_browser('opera'), Opera.Opera.__class__)
def test_get_browser_firefox(self):
assert isinstance(BrowserFactory.get_browser('firefox'), Firefox.Firefox.__class__)
def test_get_browser_fake(self):
with pytest.raises(Exception) as except_result:
BrowserFactory.get_browser("fake_browser")
assert "No Browser found" in str(except_result.value)
def test_browsers_to_string_browser(self, browser):
assert browser.to_string() == ''
def test_browsers_to_string_opera(self, browser):
assert BrowserFactory.get_browser('opera')(None).to_string() == 'com.opera.browser'
def test_browsers_to_string_chrome(self, browser):
assert BrowserFactory.get_browser('chrome')(None).to_string() == 'com.android.chrome'
def test_browsers_to_string_firefox(self, browser):
assert BrowserFactory.get_browser('firefox')(None).to_string() == 'org.mozilla.firefox'
@patch('AndroidRunner.Browsers.Browser.Browser.__init__')
def test_chrome_init(self, mock_browser):
mock_config = Mock()
chrome_browser = Chrome.Chrome(mock_config)
mock_browser.assert_called_once_with(mock_config)
        assert chrome_browser.package_name == 'com.android.chrome'
        assert chrome_browser.main_activity == 'com.google.android.apps.chrome.Main'
@patch('AndroidRunner.Browsers.Browser.Browser.__init__')
def test_firefox_init(self, mock_browser):
mock_config = Mock()
firefox_browser = Firefox.Firefox(mock_config)
mock_browser.assert_called_once_with(mock_config)
        assert firefox_browser.package_name == 'org.mozilla.firefox'
        assert firefox_browser.main_activity == 'org.mozilla.gecko.BrowserApp'
@patch('AndroidRunner.Browsers.Browser.Browser.__init__')
def test_opera_init(self, mock_browser):
mock_config = Mock()
opera_browser = Opera.Opera(mock_config)
mock_browser.assert_called_once_with(mock_config)
        assert opera_browser.package_name == 'com.opera.browser'
        assert opera_browser.main_activity == 'com.opera.Opera'
@patch('logging.Logger.info')
def test_start(self, mock_log, browser):
mock_package_name = Mock()
browser.package_name = mock_package_name
mock_main_activity = Mock()
browser.main_activity = mock_main_activity
mock_device = Mock()
mock_device.id = "fake_device"
browser.start(mock_device)
mock_log.assert_called_once_with('fake_device: Start')
mock_device.launch_activity.assert_called_once_with(mock_package_name, mock_main_activity, from_scratch=True,
force_stop=True,
action='android.intent.action.VIEW')
@patch('logging.Logger.info')
def test_load_url(self, mock_log, browser):
mock_package_name = Mock()
browser.package_name = mock_package_name
mock_main_activity = Mock()
browser.main_activity = mock_main_activity
mock_device = Mock()
mock_device.id = "fake_device"
browser.load_url(mock_device, 'test.url')
mock_log.assert_called_once_with('fake_device: Load URL: test.url')
mock_device.launch_activity.assert_called_once_with(mock_package_name, mock_main_activity, data_uri='test.url',
action='android.intent.action.VIEW')
@patch('logging.Logger.info')
def test_stop_clear_data(self, mock, browser):
mock_device = Mock()
mock_device.id = "fake_device"
browser.stop(mock_device, True)
mock.assert_called_once_with('fake_device: Stop')
mock_device.force_stop.assert_called_once_with("")
mock_device.clear_app_data.assert_called_once_with("")
assert mock_device.clear_app_data.call_count == 1
@patch('logging.Logger.info')
def test_stop(self, mock, browser):
mock_device = Mock()
mock_device.id = "fake_device"
browser.stop(mock_device, False)
mock_device.force_stop.assert_called_once_with("")
mock.assert_called_with('fake_device: Stop')
assert mock_device.clear_app_data.call_count == 0
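
# Added note: these tests run under pytest, e.g. (file name assumed):
#   pytest test_browsers.py -q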

# ---- next file: metaheuristica/source_TSP/python/TSP.py ----
#!/usr/bin/python3
import pandas as pd
import numpy as np
from scipy.spatial.distance import pdist, squareform
def get_dist_tsp(fname='ar9152.tsp'):
"""
    Return the pairwise distance matrix between the city positions.
Parameters
----------
fname -- filename with the TSP
"""
# Use pandas to get the positions
df = pd.read_table(fname, sep=' ', skiprows=7, skipfooter=1,
names=['x', 'y'], engine='python')
# Normalize positions
#df = (df-df.min())/df.max()
# Calculate distance
dist = squareform(pdist(df))
return dist
def fitness_better(solution, cost):
"""
    Return the fitness evaluation. It is the intermediate implementation.
Parameters
----------
solution -- Vector of the cities.
cost -- distance matrix.
return -- the final fitness.
"""
solution_rot = np.roll(solution, -1)
return np.sum([cost[pair] for pair in zip(solution, solution_rot)])
def fitness(solution, cost):
"""
    Return the fitness evaluation. It is the quickest implementation.
Time for 1000 cities: 1.5ms
Parameters
----------
solution -- Vector of the cities.
cost -- distance matrix.
return -- the final fitness.
"""
solution_rot = np.roll(solution, -1)
return np.sum(cost[solution, solution_rot])
def ncities(cost):
"""
Return the number of cities
"""
nc, _ = cost.shape
return nc
def fitness_slow(solution, cost):
"""
    Return the fitness evaluation. It is the slowest implementation.
Time for 1000 cities: 17ms
Parameters
----------
solution -- Vector of the cities.
cost -- distance matrix.
return -- the final fitness
"""
sum = 0
num_ciudades = len(solution)
for i in range(num_ciudades-1):
c1 = solution[i]
c2 = solution[i+1]
sum += cost[c1,c2]
    # Return to the starting city
ciudad_final = solution[num_ciudades-1]
ciudad_origen = solution[0]
sum += cost[ciudad_final, ciudad_origen]
return sum
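
# Usage sketch (added; uses a synthetic instance instead of 'ar9152.tsp'):
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    cost = squareform(pdist(rng.random((100, 2))))  # 100 random planar cities
    tour = rng.permutation(ncities(cost))
    assert np.isclose(fitness(tour, cost), fitness_slow(tour, cost))
    print('tour length:', fitness(tour, cost))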

# ---- next file ----
# Copyright 2019-2020 The Kale Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import Any, Dict
from abc import ABC, abstractmethod
class Validator(ABC):
"""Base validator class.
All Validator classes must inherit from this one.
"""
@abstractmethod
def _validate(self, *args, **kwargs) -> bool:
pass
def __call__(self, *args, **kwargs):
"""Run validation."""
self._validate(*args, **kwargs)
class TypeValidator(Validator):
"""Validates a Field based on a primitive type."""
valid_type = None
def __init__(self, valid_type: type):
self.valid_type = valid_type or self.valid_type
if not self.valid_type:
raise ValueError("Not a valid type")
def _validate(self, value):
if not isinstance(value, self.valid_type):
raise ValueError("'%s' must be of type '%s'"
% (value, self.valid_type))
class DictValidator(Validator):
"""Validates a dictionary, applying `key` and `value` validators."""
key_validator = None
value_validator = None
def __init__(self,
key_validator: Validator = None,
value_validator: Validator = None):
self.key_validator = key_validator or self.key_validator
if not self.key_validator:
raise ValueError("Set an appropriate key_validator")
self.value_validator = value_validator or self.value_validator
if not self.value_validator:
raise ValueError("Set an appropriate value_validator")
def _validate(self, dictionary: Dict):
if not isinstance(dictionary, dict):
raise ValueError("Trying to use %s to validate a non dict object"
% self.__class__.__name__)
for k, v in dictionary.items():
self.key_validator(k)
self.value_validator(v)
return True
class RegexValidator(Validator):
"""Validates a string against a RegEx."""
regex = None
error_message = "Regex validation failed"
def __init__(self, regex: str = None, error_message: str = None):
self.regex = regex or self.regex
if not self.regex:
raise ValueError("Invalid 'regex' argument")
self.error_message = error_message or self.error_message
def _validate(self, value: str):
if not isinstance(value, str):
raise ValueError(
"%s cannot validate object of type %s. String expected."
% (self.__class__.__name__, str(type(value))))
if not re.match(self.regex, value):
raise ValueError("%s: '%s'. Must match regex '%s'"
% (self.error_message, value, self.regex))
return True
class EnumValidator(Validator):
"""Validates a value against a whitelist."""
enum = None
def __init__(self, enum: tuple = None):
self.enum = enum or self.enum
if not self.enum:
raise ValueError("Invalid 'enum' argument")
def _validate(self, value: Any):
if value not in self.enum:
raise ValueError("%s: Value %s is not allowed"
% (self.__class__.__name__, str(value)))
class K8sNameValidator(RegexValidator):
"""Validates K8s resource names."""
regex = r"^[a-z]([a-z0-9-]*[a-z0-9])?$"
error_message = "Not a valid K8s resource name"
class StepNameValidator(RegexValidator):
"""Validates the name of a pipeline step."""
regex = r"^[_a-z]([_a-z0-9]*)?$"
error_message = "Not a valid Step name"
class PipelineNameValidator(RegexValidator):
"""Validates the name of a pipeline."""
regex = r"^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
error_message = "Not a valid Pipeline name"
class K8sAnnotationKeyValidator(RegexValidator):
"""Validates the keys of a K8s annotations dictionary."""
_segment = "[a-zA-Z0-9]+([a-zA-Z0-9-_.]*[a-zA-Z0-9])?"
regex = r"^%s([/]%s)?$" % (_segment, _segment)
error_message = "Not a valid K8s annotation key"
class K8sAnnotationsValidator(DictValidator):
"""Validates a K8s annotations dictionary."""
key_validator = K8sAnnotationKeyValidator
value_validator = TypeValidator(str)
class K8sLimitKeyValidator(RegexValidator):
"""Validates the keys of a K8s limits dictionary."""
regex = r"[_a-z-\.\/]+"
error_message = "Not a valid K8s limit key"
class K8sLimitValueValidator(RegexValidator):
"""Validates the values of a K8s limits dictionary."""
regex = r"^[_a-zA-Z0-9\.]+$"
error_message = "Not a valid K8s limit value"
class K8sLimitsValidator(DictValidator):
"""Validates a K8s limits dictionary."""
key_validator = K8sLimitKeyValidator
value_validator = K8sLimitValueValidator
class K8sLabelKeyValidator(K8sAnnotationKeyValidator):
"""Validates the keys of a K8s labels dictionary."""
error_message = "Not a valid K8s label key"
class K8sLabelsValidator(DictValidator):
"""Validates a K8s labels dictionary."""
key_validator = K8sLabelKeyValidator
value_validator = TypeValidator(str)
class VolumeTypeValidator(EnumValidator):
"""Validates the type of a Volume."""
enum = ('pv', 'pvc', 'new_pvc', 'clone')
class VolumeAccessModeValidator(EnumValidator):
"""Validates the access mode of a Volume."""
enum = ("", "rom", "rwo", "rwm")
class IsLowerValidator(Validator):
"""Validates if a string is all lowercase."""
def _validate(self, value: str):
return value == value.lower()
class PositiveIntegerValidator(Validator):
"""Validates a Field to be a positive integer."""
def _validate(self, value):
if not isinstance(value, int):
raise ValueError("'%s' is not of type 'int'" % value)
if value <= 0:
raise ValueError("'%s' is not a positive integer" % value)
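
# Usage sketch (added): validators are callables that raise ValueError on
# invalid input.
if __name__ == "__main__":
    K8sNameValidator()("my-resource")          # valid: passes silently
    try:
        K8sNameValidator()("Not_A_K8s_Name")   # invalid: violates the regex
    except ValueError as err:
        print(err)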

# ---- next file: Python/uif2MongoDB.py ----
import sys
from core.storage import Uif, MongoDB
if __name__ == '__main__':
argc = len(sys.argv)
if 4 == argc or 5 == argc:
storagePath = sys.argv[1]
dataBaseName = sys.argv[2]
tableName = sys.argv[3]
hostAndPort = sys.argv[4] if 5 == argc else None
updates = Uif.getUpdatesFromStorage(storagePath)
itemsCount = len(updates)
if 0 < itemsCount:
print('At \'{}\' found {} update objects'.format(storagePath,
itemsCount))
itemsCount = MongoDB.uif2MongoDB(updates, dataBaseName, tableName,
hostAndPort)
print('At table \'{}\' of database \'{}\' now {} items'.format(
tableName, dataBaseName, itemsCount))
else:
print('Not found update objects at {}'.format(storagePath))
else:
        print('Usage:', sys.argv[0],
              '<folder or file with update info (*.uif)>',
              '<db name> <table name> <host address and port>[optional]',
              '(defaults to mongodb://127.0.0.1:27017/)')
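        # Example (hypothetical paths/names):
        #   python uif2MongoDB.py ./updates.uif updatesDB updates 127.0.0.1:27017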

# ---- next file: src/utils/epoch_logger.py ----
from gensim.models.callbacks import CallbackAny2Vec
from datetime import datetime
class EpochLogger(CallbackAny2Vec):
"""
Callback to log information about training
Reference:
- https://colab.research.google.com/drive/1A4x2yNS3V1nDZFYoQavpoX7AEQ9Rqtve#scrollTo=m1An-k0q9PMr
"""
def __init__(self):
self.epoch = 0
def on_epoch_begin(self, model):
print(f'{datetime.now()}: Model training epoch #{self.epoch} began')
def on_epoch_end(self, model):
print(f'{datetime.now()}: Model training epoch #{self.epoch} ended')
self.epoch += 1
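
# Usage sketch (added; assumes gensim is installed, toy corpus is made up):
#   from gensim.models import Word2Vec
#   Word2Vec(sentences=[["hello", "world"]], epochs=2, callbacks=[EpochLogger()])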

# ---- next file ----
"""
Use this module's members to connect to a RabbitMQ instance.
"""
import asyncio
import logging
from fnmatch import fnmatch
from typing import AnyStr, Sequence, Optional
from uuid import uuid4
import aioamqp
from sunhead.events import exceptions
from sunhead.events.abc import AbstractTransport, AbstractSubscriber
from sunhead.events.types import Transferrable
from sunhead.serializers import JSONSerializer
logger = logging.getLogger(__name__)
# TODO: Better implementation of single connection, routing & internal pub/sub
# Note: Now it is possible to call ``consume_queue`` multiple times and get several
# bindings to routing keys and multiple queues. Think over this thing.
class AMQPClient(AbstractTransport):
"""
Handy implementation of the asynchronous AMQP client.
Useful for building both publishers and consumers.
"""
DEFAULT_EXCHANGE_NAME = "default_exchange"
DEFAULT_EXCHANGE_TYPE = "topic"
def __init__(
self,
connection_parameters: dict,
exchange_name: str = DEFAULT_EXCHANGE_NAME,
exchange_type: str = DEFAULT_EXCHANGE_TYPE,
global_qos: Optional[int] = None,
**kwargs):
"""
There must be at least these members of the connection_parameters dict::
"connection_parameters": {
"login": "",
"password": "",
"host": "",
"port": "",
"virtualhost": "",
},
:param connection_parameters: Dict with connection parameters. See above for its format.
:return: EventsQueueClient instance.
"""
# Can not pass empty password when connecting. Must remove the field completely.
if not connection_parameters.get("password", ""):
connection_parameters.pop("password", None)
self._connection_parameters = connection_parameters or {}
self._transport = None
self._protocol = None
self._channel = None
self._exchange_name = exchange_name
self._exchange_type = exchange_type
self._global_qos = global_qos
self._serializer = self._get_serializer()
self._is_connecting = False
self._connection_guid = str(uuid4())
self._known_queues = {}
self._routing = {}
def _get_serializer(self):
# TODO: Make serializer configurable here
return JSONSerializer()
@property
def connected(self):
return self._channel is not None and self._channel.is_open
@property
def is_connecting(self) -> bool:
return self._is_connecting
async def connect(self):
"""
Create new asynchronous connection to the RabbitMQ instance.
This will connect, declare exchange and bind itself to the configured queue.
After that, client is ready to publish or consume messages.
:return: Does not return anything.
"""
if self.connected or self.is_connecting:
return
self._is_connecting = True
try:
logger.info("Connecting to RabbitMQ...")
self._transport, self._protocol = await aioamqp.connect(**self._connection_parameters)
logger.info("Getting channel...")
self._channel = await self._protocol.channel()
if self._global_qos is not None:
logger.info("Setting prefetch count on connection (%s)", self._global_qos)
await self._channel.basic_qos(0, self._global_qos, 1)
logger.info("Connecting to exchange '%s (%s)'", self._exchange_name, self._exchange_type)
await self._channel.exchange(self._exchange_name, self._exchange_type)
except (aioamqp.AmqpClosedConnection, Exception):
logger.error("Error initializing RabbitMQ connection", exc_info=True)
self._is_connecting = False
raise exceptions.StreamConnectionError
self._is_connecting = False
async def close(self):
self._protocol.stop()
await self._channel.close()
async def publish(self, data: Transferrable, topic: AnyStr) -> None:
if not self.connected:
logger.warning("Attempted to send message while not connected")
return
body = self._serializer.serialize(data)
await self._channel.publish(
body,
exchange_name=self._exchange_name,
routing_key=topic
)
# Uncomment for debugging
# logger.debug("Published message to AMQP exchange=%s, topic=%s", self._exchange_name, topic)
async def consume_queue(self, subscriber: AbstractSubscriber) -> None:
"""
Subscribe to the queue consuming.
:param subscriber:
:return:
"""
queue_name = subscriber.name
topics = subscriber.requested_topics
if queue_name in self._known_queues:
raise exceptions.ConsumerError("Queue '%s' already being consumed" % queue_name)
await self._declare_queue(queue_name)
# TODO: There is a lot of room to improvement here. Figure out routing done the right way
for key in topics:
self._routing.setdefault(key, set())
if subscriber in self._routing[key]:
logger.warning("Subscriber '%s' already receiving routing_key '%s'", subscriber, key)
break
await self._bind_key_to_queue(key, queue_name)
self._routing[key].add(subscriber)
logger.info("Consuming queue '%s'", queue_name)
await asyncio.wait_for(
self._channel.basic_consume(callback=self._on_message, queue_name=queue_name),
timeout=10
)
self._add_to_known_queue(queue_name)
async def _declare_queue(self, queue_name: AnyStr) -> None:
logger.info("Declaring queue...")
queue_declaration = await self._channel.queue_declare(queue_name)
queue_name = queue_declaration.get("queue")
logger.info("Declared queue '%s'", queue_name)
async def _bind_key_to_queue(self, routing_key: AnyStr, queue_name: AnyStr) -> None:
"""
Bind to queue with specified routing key.
:param routing_key: Routing key to bind with.
:param queue_name: Name of the queue
:return: Does not return anything
"""
logger.info("Binding key='%s'", routing_key)
result = await self._channel.queue_bind(
exchange_name=self._exchange_name,
queue_name=queue_name,
routing_key=routing_key,
)
return result
async def _on_message(self, channel, body, envelope, properties) -> None:
"""
Fires up when message is received by this consumer.
:param channel: Channel, through which message is received
:param body: Body of the message (serialized).
:param envelope: Envelope object with message meta
:type envelope: aioamqp.Envelope
:param properties: Properties of the message
:return: Coroutine object with result of message handling operation
"""
subscribers = self._get_subscribers(envelope.routing_key)
if not subscribers:
logger.debug("No route for message with key '%s'", envelope.routing_key)
return
body = self._serializer.deserialize(body)
for subscriber in subscribers:
# Check later if ensure_future can be applied here
await subscriber.on_message(body, envelope.routing_key)
await self._channel.basic_client_ack(envelope.delivery_tag)
def _get_subscribers(self, incoming_routing_key: AnyStr) -> Sequence[AbstractSubscriber]:
for key, subscribers in self._routing.items():
if fnmatch(incoming_routing_key, key):
return subscribers
return tuple()
def _add_to_known_queue(self, queue_name: AnyStr) -> None:
self._known_queues[queue_name] = {
"bound_keys": set(),
}
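
# Usage sketch (added; assumes a reachable RabbitMQ and a concrete
# AbstractSubscriber implementation):
#   client = AMQPClient({"host": "localhost", "port": 5672,
#                        "login": "guest", "password": "guest",
#                        "virtualhost": "/"})
#   await client.connect()
#   await client.publish({"event": "ping"}, topic="demo.ping")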

# ---- next file (repo: diCagri/content) ----
import demistomock as demisto
from CommonServerPython import *
# flake8: noqa: E501
class Client:
def __init__(self, params: Dict):
self.cs_client = CrowdStrikeClient(params)
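    # Added note: every *_request method below follows the same generated
    # pattern: build query params and/or a JSON body with assign_params
    # (which drops empty values) and delegate to
    # self.cs_client.http_request(<verb>, <endpoint>, ...).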
def add_role_request(self, domain_mssprolerequestv1_resources):
data = assign_params(resources=domain_mssprolerequestv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'mssp/entities/mssp-roles/v1', json_data=data, headers=headers)
return response
def add_user_group_members_request(self, domain_usergroupmembersrequestv1_resources):
data = assign_params(resources=domain_usergroupmembersrequestv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'mssp/entities/user-group-members/v1', json_data=data, headers=headers)
return response
def addcid_group_members_request(self, domain_cidgroupmembersrequestv1_resources):
data = assign_params(resources=domain_cidgroupmembersrequestv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'mssp/entities/cid-group-members/v1', json_data=data, headers=headers)
return response
def aggregate_allow_list_request(self, msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing, msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type):
data = assign_params(date_ranges=msa_aggregatequeryrequest_date_ranges, field=msa_aggregatequeryrequest_field, filter=msa_aggregatequeryrequest_filter, interval=msa_aggregatequeryrequest_interval, min_doc_count=msa_aggregatequeryrequest_min_doc_count, missing=msa_aggregatequeryrequest_missing,
name=msa_aggregatequeryrequest_name, q=msa_aggregatequeryrequest_q, ranges=msa_aggregatequeryrequest_ranges, size=msa_aggregatequeryrequest_size, sort=msa_aggregatequeryrequest_sort, sub_aggregates=msa_aggregatequeryrequest_sub_aggregates, time_zone=msa_aggregatequeryrequest_time_zone, type=msa_aggregatequeryrequest_type)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'falcon-complete-dashboards/aggregates/allowlist/GET/v1', json_data=data, headers=headers)
return response
def aggregate_block_list_request(self, msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing, msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type):
data = assign_params(date_ranges=msa_aggregatequeryrequest_date_ranges, field=msa_aggregatequeryrequest_field, filter=msa_aggregatequeryrequest_filter, interval=msa_aggregatequeryrequest_interval, min_doc_count=msa_aggregatequeryrequest_min_doc_count, missing=msa_aggregatequeryrequest_missing,
name=msa_aggregatequeryrequest_name, q=msa_aggregatequeryrequest_q, ranges=msa_aggregatequeryrequest_ranges, size=msa_aggregatequeryrequest_size, sort=msa_aggregatequeryrequest_sort, sub_aggregates=msa_aggregatequeryrequest_sub_aggregates, time_zone=msa_aggregatequeryrequest_time_zone, type=msa_aggregatequeryrequest_type)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'falcon-complete-dashboards/aggregates/blocklist/GET/v1', json_data=data, headers=headers)
return response
def aggregate_detections_request(self, msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing, msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type):
data = assign_params(date_ranges=msa_aggregatequeryrequest_date_ranges, field=msa_aggregatequeryrequest_field, filter=msa_aggregatequeryrequest_filter, interval=msa_aggregatequeryrequest_interval, min_doc_count=msa_aggregatequeryrequest_min_doc_count, missing=msa_aggregatequeryrequest_missing,
name=msa_aggregatequeryrequest_name, q=msa_aggregatequeryrequest_q, ranges=msa_aggregatequeryrequest_ranges, size=msa_aggregatequeryrequest_size, sort=msa_aggregatequeryrequest_sort, sub_aggregates=msa_aggregatequeryrequest_sub_aggregates, time_zone=msa_aggregatequeryrequest_time_zone, type=msa_aggregatequeryrequest_type)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'falcon-complete-dashboards/aggregates/detects/GET/v1', json_data=data, headers=headers)
return response
def aggregate_device_count_collection_request(self, msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing, msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type):
data = assign_params(date_ranges=msa_aggregatequeryrequest_date_ranges, field=msa_aggregatequeryrequest_field, filter=msa_aggregatequeryrequest_filter, interval=msa_aggregatequeryrequest_interval, min_doc_count=msa_aggregatequeryrequest_min_doc_count, missing=msa_aggregatequeryrequest_missing,
name=msa_aggregatequeryrequest_name, q=msa_aggregatequeryrequest_q, ranges=msa_aggregatequeryrequest_ranges, size=msa_aggregatequeryrequest_size, sort=msa_aggregatequeryrequest_sort, sub_aggregates=msa_aggregatequeryrequest_sub_aggregates, time_zone=msa_aggregatequeryrequest_time_zone, type=msa_aggregatequeryrequest_type)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'falcon-complete-dashboards/aggregates/devicecount-collections/GET/v1', json_data=data, headers=headers)
return response
def aggregate_escalations_request(self, msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing, msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type):
data = assign_params(date_ranges=msa_aggregatequeryrequest_date_ranges, field=msa_aggregatequeryrequest_field, filter=msa_aggregatequeryrequest_filter, interval=msa_aggregatequeryrequest_interval, min_doc_count=msa_aggregatequeryrequest_min_doc_count, missing=msa_aggregatequeryrequest_missing,
name=msa_aggregatequeryrequest_name, q=msa_aggregatequeryrequest_q, ranges=msa_aggregatequeryrequest_ranges, size=msa_aggregatequeryrequest_size, sort=msa_aggregatequeryrequest_sort, sub_aggregates=msa_aggregatequeryrequest_sub_aggregates, time_zone=msa_aggregatequeryrequest_time_zone, type=msa_aggregatequeryrequest_type)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'falcon-complete-dashboards/aggregates/escalations/GET/v1', json_data=data, headers=headers)
return response
def aggregate_notificationsv1_request(self, msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing, msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type):
data = assign_params(date_ranges=msa_aggregatequeryrequest_date_ranges, field=msa_aggregatequeryrequest_field, filter=msa_aggregatequeryrequest_filter, interval=msa_aggregatequeryrequest_interval, min_doc_count=msa_aggregatequeryrequest_min_doc_count, missing=msa_aggregatequeryrequest_missing,
name=msa_aggregatequeryrequest_name, q=msa_aggregatequeryrequest_q, ranges=msa_aggregatequeryrequest_ranges, size=msa_aggregatequeryrequest_size, sort=msa_aggregatequeryrequest_sort, sub_aggregates=msa_aggregatequeryrequest_sub_aggregates, time_zone=msa_aggregatequeryrequest_time_zone, type=msa_aggregatequeryrequest_type)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'recon/aggregates/notifications/GET/v1', json_data=data, headers=headers)
return response
def aggregate_remediations_request(self, msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing, msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type):
data = assign_params(date_ranges=msa_aggregatequeryrequest_date_ranges, field=msa_aggregatequeryrequest_field, filter=msa_aggregatequeryrequest_filter, interval=msa_aggregatequeryrequest_interval, min_doc_count=msa_aggregatequeryrequest_min_doc_count, missing=msa_aggregatequeryrequest_missing,
name=msa_aggregatequeryrequest_name, q=msa_aggregatequeryrequest_q, ranges=msa_aggregatequeryrequest_ranges, size=msa_aggregatequeryrequest_size, sort=msa_aggregatequeryrequest_sort, sub_aggregates=msa_aggregatequeryrequest_sub_aggregates, time_zone=msa_aggregatequeryrequest_time_zone, type=msa_aggregatequeryrequest_type)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'falcon-complete-dashboards/aggregates/remediations/GET/v1', json_data=data, headers=headers)
return response
def aggregateevents_request(self, fwmgr_msa_aggregatequeryrequest_date_ranges, fwmgr_msa_aggregatequeryrequest_field, fwmgr_msa_aggregatequeryrequest_filter, fwmgr_msa_aggregatequeryrequest_interval, fwmgr_msa_aggregatequeryrequest_min_doc_count, fwmgr_msa_aggregatequeryrequest_missing, fwmgr_msa_aggregatequeryrequest_name, fwmgr_msa_aggregatequeryrequest_q, fwmgr_msa_aggregatequeryrequest_ranges, fwmgr_msa_aggregatequeryrequest_size, fwmgr_msa_aggregatequeryrequest_sort, fwmgr_msa_aggregatequeryrequest_sub_aggregates, fwmgr_msa_aggregatequeryrequest_time_zone, fwmgr_msa_aggregatequeryrequest_type):
data = assign_params(date_ranges=fwmgr_msa_aggregatequeryrequest_date_ranges, field=fwmgr_msa_aggregatequeryrequest_field, filter=fwmgr_msa_aggregatequeryrequest_filter, interval=fwmgr_msa_aggregatequeryrequest_interval, min_doc_count=fwmgr_msa_aggregatequeryrequest_min_doc_count, missing=fwmgr_msa_aggregatequeryrequest_missing, name=fwmgr_msa_aggregatequeryrequest_name,
q=fwmgr_msa_aggregatequeryrequest_q, ranges=fwmgr_msa_aggregatequeryrequest_ranges, size=fwmgr_msa_aggregatequeryrequest_size, sort=fwmgr_msa_aggregatequeryrequest_sort, sub_aggregates=fwmgr_msa_aggregatequeryrequest_sub_aggregates, time_zone=fwmgr_msa_aggregatequeryrequest_time_zone, type=fwmgr_msa_aggregatequeryrequest_type)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'fwmgr/aggregates/events/GET/v1', json_data=data, headers=headers)
return response
def aggregatefc_incidents_request(self, msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing, msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type):
data = assign_params(date_ranges=msa_aggregatequeryrequest_date_ranges, field=msa_aggregatequeryrequest_field, filter=msa_aggregatequeryrequest_filter, interval=msa_aggregatequeryrequest_interval, min_doc_count=msa_aggregatequeryrequest_min_doc_count, missing=msa_aggregatequeryrequest_missing,
name=msa_aggregatequeryrequest_name, q=msa_aggregatequeryrequest_q, ranges=msa_aggregatequeryrequest_ranges, size=msa_aggregatequeryrequest_size, sort=msa_aggregatequeryrequest_sort, sub_aggregates=msa_aggregatequeryrequest_sub_aggregates, time_zone=msa_aggregatequeryrequest_time_zone, type=msa_aggregatequeryrequest_type)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'falcon-complete-dashboards/aggregates/incidents/GET/v1', json_data=data, headers=headers)
return response
def aggregatepolicyrules_request(self, fwmgr_msa_aggregatequeryrequest_date_ranges, fwmgr_msa_aggregatequeryrequest_field, fwmgr_msa_aggregatequeryrequest_filter, fwmgr_msa_aggregatequeryrequest_interval, fwmgr_msa_aggregatequeryrequest_min_doc_count, fwmgr_msa_aggregatequeryrequest_missing, fwmgr_msa_aggregatequeryrequest_name, fwmgr_msa_aggregatequeryrequest_q, fwmgr_msa_aggregatequeryrequest_ranges, fwmgr_msa_aggregatequeryrequest_size, fwmgr_msa_aggregatequeryrequest_sort, fwmgr_msa_aggregatequeryrequest_sub_aggregates, fwmgr_msa_aggregatequeryrequest_time_zone, fwmgr_msa_aggregatequeryrequest_type):
data = assign_params(date_ranges=fwmgr_msa_aggregatequeryrequest_date_ranges, field=fwmgr_msa_aggregatequeryrequest_field, filter=fwmgr_msa_aggregatequeryrequest_filter, interval=fwmgr_msa_aggregatequeryrequest_interval, min_doc_count=fwmgr_msa_aggregatequeryrequest_min_doc_count, missing=fwmgr_msa_aggregatequeryrequest_missing, name=fwmgr_msa_aggregatequeryrequest_name,
q=fwmgr_msa_aggregatequeryrequest_q, ranges=fwmgr_msa_aggregatequeryrequest_ranges, size=fwmgr_msa_aggregatequeryrequest_size, sort=fwmgr_msa_aggregatequeryrequest_sort, sub_aggregates=fwmgr_msa_aggregatequeryrequest_sub_aggregates, time_zone=fwmgr_msa_aggregatequeryrequest_time_zone, type=fwmgr_msa_aggregatequeryrequest_type)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'fwmgr/aggregates/policy-rules/GET/v1', json_data=data, headers=headers)
return response
def aggregaterulegroups_request(self, fwmgr_msa_aggregatequeryrequest_date_ranges, fwmgr_msa_aggregatequeryrequest_field, fwmgr_msa_aggregatequeryrequest_filter, fwmgr_msa_aggregatequeryrequest_interval, fwmgr_msa_aggregatequeryrequest_min_doc_count, fwmgr_msa_aggregatequeryrequest_missing, fwmgr_msa_aggregatequeryrequest_name, fwmgr_msa_aggregatequeryrequest_q, fwmgr_msa_aggregatequeryrequest_ranges, fwmgr_msa_aggregatequeryrequest_size, fwmgr_msa_aggregatequeryrequest_sort, fwmgr_msa_aggregatequeryrequest_sub_aggregates, fwmgr_msa_aggregatequeryrequest_time_zone, fwmgr_msa_aggregatequeryrequest_type):
data = assign_params(date_ranges=fwmgr_msa_aggregatequeryrequest_date_ranges, field=fwmgr_msa_aggregatequeryrequest_field, filter=fwmgr_msa_aggregatequeryrequest_filter, interval=fwmgr_msa_aggregatequeryrequest_interval, min_doc_count=fwmgr_msa_aggregatequeryrequest_min_doc_count, missing=fwmgr_msa_aggregatequeryrequest_missing, name=fwmgr_msa_aggregatequeryrequest_name,
q=fwmgr_msa_aggregatequeryrequest_q, ranges=fwmgr_msa_aggregatequeryrequest_ranges, size=fwmgr_msa_aggregatequeryrequest_size, sort=fwmgr_msa_aggregatequeryrequest_sort, sub_aggregates=fwmgr_msa_aggregatequeryrequest_sub_aggregates, time_zone=fwmgr_msa_aggregatequeryrequest_time_zone, type=fwmgr_msa_aggregatequeryrequest_type)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'fwmgr/aggregates/rule-groups/GET/v1', json_data=data, headers=headers)
return response
def aggregaterules_request(self, fwmgr_msa_aggregatequeryrequest_date_ranges, fwmgr_msa_aggregatequeryrequest_field, fwmgr_msa_aggregatequeryrequest_filter, fwmgr_msa_aggregatequeryrequest_interval, fwmgr_msa_aggregatequeryrequest_min_doc_count, fwmgr_msa_aggregatequeryrequest_missing, fwmgr_msa_aggregatequeryrequest_name, fwmgr_msa_aggregatequeryrequest_q, fwmgr_msa_aggregatequeryrequest_ranges, fwmgr_msa_aggregatequeryrequest_size, fwmgr_msa_aggregatequeryrequest_sort, fwmgr_msa_aggregatequeryrequest_sub_aggregates, fwmgr_msa_aggregatequeryrequest_time_zone, fwmgr_msa_aggregatequeryrequest_type):
data = assign_params(date_ranges=fwmgr_msa_aggregatequeryrequest_date_ranges, field=fwmgr_msa_aggregatequeryrequest_field, filter=fwmgr_msa_aggregatequeryrequest_filter, interval=fwmgr_msa_aggregatequeryrequest_interval, min_doc_count=fwmgr_msa_aggregatequeryrequest_min_doc_count, missing=fwmgr_msa_aggregatequeryrequest_missing, name=fwmgr_msa_aggregatequeryrequest_name,
q=fwmgr_msa_aggregatequeryrequest_q, ranges=fwmgr_msa_aggregatequeryrequest_ranges, size=fwmgr_msa_aggregatequeryrequest_size, sort=fwmgr_msa_aggregatequeryrequest_sort, sub_aggregates=fwmgr_msa_aggregatequeryrequest_sub_aggregates, time_zone=fwmgr_msa_aggregatequeryrequest_time_zone, type=fwmgr_msa_aggregatequeryrequest_type)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'fwmgr/aggregates/rules/GET/v1', json_data=data, headers=headers)
return response
def aggregates_detections_global_counts_request(self, filter_):
params = assign_params(filter=filter_)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'overwatch-dashboards/aggregates/detections-global-counts/v1', params=params, headers=headers)
return response
def aggregates_events_request(self, msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing, msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type):
data = assign_params(date_ranges=msa_aggregatequeryrequest_date_ranges, field=msa_aggregatequeryrequest_field, filter=msa_aggregatequeryrequest_filter, interval=msa_aggregatequeryrequest_interval, min_doc_count=msa_aggregatequeryrequest_min_doc_count, missing=msa_aggregatequeryrequest_missing,
name=msa_aggregatequeryrequest_name, q=msa_aggregatequeryrequest_q, ranges=msa_aggregatequeryrequest_ranges, size=msa_aggregatequeryrequest_size, sort=msa_aggregatequeryrequest_sort, sub_aggregates=msa_aggregatequeryrequest_sub_aggregates, time_zone=msa_aggregatequeryrequest_time_zone, type=msa_aggregatequeryrequest_type)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'overwatch-dashboards/aggregates/events/GET/v1', json_data=data, headers=headers)
return response
def aggregates_events_collections_request(self, msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing, msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type):
data = assign_params(date_ranges=msa_aggregatequeryrequest_date_ranges, field=msa_aggregatequeryrequest_field, filter=msa_aggregatequeryrequest_filter, interval=msa_aggregatequeryrequest_interval, min_doc_count=msa_aggregatequeryrequest_min_doc_count, missing=msa_aggregatequeryrequest_missing,
name=msa_aggregatequeryrequest_name, q=msa_aggregatequeryrequest_q, ranges=msa_aggregatequeryrequest_ranges, size=msa_aggregatequeryrequest_size, sort=msa_aggregatequeryrequest_sort, sub_aggregates=msa_aggregatequeryrequest_sub_aggregates, time_zone=msa_aggregatequeryrequest_time_zone, type=msa_aggregatequeryrequest_type)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'overwatch-dashboards/aggregates/events-collections/GET/v1', json_data=data, headers=headers)
return response
def aggregates_incidents_global_counts_request(self, filter_):
params = assign_params(filter=filter_)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'overwatch-dashboards/aggregates/incidents-global-counts/v1', params=params, headers=headers)
return response
def aggregatesow_events_global_counts_request(self, filter_):
params = assign_params(filter=filter_)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'overwatch-dashboards/aggregates/ow-events-global-counts/v1', params=params, headers=headers)
return response
    def apipreemptproxypostgraphql_request(self):
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'identity-protection/combined/graphql/v1', headers=headers)
return response
def auditeventsquery_request(self, offset, limit, sort, filter_):
params = assign_params(offset=offset, limit=limit, sort=sort, filter=filter_)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'installation-tokens/queries/audit-events/v1', params=params, headers=headers)
return response
def auditeventsread_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'installation-tokens/entities/audit-events/v1', params=params, headers=headers)
return response
def batch_active_responder_cmd_request(self, timeout, timeout_duration, domain_batchexecutecommandrequest_base_command, domain_batchexecutecommandrequest_batch_id, domain_batchexecutecommandrequest_command_string, domain_batchexecutecommandrequest_optional_hosts, domain_batchexecutecommandrequest_persist_all):
params = assign_params(timeout=timeout, timeout_duration=timeout_duration)
data = assign_params(base_command=domain_batchexecutecommandrequest_base_command, batch_id=domain_batchexecutecommandrequest_batch_id,
command_string=domain_batchexecutecommandrequest_command_string, optional_hosts=domain_batchexecutecommandrequest_optional_hosts, persist_all=domain_batchexecutecommandrequest_persist_all)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'real-time-response/combined/batch-active-responder-command/v1', params=params, json_data=data, headers=headers)
return response
def batch_admin_cmd_request(self, timeout, timeout_duration, domain_batchexecutecommandrequest_base_command, domain_batchexecutecommandrequest_batch_id, domain_batchexecutecommandrequest_command_string, domain_batchexecutecommandrequest_optional_hosts, domain_batchexecutecommandrequest_persist_all):
params = assign_params(timeout=timeout, timeout_duration=timeout_duration)
data = assign_params(base_command=domain_batchexecutecommandrequest_base_command, batch_id=domain_batchexecutecommandrequest_batch_id,
command_string=domain_batchexecutecommandrequest_command_string, optional_hosts=domain_batchexecutecommandrequest_optional_hosts, persist_all=domain_batchexecutecommandrequest_persist_all)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'real-time-response/combined/batch-admin-command/v1', params=params, json_data=data, headers=headers)
return response
def batch_cmd_request(self, timeout, timeout_duration, domain_batchexecutecommandrequest_base_command, domain_batchexecutecommandrequest_batch_id, domain_batchexecutecommandrequest_command_string, domain_batchexecutecommandrequest_optional_hosts, domain_batchexecutecommandrequest_persist_all):
params = assign_params(timeout=timeout, timeout_duration=timeout_duration)
data = assign_params(base_command=domain_batchexecutecommandrequest_base_command, batch_id=domain_batchexecutecommandrequest_batch_id,
command_string=domain_batchexecutecommandrequest_command_string, optional_hosts=domain_batchexecutecommandrequest_optional_hosts, persist_all=domain_batchexecutecommandrequest_persist_all)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'real-time-response/combined/batch-command/v1', params=params, json_data=data, headers=headers)
return response
def batch_get_cmd_request(self, timeout, timeout_duration, domain_batchgetcommandrequest_batch_id, domain_batchgetcommandrequest_file_path, domain_batchgetcommandrequest_optional_hosts):
params = assign_params(timeout=timeout, timeout_duration=timeout_duration)
data = assign_params(batch_id=domain_batchgetcommandrequest_batch_id,
file_path=domain_batchgetcommandrequest_file_path, optional_hosts=domain_batchgetcommandrequest_optional_hosts)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'real-time-response/combined/batch-get-command/v1', params=params, json_data=data, headers=headers)
return response
def batch_get_cmd_status_request(self, timeout, timeout_duration, batch_get_cmd_req_id):
params = assign_params(timeout=timeout, timeout_duration=timeout_duration, batch_get_cmd_req_id=batch_get_cmd_req_id)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'real-time-response/combined/batch-get-command/v1', params=params, headers=headers)
return response
def batch_init_sessions_request(self, timeout, timeout_duration, domain_batchinitsessionrequest_existing_batch_id, domain_batchinitsessionrequest_host_ids, domain_batchinitsessionrequest_queue_offline):
params = assign_params(timeout=timeout, timeout_duration=timeout_duration)
data = assign_params(existing_batch_id=domain_batchinitsessionrequest_existing_batch_id,
host_ids=domain_batchinitsessionrequest_host_ids, queue_offline=domain_batchinitsessionrequest_queue_offline)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'real-time-response/combined/batch-init-session/v1', params=params, json_data=data, headers=headers)
return response
def batch_refresh_sessions_request(self, timeout, timeout_duration, domain_batchrefreshsessionrequest_batch_id, domain_batchrefreshsessionrequest_hosts_to_remove):
params = assign_params(timeout=timeout, timeout_duration=timeout_duration)
data = assign_params(batch_id=domain_batchrefreshsessionrequest_batch_id,
hosts_to_remove=domain_batchrefreshsessionrequest_hosts_to_remove)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'real-time-response/combined/batch-refresh-session/v1', params=params, json_data=data, headers=headers)
return response
def create_actionsv1_request(self, domain_registeractionsrequest_actions, domain_registeractionsrequest_rule_id):
data = assign_params(actions=domain_registeractionsrequest_actions, rule_id=domain_registeractionsrequest_rule_id)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'recon/entities/actions/v1', json_data=data, headers=headers)
return response
def create_device_control_policies_request(self, requests_createdevicecontrolpoliciesv1_resources):
data = assign_params(resources=requests_createdevicecontrolpoliciesv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'policy/entities/device-control/v1', json_data=data, headers=headers)
return response
def create_firewall_policies_request(self, requests_createfirewallpoliciesv1_resources, clone_id):
params = assign_params(clone_id=clone_id)
data = assign_params(resources=requests_createfirewallpoliciesv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'policy/entities/firewall/v1',
params=params, json_data=data, headers=headers)
return response
def create_host_groups_request(self, requests_creategroupsv1_resources):
data = assign_params(resources=requests_creategroupsv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'devices/entities/host-groups/v1', json_data=data, headers=headers)
return response
def create_or_updateaws_settings_request(self, models_modifyawscustomersettingsv1_resources):
data = assign_params(resources=models_modifyawscustomersettingsv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'cloud-connect-aws/entities/settings/v1', json_data=data, headers=headers)
return response
def create_prevention_policies_request(self, requests_createpreventionpoliciesv1_resources):
data = assign_params(resources=requests_createpreventionpoliciesv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'policy/entities/prevention/v1', json_data=data, headers=headers)
return response
def create_rulesv1_request(self, sadomain_createrulerequestv1_filter, sadomain_createrulerequestv1_name, sadomain_createrulerequestv1_permissions, sadomain_createrulerequestv1_priority, sadomain_createrulerequestv1_topic):
data = assign_params(filter=sadomain_createrulerequestv1_filter, name=sadomain_createrulerequestv1_name,
permissions=sadomain_createrulerequestv1_permissions, priority=sadomain_createrulerequestv1_priority, topic=sadomain_createrulerequestv1_topic)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'recon/entities/rules/v1', json_data=data, headers=headers)
return response
def create_sensor_update_policies_request(self, requests_createsensorupdatepoliciesv1_resources):
data = assign_params(resources=requests_createsensorupdatepoliciesv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'policy/entities/sensor-update/v1', json_data=data, headers=headers)
return response
def create_sensor_update_policiesv2_request(self, requests_createsensorupdatepoliciesv2_resources):
data = assign_params(resources=requests_createsensorupdatepoliciesv2_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'policy/entities/sensor-update/v2', json_data=data, headers=headers)
return response
def create_user_request(self, domain_usercreaterequest_firstname, domain_usercreaterequest_lastname, domain_usercreaterequest_password, domain_usercreaterequest_uid):
data = assign_params(firstName=domain_usercreaterequest_firstname, lastName=domain_usercreaterequest_lastname,
password=domain_usercreaterequest_password, uid=domain_usercreaterequest_uid)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'users/entities/users/v1', json_data=data, headers=headers)
return response
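# Hypothetical usage sketch (argument values are illustrative, not from this module):
#   client.create_user_request(
#       domain_usercreaterequest_firstname='Jane',
#       domain_usercreaterequest_lastname='Doe',
#       domain_usercreaterequest_password='<a strong password>',
#       domain_usercreaterequest_uid='[email protected]')
# The uid is assumed to be the user's email address, per the Falcon users API.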
def create_user_groups_request(self, domain_usergroupsrequestv1_resources):
data = assign_params(resources=domain_usergroupsrequestv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'mssp/entities/user-groups/v1', json_data=data, headers=headers)
return response
def createaws_account_request(self, k8sreg_createawsaccreq_resources):
data = assign_params(resources=k8sreg_createawsaccreq_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'kubernetes-protection/entities/accounts/aws/v1', json_data=data, headers=headers)
return response
def createcid_groups_request(self, domain_cidgroupsrequestv1_resources):
data = assign_params(resources=domain_cidgroupsrequestv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'mssp/entities/cid-groups/v1', json_data=data, headers=headers)
return response
def createcspm_aws_account_request(self, registration_awsaccountcreaterequestextv2_resources):
data = assign_params(resources=registration_awsaccountcreaterequestextv2_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'cloud-connect-cspm-aws/entities/account/v1', json_data=data, headers=headers)
return response
def createcspmgcp_account_request(self, registration_gcpaccountcreaterequestextv1_resources):
data = assign_params(resources=registration_gcpaccountcreaterequestextv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'cloud-connect-gcp/entities/account/v1', json_data=data, headers=headers)
return response
def createioc_request(self, api_iocviewrecord_batch_id, api_iocviewrecord_created_by, api_iocviewrecord_created_timestamp, api_iocviewrecord_description, api_iocviewrecord_expiration_days, api_iocviewrecord_expiration_timestamp, api_iocviewrecord_modified_by, api_iocviewrecord_modified_timestamp, api_iocviewrecord_policy, api_iocviewrecord_share_level, api_iocviewrecord_source, api_iocviewrecord_type, api_iocviewrecord_value):
data = assign_params(batch_id=api_iocviewrecord_batch_id, created_by=api_iocviewrecord_created_by, created_timestamp=api_iocviewrecord_created_timestamp, description=api_iocviewrecord_description, expiration_days=api_iocviewrecord_expiration_days, expiration_timestamp=api_iocviewrecord_expiration_timestamp,
modified_by=api_iocviewrecord_modified_by, modified_timestamp=api_iocviewrecord_modified_timestamp, policy=api_iocviewrecord_policy, share_level=api_iocviewrecord_share_level, source=api_iocviewrecord_source, type=api_iocviewrecord_type, value=api_iocviewrecord_value)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'indicators/entities/iocs/v1', json_data=data, headers=headers)
return response
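# Note: indicators/entities/iocs/v1 is CrowdStrike's legacy custom-IOC API; the
# newer iocs/entities/indicators/v1 endpoints (indicatorcreatev1_request and
# friends further below) are assumed to supersede it.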
def createml_exclusionsv1_request(self, requests_mlexclusioncreatereqv1_comment, requests_mlexclusioncreatereqv1_excluded_from, requests_mlexclusioncreatereqv1_groups, requests_mlexclusioncreatereqv1_value):
data = assign_params(comment=requests_mlexclusioncreatereqv1_comment, excluded_from=requests_mlexclusioncreatereqv1_excluded_from,
groups=requests_mlexclusioncreatereqv1_groups, value=requests_mlexclusioncreatereqv1_value)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'policy/entities/ml-exclusions/v1', json_data=data, headers=headers)
return response
def creatert_response_policies_request(self, requests_creatertresponsepoliciesv1_resources):
data = assign_params(resources=requests_creatertresponsepoliciesv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'policy/entities/response/v1', json_data=data, headers=headers)
return response
def createrule_request(self, api_rulecreatev1_comment, api_rulecreatev1_description, api_rulecreatev1_disposition_id, api_rulecreatev1_field_values, api_rulecreatev1_name, api_rulecreatev1_pattern_severity, api_rulecreatev1_rulegroup_id, api_rulecreatev1_ruletype_id):
data = assign_params(comment=api_rulecreatev1_comment, description=api_rulecreatev1_description, disposition_id=api_rulecreatev1_disposition_id, field_values=api_rulecreatev1_field_values,
name=api_rulecreatev1_name, pattern_severity=api_rulecreatev1_pattern_severity, rulegroup_id=api_rulecreatev1_rulegroup_id, ruletype_id=api_rulecreatev1_ruletype_id)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'ioarules/entities/rules/v1', json_data=data, headers=headers)
return response
def createrulegroup_request(self, clone_id, library, comment, fwmgr_api_rulegroupcreaterequestv1_description, fwmgr_api_rulegroupcreaterequestv1_enabled, fwmgr_api_rulegroupcreaterequestv1_name, fwmgr_api_rulegroupcreaterequestv1_rules):
params = assign_params(clone_id=clone_id, library=library, comment=comment)
data = assign_params(description=fwmgr_api_rulegroupcreaterequestv1_description, enabled=fwmgr_api_rulegroupcreaterequestv1_enabled,
name=fwmgr_api_rulegroupcreaterequestv1_name, rules=fwmgr_api_rulegroupcreaterequestv1_rules)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'fwmgr/entities/rule-groups/v1',
params=params, json_data=data, headers=headers)
return response
def createrulegroup_mixin0_request(self, api_rulegroupcreaterequestv1_comment, api_rulegroupcreaterequestv1_description, api_rulegroupcreaterequestv1_name, api_rulegroupcreaterequestv1_platform):
data = assign_params(comment=api_rulegroupcreaterequestv1_comment, description=api_rulegroupcreaterequestv1_description,
name=api_rulegroupcreaterequestv1_name, platform=api_rulegroupcreaterequestv1_platform)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'ioarules/entities/rule-groups/v1', json_data=data, headers=headers)
return response
def createsv_exclusionsv1_request(self, requests_svexclusioncreatereqv1_comment, requests_svexclusioncreatereqv1_groups, requests_svexclusioncreatereqv1_value):
data = assign_params(comment=requests_svexclusioncreatereqv1_comment,
groups=requests_svexclusioncreatereqv1_groups, value=requests_svexclusioncreatereqv1_value)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'policy/entities/sv-exclusions/v1', json_data=data, headers=headers)
return response
def crowd_score_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'incidents/combined/crowdscores/v1', params=params, headers=headers)
return response
def customersettingsread_request(self):
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'installation-tokens/entities/customer-settings/v1', headers=headers)
return response
def delete_actionv1_request(self, id_):
params = assign_params(id=id_)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'recon/entities/actions/v1', params=params, headers=headers)
return response
def delete_device_control_policies_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'policy/entities/device-control/v1', params=params, headers=headers)
return response
def delete_firewall_policies_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'policy/entities/firewall/v1', params=params, headers=headers)
return response
def delete_host_groups_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'devices/entities/host-groups/v1', params=params, headers=headers)
return response
def delete_notificationsv1_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'recon/entities/notifications/v1', params=params, headers=headers)
return response
def delete_prevention_policies_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'policy/entities/prevention/v1', params=params, headers=headers)
return response
def delete_report_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'falconx/entities/reports/v1', params=params, headers=headers)
return response
def delete_rulesv1_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'recon/entities/rules/v1', params=params, headers=headers)
return response
def delete_samplev2_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'samples/entities/samples/v2', params=params, headers=headers)
return response
def delete_samplev3_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'samples/entities/samples/v3', params=params, headers=headers)
return response
def delete_sensor_update_policies_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'policy/entities/sensor-update/v1', params=params, headers=headers)
return response
def delete_sensor_visibility_exclusionsv1_request(self, ids, comment):
params = assign_params(ids=ids, comment=comment)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'policy/entities/sv-exclusions/v1', params=params, headers=headers)
return response
def delete_user_request(self, user_uuid):
params = assign_params(user_uuid=user_uuid)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'users/entities/users/v1', params=params, headers=headers)
return response
def delete_user_group_members_request(self, domain_usergroupmembersrequestv1_resources):
data = assign_params(resources=domain_usergroupmembersrequestv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'mssp/entities/user-group-members/v1', json_data=data, headers=headers)
return response
def delete_user_groups_request(self, user_group_ids):
params = assign_params(user_group_ids=user_group_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'mssp/entities/user-groups/v1', params=params, headers=headers)
return response
def deleteaws_accounts_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'cloud-connect-aws/entities/accounts/v1', params=params, headers=headers)
return response
def deleteaws_accounts_mixin0_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'delete', 'kubernetes-protection/entities/accounts/aws/v1', params=params, headers=headers)
return response
def deletecid_group_members_request(self, domain_cidgroupmembersrequestv1_resources):
data = assign_params(resources=domain_cidgroupmembersrequestv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'mssp/entities/cid-group-members/v1', json_data=data, headers=headers)
return response
def deletecid_groups_request(self, cid_group_ids):
params = assign_params(cid_group_ids=cid_group_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'mssp/entities/cid-groups/v1', params=params, headers=headers)
return response
def deletecspm_aws_account_request(self, ids, organization_ids):
params = assign_params(ids=ids, organization_ids=organization_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'delete', 'cloud-connect-cspm-aws/entities/account/v1', params=params, headers=headers)
return response
def deletecspm_azure_account_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'delete', 'cloud-connect-cspm-azure/entities/account/v1', params=params, headers=headers)
return response
def deleted_roles_request(self, domain_mssprolerequestv1_resources):
data = assign_params(resources=domain_mssprolerequestv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'mssp/entities/mssp-roles/v1', json_data=data, headers=headers)
return response
def deleteioa_exclusionsv1_request(self, ids, comment):
params = assign_params(ids=ids, comment=comment)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'policy/entities/ioa-exclusions/v1', params=params, headers=headers)
return response
def deleteioc_request(self, type_, value):
params = assign_params(type=type_, value=value)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'indicators/entities/iocs/v1', params=params, headers=headers)
return response
def deleteml_exclusionsv1_request(self, ids, comment):
params = assign_params(ids=ids, comment=comment)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'policy/entities/ml-exclusions/v1', params=params, headers=headers)
return response
def deletert_response_policies_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'policy/entities/response/v1', params=params, headers=headers)
return response
def deleterulegroups_request(self, ids, comment):
params = assign_params(ids=ids, comment=comment)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'fwmgr/entities/rule-groups/v1', params=params, headers=headers)
return response
def deleterulegroups_mixin0_request(self, comment, ids):
params = assign_params(comment=comment, ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'ioarules/entities/rule-groups/v1', params=params, headers=headers)
return response
def deleterules_request(self, rule_group_id, comment, ids):
params = assign_params(rule_group_id=rule_group_id, comment=comment, ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'ioarules/entities/rules/v1', params=params, headers=headers)
return response
def devices_count_request(self, type_, value):
params = assign_params(type=type_, value=value)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'indicators/aggregates/devices-count/v1', params=params, headers=headers)
return response
def devices_ran_on_request(self, type_, value, limit, offset):
params = assign_params(type=type_, value=value, limit=limit, offset=offset)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'indicators/queries/devices/v1', params=params, headers=headers)
return response
def download_sensor_installer_by_id_request(self, id_):
params = assign_params(id=id_)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'sensors/entities/download-installer/v1', params=params, headers=headers)
return response
def entitiesprocesses_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'processes/entities/processes/v1', params=params, headers=headers)
return response
def get_actionsv1_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'recon/entities/actions/v1', params=params, headers=headers)
return response
def get_aggregate_detects_request(self, msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing, msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type):
data = assign_params(date_ranges=msa_aggregatequeryrequest_date_ranges, field=msa_aggregatequeryrequest_field, filter=msa_aggregatequeryrequest_filter, interval=msa_aggregatequeryrequest_interval, min_doc_count=msa_aggregatequeryrequest_min_doc_count, missing=msa_aggregatequeryrequest_missing,
name=msa_aggregatequeryrequest_name, q=msa_aggregatequeryrequest_q, ranges=msa_aggregatequeryrequest_ranges, size=msa_aggregatequeryrequest_size, sort=msa_aggregatequeryrequest_sort, sub_aggregates=msa_aggregatequeryrequest_sub_aggregates, time_zone=msa_aggregatequeryrequest_time_zone, type=msa_aggregatequeryrequest_type)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'detects/aggregates/detects/GET/v1', json_data=data, headers=headers)
return response
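# Aggregate "GET" operations are modelled by the Falcon API as POSTs to a
# .../GET/v1 path, with the query expressed in the msa_aggregatequeryrequest
# body; that is why this read-style call uses json_data rather than params.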
def get_artifacts_request(self, id_, name):
params = assign_params(id=id_, name=name)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'falconx/entities/artifacts/v1', params=params, headers=headers)
return response
def get_assessmentv1_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'zero-trust-assessment/entities/assessments/v1', params=params, headers=headers)
return response
def get_available_role_ids_request(self):
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'user-roles/queries/user-role-ids-by-cid/v1', headers=headers)
return response
def get_behaviors_request(self, msa_idsrequest_ids):
data = assign_params(ids=msa_idsrequest_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'incidents/entities/behaviors/GET/v1', json_data=data, headers=headers)
return response
def get_children_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'mssp/entities/children/v1', params=params, headers=headers)
return response
def get_cloudconnectazure_entities_account_v1_request(self, ids, scan_type):
params = assign_params(ids=ids, scan_type=scan_type)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'cloud-connect-azure/entities/account/v1', params=params, headers=headers)
return response
def get_cloudconnectazure_entities_userscriptsdownload_v1_request(self):
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'cloud-connect-azure/entities/user-scripts-download/v1', headers=headers)
return response
def get_cloudconnectcspmazure_entities_account_v1_request(self, ids, scan_type, status, limit, offset):
params = assign_params(ids=ids, scan_type=scan_type, status=status, limit=limit, offset=offset)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'cloud-connect-cspm-azure/entities/account/v1', params=params, headers=headers)
return response
def get_cloudconnectcspmazure_entities_userscriptsdownload_v1_request(self, tenant_id):
params = assign_params(tenant_id=tenant_id)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'cloud-connect-cspm-azure/entities/user-scripts-download/v1', params=params, headers=headers)
return response
def get_clusters_request(self, cluster_names, account_ids, locations, cluster_service, limit, offset):
params = assign_params(cluster_names=cluster_names, account_ids=account_ids, locations=locations,
cluster_service=cluster_service, limit=limit, offset=offset)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'kubernetes-protection/entities/kubernetes/clusters/v1', params=params, headers=headers)
return response
def get_combined_sensor_installers_by_query_request(self, offset, limit, sort, filter_):
params = assign_params(offset=offset, limit=limit, sort=sort, filter=filter_)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'sensors/combined/installers/v1', params=params, headers=headers)
return response
def get_detect_summaries_request(self, msa_idsrequest_ids):
data = assign_params(ids=msa_idsrequest_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'detects/entities/summaries/GET/v1', json_data=data, headers=headers)
return response
def get_device_control_policies_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/entities/device-control/v1', params=params, headers=headers)
return response
def get_device_count_collection_queries_by_filter_request(self, limit, sort, filter_, offset):
params = assign_params(limit=limit, sort=sort, filter=filter_, offset=offset)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'falcon-complete-dashboards/queries/devicecount-collections/v1', params=params, headers=headers)
return response
def get_device_details_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'devices/entities/devices/v1', params=params, headers=headers)
return response
def get_firewall_policies_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/entities/firewall/v1', params=params, headers=headers)
return response
def get_helm_values_yaml_request(self, cluster_name):
params = assign_params(cluster_name=cluster_name)
headers = self.cs_client._headers
headers['Accept'] = 'application/yaml'
response = self.cs_client.http_request(
'get', 'kubernetes-protection/entities/integration/agent/v1', params=params, headers=headers)
return response
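# Caution (observation, not from the original source): `headers` above aliases
# the shared self.cs_client._headers dict, so setting 'Accept' here mutates the
# headers used by every subsequent request. Copying first would avoid that, e.g.
#   headers = dict(self.cs_client._headers)
# The same applies to the get_samplev2/get_samplev3 helpers further below.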
def get_host_groups_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'devices/entities/host-groups/v1', params=params, headers=headers)
return response
def get_incidents_request(self, msa_idsrequest_ids):
data = assign_params(ids=msa_idsrequest_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'incidents/entities/incidents/GET/v1', json_data=data, headers=headers)
return response
def get_intel_actor_entities_request(self, ids, fields):
params = assign_params(ids=ids, fields=fields)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'intel/entities/actors/v1', params=params, headers=headers)
return response
def get_intel_indicator_entities_request(self, msa_idsrequest_ids):
data = assign_params(ids=msa_idsrequest_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'intel/entities/indicators/GET/v1', json_data=data, headers=headers)
return response
def get_intel_report_entities_request(self, ids, fields):
params = assign_params(ids=ids, fields=fields)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'intel/entities/reports/v1', params=params, headers=headers)
return response
def get_intel_reportpdf_request(self, id_):
params = assign_params(id=id_)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'intel/entities/report-files/v1', params=params, headers=headers)
return response
def get_intel_rule_entities_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'intel/entities/rules/v1', params=params, headers=headers)
return response
def get_intel_rule_file_request(self, id_, format):
params = assign_params(id=id_, format=format)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'intel/entities/rules-files/v1', params=params, headers=headers)
return response
def get_latest_intel_rule_file_request(self, type_, format):
params = assign_params(type=type_, format=format)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'intel/entities/rules-latest-files/v1', params=params, headers=headers)
return response
def get_locations_request(self, clouds):
params = assign_params(clouds=clouds)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'kubernetes-protection/entities/cloud-locations/v1', params=params, headers=headers)
return response
def get_mal_query_downloadv1_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'malquery/entities/download-files/v1', params=params, headers=headers)
return response
def get_mal_query_entities_samples_fetchv1_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'malquery/entities/samples-fetch/v1', params=params, headers=headers)
return response
def get_mal_query_metadatav1_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'malquery/entities/metadata/v1', params=params, headers=headers)
return response
def get_mal_query_quotasv1_request(self):
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'malquery/aggregates/quotas/v1', headers=headers)
return response
def get_mal_query_requestv1_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'malquery/entities/requests/v1', params=params, headers=headers)
return response
def get_notifications_detailed_translatedv1_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'recon/entities/notifications-detailed-translated/v1', params=params, headers=headers)
return response
def get_notifications_detailedv1_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'recon/entities/notifications-detailed/v1', params=params, headers=headers)
return response
def get_notifications_translatedv1_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'recon/entities/notifications-translated/v1', params=params, headers=headers)
return response
def get_notificationsv1_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'recon/entities/notifications/v1', params=params, headers=headers)
return response
def get_prevention_policies_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/entities/prevention/v1', params=params, headers=headers)
return response
def get_reports_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'falconx/entities/reports/v1', params=params, headers=headers)
return response
def get_roles_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'user-roles/entities/user-roles/v1', params=params, headers=headers)
return response
def get_roles_byid_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'mssp/entities/mssp-roles/v1', params=params, headers=headers)
return response
def get_rulesv1_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'recon/entities/rules/v1', params=params, headers=headers)
return response
def get_samplev2_request(self, ids, password_protected):
params = assign_params(ids=ids, password_protected=password_protected)
headers = self.cs_client._headers
headers['Accept'] = 'application/octet-stream'
response = self.cs_client.http_request('get', 'samples/entities/samples/v2', params=params, headers=headers)
return response
def get_samplev3_request(self, ids, password_protected):
params = assign_params(ids=ids, password_protected=password_protected)
headers = self.cs_client._headers
headers['Accept'] = 'application/octet-stream'
response = self.cs_client.http_request('get', 'samples/entities/samples/v3', params=params, headers=headers)
return response
def get_scans_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'scanner/entities/scans/v1', params=params, headers=headers)
return response
def get_scans_aggregates_request(self, msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing, msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type):
data = assign_params(date_ranges=msa_aggregatequeryrequest_date_ranges, field=msa_aggregatequeryrequest_field, filter=msa_aggregatequeryrequest_filter, interval=msa_aggregatequeryrequest_interval, min_doc_count=msa_aggregatequeryrequest_min_doc_count, missing=msa_aggregatequeryrequest_missing,
name=msa_aggregatequeryrequest_name, q=msa_aggregatequeryrequest_q, ranges=msa_aggregatequeryrequest_ranges, size=msa_aggregatequeryrequest_size, sort=msa_aggregatequeryrequest_sort, sub_aggregates=msa_aggregatequeryrequest_sub_aggregates, time_zone=msa_aggregatequeryrequest_time_zone, type=msa_aggregatequeryrequest_type)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'scanner/aggregates/scans/GET/v1', json_data=data, headers=headers)
return response
def get_sensor_installers_by_query_request(self, offset, limit, sort, filter_):
params = assign_params(offset=offset, limit=limit, sort=sort, filter=filter_)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'sensors/queries/installers/v1', params=params, headers=headers)
return response
def get_sensor_installers_entities_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'sensors/entities/installers/v1', params=params, headers=headers)
return response
def get_sensor_installersccid_by_query_request(self):
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'sensors/queries/installers/ccid/v1', headers=headers)
return response
def get_sensor_update_policies_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/entities/sensor-update/v1', params=params, headers=headers)
return response
def get_sensor_update_policiesv2_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/entities/sensor-update/v2', params=params, headers=headers)
return response
def get_sensor_visibility_exclusionsv1_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/entities/sv-exclusions/v1', params=params, headers=headers)
return response
def get_submissions_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'falconx/entities/submissions/v1', params=params, headers=headers)
return response
def get_summary_reports_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'falconx/entities/report-summaries/v1', params=params, headers=headers)
return response
def get_user_group_members_byid_request(self, user_group_ids):
params = assign_params(user_group_ids=user_group_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'mssp/entities/user-group-members/v1', params=params, headers=headers)
return response
def get_user_groups_byid_request(self, user_group_ids):
params = assign_params(user_group_ids=user_group_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'mssp/entities/user-groups/v1', params=params, headers=headers)
return response
def get_user_role_ids_request(self, user_uuid):
params = assign_params(user_uuid=user_uuid)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'user-roles/queries/user-role-ids-by-user-uuid/v1', params=params, headers=headers)
return response
def get_vulnerabilities_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'spotlight/entities/vulnerabilities/v2', params=params, headers=headers)
return response
def getaws_accounts_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'cloud-connect-aws/entities/accounts/v1', params=params, headers=headers)
return response
def getaws_accounts_mixin0_request(self, ids, status, limit, offset):
params = assign_params(ids=ids, status=status, limit=limit, offset=offset)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'kubernetes-protection/entities/accounts/aws/v1', params=params, headers=headers)
return response
def getaws_settings_request(self):
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'cloud-connect-aws/combined/settings/v1', headers=headers)
return response
def getcid_group_by_id_request(self, cid_group_ids):
params = assign_params(cid_group_ids=cid_group_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'mssp/entities/cid-groups/v1', params=params, headers=headers)
return response
def getcid_group_members_by_request(self, cid_group_ids):
params = assign_params(cid_group_ids=cid_group_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'mssp/entities/cid-group-members/v1', params=params, headers=headers)
return response
def getcspm_aws_account_request(self, scan_type, ids, organization_ids, status, limit, offset, group_by):
params = assign_params(scan_type=scan_type, ids=ids, organization_ids=organization_ids,
status=status, limit=limit, offset=offset, group_by=group_by)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'cloud-connect-cspm-aws/entities/account/v1', params=params, headers=headers)
return response
def getcspm_aws_account_scripts_attachment_request(self):
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'cloud-connect-cspm-aws/entities/user-scripts-download/v1', headers=headers)
return response
def getcspm_aws_console_setupur_ls_request(self):
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'cloud-connect-cspm-aws/entities/console-setup-urls/v1', headers=headers)
return response
def getcspm_azure_user_scripts_request(self):
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'cloud-connect-azure/entities/user-scripts/v1', headers=headers)
return response
def getcspm_policy_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'settings/entities/policy-details/v1', params=params, headers=headers)
return response
def getcspm_policy_settings_request(self, service, policy_id, cloud_platform):
params = assign_params(service=service, policy_id=policy_id, cloud_platform=cloud_platform)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'settings/entities/policy/v1', params=params, headers=headers)
return response
def getcspm_scan_schedule_request(self, cloud_platform):
params = assign_params(cloud_platform=cloud_platform)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'settings/scan-schedule/v1', params=params, headers=headers)
return response
def getcspmcgp_account_request(self, scan_type, ids):
params = assign_params(scan_type=scan_type, ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'cloud-connect-gcp/entities/account/v1', params=params, headers=headers)
return response
def getcspmgcp_user_scripts_request(self):
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'cloud-connect-gcp/entities/user-scripts/v1', headers=headers)
return response
def getcspmgcp_user_scripts_attachment_request(self):
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'cloud-connect-gcp/entities/user-scripts-download/v1', headers=headers)
return response
def getevents_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'fwmgr/entities/events/v1', params=params, headers=headers)
return response
def getfirewallfields_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'fwmgr/entities/firewall-fields/v1', params=params, headers=headers)
return response
def getioa_events_request(self, policy_id, cloud_provider, account_id, azure_tenant_id, user_ids, offset, limit):
params = assign_params(policy_id=policy_id, cloud_provider=cloud_provider, account_id=account_id,
azure_tenant_id=azure_tenant_id, user_ids=user_ids, offset=offset, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'ioa/entities/events/v1', params=params, headers=headers)
return response
def getioa_exclusionsv1_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/entities/ioa-exclusions/v1', params=params, headers=headers)
return response
def getioa_users_request(self, policy_id, cloud_provider, account_id, azure_tenant_id):
params = assign_params(policy_id=policy_id, cloud_provider=cloud_provider,
account_id=account_id, azure_tenant_id=azure_tenant_id)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'ioa/entities/users/v1', params=params, headers=headers)
return response
def getioc_request(self, type_, value):
params = assign_params(type=type_, value=value)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'indicators/entities/iocs/v1', params=params, headers=headers)
return response
def getml_exclusionsv1_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/entities/ml-exclusions/v1', params=params, headers=headers)
return response
def getpatterns_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'ioarules/entities/pattern-severities/v1', params=params, headers=headers)
return response
def getplatforms_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'fwmgr/entities/platforms/v1', params=params, headers=headers)
return response
def getplatforms_mixin0_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'ioarules/entities/platforms/v1', params=params, headers=headers)
return response
def getpolicycontainers_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'fwmgr/entities/policies/v1', params=params, headers=headers)
return response
def getrt_response_policies_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/entities/response/v1', params=params, headers=headers)
return response
def getrulegroups_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'fwmgr/entities/rule-groups/v1', params=params, headers=headers)
return response
def getrulegroups_mixin0_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'ioarules/entities/rule-groups/v1', params=params, headers=headers)
return response
def getrules_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'fwmgr/entities/rules/v1', params=params, headers=headers)
return response
def getrules_mixin0_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'ioarules/entities/rules/v1', params=params, headers=headers)
return response
def getrulesget_request(self, api_rulesgetrequestv1_ids):
data = assign_params(ids=api_rulesgetrequestv1_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'ioarules/entities/rules/GET/v1', json_data=data, headers=headers)
return response
def getruletypes_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'ioarules/entities/rule-types/v1', params=params, headers=headers)
return response
def grant_user_role_ids_request(self, user_uuid, domain_roleids_roleids):
params = assign_params(user_uuid=user_uuid)
data = assign_params(roleIds=domain_roleids_roleids)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'user-roles/entities/user-roles/v1',
params=params, json_data=data, headers=headers)
return response
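# Hypothetical usage sketch: granting roles to a user by UUID, where roleIds is
# a list of role-name strings such as ['falconhost_read'] (illustrative value):
#   client.grant_user_role_ids_request(
#       user_uuid='<user-uuid>',
#       domain_roleids_roleids=['falconhost_read'])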
def indicatorcombinedv1_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'iocs/combined/indicator/v1', params=params, headers=headers)
return response
def indicatorcreatev1_request(self, retrodetects, ignore_warnings, api_indicatorcreatereqsv1_comment, api_indicatorcreatereqsv1_indicators):
params = assign_params(retrodetects=retrodetects, ignore_warnings=ignore_warnings)
data = assign_params(comment=api_indicatorcreatereqsv1_comment, indicators=api_indicatorcreatereqsv1_indicators)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'iocs/entities/indicators/v1',
params=params, json_data=data, headers=headers)
return response
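# Hypothetical usage sketch: each indicator dict follows the
# api.IndicatorCreateReqV1 shape (field names assumed from the Falcon IOC API):
#   client.indicatorcreatev1_request(
#       retrodetects=False, ignore_warnings=False,
#       api_indicatorcreatereqsv1_comment='block known-bad hash',
#       api_indicatorcreatereqsv1_indicators=[{
#           'type': 'sha256', 'value': '<hash>', 'action': 'prevent',
#           'severity': 'high', 'platforms': ['windows']}])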
def indicatordeletev1_request(self, filter_, ids, comment):
params = assign_params(filter=filter_, ids=ids, comment=comment)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'iocs/entities/indicators/v1', params=params, headers=headers)
return response
def indicatorgetv1_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'iocs/entities/indicators/v1', params=params, headers=headers)
return response
def indicatorsearchv1_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'iocs/queries/indicators/v1', params=params, headers=headers)
return response
def indicatorupdatev1_request(self, retrodetects, ignore_warnings, api_indicatorupdatereqsv1_bulk_update, api_indicatorupdatereqsv1_comment, api_indicatorupdatereqsv1_indicators):
params = assign_params(retrodetects=retrodetects, ignore_warnings=ignore_warnings)
data = assign_params(bulk_update=api_indicatorupdatereqsv1_bulk_update,
comment=api_indicatorupdatereqsv1_comment, indicators=api_indicatorupdatereqsv1_indicators)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'iocs/entities/indicators/v1',
params=params, json_data=data, headers=headers)
return response
def list_available_streamso_auth2_request(self, appId, format):
params = assign_params(appId=appId, format=format)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'sensors/entities/datafeed/v2', params=params, headers=headers)
return response
def oauth2_access_token_request(self, client_id, client_secret, member_cid):
data = assign_params(client_id=client_id, client_secret=client_secret, member_cid=member_cid)
headers = self.cs_client._headers
headers['Content-Type'] = 'application/x-www-form-urlencoded'
response = self.cs_client.http_request('post', 'oauth2/token', json_data=data, headers=headers)
return response
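# Note: the token endpoint expects application/x-www-form-urlencoded, yet the
# payload is passed as json_data; this assumes cs_client.http_request serializes
# the body according to the Content-Type header rather than always emitting JSON.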
def oauth2_revoke_token_request(self, token):
data = assign_params(token=token)
headers = self.cs_client._headers
headers['Content-Type'] = 'application/x-www-form-urlencoded'
response = self.cs_client.http_request('post', 'oauth2/revoke', json_data=data, headers=headers)
return response
def patch_cloudconnectazure_entities_clientid_v1_request(self, id_):
params = assign_params(id=id_)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'patch', 'cloud-connect-azure/entities/client-id/v1', params=params, headers=headers)
return response
def patch_cloudconnectcspmazure_entities_clientid_v1_request(self, id_, tenant_id):
params = assign_params(id=id_, tenant_id=tenant_id)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'patch', 'cloud-connect-cspm-azure/entities/client-id/v1', params=params, headers=headers)
return response
def patchcspm_aws_account_request(self, registration_awsaccountpatchrequest_resources):
data = assign_params(resources=registration_awsaccountpatchrequest_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'patch', 'cloud-connect-cspm-aws/entities/account/v1', json_data=data, headers=headers)
return response
def perform_actionv2_request(self, action_name, msa_entityactionrequestv2_action_parameters, msa_entityactionrequestv2_ids):
params = assign_params(action_name=action_name)
data = assign_params(action_parameters=msa_entityactionrequestv2_action_parameters, ids=msa_entityactionrequestv2_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'devices/entities/devices-actions/v2',
params=params, json_data=data, headers=headers)
return response
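# Hypothetical usage sketch: network-containing a host (action names assumed
# from the Falcon devices-actions API, e.g. 'contain' / 'lift_containment'):
#   client.perform_actionv2_request(
#       action_name='contain',
#       msa_entityactionrequestv2_action_parameters=None,
#       msa_entityactionrequestv2_ids=['<device-id>'])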
def perform_device_control_policies_action_request(self, action_name, msa_entityactionrequestv2_action_parameters, msa_entityactionrequestv2_ids):
params = assign_params(action_name=action_name)
data = assign_params(action_parameters=msa_entityactionrequestv2_action_parameters, ids=msa_entityactionrequestv2_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'policy/entities/device-control-actions/v1',
params=params, json_data=data, headers=headers)
return response
def perform_firewall_policies_action_request(self, action_name, msa_entityactionrequestv2_action_parameters, msa_entityactionrequestv2_ids):
params = assign_params(action_name=action_name)
data = assign_params(action_parameters=msa_entityactionrequestv2_action_parameters, ids=msa_entityactionrequestv2_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'policy/entities/firewall-actions/v1',
params=params, json_data=data, headers=headers)
return response
def perform_group_action_request(self, action_name, msa_entityactionrequestv2_action_parameters, msa_entityactionrequestv2_ids):
params = assign_params(action_name=action_name)
data = assign_params(action_parameters=msa_entityactionrequestv2_action_parameters, ids=msa_entityactionrequestv2_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'devices/entities/host-group-actions/v1',
params=params, json_data=data, headers=headers)
return response
def perform_incident_action_request(self, msa_entityactionrequestv2_action_parameters, msa_entityactionrequestv2_ids):
data = assign_params(action_parameters=msa_entityactionrequestv2_action_parameters, ids=msa_entityactionrequestv2_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'incidents/entities/incident-actions/v1', json_data=data, headers=headers)
return response
def perform_prevention_policies_action_request(self, action_name, msa_entityactionrequestv2_action_parameters, msa_entityactionrequestv2_ids):
params = assign_params(action_name=action_name)
data = assign_params(action_parameters=msa_entityactionrequestv2_action_parameters, ids=msa_entityactionrequestv2_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'policy/entities/prevention-actions/v1',
params=params, json_data=data, headers=headers)
return response
def perform_sensor_update_policies_action_request(self, action_name, msa_entityactionrequestv2_action_parameters, msa_entityactionrequestv2_ids):
params = assign_params(action_name=action_name)
data = assign_params(action_parameters=msa_entityactionrequestv2_action_parameters, ids=msa_entityactionrequestv2_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'policy/entities/sensor-update-actions/v1',
params=params, json_data=data, headers=headers)
return response
def performrt_response_policies_action_request(self, action_name, msa_entityactionrequestv2_action_parameters, msa_entityactionrequestv2_ids):
params = assign_params(action_name=action_name)
data = assign_params(action_parameters=msa_entityactionrequestv2_action_parameters, ids=msa_entityactionrequestv2_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'policy/entities/response-actions/v1',
params=params, json_data=data, headers=headers)
return response
def post_cloudconnectazure_entities_account_v1_request(self, registration_azureaccountcreaterequestexternalv1_resources):
data = assign_params(resources=registration_azureaccountcreaterequestexternalv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'cloud-connect-azure/entities/account/v1', json_data=data, headers=headers)
return response
def post_cloudconnectcspmazure_entities_account_v1_request(self, registration_azureaccountcreaterequestexternalv1_resources):
data = assign_params(resources=registration_azureaccountcreaterequestexternalv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'cloud-connect-cspm-azure/entities/account/v1', json_data=data, headers=headers)
return response
def post_mal_query_entities_samples_multidownloadv1_request(self, malquery_multidownloadrequestv1_samples):
data = assign_params(samples=malquery_multidownloadrequestv1_samples)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'malquery/entities/samples-multidownload/v1', json_data=data, headers=headers)
return response
def post_mal_query_exact_searchv1_request(self, malquery_externalexactsearchparametersv1_options, malquery_externalexactsearchparametersv1_patterns):
data = assign_params(options=malquery_externalexactsearchparametersv1_options,
patterns=malquery_externalexactsearchparametersv1_patterns)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'malquery/queries/exact-search/v1', json_data=data, headers=headers)
return response
def post_mal_query_fuzzy_searchv1_request(self, malquery_fuzzysearchparametersv1_options, malquery_fuzzysearchparametersv1_patterns):
data = assign_params(options=malquery_fuzzysearchparametersv1_options, patterns=malquery_fuzzysearchparametersv1_patterns)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'malquery/combined/fuzzy-search/v1', json_data=data, headers=headers)
return response
def post_mal_query_huntv1_request(self, malquery_externalhuntparametersv1_options, malquery_externalhuntparametersv1_yara_rule):
data = assign_params(options=malquery_externalhuntparametersv1_options,
yara_rule=malquery_externalhuntparametersv1_yara_rule)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'malquery/queries/hunt/v1', json_data=data, headers=headers)
return response
def preview_rulev1_request(self, domain_rulepreviewrequest_filter, domain_rulepreviewrequest_topic):
data = assign_params(filter=domain_rulepreviewrequest_filter, topic=domain_rulepreviewrequest_topic)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'recon/aggregates/rules-preview/GET/v1', json_data=data, headers=headers)
return response
def processes_ran_on_request(self, type_, value, device_id, limit, offset):
params = assign_params(type=type_, value=value, device_id=device_id, limit=limit, offset=offset)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'indicators/queries/processes/v1', params=params, headers=headers)
return response
def provisionaws_accounts_request(self, mode, models_createawsaccountsv1_resources):
params = assign_params(mode=mode)
data = assign_params(resources=models_createawsaccountsv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'cloud-connect-aws/entities/accounts/v1',
params=params, json_data=data, headers=headers)
return response
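# The `mode` query parameter is assumed to select the AWS provisioning flow
# (the Falcon docs describe 'cloudformation' and 'manual' modes).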
def query_actionsv1_request(self, offset, limit, sort, filter_, q):
params = assign_params(offset=offset, limit=limit, sort=sort, filter=filter_, q=q)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'recon/queries/actions/v1', params=params, headers=headers)
return response
def query_allow_list_filter_request(self, limit, sort, filter_, offset):
params = assign_params(limit=limit, sort=sort, filter=filter_, offset=offset)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'falcon-complete-dashboards/queries/allowlist/v1', params=params, headers=headers)
return response
def query_behaviors_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'incidents/queries/behaviors/v1', params=params, headers=headers)
return response
def query_block_list_filter_request(self, limit, sort, filter_, offset):
params = assign_params(limit=limit, sort=sort, filter=filter_, offset=offset)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'falcon-complete-dashboards/queries/blocklist/v1', params=params, headers=headers)
return response
def query_children_request(self, sort, offset, limit):
params = assign_params(sort=sort, offset=offset, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'mssp/queries/children/v1', params=params, headers=headers)
return response
def query_combined_device_control_policies_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/combined/device-control/v1', params=params, headers=headers)
return response
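# The query_combined_* helpers page through full resource objects using an FQL
# `filter` plus offset/limit/sort, whereas the queries/* endpoints elsewhere in
# this class return only IDs; the combined form saves a follow-up entity lookup.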
def query_combined_device_control_policy_members_request(self, id_, filter_, offset, limit, sort):
params = assign_params(id=id_, filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/combined/device-control-members/v1', params=params, headers=headers)
return response
def query_combined_firewall_policies_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/combined/firewall/v1', params=params, headers=headers)
return response
def query_combined_firewall_policy_members_request(self, id_, filter_, offset, limit, sort):
params = assign_params(id=id_, filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/combined/firewall-members/v1', params=params, headers=headers)
return response
def query_combined_group_members_request(self, id_, filter_, offset, limit, sort):
params = assign_params(id=id_, filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'devices/combined/host-group-members/v1', params=params, headers=headers)
return response
def query_combined_host_groups_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'devices/combined/host-groups/v1', params=params, headers=headers)
return response
def query_combined_prevention_policies_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/combined/prevention/v1', params=params, headers=headers)
return response
def query_combined_prevention_policy_members_request(self, id_, filter_, offset, limit, sort):
params = assign_params(id=id_, filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/combined/prevention-members/v1', params=params, headers=headers)
return response
def query_combined_sensor_update_builds_request(self, platform):
params = assign_params(platform=platform)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/combined/sensor-update-builds/v1', params=params, headers=headers)
return response
def query_combined_sensor_update_policies_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/combined/sensor-update/v1', params=params, headers=headers)
return response
def query_combined_sensor_update_policiesv2_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/combined/sensor-update/v2', params=params, headers=headers)
return response
def query_combined_sensor_update_policy_members_request(self, id_, filter_, offset, limit, sort):
params = assign_params(id=id_, filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/combined/sensor-update-members/v1', params=params, headers=headers)
return response
def query_combinedrt_response_policies_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/combined/response/v1', params=params, headers=headers)
return response
def query_combinedrt_response_policy_members_request(self, id_, filter_, offset, limit, sort):
params = assign_params(id=id_, filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/combined/response-members/v1', params=params, headers=headers)
return response
def query_detection_ids_by_filter_request(self, limit, sort, filter_, offset):
params = assign_params(limit=limit, sort=sort, filter=filter_, offset=offset)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'falcon-complete-dashboards/queries/detects/v1', params=params, headers=headers)
return response
def query_detects_request(self, offset, limit, sort, filter_, q):
params = assign_params(offset=offset, limit=limit, sort=sort, filter=filter_, q=q)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'detects/queries/detects/v1', params=params, headers=headers)
return response
def query_device_control_policies_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/queries/device-control/v1', params=params, headers=headers)
return response
def query_device_control_policy_members_request(self, id_, filter_, offset, limit, sort):
params = assign_params(id=id_, filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/queries/device-control-members/v1', params=params, headers=headers)
return response
def query_devices_by_filter_request(self, offset, limit, sort, filter_):
params = assign_params(offset=offset, limit=limit, sort=sort, filter=filter_)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'devices/queries/devices/v1', params=params, headers=headers)
return response
def query_devices_by_filter_scroll_request(self, offset, limit, sort, filter_):
params = assign_params(offset=offset, limit=limit, sort=sort, filter=filter_)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'devices/queries/devices-scroll/v1', params=params, headers=headers)
return response
def query_escalations_filter_request(self, limit, sort, filter_, offset):
params = assign_params(limit=limit, sort=sort, filter=filter_, offset=offset)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'falcon-complete-dashboards/queries/escalations/v1', params=params, headers=headers)
return response
def query_firewall_policies_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/queries/firewall/v1', params=params, headers=headers)
return response
def query_firewall_policy_members_request(self, id_, filter_, offset, limit, sort):
params = assign_params(id=id_, filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/queries/firewall-members/v1', params=params, headers=headers)
return response
def query_group_members_request(self, id_, filter_, offset, limit, sort):
params = assign_params(id=id_, filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'devices/queries/host-group-members/v1', params=params, headers=headers)
return response
def query_hidden_devices_request(self, offset, limit, sort, filter_):
params = assign_params(offset=offset, limit=limit, sort=sort, filter=filter_)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'devices/queries/devices-hidden/v1', params=params, headers=headers)
return response
def query_host_groups_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'devices/queries/host-groups/v1', params=params, headers=headers)
return response
def query_incident_ids_by_filter_request(self, limit, sort, filter_, offset):
params = assign_params(limit=limit, sort=sort, filter=filter_, offset=offset)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'falcon-complete-dashboards/queries/incidents/v1', params=params, headers=headers)
return response
def query_incidents_request(self, sort, filter_, offset, limit):
params = assign_params(sort=sort, filter=filter_, offset=offset, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'incidents/queries/incidents/v1', params=params, headers=headers)
return response
def query_intel_actor_entities_request(self, offset, limit, sort, filter_, q, fields):
params = assign_params(offset=offset, limit=limit, sort=sort, filter=filter_, q=q, fields=fields)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'intel/combined/actors/v1', params=params, headers=headers)
return response
def query_intel_actor_ids_request(self, offset, limit, sort, filter_, q):
params = assign_params(offset=offset, limit=limit, sort=sort, filter=filter_, q=q)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'intel/queries/actors/v1', params=params, headers=headers)
return response
def query_intel_indicator_entities_request(self, offset, limit, sort, filter_, q, include_deleted):
params = assign_params(offset=offset, limit=limit, sort=sort, filter=filter_, q=q, include_deleted=include_deleted)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'intel/combined/indicators/v1', params=params, headers=headers)
return response
def query_intel_indicator_ids_request(self, offset, limit, sort, filter_, q, include_deleted):
params = assign_params(offset=offset, limit=limit, sort=sort, filter=filter_, q=q, include_deleted=include_deleted)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'intel/queries/indicators/v1', params=params, headers=headers)
return response
def query_intel_report_entities_request(self, offset, limit, sort, filter_, q, fields):
params = assign_params(offset=offset, limit=limit, sort=sort, filter=filter_, q=q, fields=fields)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'intel/combined/reports/v1', params=params, headers=headers)
return response
def query_intel_report_ids_request(self, offset, limit, sort, filter_, q):
params = assign_params(offset=offset, limit=limit, sort=sort, filter=filter_, q=q)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'intel/queries/reports/v1', params=params, headers=headers)
return response
def query_intel_rule_ids_request(self, offset, limit, sort, name, type_, description, tags, min_created_date, max_created_date, q):
params = assign_params(offset=offset, limit=limit, sort=sort, name=name, type=type_, description=description,
tags=tags, min_created_date=min_created_date, max_created_date=max_created_date, q=q)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'intel/queries/rules/v1', params=params, headers=headers)
return response
def query_notificationsv1_request(self, offset, limit, sort, filter_, q):
params = assign_params(offset=offset, limit=limit, sort=sort, filter=filter_, q=q)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'recon/queries/notifications/v1', params=params, headers=headers)
return response
def query_prevention_policies_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/queries/prevention/v1', params=params, headers=headers)
return response
def query_prevention_policy_members_request(self, id_, filter_, offset, limit, sort):
params = assign_params(id=id_, filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/queries/prevention-members/v1', params=params, headers=headers)
return response
def query_remediations_filter_request(self, limit, sort, filter_, offset):
params = assign_params(limit=limit, sort=sort, filter=filter_, offset=offset)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'falcon-complete-dashboards/queries/remediations/v1', params=params, headers=headers)
return response
def query_reports_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'falconx/queries/reports/v1', params=params, headers=headers)
return response
def query_roles_request(self, user_group_id, cid_group_id, role_id, sort, offset, limit):
params = assign_params(user_group_id=user_group_id, cid_group_id=cid_group_id,
role_id=role_id, sort=sort, offset=offset, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'mssp/queries/mssp-roles/v1', params=params, headers=headers)
return response
def query_rulesv1_request(self, offset, limit, sort, filter_, q):
params = assign_params(offset=offset, limit=limit, sort=sort, filter=filter_, q=q)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'recon/queries/rules/v1', params=params, headers=headers)
return response
def query_samplev1_request(self, samplestore_querysamplesrequest_sha256s):
data = assign_params(sha256s=samplestore_querysamplesrequest_sha256s)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'samples/queries/samples/GET/v1', json_data=data, headers=headers)
return response
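# Note on the endpoint above: 'samples/queries/samples/GET/v1' is one of
# several Falcon routes that are invoked with POST against a '/GET/v1' path.
# The convention lets large lists (here SHA256s) travel in a JSON body rather
# than an over-long query string; the RTR batch lookups further below use the
# same idiom.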
def query_sensor_update_policies_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/queries/sensor-update/v1', params=params, headers=headers)
return response
def query_sensor_update_policy_members_request(self, id_, filter_, offset, limit, sort):
params = assign_params(id=id_, filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/queries/sensor-update-members/v1', params=params, headers=headers)
return response
def query_sensor_visibility_exclusionsv1_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/queries/sv-exclusions/v1', params=params, headers=headers)
return response
def query_submissions_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'falconx/queries/submissions/v1', params=params, headers=headers)
return response
def query_submissions_mixin0_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'scanner/queries/scans/v1', params=params, headers=headers)
return response
def query_user_group_members_request(self, user_uuid, sort, offset, limit):
params = assign_params(user_uuid=user_uuid, sort=sort, offset=offset, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'mssp/queries/user-group-members/v1', params=params, headers=headers)
return response
def query_user_groups_request(self, name, sort, offset, limit):
params = assign_params(name=name, sort=sort, offset=offset, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'mssp/queries/user-groups/v1', params=params, headers=headers)
return response
def query_vulnerabilities_request(self, after, limit, sort, filter_):
params = assign_params(after=after, limit=limit, sort=sort, filter=filter_)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'spotlight/queries/vulnerabilities/v1', params=params, headers=headers)
return response
def queryaws_accounts_request(self, limit, offset, sort, filter_):
params = assign_params(limit=limit, offset=offset, sort=sort, filter=filter_)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'cloud-connect-aws/combined/accounts/v1', params=params, headers=headers)
return response
def queryaws_accounts_fori_ds_request(self, limit, offset, sort, filter_):
params = assign_params(limit=limit, offset=offset, sort=sort, filter=filter_)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'cloud-connect-aws/queries/accounts/v1', params=params, headers=headers)
return response
def querycid_group_members_request(self, cid, sort, offset, limit):
params = assign_params(cid=cid, sort=sort, offset=offset, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'mssp/queries/cid-group-members/v1', params=params, headers=headers)
return response
def querycid_groups_request(self, name, sort, offset, limit):
params = assign_params(name=name, sort=sort, offset=offset, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'mssp/queries/cid-groups/v1', params=params, headers=headers)
return response
def queryevents_request(self, sort, filter_, q, offset, after, limit):
params = assign_params(sort=sort, filter=filter_, q=q, offset=offset, after=after, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'fwmgr/queries/events/v1', params=params, headers=headers)
return response
def queryfirewallfields_request(self, platform_id, offset, limit):
params = assign_params(platform_id=platform_id, offset=offset, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'fwmgr/queries/firewall-fields/v1', params=params, headers=headers)
return response
def queryio_cs_request(self, types, values, from_expiration_timestamp, to_expiration_timestamp, policies, sources, share_levels, created_by, deleted_by, include_deleted):
params = assign_params(types=types, values=values, from_expiration_timestamp=from_expiration_timestamp, to_expiration_timestamp=to_expiration_timestamp,
policies=policies, sources=sources, share_levels=share_levels, created_by=created_by, deleted_by=deleted_by, include_deleted=include_deleted)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'indicators/queries/iocs/v1', params=params, headers=headers)
return response
def queryioa_exclusionsv1_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/queries/ioa-exclusions/v1', params=params, headers=headers)
return response
def queryml_exclusionsv1_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/queries/ml-exclusions/v1', params=params, headers=headers)
return response
def querypatterns_request(self, offset, limit):
params = assign_params(offset=offset, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'ioarules/queries/pattern-severities/v1', params=params, headers=headers)
return response
def queryplatforms_request(self, offset, limit):
params = assign_params(offset=offset, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'fwmgr/queries/platforms/v1', params=params, headers=headers)
return response
def queryplatforms_mixin0_request(self, offset, limit):
params = assign_params(offset=offset, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'ioarules/queries/platforms/v1', params=params, headers=headers)
return response
def querypolicyrules_request(self, id_, sort, filter_, q, offset, limit):
params = assign_params(id=id_, sort=sort, filter=filter_, q=q, offset=offset, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'fwmgr/queries/policy-rules/v1', params=params, headers=headers)
return response
def queryrt_response_policies_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/queries/response/v1', params=params, headers=headers)
return response
def queryrt_response_policy_members_request(self, id_, filter_, offset, limit, sort):
params = assign_params(id=id_, filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'policy/queries/response-members/v1', params=params, headers=headers)
return response
def queryrulegroups_request(self, sort, filter_, q, offset, after, limit):
params = assign_params(sort=sort, filter=filter_, q=q, offset=offset, after=after, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'fwmgr/queries/rule-groups/v1', params=params, headers=headers)
return response
def queryrulegroups_mixin0_request(self, sort, filter_, q, offset, limit):
params = assign_params(sort=sort, filter=filter_, q=q, offset=offset, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'ioarules/queries/rule-groups/v1', params=params, headers=headers)
return response
def queryrulegroupsfull_request(self, sort, filter_, q, offset, limit):
params = assign_params(sort=sort, filter=filter_, q=q, offset=offset, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'ioarules/queries/rule-groups-full/v1', params=params, headers=headers)
return response
def queryrules_request(self, sort, filter_, q, offset, after, limit):
params = assign_params(sort=sort, filter=filter_, q=q, offset=offset, after=after, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'fwmgr/queries/rules/v1', params=params, headers=headers)
return response
def queryrules_mixin0_request(self, sort, filter_, q, offset, limit):
params = assign_params(sort=sort, filter=filter_, q=q, offset=offset, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'ioarules/queries/rules/v1', params=params, headers=headers)
return response
def queryruletypes_request(self, offset, limit):
params = assign_params(offset=offset, limit=limit)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'ioarules/queries/rule-types/v1', params=params, headers=headers)
return response
def refresh_active_stream_session_request(self, action_name, appId, partition):
params = assign_params(action_name=action_name, appId=appId)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', f'sensors/entities/datafeed-actions/v1/{partition}', params=params, headers=headers)
return response
def regenerateapi_key_request(self):
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'kubernetes-protection/entities/integration/api-key/v1', headers=headers)
return response
def retrieve_emails_bycid_request(self):
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'users/queries/emails-by-cid/v1', headers=headers)
return response
def retrieve_user_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'users/entities/users/v1', params=params, headers=headers)
return response
def retrieve_useruui_ds_bycid_request(self):
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'users/queries/user-uuids-by-cid/v1', headers=headers)
return response
def retrieve_useruuid_request(self, uid):
params = assign_params(uid=uid)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'users/queries/user-uuids-by-email/v1', params=params, headers=headers)
return response
def reveal_uninstall_token_request(self, requests_revealuninstalltokenv1_audit_message, requests_revealuninstalltokenv1_device_id):
data = assign_params(audit_message=requests_revealuninstalltokenv1_audit_message,
device_id=requests_revealuninstalltokenv1_device_id)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'policy/combined/reveal-uninstall-token/v1', json_data=data, headers=headers)
return response
def revoke_user_role_ids_request(self, user_uuid, ids):
params = assign_params(user_uuid=user_uuid, ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'user-roles/entities/user-roles/v1', params=params, headers=headers)
return response
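# The rtr_* methods below wrap the Real Time Response (RTR) API. A rough
# session lifecycle, sketched under the assumption of a single reachable host
# AID (the response fields follow the Falcon 'resources' envelope):
#
#     init = client.rtr_init_session_request(device_id, 'xsoar', True)
#     session_id = init['resources'][0]['session_id']
#     run = client.rtr_execute_command_request('ls', 'ls /', device_id, None, False, session_id)
#     cloud_request_id = run['resources'][0]['cloud_request_id']
#     status = client.rtr_check_command_status_request(cloud_request_id, 0)
#     client.rtr_delete_session_request(session_id)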
def rtr_aggregate_sessions_request(self, msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing, msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type):
data = assign_params(date_ranges=msa_aggregatequeryrequest_date_ranges, field=msa_aggregatequeryrequest_field, filter=msa_aggregatequeryrequest_filter, interval=msa_aggregatequeryrequest_interval, min_doc_count=msa_aggregatequeryrequest_min_doc_count, missing=msa_aggregatequeryrequest_missing,
name=msa_aggregatequeryrequest_name, q=msa_aggregatequeryrequest_q, ranges=msa_aggregatequeryrequest_ranges, size=msa_aggregatequeryrequest_size, sort=msa_aggregatequeryrequest_sort, sub_aggregates=msa_aggregatequeryrequest_sub_aggregates, time_zone=msa_aggregatequeryrequest_time_zone, type=msa_aggregatequeryrequest_type)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'real-time-response/aggregates/sessions/GET/v1', json_data=data, headers=headers)
return response
def rtr_check_active_responder_command_status_request(self, cloud_request_id, sequence_id):
params = assign_params(cloud_request_id=cloud_request_id, sequence_id=sequence_id)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'real-time-response/entities/active-responder-command/v1', params=params, headers=headers)
return response
def rtr_check_admin_command_status_request(self, cloud_request_id, sequence_id):
params = assign_params(cloud_request_id=cloud_request_id, sequence_id=sequence_id)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'real-time-response/entities/admin-command/v1', params=params, headers=headers)
return response
def rtr_check_command_status_request(self, cloud_request_id, sequence_id):
params = assign_params(cloud_request_id=cloud_request_id, sequence_id=sequence_id)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'real-time-response/entities/command/v1', params=params, headers=headers)
return response
def rtr_create_put_files_request(self, file, description, name, comments_for_audit_log):
data = assign_params(file=file, description=description, name=name, comments_for_audit_log=comments_for_audit_log)
headers = dict(self.cs_client._headers)  # copy before overriding Content-Type so the change does not leak into later requests
headers['Content-Type'] = 'multipart/form-data'
response = self.cs_client.http_request(
'post', 'real-time-response/entities/put-files/v1', json_data=data, headers=headers)
return response
def rtr_create_scripts_request(self, file, description, name, comments_for_audit_log, permission_type, content, platform):
data = assign_params(file=file, description=description, name=name, comments_for_audit_log=comments_for_audit_log,
permission_type=permission_type, content=content, platform=platform)
headers = dict(self.cs_client._headers)  # copy: avoid mutating the shared headers dict
headers['Content-Type'] = 'multipart/form-data'
response = self.cs_client.http_request('post', 'real-time-response/entities/scripts/v1', json_data=data, headers=headers)
return response
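# Caveat for the two upload methods above: the endpoints expect
# 'multipart/form-data', yet the fields are serialized as a JSON body via
# json_data. Whether that round-trips correctly depends on how
# cs_client.http_request builds the request; a strict multipart upload would
# normally go through the HTTP library's files=/form-data mechanism instead.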
def rtr_delete_file_request(self, ids, session_id):
params = assign_params(ids=ids, session_id=session_id)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'real-time-response/entities/file/v1', params=params, headers=headers)
return response
def rtr_delete_put_files_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'delete', 'real-time-response/entities/put-files/v1', params=params, headers=headers)
return response
def rtr_delete_queued_session_request(self, session_id, cloud_request_id):
params = assign_params(session_id=session_id, cloud_request_id=cloud_request_id)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'delete', 'real-time-response/entities/queued-sessions/command/v1', params=params, headers=headers)
return response
def rtr_delete_scripts_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'real-time-response/entities/scripts/v1', params=params, headers=headers)
return response
def rtr_delete_session_request(self, session_id):
params = assign_params(session_id=session_id)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'delete', 'real-time-response/entities/sessions/v1', params=params, headers=headers)
return response
def rtr_execute_active_responder_command_request(self, domain_commandexecuterequest_base_command, domain_commandexecuterequest_command_string, domain_commandexecuterequest_device_id, domain_commandexecuterequest_id, domain_commandexecuterequest_persist, domain_commandexecuterequest_session_id):
data = assign_params(base_command=domain_commandexecuterequest_base_command, command_string=domain_commandexecuterequest_command_string, device_id=domain_commandexecuterequest_device_id,
id=domain_commandexecuterequest_id, persist=domain_commandexecuterequest_persist, session_id=domain_commandexecuterequest_session_id)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'real-time-response/entities/active-responder-command/v1', json_data=data, headers=headers)
return response
def rtr_execute_admin_command_request(self, domain_commandexecuterequest_base_command, domain_commandexecuterequest_command_string, domain_commandexecuterequest_device_id, domain_commandexecuterequest_id, domain_commandexecuterequest_persist, domain_commandexecuterequest_session_id):
data = assign_params(base_command=domain_commandexecuterequest_base_command, command_string=domain_commandexecuterequest_command_string, device_id=domain_commandexecuterequest_device_id,
id=domain_commandexecuterequest_id, persist=domain_commandexecuterequest_persist, session_id=domain_commandexecuterequest_session_id)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'real-time-response/entities/admin-command/v1', json_data=data, headers=headers)
return response
def rtr_execute_command_request(self, domain_commandexecuterequest_base_command, domain_commandexecuterequest_command_string, domain_commandexecuterequest_device_id, domain_commandexecuterequest_id, domain_commandexecuterequest_persist, domain_commandexecuterequest_session_id):
data = assign_params(base_command=domain_commandexecuterequest_base_command, command_string=domain_commandexecuterequest_command_string, device_id=domain_commandexecuterequest_device_id,
id=domain_commandexecuterequest_id, persist=domain_commandexecuterequest_persist, session_id=domain_commandexecuterequest_session_id)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'real-time-response/entities/command/v1', json_data=data, headers=headers)
return response
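# The three rtr_execute_*_command variants above map to the RTR privilege
# tiers: 'command' covers read-only commands, 'active-responder' adds
# state-changing commands (e.g. get, kill, rm), and 'admin' additionally
# permits put and runscript. Each has a matching
# rtr_check_*_command_status_request method for polling the returned
# cloud_request_id.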
def rtr_get_extracted_file_contents_request(self, session_id, sha256, filename):
params = assign_params(session_id=session_id, sha256=sha256, filename=filename)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'get', 'real-time-response/entities/extracted-file-contents/v1', params=params, headers=headers)
return response
def rtr_get_put_files_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'real-time-response/entities/put-files/v1', params=params, headers=headers)
return response
def rtr_get_scripts_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'real-time-response/entities/scripts/v1', params=params, headers=headers)
return response
def rtr_init_session_request(self, domain_initrequest_device_id, domain_initrequest_origin, domain_initrequest_queue_offline):
data = assign_params(device_id=domain_initrequest_device_id, origin=domain_initrequest_origin,
queue_offline=domain_initrequest_queue_offline)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'real-time-response/entities/sessions/v1', json_data=data, headers=headers)
return response
def rtr_list_all_sessions_request(self, offset, limit, sort, filter_):
params = assign_params(offset=offset, limit=limit, sort=sort, filter=filter_)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'real-time-response/queries/sessions/v1', params=params, headers=headers)
return response
def rtr_list_files_request(self, session_id):
params = assign_params(session_id=session_id)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'real-time-response/entities/file/v1', params=params, headers=headers)
return response
def rtr_list_put_files_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'real-time-response/queries/put-files/v1', params=params, headers=headers)
return response
def rtr_list_queued_sessions_request(self, msa_idsrequest_ids):
data = assign_params(ids=msa_idsrequest_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'real-time-response/entities/queued-sessions/GET/v1', json_data=data, headers=headers)
return response
def rtr_list_scripts_request(self, filter_, offset, limit, sort):
params = assign_params(filter=filter_, offset=offset, limit=limit, sort=sort)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'real-time-response/queries/scripts/v1', params=params, headers=headers)
return response
def rtr_list_sessions_request(self, msa_idsrequest_ids):
data = assign_params(ids=msa_idsrequest_ids)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'real-time-response/entities/sessions/GET/v1', json_data=data, headers=headers)
return response
def rtr_pulse_session_request(self, domain_initrequest_device_id, domain_initrequest_origin, domain_initrequest_queue_offline):
data = assign_params(device_id=domain_initrequest_device_id, origin=domain_initrequest_origin,
queue_offline=domain_initrequest_queue_offline)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'real-time-response/entities/refresh-session/v1', json_data=data, headers=headers)
return response
def rtr_update_scripts_request(self, id_, file, description, name, comments_for_audit_log, permission_type, content, platform):
data = assign_params(id=id_, file=file, description=description, name=name,
comments_for_audit_log=comments_for_audit_log, permission_type=permission_type, content=content, platform=platform)
headers = dict(self.cs_client._headers)  # copy: avoid mutating the shared headers dict
headers['Content-Type'] = 'multipart/form-data'
response = self.cs_client.http_request('patch', 'real-time-response/entities/scripts/v1', json_data=data, headers=headers)
return response
def scan_samples_request(self, mlscanner_samplesscanparameters_samples):
data = assign_params(samples=mlscanner_samplesscanparameters_samples)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'scanner/entities/scans/v1', json_data=data, headers=headers)
return response
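# For the set_*_policies_precedence methods below, the order of the IDs in
# requests_setpolicyprecedencereqv1_ids is the precedence order Falcon will
# apply for the given platform_name; the list is expected to contain every
# policy ID of that type for that platform.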
def set_device_control_policies_precedence_request(self, requests_setpolicyprecedencereqv1_ids, requests_setpolicyprecedencereqv1_platform_name):
data = assign_params(ids=requests_setpolicyprecedencereqv1_ids,
platform_name=requests_setpolicyprecedencereqv1_platform_name)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'policy/entities/device-control-precedence/v1', json_data=data, headers=headers)
return response
def set_firewall_policies_precedence_request(self, requests_setpolicyprecedencereqv1_ids, requests_setpolicyprecedencereqv1_platform_name):
data = assign_params(ids=requests_setpolicyprecedencereqv1_ids,
platform_name=requests_setpolicyprecedencereqv1_platform_name)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'policy/entities/firewall-precedence/v1', json_data=data, headers=headers)
return response
def set_prevention_policies_precedence_request(self, requests_setpolicyprecedencereqv1_ids, requests_setpolicyprecedencereqv1_platform_name):
data = assign_params(ids=requests_setpolicyprecedencereqv1_ids,
platform_name=requests_setpolicyprecedencereqv1_platform_name)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'policy/entities/prevention-precedence/v1', json_data=data, headers=headers)
return response
def set_sensor_update_policies_precedence_request(self, requests_setpolicyprecedencereqv1_ids, requests_setpolicyprecedencereqv1_platform_name):
data = assign_params(ids=requests_setpolicyprecedencereqv1_ids,
platform_name=requests_setpolicyprecedencereqv1_platform_name)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'policy/entities/sensor-update-precedence/v1', json_data=data, headers=headers)
return response
def setrt_response_policies_precedence_request(self, requests_setpolicyprecedencereqv1_ids, requests_setpolicyprecedencereqv1_platform_name):
data = assign_params(ids=requests_setpolicyprecedencereqv1_ids,
platform_name=requests_setpolicyprecedencereqv1_platform_name)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'policy/entities/response-precedence/v1', json_data=data, headers=headers)
return response
def submit_request(self, falconx_submissionparametersv1_sandbox, falconx_submissionparametersv1_user_tags):
data = assign_params(sandbox=falconx_submissionparametersv1_sandbox, user_tags=falconx_submissionparametersv1_user_tags)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'falconx/entities/submissions/v1', json_data=data, headers=headers)
return response
def tokenscreate_request(self, api_tokencreaterequestv1_expires_timestamp, api_tokencreaterequestv1_label, api_tokencreaterequestv1_type):
data = assign_params(expires_timestamp=api_tokencreaterequestv1_expires_timestamp,
label=api_tokencreaterequestv1_label, type=api_tokencreaterequestv1_type)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'installation-tokens/entities/tokens/v1', json_data=data, headers=headers)
return response
def tokensdelete_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('delete', 'installation-tokens/entities/tokens/v1', params=params, headers=headers)
return response
def tokensquery_request(self, offset, limit, sort, filter_):
params = assign_params(offset=offset, limit=limit, sort=sort, filter=filter_)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'installation-tokens/queries/tokens/v1', params=params, headers=headers)
return response
def tokensread_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request('get', 'installation-tokens/entities/tokens/v1', params=params, headers=headers)
return response
def tokensupdate_request(self, ids, api_tokenpatchrequestv1_expires_timestamp, api_tokenpatchrequestv1_label, api_tokenpatchrequestv1_revoked):
params = assign_params(ids=ids)
data = assign_params(expires_timestamp=api_tokenpatchrequestv1_expires_timestamp,
label=api_tokenpatchrequestv1_label, revoked=api_tokenpatchrequestv1_revoked)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'installation-tokens/entities/tokens/v1',
params=params, json_data=data, headers=headers)
return response
def trigger_scan_request(self, scan_type):
params = assign_params(scan_type=scan_type)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'kubernetes-protection/entities/scan/trigger/v1', params=params, headers=headers)
return response
def update_actionv1_request(self, domain_updateactionrequest_frequency, domain_updateactionrequest_id, domain_updateactionrequest_recipients, domain_updateactionrequest_status):
data = assign_params(frequency=domain_updateactionrequest_frequency, id=domain_updateactionrequest_id,
recipients=domain_updateactionrequest_recipients, status=domain_updateactionrequest_status)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'recon/entities/actions/v1', json_data=data, headers=headers)
return response
def update_detects_by_idsv2_request(self, domain_detectsentitiespatchrequest_assigned_to_uuid, domain_detectsentitiespatchrequest_comment, domain_detectsentitiespatchrequest_ids, domain_detectsentitiespatchrequest_show_in_ui, domain_detectsentitiespatchrequest_status):
data = assign_params(assigned_to_uuid=domain_detectsentitiespatchrequest_assigned_to_uuid, comment=domain_detectsentitiespatchrequest_comment,
ids=domain_detectsentitiespatchrequest_ids, show_in_ui=domain_detectsentitiespatchrequest_show_in_ui, status=domain_detectsentitiespatchrequest_status)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'detects/entities/detects/v2', json_data=data, headers=headers)
return response
def update_device_control_policies_request(self, requests_updatedevicecontrolpoliciesv1_resources):
data = assign_params(resources=requests_updatedevicecontrolpoliciesv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'policy/entities/device-control/v1', json_data=data, headers=headers)
return response
def update_device_tags_request(self, domain_updatedevicetagsrequestv1_action, domain_updatedevicetagsrequestv1_device_ids, domain_updatedevicetagsrequestv1_tags):
data = assign_params(action=domain_updatedevicetagsrequestv1_action,
device_ids=domain_updatedevicetagsrequestv1_device_ids, tags=domain_updatedevicetagsrequestv1_tags)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'devices/entities/devices/tags/v1', json_data=data, headers=headers)
return response
def update_firewall_policies_request(self, requests_updatefirewallpoliciesv1_resources):
data = assign_params(resources=requests_updatefirewallpoliciesv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'policy/entities/firewall/v1', json_data=data, headers=headers)
return response
def update_host_groups_request(self, requests_updategroupsv1_resources):
data = assign_params(resources=requests_updategroupsv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'devices/entities/host-groups/v1', json_data=data, headers=headers)
return response
def update_notificationsv1_request(self, domain_updatenotificationrequestv1_assigned_to_uuid, domain_updatenotificationrequestv1_id, domain_updatenotificationrequestv1_status):
data = assign_params(assigned_to_uuid=domain_updatenotificationrequestv1_assigned_to_uuid,
id=domain_updatenotificationrequestv1_id, status=domain_updatenotificationrequestv1_status)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'recon/entities/notifications/v1', json_data=data, headers=headers)
return response
def update_prevention_policies_request(self, requests_updatepreventionpoliciesv1_resources):
data = assign_params(resources=requests_updatepreventionpoliciesv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'policy/entities/prevention/v1', json_data=data, headers=headers)
return response
def update_rulesv1_request(self, domain_updaterulerequestv1_filter, domain_updaterulerequestv1_id, domain_updaterulerequestv1_name, domain_updaterulerequestv1_permissions, domain_updaterulerequestv1_priority):
data = assign_params(filter=domain_updaterulerequestv1_filter, id=domain_updaterulerequestv1_id, name=domain_updaterulerequestv1_name,
permissions=domain_updaterulerequestv1_permissions, priority=domain_updaterulerequestv1_priority)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'recon/entities/rules/v1', json_data=data, headers=headers)
return response
def update_sensor_update_policies_request(self, requests_updatesensorupdatepoliciesv1_resources):
data = assign_params(resources=requests_updatesensorupdatepoliciesv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'policy/entities/sensor-update/v1', json_data=data, headers=headers)
return response
def update_sensor_update_policiesv2_request(self, requests_updatesensorupdatepoliciesv2_resources):
data = assign_params(resources=requests_updatesensorupdatepoliciesv2_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'policy/entities/sensor-update/v2', json_data=data, headers=headers)
return response
def update_sensor_visibility_exclusionsv1_request(self, requests_svexclusionupdatereqv1_comment, requests_svexclusionupdatereqv1_groups, requests_svexclusionupdatereqv1_id, requests_svexclusionupdatereqv1_value):
data = assign_params(comment=requests_svexclusionupdatereqv1_comment, groups=requests_svexclusionupdatereqv1_groups,
id=requests_svexclusionupdatereqv1_id, value=requests_svexclusionupdatereqv1_value)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'policy/entities/sv-exclusions/v1', json_data=data, headers=headers)
return response
def update_user_request(self, user_uuid, domain_updateuserfields_firstname, domain_updateuserfields_lastname):
params = assign_params(user_uuid=user_uuid)
data = assign_params(firstName=domain_updateuserfields_firstname, lastName=domain_updateuserfields_lastname)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'users/entities/users/v1', params=params, json_data=data, headers=headers)
return response
def update_user_groups_request(self, domain_usergroupsrequestv1_resources):
data = assign_params(resources=domain_usergroupsrequestv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'mssp/entities/user-groups/v1', json_data=data, headers=headers)
return response
def updateaws_account_request(self, ids, region):
params = assign_params(ids=ids, region=region)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'patch', 'kubernetes-protection/entities/accounts/aws/v1', params=params, headers=headers)
return response
def updateaws_accounts_request(self, models_updateawsaccountsv1_resources):
data = assign_params(resources=models_updateawsaccountsv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'cloud-connect-aws/entities/accounts/v1', json_data=data, headers=headers)
return response
def updatecid_groups_request(self, domain_cidgroupsrequestv1_resources):
data = assign_params(resources=domain_cidgroupsrequestv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'mssp/entities/cid-groups/v1', json_data=data, headers=headers)
return response
def updatecspm_azure_tenant_default_subscriptionid_request(self, tenant_id, subscription_id):
params = assign_params(tenant_id=tenant_id, subscription_id=subscription_id)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'patch', 'cloud-connect-cspm-azure/entities/default-subscription-id/v1', params=params, headers=headers)
return response
def updatecspm_policy_settings_request(self, registration_policyrequestextv1_resources):
data = assign_params(resources=registration_policyrequestextv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'settings/entities/policy/v1', json_data=data, headers=headers)
return response
def updatecspm_scan_schedule_request(self, registration_scanscheduleupdaterequestv1_resources):
data = assign_params(resources=registration_scanscheduleupdaterequestv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'settings/scan-schedule/v1', json_data=data, headers=headers)
return response
def updateioa_exclusionsv1_request(self, requests_ioaexclusionupdatereqv1_cl_regex, requests_ioaexclusionupdatereqv1_comment, requests_ioaexclusionupdatereqv1_description, requests_ioaexclusionupdatereqv1_detection_json, requests_ioaexclusionupdatereqv1_groups, requests_ioaexclusionupdatereqv1_id, requests_ioaexclusionupdatereqv1_ifn_regex, requests_ioaexclusionupdatereqv1_name, requests_ioaexclusionupdatereqv1_pattern_id, requests_ioaexclusionupdatereqv1_pattern_name):
data = assign_params(cl_regex=requests_ioaexclusionupdatereqv1_cl_regex, comment=requests_ioaexclusionupdatereqv1_comment, description=requests_ioaexclusionupdatereqv1_description, detection_json=requests_ioaexclusionupdatereqv1_detection_json, groups=requests_ioaexclusionupdatereqv1_groups,
id=requests_ioaexclusionupdatereqv1_id, ifn_regex=requests_ioaexclusionupdatereqv1_ifn_regex, name=requests_ioaexclusionupdatereqv1_name, pattern_id=requests_ioaexclusionupdatereqv1_pattern_id, pattern_name=requests_ioaexclusionupdatereqv1_pattern_name)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'policy/entities/ioa-exclusions/v1', json_data=data, headers=headers)
return response
def updateioc_request(self, api_iocviewrecord_batch_id, api_iocviewrecord_created_by, api_iocviewrecord_created_timestamp, api_iocviewrecord_description, api_iocviewrecord_expiration_days, api_iocviewrecord_expiration_timestamp, api_iocviewrecord_modified_by, api_iocviewrecord_modified_timestamp, api_iocviewrecord_policy, api_iocviewrecord_share_level, api_iocviewrecord_source, api_iocviewrecord_type, api_iocviewrecord_value, type_, value):
params = assign_params(type=type_, value=value)
data = assign_params(batch_id=api_iocviewrecord_batch_id, created_by=api_iocviewrecord_created_by, created_timestamp=api_iocviewrecord_created_timestamp, description=api_iocviewrecord_description, expiration_days=api_iocviewrecord_expiration_days, expiration_timestamp=api_iocviewrecord_expiration_timestamp,
modified_by=api_iocviewrecord_modified_by, modified_timestamp=api_iocviewrecord_modified_timestamp, policy=api_iocviewrecord_policy, share_level=api_iocviewrecord_share_level, source=api_iocviewrecord_source, type=api_iocviewrecord_type, value=api_iocviewrecord_value)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'indicators/entities/iocs/v1',
params=params, json_data=data, headers=headers)
return response
def updateml_exclusionsv1_request(self, requests_svexclusionupdatereqv1_comment, requests_svexclusionupdatereqv1_groups, requests_svexclusionupdatereqv1_id, requests_svexclusionupdatereqv1_value):
data = assign_params(comment=requests_svexclusionupdatereqv1_comment, groups=requests_svexclusionupdatereqv1_groups,
id=requests_svexclusionupdatereqv1_id, value=requests_svexclusionupdatereqv1_value)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'policy/entities/ml-exclusions/v1', json_data=data, headers=headers)
return response
def updatepolicycontainer_request(self, fwmgr_api_policycontainerupsertrequestv1_default_inbound, fwmgr_api_policycontainerupsertrequestv1_default_outbound, fwmgr_api_policycontainerupsertrequestv1_enforce, fwmgr_api_policycontainerupsertrequestv1_is_default_policy, fwmgr_api_policycontainerupsertrequestv1_platform_id, fwmgr_api_policycontainerupsertrequestv1_policy_id, fwmgr_api_policycontainerupsertrequestv1_rule_group_ids, fwmgr_api_policycontainerupsertrequestv1_test_mode, fwmgr_api_policycontainerupsertrequestv1_tracking):
data = assign_params(default_inbound=fwmgr_api_policycontainerupsertrequestv1_default_inbound, default_outbound=fwmgr_api_policycontainerupsertrequestv1_default_outbound, enforce=fwmgr_api_policycontainerupsertrequestv1_enforce, is_default_policy=fwmgr_api_policycontainerupsertrequestv1_is_default_policy,
platform_id=fwmgr_api_policycontainerupsertrequestv1_platform_id, policy_id=fwmgr_api_policycontainerupsertrequestv1_policy_id, rule_group_ids=fwmgr_api_policycontainerupsertrequestv1_rule_group_ids, test_mode=fwmgr_api_policycontainerupsertrequestv1_test_mode, tracking=fwmgr_api_policycontainerupsertrequestv1_tracking)
headers = self.cs_client._headers
response = self.cs_client.http_request('put', 'fwmgr/entities/policies/v1', json_data=data, headers=headers)
return response
def updatert_response_policies_request(self, requests_updatertresponsepoliciesv1_resources):
data = assign_params(resources=requests_updatertresponsepoliciesv1_resources)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'policy/entities/response/v1', json_data=data, headers=headers)
return response
def updaterulegroup_request(self, comment, fwmgr_api_rulegroupmodifyrequestv1_diff_operations, fwmgr_api_rulegroupmodifyrequestv1_diff_type, fwmgr_api_rulegroupmodifyrequestv1_id, fwmgr_api_rulegroupmodifyrequestv1_rule_ids, fwmgr_api_rulegroupmodifyrequestv1_rule_versions, fwmgr_api_rulegroupmodifyrequestv1_tracking):
params = assign_params(comment=comment)
data = assign_params(diff_operations=fwmgr_api_rulegroupmodifyrequestv1_diff_operations, diff_type=fwmgr_api_rulegroupmodifyrequestv1_diff_type, id=fwmgr_api_rulegroupmodifyrequestv1_id,
rule_ids=fwmgr_api_rulegroupmodifyrequestv1_rule_ids, rule_versions=fwmgr_api_rulegroupmodifyrequestv1_rule_versions, tracking=fwmgr_api_rulegroupmodifyrequestv1_tracking)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'fwmgr/entities/rule-groups/v1',
params=params, json_data=data, headers=headers)
return response
def updaterulegroup_mixin0_request(self, api_rulegroupmodifyrequestv1_comment, api_rulegroupmodifyrequestv1_description, api_rulegroupmodifyrequestv1_enabled, api_rulegroupmodifyrequestv1_id, api_rulegroupmodifyrequestv1_name, api_rulegroupmodifyrequestv1_rulegroup_version):
data = assign_params(comment=api_rulegroupmodifyrequestv1_comment, description=api_rulegroupmodifyrequestv1_description, enabled=api_rulegroupmodifyrequestv1_enabled,
id=api_rulegroupmodifyrequestv1_id, name=api_rulegroupmodifyrequestv1_name, rulegroup_version=api_rulegroupmodifyrequestv1_rulegroup_version)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'ioarules/entities/rule-groups/v1', json_data=data, headers=headers)
return response
def updaterules_request(self, api_ruleupdatesrequestv1_comment, api_ruleupdatesrequestv1_rule_updates, api_ruleupdatesrequestv1_rulegroup_id, api_ruleupdatesrequestv1_rulegroup_version):
data = assign_params(comment=api_ruleupdatesrequestv1_comment, rule_updates=api_ruleupdatesrequestv1_rule_updates,
rulegroup_id=api_ruleupdatesrequestv1_rulegroup_id, rulegroup_version=api_ruleupdatesrequestv1_rulegroup_version)
headers = self.cs_client._headers
response = self.cs_client.http_request('patch', 'ioarules/entities/rules/v1', json_data=data, headers=headers)
return response
def upload_samplev2_request(self, body, upfile, file_name, comment, is_confidential):
params = assign_params(file_name=file_name, comment=comment, is_confidential=is_confidential)
data = assign_params(body=body, upfile=upfile)
headers = dict(self.cs_client._headers)  # copy: avoid mutating the shared headers dict
headers['Content-Type'] = 'application/octet-stream'
response = self.cs_client.http_request('post', 'samples/entities/samples/v2',
params=params, json_data=data, headers=headers)
return response
def upload_samplev3_request(self, body, upfile, file_name, comment, is_confidential):
params = assign_params(file_name=file_name, comment=comment, is_confidential=is_confidential)
data = assign_params(body=body, upfile=upfile)
headers = dict(self.cs_client._headers)  # copy: avoid mutating the shared headers dict
headers['Content-Type'] = 'application/octet-stream'
response = self.cs_client.http_request('post', 'samples/entities/samples/v3',
params=params, json_data=data, headers=headers)
return response
def validate_request(self, api_validationrequestv1_fields):
data = assign_params(fields=api_validationrequestv1_fields)
headers = self.cs_client._headers
response = self.cs_client.http_request('post', 'ioarules/entities/rules/validate/v1', json_data=data, headers=headers)
return response
def verifyaws_account_access_request(self, ids):
params = assign_params(ids=ids)
headers = self.cs_client._headers
response = self.cs_client.http_request(
'post', 'cloud-connect-aws/entities/verify-account-access/v1', params=params, headers=headers)
return response
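
# Command handlers. Each *_command function below follows the same generated
# pattern: read the command's arguments out of `args`, call the matching client
# *_request method, and wrap the raw API response in CommandResults, where
# outputs_prefix selects the context path the result is written to (the
# generator leaves outputs_key_field empty).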
def add_role_command(client, args):
domain_mssprolerequestv1_resources = argToList(args.get('domain_mssprolerequestv1_resources', []))
response = client.add_role_request(domain_mssprolerequestv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainMSSPRoleResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def add_user_group_members_command(client, args):
domain_usergroupmembersrequestv1_resources = argToList(args.get('domain_usergroupmembersrequestv1_resources', []))
response = client.add_user_group_members_request(domain_usergroupmembersrequestv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainUserGroupMembersResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def addcid_group_members_command(client, args):
domain_cidgroupmembersrequestv1_resources = argToList(args.get('domain_cidgroupmembersrequestv1_resources', []))
response = client.addcid_group_members_request(domain_cidgroupmembersrequestv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainCIDGroupMembersResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def aggregate_allow_list_command(client, args):
msa_aggregatequeryrequest_date_ranges = argToList(args.get('msa_aggregatequeryrequest_date_ranges', []))
msa_aggregatequeryrequest_field = str(args.get('msa_aggregatequeryrequest_field', ''))
msa_aggregatequeryrequest_filter = str(args.get('msa_aggregatequeryrequest_filter', ''))
msa_aggregatequeryrequest_interval = str(args.get('msa_aggregatequeryrequest_interval', ''))
msa_aggregatequeryrequest_min_doc_count = args.get('msa_aggregatequeryrequest_min_doc_count', None)
msa_aggregatequeryrequest_missing = str(args.get('msa_aggregatequeryrequest_missing', ''))
msa_aggregatequeryrequest_name = str(args.get('msa_aggregatequeryrequest_name', ''))
msa_aggregatequeryrequest_q = str(args.get('msa_aggregatequeryrequest_q', ''))
msa_aggregatequeryrequest_ranges = argToList(args.get('msa_aggregatequeryrequest_ranges', []))
msa_aggregatequeryrequest_size = args.get('msa_aggregatequeryrequest_size', None)
msa_aggregatequeryrequest_sort = str(args.get('msa_aggregatequeryrequest_sort', ''))
msa_aggregatequeryrequest_sub_aggregates = argToList(args.get('msa_aggregatequeryrequest_sub_aggregates', []))
msa_aggregatequeryrequest_time_zone = str(args.get('msa_aggregatequeryrequest_time_zone', ''))
msa_aggregatequeryrequest_type = str(args.get('msa_aggregatequeryrequest_type', ''))
response = client.aggregate_allow_list_request(msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing,
msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaAggregatesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
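
# --- Illustrative helper (hypothetical, not part of the generated file) ---
# Every aggregate_*_command handler in this section parses the same fourteen
# msa_aggregatequeryrequest_* (or fwmgr_msa_aggregatequeryrequest_*) arguments
# in the same order. A helper like this sketch could collapse that repetition;
# the name parse_aggregate_query_args is introduced here purely for illustration.
def parse_aggregate_query_args(args, prefix='msa_aggregatequeryrequest'):
    """Return the shared aggregate-query arguments as a tuple, in call order."""
    return (
        argToList(args.get(f'{prefix}_date_ranges', [])),
        str(args.get(f'{prefix}_field', '')),
        str(args.get(f'{prefix}_filter', '')),
        str(args.get(f'{prefix}_interval', '')),
        args.get(f'{prefix}_min_doc_count', None),
        str(args.get(f'{prefix}_missing', '')),
        str(args.get(f'{prefix}_name', '')),
        str(args.get(f'{prefix}_q', '')),
        argToList(args.get(f'{prefix}_ranges', [])),
        args.get(f'{prefix}_size', None),
        str(args.get(f'{prefix}_sort', '')),
        argToList(args.get(f'{prefix}_sub_aggregates', [])),
        str(args.get(f'{prefix}_time_zone', '')),
        str(args.get(f'{prefix}_type', '')),
    )
# Usage sketch: response = client.aggregate_block_list_request(*parse_aggregate_query_args(args))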
def aggregate_block_list_command(client, args):
msa_aggregatequeryrequest_date_ranges = argToList(args.get('msa_aggregatequeryrequest_date_ranges', []))
msa_aggregatequeryrequest_field = str(args.get('msa_aggregatequeryrequest_field', ''))
msa_aggregatequeryrequest_filter = str(args.get('msa_aggregatequeryrequest_filter', ''))
msa_aggregatequeryrequest_interval = str(args.get('msa_aggregatequeryrequest_interval', ''))
msa_aggregatequeryrequest_min_doc_count = args.get('msa_aggregatequeryrequest_min_doc_count', None)
msa_aggregatequeryrequest_missing = str(args.get('msa_aggregatequeryrequest_missing', ''))
msa_aggregatequeryrequest_name = str(args.get('msa_aggregatequeryrequest_name', ''))
msa_aggregatequeryrequest_q = str(args.get('msa_aggregatequeryrequest_q', ''))
msa_aggregatequeryrequest_ranges = argToList(args.get('msa_aggregatequeryrequest_ranges', []))
msa_aggregatequeryrequest_size = args.get('msa_aggregatequeryrequest_size', None)
msa_aggregatequeryrequest_sort = str(args.get('msa_aggregatequeryrequest_sort', ''))
msa_aggregatequeryrequest_sub_aggregates = argToList(args.get('msa_aggregatequeryrequest_sub_aggregates', []))
msa_aggregatequeryrequest_time_zone = str(args.get('msa_aggregatequeryrequest_time_zone', ''))
msa_aggregatequeryrequest_type = str(args.get('msa_aggregatequeryrequest_type', ''))
response = client.aggregate_block_list_request(msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing,
msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaAggregatesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def aggregate_detections_command(client, args):
msa_aggregatequeryrequest_date_ranges = argToList(args.get('msa_aggregatequeryrequest_date_ranges', []))
msa_aggregatequeryrequest_field = str(args.get('msa_aggregatequeryrequest_field', ''))
msa_aggregatequeryrequest_filter = str(args.get('msa_aggregatequeryrequest_filter', ''))
msa_aggregatequeryrequest_interval = str(args.get('msa_aggregatequeryrequest_interval', ''))
msa_aggregatequeryrequest_min_doc_count = args.get('msa_aggregatequeryrequest_min_doc_count', None)
msa_aggregatequeryrequest_missing = str(args.get('msa_aggregatequeryrequest_missing', ''))
msa_aggregatequeryrequest_name = str(args.get('msa_aggregatequeryrequest_name', ''))
msa_aggregatequeryrequest_q = str(args.get('msa_aggregatequeryrequest_q', ''))
msa_aggregatequeryrequest_ranges = argToList(args.get('msa_aggregatequeryrequest_ranges', []))
msa_aggregatequeryrequest_size = args.get('msa_aggregatequeryrequest_size', None)
msa_aggregatequeryrequest_sort = str(args.get('msa_aggregatequeryrequest_sort', ''))
msa_aggregatequeryrequest_sub_aggregates = argToList(args.get('msa_aggregatequeryrequest_sub_aggregates', []))
msa_aggregatequeryrequest_time_zone = str(args.get('msa_aggregatequeryrequest_time_zone', ''))
msa_aggregatequeryrequest_type = str(args.get('msa_aggregatequeryrequest_type', ''))
response = client.aggregate_detections_request(msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing,
msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaAggregatesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def aggregate_device_count_collection_command(client, args):
msa_aggregatequeryrequest_date_ranges = argToList(args.get('msa_aggregatequeryrequest_date_ranges', []))
msa_aggregatequeryrequest_field = str(args.get('msa_aggregatequeryrequest_field', ''))
msa_aggregatequeryrequest_filter = str(args.get('msa_aggregatequeryrequest_filter', ''))
msa_aggregatequeryrequest_interval = str(args.get('msa_aggregatequeryrequest_interval', ''))
msa_aggregatequeryrequest_min_doc_count = args.get('msa_aggregatequeryrequest_min_doc_count', None)
msa_aggregatequeryrequest_missing = str(args.get('msa_aggregatequeryrequest_missing', ''))
msa_aggregatequeryrequest_name = str(args.get('msa_aggregatequeryrequest_name', ''))
msa_aggregatequeryrequest_q = str(args.get('msa_aggregatequeryrequest_q', ''))
msa_aggregatequeryrequest_ranges = argToList(args.get('msa_aggregatequeryrequest_ranges', []))
msa_aggregatequeryrequest_size = args.get('msa_aggregatequeryrequest_size', None)
msa_aggregatequeryrequest_sort = str(args.get('msa_aggregatequeryrequest_sort', ''))
msa_aggregatequeryrequest_sub_aggregates = argToList(args.get('msa_aggregatequeryrequest_sub_aggregates', []))
msa_aggregatequeryrequest_time_zone = str(args.get('msa_aggregatequeryrequest_time_zone', ''))
msa_aggregatequeryrequest_type = str(args.get('msa_aggregatequeryrequest_type', ''))
response = client.aggregate_device_count_collection_request(msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing,
msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaAggregatesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def aggregate_escalations_command(client, args):
msa_aggregatequeryrequest_date_ranges = argToList(args.get('msa_aggregatequeryrequest_date_ranges', []))
msa_aggregatequeryrequest_field = str(args.get('msa_aggregatequeryrequest_field', ''))
msa_aggregatequeryrequest_filter = str(args.get('msa_aggregatequeryrequest_filter', ''))
msa_aggregatequeryrequest_interval = str(args.get('msa_aggregatequeryrequest_interval', ''))
msa_aggregatequeryrequest_min_doc_count = args.get('msa_aggregatequeryrequest_min_doc_count', None)
msa_aggregatequeryrequest_missing = str(args.get('msa_aggregatequeryrequest_missing', ''))
msa_aggregatequeryrequest_name = str(args.get('msa_aggregatequeryrequest_name', ''))
msa_aggregatequeryrequest_q = str(args.get('msa_aggregatequeryrequest_q', ''))
msa_aggregatequeryrequest_ranges = argToList(args.get('msa_aggregatequeryrequest_ranges', []))
msa_aggregatequeryrequest_size = args.get('msa_aggregatequeryrequest_size', None)
msa_aggregatequeryrequest_sort = str(args.get('msa_aggregatequeryrequest_sort', ''))
msa_aggregatequeryrequest_sub_aggregates = argToList(args.get('msa_aggregatequeryrequest_sub_aggregates', []))
msa_aggregatequeryrequest_time_zone = str(args.get('msa_aggregatequeryrequest_time_zone', ''))
msa_aggregatequeryrequest_type = str(args.get('msa_aggregatequeryrequest_type', ''))
response = client.aggregate_escalations_request(msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing,
msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaAggregatesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def aggregate_notificationsv1_command(client, args):
msa_aggregatequeryrequest_date_ranges = argToList(args.get('msa_aggregatequeryrequest_date_ranges', []))
msa_aggregatequeryrequest_field = str(args.get('msa_aggregatequeryrequest_field', ''))
msa_aggregatequeryrequest_filter = str(args.get('msa_aggregatequeryrequest_filter', ''))
msa_aggregatequeryrequest_interval = str(args.get('msa_aggregatequeryrequest_interval', ''))
msa_aggregatequeryrequest_min_doc_count = args.get('msa_aggregatequeryrequest_min_doc_count', None)
msa_aggregatequeryrequest_missing = str(args.get('msa_aggregatequeryrequest_missing', ''))
msa_aggregatequeryrequest_name = str(args.get('msa_aggregatequeryrequest_name', ''))
msa_aggregatequeryrequest_q = str(args.get('msa_aggregatequeryrequest_q', ''))
msa_aggregatequeryrequest_ranges = argToList(args.get('msa_aggregatequeryrequest_ranges', []))
msa_aggregatequeryrequest_size = args.get('msa_aggregatequeryrequest_size', None)
msa_aggregatequeryrequest_sort = str(args.get('msa_aggregatequeryrequest_sort', ''))
msa_aggregatequeryrequest_sub_aggregates = argToList(args.get('msa_aggregatequeryrequest_sub_aggregates', []))
msa_aggregatequeryrequest_time_zone = str(args.get('msa_aggregatequeryrequest_time_zone', ''))
msa_aggregatequeryrequest_type = str(args.get('msa_aggregatequeryrequest_type', ''))
response = client.aggregate_notificationsv1_request(msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing,
msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainAggregatesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def aggregate_remediations_command(client, args):
msa_aggregatequeryrequest_date_ranges = argToList(args.get('msa_aggregatequeryrequest_date_ranges', []))
msa_aggregatequeryrequest_field = str(args.get('msa_aggregatequeryrequest_field', ''))
msa_aggregatequeryrequest_filter = str(args.get('msa_aggregatequeryrequest_filter', ''))
msa_aggregatequeryrequest_interval = str(args.get('msa_aggregatequeryrequest_interval', ''))
msa_aggregatequeryrequest_min_doc_count = args.get('msa_aggregatequeryrequest_min_doc_count', None)
msa_aggregatequeryrequest_missing = str(args.get('msa_aggregatequeryrequest_missing', ''))
msa_aggregatequeryrequest_name = str(args.get('msa_aggregatequeryrequest_name', ''))
msa_aggregatequeryrequest_q = str(args.get('msa_aggregatequeryrequest_q', ''))
msa_aggregatequeryrequest_ranges = argToList(args.get('msa_aggregatequeryrequest_ranges', []))
msa_aggregatequeryrequest_size = args.get('msa_aggregatequeryrequest_size', None)
msa_aggregatequeryrequest_sort = str(args.get('msa_aggregatequeryrequest_sort', ''))
msa_aggregatequeryrequest_sub_aggregates = argToList(args.get('msa_aggregatequeryrequest_sub_aggregates', []))
msa_aggregatequeryrequest_time_zone = str(args.get('msa_aggregatequeryrequest_time_zone', ''))
msa_aggregatequeryrequest_type = str(args.get('msa_aggregatequeryrequest_type', ''))
response = client.aggregate_remediations_request(msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing,
msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaAggregatesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def aggregateevents_command(client, args):
fwmgr_msa_aggregatequeryrequest_date_ranges = argToList(args.get('fwmgr_msa_aggregatequeryrequest_date_ranges', []))
fwmgr_msa_aggregatequeryrequest_field = str(args.get('fwmgr_msa_aggregatequeryrequest_field', ''))
fwmgr_msa_aggregatequeryrequest_filter = str(args.get('fwmgr_msa_aggregatequeryrequest_filter', ''))
fwmgr_msa_aggregatequeryrequest_interval = str(args.get('fwmgr_msa_aggregatequeryrequest_interval', ''))
fwmgr_msa_aggregatequeryrequest_min_doc_count = args.get('fwmgr_msa_aggregatequeryrequest_min_doc_count', None)
fwmgr_msa_aggregatequeryrequest_missing = str(args.get('fwmgr_msa_aggregatequeryrequest_missing', ''))
fwmgr_msa_aggregatequeryrequest_name = str(args.get('fwmgr_msa_aggregatequeryrequest_name', ''))
fwmgr_msa_aggregatequeryrequest_q = str(args.get('fwmgr_msa_aggregatequeryrequest_q', ''))
fwmgr_msa_aggregatequeryrequest_ranges = argToList(args.get('fwmgr_msa_aggregatequeryrequest_ranges', []))
fwmgr_msa_aggregatequeryrequest_size = args.get('fwmgr_msa_aggregatequeryrequest_size', None)
fwmgr_msa_aggregatequeryrequest_sort = str(args.get('fwmgr_msa_aggregatequeryrequest_sort', ''))
fwmgr_msa_aggregatequeryrequest_sub_aggregates = argToList(args.get('fwmgr_msa_aggregatequeryrequest_sub_aggregates', []))
fwmgr_msa_aggregatequeryrequest_time_zone = str(args.get('fwmgr_msa_aggregatequeryrequest_time_zone', ''))
fwmgr_msa_aggregatequeryrequest_type = str(args.get('fwmgr_msa_aggregatequeryrequest_type', ''))
response = client.aggregateevents_request(fwmgr_msa_aggregatequeryrequest_date_ranges, fwmgr_msa_aggregatequeryrequest_field, fwmgr_msa_aggregatequeryrequest_filter, fwmgr_msa_aggregatequeryrequest_interval, fwmgr_msa_aggregatequeryrequest_min_doc_count, fwmgr_msa_aggregatequeryrequest_missing,
fwmgr_msa_aggregatequeryrequest_name, fwmgr_msa_aggregatequeryrequest_q, fwmgr_msa_aggregatequeryrequest_ranges, fwmgr_msa_aggregatequeryrequest_size, fwmgr_msa_aggregatequeryrequest_sort, fwmgr_msa_aggregatequeryrequest_sub_aggregates, fwmgr_msa_aggregatequeryrequest_time_zone, fwmgr_msa_aggregatequeryrequest_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrapiAggregatesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def aggregatefc_incidents_command(client, args):
msa_aggregatequeryrequest_date_ranges = argToList(args.get('msa_aggregatequeryrequest_date_ranges', []))
msa_aggregatequeryrequest_field = str(args.get('msa_aggregatequeryrequest_field', ''))
msa_aggregatequeryrequest_filter = str(args.get('msa_aggregatequeryrequest_filter', ''))
msa_aggregatequeryrequest_interval = str(args.get('msa_aggregatequeryrequest_interval', ''))
msa_aggregatequeryrequest_min_doc_count = args.get('msa_aggregatequeryrequest_min_doc_count', None)
msa_aggregatequeryrequest_missing = str(args.get('msa_aggregatequeryrequest_missing', ''))
msa_aggregatequeryrequest_name = str(args.get('msa_aggregatequeryrequest_name', ''))
msa_aggregatequeryrequest_q = str(args.get('msa_aggregatequeryrequest_q', ''))
msa_aggregatequeryrequest_ranges = argToList(args.get('msa_aggregatequeryrequest_ranges', []))
msa_aggregatequeryrequest_size = args.get('msa_aggregatequeryrequest_size', None)
msa_aggregatequeryrequest_sort = str(args.get('msa_aggregatequeryrequest_sort', ''))
msa_aggregatequeryrequest_sub_aggregates = argToList(args.get('msa_aggregatequeryrequest_sub_aggregates', []))
msa_aggregatequeryrequest_time_zone = str(args.get('msa_aggregatequeryrequest_time_zone', ''))
msa_aggregatequeryrequest_type = str(args.get('msa_aggregatequeryrequest_type', ''))
response = client.aggregatefc_incidents_request(msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing,
msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaAggregatesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def aggregatepolicyrules_command(client, args):
fwmgr_msa_aggregatequeryrequest_date_ranges = argToList(args.get('fwmgr_msa_aggregatequeryrequest_date_ranges', []))
fwmgr_msa_aggregatequeryrequest_field = str(args.get('fwmgr_msa_aggregatequeryrequest_field', ''))
fwmgr_msa_aggregatequeryrequest_filter = str(args.get('fwmgr_msa_aggregatequeryrequest_filter', ''))
fwmgr_msa_aggregatequeryrequest_interval = str(args.get('fwmgr_msa_aggregatequeryrequest_interval', ''))
fwmgr_msa_aggregatequeryrequest_min_doc_count = args.get('fwmgr_msa_aggregatequeryrequest_min_doc_count', None)
fwmgr_msa_aggregatequeryrequest_missing = str(args.get('fwmgr_msa_aggregatequeryrequest_missing', ''))
fwmgr_msa_aggregatequeryrequest_name = str(args.get('fwmgr_msa_aggregatequeryrequest_name', ''))
fwmgr_msa_aggregatequeryrequest_q = str(args.get('fwmgr_msa_aggregatequeryrequest_q', ''))
fwmgr_msa_aggregatequeryrequest_ranges = argToList(args.get('fwmgr_msa_aggregatequeryrequest_ranges', []))
fwmgr_msa_aggregatequeryrequest_size = args.get('fwmgr_msa_aggregatequeryrequest_size', None)
fwmgr_msa_aggregatequeryrequest_sort = str(args.get('fwmgr_msa_aggregatequeryrequest_sort', ''))
fwmgr_msa_aggregatequeryrequest_sub_aggregates = argToList(args.get('fwmgr_msa_aggregatequeryrequest_sub_aggregates', []))
fwmgr_msa_aggregatequeryrequest_time_zone = str(args.get('fwmgr_msa_aggregatequeryrequest_time_zone', ''))
fwmgr_msa_aggregatequeryrequest_type = str(args.get('fwmgr_msa_aggregatequeryrequest_type', ''))
response = client.aggregatepolicyrules_request(fwmgr_msa_aggregatequeryrequest_date_ranges, fwmgr_msa_aggregatequeryrequest_field, fwmgr_msa_aggregatequeryrequest_filter, fwmgr_msa_aggregatequeryrequest_interval, fwmgr_msa_aggregatequeryrequest_min_doc_count, fwmgr_msa_aggregatequeryrequest_missing,
fwmgr_msa_aggregatequeryrequest_name, fwmgr_msa_aggregatequeryrequest_q, fwmgr_msa_aggregatequeryrequest_ranges, fwmgr_msa_aggregatequeryrequest_size, fwmgr_msa_aggregatequeryrequest_sort, fwmgr_msa_aggregatequeryrequest_sub_aggregates, fwmgr_msa_aggregatequeryrequest_time_zone, fwmgr_msa_aggregatequeryrequest_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrapiAggregatesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def aggregaterulegroups_command(client, args):
fwmgr_msa_aggregatequeryrequest_date_ranges = argToList(args.get('fwmgr_msa_aggregatequeryrequest_date_ranges', []))
fwmgr_msa_aggregatequeryrequest_field = str(args.get('fwmgr_msa_aggregatequeryrequest_field', ''))
fwmgr_msa_aggregatequeryrequest_filter = str(args.get('fwmgr_msa_aggregatequeryrequest_filter', ''))
fwmgr_msa_aggregatequeryrequest_interval = str(args.get('fwmgr_msa_aggregatequeryrequest_interval', ''))
fwmgr_msa_aggregatequeryrequest_min_doc_count = args.get('fwmgr_msa_aggregatequeryrequest_min_doc_count', None)
fwmgr_msa_aggregatequeryrequest_missing = str(args.get('fwmgr_msa_aggregatequeryrequest_missing', ''))
fwmgr_msa_aggregatequeryrequest_name = str(args.get('fwmgr_msa_aggregatequeryrequest_name', ''))
fwmgr_msa_aggregatequeryrequest_q = str(args.get('fwmgr_msa_aggregatequeryrequest_q', ''))
fwmgr_msa_aggregatequeryrequest_ranges = argToList(args.get('fwmgr_msa_aggregatequeryrequest_ranges', []))
fwmgr_msa_aggregatequeryrequest_size = args.get('fwmgr_msa_aggregatequeryrequest_size', None)
fwmgr_msa_aggregatequeryrequest_sort = str(args.get('fwmgr_msa_aggregatequeryrequest_sort', ''))
fwmgr_msa_aggregatequeryrequest_sub_aggregates = argToList(args.get('fwmgr_msa_aggregatequeryrequest_sub_aggregates', []))
fwmgr_msa_aggregatequeryrequest_time_zone = str(args.get('fwmgr_msa_aggregatequeryrequest_time_zone', ''))
fwmgr_msa_aggregatequeryrequest_type = str(args.get('fwmgr_msa_aggregatequeryrequest_type', ''))
response = client.aggregaterulegroups_request(fwmgr_msa_aggregatequeryrequest_date_ranges, fwmgr_msa_aggregatequeryrequest_field, fwmgr_msa_aggregatequeryrequest_filter, fwmgr_msa_aggregatequeryrequest_interval, fwmgr_msa_aggregatequeryrequest_min_doc_count, fwmgr_msa_aggregatequeryrequest_missing,
fwmgr_msa_aggregatequeryrequest_name, fwmgr_msa_aggregatequeryrequest_q, fwmgr_msa_aggregatequeryrequest_ranges, fwmgr_msa_aggregatequeryrequest_size, fwmgr_msa_aggregatequeryrequest_sort, fwmgr_msa_aggregatequeryrequest_sub_aggregates, fwmgr_msa_aggregatequeryrequest_time_zone, fwmgr_msa_aggregatequeryrequest_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrapiAggregatesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def aggregaterules_command(client, args):
fwmgr_msa_aggregatequeryrequest_date_ranges = argToList(args.get('fwmgr_msa_aggregatequeryrequest_date_ranges', []))
fwmgr_msa_aggregatequeryrequest_field = str(args.get('fwmgr_msa_aggregatequeryrequest_field', ''))
fwmgr_msa_aggregatequeryrequest_filter = str(args.get('fwmgr_msa_aggregatequeryrequest_filter', ''))
fwmgr_msa_aggregatequeryrequest_interval = str(args.get('fwmgr_msa_aggregatequeryrequest_interval', ''))
fwmgr_msa_aggregatequeryrequest_min_doc_count = args.get('fwmgr_msa_aggregatequeryrequest_min_doc_count', None)
fwmgr_msa_aggregatequeryrequest_missing = str(args.get('fwmgr_msa_aggregatequeryrequest_missing', ''))
fwmgr_msa_aggregatequeryrequest_name = str(args.get('fwmgr_msa_aggregatequeryrequest_name', ''))
fwmgr_msa_aggregatequeryrequest_q = str(args.get('fwmgr_msa_aggregatequeryrequest_q', ''))
fwmgr_msa_aggregatequeryrequest_ranges = argToList(args.get('fwmgr_msa_aggregatequeryrequest_ranges', []))
fwmgr_msa_aggregatequeryrequest_size = args.get('fwmgr_msa_aggregatequeryrequest_size', None)
fwmgr_msa_aggregatequeryrequest_sort = str(args.get('fwmgr_msa_aggregatequeryrequest_sort', ''))
fwmgr_msa_aggregatequeryrequest_sub_aggregates = argToList(args.get('fwmgr_msa_aggregatequeryrequest_sub_aggregates', []))
fwmgr_msa_aggregatequeryrequest_time_zone = str(args.get('fwmgr_msa_aggregatequeryrequest_time_zone', ''))
fwmgr_msa_aggregatequeryrequest_type = str(args.get('fwmgr_msa_aggregatequeryrequest_type', ''))
response = client.aggregaterules_request(fwmgr_msa_aggregatequeryrequest_date_ranges, fwmgr_msa_aggregatequeryrequest_field, fwmgr_msa_aggregatequeryrequest_filter, fwmgr_msa_aggregatequeryrequest_interval, fwmgr_msa_aggregatequeryrequest_min_doc_count, fwmgr_msa_aggregatequeryrequest_missing,
fwmgr_msa_aggregatequeryrequest_name, fwmgr_msa_aggregatequeryrequest_q, fwmgr_msa_aggregatequeryrequest_ranges, fwmgr_msa_aggregatequeryrequest_size, fwmgr_msa_aggregatequeryrequest_sort, fwmgr_msa_aggregatequeryrequest_sub_aggregates, fwmgr_msa_aggregatequeryrequest_time_zone, fwmgr_msa_aggregatequeryrequest_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrapiAggregatesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def aggregates_detections_global_counts_command(client, args):
filter_ = str(args.get('filter_', ''))
response = client.aggregates_detections_global_counts_request(filter_)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaFacetsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
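
# Note: argument names like 'filter_' here (and 'id_' further below) carry a
# trailing underscore, presumably so the generated locals do not shadow the
# Python built-ins filter and id; the underscore is also part of the XSOAR
# argument name looked up in `args`.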
def aggregates_events_command(client, args):
msa_aggregatequeryrequest_date_ranges = argToList(args.get('msa_aggregatequeryrequest_date_ranges', []))
msa_aggregatequeryrequest_field = str(args.get('msa_aggregatequeryrequest_field', ''))
msa_aggregatequeryrequest_filter = str(args.get('msa_aggregatequeryrequest_filter', ''))
msa_aggregatequeryrequest_interval = str(args.get('msa_aggregatequeryrequest_interval', ''))
msa_aggregatequeryrequest_min_doc_count = args.get('msa_aggregatequeryrequest_min_doc_count', None)
msa_aggregatequeryrequest_missing = str(args.get('msa_aggregatequeryrequest_missing', ''))
msa_aggregatequeryrequest_name = str(args.get('msa_aggregatequeryrequest_name', ''))
msa_aggregatequeryrequest_q = str(args.get('msa_aggregatequeryrequest_q', ''))
msa_aggregatequeryrequest_ranges = argToList(args.get('msa_aggregatequeryrequest_ranges', []))
msa_aggregatequeryrequest_size = args.get('msa_aggregatequeryrequest_size', None)
msa_aggregatequeryrequest_sort = str(args.get('msa_aggregatequeryrequest_sort', ''))
msa_aggregatequeryrequest_sub_aggregates = argToList(args.get('msa_aggregatequeryrequest_sub_aggregates', []))
msa_aggregatequeryrequest_time_zone = str(args.get('msa_aggregatequeryrequest_time_zone', ''))
msa_aggregatequeryrequest_type = str(args.get('msa_aggregatequeryrequest_type', ''))
response = client.aggregates_events_request(msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing,
msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaAggregatesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def aggregates_events_collections_command(client, args):
msa_aggregatequeryrequest_date_ranges = argToList(args.get('msa_aggregatequeryrequest_date_ranges', []))
msa_aggregatequeryrequest_field = str(args.get('msa_aggregatequeryrequest_field', ''))
msa_aggregatequeryrequest_filter = str(args.get('msa_aggregatequeryrequest_filter', ''))
msa_aggregatequeryrequest_interval = str(args.get('msa_aggregatequeryrequest_interval', ''))
msa_aggregatequeryrequest_min_doc_count = args.get('msa_aggregatequeryrequest_min_doc_count', None)
msa_aggregatequeryrequest_missing = str(args.get('msa_aggregatequeryrequest_missing', ''))
msa_aggregatequeryrequest_name = str(args.get('msa_aggregatequeryrequest_name', ''))
msa_aggregatequeryrequest_q = str(args.get('msa_aggregatequeryrequest_q', ''))
msa_aggregatequeryrequest_ranges = argToList(args.get('msa_aggregatequeryrequest_ranges', []))
msa_aggregatequeryrequest_size = args.get('msa_aggregatequeryrequest_size', None)
msa_aggregatequeryrequest_sort = str(args.get('msa_aggregatequeryrequest_sort', ''))
msa_aggregatequeryrequest_sub_aggregates = argToList(args.get('msa_aggregatequeryrequest_sub_aggregates', []))
msa_aggregatequeryrequest_time_zone = str(args.get('msa_aggregatequeryrequest_time_zone', ''))
msa_aggregatequeryrequest_type = str(args.get('msa_aggregatequeryrequest_type', ''))
response = client.aggregates_events_collections_request(msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing,
msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates, msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaAggregatesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def aggregates_incidents_global_counts_command(client, args):
filter_ = str(args.get('filter_', ''))
response = client.aggregates_incidents_global_counts_request(filter_)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaFacetsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def aggregatesow_events_global_counts_command(client, args):
filter_ = str(args.get('filter_', ''))
response = client.aggregatesow_events_global_counts_request(filter_)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaFacetsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def apipreemptproxypostgraphql_command(client, args):
response = client.apipreemptproxypostgraphql_request()
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def auditeventsquery_command(client, args):
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
response = client.auditeventsquery_request(offset, limit, sort, filter_)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
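
# Sketch of how these handlers are usually wired up (assuming the standard
# XSOAR main() dispatch pattern; the 'cs-audit-events-query' command name is
# illustrative, not taken from this file):
#
#     commands = {
#         'cs-audit-events-query': auditeventsquery_command,
#         # ... one entry per *_command handler ...
#     }
#     return_results(commands[demisto.command()](client, demisto.args()))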
def auditeventsread_command(client, args):
ids = argToList(args.get('ids', []))
response = client.auditeventsread_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiauditEventDetailsResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def batch_active_responder_cmd_command(client, args):
timeout = int(args.get('timeout', 30))
timeout_duration = str(args.get('timeout_duration', '30s'))
domain_batchexecutecommandrequest_base_command = str(args.get('domain_batchexecutecommandrequest_base_command', ''))
domain_batchexecutecommandrequest_batch_id = str(args.get('domain_batchexecutecommandrequest_batch_id', ''))
domain_batchexecutecommandrequest_command_string = str(args.get('domain_batchexecutecommandrequest_command_string', ''))
domain_batchexecutecommandrequest_optional_hosts = argToList(args.get('domain_batchexecutecommandrequest_optional_hosts', []))
domain_batchexecutecommandrequest_persist_all = argToBoolean(args.get('domain_batchexecutecommandrequest_persist_all', False))
response = client.batch_active_responder_cmd_request(timeout, timeout_duration, domain_batchexecutecommandrequest_base_command, domain_batchexecutecommandrequest_batch_id,
domain_batchexecutecommandrequest_command_string, domain_batchexecutecommandrequest_optional_hosts, domain_batchexecutecommandrequest_persist_all)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def batch_admin_cmd_command(client, args):
timeout = int(args.get('timeout', 30))
timeout_duration = str(args.get('timeout_duration', '30s'))
domain_batchexecutecommandrequest_base_command = str(args.get('domain_batchexecutecommandrequest_base_command', ''))
domain_batchexecutecommandrequest_batch_id = str(args.get('domain_batchexecutecommandrequest_batch_id', ''))
domain_batchexecutecommandrequest_command_string = str(args.get('domain_batchexecutecommandrequest_command_string', ''))
domain_batchexecutecommandrequest_optional_hosts = argToList(args.get('domain_batchexecutecommandrequest_optional_hosts', []))
domain_batchexecutecommandrequest_persist_all = argToBoolean(args.get('domain_batchexecutecommandrequest_persist_all', False))
response = client.batch_admin_cmd_request(timeout, timeout_duration, domain_batchexecutecommandrequest_base_command, domain_batchexecutecommandrequest_batch_id,
domain_batchexecutecommandrequest_command_string, domain_batchexecutecommandrequest_optional_hosts, domain_batchexecutecommandrequest_persist_all)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def batch_cmd_command(client, args):
timeout = int(args.get('timeout', 30))
timeout_duration = str(args.get('timeout_duration', '30s'))
domain_batchexecutecommandrequest_base_command = str(args.get('domain_batchexecutecommandrequest_base_command', ''))
domain_batchexecutecommandrequest_batch_id = str(args.get('domain_batchexecutecommandrequest_batch_id', ''))
domain_batchexecutecommandrequest_command_string = str(args.get('domain_batchexecutecommandrequest_command_string', ''))
domain_batchexecutecommandrequest_optional_hosts = argToList(args.get('domain_batchexecutecommandrequest_optional_hosts', []))
domain_batchexecutecommandrequest_persist_all = argToBoolean(args.get('domain_batchexecutecommandrequest_persist_all', False))
response = client.batch_cmd_request(timeout, timeout_duration, domain_batchexecutecommandrequest_base_command, domain_batchexecutecommandrequest_batch_id,
domain_batchexecutecommandrequest_command_string, domain_batchexecutecommandrequest_optional_hosts, domain_batchexecutecommandrequest_persist_all)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def batch_get_cmd_command(client, args):
timeout = int(args.get('timeout', 30))
timeout_duration = str(args.get('timeout_duration', '30s'))
domain_batchgetcommandrequest_batch_id = str(args.get('domain_batchgetcommandrequest_batch_id', ''))
domain_batchgetcommandrequest_file_path = str(args.get('domain_batchgetcommandrequest_file_path', ''))
domain_batchgetcommandrequest_optional_hosts = argToList(args.get('domain_batchgetcommandrequest_optional_hosts', []))
response = client.batch_get_cmd_request(timeout, timeout_duration, domain_batchgetcommandrequest_batch_id,
domain_batchgetcommandrequest_file_path, domain_batchgetcommandrequest_optional_hosts)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def batch_get_cmd_status_command(client, args):
timeout = int(args.get('timeout', 30))
timeout_duration = str(args.get('timeout_duration', '30s'))
batch_get_cmd_req_id = str(args.get('batch_get_cmd_req_id', ''))
response = client.batch_get_cmd_status_request(timeout, timeout_duration, batch_get_cmd_req_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainBatchGetCmdStatusResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def batch_init_sessions_command(client, args):
timeout = int(args.get('timeout', 30))
timeout_duration = str(args.get('timeout_duration', '30s'))
domain_batchinitsessionrequest_existing_batch_id = str(args.get('domain_batchinitsessionrequest_existing_batch_id', ''))
domain_batchinitsessionrequest_host_ids = argToList(args.get('domain_batchinitsessionrequest_host_ids', []))
domain_batchinitsessionrequest_queue_offline = argToBoolean(args.get('domain_batchinitsessionrequest_queue_offline', False))
response = client.batch_init_sessions_request(timeout, timeout_duration, domain_batchinitsessionrequest_existing_batch_id,
domain_batchinitsessionrequest_host_ids, domain_batchinitsessionrequest_queue_offline)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def batch_refresh_sessions_command(client, args):
timeout = int(args.get('timeout', 30))
timeout_duration = str(args.get('timeout_duration', '30s'))
domain_batchrefreshsessionrequest_batch_id = str(args.get('domain_batchrefreshsessionrequest_batch_id', ''))
domain_batchrefreshsessionrequest_hosts_to_remove = argToList(
args.get('domain_batchrefreshsessionrequest_hosts_to_remove', []))
response = client.batch_refresh_sessions_request(
timeout, timeout_duration, domain_batchrefreshsessionrequest_batch_id, domain_batchrefreshsessionrequest_hosts_to_remove)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
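
# Typical Real Time Response batch flow across the batch_* handlers above (a
# sketch of call order, assuming standard RTR semantics): batch_init_sessions
# establishes a batch_id for a set of hosts; batch_cmd, batch_active_responder_cmd
# and batch_admin_cmd then run read-only, active-responder and admin commands
# against that batch; batch_get_cmd retrieves files and batch_get_cmd_status
# polls the retrieval; batch_refresh_sessions keeps the batch alive past its
# timeout.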
def create_actionsv1_command(client, args):
domain_registeractionsrequest_actions = argToList(args.get('domain_registeractionsrequest_actions', []))
domain_registeractionsrequest_rule_id = str(args.get('domain_registeractionsrequest_rule_id', ''))
response = client.create_actionsv1_request(domain_registeractionsrequest_actions, domain_registeractionsrequest_rule_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainActionEntitiesResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def create_device_control_policies_command(client, args):
requests_createdevicecontrolpoliciesv1_resources = argToList(args.get('requests_createdevicecontrolpoliciesv1_resources', []))
response = client.create_device_control_policies_request(requests_createdevicecontrolpoliciesv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def create_firewall_policies_command(client, args):
requests_createfirewallpoliciesv1_resources = argToList(args.get('requests_createfirewallpoliciesv1_resources', []))
clone_id = str(args.get('clone_id', ''))
response = client.create_firewall_policies_request(requests_createfirewallpoliciesv1_resources, clone_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def create_host_groups_command(client, args):
requests_creategroupsv1_resources = argToList(args.get('requests_creategroupsv1_resources', []))
response = client.create_host_groups_request(requests_creategroupsv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def create_or_updateaws_settings_command(client, args):
models_modifyawscustomersettingsv1_resources = argToList(args.get('models_modifyawscustomersettingsv1_resources', []))
response = client.create_or_updateaws_settings_request(models_modifyawscustomersettingsv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def create_prevention_policies_command(client, args):
requests_createpreventionpoliciesv1_resources = argToList(args.get('requests_createpreventionpoliciesv1_resources', []))
response = client.create_prevention_policies_request(requests_createpreventionpoliciesv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def create_rulesv1_command(client, args):
sadomain_createrulerequestv1_filter = str(args.get('sadomain_createrulerequestv1_filter', ''))
sadomain_createrulerequestv1_name = str(args.get('sadomain_createrulerequestv1_name', ''))
sadomain_createrulerequestv1_permissions = str(args.get('sadomain_createrulerequestv1_permissions', ''))
sadomain_createrulerequestv1_priority = str(args.get('sadomain_createrulerequestv1_priority', ''))
sadomain_createrulerequestv1_topic = str(args.get('sadomain_createrulerequestv1_topic', ''))
response = client.create_rulesv1_request(sadomain_createrulerequestv1_filter, sadomain_createrulerequestv1_name,
sadomain_createrulerequestv1_permissions, sadomain_createrulerequestv1_priority, sadomain_createrulerequestv1_topic)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainRulesEntitiesResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def create_sensor_update_policies_command(client, args):
requests_createsensorupdatepoliciesv1_resources = argToList(args.get('requests_createsensorupdatepoliciesv1_resources', []))
response = client.create_sensor_update_policies_request(requests_createsensorupdatepoliciesv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def create_sensor_update_policiesv2_command(client, args):
requests_createsensorupdatepoliciesv2_resources = argToList(args.get('requests_createsensorupdatepoliciesv2_resources', []))
response = client.create_sensor_update_policiesv2_request(requests_createsensorupdatepoliciesv2_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def create_user_command(client, args):
domain_usercreaterequest_firstname = str(args.get('domain_usercreaterequest_firstname', ''))
domain_usercreaterequest_lastname = str(args.get('domain_usercreaterequest_lastname', ''))
domain_usercreaterequest_password = str(args.get('domain_usercreaterequest_password', ''))
domain_usercreaterequest_uid = str(args.get('domain_usercreaterequest_uid', ''))
response = client.create_user_request(domain_usercreaterequest_firstname, domain_usercreaterequest_lastname,
domain_usercreaterequest_password, domain_usercreaterequest_uid)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def create_user_groups_command(client, args):
domain_usergroupsrequestv1_resources = argToList(args.get('domain_usergroupsrequestv1_resources', []))
response = client.create_user_groups_request(domain_usergroupsrequestv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainUserGroupsResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def createaws_account_command(client, args):
k8sreg_createawsaccreq_resources = argToList(args.get('k8sreg_createawsaccreq_resources', []))
response = client.createaws_account_request(k8sreg_createawsaccreq_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def createcid_groups_command(client, args):
domain_cidgroupsrequestv1_resources = argToList(args.get('domain_cidgroupsrequestv1_resources', []))
response = client.createcid_groups_request(domain_cidgroupsrequestv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainCIDGroupsResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def createcspm_aws_account_command(client, args):
registration_awsaccountcreaterequestextv2_resources = argToList(
args.get('registration_awsaccountcreaterequestextv2_resources', []))
response = client.createcspm_aws_account_request(registration_awsaccountcreaterequestextv2_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def createcspmgcp_account_command(client, args):
registration_gcpaccountcreaterequestextv1_resources = argToList(
args.get('registration_gcpaccountcreaterequestextv1_resources', []))
response = client.createcspmgcp_account_request(registration_gcpaccountcreaterequestextv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def createioc_command(client, args):
api_iocviewrecord_batch_id = str(args.get('api_iocviewrecord_batch_id', ''))
api_iocviewrecord_created_by = str(args.get('api_iocviewrecord_created_by', ''))
api_iocviewrecord_created_timestamp = str(args.get('api_iocviewrecord_created_timestamp', ''))
api_iocviewrecord_description = str(args.get('api_iocviewrecord_description', ''))
api_iocviewrecord_expiration_days = args.get('api_iocviewrecord_expiration_days', None)
api_iocviewrecord_expiration_timestamp = str(args.get('api_iocviewrecord_expiration_timestamp', ''))
api_iocviewrecord_modified_by = str(args.get('api_iocviewrecord_modified_by', ''))
api_iocviewrecord_modified_timestamp = str(args.get('api_iocviewrecord_modified_timestamp', ''))
api_iocviewrecord_policy = str(args.get('api_iocviewrecord_policy', ''))
api_iocviewrecord_share_level = str(args.get('api_iocviewrecord_share_level', ''))
api_iocviewrecord_source = str(args.get('api_iocviewrecord_source', ''))
api_iocviewrecord_type = str(args.get('api_iocviewrecord_type', ''))
api_iocviewrecord_value = str(args.get('api_iocviewrecord_value', ''))
response = client.createioc_request(api_iocviewrecord_batch_id, api_iocviewrecord_created_by, api_iocviewrecord_created_timestamp, api_iocviewrecord_description, api_iocviewrecord_expiration_days, api_iocviewrecord_expiration_timestamp,
api_iocviewrecord_modified_by, api_iocviewrecord_modified_timestamp, api_iocviewrecord_policy, api_iocviewrecord_share_level, api_iocviewrecord_source, api_iocviewrecord_type, api_iocviewrecord_value)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiMsaReplyIOC',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def createml_exclusionsv1_command(client, args):
requests_mlexclusioncreatereqv1_comment = str(args.get('requests_mlexclusioncreatereqv1_comment', ''))
requests_mlexclusioncreatereqv1_excluded_from = argToList(args.get('requests_mlexclusioncreatereqv1_excluded_from', []))
requests_mlexclusioncreatereqv1_groups = argToList(args.get('requests_mlexclusioncreatereqv1_groups', []))
requests_mlexclusioncreatereqv1_value = str(args.get('requests_mlexclusioncreatereqv1_value', ''))
response = client.createml_exclusionsv1_request(requests_mlexclusioncreatereqv1_comment, requests_mlexclusioncreatereqv1_excluded_from,
requests_mlexclusioncreatereqv1_groups, requests_mlexclusioncreatereqv1_value)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesMlExclusionRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def creatert_response_policies_command(client, args):
requests_creatertresponsepoliciesv1_resources = argToList(args.get('requests_creatertresponsepoliciesv1_resources', []))
response = client.creatert_response_policies_request(requests_creatertresponsepoliciesv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def createrule_command(client, args):
api_rulecreatev1_comment = str(args.get('api_rulecreatev1_comment', ''))
api_rulecreatev1_description = str(args.get('api_rulecreatev1_description', ''))
api_rulecreatev1_disposition_id = args.get('api_rulecreatev1_disposition_id', None)
api_rulecreatev1_field_values = argToList(args.get('api_rulecreatev1_field_values', []))
api_rulecreatev1_name = str(args.get('api_rulecreatev1_name', ''))
api_rulecreatev1_pattern_severity = str(args.get('api_rulecreatev1_pattern_severity', ''))
api_rulecreatev1_rulegroup_id = str(args.get('api_rulecreatev1_rulegroup_id', ''))
api_rulecreatev1_ruletype_id = str(args.get('api_rulecreatev1_ruletype_id', ''))
response = client.createrule_request(api_rulecreatev1_comment, api_rulecreatev1_description, api_rulecreatev1_disposition_id, api_rulecreatev1_field_values,
api_rulecreatev1_name, api_rulecreatev1_pattern_severity, api_rulecreatev1_rulegroup_id, api_rulecreatev1_ruletype_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def createrulegroup_command(client, args):
clone_id = str(args.get('clone_id', ''))
li_ary = str(args.get('li_ary', ''))
comment = str(args.get('comment', ''))
fwmgr_api_rulegroupcreaterequestv1_description = str(args.get('fwmgr_api_rulegroupcreaterequestv1_description', ''))
fwmgr_api_rulegroupcreaterequestv1_enabled = argToBoolean(args.get('fwmgr_api_rulegroupcreaterequestv1_enabled', False))
fwmgr_api_rulegroupcreaterequestv1_name = str(args.get('fwmgr_api_rulegroupcreaterequestv1_name', ''))
fwmgr_api_rulegroupcreaterequestv1_rules = argToList(args.get('fwmgr_api_rulegroupcreaterequestv1_rules', []))
response = client.createrulegroup_request(clone_id, li_ary, comment, fwmgr_api_rulegroupcreaterequestv1_description,
fwmgr_api_rulegroupcreaterequestv1_enabled, fwmgr_api_rulegroupcreaterequestv1_name, fwmgr_api_rulegroupcreaterequestv1_rules)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
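
# Note: 'li_ary' above appears to be the generated rendering of this endpoint's
# 'library' query parameter; it is left unchanged so the command's argument name
# stays aligned with createrulegroup_request's signature.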
def createrulegroup_mixin0_command(client, args):
api_rulegroupcreaterequestv1_comment = str(args.get('api_rulegroupcreaterequestv1_comment', ''))
api_rulegroupcreaterequestv1_description = str(args.get('api_rulegroupcreaterequestv1_description', ''))
api_rulegroupcreaterequestv1_name = str(args.get('api_rulegroupcreaterequestv1_name', ''))
api_rulegroupcreaterequestv1_platform = str(args.get('api_rulegroupcreaterequestv1_platform', ''))
response = client.createrulegroup_mixin0_request(
api_rulegroupcreaterequestv1_comment, api_rulegroupcreaterequestv1_description, api_rulegroupcreaterequestv1_name, api_rulegroupcreaterequestv1_platform)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def createsv_exclusionsv1_command(client, args):
requests_svexclusioncreatereqv1_comment = str(args.get('requests_svexclusioncreatereqv1_comment', ''))
requests_svexclusioncreatereqv1_groups = argToList(args.get('requests_svexclusioncreatereqv1_groups', []))
requests_svexclusioncreatereqv1_value = str(args.get('requests_svexclusioncreatereqv1_value', ''))
response = client.createsv_exclusionsv1_request(
requests_svexclusioncreatereqv1_comment, requests_svexclusioncreatereqv1_groups, requests_svexclusioncreatereqv1_value)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesMlExclusionRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def crowd_score_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.crowd_score_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiMsaEnvironmentScoreResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def customersettingsread_command(client, args):
response = client.customersettingsread_request()
command_results = CommandResults(
outputs_prefix='CrowdStrike.apicustomerSettingsResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def delete_actionv1_command(client, args):
id_ = str(args.get('id_', ''))
response = client.delete_actionv1_request(id_)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def delete_device_control_policies_command(client, args):
ids = argToList(args.get('ids', []))
response = client.delete_device_control_policies_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def delete_firewall_policies_command(client, args):
ids = argToList(args.get('ids', []))
response = client.delete_firewall_policies_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def delete_host_groups_command(client, args):
ids = argToList(args.get('ids', []))
response = client.delete_host_groups_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def delete_notificationsv1_command(client, args):
ids = argToList(args.get('ids', []))
response = client.delete_notificationsv1_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainNotificationIDResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def delete_prevention_policies_command(client, args):
ids = argToList(args.get('ids', []))
response = client.delete_prevention_policies_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def delete_report_command(client, args):
ids = str(args.get('ids', ''))
response = client.delete_report_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def delete_rulesv1_command(client, args):
ids = argToList(args.get('ids', []))
response = client.delete_rulesv1_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainRuleQueryResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def delete_samplev2_command(client, args):
ids = str(args.get('ids', ''))
response = client.delete_samplev2_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def delete_samplev3_command(client, args):
ids = str(args.get('ids', ''))
response = client.delete_samplev3_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def delete_sensor_update_policies_command(client, args):
ids = argToList(args.get('ids', []))
response = client.delete_sensor_update_policies_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def delete_sensor_visibility_exclusionsv1_command(client, args):
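    """Delete sensor visibility exclusions.

    ``ids`` selects the exclusions to remove; the optional ``comment`` is
    passed through to the API (typically used for auditing).
    """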
ids = argToList(args.get('ids', []))
comment = str(args.get('comment', ''))
response = client.delete_sensor_visibility_exclusionsv1_request(ids, comment)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def delete_user_command(client, args):
user_uuid = str(args.get('user_uuid', ''))
response = client.delete_user_request(user_uuid)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaReplyMetaOnly',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def delete_user_group_members_command(client, args):
domain_usergroupmembersrequestv1_resources = argToList(args.get('domain_usergroupmembersrequestv1_resources', []))
response = client.delete_user_group_members_request(domain_usergroupmembersrequestv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainUserGroupMembersResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def delete_user_groups_command(client, args):
user_group_ids = argToList(args.get('user_group_ids', []))
response = client.delete_user_groups_request(user_group_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaEntitiesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def deleteaws_accounts_command(client, args):
ids = argToList(args.get('ids', []))
response = client.deleteaws_accounts_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.modelsBaseResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def deleteaws_accounts_mixin0_command(client, args):
ids = argToList(args.get('ids', []))
response = client.deleteaws_accounts_mixin0_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaMetaInfo',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def deletecid_group_members_command(client, args):
domain_cidgroupmembersrequestv1_resources = argToList(args.get('domain_cidgroupmembersrequestv1_resources', []))
response = client.deletecid_group_members_request(domain_cidgroupmembersrequestv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainCIDGroupMembersResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def deletecid_groups_command(client, args):
cid_group_ids = argToList(args.get('cid_group_ids', []))
response = client.deletecid_groups_request(cid_group_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaEntitiesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def deletecspm_aws_account_command(client, args):
ids = argToList(args.get('ids', []))
organization_ids = argToList(args.get('organization_ids', []))
response = client.deletecspm_aws_account_request(ids, organization_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationBaseResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def deletecspm_azure_account_command(client, args):
ids = argToList(args.get('ids', []))
response = client.deletecspm_azure_account_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationBaseResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def deleted_roles_command(client, args):
domain_mssprolerequestv1_resources = argToList(args.get('domain_mssprolerequestv1_resources', []))
response = client.deleted_roles_request(domain_mssprolerequestv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainMSSPRoleResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def deleteioa_exclusionsv1_command(client, args):
ids = argToList(args.get('ids', []))
comment = str(args.get('comment', ''))
response = client.deleteioa_exclusionsv1_request(ids, comment)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def deleteioc_command(client, args):
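    """Delete a custom IOC identified by its ``type_`` and ``value`` arguments."""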
type_ = str(args.get('type_', ''))
value = str(args.get('value', ''))
response = client.deleteioc_request(type_, value)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiMsaReplyIOC',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def deleteml_exclusionsv1_command(client, args):
ids = argToList(args.get('ids', []))
comment = str(args.get('comment', ''))
response = client.deleteml_exclusionsv1_request(ids, comment)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesMlExclusionRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def deletert_response_policies_command(client, args):
ids = argToList(args.get('ids', []))
response = client.deletert_response_policies_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def deleterulegroups_command(client, args):
ids = argToList(args.get('ids', []))
comment = str(args.get('comment', ''))
response = client.deleterulegroups_request(ids, comment)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrapiQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def deleterulegroups_mixin0_command(client, args):
comment = str(args.get('comment', ''))
ids = argToList(args.get('ids', []))
response = client.deleterulegroups_mixin0_request(comment, ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaReplyMetaOnly',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def deleterules_command(client, args):
rule_group_id = str(args.get('rule_group_id', ''))
comment = str(args.get('comment', ''))
ids = argToList(args.get('ids', []))
response = client.deleterules_request(rule_group_id, comment, ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaReplyMetaOnly',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def devices_count_command(client, args):
type_ = str(args.get('type_', ''))
value = str(args.get('value', ''))
response = client.devices_count_request(type_, value)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiMsaReplyIOCDevicesCount',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def devices_ran_on_command(client, args):
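    """Find hosts on which the given IOC ran.

    ``type_``/``value`` identify the IOC; ``limit`` and ``offset`` page
    through the results. Illustrative args (hypothetical values):
    ``{'type_': 'domain', 'value': 'example.com', 'limit': '10'}``.
    """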
type_ = str(args.get('type_', ''))
value = str(args.get('value', ''))
limit = str(args.get('limit', ''))
offset = str(args.get('offset', ''))
response = client.devices_ran_on_request(type_, value, limit, offset)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiMsaReplyDevicesRanOn',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def download_sensor_installer_by_id_command(client, args):
id_ = str(args.get('id_', ''))
response = client.download_sensor_installer_by_id_request(id_)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainDownloadItem',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def entitiesprocesses_command(client, args):
ids = argToList(args.get('ids', []))
response = client.entitiesprocesses_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiMsaProcessDetailResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_actionsv1_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_actionsv1_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainActionEntitiesResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_aggregate_detects_command(client, args):
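    """Build an aggregate query over detections.

    Each ``msa_aggregatequeryrequest_*`` argument maps to one field of the
    API's aggregate-query request body (date ranges, field, filter,
    interval, sort, sub-aggregates, and so on).
    """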
msa_aggregatequeryrequest_date_ranges = argToList(args.get('msa_aggregatequeryrequest_date_ranges', []))
msa_aggregatequeryrequest_field = str(args.get('msa_aggregatequeryrequest_field', ''))
msa_aggregatequeryrequest_filter = str(args.get('msa_aggregatequeryrequest_filter', ''))
msa_aggregatequeryrequest_interval = str(args.get('msa_aggregatequeryrequest_interval', ''))
msa_aggregatequeryrequest_min_doc_count = args.get('msa_aggregatequeryrequest_min_doc_count', None)
msa_aggregatequeryrequest_missing = str(args.get('msa_aggregatequeryrequest_missing', ''))
msa_aggregatequeryrequest_name = str(args.get('msa_aggregatequeryrequest_name', ''))
msa_aggregatequeryrequest_q = str(args.get('msa_aggregatequeryrequest_q', ''))
msa_aggregatequeryrequest_ranges = argToList(args.get('msa_aggregatequeryrequest_ranges', []))
msa_aggregatequeryrequest_size = args.get('msa_aggregatequeryrequest_size', None)
msa_aggregatequeryrequest_sort = str(args.get('msa_aggregatequeryrequest_sort', ''))
msa_aggregatequeryrequest_sub_aggregates = argToList(args.get('msa_aggregatequeryrequest_sub_aggregates', []))
msa_aggregatequeryrequest_time_zone = str(args.get('msa_aggregatequeryrequest_time_zone', ''))
msa_aggregatequeryrequest_type = str(args.get('msa_aggregatequeryrequest_type', ''))
    response = client.get_aggregate_detects_request(
        msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter,
        msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing,
        msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges,
        msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates,
        msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaAggregatesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_artifacts_command(client, args):
id_ = str(args.get('id_', ''))
name = str(args.get('name', ''))
response = client.get_artifacts_request(id_, name)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_assessmentv1_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_assessmentv1_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainAssessmentsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_available_role_ids_command(client, args):
response = client.get_available_role_ids_request()
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_behaviors_command(client, args):
msa_idsrequest_ids = argToList(args.get('msa_idsrequest_ids', []))
response = client.get_behaviors_request(msa_idsrequest_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiMsaExternalBehaviorResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_children_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_children_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainChildrenResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_cloudconnectazure_entities_account_v1_command(client, args):
ids = argToList(args.get('ids', []))
scan_type = str(args.get('scan_type', ''))
response = client.get_cloudconnectazure_entities_account_v1_request(ids, scan_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationAzureAccountResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_cloudconnectazure_entities_userscriptsdownload_v1_command(client, args):
response = client.get_cloudconnectazure_entities_userscriptsdownload_v1_request()
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationAzureProvisionGetUserScriptResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_cloudconnectcspmazure_entities_account_v1_command(client, args):
ids = argToList(args.get('ids', []))
scan_type = str(args.get('scan_type', ''))
status = str(args.get('status', ''))
limit = int(args.get('limit', 100))
offset = args.get('offset', None)
response = client.get_cloudconnectcspmazure_entities_account_v1_request(ids, scan_type, status, limit, offset)
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationAzureAccountResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_cloudconnectcspmazure_entities_userscriptsdownload_v1_command(client, args):
tenant_id = str(args.get('tenant_id', ''))
response = client.get_cloudconnectcspmazure_entities_userscriptsdownload_v1_request(tenant_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationAzureProvisionGetUserScriptResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_clusters_command(client, args):
cluster_names = argToList(args.get('cluster_names', []))
account_ids = argToList(args.get('account_ids', []))
locations = argToList(args.get('locations', []))
cluster_service = str(args.get('cluster_service', ''))
limit = args.get('limit', None)
offset = args.get('offset', None)
response = client.get_clusters_request(cluster_names, account_ids, locations, cluster_service, limit, offset)
command_results = CommandResults(
outputs_prefix='CrowdStrike.k8sregGetClustersResp',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_combined_sensor_installers_by_query_command(client, args):
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
response = client.get_combined_sensor_installers_by_query_request(offset, limit, sort, filter_)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainSensorInstallersV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_detect_summaries_command(client, args):
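    """Fetch summaries for the detection IDs in ``msa_idsrequest_ids``."""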
msa_idsrequest_ids = argToList(args.get('msa_idsrequest_ids', []))
response = client.get_detect_summaries_request(msa_idsrequest_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainMsaDetectSummariesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_device_control_policies_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_device_control_policies_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesDeviceControlPoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_device_count_collection_queries_by_filter_command(client, args):
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
offset = str(args.get('offset', ''))
response = client.get_device_count_collection_queries_by_filter_request(limit, sort, filter_, offset)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_device_details_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_device_details_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainDeviceDetailsResponseSwagger',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_firewall_policies_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_firewall_policies_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesFirewallPoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_helm_values_yaml_command(client, args):
cluster_name = str(args.get('cluster_name', ''))
response = client.get_helm_values_yaml_request(cluster_name)
command_results = CommandResults(
outputs_prefix='CrowdStrike.k8sregHelmYAMLResp',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_host_groups_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_host_groups_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesHostGroupsV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_incidents_command(client, args):
msa_idsrequest_ids = argToList(args.get('msa_idsrequest_ids', []))
response = client.get_incidents_request(msa_idsrequest_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiMsaExternalIncidentResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_intel_actor_entities_command(client, args):
ids = argToList(args.get('ids', []))
fields = argToList(args.get('fields', []))
response = client.get_intel_actor_entities_request(ids, fields)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainActorsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_intel_indicator_entities_command(client, args):
msa_idsrequest_ids = argToList(args.get('msa_idsrequest_ids', []))
response = client.get_intel_indicator_entities_request(msa_idsrequest_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainPublicIndicatorsV3Response',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_intel_report_entities_command(client, args):
ids = argToList(args.get('ids', []))
fields = argToList(args.get('fields', []))
response = client.get_intel_report_entities_request(ids, fields)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainNewsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_intel_reportpdf_command(client, args):
id_ = str(args.get('id_', ''))
response = client.get_intel_reportpdf_request(id_)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_intel_rule_entities_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_intel_rule_entities_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainRulesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_intel_rule_file_command(client, args):
id_ = args.get('id_', None)
format = str(args.get('format', ''))
response = client.get_intel_rule_file_request(id_, format)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_latest_intel_rule_file_command(client, args):
type_ = str(args.get('type_', ''))
format = str(args.get('format', ''))
response = client.get_latest_intel_rule_file_request(type_, format)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_locations_command(client, args):
clouds = argToList(args.get('clouds', []))
response = client.get_locations_request(clouds)
command_results = CommandResults(
outputs_prefix='CrowdStrike.k8sregGetLocationsResp',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_mal_query_downloadv1_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_mal_query_downloadv1_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_mal_query_entities_samples_fetchv1_command(client, args):
ids = str(args.get('ids', ''))
response = client.get_mal_query_entities_samples_fetchv1_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_mal_query_metadatav1_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_mal_query_metadatav1_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.malquerySampleMetadataResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_mal_query_quotasv1_command(client, args):
response = client.get_mal_query_quotasv1_request()
command_results = CommandResults(
outputs_prefix='CrowdStrike.malqueryRateLimitsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_mal_query_requestv1_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_mal_query_requestv1_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.malqueryRequestResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_notifications_detailed_translatedv1_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_notifications_detailed_translatedv1_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainNotificationDetailsResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_notifications_detailedv1_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_notifications_detailedv1_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainNotificationDetailsResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_notifications_translatedv1_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_notifications_translatedv1_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainNotificationEntitiesResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_notificationsv1_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_notificationsv1_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainNotificationEntitiesResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_prevention_policies_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_prevention_policies_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesPreventionPoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_reports_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_reports_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.falconxReportV1Response',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_roles_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_roles_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainUserRoleResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_roles_byid_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_roles_byid_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainMSSPRoleResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_rulesv1_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_rulesv1_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainRulesEntitiesResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_samplev2_command(client, args):
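    """Download the sample identified by ``ids``.

    ``password_protected`` is forwarded as a string flag ('True'/'False',
    defaulting to 'False') controlling whether the returned archive is
    password protected.
    """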
ids = str(args.get('ids', ''))
password_protected = str(args.get('password_protected', 'False'))
response = client.get_samplev2_request(ids, password_protected)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_samplev3_command(client, args):
ids = str(args.get('ids', ''))
password_protected = str(args.get('password_protected', 'False'))
response = client.get_samplev3_request(ids, password_protected)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_scans_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_scans_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.mlscannerScanV1Response',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_scans_aggregates_command(client, args):
msa_aggregatequeryrequest_date_ranges = argToList(args.get('msa_aggregatequeryrequest_date_ranges', []))
msa_aggregatequeryrequest_field = str(args.get('msa_aggregatequeryrequest_field', ''))
msa_aggregatequeryrequest_filter = str(args.get('msa_aggregatequeryrequest_filter', ''))
msa_aggregatequeryrequest_interval = str(args.get('msa_aggregatequeryrequest_interval', ''))
msa_aggregatequeryrequest_min_doc_count = args.get('msa_aggregatequeryrequest_min_doc_count', None)
msa_aggregatequeryrequest_missing = str(args.get('msa_aggregatequeryrequest_missing', ''))
msa_aggregatequeryrequest_name = str(args.get('msa_aggregatequeryrequest_name', ''))
msa_aggregatequeryrequest_q = str(args.get('msa_aggregatequeryrequest_q', ''))
msa_aggregatequeryrequest_ranges = argToList(args.get('msa_aggregatequeryrequest_ranges', []))
msa_aggregatequeryrequest_size = args.get('msa_aggregatequeryrequest_size', None)
msa_aggregatequeryrequest_sort = str(args.get('msa_aggregatequeryrequest_sort', ''))
msa_aggregatequeryrequest_sub_aggregates = argToList(args.get('msa_aggregatequeryrequest_sub_aggregates', []))
msa_aggregatequeryrequest_time_zone = str(args.get('msa_aggregatequeryrequest_time_zone', ''))
msa_aggregatequeryrequest_type = str(args.get('msa_aggregatequeryrequest_type', ''))
    response = client.get_scans_aggregates_request(
        msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field, msa_aggregatequeryrequest_filter,
        msa_aggregatequeryrequest_interval, msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing,
        msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q, msa_aggregatequeryrequest_ranges,
        msa_aggregatequeryrequest_size, msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates,
        msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_sensor_installers_by_query_command(client, args):
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
response = client.get_sensor_installers_by_query_request(offset, limit, sort, filter_)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_sensor_installers_entities_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_sensor_installers_entities_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainSensorInstallersV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_sensor_installersccid_by_query_command(client, args):
response = client.get_sensor_installersccid_by_query_request()
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_sensor_update_policies_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_sensor_update_policies_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesSensorUpdatePoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_sensor_update_policiesv2_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_sensor_update_policiesv2_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesSensorUpdatePoliciesV2',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_sensor_visibility_exclusionsv1_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_sensor_visibility_exclusionsv1_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesSvExclusionRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_submissions_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_submissions_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.falconxSubmissionV1Response',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_summary_reports_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_summary_reports_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.falconxSummaryReportV1Response',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_user_group_members_byid_command(client, args):
user_group_ids = str(args.get('user_group_ids', ''))
response = client.get_user_group_members_byid_request(user_group_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainUserGroupMembersResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_user_groups_byid_command(client, args):
user_group_ids = argToList(args.get('user_group_ids', []))
response = client.get_user_groups_byid_request(user_group_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainUserGroupsResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_user_role_ids_command(client, args):
user_uuid = str(args.get('user_uuid', ''))
response = client.get_user_role_ids_request(user_uuid)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def get_vulnerabilities_command(client, args):
ids = argToList(args.get('ids', []))
response = client.get_vulnerabilities_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainSPAPIVulnerabilitiesEntitiesResponseV2',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getaws_accounts_command(client, args):
ids = argToList(args.get('ids', []))
response = client.getaws_accounts_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.modelsAWSAccountsV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getaws_accounts_mixin0_command(client, args):
ids = argToList(args.get('ids', []))
status = str(args.get('status', ''))
limit = args.get('limit', None)
offset = args.get('offset', None)
response = client.getaws_accounts_mixin0_request(ids, status, limit, offset)
command_results = CommandResults(
outputs_prefix='CrowdStrike.k8sregGetAWSAccountsResp',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getaws_settings_command(client, args):
response = client.getaws_settings_request()
command_results = CommandResults(
outputs_prefix='CrowdStrike.modelsCustomerConfigurationsV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getcid_group_by_id_command(client, args):
cid_group_ids = argToList(args.get('cid_group_ids', []))
response = client.getcid_group_by_id_request(cid_group_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainCIDGroupsResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getcid_group_members_by_command(client, args):
cid_group_ids = argToList(args.get('cid_group_ids', []))
response = client.getcid_group_members_by_request(cid_group_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainCIDGroupMembersResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getcspm_aws_account_command(client, args):
scan_type = str(args.get('scan_type', ''))
ids = argToList(args.get('ids', []))
organization_ids = argToList(args.get('organization_ids', []))
status = str(args.get('status', ''))
limit = int(args.get('limit', 100))
offset = args.get('offset', None)
group_by = str(args.get('group_by', ''))
response = client.getcspm_aws_account_request(scan_type, ids, organization_ids, status, limit, offset, group_by)
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationAWSAccountResponseV2',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getcspm_aws_account_scripts_attachment_command(client, args):
response = client.getcspm_aws_account_scripts_attachment_request()
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationAWSProvisionGetAccountScriptResponseV2',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getcspm_aws_console_setupur_ls_command(client, args):
response = client.getcspm_aws_console_setupur_ls_request()
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationAWSAccountConsoleURL',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getcspm_azure_user_scripts_command(client, args):
response = client.getcspm_azure_user_scripts_request()
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationAzureProvisionGetUserScriptResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getcspm_policy_command(client, args):
ids = str(args.get('ids', ''))
response = client.getcspm_policy_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationPolicyResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getcspm_policy_settings_command(client, args):
service = str(args.get('service', ''))
policy_id = str(args.get('policy_id', ''))
cloud_platform = str(args.get('cloud_platform', ''))
response = client.getcspm_policy_settings_request(service, policy_id, cloud_platform)
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationPolicySettingsResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getcspm_scan_schedule_command(client, args):
cloud_platform = argToList(args.get('cloud_platform', []))
response = client.getcspm_scan_schedule_request(cloud_platform)
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationScanScheduleResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getcspmcgp_account_command(client, args):
scan_type = str(args.get('scan_type', ''))
ids = argToList(args.get('ids', []))
response = client.getcspmcgp_account_request(scan_type, ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationGCPAccountResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getcspmgcp_user_scripts_command(client, args):
response = client.getcspmgcp_user_scripts_request()
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationGCPProvisionGetUserScriptResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getcspmgcp_user_scripts_attachment_command(client, args):
response = client.getcspmgcp_user_scripts_attachment_request()
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationGCPProvisionGetUserScriptResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getevents_command(client, args):
ids = argToList(args.get('ids', []))
response = client.getevents_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrapiEventsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getfirewallfields_command(client, args):
ids = argToList(args.get('ids', []))
response = client.getfirewallfields_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrapiFirewallFieldsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getioa_events_command(client, args):
policy_id = str(args.get('policy_id', ''))
cloud_provider = str(args.get('cloud_provider', ''))
account_id = str(args.get('account_id', ''))
azure_tenant_id = str(args.get('azure_tenant_id', ''))
user_ids = argToList(args.get('user_ids', []))
offset = args.get('offset', None)
limit = args.get('limit', None)
response = client.getioa_events_request(policy_id, cloud_provider, account_id, azure_tenant_id, user_ids, offset, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationExternalIOAEventResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getioa_exclusionsv1_command(client, args):
ids = argToList(args.get('ids', []))
response = client.getioa_exclusionsv1_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesIoaExclusionRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getioa_users_command(client, args):
policy_id = str(args.get('policy_id', ''))
cloud_provider = str(args.get('cloud_provider', ''))
account_id = str(args.get('account_id', ''))
azure_tenant_id = str(args.get('azure_tenant_id', ''))
response = client.getioa_users_request(policy_id, cloud_provider, account_id, azure_tenant_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationIOAUserResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getioc_command(client, args):
type_ = str(args.get('type_', ''))
value = str(args.get('value', ''))
response = client.getioc_request(type_, value)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiMsaReplyIOC',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getml_exclusionsv1_command(client, args):
ids = argToList(args.get('ids', []))
response = client.getml_exclusionsv1_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesMlExclusionRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getpatterns_command(client, args):
ids = argToList(args.get('ids', []))
response = client.getpatterns_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiPatternsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getplatforms_command(client, args):
ids = argToList(args.get('ids', []))
response = client.getplatforms_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrapiPlatformsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getplatforms_mixin0_command(client, args):
ids = argToList(args.get('ids', []))
response = client.getplatforms_mixin0_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiPlatformsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getpolicycontainers_command(client, args):
ids = argToList(args.get('ids', []))
response = client.getpolicycontainers_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrapiPolicyContainersResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getrt_response_policies_command(client, args):
ids = argToList(args.get('ids', []))
response = client.getrt_response_policies_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesRTResponsePoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getrulegroups_command(client, args):
ids = argToList(args.get('ids', []))
response = client.getrulegroups_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrapiRuleGroupsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getrulegroups_mixin0_command(client, args):
ids = argToList(args.get('ids', []))
response = client.getrulegroups_mixin0_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiRuleGroupsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getrules_command(client, args):
ids = argToList(args.get('ids', []))
response = client.getrules_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrapiRulesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getrules_mixin0_command(client, args):
ids = argToList(args.get('ids', []))
response = client.getrules_mixin0_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiRulesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getrulesget_command(client, args):
api_rulesgetrequestv1_ids = argToList(args.get('api_rulesgetrequestv1_ids', []))
response = client.getrulesget_request(api_rulesgetrequestv1_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiRulesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def getruletypes_command(client, args):
ids = argToList(args.get('ids', []))
response = client.getruletypes_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiRuleTypesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def grant_user_role_ids_command(client, args):
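    """Grant the roles in ``domain_roleids_roleids`` to the user ``user_uuid``."""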
user_uuid = str(args.get('user_uuid', ''))
domain_roleids_roleids = argToList(args.get('domain_roleids_roleids', []))
response = client.grant_user_role_ids_request(user_uuid, domain_roleids_roleids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainUserRoleIDsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def indicatorcombinedv1_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.indicatorcombinedv1_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiIndicatorRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def indicatorcreatev1_command(client, args):
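    """Create custom indicators.

    ``api_indicatorcreatereqsv1_indicators`` holds the indicator objects;
    ``retrodetects`` and ``ignore_warnings`` are forwarded to the request
    alongside the optional ``comment``.
    """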
retrodetects = str(args.get('retrodetects', ''))
ignore_warnings = str(args.get('ignore_warnings', 'False'))
api_indicatorcreatereqsv1_comment = str(args.get('api_indicatorcreatereqsv1_comment', ''))
api_indicatorcreatereqsv1_indicators = argToList(args.get('api_indicatorcreatereqsv1_indicators', []))
response = client.indicatorcreatev1_request(retrodetects, ignore_warnings,
api_indicatorcreatereqsv1_comment, api_indicatorcreatereqsv1_indicators)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def indicatordeletev1_command(client, args):
filter_ = str(args.get('filter_', ''))
ids = argToList(args.get('ids', []))
comment = str(args.get('comment', ''))
response = client.indicatordeletev1_request(filter_, ids, comment)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiIndicatorQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def indicatorgetv1_command(client, args):
ids = argToList(args.get('ids', []))
response = client.indicatorgetv1_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiIndicatorRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def indicatorsearchv1_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.indicatorsearchv1_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiIndicatorQueryRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def indicatorupdatev1_command(client, args):
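    """Bulk-update custom indicators.

    The ``api_indicatorupdatereqsv1_bulk_update_*`` arguments are folded
    into a single ``bulk_update`` object via ``assign_params`` (which drops
    empty values) before the request is sent.
    """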
retrodetects = str(args.get('retrodetects', ''))
ignore_warnings = str(args.get('ignore_warnings', 'False'))
api_indicatorupdatereqsv1_bulk_update_action = str(args.get('api_indicatorupdatereqsv1_bulk_update_action', ''))
api_indicatorupdatereqsv1_bulk_update_applied_globally = argToBoolean(
args.get('api_indicatorupdatereqsv1_bulk_update_applied_globally', False))
api_indicatorupdatereqsv1_bulk_update_description = str(args.get('api_indicatorupdatereqsv1_bulk_update_description', ''))
api_indicatorupdatereqsv1_bulk_update_expiration = str(args.get('api_indicatorupdatereqsv1_bulk_update_expiration', ''))
api_indicatorupdatereqsv1_bulk_update_filter = str(args.get('api_indicatorupdatereqsv1_bulk_update_filter', ''))
api_indicatorupdatereqsv1_bulk_update_host_groups = str(args.get('api_indicatorupdatereqsv1_bulk_update_host_groups', ''))
api_indicatorupdatereqsv1_bulk_update_mobile_action = str(args.get('api_indicatorupdatereqsv1_bulk_update_mobile_action', ''))
api_indicatorupdatereqsv1_bulk_update_platforms = str(args.get('api_indicatorupdatereqsv1_bulk_update_platforms', ''))
api_indicatorupdatereqsv1_bulk_update_severity = str(args.get('api_indicatorupdatereqsv1_bulk_update_severity', ''))
api_indicatorupdatereqsv1_bulk_update_source = str(args.get('api_indicatorupdatereqsv1_bulk_update_source', ''))
api_indicatorupdatereqsv1_bulk_update_tags = str(args.get('api_indicatorupdatereqsv1_bulk_update_tags', ''))
    api_indicatorupdatereqsv1_bulk_update = assign_params(
        action=api_indicatorupdatereqsv1_bulk_update_action,
        applied_globally=api_indicatorupdatereqsv1_bulk_update_applied_globally,
        description=api_indicatorupdatereqsv1_bulk_update_description,
        expiration=api_indicatorupdatereqsv1_bulk_update_expiration,
        filter=api_indicatorupdatereqsv1_bulk_update_filter,
        host_groups=api_indicatorupdatereqsv1_bulk_update_host_groups,
        mobile_action=api_indicatorupdatereqsv1_bulk_update_mobile_action,
        platforms=api_indicatorupdatereqsv1_bulk_update_platforms,
        severity=api_indicatorupdatereqsv1_bulk_update_severity,
        source=api_indicatorupdatereqsv1_bulk_update_source,
        tags=api_indicatorupdatereqsv1_bulk_update_tags)
api_indicatorupdatereqsv1_comment = str(args.get('api_indicatorupdatereqsv1_comment', ''))
api_indicatorupdatereqsv1_indicators = argToList(args.get('api_indicatorupdatereqsv1_indicators', []))
response = client.indicatorupdatev1_request(retrodetects, ignore_warnings, api_indicatorupdatereqsv1_bulk_update,
api_indicatorupdatereqsv1_comment, api_indicatorupdatereqsv1_indicators)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiIndicatorRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def list_available_streamso_auth2_command(client, args):
appId = str(args.get('appId', ''))
format = str(args.get('format', ''))
response = client.list_available_streamso_auth2_request(appId, format)
command_results = CommandResults(
outputs_prefix='CrowdStrike.maindiscoveryResponseV2',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def oauth2_access_token_command(client, args):
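    """Request an OAuth2 access token using ``client_id``/``client_secret``.

    ``member_cid`` optionally scopes the token to a child CID.
    """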
client_id = str(args.get('client_id', ''))
client_secret = str(args.get('client_secret', ''))
member_cid = str(args.get('member_cid', ''))
response = client.oauth2_access_token_request(client_id, client_secret, member_cid)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def oauth2_revoke_token_command(client, args):
token = str(args.get('token', ''))
response = client.oauth2_revoke_token_request(token)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaReplyMetaOnly',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def patch_cloudconnectazure_entities_clientid_v1_command(client, args):
id_ = str(args.get('id_', ''))
response = client.patch_cloudconnectazure_entities_clientid_v1_request(id_)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def patch_cloudconnectcspmazure_entities_clientid_v1_command(client, args):
id_ = str(args.get('id_', ''))
tenant_id = str(args.get('tenant_id', ''))
response = client.patch_cloudconnectcspmazure_entities_clientid_v1_request(id_, tenant_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def patchcspm_aws_account_command(client, args):
registration_awsaccountpatchrequest_resources = argToList(args.get('registration_awsaccountpatchrequest_resources', []))
response = client.patchcspm_aws_account_request(registration_awsaccountpatchrequest_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def perform_actionv2_command(client, args):
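    """Perform a named action on a set of entities.

    ``action_name`` selects the action; the action parameters and target
    IDs are taken from the ``msa_entityactionrequestv2_*`` arguments.
    """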
action_name = str(args.get('action_name', ''))
    msa_entityactionrequestv2_action_parameters = argToList(args.get('msa_entityactionrequestv2_action_parameters', []))
    msa_entityactionrequestv2_ids = argToList(args.get('msa_entityactionrequestv2_ids', []))
    response = client.perform_actionv2_request(
        action_name, msa_entityactionrequestv2_action_parameters, msa_entityactionrequestv2_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def perform_device_control_policies_action_command(client, args):
action_name = str(args.get('action_name', ''))
    msa_entityactionrequestv2_action_parameters = argToList(args.get('msa_entityactionrequestv2_action_parameters', []))
    msa_entityactionrequestv2_ids = argToList(args.get('msa_entityactionrequestv2_ids', []))
    response = client.perform_device_control_policies_action_request(
        action_name, msa_entityactionrequestv2_action_parameters, msa_entityactionrequestv2_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesDeviceControlPoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def perform_firewall_policies_action_command(client, args):
action_name = str(args.get('action_name', ''))
    msa_entityactionrequestv2_action_parameters = argToList(args.get('msa_entityactionrequestv2_action_parameters', []))
    msa_entityactionrequestv2_ids = argToList(args.get('msa_entityactionrequestv2_ids', []))
    response = client.perform_firewall_policies_action_request(
        action_name, msa_entityactionrequestv2_action_parameters, msa_entityactionrequestv2_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesFirewallPoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def perform_group_action_command(client, args):
action_name = str(args.get('action_name', ''))
    msa_entityactionrequestv2_action_parameters = argToList(args.get('msa_entityactionrequestv2_action_parameters', []))
    msa_entityactionrequestv2_ids = argToList(args.get('msa_entityactionrequestv2_ids', []))
    response = client.perform_group_action_request(
        action_name, msa_entityactionrequestv2_action_parameters, msa_entityactionrequestv2_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesHostGroupsV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def perform_incident_action_command(client, args):
    msa_entityactionrequestv2_action_parameters = argToList(args.get('msa_entityactionrequestv2_action_parameters', []))
    msa_entityactionrequestv2_ids = argToList(args.get('msa_entityactionrequestv2_ids', []))
    response = client.perform_incident_action_request(msa_entityactionrequestv2_action_parameters, msa_entityactionrequestv2_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaReplyMetaOnly',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def perform_prevention_policies_action_command(client, args):
action_name = str(args.get('action_name', ''))
    msa_entityactionrequestv2_action_parameters = argToList(args.get('msa_entityactionrequestv2_action_parameters', []))
    msa_entityactionrequestv2_ids = argToList(args.get('msa_entityactionrequestv2_ids', []))
    response = client.perform_prevention_policies_action_request(
        action_name, msa_entityactionrequestv2_action_parameters, msa_entityactionrequestv2_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesPreventionPoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def perform_sensor_update_policies_action_command(client, args):
action_name = str(args.get('action_name', ''))
    msa_entityactionrequestv2_action_parameters = argToList(args.get('msa_entityactionrequestv2_action_parameters', []))
    msa_entityactionrequestv2_ids = argToList(args.get('msa_entityactionrequestv2_ids', []))
    response = client.perform_sensor_update_policies_action_request(
        action_name, msa_entityactionrequestv2_action_parameters, msa_entityactionrequestv2_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesSensorUpdatePoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def performrt_response_policies_action_command(client, args):
action_name = str(args.get('action_name', ''))
    msa_entityactionrequestv2_action_parameters = argToList(args.get('msa_entityactionrequestv2_action_parameters', []))
    msa_entityactionrequestv2_ids = argToList(args.get('msa_entityactionrequestv2_ids', []))
    response = client.performrt_response_policies_action_request(
        action_name, msa_entityactionrequestv2_action_parameters, msa_entityactionrequestv2_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesRTResponsePoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def post_cloudconnectazure_entities_account_v1_command(client, args):
registration_azureaccountcreaterequestexternalv1_resources = argToList(
args.get('registration_azureaccountcreaterequestexternalv1_resources', []))
response = client.post_cloudconnectazure_entities_account_v1_request(
registration_azureaccountcreaterequestexternalv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def post_cloudconnectcspmazure_entities_account_v1_command(client, args):
registration_azureaccountcreaterequestexternalv1_resources = argToList(
args.get('registration_azureaccountcreaterequestexternalv1_resources', []))
response = client.post_cloudconnectcspmazure_entities_account_v1_request(
registration_azureaccountcreaterequestexternalv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
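
# --- MalQuery commands ----------------------------------------------------------
# Sample multi-download, exact search, fuzzy search, and YARA-rule hunting
# against the MalQuery service.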
def post_mal_query_entities_samples_multidownloadv1_command(client, args):
malquery_multidownloadrequestv1_samples = argToList(args.get('malquery_multidownloadrequestv1_samples', []))
response = client.post_mal_query_entities_samples_multidownloadv1_request(malquery_multidownloadrequestv1_samples)
command_results = CommandResults(
outputs_prefix='CrowdStrike.malqueryExternalQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def post_mal_query_exact_searchv1_command(client, args):
malquery_externalexactsearchparametersv1_options_filter_filetypes = str(
args.get('malquery_externalexactsearchparametersv1_options_filter_filetypes', ''))
malquery_externalexactsearchparametersv1_options_filter_meta = str(
args.get('malquery_externalexactsearchparametersv1_options_filter_meta', ''))
malquery_externalexactsearchparametersv1_options_limit = args.get(
'malquery_externalexactsearchparametersv1_options_limit', None)
malquery_externalexactsearchparametersv1_options_max_date = str(
args.get('malquery_externalexactsearchparametersv1_options_max_date', ''))
malquery_externalexactsearchparametersv1_options_max_size = str(
args.get('malquery_externalexactsearchparametersv1_options_max_size', ''))
malquery_externalexactsearchparametersv1_options_min_date = str(
args.get('malquery_externalexactsearchparametersv1_options_min_date', ''))
malquery_externalexactsearchparametersv1_options_min_size = str(
args.get('malquery_externalexactsearchparametersv1_options_min_size', ''))
    malquery_externalexactsearchparametersv1_options = assign_params(
        filter_filetypes=malquery_externalexactsearchparametersv1_options_filter_filetypes,
        filter_meta=malquery_externalexactsearchparametersv1_options_filter_meta,
        limit=malquery_externalexactsearchparametersv1_options_limit,
        max_date=malquery_externalexactsearchparametersv1_options_max_date,
        max_size=malquery_externalexactsearchparametersv1_options_max_size,
        min_date=malquery_externalexactsearchparametersv1_options_min_date,
        min_size=malquery_externalexactsearchparametersv1_options_min_size)
malquery_externalexactsearchparametersv1_patterns = argToList(
args.get('malquery_externalexactsearchparametersv1_patterns', []))
response = client.post_mal_query_exact_searchv1_request(
malquery_externalexactsearchparametersv1_options, malquery_externalexactsearchparametersv1_patterns)
command_results = CommandResults(
outputs_prefix='CrowdStrike.malqueryExternalQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def post_mal_query_fuzzy_searchv1_command(client, args):
malquery_fuzzysearchparametersv1_options_filter_meta = str(
args.get('malquery_fuzzysearchparametersv1_options_filter_meta', ''))
malquery_fuzzysearchparametersv1_options_limit = args.get('malquery_fuzzysearchparametersv1_options_limit', None)
malquery_fuzzysearchparametersv1_options = assign_params(
filter_meta=malquery_fuzzysearchparametersv1_options_filter_meta, limit=malquery_fuzzysearchparametersv1_options_limit)
malquery_fuzzysearchparametersv1_patterns = argToList(args.get('malquery_fuzzysearchparametersv1_patterns', []))
response = client.post_mal_query_fuzzy_searchv1_request(
malquery_fuzzysearchparametersv1_options, malquery_fuzzysearchparametersv1_patterns)
command_results = CommandResults(
outputs_prefix='CrowdStrike.malqueryFuzzySearchResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def post_mal_query_huntv1_command(client, args):
malquery_externalhuntparametersv1_options_filter_filetypes = str(
args.get('malquery_externalhuntparametersv1_options_filter_filetypes', ''))
malquery_externalhuntparametersv1_options_filter_meta = str(
args.get('malquery_externalhuntparametersv1_options_filter_meta', ''))
malquery_externalhuntparametersv1_options_limit = args.get('malquery_externalhuntparametersv1_options_limit', None)
malquery_externalhuntparametersv1_options_max_date = str(args.get('malquery_externalhuntparametersv1_options_max_date', ''))
malquery_externalhuntparametersv1_options_max_size = str(args.get('malquery_externalhuntparametersv1_options_max_size', ''))
malquery_externalhuntparametersv1_options_min_date = str(args.get('malquery_externalhuntparametersv1_options_min_date', ''))
malquery_externalhuntparametersv1_options_min_size = str(args.get('malquery_externalhuntparametersv1_options_min_size', ''))
    malquery_externalhuntparametersv1_options = assign_params(
        filter_filetypes=malquery_externalhuntparametersv1_options_filter_filetypes,
        filter_meta=malquery_externalhuntparametersv1_options_filter_meta,
        limit=malquery_externalhuntparametersv1_options_limit,
        max_date=malquery_externalhuntparametersv1_options_max_date,
        max_size=malquery_externalhuntparametersv1_options_max_size,
        min_date=malquery_externalhuntparametersv1_options_min_date,
        min_size=malquery_externalhuntparametersv1_options_min_size)
malquery_externalhuntparametersv1_yara_rule = str(args.get('malquery_externalhuntparametersv1_yara_rule', ''))
response = client.post_mal_query_huntv1_request(
malquery_externalhuntparametersv1_options, malquery_externalhuntparametersv1_yara_rule)
command_results = CommandResults(
outputs_prefix='CrowdStrike.malqueryExternalQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
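# A minimal sketch of driving the hunt command above directly; the args keys
# are the generated argument names and the YARA rule is purely illustrative:
#
#     results = post_mal_query_huntv1_command(client, {
#         'malquery_externalhuntparametersv1_yara_rule': 'rule t { condition: true }',
#         'malquery_externalhuntparametersv1_options_limit': '10',
#     })
#     # results.outputs carries the malqueryExternalQueryResponse payload.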
def preview_rulev1_command(client, args):
domain_rulepreviewrequest_filter = str(args.get('domain_rulepreviewrequest_filter', ''))
domain_rulepreviewrequest_topic = str(args.get('domain_rulepreviewrequest_topic', ''))
response = client.preview_rulev1_request(domain_rulepreviewrequest_filter, domain_rulepreviewrequest_topic)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainAggregatesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
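
# processes_ran_on_command resolves the processes associated with a given IOC
# (type_/value) on a specific device, with offset/limit paging.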
def processes_ran_on_command(client, args):
type_ = str(args.get('type_', ''))
value = str(args.get('value', ''))
device_id = str(args.get('device_id', ''))
limit = str(args.get('limit', ''))
offset = str(args.get('offset', ''))
response = client.processes_ran_on_request(type_, value, device_id, limit, offset)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiMsaReplyProcessesRanOn',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def provisionaws_accounts_command(client, args):
mode = str(args.get('mode', 'manual'))
models_createawsaccountsv1_resources = argToList(args.get('models_createawsaccountsv1_resources', []))
response = client.provisionaws_accounts_request(mode, models_createawsaccountsv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
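
# --- Generic query commands -----------------------------------------------------
# The query_* commands below share one shape: optional FQL 'filter_', free-text
# 'q', paging ('offset'/'limit'), and 'sort' arguments are forwarded to the
# matching client request, and the raw JSON response is wrapped unchanged in
# CommandResults. A sketch of how such a command is typically dispatched from an
# XSOAR-style main() (the command-name key is illustrative, not necessarily the
# integration's actual YAML name):
#
#     commands = {'cs-query-actionsv1': query_actionsv1_command}
#     return_results(commands[demisto.command()](client, demisto.args()))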
def query_actionsv1_command(client, args):
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
q = str(args.get('q', ''))
response = client.query_actionsv1_request(offset, limit, sort, filter_, q)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_allow_list_filter_command(client, args):
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
offset = str(args.get('offset', ''))
response = client.query_allow_list_filter_request(limit, sort, filter_, offset)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_behaviors_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_behaviors_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_block_list_filter_command(client, args):
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
offset = str(args.get('offset', ''))
response = client.query_block_list_filter_request(limit, sort, filter_, offset)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_children_command(client, args):
sort = str(args.get('sort', 'last_modified_timestamp|desc'))
offset = int(args.get('offset', 0))
limit = int(args.get('limit', 10))
response = client.query_children_request(sort, offset, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_combined_device_control_policies_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_combined_device_control_policies_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesDeviceControlPoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_combined_device_control_policy_members_command(client, args):
id_ = str(args.get('id_', ''))
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_combined_device_control_policy_members_request(id_, filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesPolicyMembersRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_combined_firewall_policies_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_combined_firewall_policies_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesFirewallPoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_combined_firewall_policy_members_command(client, args):
id_ = str(args.get('id_', ''))
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_combined_firewall_policy_members_request(id_, filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesPolicyMembersRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_combined_group_members_command(client, args):
id_ = str(args.get('id_', ''))
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_combined_group_members_request(id_, filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesHostGroupMembersV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_combined_host_groups_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_combined_host_groups_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesHostGroupsV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_combined_prevention_policies_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_combined_prevention_policies_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesPreventionPoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_combined_prevention_policy_members_command(client, args):
id_ = str(args.get('id_', ''))
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_combined_prevention_policy_members_request(id_, filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesPolicyMembersRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_combined_sensor_update_builds_command(client, args):
platform = str(args.get('platform', ''))
response = client.query_combined_sensor_update_builds_request(platform)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesSensorUpdateBuildsV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_combined_sensor_update_policies_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_combined_sensor_update_policies_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesSensorUpdatePoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_combined_sensor_update_policiesv2_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_combined_sensor_update_policiesv2_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesSensorUpdatePoliciesV2',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_combined_sensor_update_policy_members_command(client, args):
id_ = str(args.get('id_', ''))
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_combined_sensor_update_policy_members_request(id_, filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesPolicyMembersRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_combinedrt_response_policies_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_combinedrt_response_policies_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesRTResponsePoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_combinedrt_response_policy_members_command(client, args):
id_ = str(args.get('id_', ''))
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_combinedrt_response_policy_members_request(id_, filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesPolicyMembersRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
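
# Note: most query_* commands from here on return msaQueryResponse, i.e. a list
# of matching IDs plus paging metadata, rather than full entities; pair them
# with the corresponding entity-lookup commands to resolve the IDs.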
def query_detection_ids_by_filter_command(client, args):
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
offset = str(args.get('offset', ''))
response = client.query_detection_ids_by_filter_request(limit, sort, filter_, offset)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_detects_command(client, args):
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
q = str(args.get('q', ''))
response = client.query_detects_request(offset, limit, sort, filter_, q)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_device_control_policies_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_device_control_policies_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_device_control_policy_members_command(client, args):
id_ = str(args.get('id_', ''))
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_device_control_policy_members_request(id_, filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_devices_by_filter_command(client, args):
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
response = client.query_devices_by_filter_request(offset, limit, sort, filter_)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_devices_by_filter_scroll_command(client, args):
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
response = client.query_devices_by_filter_scroll_request(offset, limit, sort, filter_)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainDeviceResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_escalations_filter_command(client, args):
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
offset = str(args.get('offset', ''))
response = client.query_escalations_filter_request(limit, sort, filter_, offset)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_firewall_policies_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_firewall_policies_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_firewall_policy_members_command(client, args):
id_ = str(args.get('id_', ''))
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_firewall_policy_members_request(id_, filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_group_members_command(client, args):
id_ = str(args.get('id_', ''))
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_group_members_request(id_, filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_hidden_devices_command(client, args):
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
response = client.query_hidden_devices_request(offset, limit, sort, filter_)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_host_groups_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_host_groups_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_incident_ids_by_filter_command(client, args):
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
offset = str(args.get('offset', ''))
response = client.query_incident_ids_by_filter_request(limit, sort, filter_, offset)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_incidents_command(client, args):
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
response = client.query_incidents_request(sort, filter_, offset, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiMsaIncidentQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_intel_actor_entities_command(client, args):
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
q = str(args.get('q', ''))
fields = argToList(args.get('fields', []))
response = client.query_intel_actor_entities_request(offset, limit, sort, filter_, q, fields)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainActorsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_intel_actor_ids_command(client, args):
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
q = str(args.get('q', ''))
response = client.query_intel_actor_ids_request(offset, limit, sort, filter_, q)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_intel_indicator_entities_command(client, args):
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
q = str(args.get('q', ''))
include_deleted = argToBoolean(args.get('include_deleted', False))
response = client.query_intel_indicator_entities_request(offset, limit, sort, filter_, q, include_deleted)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainPublicIndicatorsV3Response',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_intel_indicator_ids_command(client, args):
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
q = str(args.get('q', ''))
include_deleted = argToBoolean(args.get('include_deleted', False))
response = client.query_intel_indicator_ids_request(offset, limit, sort, filter_, q, include_deleted)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_intel_report_entities_command(client, args):
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
q = str(args.get('q', ''))
fields = argToList(args.get('fields', []))
response = client.query_intel_report_entities_request(offset, limit, sort, filter_, q, fields)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainNewsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_intel_report_ids_command(client, args):
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
q = str(args.get('q', ''))
response = client.query_intel_report_ids_request(offset, limit, sort, filter_, q)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_intel_rule_ids_command(client, args):
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
name = argToList(args.get('name', []))
type_ = str(args.get('type_', ''))
description = argToList(args.get('description', []))
tags = argToList(args.get('tags', []))
min_created_date = args.get('min_created_date', None)
max_created_date = str(args.get('max_created_date', ''))
q = str(args.get('q', ''))
response = client.query_intel_rule_ids_request(
offset, limit, sort, name, type_, description, tags, min_created_date, max_created_date, q)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_notificationsv1_command(client, args):
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
q = str(args.get('q', ''))
response = client.query_notificationsv1_request(offset, limit, sort, filter_, q)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_prevention_policies_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_prevention_policies_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_prevention_policy_members_command(client, args):
id_ = str(args.get('id_', ''))
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_prevention_policy_members_request(id_, filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_remediations_filter_command(client, args):
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
offset = str(args.get('offset', ''))
response = client.query_remediations_filter_request(limit, sort, filter_, offset)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_reports_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_reports_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_roles_command(client, args):
user_group_id = str(args.get('user_group_id', ''))
cid_group_id = str(args.get('cid_group_id', ''))
role_id = str(args.get('role_id', ''))
sort = str(args.get('sort', 'last_modified_timestamp|desc'))
offset = int(args.get('offset', 0))
limit = int(args.get('limit', 10))
response = client.query_roles_request(user_group_id, cid_group_id, role_id, sort, offset, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_rulesv1_command(client, args):
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
q = str(args.get('q', ''))
response = client.query_rulesv1_request(offset, limit, sort, filter_, q)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainRuleQueryResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_samplev1_command(client, args):
samplestore_querysamplesrequest_sha256s = argToList(args.get('samplestore_querysamplesrequest_sha256s', []))
response = client.query_samplev1_request(samplestore_querysamplesrequest_sha256s)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_sensor_update_policies_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_sensor_update_policies_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_sensor_update_policy_members_command(client, args):
id_ = str(args.get('id_', ''))
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_sensor_update_policy_members_request(id_, filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_sensor_visibility_exclusionsv1_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_sensor_visibility_exclusionsv1_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_submissions_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_submissions_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_submissions_mixin0_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.query_submissions_mixin0_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.mlscannerQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_user_group_members_command(client, args):
user_uuid = str(args.get('user_uuid', ''))
sort = str(args.get('sort', 'last_modified_timestamp|desc'))
offset = int(args.get('offset', 0))
limit = int(args.get('limit', 10))
response = client.query_user_group_members_request(user_uuid, sort, offset, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_user_groups_command(client, args):
name = str(args.get('name', ''))
sort = str(args.get('sort', 'name|asc'))
offset = int(args.get('offset', 0))
limit = int(args.get('limit', 10))
response = client.query_user_groups_request(name, sort, offset, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def query_vulnerabilities_command(client, args):
after = str(args.get('after', ''))
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
response = client.query_vulnerabilities_request(after, limit, sort, filter_)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainSPAPIQueryVulnerabilitiesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def queryaws_accounts_command(client, args):
limit = int(args.get('limit', 100))
offset = args.get('offset', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
response = client.queryaws_accounts_request(limit, offset, sort, filter_)
command_results = CommandResults(
outputs_prefix='CrowdStrike.modelsAWSAccountsV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def queryaws_accounts_fori_ds_command(client, args):
limit = int(args.get('limit', 100))
offset = args.get('offset', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
response = client.queryaws_accounts_fori_ds_request(limit, offset, sort, filter_)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def querycid_group_members_command(client, args):
cid = str(args.get('cid', ''))
sort = str(args.get('sort', 'last_modified_timestamp|desc'))
offset = int(args.get('offset', 0))
limit = int(args.get('limit', 10))
response = client.querycid_group_members_request(cid, sort, offset, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def querycid_groups_command(client, args):
name = str(args.get('name', ''))
sort = str(args.get('sort', 'name|asc'))
offset = int(args.get('offset', 0))
limit = int(args.get('limit', 10))
response = client.querycid_groups_request(name, sort, offset, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
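
# --- Firewall management and miscellaneous query commands -----------------------
# The remaining query* commands cover firewall events, fields, platforms, rules
# and rule groups (fwmgr* output prefixes), plus custom IOCs, IOA/ML exclusions,
# and RTR response policies; most follow the same filter/offset/limit/sort
# pattern as above.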
def queryevents_command(client, args):
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
q = str(args.get('q', ''))
offset = str(args.get('offset', ''))
after = str(args.get('after', ''))
limit = args.get('limit', None)
response = client.queryevents_request(sort, filter_, q, offset, after, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrapiQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def queryfirewallfields_command(client, args):
platform_id = str(args.get('platform_id', ''))
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
response = client.queryfirewallfields_request(platform_id, offset, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrmsaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def queryio_cs_command(client, args):
types = str(args.get('types', ''))
values = str(args.get('values', ''))
from_expiration_timestamp = str(args.get('from_expiration_timestamp', ''))
to_expiration_timestamp = str(args.get('to_expiration_timestamp', ''))
policies = str(args.get('policies', ''))
sources = str(args.get('sources', ''))
share_levels = str(args.get('share_levels', ''))
created_by = str(args.get('created_by', ''))
deleted_by = str(args.get('deleted_by', ''))
include_deleted = str(args.get('include_deleted', ''))
response = client.queryio_cs_request(types, values, from_expiration_timestamp, to_expiration_timestamp,
policies, sources, share_levels, created_by, deleted_by, include_deleted)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiMsaReplyIOCIDs',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def queryioa_exclusionsv1_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.queryioa_exclusionsv1_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def queryml_exclusionsv1_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.queryml_exclusionsv1_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def querypatterns_command(client, args):
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
response = client.querypatterns_request(offset, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def queryplatforms_command(client, args):
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
response = client.queryplatforms_request(offset, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrmsaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def queryplatforms_mixin0_command(client, args):
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
response = client.queryplatforms_mixin0_request(offset, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def querypolicyrules_command(client, args):
id_ = str(args.get('id_', ''))
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
q = str(args.get('q', ''))
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
response = client.querypolicyrules_request(id_, sort, filter_, q, offset, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrapiQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def queryrt_response_policies_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.queryrt_response_policies_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def queryrt_response_policy_members_command(client, args):
id_ = str(args.get('id_', ''))
filter_ = str(args.get('filter_', ''))
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.queryrt_response_policy_members_request(id_, filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def queryrulegroups_command(client, args):
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
q = str(args.get('q', ''))
offset = str(args.get('offset', ''))
after = str(args.get('after', ''))
limit = args.get('limit', None)
response = client.queryrulegroups_request(sort, filter_, q, offset, after, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrapiQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def queryrulegroups_mixin0_command(client, args):
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
q = str(args.get('q', ''))
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
response = client.queryrulegroups_mixin0_request(sort, filter_, q, offset, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def queryrulegroupsfull_command(client, args):
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
q = str(args.get('q', ''))
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
response = client.queryrulegroupsfull_request(sort, filter_, q, offset, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def queryrules_command(client, args):
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
q = str(args.get('q', ''))
offset = str(args.get('offset', ''))
after = str(args.get('after', ''))
limit = args.get('limit', None)
response = client.queryrules_request(sort, filter_, q, offset, after, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrapiQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def queryrules_mixin0_command(client, args):
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
q = str(args.get('q', ''))
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
response = client.queryrules_mixin0_request(sort, filter_, q, offset, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def queryruletypes_command(client, args):
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
response = client.queryruletypes_request(offset, limit)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
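
# --- Stream, API-key, and user administration commands --------------------------
# Event-stream session refresh, API key regeneration, user lookup by CID/UUID,
# uninstall-token reveal, and user role revocation.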
def refresh_active_stream_session_command(client, args):
action_name = str(args.get('action_name', ''))
appId = str(args.get('appId', ''))
partition = args.get('partition', None)
response = client.refresh_active_stream_session_request(action_name, appId, partition)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaReplyMetaOnly',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def regenerateapi_key_command(client, args):
response = client.regenerateapi_key_request()
command_results = CommandResults(
outputs_prefix='CrowdStrike.k8sregRegenAPIKeyResp',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def retrieve_emails_bycid_command(client, args):
response = client.retrieve_emails_bycid_request()
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def retrieve_user_command(client, args):
ids = argToList(args.get('ids', []))
response = client.retrieve_user_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainUserMetaDataResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def retrieve_useruui_ds_bycid_command(client, args):
response = client.retrieve_useruui_ds_bycid_request()
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def retrieve_useruuid_command(client, args):
uid = argToList(args.get('uid', []))
response = client.retrieve_useruuid_request(uid)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def reveal_uninstall_token_command(client, args):
requests_revealuninstalltokenv1_audit_message = str(args.get('requests_revealuninstalltokenv1_audit_message', ''))
requests_revealuninstalltokenv1_device_id = str(args.get('requests_revealuninstalltokenv1_device_id', ''))
response = client.reveal_uninstall_token_request(
requests_revealuninstalltokenv1_audit_message, requests_revealuninstalltokenv1_device_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesRevealUninstallTokenRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def revoke_user_role_ids_command(client, args):
user_uuid = str(args.get('user_uuid', ''))
ids = argToList(args.get('ids', []))
response = client.revoke_user_role_ids_request(user_uuid, ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainUserRoleIDsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
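
# --- Real Time Response (RTR) commands -------------------------------------------
# Session management, put-file/script management, and remote command execution.
# Execution comes in three permission tiers, mirrored by the rtr_execute_* /
# rtr_check_*_command_status pairs below: read-only commands, active-responder
# commands, and admin commands.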
def rtr_aggregate_sessions_command(client, args):
msa_aggregatequeryrequest_date_ranges = argToList(args.get('msa_aggregatequeryrequest_date_ranges', []))
msa_aggregatequeryrequest_field = str(args.get('msa_aggregatequeryrequest_field', ''))
msa_aggregatequeryrequest_filter = str(args.get('msa_aggregatequeryrequest_filter', ''))
msa_aggregatequeryrequest_interval = str(args.get('msa_aggregatequeryrequest_interval', ''))
msa_aggregatequeryrequest_min_doc_count = args.get('msa_aggregatequeryrequest_min_doc_count', None)
msa_aggregatequeryrequest_missing = str(args.get('msa_aggregatequeryrequest_missing', ''))
msa_aggregatequeryrequest_name = str(args.get('msa_aggregatequeryrequest_name', ''))
msa_aggregatequeryrequest_q = str(args.get('msa_aggregatequeryrequest_q', ''))
msa_aggregatequeryrequest_ranges = argToList(args.get('msa_aggregatequeryrequest_ranges', []))
msa_aggregatequeryrequest_size = args.get('msa_aggregatequeryrequest_size', None)
msa_aggregatequeryrequest_sort = str(args.get('msa_aggregatequeryrequest_sort', ''))
msa_aggregatequeryrequest_sub_aggregates = argToList(args.get('msa_aggregatequeryrequest_sub_aggregates', []))
msa_aggregatequeryrequest_time_zone = str(args.get('msa_aggregatequeryrequest_time_zone', ''))
msa_aggregatequeryrequest_type = str(args.get('msa_aggregatequeryrequest_type', ''))
    response = client.rtr_aggregate_sessions_request(
        msa_aggregatequeryrequest_date_ranges, msa_aggregatequeryrequest_field,
        msa_aggregatequeryrequest_filter, msa_aggregatequeryrequest_interval,
        msa_aggregatequeryrequest_min_doc_count, msa_aggregatequeryrequest_missing,
        msa_aggregatequeryrequest_name, msa_aggregatequeryrequest_q,
        msa_aggregatequeryrequest_ranges, msa_aggregatequeryrequest_size,
        msa_aggregatequeryrequest_sort, msa_aggregatequeryrequest_sub_aggregates,
        msa_aggregatequeryrequest_time_zone, msa_aggregatequeryrequest_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaAggregatesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_check_active_responder_command_status_command(client, args):
cloud_request_id = str(args.get('cloud_request_id', ''))
sequence_id = int(args.get('sequence_id', 0))
response = client.rtr_check_active_responder_command_status_request(cloud_request_id, sequence_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainStatusResponseWrapper',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_check_admin_command_status_command(client, args):
cloud_request_id = str(args.get('cloud_request_id', ''))
sequence_id = int(args.get('sequence_id', 0))
response = client.rtr_check_admin_command_status_request(cloud_request_id, sequence_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainStatusResponseWrapper',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_check_command_status_command(client, args):
cloud_request_id = str(args.get('cloud_request_id', ''))
sequence_id = int(args.get('sequence_id', 0))
response = client.rtr_check_command_status_request(cloud_request_id, sequence_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainStatusResponseWrapper',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_create_put_files_command(client, args):
file = str(args.get('file', ''))
description = str(args.get('description', ''))
name = str(args.get('name', ''))
comments_for_audit_log = str(args.get('comments_for_audit_log', ''))
response = client.rtr_create_put_files_request(file, description, name, comments_for_audit_log)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaReplyMetaOnly',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_create_scripts_command(client, args):
file = str(args.get('file', ''))
description = str(args.get('description', ''))
name = str(args.get('name', ''))
comments_for_audit_log = str(args.get('comments_for_audit_log', ''))
permission_type = str(args.get('permission_type', 'none'))
content = str(args.get('content', ''))
platform = argToList(args.get('platform', []))
response = client.rtr_create_scripts_request(
file, description, name, comments_for_audit_log, permission_type, content, platform)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaReplyMetaOnly',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_delete_file_command(client, args):
ids = str(args.get('ids', ''))
session_id = str(args.get('session_id', ''))
response = client.rtr_delete_file_request(ids, session_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_delete_put_files_command(client, args):
ids = str(args.get('ids', ''))
response = client.rtr_delete_put_files_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaReplyMetaOnly',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_delete_queued_session_command(client, args):
session_id = str(args.get('session_id', ''))
cloud_request_id = str(args.get('cloud_request_id', ''))
response = client.rtr_delete_queued_session_request(session_id, cloud_request_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_delete_scripts_command(client, args):
ids = str(args.get('ids', ''))
response = client.rtr_delete_scripts_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaReplyMetaOnly',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_delete_session_command(client, args):
session_id = str(args.get('session_id', ''))
response = client.rtr_delete_session_request(session_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_execute_active_responder_command_command(client, args):
domain_commandexecuterequest_base_command = str(args.get('domain_commandexecuterequest_base_command', ''))
domain_commandexecuterequest_command_string = str(args.get('domain_commandexecuterequest_command_string', ''))
domain_commandexecuterequest_device_id = str(args.get('domain_commandexecuterequest_device_id', ''))
domain_commandexecuterequest_id = args.get('domain_commandexecuterequest_id', None)
domain_commandexecuterequest_persist = argToBoolean(args.get('domain_commandexecuterequest_persist', False))
domain_commandexecuterequest_session_id = str(args.get('domain_commandexecuterequest_session_id', ''))
    response = client.rtr_execute_active_responder_command_request(
        domain_commandexecuterequest_base_command, domain_commandexecuterequest_command_string,
        domain_commandexecuterequest_device_id, domain_commandexecuterequest_id,
        domain_commandexecuterequest_persist, domain_commandexecuterequest_session_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_execute_admin_command_command(client, args):
domain_commandexecuterequest_base_command = str(args.get('domain_commandexecuterequest_base_command', ''))
domain_commandexecuterequest_command_string = str(args.get('domain_commandexecuterequest_command_string', ''))
domain_commandexecuterequest_device_id = str(args.get('domain_commandexecuterequest_device_id', ''))
domain_commandexecuterequest_id = args.get('domain_commandexecuterequest_id', None)
domain_commandexecuterequest_persist = argToBoolean(args.get('domain_commandexecuterequest_persist', False))
domain_commandexecuterequest_session_id = str(args.get('domain_commandexecuterequest_session_id', ''))
    response = client.rtr_execute_admin_command_request(
        domain_commandexecuterequest_base_command, domain_commandexecuterequest_command_string,
        domain_commandexecuterequest_device_id, domain_commandexecuterequest_id,
        domain_commandexecuterequest_persist, domain_commandexecuterequest_session_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_execute_command_command(client, args):
domain_commandexecuterequest_base_command = str(args.get('domain_commandexecuterequest_base_command', ''))
domain_commandexecuterequest_command_string = str(args.get('domain_commandexecuterequest_command_string', ''))
domain_commandexecuterequest_device_id = str(args.get('domain_commandexecuterequest_device_id', ''))
domain_commandexecuterequest_id = args.get('domain_commandexecuterequest_id', None)
domain_commandexecuterequest_persist = argToBoolean(args.get('domain_commandexecuterequest_persist', False))
domain_commandexecuterequest_session_id = str(args.get('domain_commandexecuterequest_session_id', ''))
    response = client.rtr_execute_command_request(
        domain_commandexecuterequest_base_command, domain_commandexecuterequest_command_string,
        domain_commandexecuterequest_device_id, domain_commandexecuterequest_id,
        domain_commandexecuterequest_persist, domain_commandexecuterequest_session_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
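# A typical RTR execution flow with the wrappers above (sketch only; the device
# and session IDs are illustrative placeholders):
#
#     init = rtr_init_session_command(client, {
#         'domain_initrequest_device_id': '<device id>'})       # open a session
#     run = rtr_execute_command_command(client, {
#         'domain_commandexecuterequest_session_id': '<session id>',
#         'domain_commandexecuterequest_base_command': 'ls',
#         'domain_commandexecuterequest_command_string': 'ls /'})
#     # Poll rtr_check_command_status_command with the returned cloud_request_id
#     # until the command completes.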
def rtr_get_extracted_file_contents_command(client, args):
session_id = str(args.get('session_id', ''))
sha256 = str(args.get('sha256', ''))
filename = str(args.get('filename', ''))
response = client.rtr_get_extracted_file_contents_request(session_id, sha256, filename)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_get_put_files_command(client, args):
ids = argToList(args.get('ids', []))
response = client.rtr_get_put_files_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.binservclientMsaPFResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_get_scripts_command(client, args):
ids = argToList(args.get('ids', []))
response = client.rtr_get_scripts_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.binservclientMsaPFResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_init_session_command(client, args):
domain_initrequest_device_id = str(args.get('domain_initrequest_device_id', ''))
domain_initrequest_origin = str(args.get('domain_initrequest_origin', ''))
domain_initrequest_queue_offline = argToBoolean(args.get('domain_initrequest_queue_offline', False))
response = client.rtr_init_session_request(
domain_initrequest_device_id, domain_initrequest_origin, domain_initrequest_queue_offline)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_list_all_sessions_command(client, args):
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
response = client.rtr_list_all_sessions_request(offset, limit, sort, filter_)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainListSessionsResponseMsa',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_list_files_command(client, args):
session_id = str(args.get('session_id', ''))
response = client.rtr_list_files_request(session_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainListFilesResponseWrapper',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_list_put_files_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.rtr_list_put_files_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.binservclientMsaPutFileResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_list_queued_sessions_command(client, args):
msa_idsrequest_ids = argToList(args.get('msa_idsrequest_ids', []))
response = client.rtr_list_queued_sessions_request(msa_idsrequest_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainQueuedSessionResponseWrapper',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_list_scripts_command(client, args):
filter_ = str(args.get('filter_', ''))
offset = str(args.get('offset', ''))
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
response = client.rtr_list_scripts_request(filter_, offset, limit, sort)
command_results = CommandResults(
outputs_prefix='CrowdStrike.binservclientMsaPutFileResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_list_sessions_command(client, args):
msa_idsrequest_ids = argToList(args.get('msa_idsrequest_ids', []))
response = client.rtr_list_sessions_request(msa_idsrequest_ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainSessionResponseWrapper',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_pulse_session_command(client, args):
domain_initrequest_device_id = str(args.get('domain_initrequest_device_id', ''))
domain_initrequest_origin = str(args.get('domain_initrequest_origin', ''))
domain_initrequest_queue_offline = argToBoolean(args.get('domain_initrequest_queue_offline', False))
response = client.rtr_pulse_session_request(
domain_initrequest_device_id, domain_initrequest_origin, domain_initrequest_queue_offline)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def rtr_update_scripts_command(client, args):
id_ = str(args.get('id_', ''))
file = str(args.get('file', ''))
description = str(args.get('description', ''))
name = str(args.get('name', ''))
comments_for_audit_log = str(args.get('comments_for_audit_log', ''))
permission_type = str(args.get('permission_type', 'none'))
content = str(args.get('content', ''))
platform = argToList(args.get('platform', []))
response = client.rtr_update_scripts_request(
id_, file, description, name, comments_for_audit_log, permission_type, content, platform)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaReplyMetaOnly',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def scan_samples_command(client, args):
mlscanner_samplesscanparameters_samples = argToList(args.get('mlscanner_samplesscanparameters_samples', []))
response = client.scan_samples_request(mlscanner_samplesscanparameters_samples)
command_results = CommandResults(
outputs_prefix='CrowdStrike.mlscannerQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def set_device_control_policies_precedence_command(client, args):
requests_setpolicyprecedencereqv1_ids = argToList(args.get('requests_setpolicyprecedencereqv1_ids', []))
requests_setpolicyprecedencereqv1_platform_name = str(args.get('requests_setpolicyprecedencereqv1_platform_name', ''))
response = client.set_device_control_policies_precedence_request(
requests_setpolicyprecedencereqv1_ids, requests_setpolicyprecedencereqv1_platform_name)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def set_firewall_policies_precedence_command(client, args):
requests_setpolicyprecedencereqv1_ids = argToList(args.get('requests_setpolicyprecedencereqv1_ids', []))
requests_setpolicyprecedencereqv1_platform_name = str(args.get('requests_setpolicyprecedencereqv1_platform_name', ''))
response = client.set_firewall_policies_precedence_request(
requests_setpolicyprecedencereqv1_ids, requests_setpolicyprecedencereqv1_platform_name)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def set_prevention_policies_precedence_command(client, args):
requests_setpolicyprecedencereqv1_ids = argToList(args.get('requests_setpolicyprecedencereqv1_ids', []))
requests_setpolicyprecedencereqv1_platform_name = str(args.get('requests_setpolicyprecedencereqv1_platform_name', ''))
response = client.set_prevention_policies_precedence_request(
requests_setpolicyprecedencereqv1_ids, requests_setpolicyprecedencereqv1_platform_name)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def set_sensor_update_policies_precedence_command(client, args):
requests_setpolicyprecedencereqv1_ids = argToList(args.get('requests_setpolicyprecedencereqv1_ids', []))
requests_setpolicyprecedencereqv1_platform_name = str(args.get('requests_setpolicyprecedencereqv1_platform_name', ''))
response = client.set_sensor_update_policies_precedence_request(
requests_setpolicyprecedencereqv1_ids, requests_setpolicyprecedencereqv1_platform_name)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def setrt_response_policies_precedence_command(client, args):
requests_setpolicyprecedencereqv1_ids = argToList(args.get('requests_setpolicyprecedencereqv1_ids', []))
requests_setpolicyprecedencereqv1_platform_name = str(args.get('requests_setpolicyprecedencereqv1_platform_name', ''))
response = client.setrt_response_policies_precedence_request(
requests_setpolicyprecedencereqv1_ids, requests_setpolicyprecedencereqv1_platform_name)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def submit_command(client, args):
falconx_submissionparametersv1_sandbox = argToList(args.get('falconx_submissionparametersv1_sandbox', []))
falconx_submissionparametersv1_user_tags = argToList(args.get('falconx_submissionparametersv1_user_tags', []))
response = client.submit_request(falconx_submissionparametersv1_sandbox, falconx_submissionparametersv1_user_tags)
command_results = CommandResults(
outputs_prefix='CrowdStrike.falconxSubmissionV1Response',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def tokenscreate_command(client, args):
api_tokencreaterequestv1_expires_timestamp = str(args.get('api_tokencreaterequestv1_expires_timestamp', ''))
api_tokencreaterequestv1_label = str(args.get('api_tokencreaterequestv1_label', ''))
api_tokencreaterequestv1_type = str(args.get('api_tokencreaterequestv1_type', ''))
response = client.tokenscreate_request(api_tokencreaterequestv1_expires_timestamp,
api_tokencreaterequestv1_label, api_tokencreaterequestv1_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def tokensdelete_command(client, args):
ids = argToList(args.get('ids', []))
response = client.tokensdelete_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaReplyMetaOnly',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def tokensquery_command(client, args):
offset = args.get('offset', None)
limit = args.get('limit', None)
sort = str(args.get('sort', ''))
filter_ = str(args.get('filter_', ''))
response = client.tokensquery_request(offset, limit, sort, filter_)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def tokensread_command(client, args):
ids = argToList(args.get('ids', []))
response = client.tokensread_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apitokenDetailsResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def tokensupdate_command(client, args):
ids = argToList(args.get('ids', []))
api_tokenpatchrequestv1_expires_timestamp = str(args.get('api_tokenpatchrequestv1_expires_timestamp', ''))
api_tokenpatchrequestv1_label = str(args.get('api_tokenpatchrequestv1_label', ''))
api_tokenpatchrequestv1_revoked = argToBoolean(args.get('api_tokenpatchrequestv1_revoked', False))
response = client.tokensupdate_request(ids, api_tokenpatchrequestv1_expires_timestamp,
api_tokenpatchrequestv1_label, api_tokenpatchrequestv1_revoked)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def trigger_scan_command(client, args):
scan_type = str(args.get('scan_type', 'dry-run'))
response = client.trigger_scan_request(scan_type)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def update_actionv1_command(client, args):
domain_updateactionrequest_frequency = str(args.get('domain_updateactionrequest_frequency', ''))
domain_updateactionrequest_id = str(args.get('domain_updateactionrequest_id', ''))
domain_updateactionrequest_recipients = argToList(args.get('domain_updateactionrequest_recipients', []))
domain_updateactionrequest_status = str(args.get('domain_updateactionrequest_status', ''))
response = client.update_actionv1_request(domain_updateactionrequest_frequency, domain_updateactionrequest_id,
domain_updateactionrequest_recipients, domain_updateactionrequest_status)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainActionEntitiesResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def update_detects_by_idsv2_command(client, args):
domain_detectsentitiespatchrequest_assigned_to_uuid = str(args.get('domain_detectsentitiespatchrequest_assigned_to_uuid', ''))
domain_detectsentitiespatchrequest_comment = str(args.get('domain_detectsentitiespatchrequest_comment', ''))
domain_detectsentitiespatchrequest_ids = argToList(args.get('domain_detectsentitiespatchrequest_ids', []))
domain_detectsentitiespatchrequest_show_in_ui = argToBoolean(args.get('domain_detectsentitiespatchrequest_show_in_ui', False))
domain_detectsentitiespatchrequest_status = str(args.get('domain_detectsentitiespatchrequest_status', ''))
response = client.update_detects_by_idsv2_request(domain_detectsentitiespatchrequest_assigned_to_uuid, domain_detectsentitiespatchrequest_comment,
domain_detectsentitiespatchrequest_ids, domain_detectsentitiespatchrequest_show_in_ui, domain_detectsentitiespatchrequest_status)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaReplyMetaOnly',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def update_device_control_policies_command(client, args):
requests_updatedevicecontrolpoliciesv1_resources = argToList(args.get('requests_updatedevicecontrolpoliciesv1_resources', []))
response = client.update_device_control_policies_request(requests_updatedevicecontrolpoliciesv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesDeviceControlPoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def update_device_tags_command(client, args):
domain_updatedevicetagsrequestv1_action = str(args.get('domain_updatedevicetagsrequestv1_action', ''))
domain_updatedevicetagsrequestv1_device_ids = argToList(args.get('domain_updatedevicetagsrequestv1_device_ids', []))
domain_updatedevicetagsrequestv1_tags = argToList(args.get('domain_updatedevicetagsrequestv1_tags', []))
response = client.update_device_tags_request(domain_updatedevicetagsrequestv1_action,
domain_updatedevicetagsrequestv1_device_ids, domain_updatedevicetagsrequestv1_tags)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaEntitiesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def update_firewall_policies_command(client, args):
requests_updatefirewallpoliciesv1_resources = argToList(args.get('requests_updatefirewallpoliciesv1_resources', []))
response = client.update_firewall_policies_request(requests_updatefirewallpoliciesv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesFirewallPoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def update_host_groups_command(client, args):
requests_updategroupsv1_resources = argToList(args.get('requests_updategroupsv1_resources', []))
response = client.update_host_groups_request(requests_updategroupsv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesHostGroupsV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def update_notificationsv1_command(client, args):
domain_updatenotificationrequestv1_assigned_to_uuid = str(args.get('domain_updatenotificationrequestv1_assigned_to_uuid', ''))
domain_updatenotificationrequestv1_id = str(args.get('domain_updatenotificationrequestv1_id', ''))
domain_updatenotificationrequestv1_status = str(args.get('domain_updatenotificationrequestv1_status', ''))
response = client.update_notificationsv1_request(
domain_updatenotificationrequestv1_assigned_to_uuid, domain_updatenotificationrequestv1_id, domain_updatenotificationrequestv1_status)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainNotificationEntitiesResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def update_prevention_policies_command(client, args):
requests_updatepreventionpoliciesv1_resources = argToList(args.get('requests_updatepreventionpoliciesv1_resources', []))
response = client.update_prevention_policies_request(requests_updatepreventionpoliciesv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesPreventionPoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def update_rulesv1_command(client, args):
domain_updaterulerequestv1_filter = str(args.get('domain_updaterulerequestv1_filter', ''))
domain_updaterulerequestv1_id = str(args.get('domain_updaterulerequestv1_id', ''))
domain_updaterulerequestv1_name = str(args.get('domain_updaterulerequestv1_name', ''))
domain_updaterulerequestv1_permissions = str(args.get('domain_updaterulerequestv1_permissions', ''))
domain_updaterulerequestv1_priority = str(args.get('domain_updaterulerequestv1_priority', ''))
response = client.update_rulesv1_request(domain_updaterulerequestv1_filter, domain_updaterulerequestv1_id,
domain_updaterulerequestv1_name, domain_updaterulerequestv1_permissions, domain_updaterulerequestv1_priority)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainRulesEntitiesResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def update_sensor_update_policies_command(client, args):
requests_updatesensorupdatepoliciesv1_resources = argToList(args.get('requests_updatesensorupdatepoliciesv1_resources', []))
response = client.update_sensor_update_policies_request(requests_updatesensorupdatepoliciesv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesSensorUpdatePoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def update_sensor_update_policiesv2_command(client, args):
requests_updatesensorupdatepoliciesv2_resources = argToList(args.get('requests_updatesensorupdatepoliciesv2_resources', []))
response = client.update_sensor_update_policiesv2_request(requests_updatesensorupdatepoliciesv2_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesSensorUpdatePoliciesV2',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def update_sensor_visibility_exclusionsv1_command(client, args):
requests_svexclusionupdatereqv1_comment = str(args.get('requests_svexclusionupdatereqv1_comment', ''))
requests_svexclusionupdatereqv1_groups = argToList(args.get('requests_svexclusionupdatereqv1_groups', []))
requests_svexclusionupdatereqv1_id = str(args.get('requests_svexclusionupdatereqv1_id', ''))
requests_svexclusionupdatereqv1_value = str(args.get('requests_svexclusionupdatereqv1_value', ''))
response = client.update_sensor_visibility_exclusionsv1_request(
requests_svexclusionupdatereqv1_comment, requests_svexclusionupdatereqv1_groups, requests_svexclusionupdatereqv1_id, requests_svexclusionupdatereqv1_value)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesSvExclusionRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def update_user_command(client, args):
user_uuid = str(args.get('user_uuid', ''))
domain_updateuserfields_firstname = str(args.get('domain_updateuserfields_firstname', ''))
domain_updateuserfields_lastname = str(args.get('domain_updateuserfields_lastname', ''))
response = client.update_user_request(user_uuid, domain_updateuserfields_firstname, domain_updateuserfields_lastname)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainUserMetaDataResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def update_user_groups_command(client, args):
domain_usergroupsrequestv1_resources = argToList(args.get('domain_usergroupsrequestv1_resources', []))
response = client.update_user_groups_request(domain_usergroupsrequestv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainUserGroupsResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def updateaws_account_command(client, args):
ids = argToList(args.get('ids', []))
region = str(args.get('region', ''))
response = client.updateaws_account_request(ids, region)
command_results = CommandResults(
outputs_prefix='CrowdStrike.msaBaseEntitiesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def updateaws_accounts_command(client, args):
models_updateawsaccountsv1_resources = argToList(args.get('models_updateawsaccountsv1_resources', []))
response = client.updateaws_accounts_request(models_updateawsaccountsv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.modelsAWSAccountsV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def updatecid_groups_command(client, args):
domain_cidgroupsrequestv1_resources = argToList(args.get('domain_cidgroupsrequestv1_resources', []))
response = client.updatecid_groups_request(domain_cidgroupsrequestv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.domainCIDGroupsResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def updatecspm_azure_tenant_default_subscriptionid_command(client, args):
tenant_id = str(args.get('tenant_id', ''))
subscription_id = str(args.get('subscription_id', ''))
response = client.updatecspm_azure_tenant_default_subscriptionid_request(tenant_id, subscription_id)
command_results = CommandResults(
outputs_prefix='CrowdStrike',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def updatecspm_policy_settings_command(client, args):
registration_policyrequestextv1_resources = argToList(args.get('registration_policyrequestextv1_resources', []))
response = client.updatecspm_policy_settings_request(registration_policyrequestextv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationPolicySettingsResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def updatecspm_scan_schedule_command(client, args):
registration_scanscheduleupdaterequestv1_resources = argToList(
args.get('registration_scanscheduleupdaterequestv1_resources', []))
response = client.updatecspm_scan_schedule_request(registration_scanscheduleupdaterequestv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.registrationScanScheduleResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def updateioa_exclusionsv1_command(client, args):
requests_ioaexclusionupdatereqv1_cl_regex = str(args.get('requests_ioaexclusionupdatereqv1_cl_regex', ''))
requests_ioaexclusionupdatereqv1_comment = str(args.get('requests_ioaexclusionupdatereqv1_comment', ''))
requests_ioaexclusionupdatereqv1_description = str(args.get('requests_ioaexclusionupdatereqv1_description', ''))
requests_ioaexclusionupdatereqv1_detection_json = str(args.get('requests_ioaexclusionupdatereqv1_detection_json', ''))
requests_ioaexclusionupdatereqv1_groups = argToList(args.get('requests_ioaexclusionupdatereqv1_groups', []))
requests_ioaexclusionupdatereqv1_id = str(args.get('requests_ioaexclusionupdatereqv1_id', ''))
requests_ioaexclusionupdatereqv1_ifn_regex = str(args.get('requests_ioaexclusionupdatereqv1_ifn_regex', ''))
requests_ioaexclusionupdatereqv1_name = str(args.get('requests_ioaexclusionupdatereqv1_name', ''))
requests_ioaexclusionupdatereqv1_pattern_id = str(args.get('requests_ioaexclusionupdatereqv1_pattern_id', ''))
requests_ioaexclusionupdatereqv1_pattern_name = str(args.get('requests_ioaexclusionupdatereqv1_pattern_name', ''))
response = client.updateioa_exclusionsv1_request(requests_ioaexclusionupdatereqv1_cl_regex, requests_ioaexclusionupdatereqv1_comment, requests_ioaexclusionupdatereqv1_description, requests_ioaexclusionupdatereqv1_detection_json,
requests_ioaexclusionupdatereqv1_groups, requests_ioaexclusionupdatereqv1_id, requests_ioaexclusionupdatereqv1_ifn_regex, requests_ioaexclusionupdatereqv1_name, requests_ioaexclusionupdatereqv1_pattern_id, requests_ioaexclusionupdatereqv1_pattern_name)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesIoaExclusionRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def updateioc_command(client, args):
api_iocviewrecord_batch_id = str(args.get('api_iocviewrecord_batch_id', ''))
api_iocviewrecord_created_by = str(args.get('api_iocviewrecord_created_by', ''))
api_iocviewrecord_created_timestamp = str(args.get('api_iocviewrecord_created_timestamp', ''))
api_iocviewrecord_description = str(args.get('api_iocviewrecord_description', ''))
api_iocviewrecord_expiration_days = args.get('api_iocviewrecord_expiration_days', None)
api_iocviewrecord_expiration_timestamp = str(args.get('api_iocviewrecord_expiration_timestamp', ''))
api_iocviewrecord_modified_by = str(args.get('api_iocviewrecord_modified_by', ''))
api_iocviewrecord_modified_timestamp = str(args.get('api_iocviewrecord_modified_timestamp', ''))
api_iocviewrecord_policy = str(args.get('api_iocviewrecord_policy', ''))
api_iocviewrecord_share_level = str(args.get('api_iocviewrecord_share_level', ''))
api_iocviewrecord_source = str(args.get('api_iocviewrecord_source', ''))
api_iocviewrecord_type = str(args.get('api_iocviewrecord_type', ''))
api_iocviewrecord_value = str(args.get('api_iocviewrecord_value', ''))
type_ = str(args.get('type_', ''))
value = str(args.get('value', ''))
response = client.updateioc_request(api_iocviewrecord_batch_id, api_iocviewrecord_created_by, api_iocviewrecord_created_timestamp, api_iocviewrecord_description, api_iocviewrecord_expiration_days, api_iocviewrecord_expiration_timestamp,
api_iocviewrecord_modified_by, api_iocviewrecord_modified_timestamp, api_iocviewrecord_policy, api_iocviewrecord_share_level, api_iocviewrecord_source, api_iocviewrecord_type, api_iocviewrecord_value, type_, value)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiMsaReplyIOC',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def updateml_exclusionsv1_command(client, args):
requests_svexclusionupdatereqv1_comment = str(args.get('requests_svexclusionupdatereqv1_comment', ''))
requests_svexclusionupdatereqv1_groups = argToList(args.get('requests_svexclusionupdatereqv1_groups', []))
requests_svexclusionupdatereqv1_id = str(args.get('requests_svexclusionupdatereqv1_id', ''))
requests_svexclusionupdatereqv1_value = str(args.get('requests_svexclusionupdatereqv1_value', ''))
response = client.updateml_exclusionsv1_request(
requests_svexclusionupdatereqv1_comment, requests_svexclusionupdatereqv1_groups, requests_svexclusionupdatereqv1_id, requests_svexclusionupdatereqv1_value)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesMlExclusionRespV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def updatepolicycontainer_command(client, args):
fwmgr_api_policycontainerupsertrequestv1_default_inbound = str(
args.get('fwmgr_api_policycontainerupsertrequestv1_default_inbound', ''))
fwmgr_api_policycontainerupsertrequestv1_default_outbound = str(
args.get('fwmgr_api_policycontainerupsertrequestv1_default_outbound', ''))
fwmgr_api_policycontainerupsertrequestv1_enforce = argToBoolean(
args.get('fwmgr_api_policycontainerupsertrequestv1_enforce', False))
fwmgr_api_policycontainerupsertrequestv1_is_default_policy = argToBoolean(
args.get('fwmgr_api_policycontainerupsertrequestv1_is_default_policy', False))
fwmgr_api_policycontainerupsertrequestv1_platform_id = str(
args.get('fwmgr_api_policycontainerupsertrequestv1_platform_id', ''))
fwmgr_api_policycontainerupsertrequestv1_policy_id = str(args.get('fwmgr_api_policycontainerupsertrequestv1_policy_id', ''))
fwmgr_api_policycontainerupsertrequestv1_rule_group_ids = argToList(
args.get('fwmgr_api_policycontainerupsertrequestv1_rule_group_ids', []))
fwmgr_api_policycontainerupsertrequestv1_test_mode = argToBoolean(
args.get('fwmgr_api_policycontainerupsertrequestv1_test_mode', False))
fwmgr_api_policycontainerupsertrequestv1_tracking = str(args.get('fwmgr_api_policycontainerupsertrequestv1_tracking', ''))
response = client.updatepolicycontainer_request(fwmgr_api_policycontainerupsertrequestv1_default_inbound, fwmgr_api_policycontainerupsertrequestv1_default_outbound, fwmgr_api_policycontainerupsertrequestv1_enforce, fwmgr_api_policycontainerupsertrequestv1_is_default_policy,
fwmgr_api_policycontainerupsertrequestv1_platform_id, fwmgr_api_policycontainerupsertrequestv1_policy_id, fwmgr_api_policycontainerupsertrequestv1_rule_group_ids, fwmgr_api_policycontainerupsertrequestv1_test_mode, fwmgr_api_policycontainerupsertrequestv1_tracking)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrmsaReplyMetaOnly',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def updatert_response_policies_command(client, args):
requests_updatertresponsepoliciesv1_resources = argToList(args.get('requests_updatertresponsepoliciesv1_resources', []))
response = client.updatert_response_policies_request(requests_updatertresponsepoliciesv1_resources)
command_results = CommandResults(
outputs_prefix='CrowdStrike.responsesRTResponsePoliciesV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def updaterulegroup_command(client, args):
comment = str(args.get('comment', ''))
fwmgr_api_rulegroupmodifyrequestv1_diff_operations = argToList(
args.get('fwmgr_api_rulegroupmodifyrequestv1_diff_operations', []))
fwmgr_api_rulegroupmodifyrequestv1_diff_type = str(args.get('fwmgr_api_rulegroupmodifyrequestv1_diff_type', ''))
fwmgr_api_rulegroupmodifyrequestv1_id = str(args.get('fwmgr_api_rulegroupmodifyrequestv1_id', ''))
fwmgr_api_rulegroupmodifyrequestv1_rule_ids = argToList(args.get('fwmgr_api_rulegroupmodifyrequestv1_rule_ids', []))
fwmgr_api_rulegroupmodifyrequestv1_rule_versions = argToList(args.get('fwmgr_api_rulegroupmodifyrequestv1_rule_versions', []))
fwmgr_api_rulegroupmodifyrequestv1_tracking = str(args.get('fwmgr_api_rulegroupmodifyrequestv1_tracking', ''))
response = client.updaterulegroup_request(comment, fwmgr_api_rulegroupmodifyrequestv1_diff_operations, fwmgr_api_rulegroupmodifyrequestv1_diff_type, fwmgr_api_rulegroupmodifyrequestv1_id,
fwmgr_api_rulegroupmodifyrequestv1_rule_ids, fwmgr_api_rulegroupmodifyrequestv1_rule_versions, fwmgr_api_rulegroupmodifyrequestv1_tracking)
command_results = CommandResults(
outputs_prefix='CrowdStrike.fwmgrapiQueryResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def updaterulegroup_mixin0_command(client, args):
api_rulegroupmodifyrequestv1_comment = str(args.get('api_rulegroupmodifyrequestv1_comment', ''))
api_rulegroupmodifyrequestv1_description = str(args.get('api_rulegroupmodifyrequestv1_description', ''))
api_rulegroupmodifyrequestv1_enabled = argToBoolean(args.get('api_rulegroupmodifyrequestv1_enabled', False))
api_rulegroupmodifyrequestv1_id = str(args.get('api_rulegroupmodifyrequestv1_id', ''))
api_rulegroupmodifyrequestv1_name = str(args.get('api_rulegroupmodifyrequestv1_name', ''))
api_rulegroupmodifyrequestv1_rulegroup_version = args.get('api_rulegroupmodifyrequestv1_rulegroup_version', None)
response = client.updaterulegroup_mixin0_request(api_rulegroupmodifyrequestv1_comment, api_rulegroupmodifyrequestv1_description, api_rulegroupmodifyrequestv1_enabled,
api_rulegroupmodifyrequestv1_id, api_rulegroupmodifyrequestv1_name, api_rulegroupmodifyrequestv1_rulegroup_version)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiRuleGroupsResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def updaterules_command(client, args):
api_ruleupdatesrequestv1_comment = str(args.get('api_ruleupdatesrequestv1_comment', ''))
api_ruleupdatesrequestv1_rule_updates = argToList(args.get('api_ruleupdatesrequestv1_rule_updates', []))
api_ruleupdatesrequestv1_rulegroup_id = str(args.get('api_ruleupdatesrequestv1_rulegroup_id', ''))
api_ruleupdatesrequestv1_rulegroup_version = args.get('api_ruleupdatesrequestv1_rulegroup_version', None)
response = client.updaterules_request(api_ruleupdatesrequestv1_comment, api_ruleupdatesrequestv1_rule_updates,
api_ruleupdatesrequestv1_rulegroup_id, api_ruleupdatesrequestv1_rulegroup_version)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiRulesResponse',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def upload_samplev2_command(client, args):
body = argToList(args.get('body', []))
upfile = str(args.get('upfile', ''))
file_name = str(args.get('file_name', ''))
comment = str(args.get('comment', ''))
is_confidential = argToBoolean(args.get('is_confidential', False))
response = client.upload_samplev2_request(body, upfile, file_name, comment, is_confidential)
command_results = CommandResults(
outputs_prefix='CrowdStrike.samplestoreSampleMetadataResponseV2',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def upload_samplev3_command(client, args):
body = argToList(args.get('body', []))
upfile = str(args.get('upfile', ''))
file_name = str(args.get('file_name', ''))
comment = str(args.get('comment', ''))
is_confidential = argToBoolean(args.get('is_confidential', False))
response = client.upload_samplev3_request(body, upfile, file_name, comment, is_confidential)
command_results = CommandResults(
outputs_prefix='CrowdStrike.samplestoreSampleMetadataResponseV2',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def validate_command(client, args):
api_validationrequestv1_fields = argToList(args.get('api_validationrequestv1_fields', []))
response = client.validate_request(api_validationrequestv1_fields)
command_results = CommandResults(
outputs_prefix='CrowdStrike.apiValidationResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def verifyaws_account_access_command(client, args):
ids = argToList(args.get('ids', []))
response = client.verifyaws_account_access_request(ids)
command_results = CommandResults(
outputs_prefix='CrowdStrike.modelsVerifyAccessResponseV1',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def test_module(client):
    # Connectivity check invoked by XSOAR's 'Test' button. This generated stub
    # does not exercise the API; it reports 'ok' once the Client was constructed.
    return_results('ok')
def main():
params = demisto.params()
args = demisto.args()
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client = Client(params)
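        # Registry mapping each XSOAR command name to its handler function.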
commands = {
'cs-add-role': add_role_command,
'cs-add-user-group-members': add_user_group_members_command,
'cs-addcid-group-members': addcid_group_members_command,
'cs-aggregate-allow-list': aggregate_allow_list_command,
'cs-aggregate-block-list': aggregate_block_list_command,
'cs-aggregate-detections': aggregate_detections_command,
'cs-aggregate-device-count-collection': aggregate_device_count_collection_command,
'cs-aggregate-escalations': aggregate_escalations_command,
'cs-aggregate-notificationsv1': aggregate_notificationsv1_command,
'cs-aggregate-remediations': aggregate_remediations_command,
'cs-aggregateevents': aggregateevents_command,
'cs-aggregatefc-incidents': aggregatefc_incidents_command,
'cs-aggregatepolicyrules': aggregatepolicyrules_command,
'cs-aggregaterulegroups': aggregaterulegroups_command,
'cs-aggregaterules': aggregaterules_command,
'cs-aggregates-detections-global-counts': aggregates_detections_global_counts_command,
'cs-aggregates-events': aggregates_events_command,
'cs-aggregates-events-collections': aggregates_events_collections_command,
'cs-aggregates-incidents-global-counts': aggregates_incidents_global_counts_command,
'cs-aggregatesow-events-global-counts': aggregatesow_events_global_counts_command,
'cs-apipreemptproxypostgraphql': apipreemptproxypostgraphql_command,
'cs-auditeventsquery': auditeventsquery_command,
'cs-auditeventsread': auditeventsread_command,
'cs-batch-active-responder-cmd': batch_active_responder_cmd_command,
'cs-batch-admin-cmd': batch_admin_cmd_command,
'cs-batch-cmd': batch_cmd_command,
'cs-batch-get-cmd': batch_get_cmd_command,
'cs-batch-get-cmd-status': batch_get_cmd_status_command,
'cs-batch-init-sessions': batch_init_sessions_command,
'cs-batch-refresh-sessions': batch_refresh_sessions_command,
'cs-create-actionsv1': create_actionsv1_command,
'cs-create-device-control-policies': create_device_control_policies_command,
'cs-create-firewall-policies': create_firewall_policies_command,
'cs-create-host-groups': create_host_groups_command,
'cs-create-or-updateaws-settings': create_or_updateaws_settings_command,
'cs-create-prevention-policies': create_prevention_policies_command,
'cs-create-rulesv1': create_rulesv1_command,
'cs-create-sensor-update-policies': create_sensor_update_policies_command,
'cs-create-sensor-update-policiesv2': create_sensor_update_policiesv2_command,
'cs-create-user': create_user_command,
'cs-create-user-groups': create_user_groups_command,
'cs-createaws-account': createaws_account_command,
'cs-createcid-groups': createcid_groups_command,
'cs-createcspm-aws-account': createcspm_aws_account_command,
'cs-createcspmgcp-account': createcspmgcp_account_command,
'cs-createioc': createioc_command,
'cs-createml-exclusionsv1': createml_exclusionsv1_command,
'cs-creatert-response-policies': creatert_response_policies_command,
'cs-createrule': createrule_command,
'cs-createrulegroup': createrulegroup_command,
'cs-createrulegroup-mixin0': createrulegroup_mixin0_command,
'cs-createsv-exclusionsv1': createsv_exclusionsv1_command,
'cs-crowd-score': crowd_score_command,
'cs-customersettingsread': customersettingsread_command,
'cs-delete-actionv1': delete_actionv1_command,
'cs-delete-device-control-policies': delete_device_control_policies_command,
'cs-delete-firewall-policies': delete_firewall_policies_command,
'cs-delete-host-groups': delete_host_groups_command,
'cs-delete-notificationsv1': delete_notificationsv1_command,
'cs-delete-prevention-policies': delete_prevention_policies_command,
'cs-delete-report': delete_report_command,
'cs-delete-rulesv1': delete_rulesv1_command,
'cs-delete-samplev2': delete_samplev2_command,
'cs-delete-samplev3': delete_samplev3_command,
'cs-delete-sensor-update-policies': delete_sensor_update_policies_command,
'cs-delete-sensor-visibility-exclusionsv1': delete_sensor_visibility_exclusionsv1_command,
'cs-delete-user': delete_user_command,
'cs-delete-user-group-members': delete_user_group_members_command,
'cs-delete-user-groups': delete_user_groups_command,
'cs-deleteaws-accounts': deleteaws_accounts_command,
'cs-deleteaws-accounts-mixin0': deleteaws_accounts_mixin0_command,
'cs-deletecid-group-members': deletecid_group_members_command,
'cs-deletecid-groups': deletecid_groups_command,
'cs-deletecspm-aws-account': deletecspm_aws_account_command,
'cs-deletecspm-azure-account': deletecspm_azure_account_command,
'cs-deleted-roles': deleted_roles_command,
'cs-deleteioa-exclusionsv1': deleteioa_exclusionsv1_command,
'cs-deleteioc': deleteioc_command,
'cs-deleteml-exclusionsv1': deleteml_exclusionsv1_command,
'cs-deletert-response-policies': deletert_response_policies_command,
'cs-deleterulegroups': deleterulegroups_command,
'cs-deleterulegroups-mixin0': deleterulegroups_mixin0_command,
'cs-deleterules': deleterules_command,
'cs-devices-count': devices_count_command,
'cs-devices-ran-on': devices_ran_on_command,
'cs-download-sensor-installer-by-id': download_sensor_installer_by_id_command,
'cs-entitiesprocesses': entitiesprocesses_command,
'cs-get-actionsv1': get_actionsv1_command,
'cs-get-aggregate-detects': get_aggregate_detects_command,
'cs-get-artifacts': get_artifacts_command,
'cs-get-assessmentv1': get_assessmentv1_command,
'cs-get-available-role-ids': get_available_role_ids_command,
'cs-get-behaviors': get_behaviors_command,
'cs-get-children': get_children_command,
'cs-get-cloudconnectazure-entities-account-v1': get_cloudconnectazure_entities_account_v1_command,
'cs-get-cloudconnectazure-entities-userscriptsdownload-v1': get_cloudconnectazure_entities_userscriptsdownload_v1_command,
'cs-get-cloudconnectcspmazure-entities-account-v1': get_cloudconnectcspmazure_entities_account_v1_command,
'cs-get-cloudconnectcspmazure-entities-userscriptsdownload-v1': get_cloudconnectcspmazure_entities_userscriptsdownload_v1_command,
'cs-get-clusters': get_clusters_command,
'cs-get-combined-sensor-installers-by-query': get_combined_sensor_installers_by_query_command,
'cs-get-detect-summaries': get_detect_summaries_command,
'cs-get-device-control-policies': get_device_control_policies_command,
'cs-get-device-count-collection-queries-by-filter': get_device_count_collection_queries_by_filter_command,
'cs-get-device-details': get_device_details_command,
'cs-get-firewall-policies': get_firewall_policies_command,
'cs-get-helm-values-yaml': get_helm_values_yaml_command,
'cs-get-host-groups': get_host_groups_command,
'cs-get-incidents': get_incidents_command,
'cs-get-intel-actor-entities': get_intel_actor_entities_command,
'cs-get-intel-indicator-entities': get_intel_indicator_entities_command,
'cs-get-intel-report-entities': get_intel_report_entities_command,
'cs-get-intel-reportpdf': get_intel_reportpdf_command,
'cs-get-intel-rule-entities': get_intel_rule_entities_command,
'cs-get-intel-rule-file': get_intel_rule_file_command,
'cs-get-latest-intel-rule-file': get_latest_intel_rule_file_command,
'cs-get-locations': get_locations_command,
'cs-get-mal-query-downloadv1': get_mal_query_downloadv1_command,
'cs-get-mal-query-entities-samples-fetchv1': get_mal_query_entities_samples_fetchv1_command,
'cs-get-mal-query-metadatav1': get_mal_query_metadatav1_command,
'cs-get-mal-query-quotasv1': get_mal_query_quotasv1_command,
'cs-get-mal-query-requestv1': get_mal_query_requestv1_command,
'cs-get-notifications-detailed-translatedv1': get_notifications_detailed_translatedv1_command,
'cs-get-notifications-detailedv1': get_notifications_detailedv1_command,
'cs-get-notifications-translatedv1': get_notifications_translatedv1_command,
'cs-get-notificationsv1': get_notificationsv1_command,
'cs-get-prevention-policies': get_prevention_policies_command,
'cs-get-reports': get_reports_command,
'cs-get-roles': get_roles_command,
'cs-get-roles-byid': get_roles_byid_command,
'cs-get-rulesv1': get_rulesv1_command,
'cs-get-samplev2': get_samplev2_command,
'cs-get-samplev3': get_samplev3_command,
'cs-get-scans': get_scans_command,
'cs-get-scans-aggregates': get_scans_aggregates_command,
'cs-get-sensor-installers-by-query': get_sensor_installers_by_query_command,
'cs-get-sensor-installers-entities': get_sensor_installers_entities_command,
'cs-get-sensor-installersccid-by-query': get_sensor_installersccid_by_query_command,
'cs-get-sensor-update-policies': get_sensor_update_policies_command,
'cs-get-sensor-update-policiesv2': get_sensor_update_policiesv2_command,
'cs-get-sensor-visibility-exclusionsv1': get_sensor_visibility_exclusionsv1_command,
'cs-get-submissions': get_submissions_command,
'cs-get-summary-reports': get_summary_reports_command,
'cs-get-user-group-members-byid': get_user_group_members_byid_command,
'cs-get-user-groups-byid': get_user_groups_byid_command,
'cs-get-user-role-ids': get_user_role_ids_command,
'cs-get-vulnerabilities': get_vulnerabilities_command,
'cs-getaws-accounts': getaws_accounts_command,
'cs-getaws-accounts-mixin0': getaws_accounts_mixin0_command,
'cs-getaws-settings': getaws_settings_command,
'cs-getcid-group-by-id': getcid_group_by_id_command,
'cs-getcid-group-members-by': getcid_group_members_by_command,
'cs-getcspm-aws-account': getcspm_aws_account_command,
'cs-getcspm-aws-account-scripts-attachment': getcspm_aws_account_scripts_attachment_command,
'cs-getcspm-aws-console-setupur-ls': getcspm_aws_console_setupur_ls_command,
'cs-getcspm-azure-user-scripts': getcspm_azure_user_scripts_command,
'cs-getcspm-policy': getcspm_policy_command,
'cs-getcspm-policy-settings': getcspm_policy_settings_command,
'cs-getcspm-scan-schedule': getcspm_scan_schedule_command,
'cs-getcspmcgp-account': getcspmcgp_account_command,
'cs-getcspmgcp-user-scripts': getcspmgcp_user_scripts_command,
'cs-getcspmgcp-user-scripts-attachment': getcspmgcp_user_scripts_attachment_command,
'cs-getevents': getevents_command,
'cs-getfirewallfields': getfirewallfields_command,
'cs-getioa-events': getioa_events_command,
'cs-getioa-exclusionsv1': getioa_exclusionsv1_command,
'cs-getioa-users': getioa_users_command,
'cs-getioc': getioc_command,
'cs-getml-exclusionsv1': getml_exclusionsv1_command,
'cs-getpatterns': getpatterns_command,
'cs-getplatforms': getplatforms_command,
'cs-getplatforms-mixin0': getplatforms_mixin0_command,
'cs-getpolicycontainers': getpolicycontainers_command,
'cs-getrt-response-policies': getrt_response_policies_command,
'cs-getrulegroups': getrulegroups_command,
'cs-getrulegroups-mixin0': getrulegroups_mixin0_command,
'cs-getrules': getrules_command,
'cs-getrules-mixin0': getrules_mixin0_command,
'cs-getrulesget': getrulesget_command,
'cs-getruletypes': getruletypes_command,
'cs-grant-user-role-ids': grant_user_role_ids_command,
'cs-indicatorcombinedv1': indicatorcombinedv1_command,
'cs-indicatorcreatev1': indicatorcreatev1_command,
'cs-indicatordeletev1': indicatordeletev1_command,
'cs-indicatorgetv1': indicatorgetv1_command,
'cs-indicatorsearchv1': indicatorsearchv1_command,
'cs-indicatorupdatev1': indicatorupdatev1_command,
'cs-list-available-streamso-auth2': list_available_streamso_auth2_command,
'cs-oauth2-access-token': oauth2_access_token_command,
'cs-oauth2-revoke-token': oauth2_revoke_token_command,
'cs-patch-cloudconnectazure-entities-clientid-v1': patch_cloudconnectazure_entities_clientid_v1_command,
'cs-patch-cloudconnectcspmazure-entities-clientid-v1': patch_cloudconnectcspmazure_entities_clientid_v1_command,
'cs-patchcspm-aws-account': patchcspm_aws_account_command,
'cs-perform-actionv2': perform_actionv2_command,
'cs-perform-device-control-policies-action': perform_device_control_policies_action_command,
'cs-perform-firewall-policies-action': perform_firewall_policies_action_command,
'cs-perform-group-action': perform_group_action_command,
'cs-perform-incident-action': perform_incident_action_command,
'cs-perform-prevention-policies-action': perform_prevention_policies_action_command,
'cs-perform-sensor-update-policies-action': perform_sensor_update_policies_action_command,
'cs-performrt-response-policies-action': performrt_response_policies_action_command,
'cs-post-cloudconnectazure-entities-account-v1': post_cloudconnectazure_entities_account_v1_command,
'cs-post-cloudconnectcspmazure-entities-account-v1': post_cloudconnectcspmazure_entities_account_v1_command,
'cs-post-mal-query-entities-samples-multidownloadv1': post_mal_query_entities_samples_multidownloadv1_command,
'cs-post-mal-query-exact-searchv1': post_mal_query_exact_searchv1_command,
'cs-post-mal-query-fuzzy-searchv1': post_mal_query_fuzzy_searchv1_command,
'cs-post-mal-query-huntv1': post_mal_query_huntv1_command,
'cs-preview-rulev1': preview_rulev1_command,
'cs-processes-ran-on': processes_ran_on_command,
'cs-provisionaws-accounts': provisionaws_accounts_command,
'cs-query-actionsv1': query_actionsv1_command,
'cs-query-allow-list-filter': query_allow_list_filter_command,
'cs-query-behaviors': query_behaviors_command,
'cs-query-block-list-filter': query_block_list_filter_command,
'cs-query-children': query_children_command,
'cs-query-combined-device-control-policies': query_combined_device_control_policies_command,
'cs-query-combined-device-control-policy-members': query_combined_device_control_policy_members_command,
'cs-query-combined-firewall-policies': query_combined_firewall_policies_command,
'cs-query-combined-firewall-policy-members': query_combined_firewall_policy_members_command,
'cs-query-combined-group-members': query_combined_group_members_command,
'cs-query-combined-host-groups': query_combined_host_groups_command,
'cs-query-combined-prevention-policies': query_combined_prevention_policies_command,
'cs-query-combined-prevention-policy-members': query_combined_prevention_policy_members_command,
'cs-query-combined-sensor-update-builds': query_combined_sensor_update_builds_command,
'cs-query-combined-sensor-update-policies': query_combined_sensor_update_policies_command,
'cs-query-combined-sensor-update-policiesv2': query_combined_sensor_update_policiesv2_command,
'cs-query-combined-sensor-update-policy-members': query_combined_sensor_update_policy_members_command,
'cs-query-combinedrt-response-policies': query_combinedrt_response_policies_command,
'cs-query-combinedrt-response-policy-members': query_combinedrt_response_policy_members_command,
'cs-query-detection-ids-by-filter': query_detection_ids_by_filter_command,
'cs-query-detects': query_detects_command,
'cs-query-device-control-policies': query_device_control_policies_command,
'cs-query-device-control-policy-members': query_device_control_policy_members_command,
'cs-query-devices-by-filter': query_devices_by_filter_command,
'cs-query-devices-by-filter-scroll': query_devices_by_filter_scroll_command,
'cs-query-escalations-filter': query_escalations_filter_command,
'cs-query-firewall-policies': query_firewall_policies_command,
'cs-query-firewall-policy-members': query_firewall_policy_members_command,
'cs-query-group-members': query_group_members_command,
'cs-query-hidden-devices': query_hidden_devices_command,
'cs-query-host-groups': query_host_groups_command,
'cs-query-incident-ids-by-filter': query_incident_ids_by_filter_command,
'cs-query-incidents': query_incidents_command,
'cs-query-intel-actor-entities': query_intel_actor_entities_command,
'cs-query-intel-actor-ids': query_intel_actor_ids_command,
'cs-query-intel-indicator-entities': query_intel_indicator_entities_command,
'cs-query-intel-indicator-ids': query_intel_indicator_ids_command,
'cs-query-intel-report-entities': query_intel_report_entities_command,
'cs-query-intel-report-ids': query_intel_report_ids_command,
'cs-query-intel-rule-ids': query_intel_rule_ids_command,
'cs-query-notificationsv1': query_notificationsv1_command,
'cs-query-prevention-policies': query_prevention_policies_command,
'cs-query-prevention-policy-members': query_prevention_policy_members_command,
'cs-query-remediations-filter': query_remediations_filter_command,
'cs-query-reports': query_reports_command,
'cs-query-roles': query_roles_command,
'cs-query-rulesv1': query_rulesv1_command,
'cs-query-samplev1': query_samplev1_command,
'cs-query-sensor-update-policies': query_sensor_update_policies_command,
'cs-query-sensor-update-policy-members': query_sensor_update_policy_members_command,
'cs-query-sensor-visibility-exclusionsv1': query_sensor_visibility_exclusionsv1_command,
'cs-query-submissions': query_submissions_command,
'cs-query-submissions-mixin0': query_submissions_mixin0_command,
'cs-query-user-group-members': query_user_group_members_command,
'cs-query-user-groups': query_user_groups_command,
'cs-query-vulnerabilities': query_vulnerabilities_command,
'cs-queryaws-accounts': queryaws_accounts_command,
'cs-queryaws-accounts-fori-ds': queryaws_accounts_fori_ds_command,
'cs-querycid-group-members': querycid_group_members_command,
'cs-querycid-groups': querycid_groups_command,
'cs-queryevents': queryevents_command,
'cs-queryfirewallfields': queryfirewallfields_command,
'cs-queryio-cs': queryio_cs_command,
'cs-queryioa-exclusionsv1': queryioa_exclusionsv1_command,
'cs-queryml-exclusionsv1': queryml_exclusionsv1_command,
'cs-querypatterns': querypatterns_command,
'cs-queryplatforms': queryplatforms_command,
'cs-queryplatforms-mixin0': queryplatforms_mixin0_command,
'cs-querypolicyrules': querypolicyrules_command,
'cs-queryrt-response-policies': queryrt_response_policies_command,
'cs-queryrt-response-policy-members': queryrt_response_policy_members_command,
'cs-queryrulegroups': queryrulegroups_command,
'cs-queryrulegroups-mixin0': queryrulegroups_mixin0_command,
'cs-queryrulegroupsfull': queryrulegroupsfull_command,
'cs-queryrules': queryrules_command,
'cs-queryrules-mixin0': queryrules_mixin0_command,
'cs-queryruletypes': queryruletypes_command,
'cs-refresh-active-stream-session': refresh_active_stream_session_command,
'cs-regenerateapi-key': regenerateapi_key_command,
'cs-retrieve-emails-bycid': retrieve_emails_bycid_command,
'cs-retrieve-user': retrieve_user_command,
'cs-retrieve-useruui-ds-bycid': retrieve_useruui_ds_bycid_command,
'cs-retrieve-useruuid': retrieve_useruuid_command,
'cs-reveal-uninstall-token': reveal_uninstall_token_command,
'cs-revoke-user-role-ids': revoke_user_role_ids_command,
'cs-rtr-aggregate-sessions': rtr_aggregate_sessions_command,
'cs-rtr-check-active-responder-command-status': rtr_check_active_responder_command_status_command,
'cs-rtr-check-admin-command-status': rtr_check_admin_command_status_command,
'cs-rtr-check-command-status': rtr_check_command_status_command,
'cs-rtr-create-put-files': rtr_create_put_files_command,
'cs-rtr-create-scripts': rtr_create_scripts_command,
'cs-rtr-delete-file': rtr_delete_file_command,
'cs-rtr-delete-put-files': rtr_delete_put_files_command,
'cs-rtr-delete-queued-session': rtr_delete_queued_session_command,
'cs-rtr-delete-scripts': rtr_delete_scripts_command,
'cs-rtr-delete-session': rtr_delete_session_command,
'cs-rtr-execute-active-responder-command': rtr_execute_active_responder_command_command,
'cs-rtr-execute-admin-command': rtr_execute_admin_command_command,
'cs-rtr-execute-command': rtr_execute_command_command,
'cs-rtr-get-extracted-file-contents': rtr_get_extracted_file_contents_command,
'cs-rtr-get-put-files': rtr_get_put_files_command,
'cs-rtr-get-scripts': rtr_get_scripts_command,
'cs-rtr-init-session': rtr_init_session_command,
'cs-rtr-list-all-sessions': rtr_list_all_sessions_command,
'cs-rtr-list-files': rtr_list_files_command,
'cs-rtr-list-put-files': rtr_list_put_files_command,
'cs-rtr-list-queued-sessions': rtr_list_queued_sessions_command,
'cs-rtr-list-scripts': rtr_list_scripts_command,
'cs-rtr-list-sessions': rtr_list_sessions_command,
'cs-rtr-pulse-session': rtr_pulse_session_command,
'cs-rtr-update-scripts': rtr_update_scripts_command,
'cs-scan-samples': scan_samples_command,
'cs-set-device-control-policies-precedence': set_device_control_policies_precedence_command,
'cs-set-firewall-policies-precedence': set_firewall_policies_precedence_command,
'cs-set-prevention-policies-precedence': set_prevention_policies_precedence_command,
'cs-set-sensor-update-policies-precedence': set_sensor_update_policies_precedence_command,
'cs-setrt-response-policies-precedence': setrt_response_policies_precedence_command,
'cs-submit': submit_command,
'cs-tokenscreate': tokenscreate_command,
'cs-tokensdelete': tokensdelete_command,
'cs-tokensquery': tokensquery_command,
'cs-tokensread': tokensread_command,
'cs-tokensupdate': tokensupdate_command,
'cs-trigger-scan': trigger_scan_command,
'cs-update-actionv1': update_actionv1_command,
'cs-update-detects-by-idsv2': update_detects_by_idsv2_command,
'cs-update-device-control-policies': update_device_control_policies_command,
'cs-update-device-tags': update_device_tags_command,
'cs-update-firewall-policies': update_firewall_policies_command,
'cs-update-host-groups': update_host_groups_command,
'cs-update-notificationsv1': update_notificationsv1_command,
'cs-update-prevention-policies': update_prevention_policies_command,
'cs-update-rulesv1': update_rulesv1_command,
'cs-update-sensor-update-policies': update_sensor_update_policies_command,
'cs-update-sensor-update-policiesv2': update_sensor_update_policiesv2_command,
'cs-update-sensor-visibility-exclusionsv1': update_sensor_visibility_exclusionsv1_command,
'cs-update-user': update_user_command,
'cs-update-user-groups': update_user_groups_command,
'cs-updateaws-account': updateaws_account_command,
'cs-updateaws-accounts': updateaws_accounts_command,
'cs-updatecid-groups': updatecid_groups_command,
'cs-updatecspm-azure-tenant-default-subscriptionid': updatecspm_azure_tenant_default_subscriptionid_command,
'cs-updatecspm-policy-settings': updatecspm_policy_settings_command,
'cs-updatecspm-scan-schedule': updatecspm_scan_schedule_command,
'cs-updateioa-exclusionsv1': updateioa_exclusionsv1_command,
'cs-updateioc': updateioc_command,
'cs-updateml-exclusionsv1': updateml_exclusionsv1_command,
'cs-updatepolicycontainer': updatepolicycontainer_command,
'cs-updatert-response-policies': updatert_response_policies_command,
'cs-updaterulegroup': updaterulegroup_command,
'cs-updaterulegroup-mixin0': updaterulegroup_mixin0_command,
'cs-updaterules': updaterules_command,
'cs-upload-samplev2': upload_samplev2_command,
'cs-upload-samplev3': upload_samplev3_command,
'cs-validate': validate_command,
'cs-verifyaws-account-access': verifyaws_account_access_command,
}
if command == 'test-module':
test_module(client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(f'Failed to execute {command} command. Error: {str(e)}', e)
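
# The CrowdStrikeApiModule source is appended to this script by the XSOAR
# build, so the late star import is intentional (hence the noqa: E402).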
from CrowdStrikeApiModule import * # noqa: E402
if __name__ in ['__main__', 'builtin', 'builtins']:
main()

# ======================================================================
# File: make_eye_motion_dataset.py
# ======================================================================
import os
import glob
import argparse
import cv2
import pickle
import numpy as np
from tqdm import tqdm_gui
from data_utils import VideoWrapper, SubtitleWrapper, ClipWrapper
'''
# Eye motion dataset structure
{
'vid': vid_name,
'clip_info': [
{
'sent': clip_sent_list, # [[start_frame, end_frame, sentence], ...]
'landmarks': clip_landmarks, # [[landmark_list], ...]
'start_frame': start_frame,
'end_frame': end_frame,
},
...
]
},
...
'''
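
# Illustrative usage of the resulting pickle (assumes the default
# -dataset_path of './dataset'):
#
#   with open('./dataset/eye_motion_dataset.pickle', 'rb') as f:
#       dataset = pickle.load(f)
#   for video in dataset:
#       for clip in video['clip_info']:
#           print(video['vid'], clip['start_frame'], clip['end_frame'],
#                 len(clip['sent']), 'sentences')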
def make_dataset(opt):
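    """Align filtered clips, per-video fps, and subtitles into the structure
    above, then pickle the result to <dataset_path>/eye_motion_dataset.pickle."""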
dataset = []
vid_files = sorted(glob.glob(opt.vid_path + '/*.mp4'), key=os.path.getmtime)
    for vid in tqdm_gui(vid_files):
vid_name = os.path.split(vid)[1][:-4]
print(vid_name)
filtered_clip_wrapper = ClipWrapper(opt.clip_filter_path, vid_name)
video_wrapper = VideoWrapper(opt.vid_path, vid_name)
subtitles_wrapper = SubtitleWrapper(opt.vid_path, vid_name)
filtered_clip = filtered_clip_wrapper.get_filtered_clip()
video = video_wrapper.get_vid()
subtitle = subtitles_wrapper.get_subtitle()
if video is None:
print('[WARN] Matched video does not exist. Skip this video.')
continue
if filtered_clip is None:
print('[WARN] Matched clip does not exist. Skip this video.')
continue
if subtitle is None:
print('[WARN] Matched subtitle does not exist. Skip this video.')
continue
# dataset_tr.append({'vid': vid, 'clips': []})
# dataset_val.append({'vid': vid, 'clips': []})
# dataset_ts.append({'vid': vid, 'clips': []})
# define current video information
dataset.append({'vid': vid_name, 'clip_info': []})
fps = video.get(cv2.CAP_PROP_FPS)
for ci, clip in enumerate(filtered_clip):
start_frame, end_frame, is_valid, landmarks = clip['clip_info'][0], clip['clip_info'][1], clip['clip_info'][2], clip['frames']
if is_valid:
clip_sent_list = []
clip_landmark_list = []
for sub in subtitle:
if sub['sent'] != '':
sent_start_frame = second_to_frame(sub['start'], fps)
sent_end_frame = second_to_frame(sub['end'], fps)
if sent_start_frame >= start_frame and sent_end_frame <= end_frame:
clip_sent_list.append([sent_start_frame, sent_end_frame, sub['sent']])
# get local index of landmarks list
landmark_start_idx = sent_start_frame - start_frame
landmark_end_idx = sent_end_frame - start_frame
clip_landmark_list.append(landmarks[landmark_start_idx:landmark_end_idx])
# append clip information
dataset[-1]['clip_info'].append({'sent': clip_sent_list,
'landmarks': clip_landmark_list,
'start_frame': start_frame,
'end_frame': end_frame})
print('[INFO] Current video: {}, start_frame: {}, end_frame: {}'.format(vid_name, start_frame, end_frame))
count_landmarks(dataset)
print('[INFO] Writing to pickle.')
with open('{}/eye_motion_dataset.pickle'.format(opt.dataset_path), 'wb') as df:
pickle.dump(dataset, df)
def second_to_frame(second, fps):
    return int(round(second * fps))


def count_landmarks(dataset):
    landmark_list = []
    for data in dataset:
        clip_info = data['clip_info']
        for c_info in clip_info:
            c_landmarks = c_info['landmarks']
            for landmarks in c_landmarks:
                for lm in landmarks:
                    landmark_list.append(lm)
    landmark_array = np.array(landmark_list)
    n_samples, n_features = landmark_array.shape
    print('[INFO] n_samples:{}, n_features:{}'.format(n_samples, n_features))
    # print('[INFO] Estimated running time: {:0.2f} hrs'.format(n_samples/opt.fps/60/60))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-vid_path', default='./videos')
    parser.add_argument('-facial_keypoints', default='./facial_keypoints')
    parser.add_argument('-clip_filter_path', default='./filtered_clips')
    parser.add_argument('-dataset_path', default='./dataset')
    opt = parser.parse_args()

    # make eye motion dataset
    make_dataset(opt)


if __name__ == '__main__':
    main()
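
# Hedged inspection sketch (not part of the original script): how the pickle written
# by make_dataset() can be loaded back, assuming the default -dataset_path. The field
# names follow the dataset structure documented in the module docstring above.
def _inspect_dataset(path='./dataset/eye_motion_dataset.pickle'):
    with open(path, 'rb') as f:
        data = pickle.load(f)
    for entry in data:
        print(entry['vid'], 'clips:', len(entry['clip_info']))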
# ===== StarcoderdataPython sample 1656590: repo rakesh-lagare/Thesis_Work =====
# -*- coding: utf-8 -*-
from random import randrange
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as nprnd
import pandas as pd
import os
os.remove("dataframe.csv")
os.remove("dataList.csv")
def pattern_gen(clas, noise, scale, offset):
    # note: `offset` and `ts_noise_small` are currently unused
    ts_data = []
    tsn = [10,10,10,10,13,10,10,10,13,10,10,10,10,10,10,10,13,10,10,10,13,10,10,10]
    ts_noise = nprnd.randint(15, size=100)
    ts_noise_small = nprnd.randint(5, size=100)  # was `ts_n0ise` (typo) in the original

    # box
    if clas == 1:
        ts_data = [10,20,30,40,50,60,70,70,70,70,70,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,
                   80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,20,10]
        if scale == 1:
            ts_data = [i * 2.5 for i in ts_data]
        if noise == 1:
            ts_data = [sum(x) for x in zip(ts_data, ts_noise)]
    # linear increase
    elif clas == 2:
        ts_data = [11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,
                   41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62]
        if scale == 1:
            ts_data = [i * 2.5 for i in ts_data]
        if noise == 1:
            ts_data = [sum(x) for x in zip(ts_data, ts_noise)]
    # linear decrease
    elif clas == 3:
        ts_data = [11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,
                   41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62]
        ts_data.reverse()
        if scale == 1:
            ts_data = [i * 2.5 for i in ts_data]
        if noise == 1:
            ts_data = [sum(x) for x in zip(ts_data, ts_noise)]
    # periodic
    elif clas == 4:
        ts_data = [20,30,40,50,60,70,80,90,70,60,50,40,30,20,10,10,10,10,10,20,30,40,50,60,70,80,90,70,60,50,
                   40,30,20,10,10,10,10,10,20,30,40,50,60,70,80,90,70,60,50,40,30,20]
        if scale == 1:
            ts_data = [i * 3.5 for i in ts_data]
        if noise == 1:
            ts_data = [sum(x) for x in zip(ts_data, ts_noise - 10)]
    elif clas == 5:
        ts_data = [20,30,85,88,90,88,85,36,34,36,55,60,58,20,20,18,18,20,20,90,85,55,55,55,60,
                   10,20,30,85,88,90,88,85,36,34,36,55,60,58,20,20,18,18,20,20,90,85,55,55,55,60,10]
        if scale == 1:
            ts_data = [i * 3.5 for i in ts_data]
        if noise == 1:
            ts_data = [sum(x) for x in zip(ts_data, ts_noise)]
    elif clas == 6:
        ts_data = [10,20,30,90,90,90,90,90,90,90,90,90,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,
                   55,55,55,55,55,55,55,55,90,90,90,90,90,90,90,90,90,30,20,10]
        if scale == 1:
            ts_data = [i * 3.5 for i in ts_data]
        if noise == 1:
            ts_data = [sum(x) for x in zip(ts_data, ts_noise)]

    tss = tsn + ts_data + tsn
    return tss
def prep_data(num):
    df = pd.DataFrame()
    data_list = []
    for idx in range(num):
        # note: classes 5 and 6 defined in pattern_gen are never drawn here
        random_clas = randrange(4) + 1
        random_noise = 1  # randrange(2)
        random_scale = randrange(2)
        random_offset = 0  # randrange(2)

        if random_scale == 0:
            clas = random_clas + 0.1
        else:
            clas = random_clas + 0.2

        ts_data = pattern_gen(random_clas, random_noise, random_scale, random_offset)
        temp_df = pd.DataFrame([[idx, clas, random_noise, ts_data]], columns=['index', 'class', 'noise', 'data'])
        df = pd.concat([df, temp_df])  # DataFrame.append (used originally) was removed in pandas 2.0
        data_list.extend(ts_data)

    plt.plot(data_list)
    plt.show()
    return (df, data_list)


df, data_list = prep_data(10000)
export_csv = df.to_csv(r'C:\Megatron\Thesis\Thesis_Work\Sax\Final_code_test\dataframe.csv', index=None, header=True)

temp_df = pd.DataFrame()
temp_df.insert(loc=0, column='sub_section', value=data_list)
export_csv1 = temp_df.to_csv(r'C:\Megatron\Thesis\Thesis_Work\Sax\Final_code_test\dataList.csv', index=None, header=True)
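
# Hedged read-back sketch (not part of the original script): reload the two CSVs
# written above for a quick sanity check; the paths mirror the hard-coded ones used here.
def check_outputs():
    labels = pd.read_csv(r'C:\Megatron\Thesis\Thesis_Work\Sax\Final_code_test\dataframe.csv')
    raw = pd.read_csv(r'C:\Megatron\Thesis\Thesis_Work\Sax\Final_code_test\dataList.csv')
    print(labels['class'].value_counts())
    print('total samples in dataList:', len(raw))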
# ===== StarcoderdataPython sample 3229153: repo TheSlimvReal/PSE---LA-meets-ML =====
from modules.controller.commands.command import Command
## command to display the help message
#
# this command will be created when entering help in the terminal
# @extends Command so it can be treated as the other commands
class HelpCommand(Command):
    pass
# ===== StarcoderdataPython sample 190389 =====

import pandas as pd
import tushare as ts
from StockAnalysisSystem.core.config import TS_TOKEN
from StockAnalysisSystem.core.Utility.common import *
from StockAnalysisSystem.core.Utility.time_utility import *
from StockAnalysisSystem.core.Utility.CollectorUtility import *
# ----------------------------------------------------------------------------------------------------------------------
FIELDS = {
    'Finance.BusinessComposition': {
        'bz_item':     '主营业务来源',      # main business source
        'bz_sales':    '主营业务收入(元)',  # main business revenue (CNY)
        'bz_profit':   '主营业务利润(元)',  # main business profit (CNY)
        'bz_cost':     '主营业务成本(元)',  # main business cost (CNY)
        'curr_type':   '货币代码',          # currency code
        'update_flag': '是否更新',          # whether updated
    },
}


# -------------------------------------------------------- Prob --------------------------------------------------------

def plugin_prob() -> dict:
    return {
        'plugin_name': 'finance_business_tushare_pro',
        'plugin_version': '0.0.0.1',
        'tags': ['tusharepro']
    }


def plugin_adapt(uri: str) -> bool:
    return uri in FIELDS.keys()


def plugin_capacities() -> list:
    return list(FIELDS.keys())
# ----------------------------------------------------------------------------------------------------------------------
# fina_mainbz: https://tushare.pro/document/2?doc_id=81
def __fetch_business_data_by_type(pro: ts.pro_api, ts_code: str, classify: str,
                                  since: datetime.datetime, until: datetime.datetime):
    # (renamed from the original `__fetch_bussiness_data_by_type` typo)
    limit = 10
    result = None
    derive_time = until
    while limit > 0:
        ts_since = since.strftime('%Y%m%d')
        ts_until = derive_time.strftime('%Y%m%d')

        ts_delay('fina_mainbz')
        sub_result = pro.fina_mainbz(ts_code=ts_code, start_date=ts_since, end_date=ts_until, type=classify)

        if not isinstance(sub_result, pd.DataFrame) or sub_result.empty:
            break

        result = pd.concat([result, sub_result])
        result = result.reset_index(drop=True)

        result_since = min(sub_result['end_date'])
        result_since = text_auto_time(result_since)

        # End condition
        if result_since == derive_time or len(sub_result) < 100:
            break
        limit -= 1
        derive_time = result_since

    if isinstance(result, pd.DataFrame):
        result = result.drop_duplicates()
    return result
def __fetch_business_data(**kwargs) -> pd.DataFrame:
    uri = kwargs.get('uri')
    result = check_execute_test_flag(**kwargs)

    if result is None:
        period = kwargs.get('period')
        ts_code = pickup_ts_code(kwargs)
        since, until = normalize_time_serial(period, default_since(), today())

        # since_limit = years_ago_of(until, 3)
        # since = max([since, since_limit])

        # Because of the implementation of this interface, we only fetch the annual report
        # since_year = since.year
        # until_year = until.year

        result = None
        pro = ts.pro_api(TS_TOKEN)

        try:
            if is_slice_update(ts_code, since, until):
                ts_since = since.strftime('%Y%m%d')
                clock = Clock()
                result_product = pro.fina_mainbz_vip(ts_since, type='P')
                result_area = pro.fina_mainbz_vip(ts_since, type='D')
                print('%s: [%s] - Network finished, time spending: %sms' % (uri, ts_code, clock.elapsed_ms()))
            else:
                clock = Clock()
                result_product = __fetch_business_data_by_type(pro, ts_code, 'P', since, until)
                result_area = __fetch_business_data_by_type(pro, ts_code, 'D', since, until)
                print('%s: [%s] - Network finished, time spending: %sms' % (uri, ts_code, clock.elapsed_ms()))

            if isinstance(result_product, pd.DataFrame) and not result_product.empty:
                result_product['classification'] = 'product'
            if isinstance(result_area, pd.DataFrame) and not result_area.empty:
                result_area['classification'] = 'area'
            result = pd.concat([result_product, result_area])

            # for year in range(since_year, until_year):
            #     ts_date = '%02d1231' % year
            #     # Sorry, this interface allows at most 60 calls per minute
            #     ts_delay('fina_mainbz')
            #     sub_result = pro.fina_mainbz(ts_code=ts_code, start_date=ts_date, end_date=ts_date)
            #     result = pd.concat([result, sub_result])
            # print('%s: [%s] - Network finished, time spending: %sms' % (uri, ts_code, clock.elapsed_ms()))

            # result.fillna(0.0)
            # del result['ts_code']
            # result.reset_index()
            # business = result.groupby('end_date').apply(
            #     lambda x: x.drop('end_date', axis=1).to_dict('records'))
            # result = pd.DataFrame.from_dict({'business': business}, orient='index').reset_index()
            # result['ts_code'] = ts_code
        except Exception as e:
            print(e)
            print(traceback.format_exc())
        finally:
            pass

    check_execute_dump_flag(result, **kwargs)

    if isinstance(result, pd.DataFrame) and not result.empty:
        result.reset_index(drop=True, inplace=True)
        result.fillna('', inplace=True)
        convert_ts_code_field(result)
        convert_ts_date_field(result, 'end_date', 'period')

    # if result is not None:
    #     result['period'] = pd.to_datetime(result['end_date'])
    #     result['stock_identity'] = result['ts_code'].apply(ts_code_to_stock_identity)

    return result
# ----------------------------------------------------------------------------------------------------------------------
def query(**kwargs) -> pd.DataFrame or None:
    uri = kwargs.get('uri')
    if uri in list(FIELDS.keys()):
        return __fetch_business_data(**kwargs)
    else:
        return None


def validate(**kwargs) -> bool:
    nop(kwargs)
    return True


def fields() -> dict:
    return FIELDS
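
# Hedged usage sketch (not part of the original plugin): how the collector entry
# point might be invoked. The kwargs keys are assumptions inferred from what
# __fetch_business_data() reads ('uri', 'period') plus whatever pickup_ts_code()
# expects for the stock code.
def _example_query():
    return query(uri='Finance.BusinessComposition',
                 ts_code='600000.SH',  # hypothetical code format
                 period=(default_since(), today()))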
# ===== StarcoderdataPython sample 1657618 =====

# Open file
file = open("putTacGia.txt", "w")
for num in range(1,50001):
s2=str(num)
s3="put 'TACGIA','TG" + s2 + "','TG:MSTG','MSTG" + s2 + "'\n"
s4="put 'TACGIA','TG" + s2 + "','TG:TENTG','TENTG" + s2 + "'\n"
s5="put 'TACGIA','TG" + s2 + "','TG:SDT','SDT" + s2 + "'\n"
s6="put 'TACGIA','TG" + s2 + "','TG:EMAIL','EMAIL" + s2 + "@DS112.COM'\n"
file.write(s3)
file.write(s4)
file.write(s5)
file.write(s6)
pass
# Đóng file
file.close() | StarcoderdataPython |
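
# Hedged usage note (not in the original): each generated line is an HBase shell
# `put` command, e.g.
#     put 'TACGIA','TG1','TG:MSTG','MSTG1'
# so the resulting file can be replayed against a cluster with something like:
#     hbase shell putTacGia.txt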
# ===== StarcoderdataPython sample 3311033: Scripts/rcfilt.py =====
import numpy as np
from scipy.signal import lfilter
def rcfilt(y=None, SampTime=None, TimeConstant=None, UpDown='up', *args, **kwargs):
    '''
    Filters a spectrum using an RC low-pass filter as built into
    cw spectrometers to remove high-frequency noise.

    This script is freely inspired by the easyspin suite from the Stefan Stoll lab
    (https://github.com/StollLab/EasySpin/)
    (https://easyspin.org/easyspin/)
    Script written by <NAME> (https://github.com/TChauvire/EPR_ESR_Suite/), 09/09/2020

    Parameters
    ----------
    y : numpy data array, 1D
        unfiltered spectrum. The default is None.
    SampTime : float number, optional
        sampling time of the spectrum. The default is None.
    TimeConstant : float number, optional
        time constant of the filter. The default is None.
    UpDown : string argument, optional
        'up' or 'down' defines the direction of the field sweep.
        If omitted, 'up' is assumed. The default is 'up'.

    SampTime and TimeConstant must have the same units (s, ms, us, ...)

    Returns
    -------
    yFiltered : numpy data array, same shape as the input data array y
        spectrum obtained after the RC low-pass filter transformation.
    '''
    if np.array(TimeConstant).dtype.char not in 'bBhHiIlLqQpPdefg':
        raise ValueError('Time constant must be real!')
    if np.array(SampTime).dtype.char not in 'bBhHiIlLqQpPdefg':
        raise ValueError('SampTime must be real!')
    if np.array(TimeConstant).size != 1:
        raise ValueError('Time constant must be a scalar!')
    if np.array(SampTime).size != 1:
        raise ValueError('SampTime must be a scalar!')
    # the original tested `TimeConstant <= 0`, which contradicted both the error
    # message and the explicit TimeConstant == 0 branch below
    if TimeConstant < 0:
        raise ValueError('Time constant must be positive or zero!')
    if SampTime <= 0:
        raise ValueError('SampTime must be positive!')

    m, n = y.shape
    yFiltered = np.full((m, n), np.nan)
    if y.shape[0] != np.ravel(y).shape[0]:
        raise ValueError('y must be a row or column vector.')
    else:
        y = y.reshape((m, 1))

    if UpDown in ['down', 'dn']:
        y = y[::-1]

    if TimeConstant == 0:
        e = 0
    else:
        e = np.exp(-SampTime / TimeConstant)

    for ii in range(n):
        yFiltered[:, ii] = lfilter(np.array([1 - e]), np.array([1, -e]), y[:, ii], axis=0)

    if UpDown in ['down', 'dn']:
        y = y[::-1]
        yFiltered = yFiltered[::-1]

    y = y.reshape((m, n))
    yFiltered = yFiltered.reshape((m, n))
    return yFiltered
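
# Hedged usage sketch (not part of the original module): filter a noisy
# column-vector spectrum with a time constant of 10 sampled every 1
# (any consistent units work, per the docstring).
if __name__ == '__main__':
    spectrum = np.cumsum(np.random.randn(1024)).reshape((1024, 1))
    smoothed = rcfilt(spectrum, SampTime=1.0, TimeConstant=10.0, UpDown='up')
    print(smoothed.shape)  # (1024, 1), same shape as the input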
# ===== StarcoderdataPython sample 109059 =====

import datetime
import json
import os
def get_utc_now():
    return datetime.datetime.utcnow()


def format_datetime(date_time):
    """Generate string from datetime object."""
    return date_time.strftime("%Y-%m-%d %H:%M:%S")


def get_vcap_service():
    vcap_config = os.getenv('VCAP_SERVICES', None)
    if vcap_config:
        decoded_config = json.loads(vcap_config)
        for key, value in decoded_config.items():  # `iteritems()` in the original is Python 2 only
            if key.startswith('mysql56'):
                mysql_creds = decoded_config[key][0]['credentials']
                return "{}?charset=utf8".format(str(mysql_creds['uri']))
    else:
        return 'mysql://root:intel123@localhost:3306/smart_home??charset=utf8'
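
if __name__ == '__main__':
    # Hedged illustration (not part of the original): the minimal VCAP_SERVICES
    # shape that get_vcap_service() expects — a 'mysql56*' key whose first binding
    # carries a 'credentials.uri' field.
    os.environ['VCAP_SERVICES'] = json.dumps({
        'mysql56': [{'credentials': {'uri': 'mysql://user:pass@host:3306/db'}}]
    })
    print(get_vcap_service())  # -> mysql://user:pass@host:3306/db?charset=utf8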
# ===== StarcoderdataPython sample 176283 =====

import numpy as np
import datetime
from ..nets.lstm_network import ActorCritic
import torch
import torch.optim as optim
from tqdm import trange
from tensorboardX import SummaryWriter
class Agent(object):
    def __init__(self, agent_name, input_channels, network_parameters, ppo_parameters=None, n_actions=3):
        self.name = agent_name
        self.save_path = 'agents/' + self.name + '/'
        self.n_actions = n_actions
        self.input_channels = input_channels
        self.action_space = [i for i in range(n_actions)]

        if ppo_parameters:
            self.gamma = float(ppo_parameters['GAMMA'])
            self.lam = float(ppo_parameters['GAE_LAMBDA'])
            self.alpha = float(ppo_parameters['LEARNING_RATE'])
            self.ppo_epsilon = float(ppo_parameters['PPO_EPSILON'])
            self.entropy_beta = float(ppo_parameters['ENTROPY_BETA'])
            self.minibatch_size = int(ppo_parameters['MINI_BATCH_SIZE'])
            self.ppo_epochs = int(ppo_parameters['PPO_EPOCHS'])
            self.critic_discount = float(ppo_parameters['CRITIC_DISCOUNT'])
            self.training_sequence_length = 4
            self.writer = SummaryWriter(logdir=self.save_path + 'logs')
            self.epochs_trained = 0

        # Autodetect CUDA
        use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
        print('Device:', self.device, '\n')

        self.network = ActorCritic(input_channels=self.input_channels, n_actions=self.n_actions,
                                   parameters=network_parameters).to(self.device)
        print(self.network, '\n')

        if ppo_parameters:
            self.optimizer = optim.Adam(self.network.parameters(), lr=self.alpha)

    def choose_action(self, observation, hidden):
        dist, value, hidden = self.network(observation, hidden)
        action = dist.sample()
        log_prob = dist.log_prob(action)
        return action, log_prob, value, hidden

    def compute_gae(self, next_value, rewards, masks, values):
        values = values + [next_value]
        gae = 0
        returns = []
        for step in reversed(range(len(rewards))):
            delta = rewards[step] + self.gamma * values[step + 1] * masks[step] - values[step]
            gae = delta + self.gamma * self.lam * masks[step] * gae
            returns.insert(0, gae + values[step])
        return returns

    def ppo_iter(self, states, actions, log_probs, returns, advantages, hiddens):
        batch_size = states.size(0)
        (hiddens_0, hiddens_1) = hiddens
        for _ in range(batch_size // self.minibatch_size):
            # `batch_size - 9` leaves room for the strided sequence offsets
            # (up to (training_sequence_length - 1) * 3 = 9)
            rand_ids = np.random.randint(0, batch_size - 9, self.minibatch_size)
            yield [(states[rand_ids + i * 3, :], actions[rand_ids + i * 3], log_probs[rand_ids + i * 3],
                    returns[rand_ids + i * 3, :], advantages[rand_ids + i * 3, :])
                   for i in range(self.training_sequence_length)], \
                  (hiddens_0[rand_ids, :], hiddens_1[rand_ids])

    def learn(self, frame_idx, states, actions, log_probs, returns, advantages, hiddens):
        count_steps = 0
        sum_returns = 0.0
        sum_advantage = 0.0
        sum_loss_actor = 0.0
        sum_loss_critic = 0.0
        sum_entropy = 0.0
        sum_loss_total = 0.0

        t = trange(self.ppo_epochs, desc=f'{self.name} is learning', unit='update', leave=False)
        for _ in t:
            for seq, hidden in self.ppo_iter(states, actions, log_probs, returns, advantages, hiddens):
                loss = torch.zeros([]).to(self.device)
                for (state, action, old_log_probs, return_, advantage) in seq:
                    dist, value, hidden = self.network(state, hidden, grads=True)
                    entropy = dist.entropy().mean()
                    new_log_probs = dist.log_prob(action)

                    ratio = (new_log_probs - old_log_probs).exp()
                    surr1 = ratio * advantage
                    surr2 = torch.clamp(ratio, 1.0 - self.ppo_epsilon, 1.0 + self.ppo_epsilon) * advantage

                    actor_loss = -torch.min(surr1, surr2).mean()
                    critic_loss = (return_ - value).pow(2).mean()

                    loss += self.critic_discount * critic_loss + actor_loss - self.entropy_beta * entropy
                loss /= self.training_sequence_length

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                sum_returns += return_.mean()
                sum_advantage += advantage.mean()
                sum_loss_actor += actor_loss
                sum_loss_critic += critic_loss
                sum_loss_total += loss
                sum_entropy += entropy
                count_steps += 1

        self.writer.add_scalar("returns", sum_returns / count_steps, frame_idx)
        self.writer.add_scalar("advantage", sum_advantage / count_steps, frame_idx)
        self.writer.add_scalar("loss_actor", sum_loss_actor / count_steps, frame_idx)
        self.writer.add_scalar("loss_critic", sum_loss_critic / count_steps, frame_idx)
        self.writer.add_scalar("entropy", sum_entropy / count_steps, frame_idx)
        self.writer.add_scalar("loss_total", sum_loss_total / count_steps, frame_idx)
        self.writer.flush()

    def save_model(self):
        torch.save(self.network.state_dict(), self.save_path + 'saved_model/network_weights.pt')
        torch.save(self.optimizer.state_dict(), self.save_path + 'saved_model/optimizer_weights.pt')

    def load_model(self, testing=False):
        self.network.load_state_dict(torch.load(self.save_path + 'saved_model/network_weights.pt'))
        if not testing:
            self.optimizer.load_state_dict(torch.load(self.save_path + 'saved_model/optimizer_weights.pt'))
        print('Brain successfully loaded\n')
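
# Hedged construction sketch (not part of the original): the PPO keys below are
# exactly the ones read in Agent.__init__; `network_parameters` and the values are
# assumptions, since its keys are only consumed by ActorCritic in another module.
# ppo_parameters = {
#     'GAMMA': 0.99, 'GAE_LAMBDA': 0.95, 'LEARNING_RATE': 2.5e-4, 'PPO_EPSILON': 0.2,
#     'ENTROPY_BETA': 0.01, 'MINI_BATCH_SIZE': 32, 'PPO_EPOCHS': 4, 'CRITIC_DISCOUNT': 0.5,
# }
# agent = Agent('demo', input_channels=4, network_parameters={}, ppo_parameters=ppo_parameters)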
# ===== StarcoderdataPython sample 1644890 =====

# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from collections import OrderedDict, defaultdict
from dwave.inspector.adapters import solver_data_postprocessed
# dict[problem_id: str, problem_data: dict]
problem_store = OrderedDict()
# dict[problem_id: str, accessed: threading.Semaphore]
problem_access_sem = defaultdict(lambda: threading.Semaphore(value=0))
def push_inspector_data(data):
    # push solver
    add_solver(data['rel']['solver'])
    del data['rel']

    # push problem data
    id_ = data['details']['id']
    problem_store[id_] = data

    return id_


# captures QMI + computation (response future)
problemdata_bag = set()
problemdata = {}

# map of all solvers seen in problems processed so far
solvers = {}


class ProblemData(object):
    # QMI/problem submitted, dict with keys: linear, quadratic, type_, params
    problem = None
    # dwave.cloud.solver.StructuredSolver instance
    solver = None
    # dwave.cloud.computation.Future instance
    response = None

    def __init__(self, problem, solver, response):
        self.problem = problem
        self.solver = solver
        self.response = response


def add_problem(problem, solver, response):
    # store the problem encapsulated with ProblemData
    pd = ProblemData(problem=problem, solver=solver, response=response)
    problemdata_bag.add(pd)

    # cache solver reference
    add_solver(solver)


def add_solver(solver):
    solvers[solver.id] = solver


def index_resolved_problems():
    """Move problems that have `problem_id` assigned from `problemdata_bag` to
    `problemdata` dict.
    """
    # find problems that have id assigned
    resolved = set()
    for pd in problemdata_bag:
        if pd.response.id is not None:
            resolved.add(pd)

    # add them to the indexed collection
    # and remove them from the input bag
    for pd in resolved:
        problemdata[pd.response.id] = pd
        problemdata_bag.remove(pd)


def get_problem(problem_id):
    """Return :class:`.ProblemData` from problem data store, or fail with
    :exc:`KeyError`.
    """
    if problem_id not in problemdata:
        index_resolved_problems()
    return problemdata[problem_id]


def get_solver_data(solver_id):
    """Return solver data dict for `solver_id`. If solver hasn't been seen in
    any of the problems cached so far, fail with :exc:`KeyError`.
    """
    if solver_id in solvers:
        return solver_data_postprocessed(solvers[solver_id])
    raise KeyError('solver not found')
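
# Hedged flow sketch (not part of the original module): how the store is meant
# to be used — problems may be added while their Future still lacks an id, and
# are indexed lazily on first lookup.
# add_problem(problem={...}, solver=solver, response=future)  # future.id may be None here
# pd = get_problem(future.id)  # triggers index_resolved_problems() if needed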
# ===== StarcoderdataPython sample 15616 =====

"""Coverage based QC calculations.
"""
import glob
import os
import subprocess
from bcbio.bam import ref, readstats, utils
from bcbio.distributed import transaction
from bcbio.heterogeneity import chromhacks
import bcbio.pipeline.datadict as dd
from bcbio.provenance import do
from bcbio.variation import coverage as cov
from bcbio.variation import bedutils
def run(bam_file, data, out_dir):
    """Run coverage QC analysis
    """
    out = dict()

    out_dir = utils.safe_makedir(out_dir)
    if dd.get_coverage(data) and dd.get_coverage(data) not in ["None"]:
        merged_bed_file = bedutils.clean_file(dd.get_coverage_merged(data), data, prefix="cov-", simple=True)
        target_name = "coverage"
    elif dd.get_coverage_interval(data) != "genome":
        merged_bed_file = dd.get_variant_regions_merged(data)
        target_name = "variant_regions"
    else:
        merged_bed_file = None
        target_name = "genome"

    avg_depth = cov.get_average_coverage(target_name, merged_bed_file, data)
    if target_name == "coverage":
        out_files = cov.coverage_region_detailed_stats(target_name, merged_bed_file, data, out_dir)
    else:
        out_files = []
    out['Avg_coverage'] = avg_depth

    samtools_stats_dir = os.path.join(out_dir, os.path.pardir, 'samtools')
    from bcbio.qc import samtools
    samtools_stats = samtools.run(bam_file, data, samtools_stats_dir)["metrics"]

    out["Total_reads"] = total_reads = int(samtools_stats["Total_reads"])
    out["Mapped_reads"] = mapped = int(samtools_stats["Mapped_reads"])
    out["Mapped_paired_reads"] = int(samtools_stats["Mapped_paired_reads"])
    out['Duplicates'] = dups = int(samtools_stats["Duplicates"])

    if total_reads:
        out["Mapped_reads_pct"] = 100.0 * mapped / total_reads
    if mapped:
        out['Duplicates_pct'] = 100.0 * dups / mapped

    if dd.get_coverage_interval(data) == "genome":
        mapped_unique = mapped - dups
    else:
        mapped_unique = readstats.number_of_mapped_reads(data, bam_file, keep_dups=False)
    out['Mapped_unique_reads'] = mapped_unique

    if merged_bed_file:
        ontarget = readstats.number_of_mapped_reads(
            data, bam_file, keep_dups=False, bed_file=merged_bed_file, target_name=target_name)
        out["Ontarget_unique_reads"] = ontarget
        if mapped_unique:
            out["Ontarget_pct"] = 100.0 * ontarget / mapped_unique
            out['Offtarget_pct'] = 100.0 * (mapped_unique - ontarget) / mapped_unique
            if dd.get_coverage_interval(data) != "genome":
                # Skip padded calculation for WGS even if the "coverage" file is specified;
                # the padded statistic only makes sense for exomes and panels
                padded_bed_file = bedutils.get_padded_bed_file(out_dir, merged_bed_file, 200, data)
                ontarget_padded = readstats.number_of_mapped_reads(
                    data, bam_file, keep_dups=False, bed_file=padded_bed_file, target_name=target_name + "_padded")
                out["Ontarget_padded_pct"] = 100.0 * ontarget_padded / mapped_unique
        if total_reads:
            out['Usable_pct'] = 100.0 * ontarget / total_reads

    indexcov_files = _goleft_indexcov(bam_file, data, out_dir)
    out_files += [x for x in indexcov_files if x and utils.file_exists(x)]
    out = {"metrics": out}
    if len(out_files) > 0:
        out["base"] = out_files[0]
        out["secondary"] = out_files[1:]

    return out
def _goleft_indexcov(bam_file, data, out_dir):
    """Use goleft indexcov to estimate coverage distributions using BAM index.

    Only used for whole genome runs as captures typically don't have enough data
    to be useful for index-only summaries.
    """
    if not dd.get_coverage_interval(data) == "genome":
        return []
    out_dir = utils.safe_makedir(os.path.join(out_dir, "indexcov"))
    out_files = [os.path.join(out_dir, "%s-indexcov.%s" % (dd.get_sample_name(data), ext))
                 for ext in ["roc", "ped", "bed.gz"]]
    if not utils.file_uptodate(out_files[-1], bam_file):
        with transaction.tx_tmpdir(data) as tmp_dir:
            tmp_dir = utils.safe_makedir(os.path.join(tmp_dir, dd.get_sample_name(data)))
            gender_chroms = [x.name for x in ref.file_contigs(dd.get_ref_file(data)) if chromhacks.is_sex(x.name)]
            gender_args = "--sex %s" % (",".join(gender_chroms)) if gender_chroms else ""
            cmd = "goleft indexcov --directory {tmp_dir} {gender_args} -- {bam_file}"
            try:
                do.run(cmd.format(**locals()), "QC: goleft indexcov")
            except subprocess.CalledProcessError as msg:
                if not ("indexcov: no usable" in str(msg) or
                        ("indexcov: expected" in str(msg) and "sex chromosomes, found:" in str(msg))):
                    raise
            for out_file in out_files:
                orig_file = os.path.join(tmp_dir, os.path.basename(out_file))
                if utils.file_exists(orig_file):
                    utils.copy_plus(orig_file, out_file)
    # MultiQC needs non-gzipped/BED inputs so unpack the file
    out_bed = out_files[-1].replace(".bed.gz", ".tsv")
    if utils.file_exists(out_files[-1]) and not utils.file_exists(out_bed):
        with transaction.file_transaction(data, out_bed) as tx_out_bed:
            cmd = "gunzip -c %s > %s" % (out_files[-1], tx_out_bed)
            do.run(cmd, "Unpack indexcov BED file")
    out_files[-1] = out_bed
    return [x for x in out_files if utils.file_exists(x)]