repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (string, 1 class: python) | code (string, 75-19.8k chars) | code_tokens (sequence) | docstring (string, 3-17.3k chars) | docstring_tokens (sequence) | sha (string, 40 chars) | url (string, 87-242 chars) | partition (string, 1 class: train) |
---|---|---|---|---|---|---|---|---|---|---|---|
django-userena-ce/django-userena-ce | demo/profiles/forms.py | SignupFormExtra.save | def save(self):
"""
Override the save method to save the first and last name to the user
field.
"""
# First save the parent form and get the user.
new_user = super(SignupFormExtra, self).save()
new_user.first_name = self.cleaned_data['first_name']
new_user.last_name = self.cleaned_data['last_name']
new_user.save()
# Userena expects to get the new user from this form, so return the new
# user.
return new_user | python | def save(self):
"""
Override the save method to save the first and last name to the user
field.
"""
# First save the parent form and get the user.
new_user = super(SignupFormExtra, self).save()
new_user.first_name = self.cleaned_data['first_name']
new_user.last_name = self.cleaned_data['last_name']
new_user.save()
# Userena expects to get the new user from this form, so return the new
# user.
return new_user | [
"def",
"save",
"(",
"self",
")",
":",
"# First save the parent form and get the user.",
"new_user",
"=",
"super",
"(",
"SignupFormExtra",
",",
"self",
")",
".",
"save",
"(",
")",
"new_user",
".",
"first_name",
"=",
"self",
".",
"cleaned_data",
"[",
"'first_name'",
"]",
"new_user",
".",
"last_name",
"=",
"self",
".",
"cleaned_data",
"[",
"'last_name'",
"]",
"new_user",
".",
"save",
"(",
")",
"# Userena expects to get the new user from this form, so return the new",
"# user.",
"return",
"new_user"
] | Override the save method to save the first and last name to the user
field. | [
"Override",
"the",
"save",
"method",
"to",
"save",
"the",
"first",
"and",
"last",
"name",
"to",
"the",
"user",
"field",
"."
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/demo/profiles/forms.py#L38-L53 | train |
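A minimal usage sketch for the form above; everything except the `first_name`/`last_name` fields (the import path, the parent form's password field names, and all values) is an assumption for illustration, since this row only shows `save()`.

```python
# Hypothetical usage; only first_name/last_name come from the row above,
# the other field names and the import path are assumptions.
from demo.profiles.forms import SignupFormExtra

form = SignupFormExtra(data={
    'username': 'alice',
    'email': 'alice@example.com',
    'password1': 's3cr3t-pass',
    'password2': 's3cr3t-pass',
    'first_name': 'Alice',
    'last_name': 'Liddell',
})
if form.is_valid():
    user = form.save()  # parent save() creates the user, then the names are set
```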
django-userena-ce/django-userena-ce | userena/contrib/umessages/forms.py | ComposeForm.save | def save(self, sender):
"""
Save the message and send it out into the wide world.
:param sender:
The :class:`User` that sends the message.
:return: The saved :class:`Message`.
"""
um_to_user_list = self.cleaned_data['to']
body = self.cleaned_data['body']
msg = Message.objects.send_message(sender,
um_to_user_list,
body)
return msg | python | def save(self, sender):
"""
Save the message and send it out into the wide world.
:param sender:
The :class:`User` that sends the message.
:return: The saved :class:`Message`.
"""
um_to_user_list = self.cleaned_data['to']
body = self.cleaned_data['body']
msg = Message.objects.send_message(sender,
um_to_user_list,
body)
return msg | [
"def",
"save",
"(",
"self",
",",
"sender",
")",
":",
"um_to_user_list",
"=",
"self",
".",
"cleaned_data",
"[",
"'to'",
"]",
"body",
"=",
"self",
".",
"cleaned_data",
"[",
"'body'",
"]",
"msg",
"=",
"Message",
".",
"objects",
".",
"send_message",
"(",
"sender",
",",
"um_to_user_list",
",",
"body",
")",
"return",
"msg"
] | Save the message and send it out into the wide world.
:param sender:
The :class:`User` that sends the message.
:return: The saved :class:`Message`. | [
"Save",
"the",
"message",
"and",
"send",
"it",
"out",
"into",
"the",
"wide",
"world",
"."
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/forms.py#L15-L35 | train |
django-userena-ce/django-userena-ce | userena/forms.py | SignupForm.clean_username | def clean_username(self):
"""
Validate that the username is alphanumeric and is not already in use.
Also validates that the username is not listed in
``USERENA_FORBIDDEN_USERNAMES`` list.
"""
try:
user = get_user_model().objects.get(username__iexact=self.cleaned_data['username'])
except get_user_model().DoesNotExist:
pass
else:
if userena_settings.USERENA_ACTIVATION_REQUIRED and UserenaSignup.objects.filter(user__username__iexact=self.cleaned_data['username']).exclude(activation_key=userena_settings.USERENA_ACTIVATED):
raise forms.ValidationError(_('This username is already taken but not confirmed. Please check your email for verification steps.'))
raise forms.ValidationError(_('This username is already taken.'))
if self.cleaned_data['username'].lower() in userena_settings.USERENA_FORBIDDEN_USERNAMES:
raise forms.ValidationError(_('This username is not allowed.'))
return self.cleaned_data['username'] | python | def clean_username(self):
"""
Validate that the username is alphanumeric and is not already in use.
Also validates that the username is not listed in
``USERENA_FORBIDDEN_USERNAMES`` list.
"""
try:
user = get_user_model().objects.get(username__iexact=self.cleaned_data['username'])
except get_user_model().DoesNotExist:
pass
else:
if userena_settings.USERENA_ACTIVATION_REQUIRED and UserenaSignup.objects.filter(user__username__iexact=self.cleaned_data['username']).exclude(activation_key=userena_settings.USERENA_ACTIVATED):
raise forms.ValidationError(_('This username is already taken but not confirmed. Please check your email for verification steps.'))
raise forms.ValidationError(_('This username is already taken.'))
if self.cleaned_data['username'].lower() in userena_settings.USERENA_FORBIDDEN_USERNAMES:
raise forms.ValidationError(_('This username is not allowed.'))
return self.cleaned_data['username'] | [
"def",
"clean_username",
"(",
"self",
")",
":",
"try",
":",
"user",
"=",
"get_user_model",
"(",
")",
".",
"objects",
".",
"get",
"(",
"username__iexact",
"=",
"self",
".",
"cleaned_data",
"[",
"'username'",
"]",
")",
"except",
"get_user_model",
"(",
")",
".",
"DoesNotExist",
":",
"pass",
"else",
":",
"if",
"userena_settings",
".",
"USERENA_ACTIVATION_REQUIRED",
"and",
"UserenaSignup",
".",
"objects",
".",
"filter",
"(",
"user__username__iexact",
"=",
"self",
".",
"cleaned_data",
"[",
"'username'",
"]",
")",
".",
"exclude",
"(",
"activation_key",
"=",
"userena_settings",
".",
"USERENA_ACTIVATED",
")",
":",
"raise",
"forms",
".",
"ValidationError",
"(",
"_",
"(",
"'This username is already taken but not confirmed. Please check your email for verification steps.'",
")",
")",
"raise",
"forms",
".",
"ValidationError",
"(",
"_",
"(",
"'This username is already taken.'",
")",
")",
"if",
"self",
".",
"cleaned_data",
"[",
"'username'",
"]",
".",
"lower",
"(",
")",
"in",
"userena_settings",
".",
"USERENA_FORBIDDEN_USERNAMES",
":",
"raise",
"forms",
".",
"ValidationError",
"(",
"_",
"(",
"'This username is not allowed.'",
")",
")",
"return",
"self",
".",
"cleaned_data",
"[",
"'username'",
"]"
] | Validate that the username is alphanumeric and is not already in use.
Also validates that the username is not listed in
``USERENA_FORBIDDEN_USERNAMES`` list. | [
"Validate",
"that",
"the",
"username",
"is",
"alphanumeric",
"and",
"is",
"not",
"already",
"in",
"use",
".",
"Also",
"validates",
"that",
"the",
"username",
"is",
"not",
"listed",
"in",
"USERENA_FORBIDDEN_USERNAMES",
"list",
"."
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/forms.py#L44-L61 | train |
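Read as a pattern, the check above is: look up an existing user case-insensitively, then consult a deny list. A standalone sketch of the same flow, with illustrative names and no Django:

```python
FORBIDDEN_USERNAMES = {'signup', 'signin', 'activate', 'password'}  # illustrative

def clean_username(username, existing_usernames):
    # Same order as above: uniqueness first, then the forbidden-name list.
    if username.lower() in (u.lower() for u in existing_usernames):
        raise ValueError('This username is already taken.')
    if username.lower() in FORBIDDEN_USERNAMES:
        raise ValueError('This username is not allowed.')
    return username

print(clean_username('Alice', ['bob']))  # Alice
```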
django-userena-ce/django-userena-ce | userena/forms.py | SignupFormOnlyEmail.save | def save(self):
""" Generate a random username before falling back to parent signup form """
while True:
username = sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
try:
get_user_model().objects.get(username__iexact=username)
except get_user_model().DoesNotExist: break
self.cleaned_data['username'] = username
return super(SignupFormOnlyEmail, self).save() | python | def save(self):
""" Generate a random username before falling back to parent signup form """
while True:
username = sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
try:
get_user_model().objects.get(username__iexact=username)
except get_user_model().DoesNotExist: break
self.cleaned_data['username'] = username
return super(SignupFormOnlyEmail, self).save() | [
"def",
"save",
"(",
"self",
")",
":",
"while",
"True",
":",
"username",
"=",
"sha1",
"(",
"str",
"(",
"random",
".",
"random",
"(",
")",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"[",
":",
"5",
"]",
"try",
":",
"get_user_model",
"(",
")",
".",
"objects",
".",
"get",
"(",
"username__iexact",
"=",
"username",
")",
"except",
"get_user_model",
"(",
")",
".",
"DoesNotExist",
":",
"break",
"self",
".",
"cleaned_data",
"[",
"'username'",
"]",
"=",
"username",
"return",
"super",
"(",
"SignupFormOnlyEmail",
",",
"self",
")",
".",
"save",
"(",
")"
] | Generate a random username before falling back to parent signup form | [
"Generate",
"a",
"random",
"username",
"before",
"falling",
"back",
"to",
"parent",
"signup",
"form"
] | 2d8b745eed25128134e961ca96c270802e730256 | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/forms.py#L110-L119 | train |
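Note that the 5-character slice above draws from only 16^5 (about a million) values and `random.random()` is not a cryptographic source. A hedged alternative sketch (not what the repo does) uses the `secrets` module:

```python
import secrets

# Five hex characters of OS-level randomness; same shape as the
# sha1-based stub above, stronger source.
username = secrets.token_hex(3)[:5]
print(len(username))  # 5
```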
dogoncouch/logdissect | logdissect/parsers/linejson.py | ParseModule.parse_file | def parse_file(self, sourcepath):
"""Parse an object-per-line JSON file into a log data dict"""
# Open input file and read JSON array:
with open(sourcepath, 'r') as logfile:
jsonlist = logfile.readlines()
# Set our attributes for this entry and add it to data.entries:
data = {}
data['entries'] = []
for line in jsonlist:
entry = self.parse_line(line)
data['entries'].append(entry)
if self.tzone:
for e in data['entries']:
e['tzone'] = self.tzone
# Return the parsed data
return data | python | def parse_file(self, sourcepath):
"""Parse an object-per-line JSON file into a log data dict"""
# Open input file and read JSON array:
with open(sourcepath, 'r') as logfile:
jsonlist = logfile.readlines()
# Set our attributes for this entry and add it to data.entries:
data = {}
data['entries'] = []
for line in jsonlist:
entry = self.parse_line(line)
data['entries'].append(entry)
if self.tzone:
for e in data['entries']:
e['tzone'] = self.tzone
# Return the parsed data
return data | [
"def",
"parse_file",
"(",
"self",
",",
"sourcepath",
")",
":",
"# Open input file and read JSON array:",
"with",
"open",
"(",
"sourcepath",
",",
"'r'",
")",
"as",
"logfile",
":",
"jsonlist",
"=",
"logfile",
".",
"readlines",
"(",
")",
"# Set our attributes for this entry and add it to data.entries:",
"data",
"=",
"{",
"}",
"data",
"[",
"'entries'",
"]",
"=",
"[",
"]",
"for",
"line",
"in",
"jsonlist",
":",
"entry",
"=",
"self",
".",
"parse_line",
"(",
"line",
")",
"data",
"[",
"'entries'",
"]",
".",
"append",
"(",
"entry",
")",
"if",
"self",
".",
"tzone",
":",
"for",
"e",
"in",
"data",
"[",
"'entries'",
"]",
":",
"e",
"[",
"'tzone'",
"]",
"=",
"self",
".",
"tzone",
"# Return the parsed data",
"return",
"data"
] | Parse an object-per-line JSON file into a log data dict | [
"Parse",
"an",
"object",
"-",
"per",
"-",
"line",
"JSON",
"file",
"into",
"a",
"log",
"data",
"dict"
] | 426b50264cbfa9665c86df3781e1e415ba8dbbd3 | https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/parsers/linejson.py#L41-L59 | train |
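The parser above consumes one JSON object per line (the JSON Lines shape); a minimal sketch of writing and reading that format, with illustrative fields:

```python
import json

lines = [json.dumps({'raw_text': 'first entry'}),
         json.dumps({'raw_text': 'second entry'})]
text = '\n'.join(lines)                               # file contents
entries = [json.loads(l) for l in text.splitlines()]  # what parse_file builds
print(entries[1]['raw_text'])  # second entry
```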
dogoncouch/logdissect | logdissect/parsers/sojson.py | ParseModule.parse_file | def parse_file(self, sourcepath):
"""Parse single JSON object into a LogData object"""
# Open input file and read JSON array:
with open(sourcepath, 'r') as logfile:
jsonstr = logfile.read()
# Set our attributes for this entry and add it to data.entries:
data = {}
data['entries'] = json.loads(jsonstr)
if self.tzone:
for e in data['entries']:
e['tzone'] = self.tzone
# Return the parsed data
return data | python | def parse_file(self, sourcepath):
"""Parse single JSON object into a LogData object"""
# Open input file and read JSON array:
with open(sourcepath, 'r') as logfile:
jsonstr = logfile.read()
# Set our attributes for this entry and add it to data.entries:
data = {}
data['entries'] = json.loads(jsonstr)
if self.tzone:
for e in data['entries']:
e['tzone'] = self.tzone
# Return the parsed data
return data | [
"def",
"parse_file",
"(",
"self",
",",
"sourcepath",
")",
":",
"# Open input file and read JSON array:",
"with",
"open",
"(",
"sourcepath",
",",
"'r'",
")",
"as",
"logfile",
":",
"jsonstr",
"=",
"logfile",
".",
"read",
"(",
")",
"# Set our attributes for this entry and add it to data.entries:",
"data",
"=",
"{",
"}",
"data",
"[",
"'entries'",
"]",
"=",
"json",
".",
"loads",
"(",
"jsonstr",
")",
"if",
"self",
".",
"tzone",
":",
"for",
"e",
"in",
"data",
"[",
"'entries'",
"]",
":",
"e",
"[",
"'tzone'",
"]",
"=",
"self",
".",
"tzone",
"# Return the parsed data",
"return",
"data"
] | Parse single JSON object into a LogData object | [
"Parse",
"single",
"JSON",
"object",
"into",
"a",
"LogData",
"object"
] | 426b50264cbfa9665c86df3781e1e415ba8dbbd3 | https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/parsers/sojson.py#L41-L56 | train |
dogoncouch/logdissect | logdissect/core.py | LogDissectCore.run_job | def run_job(self):
"""Execute a logdissect job"""
try:
self.load_parsers()
self.load_filters()
self.load_outputs()
self.config_args()
if self.args.list_parsers:
self.list_parsers()
if self.args.verbosemode: print('Loading input files')
self.load_inputs()
if self.args.verbosemode: print('Running parsers')
self.run_parse()
if self.args.verbosemode: print('Merging data')
self.data_set['finalized_data'] = \
logdissect.utils.merge_logs(
self.data_set['data_set'], sort=True)
if self.args.verbosemode: print('Running filters')
self.run_filters()
if self.args.verbosemode: print('Running output')
self.run_output()
except KeyboardInterrupt:
sys.exit(1) | python | def run_job(self):
"""Execute a logdissect job"""
try:
self.load_parsers()
self.load_filters()
self.load_outputs()
self.config_args()
if self.args.list_parsers:
self.list_parsers()
if self.args.verbosemode: print('Loading input files')
self.load_inputs()
if self.args.verbosemode: print('Running parsers')
self.run_parse()
if self.args.verbosemode: print('Merging data')
self.data_set['finalized_data'] = \
logdissect.utils.merge_logs(
self.data_set['data_set'], sort=True)
if self.args.verbosemode: print('Running filters')
self.run_filters()
if self.args.verbosemode: print('Running output')
self.run_output()
except KeyboardInterrupt:
sys.exit(1) | [
"def",
"run_job",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"load_parsers",
"(",
")",
"self",
".",
"load_filters",
"(",
")",
"self",
".",
"load_outputs",
"(",
")",
"self",
".",
"config_args",
"(",
")",
"if",
"self",
".",
"args",
".",
"list_parsers",
":",
"self",
".",
"list_parsers",
"(",
")",
"if",
"self",
".",
"args",
".",
"verbosemode",
":",
"print",
"(",
"'Loading input files'",
")",
"self",
".",
"load_inputs",
"(",
")",
"if",
"self",
".",
"args",
".",
"verbosemode",
":",
"print",
"(",
"'Running parsers'",
")",
"self",
".",
"run_parse",
"(",
")",
"if",
"self",
".",
"args",
".",
"verbosemode",
":",
"print",
"(",
"'Merging data'",
")",
"self",
".",
"data_set",
"[",
"'finalized_data'",
"]",
"=",
"logdissect",
".",
"utils",
".",
"merge_logs",
"(",
"self",
".",
"data_set",
"[",
"'data_set'",
"]",
",",
"sort",
"=",
"True",
")",
"if",
"self",
".",
"args",
".",
"verbosemode",
":",
"print",
"(",
"'Running filters'",
")",
"self",
".",
"run_filters",
"(",
")",
"if",
"self",
".",
"args",
".",
"verbosemode",
":",
"print",
"(",
"'Running output'",
")",
"self",
".",
"run_output",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"sys",
".",
"exit",
"(",
"1",
")"
] | Execute a logdissect job | [
"Execute",
"a",
"logdissect",
"job"
] | 426b50264cbfa9665c86df3781e1e415ba8dbbd3 | https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/core.py#L58-L80 | train |
dogoncouch/logdissect | logdissect/core.py | LogDissectCore.run_parse | def run_parse(self):
"""Parse one or more log files"""
# Data set already has source file names from load_inputs
parsedset = {}
parsedset['data_set'] = []
for log in self.input_files:
parsemodule = self.parse_modules[self.args.parser]
try:
if self.args.tzone:
parsemodule.tzone = self.args.tzone
except NameError: pass
parsedset['data_set'].append(parsemodule.parse_file(log))
self.data_set = parsedset
del(parsedset) | python | def run_parse(self):
"""Parse one or more log files"""
# Data set already has source file names from load_inputs
parsedset = {}
parsedset['data_set'] = []
for log in self.input_files:
parsemodule = self.parse_modules[self.args.parser]
try:
if self.args.tzone:
parsemodule.tzone = self.args.tzone
except NameError: pass
parsedset['data_set'].append(parsemodule.parse_file(log))
self.data_set = parsedset
del(parsedset) | [
"def",
"run_parse",
"(",
"self",
")",
":",
"# Data set already has source file names from load_inputs",
"parsedset",
"=",
"{",
"}",
"parsedset",
"[",
"'data_set'",
"]",
"=",
"[",
"]",
"for",
"log",
"in",
"self",
".",
"input_files",
":",
"parsemodule",
"=",
"self",
".",
"parse_modules",
"[",
"self",
".",
"args",
".",
"parser",
"]",
"try",
":",
"if",
"self",
".",
"args",
".",
"tzone",
":",
"parsemodule",
".",
"tzone",
"=",
"self",
".",
"args",
".",
"tzone",
"except",
"NameError",
":",
"pass",
"parsedset",
"[",
"'data_set'",
"]",
".",
"append",
"(",
"parsemodule",
".",
"parse_file",
"(",
"log",
")",
")",
"self",
".",
"data_set",
"=",
"parsedset",
"del",
"(",
"parsedset",
")"
] | Parse one or more log files | [
"Parse",
"one",
"or",
"more",
"log",
"files"
] | 426b50264cbfa9665c86df3781e1e415ba8dbbd3 | https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/core.py#L82-L95 | train |
dogoncouch/logdissect | logdissect/core.py | LogDissectCore.run_output | def run_output(self):
"""Output finalized data"""
for f in logdissect.output.__formats__:
ouroutput = self.output_modules[f]
ouroutput.write_output(self.data_set['finalized_data'],
args=self.args)
del(ouroutput)
# Output to terminal if silent mode is not set:
if not self.args.silentmode:
if self.args.verbosemode:
print('\n==== ++++ ==== Output: ==== ++++ ====\n')
for line in self.data_set['finalized_data']['entries']:
print(line['raw_text']) | python | def run_output(self):
"""Output finalized data"""
for f in logdissect.output.__formats__:
ouroutput = self.output_modules[f]
ouroutput.write_output(self.data_set['finalized_data'],
args=self.args)
del(ouroutput)
# Output to terminal if silent mode is not set:
if not self.args.silentmode:
if self.args.verbosemode:
print('\n==== ++++ ==== Output: ==== ++++ ====\n')
for line in self.data_set['finalized_data']['entries']:
print(line['raw_text']) | [
"def",
"run_output",
"(",
"self",
")",
":",
"for",
"f",
"in",
"logdissect",
".",
"output",
".",
"__formats__",
":",
"ouroutput",
"=",
"self",
".",
"output_modules",
"[",
"f",
"]",
"ouroutput",
".",
"write_output",
"(",
"self",
".",
"data_set",
"[",
"'finalized_data'",
"]",
",",
"args",
"=",
"self",
".",
"args",
")",
"del",
"(",
"ouroutput",
")",
"# Output to terminal if silent mode is not set:",
"if",
"not",
"self",
".",
"args",
".",
"silentmode",
":",
"if",
"self",
".",
"args",
".",
"verbosemode",
":",
"print",
"(",
"'\\n==== ++++ ==== Output: ==== ++++ ====\\n'",
")",
"for",
"line",
"in",
"self",
".",
"data_set",
"[",
"'finalized_data'",
"]",
"[",
"'entries'",
"]",
":",
"print",
"(",
"line",
"[",
"'raw_text'",
"]",
")"
] | Output finalized data | [
"Output",
"finalized",
"data"
] | 426b50264cbfa9665c86df3781e1e415ba8dbbd3 | https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/core.py#L107-L120 | train |
dogoncouch/logdissect | logdissect/core.py | LogDissectCore.config_args | def config_args(self):
"""Set config options"""
# Module list options:
self.arg_parser.add_argument('--version', action='version',
version='%(prog)s ' + str(__version__))
self.arg_parser.add_argument('--verbose',
action='store_true', dest = 'verbosemode',
help=_('set verbose terminal output'))
self.arg_parser.add_argument('-s',
action='store_true', dest = 'silentmode',
help=_('silence terminal output'))
self.arg_parser.add_argument('--list-parsers',
action='store_true', dest='list_parsers',
help=_('return a list of available parsers'))
self.arg_parser.add_argument('-p',
action='store', dest='parser', default='syslog',
help=_('select a parser (default: syslog)'))
self.arg_parser.add_argument('-z', '--unzip',
action='store_true', dest='unzip',
help=_('include files compressed with gzip'))
self.arg_parser.add_argument('-t',
action='store', dest='tzone',
help=_('specify timezone offset to UTC (e.g. \'+0500\')'))
self.arg_parser.add_argument('files',
# nargs needs to be * not + so --list-filters/etc
# will work without file arg
metavar='file', nargs='*',
help=_('specify input files'))
# self.arg_parser.add_argument_group(self.parse_args)
self.arg_parser.add_argument_group(self.filter_args)
self.arg_parser.add_argument_group(self.output_args)
self.args = self.arg_parser.parse_args() | python | def config_args(self):
"""Set config options"""
# Module list options:
self.arg_parser.add_argument('--version', action='version',
version='%(prog)s ' + str(__version__))
self.arg_parser.add_argument('--verbose',
action='store_true', dest = 'verbosemode',
help=_('set verbose terminal output'))
self.arg_parser.add_argument('-s',
action='store_true', dest = 'silentmode',
help=_('silence terminal output'))
self.arg_parser.add_argument('--list-parsers',
action='store_true', dest='list_parsers',
help=_('return a list of available parsers'))
self.arg_parser.add_argument('-p',
action='store', dest='parser', default='syslog',
help=_('select a parser (default: syslog)'))
self.arg_parser.add_argument('-z', '--unzip',
action='store_true', dest='unzip',
help=_('include files compressed with gzip'))
self.arg_parser.add_argument('-t',
action='store', dest='tzone',
help=_('specify timezone offset to UTC (e.g. \'+0500\')'))
self.arg_parser.add_argument('files',
# nargs needs to be * not + so --list-filters/etc
# will work without file arg
metavar='file', nargs='*',
help=_('specify input files'))
# self.arg_parser.add_argument_group(self.parse_args)
self.arg_parser.add_argument_group(self.filter_args)
self.arg_parser.add_argument_group(self.output_args)
self.args = self.arg_parser.parse_args() | [
"def",
"config_args",
"(",
"self",
")",
":",
"# Module list options:",
"self",
".",
"arg_parser",
".",
"add_argument",
"(",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"version",
"=",
"'%(prog)s '",
"+",
"str",
"(",
"__version__",
")",
")",
"self",
".",
"arg_parser",
".",
"add_argument",
"(",
"'--verbose'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'verbosemode'",
",",
"help",
"=",
"_",
"(",
"'set verbose terminal output'",
")",
")",
"self",
".",
"arg_parser",
".",
"add_argument",
"(",
"'-s'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'silentmode'",
",",
"help",
"=",
"_",
"(",
"'silence terminal output'",
")",
")",
"self",
".",
"arg_parser",
".",
"add_argument",
"(",
"'--list-parsers'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'list_parsers'",
",",
"help",
"=",
"_",
"(",
"'return a list of available parsers'",
")",
")",
"self",
".",
"arg_parser",
".",
"add_argument",
"(",
"'-p'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'parser'",
",",
"default",
"=",
"'syslog'",
",",
"help",
"=",
"_",
"(",
"'select a parser (default: syslog)'",
")",
")",
"self",
".",
"arg_parser",
".",
"add_argument",
"(",
"'-z'",
",",
"'--unzip'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'unzip'",
",",
"help",
"=",
"_",
"(",
"'include files compressed with gzip'",
")",
")",
"self",
".",
"arg_parser",
".",
"add_argument",
"(",
"'-t'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'tzone'",
",",
"help",
"=",
"_",
"(",
"'specify timezone offset to UTC (e.g. \\'+0500\\')'",
")",
")",
"self",
".",
"arg_parser",
".",
"add_argument",
"(",
"'files'",
",",
"# nargs needs to be * not + so --list-filters/etc",
"# will work without file arg",
"metavar",
"=",
"'file'",
",",
"nargs",
"=",
"'*'",
",",
"help",
"=",
"_",
"(",
"'specify input files'",
")",
")",
"# self.arg_parser.add_argument_group(self.parse_args)",
"self",
".",
"arg_parser",
".",
"add_argument_group",
"(",
"self",
".",
"filter_args",
")",
"self",
".",
"arg_parser",
".",
"add_argument_group",
"(",
"self",
".",
"output_args",
")",
"self",
".",
"args",
"=",
"self",
".",
"arg_parser",
".",
"parse_args",
"(",
")"
] | Set config options | [
"Set",
"config",
"options"
] | 426b50264cbfa9665c86df3781e1e415ba8dbbd3 | https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/core.py#L124-L156 | train |
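A standalone sketch of the flag layout above, reproducing a subset of the options with the same `dest` names (the values passed in are illustrative):

```python
import argparse

p = argparse.ArgumentParser()
p.add_argument('-p', action='store', dest='parser', default='syslog')
p.add_argument('-t', action='store', dest='tzone')
p.add_argument('files', metavar='file', nargs='*')

args = p.parse_args(['-p', 'tcpdump', '-t', '+0500', 'auth.log'])
print(args.parser, args.tzone, args.files)  # tcpdump +0500 ['auth.log']
```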
dogoncouch/logdissect | logdissect/core.py | LogDissectCore.load_inputs | def load_inputs(self):
"""Load the specified inputs"""
for f in self.args.files:
if os.path.isfile(f):
fparts = str(f).split('.')
if fparts[-1] == 'gz':
if self.args.unzip:
fullpath = os.path.abspath(str(f))
self.input_files.append(fullpath)
else:
return 0
elif fparts[-1] == 'bz2' or fparts[-1] == 'zip':
return 0
else:
fullpath = os.path.abspath(str(f))
self.input_files.append(fullpath)
else:
print('File '+ f + ' not found')
return 1 | python | def load_inputs(self):
"""Load the specified inputs"""
for f in self.args.files:
if os.path.isfile(f):
fparts = str(f).split('.')
if fparts[-1] == 'gz':
if self.args.unzip:
fullpath = os.path.abspath(str(f))
self.input_files.append(fullpath)
else:
return 0
elif fparts[-1] == 'bz2' or fparts[-1] == 'zip':
return 0
else:
fullpath = os.path.abspath(str(f))
self.input_files.append(fullpath)
else:
print('File '+ f + ' not found')
return 1 | [
"def",
"load_inputs",
"(",
"self",
")",
":",
"for",
"f",
"in",
"self",
".",
"args",
".",
"files",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"f",
")",
":",
"fparts",
"=",
"str",
"(",
"f",
")",
".",
"split",
"(",
"'.'",
")",
"if",
"fparts",
"[",
"-",
"1",
"]",
"==",
"'gz'",
":",
"if",
"self",
".",
"args",
".",
"unzip",
":",
"fullpath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"str",
"(",
"f",
")",
")",
"self",
".",
"input_files",
".",
"append",
"(",
"fullpath",
")",
"else",
":",
"return",
"0",
"elif",
"fparts",
"[",
"-",
"1",
"]",
"==",
"'bz2'",
"or",
"fparts",
"[",
"-",
"1",
"]",
"==",
"'zip'",
":",
"return",
"0",
"else",
":",
"fullpath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"str",
"(",
"f",
")",
")",
"self",
".",
"input_files",
".",
"append",
"(",
"fullpath",
")",
"else",
":",
"print",
"(",
"'File '",
"+",
"f",
"+",
"' not found'",
")",
"return",
"1"
] | Load the specified inputs | [
"Load",
"the",
"specified",
"inputs"
] | 426b50264cbfa9665c86df3781e1e415ba8dbbd3 | https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/core.py#L161-L179 | train |
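As written, `load_inputs` above returns the moment it meets a `.gz` file without `--unzip`, or any `.bz2`/`.zip` file, so files later on the command line are silently skipped. A hypothetical variant that skips only the unsupported file (not the repo's behavior):

```python
files = ['a.log', 'b.bz2', 'c.log']  # illustrative paths
kept = [f for f in files if not f.endswith(('.bz2', '.zip'))]
print(kept)  # ['a.log', 'c.log']
```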
dogoncouch/logdissect | logdissect/core.py | LogDissectCore.list_parsers | def list_parsers(self, *args):
"""Return a list of available parsing modules"""
print('==== Available parsing modules: ====\n')
for parser in sorted(self.parse_modules):
print(self.parse_modules[parser].name.ljust(16) + \
': ' + self.parse_modules[parser].desc)
sys.exit(0) | python | def list_parsers(self, *args):
"""Return a list of available parsing modules"""
print('==== Available parsing modules: ====\n')
for parser in sorted(self.parse_modules):
print(self.parse_modules[parser].name.ljust(16) + \
': ' + self.parse_modules[parser].desc)
sys.exit(0) | [
"def",
"list_parsers",
"(",
"self",
",",
"*",
"args",
")",
":",
"print",
"(",
"'==== Available parsing modules: ====\\n'",
")",
"for",
"parser",
"in",
"sorted",
"(",
"self",
".",
"parse_modules",
")",
":",
"print",
"(",
"self",
".",
"parse_modules",
"[",
"parser",
"]",
".",
"name",
".",
"ljust",
"(",
"16",
")",
"+",
"': '",
"+",
"self",
".",
"parse_modules",
"[",
"parser",
"]",
".",
"desc",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] | Return a list of available parsing modules | [
"Return",
"a",
"list",
"of",
"available",
"parsing",
"modules"
] | 426b50264cbfa9665c86df3781e1e415ba8dbbd3 | https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/core.py#L182-L188 | train |
dogoncouch/logdissect | logdissect/utils.py | get_utc_date | def get_utc_date(entry):
"""Return datestamp converted to UTC"""
if entry['numeric_date_stamp'] == '0':
entry['numeric_date_stamp_utc'] = '0'
return entry
else:
if '.' in entry['numeric_date_stamp']:
t = datetime.strptime(entry['numeric_date_stamp'],
'%Y%m%d%H%M%S.%f')
else:
t = datetime.strptime(entry['numeric_date_stamp'],
'%Y%m%d%H%M%S')
tdelta = timedelta(hours = int(entry['tzone'][1:3]),
minutes = int(entry['tzone'][3:5]))
if entry['tzone'][0] == '-':
ut = t + tdelta
else:
ut = t - tdelta
entry['numeric_date_stamp_utc'] = ut.strftime('%Y%m%d%H%M%S.%f')
return entry | python | def get_utc_date(entry):
"""Return datestamp converted to UTC"""
if entry['numeric_date_stamp'] == '0':
entry['numeric_date_stamp_utc'] = '0'
return entry
else:
if '.' in entry['numeric_date_stamp']:
t = datetime.strptime(entry['numeric_date_stamp'],
'%Y%m%d%H%M%S.%f')
else:
t = datetime.strptime(entry['numeric_date_stamp'],
'%Y%m%d%H%M%S')
tdelta = timedelta(hours = int(entry['tzone'][1:3]),
minutes = int(entry['tzone'][3:5]))
if entry['tzone'][0] == '-':
ut = t + tdelta
else:
ut = t - tdelta
entry['numeric_date_stamp_utc'] = ut.strftime('%Y%m%d%H%M%S.%f')
return entry | [
"def",
"get_utc_date",
"(",
"entry",
")",
":",
"if",
"entry",
"[",
"'numeric_date_stamp'",
"]",
"==",
"'0'",
":",
"entry",
"[",
"'numeric_date_stamp_utc'",
"]",
"=",
"'0'",
"return",
"entry",
"else",
":",
"if",
"'.'",
"in",
"entry",
"[",
"'numeric_date_stamp'",
"]",
":",
"t",
"=",
"datetime",
".",
"strptime",
"(",
"entry",
"[",
"'numeric_date_stamp'",
"]",
",",
"'%Y%m%d%H%M%S.%f'",
")",
"else",
":",
"t",
"=",
"datetime",
".",
"strptime",
"(",
"entry",
"[",
"'numeric_date_stamp'",
"]",
",",
"'%Y%m%d%H%M%S'",
")",
"tdelta",
"=",
"timedelta",
"(",
"hours",
"=",
"int",
"(",
"entry",
"[",
"'tzone'",
"]",
"[",
"1",
":",
"3",
"]",
")",
",",
"minutes",
"=",
"int",
"(",
"entry",
"[",
"'tzone'",
"]",
"[",
"3",
":",
"5",
"]",
")",
")",
"if",
"entry",
"[",
"'tzone'",
"]",
"[",
"0",
"]",
"==",
"'-'",
":",
"ut",
"=",
"t",
"+",
"tdelta",
"else",
":",
"ut",
"=",
"t",
"-",
"tdelta",
"entry",
"[",
"'numeric_date_stamp_utc'",
"]",
"=",
"ut",
".",
"strftime",
"(",
"'%Y%m%d%H%M%S.%f'",
")",
"return",
"entry"
] | Return datestamp converted to UTC | [
"Return",
"datestamp",
"converted",
"to",
"UTC"
] | 426b50264cbfa9665c86df3781e1e415ba8dbbd3 | https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/utils.py#L145-L168 | train |
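A worked example of the offset arithmetic above: a stamp recorded at UTC-05:00 (tzone `'-0500'`) needs five hours added to reach UTC, which is why the `'-'` branch computes `t + tdelta`:

```python
from datetime import datetime, timedelta

tzone = '-0500'
t = datetime.strptime('20190301120000', '%Y%m%d%H%M%S')            # local stamp
tdelta = timedelta(hours=int(tzone[1:3]), minutes=int(tzone[3:5]))
print((t + tdelta).strftime('%Y%m%d%H%M%S.%f'))  # 20190301170000.000000 (UTC)
```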
dogoncouch/logdissect | logdissect/utils.py | get_local_tzone | def get_local_tzone():
"""Get the current time zone on the local host"""
if localtime().tm_isdst:
if altzone < 0:
tzone = '+' + \
str(int(float(altzone) / 60 // 60)).rjust(2,
'0') + \
str(int(float(
altzone) / 60 % 60)).ljust(2, '0')
else:
tzone = '-' + \
str(int(float(altzone) / 60 // 60)).rjust(2,
'0') + \
str(int(float(
altzone) / 60 % 60)).ljust(2, '0')
else:
if altzone < 0:
tzone = \
'+' + str(int(float(timezone) / 60 // 60)).rjust(2,
'0') + \
str(int(float(
timezone) / 60 % 60)).ljust(2, '0')
else:
tzone = \
'-' + str(int(float(timezone) / 60 // 60)).rjust(2,
'0') + \
str(int(float(
timezone) / 60 % 60)).ljust(2, '0')
return tzone | python | def get_local_tzone():
"""Get the current time zone on the local host"""
if localtime().tm_isdst:
if altzone < 0:
tzone = '+' + \
str(int(float(altzone) / 60 // 60)).rjust(2,
'0') + \
str(int(float(
altzone) / 60 % 60)).ljust(2, '0')
else:
tzone = '-' + \
str(int(float(altzone) / 60 // 60)).rjust(2,
'0') + \
str(int(float(
altzone) / 60 % 60)).ljust(2, '0')
else:
if altzone < 0:
tzone = \
'+' + str(int(float(timezone) / 60 // 60)).rjust(2,
'0') + \
str(int(float(
timezone) / 60 % 60)).ljust(2, '0')
else:
tzone = \
'-' + str(int(float(timezone) / 60 // 60)).rjust(2,
'0') + \
str(int(float(
timezone) / 60 % 60)).ljust(2, '0')
return tzone | [
"def",
"get_local_tzone",
"(",
")",
":",
"if",
"localtime",
"(",
")",
".",
"tm_isdst",
":",
"if",
"altzone",
"<",
"0",
":",
"tzone",
"=",
"'+'",
"+",
"str",
"(",
"int",
"(",
"float",
"(",
"altzone",
")",
"/",
"60",
"//",
"60",
")",
")",
".",
"rjust",
"(",
"2",
",",
"'0'",
")",
"+",
"str",
"(",
"int",
"(",
"float",
"(",
"altzone",
")",
"/",
"60",
"%",
"60",
")",
")",
".",
"ljust",
"(",
"2",
",",
"'0'",
")",
"else",
":",
"tzone",
"=",
"'-'",
"+",
"str",
"(",
"int",
"(",
"float",
"(",
"altzone",
")",
"/",
"60",
"//",
"60",
")",
")",
".",
"rjust",
"(",
"2",
",",
"'0'",
")",
"+",
"str",
"(",
"int",
"(",
"float",
"(",
"altzone",
")",
"/",
"60",
"%",
"60",
")",
")",
".",
"ljust",
"(",
"2",
",",
"'0'",
")",
"else",
":",
"if",
"altzone",
"<",
"0",
":",
"tzone",
"=",
"'+'",
"+",
"str",
"(",
"int",
"(",
"float",
"(",
"timezone",
")",
"/",
"60",
"//",
"60",
")",
")",
".",
"rjust",
"(",
"2",
",",
"'0'",
")",
"+",
"str",
"(",
"int",
"(",
"float",
"(",
"timezone",
")",
"/",
"60",
"%",
"60",
")",
")",
".",
"ljust",
"(",
"2",
",",
"'0'",
")",
"else",
":",
"tzone",
"=",
"'-'",
"+",
"str",
"(",
"int",
"(",
"float",
"(",
"timezone",
")",
"/",
"60",
"//",
"60",
")",
")",
".",
"rjust",
"(",
"2",
",",
"'0'",
")",
"+",
"str",
"(",
"int",
"(",
"float",
"(",
"timezone",
")",
"/",
"60",
"%",
"60",
")",
")",
".",
"ljust",
"(",
"2",
",",
"'0'",
")",
"return",
"tzone"
] | Get the current time zone on the local host | [
"Get",
"the",
"current",
"time",
"zone",
"on",
"the",
"local",
"host"
] | 426b50264cbfa9665c86df3781e1e415ba8dbbd3 | https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/utils.py#L171-L200 | train |
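A worked example of the formatting above for a host 7.5 hours west of UTC (`time.timezone` is positive west of UTC, hence the `'-'` sign). Note the minutes use `ljust`, which happens to work because real offsets have minute parts like 00, 30, or 45:

```python
timezone = 27000  # seconds west of UTC, time-module convention (7.5 hours)
tzone = '-' + str(int(float(timezone) / 60 // 60)).rjust(2, '0') + \
        str(int(float(timezone) / 60 % 60)).ljust(2, '0')
print(tzone)  # -0730
```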
dogoncouch/logdissect | logdissect/utils.py | merge_logs | def merge_logs(dataset, sort=True):
"""Merge log dictionaries together into one log dictionary"""
ourlog = {}
ourlog['entries'] = []
for d in dataset:
ourlog['entries'] = ourlog['entries'] + d['entries']
if sort:
ourlog['entries'].sort(key= lambda x: x['numeric_date_stamp_utc'])
return ourlog | python | def merge_logs(dataset, sort=True):
"""Merge log dictionaries together into one log dictionary"""
ourlog = {}
ourlog['entries'] = []
for d in dataset:
ourlog['entries'] = ourlog['entries'] + d['entries']
if sort:
ourlog['entries'].sort(key= lambda x: x['numeric_date_stamp_utc'])
return ourlog | [
"def",
"merge_logs",
"(",
"dataset",
",",
"sort",
"=",
"True",
")",
":",
"ourlog",
"=",
"{",
"}",
"ourlog",
"[",
"'entries'",
"]",
"=",
"[",
"]",
"for",
"d",
"in",
"dataset",
":",
"ourlog",
"[",
"'entries'",
"]",
"=",
"ourlog",
"[",
"'entries'",
"]",
"+",
"d",
"[",
"'entries'",
"]",
"if",
"sort",
":",
"ourlog",
"[",
"'entries'",
"]",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"'numeric_date_stamp_utc'",
"]",
")",
"return",
"ourlog"
] | Merge log dictionaries together into one log dictionary | [
"Merge",
"log",
"dictionaries",
"together",
"into",
"one",
"log",
"dictionary"
] | 426b50264cbfa9665c86df3781e1e415ba8dbbd3 | https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/utils.py#L203-L212 | train |
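A usage sketch for `merge_logs` above, with two single-entry logs and illustrative timestamps; sorting is on the UTC stamp string:

```python
from logdissect.utils import merge_logs

a = {'entries': [{'numeric_date_stamp_utc': '20190101000002.000000'}]}
b = {'entries': [{'numeric_date_stamp_utc': '20190101000001.000000'}]}
merged = merge_logs([a, b], sort=True)
print([e['numeric_date_stamp_utc'] for e in merged['entries']])
# ['20190101000001.000000', '20190101000002.000000']
```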
dogoncouch/logdissect | logdissect/output/log.py | OutputModule.write_output | def write_output(self, data, args=None, filename=None, label=None):
"""Write log data to a log file"""
if args:
if not args.outlog:
return 0
if not filename: filename=args.outlog
lastpath = ''
with open(str(filename), 'w') as output_file:
for entry in data['entries']:
if args.label:
if entry['source_path'] == lastpath:
output_file.write(entry['raw_text'] + '\n')
elif args.label == 'fname':
output_file.write('======== ' + \
entry['source_path'].split('/')[-1] + \
' >>>>\n' + entry['raw_text'] + '\n')
elif args.label == 'fpath':
output_file.write('======== ' + \
entry['source_path'] + \
' >>>>\n' + entry['raw_text'] + '\n')
else: output_file.write(entry['raw_text'] + '\n')
lastpath = entry['source_path'] | python | def write_output(self, data, args=None, filename=None, label=None):
"""Write log data to a log file"""
if args:
if not args.outlog:
return 0
if not filename: filename=args.outlog
lastpath = ''
with open(str(filename), 'w') as output_file:
for entry in data['entries']:
if args.label:
if entry['source_path'] == lastpath:
output_file.write(entry['raw_text'] + '\n')
elif args.label == 'fname':
output_file.write('======== ' + \
entry['source_path'].split('/')[-1] + \
' >>>>\n' + entry['raw_text'] + '\n')
elif args.label == 'fpath':
output_file.write('======== ' + \
entry['source_path'] + \
' >>>>\n' + entry['raw_text'] + '\n')
else: output_file.write(entry['raw_text'] + '\n')
lastpath = entry['source_path'] | [
"def",
"write_output",
"(",
"self",
",",
"data",
",",
"args",
"=",
"None",
",",
"filename",
"=",
"None",
",",
"label",
"=",
"None",
")",
":",
"if",
"args",
":",
"if",
"not",
"args",
".",
"outlog",
":",
"return",
"0",
"if",
"not",
"filename",
":",
"filename",
"=",
"args",
".",
"outlog",
"lastpath",
"=",
"''",
"with",
"open",
"(",
"str",
"(",
"filename",
")",
",",
"'w'",
")",
"as",
"output_file",
":",
"for",
"entry",
"in",
"data",
"[",
"'entries'",
"]",
":",
"if",
"args",
".",
"label",
":",
"if",
"entry",
"[",
"'source_path'",
"]",
"==",
"lastpath",
":",
"output_file",
".",
"write",
"(",
"entry",
"[",
"'raw_text'",
"]",
"+",
"'\\n'",
")",
"elif",
"args",
".",
"label",
"==",
"'fname'",
":",
"output_file",
".",
"write",
"(",
"'======== '",
"+",
"entry",
"[",
"'source_path'",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"+",
"' >>>>\\n'",
"+",
"entry",
"[",
"'raw_text'",
"]",
"+",
"'\\n'",
")",
"elif",
"args",
".",
"label",
"==",
"'fpath'",
":",
"output_file",
".",
"write",
"(",
"'======== '",
"+",
"entry",
"[",
"'source_path'",
"]",
"+",
"' >>>>\\n'",
"+",
"entry",
"[",
"'raw_text'",
"]",
"+",
"'\\n'",
")",
"else",
":",
"output_file",
".",
"write",
"(",
"entry",
"[",
"'raw_text'",
"]",
"+",
"'\\n'",
")",
"lastpath",
"=",
"entry",
"[",
"'source_path'",
"]"
] | Write log data to a log file | [
"Write",
"log",
"data",
"to",
"a",
"log",
"file"
] | 426b50264cbfa9665c86df3781e1e415ba8dbbd3 | https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/output/log.py#L37-L58 | train |
dogoncouch/logdissect | logdissect/output/sojson.py | OutputModule.write_output | def write_output(self, data, args=None, filename=None, pretty=False):
"""Write log data to a single JSON object"""
if args:
if not args.sojson:
return 0
pretty = args.pretty
if not filename: filename = args.sojson
if pretty:
logstring = json.dumps(
data['entries'], indent=2, sort_keys=True,
separators=(',', ': '))
else:
logstring = json.dumps(data['entries'], sort_keys=True)
with open(str(filename), 'w') as output_file:
output_file.write(logstring) | python | def write_output(self, data, args=None, filename=None, pretty=False):
"""Write log data to a single JSON object"""
if args:
if not args.sojson:
return 0
pretty = args.pretty
if not filename: filename = args.sojson
if pretty:
logstring = json.dumps(
data['entries'], indent=2, sort_keys=True,
separators=(',', ': '))
else:
logstring = json.dumps(data['entries'], sort_keys=True)
with open(str(filename), 'w') as output_file:
output_file.write(logstring) | [
"def",
"write_output",
"(",
"self",
",",
"data",
",",
"args",
"=",
"None",
",",
"filename",
"=",
"None",
",",
"pretty",
"=",
"False",
")",
":",
"if",
"args",
":",
"if",
"not",
"args",
".",
"sojson",
":",
"return",
"0",
"pretty",
"=",
"args",
".",
"pretty",
"if",
"not",
"filename",
":",
"filename",
"=",
"args",
".",
"sojson",
"if",
"pretty",
":",
"logstring",
"=",
"json",
".",
"dumps",
"(",
"data",
"[",
"'entries'",
"]",
",",
"indent",
"=",
"2",
",",
"sort_keys",
"=",
"True",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
"else",
":",
"logstring",
"=",
"json",
".",
"dumps",
"(",
"data",
"[",
"'entries'",
"]",
",",
"sort_keys",
"=",
"True",
")",
"with",
"open",
"(",
"str",
"(",
"filename",
")",
",",
"'w'",
")",
"as",
"output_file",
":",
"output_file",
".",
"write",
"(",
"logstring",
")"
] | Write log data to a single JSON object | [
"Write",
"log",
"data",
"to",
"a",
"single",
"JSON",
"object"
] | 426b50264cbfa9665c86df3781e1e415ba8dbbd3 | https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/output/sojson.py#L38-L53 | train |
dogoncouch/logdissect | logdissect/output/linejson.py | OutputModule.write_output | def write_output(self, data, filename=None, args=None):
"""Write log data to a file with one JSON object per line"""
if args:
if not args.linejson:
return 0
if not filename: filename = args.linejson
entrylist = []
for entry in data['entries']:
entrystring = json.dumps(entry, sort_keys=True)
entrylist.append(entrystring)
with open(str(filename), 'w') as output_file:
output_file.write('\n'.join(entrylist)) | python | def write_output(self, data, filename=None, args=None):
"""Write log data to a file with one JSON object per line"""
if args:
if not args.linejson:
return 0
if not filename: filename = args.linejson
entrylist = []
for entry in data['entries']:
entrystring = json.dumps(entry, sort_keys=True)
entrylist.append(entrystring)
with open(str(filename), 'w') as output_file:
output_file.write('\n'.join(entrylist)) | [
"def",
"write_output",
"(",
"self",
",",
"data",
",",
"filename",
"=",
"None",
",",
"args",
"=",
"None",
")",
":",
"if",
"args",
":",
"if",
"not",
"args",
".",
"linejson",
":",
"return",
"0",
"if",
"not",
"filename",
":",
"filename",
"=",
"args",
".",
"linejson",
"entrylist",
"=",
"[",
"]",
"for",
"entry",
"in",
"data",
"[",
"'entries'",
"]",
":",
"entrystring",
"=",
"json",
".",
"dumps",
"(",
"entry",
",",
"sort_keys",
"=",
"True",
")",
"entrylist",
".",
"append",
"(",
"entrystring",
")",
"with",
"open",
"(",
"str",
"(",
"filename",
")",
",",
"'w'",
")",
"as",
"output_file",
":",
"output_file",
".",
"write",
"(",
"'\\n'",
".",
"join",
"(",
"entrylist",
")",
")"
] | Write log data to a file with one JSON object per line | [
"Write",
"log",
"data",
"to",
"a",
"file",
"with",
"one",
"JSON",
"object",
"per",
"line"
] | 426b50264cbfa9665c86df3781e1e415ba8dbbd3 | https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/output/linejson.py#L36-L48 | train |
dogoncouch/logdissect | logdissect/parsers/type.py | ParseModule.parse_file | def parse_file(self, sourcepath):
"""Parse a file into a LogData object"""
# Get regex objects:
self.date_regex = re.compile(
r'{}'.format(self.format_regex))
if self.backup_format_regex:
self.backup_date_regex = re.compile(
r'{}'.format(self.backup_format_regex))
data = {}
data['entries'] = []
data['parser'] = self.name
data['source_path'] = sourcepath
data['source_file'] = sourcepath.split('/')[-1]
# Set our start year:
data['source_file_mtime'] = os.path.getmtime(data['source_path'])
timestamp = datetime.fromtimestamp(data['source_file_mtime'])
data['source_file_year'] = timestamp.year
entryyear = timestamp.year
currentmonth = '99'
if self.datestamp_type == 'nodate':
self.datedata = {}
self.datedata['timestamp'] = timestamp
self.datedata['entry_time'] = int(timestamp.strftime('%H%M%S'))
# Set our timezone
if not self.tzone:
self.backuptzone = logdissect.utils.get_local_tzone()
# Parsing works in reverse. This helps with multi-line entries,
# and logs that span multiple years (December to January shift).
# Get our lines:
fparts = sourcepath.split('.')
if fparts[-1] == 'gz':
with gzip.open(sourcepath, 'r') as logfile:
loglines = reversed(logfile.readlines())
else:
with open(str(sourcepath), 'r') as logfile:
loglines = reversed(logfile.readlines())
# Parse our lines:
for line in loglines:
ourline = line.rstrip()
# Send the line to self.parse_line
entry = self.parse_line(ourline)
if entry:
if 'date_stamp' in self.fields:
# Check for Dec-Jan jump and set the year:
if self.datestamp_type == 'standard':
if int(entry['month']) > int(currentmonth):
entryyear = entryyear - 1
currentmonth = entry['month']
entry['numeric_date_stamp'] = str(entryyear) \
+ entry['month'] + entry['day'] + \
entry['tstamp']
entry['year'] = str(entryyear)
if self.tzone:
entry['tzone'] = self.tzone
else:
entry['tzone'] = self.backuptzone
entry = logdissect.utils.get_utc_date(entry)
entry['raw_text'] = ourline
entry['source_path'] = data['source_path']
# Append current entry
data['entries'].append(entry)
else:
continue
# Write the entries to the log object
data['entries'].reverse()
return data | python | def parse_file(self, sourcepath):
"""Parse a file into a LogData object"""
# Get regex objects:
self.date_regex = re.compile(
r'{}'.format(self.format_regex))
if self.backup_format_regex:
self.backup_date_regex = re.compile(
r'{}'.format(self.backup_format_regex))
data = {}
data['entries'] = []
data['parser'] = self.name
data['source_path'] = sourcepath
data['source_file'] = sourcepath.split('/')[-1]
# Set our start year:
data['source_file_mtime'] = os.path.getmtime(data['source_path'])
timestamp = datetime.fromtimestamp(data['source_file_mtime'])
data['source_file_year'] = timestamp.year
entryyear = timestamp.year
currentmonth = '99'
if self.datestamp_type == 'nodate':
self.datedata = {}
self.datedata['timestamp'] = timestamp
self.datedata['entry_time'] = int(timestamp.strftime('%H%M%S'))
# Set our timezone
if not self.tzone:
self.backuptzone = logdissect.utils.get_local_tzone()
# Parsing works in reverse. This helps with multi-line entries,
# and logs that span multiple years (December to January shift).
# Get our lines:
fparts = sourcepath.split('.')
if fparts[-1] == 'gz':
with gzip.open(sourcepath, 'r') as logfile:
loglines = reversed(logfile.readlines())
else:
with open(str(sourcepath), 'r') as logfile:
loglines = reversed(logfile.readlines())
# Parse our lines:
for line in loglines:
ourline = line.rstrip()
# Send the line to self.parse_line
entry = self.parse_line(ourline)
if entry:
if 'date_stamp' in self.fields:
# Check for Dec-Jan jump and set the year:
if self.datestamp_type == 'standard':
if int(entry['month']) > int(currentmonth):
entryyear = entryyear - 1
currentmonth = entry['month']
entry['numeric_date_stamp'] = str(entryyear) \
+ entry['month'] + entry['day'] + \
entry['tstamp']
entry['year'] = str(entryyear)
if self.tzone:
entry['tzone'] = self.tzone
else:
entry['tzone'] = self.backuptzone
entry = logdissect.utils.get_utc_date(entry)
entry['raw_text'] = ourline
entry['source_path'] = data['source_path']
# Append current entry
data['entries'].append(entry)
else:
continue
# Write the entries to the log object
data['entries'].reverse()
return data | [
"def",
"parse_file",
"(",
"self",
",",
"sourcepath",
")",
":",
"# Get regex objects:",
"self",
".",
"date_regex",
"=",
"re",
".",
"compile",
"(",
"r'{}'",
".",
"format",
"(",
"self",
".",
"format_regex",
")",
")",
"if",
"self",
".",
"backup_format_regex",
":",
"self",
".",
"backup_date_regex",
"=",
"re",
".",
"compile",
"(",
"r'{}'",
".",
"format",
"(",
"self",
".",
"backup_format_regex",
")",
")",
"data",
"=",
"{",
"}",
"data",
"[",
"'entries'",
"]",
"=",
"[",
"]",
"data",
"[",
"'parser'",
"]",
"=",
"self",
".",
"name",
"data",
"[",
"'source_path'",
"]",
"=",
"sourcepath",
"data",
"[",
"'source_file'",
"]",
"=",
"sourcepath",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"# Set our start year:",
"data",
"[",
"'source_file_mtime'",
"]",
"=",
"os",
".",
"path",
".",
"getmtime",
"(",
"data",
"[",
"'source_path'",
"]",
")",
"timestamp",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"data",
"[",
"'source_file_mtime'",
"]",
")",
"data",
"[",
"'source_file_year'",
"]",
"=",
"timestamp",
".",
"year",
"entryyear",
"=",
"timestamp",
".",
"year",
"currentmonth",
"=",
"'99'",
"if",
"self",
".",
"datestamp_type",
"==",
"'nodate'",
":",
"self",
".",
"datedata",
"=",
"{",
"}",
"self",
".",
"datedata",
"[",
"'timestamp'",
"]",
"=",
"timestamp",
"self",
".",
"datedata",
"[",
"'entry_time'",
"]",
"=",
"int",
"(",
"timestamp",
".",
"strftime",
"(",
"'%H%M%S'",
")",
")",
"# Set our timezone",
"if",
"not",
"self",
".",
"tzone",
":",
"self",
".",
"backuptzone",
"=",
"logdissect",
".",
"utils",
".",
"get_local_tzone",
"(",
")",
"# Parsing works in reverse. This helps with multi-line entries,",
"# and logs that span multiple years (December to January shift).",
"# Get our lines:",
"fparts",
"=",
"sourcepath",
".",
"split",
"(",
"'.'",
")",
"if",
"fparts",
"[",
"-",
"1",
"]",
"==",
"'gz'",
":",
"with",
"gzip",
".",
"open",
"(",
"sourcepath",
",",
"'r'",
")",
"as",
"logfile",
":",
"loglines",
"=",
"reversed",
"(",
"logfile",
".",
"readlines",
"(",
")",
")",
"else",
":",
"with",
"open",
"(",
"str",
"(",
"sourcepath",
")",
",",
"'r'",
")",
"as",
"logfile",
":",
"loglines",
"=",
"reversed",
"(",
"logfile",
".",
"readlines",
"(",
")",
")",
"# Parse our lines:",
"for",
"line",
"in",
"loglines",
":",
"ourline",
"=",
"line",
".",
"rstrip",
"(",
")",
"# Send the line to self.parse_line",
"entry",
"=",
"self",
".",
"parse_line",
"(",
"ourline",
")",
"if",
"entry",
":",
"if",
"'date_stamp'",
"in",
"self",
".",
"fields",
":",
"# Check for Dec-Jan jump and set the year:",
"if",
"self",
".",
"datestamp_type",
"==",
"'standard'",
":",
"if",
"int",
"(",
"entry",
"[",
"'month'",
"]",
")",
">",
"int",
"(",
"currentmonth",
")",
":",
"entryyear",
"=",
"entryyear",
"-",
"1",
"currentmonth",
"=",
"entry",
"[",
"'month'",
"]",
"entry",
"[",
"'numeric_date_stamp'",
"]",
"=",
"str",
"(",
"entryyear",
")",
"+",
"entry",
"[",
"'month'",
"]",
"+",
"entry",
"[",
"'day'",
"]",
"+",
"entry",
"[",
"'tstamp'",
"]",
"entry",
"[",
"'year'",
"]",
"=",
"str",
"(",
"entryyear",
")",
"if",
"self",
".",
"tzone",
":",
"entry",
"[",
"'tzone'",
"]",
"=",
"self",
".",
"tzone",
"else",
":",
"entry",
"[",
"'tzone'",
"]",
"=",
"self",
".",
"backuptzone",
"entry",
"=",
"logdissect",
".",
"utils",
".",
"get_utc_date",
"(",
"entry",
")",
"entry",
"[",
"'raw_text'",
"]",
"=",
"ourline",
"entry",
"[",
"'source_path'",
"]",
"=",
"data",
"[",
"'source_path'",
"]",
"# Append current entry",
"data",
"[",
"'entries'",
"]",
".",
"append",
"(",
"entry",
")",
"else",
":",
"continue",
"# Write the entries to the log object",
"data",
"[",
"'entries'",
"]",
".",
"reverse",
"(",
")",
"return",
"data"
] | Parse a file into a LogData object | [
"Parse",
"a",
"file",
"into",
"a",
"LogData",
"object"
] | 426b50264cbfa9665c86df3781e1e415ba8dbbd3 | https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/parsers/type.py#L46-L122 | train |
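The year handling above relies on iterating newest-to-oldest: starting from the file's mtime year, any month value that jumps *up* marks a step back into the previous year (the January-to-December boundary). A minimal sketch of that rollover:

```python
entryyear, currentmonth = 2019, '99'  # '99' forces no rollover on the first entry
for month in ['01', '12', '11']:      # newest first: Jan 2019, then Dec/Nov 2018
    if int(month) > int(currentmonth):
        entryyear -= 1
    currentmonth = month
    print(month, entryyear)
# 01 2019
# 12 2018
# 11 2018
```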
dogoncouch/logdissect | logdissect/parsers/type.py | ParseModule.parse_line | def parse_line(self, line):
"""Parse a line into a dictionary"""
match = re.findall(self.date_regex, line)
if match:
fields = self.fields
elif self.backup_format_regex and not match:
match = re.findall(self.backup_date_regex, line)
fields = self.backup_fields
if match:
entry = {}
entry['raw_text'] = line
entry['parser'] = self.name
matchlist = list(zip(fields, match[0]))
for f, v in matchlist:
entry[f] = v
if 'date_stamp' in entry.keys():
if self.datestamp_type == 'standard':
entry = logdissect.utils.convert_standard_datestamp(entry)
elif self.datestamp_type == 'iso':
entry = logdissect.utils.convert_iso_datestamp(
entry)
elif self.datestamp_type == 'webaccess':
entry = logdissect.utils.convert_webaccess_datestamp(
entry)
elif self.datestamp_type == 'nodate':
entry, self.datedata = \
logdissect.utils.convert_nodate_datestamp(
entry, self.datedata)
elif self.datestamp_type == 'unix':
entry = logdissect.utils.convert_unix_datestamp(
entry)
if self.datestamp_type == 'now':
entry = logdissect.utils.convert_now_datestamp(
entry)
entry = self.post_parse_action(entry)
return entry
else:
return None | python | def parse_line(self, line):
"""Parse a line into a dictionary"""
match = re.findall(self.date_regex, line)
if match:
fields = self.fields
elif self.backup_format_regex and not match:
match = re.findall(self.backup_date_regex, line)
fields = self.backup_fields
if match:
entry = {}
entry['raw_text'] = line
entry['parser'] = self.name
matchlist = list(zip(fields, match[0]))
for f, v in matchlist:
entry[f] = v
if 'date_stamp' in entry.keys():
if self.datestamp_type == 'standard':
entry = logdissect.utils.convert_standard_datestamp(entry)
elif self.datestamp_type == 'iso':
entry = logdissect.utils.convert_iso_datestamp(
entry)
elif self.datestamp_type == 'webaccess':
entry = logdissect.utils.convert_webaccess_datestamp(
entry)
elif self.datestamp_type == 'nodate':
entry, self.datedata = \
logdissect.utils.convert_nodate_datestamp(
entry, self.datedata)
elif self.datestamp_type == 'unix':
entry = logdissect.utils.convert_unix_datestamp(
entry)
if self.datestamp_type == 'now':
entry = logdissect.utils.convert_now_datestamp(
entry)
entry = self.post_parse_action(entry)
return entry
else:
return None | [
"def",
"parse_line",
"(",
"self",
",",
"line",
")",
":",
"match",
"=",
"re",
".",
"findall",
"(",
"self",
".",
"date_regex",
",",
"line",
")",
"if",
"match",
":",
"fields",
"=",
"self",
".",
"fields",
"elif",
"self",
".",
"backup_format_regex",
"and",
"not",
"match",
":",
"match",
"=",
"re",
".",
"findall",
"(",
"self",
".",
"backup_date_regex",
",",
"line",
")",
"fields",
"=",
"self",
".",
"backup_fields",
"if",
"match",
":",
"entry",
"=",
"{",
"}",
"entry",
"[",
"'raw_text'",
"]",
"=",
"line",
"entry",
"[",
"'parser'",
"]",
"=",
"self",
".",
"name",
"matchlist",
"=",
"list",
"(",
"zip",
"(",
"fields",
",",
"match",
"[",
"0",
"]",
")",
")",
"for",
"f",
",",
"v",
"in",
"matchlist",
":",
"entry",
"[",
"f",
"]",
"=",
"v",
"if",
"'date_stamp'",
"in",
"entry",
".",
"keys",
"(",
")",
":",
"if",
"self",
".",
"datestamp_type",
"==",
"'standard'",
":",
"entry",
"=",
"logdissect",
".",
"utils",
".",
"convert_standard_datestamp",
"(",
"entry",
")",
"elif",
"self",
".",
"datestamp_type",
"==",
"'iso'",
":",
"entry",
"=",
"logdissect",
".",
"utils",
".",
"convert_iso_datestamp",
"(",
"entry",
")",
"elif",
"self",
".",
"datestamp_type",
"==",
"'webaccess'",
":",
"entry",
"=",
"logdissect",
".",
"utils",
".",
"convert_webaccess_datestamp",
"(",
"entry",
")",
"elif",
"self",
".",
"datestamp_type",
"==",
"'nodate'",
":",
"entry",
",",
"self",
".",
"datedata",
"=",
"logdissect",
".",
"utils",
".",
"convert_nodate_datestamp",
"(",
"entry",
",",
"self",
".",
"datedata",
")",
"elif",
"self",
".",
"datestamp_type",
"==",
"'unix'",
":",
"entry",
"=",
"logdissect",
".",
"utils",
".",
"convert_unix_datestamp",
"(",
"entry",
")",
"if",
"self",
".",
"datestamp_type",
"==",
"'now'",
":",
"entry",
"=",
"logdissect",
".",
"utils",
".",
"convert_now_datestamp",
"(",
"entry",
")",
"entry",
"=",
"self",
".",
"post_parse_action",
"(",
"entry",
")",
"return",
"entry",
"else",
":",
"return",
"None"
] | Parse a line into a dictionary | [
"Parse",
"a",
"line",
"into",
"a",
"dictionary"
] | 426b50264cbfa9665c86df3781e1e415ba8dbbd3 | https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/parsers/type.py#L125-L167 | train |
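The core of `parse_line` above is `findall` plus `zip(fields, match[0])`; a standalone sketch with an illustrative regex and field list (the real parsers define `format_regex` and `fields` per log type):

```python
import re

fields = ['date_stamp', 'source_host', 'message']
regex = re.compile(r'^(\w{3} \d+ [\d:]+) (\S+) (.*)$')

match = regex.findall('Mar 1 12:00:01 host1 session opened')
entry = dict(zip(fields, match[0]))
print(entry['source_host'], '|', entry['message'])  # host1 | session opened
```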
dogoncouch/logdissect | logdissect/parsers/tcpdump.py | ParseModule.post_parse_action | def post_parse_action(self, entry):
"""separate hosts and ports after entry is parsed"""
if 'source_host' in entry.keys():
host = self.ip_port_regex.findall(entry['source_host'])
if host:
hlist = host[0].split('.')
entry['source_host'] = '.'.join(hlist[:4])
entry['source_port'] = hlist[-1]
if 'dest_host' in entry.keys():
host = self.ip_port_regex.findall(entry['dest_host'])
if host:
hlist = host[0].split('.')
entry['dest_host'] = '.'.join(hlist[:4])
entry['dest_port'] = hlist[-1]
return entry | python | def post_parse_action(self, entry):
"""separate hosts and ports after entry is parsed"""
if 'source_host' in entry.keys():
host = self.ip_port_regex.findall(entry['source_host'])
if host:
hlist = host[0].split('.')
entry['source_host'] = '.'.join(hlist[:4])
entry['source_port'] = hlist[-1]
if 'dest_host' in entry.keys():
host = self.ip_port_regex.findall(entry['dest_host'])
if host:
hlist = host[0].split('.')
entry['dest_host'] = '.'.join(hlist[:4])
entry['dest_port'] = hlist[-1]
return entry | [
"def",
"post_parse_action",
"(",
"self",
",",
"entry",
")",
":",
"if",
"'source_host'",
"in",
"entry",
".",
"keys",
"(",
")",
":",
"host",
"=",
"self",
".",
"ip_port_regex",
".",
"findall",
"(",
"entry",
"[",
"'source_host'",
"]",
")",
"if",
"host",
":",
"hlist",
"=",
"host",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
"entry",
"[",
"'source_host'",
"]",
"=",
"'.'",
".",
"join",
"(",
"hlist",
"[",
":",
"4",
"]",
")",
"entry",
"[",
"'source_port'",
"]",
"=",
"hlist",
"[",
"-",
"1",
"]",
"if",
"'dest_host'",
"in",
"entry",
".",
"keys",
"(",
")",
":",
"host",
"=",
"self",
".",
"ip_port_regex",
".",
"findall",
"(",
"entry",
"[",
"'dest_host'",
"]",
")",
"if",
"host",
":",
"hlist",
"=",
"host",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
"entry",
"[",
"'dest_host'",
"]",
"=",
"'.'",
".",
"join",
"(",
"hlist",
"[",
":",
"4",
"]",
")",
"entry",
"[",
"'dest_port'",
"]",
"=",
"hlist",
"[",
"-",
"1",
"]",
"return",
"entry"
] | separate hosts and ports after entry is parsed | [
"separate",
"hosts",
"and",
"ports",
"after",
"entry",
"is",
"parsed"
] | 426b50264cbfa9665c86df3781e1e415ba8dbbd3 | https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/parsers/tcpdump.py#L42-L57 | train |
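The host/port split above relies on tcpdump's dotted notation, where the port is printed as a fifth dotted component after the IPv4 address. A standalone sketch of the same transformation; the regex mirrors what the module's ip_port_regex presumably matches:

    import re

    # Five dotted numeric components: four address octets plus the port.
    ip_port_regex = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\.\d+')

    def split_host_port(value):
        found = ip_port_regex.findall(value)
        if not found:
            return value, None
        parts = found[0].split('.')
        return '.'.join(parts[:4]), parts[-1]

    print(split_host_port('192.0.2.1.443'))   # ('192.0.2.1', '443')
    print(split_host_port('localhost'))       # ('localhost', None)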
vtraag/louvain-igraph | src/functions.py | find_partition_multiplex | def find_partition_multiplex(graphs, partition_type, **kwargs):
""" Detect communities for multiplex graphs.
Each graph should be defined on the same set of vertices, only the edges may
differ for different graphs. See
:func:`Optimiser.optimise_partition_multiplex` for a more detailed
explanation.
Parameters
----------
graphs : list of :class:`ig.Graph`
List of graphs (all defined on the same vertex set) to optimise.
partition_type : type of :class:`MutableVertexPartition`
The type of partition to use for optimisation (identical for all graphs).
**kwargs
Remaining keyword arguments, passed on to constructor of ``partition_type``.
Returns
-------
list of int
membership of nodes.
float
Improvement in quality of combined partitions, see
:func:`Optimiser.optimise_partition_multiplex`.
Notes
-----
We don't return a partition in this case because a partition is always
defined on a single graph. We therefore simply return the membership (which
is the same for all layers).
See Also
--------
:func:`Optimiser.optimise_partition_multiplex`
:func:`slices_to_layers`
Examples
--------
>>> n = 100
>>> G_1 = ig.Graph.Lattice([n], 1)
>>> G_2 = ig.Graph.Lattice([n], 1)
>>> membership, improvement = louvain.find_partition_multiplex([G_1, G_2],
... louvain.ModularityVertexPartition)
"""
n_layers = len(graphs)
partitions = []
layer_weights = [1]*n_layers
for graph in graphs:
partitions.append(partition_type(graph, **kwargs))
optimiser = Optimiser()
improvement = optimiser.optimise_partition_multiplex(partitions, layer_weights)
return partitions[0].membership, improvement | python | def find_partition_multiplex(graphs, partition_type, **kwargs):
""" Detect communities for multiplex graphs.
Each graph should be defined on the same set of vertices, only the edges may
differ for different graphs. See
:func:`Optimiser.optimise_partition_multiplex` for a more detailed
explanation.
Parameters
----------
graphs : list of :class:`ig.Graph`
List of graphs (all defined on the same vertex set) to optimise.
partition_type : type of :class:`MutableVertexPartition`
The type of partition to use for optimisation (identical for all graphs).
**kwargs
Remaining keyword arguments, passed on to constructor of ``partition_type``.
Returns
-------
list of int
membership of nodes.
float
Improvement in quality of combined partitions, see
:func:`Optimiser.optimise_partition_multiplex`.
Notes
-----
We don't return a partition in this case because a partition is always
defined on a single graph. We therefore simply return the membership (which
is the same for all layers).
See Also
--------
:func:`Optimiser.optimise_partition_multiplex`
:func:`slices_to_layers`
Examples
--------
>>> n = 100
>>> G_1 = ig.Graph.Lattice([n], 1)
>>> G_2 = ig.Graph.Lattice([n], 1)
>>> membership, improvement = louvain.find_partition_multiplex([G_1, G_2],
... louvain.ModularityVertexPartition)
"""
n_layers = len(graphs)
partitions = []
layer_weights = [1]*n_layers
for graph in graphs:
partitions.append(partition_type(graph, **kwargs))
optimiser = Optimiser()
improvement = optimiser.optimise_partition_multiplex(partitions, layer_weights)
return partitions[0].membership, improvement | [
"def",
"find_partition_multiplex",
"(",
"graphs",
",",
"partition_type",
",",
"*",
"*",
"kwargs",
")",
":",
"n_layers",
"=",
"len",
"(",
"graphs",
")",
"partitions",
"=",
"[",
"]",
"layer_weights",
"=",
"[",
"1",
"]",
"*",
"n_layers",
"for",
"graph",
"in",
"graphs",
":",
"partitions",
".",
"append",
"(",
"partition_type",
"(",
"graph",
",",
"*",
"*",
"kwargs",
")",
")",
"optimiser",
"=",
"Optimiser",
"(",
")",
"improvement",
"=",
"optimiser",
".",
"optimise_partition_multiplex",
"(",
"partitions",
",",
"layer_weights",
")",
"return",
"partitions",
"[",
"0",
"]",
".",
"membership",
",",
"improvement"
] | Detect communities for multiplex graphs.
Each graph should be defined on the same set of vertices, only the edges may
differ for different graphs. See
:func:`Optimiser.optimise_partition_multiplex` for a more detailed
explanation.
Parameters
----------
graphs : list of :class:`ig.Graph`
List of graphs (all defined on the same vertex set) to optimise.
partition_type : type of :class:`MutableVertexPartition`
The type of partition to use for optimisation (identical for all graphs).
**kwargs
Remaining keyword arguments, passed on to constructor of ``partition_type``.
Returns
-------
list of int
membership of nodes.
float
Improvement in quality of combined partitions, see
:func:`Optimiser.optimise_partition_multiplex`.
Notes
-----
We don't return a partition in this case because a partition is always
defined on a single graph. We therefore simply return the membership (which
is the same for all layers).
See Also
--------
:func:`Optimiser.optimise_partition_multiplex`
:func:`slices_to_layers`
Examples
--------
>>> n = 100
>>> G_1 = ig.Graph.Lattice([n], 1)
>>> G_2 = ig.Graph.Lattice([n], 1)
>>> membership, improvement = louvain.find_partition_multiplex([G_1, G_2],
... louvain.ModularityVertexPartition) | [
"Detect",
"communities",
"for",
"multiplex",
"graphs",
"."
] | 8de2c3bad736a9deea90b80f104d8444769d331f | https://github.com/vtraag/louvain-igraph/blob/8de2c3bad736a9deea90b80f104d8444769d331f/src/functions.py#L81-L136 | train |
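A slightly fuller usage sketch than the docstring example, assuming igraph and louvain are importable; the layers only need to share the vertex set, their edges can differ freely:

    import igraph as ig
    import louvain

    n = 100
    G_1 = ig.Graph.Lattice([n], 1)           # ring-like layer
    G_2 = ig.Graph.Erdos_Renyi(n=n, m=200)   # random layer on the same vertices

    membership, improvement = louvain.find_partition_multiplex(
        [G_1, G_2], louvain.ModularityVertexPartition)
    print(len(set(membership)), improvement)  # one membership shared by both layers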
vtraag/louvain-igraph | src/functions.py | find_partition_temporal | def find_partition_temporal(graphs, partition_type,
interslice_weight=1,
slice_attr='slice', vertex_id_attr='id',
edge_type_attr='type', weight_attr='weight',
**kwargs):
""" Detect communities for temporal graphs.
Each graph is considered to represent a time slice and does not necessarily
need to be defined on the same set of vertices. Nodes in two consecutive
slices are identified on the basis of the ``vertex_id_attr``, i.e. if two
nodes in two consecutive slices have an identical value of the
``vertex_id_attr`` they are coupled. The ``vertex_id_attr`` should hence be
unique in each slice. The nodes are then coupled with a weight of
``interslice_weight`` which is set in the edge attribute ``weight_attr``. No
weight is set if the ``interslice_weight`` is None (which in practice
corresponds to a weight of 1). See :func:`time_slices_to_layers` for
a more detailed explanation.
Parameters
----------
graphs : list of :class:`ig.Graph`
List of graphs, one per time slice, to optimise.
partition_type : type of :class:`VertexPartition.MutableVertexPartition`
The type of partition to use for optimisation (identical for all graphs).
interslice_weight : float
The weight of the coupling between two consecutive time slices.
slice_attr : string
The vertex attribute to use for indicating the slice of a node.
vertex_id_attr : string
The vertex attribute used to identify nodes across slices.
edge_type_attr : string
The edge attribute to use for indicating the type of link (`interslice` or
`intraslice`).
weight_attr : string
The edge attribute used to indicate the weight.
**kwargs
Remaining keyword arguments, passed on to constructor of
``partition_type``.
Returns
-------
list of membership
list containing for each slice the membership vector.
float
Improvement in quality of combined partitions, see
:func:`Optimiser.optimise_partition_multiplex`.
See Also
--------
:func:`time_slices_to_layers`
:func:`slices_to_layers`
Examples
--------
>>> n = 100
>>> G_1 = ig.Graph.Lattice([n], 1)
>>> G_1.vs['id'] = range(n)
>>> G_2 = ig.Graph.Lattice([n], 1)
>>> G_2.vs['id'] = range(n)
>>> membership, improvement = louvain.find_partition_temporal([G_1, G_2],
... louvain.ModularityVertexPartition,
... interslice_weight=1)
"""
# Create layers
G_layers, G_interslice, G = time_slices_to_layers(graphs,
interslice_weight,
slice_attr=slice_attr,
vertex_id_attr=vertex_id_attr,
edge_type_attr=edge_type_attr,
weight_attr=weight_attr)
# Optimise partitions
arg_dict = {}
if 'node_sizes' in partition_type.__init__.__code__.co_varnames:
arg_dict['node_sizes'] = 'node_size'
if 'weights' in partition_type.__init__.__code__.co_varnames:
arg_dict['weights'] = 'weight'
arg_dict.update(kwargs)
partitions = []
for H in G_layers:
arg_dict['graph'] = H
partitions.append(partition_type(**arg_dict))
# We can always take the same interslice partition, as this should have no
# cost in the optimisation.
partition_interslice = CPMVertexPartition(G_interslice, resolution_parameter=0,
node_sizes='node_size', weights=weight_attr)
optimiser = Optimiser()
improvement = optimiser.optimise_partition_multiplex(partitions + [partition_interslice])
# Transform results back into original form.
membership = {(v[slice_attr], v[vertex_id_attr]): m for v, m in zip(G.vs, partitions[0].membership)}
membership_time_slices = []
for slice_idx, H in enumerate(graphs):
membership_slice = [membership[(slice_idx, v[vertex_id_attr])] for v in H.vs]
membership_time_slices.append(list(membership_slice))
return membership_time_slices, improvement | python | def find_partition_temporal(graphs, partition_type,
interslice_weight=1,
slice_attr='slice', vertex_id_attr='id',
edge_type_attr='type', weight_attr='weight',
**kwargs):
""" Detect communities for temporal graphs.
Each graph is considered to represent a time slice and does not necessarily
need to be defined on the same set of vertices. Nodes in two consecutive
slices are identified on the basis of the ``vertex_id_attr``, i.e. if two
nodes in two consecutive slices have an identical value of the
``vertex_id_attr`` they are coupled. The ``vertex_id_attr`` should hence be
unique in each slice. The nodes are then coupled with a weight of
``interslice_weight`` which is set in the edge attribute ``weight_attr``. No
weight is set if the ``interslice_weight`` is None (which in practice
corresponds to a weight of 1). See :func:`time_slices_to_layers` for
a more detailed explanation.
Parameters
----------
graphs : list of :class:`ig.Graph`
List of graphs, one per time slice, to optimise.
partition_type : type of :class:`VertexPartition.MutableVertexPartition`
The type of partition to use for optimisation (identical for all graphs).
interslice_weight : float
The weight of the coupling between two consecutive time slices.
slice_attr : string
The vertex attribute to use for indicating the slice of a node.
vertex_id_attr : string
The vertex attribute used to identify nodes across slices.
edge_type_attr : string
The edge attribute to use for indicating the type of link (`interslice` or
`intraslice`).
weight_attr : string
The edge attribute used to indicate the weight.
**kwargs
Remaining keyword arguments, passed on to constructor of
``partition_type``.
Returns
-------
list of membership
list containing for each slice the membership vector.
float
Improvement in quality of combined partitions, see
:func:`Optimiser.optimise_partition_multiplex`.
See Also
--------
:func:`time_slices_to_layers`
:func:`slices_to_layers`
Examples
--------
>>> n = 100
>>> G_1 = ig.Graph.Lattice([n], 1)
>>> G_1.vs['id'] = range(n)
>>> G_2 = ig.Graph.Lattice([n], 1)
>>> G_2.vs['id'] = range(n)
>>> membership, improvement = louvain.find_partition_temporal([G_1, G_2],
... louvain.ModularityVertexPartition,
... interslice_weight=1)
"""
# Create layers
G_layers, G_interslice, G = time_slices_to_layers(graphs,
interslice_weight,
slice_attr=slice_attr,
vertex_id_attr=vertex_id_attr,
edge_type_attr=edge_type_attr,
weight_attr=weight_attr)
# Optimise partitions
arg_dict = {}
if 'node_sizes' in partition_type.__init__.__code__.co_varnames:
arg_dict['node_sizes'] = 'node_size'
if 'weights' in partition_type.__init__.__code__.co_varnames:
arg_dict['weights'] = 'weight'
arg_dict.update(kwargs)
partitions = []
for H in G_layers:
arg_dict['graph'] = H
partitions.append(partition_type(**arg_dict))
# We can always take the same interslice partition, as this should have no
# cost in the optimisation.
partition_interslice = CPMVertexPartition(G_interslice, resolution_parameter=0,
node_sizes='node_size', weights=weight_attr)
optimiser = Optimiser()
improvement = optimiser.optimise_partition_multiplex(partitions + [partition_interslice])
# Transform results back into original form.
membership = {(v[slice_attr], v[vertex_id_attr]): m for v, m in zip(G.vs, partitions[0].membership)}
membership_time_slices = []
for slice_idx, H in enumerate(graphs):
membership_slice = [membership[(slice_idx, v[vertex_id_attr])] for v in H.vs]
membership_time_slices.append(list(membership_slice))
return membership_time_slices, improvement | [
"def",
"find_partition_temporal",
"(",
"graphs",
",",
"partition_type",
",",
"interslice_weight",
"=",
"1",
",",
"slice_attr",
"=",
"'slice'",
",",
"vertex_id_attr",
"=",
"'id'",
",",
"edge_type_attr",
"=",
"'type'",
",",
"weight_attr",
"=",
"'weight'",
",",
"*",
"*",
"kwargs",
")",
":",
"# Create layers",
"G_layers",
",",
"G_interslice",
",",
"G",
"=",
"time_slices_to_layers",
"(",
"graphs",
",",
"interslice_weight",
",",
"slice_attr",
"=",
"slice_attr",
",",
"vertex_id_attr",
"=",
"vertex_id_attr",
",",
"edge_type_attr",
"=",
"edge_type_attr",
",",
"weight_attr",
"=",
"weight_attr",
")",
"# Optimise partitions",
"arg_dict",
"=",
"{",
"}",
"if",
"'node_sizes'",
"in",
"partition_type",
".",
"__init__",
".",
"__code__",
".",
"co_varnames",
":",
"arg_dict",
"[",
"'node_sizes'",
"]",
"=",
"'node_size'",
"if",
"'weights'",
"in",
"partition_type",
".",
"__init__",
".",
"__code__",
".",
"co_varnames",
":",
"arg_dict",
"[",
"'weights'",
"]",
"=",
"'weight'",
"arg_dict",
".",
"update",
"(",
"kwargs",
")",
"partitions",
"=",
"[",
"]",
"for",
"H",
"in",
"G_layers",
":",
"arg_dict",
"[",
"'graph'",
"]",
"=",
"H",
"partitions",
".",
"append",
"(",
"partition_type",
"(",
"*",
"*",
"arg_dict",
")",
")",
"# We can always take the same interslice partition, as this should have no",
"# cost in the optimisation.",
"partition_interslice",
"=",
"CPMVertexPartition",
"(",
"G_interslice",
",",
"resolution_parameter",
"=",
"0",
",",
"node_sizes",
"=",
"'node_size'",
",",
"weights",
"=",
"weight_attr",
")",
"optimiser",
"=",
"Optimiser",
"(",
")",
"improvement",
"=",
"optimiser",
".",
"optimise_partition_multiplex",
"(",
"partitions",
"+",
"[",
"partition_interslice",
"]",
")",
"# Transform results back into original form.",
"membership",
"=",
"{",
"(",
"v",
"[",
"slice_attr",
"]",
",",
"v",
"[",
"vertex_id_attr",
"]",
")",
":",
"m",
"for",
"v",
",",
"m",
"in",
"zip",
"(",
"G",
".",
"vs",
",",
"partitions",
"[",
"0",
"]",
".",
"membership",
")",
"}",
"membership_time_slices",
"=",
"[",
"]",
"for",
"slice_idx",
",",
"H",
"in",
"enumerate",
"(",
"graphs",
")",
":",
"membership_slice",
"=",
"[",
"membership",
"[",
"(",
"slice_idx",
",",
"v",
"[",
"vertex_id_attr",
"]",
")",
"]",
"for",
"v",
"in",
"H",
".",
"vs",
"]",
"membership_time_slices",
".",
"append",
"(",
"list",
"(",
"membership_slice",
")",
")",
"return",
"membership_time_slices",
",",
"improvement"
] | Detect communities for temporal graphs.
Each graph is considered to represent a time slice and does not necessarily
need to be defined on the same set of vertices. Nodes in two consecutive
slices are identified on the basis of the ``vertex_id_attr``, i.e. if two
nodes in two consecutive slices have an identical value of the
``vertex_id_attr`` they are coupled. The ``vertex_id_attr`` should hence be
unique in each slice. The nodes are then coupled with a weight of
``interslice_weight`` which is set in the edge attribute ``weight_attr``. No
weight is set if the ``interslice_weight`` is None (which in practice
corresponds to a weight of 1). See :func:`time_slices_to_layers` for
a more detailed explanation.
Parameters
----------
graphs : list of :class:`ig.Graph`
List of graphs, one per time slice, to optimise.
partition_type : type of :class:`VertexPartition.MutableVertexPartition`
The type of partition to use for optimisation (identical for all graphs).
interslice_weight : float
The weight of the coupling between two consecutive time slices.
slice_attr : string
The vertex attribute to use for indicating the slice of a node.
vertex_id_attr : string
The vertex attribute used to identify nodes across slices.
edge_type_attr : string
The edge attribute to use for indicating the type of link (`interslice` or
`intraslice`).
weight_attr : string
The edge attribute used to indicate the weight.
**kwargs
Remaining keyword arguments, passed on to constructor of
``partition_type``.
Returns
-------
list of membership
list containing for each slice the membership vector.
float
Improvement in quality of combined partitions, see
:func:`Optimiser.optimise_partition_multiplex`.
See Also
--------
:func:`time_slices_to_layers`
:func:`slices_to_layers`
Examples
--------
>>> n = 100
>>> G_1 = ig.Graph.Lattice([n], 1)
>>> G_1.vs['id'] = range(n)
>>> G_2 = ig.Graph.Lattice([n], 1)
>>> G_2.vs['id'] = range(n)
>>> membership, improvement = louvain.find_partition_temporal([G_1, G_2],
... louvain.ModularityVertexPartition,
... interslice_weight=1) | [
"Detect",
"communities",
"for",
"temporal",
"graphs",
"."
] | 8de2c3bad736a9deea90b80f104d8444769d331f | https://github.com/vtraag/louvain-igraph/blob/8de2c3bad736a9deea90b80f104d8444769d331f/src/functions.py#L138-L245 | train |
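A usage sketch for the temporal case, assuming each slice carries the default 'id' vertex attribute that couples node i in one slice to node i in the next:

    import igraph as ig
    import louvain

    n = 100
    slices = []
    for _ in range(3):                  # three time slices
        G = ig.Graph.Lattice([n], 1)
        G.vs['id'] = range(n)           # identities used for interslice coupling
        slices.append(G)

    memberships, improvement = louvain.find_partition_temporal(
        slices, louvain.ModularityVertexPartition, interslice_weight=1)
    print(len(memberships), len(memberships[0]))  # one membership vector per slice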
vtraag/louvain-igraph | setup.py | BuildConfiguration.build_ext | def build_ext(self):
"""Returns a class that can be used as a replacement for the
``build_ext`` command in ``distutils`` and that will download and
compile the C core of igraph if needed."""
try:
from setuptools.command.build_ext import build_ext
except ImportError:
from distutils.command.build_ext import build_ext
buildcfg = self
class custom_build_ext(build_ext):
def run(self):
# Print a warning if pkg-config is not available or does not know about igraph
if buildcfg.use_pkgconfig:
detected = buildcfg.detect_from_pkgconfig()
else:
detected = False
# Check whether we have already compiled igraph in a previous run.
# If so, it should be found in igraphcore/include and
# igraphcore/lib
if os.path.exists("igraphcore"):
buildcfg.use_built_igraph()
detected = True
# Download and compile igraph if the user did not disable it and
# we do not know the libraries from pkg-config yet
if not detected:
if buildcfg.download_igraph_if_needed and is_unix_like():
detected = buildcfg.download_and_compile_igraph()
if detected:
buildcfg.use_built_igraph()
# Fall back to an educated guess if everything else failed
if not detected:
buildcfg.use_educated_guess()
# Replaces library names with full paths to static libraries
# where possible
if buildcfg.static_extension:
buildcfg.replace_static_libraries(exclusions=["m"])
# Prints basic build information
buildcfg.print_build_info()
ext = first(extension for extension in self.extensions
if extension.name == "louvain._c_louvain")
buildcfg.configure(ext)
# Run the original build_ext command
build_ext.run(self)
return custom_build_ext | python | def build_ext(self):
"""Returns a class that can be used as a replacement for the
``build_ext`` command in ``distutils`` and that will download and
compile the C core of igraph if needed."""
try:
from setuptools.command.build_ext import build_ext
except ImportError:
from distutils.command.build_ext import build_ext
buildcfg = self
class custom_build_ext(build_ext):
def run(self):
# Print a warning if pkg-config is not available or does not know about igraph
if buildcfg.use_pkgconfig:
detected = buildcfg.detect_from_pkgconfig()
else:
detected = False
# Check whether we have already compiled igraph in a previous run.
# If so, it should be found in igraphcore/include and
# igraphcore/lib
if os.path.exists("igraphcore"):
buildcfg.use_built_igraph()
detected = True
# Download and compile igraph if the user did not disable it and
# we do not know the libraries from pkg-config yet
if not detected:
if buildcfg.download_igraph_if_needed and is_unix_like():
detected = buildcfg.download_and_compile_igraph()
if detected:
buildcfg.use_built_igraph()
# Fall back to an educated guess if everything else failed
if not detected:
buildcfg.use_educated_guess()
# Replaces library names with full paths to static libraries
# where possible
if buildcfg.static_extension:
buildcfg.replace_static_libraries(exclusions=["m"])
# Prints basic build information
buildcfg.print_build_info()
ext = first(extension for extension in self.extensions
if extension.name == "louvain._c_louvain")
buildcfg.configure(ext)
# Run the original build_ext command
build_ext.run(self)
return custom_build_ext | [
"def",
"build_ext",
"(",
"self",
")",
":",
"try",
":",
"from",
"setuptools",
".",
"command",
".",
"build_ext",
"import",
"build_ext",
"except",
"ImportError",
":",
"from",
"distutils",
".",
"command",
".",
"build_ext",
"import",
"build_ext",
"buildcfg",
"=",
"self",
"class",
"custom_build_ext",
"(",
"build_ext",
")",
":",
"def",
"run",
"(",
"self",
")",
":",
"# Print a warning if pkg-config is not available or does not know about igraph",
"if",
"buildcfg",
".",
"use_pkgconfig",
":",
"detected",
"=",
"buildcfg",
".",
"detect_from_pkgconfig",
"(",
")",
"else",
":",
"detected",
"=",
"False",
"# Check whether we have already compiled igraph in a previous run.",
"# If so, it should be found in igraphcore/include and",
"# igraphcore/lib",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"\"igraphcore\"",
")",
":",
"buildcfg",
".",
"use_built_igraph",
"(",
")",
"detected",
"=",
"True",
"# Download and compile igraph if the user did not disable it and",
"# we do not know the libraries from pkg-config yet",
"if",
"not",
"detected",
":",
"if",
"buildcfg",
".",
"download_igraph_if_needed",
"and",
"is_unix_like",
"(",
")",
":",
"detected",
"=",
"buildcfg",
".",
"download_and_compile_igraph",
"(",
")",
"if",
"detected",
":",
"buildcfg",
".",
"use_built_igraph",
"(",
")",
"# Fall back to an educated guess if everything else failed",
"if",
"not",
"detected",
":",
"buildcfg",
".",
"use_educated_guess",
"(",
")",
"# Replaces library names with full paths to static libraries",
"# where possible",
"if",
"buildcfg",
".",
"static_extension",
":",
"buildcfg",
".",
"replace_static_libraries",
"(",
"exclusions",
"=",
"[",
"\"m\"",
"]",
")",
"# Prints basic build information",
"buildcfg",
".",
"print_build_info",
"(",
")",
"ext",
"=",
"first",
"(",
"extension",
"for",
"extension",
"in",
"self",
".",
"extensions",
"if",
"extension",
".",
"name",
"==",
"\"louvain._c_louvain\"",
")",
"buildcfg",
".",
"configure",
"(",
"ext",
")",
"# Run the original build_ext command",
"build_ext",
".",
"run",
"(",
"self",
")",
"return",
"custom_build_ext"
] | Returns a class that can be used as a replacement for the
``build_ext`` command in ``distutils`` and that will download and
compile the C core of igraph if needed. | [
"Returns",
"a",
"class",
"that",
"can",
"be",
"used",
"as",
"a",
"replacement",
"for",
"the",
"build_ext",
"command",
"in",
"distutils",
"and",
"that",
"will",
"download",
"and",
"compile",
"the",
"C",
"core",
"of",
"igraph",
"if",
"needed",
"."
] | 8de2c3bad736a9deea90b80f104d8444769d331f | https://github.com/vtraag/louvain-igraph/blob/8de2c3bad736a9deea90b80f104d8444769d331f/setup.py#L353-L405 | train |
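The property above returns a command class rather than an instance, which is what setuptools expects in cmdclass. A generic, self-contained sketch of that pattern with illustrative names, not this package's actual setup script:

    from setuptools import setup, Extension
    from setuptools.command.build_ext import build_ext

    class CustomBuildExt(build_ext):
        # Same shape as custom_build_ext above: detect or build native
        # dependencies first, then delegate to the stock build_ext.
        def run(self):
            print("detect libraries / download-and-compile fallback goes here")
            build_ext.run(self)

    setup(
        name="demo",
        ext_modules=[Extension("demo._c_demo", sources=["src/demo.c"])],  # demo.c assumed to exist
        cmdclass={"build_ext": CustomBuildExt},   # pass the class, not an instance
    )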
vtraag/louvain-igraph | src/VertexPartition.py | CPMVertexPartition.Bipartite | def Bipartite(graph, resolution_parameter_01,
resolution_parameter_0 = 0, resolution_parameter_1 = 0,
degree_as_node_size=False, types='type', **kwargs):
""" Create three layers for bipartite partitions.
This creates the three layers of a bipartite partition that are necessary for detecting
communities in bipartite networks. These three layers should be passed to
:func:`Optimiser.optimise_partition_multiplex` with
``layer_weights=[1,-1,-1]``.
Parameters
----------
graph : :class:`ig.Graph`
Graph to define the bipartite partitions on.
resolution_parameter_01 : double
Resolution parameter for the links between the two classes.
resolution_parameter_0 : double
Resolution parameter for class 0.
resolution_parameter_1 : double
Resolution parameter for class 1.
degree_as_node_size : boolean
If ``True`` use degree as node size instead of 1, to mimic modularity,
see `Notes <#notes-bipartite>`_.
types : vertex attribute or list
Indicator of the class for each vertex. Values other than 0 and 1 are
automatically converted.
**kwargs
Additional arguments passed on to default constructor of
:class:`CPMVertexPartition`.
.. _notes-bipartite:
Notes
-----
For bipartite networks, we would like to be able to set three different
resolution parameters: one for within each class :math:`\\gamma_0,
\\gamma_1`, and one for the links between classes, :math:`\\gamma_{01}`.
Then the formulation would be
.. math:: Q = \\sum_{ij}
[A_{ij}
- (\\gamma_0\\delta(s_i,0) + \\gamma_1\\delta(s_i,1)) \\delta(s_i,s_j)
- \\gamma_{01}(1 - \\delta(s_i, s_j))
]\\delta(\\sigma_i, \\sigma_j)
In terms of communities this is
.. math:: Q = \\sum_c (e_c
- \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_0 n^2_c(0)
- \\gamma_1 n^2_c(1))
where :math:`n_c(0)` is the number of nodes in community :math:`c` of class 0
(and similarly for 1) and :math:`e_c` is the number of edges within community
:math:`c`. We denote by :math:`n_c = n_c(0) + n_c(1)` the total number of nodes
in community :math:`c`.
We achieve this by creating three layers : (1) all nodes have ``node_size =
1`` and all relevant links; (2) only nodes of class 0 have ``node_size =
1`` and no links; (3) only nodes of class 1 have ``node_size = 1`` and no
links. If we add the first with resolution parameter :math:`\\gamma_{01}`,
and the others with resolution parameters :math:`\\gamma_{01} - \\gamma_0`
and :math:`\\gamma_{01} - \\gamma_1`, but the latter two with a layer
weight of -1 while the first layer has layer weight 1, we obtain the
following:
.. math:: Q &= \\sum_c (e_c - \\gamma_{01} n_c^2)
-\\sum_c (- (\\gamma_{01} - \\gamma_0) n_c(0)^2)
-\\sum_c (- (\\gamma_{01} - \\gamma_1) n_c(1)^2) \\\\
&= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_{01} n_c(0)^2
- \\gamma_{01} n_c(1)^2)
+ ( \\gamma_{01} - \\gamma_0) n_c(0)^2
+ ( \\gamma_{01} - \\gamma_1) n_c(1)^2
] \\\\
&= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_{0} n_c(0)^2
- \\gamma_{1} n_c(1)^2]
Although the derivation above is using :math:`n_c^2`, implicitly assuming a
directed graph with self-loops, similar derivations can be made for
undirected graphs using :math:`\\binom{n_c}{2}`, but the notation is then
somewhat more convoluted.
If we set node sizes equal to the degree, we get something similar to
modularity, except that the resolution parameter should still be divided by
:math:`2m`. In particular, in general (i.e. not specifically for bipartite
graph) if ``node_sizes=G.degree()`` we then obtain
.. math:: Q = \\sum_{ij} A_{ij} - \\gamma k_i k_j
In the case of bipartite graphs something similar is obtained, but then
correctly adapted (as long as the resolution parameter is also
appropriately rescaled).
.. note:: This function is not suited for directed graphs in the case of
using the degree as node sizes.
"""
if types is not None:
if isinstance(types, str):
types = graph.vs[types]
else:
# Make sure it is a list
types = list(types)
if set(types) != set([0, 1]):
new_type = _ig.UniqueIdGenerator()
types = [new_type[t] for t in types]
if set(types) != set([0, 1]):
raise ValueError("More than one type specified.")
if degree_as_node_size:
if (graph.is_directed()):
raise ValueError("This method is not suitable for directed graphs " +
"when using degree as node sizes.")
node_sizes = graph.degree()
else:
node_sizes = [1]*graph.vcount()
partition_01 = CPMVertexPartition(graph,
node_sizes=node_sizes,
resolution_parameter=resolution_parameter_01,
**kwargs)
H_0 = graph.subgraph_edges([], delete_vertices=False)
partition_0 = CPMVertexPartition(H_0, weights=None,
node_sizes=[s if t == 0 else 0
for v, s, t in zip(graph.vs,node_sizes,types)],
resolution_parameter=resolution_parameter_01 - resolution_parameter_0,
**kwargs)
H_1 = graph.subgraph_edges([], delete_vertices=False)
partition_1 = CPMVertexPartition(H_1, weights=None,
node_sizes=[s if t == 1 else 0
for v, s, t in zip(graph.vs,node_sizes,types)],
resolution_parameter=resolution_parameter_01 - resolution_parameter_1,
**kwargs)
return partition_01, partition_0, partition_1 | python | def Bipartite(graph, resolution_parameter_01,
resolution_parameter_0 = 0, resolution_parameter_1 = 0,
degree_as_node_size=False, types='type', **kwargs):
""" Create three layers for bipartite partitions.
This creates the three layers of a bipartite partition that are necessary for detecting
communities in bipartite networks. These three layers should be passed to
:func:`Optimiser.optimise_partition_multiplex` with
``layer_weights=[1,-1,-1]``.
Parameters
----------
graph : :class:`ig.Graph`
Graph to define the bipartite partitions on.
resolution_parameter_01 : double
Resolution parameter for the links between the two classes.
resolution_parameter_0 : double
Resolution parameter for class 0.
resolution_parameter_1 : double
Resolution parameter for class 1.
degree_as_node_size : boolean
If ``True`` use degree as node size instead of 1, to mimic modularity,
see `Notes <#notes-bipartite>`_.
types : vertex attribute or list
Indicator of the class for each vertex. Values other than 0 and 1 are
automatically converted.
**kwargs
Additional arguments passed on to default constructor of
:class:`CPMVertexPartition`.
.. _notes-bipartite:
Notes
-----
For bipartite networks, we would like to be able to set three different
resolution parameters: one for within each class :math:`\\gamma_0,
\\gamma_1`, and one for the links between classes, :math:`\\gamma_{01}`.
Then the formulation would be
.. math:: Q = \\sum_{ij}
[A_{ij}
- (\\gamma_0\\delta(s_i,0) + \\gamma_1\\delta(s_i,1)) \\delta(s_i,s_j)
- \\gamma_{01}(1 - \\delta(s_i, s_j))
]\\delta(\\sigma_i, \\sigma_j)
In terms of communities this is
.. math:: Q = \\sum_c (e_c
- \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_0 n^2_c(0)
- \\gamma_1 n^2_c(1))
where :math:`n_c(0)` is the number of nodes in community :math:`c` of class 0
(and similarly for 1) and :math:`e_c` is the number of edges within community
:math:`c`. We denote by :math:`n_c = n_c(0) + n_c(1)` the total number of nodes
in community :math:`c`.
We achieve this by creating three layers : (1) all nodes have ``node_size =
1`` and all relevant links; (2) only nodes of class 0 have ``node_size =
1`` and no links; (3) only nodes of class 1 have ``node_size = 1`` and no
links. If we add the first with resolution parameter :math:`\\gamma_{01}`,
and the others with resolution parameters :math:`\\gamma_{01} - \\gamma_0`
and :math:`\\gamma_{01} - \\gamma_1`, but the latter two with a layer
weight of -1 while the first layer has layer weight 1, we obtain the
following:
.. math:: Q &= \\sum_c (e_c - \\gamma_{01} n_c^2)
-\\sum_c (- (\\gamma_{01} - \\gamma_0) n_c(0)^2)
-\\sum_c (- (\\gamma_{01} - \\gamma_1) n_c(1)^2) \\\\
&= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_{01} n_c(0)^2
- \\gamma_{01} n_c(1)^2)
+ ( \\gamma_{01} - \\gamma_0) n_c(0)^2
+ ( \\gamma_{01} - \\gamma_1) n_c(1)^2
] \\\\
&= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_{0} n_c(0)^2
- \\gamma_{1} n_c(1)^2]
Although the derivation above is using :math:`n_c^2`, implicitly assuming a
directed graph with self-loops, similar derivations can be made for
undirected graphs using :math:`\\binom{n_c}{2}`, but the notation is then
somewhat more convoluted.
If we set node sizes equal to the degree, we get something similar to
modularity, except that the resolution parameter should still be divided by
:math:`2m`. In particular, in general (i.e. not specifically for bipartite
graph) if ``node_sizes=G.degree()`` we then obtain
.. math:: Q = \\sum_{ij} A_{ij} - \\gamma k_i k_j
In the case of bipartite graphs something similar is obtained, but then
correctly adapted (as long as the resolution parameter is also
appropriately rescaled).
.. note:: This function is not suited for directed graphs in the case of
using the degree as node sizes.
"""
if types is not None:
if isinstance(types, str):
types = graph.vs[types]
else:
# Make sure it is a list
types = list(types)
if set(types) != set([0, 1]):
new_type = _ig.UniqueIdGenerator()
types = [new_type[t] for t in types]
if set(types) != set([0, 1]):
raise ValueError("More than one type specified.")
if degree_as_node_size:
if (graph.is_directed()):
raise ValueError("This method is not suitable for directed graphs " +
"when using degree as node sizes.")
node_sizes = graph.degree()
else:
node_sizes = [1]*graph.vcount()
partition_01 = CPMVertexPartition(graph,
node_sizes=node_sizes,
resolution_parameter=resolution_parameter_01,
**kwargs)
H_0 = graph.subgraph_edges([], delete_vertices=False)
partition_0 = CPMVertexPartition(H_0, weights=None,
node_sizes=[s if t == 0 else 0
for v, s, t in zip(graph.vs,node_sizes,types)],
resolution_parameter=resolution_parameter_01 - resolution_parameter_0,
**kwargs)
H_1 = graph.subgraph_edges([], delete_vertices=False)
partition_1 = CPMVertexPartition(H_1, weights=None,
node_sizes=[s if t == 1 else 0
for v, s, t in zip(graph.vs,node_sizes,types)],
resolution_parameter=resolution_parameter_01 - resolution_parameter_1,
**kwargs)
return partition_01, partition_0, partition_1 | [
"def",
"Bipartite",
"(",
"graph",
",",
"resolution_parameter_01",
",",
"resolution_parameter_0",
"=",
"0",
",",
"resolution_parameter_1",
"=",
"0",
",",
"degree_as_node_size",
"=",
"False",
",",
"types",
"=",
"'type'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"types",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"types",
",",
"str",
")",
":",
"types",
"=",
"graph",
".",
"vs",
"[",
"types",
"]",
"else",
":",
"# Make sure it is a list",
"types",
"=",
"list",
"(",
"types",
")",
"if",
"set",
"(",
"types",
")",
"!=",
"set",
"(",
"[",
"0",
",",
"1",
"]",
")",
":",
"new_type",
"=",
"_ig",
".",
"UniqueIdGenerator",
"(",
")",
"types",
"=",
"[",
"new_type",
"[",
"t",
"]",
"for",
"t",
"in",
"types",
"]",
"if",
"set",
"(",
"types",
")",
"!=",
"set",
"(",
"[",
"0",
",",
"1",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"More than one type specified.\"",
")",
"if",
"degree_as_node_size",
":",
"if",
"(",
"graph",
".",
"is_directed",
"(",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"This method is not suitable for directed graphs \"",
"+",
"\"when using degree as node sizes.\"",
")",
"node_sizes",
"=",
"graph",
".",
"degree",
"(",
")",
"else",
":",
"node_sizes",
"=",
"[",
"1",
"]",
"*",
"graph",
".",
"vcount",
"(",
")",
"partition_01",
"=",
"CPMVertexPartition",
"(",
"graph",
",",
"node_sizes",
"=",
"node_sizes",
",",
"resolution_parameter",
"=",
"resolution_parameter_01",
",",
"*",
"*",
"kwargs",
")",
"H_0",
"=",
"graph",
".",
"subgraph_edges",
"(",
"[",
"]",
",",
"delete_vertices",
"=",
"False",
")",
"partition_0",
"=",
"CPMVertexPartition",
"(",
"H_0",
",",
"weights",
"=",
"None",
",",
"node_sizes",
"=",
"[",
"s",
"if",
"t",
"==",
"0",
"else",
"0",
"for",
"v",
",",
"s",
",",
"t",
"in",
"zip",
"(",
"graph",
".",
"vs",
",",
"node_sizes",
",",
"types",
")",
"]",
",",
"resolution_parameter",
"=",
"resolution_parameter_01",
"-",
"resolution_parameter_0",
",",
"*",
"*",
"kwargs",
")",
"H_1",
"=",
"graph",
".",
"subgraph_edges",
"(",
"[",
"]",
",",
"delete_vertices",
"=",
"False",
")",
"partition_1",
"=",
"CPMVertexPartition",
"(",
"H_1",
",",
"weights",
"=",
"None",
",",
"node_sizes",
"=",
"[",
"s",
"if",
"t",
"==",
"1",
"else",
"0",
"for",
"v",
",",
"s",
",",
"t",
"in",
"zip",
"(",
"graph",
".",
"vs",
",",
"node_sizes",
",",
"types",
")",
"]",
",",
"resolution_parameter",
"=",
"resolution_parameter_01",
"-",
"resolution_parameter_1",
",",
"*",
"*",
"kwargs",
")",
"return",
"partition_01",
",",
"partition_0",
",",
"partition_1"
] | Create three layers for bipartite partitions.
This creates the three layers of a bipartite partition that are necessary for detecting
communities in bipartite networks. These three layers should be passed to
:func:`Optimiser.optimise_partition_multiplex` with
``layer_weights=[1,-1,-1]``.
Parameters
----------
graph : :class:`ig.Graph`
Graph to define the bipartite partitions on.
resolution_parameter_01 : double
Resolution parameter for the links between the two classes.
resolution_parameter_0 : double
Resolution parameter for class 0.
resolution_parameter_1 : double
Resolution parameter for class 1.
degree_as_node_size : boolean
If ``True`` use degree as node size instead of 1, to mimic modularity,
see `Notes <#notes-bipartite>`_.
types : vertex attribute or list
Indicator of the class for each vertex. Values other than 0 and 1 are
automatically converted.
**kwargs
Additional arguments passed on to default constructor of
:class:`CPMVertexPartition`.
.. _notes-bipartite:
Notes
-----
For bipartite networks, we would like to be able to set three different
resolution parameters: one for within each class :math:`\\gamma_0,
\\gamma_1`, and one for the links between classes, :math:`\\gamma_{01}`.
Then the formulation would be
.. math:: Q = \\sum_{ij}
[A_{ij}
- (\\gamma_0\\delta(s_i,0) + \\gamma_1\\delta(s_i,1)) \\delta(s_i,s_j)
- \\gamma_{01}(1 - \\delta(s_i, s_j))
]\\delta(\\sigma_i, \\sigma_j)
In terms of communities this is
.. math:: Q = \\sum_c (e_c
- \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_0 n^2_c(0)
- \\gamma_1 n^2_c(1))
where :math:`n_c(0)` is the number of nodes in community :math:`c` of class 0
(and similarly for 1) and :math:`e_c` is the number of edges within community
:math:`c`. We denote by :math:`n_c = n_c(0) + n_c(1)` the total number of nodes
in community :math:`c`.
We achieve this by creating three layers : (1) all nodes have ``node_size =
1`` and all relevant links; (2) only nodes of class 0 have ``node_size =
1`` and no links; (3) only nodes of class 1 have ``node_size = 1`` and no
links. If we add the first with resolution parameter :math:`\\gamma_{01}`,
and the others with resolution parameters :math:`\\gamma_{01} - \\gamma_0`
and :math:`\\gamma_{01} - \\gamma_1`, but the latter two with a layer
weight of -1 while the first layer has layer weight 1, we obtain the
following:
.. math:: Q &= \\sum_c (e_c - \\gamma_{01} n_c^2)
-\\sum_c (- (\\gamma_{01} - \\gamma_0) n_c(0)^2)
-\\sum_c (- (\\gamma_{01} - \\gamma_1) n_c(1)^2) \\\\
&= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_{01} n_c(0)^2
- \\gamma_{01} n_c(1)^2)
+ ( \\gamma_{01} - \\gamma_0) n_c(0)^2
+ ( \\gamma_{01} - \\gamma_1) n_c(1)^2
] \\\\
&= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_{0} n_c(0)^2
- \\gamma_{1} n_c(1)^2]
Although the derivation above is using :math:`n_c^2`, implicitly assuming a
directed graph with self-loops, similar derivations can be made for
undirected graphs using :math:`\\binom{n_c}{2}`, but the notation is then
somewhat more convoluted.
If we set node sizes equal to the degree, we get something similar to
modularity, except that the resolution parameter should still be divided by
:math:`2m`. In particular, in general (i.e. not specifically for bipartite
graph) if ``node_sizes=G.degree()`` we then obtain
.. math:: Q = \\sum_{ij} A_{ij} - \\gamma k_i k_j
In the case of bipartite graphs something similar is obtained, but then
correctly adapted (as long as the resolution parameter is also
appropriately rescaled).
.. note:: This function is not suited for directed graphs in the case of
using the degree as node sizes. | [
"Create",
"three",
"layers",
"for",
"bipartite",
"partitions",
"."
] | 8de2c3bad736a9deea90b80f104d8444769d331f | https://github.com/vtraag/louvain-igraph/blob/8de2c3bad736a9deea90b80f104d8444769d331f/src/VertexPartition.py#L865-L1009 | train |
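A usage sketch tying the three returned layers to the optimiser with the layer weights the docstring prescribes; the toy graph and resolution value are illustrative:

    import igraph as ig
    import louvain

    G = ig.Graph.Full_Bipartite(5, 5)    # toy bipartite graph
    G.vs['type'] = [0] * 5 + [1] * 5     # class indicator read by Bipartite()

    p_01, p_0, p_1 = louvain.CPMVertexPartition.Bipartite(
        G, resolution_parameter_01=0.1)
    optimiser = louvain.Optimiser()
    optimiser.optimise_partition_multiplex(
        [p_01, p_0, p_1], layer_weights=[1, -1, -1])
    print(p_01.membership)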
vinta/pangu.py | pangu.py | spacing_file | def spacing_file(path):
"""
Perform paranoid text spacing from file.
"""
# TODO: read line by line
with open(os.path.abspath(path)) as f:
return spacing_text(f.read()) | python | def spacing_file(path):
"""
Perform paranoid text spacing from file.
"""
# TODO: read line by line
with open(os.path.abspath(path)) as f:
return spacing_text(f.read()) | [
"def",
"spacing_file",
"(",
"path",
")",
":",
"# TODO: read line by line",
"with",
"open",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
")",
"as",
"f",
":",
"return",
"spacing_text",
"(",
"f",
".",
"read",
"(",
")",
")"
] | Perform paranoid text spacing from file. | [
"Perform",
"paranoid",
"text",
"spacing",
"from",
"file",
"."
] | 89407cf08dedf9d895c13053dd518d11a20f6c95 | https://github.com/vinta/pangu.py/blob/89407cf08dedf9d895c13053dd518d11a20f6c95/pangu.py#L156-L162 | train |
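A small usage sketch; spacing_file simply reads the whole file and delegates to spacing_text, so the file path below is hypothetical:

    import pangu

    print(pangu.spacing_text('當你凝視著bug,bug也凝視著你'))
    # -> 當你凝視著 bug,bug 也凝視著你

    # Equivalent call for a file on disk (path is a placeholder):
    # print(pangu.spacing_file('notes.txt'))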
EventRegistry/event-registry-python | eventregistry/EventForText.py | GetEventForText.compute | def compute(self,
text, # text for which to find the most similar event
lang = "eng"): # language in which the text is written
"""
compute the list of most similar events for the given text
"""
params = { "lang": lang, "text": text, "topClustersCount": self._nrOfEventsToReturn }
res = self._er.jsonRequest("/json/getEventForText/enqueueRequest", params)
requestId = res["requestId"]
for i in range(10):
time.sleep(1) # sleep for 1 second to wait for the clustering to perform computation
res = self._er.jsonRequest("/json/getEventForText/testRequest", { "requestId": requestId })
if isinstance(res, list) and len(res) > 0:
return res
return None | python | def compute(self,
text, # text for which to find the most similar event
lang = "eng"): # language in which the text is written
"""
compute the list of most similar events for the given text
"""
params = { "lang": lang, "text": text, "topClustersCount": self._nrOfEventsToReturn }
res = self._er.jsonRequest("/json/getEventForText/enqueueRequest", params)
requestId = res["requestId"]
for i in range(10):
time.sleep(1) # sleep for 1 second to wait for the clustering to perform computation
res = self._er.jsonRequest("/json/getEventForText/testRequest", { "requestId": requestId })
if isinstance(res, list) and len(res) > 0:
return res
return None | [
"def",
"compute",
"(",
"self",
",",
"text",
",",
"# text for which to find the most similar event",
"lang",
"=",
"\"eng\"",
")",
":",
"# language in which the text is written",
"params",
"=",
"{",
"\"lang\"",
":",
"lang",
",",
"\"text\"",
":",
"text",
",",
"\"topClustersCount\"",
":",
"self",
".",
"_nrOfEventsToReturn",
"}",
"res",
"=",
"self",
".",
"_er",
".",
"jsonRequest",
"(",
"\"/json/getEventForText/enqueueRequest\"",
",",
"params",
")",
"requestId",
"=",
"res",
"[",
"\"requestId\"",
"]",
"for",
"i",
"in",
"range",
"(",
"10",
")",
":",
"time",
".",
"sleep",
"(",
"1",
")",
"# sleep for 1 second to wait for the clustering to perform computation",
"res",
"=",
"self",
".",
"_er",
".",
"jsonRequest",
"(",
"\"/json/getEventForText/testRequest\"",
",",
"{",
"\"requestId\"",
":",
"requestId",
"}",
")",
"if",
"isinstance",
"(",
"res",
",",
"list",
")",
"and",
"len",
"(",
"res",
")",
">",
"0",
":",
"return",
"res",
"return",
"None"
] | compute the list of most similar events for the given text | [
"compute",
"the",
"list",
"of",
"most",
"similar",
"events",
"for",
"the",
"given",
"text"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/EventForText.py#L42-L57 | train |
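A usage sketch for the event lookup; the API key is a placeholder and the constructor is assumed to take the EventRegistry client as its first argument:

    from eventregistry import EventRegistry, GetEventForText

    er = EventRegistry(apiKey="YOUR_API_KEY")   # placeholder key
    finder = GetEventForText(er)
    events = finder.compute("Apple unveiled a new iPhone model today", lang="eng")
    print(events)   # list of candidate events, or None if clustering timed out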
EventRegistry/event-registry-python | eventregistry/Analytics.py | Analytics.annotate | def annotate(self, text, lang = None, customParams = None):
"""
identify the list of entities and nonentities mentioned in the text
@param text: input text to annotate
@param lang: language of the provided document (can be an ISO2 or ISO3 code). If None is provided, the language will be automatically detected
@param customParams: None or a dict with custom parameters to send to the annotation service
@returns: dict
"""
params = {"lang": lang, "text": text}
if customParams:
params.update(customParams)
return self._er.jsonRequestAnalytics("/api/v1/annotate", params) | python | def annotate(self, text, lang = None, customParams = None):
"""
identify the list of entities and nonentities mentioned in the text
@param text: input text to annotate
@param lang: language of the provided document (can be an ISO2 or ISO3 code). If None is provided, the language will be automatically detected
@param customParams: None or a dict with custom parameters to send to the annotation service
@returns: dict
"""
params = {"lang": lang, "text": text}
if customParams:
params.update(customParams)
return self._er.jsonRequestAnalytics("/api/v1/annotate", params) | [
"def",
"annotate",
"(",
"self",
",",
"text",
",",
"lang",
"=",
"None",
",",
"customParams",
"=",
"None",
")",
":",
"params",
"=",
"{",
"\"lang\"",
":",
"lang",
",",
"\"text\"",
":",
"text",
"}",
"if",
"customParams",
":",
"params",
".",
"update",
"(",
"customParams",
")",
"return",
"self",
".",
"_er",
".",
"jsonRequestAnalytics",
"(",
"\"/api/v1/annotate\"",
",",
"params",
")"
] | identify the list of entities and nonentities mentioned in the text
@param text: input text to annotate
@param lang: language of the provided document (can be an ISO2 or ISO3 code). If None is provided, the language will be automatically detected
@param customParams: None or a dict with custom parameters to send to the annotation service
@returns: dict | [
"identify",
"the",
"list",
"of",
"entities",
"and",
"nonentities",
"mentioned",
"in",
"the",
"text"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/Analytics.py#L25-L36 | train |
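A usage sketch for the annotation endpoint, with a placeholder API key; Analytics is assumed to wrap an EventRegistry client, as in the library's examples:

    from eventregistry import EventRegistry, Analytics

    analytics = Analytics(EventRegistry(apiKey="YOUR_API_KEY"))   # placeholder key
    res = analytics.annotate("Berlin is the capital of Germany.")
    print(res)   # dict describing the identified entities and non-entities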
EventRegistry/event-registry-python | eventregistry/Analytics.py | Analytics.sentiment | def sentiment(self, text, method = "vocabulary"):
"""
determine the sentiment of the provided text in the English language
@param text: input text to categorize
@param method: method to use to compute the sentiment. possible values are "vocabulary" (vocabulary based sentiment analysis)
and "rnn" (neural network based sentiment classification)
@returns: dict
"""
assert method == "vocabulary" or method == "rnn"
endpoint = method == "vocabulary" and "sentiment" or "sentimentRNN"
return self._er.jsonRequestAnalytics("/api/v1/" + endpoint, { "text": text }) | python | def sentiment(self, text, method = "vocabulary"):
"""
determine the sentiment of the provided text in the English language
@param text: input text to categorize
@param method: method to use to compute the sentiment. possible values are "vocabulary" (vocabulary based sentiment analysis)
and "rnn" (neural network based sentiment classification)
@returns: dict
"""
assert method == "vocabulary" or method == "rnn"
endpoint = method == "vocabulary" and "sentiment" or "sentimentRNN"
return self._er.jsonRequestAnalytics("/api/v1/" + endpoint, { "text": text }) | [
"def",
"sentiment",
"(",
"self",
",",
"text",
",",
"method",
"=",
"\"vocabulary\"",
")",
":",
"assert",
"method",
"==",
"\"vocabulary\"",
"or",
"method",
"==",
"\"rnn\"",
"endpoint",
"=",
"method",
"==",
"\"vocabulary\"",
"and",
"\"sentiment\"",
"or",
"\"sentimentRNN\"",
"return",
"self",
".",
"_er",
".",
"jsonRequestAnalytics",
"(",
"\"/api/v1/\"",
"+",
"endpoint",
",",
"{",
"\"text\"",
":",
"text",
"}",
")"
] | determine the sentiment of the provided text in the English language
@param text: input text to categorize
@param method: method to use to compute the sentiment. possible values are "vocabulary" (vocabulary based sentiment analysis)
and "rnn" (neural network based sentiment classification)
@returns: dict | [
"determine",
"the",
"sentiment",
"of",
"the",
"provided",
"text",
"in",
"English",
"language"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/Analytics.py#L50-L60 | train |
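A sketch exercising both sentiment back ends, again with a placeholder API key:

    from eventregistry import EventRegistry, Analytics

    analytics = Analytics(EventRegistry(apiKey="YOUR_API_KEY"))   # placeholder key
    text = "I absolutely loved the new update!"
    print(analytics.sentiment(text))                 # vocabulary-based (default)
    print(analytics.sentiment(text, method="rnn"))   # neural-network classifier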
EventRegistry/event-registry-python | eventregistry/Analytics.py | Analytics.semanticSimilarity | def semanticSimilarity(self, text1, text2, distanceMeasure = "cosine"):
"""
determine the semantic similarity of the two provided documents
@param text1: first document to analyze
@param text2: second document to analyze
@param distanceMeasure: distance measure to use for comparing two documents. Possible values are "cosine" (default) or "jaccard"
@returns: dict
"""
return self._er.jsonRequestAnalytics("/api/v1/semanticSimilarity", { "text1": text1, "text2": text2, "distanceMeasure": distanceMeasure }) | python | def semanticSimilarity(self, text1, text2, distanceMeasure = "cosine"):
"""
determine the semantic similarity of the two provided documents
@param text1: first document to analyze
@param text2: second document to analyze
@param distanceMeasure: distance measure to use for comparing two documents. Possible values are "cosine" (default) or "jaccard"
@returns: dict
"""
return self._er.jsonRequestAnalytics("/api/v1/semanticSimilarity", { "text1": text1, "text2": text2, "distanceMeasure": distanceMeasure }) | [
"def",
"semanticSimilarity",
"(",
"self",
",",
"text1",
",",
"text2",
",",
"distanceMeasure",
"=",
"\"cosine\"",
")",
":",
"return",
"self",
".",
"_er",
".",
"jsonRequestAnalytics",
"(",
"\"/api/v1/semanticSimilarity\"",
",",
"{",
"\"text1\"",
":",
"text1",
",",
"\"text2\"",
":",
"text2",
",",
"\"distanceMeasure\"",
":",
"distanceMeasure",
"}",
")"
] | determine the semantic similarity of the two provided documents
@param text1: first document to analyze
@param text2: second document to analyze
@param distanceMeasure: distance measure to use for comparing two documents. Possible values are "cosine" (default) or "jaccard"
@returns: dict | [
"determine",
"the",
"semantic",
"similarity",
"of",
"the",
"two",
"provided",
"documents"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/Analytics.py#L63-L71 | train |
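A sketch comparing two short documents under both supported distance measures (placeholder API key):

    from eventregistry import EventRegistry, Analytics

    analytics = Analytics(EventRegistry(apiKey="YOUR_API_KEY"))   # placeholder key
    a = "The central bank raised interest rates."
    b = "Borrowing costs went up after the rate decision."
    print(analytics.semanticSimilarity(a, b))                             # cosine
    print(analytics.semanticSimilarity(a, b, distanceMeasure="jaccard"))  # jaccard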
EventRegistry/event-registry-python | eventregistry/Analytics.py | Analytics.extractArticleInfo | def extractArticleInfo(self, url, proxyUrl = None, headers = None, cookies = None):
"""
extract all available information about an article available at url `url`. Returned information will include
article title, body, authors, links in the articles, ...
@param url: article url to extract article information from
@param proxyUrl: proxy that should be used for downloading article information. format: {schema}://{username}:{pass}@{proxy url/ip}
@param headers: dict with headers to set in the request (optional)
@param cookies: dict with cookies to set in the request (optional)
@returns: dict
"""
params = { "url": url }
if proxyUrl:
params["proxyUrl"] = proxyUrl
if headers:
if isinstance(headers, dict):
headers = json.dumps(headers)
params["headers"] = headers
if cookies:
if isinstance(cookies, dict):
cookies = json.dumps(cookies)
params["cookies"] = cookies
return self._er.jsonRequestAnalytics("/api/v1/extractArticleInfo", params) | python | def extractArticleInfo(self, url, proxyUrl = None, headers = None, cookies = None):
"""
extract all available information about an article available at url `url`. Returned information will include
article title, body, authors, links in the articles, ...
@param url: article url to extract article information from
@param proxyUrl: proxy that should be used for downloading article information. format: {schema}://{username}:{pass}@{proxy url/ip}
@param headers: dict with headers to set in the request (optional)
@param cookies: dict with cookies to set in the request (optional)
@returns: dict
"""
params = { "url": url }
if proxyUrl:
params["proxyUrl"] = proxyUrl
if headers:
if isinstance(headers, dict):
headers = json.dumps(headers)
params["headers"] = headers
if cookies:
if isinstance(cookies, dict):
cookies = json.dumps(cookies)
params["cookies"] = cookies
return self._er.jsonRequestAnalytics("/api/v1/extractArticleInfo", params) | [
"def",
"extractArticleInfo",
"(",
"self",
",",
"url",
",",
"proxyUrl",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"cookies",
"=",
"None",
")",
":",
"params",
"=",
"{",
"\"url\"",
":",
"url",
"}",
"if",
"proxyUrl",
":",
"params",
"[",
"\"proxyUrl\"",
"]",
"=",
"proxyUrl",
"if",
"headers",
":",
"if",
"isinstance",
"(",
"headers",
",",
"dict",
")",
":",
"headers",
"=",
"json",
".",
"dumps",
"(",
"headers",
")",
"params",
"[",
"\"headers\"",
"]",
"=",
"headers",
"if",
"cookies",
":",
"if",
"isinstance",
"(",
"cookies",
",",
"dict",
")",
":",
"cookies",
"=",
"json",
".",
"dumps",
"(",
"cookies",
")",
"params",
"[",
"\"cookies\"",
"]",
"=",
"cookies",
"return",
"self",
".",
"_er",
".",
"jsonRequestAnalytics",
"(",
"\"/api/v1/extractArticleInfo\"",
",",
"params",
")"
] | extract all available information about an article available at url `url`. Returned information will include
article title, body, authors, links in the articles, ...
@param url: article url to extract article information from
@param proxyUrl: proxy that should be used for downloading article information. format: {schema}://{username}:{pass}@{proxy url/ip}
@param headers: dict with headers to set in the request (optional)
@param cookies: dict with cookies to set in the request (optional)
@returns: dict | [
"extract",
"all",
"available",
"information",
"about",
"an",
"article",
"available",
"at",
"url",
"url",
".",
"Returned",
"information",
"will",
"include",
"article",
"title",
"body",
"authors",
"links",
"in",
"the",
"articles",
"..."
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/Analytics.py#L83-L104 | train |
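A sketch of the extraction call; dict-valued headers and cookies are JSON-encoded by the method itself, as the code above shows. The URL and key are placeholders:

    from eventregistry import EventRegistry, Analytics

    analytics = Analytics(EventRegistry(apiKey="YOUR_API_KEY"))   # placeholder key
    info = analytics.extractArticleInfo(
        "https://example.com/some-article",
        headers={"User-Agent": "Mozilla/5.0"},   # dict is json.dumps'd internally
    )
    print(info.get("title"))   # keys per the docstring: title, body, authors, links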
EventRegistry/event-registry-python | eventregistry/Analytics.py | Analytics.trainTopicOnTweets | def trainTopicOnTweets(self, twitterQuery, useTweetText=True, useIdfNormalization=True,
normalization="linear", maxTweets=2000, maxUsedLinks=500, ignoreConceptTypes=[],
maxConcepts = 20, maxCategories = 10, notifyEmailAddress = None):
"""
create a new topic and train it using the tweets that match the twitterQuery
@param twitterQuery: string containing the content to search for. It can be a Twitter user account (using "@" prefix or user's Twitter url),
a hash tag (using "#" prefix) or a regular keyword.
@param useTweetText: do you want to analyze the content of the tweets and extract the concepts mentioned in them? If False, only content shared
in the articles in the user's tweets will be analyzed
@param useIdfNormalization: normalize identified concepts by their IDF in the news (punish very common concepts)
@param normalization: way to normalize the concept weights ("none", "linear")
@param maxTweets: maximum number of tweets to collect (default 2000, max 5000)
@param maxUsedLinks: maximum number of article links in the tweets to analyze (default 500, max 2000)
@param ignoreConceptTypes: what types of concepts you would like to ignore in the profile. options: person, org, loc, wiki or an array with those
@param maxConcepts: the number of concepts to save in the final topic
@param maxCategories: the number of categories to save in the final topic
@param notifyEmailAddress: when finished, should we send a notification email to this address?
"""
assert maxTweets < 5000, "we can analyze at most 5000 tweets"
params = {"twitterQuery": twitterQuery, "useTweetText": useTweetText,
"useIdfNormalization": useIdfNormalization, "normalization": normalization,
"maxTweets": maxTweets, "maxUsedLinks": maxUsedLinks,
"maxConcepts": maxConcepts, "maxCategories": maxCategories }
if notifyEmailAddress:
params["notifyEmailAddress"] = notifyEmailAddress
if len(ignoreConceptTypes) > 0:
params["ignoreConceptTypes"] = ignoreConceptTypes
return self._er.jsonRequestAnalytics("/api/v1/trainTopicOnTwitter", params) | python | def trainTopicOnTweets(self, twitterQuery, useTweetText=True, useIdfNormalization=True,
normalization="linear", maxTweets=2000, maxUsedLinks=500, ignoreConceptTypes=[],
maxConcepts = 20, maxCategories = 10, notifyEmailAddress = None):
"""
create a new topic and train it using the tweets that match the twitterQuery
@param twitterQuery: string containing the content to search for. It can be a Twitter user account (using "@" prefix or user's Twitter url),
a hash tag (using "#" prefix) or a regular keyword.
@param useTweetText: do you want to analyze the content of the tweets and extract the concepts mentioned in them? If False, only content shared
in the articles in the user's tweets will be analyzed
@param useIdfNormalization: normalize identified concepts by their IDF in the news (punish very common concepts)
@param normalization: way to normalize the concept weights ("none", "linear")
@param maxTweets: maximum number of tweets to collect (default 2000, max 5000)
@param maxUsedLinks: maximum number of article links in the tweets to analyze (default 500, max 2000)
@param ignoreConceptTypes: what types of concepts you would like to ignore in the profile. options: person, org, loc, wiki or an array with those
@param maxConcepts: the number of concepts to save in the final topic
@param maxCategories: the number of categories to save in the final topic
@param notifyEmailAddress: when finished, should we send a notification email to this address?
"""
assert maxTweets <= 5000, "we can analyze at most 5000 tweets"
params = {"twitterQuery": twitterQuery, "useTweetText": useTweetText,
"useIdfNormalization": useIdfNormalization, "normalization": normalization,
"maxTweets": maxTweets, "maxUsedLinks": maxUsedLinks,
"maxConcepts": maxConcepts, "maxCategories": maxCategories }
if notifyEmailAddress:
params["notifyEmailAddress"] = notifyEmailAddress
if len(ignoreConceptTypes) > 0:
params["ignoreConceptTypes"] = ignoreConceptTypes
return self._er.jsonRequestAnalytics("/api/v1/trainTopicOnTwitter", params) | [
"def",
"trainTopicOnTweets",
"(",
"self",
",",
"twitterQuery",
",",
"useTweetText",
"=",
"True",
",",
"useIdfNormalization",
"=",
"True",
",",
"normalization",
"=",
"\"linear\"",
",",
"maxTweets",
"=",
"2000",
",",
"maxUsedLinks",
"=",
"500",
",",
"ignoreConceptTypes",
"=",
"[",
"]",
",",
"maxConcepts",
"=",
"20",
",",
"maxCategories",
"=",
"10",
",",
"notifyEmailAddress",
"=",
"None",
")",
":",
"assert",
"maxTweets",
"<",
"5000",
",",
"\"we can analyze at most 5000 tweets\"",
"params",
"=",
"{",
"\"twitterQuery\"",
":",
"twitterQuery",
",",
"\"useTweetText\"",
":",
"useTweetText",
",",
"\"useIdfNormalization\"",
":",
"useIdfNormalization",
",",
"\"normalization\"",
":",
"normalization",
",",
"\"maxTweets\"",
":",
"maxTweets",
",",
"\"maxUsedLinks\"",
":",
"maxUsedLinks",
",",
"\"maxConcepts\"",
":",
"maxConcepts",
",",
"\"maxCategories\"",
":",
"maxCategories",
"}",
"if",
"notifyEmailAddress",
":",
"params",
"[",
"\"notifyEmailAddress\"",
"]",
"=",
"notifyEmailAddress",
"if",
"len",
"(",
"ignoreConceptTypes",
")",
">",
"0",
":",
"params",
"[",
"\"ignoreConceptTypes\"",
"]",
"=",
"ignoreConceptTypes",
"return",
"self",
".",
"_er",
".",
"jsonRequestAnalytics",
"(",
"\"/api/v1/trainTopicOnTwitter\"",
",",
"params",
")"
] | create a new topic and train it using the tweets that match the twitterQuery
@param twitterQuery: string containing the content to search for. It can be a Twitter user account (using "@" prefix or user's Twitter url),
a hash tag (using "#" prefix) or a regular keyword.
@param useTweetText: do you want to analyze the content of the tweets and extract the concepts mentioned in them? If False, only content shared
in the articles in the user's tweets will be analyzed
@param useIdfNormalization: normalize identified concepts by their IDF in the news (punish very common concepts)
@param normalization: way to normalize the concept weights ("none", "linear")
@param maxTweets: maximum number of tweets to collect (default 2000, max 5000)
@param maxUsedLinks: maximum number of article links in the tweets to analyze (default 500, max 2000)
@param ignoreConceptTypes: what types of concepts you would like to ignore in the profile. options: person, org, loc, wiki or an array with those
@param maxConcepts: the number of concepts to save in the final topic
@param maxCategories: the number of categories to save in the final topic
@param notifyEmailAddress: when finished, should we send a notification email to this address? | [
"create",
"a",
"new",
"topic",
"and",
"train",
"it",
"using",
"the",
"tweets",
"that",
"match",
"the",
"twitterQuery"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/Analytics.py#L116-L144 | train |
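
A minimal usage sketch for trainTopicOnTweets. The API key, the "@Tesla" query and all variable names are illustrative assumptions, not taken from the record above; Analytics(er) is the wrapper class this method belongs to.

from eventregistry import EventRegistry, Analytics

er = EventRegistry(apiKey="YOUR_API_KEY")   # hypothetical key
analytics = Analytics(er)
# start training a topic from the tweets that match a Twitter account
res = analytics.trainTopicOnTweets("@Tesla",
    useTweetText=True,   # also analyze the text of the tweets themselves
    maxTweets=1000,      # stays below the documented 5000 tweet limit
    maxConcepts=20,
    maxCategories=10)
print(res)   # the response should reference the topic being trained
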
EventRegistry/event-registry-python | eventregistry/Analytics.py | Analytics.trainTopicGetTrainedTopic | def trainTopicGetTrainedTopic(self, uri, maxConcepts = 20, maxCategories = 10,
ignoreConceptTypes=[], idfNormalization = True):
"""
retrieve the topic for which you have already finished training
@param uri: uri of the topic (obtained by calling trainTopicCreateTopic method)
@param maxConcepts: number of top concepts to retrieve in the topic
@param maxCategories: number of top categories to retrieve in the topic
@param ignoreConceptTypes: what types of concepts you would like to ignore in the profile. options: person, org, loc, wiki or an array with those
@param idfNormalization: should the concepts be normalized by punishing the commonly mentioned concepts
@return: the trained topic: { concepts: [], categories: [] }
"""
return self._er.jsonRequestAnalytics("/api/v1/trainTopic", { "action": "getTrainedTopic", "uri": uri, "maxConcepts": maxConcepts, "maxCategories": maxCategories, "idfNormalization": idfNormalization }) | python | def trainTopicGetTrainedTopic(self, uri, maxConcepts = 20, maxCategories = 10,
ignoreConceptTypes=[], idfNormalization = True):
"""
retrieve the topic for which you have already finished training
@param uri: uri of the topic (obtained by calling trainTopicCreateTopic method)
@param maxConcepts: number of top concepts to retrieve in the topic
@param maxCategories: number of top categories to retrieve in the topic
@param ignoreConceptTypes: what types of concepts you would like to ignore in the profile. options: person, org, loc, wiki or an array with those
@param idfNormalization: should the concepts be normalized by punishing the commonly mentioned concepts
@return: the trained topic: { concepts: [], categories: [] }
"""
return self._er.jsonRequestAnalytics("/api/v1/trainTopic", { "action": "getTrainedTopic", "uri": uri, "maxConcepts": maxConcepts, "maxCategories": maxCategories, "idfNormalization": idfNormalization }) | [
"def",
"trainTopicGetTrainedTopic",
"(",
"self",
",",
"uri",
",",
"maxConcepts",
"=",
"20",
",",
"maxCategories",
"=",
"10",
",",
"ignoreConceptTypes",
"=",
"[",
"]",
",",
"idfNormalization",
"=",
"True",
")",
":",
"return",
"self",
".",
"_er",
".",
"jsonRequestAnalytics",
"(",
"\"/api/v1/trainTopic\"",
",",
"{",
"\"action\"",
":",
"\"getTrainedTopic\"",
",",
"\"uri\"",
":",
"uri",
",",
"\"maxConcepts\"",
":",
"maxConcepts",
",",
"\"maxCategories\"",
":",
"maxCategories",
",",
"\"idfNormalization\"",
":",
"idfNormalization",
"}",
")"
] | retrieve the topic for which you have already finished training
@param uri: uri of the topic (obtained by calling trainTopicCreateTopic method)
@param maxConcepts: number of top concepts to retrieve in the topic
@param maxCategories: number of top categories to retrieve in the topic
@param ignoreConceptTypes: what types of concepts you would like to ignore in the profile. options: person, org, loc, wiki or an array with those
@param idfNormalization: should the concepts be normalized by punishing the commonly mentioned concepts
@return: the trained topic: { concepts: [], categories: [] } | [
"retrieve",
"topic",
"for",
"the",
"topic",
"for",
"which",
"you",
"have",
"already",
"finished",
"training"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/Analytics.py#L172-L183 | train |
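
A sketch of collecting the result once the training has finished, assuming uri holds the topic uri obtained when the training was started and that the response follows the { concepts: [], categories: [] } shape described in the docstring.

trained = analytics.trainTopicGetTrainedTopic(uri,
    maxConcepts=20, maxCategories=10, idfNormalization=True)
for concept in trained.get("concepts", []):
    print(concept)
for category in trained.get("categories", []):
    print(category)
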
EventRegistry/event-registry-python | eventregistry/examples/TopicPagesExamples.py | createTopicPage1 | def createTopicPage1():
"""
create a topic page directly
"""
topic = TopicPage(er)
topic.addKeyword("renewable energy", 30)
topic.addConcept(er.getConceptUri("biofuel"), 50)
topic.addConcept(er.getConceptUri("solar energy"), 50)
topic.addCategory(er.getCategoryUri("renewable"), 50)
# skip articles that are duplicates of other articles
topic.articleHasDuplicateFilter("skipHasDuplicates")
# return only articles that are about some event that we have detected
topic.articleHasEventFilter("skipArticlesWithoutEvent")
# get first 2 pages of articles sorted by relevance to the topic page
arts1 = topic.getArticles(page=1, sortBy="rel")
arts2 = topic.getArticles(page=2, sortBy="rel")
# get first page of events
events1 = topic.getEvents(page=1) | python | def createTopicPage1():
"""
create a topic page directly
"""
topic = TopicPage(er)
topic.addKeyword("renewable energy", 30)
topic.addConcept(er.getConceptUri("biofuel"), 50)
topic.addConcept(er.getConceptUri("solar energy"), 50)
topic.addCategory(er.getCategoryUri("renewable"), 50)
# skip articles that are duplicates of other articles
topic.articleHasDuplicateFilter("skipHasDuplicates")
# return only articles that are about some event that we have detected
topic.articleHasEventFilter("skipArticlesWithoutEvent")
# get first 2 pages of articles sorted by relevance to the topic page
arts1 = topic.getArticles(page=1, sortBy="rel")
arts2 = topic.getArticles(page=2, sortBy="rel")
# get first page of events
events1 = topic.getEvents(page=1) | [
"def",
"createTopicPage1",
"(",
")",
":",
"topic",
"=",
"TopicPage",
"(",
"er",
")",
"topic",
".",
"addKeyword",
"(",
"\"renewable energy\"",
",",
"30",
")",
"topic",
".",
"addConcept",
"(",
"er",
".",
"getConceptUri",
"(",
"\"biofuel\"",
")",
",",
"50",
")",
"topic",
".",
"addConcept",
"(",
"er",
".",
"getConceptUri",
"(",
"\"solar energy\"",
")",
",",
"50",
")",
"topic",
".",
"addCategory",
"(",
"er",
".",
"getCategoryUri",
"(",
"\"renewable\"",
")",
",",
"50",
")",
"# skip articles that are duplicates of other articles",
"topic",
".",
"articleHasDuplicateFilter",
"(",
"\"skipHasDuplicates\"",
")",
"# return only articles that are about some event that we have detected",
"topic",
".",
"articleHasEventFilter",
"(",
"\"skipArticlesWithoutEvent\"",
")",
"# get first 2 pages of articles sorted by relevance to the topic page",
"arts1",
"=",
"topic",
".",
"getArticles",
"(",
"page",
"=",
"1",
",",
"sortBy",
"=",
"\"rel\"",
")",
"arts2",
"=",
"topic",
".",
"getArticles",
"(",
"page",
"=",
"2",
",",
"sortBy",
"=",
"\"rel\"",
")",
"# get first page of events",
"events1",
"=",
"topic",
".",
"getEvents",
"(",
"page",
"=",
"1",
")"
] | create a topic page directly | [
"create",
"a",
"topic",
"page",
"directly"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/examples/TopicPagesExamples.py#L6-L26 | train |
EventRegistry/event-registry-python | eventregistry/examples/TopicPagesExamples.py | createTopicPage2 | def createTopicPage2():
"""
create a topic page directly, set the article threshold, restrict results to set concepts and keywords
"""
topic = TopicPage(er)
topic.addCategory(er.getCategoryUri("renewable"), 50)
topic.addKeyword("renewable energy", 30)
topic.addConcept(er.getConceptUri("biofuel"), 50)
topic.addConcept(er.getConceptUri("solar energy"), 50)
# require that the results will mention at least one of the concepts and keywords specified
# (even though they might have the category about renewable energy, that will not be enough
# for an article to be among the results)
topic.restrictToSetConceptsAndKeywords(True)
# limit results to English, German and Spanish results
topic.setLanguages(["eng", "deu", "spa"])
# get results that are at most 3 days old
topic.setMaxDaysBack(3)
# require that the articles that will be returned should get at least a total score of 30 points or more
# based on the specified list of conditions
topic.setArticleThreshold(30)
# get first page of articles sorted by date (from most recent backward) to the topic page
arts1 = topic.getArticles(page=1,
sortBy="date",
returnInfo=ReturnInfo(
articleInfo = ArticleInfoFlags(concepts=True, categories=True)
))
for art in arts1.get("articles", {}).get("results", []):
print(art) | python | def createTopicPage2():
"""
create a topic page directly, set the article threshold, restrict results to set concepts and keywords
"""
topic = TopicPage(er)
topic.addCategory(er.getCategoryUri("renewable"), 50)
topic.addKeyword("renewable energy", 30)
topic.addConcept(er.getConceptUri("biofuel"), 50)
topic.addConcept(er.getConceptUri("solar energy"), 50)
# require that the results will mention at least one of the concepts and keywords specified
# (even though they might have the category about renewable energy, that will not be enough
# for an article to be among the results)
topic.restrictToSetConceptsAndKeywords(True)
# limit results to English, German and Spanish results
topic.setLanguages(["eng", "deu", "spa"])
# get results that are at most 3 days old
topic.setMaxDaysBack(3)
# require that the articles that will be returned should get at least a total score of 30 points or more
# based on the specified list of conditions
topic.setArticleThreshold(30)
# get first page of articles sorted by date (from most recent backward) to the topic page
arts1 = topic.getArticles(page=1,
sortBy="date",
returnInfo=ReturnInfo(
articleInfo = ArticleInfoFlags(concepts=True, categories=True)
))
for art in arts1.get("articles", {}).get("results", []):
print(art) | [
"def",
"createTopicPage2",
"(",
")",
":",
"topic",
"=",
"TopicPage",
"(",
"er",
")",
"topic",
".",
"addCategory",
"(",
"er",
".",
"getCategoryUri",
"(",
"\"renewable\"",
")",
",",
"50",
")",
"topic",
".",
"addKeyword",
"(",
"\"renewable energy\"",
",",
"30",
")",
"topic",
".",
"addConcept",
"(",
"er",
".",
"getConceptUri",
"(",
"\"biofuel\"",
")",
",",
"50",
")",
"topic",
".",
"addConcept",
"(",
"er",
".",
"getConceptUri",
"(",
"\"solar energy\"",
")",
",",
"50",
")",
"# require that the results will mention at least one of the concepts and keywords specified",
"# (even though they might have the category about renewable energy, that will not be enough",
"# for an article to be among the results)",
"topic",
".",
"restrictToSetConceptsAndKeywords",
"(",
"True",
")",
"# limit results to English, German and Spanish results",
"topic",
".",
"setLanguages",
"(",
"[",
"\"eng\"",
",",
"\"deu\"",
",",
"\"spa\"",
"]",
")",
"# get results that are at most 3 days old",
"topic",
".",
"setMaxDaysBack",
"(",
"3",
")",
"# require that the articles that will be returned should get at least a total score of 30 points or more",
"# based on the specified list of conditions",
"topic",
".",
"setArticleThreshold",
"(",
"30",
")",
"# get first page of articles sorted by date (from most recent backward) to the topic page",
"arts1",
"=",
"topic",
".",
"getArticles",
"(",
"page",
"=",
"1",
",",
"sortBy",
"=",
"\"date\"",
",",
"returnInfo",
"=",
"ReturnInfo",
"(",
"articleInfo",
"=",
"ArticleInfoFlags",
"(",
"concepts",
"=",
"True",
",",
"categories",
"=",
"True",
")",
")",
")",
"for",
"art",
"in",
"arts1",
".",
"get",
"(",
"\"articles\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"results\"",
",",
"[",
"]",
")",
":",
"print",
"(",
"art",
")"
] | create a topic page directly, set the article threshold, restrict results to set concepts and keywords | [
"create",
"a",
"topic",
"page",
"directly",
"set",
"the",
"article",
"threshold",
"restrict",
"results",
"to",
"set",
"concepts",
"and",
"keywords"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/examples/TopicPagesExamples.py#L30-L63 | train |
EventRegistry/event-registry-python | eventregistry/QueryEvent.py | QueryEventArticlesIter.count | def count(self, eventRegistry):
"""
return the number of articles that match the criteria
@param eventRegistry: instance of the EventRegistry class, used to obtain the necessary data
"""
self.setRequestedResult(RequestEventArticles(**self.queryParams))
res = eventRegistry.execQuery(self)
if "error" in res:
print(res["error"])
count = res.get(self.queryParams["eventUri"], {}).get("articles", {}).get("totalResults", 0)
return count | python | def count(self, eventRegistry):
"""
return the number of articles that match the criteria
@param eventRegistry: instance of the EventRegistry class, used to obtain the necessary data
"""
self.setRequestedResult(RequestEventArticles(**self.queryParams))
res = eventRegistry.execQuery(self)
if "error" in res:
print(res["error"])
count = res.get(self.queryParams["eventUri"], {}).get("articles", {}).get("totalResults", 0)
return count | [
"def",
"count",
"(",
"self",
",",
"eventRegistry",
")",
":",
"self",
".",
"setRequestedResult",
"(",
"RequestEventArticles",
"(",
"*",
"*",
"self",
".",
"queryParams",
")",
")",
"res",
"=",
"eventRegistry",
".",
"execQuery",
"(",
"self",
")",
"if",
"\"error\"",
"in",
"res",
":",
"print",
"(",
"res",
"[",
"\"error\"",
"]",
")",
"count",
"=",
"res",
".",
"get",
"(",
"self",
".",
"queryParams",
"[",
"\"eventUri\"",
"]",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"articles\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"totalResults\"",
",",
"0",
")",
"return",
"count"
] | return the number of articles that match the criteria
@param eventRegistry: instance of the EventRegistry class, used to obtain the necessary data | [
"return",
"the",
"number",
"of",
"articles",
"that",
"match",
"the",
"criteria"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/QueryEvent.py#L150-L160 | train |
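
A short sketch that checks the article count of an event before iterating. The event uri is a made-up placeholder, and execQuery with maxItems is assumed to behave as in the other iterator classes of this library.

it = QueryEventArticlesIter("eng-2940883")   # hypothetical event uri
if it.count(er) > 0:
    for art in it.execQuery(er, maxItems=50):
        print(art.get("title"))
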
EventRegistry/event-registry-python | eventregistry/QueryArticles.py | QueryArticles.initWithComplexQuery | def initWithComplexQuery(query):
"""
create a query using a complex article query
"""
q = QueryArticles()
# provided an instance of ComplexArticleQuery
if isinstance(query, ComplexArticleQuery):
q._setVal("query", json.dumps(query.getQuery()))
# provided query as a string containing the json object
elif isinstance(query, six.string_types):
json.loads(query)   # parsing will fail if the string is not valid JSON
q._setVal("query", query)
# provided query as a python dict
elif isinstance(query, dict):
q._setVal("query", json.dumps(query))
else:
assert False, "The instance of query parameter was not a ComplexArticleQuery, a string or a python dict"
return q | python | def initWithComplexQuery(query):
"""
create a query using a complex article query
"""
q = QueryArticles()
# provided an instance of ComplexArticleQuery
if isinstance(query, ComplexArticleQuery):
q._setVal("query", json.dumps(query.getQuery()))
# provided query as a string containing the json object
elif isinstance(query, six.string_types):
json.loads(query)   # parsing will fail if the string is not valid JSON
q._setVal("query", query)
# provided query as a python dict
elif isinstance(query, dict):
q._setVal("query", json.dumps(query))
else:
assert False, "The instance of query parameter was not a ComplexArticleQuery, a string or a python dict"
return q | [
"def",
"initWithComplexQuery",
"(",
"query",
")",
":",
"q",
"=",
"QueryArticles",
"(",
")",
"# provided an instance of ComplexArticleQuery",
"if",
"isinstance",
"(",
"query",
",",
"ComplexArticleQuery",
")",
":",
"q",
".",
"_setVal",
"(",
"\"query\"",
",",
"json",
".",
"dumps",
"(",
"query",
".",
"getQuery",
"(",
")",
")",
")",
"# provided query as a string containing the json object",
"elif",
"isinstance",
"(",
"query",
",",
"six",
".",
"string_types",
")",
":",
"foo",
"=",
"json",
".",
"loads",
"(",
"query",
")",
"q",
".",
"_setVal",
"(",
"\"query\"",
",",
"query",
")",
"# provided query as a python dict",
"elif",
"isinstance",
"(",
"query",
",",
"dict",
")",
":",
"q",
".",
"_setVal",
"(",
"\"query\"",
",",
"json",
".",
"dumps",
"(",
"query",
")",
")",
"else",
":",
"assert",
"False",
",",
"\"The instance of query parameter was not a ComplexArticleQuery, a string or a python dict\"",
"return",
"q"
] | create a query using a complex article query | [
"create",
"a",
"query",
"using",
"a",
"complex",
"article",
"query"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/QueryArticles.py#L218-L235 | train |
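
A sketch of the three accepted input types. The "$query" structure is an assumption about the advanced query JSON, not something taken from the record above.

# 1) a plain python dict (serialized internally with json.dumps)
q = QueryArticles.initWithComplexQuery(
    {"$query": {"keyword": "solar energy", "lang": "eng"}})

# 2) the same query as a JSON string (validated with json.loads)
q = QueryArticles.initWithComplexQuery(
    '{"$query": {"keyword": "solar energy", "lang": "eng"}}')

# 3) an instance of ComplexArticleQuery is accepted as well
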
EventRegistry/event-registry-python | eventregistry/QueryArticles.py | QueryArticlesIter._getNextArticleBatch | def _getNextArticleBatch(self):
"""download next batch of articles based on the article uris in the uri list"""
# move to the next page of results
self._articlePage += 1
# if we have already obtained all pages, then exit
if self._totalPages != None and self._articlePage > self._totalPages:
return
self.setRequestedResult(RequestArticlesInfo(page=self._articlePage,
sortBy=self._sortBy, sortByAsc=self._sortByAsc,
returnInfo = self._returnInfo))
if self._er._verboseOutput:
print("Downloading article page %d..." % (self._articlePage))
res = self._er.execQuery(self)
if "error" in res:
print("Error while obtaining a list of articles: " + res["error"])
else:
self._totalPages = res.get("articles", {}).get("pages", 0)
results = res.get("articles", {}).get("results", [])
self._articleList.extend(results) | python | def _getNextArticleBatch(self):
"""download next batch of articles based on the article uris in the uri list"""
# move to the next page of results
self._articlePage += 1
# if we have already obtained all pages, then exit
if self._totalPages != None and self._articlePage > self._totalPages:
return
self.setRequestedResult(RequestArticlesInfo(page=self._articlePage,
sortBy=self._sortBy, sortByAsc=self._sortByAsc,
returnInfo = self._returnInfo))
if self._er._verboseOutput:
print("Downloading article page %d..." % (self._articlePage))
res = self._er.execQuery(self)
if "error" in res:
print("Error while obtaining a list of articles: " + res["error"])
else:
self._totalPages = res.get("articles", {}).get("pages", 0)
results = res.get("articles", {}).get("results", [])
self._articleList.extend(results) | [
"def",
"_getNextArticleBatch",
"(",
"self",
")",
":",
"# try to get more uris, if none",
"self",
".",
"_articlePage",
"+=",
"1",
"# if we have already obtained all pages, then exit",
"if",
"self",
".",
"_totalPages",
"!=",
"None",
"and",
"self",
".",
"_articlePage",
">",
"self",
".",
"_totalPages",
":",
"return",
"self",
".",
"setRequestedResult",
"(",
"RequestArticlesInfo",
"(",
"page",
"=",
"self",
".",
"_articlePage",
",",
"sortBy",
"=",
"self",
".",
"_sortBy",
",",
"sortByAsc",
"=",
"self",
".",
"_sortByAsc",
",",
"returnInfo",
"=",
"self",
".",
"_returnInfo",
")",
")",
"if",
"self",
".",
"_er",
".",
"_verboseOutput",
":",
"print",
"(",
"\"Downloading article page %d...\"",
"%",
"(",
"self",
".",
"_articlePage",
")",
")",
"res",
"=",
"self",
".",
"_er",
".",
"execQuery",
"(",
"self",
")",
"if",
"\"error\"",
"in",
"res",
":",
"print",
"(",
"\"Error while obtaining a list of articles: \"",
"+",
"res",
"[",
"\"error\"",
"]",
")",
"else",
":",
"self",
".",
"_totalPages",
"=",
"res",
".",
"get",
"(",
"\"articles\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"pages\"",
",",
"0",
")",
"results",
"=",
"res",
".",
"get",
"(",
"\"articles\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"results\"",
",",
"[",
"]",
")",
"self",
".",
"_articleList",
".",
"extend",
"(",
"results",
")"
] | download the next page of articles that match the search query | [
"download",
"next",
"batch",
"of",
"articles",
"based",
"on",
"the",
"article",
"uris",
"in",
"the",
"uri",
"list"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/QueryArticles.py#L317-L335 | train |
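
The paging above is driven transparently while iterating. A sketch, assuming QueryArticlesIter accepts the same constructor arguments as QueryArticles and that execQuery yields one article at a time.

q = QueryArticlesIter(keywords="renewable energy")
# pages are fetched lazily by _getNextArticleBatch as the loop advances
for art in q.execQuery(er, sortBy="date", maxItems=200):
    print(art.get("uri"), art.get("title"))
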
EventRegistry/event-registry-python | eventregistry/QueryEvents.py | QueryEvents.initWithComplexQuery | def initWithComplexQuery(query):
"""
create a query using a complex event query
"""
q = QueryEvents()
# provided an instance of ComplexEventQuery
if isinstance(query, ComplexEventQuery):
q._setVal("query", json.dumps(query.getQuery()))
# provided query as a string containing the json object
elif isinstance(query, six.string_types):
json.loads(query)   # parsing will fail if the string is not valid JSON
q._setVal("query", query)
# provided query as a python dict
elif isinstance(query, dict):
q._setVal("query", json.dumps(query))
# unrecognized value provided
else:
assert False, "The instance of query parameter was not a ComplexEventQuery, a string or a python dict"
return q | python | def initWithComplexQuery(query):
"""
create a query using a complex event query
"""
q = QueryEvents()
# provided an instance of ComplexEventQuery
if isinstance(query, ComplexEventQuery):
q._setVal("query", json.dumps(query.getQuery()))
# provided query as a string containing the json object
elif isinstance(query, six.string_types):
json.loads(query)   # parsing will fail if the string is not valid JSON
q._setVal("query", query)
# provided query as a python dict
elif isinstance(query, dict):
q._setVal("query", json.dumps(query))
# unrecognized value provided
else:
assert False, "The instance of query parameter was not a ComplexEventQuery, a string or a python dict"
return q | [
"def",
"initWithComplexQuery",
"(",
"query",
")",
":",
"q",
"=",
"QueryEvents",
"(",
")",
"# provided an instance of ComplexEventQuery",
"if",
"isinstance",
"(",
"query",
",",
"ComplexEventQuery",
")",
":",
"q",
".",
"_setVal",
"(",
"\"query\"",
",",
"json",
".",
"dumps",
"(",
"query",
".",
"getQuery",
"(",
")",
")",
")",
"# provided query as a string containing the json object",
"elif",
"isinstance",
"(",
"query",
",",
"six",
".",
"string_types",
")",
":",
"foo",
"=",
"json",
".",
"loads",
"(",
"query",
")",
"q",
".",
"_setVal",
"(",
"\"query\"",
",",
"query",
")",
"# provided query as a python dict",
"elif",
"isinstance",
"(",
"query",
",",
"dict",
")",
":",
"q",
".",
"_setVal",
"(",
"\"query\"",
",",
"json",
".",
"dumps",
"(",
"query",
")",
")",
"# unrecognized value provided",
"else",
":",
"assert",
"False",
",",
"\"The instance of query parameter was not a ComplexEventQuery, a string or a python dict\"",
"return",
"q"
] | create a query using a complex event query | [
"create",
"a",
"query",
"using",
"a",
"complex",
"event",
"query"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/QueryEvents.py#L183-L201 | train |
EventRegistry/event-registry-python | eventregistry/QueryEvents.py | QueryEventsIter.count | def count(self, eventRegistry):
"""
return the number of events that match the criteria
"""
self.setRequestedResult(RequestEventsInfo())
res = eventRegistry.execQuery(self)
if "error" in res:
print(res["error"])
count = res.get("events", {}).get("totalResults", 0)
return count | python | def count(self, eventRegistry):
"""
return the number of events that match the criteria
"""
self.setRequestedResult(RequestEventsInfo())
res = eventRegistry.execQuery(self)
if "error" in res:
print(res["error"])
count = res.get("events", {}).get("totalResults", 0)
return count | [
"def",
"count",
"(",
"self",
",",
"eventRegistry",
")",
":",
"self",
".",
"setRequestedResult",
"(",
"RequestEventsInfo",
"(",
")",
")",
"res",
"=",
"eventRegistry",
".",
"execQuery",
"(",
"self",
")",
"if",
"\"error\"",
"in",
"res",
":",
"print",
"(",
"res",
"[",
"\"error\"",
"]",
")",
"count",
"=",
"res",
".",
"get",
"(",
"\"events\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"totalResults\"",
",",
"0",
")",
"return",
"count"
] | return the number of events that match the criteria | [
"return",
"the",
"number",
"of",
"events",
"that",
"match",
"the",
"criteria"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/QueryEvents.py#L211-L220 | train |
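
A sketch of using count to size a result set before downloading anything; the keyword and the er instance are illustrative.

q = QueryEventsIter(keywords="earthquake")
print("matching events:", q.count(er))
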
EventRegistry/event-registry-python | eventregistry/ReturnInfo.py | ReturnInfoFlagsBase._setFlag | def _setFlag(self, name, val, defVal):
"""set the objects property propName if the dictKey key exists in dict and it is not the same as default value defVal"""
if not hasattr(self, "flags"):
self.flags = {}
if val != defVal:
self.flags[name] = val | python | def _setFlag(self, name, val, defVal):
"""set the objects property propName if the dictKey key exists in dict and it is not the same as default value defVal"""
if not hasattr(self, "flags"):
self.flags = {}
if val != defVal:
self.flags[name] = val | [
"def",
"_setFlag",
"(",
"self",
",",
"name",
",",
"val",
",",
"defVal",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"flags\"",
")",
":",
"self",
".",
"flags",
"=",
"{",
"}",
"if",
"val",
"!=",
"defVal",
":",
"self",
".",
"flags",
"[",
"name",
"]",
"=",
"val"
] | set the flag name to value val if val is different from the default value defVal | [
"set",
"the",
"objects",
"property",
"propName",
"if",
"the",
"dictKey",
"key",
"exists",
"in",
"dict",
"and",
"it",
"is",
"not",
"the",
"same",
"as",
"default",
"value",
"defVal"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/ReturnInfo.py#L15-L20 | train |
EventRegistry/event-registry-python | eventregistry/ReturnInfo.py | ReturnInfoFlagsBase._setVal | def _setVal(self, name, val, defVal = None):
"""set value of name to val in case the val != defVal"""
if val == defVal:
return
if not hasattr(self, "vals"):
self.vals = {}
self.vals[name] = val | python | def _setVal(self, name, val, defVal = None):
"""set value of name to val in case the val != defVal"""
if val == defVal:
return
if not hasattr(self, "vals"):
self.vals = {}
self.vals[name] = val | [
"def",
"_setVal",
"(",
"self",
",",
"name",
",",
"val",
",",
"defVal",
"=",
"None",
")",
":",
"if",
"val",
"==",
"defVal",
":",
"return",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"vals\"",
")",
":",
"self",
".",
"vals",
"=",
"{",
"}",
"self",
".",
"vals",
"[",
"name",
"]",
"=",
"val"
] | set the value of name to val in case val != defVal | [
"set",
"value",
"of",
"name",
"to",
"val",
"in",
"case",
"the",
"val",
"!",
"=",
"defVal"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/ReturnInfo.py#L30-L36 | train |
EventRegistry/event-registry-python | eventregistry/ReturnInfo.py | ReturnInfoFlagsBase._getVals | def _getVals(self, prefix = ""):
"""
return the values in the vals dict
in case prefix is "", change the first letter of the name to lowercase, otherwise use prefix+name as the new name
"""
if not hasattr(self, "vals"):
self.vals = {}
retDict = {}
for key in list(self.vals.keys()):
# if no prefix then lower the first letter
if prefix == "":
newkey = key[:1].lower() + key[1:] if key else ""
retDict[newkey] = self.vals[key]
else:
newkey = key[:1].upper() + key[1:] if key else ""
retDict[prefix + newkey] = self.vals[key]
return retDict | python | def _getVals(self, prefix = ""):
"""
return the values in the vals dict
in case prefix is "", change the first letter of the name to lowercase, otherwise use prefix+name as the new name
"""
if not hasattr(self, "vals"):
self.vals = {}
retDict = {}
for key in list(self.vals.keys()):
# if no prefix then lower the first letter
if prefix == "":
newkey = key[:1].lower() + key[1:] if key else ""
retDict[newkey] = self.vals[key]
else:
newkey = key[:1].upper() + key[1:] if key else ""
retDict[prefix + newkey] = self.vals[key]
return retDict | [
"def",
"_getVals",
"(",
"self",
",",
"prefix",
"=",
"\"\"",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"vals\"",
")",
":",
"self",
".",
"vals",
"=",
"{",
"}",
"dict",
"=",
"{",
"}",
"for",
"key",
"in",
"list",
"(",
"self",
".",
"vals",
".",
"keys",
"(",
")",
")",
":",
"# if no prefix then lower the first letter",
"if",
"prefix",
"==",
"\"\"",
":",
"newkey",
"=",
"key",
"[",
":",
"1",
"]",
".",
"lower",
"(",
")",
"+",
"key",
"[",
"1",
":",
"]",
"if",
"key",
"else",
"\"\"",
"dict",
"[",
"newkey",
"]",
"=",
"self",
".",
"vals",
"[",
"key",
"]",
"else",
":",
"newkey",
"=",
"key",
"[",
":",
"1",
"]",
".",
"upper",
"(",
")",
"+",
"key",
"[",
"1",
":",
"]",
"if",
"key",
"else",
"\"\"",
"dict",
"[",
"prefix",
"+",
"newkey",
"]",
"=",
"self",
".",
"vals",
"[",
"key",
"]",
"return",
"dict"
] | return the values in the vals dict
in case prefix is "", change the first letter of the name to lowercase, otherwise use prefix+name as the new name | [
"return",
"the",
"values",
"in",
"the",
"vals",
"dict",
"in",
"case",
"prefix",
"is",
"change",
"the",
"first",
"letter",
"of",
"the",
"name",
"to",
"lowercase",
"otherwise",
"use",
"prefix",
"+",
"name",
"as",
"the",
"new",
"name"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/ReturnInfo.py#L39-L55 | train |
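
A sketch of the renaming behavior of _getVals, using a hypothetical subclass. With an empty prefix the first letter of each stored name is lowercased; with a prefix the capitalized name is appended to it.

class DemoFlags(ReturnInfoFlagsBase):
    def __init__(self):
        self._setVal("BodyLen", 300, -1)   # kept: differs from the default
        self._setVal("Lang", None, None)   # dropped: equals the default

flags = DemoFlags()
print(flags._getVals())            # {'bodyLen': 300}
print(flags._getVals("article"))   # {'articleBodyLen': 300}
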
EventRegistry/event-registry-python | eventregistry/ReturnInfo.py | ReturnInfo.loadFromFile | def loadFromFile(fileName):
"""
load the configuration for the ReturnInfo from a fileName
@param fileName: filename that contains the json configuration to use in the ReturnInfo
"""
assert os.path.exists(fileName), "File " + fileName + " does not exist"
with open(fileName) as f:
    conf = json.load(f)
return ReturnInfo(
articleInfo=ArticleInfoFlags(**conf.get("articleInfo", {})),
eventInfo=EventInfoFlags(**conf.get("eventInfo", {})),
sourceInfo=SourceInfoFlags(**conf.get("sourceInfo", {})),
categoryInfo=CategoryInfoFlags(**conf.get("categoryInfo", {})),
conceptInfo=ConceptInfoFlags(**conf.get("conceptInfo", {})),
locationInfo=LocationInfoFlags(**conf.get("locationInfo", {})),
storyInfo=StoryInfoFlags(**conf.get("storyInfo", {})),
conceptClassInfo=ConceptClassInfoFlags(**conf.get("conceptClassInfo", {})),
conceptFolderInfo=ConceptFolderInfoFlags(**conf.get("conceptFolderInfo", {}))
) | python | def loadFromFile(fileName):
"""
load the configuration for the ReturnInfo from a fileName
@param fileName: filename that contains the json configuration to use in the ReturnInfo
"""
assert os.path.exists(fileName), "File " + fileName + " does not exist"
with open(fileName) as f:
    conf = json.load(f)
return ReturnInfo(
articleInfo=ArticleInfoFlags(**conf.get("articleInfo", {})),
eventInfo=EventInfoFlags(**conf.get("eventInfo", {})),
sourceInfo=SourceInfoFlags(**conf.get("sourceInfo", {})),
categoryInfo=CategoryInfoFlags(**conf.get("categoryInfo", {})),
conceptInfo=ConceptInfoFlags(**conf.get("conceptInfo", {})),
locationInfo=LocationInfoFlags(**conf.get("locationInfo", {})),
storyInfo=StoryInfoFlags(**conf.get("storyInfo", {})),
conceptClassInfo=ConceptClassInfoFlags(**conf.get("conceptClassInfo", {})),
conceptFolderInfo=ConceptFolderInfoFlags(**conf.get("conceptFolderInfo", {}))
) | [
"def",
"loadFromFile",
"(",
"fileName",
")",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"fileName",
")",
",",
"\"File \"",
"+",
"fileName",
"+",
"\" does not exist\"",
"conf",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"fileName",
")",
")",
"return",
"ReturnInfo",
"(",
"articleInfo",
"=",
"ArticleInfoFlags",
"(",
"*",
"*",
"conf",
".",
"get",
"(",
"\"articleInfo\"",
",",
"{",
"}",
")",
")",
",",
"eventInfo",
"=",
"EventInfoFlags",
"(",
"*",
"*",
"conf",
".",
"get",
"(",
"\"eventInfo\"",
",",
"{",
"}",
")",
")",
",",
"sourceInfo",
"=",
"SourceInfoFlags",
"(",
"*",
"*",
"conf",
".",
"get",
"(",
"\"sourceInfo\"",
",",
"{",
"}",
")",
")",
",",
"categoryInfo",
"=",
"CategoryInfoFlags",
"(",
"*",
"*",
"conf",
".",
"get",
"(",
"\"categoryInfo\"",
",",
"{",
"}",
")",
")",
",",
"conceptInfo",
"=",
"ConceptInfoFlags",
"(",
"*",
"*",
"conf",
".",
"get",
"(",
"\"conceptInfo\"",
",",
"{",
"}",
")",
")",
",",
"locationInfo",
"=",
"LocationInfoFlags",
"(",
"*",
"*",
"conf",
".",
"get",
"(",
"\"locationInfo\"",
",",
"{",
"}",
")",
")",
",",
"storyInfo",
"=",
"StoryInfoFlags",
"(",
"*",
"*",
"conf",
".",
"get",
"(",
"\"storyInfo\"",
",",
"{",
"}",
")",
")",
",",
"conceptClassInfo",
"=",
"ConceptClassInfoFlags",
"(",
"*",
"*",
"conf",
".",
"get",
"(",
"\"conceptClassInfo\"",
",",
"{",
"}",
")",
")",
",",
"conceptFolderInfo",
"=",
"ConceptFolderInfoFlags",
"(",
"*",
"*",
"conf",
".",
"get",
"(",
"\"conceptFolderInfo\"",
",",
"{",
"}",
")",
")",
")"
] | load the configuration for the ReturnInfo from a fileName
@param fileName: filename that contains the json configuration to use in the ReturnInfo | [
"load",
"the",
"configuration",
"for",
"the",
"ReturnInfo",
"from",
"a",
"fileName"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/ReturnInfo.py#L453-L470 | train |
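
A sketch of a configuration file that loadFromFile would accept. The top-level keys come from the loader above; the inner flag names are assumptions about the individual flag classes.

# contents of a hypothetical returnInfoConf.json:
# {
#     "articleInfo": { "bodyLen": 300, "concepts": true, "categories": true },
#     "sourceInfo": { "location": true }
# }
ri = ReturnInfo.loadFromFile("returnInfoConf.json")
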
EventRegistry/event-registry-python | eventregistry/TopicPage.py | TopicPage.loadTopicPageFromER | def loadTopicPageFromER(self, uri):
"""
load an existing topic page from Event Registry based on the topic page URI
@param uri: uri of the topic page saved in your Event Registry account
"""
params = {
"action": "getTopicPageJson",
"includeConceptDescription": True,
"includeTopicPageDefinition": True,
"includeTopicPageOwner": True,
"uri": uri
}
self.topicPage = self._createEmptyTopicPage()
self.concept = self.eventRegistry.jsonRequest("/json/topicPage", params)
self.topicPage.update(self.concept.get("topicPage", {})) | python | def loadTopicPageFromER(self, uri):
"""
load an existing topic page from Event Registry based on the topic page URI
@param uri: uri of the topic page saved in your Event Registry account
"""
params = {
"action": "getTopicPageJson",
"includeConceptDescription": True,
"includeTopicPageDefinition": True,
"includeTopicPageOwner": True,
"uri": uri
}
self.topicPage = self._createEmptyTopicPage()
self.concept = self.eventRegistry.jsonRequest("/json/topicPage", params)
self.topicPage.update(self.concept.get("topicPage", {})) | [
"def",
"loadTopicPageFromER",
"(",
"self",
",",
"uri",
")",
":",
"params",
"=",
"{",
"\"action\"",
":",
"\"getTopicPageJson\"",
",",
"\"includeConceptDescription\"",
":",
"True",
",",
"\"includeTopicPageDefinition\"",
":",
"True",
",",
"\"includeTopicPageOwner\"",
":",
"True",
",",
"\"uri\"",
":",
"uri",
"}",
"self",
".",
"topicPage",
"=",
"self",
".",
"_createEmptyTopicPage",
"(",
")",
"self",
".",
"concept",
"=",
"self",
".",
"eventRegistry",
".",
"jsonRequest",
"(",
"\"/json/topicPage\"",
",",
"params",
")",
"self",
".",
"topicPage",
".",
"update",
"(",
"self",
".",
"concept",
".",
"get",
"(",
"\"topicPage\"",
",",
"{",
"}",
")",
")"
] | load an existing topic page from Event Registry based on the topic page URI
@param uri: uri of the topic page saved in your Event Registry account | [
"load",
"an",
"existing",
"topic",
"page",
"from",
"Event",
"Registry",
"based",
"on",
"the",
"topic",
"page",
"URI"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L51-L65 | train |
EventRegistry/event-registry-python | eventregistry/TopicPage.py | TopicPage.loadTopicPageFromFile | def loadTopicPageFromFile(self, fname):
"""
load topic page from an existing file
"""
assert os.path.exists(fname), "File " + fname + " does not exist"
with open(fname, "r", encoding="utf-8") as f:
    self.topicPage = json.load(f) | python | def loadTopicPageFromFile(self, fname):
"""
load topic page from an existing file
"""
assert os.path.exists(fname), "File " + fname + " does not exist"
with open(fname, "r", encoding="utf-8") as f:
    self.topicPage = json.load(f) | [
"def",
"loadTopicPageFromFile",
"(",
"self",
",",
"fname",
")",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"fname",
")",
"f",
"=",
"open",
"(",
"fname",
",",
"\"r\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"self",
".",
"topicPage",
"=",
"json",
".",
"load",
"(",
"f",
")"
] | load topic page from an existing file | [
"load",
"topic",
"page",
"from",
"an",
"existing",
"file"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L76-L82 | train |
EventRegistry/event-registry-python | eventregistry/TopicPage.py | TopicPage.saveTopicPageDefinitionToFile | def saveTopicPageDefinitionToFile(self, fname):
"""
save the topic page definition to a file
"""
open(fname, "w", encoding="utf-8").write(json.dumps(self.topicPage, indent = 4, sort_keys = True)) | python | def saveTopicPageDefinitionToFile(self, fname):
"""
save the topic page definition to a file
"""
open(fname, "w", encoding="utf-8").write(json.dumps(self.topicPage, indent = 4, sort_keys = True)) | [
"def",
"saveTopicPageDefinitionToFile",
"(",
"self",
",",
"fname",
")",
":",
"open",
"(",
"fname",
",",
"\"w\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"self",
".",
"topicPage",
",",
"indent",
"=",
"4",
",",
"sort_keys",
"=",
"True",
")",
")"
] | save the topic page definition to a file | [
"save",
"the",
"topic",
"page",
"definition",
"to",
"a",
"file"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L92-L96 | train |
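
A save/load roundtrip sketch using the two file helpers above; the file name is illustrative.

topic = TopicPage(er)
topic.addKeyword("electric cars", 40)
topic.saveTopicPageDefinitionToFile("myTopic.json")
# later: restore the same definition without contacting the API
topic2 = TopicPage(er)
topic2.loadTopicPageFromFile("myTopic.json")
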
EventRegistry/event-registry-python | eventregistry/TopicPage.py | TopicPage.setArticleThreshold | def setArticleThreshold(self, value):
"""
what is the minimum total weight that an article has to have in order to get it among the results?
@param value: threshold to use
"""
assert isinstance(value, int)
assert value >= 0
self.topicPage["articleTreshWgt"] = value | python | def setArticleThreshold(self, value):
"""
what is the minimum total weight that an article has to have in order to get it among the results?
@param value: threshold to use
"""
assert isinstance(value, int)
assert value >= 0
self.topicPage["articleTreshWgt"] = value | [
"def",
"setArticleThreshold",
"(",
"self",
",",
"value",
")",
":",
"assert",
"isinstance",
"(",
"value",
",",
"int",
")",
"assert",
"value",
">=",
"0",
"self",
".",
"topicPage",
"[",
"\"articleTreshWgt\"",
"]",
"=",
"value"
] | what is the minimum total weight that an article has to have in order to get it among the results?
@param value: threshold to use | [
"what",
"is",
"the",
"minimum",
"total",
"weight",
"that",
"an",
"article",
"has",
"to",
"have",
"in",
"order",
"to",
"get",
"it",
"among",
"the",
"results?"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L102-L109 | train |
EventRegistry/event-registry-python | eventregistry/TopicPage.py | TopicPage.setEventThreshold | def setEventThreshold(self, value):
"""
what is the minimum total weight that an event has to have in order to get it among the results?
@param value: threshold to use
"""
assert isinstance(value, int)
assert value >= 0
self.topicPage["eventTreshWgt"] = value | python | def setEventThreshold(self, value):
"""
what is the minimum total weight that an event has to have in order to get it among the results?
@param value: threshold to use
"""
assert isinstance(value, int)
assert value >= 0
self.topicPage["eventTreshWgt"] = value | [
"def",
"setEventThreshold",
"(",
"self",
",",
"value",
")",
":",
"assert",
"isinstance",
"(",
"value",
",",
"int",
")",
"assert",
"value",
">=",
"0",
"self",
".",
"topicPage",
"[",
"\"eventTreshWgt\"",
"]",
"=",
"value"
] | what is the minimum total weight that an event has to have in order to get it among the results?
@param value: threshold to use | [
"what",
"is",
"the",
"minimum",
"total",
"weight",
"that",
"an",
"event",
"has",
"to",
"have",
"in",
"order",
"to",
"get",
"it",
"among",
"the",
"results?"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L112-L119 | train |
EventRegistry/event-registry-python | eventregistry/TopicPage.py | TopicPage.setMaxDaysBack | def setMaxDaysBack(self, maxDaysBack):
"""
what is the maximum allowed age of the results?
"""
assert isinstance(maxDaysBack, int), "maxDaysBack value has to be a positive integer"
assert maxDaysBack >= 1
self.topicPage["maxDaysBack"] = maxDaysBack | python | def setMaxDaysBack(self, maxDaysBack):
"""
what is the maximum allowed age of the results?
"""
assert isinstance(maxDaysBack, int), "maxDaysBack value has to be a positive integer"
assert maxDaysBack >= 1
self.topicPage["maxDaysBack"] = maxDaysBack | [
"def",
"setMaxDaysBack",
"(",
"self",
",",
"maxDaysBack",
")",
":",
"assert",
"isinstance",
"(",
"maxDaysBack",
",",
"int",
")",
",",
"\"maxDaysBack value has to be a positive integer\"",
"assert",
"maxDaysBack",
">=",
"1",
"self",
".",
"topicPage",
"[",
"\"maxDaysBack\"",
"]",
"=",
"maxDaysBack"
] | what is the maximum allowed age of the results? | [
"what",
"is",
"the",
"maximum",
"allowed",
"age",
"of",
"the",
"results?"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L164-L170 | train |
EventRegistry/event-registry-python | eventregistry/TopicPage.py | TopicPage.addConcept | def addConcept(self, conceptUri, weight, label = None, conceptType = None):
"""
add a relevant concept to the topic page
@param conceptUri: uri of the concept to be added
@param weight: importance of the provided concept (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be an int or a float"
concept = {"uri": conceptUri, "wgt": weight}
if label != None: concept["label"] = label
if conceptType != None: concept["type"] = conceptType
self.topicPage["concepts"].append(concept) | python | def addConcept(self, conceptUri, weight, label = None, conceptType = None):
"""
add a relevant concept to the topic page
@param conceptUri: uri of the concept to be added
@param weight: importance of the provided concept (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be an int or a float"
concept = {"uri": conceptUri, "wgt": weight}
if label != None: concept["label"] = label
if conceptType != None: concept["type"] = conceptType
self.topicPage["concepts"].append(concept) | [
"def",
"addConcept",
"(",
"self",
",",
"conceptUri",
",",
"weight",
",",
"label",
"=",
"None",
",",
"conceptType",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"weight",
",",
"(",
"float",
",",
"int",
")",
")",
",",
"\"weight value has to be a positive or negative integer\"",
"concept",
"=",
"{",
"\"uri\"",
":",
"conceptUri",
",",
"\"wgt\"",
":",
"weight",
"}",
"if",
"label",
"!=",
"None",
":",
"concept",
"[",
"\"label\"",
"]",
"=",
"label",
"if",
"conceptType",
"!=",
"None",
":",
"concept",
"[",
"\"type\"",
"]",
"=",
"conceptType",
"self",
".",
"topicPage",
"[",
"\"concepts\"",
"]",
".",
"append",
"(",
"concept",
")"
] | add a relevant concept to the topic page
@param conceptUri: uri of the concept to be added
@param weight: importance of the provided concept (typically in range 1 - 50) | [
"add",
"a",
"relevant",
"concept",
"to",
"the",
"topic",
"page"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L211-L221 | train |
EventRegistry/event-registry-python | eventregistry/TopicPage.py | TopicPage.addKeyword | def addKeyword(self, keyword, weight):
"""
add a relevant keyword to the topic page
@param keyword: keyword or phrase to be added
@param weight: importance of the provided keyword (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be an int or a float"
self.topicPage["keywords"].append({"keyword": keyword, "wgt": weight}) | python | def addKeyword(self, keyword, weight):
"""
add a relevant keyword to the topic page
@param keyword: keyword or phrase to be added
@param weight: importance of the provided keyword (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be an int or a float"
self.topicPage["keywords"].append({"keyword": keyword, "wgt": weight}) | [
"def",
"addKeyword",
"(",
"self",
",",
"keyword",
",",
"weight",
")",
":",
"assert",
"isinstance",
"(",
"weight",
",",
"(",
"float",
",",
"int",
")",
")",
",",
"\"weight value has to be a positive or negative integer\"",
"self",
".",
"topicPage",
"[",
"\"keywords\"",
"]",
".",
"append",
"(",
"{",
"\"keyword\"",
":",
"keyword",
",",
"\"wgt\"",
":",
"weight",
"}",
")"
] | add a relevant keyword to the topic page
@param keyword: keyword or phrase to be added
@param weight: importance of the provided keyword (typically in range 1 - 50) | [
"add",
"a",
"relevant",
"keyword",
"to",
"the",
"topic",
"page"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L224-L231 | train |
EventRegistry/event-registry-python | eventregistry/TopicPage.py | TopicPage.addCategory | def addCategory(self, categoryUri, weight):
"""
add a relevant category to the topic page
@param categoryUri: uri of the category to be added
@param weight: importance of the provided category (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be an int or a float"
self.topicPage["categories"].append({"uri": categoryUri, "wgt": weight}) | python | def addCategory(self, categoryUri, weight):
"""
add a relevant category to the topic page
@param categoryUri: uri of the category to be added
@param weight: importance of the provided category (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be an int or a float"
self.topicPage["categories"].append({"uri": categoryUri, "wgt": weight}) | [
"def",
"addCategory",
"(",
"self",
",",
"categoryUri",
",",
"weight",
")",
":",
"assert",
"isinstance",
"(",
"weight",
",",
"(",
"float",
",",
"int",
")",
")",
",",
"\"weight value has to be a positive or negative integer\"",
"self",
".",
"topicPage",
"[",
"\"categories\"",
"]",
".",
"append",
"(",
"{",
"\"uri\"",
":",
"categoryUri",
",",
"\"wgt\"",
":",
"weight",
"}",
")"
] | add a relevant category to the topic page
@param categoryUri: uri of the category to be added
@param weight: importance of the provided category (typically in range 1 - 50) | [
"add",
"a",
"relevant",
"category",
"to",
"the",
"topic",
"page"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L234-L241 | train |
EventRegistry/event-registry-python | eventregistry/TopicPage.py | TopicPage.addSource | def addSource(self, sourceUri, weight):
"""
add a news source to the topic page
@param sourceUri: uri of the news source to add to the topic page
@param weight: importance of the news source (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be an int or a float"
self.topicPage["sources"].append({"uri": sourceUri, "wgt": weight}) | python | def addSource(self, sourceUri, weight):
"""
add a news source to the topic page
@param sourceUri: uri of the news source to add to the topic page
@param weight: importance of the news source (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be an int or a float"
self.topicPage["sources"].append({"uri": sourceUri, "wgt": weight}) | [
"def",
"addSource",
"(",
"self",
",",
"sourceUri",
",",
"weight",
")",
":",
"assert",
"isinstance",
"(",
"weight",
",",
"(",
"float",
",",
"int",
")",
")",
",",
"\"weight value has to be a positive or negative integer\"",
"self",
".",
"topicPage",
"[",
"\"sources\"",
"]",
".",
"append",
"(",
"{",
"\"uri\"",
":",
"sourceUri",
",",
"\"wgt\"",
":",
"weight",
"}",
")"
] | add a news source to the topic page
@param sourceUri: uri of the news source to add to the topic page
@param weight: importance of the news source (typically in range 1 - 50) | [
"add",
"a",
"news",
"source",
"to",
"the",
"topic",
"page"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L244-L251 | train |
EventRegistry/event-registry-python | eventregistry/TopicPage.py | TopicPage.addSourceLocation | def addSourceLocation(self, sourceLocationUri, weight):
"""
add a list of relevant sources by identifying them by their geographic location
@param sourceLocationUri: uri of the location where the sources should be geographically located
@param weight: importance of the provided list of sources (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be an int or a float"
self.topicPage["sourceLocations"].append({"uri": sourceLocationUri, "wgt": weight}) | python | def addSourceLocation(self, sourceLocationUri, weight):
"""
add a list of relevant sources by identifying them by their geographic location
@param sourceLocationUri: uri of the location where the sources should be geographically located
@param weight: importance of the provided list of sources (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be an int or a float"
self.topicPage["sourceLocations"].append({"uri": sourceLocationUri, "wgt": weight}) | [
"def",
"addSourceLocation",
"(",
"self",
",",
"sourceLocationUri",
",",
"weight",
")",
":",
"assert",
"isinstance",
"(",
"weight",
",",
"(",
"float",
",",
"int",
")",
")",
",",
"\"weight value has to be a positive or negative integer\"",
"self",
".",
"topicPage",
"[",
"\"sourceLocations\"",
"]",
".",
"append",
"(",
"{",
"\"uri\"",
":",
"sourceLocationUri",
",",
"\"wgt\"",
":",
"weight",
"}",
")"
] | add a list of relevant sources by identifying them by their geographic location
@param sourceLocationUri: uri of the location where the sources should be geographically located
@param weight: importance of the provided list of sources (typically in range 1 - 50) | [
"add",
"a",
"list",
"of",
"relevant",
"sources",
"by",
"identifying",
"them",
"by",
"their",
"geographic",
"location"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L254-L261 | train |
EventRegistry/event-registry-python | eventregistry/TopicPage.py | TopicPage.addSourceGroup | def addSourceGroup(self, sourceGroupUri, weight):
"""
add a list of relevant sources by specifying a whole source group to the topic page
@param sourceGroupUri: uri of the source group to add
@param weight: importance of the provided list of sources (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be an int or a float"
self.topicPage["sourceGroups"].append({"uri": sourceGroupUri, "wgt": weight}) | python | def addSourceGroup(self, sourceGroupUri, weight):
"""
add a list of relevant sources by specifying a whole source group to the topic page
@param sourceGroupUri: uri of the source group to add
@param weight: importance of the provided list of sources (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be an int or a float"
self.topicPage["sourceGroups"].append({"uri": sourceGroupUri, "wgt": weight}) | [
"def",
"addSourceGroup",
"(",
"self",
",",
"sourceGroupUri",
",",
"weight",
")",
":",
"assert",
"isinstance",
"(",
"weight",
",",
"(",
"float",
",",
"int",
")",
")",
",",
"\"weight value has to be a positive or negative integer\"",
"self",
".",
"topicPage",
"[",
"\"sourceGroups\"",
"]",
".",
"append",
"(",
"{",
"\"uri\"",
":",
"sourceGroupUri",
",",
"\"wgt\"",
":",
"weight",
"}",
")"
] | add a list of relevant sources by specifying a whole source group to the topic page
@param sourceGroupUri: uri of the source group to add
@param weight: importance of the provided list of sources (typically in range 1 - 50) | [
"add",
"a",
"list",
"of",
"relevant",
"sources",
"by",
"specifying",
"a",
"whole",
"source",
"group",
"to",
"the",
"topic",
"page"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L264-L271 | train |
EventRegistry/event-registry-python | eventregistry/TopicPage.py | TopicPage.addLocation | def addLocation(self, locationUri, weight):
"""
add relevant location to the topic page
@param locationUri: uri of the location to add
@param weight: importance of the provided location (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be an int or a float"
self.topicPage["locations"].append({"uri": locationUri, "wgt": weight}) | python | def addLocation(self, locationUri, weight):
"""
add relevant location to the topic page
@param locationUri: uri of the location to add
@param weight: importance of the provided location (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be an int or a float"
self.topicPage["locations"].append({"uri": locationUri, "wgt": weight}) | [
"def",
"addLocation",
"(",
"self",
",",
"locationUri",
",",
"weight",
")",
":",
"assert",
"isinstance",
"(",
"weight",
",",
"(",
"float",
",",
"int",
")",
")",
",",
"\"weight value has to be a positive or negative integer\"",
"self",
".",
"topicPage",
"[",
"\"locations\"",
"]",
".",
"append",
"(",
"{",
"\"uri\"",
":",
"locationUri",
",",
"\"wgt\"",
":",
"weight",
"}",
")"
] | add relevant location to the topic page
@param locationUri: uri of the location to add
@param weight: importance of the provided location (typically in range 1 - 50) | [
"add",
"relevant",
"location",
"to",
"the",
"topic",
"page"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L274-L281 | train |
EventRegistry/event-registry-python | eventregistry/TopicPage.py | TopicPage.setLanguages | def setLanguages(self, languages):
"""
restrict the results to the list of specified languages
"""
if isinstance(languages, six.string_types):
languages = [languages]
for lang in languages:
assert len(lang) == 3, "Expected to get language in ISO3 code"
self.topicPage["langs"] = languages | python | def setLanguages(self, languages):
"""
restrict the results to the list of specified languages
"""
if isinstance(languages, six.string_types):
languages = [languages]
for lang in languages:
assert len(lang) == 3, "Expected to get language in ISO3 code"
self.topicPage["langs"] = languages | [
"def",
"setLanguages",
"(",
"self",
",",
"languages",
")",
":",
"if",
"isinstance",
"(",
"languages",
",",
"six",
".",
"string_types",
")",
":",
"languages",
"=",
"[",
"languages",
"]",
"for",
"lang",
"in",
"languages",
":",
"assert",
"len",
"(",
"lang",
")",
"==",
"3",
",",
"\"Expected to get language in ISO3 code\"",
"self",
".",
"topicPage",
"[",
"\"langs\"",
"]",
"=",
"languages"
] | restrict the results to the list of specified languages | [
"restrict",
"the",
"results",
"to",
"the",
"list",
"of",
"specified",
"languages"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L284-L292 | train |
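setLanguages accepts either a single code or a list, and asserts that each code is three letters long (an ISO 639-3 code). Continuing the sketch above:

```python
page.setLanguages("eng")           # a single string is wrapped into a list
page.setLanguages(["eng", "deu"])  # an explicit list also works
# page.setLanguages("en")          # would trip the assert: len("en") != 3
```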
EventRegistry/event-registry-python | eventregistry/TopicPage.py | TopicPage.getArticles | def getArticles(self,
page=1,
count=100,
sortBy = "rel",
sortByAsc = False,
returnInfo=ReturnInfo()):
"""
return a list of articles that match the topic page
@param page: which page of the results to return (default: 1)
@param count: number of articles to return (default: 100)
@param sortBy: how are articles sorted. Options: id (internal id), date (publishing date), cosSim (closeness to the event centroid), rel (relevance to the query), sourceImportance (manually curated score of source importance - high value, high importance), sourceImportanceRank (reverse of sourceImportance), sourceAlexaGlobalRank (global rank of the news source), sourceAlexaCountryRank (country rank of the news source), socialScore (total shares on social media), facebookShares (shares on Facebook only)
@param sortByAsc: should the results be sorted in ascending order (True) or descending (False)
@param returnInfo: what details should be included in the returned information
"""
assert page >= 1
assert count <= 100
params = {
"action": "getArticlesForTopicPage",
"resultType": "articles",
"dataType": self.topicPage["dataType"],
"articlesCount": count,
"articlesSortBy": sortBy,
"articlesSortByAsc": sortByAsc,
"page": page,
"topicPage": json.dumps(self.topicPage)
}
params.update(returnInfo.getParams("articles"))
return self.eventRegistry.jsonRequest("/json/article", params) | python | def getArticles(self,
page=1,
count=100,
sortBy = "rel",
sortByAsc = False,
returnInfo=ReturnInfo()):
"""
return a list of articles that match the topic page
@param page: which page of the results to return (default: 1)
@param count: number of articles to return (default: 100)
@param sortBy: how are articles sorted. Options: id (internal id), date (publishing date), cosSim (closeness to the event centroid), rel (relevance to the query), sourceImportance (manually curated score of source importance - high value, high importance), sourceImportanceRank (reverse of sourceImportance), sourceAlexaGlobalRank (global rank of the news source), sourceAlexaCountryRank (country rank of the news source), socialScore (total shares on social media), facebookShares (shares on Facebook only)
@param sortByAsc: should the results be sorted in ascending order (True) or descending (False)
@param returnInfo: what details should be included in the returned information
"""
assert page >= 1
assert count <= 100
params = {
"action": "getArticlesForTopicPage",
"resultType": "articles",
"dataType": self.topicPage["dataType"],
"articlesCount": count,
"articlesSortBy": sortBy,
"articlesSortByAsc": sortByAsc,
"page": page,
"topicPage": json.dumps(self.topicPage)
}
params.update(returnInfo.getParams("articles"))
return self.eventRegistry.jsonRequest("/json/article", params) | [
"def",
"getArticles",
"(",
"self",
",",
"page",
"=",
"1",
",",
"count",
"=",
"100",
",",
"sortBy",
"=",
"\"rel\"",
",",
"sortByAsc",
"=",
"False",
",",
"returnInfo",
"=",
"ReturnInfo",
"(",
")",
")",
":",
"assert",
"page",
">=",
"1",
"assert",
"count",
"<=",
"100",
"params",
"=",
"{",
"\"action\"",
":",
"\"getArticlesForTopicPage\"",
",",
"\"resultType\"",
":",
"\"articles\"",
",",
"\"dataType\"",
":",
"self",
".",
"topicPage",
"[",
"\"dataType\"",
"]",
",",
"\"articlesCount\"",
":",
"count",
",",
"\"articlesSortBy\"",
":",
"sortBy",
",",
"\"articlesSortByAsc\"",
":",
"sortByAsc",
",",
"\"page\"",
":",
"page",
",",
"\"topicPage\"",
":",
"json",
".",
"dumps",
"(",
"self",
".",
"topicPage",
")",
"}",
"params",
".",
"update",
"(",
"returnInfo",
".",
"getParams",
"(",
"\"articles\"",
")",
")",
"return",
"self",
".",
"eventRegistry",
".",
"jsonRequest",
"(",
"\"/json/article\"",
",",
"params",
")"
] | return a list of articles that match the topic page
@param page: which page of the results to return (default: 1)
@param count: number of articles to return (default: 100)
@param sortBy: how are articles sorted. Options: id (internal id), date (publishing date), cosSim (closeness to the event centroid), rel (relevance to the query), sourceImportance (manually curated score of source importance - high value, high importance), sourceImportanceRank (reverse of sourceImportance), sourceAlexaGlobalRank (global rank of the news source), sourceAlexaCountryRank (country rank of the news source), socialScore (total shares on social media), facebookShares (shares on Facebook only)
@param sortByAsc: should the results be sorted in ascending order (True) or descending (False)
@param returnInfo: what details should be included in the returned information | [
"return",
"a",
"list",
"of",
"articles",
"that",
"match",
"the",
"topic",
"page"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L333-L360 | train |
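Because getArticles caps count at 100 per call, fetching a larger result set means paging. A hedged paging loop, continuing the sketch above; the response envelope (`res["articles"]["results"]`) is an assumption based on the usual Event Registry result layout, not something shown in this file:

```python
all_articles = []
for page_no in range(1, 4):          # pages are 1-based, per the assert
    res = page.getArticles(page=page_no, count=100, sortBy="date")
    batch = res.get("articles", {}).get("results", [])  # assumed envelope
    all_articles.extend(batch)
    if len(batch) < 100:             # fewer than a full page: we are done
        break
print(len(all_articles))
```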
EventRegistry/event-registry-python | eventregistry/Query.py | CombinedQuery.AND | def AND(queryArr,
exclude = None):
"""
create a combined query with multiple items on which to perform an AND operation
@param queryArr: a list of items on which to perform an AND operation. Items can be either a CombinedQuery or BaseQuery instances.
@param exclude: an instance of BaseQuery, CombinedQuery or None, used to filter out results matching the other criteria specified in this query
"""
assert isinstance(queryArr, list), "provided argument is not a list"
assert len(queryArr) > 0, "queryArr is an empty list"
q = CombinedQuery()
q.setQueryParam("$and", [])
for item in queryArr:
assert isinstance(item, (CombinedQuery, BaseQuery)), "item in the list was not a CombinedQuery or BaseQuery instance"
q.getQuery()["$and"].append(item.getQuery())
if exclude != None:
assert isinstance(exclude, (CombinedQuery, BaseQuery)), "exclude parameter was not a CombinedQuery or BaseQuery instance"
q.setQueryParam("$not", exclude.getQuery())
return q | python | def AND(queryArr,
exclude = None):
"""
create a combined query with multiple items on which to perform an AND operation
@param queryArr: a list of items on which to perform an AND operation. Items can be either a CombinedQuery or BaseQuery instances.
@param exclude: an instance of BaseQuery, CombinedQuery or None, used to filter out results matching the other criteria specified in this query
"""
assert isinstance(queryArr, list), "provided argument is not a list"
assert len(queryArr) > 0, "queryArr is an empty list"
q = CombinedQuery()
q.setQueryParam("$and", [])
for item in queryArr:
assert isinstance(item, (CombinedQuery, BaseQuery)), "item in the list was not a CombinedQuery or BaseQuery instance"
q.getQuery()["$and"].append(item.getQuery())
if exclude != None:
assert isinstance(exclude, (CombinedQuery, BaseQuery)), "exclude parameter was not a CombinedQuery or BaseQuery instance"
q.setQueryParam("$not", exclude.getQuery())
return q | [
"def",
"AND",
"(",
"queryArr",
",",
"exclude",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"queryArr",
",",
"list",
")",
",",
"\"provided argument as not a list\"",
"assert",
"len",
"(",
"queryArr",
")",
">",
"0",
",",
"\"queryArr had an empty list\"",
"q",
"=",
"CombinedQuery",
"(",
")",
"q",
".",
"setQueryParam",
"(",
"\"$and\"",
",",
"[",
"]",
")",
"for",
"item",
"in",
"queryArr",
":",
"assert",
"isinstance",
"(",
"item",
",",
"(",
"CombinedQuery",
",",
"BaseQuery",
")",
")",
",",
"\"item in the list was not a CombinedQuery or BaseQuery instance\"",
"q",
".",
"getQuery",
"(",
")",
"[",
"\"$and\"",
"]",
".",
"append",
"(",
"item",
".",
"getQuery",
"(",
")",
")",
"if",
"exclude",
"!=",
"None",
":",
"assert",
"isinstance",
"(",
"exclude",
",",
"(",
"CombinedQuery",
",",
"BaseQuery",
")",
")",
",",
"\"exclude parameter was not a CombinedQuery or BaseQuery instance\"",
"q",
".",
"setQueryParam",
"(",
"\"$not\"",
",",
"exclude",
".",
"getQuery",
"(",
")",
")",
"return",
"q"
] | create a combined query with multiple items on which to perform an AND operation
@param queryArr: a list of items on which to perform an AND operation. Items can be either a CombinedQuery or BaseQuery instances.
@param exclude: an instance of BaseQuery, CombinedQuery or None, used to filter out results matching the other criteria specified in this query | [
"create",
"a",
"combined",
"query",
"with",
"multiple",
"items",
"on",
"which",
"to",
"perform",
"an",
"AND",
"operation"
] | 534d20b616de02f5e1cd73665a02d189645dbeb6 | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/Query.py#L121-L138 | train |
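AND builds a `$and` list of sub-queries plus an optional `$not` branch. A hedged sketch of composing one; the BaseQuery keyword arguments shown (`keyword=...`, `sourceUri=...`) are assumptions about the rest of Query.py, and the source URI is a placeholder:

```python
from eventregistry import BaseQuery, CombinedQuery

q = CombinedQuery.AND(
    [BaseQuery(keyword="artificial intelligence"),
     BaseQuery(keyword="regulation")],
    exclude=BaseQuery(sourceUri="tabloid.example.com"))

# Mirrors the structure assembled above:
# {"$and": [<query>, <query>], "$not": <query>}
print(q.getQuery())
```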
postlund/pyatv | pyatv/mrp/pairing.py | MrpPairingProcedure.start_pairing | async def start_pairing(self):
"""Start pairing procedure."""
self.srp.initialize()
msg = messages.crypto_pairing({
tlv8.TLV_METHOD: b'\x00',
tlv8.TLV_SEQ_NO: b'\x01'})
resp = await self.protocol.send_and_receive(
msg, generate_identifier=False)
pairing_data = _get_pairing_data(resp)
if tlv8.TLV_BACK_OFF in pairing_data:
time = int.from_bytes(
pairing_data[tlv8.TLV_BACK_OFF], byteorder='big')
raise Exception('back off {0}s'.format(time))
self._atv_salt = pairing_data[tlv8.TLV_SALT]
self._atv_pub_key = pairing_data[tlv8.TLV_PUBLIC_KEY] | python | async def start_pairing(self):
"""Start pairing procedure."""
self.srp.initialize()
msg = messages.crypto_pairing({
tlv8.TLV_METHOD: b'\x00',
tlv8.TLV_SEQ_NO: b'\x01'})
resp = await self.protocol.send_and_receive(
msg, generate_identifier=False)
pairing_data = _get_pairing_data(resp)
if tlv8.TLV_BACK_OFF in pairing_data:
time = int.from_bytes(
pairing_data[tlv8.TLV_BACK_OFF], byteorder='big')
raise Exception('back off {0}s'.format(time))
self._atv_salt = pairing_data[tlv8.TLV_SALT]
self._atv_pub_key = pairing_data[tlv8.TLV_PUBLIC_KEY] | [
"async",
"def",
"start_pairing",
"(",
"self",
")",
":",
"self",
".",
"srp",
".",
"initialize",
"(",
")",
"msg",
"=",
"messages",
".",
"crypto_pairing",
"(",
"{",
"tlv8",
".",
"TLV_METHOD",
":",
"b'\\x00'",
",",
"tlv8",
".",
"TLV_SEQ_NO",
":",
"b'\\x01'",
"}",
")",
"resp",
"=",
"await",
"self",
".",
"protocol",
".",
"send_and_receive",
"(",
"msg",
",",
"generate_identifier",
"=",
"False",
")",
"pairing_data",
"=",
"_get_pairing_data",
"(",
"resp",
")",
"if",
"tlv8",
".",
"TLV_BACK_OFF",
"in",
"pairing_data",
":",
"time",
"=",
"int",
".",
"from_bytes",
"(",
"pairing_data",
"[",
"tlv8",
".",
"TLV_BACK_OFF",
"]",
",",
"byteorder",
"=",
"'big'",
")",
"raise",
"Exception",
"(",
"'back off {0}s'",
".",
"format",
"(",
"time",
")",
")",
"self",
".",
"_atv_salt",
"=",
"pairing_data",
"[",
"tlv8",
".",
"TLV_SALT",
"]",
"self",
".",
"_atv_pub_key",
"=",
"pairing_data",
"[",
"tlv8",
".",
"TLV_PUBLIC_KEY",
"]"
] | Start pairing procedure. | [
"Start",
"pairing",
"procedure",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/mrp/pairing.py#L27-L45 | train |
postlund/pyatv | pyatv/mrp/pairing.py | MrpPairingProcedure.finish_pairing | async def finish_pairing(self, pin):
"""Finish pairing process."""
self.srp.step1(pin)
pub_key, proof = self.srp.step2(self._atv_pub_key, self._atv_salt)
msg = messages.crypto_pairing({
tlv8.TLV_SEQ_NO: b'\x03',
tlv8.TLV_PUBLIC_KEY: pub_key,
tlv8.TLV_PROOF: proof})
resp = await self.protocol.send_and_receive(
msg, generate_identifier=False)
pairing_data = _get_pairing_data(resp)
atv_proof = pairing_data[tlv8.TLV_PROOF]
log_binary(_LOGGER, 'Device', Proof=atv_proof)
encrypted_data = self.srp.step3()
msg = messages.crypto_pairing({
tlv8.TLV_SEQ_NO: b'\x05',
tlv8.TLV_ENCRYPTED_DATA: encrypted_data})
resp = await self.protocol.send_and_receive(
msg, generate_identifier=False)
pairing_data = _get_pairing_data(resp)
encrypted_data = pairing_data[tlv8.TLV_ENCRYPTED_DATA]
return self.srp.step4(encrypted_data) | python | async def finish_pairing(self, pin):
"""Finish pairing process."""
self.srp.step1(pin)
pub_key, proof = self.srp.step2(self._atv_pub_key, self._atv_salt)
msg = messages.crypto_pairing({
tlv8.TLV_SEQ_NO: b'\x03',
tlv8.TLV_PUBLIC_KEY: pub_key,
tlv8.TLV_PROOF: proof})
resp = await self.protocol.send_and_receive(
msg, generate_identifier=False)
pairing_data = _get_pairing_data(resp)
atv_proof = pairing_data[tlv8.TLV_PROOF]
log_binary(_LOGGER, 'Device', Proof=atv_proof)
encrypted_data = self.srp.step3()
msg = messages.crypto_pairing({
tlv8.TLV_SEQ_NO: b'\x05',
tlv8.TLV_ENCRYPTED_DATA: encrypted_data})
resp = await self.protocol.send_and_receive(
msg, generate_identifier=False)
pairing_data = _get_pairing_data(resp)
encrypted_data = pairing_data[tlv8.TLV_ENCRYPTED_DATA]
return self.srp.step4(encrypted_data) | [
"async",
"def",
"finish_pairing",
"(",
"self",
",",
"pin",
")",
":",
"self",
".",
"srp",
".",
"step1",
"(",
"pin",
")",
"pub_key",
",",
"proof",
"=",
"self",
".",
"srp",
".",
"step2",
"(",
"self",
".",
"_atv_pub_key",
",",
"self",
".",
"_atv_salt",
")",
"msg",
"=",
"messages",
".",
"crypto_pairing",
"(",
"{",
"tlv8",
".",
"TLV_SEQ_NO",
":",
"b'\\x03'",
",",
"tlv8",
".",
"TLV_PUBLIC_KEY",
":",
"pub_key",
",",
"tlv8",
".",
"TLV_PROOF",
":",
"proof",
"}",
")",
"resp",
"=",
"await",
"self",
".",
"protocol",
".",
"send_and_receive",
"(",
"msg",
",",
"generate_identifier",
"=",
"False",
")",
"pairing_data",
"=",
"_get_pairing_data",
"(",
"resp",
")",
"atv_proof",
"=",
"pairing_data",
"[",
"tlv8",
".",
"TLV_PROOF",
"]",
"log_binary",
"(",
"_LOGGER",
",",
"'Device'",
",",
"Proof",
"=",
"atv_proof",
")",
"encrypted_data",
"=",
"self",
".",
"srp",
".",
"step3",
"(",
")",
"msg",
"=",
"messages",
".",
"crypto_pairing",
"(",
"{",
"tlv8",
".",
"TLV_SEQ_NO",
":",
"b'\\x05'",
",",
"tlv8",
".",
"TLV_ENCRYPTED_DATA",
":",
"encrypted_data",
"}",
")",
"resp",
"=",
"await",
"self",
".",
"protocol",
".",
"send_and_receive",
"(",
"msg",
",",
"generate_identifier",
"=",
"False",
")",
"pairing_data",
"=",
"_get_pairing_data",
"(",
"resp",
")",
"encrypted_data",
"=",
"pairing_data",
"[",
"tlv8",
".",
"TLV_ENCRYPTED_DATA",
"]",
"return",
"self",
".",
"srp",
".",
"step4",
"(",
"encrypted_data",
")"
] | Finish pairing process. | [
"Finish",
"pairing",
"process",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/mrp/pairing.py#L47-L74 | train |
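Together, start_pairing and finish_pairing implement the two halves of what looks like a HomeKit-style SRP exchange: the first round fetches the salt and server public key, the second exchanges proofs and encrypted identity data. A hedged driver for the flow; `procedure` stands for an already-constructed MrpPairingProcedure and `read_pin` for any coroutine returning the PIN shown on the Apple TV:

```python
async def pair_device(procedure, read_pin):
    await procedure.start_pairing()             # fetch salt + server pub key
    pin = await read_pin()                      # PIN displayed on the TV
    return await procedure.finish_pairing(pin)  # proofs + identity exchange
```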
postlund/pyatv | pyatv/mrp/pairing.py | MrpPairingVerifier.verify_credentials | async def verify_credentials(self):
"""Verify credentials with device."""
_, public_key = self.srp.initialize()
msg = messages.crypto_pairing({
tlv8.TLV_SEQ_NO: b'\x01',
tlv8.TLV_PUBLIC_KEY: public_key})
resp = await self.protocol.send_and_receive(
msg, generate_identifier=False)
resp = _get_pairing_data(resp)
session_pub_key = resp[tlv8.TLV_PUBLIC_KEY]
encrypted = resp[tlv8.TLV_ENCRYPTED_DATA]
log_binary(_LOGGER,
'Device',
Public=self.credentials.ltpk,
Encrypted=encrypted)
encrypted_data = self.srp.verify1(
self.credentials, session_pub_key, encrypted)
msg = messages.crypto_pairing({
tlv8.TLV_SEQ_NO: b'\x03',
tlv8.TLV_ENCRYPTED_DATA: encrypted_data})
resp = await self.protocol.send_and_receive(
msg, generate_identifier=False)
# TODO: check status code
self._output_key, self._input_key = self.srp.verify2() | python | async def verify_credentials(self):
"""Verify credentials with device."""
_, public_key = self.srp.initialize()
msg = messages.crypto_pairing({
tlv8.TLV_SEQ_NO: b'\x01',
tlv8.TLV_PUBLIC_KEY: public_key})
resp = await self.protocol.send_and_receive(
msg, generate_identifier=False)
resp = _get_pairing_data(resp)
session_pub_key = resp[tlv8.TLV_PUBLIC_KEY]
encrypted = resp[tlv8.TLV_ENCRYPTED_DATA]
log_binary(_LOGGER,
'Device',
Public=self.credentials.ltpk,
Encrypted=encrypted)
encrypted_data = self.srp.verify1(
self.credentials, session_pub_key, encrypted)
msg = messages.crypto_pairing({
tlv8.TLV_SEQ_NO: b'\x03',
tlv8.TLV_ENCRYPTED_DATA: encrypted_data})
resp = await self.protocol.send_and_receive(
msg, generate_identifier=False)
# TODO: check status code
self._output_key, self._input_key = self.srp.verify2() | [
"async",
"def",
"verify_credentials",
"(",
"self",
")",
":",
"_",
",",
"public_key",
"=",
"self",
".",
"srp",
".",
"initialize",
"(",
")",
"msg",
"=",
"messages",
".",
"crypto_pairing",
"(",
"{",
"tlv8",
".",
"TLV_SEQ_NO",
":",
"b'\\x01'",
",",
"tlv8",
".",
"TLV_PUBLIC_KEY",
":",
"public_key",
"}",
")",
"resp",
"=",
"await",
"self",
".",
"protocol",
".",
"send_and_receive",
"(",
"msg",
",",
"generate_identifier",
"=",
"False",
")",
"resp",
"=",
"_get_pairing_data",
"(",
"resp",
")",
"session_pub_key",
"=",
"resp",
"[",
"tlv8",
".",
"TLV_PUBLIC_KEY",
"]",
"encrypted",
"=",
"resp",
"[",
"tlv8",
".",
"TLV_ENCRYPTED_DATA",
"]",
"log_binary",
"(",
"_LOGGER",
",",
"'Device'",
",",
"Public",
"=",
"self",
".",
"credentials",
".",
"ltpk",
",",
"Encrypted",
"=",
"encrypted",
")",
"encrypted_data",
"=",
"self",
".",
"srp",
".",
"verify1",
"(",
"self",
".",
"credentials",
",",
"session_pub_key",
",",
"encrypted",
")",
"msg",
"=",
"messages",
".",
"crypto_pairing",
"(",
"{",
"tlv8",
".",
"TLV_SEQ_NO",
":",
"b'\\x03'",
",",
"tlv8",
".",
"TLV_ENCRYPTED_DATA",
":",
"encrypted_data",
"}",
")",
"resp",
"=",
"await",
"self",
".",
"protocol",
".",
"send_and_receive",
"(",
"msg",
",",
"generate_identifier",
"=",
"False",
")",
"# TODO: check status code",
"self",
".",
"_output_key",
",",
"self",
".",
"_input_key",
"=",
"self",
".",
"srp",
".",
"verify2",
"(",
")"
] | Verify credentials with device. | [
"Verify",
"credentials",
"with",
"device",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/mrp/pairing.py#L88-L116 | train |
postlund/pyatv | pyatv/dmap/tag_definitions.py | lookup_tag | def lookup_tag(name):
"""Look up a tag based on its key. Returns a DmapTag."""
return next((_TAGS[t] for t in _TAGS if t == name),
DmapTag(_read_unknown, 'unknown tag')) | python | def lookup_tag(name):
"""Look up a tag based on its key. Returns a DmapTag."""
return next((_TAGS[t] for t in _TAGS if t == name),
DmapTag(_read_unknown, 'unknown tag')) | [
"def",
"lookup_tag",
"(",
"name",
")",
":",
"return",
"next",
"(",
"(",
"_TAGS",
"[",
"t",
"]",
"for",
"t",
"in",
"_TAGS",
"if",
"t",
"==",
"name",
")",
",",
"DmapTag",
"(",
"_read_unknown",
",",
"'unknown tag'",
")",
")"
] | Look up a tag based on its key. Returns a DmapTag. | [
"Look",
"up",
"a",
"tag",
"based",
"on",
"its",
"key",
".",
"Returns",
"a",
"DmapTag",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/dmap/tag_definitions.py#L105-L108 | train |
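Since _TAGS is a dict keyed by tag name, the next(...) idiom above does a linear scan and constructs the fallback DmapTag on every call; it is behaviorally equivalent to this simpler constant-time lookup:

```python
def lookup_tag_simple(name):
    # Same result as lookup_tag, without iterating over every key.
    return _TAGS.get(name, DmapTag(_read_unknown, 'unknown tag'))
```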
postlund/pyatv | pyatv/__init__.py | connect_to_apple_tv | def connect_to_apple_tv(details, loop, protocol=None, session=None):
"""Connect and logins to an Apple TV."""
service = _get_service_used_to_connect(details, protocol)
# If no session is given, create a default one
if session is None:
session = ClientSession(loop=loop)
# AirPlay service is the same for both DMAP and MRP
airplay = _setup_airplay(loop, session, details)
# Create correct implementation depending on protocol
if service.protocol == PROTOCOL_DMAP:
return DmapAppleTV(loop, session, details, airplay)
return MrpAppleTV(loop, session, details, airplay) | python | def connect_to_apple_tv(details, loop, protocol=None, session=None):
"""Connect and logins to an Apple TV."""
service = _get_service_used_to_connect(details, protocol)
# If no session is given, create a default one
if session is None:
session = ClientSession(loop=loop)
# AirPlay service is the same for both DMAP and MRP
airplay = _setup_airplay(loop, session, details)
# Create correct implementation depending on protocol
if service.protocol == PROTOCOL_DMAP:
return DmapAppleTV(loop, session, details, airplay)
return MrpAppleTV(loop, session, details, airplay) | [
"def",
"connect_to_apple_tv",
"(",
"details",
",",
"loop",
",",
"protocol",
"=",
"None",
",",
"session",
"=",
"None",
")",
":",
"service",
"=",
"_get_service_used_to_connect",
"(",
"details",
",",
"protocol",
")",
"# If no session is given, create a default one",
"if",
"session",
"is",
"None",
":",
"session",
"=",
"ClientSession",
"(",
"loop",
"=",
"loop",
")",
"# AirPlay service is the same for both DMAP and MRP",
"airplay",
"=",
"_setup_airplay",
"(",
"loop",
",",
"session",
",",
"details",
")",
"# Create correct implementation depending on protocol",
"if",
"service",
".",
"protocol",
"==",
"PROTOCOL_DMAP",
":",
"return",
"DmapAppleTV",
"(",
"loop",
",",
"session",
",",
"details",
",",
"airplay",
")",
"return",
"MrpAppleTV",
"(",
"loop",
",",
"session",
",",
"details",
",",
"airplay",
")"
] | Connect and log in to an Apple TV. | [
"Connect",
"and",
"logins",
"to",
"an",
"Apple",
"TV",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__init__.py#L165-L180 | train |
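A hedged usage sketch: forcing the MRP implementation and supplying a caller-owned aiohttp session rather than the default one created inside connect_to_apple_tv. The import location of PROTOCOL_MRP is an assumption:

```python
from aiohttp import ClientSession
from pyatv import connect_to_apple_tv
from pyatv.const import PROTOCOL_MRP  # assumed location of the constant

def connect_with_mrp(details, loop):
    session = ClientSession(loop=loop)  # reused instead of the default
    return connect_to_apple_tv(details, loop,
                               protocol=PROTOCOL_MRP, session=session)
```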
postlund/pyatv | pyatv/__init__.py | _ServiceListener.add_service | def add_service(self, zeroconf, service_type, name):
"""Handle callback from zeroconf when a service has been discovered."""
self.lock.acquire()
try:
self._internal_add(zeroconf, service_type, name)
finally:
self.lock.release() | python | def add_service(self, zeroconf, service_type, name):
"""Handle callback from zeroconf when a service has been discovered."""
self.lock.acquire()
try:
self._internal_add(zeroconf, service_type, name)
finally:
self.lock.release() | [
"def",
"add_service",
"(",
"self",
",",
"zeroconf",
",",
"service_type",
",",
"name",
")",
":",
"self",
".",
"lock",
".",
"acquire",
"(",
")",
"try",
":",
"self",
".",
"_internal_add",
"(",
"zeroconf",
",",
"service_type",
",",
"name",
")",
"finally",
":",
"self",
".",
"lock",
".",
"release",
"(",
")"
] | Handle callback from zeroconf when a service has been discovered. | [
"Handle",
"callback",
"from",
"zeroconf",
"when",
"a",
"service",
"has",
"been",
"discovered",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__init__.py#L43-L49 | train |
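The acquire/try/finally sequence is the manual form of a context manager; since threading locks support the `with` protocol, an equivalent and more idiomatic shape would be:

```python
def add_service(self, zeroconf, service_type, name):
    """Handle callback from zeroconf when a service has been discovered."""
    with self.lock:  # released automatically, even if _internal_add raises
        self._internal_add(zeroconf, service_type, name)
```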
postlund/pyatv | pyatv/__init__.py | _ServiceListener.add_hs_service | def add_hs_service(self, info, address):
"""Add a new device to discovered list."""
if self.protocol and self.protocol != PROTOCOL_DMAP:
return
name = info.properties[b'Name'].decode('utf-8')
hsgid = info.properties[b'hG'].decode('utf-8')
self._handle_service(
address, name, conf.DmapService(hsgid, port=info.port)) | python | def add_hs_service(self, info, address):
"""Add a new device to discovered list."""
if self.protocol and self.protocol != PROTOCOL_DMAP:
return
name = info.properties[b'Name'].decode('utf-8')
hsgid = info.properties[b'hG'].decode('utf-8')
self._handle_service(
address, name, conf.DmapService(hsgid, port=info.port)) | [
"def",
"add_hs_service",
"(",
"self",
",",
"info",
",",
"address",
")",
":",
"if",
"self",
".",
"protocol",
"and",
"self",
".",
"protocol",
"!=",
"PROTOCOL_DMAP",
":",
"return",
"name",
"=",
"info",
".",
"properties",
"[",
"b'Name'",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
"hsgid",
"=",
"info",
".",
"properties",
"[",
"b'hG'",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
"self",
".",
"_handle_service",
"(",
"address",
",",
"name",
",",
"conf",
".",
"DmapService",
"(",
"hsgid",
",",
"port",
"=",
"info",
".",
"port",
")",
")"
] | Add a new device to discovered list. | [
"Add",
"a",
"new",
"device",
"to",
"discovered",
"list",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__init__.py#L75-L83 | train |
postlund/pyatv | pyatv/__init__.py | _ServiceListener.add_non_hs_service | def add_non_hs_service(self, info, address):
"""Add a new device without Home Sharing to discovered list."""
if self.protocol and self.protocol != PROTOCOL_DMAP:
return
name = info.properties[b'CtlN'].decode('utf-8')
self._handle_service(
address, name, conf.DmapService(None, port=info.port)) | python | def add_non_hs_service(self, info, address):
"""Add a new device without Home Sharing to discovered list."""
if self.protocol and self.protocol != PROTOCOL_DMAP:
return
name = info.properties[b'CtlN'].decode('utf-8')
self._handle_service(
address, name, conf.DmapService(None, port=info.port)) | [
"def",
"add_non_hs_service",
"(",
"self",
",",
"info",
",",
"address",
")",
":",
"if",
"self",
".",
"protocol",
"and",
"self",
".",
"protocol",
"!=",
"PROTOCOL_DMAP",
":",
"return",
"name",
"=",
"info",
".",
"properties",
"[",
"b'CtlN'",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
"self",
".",
"_handle_service",
"(",
"address",
",",
"name",
",",
"conf",
".",
"DmapService",
"(",
"None",
",",
"port",
"=",
"info",
".",
"port",
")",
")"
] | Add a new device without Home Sharing to discovered list. | [
"Add",
"a",
"new",
"device",
"without",
"Home",
"Sharing",
"to",
"discovered",
"list",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__init__.py#L85-L92 | train |
postlund/pyatv | pyatv/__init__.py | _ServiceListener.add_mrp_service | def add_mrp_service(self, info, address):
"""Add a new MediaRemoteProtocol device to discovered list."""
if self.protocol and self.protocol != PROTOCOL_MRP:
return
name = info.properties[b'Name'].decode('utf-8')
self._handle_service(address, name, conf.MrpService(info.port)) | python | def add_mrp_service(self, info, address):
"""Add a new MediaRemoteProtocol device to discovered list."""
if self.protocol and self.protocol != PROTOCOL_MRP:
return
name = info.properties[b'Name'].decode('utf-8')
self._handle_service(address, name, conf.MrpService(info.port)) | [
"def",
"add_mrp_service",
"(",
"self",
",",
"info",
",",
"address",
")",
":",
"if",
"self",
".",
"protocol",
"and",
"self",
".",
"protocol",
"!=",
"PROTOCOL_MRP",
":",
"return",
"name",
"=",
"info",
".",
"properties",
"[",
"b'Name'",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
"self",
".",
"_handle_service",
"(",
"address",
",",
"name",
",",
"conf",
".",
"MrpService",
"(",
"info",
".",
"port",
")",
")"
] | Add a new MediaRemoteProtocol device to discovered list. | [
"Add",
"a",
"new",
"MediaRemoteProtocol",
"device",
"to",
"discovered",
"list",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__init__.py#L94-L100 | train |
postlund/pyatv | pyatv/__init__.py | _ServiceListener.add_airplay_service | def add_airplay_service(self, info, address):
"""Add a new AirPlay device to discovered list."""
name = info.name.replace('._airplay._tcp.local.', '')
self._handle_service(address, name, conf.AirPlayService(info.port)) | python | def add_airplay_service(self, info, address):
"""Add a new AirPlay device to discovered list."""
name = info.name.replace('._airplay._tcp.local.', '')
self._handle_service(address, name, conf.AirPlayService(info.port)) | [
"def",
"add_airplay_service",
"(",
"self",
",",
"info",
",",
"address",
")",
":",
"name",
"=",
"info",
".",
"name",
".",
"replace",
"(",
"'._airplay._tcp.local.'",
",",
"''",
")",
"self",
".",
"_handle_service",
"(",
"address",
",",
"name",
",",
"conf",
".",
"AirPlayService",
"(",
"info",
".",
"port",
")",
")"
] | Add a new AirPlay device to discovered list. | [
"Add",
"a",
"new",
"AirPlay",
"device",
"to",
"discovered",
"list",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__init__.py#L102-L105 | train |
postlund/pyatv | pyatv/conf.py | AppleTV.usable_service | def usable_service(self):
"""Return a usable service or None if there is none.
A service is usable if it has enough configuration to establish a
connection. If several protocols are usable, MRP will be
preferred over DMAP.
"""
services = self._services
for protocol in self._supported_protocols:
if protocol in services and services[protocol].is_usable():
return services[protocol]
return None | python | def usable_service(self):
"""Return a usable service or None if there is none.
A service is usable if it has enough configuration to establish a
connection. If several protocols are usable, MRP will be
preferred over DMAP.
"""
services = self._services
for protocol in self._supported_protocols:
if protocol in services and services[protocol].is_usable():
return services[protocol]
return None | [
"def",
"usable_service",
"(",
"self",
")",
":",
"services",
"=",
"self",
".",
"_services",
"for",
"protocol",
"in",
"self",
".",
"_supported_protocols",
":",
"if",
"protocol",
"in",
"services",
"and",
"services",
"[",
"protocol",
"]",
".",
"is_usable",
"(",
")",
":",
"return",
"services",
"[",
"protocol",
"]",
"return",
"None"
] | Return a usable service or None if there is none.
A service is usable if it has enough configuration to establish a
connection. If several protocols are usable, MRP will be
preferred over DMAP. | [
"Return",
"a",
"usable",
"service",
"or",
"None",
"if",
"there",
"is",
"none",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/conf.py#L49-L61 | train |
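A hedged sketch of the precedence rule; the AppleTV and service constructors follow their use elsewhere in this document, and it assumes a DMAP service with credentials counts as usable:

```python
atv = conf.AppleTV('10.0.0.2', 'Living Room')   # assumed ctor signature
atv.add_service(conf.MrpService(49152))
atv.add_service(conf.DmapService('0x1122334455667788'))

# Both services are usable, so MRP wins by _supported_protocols ordering.
assert atv.usable_service().protocol == PROTOCOL_MRP
```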
postlund/pyatv | pyatv/conf.py | DmapService.superseeded_by | def superseeded_by(self, other_service):
"""Return True if input service has login id and this has not."""
if not other_service or \
other_service.__class__ != self.__class__ or \
other_service.protocol != self.protocol or \
other_service.port != self.port:
return False
# If this service does not have a login id but the other one does, then
# we should return True here
return not self.device_credentials and other_service.device_credentials | python | def superseeded_by(self, other_service):
"""Return True if input service has login id and this has not."""
if not other_service or \
other_service.__class__ != self.__class__ or \
other_service.protocol != self.protocol or \
other_service.port != self.port:
return False
# If this service does not have a login id but the other one does, then
# we should return True here
return not self.device_credentials and other_service.device_credentials | [
"def",
"superseeded_by",
"(",
"self",
",",
"other_service",
")",
":",
"if",
"not",
"other_service",
"or",
"other_service",
".",
"__class__",
"!=",
"self",
".",
"__class__",
"or",
"other_service",
".",
"protocol",
"!=",
"self",
".",
"protocol",
"or",
"other_service",
".",
"port",
"!=",
"self",
".",
"port",
":",
"return",
"False",
"# If this service does not have a login id but the other one does, then",
"# we should return True here",
"return",
"not",
"self",
".",
"device_credentials",
"and",
"other_service",
".",
"device_credentials"
] | Return True if input service has login id and this does not. | [
"Return",
"True",
"if",
"input",
"service",
"has",
"login",
"id",
"and",
"this",
"has",
"not",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/conf.py#L130-L140 | train |
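A hedged example of the replacement rule: same class, protocol and port, but only the newer service carries credentials, so only the upgrade direction holds:

```python
plain = conf.DmapService(None, port=3689)
with_creds = conf.DmapService('0x1122334455667788', port=3689)

assert plain.superseeded_by(with_creds)        # upgrade is allowed
assert not with_creds.superseeded_by(plain)    # never downgrade
```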
postlund/pyatv | examples/autodiscover.py | print_what_is_playing | async def print_what_is_playing(loop):
"""Find a device and print what is playing."""
print('Discovering devices on network...')
atvs = await pyatv.scan_for_apple_tvs(loop, timeout=5)
if not atvs:
print('no device found', file=sys.stderr)
return
print('Connecting to {0}'.format(atvs[0].address))
atv = pyatv.connect_to_apple_tv(atvs[0], loop)
try:
playing = await atv.metadata.playing()
print('Currently playing:')
print(playing)
finally:
# Do not forget to logout
await atv.logout() | python | async def print_what_is_playing(loop):
"""Find a device and print what is playing."""
print('Discovering devices on network...')
atvs = await pyatv.scan_for_apple_tvs(loop, timeout=5)
if not atvs:
print('no device found', file=sys.stderr)
return
print('Connecting to {0}'.format(atvs[0].address))
atv = pyatv.connect_to_apple_tv(atvs[0], loop)
try:
playing = await atv.metadata.playing()
print('Currently playing:')
print(playing)
finally:
# Do not forget to logout
await atv.logout() | [
"async",
"def",
"print_what_is_playing",
"(",
"loop",
")",
":",
"print",
"(",
"'Discovering devices on network...'",
")",
"atvs",
"=",
"await",
"pyatv",
".",
"scan_for_apple_tvs",
"(",
"loop",
",",
"timeout",
"=",
"5",
")",
"if",
"not",
"atvs",
":",
"print",
"(",
"'no device found'",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"return",
"print",
"(",
"'Connecting to {0}'",
".",
"format",
"(",
"atvs",
"[",
"0",
"]",
".",
"address",
")",
")",
"atv",
"=",
"pyatv",
".",
"connect_to_apple_tv",
"(",
"atvs",
"[",
"0",
"]",
",",
"loop",
")",
"try",
":",
"playing",
"=",
"await",
"atv",
".",
"metadata",
".",
"playing",
"(",
")",
"print",
"(",
"'Currently playing:'",
")",
"print",
"(",
"playing",
")",
"finally",
":",
"# Do not forget to logout",
"await",
"atv",
".",
"logout",
"(",
")"
] | Find a device and print what is playing. | [
"Find",
"a",
"device",
"and",
"print",
"what",
"is",
"playing",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/examples/autodiscover.py#L11-L29 | train |
postlund/pyatv | pyatv/dmap/pairing.py | DmapPairingHandler.start | async def start(self, **kwargs):
"""Start the pairing server and publish service."""
zeroconf = kwargs['zeroconf']
self._name = kwargs['name']
self._pairing_guid = kwargs.get('pairing_guid', None) or \
self._generate_random_guid()
self._web_server = web.Server(self.handle_request, loop=self._loop)
self._server = await self._loop.create_server(
self._web_server, '0.0.0.0')
# Get the allocated (random) port and include it in the zeroconf service
allocated_port = self._server.sockets[0].getsockname()[1]
_LOGGER.debug('Started pairing web server at port %d', allocated_port)
self._setup_zeroconf(zeroconf, allocated_port) | python | async def start(self, **kwargs):
"""Start the pairing server and publish service."""
zeroconf = kwargs['zeroconf']
self._name = kwargs['name']
self._pairing_guid = kwargs.get('pairing_guid', None) or \
self._generate_random_guid()
self._web_server = web.Server(self.handle_request, loop=self._loop)
self._server = await self._loop.create_server(
self._web_server, '0.0.0.0')
# Get the allocated (random) port and include it in the zeroconf service
allocated_port = self._server.sockets[0].getsockname()[1]
_LOGGER.debug('Started pairing web server at port %d', allocated_port)
self._setup_zeroconf(zeroconf, allocated_port) | [
"async",
"def",
"start",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"zeroconf",
"=",
"kwargs",
"[",
"'zeroconf'",
"]",
"self",
".",
"_name",
"=",
"kwargs",
"[",
"'name'",
"]",
"self",
".",
"_pairing_guid",
"=",
"kwargs",
".",
"get",
"(",
"'pairing_guid'",
",",
"None",
")",
"or",
"self",
".",
"_generate_random_guid",
"(",
")",
"self",
".",
"_web_server",
"=",
"web",
".",
"Server",
"(",
"self",
".",
"handle_request",
",",
"loop",
"=",
"self",
".",
"_loop",
")",
"self",
".",
"_server",
"=",
"await",
"self",
".",
"_loop",
".",
"create_server",
"(",
"self",
".",
"_web_server",
",",
"'0.0.0.0'",
")",
"# Get the allocated (random port) and include it in zeroconf service",
"allocated_port",
"=",
"self",
".",
"_server",
".",
"sockets",
"[",
"0",
"]",
".",
"getsockname",
"(",
")",
"[",
"1",
"]",
"_LOGGER",
".",
"debug",
"(",
"'Started pairing web server at port %d'",
",",
"allocated_port",
")",
"self",
".",
"_setup_zeroconf",
"(",
"zeroconf",
",",
"allocated_port",
")"
] | Start the pairing server and publish service. | [
"Start",
"the",
"pairing",
"server",
"and",
"publish",
"service",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/dmap/pairing.py#L71-L86 | train |
postlund/pyatv | pyatv/dmap/pairing.py | DmapPairingHandler.stop | async def stop(self, **kwargs):
"""Stop pairing server and unpublish service."""
_LOGGER.debug('Shutting down pairing server')
if self._web_server is not None:
await self._web_server.shutdown()
self._server.close()
if self._server is not None:
await self._server.wait_closed() | python | async def stop(self, **kwargs):
"""Stop pairing server and unpublish service."""
_LOGGER.debug('Shutting down pairing server')
if self._web_server is not None:
await self._web_server.shutdown()
self._server.close()
if self._server is not None:
await self._server.wait_closed() | [
"async",
"def",
"stop",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"'Shutting down pairing server'",
")",
"if",
"self",
".",
"_web_server",
"is",
"not",
"None",
":",
"await",
"self",
".",
"_web_server",
".",
"shutdown",
"(",
")",
"self",
".",
"_server",
".",
"close",
"(",
")",
"if",
"self",
".",
"_server",
"is",
"not",
"None",
":",
"await",
"self",
".",
"_server",
".",
"wait_closed",
"(",
")"
] | Stop pairing server and unpublish service. | [
"Stop",
"pairing",
"server",
"and",
"unpublish",
"service",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/dmap/pairing.py#L88-L96 | train |
postlund/pyatv | pyatv/dmap/pairing.py | DmapPairingHandler.handle_request | async def handle_request(self, request):
"""Respond to request if PIN is correct."""
service_name = request.rel_url.query['servicename']
received_code = request.rel_url.query['pairingcode'].lower()
_LOGGER.info('Got pairing request from %s with code %s',
service_name, received_code)
if self._verify_pin(received_code):
cmpg = tags.uint64_tag('cmpg', int(self._pairing_guid, 16))
cmnm = tags.string_tag('cmnm', self._name)
cmty = tags.string_tag('cmty', 'iPhone')
response = tags.container_tag('cmpa', cmpg + cmnm + cmty)
self._has_paired = True
return web.Response(body=response)
# Code did not match, generate an error
return web.Response(status=500) | python | async def handle_request(self, request):
"""Respond to request if PIN is correct."""
service_name = request.rel_url.query['servicename']
received_code = request.rel_url.query['pairingcode'].lower()
_LOGGER.info('Got pairing request from %s with code %s',
service_name, received_code)
if self._verify_pin(received_code):
cmpg = tags.uint64_tag('cmpg', int(self._pairing_guid, 16))
cmnm = tags.string_tag('cmnm', self._name)
cmty = tags.string_tag('cmty', 'iPhone')
response = tags.container_tag('cmpa', cmpg + cmnm + cmty)
self._has_paired = True
return web.Response(body=response)
# Code did not match, generate an error
return web.Response(status=500) | [
"async",
"def",
"handle_request",
"(",
"self",
",",
"request",
")",
":",
"service_name",
"=",
"request",
".",
"rel_url",
".",
"query",
"[",
"'servicename'",
"]",
"received_code",
"=",
"request",
".",
"rel_url",
".",
"query",
"[",
"'pairingcode'",
"]",
".",
"lower",
"(",
")",
"_LOGGER",
".",
"info",
"(",
"'Got pairing request from %s with code %s'",
",",
"service_name",
",",
"received_code",
")",
"if",
"self",
".",
"_verify_pin",
"(",
"received_code",
")",
":",
"cmpg",
"=",
"tags",
".",
"uint64_tag",
"(",
"'cmpg'",
",",
"int",
"(",
"self",
".",
"_pairing_guid",
",",
"16",
")",
")",
"cmnm",
"=",
"tags",
".",
"string_tag",
"(",
"'cmnm'",
",",
"self",
".",
"_name",
")",
"cmty",
"=",
"tags",
".",
"string_tag",
"(",
"'cmty'",
",",
"'iPhone'",
")",
"response",
"=",
"tags",
".",
"container_tag",
"(",
"'cmpa'",
",",
"cmpg",
"+",
"cmnm",
"+",
"cmty",
")",
"self",
".",
"_has_paired",
"=",
"True",
"return",
"web",
".",
"Response",
"(",
"body",
"=",
"response",
")",
"# Code did not match, generate an error",
"return",
"web",
".",
"Response",
"(",
"status",
"=",
"500",
")"
] | Respond to request if PIN is correct. | [
"Respond",
"to",
"request",
"if",
"PIN",
"is",
"correct",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/dmap/pairing.py#L129-L145 | train |
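The server answers GET requests carrying pairingcode and servicename query parameters, replying with a cmpa DMAP container on a match and HTTP 500 otherwise. A hedged sketch of the request the device would send; the query-string shape is inferred from the parameters read in handle_request, and the /pair path is an assumption:

```python
import aiohttp

async def send_pairing_request(host, port, code, service):
    url = ('http://{0}:{1}/pair?pairingcode={2}&servicename={3}'
           .format(host, port, code, service))
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            # 200 with a cmpa body means the PIN matched; 500 means it did not.
            return resp.status == 200
```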
postlund/pyatv | pyatv/log.py | log_binary | def log_binary(logger, message, **kwargs):
"""Log binary data if debug is enabled."""
if logger.isEnabledFor(logging.DEBUG):
output = ('{0}={1}'.format(k, binascii.hexlify(
bytearray(v)).decode()) for k, v in sorted(kwargs.items()))
logger.debug('%s (%s)', message, ', '.join(output)) | python | def log_binary(logger, message, **kwargs):
"""Log binary data if debug is enabled."""
if logger.isEnabledFor(logging.DEBUG):
output = ('{0}={1}'.format(k, binascii.hexlify(
bytearray(v)).decode()) for k, v in sorted(kwargs.items()))
logger.debug('%s (%s)', message, ', '.join(output)) | [
"def",
"log_binary",
"(",
"logger",
",",
"message",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"logger",
".",
"isEnabledFor",
"(",
"logging",
".",
"DEBUG",
")",
":",
"output",
"=",
"(",
"'{0}={1}'",
".",
"format",
"(",
"k",
",",
"binascii",
".",
"hexlify",
"(",
"bytearray",
"(",
"v",
")",
")",
".",
"decode",
"(",
")",
")",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"kwargs",
".",
"items",
"(",
")",
")",
")",
"logger",
".",
"debug",
"(",
"'%s (%s)'",
",",
"message",
",",
"', '",
".",
"join",
"(",
"output",
")",
")"
] | Log binary data if debug is enabled. | [
"Log",
"binary",
"data",
"if",
"debug",
"is",
"enabled",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/log.py#L8-L13 | train |
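A usage sketch: keys come out sorted, and thanks to the isEnabledFor guard the hexlify work is skipped entirely unless DEBUG logging is enabled:

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('pyatv.demo')

log_binary(logger, 'Handshake', Salt=b'\x01\x02', Key=b'\xaa\xbb')
# -> Handshake (Key=aabb, Salt=0102)
```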
postlund/pyatv | pyatv/__main__.py | _extract_command_with_args | def _extract_command_with_args(cmd):
"""Parse input command with arguments.
Parses the input command in such a way that the user may
provide additional arguments to the command. The format used is this:
command=arg1,arg2,arg3,...
all the additional arguments are passed as arguments to the target
method.
"""
def _isint(value):
try:
int(value)
return True
except ValueError:
return False
equal_sign = cmd.find('=')
if equal_sign == -1:
return cmd, []
command = cmd[0:equal_sign]
args = cmd[equal_sign+1:].split(',')
converted = [x if not _isint(x) else int(x) for x in args]
return command, converted | python | def _extract_command_with_args(cmd):
"""Parse input command with arguments.
Parses the input command in such a way that the user may
provide additional arguments to the command. The format used is this:
command=arg1,arg2,arg3,...
all the additional arguments are passed as arguments to the target
method.
"""
def _isint(value):
try:
int(value)
return True
except ValueError:
return False
equal_sign = cmd.find('=')
if equal_sign == -1:
return cmd, []
command = cmd[0:equal_sign]
args = cmd[equal_sign+1:].split(',')
converted = [x if not _isint(x) else int(x) for x in args]
return command, converted | [
"def",
"_extract_command_with_args",
"(",
"cmd",
")",
":",
"def",
"_isint",
"(",
"value",
")",
":",
"try",
":",
"int",
"(",
"value",
")",
"return",
"True",
"except",
"ValueError",
":",
"return",
"False",
"equal_sign",
"=",
"cmd",
".",
"find",
"(",
"'='",
")",
"if",
"equal_sign",
"==",
"-",
"1",
":",
"return",
"cmd",
",",
"[",
"]",
"command",
"=",
"cmd",
"[",
"0",
":",
"equal_sign",
"]",
"args",
"=",
"cmd",
"[",
"equal_sign",
"+",
"1",
":",
"]",
".",
"split",
"(",
"','",
")",
"converted",
"=",
"[",
"x",
"if",
"not",
"_isint",
"(",
"x",
")",
"else",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"args",
"]",
"return",
"command",
",",
"converted"
] | Parse input command with arguments.
Parses the input command in such a way that the user may
provide additional arguments to the command. The format used is this:
command=arg1,arg2,arg3,...
all the additional arguments are passed as arguments to the target
method. | [
"Parse",
"input",
"command",
"with",
"arguments",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__main__.py#L362-L385 | train |
postlund/pyatv | pyatv/__main__.py | main | def main():
"""Start the asyncio event loop and runs the application."""
# Helper method so that the coroutine exits cleanly if an exception
# happens (which would leave resources dangling)
async def _run_application(loop):
try:
return await cli_handler(loop)
except KeyboardInterrupt:
pass # User pressed Ctrl+C, just ignore it
except SystemExit:
pass # sys.exit() was used - do nothing
except: # pylint: disable=bare-except # noqa
import traceback
traceback.print_exc(file=sys.stderr)
sys.stderr.writelines(
'\n>>> An error occurred, full stack trace above\n')
return 1
try:
loop = asyncio.get_event_loop()
return loop.run_until_complete(_run_application(loop))
except KeyboardInterrupt:
pass
return 1 | python | def main():
"""Start the asyncio event loop and runs the application."""
# Helper method so that the coroutine exits cleanly if an exception
# happens (which would leave resources dangling)
async def _run_application(loop):
try:
return await cli_handler(loop)
except KeyboardInterrupt:
pass # User pressed Ctrl+C, just ignore it
except SystemExit:
pass # sys.exit() was used - do nothing
except: # pylint: disable=bare-except # noqa
import traceback
traceback.print_exc(file=sys.stderr)
sys.stderr.writelines(
'\n>>> An error occurred, full stack trace above\n')
return 1
try:
loop = asyncio.get_event_loop()
return loop.run_until_complete(_run_application(loop))
except KeyboardInterrupt:
pass
return 1 | [
"def",
"main",
"(",
")",
":",
"# Helper method so that the coroutine exits cleanly if an exception",
"# happens (which would leave resources dangling)",
"async",
"def",
"_run_application",
"(",
"loop",
")",
":",
"try",
":",
"return",
"await",
"cli_handler",
"(",
"loop",
")",
"except",
"KeyboardInterrupt",
":",
"pass",
"# User pressed Ctrl+C, just ignore it",
"except",
"SystemExit",
":",
"pass",
"# sys.exit() was used - do nothing",
"except",
":",
"# pylint: disable=bare-except # noqa",
"import",
"traceback",
"traceback",
".",
"print_exc",
"(",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"stderr",
".",
"writelines",
"(",
"'\\n>>> An error occurred, full stack trace above\\n'",
")",
"return",
"1",
"try",
":",
"loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"return",
"loop",
".",
"run_until_complete",
"(",
"_run_application",
"(",
"loop",
")",
")",
"except",
"KeyboardInterrupt",
":",
"pass",
"return",
"1"
] | Start the asyncio event loop and run the application. | [
"Start",
"the",
"asyncio",
"event",
"loop",
"and",
"runs",
"the",
"application",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__main__.py#L484-L513 | train |
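Typical entry-point wiring for this pattern, so the integer returned by main reaches the shell; a sketch of what the bottom of the module would look like (sys is already imported there):

```python
if __name__ == '__main__':
    sys.exit(main())
```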
postlund/pyatv | pyatv/__main__.py | GlobalCommands.commands | async def commands(self):
"""Print a list with available commands."""
_print_commands('Remote control', interface.RemoteControl)
_print_commands('Metadata', interface.Metadata)
_print_commands('Playing', interface.Playing)
_print_commands('AirPlay', interface.AirPlay)
_print_commands('Device', DeviceCommands)
_print_commands('Global', self.__class__)
return 0 | python | async def commands(self):
"""Print a list with available commands."""
_print_commands('Remote control', interface.RemoteControl)
_print_commands('Metadata', interface.Metadata)
_print_commands('Playing', interface.Playing)
_print_commands('AirPlay', interface.AirPlay)
_print_commands('Device', DeviceCommands)
_print_commands('Global', self.__class__)
return 0 | [
"async",
"def",
"commands",
"(",
"self",
")",
":",
"_print_commands",
"(",
"'Remote control'",
",",
"interface",
".",
"RemoteControl",
")",
"_print_commands",
"(",
"'Metadata'",
",",
"interface",
".",
"Metadata",
")",
"_print_commands",
"(",
"'Playing'",
",",
"interface",
".",
"Playing",
")",
"_print_commands",
"(",
"'AirPlay'",
",",
"interface",
".",
"AirPlay",
")",
"_print_commands",
"(",
"'Device'",
",",
"DeviceCommands",
")",
"_print_commands",
"(",
"'Global'",
",",
"self",
".",
"__class__",
")",
"return",
"0"
] | Print a list with available commands. | [
"Print",
"a",
"list",
"with",
"available",
"commands",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__main__.py#L43-L52 | train |
postlund/pyatv | pyatv/__main__.py | GlobalCommands.help | async def help(self):
"""Print help text for a command."""
if len(self.args.command) != 2:
print('Which command do you want help with?', file=sys.stderr)
return 1
iface = [interface.RemoteControl,
interface.Metadata,
interface.Playing,
interface.AirPlay,
self.__class__,
DeviceCommands]
for cmd in iface:
for key, value in cmd.__dict__.items():
if key.startswith('_') or key != self.args.command[1]:
continue
if inspect.isfunction(value):
signature = inspect.signature(value)
else:
signature = ' (property)'
print('COMMAND:\n>> {0}{1}\n\nHELP:\n{2}'.format(
key, signature, inspect.getdoc(value)))
return 0 | python | async def help(self):
"""Print help text for a command."""
if len(self.args.command) != 2:
print('Which command do you want help with?', file=sys.stderr)
return 1
iface = [interface.RemoteControl,
interface.Metadata,
interface.Playing,
interface.AirPlay,
self.__class__,
DeviceCommands]
for cmd in iface:
for key, value in cmd.__dict__.items():
if key.startswith('_') or key != self.args.command[1]:
continue
if inspect.isfunction(value):
signature = inspect.signature(value)
else:
signature = ' (property)'
print('COMMAND:\n>> {0}{1}\n\nHELP:\n{2}'.format(
key, signature, inspect.getdoc(value)))
return 0 | [
"async",
"def",
"help",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"args",
".",
"command",
")",
"!=",
"2",
":",
"print",
"(",
"'Which command do you want help with?'",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"return",
"1",
"iface",
"=",
"[",
"interface",
".",
"RemoteControl",
",",
"interface",
".",
"Metadata",
",",
"interface",
".",
"Playing",
",",
"interface",
".",
"AirPlay",
",",
"self",
".",
"__class__",
",",
"DeviceCommands",
"]",
"for",
"cmd",
"in",
"iface",
":",
"for",
"key",
",",
"value",
"in",
"cmd",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"if",
"key",
".",
"startswith",
"(",
"'_'",
")",
"or",
"key",
"!=",
"self",
".",
"args",
".",
"command",
"[",
"1",
"]",
":",
"continue",
"if",
"inspect",
".",
"isfunction",
"(",
"value",
")",
":",
"signature",
"=",
"inspect",
".",
"signature",
"(",
"value",
")",
"else",
":",
"signature",
"=",
"' (property)'",
"print",
"(",
"'COMMAND:\\n>> {0}{1}\\n\\nHELP:\\n{2}'",
".",
"format",
"(",
"key",
",",
"signature",
",",
"inspect",
".",
"getdoc",
"(",
"value",
")",
")",
")",
"return",
"0"
] | Print help text for a command. | [
"Print",
"help",
"text",
"for",
"a",
"command",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__main__.py#L54-L78 | train |
postlund/pyatv | pyatv/__main__.py | GlobalCommands.scan | async def scan(self):
"""Scan for Apple TVs on the network."""
atvs = await pyatv.scan_for_apple_tvs(
self.loop, timeout=self.args.scan_timeout, only_usable=False)
_print_found_apple_tvs(atvs)
return 0 | python | async def scan(self):
"""Scan for Apple TVs on the network."""
atvs = await pyatv.scan_for_apple_tvs(
self.loop, timeout=self.args.scan_timeout, only_usable=False)
_print_found_apple_tvs(atvs)
return 0 | [
"async",
"def",
"scan",
"(",
"self",
")",
":",
"atvs",
"=",
"await",
"pyatv",
".",
"scan_for_apple_tvs",
"(",
"self",
".",
"loop",
",",
"timeout",
"=",
"self",
".",
"args",
".",
"scan_timeout",
",",
"only_usable",
"=",
"False",
")",
"_print_found_apple_tvs",
"(",
"atvs",
")",
"return",
"0"
] | Scan for Apple TVs on the network. | [
"Scan",
"for",
"Apple",
"TVs",
"on",
"the",
"network",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__main__.py#L80-L86 | train |
postlund/pyatv | pyatv/__main__.py | DeviceCommands.cli | async def cli(self):
"""Enter commands in a simple CLI."""
print('Enter commands and press enter')
print('Type help for help and exit to quit')
while True:
command = await _read_input(self.loop, 'pyatv> ')
if command.lower() == 'exit':
break
elif command == 'cli':
print('Command not available here')
continue
await _handle_device_command(
self.args, command, self.atv, self.loop) | python | async def cli(self):
"""Enter commands in a simple CLI."""
print('Enter commands and press enter')
print('Type help for help and exit to quit')
while True:
command = await _read_input(self.loop, 'pyatv> ')
if command.lower() == 'exit':
break
elif command == 'cli':
print('Command not available here')
continue
await _handle_device_command(
self.args, command, self.atv, self.loop) | [
"async",
"def",
"cli",
"(",
"self",
")",
":",
"print",
"(",
"'Enter commands and press enter'",
")",
"print",
"(",
"'Type help for help and exit to quit'",
")",
"while",
"True",
":",
"command",
"=",
"await",
"_read_input",
"(",
"self",
".",
"loop",
",",
"'pyatv> '",
")",
"if",
"command",
".",
"lower",
"(",
")",
"==",
"'exit'",
":",
"break",
"elif",
"command",
"==",
"'cli'",
":",
"print",
"(",
"'Command not availble here'",
")",
"continue",
"await",
"_handle_device_command",
"(",
"self",
".",
"args",
",",
"command",
",",
"self",
".",
"atv",
",",
"self",
".",
"loop",
")"
] | Enter commands in a simple CLI. | [
"Enter",
"commands",
"in",
"a",
"simple",
"CLI",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__main__.py#L101-L115 | train |
postlund/pyatv | pyatv/__main__.py | DeviceCommands.artwork_save | async def artwork_save(self):
"""Download artwork and save it to artwork.png."""
artwork = await self.atv.metadata.artwork()
if artwork is not None:
with open('artwork.png', 'wb') as file:
file.write(artwork)
else:
print('No artwork is currently available.')
return 1
return 0 | python | async def artwork_save(self):
"""Download artwork and save it to artwork.png."""
artwork = await self.atv.metadata.artwork()
if artwork is not None:
with open('artwork.png', 'wb') as file:
file.write(artwork)
else:
print('No artwork is currently available.')
return 1
return 0 | [
"async",
"def",
"artwork_save",
"(",
"self",
")",
":",
"artwork",
"=",
"await",
"self",
".",
"atv",
".",
"metadata",
".",
"artwork",
"(",
")",
"if",
"artwork",
"is",
"not",
"None",
":",
"with",
"open",
"(",
"'artwork.png'",
",",
"'wb'",
")",
"as",
"file",
":",
"file",
".",
"write",
"(",
"artwork",
")",
"else",
":",
"print",
"(",
"'No artwork is currently available.'",
")",
"return",
"1",
"return",
"0"
] | Download artwork and save it to artwork.png. | [
"Download",
"artwork",
"and",
"save",
"it",
"to",
"artwork",
".",
"png",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__main__.py#L117-L126 | train |
postlund/pyatv | pyatv/__main__.py | DeviceCommands.push_updates | async def push_updates(self):
"""Listen for push updates."""
print('Press ENTER to stop')
self.atv.push_updater.start()
await self.atv.login()
await self.loop.run_in_executor(None, sys.stdin.readline)
self.atv.push_updater.stop()
return 0 | python | async def push_updates(self):
"""Listen for push updates."""
print('Press ENTER to stop')
self.atv.push_updater.start()
await self.atv.login()
await self.loop.run_in_executor(None, sys.stdin.readline)
self.atv.push_updater.stop()
return 0 | [
"async",
"def",
"push_updates",
"(",
"self",
")",
":",
"print",
"(",
"'Press ENTER to stop'",
")",
"self",
".",
"atv",
".",
"push_updater",
".",
"start",
"(",
")",
"await",
"self",
".",
"atv",
".",
"login",
"(",
")",
"await",
"self",
".",
"loop",
".",
"run_in_executor",
"(",
"None",
",",
"sys",
".",
"stdin",
".",
"readline",
")",
"self",
".",
"atv",
".",
"push_updater",
".",
"stop",
"(",
")",
"return",
"0"
] | Listen for push updates. | [
"Listen",
"for",
"push",
"updates",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__main__.py#L128-L136 | train |
postlund/pyatv | pyatv/__main__.py | DeviceCommands.auth | async def auth(self):
"""Perform AirPlay device authentication."""
credentials = await self.atv.airplay.generate_credentials()
await self.atv.airplay.load_credentials(credentials)
try:
await self.atv.airplay.start_authentication()
pin = await _read_input(self.loop, 'Enter PIN on screen: ')
await self.atv.airplay.finish_authentication(pin)
print('You may now use these credentials:')
print(credentials)
return 0
except exceptions.DeviceAuthenticationError:
logging.exception('Failed to authenticate - invalid PIN?')
return 1 | python | async def auth(self):
"""Perform AirPlay device authentication."""
credentials = await self.atv.airplay.generate_credentials()
await self.atv.airplay.load_credentials(credentials)
try:
await self.atv.airplay.start_authentication()
pin = await _read_input(self.loop, 'Enter PIN on screen: ')
await self.atv.airplay.finish_authentication(pin)
print('You may now use these credentials:')
print(credentials)
return 0
except exceptions.DeviceAuthenticationError:
logging.exception('Failed to authenticate - invalid PIN?')
return 1 | [
"async",
"def",
"auth",
"(",
"self",
")",
":",
"credentials",
"=",
"await",
"self",
".",
"atv",
".",
"airplay",
".",
"generate_credentials",
"(",
")",
"await",
"self",
".",
"atv",
".",
"airplay",
".",
"load_credentials",
"(",
"credentials",
")",
"try",
":",
"await",
"self",
".",
"atv",
".",
"airplay",
".",
"start_authentication",
"(",
")",
"pin",
"=",
"await",
"_read_input",
"(",
"self",
".",
"loop",
",",
"'Enter PIN on screen: '",
")",
"await",
"self",
".",
"atv",
".",
"airplay",
".",
"finish_authentication",
"(",
"pin",
")",
"print",
"(",
"'You may now use these credentials:'",
")",
"print",
"(",
"credentials",
")",
"return",
"0",
"except",
"exceptions",
".",
"DeviceAuthenticationError",
":",
"logging",
".",
"exception",
"(",
"'Failed to authenticate - invalid PIN?'",
")",
"return",
"1"
] | Perform AirPlay device authentication. | [
"Perform",
"AirPlay",
"device",
"authentication",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__main__.py#L138-L154 | train |
postlund/pyatv | pyatv/__main__.py | DeviceCommands.pair | async def pair(self):
"""Pair pyatv as a remote control with an Apple TV."""
# Connect using the specified protocol
# TODO: config should be stored elsewhere so that API is same for both
protocol = self.atv.service.protocol
if protocol == const.PROTOCOL_DMAP:
await self.atv.pairing.start(zeroconf=Zeroconf(),
name=self.args.remote_name,
pairing_guid=self.args.pairing_guid)
elif protocol == const.PROTOCOL_MRP:
await self.atv.pairing.start()
# Ask for PIN if present or just wait for pairing to end
if self.atv.pairing.device_provides_pin:
pin = await _read_input(self.loop, 'Enter PIN on screen: ')
self.atv.pairing.pin(pin)
else:
self.atv.pairing.pin(self.args.pin_code)
if self.args.pin_code is None:
print('Use any pin to pair with "{}" (press ENTER to stop)'.format(
self.args.remote_name))
else:
print('Use pin {} to pair with "{}" (press ENTER to stop)'.format(
self.args.pin_code, self.args.remote_name))
await self.loop.run_in_executor(None, sys.stdin.readline)
await self.atv.pairing.stop()
# Give some feedback to the user
if self.atv.pairing.has_paired:
print('Pairing seems to have succeeded, yey!')
print('You may now use these credentials: {0}'.format(
self.atv.pairing.credentials))
else:
print('Pairing failed!')
return 1
return 0 | python | async def pair(self):
"""Pair pyatv as a remote control with an Apple TV."""
# Connect using the specified protocol
# TODO: config should be stored elsewhere so that API is same for both
protocol = self.atv.service.protocol
if protocol == const.PROTOCOL_DMAP:
await self.atv.pairing.start(zeroconf=Zeroconf(),
name=self.args.remote_name,
pairing_guid=self.args.pairing_guid)
elif protocol == const.PROTOCOL_MRP:
await self.atv.pairing.start()
# Ask for PIN if present or just wait for pairing to end
if self.atv.pairing.device_provides_pin:
pin = await _read_input(self.loop, 'Enter PIN on screen: ')
self.atv.pairing.pin(pin)
else:
self.atv.pairing.pin(self.args.pin_code)
if self.args.pin_code is None:
print('Use any pin to pair with "{}" (press ENTER to stop)'.format(
self.args.remote_name))
else:
print('Use pin {} to pair with "{}" (press ENTER to stop)'.format(
self.args.pin_code, self.args.remote_name))
await self.loop.run_in_executor(None, sys.stdin.readline)
await self.atv.pairing.stop()
# Give some feedback to the user
if self.atv.pairing.has_paired:
print('Pairing seems to have succeeded, yey!')
print('You may now use these credentials: {0}'.format(
self.atv.pairing.credentials))
else:
print('Pairing failed!')
return 1
return 0 | [
"async",
"def",
"pair",
"(",
"self",
")",
":",
"# Connect using the specified protocol",
"# TODO: config should be stored elsewhere so that API is same for both",
"protocol",
"=",
"self",
".",
"atv",
".",
"service",
".",
"protocol",
"if",
"protocol",
"==",
"const",
".",
"PROTOCOL_DMAP",
":",
"await",
"self",
".",
"atv",
".",
"pairing",
".",
"start",
"(",
"zeroconf",
"=",
"Zeroconf",
"(",
")",
",",
"name",
"=",
"self",
".",
"args",
".",
"remote_name",
",",
"pairing_guid",
"=",
"self",
".",
"args",
".",
"pairing_guid",
")",
"elif",
"protocol",
"==",
"const",
".",
"PROTOCOL_MRP",
":",
"await",
"self",
".",
"atv",
".",
"pairing",
".",
"start",
"(",
")",
"# Ask for PIN if present or just wait for pairing to end",
"if",
"self",
".",
"atv",
".",
"pairing",
".",
"device_provides_pin",
":",
"pin",
"=",
"await",
"_read_input",
"(",
"self",
".",
"loop",
",",
"'Enter PIN on screen: '",
")",
"self",
".",
"atv",
".",
"pairing",
".",
"pin",
"(",
"pin",
")",
"else",
":",
"self",
".",
"atv",
".",
"pairing",
".",
"pin",
"(",
"self",
".",
"args",
".",
"pin_code",
")",
"print",
"(",
"'Use {0} to pair with \"{1}\" (press ENTER to stop)'",
".",
"format",
"(",
"self",
".",
"args",
".",
"pin_code",
",",
"self",
".",
"args",
".",
"remote_name",
")",
")",
"if",
"self",
".",
"args",
".",
"pin_code",
"is",
"None",
":",
"print",
"(",
"'Use any pin to pair with \"{}\" (press ENTER to stop)'",
".",
"format",
"(",
"self",
".",
"args",
".",
"remote_name",
")",
")",
"else",
":",
"print",
"(",
"'Use pin {} to pair with \"{}\" (press ENTER to stop)'",
".",
"format",
"(",
"self",
".",
"args",
".",
"pin_code",
",",
"self",
".",
"args",
".",
"remote_name",
")",
")",
"await",
"self",
".",
"loop",
".",
"run_in_executor",
"(",
"None",
",",
"sys",
".",
"stdin",
".",
"readline",
")",
"await",
"self",
".",
"atv",
".",
"pairing",
".",
"stop",
"(",
")",
"# Give some feedback to the user",
"if",
"self",
".",
"atv",
".",
"pairing",
".",
"has_paired",
":",
"print",
"(",
"'Pairing seems to have succeeded, yey!'",
")",
"print",
"(",
"'You may now use these credentials: {0}'",
".",
"format",
"(",
"self",
".",
"atv",
".",
"pairing",
".",
"credentials",
")",
")",
"else",
":",
"print",
"(",
"'Pairing failed!'",
")",
"return",
"1",
"return",
"0"
] | Pair pyatv as a remote control with an Apple TV. | [
"Pair",
"pyatv",
"as",
"a",
"remote",
"control",
"with",
"an",
"Apple",
"TV",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__main__.py#L156-L198 | train |
postlund/pyatv | pyatv/convert.py | media_kind | def media_kind(kind):
"""Convert iTunes media kind to API representation."""
if kind in [1]:
return const.MEDIA_TYPE_UNKNOWN
if kind in [3, 7, 11, 12, 13, 18, 32]:
return const.MEDIA_TYPE_VIDEO
if kind in [2, 4, 10, 14, 17, 21, 36]:
return const.MEDIA_TYPE_MUSIC
if kind in [8, 64]:
return const.MEDIA_TYPE_TV
raise exceptions.UnknownMediaKind('Unknown media kind: ' + str(kind)) | python | def media_kind(kind):
"""Convert iTunes media kind to API representation."""
if kind in [1]:
return const.MEDIA_TYPE_UNKNOWN
if kind in [3, 7, 11, 12, 13, 18, 32]:
return const.MEDIA_TYPE_VIDEO
if kind in [2, 4, 10, 14, 17, 21, 36]:
return const.MEDIA_TYPE_MUSIC
if kind in [8, 64]:
return const.MEDIA_TYPE_TV
raise exceptions.UnknownMediaKind('Unknown media kind: ' + str(kind)) | [
"def",
"media_kind",
"(",
"kind",
")",
":",
"if",
"kind",
"in",
"[",
"1",
"]",
":",
"return",
"const",
".",
"MEDIA_TYPE_UNKNOWN",
"if",
"kind",
"in",
"[",
"3",
",",
"7",
",",
"11",
",",
"12",
",",
"13",
",",
"18",
",",
"32",
"]",
":",
"return",
"const",
".",
"MEDIA_TYPE_VIDEO",
"if",
"kind",
"in",
"[",
"2",
",",
"4",
",",
"10",
",",
"14",
",",
"17",
",",
"21",
",",
"36",
"]",
":",
"return",
"const",
".",
"MEDIA_TYPE_MUSIC",
"if",
"kind",
"in",
"[",
"8",
",",
"64",
"]",
":",
"return",
"const",
".",
"MEDIA_TYPE_TV",
"raise",
"exceptions",
".",
"UnknownMediaKind",
"(",
"'Unknown media kind: '",
"+",
"str",
"(",
"kind",
")",
")"
] | Convert iTunes media kind to API representation. | [
"Convert",
"iTunes",
"media",
"kind",
"to",
"API",
"representation",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/convert.py#L6-L17 | train |
postlund/pyatv | pyatv/convert.py | media_type_str | def media_type_str(mediatype):
"""Convert internal API media type to string."""
if mediatype == const.MEDIA_TYPE_UNKNOWN:
return 'Unknown'
if mediatype == const.MEDIA_TYPE_VIDEO:
return 'Video'
if mediatype == const.MEDIA_TYPE_MUSIC:
return 'Music'
if mediatype == const.MEDIA_TYPE_TV:
return 'TV'
return 'Unsupported' | python | def media_type_str(mediatype):
"""Convert internal API media type to string."""
if mediatype == const.MEDIA_TYPE_UNKNOWN:
return 'Unknown'
if mediatype == const.MEDIA_TYPE_VIDEO:
return 'Video'
if mediatype == const.MEDIA_TYPE_MUSIC:
return 'Music'
if mediatype == const.MEDIA_TYPE_TV:
return 'TV'
return 'Unsupported' | [
"def",
"media_type_str",
"(",
"mediatype",
")",
":",
"if",
"mediatype",
"==",
"const",
".",
"MEDIA_TYPE_UNKNOWN",
":",
"return",
"'Unknown'",
"if",
"mediatype",
"==",
"const",
".",
"MEDIA_TYPE_VIDEO",
":",
"return",
"'Video'",
"if",
"mediatype",
"==",
"const",
".",
"MEDIA_TYPE_MUSIC",
":",
"return",
"'Music'",
"if",
"mediatype",
"==",
"const",
".",
"MEDIA_TYPE_TV",
":",
"return",
"'TV'",
"return",
"'Unsupported'"
] | Convert internal API media type to string. | [
"Convert",
"internal",
"API",
"media",
"type",
"to",
"string",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/convert.py#L20-L30 | train |
postlund/pyatv | pyatv/convert.py | playstate | def playstate(state):
"""Convert iTunes playstate to API representation."""
# pylint: disable=too-many-return-statements
if state is None:
return const.PLAY_STATE_NO_MEDIA
if state == 0:
return const.PLAY_STATE_IDLE
if state == 1:
return const.PLAY_STATE_LOADING
if state == 3:
return const.PLAY_STATE_PAUSED
if state == 4:
return const.PLAY_STATE_PLAYING
if state == 5:
return const.PLAY_STATE_FAST_FORWARD
if state == 6:
return const.PLAY_STATE_FAST_BACKWARD
raise exceptions.UnknownPlayState('Unknown playstate: ' + str(state)) | python | def playstate(state):
"""Convert iTunes playstate to API representation."""
# pylint: disable=too-many-return-statements
if state is None:
return const.PLAY_STATE_NO_MEDIA
if state == 0:
return const.PLAY_STATE_IDLE
if state == 1:
return const.PLAY_STATE_LOADING
if state == 3:
return const.PLAY_STATE_PAUSED
if state == 4:
return const.PLAY_STATE_PLAYING
if state == 5:
return const.PLAY_STATE_FAST_FORWARD
if state == 6:
return const.PLAY_STATE_FAST_BACKWARD
raise exceptions.UnknownPlayState('Unknown playstate: ' + str(state)) | [
"def",
"playstate",
"(",
"state",
")",
":",
"# pylint: disable=too-many-return-statements",
"if",
"state",
"is",
"None",
":",
"return",
"const",
".",
"PLAY_STATE_NO_MEDIA",
"if",
"state",
"==",
"0",
":",
"return",
"const",
".",
"PLAY_STATE_IDLE",
"if",
"state",
"==",
"1",
":",
"return",
"const",
".",
"PLAY_STATE_LOADING",
"if",
"state",
"==",
"3",
":",
"return",
"const",
".",
"PLAY_STATE_PAUSED",
"if",
"state",
"==",
"4",
":",
"return",
"const",
".",
"PLAY_STATE_PLAYING",
"if",
"state",
"==",
"5",
":",
"return",
"const",
".",
"PLAY_STATE_FAST_FORWARD",
"if",
"state",
"==",
"6",
":",
"return",
"const",
".",
"PLAY_STATE_FAST_BACKWARD",
"raise",
"exceptions",
".",
"UnknownPlayState",
"(",
"'Unknown playstate: '",
"+",
"str",
"(",
"state",
")",
")"
] | Convert iTunes playstate to API representation. | [
"Convert",
"iTunes",
"playstate",
"to",
"API",
"representation",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/convert.py#L33-L51 | train |
postlund/pyatv | pyatv/convert.py | playstate_str | def playstate_str(state):
"""Convert internal API playstate to string."""
if state == const.PLAY_STATE_NO_MEDIA:
return 'No media'
if state == const.PLAY_STATE_IDLE:
return 'Idle'
if state == const.PLAY_STATE_LOADING:
return 'Loading'
if state == const.PLAY_STATE_PAUSED:
return 'Paused'
if state == const.PLAY_STATE_PLAYING:
return 'Playing'
if state == const.PLAY_STATE_FAST_FORWARD:
return 'Fast forward'
if state == const.PLAY_STATE_FAST_BACKWARD:
return 'Fast backward'
return 'Unsupported' | python | def playstate_str(state):
"""Convert internal API playstate to string."""
if state == const.PLAY_STATE_NO_MEDIA:
return 'No media'
if state == const.PLAY_STATE_IDLE:
return 'Idle'
if state == const.PLAY_STATE_LOADING:
return 'Loading'
if state == const.PLAY_STATE_PAUSED:
return 'Paused'
if state == const.PLAY_STATE_PLAYING:
return 'Playing'
if state == const.PLAY_STATE_FAST_FORWARD:
return 'Fast forward'
if state == const.PLAY_STATE_FAST_BACKWARD:
return 'Fast backward'
return 'Unsupported' | [
"def",
"playstate_str",
"(",
"state",
")",
":",
"if",
"state",
"==",
"const",
".",
"PLAY_STATE_NO_MEDIA",
":",
"return",
"'No media'",
"if",
"state",
"==",
"const",
".",
"PLAY_STATE_IDLE",
":",
"return",
"'Idle'",
"if",
"state",
"==",
"const",
".",
"PLAY_STATE_LOADING",
":",
"return",
"'Loading'",
"if",
"state",
"==",
"const",
".",
"PLAY_STATE_PAUSED",
":",
"return",
"'Paused'",
"if",
"state",
"==",
"const",
".",
"PLAY_STATE_PLAYING",
":",
"return",
"'Playing'",
"if",
"state",
"==",
"const",
".",
"PLAY_STATE_FAST_FORWARD",
":",
"return",
"'Fast forward'",
"if",
"state",
"==",
"const",
".",
"PLAY_STATE_FAST_BACKWARD",
":",
"return",
"'Fast backward'",
"return",
"'Unsupported'"
] | Convert internal API playstate to string. | [
"Convert",
"internal",
"API",
"playstate",
"to",
"string",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/convert.py#L55-L71 | train |
postlund/pyatv | pyatv/convert.py | repeat_str | def repeat_str(state):
"""Convert internal API repeat state to string."""
if state == const.REPEAT_STATE_OFF:
return 'Off'
if state == const.REPEAT_STATE_TRACK:
return 'Track'
if state == const.REPEAT_STATE_ALL:
return 'All'
return 'Unsupported' | python | def repeat_str(state):
"""Convert internal API repeat state to string."""
if state == const.REPEAT_STATE_OFF:
return 'Off'
if state == const.REPEAT_STATE_TRACK:
return 'Track'
if state == const.REPEAT_STATE_ALL:
return 'All'
return 'Unsupported' | [
"def",
"repeat_str",
"(",
"state",
")",
":",
"if",
"state",
"==",
"const",
".",
"REPEAT_STATE_OFF",
":",
"return",
"'Off'",
"if",
"state",
"==",
"const",
".",
"REPEAT_STATE_TRACK",
":",
"return",
"'Track'",
"if",
"state",
"==",
"const",
".",
"REPEAT_STATE_ALL",
":",
"return",
"'All'",
"return",
"'Unsupported'"
] | Convert internal API repeat state to string. | [
"Convert",
"internal",
"API",
"repeat",
"state",
"to",
"string",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/convert.py#L74-L82 | train |
postlund/pyatv | pyatv/convert.py | protocol_str | def protocol_str(protocol):
"""Convert internal API protocol to string."""
if protocol == const.PROTOCOL_MRP:
return 'MRP'
if protocol == const.PROTOCOL_DMAP:
return 'DMAP'
if protocol == const.PROTOCOL_AIRPLAY:
return 'AirPlay'
return 'Unknown' | python | def protocol_str(protocol):
"""Convert internal API protocol to string."""
if protocol == const.PROTOCOL_MRP:
return 'MRP'
if protocol == const.PROTOCOL_DMAP:
return 'DMAP'
if protocol == const.PROTOCOL_AIRPLAY:
return 'AirPlay'
return 'Unknown' | [
"def",
"protocol_str",
"(",
"protocol",
")",
":",
"if",
"protocol",
"==",
"const",
".",
"PROTOCOL_MRP",
":",
"return",
"'MRP'",
"if",
"protocol",
"==",
"const",
".",
"PROTOCOL_DMAP",
":",
"return",
"'DMAP'",
"if",
"protocol",
"==",
"const",
".",
"PROTOCOL_AIRPLAY",
":",
"return",
"'AirPlay'",
"return",
"'Unknown'"
] | Convert internal API protocol to string. | [
"Convert",
"internal",
"API",
"protocol",
"to",
"string",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/convert.py#L96-L104 | train |
postlund/pyatv | pyatv/dmap/parser.py | first | def first(dmap_data, *path):
"""Look up a value given a path in some parsed DMAP data."""
if not (path and isinstance(dmap_data, list)):
return dmap_data
for key in dmap_data:
if path[0] in key:
return first(key[path[0]], *path[1:])
return None | python | def first(dmap_data, *path):
"""Look up a value given a path in some parsed DMAP data."""
if not (path and isinstance(dmap_data, list)):
return dmap_data
for key in dmap_data:
if path[0] in key:
return first(key[path[0]], *path[1:])
return None | [
"def",
"first",
"(",
"dmap_data",
",",
"*",
"path",
")",
":",
"if",
"not",
"(",
"path",
"and",
"isinstance",
"(",
"dmap_data",
",",
"list",
")",
")",
":",
"return",
"dmap_data",
"for",
"key",
"in",
"dmap_data",
":",
"if",
"path",
"[",
"0",
"]",
"in",
"key",
":",
"return",
"first",
"(",
"key",
"[",
"path",
"[",
"0",
"]",
"]",
",",
"*",
"path",
"[",
"1",
":",
"]",
")",
"return",
"None"
] | Look up a value given a path in some parsed DMAP data. | [
"Look",
"up",
"a",
"value",
"given",
"a",
"path",
"in",
"some",
"parsed",
"DMAP",
"data",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/dmap/parser.py#L56-L65 | train |
postlund/pyatv | pyatv/dmap/parser.py | pprint | def pprint(data, tag_lookup, indent=0):
"""Return a pretty formatted string of parsed DMAP data."""
output = ''
if isinstance(data, dict):
for key, value in data.items():
tag = tag_lookup(key)
if isinstance(value, (dict, list)) and tag.type is not read_bplist:
output += '{0}{1}: {2}\n'.format(indent*' ', key, tag)
output += pprint(value, tag_lookup, indent+2)
else:
output += '{0}{1}: {2} {3}\n'.format(
indent*' ', key, str(value), tag)
elif isinstance(data, list):
for elem in data:
output += pprint(elem, tag_lookup, indent)
else:
raise exceptions.InvalidDmapDataError(
'invalid dmap data: ' + str(data))
return output | python | def pprint(data, tag_lookup, indent=0):
"""Return a pretty formatted string of parsed DMAP data."""
output = ''
if isinstance(data, dict):
for key, value in data.items():
tag = tag_lookup(key)
if isinstance(value, (dict, list)) and tag.type is not read_bplist:
output += '{0}{1}: {2}\n'.format(indent*' ', key, tag)
output += pprint(value, tag_lookup, indent+2)
else:
output += '{0}{1}: {2} {3}\n'.format(
indent*' ', key, str(value), tag)
elif isinstance(data, list):
for elem in data:
output += pprint(elem, tag_lookup, indent)
else:
raise exceptions.InvalidDmapDataError(
'invalid dmap data: ' + str(data))
return output | [
"def",
"pprint",
"(",
"data",
",",
"tag_lookup",
",",
"indent",
"=",
"0",
")",
":",
"output",
"=",
"''",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"for",
"key",
",",
"value",
"in",
"data",
".",
"items",
"(",
")",
":",
"tag",
"=",
"tag_lookup",
"(",
"key",
")",
"if",
"isinstance",
"(",
"value",
",",
"(",
"dict",
",",
"list",
")",
")",
"and",
"tag",
".",
"type",
"is",
"not",
"read_bplist",
":",
"output",
"+=",
"'{0}{1}: {2}\\n'",
".",
"format",
"(",
"indent",
"*",
"' '",
",",
"key",
",",
"tag",
")",
"output",
"+=",
"pprint",
"(",
"value",
",",
"tag_lookup",
",",
"indent",
"+",
"2",
")",
"else",
":",
"output",
"+=",
"'{0}{1}: {2} {3}\\n'",
".",
"format",
"(",
"indent",
"*",
"' '",
",",
"key",
",",
"str",
"(",
"value",
")",
",",
"tag",
")",
"elif",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"for",
"elem",
"in",
"data",
":",
"output",
"+=",
"pprint",
"(",
"elem",
",",
"tag_lookup",
",",
"indent",
")",
"else",
":",
"raise",
"exceptions",
".",
"InvalidDmapDataError",
"(",
"'invalid dmap data: '",
"+",
"str",
"(",
"data",
")",
")",
"return",
"output"
] | Return a pretty formatted string of parsed DMAP data. | [
"Return",
"a",
"pretty",
"formatted",
"string",
"of",
"parsed",
"DMAP",
"data",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/dmap/parser.py#L69-L87 | train |
postlund/pyatv | pyatv/interface.py | retrieve_commands | def retrieve_commands(obj):
"""Retrieve all commands and help texts from an API object."""
commands = {} # Name and help
for func in obj.__dict__:
if not inspect.isfunction(obj.__dict__[func]) and \
not isinstance(obj.__dict__[func], property):
continue
if func.startswith('_'):
continue
commands[func] = _get_first_sentence_in_pydoc(
obj.__dict__[func])
return commands | python | def retrieve_commands(obj):
"""Retrieve all commands and help texts from an API object."""
commands = {} # Name and help
for func in obj.__dict__:
if not inspect.isfunction(obj.__dict__[func]) and \
not isinstance(obj.__dict__[func], property):
continue
if func.startswith('_'):
continue
commands[func] = _get_first_sentence_in_pydoc(
obj.__dict__[func])
return commands | [
"def",
"retrieve_commands",
"(",
"obj",
")",
":",
"commands",
"=",
"{",
"}",
"# Name and help",
"for",
"func",
"in",
"obj",
".",
"__dict__",
":",
"if",
"not",
"inspect",
".",
"isfunction",
"(",
"obj",
".",
"__dict__",
"[",
"func",
"]",
")",
"and",
"not",
"isinstance",
"(",
"obj",
".",
"__dict__",
"[",
"func",
"]",
",",
"property",
")",
":",
"continue",
"if",
"func",
".",
"startswith",
"(",
"'_'",
")",
":",
"continue",
"commands",
"[",
"func",
"]",
"=",
"_get_first_sentence_in_pydoc",
"(",
"obj",
".",
"__dict__",
"[",
"func",
"]",
")",
"return",
"commands"
] | Retrieve all commands and help texts from an API object. | [
"Retrieve",
"all",
"commands",
"and",
"help",
"texts",
"from",
"an",
"API",
"object",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/interface.py#L28-L39 | train |
postlund/pyatv | pyatv/interface.py | Playing.hash | def hash(self):
"""Create a unique hash for what is currently playing.
The hash is based on title, artist, album and total time. It should
always be the same for the same content, but it is not guaranteed.
"""
base = '{0}{1}{2}{3}'.format(
self.title, self.artist, self.album, self.total_time)
return hashlib.sha256(base.encode('utf-8')).hexdigest() | python | def hash(self):
"""Create a unique hash for what is currently playing.
The hash is based on title, artist, album and total time. It should
always be the same for the same content, but it is not guaranteed.
"""
base = '{0}{1}{2}{3}'.format(
self.title, self.artist, self.album, self.total_time)
return hashlib.sha256(base.encode('utf-8')).hexdigest() | [
"def",
"hash",
"(",
"self",
")",
":",
"base",
"=",
"'{0}{1}{2}{3}'",
".",
"format",
"(",
"self",
".",
"title",
",",
"self",
".",
"artist",
",",
"self",
".",
"album",
",",
"self",
".",
"total_time",
")",
"return",
"hashlib",
".",
"sha256",
"(",
"base",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")"
] | Create a unique hash for what is currently playing.
The hash is based on title, artist, album and total time. It should
always be the same for the same content, but it is not guaranteed. | [
"Create",
"a",
"unique",
"hash",
"for",
"what",
"is",
"currently",
"playing",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/interface.py#L213-L221 | train |
postlund/pyatv | scripts/autogen_protobuf_extensions.py | extract_message_info | def extract_message_info():
"""Get information about all messages of interest."""
base_path = BASE_PACKAGE.replace('.', '/')
filename = os.path.join(base_path, 'ProtocolMessage.proto')
with open(filename, 'r') as file:
types_found = False
for line in file:
stripped = line.lstrip().rstrip()
# Look for the Type enum
if stripped == 'enum Type {':
types_found = True
continue
elif types_found and stripped == '}':
break
elif not types_found:
continue
constant = stripped.split(' ')[0]
title = constant.title().replace(
'_', '').replace('Hid', 'HID') # Hack...
accessor = title[0].lower() + title[1:]
if not os.path.exists(os.path.join(base_path, title + '.proto')):
continue
yield MessageInfo(
title + '_pb2', title, accessor, constant) | python | def extract_message_info():
"""Get information about all messages of interest."""
base_path = BASE_PACKAGE.replace('.', '/')
filename = os.path.join(base_path, 'ProtocolMessage.proto')
with open(filename, 'r') as file:
types_found = False
for line in file:
stripped = line.lstrip().rstrip()
# Look for the Type enum
if stripped == 'enum Type {':
types_found = True
continue
elif types_found and stripped == '}':
break
elif not types_found:
continue
constant = stripped.split(' ')[0]
title = constant.title().replace(
'_', '').replace('Hid', 'HID') # Hack...
accessor = title[0].lower() + title[1:]
if not os.path.exists(os.path.join(base_path, title + '.proto')):
continue
yield MessageInfo(
title + '_pb2', title, accessor, constant) | [
"def",
"extract_message_info",
"(",
")",
":",
"base_path",
"=",
"BASE_PACKAGE",
".",
"replace",
"(",
"'.'",
",",
"'/'",
")",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_path",
",",
"'ProtocolMessage.proto'",
")",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"file",
":",
"types_found",
"=",
"False",
"for",
"line",
"in",
"file",
":",
"stripped",
"=",
"line",
".",
"lstrip",
"(",
")",
".",
"rstrip",
"(",
")",
"# Look for the Type enum",
"if",
"stripped",
"==",
"'enum Type {'",
":",
"types_found",
"=",
"True",
"continue",
"elif",
"types_found",
"and",
"stripped",
"==",
"'}'",
":",
"break",
"elif",
"not",
"types_found",
":",
"continue",
"constant",
"=",
"stripped",
".",
"split",
"(",
"' '",
")",
"[",
"0",
"]",
"title",
"=",
"constant",
".",
"title",
"(",
")",
".",
"replace",
"(",
"'_'",
",",
"''",
")",
".",
"replace",
"(",
"'Hid'",
",",
"'HID'",
")",
"# Hack...",
"accessor",
"=",
"title",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"+",
"title",
"[",
"1",
":",
"]",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"base_path",
",",
"title",
"+",
"'.proto'",
")",
")",
":",
"continue",
"yield",
"MessageInfo",
"(",
"title",
"+",
"'_pb2'",
",",
"title",
",",
"accessor",
",",
"constant",
")"
] | Get information about all messages of interest. | [
"Get",
"information",
"about",
"all",
"messages",
"of",
"interest",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/scripts/autogen_protobuf_extensions.py#L54-L83 | train |
postlund/pyatv | scripts/autogen_protobuf_extensions.py | main | def main():
"""Script starts somewhere around here."""
message_names = set()
packages = []
messages = []
extensions = []
constants = []
# Extract everything needed to generate output file
for info in extract_message_info():
message_names.add(info.title)
packages.append(
'from {0} import {1}'.format(
BASE_PACKAGE, info.module))
messages.append(
'from {0}.{1} import {2}'.format(
BASE_PACKAGE, info.module, info.title))
extensions.append(
'ProtocolMessage.{0}: {1}.{2},'.format(
info.const, info.module, info.accessor))
constants.append(
'{0} = ProtocolMessage.{0}'.format(
info.const))
# Look for remaining messages
for module_name, message_name in extract_unreferenced_messages():
if message_name not in message_names:
message_names.add(message_name)
messages.append('from {0}.{1} import {2}'.format(
BASE_PACKAGE, module_name, message_name))
# Print file output with values inserted
print(OUTPUT_TEMPLATE.format(
packages='\n'.join(sorted(packages)),
messages='\n'.join(sorted(messages)),
extensions='\n '.join(sorted(extensions)),
constants='\n'.join(sorted(constants))))
return 0 | python | def main():
"""Script starts somewhere around here."""
message_names = set()
packages = []
messages = []
extensions = []
constants = []
# Extract everything needed to generate output file
for info in extract_message_info():
message_names.add(info.title)
packages.append(
'from {0} import {1}'.format(
BASE_PACKAGE, info.module))
messages.append(
'from {0}.{1} import {2}'.format(
BASE_PACKAGE, info.module, info.title))
extensions.append(
'ProtocolMessage.{0}: {1}.{2},'.format(
info.const, info.module, info.accessor))
constants.append(
'{0} = ProtocolMessage.{0}'.format(
info.const))
# Look for remaining messages
for module_name, message_name in extract_unreferenced_messages():
if message_name not in message_names:
message_names.add(message_name)
messages.append('from {0}.{1} import {2}'.format(
BASE_PACKAGE, module_name, message_name))
# Print file output with values inserted
print(OUTPUT_TEMPLATE.format(
packages='\n'.join(sorted(packages)),
messages='\n'.join(sorted(messages)),
extensions='\n '.join(sorted(extensions)),
constants='\n'.join(sorted(constants))))
return 0 | [
"def",
"main",
"(",
")",
":",
"message_names",
"=",
"set",
"(",
")",
"packages",
"=",
"[",
"]",
"messages",
"=",
"[",
"]",
"extensions",
"=",
"[",
"]",
"constants",
"=",
"[",
"]",
"# Extract everything needed to generate output file",
"for",
"info",
"in",
"extract_message_info",
"(",
")",
":",
"message_names",
".",
"add",
"(",
"info",
".",
"title",
")",
"packages",
".",
"append",
"(",
"'from {0} import {1}'",
".",
"format",
"(",
"BASE_PACKAGE",
",",
"info",
".",
"module",
")",
")",
"messages",
".",
"append",
"(",
"'from {0}.{1} import {2}'",
".",
"format",
"(",
"BASE_PACKAGE",
",",
"info",
".",
"module",
",",
"info",
".",
"title",
")",
")",
"extensions",
".",
"append",
"(",
"'ProtocolMessage.{0}: {1}.{2},'",
".",
"format",
"(",
"info",
".",
"const",
",",
"info",
".",
"module",
",",
"info",
".",
"accessor",
")",
")",
"constants",
".",
"append",
"(",
"'{0} = ProtocolMessage.{0}'",
".",
"format",
"(",
"info",
".",
"const",
")",
")",
"# Look for remaining messages",
"for",
"module_name",
",",
"message_name",
"in",
"extract_unreferenced_messages",
"(",
")",
":",
"if",
"message_name",
"not",
"in",
"message_names",
":",
"message_names",
".",
"add",
"(",
"message_name",
")",
"messages",
".",
"append",
"(",
"'from {0}.{1} import {2}'",
".",
"format",
"(",
"BASE_PACKAGE",
",",
"module_name",
",",
"message_name",
")",
")",
"# Print file output with values inserted",
"print",
"(",
"OUTPUT_TEMPLATE",
".",
"format",
"(",
"packages",
"=",
"'\\n'",
".",
"join",
"(",
"sorted",
"(",
"packages",
")",
")",
",",
"messages",
"=",
"'\\n'",
".",
"join",
"(",
"sorted",
"(",
"messages",
")",
")",
",",
"extensions",
"=",
"'\\n '",
".",
"join",
"(",
"sorted",
"(",
"extensions",
")",
")",
",",
"constants",
"=",
"'\\n'",
".",
"join",
"(",
"sorted",
"(",
"constants",
")",
")",
")",
")",
"return",
"0"
] | Script starts somewhere around here. | [
"Script",
"starts",
"somewhere",
"around",
"here",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/scripts/autogen_protobuf_extensions.py#L101-L140 | train |
postlund/pyatv | pyatv/mrp/srp.py | hkdf_expand | def hkdf_expand(salt, info, shared_secret):
"""Derive encryption keys from shared secret."""
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from cryptography.hazmat.backends import default_backend
hkdf = HKDF(
algorithm=hashes.SHA512(),
length=32,
salt=salt.encode(),
info=info.encode(),
backend=default_backend()
)
return hkdf.derive(shared_secret) | python | def hkdf_expand(salt, info, shared_secret):
"""Derive encryption keys from shared secret."""
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from cryptography.hazmat.backends import default_backend
hkdf = HKDF(
algorithm=hashes.SHA512(),
length=32,
salt=salt.encode(),
info=info.encode(),
backend=default_backend()
)
return hkdf.derive(shared_secret) | [
"def",
"hkdf_expand",
"(",
"salt",
",",
"info",
",",
"shared_secret",
")",
":",
"from",
"cryptography",
".",
"hazmat",
".",
"primitives",
"import",
"hashes",
"from",
"cryptography",
".",
"hazmat",
".",
"primitives",
".",
"kdf",
".",
"hkdf",
"import",
"HKDF",
"from",
"cryptography",
".",
"hazmat",
".",
"backends",
"import",
"default_backend",
"hkdf",
"=",
"HKDF",
"(",
"algorithm",
"=",
"hashes",
".",
"SHA512",
"(",
")",
",",
"length",
"=",
"32",
",",
"salt",
"=",
"salt",
".",
"encode",
"(",
")",
",",
"info",
"=",
"info",
".",
"encode",
"(",
")",
",",
"backend",
"=",
"default_backend",
"(",
")",
")",
"return",
"hkdf",
".",
"derive",
"(",
"shared_secret",
")"
] | Derive encryption keys from shared secret. | [
"Derive",
"encryption",
"keys",
"from",
"shared",
"secret",
"."
] | 655dfcda4e2f9d1c501540e18da4f480d8bf0e70 | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/mrp/srp.py#L53-L65 | train |