repo: uw-it-aca/uw-restclients-sws | path: uw_sws/section_status.py | url: https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/section_status.py#L21-L52 | language: python | partition: train

def _json_to_sectionstatus(section_data):
    """
    Returns a uw_sws.models.SectionStatus object
    created from the passed json.
    """
    section_status = SectionStatus()
    if section_data["AddCodeRequired"] == 'true':
        section_status.add_code_required = True
    else:
        section_status.add_code_required = False
    section_status.current_enrollment = int(section_data["CurrentEnrollment"])
    current_period = int(section_data["CurrentRegistrationPeriod"])
    section_status.current_registration_period = current_period
    if section_data["FacultyCodeRequired"] == 'true':
        section_status.faculty_code_required = True
    else:
        section_status.faculty_code_required = False
    limit_estimate = int(section_data["LimitEstimateEnrollment"])
    section_status.limit_estimated_enrollment = limit_estimate
    indicator = section_data["LimitEstimateEnrollmentIndicator"]
    section_status.limit_estimate_enrollment_indicator = indicator
    section_status.room_capacity = int(section_data["RoomCapacity"])
    section_status.sln = int(section_data["SLN"])
    section_status.space_available = int(section_data["SpaceAvailable"])
    if section_data["Status"] == "open":
        section_status.is_open = True
    else:
        section_status.is_open = False
    return section_status

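A minimal usage sketch for the record above. The payload keys are taken directly from the function body, but the values and the SectionStatus import path are assumptions, not part of the original file:

# Hypothetical driver for _json_to_sectionstatus; the import path is assumed.
from uw_sws.models import SectionStatus

payload = {
    "AddCodeRequired": "false",
    "CurrentEnrollment": "25",
    "CurrentRegistrationPeriod": "1",
    "FacultyCodeRequired": "false",
    "LimitEstimateEnrollment": "30",
    "LimitEstimateEnrollmentIndicator": "estimate",
    "RoomCapacity": "40",
    "SLN": "12345",
    "SpaceAvailable": "15",
    "Status": "open",
}
status = _json_to_sectionstatus(payload)
assert status.is_open and not status.add_code_required
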
repo: tensorflow/cleverhans | path: cleverhans_tutorials/tutorial_models_tfe.py | url: https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans_tutorials/tutorial_models_tfe.py#L98-L111 | language: python | partition: train

def get_params(self):
    """
    Provides access to the model's parameters.
    Works around the non-availability of graph collections in
    eager mode.

    :return: A list of all Variables defining the model parameters.
    """
    assert tf.executing_eagerly()
    out = []
    # Collecting params from each layer.
    for layer_name in self.layers:
        out += self.get_layer_params(layer_name)
    return out

repo: ethereum/py-evm | path: eth/db/journal.py | url: https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/db/journal.py#L354-L361 | language: python | partition: train

def _validate_changeset(self, changeset_id: uuid.UUID) -> None:
    """
    Checks to be sure the changeset is known by the journal
    """
    if not self.journal.has_changeset(changeset_id):
        raise ValidationError("Changeset not found in journal: {0}".format(
            str(changeset_id)
        ))

repo: Phyks/libbmc | path: libbmc/bibtex.py | url: https://github.com/Phyks/libbmc/blob/9ef1a29d2514157d1edd6c13ecbd61b07ae9315e/libbmc/bibtex.py#L156-L200 | language: python | partition: train

def get_entry_by_filter(filename, filter_function, ignore_fields=None):
    """
    Get an entry from a BibTeX file.

    .. note ::

        Returns the first matching entry.

    :param filename: The name of the BibTeX file.
    :param filter_function: A function returning ``True`` or ``False`` \
            whether the entry should be included or not.
    :param ignore_fields: An optional list of fields to strip from the BibTeX \
            file.
    :returns: A ``bibtexparser.BibDatabase`` object representing the \
            first matching entry. ``None`` if entry was not found.
    """
    # Handle default argument
    if ignore_fields is None:
        ignore_fields = []

    # Open bibtex file
    with open(filename, 'r') as fh:
        bibtex = bibtexparser.load(fh)

    matching_entry = None
    try:
        # Try to fetch the matching entry dict
        for entry in bibtex.entries:
            if filter_function(entry):
                matching_entry = entry
    except KeyError:
        # If none found, return None
        return None

    if matching_entry is None:
        return None

    # Clean the entry dict if necessary
    matching_entry = {k: matching_entry[k]
                      for k in matching_entry if k not in ignore_fields}

    bib_db = bibtexparser.bibdatabase.BibDatabase()
    bib_db.entries = [matching_entry]
    return bib_db

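A hedged usage sketch for get_entry_by_filter. The file name and citation key are placeholders; the "ID" key is how bibtexparser exposes an entry's BibTeX key:

# Hypothetical call: fetch the entry keyed "doe2015", stripping two fields.
db = get_entry_by_filter(
    "library.bib",                               # placeholder file name
    lambda entry: entry.get("ID") == "doe2015",  # bibtexparser stores keys as "ID"
    ignore_fields=["file", "abstract"],
)
if db is not None:
    print(db.entries[0])
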
repo: coursera-dl/coursera-dl | path: coursera/formatting.py | url: https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/formatting.py#L25-L76 | language: python | partition: train

def get_lecture_filename(combined_section_lectures_nums,
                         section_dir,
                         secnum,
                         lecnum,
                         lecname,
                         title,
                         fmt):
    """
    Prepare a destination lecture filename.

    @param combined_section_lectures_nums: Flag that indicates whether
        section lectures should have combined numbering.
    @type combined_section_lectures_nums: bool

    @param section_dir: Path to current section directory.
    @type section_dir: str

    @param secnum: Section number.
    @type secnum: int

    @param lecnum: Lecture number.
    @type lecnum: int

    @param lecname: Lecture name.
    @type lecname: str

    @param title: Resource title.
    @type title: str

    @param fmt: Format of the resource (pdf, csv, etc)
    @type fmt: str

    @return: Lecture file name.
    @rtype: str
    """
    # FIXME: this is a quick and dirty solution to Filename too long
    # problem. We need to think of a more general way to solve this
    # issue.
    fmt = fmt[:FORMAT_MAX_LENGTH]
    title = title[:TITLE_MAX_LENGTH]

    # Format lecture file name
    if combined_section_lectures_nums:
        lecture_filename = os.path.join(
            section_dir,
            format_combine_number_resource(
                secnum + 1, lecnum + 1, lecname, title, fmt))
    else:
        lecture_filename = os.path.join(
            section_dir, format_resource(lecnum + 1, lecname, title, fmt))

    return lecture_filename

repo: apache/incubator-mxnet | path: python/mxnet/image/detection.py | url: https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/image/detection.py#L113-L115 | language: python | partition: train

def dumps(self):
    """Override default."""
    return [self.__class__.__name__.lower(), [x.dumps() for x in self.aug_list]]

repo: mitsei/dlkit | path: dlkit/records/assessment/edx/drag_and_drop_records.py | url: https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/edx/drag_and_drop_records.py#L74-L79 | language: python | partition: train

def _init_map(self):
    """stub"""
    super(EdXDragAndDropQuestionFormRecord, self)._init_map()
    QuestionTextFormRecord._init_map(self)
    QuestionFilesFormRecord._init_map(self)
    self.my_osid_object_form._my_map['text']['text'] = ''

repo: GearPlug/payu-python | path: payu/tokenization.py | url: https://github.com/GearPlug/payu-python/blob/47ec5c9fc89f1f89a53ec0a68c84f358bbe3394e/payu/tokenization.py#L12-L44 | language: python | partition: train

def create_single_token(self, *, payer_id, name, identification_number, payment_method, number, expiration_date):
    """
    Using this feature you can register a customer’s credit card data and get a token sequential number.

    Args:
        payer_id:
        name:
        identification_number:
        payment_method:
        number:
        expiration_date:

    Returns:
    """
    payload = {
        "language": self.client.language.value,
        "command": PaymentCommand.CREATE_TOKEN.value,
        "merchant": {
            "apiLogin": self.client.api_login,
            "apiKey": self.client.api_key
        },
        "creditCardToken": {
            "payerId": payer_id,
            "name": name,
            "identificationNumber": identification_number,
            "paymentMethod": payment_method,
            "number": number,
            "expirationDate": expiration_date
        },
        "test": self.client.is_test
    }
    return self.client._post(self.url, json=payload)

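A hedged call sketch for create_single_token on an already-configured tokenization client. The client object and every argument value below are illustrative assumptions, not taken from the original repository:

# Hypothetical invocation; "tokenization" is assumed to be a configured client.
response = tokenization.create_single_token(
    payer_id="payer_1",
    name="Jane Doe",
    identification_number="32144457",
    payment_method="VISA",
    number="4111111111111111",  # a common test card number
    expiration_date="2030/12",
)
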
repo: petebachant/PXL | path: pxl/timeseries.py | url: https://github.com/petebachant/PXL/blob/d7d06cb74422e1ac0154741351fbecea080cfcc0/pxl/timeseries.py#L146-L153 | language: python | partition: train

def integral_scale(u, t, tau1=0.0, tau2=1.0):
    """Calculate the integral scale of a time series by integrating up to
    the first zero crossing.
    """
    tau, rho = autocorr_coeff(u, t, tau1, tau2)
    zero_cross_ind = np.where(np.diff(np.sign(rho)))[0][0]
    int_scale = np.trapz(rho[:zero_cross_ind], tau[:zero_cross_ind])
    return int_scale

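A minimal usage sketch for integral_scale with a synthetic signal. It assumes autocorr_coeff and integral_scale live in the module named in the record's path:

# Hypothetical driver; the signal is a sine wave plus small Gaussian noise.
import numpy as np
from pxl.timeseries import integral_scale  # assumed import, per the path above

t = np.linspace(0.0, 10.0, 2000)
u = np.sin(2 * np.pi * t) + 0.1 * np.random.randn(t.size)
print("integral time scale:", integral_scale(u, t, tau1=0.0, tau2=1.0))
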
repo: numan/py-analytics | path: analytics/__init__.py | url: https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/__init__.py#L27-L55 | language: python | partition: train

def create_analytic_backend(settings):
    """
    Creates a new Analytics backend from the settings

    :param settings: Dictionary of settings for the analytics backend
    :returns: A backend object implementing the analytics api

    >>>
    >>> analytics = create_analytic_backend({
    >>>     'backend': 'analytics.backends.redis.Redis',
    >>>     'settings': {
    >>>         'defaults': {
    >>>             'host': 'localhost',
    >>>             'port': 6379,
    >>>             'db': 0,
    >>>         },
    >>>         'hosts': [{'db': 0}, {'db': 1}, {'host': 'redis.example.org'}]
    >>>     },
    >>> })
    """
    backend = settings.get('backend')

    if isinstance(backend, basestring):
        backend = import_string(backend)
    elif backend:
        backend = backend
    else:
        raise KeyError('backend')

    return backend(settings.get("settings", {}))

repo: wright-group/WrightTools | path: WrightTools/artists/_quick.py | url: https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/artists/_quick.py#L163-L377 | language: python | partition: train

def quick2D(
    data,
    xaxis=0,
    yaxis=1,
    at={},
    channel=0,
    *,
    contours=0,
    pixelated=True,
    dynamic_range=False,
    local=False,
    contours_local=True,
    autosave=False,
    save_directory=None,
    fname=None,
    verbose=True
):
    """Quickly plot 2D slice(s) of data.

    Parameters
    ----------
    data : WrightTools.Data object.
        Data to plot.
    xaxis : string or integer (optional)
        Expression or index of horizontal axis. Default is 0.
    yaxis : string or integer (optional)
        Expression or index of vertical axis. Default is 1.
    at : dictionary (optional)
        Dictionary of parameters in non-plotted dimension(s). If not
        provided, plots will be made at each coordinate.
    channel : string or integer (optional)
        Name or index of channel to plot. Default is 0.
    contours : integer (optional)
        The number of black contour lines to add to the plot. Default is 0.
    pixelated : boolean (optional)
        Toggle between pcolor and contourf (deulaney) plotting backends.
        Default is True (pcolor).
    dynamic_range : boolean (optional)
        Force the colorbar to use all of its colors. Only changes behavior
        for signed channels. Default is False.
    local : boolean (optional)
        Toggle plotting locally. Default is False.
    contours_local : boolean (optional)
        Toggle plotting black contour lines locally. Default is True.
    autosave : boolean (optional)
        Toggle autosave. Default is False.
    save_directory : string (optional)
        Location to save image(s). Default is None (auto-generated).
    fname : string (optional)
        File name. If None, data name is used. Default is None.
    verbose : boolean (optional)
        Toggle talkback. Default is True.

    Returns
    -------
    list of strings
        List of saved image files (if any).
    """
    # channel index
    channel_index = wt_kit.get_index(data.channel_names, channel)
    shape = data.channels[channel_index].shape
    collapse = [i for i in range(len(shape)) if shape[i] == 1]
    at = at.copy()
    at.update({c: 0 for c in collapse})
    # prepare data
    chopped = data.chop(xaxis, yaxis, at=at, verbose=False)
    # colormap
    # get colormap
    if data.channels[channel_index].signed:
        cmap = "signed"
    else:
        cmap = "default"
    cmap = colormaps[cmap]
    cmap.set_bad([0.75] * 3, 1.)
    cmap.set_under([0.75] * 3, 1.)
    # fname
    if fname is None:
        fname = data.natural_name
    # autosave
    if len(chopped) > 10:
        if not autosave:
            print("more than 10 images will be generated: forcing autosave")
            autosave = True
    # output folder
    if autosave:
        if save_directory:
            pass
        else:
            if len(chopped) == 1:
                save_directory = os.getcwd()
            else:
                folder_name = "quick2D " + wt_kit.TimeStamp().path
                os.mkdir(folder_name)
                save_directory = folder_name
    # loop through image generation
    out = []
    for i, d in enumerate(chopped.values()):
        # unpack data -----------------------------------------------------------------------------
        xaxis = d.axes[0]
        xlim = xaxis.min(), xaxis.max()
        yaxis = d.axes[1]
        ylim = yaxis.min(), yaxis.max()
        channel = d.channels[channel_index]
        zi = channel[:]
        zi = np.ma.masked_invalid(zi)
        # create figure ---------------------------------------------------------------------------
        if xaxis.units == yaxis.units:
            xr = xlim[1] - xlim[0]
            yr = ylim[1] - ylim[0]
            aspect = np.abs(yr / xr)
            if 3 < aspect or aspect < 1 / 3.:
                # TODO: raise warning here
                aspect = np.clip(aspect, 1 / 3., 3.)
        else:
            aspect = 1
        fig, gs = create_figure(
            width="single", nrows=1, cols=[1, "cbar"], aspects=[[[0, 0], aspect]]
        )
        ax = plt.subplot(gs[0])
        ax.patch.set_facecolor("w")
        # levels ----------------------------------------------------------------------------------
        if channel.signed:
            if local:
                limit = channel.mag()
            else:
                data_channel = data.channels[channel_index]
                if dynamic_range:
                    limit = min(
                        abs(data_channel.null - data_channel.min()),
                        abs(data_channel.null - data_channel.max()),
                    )
                else:
                    limit = data_channel.mag()
            levels = np.linspace(-limit + channel.null, limit + channel.null, 200)
        else:
            if local:
                levels = np.linspace(channel.null, np.nanmax(zi), 200)
            else:
                data_channel = data.channels[channel_index]
                if data_channel.max() < data_channel.null:
                    levels = np.linspace(data_channel.min(), data_channel.null, 200)
                else:
                    levels = np.linspace(data_channel.null, data_channel.max(), 200)
        # colors ----------------------------------------------------------------------------------
        if pixelated:
            ax.pcolor(d, channel=channel_index, cmap=cmap, vmin=levels.min(), vmax=levels.max())
        else:
            ax.contourf(d, channel=channel_index, cmap=cmap, levels=levels)
        # contour lines ---------------------------------------------------------------------------
        if contours:
            # get contour levels
            # force top and bottom contour to be data range then clip them out
            if channel.signed:
                if contours_local:
                    limit = channel.mag()
                else:
                    limit = data_channel.mag()
                contour_levels = np.linspace(
                    -limit + channel.null, limit + channel.null, contours + 2
                )[1:-1]
            else:
                if contours_local:
                    limit = channel.max()
                else:
                    limit = data_channel.max()
                contour_levels = np.linspace(channel.null, limit, contours + 2)[1:-1]
            ax.contour(d, channel=channel_index, levels=contour_levels)
        # decoration ------------------------------------------------------------------------------
        plt.xticks(rotation=45, fontsize=14)
        plt.yticks(fontsize=14)
        ax.set_xlabel(xaxis.label, fontsize=18)
        ax.set_ylabel(yaxis.label, fontsize=18)
        ax.grid()
        # lims
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
        # add zero lines
        plt.axvline(0, lw=2, c="k")
        plt.axhline(0, lw=2, c="k")
        # add constants to title
        ls = []
        for constant in d.constants:
            ls.append(constant.label)
        title = ", ".join(ls)
        _title(fig, data.natural_name, subtitle=title)
        # variable marker lines
        for constant in d.constants:
            if constant.units is not None:
                # x axis
                if xaxis.units_kind == constant.units_kind:
                    constant.convert(xaxis.units)
                    plt.axvline(constant.value, color="k", linewidth=4, alpha=0.25)
                # y axis
                if yaxis.units_kind == constant.units_kind:
                    constant.convert(yaxis.units)
                    plt.axhline(constant.value, color="k", linewidth=4, alpha=0.25)
        # colorbar
        cax = plt.subplot(gs[1])
        cbar_ticks = np.linspace(levels.min(), levels.max(), 11)
        plot_colorbar(cax=cax, ticks=cbar_ticks, label=channel.natural_name, cmap=cmap)
        plt.sca(ax)
        # save figure -----------------------------------------------------------------------------
        if autosave:
            if fname:
                file_name = fname + " " + str(i).zfill(3)
            else:
                file_name = str(i).zfill(3)
            fpath = os.path.join(save_directory, file_name + ".png")
            savefig(fpath, fig=fig)
            plt.close()
            if verbose:
                print("image saved at", fpath)
            out.append(fpath)
    chopped.close()
    return out

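A hedged usage sketch for quick2D. The data file name is a placeholder, and the wt.open and wt.artists.quick2D entry points are assumptions based on the record's path:

# Hypothetical call; "example.wt5" is a placeholder data file.
import WrightTools as wt  # assumed package import name

data = wt.open("example.wt5")
paths = wt.artists.quick2D(data, xaxis=0, yaxis=1, contours=5, autosave=True)
print(paths)  # list of saved image files, per the docstring
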
repo: cokelaer/spectrum | path: doc/sphinxext/sphinx_gallery/gen_rst.py | url: https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/doc/sphinxext/sphinx_gallery/gen_rst.py#L335-L377 | language: python | partition: valid

def scale_image(in_fname, out_fname, max_width, max_height):
    """Scales an image with the same aspect ratio centered in an
    image with a given max_width and max_height
    if in_fname == out_fname the image can only be scaled down
    """
    # local import to avoid testing dependency on PIL:
    try:
        from PIL import Image
    except ImportError:
        import Image
    img = Image.open(in_fname)
    width_in, height_in = img.size
    scale_w = max_width / float(width_in)
    scale_h = max_height / float(height_in)

    if height_in * scale_w <= max_height:
        scale = scale_w
    else:
        scale = scale_h

    if scale >= 1.0 and in_fname == out_fname:
        return

    width_sc = int(round(scale * width_in))
    height_sc = int(round(scale * height_in))

    # resize the image
    img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)

    # insert centered
    thumb = Image.new('RGB', (max_width, max_height), (255, 255, 255))
    pos_insert = ((max_width - width_sc) // 2, (max_height - height_sc) // 2)
    thumb.paste(img, pos_insert)

    thumb.save(out_fname)
    # Use optipng to perform lossless compression on the resized image if
    # software is installed
    if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
        try:
            subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
        except Exception:
            warnings.warn('Install optipng to reduce the size of the \
generated images')

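A minimal usage sketch for scale_image; the file names and dimensions are placeholders:

import os

# Opting in to the optional optipng pass exercised at the end of the function.
os.environ["SKLEARN_DOC_OPTIPNG"] = "true"
# Hypothetical call: produce a 400x280 thumbnail centered on a white canvas.
scale_image("plot_example.png", "thumb_plot_example.png", 400, 280)
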
repo: dw/mitogen | path: ansible_mitogen/strategy.py | url: https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/ansible_mitogen/strategy.py#L43-L66 | language: python | partition: train

def _patch_awx_callback():
    """
    issue #400: AWX loads a display callback that suffers from thread-safety
    issues. Detect the presence of older AWX versions and patch the bug.
    """
    # AWX uses sitecustomize.py to force-load this package. If it exists, we're
    # running under AWX.
    try:
        from awx_display_callback.events import EventContext
        from awx_display_callback.events import event_context
    except ImportError:
        return

    if hasattr(EventContext(), '_local'):
        # Patched version.
        return

    def patch_add_local(self, **kwargs):
        tls = vars(self._local)
        ctx = tls.setdefault('_ctx', {})
        ctx.update(kwargs)

    EventContext._local = threading.local()
    EventContext.add_local = patch_add_local

repo: saltstack/salt | path: salt/utils/event.py | url: https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L1327-L1356 | language: python | partition: train

def fire_master(self, data, tag, preload=None):
    '''
    Fire an event off on the master server

    CLI Example:

    .. code-block:: bash

        salt '*' event.fire_master 'stuff to be in the event' 'tag'
    '''
    load = {}
    if preload:
        load.update(preload)

    load.update({
        'id': self.opts['id'],
        'tag': tag,
        'data': data,
        'cmd': '_minion_event',
        'tok': self.auth.gen_token(b'salt'),
    })
    channel = salt.transport.client.ReqChannel.factory(self.opts)
    try:
        channel.send(load)
    except Exception:
        pass
    finally:
        channel.close()
    return True

repo: tanghaibao/jcvi | path: jcvi/compara/catalog.py | url: https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/catalog.py#L354-L373 | language: python | partition: train

def omgparse(args):
    """
    %prog omgparse work

    Parse the OMG outputs to get gene lists.
    """
    p = OptionParser(omgparse.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    work, = args
    omgfiles = glob(op.join(work, "gf*.out"))
    for omgfile in omgfiles:
        omg = OMGFile(omgfile)
        best = omg.best()
        for bb in best:
            genes, taxa = zip(*bb)
            print("\t".join((",".join(genes), ",".join(taxa))))

repo: saltstack/salt | path: salt/modules/redismod.py | url: https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/redismod.py#L132-L143 | language: python | partition: train

def dbsize(host=None, port=None, db=None, password=None):
    '''
    Return the number of keys in the selected database

    CLI Example:

    .. code-block:: bash

        salt '*' redis.dbsize
    '''
    server = _connect(host, port, db, password)
    return server.dbsize()

repo: google/dotty | path: efilter/dispatch.py | url: https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/dispatch.py#L254-L335 | language: python | partition: train

def _find_and_cache_best_function(self, dispatch_type):
    """Finds the best implementation of this function given a type.

    This function caches the result, and uses locking for thread safety.

    Returns:
        Implementing function, in below order of preference:

        1. Explicitly registered implementations (through
           multimethod.implement) for types that 'dispatch_type' either is
           or inherits from directly.
        2. Explicitly registered implementations accepting an abstract type
           (interface) in which dispatch_type participates (through
           abstract_type.register() or the convenience methods).
        3. Default behavior of the multimethod function. This will usually
           raise a NotImplementedError, by convention.

    Raises:
        TypeError: If two implementing functions are registered for
            different abstract types, and 'dispatch_type' participates in
            both, and no order of preference was specified using
            prefer_type.
    """
    result = self._dispatch_table.get(dispatch_type)
    if result:
        return result

    # The outer try ensures the lock is always released.
    with self._write_lock:
        try:
            dispatch_mro = dispatch_type.mro()
        except TypeError:
            # Not every type has an MRO.
            dispatch_mro = ()

        best_match = None
        result_type = None

        for candidate_type, candidate_func in self.implementations:
            if not issubclass(dispatch_type, candidate_type):
                # Skip implementations that are obviously unrelated.
                continue

            try:
                # The candidate implementation may be for a type that's
                # actually in the MRO, or it may be for an abstract type.
                match = dispatch_mro.index(candidate_type)
            except ValueError:
                # This means we have an implementation for an abstract
                # type, which ranks below all concrete types.
                match = None

            if best_match is None:
                if result and match is None:
                    # Already have a result, and no order of preference.
                    # This is probably because the type is a member of two
                    # abstract types and we have separate implementations
                    # for those two abstract types.
                    if self._preferred(candidate_type, over=result_type):
                        result = candidate_func
                        result_type = candidate_type
                    elif self._preferred(result_type, over=candidate_type):
                        # No need to update anything.
                        pass
                    else:
                        raise TypeError(
                            "Two candidate implementations found for "
                            "multimethod function %s (dispatch type %s) "
                            "and neither is preferred." %
                            (self.func_name, dispatch_type))
                else:
                    result = candidate_func
                    result_type = candidate_type
                    best_match = match

            if (match or 0) < (best_match or 0):
                result = candidate_func
                result_type = candidate_type
                best_match = match

        self._dispatch_table[dispatch_type] = result
        return result

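The ranking in the loop above scores concrete candidates by their position in the dispatch type's MRO (a lower index is more specific) and scores abstract registrations as None. A standalone illustration with hypothetical classes, unrelated to the original repository:

# Hypothetical class hierarchy illustrating the MRO-index ranking.
class Base: pass
class Mid(Base): pass
class Leaf(Mid): pass

mro = Leaf.mro()        # [Leaf, Mid, Base, object]
print(mro.index(Leaf))  # 0, the most specific candidate, preferred
print(mro.index(Base))  # 2, the least specific concrete candidate
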
repo: iotile/coretools | path: iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/install.py | url: https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/install.py#L55-L100 | language: python | partition: train

def scons_copytree(src, dst, symlinks=False):
    """Recursively copy a directory tree using copy2().

    The destination directory must not already exist.
    If exception(s) occur, a CopytreeError is raised with a list of reasons.

    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied.

    XXX Consider this example code rather than the ultimate tool.
    """
    names = os.listdir(src)
    # [email protected] fix: check for dir before making dirs.
    if not os.path.exists(dst):
        os.makedirs(dst)

    errors = []
    for name in names:
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                scons_copytree(srcname, dstname, symlinks)
            else:
                shutil.copy2(srcname, dstname)
            # XXX What about devices, sockets etc.?
        except (IOError, os.error) as why:
            errors.append((srcname, dstname, str(why)))
        # catch the CopytreeError from the recursive copytree so that we can
        # continue with other files
        except CopytreeError as err:
            errors.extend(err.args[0])
    try:
        shutil.copystat(src, dst)
    except SCons.Util.WinError:
        # can't copy file access times on Windows
        pass
    except OSError as why:
        errors.extend((src, dst, str(why)))
    if errors:
        raise CopytreeError(errors)

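A minimal usage sketch for scons_copytree; the paths are placeholders, and the error payload shape follows the raise at the end of the function:

# Hypothetical call; copies /tmp/src into /tmp/dst, preserving symlinks.
try:
    scons_copytree("/tmp/src", "/tmp/dst", symlinks=True)
except CopytreeError as err:
    print("some files failed to copy:", err.args[0])
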
repo: cloud9ers/gurumate | path: environment/lib/python2.7/site-packages/IPython/zmq/zmqshell.py | url: https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/zmq/zmqshell.py#L494-L509 | language: python | partition: test

def init_environment(self):
    """Configure the user's environment.
    """
    env = os.environ
    # These two ensure 'ls' produces nice coloring on BSD-derived systems
    env['TERM'] = 'xterm-color'
    env['CLICOLOR'] = '1'
    # Since normal pagers don't work at all (over pexpect we don't have
    # single-key control of the subprocess), try to disable paging in
    # subprocesses as much as possible.
    env['PAGER'] = 'cat'
    env['GIT_PAGER'] = 'cat'
    # And install the payload version of page.
    install_payload_page()

Dentosal/python-sc2 | sc2/client.py | https://github.com/Dentosal/python-sc2/blob/608bd25f04e89d39cef68b40101d8e9a8a7f1634/sc2/client.py#L350-L380 | async def debug_text(self, texts: Union[str, list], positions: Union[list, set], color=(0, 255, 0), size_px=16):
""" Deprecated, may be removed soon """
if isinstance(positions, (set, list)):
if not positions:
return
if isinstance(texts, str):
texts = [texts] * len(positions)
assert len(texts) == len(positions)
await self._execute(
debug=sc_pb.RequestDebug(
debug=[
debug_pb.DebugCommand(
draw=debug_pb.DebugDraw(
text=[
debug_pb.DebugText(
text=t,
color=debug_pb.Color(r=color[0], g=color[1], b=color[2]),
world_pos=common_pb.Point(x=p.x, y=p.y, z=getattr(p, "z", 10)),
size=size_px,
)
for t, p in zip(texts, positions)
]
)
)
]
)
)
else:
await self.debug_text([texts], [positions], color) | [
"async",
"def",
"debug_text",
"(",
"self",
",",
"texts",
":",
"Union",
"[",
"str",
",",
"list",
"]",
",",
"positions",
":",
"Union",
"[",
"list",
",",
"set",
"]",
",",
"color",
"=",
"(",
"0",
",",
"255",
",",
"0",
")",
",",
"size_px",
"=",
"16",
")",
":",
"if",
"isinstance",
"(",
"positions",
",",
"(",
"set",
",",
"list",
")",
")",
":",
"if",
"not",
"positions",
":",
"return",
"if",
"isinstance",
"(",
"texts",
",",
"str",
")",
":",
"texts",
"=",
"[",
"texts",
"]",
"*",
"len",
"(",
"positions",
")",
"assert",
"len",
"(",
"texts",
")",
"==",
"len",
"(",
"positions",
")",
"await",
"self",
".",
"_execute",
"(",
"debug",
"=",
"sc_pb",
".",
"RequestDebug",
"(",
"debug",
"=",
"[",
"debug_pb",
".",
"DebugCommand",
"(",
"draw",
"=",
"debug_pb",
".",
"DebugDraw",
"(",
"text",
"=",
"[",
"debug_pb",
".",
"DebugText",
"(",
"text",
"=",
"t",
",",
"color",
"=",
"debug_pb",
".",
"Color",
"(",
"r",
"=",
"color",
"[",
"0",
"]",
",",
"g",
"=",
"color",
"[",
"1",
"]",
",",
"b",
"=",
"color",
"[",
"2",
"]",
")",
",",
"world_pos",
"=",
"common_pb",
".",
"Point",
"(",
"x",
"=",
"p",
".",
"x",
",",
"y",
"=",
"p",
".",
"y",
",",
"z",
"=",
"getattr",
"(",
"p",
",",
"\"z\"",
",",
"10",
")",
")",
",",
"size",
"=",
"size_px",
",",
")",
"for",
"t",
",",
"p",
"in",
"zip",
"(",
"texts",
",",
"positions",
")",
"]",
")",
")",
"]",
")",
")",
"else",
":",
"await",
"self",
".",
"debug_text",
"(",
"[",
"texts",
"]",
",",
"[",
"positions",
"]",
",",
"color",
")"
] | Deprecated, may be removed soon | [
"Deprecated",
"may",
"be",
"removed",
"soon"
] | python | train |
squaresLab/BugZoo | bugzoo/client/container.py | https://github.com/squaresLab/BugZoo/blob/68664f1977e85b37a78604f7c570382ffae1fa3b/bugzoo/client/container.py#L295-L342 | def exec(self,
container: Container,
command: str,
context: Optional[str] = None,
stdout: bool = True,
stderr: bool = False,
time_limit: Optional[int] = None
) -> ExecResponse:
"""
Executes a given command inside a provided container.
Parameters:
container: the container to which the command should be issued.
command: the command that should be executed.
context: the working directory that should be used to perform the
execution. If no context is provided, then the command will be
executed at the root of the container.
stdout: specifies whether or not output to the stdout should be
included in the execution summary.
stderr: specifies whether or not output to the stderr should be
included in the execution summary.
time_limit: an optional time limit that is applied to the
execution. If the command fails to execute within the time
limit, the command will be aborted and treated as a failure.
Returns:
a summary of the outcome of the execution.
Raises:
KeyError: if the container no longer exists on the server.
"""
# FIXME perhaps these should be encoded as path variables?
payload = {
'command': command,
'context': context,
'stdout': stdout,
'stderr': stderr,
'time-limit': time_limit
}
path = "containers/{}/exec".format(container.uid)
r = self.__api.post(path, json=payload)
if r.status_code == 200:
return ExecResponse.from_dict(r.json())
if r.status_code == 404:
raise KeyError("no container found with given UID: {}".format(container.uid))
self.__api.handle_erroneous_response(r) | [
"def",
"exec",
"(",
"self",
",",
"container",
":",
"Container",
",",
"command",
":",
"str",
",",
"context",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"stdout",
":",
"bool",
"=",
"True",
",",
"stderr",
":",
"bool",
"=",
"False",
",",
"time_limit",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
")",
"->",
"ExecResponse",
":",
"# FIXME perhaps these should be encoded as path variables?",
"payload",
"=",
"{",
"'command'",
":",
"command",
",",
"'context'",
":",
"context",
",",
"'stdout'",
":",
"stdout",
",",
"'stderr'",
":",
"stderr",
",",
"'time-limit'",
":",
"time_limit",
"}",
"path",
"=",
"\"containers/{}/exec\"",
".",
"format",
"(",
"container",
".",
"uid",
")",
"r",
"=",
"self",
".",
"__api",
".",
"post",
"(",
"path",
",",
"json",
"=",
"payload",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"return",
"ExecResponse",
".",
"from_dict",
"(",
"r",
".",
"json",
"(",
")",
")",
"if",
"r",
".",
"status_code",
"==",
"404",
":",
"raise",
"KeyError",
"(",
"\"no container found with given UID: {}\"",
".",
"format",
"(",
"container",
".",
"uid",
")",
")",
"self",
".",
"__api",
".",
"handle_erroneous_response",
"(",
"r",
")"
] | Executes a given command inside a provided container.
Parameters:
container: the container to which the command should be issued.
command: the command that should be executed.
context: the working directory that should be used to perform the
execution. If no context is provided, then the command will be
executed at the root of the container.
stdout: specifies whether or not output to the stdout should be
included in the execution summary.
stderr: specifies whether or not output to the stderr should be
included in the execution summary.
time_limit: an optional time limit that is applied to the
execution. If the command fails to execute within the time
limit, the command will be aborted and treated as a failure.
Returns:
a summary of the outcome of the execution.
Raises:
KeyError: if the container no longer exists on the server. | [
"Executes",
"a",
"given",
"command",
"inside",
"a",
"provided",
"container",
"."
] | python | train |
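A brief, hedged usage sketch of the method above; `mgr` stands in for whatever client-side object exposes it (a hypothetical handle), and the command, context and time limit are illustrative values:

    # `mgr` and `container` are hypothetical handles to the client and a container.
    outcome = mgr.exec(container,
                       command='make test',       # shell command to run inside
                       context='/opt/project',    # working directory for the run
                       stderr=True,               # capture stderr as well as stdout
                       time_limit=60)             # abort after 60 seconds
    # `outcome` is an ExecResponse summarising the execution, per the docstring.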
openpermissions/perch | perch/migrate.py | https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/migrate.py#L153-L173 | def run_migrations(migrations):
"""
Run migrations for a resource type
:param migrations: a dictionary of migrations
"""
for resource, resource_migrations in migrations.items():
for version in resource_migrations:
to_migrate = yield resource_version.get(
key=[resource.resource_type, version],
include_docs=True)
for x in to_migrate['rows']:
instance = resource(**x['doc'])
instance = _migrate_resource(
instance,
resource_migrations,
version
)
yield instance._save() | [
"def",
"run_migrations",
"(",
"migrations",
")",
":",
"for",
"resource",
",",
"resource_migrations",
"in",
"migrations",
".",
"items",
"(",
")",
":",
"for",
"version",
"in",
"resource_migrations",
":",
"to_migrate",
"=",
"yield",
"resource_version",
".",
"get",
"(",
"key",
"=",
"[",
"resource",
".",
"resource_type",
",",
"version",
"]",
",",
"include_docs",
"=",
"True",
")",
"for",
"x",
"in",
"to_migrate",
"[",
"'rows'",
"]",
":",
"instance",
"=",
"resource",
"(",
"*",
"*",
"x",
"[",
"'doc'",
"]",
")",
"instance",
"=",
"_migrate_resource",
"(",
"instance",
",",
"resource_migrations",
",",
"version",
")",
"yield",
"instance",
".",
"_save",
"(",
")"
] | Run migrations for a resource type
:param migrations: a dictionary of migrations | [
"Run",
"migrations",
"for",
"a",
"resource",
"type"
] | python | train |
crytic/slither | slither/printers/summary/data_depenency.py | https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/printers/summary/data_depenency.py#L21-L46 | def output(self, _filename):
"""
_filename is not used
Args:
_filename(string)
"""
txt = ''
for c in self.contracts:
txt += "\nContract %s\n"%c.name
table = PrettyTable(['Variable', 'Dependencies'])
for v in c.state_variables:
table.add_row([v.name, _get(v, c)])
txt += str(table)
txt += "\n"
for f in c.functions_and_modifiers_not_inherited:
txt += "\nFunction %s\n"%f.full_name
table = PrettyTable(['Variable', 'Dependencies'])
for v in f.variables:
table.add_row([v.name, _get(v, f)])
for v in c.state_variables:
table.add_row([v.canonical_name, _get(v, f)])
txt += str(table)
self.info(txt) | [
"def",
"output",
"(",
"self",
",",
"_filename",
")",
":",
"txt",
"=",
"''",
"for",
"c",
"in",
"self",
".",
"contracts",
":",
"txt",
"+=",
"\"\\nContract %s\\n\"",
"%",
"c",
".",
"name",
"table",
"=",
"PrettyTable",
"(",
"[",
"'Variable'",
",",
"'Dependencies'",
"]",
")",
"for",
"v",
"in",
"c",
".",
"state_variables",
":",
"table",
".",
"add_row",
"(",
"[",
"v",
".",
"name",
",",
"_get",
"(",
"v",
",",
"c",
")",
"]",
")",
"txt",
"+=",
"str",
"(",
"table",
")",
"txt",
"+=",
"\"\\n\"",
"for",
"f",
"in",
"c",
".",
"functions_and_modifiers_not_inherited",
":",
"txt",
"+=",
"\"\\nFunction %s\\n\"",
"%",
"f",
".",
"full_name",
"table",
"=",
"PrettyTable",
"(",
"[",
"'Variable'",
",",
"'Dependencies'",
"]",
")",
"for",
"v",
"in",
"f",
".",
"variables",
":",
"table",
".",
"add_row",
"(",
"[",
"v",
".",
"name",
",",
"_get",
"(",
"v",
",",
"f",
")",
"]",
")",
"for",
"v",
"in",
"c",
".",
"state_variables",
":",
"table",
".",
"add_row",
"(",
"[",
"v",
".",
"canonical_name",
",",
"_get",
"(",
"v",
",",
"f",
")",
"]",
")",
"txt",
"+=",
"str",
"(",
"table",
")",
"self",
".",
"info",
"(",
"txt",
")"
] | _filename is not used
Args:
_filename(string) | [
"_filename",
"is",
"not",
"used",
"Args",
":",
"_filename",
"(",
"string",
")"
] | python | train |
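A minimal standalone sketch of the PrettyTable pattern this printer builds on (the row values are made up):

    from prettytable import PrettyTable

    table = PrettyTable(['Variable', 'Dependencies'])
    table.add_row(['owner', "['msg.sender']"])   # one row per variable
    print(table)                                 # renders an ASCII table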
osrg/ryu | ryu/services/protocols/bgp/api/rtconf.py | https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/api/rtconf.py#L198-L211 | def get_neighbor_attribute_map(neigh_ip_address, route_dist=None,
route_family=VRF_RF_IPV4):
"""Returns a neighbor attribute_map for given ip address if exists."""
core = CORE_MANAGER.get_core_service()
peer = core.peer_manager.get_by_addr(neigh_ip_address)
at_maps_key = const.ATTR_MAPS_LABEL_DEFAULT
if route_dist is not None:
at_maps_key = ':'.join([route_dist, route_family])
at_maps = peer.attribute_maps.get(at_maps_key)
if at_maps:
return at_maps.get(const.ATTR_MAPS_ORG_KEY)
else:
return [] | [
"def",
"get_neighbor_attribute_map",
"(",
"neigh_ip_address",
",",
"route_dist",
"=",
"None",
",",
"route_family",
"=",
"VRF_RF_IPV4",
")",
":",
"core",
"=",
"CORE_MANAGER",
".",
"get_core_service",
"(",
")",
"peer",
"=",
"core",
".",
"peer_manager",
".",
"get_by_addr",
"(",
"neigh_ip_address",
")",
"at_maps_key",
"=",
"const",
".",
"ATTR_MAPS_LABEL_DEFAULT",
"if",
"route_dist",
"is",
"not",
"None",
":",
"at_maps_key",
"=",
"':'",
".",
"join",
"(",
"[",
"route_dist",
",",
"route_family",
"]",
")",
"at_maps",
"=",
"peer",
".",
"attribute_maps",
".",
"get",
"(",
"at_maps_key",
")",
"if",
"at_maps",
":",
"return",
"at_maps",
".",
"get",
"(",
"const",
".",
"ATTR_MAPS_ORG_KEY",
")",
"else",
":",
"return",
"[",
"]"
] | Returns a neighbor attribute_map for a given ip address if it exists. | [
"Returns",
"a",
"neighbor",
"attribute_map",
"for",
"given",
"ip",
"address",
"if",
"exists",
"."
] | python | train |
kensho-technologies/graphql-compiler | graphql_compiler/query_formatting/gremlin_formatting.py | https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/query_formatting/gremlin_formatting.py#L81-L92 | def _safe_gremlin_list(inner_type, argument_value):
"""Represent the list of "inner_type" objects in Gremlin form."""
if not isinstance(argument_value, list):
raise GraphQLInvalidArgumentError(u'Attempting to represent a non-list as a list: '
u'{}'.format(argument_value))
stripped_type = strip_non_null_from_type(inner_type)
components = (
_safe_gremlin_argument(stripped_type, x)
for x in argument_value
)
return u'[' + u','.join(components) + u']' | [
"def",
"_safe_gremlin_list",
"(",
"inner_type",
",",
"argument_value",
")",
":",
"if",
"not",
"isinstance",
"(",
"argument_value",
",",
"list",
")",
":",
"raise",
"GraphQLInvalidArgumentError",
"(",
"u'Attempting to represent a non-list as a list: '",
"u'{}'",
".",
"format",
"(",
"argument_value",
")",
")",
"stripped_type",
"=",
"strip_non_null_from_type",
"(",
"inner_type",
")",
"components",
"=",
"(",
"_safe_gremlin_argument",
"(",
"stripped_type",
",",
"x",
")",
"for",
"x",
"in",
"argument_value",
")",
"return",
"u'['",
"+",
"u','",
".",
"join",
"(",
"components",
")",
"+",
"u']'"
] | Represent the list of "inner_type" objects in Gremlin form. | [
"Represent",
"the",
"list",
"of",
"inner_type",
"objects",
"in",
"Gremlin",
"form",
"."
] | python | train |
mitsei/dlkit | dlkit/services/assessment.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/assessment.py#L891-L899 | def use_plenary_bank_view(self):
"""Pass through to provider ItemBankSession.use_plenary_bank_view"""
self._bank_view = PLENARY
# self._get_provider_session('item_bank_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_bank_view()
except AttributeError:
pass | [
"def",
"use_plenary_bank_view",
"(",
"self",
")",
":",
"self",
".",
"_bank_view",
"=",
"PLENARY",
"# self._get_provider_session('item_bank_session') # To make sure the session is tracked",
"for",
"session",
"in",
"self",
".",
"_get_provider_sessions",
"(",
")",
":",
"try",
":",
"session",
".",
"use_plenary_bank_view",
"(",
")",
"except",
"AttributeError",
":",
"pass"
] | Pass through to provider ItemBankSession.use_plenary_bank_view | [
"Pass",
"through",
"to",
"provider",
"ItemBankSession",
".",
"use_plenary_bank_view"
] | python | train |
lowandrew/OLCTools | spadespipeline/GeneSeekr.py | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/GeneSeekr.py#L151-L171 | def makeblastdb(self):
"""Makes blast database files from targets as necessary"""
while True: # while daemon
fastapath = self.dqueue.get() # grabs fastapath from dqueue
# remove the path and the file extension for easier future globbing
db = os.path.splitext(fastapath)[0]
nhr = '{}.nhr'.format(db) # add nhr for searching
# fnull = open(os.devnull, 'w') # define /dev/null
if not os.path.isfile(str(nhr)): # if check for already existing dbs
# Create the databases
# TODO use MakeBLASTdb class
threadlock = threading.Lock()
command = 'makeblastdb -in {} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {}'.format(fastapath, db)
# subprocess.call(shlex.split('makeblastdb -in {} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {}'
# .format(fastapath, db)), stdout=fnull, stderr=fnull)
out, err = run_subprocess(command)
threadlock.acquire()
write_to_logfile(command, command, self.logfile, None, None, None, None)
write_to_logfile(out, err, self.logfile, None, None, None, None)
threadlock.release()
self.dqueue.task_done() | [
"def",
"makeblastdb",
"(",
"self",
")",
":",
"while",
"True",
":",
"# while daemon",
"fastapath",
"=",
"self",
".",
"dqueue",
".",
"get",
"(",
")",
"# grabs fastapath from dqueue",
"# remove the path and the file extension for easier future globbing",
"db",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"fastapath",
")",
"[",
"0",
"]",
"nhr",
"=",
"'{}.nhr'",
".",
"format",
"(",
"db",
")",
"# add nhr for searching",
"# fnull = open(os.devnull, 'w') # define /dev/null",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"str",
"(",
"nhr",
")",
")",
":",
"# if check for already existing dbs",
"# Create the databases",
"# TODO use MakeBLASTdb class",
"threadlock",
"=",
"threading",
".",
"Lock",
"(",
")",
"command",
"=",
"'makeblastdb -in {} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {}'",
".",
"format",
"(",
"fastapath",
",",
"db",
")",
"# subprocess.call(shlex.split('makeblastdb -in {} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {}'",
"# .format(fastapath, db)), stdout=fnull, stderr=fnull)",
"out",
",",
"err",
"=",
"run_subprocess",
"(",
"command",
")",
"threadlock",
".",
"acquire",
"(",
")",
"write_to_logfile",
"(",
"command",
",",
"command",
",",
"self",
".",
"logfile",
",",
"None",
",",
"None",
",",
"None",
",",
"None",
")",
"write_to_logfile",
"(",
"out",
",",
"err",
",",
"self",
".",
"logfile",
",",
"None",
",",
"None",
",",
"None",
",",
"None",
")",
"threadlock",
".",
"release",
"(",
")",
"self",
".",
"dqueue",
".",
"task_done",
"(",
")"
] | Makes blast database files from targets as necessary | [
"Makes",
"blast",
"database",
"files",
"from",
"targets",
"as",
"necessary"
] | python | train |
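For a single FASTA target, the worker above derives the database name, the sentinel file, and the command like this (the path is hypothetical):

    import os

    fastapath = '/data/targets/genes.fasta'   # hypothetical target file
    db = os.path.splitext(fastapath)[0]       # '/data/targets/genes'
    nhr = '{}.nhr'.format(db)                 # an existing genes.nhr skips the build
    command = ('makeblastdb -in {} -parse_seqids -max_file_sz 2GB '
               '-dbtype nucl -out {}'.format(fastapath, db))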
mikeboers/PyAV | setup.py | https://github.com/mikeboers/PyAV/blob/9414187088b9b8dbaa180cfe1db6ceba243184ea/setup.py#L76-L97 | def get_library_config(name):
"""Get distutils-compatible extension extras for the given library.
This requires ``pkg-config``.
"""
try:
proc = Popen(['pkg-config', '--cflags', '--libs', name], stdout=PIPE, stderr=PIPE)
except OSError:
print('pkg-config is required for building PyAV')
exit(1)
raw_cflags, err = proc.communicate()
if proc.wait():
return
known, unknown = parse_cflags(raw_cflags.decode('utf8'))
if unknown:
print("pkg-config returned flags we don't understand: {}".format(unknown))
exit(1)
return known | [
"def",
"get_library_config",
"(",
"name",
")",
":",
"try",
":",
"proc",
"=",
"Popen",
"(",
"[",
"'pkg-config'",
",",
"'--cflags'",
",",
"'--libs'",
",",
"name",
"]",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
")",
"except",
"OSError",
":",
"print",
"(",
"'pkg-config is required for building PyAV'",
")",
"exit",
"(",
"1",
")",
"raw_cflags",
",",
"err",
"=",
"proc",
".",
"communicate",
"(",
")",
"if",
"proc",
".",
"wait",
"(",
")",
":",
"return",
"known",
",",
"unknown",
"=",
"parse_cflags",
"(",
"raw_cflags",
".",
"decode",
"(",
"'utf8'",
")",
")",
"if",
"unknown",
":",
"print",
"(",
"\"pkg-config returned flags we don't understand: {}\"",
".",
"format",
"(",
"unknown",
")",
")",
"exit",
"(",
"1",
")",
"return",
"known"
] | Get distutils-compatible extension extras for the given library.
This requires ``pkg-config``. | [
"Get",
"distutils",
"-",
"compatible",
"extension",
"extras",
"for",
"the",
"given",
"library",
"."
] | python | valid |
fastai/fastai | docs_src/nbval/plugin.py | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/docs_src/nbval/plugin.py#L135-L160 | def find_comment_markers(cellsource):
"""Look through the cell source for comments which affect nbval's behaviour
Yield an iterable of ``(MARKER_TYPE, True)``.
"""
found = {}
for line in cellsource.splitlines():
line = line.strip()
if line.startswith('#'):
# print("Found comment in '{}'".format(line))
comment = line.lstrip('#').strip()
if comment in comment_markers:
# print("Found marker {}".format(comment))
marker = comment_markers[comment]
if not isinstance(marker, tuple):
# If not an explicit tuple ('option', True/False),
# imply ('option', True)
marker = (marker, True)
marker_type = marker[0]
if marker_type in found:
warnings.warn(
"Conflicting comment markers found, using the latest: "
" %s VS %s" %
(found[marker_type], comment))
found[marker_type] = comment
yield marker | [
"def",
"find_comment_markers",
"(",
"cellsource",
")",
":",
"found",
"=",
"{",
"}",
"for",
"line",
"in",
"cellsource",
".",
"splitlines",
"(",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"# print(\"Found comment in '{}'\".format(line))",
"comment",
"=",
"line",
".",
"lstrip",
"(",
"'#'",
")",
".",
"strip",
"(",
")",
"if",
"comment",
"in",
"comment_markers",
":",
"# print(\"Found marker {}\".format(comment))",
"marker",
"=",
"comment_markers",
"[",
"comment",
"]",
"if",
"not",
"isinstance",
"(",
"marker",
",",
"tuple",
")",
":",
"# If not an explicit tuple ('option', True/False),",
"# imply ('option', True)",
"marker",
"=",
"(",
"marker",
",",
"True",
")",
"marker_type",
"=",
"marker",
"[",
"0",
"]",
"if",
"marker_type",
"in",
"found",
":",
"warnings",
".",
"warn",
"(",
"\"Conflicting comment markers found, using the latest: \"",
"\" %s VS %s\"",
"%",
"(",
"found",
"[",
"marker_type",
"]",
",",
"comment",
")",
")",
"found",
"[",
"marker_type",
"]",
"=",
"comment",
"yield",
"marker"
] | Look through the cell source for comments which affect nbval's behaviour
Yield an iterable of ``(MARKER_TYPE, True)``. | [
"Look",
"through",
"the",
"cell",
"source",
"for",
"comments",
"which",
"affect",
"nbval",
"s",
"behaviour"
] | python | train |
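Because find_comment_markers() is a generator of (option, value) pairs, collecting a cell's options is one dict() call. A hedged sketch, with a stand-in for the module-level comment_markers mapping the record assumes (the real nbval module defines more markers):

    import warnings

    # Stand-in mapping: values may be bare option names (implying True)
    # or explicit (option, value) tuples, as the generator expects.
    comment_markers = {
        'NBVAL_SKIP': 'skip',
        'NBVAL_RAISES_EXCEPTION': ('check_exception', True),
    }

    cell_source = "1 / 0\n# NBVAL_RAISES_EXCEPTION\n"
    options = dict(find_comment_markers(cell_source))
    # options == {'check_exception': True}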
nirum/descent | descent/objectives.py | https://github.com/nirum/descent/blob/074c8452f15a0da638668a4fe139fde06ccfae7f/descent/objectives.py#L44-L51 | def doublewell(theta):
"""Pointwise minimum of two quadratic bowls"""
k0, k1, depth = 0.01, 100, 0.5
shallow = 0.5 * k0 * theta ** 2 + depth
deep = 0.5 * k1 * theta ** 2
obj = float(np.minimum(shallow, deep))
grad = np.where(deep < shallow, k1 * theta, k0 * theta)
return obj, grad | [
"def",
"doublewell",
"(",
"theta",
")",
":",
"k0",
",",
"k1",
",",
"depth",
"=",
"0.01",
",",
"100",
",",
"0.5",
"shallow",
"=",
"0.5",
"*",
"k0",
"*",
"theta",
"**",
"2",
"+",
"depth",
"deep",
"=",
"0.5",
"*",
"k1",
"*",
"theta",
"**",
"2",
"obj",
"=",
"float",
"(",
"np",
".",
"minimum",
"(",
"shallow",
",",
"deep",
")",
")",
"grad",
"=",
"np",
".",
"where",
"(",
"deep",
"<",
"shallow",
",",
"k1",
"*",
"theta",
",",
"k0",
"*",
"theta",
")",
"return",
"obj",
",",
"grad"
] | Pointwise minimum of two quadratic bowls | [
"Pointwise",
"minimum",
"of",
"two",
"quadratic",
"bowls"
] | python | valid |
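Evaluating the objective above at two points shows the piecewise switch between the two bowls; a self-contained sketch with hand-checked values:

    import numpy as np

    def doublewell(theta):
        """Pointwise minimum of two quadratic bowls, as in the record above."""
        k0, k1, depth = 0.01, 100, 0.5
        shallow = 0.5 * k0 * theta ** 2 + depth   # wide bowl, raised by depth
        deep = 0.5 * k1 * theta ** 2              # steep bowl through the origin
        obj = float(np.minimum(shallow, deep))
        grad = np.where(deep < shallow, k1 * theta, k0 * theta)
        return obj, grad

    obj, grad = doublewell(0.05)   # steep bowl wins: obj == 0.125, grad == 5.0
    obj, grad = doublewell(10.0)   # shallow bowl wins: obj == 1.0, grad == 0.1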
libyal/dtfabric | dtfabric/reader.py | https://github.com/libyal/dtfabric/blob/0d2b5719fa257f6e5c661a406737ebcf8c8db266/dtfabric/reader.py#L1078-L1100 | def _ReadUnionDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads an union data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
UnionDefinition: union data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
return self._ReadDataTypeDefinitionWithMembers(
definitions_registry, definition_values, data_types.UnionDefinition,
definition_name, supports_conditions=False) | [
"def",
"_ReadUnionDataTypeDefinition",
"(",
"self",
",",
"definitions_registry",
",",
"definition_values",
",",
"definition_name",
",",
"is_member",
"=",
"False",
")",
":",
"return",
"self",
".",
"_ReadDataTypeDefinitionWithMembers",
"(",
"definitions_registry",
",",
"definition_values",
",",
"data_types",
".",
"UnionDefinition",
",",
"definition_name",
",",
"supports_conditions",
"=",
"False",
")"
] | Reads a union data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
UnionDefinition: union data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect. | [
"Reads",
"an",
"union",
"data",
"type",
"definition",
"."
] | python | train |
revelc/pyaccumulo | pyaccumulo/proxy/AccumuloProxy.py | https://github.com/revelc/pyaccumulo/blob/8adcf535bb82ba69c749efce785c9efc487e85de/pyaccumulo/proxy/AccumuloProxy.py#L1765-L1773 | def removeTableProperty(self, login, tableName, property):
"""
Parameters:
- login
- tableName
- property
"""
self.send_removeTableProperty(login, tableName, property)
self.recv_removeTableProperty() | [
"def",
"removeTableProperty",
"(",
"self",
",",
"login",
",",
"tableName",
",",
"property",
")",
":",
"self",
".",
"send_removeTableProperty",
"(",
"login",
",",
"tableName",
",",
"property",
")",
"self",
".",
"recv_removeTableProperty",
"(",
")"
] | Parameters:
- login
- tableName
- property | [
"Parameters",
":",
"-",
"login",
"-",
"tableName",
"-",
"property"
] | python | train |
utek/pyseaweed | pyseaweed/utils.py | https://github.com/utek/pyseaweed/blob/218049329885425a2b8370157fa44952e64516be/pyseaweed/utils.py#L41-L48 | def head(self, url, *args, **kwargs):
"""Returns response to http HEAD
on provided url
"""
res = self._conn.head(url, headers=self._prepare_headers(**kwargs))
if res.status_code == 200:
return res
return None | [
"def",
"head",
"(",
"self",
",",
"url",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"res",
"=",
"self",
".",
"_conn",
".",
"head",
"(",
"url",
",",
"headers",
"=",
"self",
".",
"_prepare_headers",
"(",
"*",
"*",
"kwargs",
")",
")",
"if",
"res",
".",
"status_code",
"==",
"200",
":",
"return",
"res",
"return",
"None"
] | Returns response to http HEAD
on provided url | [
"Returns",
"response",
"to",
"http",
"HEAD",
"on",
"provided",
"url"
] | python | train |
phaethon/kamene | kamene/crypto/cert.py | https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/crypto/cert.py#L605-L661 | def _rsaes_pkcs1_v1_5_decrypt(self, C):
"""
Implements RSAES-PKCS1-V1_5-DECRYPT() function described in section
7.2.2 of RFC 3447.
Input:
C: ciphertext to be decrypted, an octet string of length k, where
k is the length in octets of the RSA modulus n.
Output:
an octet string of length at most k - 11
on error, None is returned.
"""
# 1) Length checking
cLen = len(C)
k = self.modulusLen / 8
if cLen != k or k < 11:
warning("Key._rsaes_pkcs1_v1_5_decrypt() decryption error "
"(cLen != k or k < 11)")
return None
# 2) RSA decryption
c = pkcs_os2ip(C) # 2.a)
m = self._rsadp(c) # 2.b)
EM = pkcs_i2osp(m, k) # 2.c)
# 3) EME-PKCS1-v1_5 decoding
# I am aware of the note at the end of 7.2.2 regarding error
# conditions reporting but the one provided below are for _local_
# debugging purposes. --arno
if EM[0] != '\x00':
warning("Key._rsaes_pkcs1_v1_5_decrypt(): decryption error "
"(first byte is not 0x00)")
return None
if EM[1] != '\x02':
warning("Key._rsaes_pkcs1_v1_5_decrypt(): decryption error "
"(second byte is not 0x02)")
return None
tmp = EM[2:].split('\x00', 1)
if len(tmp) != 2:
warning("Key._rsaes_pkcs1_v1_5_decrypt(): decryption error "
"(no 0x00 to separate PS from M)")
return None
PS, M = tmp
if len(PS) < 8:
warning("Key._rsaes_pkcs1_v1_5_decrypt(): decryption error "
"(PS is less than 8 byte long)")
return None
return M | [
"def",
"_rsaes_pkcs1_v1_5_decrypt",
"(",
"self",
",",
"C",
")",
":",
"# 1) Length checking",
"cLen",
"=",
"len",
"(",
"C",
")",
"k",
"=",
"self",
".",
"modulusLen",
"/",
"8",
"if",
"cLen",
"!=",
"k",
"or",
"k",
"<",
"11",
":",
"warning",
"(",
"\"Key._rsaes_pkcs1_v1_5_decrypt() decryption error \"",
"\"(cLen != k or k < 11)\"",
")",
"return",
"None",
"# 2) RSA decryption",
"c",
"=",
"pkcs_os2ip",
"(",
"C",
")",
"# 2.a)",
"m",
"=",
"self",
".",
"_rsadp",
"(",
"c",
")",
"# 2.b)",
"EM",
"=",
"pkcs_i2osp",
"(",
"m",
",",
"k",
")",
"# 2.c)",
"# 3) EME-PKCS1-v1_5 decoding",
"# I am aware of the note at the end of 7.2.2 regarding error",
"# conditions reporting but the one provided below are for _local_",
"# debugging purposes. --arno",
"if",
"EM",
"[",
"0",
"]",
"!=",
"'\\x00'",
":",
"warning",
"(",
"\"Key._rsaes_pkcs1_v1_5_decrypt(): decryption error \"",
"\"(first byte is not 0x00)\"",
")",
"return",
"None",
"if",
"EM",
"[",
"1",
"]",
"!=",
"'\\x02'",
":",
"warning",
"(",
"\"Key._rsaes_pkcs1_v1_5_decrypt(): decryption error \"",
"\"(second byte is not 0x02)\"",
")",
"return",
"None",
"tmp",
"=",
"EM",
"[",
"2",
":",
"]",
".",
"split",
"(",
"'\\x00'",
",",
"1",
")",
"if",
"len",
"(",
"tmp",
")",
"!=",
"2",
":",
"warning",
"(",
"\"Key._rsaes_pkcs1_v1_5_decrypt(): decryption error \"",
"\"(no 0x00 to separate PS from M)\"",
")",
"return",
"None",
"PS",
",",
"M",
"=",
"tmp",
"if",
"len",
"(",
"PS",
")",
"<",
"8",
":",
"warning",
"(",
"\"Key._rsaes_pkcs1_v1_5_decrypt(): decryption error \"",
"\"(PS is less than 8 byte long)\"",
")",
"return",
"None",
"return",
"M"
] | Implements RSAES-PKCS1-V1_5-DECRYPT() function described in section
7.2.2 of RFC 3447.
Input:
C: ciphertext to be decrypted, an octet string of length k, where
k is the length in octets of the RSA modulus n.
Output:
an octet string of length at most k - 11
on error, None is returned. | [
"Implements",
"RSAES",
"-",
"PKCS1",
"-",
"V1_5",
"-",
"DECRYPT",
"()",
"function",
"described",
"in",
"section",
"7",
".",
"2",
".",
"2",
"of",
"RFC",
"3447",
"."
] | python | train |
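The decoding branch above enforces the EME-PKCS1-v1_5 layout EM = 0x00 || 0x02 || PS || 0x00 || M with PS at least 8 octets. A hedged standalone sketch of just that unpadding step on an already-decrypted block (a production decoder must additionally run in constant time to resist Bleichenbacher-style padding oracles):

    def eme_pkcs1_v15_decode(em):
        """Return M from EM = 00 02 PS 00 M (bytes), or None on format error."""
        if len(em) < 11 or em[0:1] != b'\x00' or em[1:2] != b'\x02':
            return None               # too short, or wrong leading bytes
        sep = em.find(b'\x00', 2)     # the 0x00 separating PS from the message
        if sep < 0 or sep - 2 < 8:
            return None               # no separator, or PS shorter than 8 bytes
        return em[sep + 1:]

    assert eme_pkcs1_v15_decode(b'\x00\x02' + b'\xff' * 8 + b'\x00hi') == b'hi'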
SteveMcGrath/pySecurityCenter | securitycenter/base.py | https://github.com/SteveMcGrath/pySecurityCenter/blob/f0b10b1bcd4fd23a8d4d09ca6774cdf5e1cfd880/securitycenter/base.py#L90-L96 | def post(self, path, **kwargs):
'''Calls the specified path with the POST method'''
resp = self._session.post(self._url(path), **self._builder(**kwargs))
if 'stream' in kwargs:
return resp
else:
return self._resp_error_check(resp) | [
"def",
"post",
"(",
"self",
",",
"path",
",",
"*",
"*",
"kwargs",
")",
":",
"resp",
"=",
"self",
".",
"_session",
".",
"post",
"(",
"self",
".",
"_url",
"(",
"path",
")",
",",
"*",
"*",
"self",
".",
"_builder",
"(",
"*",
"*",
"kwargs",
")",
")",
"if",
"'stream'",
"in",
"kwargs",
":",
"return",
"resp",
"else",
":",
"return",
"self",
".",
"_resp_error_check",
"(",
"resp",
")"
] | Calls the specified path with the POST method | [
"Calls",
"the",
"specified",
"path",
"with",
"the",
"POST",
"method"
] | python | train |
BernardFW/bernard | src/bernard/i18n/_formatter.py | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/i18n/_formatter.py#L80-L85 | def format_date(self, value, format_):
"""
Format the date using Babel
"""
date_ = make_date(value)
return dates.format_date(date_, format_, locale=self.lang) | [
"def",
"format_date",
"(",
"self",
",",
"value",
",",
"format_",
")",
":",
"date_",
"=",
"make_date",
"(",
"value",
")",
"return",
"dates",
".",
"format_date",
"(",
"date_",
",",
"format_",
",",
"locale",
"=",
"self",
".",
"lang",
")"
] | Format the date using Babel | [
"Format",
"the",
"date",
"using",
"Babel"
] | python | train |
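A standalone sketch of the Babel call this method wraps (format and locale values are illustrative):

    import datetime
    from babel import dates

    print(dates.format_date(datetime.date(2019, 3, 1), 'long', locale='fr'))
    # prints something along the lines of: 1 mars 2019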
tomnor/channelpack | channelpack/pack.py | https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L344-L402 | def rebase(self, key, start=None, decimals=5):
"""Rebase a channel (key) on start.
The step (between elements) needs to be constant all through,
else ValueError is raised. The exception to this is the border
step between data loaded from two different files.
key: int or str
The key for the channel to rebase.
start: int or float or None
If specified - replace the first element in the first loaded
data channel with start.
decimals: int
Diffs are rounded to this number of decimals before the step
through arrays are checked. The diffs are otherwise likely never to
be all equal.
Typically this would be used to make a time channel
continuous. Like, not start over from 0, when data is appended
from multiple files. Or simply to rebase a channel on 'start'.
If start is None, and the instance is loaded from one file only,
this method has no effect.
.. note::
The instance channel is modified on success.
"""
diffs = []
def diffsappend(d, sc):
diff = np.around(np.diff(d), decimals)
diffs.append((diff, diff[0], sc))
if hasattr(self, 'metamulti'):
for sc in self.metamulti['slices']:
diffsappend(self(key)[sc], sc)
else:
diffsappend(self(key), slice(0, self.rec_cnt))
for diff, d, sc in diffs:
if not np.all(diff == d):
raise ValueError('All diffs not equal within ' +
'indexes ' + str(sc))
S = set([t[1] for t in diffs])
if len(S) > 1:
raise ValueError('Diffs not equal between appended data files: ' +
str(S))
# Now modify:
if start is None:
start = self(key)[0]
self.D[self._key(key)] = np.linspace(start, d * self.rec_cnt + start,
num=self.rec_cnt, endpoint=False)
assert len(self(key)) == self.rec_cnt, 'Semantic error' | [
"def",
"rebase",
"(",
"self",
",",
"key",
",",
"start",
"=",
"None",
",",
"decimals",
"=",
"5",
")",
":",
"diffs",
"=",
"[",
"]",
"def",
"diffsappend",
"(",
"d",
",",
"sc",
")",
":",
"diff",
"=",
"np",
".",
"around",
"(",
"np",
".",
"diff",
"(",
"d",
")",
",",
"decimals",
")",
"diffs",
".",
"append",
"(",
"(",
"diff",
",",
"diff",
"[",
"0",
"]",
",",
"sc",
")",
")",
"if",
"hasattr",
"(",
"self",
",",
"'metamulti'",
")",
":",
"for",
"sc",
"in",
"self",
".",
"metamulti",
"[",
"'slices'",
"]",
":",
"diffsappend",
"(",
"self",
"(",
"key",
")",
"[",
"sc",
"]",
",",
"sc",
")",
"else",
":",
"diffsappend",
"(",
"self",
"(",
"key",
")",
",",
"slice",
"(",
"0",
",",
"self",
".",
"rec_cnt",
")",
")",
"for",
"diff",
",",
"d",
",",
"sc",
"in",
"diffs",
":",
"if",
"not",
"np",
".",
"all",
"(",
"diff",
"==",
"d",
")",
":",
"raise",
"ValueError",
"(",
"'All diffs not equal within '",
"+",
"'indexes '",
"+",
"str",
"(",
"sc",
")",
")",
"S",
"=",
"set",
"(",
"[",
"t",
"[",
"1",
"]",
"for",
"t",
"in",
"diffs",
"]",
")",
"if",
"len",
"(",
"S",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Diffs not equal between appended data files: '",
"+",
"str",
"(",
"S",
")",
")",
"# Now modify:",
"if",
"start",
"is",
"None",
":",
"start",
"=",
"self",
"(",
"key",
")",
"[",
"0",
"]",
"self",
".",
"D",
"[",
"self",
".",
"_key",
"(",
"key",
")",
"]",
"=",
"np",
".",
"linspace",
"(",
"start",
",",
"d",
"*",
"self",
".",
"rec_cnt",
"+",
"start",
",",
"num",
"=",
"self",
".",
"rec_cnt",
",",
"endpoint",
"=",
"False",
")",
"assert",
"len",
"(",
"self",
"(",
"key",
")",
")",
"==",
"self",
".",
"rec_cnt",
",",
"'Semantic error'"
] | Rebase a channel (key) on start.
The step (between elements) needs to be constant all through,
else ValueError is raised. The exception to this is the border
step between data loaded from two different files.
key: int or str
The key for the channel to rebase.
start: int or float or None
If specified - replace the first element in the first loaded
data channel with start.
decimals: int
Diffs are rounded to this number of decimals before the step
through the arrays is checked. The diffs are otherwise likely never to
be all equal.
Typically this would be used to make a time channel
continuous. Like, not start over from 0, when data is appended
from multiple files. Or simply to rebase a channel on 'start'.
If start is None, and the instance is loaded from one file only,
this method has no effect.
.. note::
The instance channel is modified on success. | [
"Rebase",
"a",
"channel",
"(",
"key",
")",
"on",
"start",
"."
] | python | train |
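A short, hedged usage sketch of rebase(); the pack instance `cp` and the channel name 'time' are hypothetical:

    cp.rebase('time', start=0.0)   # first sample becomes 0.0, constant step kept
    cp.rebase('time')              # keep the first sample, just enforce one step
    # ValueError is raised if the step between samples is not constant throughout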
LEMS/pylems | lems/parser/LEMS.py | https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/parser/LEMS.py#L655-L680 | def parse_event_writer(self, node):
"""
Parses <EventWriter>
@param node: Node containing the <EventWriter> element
@type node: xml.etree.Element
"""
if 'path' in node.lattrib:
path = node.lattrib['path']
else:
self.raise_error('<EventWriter> must specify a path.')
if 'filename' in node.lattrib:
file_path = node.lattrib['filename']
else:
self.raise_error("Event writer for '{0}' must specify a filename.",
path)
if 'format' in node.lattrib:
format = node.lattrib['format']
else:
self.raise_error("Event writer for '{0}' must specify a format.",
path)
self.current_simulation.add_event_writer(EventWriter(path, file_path, format)) | [
"def",
"parse_event_writer",
"(",
"self",
",",
"node",
")",
":",
"if",
"'path'",
"in",
"node",
".",
"lattrib",
":",
"path",
"=",
"node",
".",
"lattrib",
"[",
"'path'",
"]",
"else",
":",
"self",
".",
"raise_error",
"(",
"'<EventWriter> must specify a path.'",
")",
"if",
"'filename'",
"in",
"node",
".",
"lattrib",
":",
"file_path",
"=",
"node",
".",
"lattrib",
"[",
"'filename'",
"]",
"else",
":",
"self",
".",
"raise_error",
"(",
"\"Event writer for '{0}' must specify a filename.\"",
",",
"path",
")",
"if",
"'format'",
"in",
"node",
".",
"lattrib",
":",
"format",
"=",
"node",
".",
"lattrib",
"[",
"'format'",
"]",
"else",
":",
"self",
".",
"raise_error",
"(",
"\"Event writer for '{0}' must specify a format.\"",
",",
"path",
")",
"self",
".",
"current_simulation",
".",
"add_event_writer",
"(",
"EventWriter",
"(",
"path",
",",
"file_path",
",",
"format",
")",
")"
] | Parses <EventWriter>
@param node: Node containing the <EventWriter> element
@type node: xml.etree.Element | [
"Parses",
"<EventWriter",
">"
] | python | train |
tkem/uritools | uritools/split.py | https://github.com/tkem/uritools/blob/e77ba4acd937b68da9850138563debd4c925ef9f/uritools/split.py#L173-L176 | def getpath(self, encoding='utf-8', errors='strict'):
"""Return the normalized decoded URI path."""
path = self.__remove_dot_segments(self.path)
return uridecode(path, encoding, errors) | [
"def",
"getpath",
"(",
"self",
",",
"encoding",
"=",
"'utf-8'",
",",
"errors",
"=",
"'strict'",
")",
":",
"path",
"=",
"self",
".",
"__remove_dot_segments",
"(",
"self",
".",
"path",
")",
"return",
"uridecode",
"(",
"path",
",",
"encoding",
",",
"errors",
")"
] | Return the normalized decoded URI path. | [
"Return",
"the",
"normalized",
"decoded",
"URI",
"path",
"."
] | python | train |
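A hedged usage sketch, assuming the public uritools API where urisplit() returns a result object exposing this method:

    from uritools import urisplit

    parts = urisplit('http://example.com/a/b/../c%20d')
    print(parts.getpath())   # '/a/c d' -- dot segments removed, then percent-decoded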
OpenKMIP/PyKMIP | kmip/core/messages/payloads/get.py | https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/core/messages/payloads/get.py#L414-L470 | def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the Get response payload and decode it
into its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the object type, unique identifier, or
secret attributes are missing from the encoded payload.
"""
super(GetResponsePayload, self).read(
input_stream,
kmip_version=kmip_version
)
local_stream = utils.BytearrayStream(input_stream.read(self.length))
if self.is_tag_next(enums.Tags.OBJECT_TYPE, local_stream):
self._object_type = primitives.Enumeration(
enum=enums.ObjectType,
tag=enums.Tags.OBJECT_TYPE
)
self._object_type.read(local_stream, kmip_version=kmip_version)
else:
raise ValueError(
"Parsed payload encoding is missing the object type field."
)
if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):
self._unique_identifier = primitives.TextString(
tag=enums.Tags.UNIQUE_IDENTIFIER
)
self._unique_identifier.read(
local_stream,
kmip_version=kmip_version
)
else:
raise ValueError(
"Parsed payload encoding is missing the unique identifier "
"field."
)
self.secret = self.secret_factory.create(self.object_type)
if self.is_tag_next(self._secret.tag, local_stream):
self._secret.read(local_stream, kmip_version=kmip_version)
else:
raise ValueError(
"Parsed payload encoding is missing the secret field."
)
self.is_oversized(local_stream) | [
"def",
"read",
"(",
"self",
",",
"input_stream",
",",
"kmip_version",
"=",
"enums",
".",
"KMIPVersion",
".",
"KMIP_1_0",
")",
":",
"super",
"(",
"GetResponsePayload",
",",
"self",
")",
".",
"read",
"(",
"input_stream",
",",
"kmip_version",
"=",
"kmip_version",
")",
"local_stream",
"=",
"utils",
".",
"BytearrayStream",
"(",
"input_stream",
".",
"read",
"(",
"self",
".",
"length",
")",
")",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"OBJECT_TYPE",
",",
"local_stream",
")",
":",
"self",
".",
"_object_type",
"=",
"primitives",
".",
"Enumeration",
"(",
"enum",
"=",
"enums",
".",
"ObjectType",
",",
"tag",
"=",
"enums",
".",
"Tags",
".",
"OBJECT_TYPE",
")",
"self",
".",
"_object_type",
".",
"read",
"(",
"local_stream",
",",
"kmip_version",
"=",
"kmip_version",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Parsed payload encoding is missing the object type field.\"",
")",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"UNIQUE_IDENTIFIER",
",",
"local_stream",
")",
":",
"self",
".",
"_unique_identifier",
"=",
"primitives",
".",
"TextString",
"(",
"tag",
"=",
"enums",
".",
"Tags",
".",
"UNIQUE_IDENTIFIER",
")",
"self",
".",
"_unique_identifier",
".",
"read",
"(",
"local_stream",
",",
"kmip_version",
"=",
"kmip_version",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Parsed payload encoding is missing the unique identifier \"",
"\"field.\"",
")",
"self",
".",
"secret",
"=",
"self",
".",
"secret_factory",
".",
"create",
"(",
"self",
".",
"object_type",
")",
"if",
"self",
".",
"is_tag_next",
"(",
"self",
".",
"_secret",
".",
"tag",
",",
"local_stream",
")",
":",
"self",
".",
"_secret",
".",
"read",
"(",
"local_stream",
",",
"kmip_version",
"=",
"kmip_version",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Parsed payload encoding is missing the secret field.\"",
")",
"self",
".",
"is_oversized",
"(",
"local_stream",
")"
] | Read the data encoding the Get response payload and decode it
into its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the object type, unique identifier, or
secret attributes are missing from the encoded payload. | [
"Read",
"the",
"data",
"encoding",
"the",
"Get",
"response",
"payload",
"and",
"decode",
"it",
"into",
"its",
"constituent",
"parts",
"."
] | python | test |
robertmartin8/PyPortfolioOpt | pypfopt/efficient_frontier.py | https://github.com/robertmartin8/PyPortfolioOpt/blob/dfad1256cb6995c7fbd7a025eedb54b1ca04b2fc/pypfopt/efficient_frontier.py#L257-L276 | def portfolio_performance(self, verbose=False, risk_free_rate=0.02):
"""
After optimising, calculate (and optionally print) the performance of the optimal
portfolio. Currently calculates expected return, volatility, and the Sharpe ratio.
:param verbose: whether performance should be printed, defaults to False
:type verbose: bool, optional
:param risk_free_rate: risk-free rate of borrowing/lending, defaults to 0.02
:type risk_free_rate: float, optional
:raises ValueError: if weights have not been calculated yet
:return: expected return, volatility, Sharpe ratio.
:rtype: (float, float, float)
"""
return base_optimizer.portfolio_performance(
self.expected_returns,
self.cov_matrix,
self.weights,
verbose,
risk_free_rate,
) | [
"def",
"portfolio_performance",
"(",
"self",
",",
"verbose",
"=",
"False",
",",
"risk_free_rate",
"=",
"0.02",
")",
":",
"return",
"base_optimizer",
".",
"portfolio_performance",
"(",
"self",
".",
"expected_returns",
",",
"self",
".",
"cov_matrix",
",",
"self",
".",
"weights",
",",
"verbose",
",",
"risk_free_rate",
",",
")"
] | After optimising, calculate (and optionally print) the performance of the optimal
portfolio. Currently calculates expected return, volatility, and the Sharpe ratio.
:param verbose: whether performance should be printed, defaults to False
:type verbose: bool, optional
:param risk_free_rate: risk-free rate of borrowing/lending, defaults to 0.02
:type risk_free_rate: float, optional
:raises ValueError: if weights have not been calculated yet
:return: expected return, volatility, Sharpe ratio.
:rtype: (float, float, float) | [
"After",
"optimising",
"calculate",
"(",
"and",
"optionally",
"print",
")",
"the",
"performance",
"of",
"the",
"optimal",
"portfolio",
".",
"Currently",
"calculates",
"expected",
"return",
"volatility",
"and",
"the",
"Sharpe",
"ratio",
"."
] | python | train |
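A hedged usage sketch; `ef` stands for an EfficientFrontier instance whose weights have already been computed (for example via a max-Sharpe optimisation):

    mu, sigma, sharpe = ef.portfolio_performance(verbose=True, risk_free_rate=0.02)
    # mu: expected return, sigma: volatility, sharpe: Sharpe ratio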
PedalPi/Raspberry-Physical | physical/liquidcristal/liquid_crystal.py | https://github.com/PedalPi/Raspberry-Physical/blob/3dc71b6997ef36d0de256c5db7a1b38178937fd5/physical/liquidcristal/liquid_crystal.py#L149-L155 | def home(self):
"""Return to initial position (row=0, column=0)"""
self.row = 0
self.column = 0
self.command(Command.RETURN_HOME)
msleep(2) | [
"def",
"home",
"(",
"self",
")",
":",
"self",
".",
"row",
"=",
"0",
"self",
".",
"column",
"=",
"0",
"self",
".",
"command",
"(",
"Command",
".",
"RETURN_HOME",
")",
"msleep",
"(",
"2",
")"
] | Return to initial position (row=0, column=0) | [
"Return",
"to",
"initial",
"position",
"(",
"row",
"=",
"0",
"column",
"=",
"0",
")"
] | python | train |
CI-WATER/gsshapy | gsshapy/util/context.py | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/util/context.py#L13-L20 | def tmp_chdir(new_path):
"""Change directory temporarily and return when done."""
prev_cwd = os.getcwd()
os.chdir(new_path)
try:
yield
finally:
os.chdir(prev_cwd) | [
"def",
"tmp_chdir",
"(",
"new_path",
")",
":",
"prev_cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"os",
".",
"chdir",
"(",
"new_path",
")",
"try",
":",
"yield",
"finally",
":",
"os",
".",
"chdir",
"(",
"prev_cwd",
")"
] | Change directory temporarily and return when done. | [
"Change",
"directory",
"temporarily",
"and",
"return",
"when",
"done",
"."
] | python | train |
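A usage sketch; the record's code field shows no contextlib.contextmanager decorator (presumably stripped during extraction), so the sketch wraps the generator explicitly:

    import contextlib
    import os
    import tempfile

    tmp_chdir_cm = contextlib.contextmanager(tmp_chdir)
    with tempfile.TemporaryDirectory() as workdir:
        with tmp_chdir_cm(workdir):
            assert os.getcwd() == os.path.realpath(workdir)
    # the previous working directory is restored on exit, even if the body raised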
PmagPy/PmagPy | programs/magic_gui.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/magic_gui.py#L557-L563 | def on_close_grid(self, event):
"""
If there is an open grid, save its data and close it.
"""
if self.parent.grid_frame:
self.parent.grid_frame.onSave(None)
self.parent.grid_frame.Destroy() | [
"def",
"on_close_grid",
"(",
"self",
",",
"event",
")",
":",
"if",
"self",
".",
"parent",
".",
"grid_frame",
":",
"self",
".",
"parent",
".",
"grid_frame",
".",
"onSave",
"(",
"None",
")",
"self",
".",
"parent",
".",
"grid_frame",
".",
"Destroy",
"(",
")"
] | If there is an open grid, save its data and close it. | [
"If",
"there",
"is",
"an",
"open",
"grid",
"save",
"its",
"data",
"and",
"close",
"it",
"."
] | python | train |
log2timeline/dfvfs | dfvfs/helpers/command_line.py | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/helpers/command_line.py#L213-L237 | def _WriteRow(self, output_writer, values, in_bold=False):
"""Writes a row of values aligned with the width to the output writer.
Args:
output_writer (CLIOutputWriter): output writer.
values (list[object]): values.
in_bold (Optional[bool]): True if the row should be written in bold.
"""
row_strings = []
for value_index, value_string in enumerate(values):
padding_size = self._column_sizes[value_index] - len(value_string)
padding_string = ' ' * padding_size
row_strings.extend([value_string, padding_string])
row_strings.pop()
row_strings = ''.join(row_strings)
if in_bold and not win32console:
# TODO: for win32console get current color and set intensity,
# write the header separately then reset intensity.
row_strings = '\x1b[1m{0:s}\x1b[0m'.format(row_strings)
output_writer.Write('{0:s}\n'.format(row_strings)) | [
"def",
"_WriteRow",
"(",
"self",
",",
"output_writer",
",",
"values",
",",
"in_bold",
"=",
"False",
")",
":",
"row_strings",
"=",
"[",
"]",
"for",
"value_index",
",",
"value_string",
"in",
"enumerate",
"(",
"values",
")",
":",
"padding_size",
"=",
"self",
".",
"_column_sizes",
"[",
"value_index",
"]",
"-",
"len",
"(",
"value_string",
")",
"padding_string",
"=",
"' '",
"*",
"padding_size",
"row_strings",
".",
"extend",
"(",
"[",
"value_string",
",",
"padding_string",
"]",
")",
"row_strings",
".",
"pop",
"(",
")",
"row_strings",
"=",
"''",
".",
"join",
"(",
"row_strings",
")",
"if",
"in_bold",
"and",
"not",
"win32console",
":",
"# TODO: for win32console get current color and set intensity,",
"# write the header separately then reset intensity.",
"row_strings",
"=",
"'\\x1b[1m{0:s}\\x1b[0m'",
".",
"format",
"(",
"row_strings",
")",
"output_writer",
".",
"Write",
"(",
"'{0:s}\\n'",
".",
"format",
"(",
"row_strings",
")",
")"
] | Writes a row of values aligned with the width to the output writer.
Args:
output_writer (CLIOutputWriter): output writer.
values (list[object]): values.
in_bold (Optional[bool]): True if the row should be written in bold. | [
"Writes",
"a",
"row",
"of",
"values",
"aligned",
"with",
"the",
"width",
"to",
"the",
"output",
"writer",
"."
] | python | train |
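The padding scheme above, reduced to a standalone sketch (column widths are made up):

    column_sizes = [12, 6]                 # hypothetical per-column widths
    values = ['identifier', 'size']
    cells = []
    for index, value in enumerate(values):
        padding = ' ' * (column_sizes[index] - len(value))
        cells.extend([value, padding])
    cells.pop()                            # drop the padding after the last column
    row = ''.join(cells)                   # 'identifier  size'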
frasertweedale/ledgertools | ltlib/xn.py | https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/xn.py#L168-L308 | def apply_outcomes(self, outcomes, uio, dropped=False, prevxn=None):
"""Apply the given outcomes to this rule.
If user intervention is required, outcomes are not applied
unless a ui.UI is supplied.
"""
if self.dropped and not dropped:
# do nothing for dropped xn, unless specifically told to
return
if 'drop' in outcomes:
highscore = score.score(outcomes['drop'].highest()[0])
if highscore >= threshold['y']:
# drop without prompting
self.dropped = True
elif highscore < threshold['n?']:
# do NOT drop, and don't even ask
pass
else:
uio.show('DROP was determined for transaction:')
uio.show('')
uio.show(self.summary())
if highscore >= threshold['y?']:
default = True
elif highscore >= threshold['?']:
default = None
else:
default = False
try:
self.dropped = uio.yn('DROP this transaction?', default)
except ui.RejectWarning:
# we assume they mean "no"
pass
if self.dropped and not dropped:
# do nothing further for dropped xn, unless specifically told to
return
# rebate outcomes
#
# A rebate is a rebate of the previous transaction.
# The proportions of credits in the prev xn are kept,
# inverted (i.e. made debits) and scaled to the rebate
# amount credit amount.
if 'rebate' in outcomes and not self.src and prevxn is not None:
ratio = self.amount / prevxn.amount
def scale(dst_ep):
amount = (dst_ep.amount * ratio).quantize(dst_ep.amount)
return Endpoint(dst_ep.account, -amount)
self.src = map(scale, prevxn.dst)
# handle rounding errors
self.src[0].amount -= self.amount + sum(x.amount for x in self.src)
# account outcomes
for outcome in ['src', 'dst']:
if outcome not in outcomes or getattr(self, outcome):
# no outcome, or the attribute was already set
continue
endpoints = []
highest = outcomes[outcome].highest()
try:
highscore = score.score(highest[0])
if len(highest) == 1:
if highscore >= threshold['y']:
# do it
endpoints = [
Endpoint(score.value(highest[0]), self.amount)
]
else:
uio.show('Choose ' + outcome + ' for transaction:')
uio.show('')
uio.show(self.summary())
prompt = 'Is the account {0}?'.format(
score.value(highest[0])
)
if highscore >= threshold['y?']:
default = True
elif highscore >= threshold['?']:
default = None
else:
default = False
if uio.yn(prompt, default):
endpoints = [
Endpoint(
score.value(highest[0]),
self.amount
)
]
else:
raise ui.RejectWarning('top score declined')
else:
# tied highest score, let user pick
uio.show('Choose ' + outcome + ' for transaction:')
uio.show('')
uio.show(self.summary())
prompt = 'Choose an account'
endpoints = [
Endpoint(
uio.choose(prompt, map(score.value, highest)),
self.amount
)
]
except ui.RejectWarning:
# user has rejected our offer(s)
uio.show("\n")
uio.show('Enter ' + outcome + ' endpoints:')
try:
endpoints = []
remaining = self.amount
while remaining:
uio.show('\n${0} remaining'.format(remaining))
account = uio.text(
' Enter account',
score.value(highest[0]) if highest else None
)
amount = uio.decimal(
' Enter amount',
default=remaining,
lower=0,
upper=remaining
)
endpoints.append(Endpoint(account, amount))
remaining = self.amount \
- sum(map(lambda x: x.amount, endpoints))
except ui.RejectWarning:
# bail out
sys.exit("bye!")
# flip amounts if it was a src outcome
if outcome == 'src':
endpoints = map(
lambda x: Endpoint(x.account, -x.amount),
endpoints
)
# set endpoints
setattr(self, outcome, endpoints) | [
"def",
"apply_outcomes",
"(",
"self",
",",
"outcomes",
",",
"uio",
",",
"dropped",
"=",
"False",
",",
"prevxn",
"=",
"None",
")",
":",
"if",
"self",
".",
"dropped",
"and",
"not",
"dropped",
":",
"# do nothing for dropped xn, unless specifically told to",
"return",
"if",
"'drop'",
"in",
"outcomes",
":",
"highscore",
"=",
"score",
".",
"score",
"(",
"outcomes",
"[",
"'drop'",
"]",
".",
"highest",
"(",
")",
"[",
"0",
"]",
")",
"if",
"highscore",
">=",
"threshold",
"[",
"'y'",
"]",
":",
"# drop without prompting",
"self",
".",
"dropped",
"=",
"True",
"elif",
"highscore",
"<",
"threshold",
"[",
"'n?'",
"]",
":",
"# do NOT drop, and don't even ask",
"pass",
"else",
":",
"uio",
".",
"show",
"(",
"'DROP was determined for transaction:'",
")",
"uio",
".",
"show",
"(",
"''",
")",
"uio",
".",
"show",
"(",
"self",
".",
"summary",
"(",
")",
")",
"if",
"highscore",
">=",
"threshold",
"[",
"'y?'",
"]",
":",
"default",
"=",
"True",
"elif",
"highscore",
">=",
"threshold",
"[",
"'?'",
"]",
":",
"default",
"=",
"None",
"else",
":",
"default",
"=",
"False",
"try",
":",
"self",
".",
"dropped",
"=",
"uio",
".",
"yn",
"(",
"'DROP this transaction?'",
",",
"default",
")",
"except",
"ui",
".",
"RejectWarning",
":",
"# we assume they mean \"no\"",
"pass",
"if",
"self",
".",
"dropped",
"and",
"not",
"dropped",
":",
"# do nothing further for dropped xn, unless specifically told to",
"return",
"# rebate outcomes",
"#",
"# A rebate is a rebate of the previous transaction.",
"# The proportions of credits in the prev xn are kept,",
"# inverted (i.e. made debits) and scaled to the rebate",
"# amount credit amount.",
"if",
"'rebate'",
"in",
"outcomes",
"and",
"not",
"self",
".",
"src",
"and",
"prevxn",
"is",
"not",
"None",
":",
"ratio",
"=",
"self",
".",
"amount",
"/",
"prevxn",
".",
"amount",
"def",
"scale",
"(",
"dst_ep",
")",
":",
"amount",
"=",
"(",
"dst_ep",
".",
"amount",
"*",
"ratio",
")",
".",
"quantize",
"(",
"dst_ep",
".",
"amount",
")",
"return",
"Endpoint",
"(",
"dst_ep",
".",
"account",
",",
"-",
"amount",
")",
"self",
".",
"src",
"=",
"map",
"(",
"scale",
",",
"prevxn",
".",
"dst",
")",
"# handle rounding errors",
"self",
".",
"src",
"[",
"0",
"]",
".",
"amount",
"-=",
"self",
".",
"amount",
"+",
"sum",
"(",
"x",
".",
"amount",
"for",
"x",
"in",
"self",
".",
"src",
")",
"# account outcomes",
"for",
"outcome",
"in",
"[",
"'src'",
",",
"'dst'",
"]",
":",
"if",
"outcome",
"not",
"in",
"outcomes",
"or",
"getattr",
"(",
"self",
",",
"outcome",
")",
":",
"# no outcome, or the attribute was already set",
"continue",
"endpoints",
"=",
"[",
"]",
"highest",
"=",
"outcomes",
"[",
"outcome",
"]",
".",
"highest",
"(",
")",
"try",
":",
"highscore",
"=",
"score",
".",
"score",
"(",
"highest",
"[",
"0",
"]",
")",
"if",
"len",
"(",
"highest",
")",
"==",
"1",
":",
"if",
"highscore",
">=",
"threshold",
"[",
"'y'",
"]",
":",
"# do it",
"endpoints",
"=",
"[",
"Endpoint",
"(",
"score",
".",
"value",
"(",
"highest",
"[",
"0",
"]",
")",
",",
"self",
".",
"amount",
")",
"]",
"else",
":",
"uio",
".",
"show",
"(",
"'Choose '",
"+",
"outcome",
"+",
"' for transaction:'",
")",
"uio",
".",
"show",
"(",
"''",
")",
"uio",
".",
"show",
"(",
"self",
".",
"summary",
"(",
")",
")",
"prompt",
"=",
"'Is the account {0}?'",
".",
"format",
"(",
"score",
".",
"value",
"(",
"highest",
"[",
"0",
"]",
")",
")",
"if",
"highscore",
">=",
"threshold",
"[",
"'y?'",
"]",
":",
"default",
"=",
"True",
"elif",
"highscore",
">=",
"threshold",
"[",
"'?'",
"]",
":",
"default",
"=",
"None",
"else",
":",
"default",
"=",
"False",
"if",
"uio",
".",
"yn",
"(",
"prompt",
",",
"default",
")",
":",
"endpoints",
"=",
"[",
"Endpoint",
"(",
"score",
".",
"value",
"(",
"highest",
"[",
"0",
"]",
")",
",",
"self",
".",
"amount",
")",
"]",
"else",
":",
"raise",
"ui",
".",
"RejectWarning",
"(",
"'top score declined'",
")",
"else",
":",
"# tied highest score, let user pick",
"uio",
".",
"show",
"(",
"'Choose '",
"+",
"outcome",
"+",
"' for transaction:'",
")",
"uio",
".",
"show",
"(",
"''",
")",
"uio",
".",
"show",
"(",
"self",
".",
"summary",
"(",
")",
")",
"prompt",
"=",
"'Choose an account'",
"endpoints",
"=",
"[",
"Endpoint",
"(",
"uio",
".",
"choose",
"(",
"prompt",
",",
"map",
"(",
"score",
".",
"value",
",",
"highest",
")",
")",
",",
"self",
".",
"amount",
")",
"]",
"except",
"ui",
".",
"RejectWarning",
":",
"# user has rejected our offer(s)",
"uio",
".",
"show",
"(",
"\"\\n\"",
")",
"uio",
".",
"show",
"(",
"'Enter '",
"+",
"outcome",
"+",
"' endpoints:'",
")",
"try",
":",
"endpoints",
"=",
"[",
"]",
"remaining",
"=",
"self",
".",
"amount",
"while",
"remaining",
":",
"uio",
".",
"show",
"(",
"'\\n${0} remaining'",
".",
"format",
"(",
"remaining",
")",
")",
"account",
"=",
"uio",
".",
"text",
"(",
"' Enter account'",
",",
"score",
".",
"value",
"(",
"highest",
"[",
"0",
"]",
")",
"if",
"highest",
"else",
"None",
")",
"amount",
"=",
"uio",
".",
"decimal",
"(",
"' Enter amount'",
",",
"default",
"=",
"remaining",
",",
"lower",
"=",
"0",
",",
"upper",
"=",
"remaining",
")",
"endpoints",
".",
"append",
"(",
"Endpoint",
"(",
"account",
",",
"amount",
")",
")",
"remaining",
"=",
"self",
".",
"amount",
"-",
"sum",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"amount",
",",
"endpoints",
")",
")",
"except",
"ui",
".",
"RejectWarning",
":",
"# bail out",
"sys",
".",
"exit",
"(",
"\"bye!\"",
")",
"# flip amounts if it was a src outcome",
"if",
"outcome",
"==",
"'src'",
":",
"endpoints",
"=",
"map",
"(",
"lambda",
"x",
":",
"Endpoint",
"(",
"x",
".",
"account",
",",
"-",
"x",
".",
"amount",
")",
",",
"endpoints",
")",
"# set endpoints",
"setattr",
"(",
"self",
",",
"outcome",
",",
"endpoints",
")"
] | Apply the given outcomes to this rule.
If user intervention is required, outcomes are not applied
unless a ui.UI is supplied. | [
"Apply",
"the",
"given",
"outcomes",
"to",
"this",
"rule",
"."
] | python | train |
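
The record above is cut off at the top, but its token stream shows a reusable pattern: rebate source endpoints are built by scaling a previous transaction's destinations with Decimal.quantize, and the rounding residue is then pushed onto the first endpoint so the totals still balance. A minimal standalone sketch of that balancing step, standard library only; the Endpoint type and sample figures here are illustrative, not taken from the source.

from collections import namedtuple
from decimal import Decimal

Endpoint = namedtuple('Endpoint', ['account', 'amount'])

def scale_endpoints(total, prev_endpoints):
    # Scale each previous endpoint by the amount ratio, quantized to the
    # same precision as the original amounts.
    ratio = total / sum(e.amount for e in prev_endpoints)
    scaled = [Endpoint(e.account, (e.amount * ratio).quantize(e.amount))
              for e in prev_endpoints]
    # quantize() can lose or gain a cent overall; absorb the residue into
    # the first endpoint so the scaled amounts sum exactly to `total`.
    residue = total - sum(e.amount for e in scaled)
    scaled[0] = Endpoint(scaled[0].account, scaled[0].amount + residue)
    return scaled

prev = [Endpoint(k, Decimal('1.00')) for k in 'abc']
print(scale_endpoints(Decimal('1.00'), prev))
# [Endpoint(account='a', amount=Decimal('0.34')),
#  Endpoint(account='b', amount=Decimal('0.33')),
#  Endpoint(account='c', amount=Decimal('0.33'))]
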
sorgerlab/indra | indra/sources/reach/reader.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/reader.py#L55-L69 | def get_api_ruler(self):
"""Return the existing reader if it exists or launch a new one.
Returns
-------
api_ruler : org.clulab.reach.apis.ApiRuler
An instance of the REACH ApiRuler class (java object).
"""
if self.api_ruler is None:
try:
self.api_ruler = \
autoclass('org.clulab.reach.export.apis.ApiRuler')
except JavaException as e:
raise ReachOfflineReadingError(e)
return self.api_ruler | [
"def",
"get_api_ruler",
"(",
"self",
")",
":",
"if",
"self",
".",
"api_ruler",
"is",
"None",
":",
"try",
":",
"self",
".",
"api_ruler",
"=",
"autoclass",
"(",
"'org.clulab.reach.export.apis.ApiRuler'",
")",
"except",
"JavaException",
"as",
"e",
":",
"raise",
"ReachOfflineReadingError",
"(",
"e",
")",
"return",
"self",
".",
"api_ruler"
] | Return the existing reader if it exists or launch a new one.
Returns
-------
api_ruler : org.clulab.reach.apis.ApiRuler
An instance of the REACH ApiRuler class (java object). | [
"Return",
"the",
"existing",
"reader",
"if",
"it",
"exists",
"or",
"launch",
"a",
"new",
"one",
"."
] | python | train |
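
The method above is a lazy-initialization cache: the heavyweight Java class is constructed once on first use, stored on the instance, and construction failures are translated into a domain-specific error. A generic, stdlib-only sketch of the same pattern; all names below are made up for illustration.

class BackendUnavailableError(Exception):
    """Raised when the underlying engine cannot be constructed."""

class LazyEngine:
    def __init__(self, factory):
        self._factory = factory
        self._engine = None

    def get(self):
        # Build the expensive object only once, wrapping low-level
        # failures in a domain-specific exception.
        if self._engine is None:
            try:
                self._engine = self._factory()
            except RuntimeError as err:
                raise BackendUnavailableError(err)
        return self._engine

engine = LazyEngine(dict)              # dict() stands in for a costly constructor
assert engine.get() is engine.get()    # the same instance is reused
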
Alveo/pyalveo | pyalveo/pyalveo.py | https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/pyalveo.py#L572-L619 | def api_request(self, url, data=None, method='GET', raw=False, file=None):
""" Perform an API request to the given URL, optionally
including the specified data
:type url: String
:param url: the URL to which to make the request
:type data: String
:param data: the data to send with the request, if any
:type method: String
:param method: the HTTP request method
:type raw: Boolean
:param raw: if True, return the raw response, otherwise treat as JSON and return the parsed response
:type file: String
:param file: (Optional) full path to file to be uploaded in a POST request
:returns: the response from the server either as a raw response or a Python dictionary
generated by parsing the JSON response
:raises: APIError if the API request is not successful
"""
if method == 'GET':
response = self.oauth.get(url)
elif method == 'POST':
if file is not None:
response = self.oauth.post(url, data=data, file=file)
else:
response = self.oauth.post(url, data=data)
elif method == 'PUT':
response = self.oauth.put(url, data=data)
elif method == 'DELETE':
response = self.oauth.delete(url)
else:
raise APIError("Unknown request method: %s" % (method,))
# check for error responses
if response.status_code >= 400:
raise APIError(response.status_code,
'',
"Error accessing API (url: %s, method: %s)\nData: %s\nMessage: %s" % (url, method, data, response.text))
if raw:
return response.content
else:
return response.json() | [
"def",
"api_request",
"(",
"self",
",",
"url",
",",
"data",
"=",
"None",
",",
"method",
"=",
"'GET'",
",",
"raw",
"=",
"False",
",",
"file",
"=",
"None",
")",
":",
"if",
"method",
"is",
"'GET'",
":",
"response",
"=",
"self",
".",
"oauth",
".",
"get",
"(",
"url",
")",
"elif",
"method",
"is",
"'POST'",
":",
"if",
"file",
"is",
"not",
"None",
":",
"response",
"=",
"self",
".",
"oauth",
".",
"post",
"(",
"url",
",",
"data",
"=",
"data",
",",
"file",
"=",
"file",
")",
"else",
":",
"response",
"=",
"self",
".",
"oauth",
".",
"post",
"(",
"url",
",",
"data",
"=",
"data",
")",
"elif",
"method",
"is",
"'PUT'",
":",
"response",
"=",
"self",
".",
"oauth",
".",
"put",
"(",
"url",
",",
"data",
"=",
"data",
")",
"elif",
"method",
"is",
"'DELETE'",
":",
"response",
"=",
"self",
".",
"oauth",
".",
"delete",
"(",
"url",
")",
"else",
":",
"raise",
"APIError",
"(",
"\"Unknown request method: %s\"",
"%",
"(",
"method",
",",
")",
")",
"# check for error responses",
"if",
"response",
".",
"status_code",
">=",
"400",
":",
"raise",
"APIError",
"(",
"response",
".",
"status_code",
",",
"''",
",",
"\"Error accessing API (url: %s, method: %s)\\nData: %s\\nMessage: %s\"",
"%",
"(",
"url",
",",
"method",
",",
"data",
",",
"response",
".",
"text",
")",
")",
"if",
"raw",
":",
"return",
"response",
".",
"content",
"else",
":",
"return",
"response",
".",
"json",
"(",
")"
] | Perform an API request to the given URL, optionally
including the specified data
:type url: String
:param url: the URL to which to make the request
:type data: String
:param data: the data to send with the request, if any
:type method: String
:param method: the HTTP request method
:type raw: Boolean
:param raw: if True, return the raw response, otherwise treat as JSON and return the parsed response
:type file: String
:param file: (Optional) full path to file to be uploaded in a POST request
:returns: the response from the server either as a raw response or a Python dictionary
generated by parsing the JSON response
:raises: APIError if the API request is not successful | [
"Perform",
"an",
"API",
"request",
"to",
"the",
"given",
"URL",
"optionally",
"including",
"the",
"specified",
"data"
] | python | train |
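
The if/elif ladder above maps method names to session calls; a dispatch table expresses the same flow more compactly. A hedged sketch using a plain requests.Session rather than the library's OAuth wrapper (it assumes the third-party requests package; the error type here is illustrative):

import requests

def api_request(session, url, data=None, method='GET'):
    # Dispatch to the matching session call, then fail loudly on 4xx/5xx.
    methods = {
        'GET': session.get,
        'POST': lambda u: session.post(u, data=data),
        'PUT': lambda u: session.put(u, data=data),
        'DELETE': session.delete,
    }
    if method not in methods:
        raise ValueError('Unknown request method: %s' % method)
    response = methods[method](url)
    if response.status_code >= 400:
        raise RuntimeError('Error accessing API (url: %s, method: %s): %s'
                           % (url, method, response.text))
    return response.json()

# e.g. api_request(requests.Session(), 'https://example.com/api/items')
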
zaturox/glin | glin/zmq/messages.py | https://github.com/zaturox/glin/blob/55214a579c4e4b4d74765f3f6aa2eb815bac1c3b/glin/zmq/messages.py#L93-L99 | def uint64(self, val):
"""append a frame containing a uint64"""
try:
self.msg += [pack("!Q", val)]
except struct.error:
raise ValueError("Expected uint64")
return self | [
"def",
"uint64",
"(",
"self",
",",
"val",
")",
":",
"try",
":",
"self",
".",
"msg",
"+=",
"[",
"pack",
"(",
"\"!Q\"",
",",
"val",
")",
"]",
"except",
"struct",
".",
"error",
":",
"raise",
"ValueError",
"(",
"\"Expected uint64\"",
")",
"return",
"self"
] | append a frame containing a uint64 | [
"append",
"a",
"frame",
"containing",
"a",
"uint64"
] | python | train |
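
A quick round-trip check of the frame encoding above: struct.pack('!Q', ...) yields the 8-byte big-endian representation and raises struct.error for anything outside the uint64 range, which the builder converts to ValueError. Standalone sketch:

from struct import error as StructError, pack, unpack

def encode_uint64(val):
    # Big-endian unsigned 64-bit frame, mirroring the method above.
    try:
        return pack('!Q', val)
    except StructError:
        raise ValueError('Expected uint64')

frame = encode_uint64(42)
assert frame == b'\x00\x00\x00\x00\x00\x00\x00*'
assert unpack('!Q', frame) == (42,)
try:
    encode_uint64(-1)            # negative values do not fit in a uint64
except ValueError as err:
    print(err)                   # Expected uint64
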
allenai/allennlp | allennlp/semparse/domain_languages/wikitables_language.py | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/wikitables_language.py#L362-L373 | def select_number(self, rows: List[Row], column: NumberColumn) -> Number:
"""
Select function takes a row (as a list) and a column name and returns the number in that
column. If multiple rows are given, will return the first number that is not None.
"""
numbers: List[float] = []
for row in rows:
cell_value = row.values[column.name]
if isinstance(cell_value, float):
numbers.append(cell_value)
return numbers[0] if numbers else -1 | [
"def",
"select_number",
"(",
"self",
",",
"rows",
":",
"List",
"[",
"Row",
"]",
",",
"column",
":",
"NumberColumn",
")",
"->",
"Number",
":",
"numbers",
":",
"List",
"[",
"float",
"]",
"=",
"[",
"]",
"for",
"row",
"in",
"rows",
":",
"cell_value",
"=",
"row",
".",
"values",
"[",
"column",
".",
"name",
"]",
"if",
"isinstance",
"(",
"cell_value",
",",
"float",
")",
":",
"numbers",
".",
"append",
"(",
"cell_value",
")",
"return",
"numbers",
"[",
"0",
"]",
"if",
"numbers",
"else",
"-",
"1"
] | Select function takes a row (as a list) and a column name and returns the number in that
column. If multiple rows are given, will return the first number that is not None. | [
"Select",
"function",
"takes",
"a",
"row",
"(",
"as",
"a",
"list",
")",
"and",
"a",
"column",
"name",
"and",
"returns",
"the",
"number",
"in",
"that",
"column",
".",
"If",
"multiple",
"rows",
"are",
"given",
"will",
"return",
"the",
"first",
"number",
"that",
"is",
"not",
"None",
"."
] | python | train |
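
The loop above is "first matching value or a sentinel", which next() with a generator expresses in one line. A standalone sketch over plain cell values:

def first_number(cells, default=-1):
    # Return the first float in `cells`, or `default` when none is found.
    return next((c for c in cells if isinstance(c, float)), default)

assert first_number([None, 'n/a', 3.5, 7.0]) == 3.5
assert first_number(['n/a']) == -1
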
edx/i18n-tools | i18n/validate.py | https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/validate.py#L86-L105 | def tags_in_string(msg):
"""
Return the set of tags in a message string.
Tags include HTML tags, data placeholders, etc.
Skips tags that might change due to translations: HTML entities, <abbr>,
and so on.
"""
def is_linguistic_tag(tag):
"""Is this tag one that can change with the language?"""
if tag.startswith("&"):
return True
if any(x in tag for x in ["<abbr>", "<abbr ", "</abbr>"]):
return True
return False
__, tags = Converter().detag_string(msg)
return set(t for t in tags if not is_linguistic_tag(t)) | [
"def",
"tags_in_string",
"(",
"msg",
")",
":",
"def",
"is_linguistic_tag",
"(",
"tag",
")",
":",
"\"\"\"Is this tag one that can change with the language?\"\"\"",
"if",
"tag",
".",
"startswith",
"(",
"\"&\"",
")",
":",
"return",
"True",
"if",
"any",
"(",
"x",
"in",
"tag",
"for",
"x",
"in",
"[",
"\"<abbr>\"",
",",
"\"<abbr \"",
",",
"\"</abbr>\"",
"]",
")",
":",
"return",
"True",
"return",
"False",
"__",
",",
"tags",
"=",
"Converter",
"(",
")",
".",
"detag_string",
"(",
"msg",
")",
"return",
"set",
"(",
"t",
"for",
"t",
"in",
"tags",
"if",
"not",
"is_linguistic_tag",
"(",
"t",
")",
")"
] | Return the set of tags in a message string.
Tags include HTML tags, data placeholders, etc.
Skips tags that might change due to translations: HTML entities, <abbr>,
and so on. | [
"Return",
"the",
"set",
"of",
"tags",
"in",
"a",
"message",
"string",
"."
] | python | train |
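
The function above relies on a project-specific Converter to detag the message. A rough stdlib approximation that pulls tag-like substrings out with a regex and applies the same linguistic-tag filter; the regex is an assumption and will not match everything the real Converter handles.

import re

def tags_in_string(msg):
    # Collect HTML-ish tags, entities and {placeholders}, then drop the
    # ones a translator may legitimately change.
    def is_linguistic_tag(tag):
        if tag.startswith('&'):
            return True
        return any(x in tag for x in ('<abbr>', '<abbr ', '</abbr>'))

    candidates = re.findall(r'<[^>]+>|&\w+;|\{\w+\}', msg)
    return set(t for t in candidates if not is_linguistic_tag(t))

print(tags_in_string('Hello <b>{name}</b> &amp; <abbr>Dr.</abbr> X'))
# {'<b>', '{name}', '</b>'} (set order varies)
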
hydraplatform/hydra-base | hydra_base/lib/sharing.py | https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/sharing.py#L57-L93 | def share_network(network_id, usernames, read_only, share,**kwargs):
"""
Share a network with a list of users, identified by their usernames.
The read_only flag ('Y' or 'N') must be set
to 'N' to allow write access or sharing.
The share flag ('Y' or 'N') must be set to 'Y' to allow the
network to be shared with other users
"""
user_id = kwargs.get('user_id')
net_i = _get_network(network_id)
net_i.check_share_permission(user_id)
if read_only == 'Y':
write = 'N'
share = 'N'
else:
write = 'Y'
if net_i.created_by != int(user_id) and share == 'Y':
raise HydraError("Cannot share the 'sharing' ability as user %s is not"
" the owner of network %s"%
(user_id, network_id))
for username in usernames:
user_i = _get_user(username)
#Set the ownership on the network itself
net_i.set_owner(user_i.id, write=write, share=share)
for o in net_i.project.owners:
if o.user_id == user_i.id:
break
else:
#Give the user read access to the containing project
net_i.project.set_owner(user_i.id, write='N', share='N')
db.DBSession.flush() | [
"def",
"share_network",
"(",
"network_id",
",",
"usernames",
",",
"read_only",
",",
"share",
",",
"*",
"*",
"kwargs",
")",
":",
"user_id",
"=",
"kwargs",
".",
"get",
"(",
"'user_id'",
")",
"net_i",
"=",
"_get_network",
"(",
"network_id",
")",
"net_i",
".",
"check_share_permission",
"(",
"user_id",
")",
"if",
"read_only",
"==",
"'Y'",
":",
"write",
"=",
"'N'",
"share",
"=",
"'N'",
"else",
":",
"write",
"=",
"'Y'",
"if",
"net_i",
".",
"created_by",
"!=",
"int",
"(",
"user_id",
")",
"and",
"share",
"==",
"'Y'",
":",
"raise",
"HydraError",
"(",
"\"Cannot share the 'sharing' ability as user %s is not\"",
"\" the owner of network %s\"",
"%",
"(",
"user_id",
",",
"network_id",
")",
")",
"for",
"username",
"in",
"usernames",
":",
"user_i",
"=",
"_get_user",
"(",
"username",
")",
"#Set the owner ship on the network itself",
"net_i",
".",
"set_owner",
"(",
"user_i",
".",
"id",
",",
"write",
"=",
"write",
",",
"share",
"=",
"share",
")",
"for",
"o",
"in",
"net_i",
".",
"project",
".",
"owners",
":",
"if",
"o",
".",
"user_id",
"==",
"user_i",
".",
"id",
":",
"break",
"else",
":",
"#Give the user read access to the containing project",
"net_i",
".",
"project",
".",
"set_owner",
"(",
"user_i",
".",
"id",
",",
"write",
"=",
"'N'",
",",
"share",
"=",
"'N'",
")",
"db",
".",
"DBSession",
".",
"flush",
"(",
")"
] | Share a network with a list of users, identified by their usernames.
The read_only flag ('Y' or 'N') must be set
to 'N' to allow write access or sharing.
The share flag ('Y' or 'N') must be set to 'Y' to allow the
network to be shared with other users | [
"Share",
"a",
"network",
"with",
"a",
"list",
"of",
"users",
"identified",
"by",
"their",
"usernames",
"."
] | python | train |
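
The flag normalization at the top of share_network is easy to test in isolation. A tiny sketch of just that rule, namely that a read-only grant can neither write nor re-share:

def share_flags(read_only, share):
    # Normalize the requested (read_only, share) pair into (write, share).
    if read_only == 'Y':
        return 'N', 'N'
    return 'Y', share

assert share_flags('Y', 'Y') == ('N', 'N')   # read-only wins over share
assert share_flags('N', 'N') == ('Y', 'N')
assert share_flags('N', 'Y') == ('Y', 'Y')
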
spulec/moto | moto/batch/models.py | https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/batch/models.py#L310-L405 | def run(self):
"""
Run the container.
Logic is as follows:
Generate container info (eventually from task definition)
Start container
Loop whilst not asked to stop and the container is running.
Get all logs from container between the last time I checked and now.
Convert logs into cloudwatch format
Put logs into cloudwatch
:return:
"""
try:
self.job_state = 'PENDING'
time.sleep(1)
image = 'alpine:latest'
cmd = '/bin/sh -c "for a in `seq 1 10`; do echo Hello World; sleep 1; done"'
name = '{0}-{1}'.format(self.job_name, self.job_id)
self.job_state = 'RUNNABLE'
# TODO setup ecs container instance
time.sleep(1)
self.job_state = 'STARTING'
container = self.docker_client.containers.run(
image, cmd,
detach=True,
name=name
)
self.job_state = 'RUNNING'
self.job_started_at = datetime.datetime.now()
try:
# Log collection
logs_stdout = []
logs_stderr = []
container.reload()
# Dodgy hack, we can only check docker logs once a second, but we want to loop more
# so we can stop if asked to in a quick manner, should all go away if we go async
# There also seems to be some dodginess when sending an integer to docker logs, and some
# events seem to be duplicated.
now = datetime.datetime.now()
i = 1
while container.status == 'running' and not self.stop:
time.sleep(0.15)
if i % 10 == 0:
logs_stderr.extend(container.logs(stdout=False, stderr=True, timestamps=True, since=datetime2int(now)).decode().split('\n'))
logs_stdout.extend(container.logs(stdout=True, stderr=False, timestamps=True, since=datetime2int(now)).decode().split('\n'))
now = datetime.datetime.now()
container.reload()
i += 1
# Container should be stopped by this point... unless asked to stop
if container.status == 'running':
container.kill()
self.job_stopped_at = datetime.datetime.now()
# Get final logs
logs_stderr.extend(container.logs(stdout=False, stderr=True, timestamps=True, since=datetime2int(now)).decode().split('\n'))
logs_stdout.extend(container.logs(stdout=True, stderr=False, timestamps=True, since=datetime2int(now)).decode().split('\n'))
self.job_state = 'SUCCEEDED' if not self.stop else 'FAILED'
# Process logs
logs_stdout = [x for x in logs_stdout if len(x) > 0]
logs_stderr = [x for x in logs_stderr if len(x) > 0]
logs = []
for line in logs_stdout + logs_stderr:
date, line = line.split(' ', 1)
date = dateutil.parser.parse(date)
date = int(date.timestamp())
logs.append({'timestamp': date, 'message': line.strip()})
# Send to cloudwatch
log_group = '/aws/batch/job'
stream_name = '{0}/default/{1}'.format(self.job_definition.name, self.job_id)
self.log_stream_name = stream_name
self._log_backend.ensure_log_group(log_group, None)
self._log_backend.create_log_stream(log_group, stream_name)
self._log_backend.put_log_events(log_group, stream_name, logs, None)
except Exception as err:
logger.error('Failed to run AWS Batch container {0}. Error {1}'.format(self.name, err))
self.job_state = 'FAILED'
container.kill()
finally:
container.remove()
except Exception as err:
logger.error('Failed to run AWS Batch container {0}. Error {1}'.format(self.name, err))
self.job_state = 'FAILED'
self.job_stopped = True
self.job_stopped_at = datetime.datetime.now() | [
"def",
"run",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"job_state",
"=",
"'PENDING'",
"time",
".",
"sleep",
"(",
"1",
")",
"image",
"=",
"'alpine:latest'",
"cmd",
"=",
"'/bin/sh -c \"for a in `seq 1 10`; do echo Hello World; sleep 1; done\"'",
"name",
"=",
"'{0}-{1}'",
".",
"format",
"(",
"self",
".",
"job_name",
",",
"self",
".",
"job_id",
")",
"self",
".",
"job_state",
"=",
"'RUNNABLE'",
"# TODO setup ecs container instance",
"time",
".",
"sleep",
"(",
"1",
")",
"self",
".",
"job_state",
"=",
"'STARTING'",
"container",
"=",
"self",
".",
"docker_client",
".",
"containers",
".",
"run",
"(",
"image",
",",
"cmd",
",",
"detach",
"=",
"True",
",",
"name",
"=",
"name",
")",
"self",
".",
"job_state",
"=",
"'RUNNING'",
"self",
".",
"job_started_at",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"try",
":",
"# Log collection",
"logs_stdout",
"=",
"[",
"]",
"logs_stderr",
"=",
"[",
"]",
"container",
".",
"reload",
"(",
")",
"# Dodgy hack, we can only check docker logs once a second, but we want to loop more",
"# so we can stop if asked to in a quick manner, should all go away if we go async",
"# There also be some dodgyness when sending an integer to docker logs and some",
"# events seem to be duplicated.",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"i",
"=",
"1",
"while",
"container",
".",
"status",
"==",
"'running'",
"and",
"not",
"self",
".",
"stop",
":",
"time",
".",
"sleep",
"(",
"0.15",
")",
"if",
"i",
"%",
"10",
"==",
"0",
":",
"logs_stderr",
".",
"extend",
"(",
"container",
".",
"logs",
"(",
"stdout",
"=",
"False",
",",
"stderr",
"=",
"True",
",",
"timestamps",
"=",
"True",
",",
"since",
"=",
"datetime2int",
"(",
"now",
")",
")",
".",
"decode",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
")",
"logs_stdout",
".",
"extend",
"(",
"container",
".",
"logs",
"(",
"stdout",
"=",
"True",
",",
"stderr",
"=",
"False",
",",
"timestamps",
"=",
"True",
",",
"since",
"=",
"datetime2int",
"(",
"now",
")",
")",
".",
"decode",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
")",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"container",
".",
"reload",
"(",
")",
"i",
"+=",
"1",
"# Container should be stopped by this point... unless asked to stop",
"if",
"container",
".",
"status",
"==",
"'running'",
":",
"container",
".",
"kill",
"(",
")",
"self",
".",
"job_stopped_at",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"# Get final logs",
"logs_stderr",
".",
"extend",
"(",
"container",
".",
"logs",
"(",
"stdout",
"=",
"False",
",",
"stderr",
"=",
"True",
",",
"timestamps",
"=",
"True",
",",
"since",
"=",
"datetime2int",
"(",
"now",
")",
")",
".",
"decode",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
")",
"logs_stdout",
".",
"extend",
"(",
"container",
".",
"logs",
"(",
"stdout",
"=",
"True",
",",
"stderr",
"=",
"False",
",",
"timestamps",
"=",
"True",
",",
"since",
"=",
"datetime2int",
"(",
"now",
")",
")",
".",
"decode",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
")",
"self",
".",
"job_state",
"=",
"'SUCCEEDED'",
"if",
"not",
"self",
".",
"stop",
"else",
"'FAILED'",
"# Process logs",
"logs_stdout",
"=",
"[",
"x",
"for",
"x",
"in",
"logs_stdout",
"if",
"len",
"(",
"x",
")",
">",
"0",
"]",
"logs_stderr",
"=",
"[",
"x",
"for",
"x",
"in",
"logs_stderr",
"if",
"len",
"(",
"x",
")",
">",
"0",
"]",
"logs",
"=",
"[",
"]",
"for",
"line",
"in",
"logs_stdout",
"+",
"logs_stderr",
":",
"date",
",",
"line",
"=",
"line",
".",
"split",
"(",
"' '",
",",
"1",
")",
"date",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"date",
")",
"date",
"=",
"int",
"(",
"date",
".",
"timestamp",
"(",
")",
")",
"logs",
".",
"append",
"(",
"{",
"'timestamp'",
":",
"date",
",",
"'message'",
":",
"line",
".",
"strip",
"(",
")",
"}",
")",
"# Send to cloudwatch",
"log_group",
"=",
"'/aws/batch/job'",
"stream_name",
"=",
"'{0}/default/{1}'",
".",
"format",
"(",
"self",
".",
"job_definition",
".",
"name",
",",
"self",
".",
"job_id",
")",
"self",
".",
"log_stream_name",
"=",
"stream_name",
"self",
".",
"_log_backend",
".",
"ensure_log_group",
"(",
"log_group",
",",
"None",
")",
"self",
".",
"_log_backend",
".",
"create_log_stream",
"(",
"log_group",
",",
"stream_name",
")",
"self",
".",
"_log_backend",
".",
"put_log_events",
"(",
"log_group",
",",
"stream_name",
",",
"logs",
",",
"None",
")",
"except",
"Exception",
"as",
"err",
":",
"logger",
".",
"error",
"(",
"'Failed to run AWS Batch container {0}. Error {1}'",
".",
"format",
"(",
"self",
".",
"name",
",",
"err",
")",
")",
"self",
".",
"job_state",
"=",
"'FAILED'",
"container",
".",
"kill",
"(",
")",
"finally",
":",
"container",
".",
"remove",
"(",
")",
"except",
"Exception",
"as",
"err",
":",
"logger",
".",
"error",
"(",
"'Failed to run AWS Batch container {0}. Error {1}'",
".",
"format",
"(",
"self",
".",
"name",
",",
"err",
")",
")",
"self",
".",
"job_state",
"=",
"'FAILED'",
"self",
".",
"job_stopped",
"=",
"True",
"self",
".",
"job_stopped_at",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")"
] | Run the container.
Logic is as follows:
Generate container info (eventually from task definition)
Start container
Loop whilst not asked to stop and the container is running.
Get all logs from container between the last time I checked and now.
Convert logs into cloudwatch format
Put logs into cloudwatch
:return: | [
"Run",
"the",
"container",
"."
] | python | train |
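
The log-processing tail of run(), splitting a docker timestamped line into an epoch-second CloudWatch-style event, can be exercised without docker at all. A stdlib sketch in which datetime.fromisoformat stands in for dateutil.parser.parse, so only ISO-8601 stamps are accepted:

import datetime

def parse_log_lines(raw_lines):
    # Convert "ISO-timestamp message" lines into the event dicts a
    # CloudWatch-like backend expects, dropping empty lines as above.
    events = []
    for line in raw_lines:
        if not line:
            continue
        stamp, message = line.split(' ', 1)
        when = datetime.datetime.fromisoformat(stamp)
        events.append({'timestamp': int(when.timestamp()),
                       'message': message.strip()})
    return events

lines = ['2023-01-02T03:04:05+00:00 Hello World', '']
print(parse_log_lines(lines))
# [{'timestamp': 1672628645, 'message': 'Hello World'}]
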
log2timeline/dfvfs | dfvfs/file_io/encoded_stream_io.py | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/encoded_stream_io.py#L288-L327 | def seek(self, offset, whence=os.SEEK_SET):
"""Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek to.
whence (Optional(int)): value that indicates whether offset is an absolute
or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed.
"""
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if whence == os.SEEK_CUR:
offset += self._current_offset
elif whence == os.SEEK_END:
if self._decoded_stream_size is None:
self._decoded_stream_size = self._GetDecodedStreamSize()
if self._decoded_stream_size is None:
raise IOError('Invalid decoded stream size.')
offset += self._decoded_stream_size
elif whence != os.SEEK_SET:
raise IOError('Unsupported whence.')
if offset < 0:
raise IOError('Invalid offset value less than zero.')
if offset != self._current_offset:
self._current_offset = offset
self._realign_offset = True | [
"def",
"seek",
"(",
"self",
",",
"offset",
",",
"whence",
"=",
"os",
".",
"SEEK_SET",
")",
":",
"if",
"not",
"self",
".",
"_is_open",
":",
"raise",
"IOError",
"(",
"'Not opened.'",
")",
"if",
"self",
".",
"_current_offset",
"<",
"0",
":",
"raise",
"IOError",
"(",
"'Invalid current offset: {0:d} value less than zero.'",
".",
"format",
"(",
"self",
".",
"_current_offset",
")",
")",
"if",
"whence",
"==",
"os",
".",
"SEEK_CUR",
":",
"offset",
"+=",
"self",
".",
"_current_offset",
"elif",
"whence",
"==",
"os",
".",
"SEEK_END",
":",
"if",
"self",
".",
"_decoded_stream_size",
"is",
"None",
":",
"self",
".",
"_decoded_stream_size",
"=",
"self",
".",
"_GetDecodedStreamSize",
"(",
")",
"if",
"self",
".",
"_decoded_stream_size",
"is",
"None",
":",
"raise",
"IOError",
"(",
"'Invalid decoded stream size.'",
")",
"offset",
"+=",
"self",
".",
"_decoded_stream_size",
"elif",
"whence",
"!=",
"os",
".",
"SEEK_SET",
":",
"raise",
"IOError",
"(",
"'Unsupported whence.'",
")",
"if",
"offset",
"<",
"0",
":",
"raise",
"IOError",
"(",
"'Invalid offset value less than zero.'",
")",
"if",
"offset",
"!=",
"self",
".",
"_current_offset",
":",
"self",
".",
"_current_offset",
"=",
"offset",
"self",
".",
"_realign_offset",
"=",
"True"
] | Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek to.
whence (Optional(int)): value that indicates whether offset is an absolute
or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed. | [
"Seeks",
"to",
"an",
"offset",
"within",
"the",
"file",
"-",
"like",
"object",
"."
] | python | train |
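
The whence arithmetic in seek() is independent of the encoding machinery. A minimal tracker that reproduces just that logic, where a fixed size plays the role of the lazily computed decoded stream size:

import os

class OffsetTracker:
    """Track a read offset with the usual seek() semantics."""

    def __init__(self, size):
        self._size = size
        self._offset = 0

    def seek(self, offset, whence=os.SEEK_SET):
        # Resolve relative offsets, then validate, exactly as above.
        if whence == os.SEEK_CUR:
            offset += self._offset
        elif whence == os.SEEK_END:
            offset += self._size
        elif whence != os.SEEK_SET:
            raise IOError('Unsupported whence.')
        if offset < 0:
            raise IOError('Invalid offset value less than zero.')
        self._offset = offset

t = OffsetTracker(size=100)
t.seek(-10, os.SEEK_END)
assert t._offset == 90
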
inveniosoftware/invenio-oauthclient | invenio_oauthclient/handlers.py | https://github.com/inveniosoftware/invenio-oauthclient/blob/2500dc6935738107617aeade79e050d7608004bb/invenio_oauthclient/handlers.py#L469-L474 | def oauth_logout_handler(sender_app, user=None):
"""Remove all access tokens from session on logout."""
oauth = current_app.extensions['oauthlib.client']
for remote in oauth.remote_apps.values():
token_delete(remote)
db.session.commit() | [
"def",
"oauth_logout_handler",
"(",
"sender_app",
",",
"user",
"=",
"None",
")",
":",
"oauth",
"=",
"current_app",
".",
"extensions",
"[",
"'oauthlib.client'",
"]",
"for",
"remote",
"in",
"oauth",
".",
"remote_apps",
".",
"values",
"(",
")",
":",
"token_delete",
"(",
"remote",
")",
"db",
".",
"session",
".",
"commit",
"(",
")"
] | Remove all access tokens from session on logout. | [
"Remove",
"all",
"access",
"tokens",
"from",
"session",
"on",
"logout",
"."
] | python | train |
ARMmbed/mbed-cloud-sdk-python | src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py#L4274-L4296 | def update_account_certificate(self, account_id, cert_id, body, **kwargs): # noqa: E501
"""Update trusted certificate. # noqa: E501
An endpoint for updating existing trusted certificates. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.update_account_certificate(account_id, cert_id, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str cert_id: The ID of the trusted certificate to be updated. (required)
:param TrustedCertificateUpdateReq body: A trusted certificate object with attributes. (required)
:return: TrustedCertificateInternalResp
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.update_account_certificate_with_http_info(account_id, cert_id, body, **kwargs) # noqa: E501
else:
(data) = self.update_account_certificate_with_http_info(account_id, cert_id, body, **kwargs) # noqa: E501
return data | [
"def",
"update_account_certificate",
"(",
"self",
",",
"account_id",
",",
"cert_id",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'asynchronous'",
")",
":",
"return",
"self",
".",
"update_account_certificate_with_http_info",
"(",
"account_id",
",",
"cert_id",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"update_account_certificate_with_http_info",
"(",
"account_id",
",",
"cert_id",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | Update trusted certificate. # noqa: E501
An endpoint for updating existing trusted certificates. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.update_account_certificate(account_id, cert_id, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str cert_id: The ID of the trusted certificate to be updated. (required)
:param TrustedCertificateUpdateReq body: A trusted certificate object with attributes. (required)
:return: TrustedCertificateInternalResp
If the method is called asynchronously,
returns the request thread. | [
"Update",
"trusted",
"certificate",
".",
"#",
"noqa",
":",
"E501"
] | python | train |
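
The asynchronous=True convention above returns a thread-like object whose .get() yields the response. A toy sketch of that dual sync/async calling shape, with a placeholder in place of the real HTTP call; nothing here reflects the SDK's internals.

import threading

class AsyncResult:
    # Run a callable in the background; .get() joins and returns its value.
    def __init__(self, func):
        self._value = None
        self._thread = threading.Thread(
            target=lambda: setattr(self, '_value', func()))
        self._thread.start()

    def get(self):
        self._thread.join()
        return self._value

def update_certificate(cert_id, body, asynchronous=False):
    work = lambda: {'id': cert_id, 'body': body}   # placeholder API call
    if asynchronous:
        return AsyncResult(work)
    return work()

print(update_certificate('c1', {}, asynchronous=True).get())
# {'id': 'c1', 'body': {}}
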
fabioz/PyDev.Debugger | pydevd_attach_to_process/winappdbg/event.py | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/event.py#L1627-L1647 | def event(self, event):
"""
Forwards events to the corresponding instance of your event handler
for this process.
If you subclass L{EventSift} and reimplement this method, no event
will be forwarded at all unless you call the superclass implementation.
If your filtering is based on the event type, there's a much easier way
to do it: just implement a handler for it.
"""
eventCode = event.get_event_code()
pid = event.get_pid()
handler = self.forward.get(pid, None)
if handler is None:
handler = self.cls(*self.argv, **self.argd)
if eventCode != win32.EXIT_PROCESS_DEBUG_EVENT:
self.forward[pid] = handler
elif eventCode == win32.EXIT_PROCESS_DEBUG_EVENT:
del self.forward[pid]
return handler(event) | [
"def",
"event",
"(",
"self",
",",
"event",
")",
":",
"eventCode",
"=",
"event",
".",
"get_event_code",
"(",
")",
"pid",
"=",
"event",
".",
"get_pid",
"(",
")",
"handler",
"=",
"self",
".",
"forward",
".",
"get",
"(",
"pid",
",",
"None",
")",
"if",
"handler",
"is",
"None",
":",
"handler",
"=",
"self",
".",
"cls",
"(",
"*",
"self",
".",
"argv",
",",
"*",
"*",
"self",
".",
"argd",
")",
"if",
"eventCode",
"!=",
"win32",
".",
"EXIT_PROCESS_DEBUG_EVENT",
":",
"self",
".",
"forward",
"[",
"pid",
"]",
"=",
"handler",
"elif",
"eventCode",
"==",
"win32",
".",
"EXIT_PROCESS_DEBUG_EVENT",
":",
"del",
"self",
".",
"forward",
"[",
"pid",
"]",
"return",
"handler",
"(",
"event",
")"
] | Forwards events to the corresponding instance of your event handler
for this process.
If you subclass L{EventSift} and reimplement this method, no event
will be forwarded at all unless you call the superclass implementation.
If your filtering is based on the event type, there's a much easier way
to do it: just implement a handler for it. | [
"Forwards",
"events",
"to",
"the",
"corresponding",
"instance",
"of",
"your",
"event",
"handler",
"for",
"this",
"process",
"."
] | python | train |
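
The sifting logic above, one handler instance per process id, created on first sight and dropped on process exit, generalizes beyond debugging. A standalone sketch with plain tuples in place of debug events:

class Sifter:
    def __init__(self, handler_cls):
        self.cls = handler_cls
        self.forward = {}

    def event(self, pid, kind, payload):
        # Reuse the per-pid handler, creating it on demand and
        # discarding it once the process exits.
        handler = self.forward.get(pid)
        if handler is None:
            handler = self.cls()
            if kind != 'exit':
                self.forward[pid] = handler
        elif kind == 'exit':
            del self.forward[pid]
        return handler(pid, kind, payload)

class EchoHandler:
    def __call__(self, pid, kind, payload):
        return (pid, kind, payload)

s = Sifter(EchoHandler)
s.event(7, 'create', None)
s.event(7, 'exit', None)
assert 7 not in s.forward
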
freelancer/freelancer-sdk-python | freelancersdk/resources/projects/projects.py | https://github.com/freelancer/freelancer-sdk-python/blob/e09034936d6f13b3909a9464ee329c81c1834941/freelancersdk/resources/projects/projects.py#L701-L714 | def post_review(session, review):
"""
Post a review
"""
# POST /api/projects/0.1/reviews/
response = make_post_request(session, 'reviews', json_data=review)
json_data = response.json()
if response.status_code == 200:
return json_data['status']
else:
raise ReviewNotPostedException(
message=json_data['message'],
error_code=json_data['error_code'],
request_id=json_data['request_id']) | [
"def",
"post_review",
"(",
"session",
",",
"review",
")",
":",
"# POST /api/projects/0.1/reviews/",
"response",
"=",
"make_post_request",
"(",
"session",
",",
"'reviews'",
",",
"json_data",
"=",
"review",
")",
"json_data",
"=",
"response",
".",
"json",
"(",
")",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"return",
"json_data",
"[",
"'status'",
"]",
"else",
":",
"raise",
"ReviewNotPostedException",
"(",
"message",
"=",
"json_data",
"[",
"'message'",
"]",
",",
"error_code",
"=",
"json_data",
"[",
"'error_code'",
"]",
",",
"request_id",
"=",
"json_data",
"[",
"'request_id'",
"]",
")"
] | Post a review | [
"Post",
"a",
"review"
] | python | valid |
saltstack/salt | salt/states/boto_apigateway.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L524-L532 | def _log_error_and_abort(ret, obj):
'''
helper function to update errors in the return structure
'''
ret['result'] = False
ret['abort'] = True
if 'error' in obj:
ret['comment'] = '{0}'.format(obj.get('error'))
return ret | [
"def",
"_log_error_and_abort",
"(",
"ret",
",",
"obj",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'abort'",
"]",
"=",
"True",
"if",
"'error'",
"in",
"obj",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'{0}'",
".",
"format",
"(",
"obj",
".",
"get",
"(",
"'error'",
")",
")",
"return",
"ret"
] | helper function to update errors in the return structure | [
"helper",
"function",
"to",
"update",
"errors",
"in",
"the",
"return",
"structure"
] | python | train |
ethan92429/onshapepy | onshapepy/part.py | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/part.py#L96-L100 | def get_params(self):
"""Manually pull params defined in config from OnShape and return a python representation of the params.
Quantities are converted to pint quantities, Bools are converted to python bools and Enums are converted
to strings. Note that Enum names are autogenerated by OnShape and do not match the name on the OnShape UI."""
self.res = c.get_configuration(self.parent.uri.as_dict()) | [
"def",
"get_params",
"(",
"self",
")",
":",
"self",
".",
"res",
"=",
"c",
".",
"get_configuration",
"(",
"self",
".",
"parent",
".",
"uri",
".",
"as_dict",
"(",
")",
")"
] | Manually pull params defined in config from OnShape and return a python representation of the params.
Quantities are converted to pint quantities, Bools are converted to python bools and Enums are converted
to strings. Note that Enum names are autogenerated by OnShape and do not match the name on the OnShape UI. | [
"Manually",
"pull",
"params",
"defined",
"in",
"config",
"from",
"OnShape",
"and",
"return",
"a",
"python",
"representation",
"of",
"the",
"params",
".",
"Quantities",
"are",
"converted",
"to",
"pint",
"quantities",
"Bools",
"are",
"converted",
"to",
"python",
"bools",
"and",
"Enums",
"are",
"converted",
"to",
"strings",
".",
"Note",
"that",
"Enum",
"names",
"are",
"autogenerated",
"by",
"OnShape",
"and",
"do",
"not",
"match",
"the",
"name",
"on",
"the",
"OnShape",
"UI",
"."
] | python | train |
pgxcentre/geneparse | geneparse/readers/bgen.py | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/readers/bgen.py#L190-L220 | def get_variant_by_name(self, name):
"""Get the genotype of a marker using it's name.
Args:
name (str): The name of the marker.
Returns:
list: A list of Genotypes.
"""
results = []
try:
for info, dosage in self._bgen.get_variant(name):
results.append(Genotypes(
Variant(
info.name,
CHROM_STR_ENCODE.get(info.chrom, info.chrom),
info.pos,
[info.a1, info.a2],
),
dosage,
reference=info.a1,
coded=info.a2,
multiallelic=False,
))
except ValueError:
logging.variant_name_not_found(name)
return results | [
"def",
"get_variant_by_name",
"(",
"self",
",",
"name",
")",
":",
"results",
"=",
"[",
"]",
"try",
":",
"for",
"info",
",",
"dosage",
"in",
"self",
".",
"_bgen",
".",
"get_variant",
"(",
"name",
")",
":",
"results",
".",
"append",
"(",
"Genotypes",
"(",
"Variant",
"(",
"info",
".",
"name",
",",
"CHROM_STR_ENCODE",
".",
"get",
"(",
"info",
".",
"chrom",
",",
"info",
".",
"chrom",
")",
",",
"info",
".",
"pos",
",",
"[",
"info",
".",
"a1",
",",
"info",
".",
"a2",
"]",
",",
")",
",",
"dosage",
",",
"reference",
"=",
"info",
".",
"a1",
",",
"coded",
"=",
"info",
".",
"a2",
",",
"multiallelic",
"=",
"False",
",",
")",
")",
"except",
"ValueError",
":",
"logging",
".",
"variant_name_not_found",
"(",
"name",
")",
"return",
"results"
] | Get the genotype of a marker using its name.
Args:
name (str): The name of the marker.
Returns:
list: A list of Genotypes. | [
"Get",
"the",
"genotype",
"of",
"a",
"marker",
"using",
"it",
"s",
"name",
"."
] | python | train |
PaulHancock/Aegean | AegeanTools/MIMAS.py | https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/MIMAS.py#L95-L139 | def mask_plane(data, wcs, region, negate=False):
"""
Mask a 2d image (data) such that pixels within 'region' are set to nan.
Parameters
----------
data : 2d-array
Image array.
wcs : astropy.wcs.WCS
WCS for the image in question.
region : :class:`AegeanTools.regions.Region`
A region within which the image pixels will be masked.
negate : bool
If True then pixels *outside* the region are masked.
Default = False.
Returns
-------
masked : 2d-array
The original array, but masked as required.
"""
# create an array but don't set the values (they are random)
indexes = np.empty((data.shape[0]*data.shape[1], 2), dtype=int)
# since I know exactly what the index array needs to look like I can construct
# it faster than list comprehension would allow
# we do this only once and then recycle it
idx = np.array([(j, 0) for j in range(data.shape[1])])
j = data.shape[1]
for i in range(data.shape[0]):
idx[:, 1] = i
indexes[i*j:(i+1)*j] = idx
# put ALL the pixels into our vectorized functions and minimise our overheads
ra, dec = wcs.wcs_pix2world(indexes, 1).transpose()
bigmask = region.sky_within(ra, dec, degin=True)
if not negate:
bigmask = np.bitwise_not(bigmask)
# rework our 1d list into a 2d array
bigmask = bigmask.reshape(data.shape)
# and apply the mask
data[bigmask] = np.nan
return data | [
"def",
"mask_plane",
"(",
"data",
",",
"wcs",
",",
"region",
",",
"negate",
"=",
"False",
")",
":",
"# create an array but don't set the values (they are random)",
"indexes",
"=",
"np",
".",
"empty",
"(",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
"*",
"data",
".",
"shape",
"[",
"1",
"]",
",",
"2",
")",
",",
"dtype",
"=",
"int",
")",
"# since I know exactly what the index array needs to look like i can construct",
"# it faster than list comprehension would allow",
"# we do this only once and then recycle it",
"idx",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"j",
",",
"0",
")",
"for",
"j",
"in",
"range",
"(",
"data",
".",
"shape",
"[",
"1",
"]",
")",
"]",
")",
"j",
"=",
"data",
".",
"shape",
"[",
"1",
"]",
"for",
"i",
"in",
"range",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
")",
":",
"idx",
"[",
":",
",",
"1",
"]",
"=",
"i",
"indexes",
"[",
"i",
"*",
"j",
":",
"(",
"i",
"+",
"1",
")",
"*",
"j",
"]",
"=",
"idx",
"# put ALL the pixles into our vectorized functions and minimise our overheads",
"ra",
",",
"dec",
"=",
"wcs",
".",
"wcs_pix2world",
"(",
"indexes",
",",
"1",
")",
".",
"transpose",
"(",
")",
"bigmask",
"=",
"region",
".",
"sky_within",
"(",
"ra",
",",
"dec",
",",
"degin",
"=",
"True",
")",
"if",
"not",
"negate",
":",
"bigmask",
"=",
"np",
".",
"bitwise_not",
"(",
"bigmask",
")",
"# rework our 1d list into a 2d array",
"bigmask",
"=",
"bigmask",
".",
"reshape",
"(",
"data",
".",
"shape",
")",
"# and apply the mask",
"data",
"[",
"bigmask",
"]",
"=",
"np",
".",
"nan",
"return",
"data"
] | Mask a 2d image (data) such that pixels within 'region' are set to nan.
Parameters
----------
data : 2d-array
Image array.
wcs : astropy.wcs.WCS
WCS for the image in question.
region : :class:`AegeanTools.regions.Region`
A region within which the image pixels will be masked.
negate : bool
If True then pixels *outside* the region are masked.
Default = False.
Returns
-------
masked : 2d-array
The original array, but masked as required. | [
"Mask",
"a",
"2d",
"image",
"(",
"data",
")",
"such",
"that",
"pixels",
"within",
"region",
"are",
"set",
"to",
"nan",
"."
] | python | train |
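
The hand-rolled index array above can also be produced with numpy broadcasting. A sketch of the same masking flow in which a toy predicate stands in for the WCS transform plus region.sky_within() (assumes numpy is available):

import numpy as np

def mask_plane(data, keep, negate=False):
    # Set pixels to NaN wherever keep(x, y) is False (or True if negated).
    ys, xs = np.indices(data.shape)        # vectorized pixel coordinates
    mask = keep(xs.ravel(), ys.ravel()).reshape(data.shape)
    if not negate:
        mask = np.bitwise_not(mask)
    data[mask] = np.nan
    return data

img = np.ones((4, 4))
inside = lambda x, y: (x < 2) & (y < 2)    # toy stand-in for sky_within()
print(mask_plane(img, inside))             # NaN outside the 2x2 corner
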
jmgilman/Neolib | neolib/pyamf/util/__init__.py | https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/util/__init__.py#L28-L43 | def get_timestamp(d):
"""
Returns a UTC timestamp for a C{datetime.datetime} object.
@type d: C{datetime.datetime}
@return: UTC timestamp.
@rtype: C{float}
@see: Inspiration taken from the U{Intertwingly blog
<http://intertwingly.net/blog/2007/09/02/Dealing-With-Dates>}.
"""
if isinstance(d, datetime.date) and not isinstance(d, datetime.datetime):
d = datetime.datetime.combine(d, datetime.time(0, 0, 0, 0))
msec = str(d.microsecond).rjust(6).replace(' ', '0')
return float('%s.%s' % (calendar.timegm(d.utctimetuple()), msec)) | [
"def",
"get_timestamp",
"(",
"d",
")",
":",
"if",
"isinstance",
"(",
"d",
",",
"datetime",
".",
"date",
")",
"and",
"not",
"isinstance",
"(",
"d",
",",
"datetime",
".",
"datetime",
")",
":",
"d",
"=",
"datetime",
".",
"datetime",
".",
"combine",
"(",
"d",
",",
"datetime",
".",
"time",
"(",
"0",
",",
"0",
",",
"0",
",",
"0",
")",
")",
"msec",
"=",
"str",
"(",
"d",
".",
"microsecond",
")",
".",
"rjust",
"(",
"6",
")",
".",
"replace",
"(",
"' '",
",",
"'0'",
")",
"return",
"float",
"(",
"'%s.%s'",
"%",
"(",
"calendar",
".",
"timegm",
"(",
"d",
".",
"utctimetuple",
"(",
")",
")",
",",
"msec",
")",
")"
] | Returns a UTC timestamp for a C{datetime.datetime} object.
@type d: C{datetime.datetime}
@return: UTC timestamp.
@rtype: C{float}
@see: Inspiration taken from the U{Intertwingly blog
<http://intertwingly.net/blog/2007/09/02/Dealing-With-Dates>}. | [
"Returns",
"a",
"UTC",
"timestamp",
"for",
"a",
"C",
"{",
"datetime",
".",
"datetime",
"}",
"object",
"."
] | python | train |
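
A quick sanity check of get_timestamp: under an aware-UTC reading it should agree with datetime.timestamp(). The copy below inlines the zero padding as rjust(6, '0'), which is equivalent to the source's rjust(6).replace(' ', '0'):

import calendar
import datetime

def get_timestamp(d):
    # UTC timestamp for a datetime, keeping microsecond precision.
    if isinstance(d, datetime.date) and not isinstance(d, datetime.datetime):
        d = datetime.datetime.combine(d, datetime.time(0, 0, 0, 0))
    msec = str(d.microsecond).rjust(6, '0')
    return float('%s.%s' % (calendar.timegm(d.utctimetuple()), msec))

d = datetime.datetime(2020, 1, 1, microsecond=250000)
assert get_timestamp(d) == d.replace(tzinfo=datetime.timezone.utc).timestamp()
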
sphinx-contrib/paverutils | sphinxcontrib/paverutils.py | https://github.com/sphinx-contrib/paverutils/blob/6268e5952fa564cefac05c06823b929a60522613/sphinxcontrib/paverutils.py#L312-L487 | def run_script(input_file, script_name,
interpreter='python',
include_prefix=True,
ignore_error=False,
trailing_newlines=True,
break_lines_at=0,
line_break_mode='break',
adjust_python_for_version=True,
line_cleanups=[],
):
"""Run a script in the context of the input_file's directory,
return the text output formatted to be included as an rst
literal text block.
Arguments:
input_file
The name of the file being processed by cog. Usually passed as
cog.inFile.
script_name
The name of the Python script living in the same directory as
input_file to be run. If not using an interpreter, this can be
a complete command line. If using an alternate interpreter, it
can be some other type of file. If the command line is very
long, this can be a list of parts. They will be displayed
with backslashes indicating the continuation from line to line,
but will be joined with a space character into a single string
to be executed.
include_prefix=True
Boolean controlling whether the :: prefix is included.
ignore_error=False
Boolean controlling whether errors are ignored. If not
ignored, the error is printed to stdout and then the command is
run *again* with errors ignored so that the output ends up in
the cogged file.
trailing_newlines=True
Boolean controlling whether the trailing newlines are added to
the output. If False, the output is passed to rstrip() then
one newline is added. If True, newlines are added to the
output until it ends with two newlines.
break_lines_at=0
Integer indicating the length where lines should be broken and
continued on the next line. Defaults to 0, meaning no special
handling should be done.
line_break_mode='break'
Name of mode to break lines.
break
Insert a hard break
continue
Insert a hard break with a backslash
wrap
Use textwrap.fill() to wrap
wrap-no-breaks
Use textwrap.fill() without breaking on hyphens
or long words
fill
Use textwrap.fill(), maintaining whitespace prefix
on subsequent lines
continue
Insert a hard break and backslash at the end of
long lines, continuing on the next
truncate
Chop the line at the required width and discard the
remainder
adjust_python_for_version=True
Boolean controlling whether the default `python`
interpreter setting is changed to `python3` when
running under python 3.
line_cleanups=[]
Process each output line through the cleanups and replace the
input line with the output values. Each cleanup should be a
callable that accepts the name of the original input file and
the line of output produced and returns a replacement string, or
the original input string if no changes are to be made.
"""
rundir = path(input_file).dirname()
if (adjust_python_for_version
and interpreter == 'python'
and sys.version_info[0] == 3):
# Automatically switch to python3 if we're running under
# python3 ourselves.
interpreter = 'python3'
cmd_list = script_name
if isinstance(script_name, list):
# We've been given a list, convert it to a string.
script_name = ' '.join(cmd_list)
if interpreter:
cmd = '%(interpreter)s %(script_name)s' % {
'interpreter': interpreter,
'script_name': script_name,
}
else:
cmd = script_name
real_cmd = 'cd %(rundir)s; %(cmd)s 2>&1' % {
'rundir': rundir,
'cmd': cmd,
}
try:
print()
output_text = sh(real_cmd, capture=True, ignore_error=ignore_error)
print(output_text)
except Exception as err:
print('*' * 50)
print('ERROR run_script(%s) => %s' % (real_cmd, err))
print('*' * 50)
output_text = sh(real_cmd, capture=True, ignore_error=True)
print(output_text)
print('*' * 50)
if not ignore_error:
raise
if include_prefix:
response = '\n.. code-block:: none\n\n'
else:
response = ''
# Start building our result list.
lines = []
# Add the command we ran to the result.
if isinstance(cmd_list, list):
# We were originally given a list, so interpret the
# parts as already split up. Add the continuation
# markers to the end.
if interpreter:
lines.append('\t$ {} {}'.format(interpreter, cmd_list[0] + ' \\'))
else:
lines.append('\t$ {}'.format(cmd_list[0] + ' \\'))
lines.extend(
l + ' \\'
for l in cmd_list[1:-1]
)
lines.append(cmd_list[-1])
else:
raw_command_line = '\t$ %s' % cmd
for cleanup in line_cleanups:
raw_command_line = cleanup(input_file, raw_command_line)
command_line = adjust_line_widths(
[raw_command_line],
break_lines_at - 1 if break_lines_at else 64,
'continue',
)
lines.extend(command_line)
lines.append('') # a blank line
lines.extend(output_text.splitlines()) # the output
# Clean up the raw output lines.
clean_lines = []
for line in lines:
for cleanup in line_cleanups:
line = cleanup(input_file, line)
clean_lines.append(line)
lines = clean_lines
# Deal with lines that might be too long
if break_lines_at:
lines = adjust_line_widths(lines, break_lines_at, line_break_mode)
response += '\n\t'.join(lines)
if trailing_newlines:
while not response.endswith('\n\n'):
response += '\n'
else:
response = response.rstrip()
response += '\n'
return response | [
"def",
"run_script",
"(",
"input_file",
",",
"script_name",
",",
"interpreter",
"=",
"'python'",
",",
"include_prefix",
"=",
"True",
",",
"ignore_error",
"=",
"False",
",",
"trailing_newlines",
"=",
"True",
",",
"break_lines_at",
"=",
"0",
",",
"line_break_mode",
"=",
"'break'",
",",
"adjust_python_for_version",
"=",
"True",
",",
"line_cleanups",
"=",
"[",
"]",
",",
")",
":",
"rundir",
"=",
"path",
"(",
"input_file",
")",
".",
"dirname",
"(",
")",
"if",
"(",
"adjust_python_for_version",
"and",
"interpreter",
"==",
"'python'",
"and",
"sys",
".",
"version_info",
"[",
"0",
"]",
"==",
"3",
")",
":",
"# Automatically switch to python3 if we're running under",
"# python3 ourselves.",
"interpreter",
"=",
"'python3'",
"cmd_list",
"=",
"script_name",
"if",
"isinstance",
"(",
"script_name",
",",
"list",
")",
":",
"# We've been given a list, convert it to a string.",
"script_name",
"=",
"' '",
".",
"join",
"(",
"cmd_list",
")",
"if",
"interpreter",
":",
"cmd",
"=",
"'%(interpreter)s %(script_name)s'",
"%",
"{",
"'interpreter'",
":",
"interpreter",
",",
"'script_name'",
":",
"script_name",
",",
"}",
"else",
":",
"cmd",
"=",
"script_name",
"real_cmd",
"=",
"'cd %(rundir)s; %(cmd)s 2>&1'",
"%",
"{",
"'rundir'",
":",
"rundir",
",",
"'cmd'",
":",
"cmd",
",",
"}",
"try",
":",
"print",
"(",
")",
"output_text",
"=",
"sh",
"(",
"real_cmd",
",",
"capture",
"=",
"True",
",",
"ignore_error",
"=",
"ignore_error",
")",
"print",
"(",
"output_text",
")",
"except",
"Exception",
"as",
"err",
":",
"print",
"(",
"'*'",
"*",
"50",
")",
"print",
"(",
"'ERROR run_script(%s) => %s'",
"%",
"(",
"real_cmd",
",",
"err",
")",
")",
"print",
"(",
"'*'",
"*",
"50",
")",
"output_text",
"=",
"sh",
"(",
"real_cmd",
",",
"capture",
"=",
"True",
",",
"ignore_error",
"=",
"True",
")",
"print",
"(",
"output_text",
")",
"print",
"(",
"'*'",
"*",
"50",
")",
"if",
"not",
"ignore_error",
":",
"raise",
"if",
"include_prefix",
":",
"response",
"=",
"'\\n.. code-block:: none\\n\\n'",
"else",
":",
"response",
"=",
"''",
"# Start building our result list.",
"lines",
"=",
"[",
"]",
"# Add the command we ran to the result.",
"if",
"isinstance",
"(",
"cmd_list",
",",
"list",
")",
":",
"# We were originally given a list, so interpret the",
"# parts as already split up. Add the continuation",
"# markers to the end.",
"if",
"interpreter",
":",
"lines",
".",
"append",
"(",
"'\\t$ {} {}'",
".",
"format",
"(",
"interpreter",
",",
"cmd_list",
"[",
"0",
"]",
"+",
"' \\\\'",
")",
")",
"else",
":",
"lines",
".",
"append",
"(",
"'\\t$ {}'",
".",
"format",
"(",
"cmd_list",
"[",
"0",
"]",
"+",
"' \\\\'",
")",
")",
"lines",
".",
"extend",
"(",
"l",
"+",
"' \\\\'",
"for",
"l",
"in",
"cmd_list",
"[",
"1",
":",
"-",
"1",
"]",
")",
"lines",
".",
"append",
"(",
"cmd_list",
"[",
"-",
"1",
"]",
")",
"else",
":",
"raw_command_line",
"=",
"'\\t$ %s'",
"%",
"cmd",
"for",
"cleanup",
"in",
"line_cleanups",
":",
"raw_command_line",
"=",
"cleanup",
"(",
"input_file",
",",
"raw_command_line",
")",
"command_line",
"=",
"adjust_line_widths",
"(",
"[",
"raw_command_line",
"]",
",",
"break_lines_at",
"-",
"1",
"if",
"break_lines_at",
"else",
"64",
",",
"'continue'",
",",
")",
"lines",
".",
"extend",
"(",
"command_line",
")",
"lines",
".",
"append",
"(",
"''",
")",
"# a blank line",
"lines",
".",
"extend",
"(",
"output_text",
".",
"splitlines",
"(",
")",
")",
"# the output",
"# Clean up the raw output lines.",
"clean_lines",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"for",
"cleanup",
"in",
"line_cleanups",
":",
"line",
"=",
"cleanup",
"(",
"input_file",
",",
"line",
")",
"clean_lines",
".",
"append",
"(",
"line",
")",
"lines",
"=",
"clean_lines",
"# Deal with lines that might be too long",
"if",
"break_lines_at",
":",
"lines",
"=",
"adjust_line_widths",
"(",
"lines",
",",
"break_lines_at",
",",
"line_break_mode",
")",
"response",
"+=",
"'\\n\\t'",
".",
"join",
"(",
"lines",
")",
"if",
"trailing_newlines",
":",
"while",
"not",
"response",
".",
"endswith",
"(",
"'\\n\\n'",
")",
":",
"response",
"+=",
"'\\n'",
"else",
":",
"response",
"=",
"response",
".",
"rstrip",
"(",
")",
"response",
"+=",
"'\\n'",
"return",
"response"
] | Run a script in the context of the input_file's directory,
return the text output formatted to be included as an rst
literal text block.
Arguments:
input_file
The name of the file being processed by cog. Usually passed as
cog.inFile.
script_name
The name of the Python script living in the same directory as
input_file to be run. If not using an interpreter, this can be
a complete command line. If using an alternate interpreter, it
can be some other type of file. If the command line is very
long, this can be a list of parts. They will be displayed
with backslashes indicating the continuation from line to line,
but will be joined with a space character into a single string
to be executed.
include_prefix=True
Boolean controlling whether the :: prefix is included.
ignore_error=False
Boolean controlling whether errors are ignored. If not
ignored, the error is printed to stdout and then the command is
run *again* with errors ignored so that the output ends up in
the cogged file.
trailing_newlines=True
Boolean controlling whether the trailing newlines are added to
the output. If False, the output is passed to rstrip() then
one newline is added. If True, newlines are added to the
output until it ends in 2.
break_lines_at=0
Integer indicating the length where lines should be broken and
continued on the next line. Defaults to 0, meaning no special
handling should be done.
line_break_mode='break'
Name of mode to break lines.
break
Insert a hard break
continue
Insert a hard break with a backslash
wrap
Use textwrap.fill() to wrap
wrap-no-breaks
Use textwrap.fill() without breaking on hyphens
or long words
fill
Use textwrap.fill(), maintaining whitespace prefix
on subsequent lines
continue
Insert a hard break and backslash at the end of
long lines, continuing on the next
truncate
Chop the line at the required width and discard the
remainder
adjust_python_for_version=True
Boolean controlling whether the default `python`
interpreter setting is changed to `python3` when
running under python 3.
line_cleanups=[]
Process each output line through the cleanups and replace the
input line with the output values. Each cleanup should be a
callable that accepts the name of the original input file and
the line of output produced and returns a replacement string, or
the original input string if no changes are to be made. | [
"Run",
"a",
"script",
"in",
"the",
"context",
"of",
"the",
"input_file",
"s",
"directory",
"return",
"the",
"text",
"output",
"formatted",
"to",
"be",
"included",
"as",
"an",
"rst",
"literal",
"text",
"block",
"."
] | python | train |
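
A much-reduced sketch of run_script's core flow: run a command, capture its output, and emit an rst literal block with wrapped lines. It uses only subprocess and textwrap, and it skips the interpreter handling, error retry and line cleanups of the original:

import subprocess
import textwrap

def run_script_rst(cmd, width=66):
    # Capture the command's stdout and format it as an rst literal block.
    output = subprocess.run(cmd, shell=True, capture_output=True,
                            text=True).stdout
    lines = ['\t$ %s' % cmd, '']
    for line in output.splitlines():
        lines.extend(textwrap.wrap(line, width) or [''])
    return '\n.. code-block:: none\n\n' + '\n\t'.join(lines) + '\n'

print(run_script_rst('echo Hello World'))
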
olitheolix/qtmacs | qtmacs/extensions/qtmacstextedit_widget.py | https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/extensions/qtmacstextedit_widget.py#L125-L132 | def reverseCommit(self):
"""
Reverse the document to the original state.
"""
pos = self.qteWidget.textCursor().position()
self.qteWidget.setHtml(self.before)
self.placeCursor(pos) | [
"def",
"reverseCommit",
"(",
"self",
")",
":",
"print",
"(",
"self",
".",
"after",
"==",
"self",
".",
"before",
")",
"pos",
"=",
"self",
".",
"qteWidget",
".",
"textCursor",
"(",
")",
".",
"position",
"(",
")",
"self",
".",
"qteWidget",
".",
"setHtml",
"(",
"self",
".",
"before",
")",
"self",
".",
"placeCursor",
"(",
"pos",
")"
] | Reverse the document to the original state. | [
"Reverse",
"the",
"document",
"to",
"the",
"original",
"state",
"."
] | python | train |
ConsenSys/mythril-classic | mythril/ethereum/interface/leveldb/client.py | https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/ethereum/interface/leveldb/client.py#L117-L128 | def _get_block_header(self, block_hash, num):
"""Get block header by block header hash & number.
:param block_hash:
:param num:
:return:
"""
header_key = header_prefix + num + block_hash
block_header_data = self.db.get(header_key)
header = rlp.decode(block_header_data, sedes=BlockHeader)
return header | [
"def",
"_get_block_header",
"(",
"self",
",",
"block_hash",
",",
"num",
")",
":",
"header_key",
"=",
"header_prefix",
"+",
"num",
"+",
"block_hash",
"block_header_data",
"=",
"self",
".",
"db",
".",
"get",
"(",
"header_key",
")",
"header",
"=",
"rlp",
".",
"decode",
"(",
"block_header_data",
",",
"sedes",
"=",
"BlockHeader",
")",
"return",
"header"
] | Get block header by block header hash & number.
:param block_hash:
:param num:
:return: | [
"Get",
"block",
"header",
"by",
"block",
"header",
"hash",
"&",
"number",
"."
] | python | train |
Karaage-Cluster/karaage | karaage/plugins/kgapplications/views/aed.py | https://github.com/Karaage-Cluster/karaage/blob/2f4c8b4e2d728b3fcbb151160c49000f1c04f5c9/karaage/plugins/kgapplications/views/aed.py#L431-L511 | def get_next_action(self, request, application, label, roles):
""" Process the get_next_action request at the current step. """
# if user is logged in and not applicant, steal the
# application
if 'is_applicant' in roles:
# if we got this far, then either we are logged in as applicant,
# or we know the secret for this application.
new_person = None
reason = None
details = None
attrs, _ = saml.parse_attributes(request)
saml_id = attrs['persistent_id']
if saml_id is not None:
query = Person.objects.filter(saml_id=saml_id)
if application.content_type.model == "person":
query = query.exclude(pk=application.applicant.pk)
if query.count() > 0:
new_person = Person.objects.get(saml_id=saml_id)
reason = "SAML id is already in use by existing person."
details = (
"It is not possible to continue this application "
+ "as is because the saml identity already exists "
+ "as a registered user.")
del query
if request.user.is_authenticated:
new_person = request.user
reason = "%s was logged in " \
"and accessed the secret URL." % new_person
details = (
"If you want to access this application "
+ "as %s " % application.applicant
+ "without %s stealing it, " % new_person
+ "you will have to ensure %s is " % new_person
+ "logged out first.")
if new_person is not None:
if application.applicant != new_person:
if 'steal' in request.POST:
old_applicant = application.applicant
application.applicant = new_person
application.save()
log.change(
application.application_ptr,
"Stolen application from %s" % old_applicant)
messages.success(
request,
"Stolen application from %s" % old_applicant)
url = base.get_url(request, application, roles, label)
return HttpResponseRedirect(url)
else:
return render(
template_name='kgapplications'
'/project_aed_steal.html',
context={
'application': application,
'person': new_person,
'reason': reason,
'details': details,
},
request=request)
# if the user is the leader, show him the leader specific page.
if ('is_leader' in roles or 'is_delegate' in roles) \
and 'is_admin' not in roles \
and 'is_applicant' not in roles:
actions = ['reopen']
if 'reopen' in request.POST:
return 'reopen'
return render(
template_name='kgapplications/project_aed_for_leader.html',
context={'application': application,
'actions': actions, 'roles': roles, },
request=request)
# otherwise do the default behaviour for StateWithSteps
return super(StateApplicantEnteringDetails, self) \
.get_next_action(request, application, label, roles) | [
"def",
"get_next_action",
"(",
"self",
",",
"request",
",",
"application",
",",
"label",
",",
"roles",
")",
":",
"# if user is logged and and not applicant, steal the",
"# application",
"if",
"'is_applicant'",
"in",
"roles",
":",
"# if we got this far, then we either we are logged in as applicant,",
"# or we know the secret for this application.",
"new_person",
"=",
"None",
"reason",
"=",
"None",
"details",
"=",
"None",
"attrs",
",",
"_",
"=",
"saml",
".",
"parse_attributes",
"(",
"request",
")",
"saml_id",
"=",
"attrs",
"[",
"'persistent_id'",
"]",
"if",
"saml_id",
"is",
"not",
"None",
":",
"query",
"=",
"Person",
".",
"objects",
".",
"filter",
"(",
"saml_id",
"=",
"saml_id",
")",
"if",
"application",
".",
"content_type",
".",
"model",
"==",
"\"person\"",
":",
"query",
"=",
"query",
".",
"exclude",
"(",
"pk",
"=",
"application",
".",
"applicant",
".",
"pk",
")",
"if",
"query",
".",
"count",
"(",
")",
">",
"0",
":",
"new_person",
"=",
"Person",
".",
"objects",
".",
"get",
"(",
"saml_id",
"=",
"saml_id",
")",
"reason",
"=",
"\"SAML id is already in use by existing person.\"",
"details",
"=",
"(",
"\"It is not possible to continue this application \"",
"+",
"\"as is because the saml identity already exists \"",
"+",
"\"as a registered user.\"",
")",
"del",
"query",
"if",
"request",
".",
"user",
".",
"is_authenticated",
":",
"new_person",
"=",
"request",
".",
"user",
"reason",
"=",
"\"%s was logged in \"",
"\"and accessed the secret URL.\"",
"%",
"new_person",
"details",
"=",
"(",
"\"If you want to access this application \"",
"+",
"\"as %s \"",
"%",
"application",
".",
"applicant",
"+",
"\"without %s stealing it, \"",
"%",
"new_person",
"+",
"\"you will have to ensure %s is \"",
"%",
"new_person",
"+",
"\"logged out first.\"",
")",
"if",
"new_person",
"is",
"not",
"None",
":",
"if",
"application",
".",
"applicant",
"!=",
"new_person",
":",
"if",
"'steal'",
"in",
"request",
".",
"POST",
":",
"old_applicant",
"=",
"application",
".",
"applicant",
"application",
".",
"applicant",
"=",
"new_person",
"application",
".",
"save",
"(",
")",
"log",
".",
"change",
"(",
"application",
".",
"application_ptr",
",",
"\"Stolen application from %s\"",
"%",
"old_applicant",
")",
"messages",
".",
"success",
"(",
"request",
",",
"\"Stolen application from %s\"",
"%",
"old_applicant",
")",
"url",
"=",
"base",
".",
"get_url",
"(",
"request",
",",
"application",
",",
"roles",
",",
"label",
")",
"return",
"HttpResponseRedirect",
"(",
"url",
")",
"else",
":",
"return",
"render",
"(",
"template_name",
"=",
"'kgapplications'",
"'/project_aed_steal.html'",
",",
"context",
"=",
"{",
"'application'",
":",
"application",
",",
"'person'",
":",
"new_person",
",",
"'reason'",
":",
"reason",
",",
"'details'",
":",
"details",
",",
"}",
",",
"request",
"=",
"request",
")",
"# if the user is the leader, show him the leader specific page.",
"if",
"(",
"'is_leader'",
"in",
"roles",
"or",
"'is_delegate'",
"in",
"roles",
")",
"and",
"'is_admin'",
"not",
"in",
"roles",
"and",
"'is_applicant'",
"not",
"in",
"roles",
":",
"actions",
"=",
"[",
"'reopen'",
"]",
"if",
"'reopen'",
"in",
"request",
".",
"POST",
":",
"return",
"'reopen'",
"return",
"render",
"(",
"template_name",
"=",
"'kgapplications/project_aed_for_leader.html'",
",",
"context",
"=",
"{",
"'application'",
":",
"application",
",",
"'actions'",
":",
"actions",
",",
"'roles'",
":",
"roles",
",",
"}",
",",
"request",
"=",
"request",
")",
"# otherwise do the default behaviour for StateWithSteps",
"return",
"super",
"(",
"StateApplicantEnteringDetails",
",",
"self",
")",
".",
"get_next_action",
"(",
"request",
",",
"application",
",",
"label",
",",
"roles",
")"
] | Process the get_next_action request at the current step. | [
"Process",
"the",
"get_next_action",
"request",
"at",
"the",
"current",
"step",
"."
] | python | train |
wbond/certvalidator | certvalidator/registry.py | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/registry.py#L276-L323 | def build_paths(self, end_entity_cert):
"""
Builds a list of ValidationPath objects from a certificate in the
operating system trust store to the end-entity certificate
:param end_entity_cert:
A byte string of a DER or PEM-encoded X.509 certificate, or an
instance of asn1crypto.x509.Certificate
:return:
A list of certvalidator.path.ValidationPath objects that represent
the possible paths from the end-entity certificate to one of the CA
certs.
"""
if not isinstance(end_entity_cert, byte_cls) and not isinstance(end_entity_cert, x509.Certificate):
raise TypeError(pretty_message(
'''
end_entity_cert must be a byte string or an instance of
asn1crypto.x509.Certificate, not %s
''',
type_name(end_entity_cert)
))
if isinstance(end_entity_cert, byte_cls):
if pem.detect(end_entity_cert):
_, _, end_entity_cert = pem.unarmor(end_entity_cert)
end_entity_cert = x509.Certificate.load(end_entity_cert)
path = ValidationPath(end_entity_cert)
paths = []
failed_paths = []
self._walk_issuers(path, paths, failed_paths)
if len(paths) == 0:
cert_name = end_entity_cert.subject.human_friendly
missing_issuer_name = failed_paths[0].first.issuer.human_friendly
raise PathBuildingError(pretty_message(
'''
Unable to build a validation path for the certificate "%s" - no
issuer matching "%s" was found
''',
cert_name,
missing_issuer_name
))
return paths | [
"def",
"build_paths",
"(",
"self",
",",
"end_entity_cert",
")",
":",
"if",
"not",
"isinstance",
"(",
"end_entity_cert",
",",
"byte_cls",
")",
"and",
"not",
"isinstance",
"(",
"end_entity_cert",
",",
"x509",
".",
"Certificate",
")",
":",
"raise",
"TypeError",
"(",
"pretty_message",
"(",
"'''\n end_entity_cert must be a byte string or an instance of\n asn1crypto.x509.Certificate, not %s\n '''",
",",
"type_name",
"(",
"end_entity_cert",
")",
")",
")",
"if",
"isinstance",
"(",
"end_entity_cert",
",",
"byte_cls",
")",
":",
"if",
"pem",
".",
"detect",
"(",
"end_entity_cert",
")",
":",
"_",
",",
"_",
",",
"end_entity_cert",
"=",
"pem",
".",
"unarmor",
"(",
"end_entity_cert",
")",
"end_entity_cert",
"=",
"x509",
".",
"Certificate",
".",
"load",
"(",
"end_entity_cert",
")",
"path",
"=",
"ValidationPath",
"(",
"end_entity_cert",
")",
"paths",
"=",
"[",
"]",
"failed_paths",
"=",
"[",
"]",
"self",
".",
"_walk_issuers",
"(",
"path",
",",
"paths",
",",
"failed_paths",
")",
"if",
"len",
"(",
"paths",
")",
"==",
"0",
":",
"cert_name",
"=",
"end_entity_cert",
".",
"subject",
".",
"human_friendly",
"missing_issuer_name",
"=",
"failed_paths",
"[",
"0",
"]",
".",
"first",
".",
"issuer",
".",
"human_friendly",
"raise",
"PathBuildingError",
"(",
"pretty_message",
"(",
"'''\n Unable to build a validation path for the certificate \"%s\" - no\n issuer matching \"%s\" was found\n '''",
",",
"cert_name",
",",
"missing_issuer_name",
")",
")",
"return",
"paths"
] | Builds a list of ValidationPath objects from a certificate in the
operating system trust store to the end-entity certificate
:param end_entity_cert:
A byte string of a DER or PEM-encoded X.509 certificate, or an
instance of asn1crypto.x509.Certificate
:return:
A list of certvalidator.path.ValidationPath objects that represent
the possible paths from the end-entity certificate to one of the CA
certs. | [
"Builds",
"a",
"list",
"of",
"ValidationPath",
"objects",
"from",
"a",
"certificate",
"in",
"the",
"operating",
"system",
"trust",
"store",
"to",
"the",
"end",
"-",
"entity",
"certificate"
] | python | train |
Crunch-io/crunch-cube | src/cr/cube/crunch_cube.py | https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/crunch_cube.py#L1445-L1449 | def weighted_n(self):
"""float count of returned rows adjusted for weighting."""
if not self.is_weighted:
return float(self.unweighted_n)
return float(sum(self._cube_dict["result"]["measures"]["count"]["data"])) | [
"def",
"weighted_n",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_weighted",
":",
"return",
"float",
"(",
"self",
".",
"unweighted_n",
")",
"return",
"float",
"(",
"sum",
"(",
"self",
".",
"_cube_dict",
"[",
"\"result\"",
"]",
"[",
"\"measures\"",
"]",
"[",
"\"count\"",
"]",
"[",
"\"data\"",
"]",
")",
")"
] | float count of returned rows adjusted for weighting. | [
"float",
"count",
"of",
"returned",
"rows",
"adjusted",
"for",
"weighting",
"."
] | python | train |
google/grr | grr/core/grr_response_core/lib/utils.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/utils.py#L828-L877 | def _GenerateZipInfo(self, arcname=None, compress_type=None, st=None):
"""Generate ZipInfo instance for the given name, compression and stat.
Args:
arcname: The name in the archive this should take.
compress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED)
st: An optional stat object to be used for setting headers.
Returns:
ZipInfo instance.
Raises:
ValueError: If arcname is not provided.
"""
# Fake stat response.
if st is None:
# TODO(user):pytype: stat_result typing is not correct.
# pytype: disable=wrong-arg-count
st = os.stat_result((0o100644, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# pytype: enable=wrong-arg-count
mtime = time.localtime(st.st_mtime or time.time())
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
raise ValueError("An arcname must be provided.")
zinfo = zipfile.ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self._compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.flag_bits = 0x08 # Setting data descriptor flag.
zinfo.CRC = 0x08074b50 # Predefined CRC for archives using data
# descriptors.
# This fills an empty Info-ZIP Unix extra field.
zinfo.extra = struct.pack(
"<HHIIHH",
0x5855,
12,
0, # time of last access (UTC/GMT)
0, # time of last modification (UTC/GMT)
0, # user ID
0) # group ID
return zinfo | [
"def",
"_GenerateZipInfo",
"(",
"self",
",",
"arcname",
"=",
"None",
",",
"compress_type",
"=",
"None",
",",
"st",
"=",
"None",
")",
":",
"# Fake stat response.",
"if",
"st",
"is",
"None",
":",
"# TODO(user):pytype: stat_result typing is not correct.",
"# pytype: disable=wrong-arg-count",
"st",
"=",
"os",
".",
"stat_result",
"(",
"(",
"0o100644",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
")",
")",
"# pytype: enable=wrong-arg-count",
"mtime",
"=",
"time",
".",
"localtime",
"(",
"st",
".",
"st_mtime",
"or",
"time",
".",
"time",
"(",
")",
")",
"date_time",
"=",
"mtime",
"[",
"0",
":",
"6",
"]",
"# Create ZipInfo instance to store file information",
"if",
"arcname",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"An arcname must be provided.\"",
")",
"zinfo",
"=",
"zipfile",
".",
"ZipInfo",
"(",
"arcname",
",",
"date_time",
")",
"zinfo",
".",
"external_attr",
"=",
"(",
"st",
"[",
"0",
"]",
"&",
"0xFFFF",
")",
"<<",
"16",
"# Unix attributes",
"if",
"compress_type",
"is",
"None",
":",
"zinfo",
".",
"compress_type",
"=",
"self",
".",
"_compression",
"else",
":",
"zinfo",
".",
"compress_type",
"=",
"compress_type",
"zinfo",
".",
"file_size",
"=",
"0",
"zinfo",
".",
"compress_size",
"=",
"0",
"zinfo",
".",
"flag_bits",
"=",
"0x08",
"# Setting data descriptor flag.",
"zinfo",
".",
"CRC",
"=",
"0x08074b50",
"# Predefined CRC for archives using data",
"# descriptors.",
"# This fills an empty Info-ZIP Unix extra field.",
"zinfo",
".",
"extra",
"=",
"struct",
".",
"pack",
"(",
"\"<HHIIHH\"",
",",
"0x5855",
",",
"12",
",",
"0",
",",
"# time of last access (UTC/GMT)",
"0",
",",
"# time of last modification (UTC/GMT)",
"0",
",",
"# user ID",
"0",
")",
"# group ID",
"return",
"zinfo"
] | Generate ZipInfo instance for the given name, compression and stat.
Args:
arcname: The name in the archive this should take.
compress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED)
st: An optional stat object to be used for setting headers.
Returns:
ZipInfo instance.
Raises:
ValueError: If arcname is not provided. | [
"Generate",
"ZipInfo",
"instance",
"for",
"the",
"given",
"name",
"compression",
"and",
"stat",
"."
] | python | train |
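
A minimal, self-contained sketch of the data-descriptor technique used by _GenerateZipInfo above, useful for streaming writes where sizes are unknown up front. The archive name and timestamp here are invented for illustration.

import struct
import zipfile

def make_streaming_zipinfo(arcname, date_time=(1980, 1, 1, 0, 0, 0)):
    # Sizes and CRC are unknown while streaming, so set the data-descriptor
    # flag (bit 3) and the placeholder CRC, mirroring the method above.
    zinfo = zipfile.ZipInfo(arcname, date_time)
    zinfo.compress_type = zipfile.ZIP_DEFLATED
    zinfo.file_size = 0
    zinfo.compress_size = 0
    zinfo.flag_bits = 0x08
    zinfo.CRC = 0x08074b50
    # Empty Info-ZIP Unix extra field (tag 0x5855, 12 data bytes).
    zinfo.extra = struct.pack("<HHIIHH", 0x5855, 12, 0, 0, 0, 0)
    return zinfo

print(make_streaming_zipinfo("logs/output.txt").flag_bits)  # 8
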
iotile/coretools | iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/docbook/__init__.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/docbook/__init__.py#L757-L790 | def DocbookSlidesHtml(env, target, source=None, *args, **kw):
"""
A pseudo-Builder, providing a Docbook toolchain for HTML slides output.
"""
# Init list of targets/sources
if not SCons.Util.is_List(target):
target = [target]
if not source:
source = target
target = ['index.html']
elif not SCons.Util.is_List(source):
source = [source]
# Init XSL stylesheet
__init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_SLIDESHTML', ['slides','html','plain.xsl'])
# Setup builder
__builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
# Detect base dir
base_dir = kw.get('base_dir', '')
if base_dir:
__create_output_dir(base_dir)
# Create targets
result = []
r = __builder.__call__(env, __ensure_suffix(str(target[0]), '.html'), source[0], **kw)
env.Depends(r, kw['DOCBOOK_XSL'])
result.extend(r)
# Add supporting files for cleanup
env.Clean(r, [os.path.join(base_dir, 'toc.html')] +
glob.glob(os.path.join(base_dir, 'foil*.html')))
return result | [
"def",
"DocbookSlidesHtml",
"(",
"env",
",",
"target",
",",
"source",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"# Init list of targets/sources",
"if",
"not",
"SCons",
".",
"Util",
".",
"is_List",
"(",
"target",
")",
":",
"target",
"=",
"[",
"target",
"]",
"if",
"not",
"source",
":",
"source",
"=",
"target",
"target",
"=",
"[",
"'index.html'",
"]",
"elif",
"not",
"SCons",
".",
"Util",
".",
"is_List",
"(",
"source",
")",
":",
"source",
"=",
"[",
"source",
"]",
"# Init XSL stylesheet",
"__init_xsl_stylesheet",
"(",
"kw",
",",
"env",
",",
"'$DOCBOOK_DEFAULT_XSL_SLIDESHTML'",
",",
"[",
"'slides'",
",",
"'html'",
",",
"'plain.xsl'",
"]",
")",
"# Setup builder",
"__builder",
"=",
"__select_builder",
"(",
"__lxml_builder",
",",
"__libxml2_builder",
",",
"__xsltproc_builder",
")",
"# Detect base dir",
"base_dir",
"=",
"kw",
".",
"get",
"(",
"'base_dir'",
",",
"''",
")",
"if",
"base_dir",
":",
"__create_output_dir",
"(",
"base_dir",
")",
"# Create targets",
"result",
"=",
"[",
"]",
"r",
"=",
"__builder",
".",
"__call__",
"(",
"env",
",",
"__ensure_suffix",
"(",
"str",
"(",
"target",
"[",
"0",
"]",
")",
",",
"'.html'",
")",
",",
"source",
"[",
"0",
"]",
",",
"*",
"*",
"kw",
")",
"env",
".",
"Depends",
"(",
"r",
",",
"kw",
"[",
"'DOCBOOK_XSL'",
"]",
")",
"result",
".",
"extend",
"(",
"r",
")",
"# Add supporting files for cleanup",
"env",
".",
"Clean",
"(",
"r",
",",
"[",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"'toc.html'",
")",
"]",
"+",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"'foil*.html'",
")",
")",
")",
"return",
"result"
] | A pseudo-Builder, providing a Docbook toolchain for HTML slides output. | [
"A",
"pseudo",
"-",
"Builder",
"providing",
"a",
"Docbook",
"toolchain",
"for",
"HTML",
"slides",
"output",
"."
] | python | train |
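
A hypothetical SConstruct snippet showing how the pseudo-builder above might be invoked; the file names and base_dir are assumptions, and SCons with the docbook tool must be available.

from SCons.Script import Environment

env = Environment(tools=['docbook'])
# Builds the slides page (the target gets a .html suffix ensured) plus the
# toc/foil pages that the Clean() call above registers for removal.
env.DocbookSlidesHtml('virt_slides', 'virt.xml', base_dir='html/')
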
saltstack/salt | salt/modules/boto_secgroup.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_secgroup.py#L239-L298 | def get_all_security_groups(groupnames=None, group_ids=None, filters=None,
region=None, key=None, keyid=None, profile=None):
'''
Return a list of all Security Groups matching the given criteria and filters.
Note that the 'groupnames' argument only functions correctly for EC2 Classic
and default VPC Security Groups. To find groups by name in other VPCs you'll
want to use the 'group-name' filter instead.
Valid keys for the filters argument are:
description - The description of the security group.
egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service to which the security group allows access.
group-id - The ID of the security group.
group-name - The name of the security group.
ip-permission.cidr - A CIDR range that has been granted permission.
ip-permission.from-port - The start of port range for the TCP and UDP protocols, or an ICMP type number.
ip-permission.group-id - The ID of a security group that has been granted permission.
ip-permission.group-name - The name of a security group that has been granted permission.
ip-permission.protocol - The IP protocol for the permission (tcp | udp | icmp or a protocol number).
ip-permission.to-port - The end of port range for the TCP and UDP protocols, or an ICMP code.
ip-permission.user-id - The ID of an AWS account that has been granted permission.
owner-id - The AWS account ID of the owner of the security group.
tag-key - The key of a tag assigned to the security group.
tag-value - The value of a tag assigned to the security group.
vpc-id - The ID of the VPC specified when the security group was created.
CLI example::
salt myminion boto_secgroup.get_all_security_groups filters='{group-name: mygroup}'
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(groupnames, six.string_types):
groupnames = [groupnames]
if isinstance(group_ids, six.string_types):
        group_ids = [group_ids]
interesting = ['description', 'id', 'instances', 'name', 'owner_id',
'region', 'rules', 'rules_egress', 'tags', 'vpc_id']
ret = []
try:
r = conn.get_all_security_groups(groupnames=groupnames,
group_ids=group_ids,
filters=filters)
for g in r:
n = {}
for a in interesting:
v = getattr(g, a, None)
if a == 'region':
v = v.name
elif a in ('rules', 'rules_egress'):
v = _parse_rules(g, v)
elif a == 'instances':
v = [i.id for i in v()]
n[a] = v
ret += [n]
return ret
except boto.exception.BotoServerError as e:
log.debug(e)
return [] | [
"def",
"get_all_security_groups",
"(",
"groupnames",
"=",
"None",
",",
"group_ids",
"=",
"None",
",",
"filters",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"isinstance",
"(",
"groupnames",
",",
"six",
".",
"string_types",
")",
":",
"groupnames",
"=",
"[",
"groupnames",
"]",
"if",
"isinstance",
"(",
"group_ids",
",",
"six",
".",
"string_types",
")",
":",
"groupnames",
"=",
"[",
"group_ids",
"]",
"interesting",
"=",
"[",
"'description'",
",",
"'id'",
",",
"'instances'",
",",
"'name'",
",",
"'owner_id'",
",",
"'region'",
",",
"'rules'",
",",
"'rules_egress'",
",",
"'tags'",
",",
"'vpc_id'",
"]",
"ret",
"=",
"[",
"]",
"try",
":",
"r",
"=",
"conn",
".",
"get_all_security_groups",
"(",
"groupnames",
"=",
"groupnames",
",",
"group_ids",
"=",
"group_ids",
",",
"filters",
"=",
"filters",
")",
"for",
"g",
"in",
"r",
":",
"n",
"=",
"{",
"}",
"for",
"a",
"in",
"interesting",
":",
"v",
"=",
"getattr",
"(",
"g",
",",
"a",
",",
"None",
")",
"if",
"a",
"==",
"'region'",
":",
"v",
"=",
"v",
".",
"name",
"elif",
"a",
"in",
"(",
"'rules'",
",",
"'rules_egress'",
")",
":",
"v",
"=",
"_parse_rules",
"(",
"g",
",",
"v",
")",
"elif",
"a",
"==",
"'instances'",
":",
"v",
"=",
"[",
"i",
".",
"id",
"for",
"i",
"in",
"v",
"(",
")",
"]",
"n",
"[",
"a",
"]",
"=",
"v",
"ret",
"+=",
"[",
"n",
"]",
"return",
"ret",
"except",
"boto",
".",
"exception",
".",
"BotoServerError",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"e",
")",
"return",
"[",
"]"
] | Return a list of all Security Groups matching the given criteria and filters.
Note that the 'groupnames' argument only functions correctly for EC2 Classic
and default VPC Security Groups. To find groups by name in other VPCs you'll
want to use the 'group-name' filter instead.
Valid keys for the filters argument are:
description - The description of the security group.
egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service to which the security group allows access.
group-id - The ID of the security group.
group-name - The name of the security group.
ip-permission.cidr - A CIDR range that has been granted permission.
ip-permission.from-port - The start of port range for the TCP and UDP protocols, or an ICMP type number.
ip-permission.group-id - The ID of a security group that has been granted permission.
ip-permission.group-name - The name of a security group that has been granted permission.
ip-permission.protocol - The IP protocol for the permission (tcp | udp | icmp or a protocol number).
ip-permission.to-port - The end of port range for the TCP and UDP protocols, or an ICMP code.
ip-permission.user-id - The ID of an AWS account that has been granted permission.
owner-id - The AWS account ID of the owner of the security group.
tag-key - The key of a tag assigned to the security group.
tag-value - The value of a tag assigned to the security group.
vpc-id - The ID of the VPC specified when the security group was created.
CLI example::
salt myminion boto_secgroup.get_all_security_groups filters='{group-name: mygroup}' | [
"Return",
"a",
"list",
"of",
"all",
"Security",
"Groups",
"matching",
"the",
"given",
"criteria",
"and",
"filters",
"."
] | python | train |
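
A hedged sketch of driving the execution module above from Python instead of the CLI call shown in the docstring; the minion id and group name are invented, and this assumes a running salt master.

import salt.client

local = salt.client.LocalClient()
result = local.cmd(
    'myminion',
    'boto_secgroup.get_all_security_groups',
    kwarg={'filters': {'group-name': 'mygroup'}},
)
print(result)  # e.g. {'myminion': [{'name': 'mygroup', 'rules': [...], ...}]}
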
neo4j-drivers/neobolt | neobolt/impl/python/direct.py | https://github.com/neo4j-drivers/neobolt/blob/724569d76e85777c4f5e30e8d0a18116bda4d8cd/neobolt/impl/python/direct.py#L288-L298 | def _append(self, signature, fields=(), response=None):
""" Add a message to the outgoing queue.
:arg signature: the signature of the message
:arg fields: the fields of the message as a tuple
:arg response: a response object to handle callbacks
"""
self.packer.pack_struct(signature, fields)
self.output_buffer.chunk()
self.output_buffer.chunk()
self.responses.append(response) | [
"def",
"_append",
"(",
"self",
",",
"signature",
",",
"fields",
"=",
"(",
")",
",",
"response",
"=",
"None",
")",
":",
"self",
".",
"packer",
".",
"pack_struct",
"(",
"signature",
",",
"fields",
")",
"self",
".",
"output_buffer",
".",
"chunk",
"(",
")",
"self",
".",
"output_buffer",
".",
"chunk",
"(",
")",
"self",
".",
"responses",
".",
"append",
"(",
"response",
")"
] | Add a message to the outgoing queue.
:arg signature: the signature of the message
:arg fields: the fields of the message as a tuple
:arg response: a response object to handle callbacks | [
"Add",
"a",
"message",
"to",
"the",
"outgoing",
"queue",
"."
] | python | train |
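
A self-contained sketch of the Bolt chunked framing that plausibly explains the two chunk() calls above: the first closes the data chunk, the second emits the zero-length end-of-message marker.

import struct

def frame_message(payload, max_chunk=0xFFFF):
    out = bytearray()
    for i in range(0, len(payload), max_chunk):
        part = payload[i:i + max_chunk]
        out += struct.pack(">H", len(part)) + part  # 2-byte size header
    out += struct.pack(">H", 0)  # empty chunk marks end of message
    return bytes(out)

print(frame_message(b"\xb1\x01\xa0").hex())  # 0003b101a00000
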
twilio/twilio-python | twilio/rest/api/v2010/account/usage/trigger.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/usage/trigger.py#L296-L325 | def update(self, callback_method=values.unset, callback_url=values.unset,
friendly_name=values.unset):
"""
Update the TriggerInstance
:param unicode callback_method: The HTTP method to use to call callback_url
:param unicode callback_url: The URL we call when the trigger fires
:param unicode friendly_name: A string to describe the resource
:returns: Updated TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
"""
data = values.of({
'CallbackMethod': callback_method,
'CallbackUrl': callback_url,
'FriendlyName': friendly_name,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return TriggerInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
) | [
"def",
"update",
"(",
"self",
",",
"callback_method",
"=",
"values",
".",
"unset",
",",
"callback_url",
"=",
"values",
".",
"unset",
",",
"friendly_name",
"=",
"values",
".",
"unset",
")",
":",
"data",
"=",
"values",
".",
"of",
"(",
"{",
"'CallbackMethod'",
":",
"callback_method",
",",
"'CallbackUrl'",
":",
"callback_url",
",",
"'FriendlyName'",
":",
"friendly_name",
",",
"}",
")",
"payload",
"=",
"self",
".",
"_version",
".",
"update",
"(",
"'POST'",
",",
"self",
".",
"_uri",
",",
"data",
"=",
"data",
",",
")",
"return",
"TriggerInstance",
"(",
"self",
".",
"_version",
",",
"payload",
",",
"account_sid",
"=",
"self",
".",
"_solution",
"[",
"'account_sid'",
"]",
",",
"sid",
"=",
"self",
".",
"_solution",
"[",
"'sid'",
"]",
",",
")"
] | Update the TriggerInstance
:param unicode callback_method: The HTTP method to use to call callback_url
:param unicode callback_url: The URL we call when the trigger fires
:param unicode friendly_name: A string to describe the resource
:returns: Updated TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance | [
"Update",
"the",
"TriggerInstance"
] | python | train |
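
A hypothetical usage sketch; the account SID, auth token, and trigger SID are placeholders, and this assumes the twilio client library is installed.

from twilio.rest import Client

client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
trigger = client.usage.triggers("UTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(
    callback_url="https://example.com/usage-callback",
    callback_method="POST",
    friendly_name="monthly spend alert",
)
print(trigger.friendly_name)
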
Rockhopper-Technologies/pluginlib | pluginlib/_loader.py | https://github.com/Rockhopper-Technologies/pluginlib/blob/8beb78984dd9c97c493642df9da9f1b5a1c5e2b2/pluginlib/_loader.py#L371-L395 | def get_plugin(self, plugin_type, name, version=None):
"""
Args:
plugin_type(str): Parent type
name(str): Plugin name
version(str): Plugin version
Returns:
:py:class:`Plugin`: Plugin, or :py:data:`None` if plugin can't be found
Retrieve a specific plugin. ``blacklist`` and ``type_filter`` still apply.
If ``version`` is not specified, the newest available version is returned.
"""
if not self.loaded:
self.load_modules()
# pylint: disable=protected-access
return get_plugins()[self.group]._filter(blacklist=self.blacklist,
newest_only=True,
type_filter=self.type_filter,
type=plugin_type,
name=name,
version=version) | [
"def",
"get_plugin",
"(",
"self",
",",
"plugin_type",
",",
"name",
",",
"version",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"loaded",
":",
"self",
".",
"load_modules",
"(",
")",
"# pylint: disable=protected-access",
"return",
"get_plugins",
"(",
")",
"[",
"self",
".",
"group",
"]",
".",
"_filter",
"(",
"blacklist",
"=",
"self",
".",
"blacklist",
",",
"newest_only",
"=",
"True",
",",
"type_filter",
"=",
"self",
".",
"type_filter",
",",
"type",
"=",
"plugin_type",
",",
"name",
"=",
"name",
",",
"version",
"=",
"version",
")"
] | Args:
plugin_type(str): Parent type
name(str): Plugin name
version(str): Plugin version
Returns:
:py:class:`Plugin`: Plugin, or :py:data:`None` if plugin can't be found
Retrieve a specific plugin. ``blacklist`` and ``type_filter`` still apply.
If ``version`` is not specified, the newest available version is returned. | [
"Args",
":",
"plugin_type",
"(",
"str",
")",
":",
"Parent",
"type",
"name",
"(",
"str",
")",
":",
"Plugin",
"name",
"version",
"(",
"str",
")",
":",
"Plugin",
"version"
] | python | train |
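
A hypothetical usage sketch; the plugin group, module path, and plugin names are invented for illustration. get_plugin() returns None when nothing matches the filters.

from pluginlib import PluginLoader

loader = PluginLoader(group='parsers', modules=['myapp.plugins'])
parser = loader.get_plugin('parser', 'json')             # newest version
parser_v1 = loader.get_plugin('parser', 'json', '1.0')   # pinned version
if parser is None:
    raise LookupError('no json parser plugin available')
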
ubc/ubcpi | ubcpi/answer_pool.py | https://github.com/ubc/ubcpi/blob/7b6de03f93f3a4a8af4b92dfde7c69eeaf21f46e/ubcpi/answer_pool.py#L277-L323 | def get_other_answers_random(pool, seeded_answers, get_student_item_dict, num_responses):
"""
    Get answers from others with random algorithm, which randomly selects answers from the pool.
Student may get three answers for option 1 or one answer for option 1 and two answers for option 2.
Args:
see `get_other_answers`
num_responses (int): the number of responses to be returned. This value may not be
            respected if there are not enough answers to return
Returns:
dict: answers based on the selection algorithm
"""
ret = []
# clean up answers so that all keys are int
pool = {int(k): v for k, v in pool.items()}
seeded = {'seeded'+str(index): answer for index, answer in enumerate(seeded_answers)}
merged_pool = seeded.keys()
for key in pool:
merged_pool += pool[key].keys()
# shuffle
random.shuffle(merged_pool)
# get student identifier
student_id = get_student_item_dict()['student_id']
for student in merged_pool:
if len(ret) >= num_responses:
# have enough answers
break
elif student == student_id:
# this is the student's answer so don't return
continue
if student.startswith('seeded'):
option = seeded[student]['answer']
rationale = seeded[student]['rationale']
else:
student_item = get_student_item_dict(student)
submission = sas_api.get_answers_for_student(student_item)
rationale = submission.get_rationale(0)
option = submission.get_vote(0)
ret.append({'option': option, 'rationale': rationale})
return {"answers": ret} | [
"def",
"get_other_answers_random",
"(",
"pool",
",",
"seeded_answers",
",",
"get_student_item_dict",
",",
"num_responses",
")",
":",
"ret",
"=",
"[",
"]",
"# clean up answers so that all keys are int",
"pool",
"=",
"{",
"int",
"(",
"k",
")",
":",
"v",
"for",
"k",
",",
"v",
"in",
"pool",
".",
"items",
"(",
")",
"}",
"seeded",
"=",
"{",
"'seeded'",
"+",
"str",
"(",
"index",
")",
":",
"answer",
"for",
"index",
",",
"answer",
"in",
"enumerate",
"(",
"seeded_answers",
")",
"}",
"merged_pool",
"=",
"seeded",
".",
"keys",
"(",
")",
"for",
"key",
"in",
"pool",
":",
"merged_pool",
"+=",
"pool",
"[",
"key",
"]",
".",
"keys",
"(",
")",
"# shuffle",
"random",
".",
"shuffle",
"(",
"merged_pool",
")",
"# get student identifier",
"student_id",
"=",
"get_student_item_dict",
"(",
")",
"[",
"'student_id'",
"]",
"for",
"student",
"in",
"merged_pool",
":",
"if",
"len",
"(",
"ret",
")",
">=",
"num_responses",
":",
"# have enough answers",
"break",
"elif",
"student",
"==",
"student_id",
":",
"# this is the student's answer so don't return",
"continue",
"if",
"student",
".",
"startswith",
"(",
"'seeded'",
")",
":",
"option",
"=",
"seeded",
"[",
"student",
"]",
"[",
"'answer'",
"]",
"rationale",
"=",
"seeded",
"[",
"student",
"]",
"[",
"'rationale'",
"]",
"else",
":",
"student_item",
"=",
"get_student_item_dict",
"(",
"student",
")",
"submission",
"=",
"sas_api",
".",
"get_answers_for_student",
"(",
"student_item",
")",
"rationale",
"=",
"submission",
".",
"get_rationale",
"(",
"0",
")",
"option",
"=",
"submission",
".",
"get_vote",
"(",
"0",
")",
"ret",
".",
"append",
"(",
"{",
"'option'",
":",
"option",
",",
"'rationale'",
":",
"rationale",
"}",
")",
"return",
"{",
"\"answers\"",
":",
"ret",
"}"
] | Get answers from others with random algorithm, which randomly selects answers from the pool.
Student may get three answers for option 1 or one answer for option 1 and two answers for option 2.
Args:
see `get_other_answers`
num_responses (int): the number of responses to be returned. This value may not be
            respected if there are not enough answers to return
Returns:
dict: answers based on the selection algorithm | [
"Get",
"answers",
"from",
"others",
"with",
"random",
"algorithm",
"which",
"randomly",
"select",
"answer",
"from",
"the",
"pool",
"."
] | python | train |
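
The selection logic above, isolated into a runnable sketch: merge seeded and student answer keys, shuffle, then keep the first few entries that are not the requesting student.

import random

def pick_other_answers(pool_ids, seeded_ids, my_id, num_responses):
    merged = list(seeded_ids) + list(pool_ids)
    random.shuffle(merged)
    picked = []
    for entry in merged:
        if len(picked) >= num_responses:
            break
        if entry == my_id:  # never return the student's own answer
            continue
        picked.append(entry)
    return picked

print(pick_other_answers(['s1', 's2', 's3'], ['seeded0'], 's2', 3))
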
materialsproject/pymatgen | pymatgen/analysis/gb/grain.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/gb/grain.py#L1376-L1517 | def enum_sigma_rho(cutoff, r_axis, ratio_alpha):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in rhombohedral system.
The algorithm for this code is from reference, Acta Cryst, A45,505(1989).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w):
the rotation axis of the grain boundary, with the format of [u,v,w]
or Weber indices [u, v, t, w].
ratio_alpha (list of two integers, e.g. mu, mv):
mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha) with rational number.
If irrational, set ratio_alpha = None.
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
        Note: the angles are the rotation angle of one grain with respect to the
other grain.
        When generating the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
"""
sigmas = {}
# transform four index notation to three index notation
if len(r_axis) == 4:
u1 = r_axis[0]
v1 = r_axis[1]
w1 = r_axis[3]
u = 2 * u1 + v1 + w1
v = v1 + w1 - u1
w = w1 - 2 * v1 - u1
r_axis = [u, v, w]
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if ratio_alpha is None:
mu, mv = [1, 1]
if u + v + w != 0:
if u != v or u != w:
                raise RuntimeError('For irrational ratio_alpha, CSL only exists for [1,1,1] '
                                   'or [u, v, -(u+v)] and m = 0')
else:
mu, mv = ratio_alpha
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
# refer to the meaning of d in reference
d = (u ** 2 + v ** 2 + w ** 2) * (mu - 2 * mv) + \
2 * mv * (v * w + w * u + u * v)
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * abs(4 * mu * (mu - 3 * mv))) / abs(d)))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
if ratio_alpha is None and u + v + w == 0:
m_max = 0
else:
m_max = int(np.sqrt((cutoff * abs(4 * mu * (mu - 3 * mv)) - n ** 2 * d) / (mu)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [(mu - 2 * mv) * (u ** 2 - v ** 2 - w ** 2) * n ** 2 +
2 * mv * (v - w) * m * n - 2 * mv * v * w * n ** 2 +
mu * m ** 2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
(mu - 2 * mv) * (v ** 2 - w ** 2 - u ** 2) * n ** 2 +
2 * mv * (w - u) * m * n - 2 * mv * u * w * n ** 2 +
mu * m ** 2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
(mu - 2 * mv) * (w ** 2 - u ** 2 - v ** 2) * n ** 2 +
2 * mv * (u - v) * m * n - 2 * mv * u * v * n ** 2 +
mu * m ** 2]
m = -1 * m
# inverse of the rotation matrix
R_list_inv = [(mu - 2 * mv) * (u ** 2 - v ** 2 - w ** 2) * n ** 2 +
2 * mv * (v - w) * m * n - 2 * mv * v * w * n ** 2 +
mu * m ** 2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
(mu - 2 * mv) * (v ** 2 - w ** 2 - u ** 2) * n ** 2 +
2 * mv * (w - u) * m * n - 2 * mv * u * w * n ** 2 +
mu * m ** 2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
(mu - 2 * mv) * (w ** 2 - u ** 2 - v ** 2) * n ** 2 +
2 * mv * (u - v) * m * n - 2 * mv * u * v * n ** 2 +
mu * m ** 2]
m = -1 * m
F = mu * m ** 2 + d * n ** 2
all_list = R_list_inv + R_list + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round(abs(F / com_fac)))
if (sigma <= cutoff) and (sigma > 1):
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) \
/ np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) \
/ np.pi * 180.0
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas | [
"def",
"enum_sigma_rho",
"(",
"cutoff",
",",
"r_axis",
",",
"ratio_alpha",
")",
":",
"sigmas",
"=",
"{",
"}",
"# transform four index notation to three index notation",
"if",
"len",
"(",
"r_axis",
")",
"==",
"4",
":",
"u1",
"=",
"r_axis",
"[",
"0",
"]",
"v1",
"=",
"r_axis",
"[",
"1",
"]",
"w1",
"=",
"r_axis",
"[",
"3",
"]",
"u",
"=",
"2",
"*",
"u1",
"+",
"v1",
"+",
"w1",
"v",
"=",
"v1",
"+",
"w1",
"-",
"u1",
"w",
"=",
"w1",
"-",
"2",
"*",
"v1",
"-",
"u1",
"r_axis",
"=",
"[",
"u",
",",
"v",
",",
"w",
"]",
"# make sure gcd(r_axis)==1",
"if",
"reduce",
"(",
"gcd",
",",
"r_axis",
")",
"!=",
"1",
":",
"r_axis",
"=",
"[",
"int",
"(",
"round",
"(",
"x",
"/",
"reduce",
"(",
"gcd",
",",
"r_axis",
")",
")",
")",
"for",
"x",
"in",
"r_axis",
"]",
"u",
",",
"v",
",",
"w",
"=",
"r_axis",
"# make sure mu, mv are coprime integers.",
"if",
"ratio_alpha",
"is",
"None",
":",
"mu",
",",
"mv",
"=",
"[",
"1",
",",
"1",
"]",
"if",
"u",
"+",
"v",
"+",
"w",
"!=",
"0",
":",
"if",
"u",
"!=",
"v",
"or",
"u",
"!=",
"w",
":",
"raise",
"RuntimeError",
"(",
"'For irrational ratio_alpha, CSL only exist for [1,1,1]'",
"'or [u, v, -(u+v)] and m =0'",
")",
"else",
":",
"mu",
",",
"mv",
"=",
"ratio_alpha",
"if",
"gcd",
"(",
"mu",
",",
"mv",
")",
"!=",
"1",
":",
"temp",
"=",
"gcd",
"(",
"mu",
",",
"mv",
")",
"mu",
"=",
"int",
"(",
"round",
"(",
"mu",
"/",
"temp",
")",
")",
"mv",
"=",
"int",
"(",
"round",
"(",
"mv",
"/",
"temp",
")",
")",
"# refer to the meaning of d in reference",
"d",
"=",
"(",
"u",
"**",
"2",
"+",
"v",
"**",
"2",
"+",
"w",
"**",
"2",
")",
"*",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"+",
"2",
"*",
"mv",
"*",
"(",
"v",
"*",
"w",
"+",
"w",
"*",
"u",
"+",
"u",
"*",
"v",
")",
"# Compute the max n we need to enumerate.",
"n_max",
"=",
"int",
"(",
"np",
".",
"sqrt",
"(",
"(",
"cutoff",
"*",
"abs",
"(",
"4",
"*",
"mu",
"*",
"(",
"mu",
"-",
"3",
"*",
"mv",
")",
")",
")",
"/",
"abs",
"(",
"d",
")",
")",
")",
"# Enumerate all possible n, m to give possible sigmas within the cutoff.",
"for",
"n",
"in",
"range",
"(",
"1",
",",
"n_max",
"+",
"1",
")",
":",
"if",
"ratio_alpha",
"is",
"None",
"and",
"u",
"+",
"v",
"+",
"w",
"==",
"0",
":",
"m_max",
"=",
"0",
"else",
":",
"m_max",
"=",
"int",
"(",
"np",
".",
"sqrt",
"(",
"(",
"cutoff",
"*",
"abs",
"(",
"4",
"*",
"mu",
"*",
"(",
"mu",
"-",
"3",
"*",
"mv",
")",
")",
"-",
"n",
"**",
"2",
"*",
"d",
")",
"/",
"(",
"mu",
")",
")",
")",
"for",
"m",
"in",
"range",
"(",
"0",
",",
"m_max",
"+",
"1",
")",
":",
"if",
"gcd",
"(",
"m",
",",
"n",
")",
"==",
"1",
"or",
"m",
"==",
"0",
":",
"# construct the rotation matrix, refer to the reference",
"R_list",
"=",
"[",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"*",
"(",
"u",
"**",
"2",
"-",
"v",
"**",
"2",
"-",
"w",
"**",
"2",
")",
"*",
"n",
"**",
"2",
"+",
"2",
"*",
"mv",
"*",
"(",
"v",
"-",
"w",
")",
"*",
"m",
"*",
"n",
"-",
"2",
"*",
"mv",
"*",
"v",
"*",
"w",
"*",
"n",
"**",
"2",
"+",
"mu",
"*",
"m",
"**",
"2",
",",
"2",
"*",
"(",
"mv",
"*",
"u",
"*",
"n",
"*",
"(",
"w",
"*",
"n",
"+",
"u",
"*",
"n",
"-",
"m",
")",
"-",
"(",
"mu",
"-",
"mv",
")",
"*",
"m",
"*",
"w",
"*",
"n",
"+",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"*",
"u",
"*",
"v",
"*",
"n",
"**",
"2",
")",
",",
"2",
"*",
"(",
"mv",
"*",
"u",
"*",
"n",
"*",
"(",
"v",
"*",
"n",
"+",
"u",
"*",
"n",
"+",
"m",
")",
"+",
"(",
"mu",
"-",
"mv",
")",
"*",
"m",
"*",
"v",
"*",
"n",
"+",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"*",
"w",
"*",
"u",
"*",
"n",
"**",
"2",
")",
",",
"2",
"*",
"(",
"mv",
"*",
"v",
"*",
"n",
"*",
"(",
"w",
"*",
"n",
"+",
"v",
"*",
"n",
"+",
"m",
")",
"+",
"(",
"mu",
"-",
"mv",
")",
"*",
"m",
"*",
"w",
"*",
"n",
"+",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"*",
"u",
"*",
"v",
"*",
"n",
"**",
"2",
")",
",",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"*",
"(",
"v",
"**",
"2",
"-",
"w",
"**",
"2",
"-",
"u",
"**",
"2",
")",
"*",
"n",
"**",
"2",
"+",
"2",
"*",
"mv",
"*",
"(",
"w",
"-",
"u",
")",
"*",
"m",
"*",
"n",
"-",
"2",
"*",
"mv",
"*",
"u",
"*",
"w",
"*",
"n",
"**",
"2",
"+",
"mu",
"*",
"m",
"**",
"2",
",",
"2",
"*",
"(",
"mv",
"*",
"v",
"*",
"n",
"*",
"(",
"v",
"*",
"n",
"+",
"u",
"*",
"n",
"-",
"m",
")",
"-",
"(",
"mu",
"-",
"mv",
")",
"*",
"m",
"*",
"u",
"*",
"n",
"+",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"*",
"w",
"*",
"v",
"*",
"n",
"**",
"2",
")",
",",
"2",
"*",
"(",
"mv",
"*",
"w",
"*",
"n",
"*",
"(",
"w",
"*",
"n",
"+",
"v",
"*",
"n",
"-",
"m",
")",
"-",
"(",
"mu",
"-",
"mv",
")",
"*",
"m",
"*",
"v",
"*",
"n",
"+",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"*",
"w",
"*",
"u",
"*",
"n",
"**",
"2",
")",
",",
"2",
"*",
"(",
"mv",
"*",
"w",
"*",
"n",
"*",
"(",
"w",
"*",
"n",
"+",
"u",
"*",
"n",
"+",
"m",
")",
"+",
"(",
"mu",
"-",
"mv",
")",
"*",
"m",
"*",
"u",
"*",
"n",
"+",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"*",
"w",
"*",
"v",
"*",
"n",
"**",
"2",
")",
",",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"*",
"(",
"w",
"**",
"2",
"-",
"u",
"**",
"2",
"-",
"v",
"**",
"2",
")",
"*",
"n",
"**",
"2",
"+",
"2",
"*",
"mv",
"*",
"(",
"u",
"-",
"v",
")",
"*",
"m",
"*",
"n",
"-",
"2",
"*",
"mv",
"*",
"u",
"*",
"v",
"*",
"n",
"**",
"2",
"+",
"mu",
"*",
"m",
"**",
"2",
"]",
"m",
"=",
"-",
"1",
"*",
"m",
"# inverse of the rotation matrix",
"R_list_inv",
"=",
"[",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"*",
"(",
"u",
"**",
"2",
"-",
"v",
"**",
"2",
"-",
"w",
"**",
"2",
")",
"*",
"n",
"**",
"2",
"+",
"2",
"*",
"mv",
"*",
"(",
"v",
"-",
"w",
")",
"*",
"m",
"*",
"n",
"-",
"2",
"*",
"mv",
"*",
"v",
"*",
"w",
"*",
"n",
"**",
"2",
"+",
"mu",
"*",
"m",
"**",
"2",
",",
"2",
"*",
"(",
"mv",
"*",
"u",
"*",
"n",
"*",
"(",
"w",
"*",
"n",
"+",
"u",
"*",
"n",
"-",
"m",
")",
"-",
"(",
"mu",
"-",
"mv",
")",
"*",
"m",
"*",
"w",
"*",
"n",
"+",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"*",
"u",
"*",
"v",
"*",
"n",
"**",
"2",
")",
",",
"2",
"*",
"(",
"mv",
"*",
"u",
"*",
"n",
"*",
"(",
"v",
"*",
"n",
"+",
"u",
"*",
"n",
"+",
"m",
")",
"+",
"(",
"mu",
"-",
"mv",
")",
"*",
"m",
"*",
"v",
"*",
"n",
"+",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"*",
"w",
"*",
"u",
"*",
"n",
"**",
"2",
")",
",",
"2",
"*",
"(",
"mv",
"*",
"v",
"*",
"n",
"*",
"(",
"w",
"*",
"n",
"+",
"v",
"*",
"n",
"+",
"m",
")",
"+",
"(",
"mu",
"-",
"mv",
")",
"*",
"m",
"*",
"w",
"*",
"n",
"+",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"*",
"u",
"*",
"v",
"*",
"n",
"**",
"2",
")",
",",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"*",
"(",
"v",
"**",
"2",
"-",
"w",
"**",
"2",
"-",
"u",
"**",
"2",
")",
"*",
"n",
"**",
"2",
"+",
"2",
"*",
"mv",
"*",
"(",
"w",
"-",
"u",
")",
"*",
"m",
"*",
"n",
"-",
"2",
"*",
"mv",
"*",
"u",
"*",
"w",
"*",
"n",
"**",
"2",
"+",
"mu",
"*",
"m",
"**",
"2",
",",
"2",
"*",
"(",
"mv",
"*",
"v",
"*",
"n",
"*",
"(",
"v",
"*",
"n",
"+",
"u",
"*",
"n",
"-",
"m",
")",
"-",
"(",
"mu",
"-",
"mv",
")",
"*",
"m",
"*",
"u",
"*",
"n",
"+",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"*",
"w",
"*",
"v",
"*",
"n",
"**",
"2",
")",
",",
"2",
"*",
"(",
"mv",
"*",
"w",
"*",
"n",
"*",
"(",
"w",
"*",
"n",
"+",
"v",
"*",
"n",
"-",
"m",
")",
"-",
"(",
"mu",
"-",
"mv",
")",
"*",
"m",
"*",
"v",
"*",
"n",
"+",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"*",
"w",
"*",
"u",
"*",
"n",
"**",
"2",
")",
",",
"2",
"*",
"(",
"mv",
"*",
"w",
"*",
"n",
"*",
"(",
"w",
"*",
"n",
"+",
"u",
"*",
"n",
"+",
"m",
")",
"+",
"(",
"mu",
"-",
"mv",
")",
"*",
"m",
"*",
"u",
"*",
"n",
"+",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"*",
"w",
"*",
"v",
"*",
"n",
"**",
"2",
")",
",",
"(",
"mu",
"-",
"2",
"*",
"mv",
")",
"*",
"(",
"w",
"**",
"2",
"-",
"u",
"**",
"2",
"-",
"v",
"**",
"2",
")",
"*",
"n",
"**",
"2",
"+",
"2",
"*",
"mv",
"*",
"(",
"u",
"-",
"v",
")",
"*",
"m",
"*",
"n",
"-",
"2",
"*",
"mv",
"*",
"u",
"*",
"v",
"*",
"n",
"**",
"2",
"+",
"mu",
"*",
"m",
"**",
"2",
"]",
"m",
"=",
"-",
"1",
"*",
"m",
"F",
"=",
"mu",
"*",
"m",
"**",
"2",
"+",
"d",
"*",
"n",
"**",
"2",
"all_list",
"=",
"R_list_inv",
"+",
"R_list",
"+",
"[",
"F",
"]",
"# Compute the max common factors for the elements of the rotation matrix",
"# and its inverse.",
"com_fac",
"=",
"reduce",
"(",
"gcd",
",",
"all_list",
")",
"sigma",
"=",
"int",
"(",
"round",
"(",
"abs",
"(",
"F",
"/",
"com_fac",
")",
")",
")",
"if",
"(",
"sigma",
"<=",
"cutoff",
")",
"and",
"(",
"sigma",
">",
"1",
")",
":",
"if",
"sigma",
"not",
"in",
"list",
"(",
"sigmas",
".",
"keys",
"(",
")",
")",
":",
"if",
"m",
"==",
"0",
":",
"angle",
"=",
"180.0",
"else",
":",
"angle",
"=",
"2",
"*",
"np",
".",
"arctan",
"(",
"n",
"/",
"m",
"*",
"np",
".",
"sqrt",
"(",
"d",
"/",
"mu",
")",
")",
"/",
"np",
".",
"pi",
"*",
"180",
"sigmas",
"[",
"sigma",
"]",
"=",
"[",
"angle",
"]",
"else",
":",
"if",
"m",
"==",
"0",
":",
"angle",
"=",
"180",
"else",
":",
"angle",
"=",
"2",
"*",
"np",
".",
"arctan",
"(",
"n",
"/",
"m",
"*",
"np",
".",
"sqrt",
"(",
"d",
"/",
"mu",
")",
")",
"/",
"np",
".",
"pi",
"*",
"180.0",
"if",
"angle",
"not",
"in",
"sigmas",
"[",
"sigma",
"]",
":",
"sigmas",
"[",
"sigma",
"]",
".",
"append",
"(",
"angle",
")",
"if",
"m_max",
"==",
"0",
":",
"break",
"return",
"sigmas"
] | Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in rhombohedral system.
The algorithm for this code is from reference, Acta Cryst, A45,505(1989).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w):
the rotation axis of the grain boundary, with the format of [u,v,w]
or Weber indices [u, v, t, w].
ratio_alpha (list of two integers, e.g. mu, mv):
mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha) with rational number.
If irrational, set ratio_alpha = None.
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
        Note: the angles are the rotation angle of one grain with respect to the
other grain.
        When generating the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures. | [
"Find",
"all",
"possible",
"sigma",
"values",
"and",
"corresponding",
"rotation",
"angles",
"within",
"a",
"sigma",
"value",
"cutoff",
"with",
"known",
"rotation",
"axis",
"in",
"rhombohedral",
"system",
".",
"The",
"algorithm",
"for",
"this",
"code",
"is",
"from",
"reference",
"Acta",
"Cryst",
"A45",
"505",
"(",
"1989",
")",
"."
] | python | train |
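
A hedged usage sketch; the cutoff, axis, and ratio values are invented for illustration. enum_sigma_rho is a static method, so no generator instance is needed; for an irrational ratio pass ratio_alpha=None with a [1,1,1]-type axis, as the RuntimeError branch above requires.

from pymatgen.analysis.gb.grain import GrainBoundaryGenerator

sigmas = GrainBoundaryGenerator.enum_sigma_rho(
    cutoff=100, r_axis=[0, 0, 0, 1], ratio_alpha=[15, 4])
for sigma, angles in sorted(sigmas.items()):
    print(sigma, [round(a, 2) for a in angles])
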
WoLpH/python-progressbar | progressbar/bar.py | https://github.com/WoLpH/python-progressbar/blob/963617a1bb9d81624ecf31f3457185992cd97bfa/progressbar/bar.py#L527-L550 | def _needs_update(self):
'Returns whether the ProgressBar should redraw the line.'
if self.poll_interval:
delta = timeit.default_timer() - self._last_update_timer
poll_status = delta > self.poll_interval.total_seconds()
else:
delta = 0
poll_status = False
# Do not update if value increment is not large enough to
# add more bars to progressbar (according to current
# terminal width)
try:
divisor = self.max_value / self.term_width # float division
if self.value // divisor == self.previous_value // divisor:
return poll_status or self.end_time
else:
return True
except Exception:
# ignore any division errors
pass
return poll_status or self.end_time | [
"def",
"_needs_update",
"(",
"self",
")",
":",
"if",
"self",
".",
"poll_interval",
":",
"delta",
"=",
"timeit",
".",
"default_timer",
"(",
")",
"-",
"self",
".",
"_last_update_timer",
"poll_status",
"=",
"delta",
">",
"self",
".",
"poll_interval",
".",
"total_seconds",
"(",
")",
"else",
":",
"delta",
"=",
"0",
"poll_status",
"=",
"False",
"# Do not update if value increment is not large enough to",
"# add more bars to progressbar (according to current",
"# terminal width)",
"try",
":",
"divisor",
"=",
"self",
".",
"max_value",
"/",
"self",
".",
"term_width",
"# float division",
"if",
"self",
".",
"value",
"//",
"divisor",
"==",
"self",
".",
"previous_value",
"//",
"divisor",
":",
"return",
"poll_status",
"or",
"self",
".",
"end_time",
"else",
":",
"return",
"True",
"except",
"Exception",
":",
"# ignore any division errors",
"pass",
"return",
"poll_status",
"or",
"self",
".",
"end_time"
] | Returns whether the ProgressBar should redraw the line. | [
"Returns",
"whether",
"the",
"ProgressBar",
"should",
"redraw",
"the",
"line",
"."
] | python | train |
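
The bucket test above, as a standalone sketch: redraw only when the value crosses into a new terminal-width-sized column.

def needs_redraw(value, previous_value, max_value, term_width):
    divisor = max_value / term_width  # progress units per character cell
    return value // divisor != previous_value // divisor

print(needs_redraw(101, 100, 10000, 80))  # False: still in the same cell
print(needs_redraw(250, 100, 10000, 80))  # True: a new cell was reached
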
google/openhtf | openhtf/plugs/usb/adb_device.py | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_device.py#L153-L172 | def pull(self, device_filename, dest_file=None, timeout_ms=None):
"""Pull file from device.
Arguments:
device_filename: The filename on the device to pull.
dest_file: If set, a filename or writable file-like object.
timeout_ms: Expected timeout for the pull.
Returns:
The file data if dest_file is not set, None otherwise.
"""
should_return_data = dest_file is None
if isinstance(dest_file, six.string_types):
dest_file = open(dest_file, 'w')
elif dest_file is None:
dest_file = six.StringIO()
self.filesync_service.recv(device_filename, dest_file,
timeouts.PolledTimeout.from_millis(timeout_ms))
if should_return_data:
return dest_file.getvalue() | [
"def",
"pull",
"(",
"self",
",",
"device_filename",
",",
"dest_file",
"=",
"None",
",",
"timeout_ms",
"=",
"None",
")",
":",
"should_return_data",
"=",
"dest_file",
"is",
"None",
"if",
"isinstance",
"(",
"dest_file",
",",
"six",
".",
"string_types",
")",
":",
"dest_file",
"=",
"open",
"(",
"dest_file",
",",
"'w'",
")",
"elif",
"dest_file",
"is",
"None",
":",
"dest_file",
"=",
"six",
".",
"StringIO",
"(",
")",
"self",
".",
"filesync_service",
".",
"recv",
"(",
"device_filename",
",",
"dest_file",
",",
"timeouts",
".",
"PolledTimeout",
".",
"from_millis",
"(",
"timeout_ms",
")",
")",
"if",
"should_return_data",
":",
"return",
"dest_file",
".",
"getvalue",
"(",
")"
] | Pull file from device.
Arguments:
device_filename: The filename on the device to pull.
dest_file: If set, a filename or writable file-like object.
timeout_ms: Expected timeout for the pull.
Returns:
The file data if dest_file is not set, None otherwise. | [
"Pull",
"file",
"from",
"device",
"."
] | python | train |
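
A hypothetical usage sketch; it assumes `device` is an already-connected AdbDevice instance and the on-device path is made up. With no dest_file the contents come back as a string, per the StringIO branch above.

data = device.pull('/data/local/tmp/test.log', timeout_ms=5000)
device.pull('/data/local/tmp/test.log', dest_file='local_copy.log')
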
google/openhtf | openhtf/plugs/usb/filesync_service.py | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/filesync_service.py#L415-L461 | def read_message(self, timeout=None):
"""Read a message from this transport and return it.
Reads a message of RECV_MSG_TYPE and returns it. Note that this method
abstracts the data length and data read so that the caller simply gets the
data along with the header in the returned message.
Args:
timeout: timeouts.PolledTimeout to use for the operation.
Returns:
An instance of self.RECV_MSG_TYPE that was read from self.stream.
Raises:
AdbProtocolError: If an invalid response is received.
AdbRemoteError: If a FAIL response is received.
"""
raw_data = self.stream.read(
struct.calcsize(self.RECV_MSG_TYPE.struct_format), timeout)
try:
raw_message = struct.unpack(self.RECV_MSG_TYPE.struct_format, raw_data)
except struct.error:
raise usb_exceptions.AdbProtocolError(
'%s expected format "%s", got data %s', self,
self.RECV_MSG_TYPE.struct_format, raw_data)
if raw_message[0] not in self.WIRE_TO_CMD:
raise usb_exceptions.AdbProtocolError(
'Unrecognized command id: %s', raw_message)
# Swap out the wire command with the string equivalent.
raw_message = (self.WIRE_TO_CMD[raw_message[0]],) + raw_message[1:]
if self.RECV_MSG_TYPE.has_data and raw_message[-1]:
# For messages that have data, the length of the data is the last field
# in the struct. We do another read and swap out that length for the
# actual data read before we create the namedtuple to return.
data_len = raw_message[-1]
raw_message = raw_message[:-1] + (self.stream.read(data_len, timeout),)
if raw_message[0] not in self.VALID_RESPONSES:
raise usb_exceptions.AdbProtocolError(
'%s not a valid response for %s', raw_message[0], self)
if raw_message[0] == 'FAIL':
raise usb_exceptions.AdbRemoteError(
'Remote ADB failure: %s', raw_message)
return self.RECV_MSG_TYPE(*raw_message) | [
"def",
"read_message",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"raw_data",
"=",
"self",
".",
"stream",
".",
"read",
"(",
"struct",
".",
"calcsize",
"(",
"self",
".",
"RECV_MSG_TYPE",
".",
"struct_format",
")",
",",
"timeout",
")",
"try",
":",
"raw_message",
"=",
"struct",
".",
"unpack",
"(",
"self",
".",
"RECV_MSG_TYPE",
".",
"struct_format",
",",
"raw_data",
")",
"except",
"struct",
".",
"error",
":",
"raise",
"usb_exceptions",
".",
"AdbProtocolError",
"(",
"'%s expected format \"%s\", got data %s'",
",",
"self",
",",
"self",
".",
"RECV_MSG_TYPE",
".",
"struct_format",
",",
"raw_data",
")",
"if",
"raw_message",
"[",
"0",
"]",
"not",
"in",
"self",
".",
"WIRE_TO_CMD",
":",
"raise",
"usb_exceptions",
".",
"AdbProtocolError",
"(",
"'Unrecognized command id: %s'",
",",
"raw_message",
")",
"# Swap out the wire command with the string equivalent.",
"raw_message",
"=",
"(",
"self",
".",
"WIRE_TO_CMD",
"[",
"raw_message",
"[",
"0",
"]",
"]",
",",
")",
"+",
"raw_message",
"[",
"1",
":",
"]",
"if",
"self",
".",
"RECV_MSG_TYPE",
".",
"has_data",
"and",
"raw_message",
"[",
"-",
"1",
"]",
":",
"# For messages that have data, the length of the data is the last field",
"# in the struct. We do another read and swap out that length for the",
"# actual data read before we create the namedtuple to return.",
"data_len",
"=",
"raw_message",
"[",
"-",
"1",
"]",
"raw_message",
"=",
"raw_message",
"[",
":",
"-",
"1",
"]",
"+",
"(",
"self",
".",
"stream",
".",
"read",
"(",
"data_len",
",",
"timeout",
")",
",",
")",
"if",
"raw_message",
"[",
"0",
"]",
"not",
"in",
"self",
".",
"VALID_RESPONSES",
":",
"raise",
"usb_exceptions",
".",
"AdbProtocolError",
"(",
"'%s not a valid response for %s'",
",",
"raw_message",
"[",
"0",
"]",
",",
"self",
")",
"if",
"raw_message",
"[",
"0",
"]",
"==",
"'FAIL'",
":",
"raise",
"usb_exceptions",
".",
"AdbRemoteError",
"(",
"'Remote ADB failure: %s'",
",",
"raw_message",
")",
"return",
"self",
".",
"RECV_MSG_TYPE",
"(",
"*",
"raw_message",
")"
] | Read a message from this transport and return it.
Reads a message of RECV_MSG_TYPE and returns it. Note that this method
abstracts the data length and data read so that the caller simply gets the
data along with the header in the returned message.
Args:
timeout: timeouts.PolledTimeout to use for the operation.
Returns:
An instance of self.RECV_MSG_TYPE that was read from self.stream.
Raises:
AdbProtocolError: If an invalid response is received.
AdbRemoteError: If a FAIL response is received. | [
"Read",
"a",
"message",
"from",
"this",
"transport",
"and",
"return",
"it",
"."
] | python | train |
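
A self-contained sketch of the two-phase read above for a sync-style message: unpack a fixed-size header, then read exactly the advertised number of payload bytes. The struct format here is illustrative, not the exact one used by every message type.

import struct

struct_format = '<4sI'  # 4-byte command id, little-endian 4-byte data length
wire = struct.pack(struct_format, b'DATA', 5) + b'hello'

header_size = struct.calcsize(struct_format)
cmd, data_len = struct.unpack(struct_format, wire[:header_size])
payload = wire[header_size:header_size + data_len]
print(cmd, data_len, payload)  # b'DATA' 5 b'hello'
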
ereOn/azmq | azmq/common.py | https://github.com/ereOn/azmq/blob/9f40d6d721eea7f7659ec6cc668811976db59854/azmq/common.py#L156-L168 | def close(self):
"""
Close the instance.
"""
if not self.closed and not self.closing:
logger.debug(
"%s[%s] closing...",
self.__class__.__name__,
id(self),
)
self._closing.set()
future = asyncio.ensure_future(self.on_close(), loop=self.loop)
future.add_done_callback(self._set_closed) | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"closed",
"and",
"not",
"self",
".",
"closing",
":",
"logger",
".",
"debug",
"(",
"\"%s[%s] closing...\"",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
"id",
"(",
"self",
")",
",",
")",
"self",
".",
"_closing",
".",
"set",
"(",
")",
"future",
"=",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"on_close",
"(",
")",
",",
"loop",
"=",
"self",
".",
"loop",
")",
"future",
".",
"add_done_callback",
"(",
"self",
".",
"_set_closed",
")"
] | Close the instance. | [
"Close",
"the",
"instance",
"."
] | python | train |
dstufft/crust | crust/query.py | https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/query.py#L80-L99 | def set_limits(self, low=None, high=None):
"""
Adjusts the limits on the rows retrieved. We use low/high to set these,
as it makes it more Pythonic to read and write. When the API query is
created, they are converted to the appropriate offset and limit values.
Any limits passed in here are applied relative to the existing
constraints. So low is added to the current low value and both will be
clamped to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low | [
"def",
"set_limits",
"(",
"self",
",",
"low",
"=",
"None",
",",
"high",
"=",
"None",
")",
":",
"if",
"high",
"is",
"not",
"None",
":",
"if",
"self",
".",
"high_mark",
"is",
"not",
"None",
":",
"self",
".",
"high_mark",
"=",
"min",
"(",
"self",
".",
"high_mark",
",",
"self",
".",
"low_mark",
"+",
"high",
")",
"else",
":",
"self",
".",
"high_mark",
"=",
"self",
".",
"low_mark",
"+",
"high",
"if",
"low",
"is",
"not",
"None",
":",
"if",
"self",
".",
"high_mark",
"is",
"not",
"None",
":",
"self",
".",
"low_mark",
"=",
"min",
"(",
"self",
".",
"high_mark",
",",
"self",
".",
"low_mark",
"+",
"low",
")",
"else",
":",
"self",
".",
"low_mark",
"=",
"self",
".",
"low_mark",
"+",
"low"
] | Adjusts the limits on the rows retrieved. We use low/high to set these,
as it makes it more Pythonic to read and write. When the API query is
created, they are converted to the appropriate offset and limit values.
Any limits passed in here are applied relative to the existing
constraints. So low is added to the current low value and both will be
clamped to any existing high value. | [
"Adjusts",
"the",
"limits",
"on",
"the",
"rows",
"retrieved",
".",
"We",
"use",
"low",
"/",
"high",
"to",
"set",
"these",
"as",
"it",
"makes",
"it",
"more",
"Pythonic",
"to",
"read",
"and",
"write",
".",
"When",
"the",
"API",
"query",
"is",
"created",
"they",
"are",
"converted",
"to",
"the",
"appropriate",
"offset",
"and",
"limit",
"values",
"."
] | python | train |
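When the API query is finally built, `low_mark`/`high_mark` become offset/limit parameters. A sketch of that conversion under the same convention — `as_offset_limit` is a hypothetical helper, not part of crust:

```python
def as_offset_limit(low_mark, high_mark):
    """Translate slice-style marks into API offset/limit query parameters.

    low_mark is how many rows to skip; high_mark, when set, is the absolute
    index one past the last row wanted (so limit = high_mark - low_mark).
    """
    params = {}
    if low_mark:
        params["offset"] = low_mark
    if high_mark is not None:
        params["limit"] = high_mark - low_mark
    return params

# Fresh marks (0, None) after set_limits(low=5, high=15) become (5, 15):
assert as_offset_limit(5, 15) == {"offset": 5, "limit": 10}
```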
edx/edx-enterprise | enterprise/api/pagination.py | https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/api/pagination.py#L13-L47 | def get_paginated_response(data, request):
"""
Update pagination links in course catalog data and return DRF Response.
Arguments:
data (dict): Dictionary containing catalog courses.
request (HttpRequest): Current request object.
Returns:
(Response): DRF response object containing pagination links.
"""
url = urlparse(request.build_absolute_uri())._replace(query=None).geturl()
next_page = None
previous_page = None
if data['next']:
next_page = "{base_url}?{query_parameters}".format(
base_url=url,
query_parameters=urlparse(data['next']).query,
)
next_page = next_page.rstrip('?')
if data['previous']:
previous_page = "{base_url}?{query_parameters}".format(
base_url=url,
query_parameters=urlparse(data['previous'] or "").query,
)
previous_page = previous_page.rstrip('?')
return Response(OrderedDict([
('count', data['count']),
('next', next_page),
('previous', previous_page),
('results', data['results'])
])) | [
"def",
"get_paginated_response",
"(",
"data",
",",
"request",
")",
":",
"url",
"=",
"urlparse",
"(",
"request",
".",
"build_absolute_uri",
"(",
")",
")",
".",
"_replace",
"(",
"query",
"=",
"None",
")",
".",
"geturl",
"(",
")",
"next_page",
"=",
"None",
"previous_page",
"=",
"None",
"if",
"data",
"[",
"'next'",
"]",
":",
"next_page",
"=",
"\"{base_url}?{query_parameters}\"",
".",
"format",
"(",
"base_url",
"=",
"url",
",",
"query_parameters",
"=",
"urlparse",
"(",
"data",
"[",
"'next'",
"]",
")",
".",
"query",
",",
")",
"next_page",
"=",
"next_page",
".",
"rstrip",
"(",
"'?'",
")",
"if",
"data",
"[",
"'previous'",
"]",
":",
"previous_page",
"=",
"\"{base_url}?{query_parameters}\"",
".",
"format",
"(",
"base_url",
"=",
"url",
",",
"query_parameters",
"=",
"urlparse",
"(",
"data",
"[",
"'previous'",
"]",
"or",
"\"\"",
")",
".",
"query",
",",
")",
"previous_page",
"=",
"previous_page",
".",
"rstrip",
"(",
"'?'",
")",
"return",
"Response",
"(",
"OrderedDict",
"(",
"[",
"(",
"'count'",
",",
"data",
"[",
"'count'",
"]",
")",
",",
"(",
"'next'",
",",
"next_page",
")",
",",
"(",
"'previous'",
",",
"previous_page",
")",
",",
"(",
"'results'",
",",
"data",
"[",
"'results'",
"]",
")",
"]",
")",
")"
] | Update pagination links in course catalog data and return DRF Response.
Arguments:
data (dict): Dictionary containing catalog courses.
request (HttpRequest): Current request object.
Returns:
(Response): DRF response object containing pagination links. | [
"Update",
"pagination",
"links",
"in",
"course",
"catalog",
"data",
"and",
"return",
"DRF",
"Response",
"."
] | python | valid |
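Each upstream link is rebuilt by keeping only its query string and re-anchoring it on the caller-facing URL; the `rstrip('?')` drops the separator when the upstream link carried no query. A standalone sketch of that step (Python 3 `urllib.parse`; the URLs are made up):

```python
from urllib.parse import urlparse

def rebase_link(upstream_link, public_url):
    """Re-anchor an upstream pagination link onto the public base URL."""
    if not upstream_link:
        return None
    query = urlparse(upstream_link).query
    return "{0}?{1}".format(public_url, query).rstrip("?")

assert rebase_link("http://catalog:8000/courses/?page=3",
                   "https://api.example.com/v1/courses/") \
    == "https://api.example.com/v1/courses/?page=3"
assert rebase_link(None, "https://api.example.com/v1/courses/") is None
```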
Fuyukai/ConfigMaster | configmaster/ConfigKey.py | https://github.com/Fuyukai/ConfigMaster/blob/8018aa415da55c84edaa8a49664f674758a14edd/configmaster/ConfigKey.py#L37-L52 | def dump(self) -> dict:
"""
Dumps data from the ConfigKey into a dict.
:return: The keys and values from the ConfigKey encapsulated in a dict.
"""
d = {}
for item in self.__dict__:
if item in ['parsed', 'dump', 'parse_data', 'iter_list', 'safe_load']:
continue
if isinstance(self.__dict__[item], ConfigKey):
d[item] = self.__dict__[item].dump()
elif isinstance(self.__dict__[item], list):
d[item] = self.iter_list_dump(self.__dict__[item])
else:
d[item] = self.__dict__[item]
return d | [
"def",
"dump",
"(",
"self",
")",
"->",
"dict",
":",
"d",
"=",
"{",
"}",
"for",
"item",
"in",
"self",
".",
"__dict__",
":",
"if",
"item",
"in",
"[",
"'parsed'",
",",
"'dump'",
",",
"'parse_data'",
",",
"'iter_list'",
",",
"'safe_load'",
"]",
":",
"continue",
"if",
"isinstance",
"(",
"self",
".",
"__dict__",
"[",
"item",
"]",
",",
"ConfigKey",
")",
":",
"d",
"[",
"item",
"]",
"=",
"self",
".",
"__dict__",
"[",
"item",
"]",
".",
"dump",
"(",
")",
"elif",
"isinstance",
"(",
"self",
".",
"__dict__",
"[",
"item",
"]",
",",
"list",
")",
":",
"d",
"[",
"item",
"]",
"=",
"self",
".",
"iter_list_dump",
"(",
"self",
".",
"__dict__",
"[",
"item",
"]",
")",
"else",
":",
"d",
"[",
"item",
"]",
"=",
"self",
".",
"__dict__",
"[",
"item",
"]",
"return",
"d"
] | Dumps data from the ConfigKey into a dict.
:return: The keys and values from the ConfigKey encapsulated in a dict. | [
"Dumps",
"data",
"from",
"the",
"ConfigKey",
"into",
"a",
"dict",
".",
":",
"return",
":",
"The",
"keys",
"and",
"values",
"from",
"the",
"ConfigKey",
"encapsulated",
"in",
"a",
"dict",
"."
] | python | train |
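The walk above is the generic object-tree-to-dict pattern: recurse into nested `ConfigKey` attributes, map over lists, and copy leaves. A condensed sketch of the same walk — `to_plain` and its `skip` tuple are hypothetical stand-ins for the attribute filtering done above:

```python
def to_plain(value, skip=("parsed",)):
    """Recursively flatten a config object tree into plain dicts/lists."""
    if hasattr(value, "__dict__"):  # nested config-key-like object
        return {key: to_plain(item, skip)
                for key, item in vars(value).items() if key not in skip}
    if isinstance(value, list):     # recurse into list elements
        return [to_plain(item, skip) for item in value]
    return value                    # plain leaf
```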
gwastro/pycbc | pycbc/frame/frame.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/frame/frame.py#L295-L328 | def frame_paths(frame_type, start_time, end_time, server=None, url_type='file'):
"""Return the paths to a span of frame files
Parameters
----------
frame_type : string
The string representation of the frame type (ex. 'H1_ER_C00_L1')
start_time : int
The start time that we need the frames to span.
end_time : int
The end time that we need the frames to span.
server : {None, SERVER:PORT string}, optional
Optional string to specify the datafind server to use. By default an
attempt is made to use a local datafind server.
url_type : string
Returns only frame URLs with a particular scheme or head such
as "file" or "gsiftp". Default is "file", which queries locally
stored frames. Option can be disabled if set to None.
Returns
-------
paths : list of paths
The list of paths to the frame files.
Examples
--------
>>> paths = frame_paths('H1_LDAS_C02_L2', 968995968, 968995968+2048)
"""
site = frame_type[0]
connection = datafind_connection(server)
connection.find_times(site, frame_type,
gpsstart=start_time, gpsend=end_time)
cache = connection.find_frame_urls(site, frame_type, start_time, end_time,urltype=url_type)
paths = [entry.path for entry in cache]
return paths | [
"def",
"frame_paths",
"(",
"frame_type",
",",
"start_time",
",",
"end_time",
",",
"server",
"=",
"None",
",",
"url_type",
"=",
"'file'",
")",
":",
"site",
"=",
"frame_type",
"[",
"0",
"]",
"connection",
"=",
"datafind_connection",
"(",
"server",
")",
"connection",
".",
"find_times",
"(",
"site",
",",
"frame_type",
",",
"gpsstart",
"=",
"start_time",
",",
"gpsend",
"=",
"end_time",
")",
"cache",
"=",
"connection",
".",
"find_frame_urls",
"(",
"site",
",",
"frame_type",
",",
"start_time",
",",
"end_time",
",",
"urltype",
"=",
"url_type",
")",
"paths",
"=",
"[",
"entry",
".",
"path",
"for",
"entry",
"in",
"cache",
"]",
"return",
"paths"
] | Return the paths to a span of frame files
Parameters
----------
frame_type : string
The string representation of the frame type (ex. 'H1_ER_C00_L1')
start_time : int
The start time that we need the frames to span.
end_time : int
The end time that we need the frames to span.
server : {None, SERVER:PORT string}, optional
Optional string to specify the datafind server to use. By default an
attempt is made to use a local datafind server.
url_type : string
Returns only frame URLs with a particular scheme or head such
as "file" or "gsiftp". Default is "file", which queries locally
stored frames. Option can be disabled if set to None.
Returns
-------
paths : list of paths
The list of paths to the frame files.
Examples
--------
>>> paths = frame_paths('H1_LDAS_C02_L2', 968995968, 968995968+2048) | [
"Return",
"the",
"paths",
"to",
"a",
"span",
"of",
"frame",
"files"
] | python | train |
gem/oq-engine | openquake/hazardlib/sourceconverter.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/sourceconverter.py#L376-L412 | def convert_surfaces(self, surface_nodes):
"""
Utility to convert a list of surface nodes into a single hazardlib
surface. There are four possibilities:
1. there is a single simpleFaultGeometry node; returns a
:class:`openquake.hazardlib.geo.simpleFaultSurface` instance
2. there is a single complexFaultGeometry node; returns a
:class:`openquake.hazardlib.geo.complexFaultSurface` instance
3. there is a single griddedSurface node; returns a
:class:`openquake.hazardlib.geo.GriddedSurface` instance
4. there is a list of PlanarSurface nodes; returns a
:class:`openquake.hazardlib.geo.MultiSurface` instance
:param surface_nodes: surface nodes as just described
"""
surface_node = surface_nodes[0]
if surface_node.tag.endswith('simpleFaultGeometry'):
surface = geo.SimpleFaultSurface.from_fault_data(
self.geo_line(surface_node),
~surface_node.upperSeismoDepth,
~surface_node.lowerSeismoDepth,
~surface_node.dip,
self.rupture_mesh_spacing)
elif surface_node.tag.endswith('complexFaultGeometry'):
surface = geo.ComplexFaultSurface.from_fault_data(
self.geo_lines(surface_node),
self.complex_fault_mesh_spacing)
elif surface_node.tag.endswith('griddedSurface'):
with context(self.fname, surface_node):
coords = split_coords_3d(~surface_node.posList)
points = [geo.Point(*p) for p in coords]
surface = geo.GriddedSurface.from_points_list(points)
else: # a collection of planar surfaces
planar_surfaces = list(map(self.geo_planar, surface_nodes))
surface = geo.MultiSurface(planar_surfaces)
return surface | [
"def",
"convert_surfaces",
"(",
"self",
",",
"surface_nodes",
")",
":",
"surface_node",
"=",
"surface_nodes",
"[",
"0",
"]",
"if",
"surface_node",
".",
"tag",
".",
"endswith",
"(",
"'simpleFaultGeometry'",
")",
":",
"surface",
"=",
"geo",
".",
"SimpleFaultSurface",
".",
"from_fault_data",
"(",
"self",
".",
"geo_line",
"(",
"surface_node",
")",
",",
"~",
"surface_node",
".",
"upperSeismoDepth",
",",
"~",
"surface_node",
".",
"lowerSeismoDepth",
",",
"~",
"surface_node",
".",
"dip",
",",
"self",
".",
"rupture_mesh_spacing",
")",
"elif",
"surface_node",
".",
"tag",
".",
"endswith",
"(",
"'complexFaultGeometry'",
")",
":",
"surface",
"=",
"geo",
".",
"ComplexFaultSurface",
".",
"from_fault_data",
"(",
"self",
".",
"geo_lines",
"(",
"surface_node",
")",
",",
"self",
".",
"complex_fault_mesh_spacing",
")",
"elif",
"surface_node",
".",
"tag",
".",
"endswith",
"(",
"'griddedSurface'",
")",
":",
"with",
"context",
"(",
"self",
".",
"fname",
",",
"surface_node",
")",
":",
"coords",
"=",
"split_coords_3d",
"(",
"~",
"surface_node",
".",
"posList",
")",
"points",
"=",
"[",
"geo",
".",
"Point",
"(",
"*",
"p",
")",
"for",
"p",
"in",
"coords",
"]",
"surface",
"=",
"geo",
".",
"GriddedSurface",
".",
"from_points_list",
"(",
"points",
")",
"else",
":",
"# a collection of planar surfaces",
"planar_surfaces",
"=",
"list",
"(",
"map",
"(",
"self",
".",
"geo_planar",
",",
"surface_nodes",
")",
")",
"surface",
"=",
"geo",
".",
"MultiSurface",
"(",
"planar_surfaces",
")",
"return",
"surface"
] | Utility to convert a list of surface nodes into a single hazardlib
surface. There are four possibilities:
1. there is a single simpleFaultGeometry node; returns a
:class:`openquake.hazardlib.geo.simpleFaultSurface` instance
2. there is a single complexFaultGeometry node; returns a
:class:`openquake.hazardlib.geo.complexFaultSurface` instance
3. there is a single griddedSurface node; returns a
:class:`openquake.hazardlib.geo.GriddedSurface` instance
4. there is a list of PlanarSurface nodes; returns a
:class:`openquake.hazardlib.geo.MultiSurface` instance
:param surface_nodes: surface nodes as just described | [
"Utility",
"to",
"convert",
"a",
"list",
"of",
"surface",
"nodes",
"into",
"a",
"single",
"hazardlib",
"surface",
".",
"There",
"are",
"four",
"possibilities",
":"
] | python | train |
zhanglab/psamm | psamm/datasource/native.py | https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/datasource/native.py#L244-L287 | def parse_compartments(self):
"""Parse compartment information from model.
Return tuple of: 1) iterator of
:class:`psamm.datasource.entry.CompartmentEntry`; 2) Set of pairs
defining the compartment boundaries of the model.
"""
compartments = OrderedDict()
boundaries = set()
if 'compartments' in self._model:
boundary_map = {}
for compartment_def in self._model['compartments']:
compartment_id = compartment_def.get('id')
_check_id(compartment_id, 'Compartment')
if compartment_id in compartments:
raise ParseError('Duplicate compartment ID: {}'.format(
compartment_id))
props = dict(compartment_def)
adjacent_to = props.pop('adjacent_to', None)
if adjacent_to is not None:
if not isinstance(adjacent_to, list):
adjacent_to = [adjacent_to]
for other in adjacent_to:
boundary_map.setdefault(other, set()).add(
compartment_id)
mark = FileMark(self._context, None, None)
compartment = CompartmentEntry(props, mark)
compartments[compartment_id] = compartment
# Check boundaries from boundary_map
for source, dest_set in iteritems(boundary_map):
if source not in compartments:
raise ParseError(
'Invalid compartment {} referenced'
' by compartment {}'.format(
source, ', '.join(dest_set)))
for dest in dest_set:
boundaries.add(tuple(sorted((source, dest))))
return itervalues(compartments), frozenset(boundaries) | [
"def",
"parse_compartments",
"(",
"self",
")",
":",
"compartments",
"=",
"OrderedDict",
"(",
")",
"boundaries",
"=",
"set",
"(",
")",
"if",
"'compartments'",
"in",
"self",
".",
"_model",
":",
"boundary_map",
"=",
"{",
"}",
"for",
"compartment_def",
"in",
"self",
".",
"_model",
"[",
"'compartments'",
"]",
":",
"compartment_id",
"=",
"compartment_def",
".",
"get",
"(",
"'id'",
")",
"_check_id",
"(",
"compartment_id",
",",
"'Compartment'",
")",
"if",
"compartment_id",
"in",
"compartments",
":",
"raise",
"ParseError",
"(",
"'Duplicate compartment ID: {}'",
".",
"format",
"(",
"compartment_id",
")",
")",
"props",
"=",
"dict",
"(",
"compartment_def",
")",
"adjacent_to",
"=",
"props",
".",
"pop",
"(",
"'adjacent_to'",
",",
"None",
")",
"if",
"adjacent_to",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"adjacent_to",
",",
"list",
")",
":",
"adjacent_to",
"=",
"[",
"adjacent_to",
"]",
"for",
"other",
"in",
"adjacent_to",
":",
"boundary_map",
".",
"setdefault",
"(",
"other",
",",
"set",
"(",
")",
")",
".",
"add",
"(",
"compartment_id",
")",
"mark",
"=",
"FileMark",
"(",
"self",
".",
"_context",
",",
"None",
",",
"None",
")",
"compartment",
"=",
"CompartmentEntry",
"(",
"props",
",",
"mark",
")",
"compartments",
"[",
"compartment_id",
"]",
"=",
"compartment",
"# Check boundaries from boundary_map",
"for",
"source",
",",
"dest_set",
"in",
"iteritems",
"(",
"boundary_map",
")",
":",
"if",
"source",
"not",
"in",
"compartments",
":",
"raise",
"ParseError",
"(",
"'Invalid compartment {} referenced'",
"' by compartment {}'",
".",
"format",
"(",
"source",
",",
"', '",
".",
"join",
"(",
"dest_set",
")",
")",
")",
"for",
"dest",
"in",
"dest_set",
":",
"boundaries",
".",
"add",
"(",
"tuple",
"(",
"sorted",
"(",
"(",
"source",
",",
"dest",
")",
")",
")",
")",
"return",
"itervalues",
"(",
"compartments",
")",
",",
"frozenset",
"(",
"boundaries",
")"
] | Parse compartment information from model.
Return tuple of: 1) iterator of
:class:`psamm.datasource.entry.CompartmentEntry`; 2) Set of pairs
defining the compartment boundaries of the model. | [
"Parse",
"compartment",
"information",
"from",
"model",
"."
] | python | train |
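The boundary bookkeeping reduces to one idea: `adjacent_to` declarations are undirected, so each pair is canonicalized with `tuple(sorted(...))` before entering a set, which collapses A→B and B→A into a single boundary. A stripped-down sketch (the validation message is simplified):

```python
def boundary_pairs(adjacency):
    """adjacency: compartment id -> iterable of adjacent compartment ids."""
    known = set(adjacency)
    pairs = set()
    for source, neighbors in adjacency.items():
        for dest in neighbors:
            if dest not in known:
                raise ValueError("invalid compartment referenced: " + dest)
            pairs.add(tuple(sorted((source, dest))))
    return frozenset(pairs)

# Reciprocal declarations collapse into one undirected boundary:
assert boundary_pairs({"c": ["e"], "e": ["c"]}) == frozenset({("c", "e")})
```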
Qiskit/qiskit-terra | qiskit/dagcircuit/dagcircuit.py | https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/dagcircuit/dagcircuit.py#L1205-L1217 | def remove_nonancestors_of(self, node):
"""Remove all of the non-ancestors operation nodes of node."""
if isinstance(node, int):
warnings.warn('Calling remove_nonancestors_of() with a node id is deprecated,'
' use a DAGNode instead',
DeprecationWarning, 2)
node = self._id_to_node[node]
anc = nx.ancestors(self._multi_graph, node)
comp = list(set(self._multi_graph.nodes()) - set(anc))
for n in comp:
if n.type == "op":
self.remove_op_node(n) | [
"def",
"remove_nonancestors_of",
"(",
"self",
",",
"node",
")",
":",
"if",
"isinstance",
"(",
"node",
",",
"int",
")",
":",
"warnings",
".",
"warn",
"(",
"'Calling remove_nonancestors_of() with a node id is deprecated,'",
"' use a DAGNode instead'",
",",
"DeprecationWarning",
",",
"2",
")",
"node",
"=",
"self",
".",
"_id_to_node",
"[",
"node",
"]",
"anc",
"=",
"nx",
".",
"ancestors",
"(",
"self",
".",
"_multi_graph",
",",
"node",
")",
"comp",
"=",
"list",
"(",
"set",
"(",
"self",
".",
"_multi_graph",
".",
"nodes",
"(",
")",
")",
"-",
"set",
"(",
"anc",
")",
")",
"for",
"n",
"in",
"comp",
":",
"if",
"n",
".",
"type",
"==",
"\"op\"",
":",
"self",
".",
"remove_op_node",
"(",
"n",
")"
] | Remove all of the non-ancestors operation nodes of node. | [
"Remove",
"all",
"of",
"the",
"non",
"-",
"ancestors",
"operation",
"nodes",
"of",
"node",
"."
] | python | test |
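Stripped of the deprecation shim and the op-node filter, the method is a set complement on the DAG: every node that is not an ancestor of the target is a removal candidate. The same computation with plain networkx:

```python
import networkx as nx

g = nx.DiGraph([("a", "b"), ("b", "c"), ("x", "c")])
non_ancestors = set(g.nodes()) - nx.ancestors(g, "b")
assert non_ancestors == {"b", "c", "x"}  # "a" is the only ancestor of "b"
# remove_nonancestors_of() then removes only the op-type nodes among these.
```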
pyviz/holoviews | holoviews/plotting/plotly/element.py | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/plotly/element.py#L103-L110 | def initialize_plot(self, ranges=None):
"""
Initializes a new plot object with the last available frame.
"""
# Get element key and ranges for frame
fig = self.generate_plot(self.keys[-1], ranges)
self.drawn = True
return fig | [
"def",
"initialize_plot",
"(",
"self",
",",
"ranges",
"=",
"None",
")",
":",
"# Get element key and ranges for frame",
"fig",
"=",
"self",
".",
"generate_plot",
"(",
"self",
".",
"keys",
"[",
"-",
"1",
"]",
",",
"ranges",
")",
"self",
".",
"drawn",
"=",
"True",
"return",
"fig"
] | Initializes a new plot object with the last available frame. | [
"Initializes",
"a",
"new",
"plot",
"object",
"with",
"the",
"last",
"available",
"frame",
"."
] | python | train |
gabrielelanaro/chemview | chemview/viewer.py | https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/viewer.py#L86-L148 | def toggle_axes(self, parameters = None):
'''Toggle axes [x,y,z] on and off for the current representation
Parameters: dictionary of parameters to control axes:
position/p: origin of axes
length/l: length of axis
offset/o: offset to place axis labels
axis_colors/ac: axis colors
text_colors/tc: label colors
radii/r: axis radii
text/t: label text
sizes/s: label sizes
fonts/f: label fonts'''
if len(self._axes_reps)>0:
for rep_id in self._axes_reps:
self.remove_representation(rep_id)
self._axes_reps = []
else:
if not isinstance(parameters,dict):
parameters={}
def defaults(pdict,keys,default,length=3,instance=(int,float)):
'''Helper function to generate default values and handle errors'''
for k in keys:
val=pdict.get(k)
if val!=None:
break
if val==None:
val=default
elif isinstance(val,instance) and length>1:
val = [val]*length
elif isinstance(val,(list,np.generic,np.ndarray)) and length>1:
if not all([isinstance(v,instance) for v in val]):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
elif not isinstance(val,instance):
raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
return val
p = defaults(parameters,['positions','position','p'],np.average(self.coordinates,0))
l = defaults(parameters,['lengths','length','l'],max([np.linalg.norm(x-p) for x in self.coordinates]),1)
o = defaults(parameters,['offsets','offset','o'],l*1.05,1)
ac = defaults(parameters,[a+c for a in ['axis_','a',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
tc = defaults(parameters,[a+c for a in ['text_','t',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
r = defaults(parameters,['radii','radius','r'],[0.005]*3,3)
t = defaults(parameters,['text','labels','t'],['X','Y','Z'],3,str)
s = defaults(parameters,['sizes','size','s'],[32]*3,3)
f = defaults(parameters,['fonts','font','f'],['Arial']*3,3,str)
starts=np.array([p,p,p],float)
ends=np.array([p+[l,0,0],p+[0,l,0],p+[0,0,l]],float)
axis_labels_coords=np.array([p+[o,0,0],p+[0,o,0],p+[0,0,o]],float)
a_rep=self.add_representation('cylinders',{"startCoords":starts,
"endCoords":ends,
"colors":ac,
"radii":r})
t_rep=self.add_representation('text',{"coordinates":axis_labels_coords,
"text":t,
"colors":tc,
"sizes":s,
"fonts":f})
self._axes_reps = [a_rep, t_rep] | [
"def",
"toggle_axes",
"(",
"self",
",",
"parameters",
"=",
"None",
")",
":",
"if",
"len",
"(",
"self",
".",
"_axes_reps",
")",
">",
"0",
":",
"for",
"rep_id",
"in",
"self",
".",
"_axes_reps",
":",
"self",
".",
"remove_representation",
"(",
"rep_id",
")",
"self",
".",
"_axes_reps",
"=",
"[",
"]",
"else",
":",
"if",
"not",
"isinstance",
"(",
"parameters",
",",
"dict",
")",
":",
"parameters",
"=",
"{",
"}",
"def",
"defaults",
"(",
"pdict",
",",
"keys",
",",
"default",
",",
"length",
"=",
"3",
",",
"instance",
"=",
"(",
"int",
",",
"float",
")",
")",
":",
"'''Helper function to generate default values and handle errors'''",
"for",
"k",
"in",
"keys",
":",
"val",
"=",
"pdict",
".",
"get",
"(",
"k",
")",
"if",
"val",
"!=",
"None",
":",
"break",
"if",
"val",
"==",
"None",
":",
"val",
"=",
"default",
"elif",
"isinstance",
"(",
"val",
",",
"instance",
")",
"and",
"length",
">",
"1",
":",
"val",
"=",
"[",
"val",
"]",
"*",
"length",
"elif",
"isinstance",
"(",
"val",
",",
"(",
"list",
",",
"np",
".",
"generic",
",",
"np",
".",
"ndarray",
")",
")",
"and",
"length",
">",
"1",
":",
"if",
"not",
"all",
"(",
"[",
"isinstance",
"(",
"v",
",",
"instance",
")",
"for",
"v",
"in",
"val",
"]",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Invalid type {t} for parameter {p}. Use {i}.\"",
".",
"format",
"(",
"t",
"=",
"type",
"(",
"val",
")",
",",
"p",
"=",
"val",
",",
"i",
"=",
"instance",
")",
")",
"elif",
"not",
"isinstance",
"(",
"val",
",",
"instance",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Invalid type {t} for parameter {p}. Use {i}.\"",
".",
"format",
"(",
"t",
"=",
"type",
"(",
"val",
")",
",",
"p",
"=",
"val",
",",
"i",
"=",
"instance",
")",
")",
"return",
"val",
"p",
"=",
"defaults",
"(",
"parameters",
",",
"[",
"'positions'",
",",
"'position'",
",",
"'p'",
"]",
",",
"np",
".",
"average",
"(",
"self",
".",
"coordinates",
",",
"0",
")",
")",
"l",
"=",
"defaults",
"(",
"parameters",
",",
"[",
"'lengths'",
",",
"'length'",
",",
"'l'",
"]",
",",
"max",
"(",
"[",
"np",
".",
"linalg",
".",
"norm",
"(",
"x",
"-",
"p",
")",
"for",
"x",
"in",
"self",
".",
"coordinates",
"]",
")",
",",
"1",
")",
"o",
"=",
"defaults",
"(",
"parameters",
",",
"[",
"'offsets'",
",",
"'offset'",
",",
"'o'",
"]",
",",
"l",
"*",
"1.05",
",",
"1",
")",
"ac",
"=",
"defaults",
"(",
"parameters",
",",
"[",
"a",
"+",
"c",
"for",
"a",
"in",
"[",
"'axis_'",
",",
"'a'",
",",
"''",
"]",
"for",
"c",
"in",
"[",
"'colors'",
",",
"'colours'",
",",
"'color'",
",",
"'colour'",
",",
"'c'",
"]",
"]",
",",
"[",
"0xff0000",
",",
"0x00ff00",
",",
"0x0000ff",
"]",
",",
"3",
",",
"(",
"int",
",",
"hex",
")",
")",
"tc",
"=",
"defaults",
"(",
"parameters",
",",
"[",
"a",
"+",
"c",
"for",
"a",
"in",
"[",
"'text_'",
",",
"'t'",
",",
"''",
"]",
"for",
"c",
"in",
"[",
"'colors'",
",",
"'colours'",
",",
"'color'",
",",
"'colour'",
",",
"'c'",
"]",
"]",
",",
"[",
"0xff0000",
",",
"0x00ff00",
",",
"0x0000ff",
"]",
",",
"3",
",",
"(",
"int",
",",
"hex",
")",
")",
"r",
"=",
"defaults",
"(",
"parameters",
",",
"[",
"'radii'",
",",
"'radius'",
",",
"'r'",
"]",
",",
"[",
"0.005",
"]",
"*",
"3",
",",
"3",
")",
"t",
"=",
"defaults",
"(",
"parameters",
",",
"[",
"'text'",
",",
"'labels'",
",",
"'t'",
"]",
",",
"[",
"'X'",
",",
"'Y'",
",",
"'Z'",
"]",
",",
"3",
",",
"str",
")",
"s",
"=",
"defaults",
"(",
"parameters",
",",
"[",
"'sizes'",
",",
"'size'",
",",
"'s'",
"]",
",",
"[",
"32",
"]",
"*",
"3",
",",
"3",
")",
"f",
"=",
"defaults",
"(",
"parameters",
",",
"[",
"'fonts'",
",",
"'font'",
",",
"'f'",
"]",
",",
"[",
"'Arial'",
"]",
"*",
"3",
",",
"3",
",",
"str",
")",
"starts",
"=",
"np",
".",
"array",
"(",
"[",
"p",
",",
"p",
",",
"p",
"]",
",",
"float",
")",
"ends",
"=",
"np",
".",
"array",
"(",
"[",
"p",
"+",
"[",
"l",
",",
"0",
",",
"0",
"]",
",",
"p",
"+",
"[",
"0",
",",
"l",
",",
"0",
"]",
",",
"p",
"+",
"[",
"0",
",",
"0",
",",
"l",
"]",
"]",
",",
"float",
")",
"axis_labels_coords",
"=",
"np",
".",
"array",
"(",
"[",
"p",
"+",
"[",
"o",
",",
"0",
",",
"0",
"]",
",",
"p",
"+",
"[",
"0",
",",
"o",
",",
"0",
"]",
",",
"p",
"+",
"[",
"0",
",",
"0",
",",
"o",
"]",
"]",
",",
"float",
")",
"a_rep",
"=",
"self",
".",
"add_representation",
"(",
"'cylinders'",
",",
"{",
"\"startCoords\"",
":",
"starts",
",",
"\"endCoords\"",
":",
"ends",
",",
"\"colors\"",
":",
"ac",
",",
"\"radii\"",
":",
"r",
"}",
")",
"t_rep",
"=",
"self",
".",
"add_representation",
"(",
"'text'",
",",
"{",
"\"coordinates\"",
":",
"axis_labels_coords",
",",
"\"text\"",
":",
"t",
",",
"\"colors\"",
":",
"tc",
",",
"\"sizes\"",
":",
"s",
",",
"\"fonts\"",
":",
"f",
"}",
")",
"self",
".",
"_axes_reps",
"=",
"[",
"a_rep",
",",
"t_rep",
"]"
] | Toggle axes [x,y,z] on and off for the current representation
Parameters: dictionary of parameters to control axes:
position/p: origin of axes
length/l: length of axis
offset/o: offset to place axis labels
axis_colors/ac: axis colors
text_colors/tc: label colors
radii/r: axis radii
text/t: label text
sizes/s: label sizes
fonts/f: label fonts | [
"Toggle",
"axes",
"[",
"x",
"y",
"z",
"]",
"on",
"and",
"off",
"for",
"the",
"current",
"representation",
"Parameters",
":",
"dictionary",
"of",
"parameters",
"to",
"control",
"axes",
":",
"position",
"/",
"p",
":",
"origin",
"of",
"axes",
"length",
"/",
"l",
":",
"length",
"of",
"axis",
"offset",
"/",
"o",
":",
"offset",
"to",
"place",
"axis",
"labels",
"axis_colors",
"/",
"ac",
":",
"axis",
"colors",
"text_colors",
"/",
"tc",
":",
"label",
"colors",
"radii",
"/",
"r",
":",
"axis",
"radii",
"text",
"/",
"t",
":",
"label",
"text",
"sizes",
"/",
"s",
":",
"label",
"sizes",
"fonts",
"/",
"f",
":",
"label",
"fonts"
] | python | train |
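The inner `defaults()` helper packs three behaviors into one call: resolve the first non-None alias, fall back to a default, and broadcast a scalar to a per-axis list. A compact sketch of just the resolution and broadcast (type checks omitted; `resolve` is a hypothetical name):

```python
def resolve(params, aliases, default, length=3):
    """First non-None alias wins; scalars broadcast to a per-axis list."""
    value = next((params[a] for a in aliases if params.get(a) is not None),
                 default)
    if length > 1 and not isinstance(value, (list, tuple)):
        value = [value] * length
    return value

assert resolve({"r": 0.01}, ["radii", "radius", "r"], [0.005] * 3) == [0.01] * 3
assert resolve({}, ["text", "labels", "t"], ["X", "Y", "Z"]) == ["X", "Y", "Z"]
```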
AndrewAnnex/SpiceyPy | spiceypy/spiceypy.py | https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L1523-L1547 | def copy(cell):
"""
Copy the contents of a SpiceCell of any data type to another
cell of the same type.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/copy_c.html
:param cell: Cell to be copied.
:type cell: spiceypy.utils.support_types.SpiceCell
:return: New cell
:rtype: spiceypy.utils.support_types.SpiceCell
"""
assert isinstance(cell, stypes.SpiceCell)
# Next line was redundant with [raise NotImpImplementedError] below
# assert cell.dtype == 0 or cell.dtype == 1 or cell.dtype == 2
if cell.dtype is 0:
newcopy = stypes.SPICECHAR_CELL(cell.size, cell.length)
elif cell.dtype is 1:
newcopy = stypes.SPICEDOUBLE_CELL(cell.size)
elif cell.dtype is 2:
newcopy = stypes.SPICEINT_CELL(cell.size)
else:
raise NotImplementedError
libspice.copy_c(ctypes.byref(cell), ctypes.byref(newcopy))
return newcopy | [
"def",
"copy",
"(",
"cell",
")",
":",
"assert",
"isinstance",
"(",
"cell",
",",
"stypes",
".",
"SpiceCell",
")",
"# Next line was redundant with [raise NotImpImplementedError] below",
"# assert cell.dtype == 0 or cell.dtype == 1 or cell.dtype == 2",
"if",
"cell",
".",
"dtype",
"is",
"0",
":",
"newcopy",
"=",
"stypes",
".",
"SPICECHAR_CELL",
"(",
"cell",
".",
"size",
",",
"cell",
".",
"length",
")",
"elif",
"cell",
".",
"dtype",
"is",
"1",
":",
"newcopy",
"=",
"stypes",
".",
"SPICEDOUBLE_CELL",
"(",
"cell",
".",
"size",
")",
"elif",
"cell",
".",
"dtype",
"is",
"2",
":",
"newcopy",
"=",
"stypes",
".",
"SPICEINT_CELL",
"(",
"cell",
".",
"size",
")",
"else",
":",
"raise",
"NotImplementedError",
"libspice",
".",
"copy_c",
"(",
"ctypes",
".",
"byref",
"(",
"cell",
")",
",",
"ctypes",
".",
"byref",
"(",
"newcopy",
")",
")",
"return",
"newcopy"
] | Copy the contents of a SpiceCell of any data type to another
cell of the same type.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/copy_c.html
:param cell: Cell to be copied.
:type cell: spiceypy.utils.support_types.SpiceCell
:return: New cell
:rtype: spiceypy.utils.support_types.SpiceCell | [
"Copy",
"the",
"contents",
"of",
"a",
"SpiceCell",
"of",
"any",
"data",
"type",
"to",
"another",
"cell",
"of",
"the",
"same",
"type",
"."
] | python | train |
HazyResearch/metal | metal/label_model/baselines.py | https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/label_model/baselines.py#L35-L41 | def train_model(self, balance, *args, **kwargs):
"""
Args:
balance: A 1d arraylike that sums to 1, corresponding to the
(possibly estimated) class balance.
"""
self.balance = np.array(balance) | [
"def",
"train_model",
"(",
"self",
",",
"balance",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"balance",
"=",
"np",
".",
"array",
"(",
"balance",
")"
] | Args:
balance: A 1d arraylike that sums to 1, corresponding to the
(possibly estimated) class balance. | [
"Args",
":",
"balance",
":",
"A",
"1d",
"arraylike",
"that",
"sums",
"to",
"1",
"corresponding",
"to",
"the",
"(",
"possibly",
"estimated",
")",
"class",
"balance",
"."
] | python | train |
fictorial/pygameui | pygameui/view.py | https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/view.py#L209-L227 | def stylize(self):
"""Apply theme style attributes to this instance and its children.
This also causes a relayout to occur so that any changes in padding
or other stylistic attributes may be handled.
"""
# do children first in case parent needs to override their style
for child in self.children:
child.stylize()
style = theme.current.get_dict(self)
preserve_child = False
try:
preserve_child = getattr(theme.current, 'preserve_child')
except:
preserve_child = False
for key, val in style.iteritems():
kvc.set_value_for_keypath(self, key, val, preserve_child)
self.layout() | [
"def",
"stylize",
"(",
"self",
")",
":",
"# do children first in case parent needs to override their style",
"for",
"child",
"in",
"self",
".",
"children",
":",
"child",
".",
"stylize",
"(",
")",
"style",
"=",
"theme",
".",
"current",
".",
"get_dict",
"(",
"self",
")",
"preserve_child",
"=",
"False",
"try",
":",
"preserve_child",
"=",
"getattr",
"(",
"theme",
".",
"current",
",",
"'preserve_child'",
")",
"except",
":",
"preserve_child",
"=",
"False",
"for",
"key",
",",
"val",
"in",
"style",
".",
"iteritems",
"(",
")",
":",
"kvc",
".",
"set_value_for_keypath",
"(",
"self",
",",
"key",
",",
"val",
",",
"preserve_child",
")",
"self",
".",
"layout",
"(",
")"
] | Apply theme style attributes to this instance and its children.
This also causes a relayout to occur so that any changes in padding
or other stylistic attributes may be handled. | [
"Apply",
"theme",
"style",
"attributes",
"to",
"this",
"instance",
"and",
"its",
"children",
"."
] | python | train |
sibirrer/lenstronomy | lenstronomy/Util/param_util.py | https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/Util/param_util.py#L50-L58 | def ellipticity2phi_gamma(e1, e2):
"""
:param e1: ellipticity component
:param e2: ellipticity component
:return: angle and abs value of ellipticity
"""
phi = np.arctan2(e2, e1)/2
gamma = np.sqrt(e1**2+e2**2)
return phi, gamma | [
"def",
"ellipticity2phi_gamma",
"(",
"e1",
",",
"e2",
")",
":",
"phi",
"=",
"np",
".",
"arctan2",
"(",
"e2",
",",
"e1",
")",
"/",
"2",
"gamma",
"=",
"np",
".",
"sqrt",
"(",
"e1",
"**",
"2",
"+",
"e2",
"**",
"2",
")",
"return",
"phi",
",",
"gamma"
] | :param e1: ellipticity component
:param e2: ellipticity component
:return: angle and abs value of ellipticity | [
":",
"param",
"e1",
":",
"ellipticity",
"component",
":",
"param",
"e2",
":",
"ellipticity",
"component",
":",
"return",
":",
"angle",
"and",
"abs",
"value",
"of",
"ellipticity"
] | python | train |
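In conventional notation the conversion (and the inverse implied by the forward map) is:

```latex
\phi = \tfrac{1}{2}\,\operatorname{atan2}(e_2,\, e_1), \qquad
\gamma = \sqrt{e_1^{2} + e_2^{2}},
\qquad\text{so that}\qquad
e_1 = \gamma\cos 2\phi, \quad e_2 = \gamma\sin 2\phi.
```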
timothydmorton/simpledist | simpledist/distributions.py | https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L615-L664 | def double_lorgauss(x,p):
"""Evaluates a normalized distribution that is a mixture of a double-sided Gaussian and Double-sided Lorentzian.
Parameters
----------
x : float or array-like
Value(s) at which to evaluate distribution
p : array-like
Input parameters: mu (mode of distribution),
sig1 (LH Gaussian width),
sig2 (RH Gaussian width),
gam1 (LH Lorentzian width),
gam2 (RH Lorentzian width),
G1 (LH Gaussian "strength"),
G2 (RH Gaussian "strength").
Returns
-------
values : float or array-like
Double LorGauss distribution evaluated at input(s). If single value provided,
single value returned.
"""
mu,sig1,sig2,gam1,gam2,G1,G2 = p
gam1 = float(gam1)
gam2 = float(gam2)
G1 = abs(G1)
G2 = abs(G2)
sig1 = abs(sig1)
sig2 = abs(sig2)
gam1 = abs(gam1)
gab2 = abs(gam2)
L2 = (gam1/(gam1 + gam2)) * ((gam2*np.pi*G1)/(sig1*np.sqrt(2*np.pi)) -
(gam2*np.pi*G2)/(sig2*np.sqrt(2*np.pi)) +
(gam2/gam1)*(4-G1-G2))
L1 = 4 - G1 - G2 - L2
#print G1,G2,L1,L2
y1 = G1/(sig1*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig1**2) +\
L1/(np.pi*gam1) * gam1**2/((x-mu)**2 + gam1**2)
y2 = G2/(sig2*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig2**2) +\
L2/(np.pi*gam2) * gam2**2/((x-mu)**2 + gam2**2)
lo = (x < mu)
hi = (x >= mu)
return y1*lo + y2*hi | [
"def",
"double_lorgauss",
"(",
"x",
",",
"p",
")",
":",
"mu",
",",
"sig1",
",",
"sig2",
",",
"gam1",
",",
"gam2",
",",
"G1",
",",
"G2",
"=",
"p",
"gam1",
"=",
"float",
"(",
"gam1",
")",
"gam2",
"=",
"float",
"(",
"gam2",
")",
"G1",
"=",
"abs",
"(",
"G1",
")",
"G2",
"=",
"abs",
"(",
"G2",
")",
"sig1",
"=",
"abs",
"(",
"sig1",
")",
"sig2",
"=",
"abs",
"(",
"sig2",
")",
"gam1",
"=",
"abs",
"(",
"gam1",
")",
"gab2",
"=",
"abs",
"(",
"gam2",
")",
"L2",
"=",
"(",
"gam1",
"/",
"(",
"gam1",
"+",
"gam2",
")",
")",
"*",
"(",
"(",
"gam2",
"*",
"np",
".",
"pi",
"*",
"G1",
")",
"/",
"(",
"sig1",
"*",
"np",
".",
"sqrt",
"(",
"2",
"*",
"np",
".",
"pi",
")",
")",
"-",
"(",
"gam2",
"*",
"np",
".",
"pi",
"*",
"G2",
")",
"/",
"(",
"sig2",
"*",
"np",
".",
"sqrt",
"(",
"2",
"*",
"np",
".",
"pi",
")",
")",
"+",
"(",
"gam2",
"/",
"gam1",
")",
"*",
"(",
"4",
"-",
"G1",
"-",
"G2",
")",
")",
"L1",
"=",
"4",
"-",
"G1",
"-",
"G2",
"-",
"L2",
"#print G1,G2,L1,L2",
"y1",
"=",
"G1",
"/",
"(",
"sig1",
"*",
"np",
".",
"sqrt",
"(",
"2",
"*",
"np",
".",
"pi",
")",
")",
"*",
"np",
".",
"exp",
"(",
"-",
"0.5",
"*",
"(",
"x",
"-",
"mu",
")",
"**",
"2",
"/",
"sig1",
"**",
"2",
")",
"+",
"L1",
"/",
"(",
"np",
".",
"pi",
"*",
"gam1",
")",
"*",
"gam1",
"**",
"2",
"/",
"(",
"(",
"x",
"-",
"mu",
")",
"**",
"2",
"+",
"gam1",
"**",
"2",
")",
"y2",
"=",
"G2",
"/",
"(",
"sig2",
"*",
"np",
".",
"sqrt",
"(",
"2",
"*",
"np",
".",
"pi",
")",
")",
"*",
"np",
".",
"exp",
"(",
"-",
"0.5",
"*",
"(",
"x",
"-",
"mu",
")",
"**",
"2",
"/",
"sig2",
"**",
"2",
")",
"+",
"L2",
"/",
"(",
"np",
".",
"pi",
"*",
"gam2",
")",
"*",
"gam2",
"**",
"2",
"/",
"(",
"(",
"x",
"-",
"mu",
")",
"**",
"2",
"+",
"gam2",
"**",
"2",
")",
"lo",
"=",
"(",
"x",
"<",
"mu",
")",
"hi",
"=",
"(",
"x",
">=",
"mu",
")",
"return",
"y1",
"*",
"lo",
"+",
"y2",
"*",
"hi"
] | Evaluates a normalized distribution that is a mixture of a double-sided Gaussian and Double-sided Lorentzian.
Parameters
----------
x : float or array-like
Value(s) at which to evaluate distribution
p : array-like
Input parameters: mu (mode of distribution),
sig1 (LH Gaussian width),
sig2 (RH Gaussian width),
gam1 (LH Lorentzian width),
gam2 (RH Lorentzian width),
G1 (LH Gaussian "strength"),
G2 (RH Gaussian "strength").
Returns
-------
values : float or array-like
Double LorGauss distribution evaluated at input(s). If single value provided,
single value returned. | [
"Evaluates",
"a",
"normalized",
"distribution",
"that",
"is",
"a",
"mixture",
"of",
"a",
"double",
"-",
"sided",
"Gaussian",
"and",
"Double",
"-",
"sided",
"Lorentzian",
"."
] | python | train |
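Written out, the density is a per-side Gaussian-plus-Lorentzian mixture sharing the mode μ; which widths and weights apply depends on the side of the mode:

```latex
p(x \mid \mu) =
\frac{G_i}{\sigma_i\sqrt{2\pi}}\,
  e^{-(x-\mu)^{2}/(2\sigma_i^{2})}
+
\frac{L_i}{\pi\gamma_i}\,
  \frac{\gamma_i^{2}}{(x-\mu)^{2}+\gamma_i^{2}},
\qquad
i = \begin{cases} 1, & x < \mu,\\ 2, & x \ge \mu, \end{cases}
```

with L2 fixed by the γ-weighted balance computed in the code and L1 = 4 − G1 − G2 − L2. (As written, the source assigns `gab2 = abs(gam2)` without reassigning `gam2`, so γ2 is used exactly as passed.)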
pypa/pipenv | pipenv/vendor/ptyprocess/ptyprocess.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/ptyprocess/ptyprocess.py#L557-L562 | def write(self, s, flush=True):
"""Write bytes to the pseudoterminal.
Returns the number of bytes written.
"""
return self._writeb(s, flush=flush) | [
"def",
"write",
"(",
"self",
",",
"s",
",",
"flush",
"=",
"True",
")",
":",
"return",
"self",
".",
"_writeb",
"(",
"s",
",",
"flush",
"=",
"flush",
")"
] | Write bytes to the pseudoterminal.
Returns the number of bytes written. | [
"Write",
"bytes",
"to",
"the",
"pseudoterminal",
".",
"Returns",
"the",
"number",
"of",
"bytes",
"written",
"."
] | python | train |
gabstopper/smc-python | smc/administration/user_auth/servers.py | https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/administration/user_auth/servers.py#L136-L214 | def create(cls, name, address, base_dn, bind_user_id=None, bind_password=None,
port=389, protocol='ldap', tls_profile=None, tls_identity=None,
domain_controller=None, supported_method=None, timeout=10, max_search_result=0,
page_size=0, internet_auth_service_enabled=False, **kwargs):
"""
Create an AD server element using basic settings. You can also provide additional
kwargs documented in the class description::
ActiveDirectoryServer.create(name='somedirectory',
address='10.10.10.10',
base_dn='dc=domain,dc=net',
bind_user_id='cn=admin,cn=users,dc=domain,dc=net',
bind_password='somecrazypassword')
Configure NPS along with Active Directory::
ActiveDirectoryServer.create(name='somedirectory5',
address='10.10.10.10',
base_dn='dc=lepages,dc=net',
internet_auth_service_enabled=True,
retries=3,
auth_ipaddress='10.10.10.15',
auth_port=1900,
shared_secret='123456')
:param str name: name of AD element for display
:param str address: address of AD server
:param str base_dn: base DN for which to retrieve users, format is 'dc=domain,dc=com'
:param str bind_user_id: bind user ID credentials, fully qualified. Format is
'cn=admin,cn=users,dc=domain,dc=com'. If not provided, anonymous bind is used
:param str bind_password: bind password, required if bind_user_id set
:param int port: LDAP bind port, (default: 389)
:param str protocol: Which LDAP protocol to use, options 'ldap/ldaps/ldap_tls'. If
ldaps or ldap_tls is used, you must provide a tls_profile element (default: ldap)
:param str,TLSProfile tls_profile by element of str href. Used when protocol is set
to ldaps or ldap_tls
:param str,TLSIdentity tls_identity: check server identity when establishing TLS connection
:param list(DomainController) domain_controller: list of domain controller objects to
add an additional domain controllers for AD communication
:param list(AuthenticationMethod) supported_method: authentication services allowed
for this resource
:param int timeout: The time (in seconds) that components wait for the server to reply
:param int max_search_result: The maximum number of LDAP entries that are returned in
an LDAP response (default: 0 for no limit)
:param int page_size: The maximum number of LDAP entries that are returned on each page
of the LDAP response. (default: 0 for no limit)
:param bool internet_auth_service_enabled: whether to attach an NPS service to this
AD controller (default: False). If setting to true, provide kwargs values for
auth_ipaddress, auth_port and shared_secret
:raises CreateElementFailed: failed creating element
:rtype: ActiveDirectoryServer
"""
json={'name': name, 'address': address, 'base_dn': base_dn,
'bind_user_id': bind_user_id, 'bind_password': bind_password,
'port': port, 'protocol': protocol, 'timeout': timeout,
'domain_controller': domain_controller or [],
'max_search_result': max_search_result, 'page_size': page_size,
'internet_auth_service_enabled': internet_auth_service_enabled,
'supported_method': element_resolver(supported_method) or []}
for obj_class in ('group_object_class', 'user_object_class'):
json[obj_class] = kwargs.pop(obj_class, [])
if protocol in ('ldaps', 'ldap_tls'):
if not tls_profile:
raise CreateElementFailed('You must provide a TLS Profile when TLS '
'connections are configured to the AD controller.')
json.update(tls_profile_ref=element_resolver(tls_profile),
tls_identity=tls_identity)
if internet_auth_service_enabled:
ias = {'auth_port': kwargs.pop('auth_port', 1812),
'auth_ipaddress': kwargs.pop('auth_ipaddress', ''),
'shared_secret': kwargs.pop('shared_secret'),
'retries': kwargs.pop('retries', 2)}
json.update(ias)
json.update(kwargs)
return ElementCreator(cls, json) | [
"def",
"create",
"(",
"cls",
",",
"name",
",",
"address",
",",
"base_dn",
",",
"bind_user_id",
"=",
"None",
",",
"bind_password",
"=",
"None",
",",
"port",
"=",
"389",
",",
"protocol",
"=",
"'ldap'",
",",
"tls_profile",
"=",
"None",
",",
"tls_identity",
"=",
"None",
",",
"domain_controller",
"=",
"None",
",",
"supported_method",
"=",
"None",
",",
"timeout",
"=",
"10",
",",
"max_search_result",
"=",
"0",
",",
"page_size",
"=",
"0",
",",
"internet_auth_service_enabled",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"json",
"=",
"{",
"'name'",
":",
"name",
",",
"'address'",
":",
"address",
",",
"'base_dn'",
":",
"base_dn",
",",
"'bind_user_id'",
":",
"bind_user_id",
",",
"'bind_password'",
":",
"bind_password",
",",
"'port'",
":",
"port",
",",
"'protocol'",
":",
"protocol",
",",
"'timeout'",
":",
"timeout",
",",
"'domain_controller'",
":",
"domain_controller",
"or",
"[",
"]",
",",
"'max_search_result'",
":",
"max_search_result",
",",
"'page_size'",
":",
"page_size",
",",
"'internet_auth_service_enabled'",
":",
"internet_auth_service_enabled",
",",
"'supported_method'",
":",
"element_resolver",
"(",
"supported_method",
")",
"or",
"[",
"]",
"}",
"for",
"obj_class",
"in",
"(",
"'group_object_class'",
",",
"'user_object_class'",
")",
":",
"json",
"[",
"obj_class",
"]",
"=",
"kwargs",
".",
"pop",
"(",
"obj_class",
",",
"[",
"]",
")",
"if",
"protocol",
"in",
"(",
"'ldaps'",
",",
"'ldap_tls'",
")",
":",
"if",
"not",
"tls_profile",
":",
"raise",
"CreateElementFailed",
"(",
"'You must provide a TLS Profile when TLS '",
"'connections are configured to the AD controller.'",
")",
"json",
".",
"update",
"(",
"tls_profile_ref",
"=",
"element_resolver",
"(",
"tls_profile",
")",
",",
"tls_identity",
"=",
"tls_identity",
")",
"if",
"internet_auth_service_enabled",
":",
"ias",
"=",
"{",
"'auth_port'",
":",
"kwargs",
".",
"pop",
"(",
"'auth_port'",
",",
"1812",
")",
",",
"'auth_ipaddress'",
":",
"kwargs",
".",
"pop",
"(",
"'auth_ipaddress'",
",",
"''",
")",
",",
"'shared_secret'",
":",
"kwargs",
".",
"pop",
"(",
"'shared_secret'",
")",
",",
"'retries'",
":",
"kwargs",
".",
"pop",
"(",
"'retries'",
",",
"2",
")",
"}",
"json",
".",
"update",
"(",
"ias",
")",
"json",
".",
"update",
"(",
"kwargs",
")",
"return",
"ElementCreator",
"(",
"cls",
",",
"json",
")"
] | Create an AD server element using basic settings. You can also provide additional
kwargs documented in the class description::
ActiveDirectoryServer.create(name='somedirectory',
address='10.10.10.10',
base_dn='dc=domain,dc=net',
bind_user_id='cn=admin,cn=users,dc=domain,dc=net',
bind_password='somecrazypassword')
Configure NPS along with Active Directory::
ActiveDirectoryServer.create(name='somedirectory5',
address='10.10.10.10',
base_dn='dc=lepages,dc=net',
internet_auth_service_enabled=True,
retries=3,
auth_ipaddress='10.10.10.15',
auth_port=1900,
shared_secret='123456')
:param str name: name of AD element for display
:param str address: address of AD server
:param str base_dn: base DN for which to retrieve users, format is 'dc=domain,dc=com'
:param str bind_user_id: bind user ID credentials, fully qualified. Format is
'cn=admin,cn=users,dc=domain,dc=com'. If not provided, anonymous bind is used
:param str bind_password: bind password, required if bind_user_id set
:param int port: LDAP bind port, (default: 389)
:param str protocol: Which LDAP protocol to use, options 'ldap/ldaps/ldap_tls'. If
ldaps or ldap_tls is used, you must provide a tls_profile element (default: ldap)
:param str,TLSProfile tls_profile by element of str href. Used when protocol is set
to ldaps or ldap_tls
:param str,TLSIdentity tls_identity: check server identity when establishing TLS connection
:param list(DomainController) domain_controller: list of domain controller objects to
add an additional domain controllers for AD communication
:param list(AuthenticationMethod) supported_method: authentication services allowed
for this resource
:param int timeout: The time (in seconds) that components wait for the server to reply
:param int max_search_result: The maximum number of LDAP entries that are returned in
an LDAP response (default: 0 for no limit)
:param int page_size: The maximum number of LDAP entries that are returned on each page
of the LDAP response. (default: 0 for no limit)
:param bool internet_auth_service_enabled: whether to attach an NPS service to this
AD controller (default: False). If setting to true, provide kwargs values for
auth_ipaddress, auth_port and shared_secret
:raises CreateElementFailed: failed creating element
:rtype: ActiveDirectoryServer | [
"Create",
"an",
"AD",
"server",
"element",
"using",
"basic",
"settings",
".",
"You",
"can",
"also",
"provide",
"additional",
"kwargs",
"documented",
"in",
"the",
"class",
"description",
"::",
"ActiveDirectoryServer",
".",
"create",
"(",
"name",
"=",
"somedirectory",
"address",
"=",
"10",
".",
"10",
".",
"10",
".",
"10",
"base_dn",
"=",
"dc",
"=",
"domain",
"dc",
"=",
"net",
"bind_user_id",
"=",
"cn",
"=",
"admin",
"cn",
"=",
"users",
"dc",
"=",
"domain",
"dc",
"=",
"net",
"bind_password",
"=",
"somecrazypassword",
")",
"Configure",
"NPS",
"along",
"with",
"Active",
"Directory",
"::",
"ActiveDirectoryServer",
".",
"create",
"(",
"name",
"=",
"somedirectory5",
"address",
"=",
"10",
".",
"10",
".",
"10",
".",
"10",
"base_dn",
"=",
"dc",
"=",
"lepages",
"dc",
"=",
"net",
"internet_auth_service_enabled",
"=",
"True",
"retries",
"=",
"3",
"auth_ipaddress",
"=",
"10",
".",
"10",
".",
"10",
".",
"15",
"auth_port",
"=",
"1900",
"shared_secret",
"=",
"123456",
")",
":",
"param",
"str",
"name",
":",
"name",
"of",
"AD",
"element",
"for",
"display",
":",
"param",
"str",
"address",
":",
"address",
"of",
"AD",
"server",
":",
"param",
"str",
"base_dn",
":",
"base",
"DN",
"for",
"which",
"to",
"retrieve",
"users",
"format",
"is",
"dc",
"=",
"domain",
"dc",
"=",
"com",
":",
"param",
"str",
"bind_user_id",
":",
"bind",
"user",
"ID",
"credentials",
"fully",
"qualified",
".",
"Format",
"is",
"cn",
"=",
"admin",
"cn",
"=",
"users",
"dc",
"=",
"domain",
"dc",
"=",
"com",
".",
"If",
"not",
"provided",
"anonymous",
"bind",
"is",
"used",
":",
"param",
"str",
"bind_password",
":",
"bind",
"password",
"required",
"if",
"bind_user_id",
"set",
":",
"param",
"int",
"port",
":",
"LDAP",
"bind",
"port",
"(",
"default",
":",
"389",
")",
":",
"param",
"str",
"protocol",
":",
"Which",
"LDAP",
"protocol",
"to",
"use",
"options",
"ldap",
"/",
"ldaps",
"/",
"ldap_tls",
".",
"If",
"ldaps",
"or",
"ldap_tls",
"is",
"used",
"you",
"must",
"provide",
"a",
"tls_profile",
"element",
"(",
"default",
":",
"ldap",
")",
":",
"param",
"str",
"TLSProfile",
"tls_profile",
"by",
"element",
"of",
"str",
"href",
".",
"Used",
"when",
"protocol",
"is",
"set",
"to",
"ldaps",
"or",
"ldap_tls",
":",
"param",
"str",
"TLSIdentity",
"tls_identity",
":",
"check",
"server",
"identity",
"when",
"establishing",
"TLS",
"connection",
":",
"param",
"list",
"(",
"DomainController",
")",
"domain_controller",
":",
"list",
"of",
"domain",
"controller",
"objects",
"to",
"add",
"an",
"additional",
"domain",
"controllers",
"for",
"AD",
"communication",
":",
"param",
"list",
"(",
"AuthenticationMethod",
")",
"supported_method",
":",
"authentication",
"services",
"allowed",
"for",
"this",
"resource",
":",
"param",
"int",
"timeout",
":",
"The",
"time",
"(",
"in",
"seconds",
")",
"that",
"components",
"wait",
"for",
"the",
"server",
"to",
"reply",
":",
"param",
"int",
"max_search_result",
":",
"The",
"maximum",
"number",
"of",
"LDAP",
"entries",
"that",
"are",
"returned",
"in",
"an",
"LDAP",
"response",
"(",
"default",
":",
"0",
"for",
"no",
"limit",
")",
":",
"param",
"int",
"page_size",
":",
"The",
"maximum",
"number",
"of",
"LDAP",
"entries",
"that",
"are",
"returned",
"on",
"each",
"page",
"of",
"the",
"LDAP",
"response",
".",
"(",
"default",
":",
"0",
"for",
"no",
"limit",
")",
":",
"param",
"bool",
"internet_auth_service_enabled",
":",
"whether",
"to",
"attach",
"an",
"NPS",
"service",
"to",
"this",
"AD",
"controller",
"(",
"default",
":",
"False",
")",
".",
"If",
"setting",
"to",
"true",
"provide",
"kwargs",
"values",
"for",
"auth_ipaddress",
"auth_port",
"and",
"shared_secret",
":",
"raises",
"CreateElementFailed",
":",
"failed",
"creating",
"element",
":",
"rtype",
":",
"ActiveDirectoryServer"
] | python | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L4803-L4811 | def validateRoot(self, ctxt):
"""Try to validate a the root element basically it does the
following check as described by the XML-1.0 recommendation:
- [ VC: Root Element Type ] it doesn't try to recurse or
apply other check to the element """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlValidateRoot(ctxt__o, self._o)
return ret | [
"def",
"validateRoot",
"(",
"self",
",",
"ctxt",
")",
":",
"if",
"ctxt",
"is",
"None",
":",
"ctxt__o",
"=",
"None",
"else",
":",
"ctxt__o",
"=",
"ctxt",
".",
"_o",
"ret",
"=",
"libxml2mod",
".",
"xmlValidateRoot",
"(",
"ctxt__o",
",",
"self",
".",
"_o",
")",
"return",
"ret"
] | Try to validate a the root element basically it does the
following check as described by the XML-1.0 recommendation:
- [ VC: Root Element Type ] it doesn't try to recurse or
apply other check to the element | [
"Try",
"to",
"validate",
"a",
"the",
"root",
"element",
"basically",
"it",
"does",
"the",
"following",
"check",
"as",
"described",
"by",
"the",
"XML",
"-",
"1",
".",
"0",
"recommendation",
":",
"-",
"[",
"VC",
":",
"Root",
"Element",
"Type",
"]",
"it",
"doesn",
"t",
"try",
"to",
"recurse",
"or",
"apply",
"other",
"check",
"to",
"the",
"element"
] | python | train |
numenta/htmresearch | htmresearch/data/sm_sequences.py | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/data/sm_sequences.py#L391-L399 | def printSensoryCodingScheme(self):
"""
Print sensory inputs along with their encoded versions.
"""
print "\nsensory coding scheme: "
for loc in self.spatialConfig:
sensoryElement = self.spatialMap[tuple(loc)]
print sensoryElement, "%s : " % loc,
printSequence(self.encodeSensoryInput(sensoryElement)) | [
"def",
"printSensoryCodingScheme",
"(",
"self",
")",
":",
"print",
"\"\\nsensory coding scheme: \"",
"for",
"loc",
"in",
"self",
".",
"spatialConfig",
":",
"sensoryElement",
"=",
"self",
".",
"spatialMap",
"[",
"tuple",
"(",
"loc",
")",
"]",
"print",
"sensoryElement",
",",
"\"%s : \"",
"%",
"loc",
",",
"printSequence",
"(",
"self",
".",
"encodeSensoryInput",
"(",
"sensoryElement",
")",
")"
] | Print sensory inputs along with their encoded versions. | [
"Print",
"sensory",
"inputs",
"along",
"with",
"their",
"encoded",
"versions",
"."
] | python | train |
heroku/heroku.py | heroku/models.py | https://github.com/heroku/heroku.py/blob/cadc0a074896cf29c65a457c5c5bdb2069470af0/heroku/models.py#L300-L308 | def maintenance(self, on=True):
"""Toggles maintenance mode."""
r = self._h._http_resource(
method='POST',
resource=('apps', self.name, 'server', 'maintenance'),
data={'maintenance_mode': int(on)}
)
return r.ok | [
"def",
"maintenance",
"(",
"self",
",",
"on",
"=",
"True",
")",
":",
"r",
"=",
"self",
".",
"_h",
".",
"_http_resource",
"(",
"method",
"=",
"'POST'",
",",
"resource",
"=",
"(",
"'apps'",
",",
"self",
".",
"name",
",",
"'server'",
",",
"'maintenance'",
")",
",",
"data",
"=",
"{",
"'maintenance_mode'",
":",
"int",
"(",
"on",
")",
"}",
")",
"return",
"r",
".",
"ok"
] | Toggles maintenance mode. | [
"Toggles",
"maintenance",
"mode",
"."
] | python | train |
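The wrapper issues a single POST, which can be replayed directly and also documents the endpoint shape. The base URL, auth scheme, and token below are assumptions for illustration, not taken from the library:

```python
import requests

def set_maintenance(api_base, api_key, app_name, on=True):
    """Replay the POST made by App.maintenance() above (endpoint assumed)."""
    url = "{0}/apps/{1}/server/maintenance".format(api_base, app_name)
    resp = requests.post(url,
                         data={"maintenance_mode": int(on)},
                         auth=("", api_key))  # key-as-password basic auth (assumed)
    return resp.ok

# set_maintenance("https://api.heroku.com", "<api-key>", "myapp", on=True)
```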