repo (string, 7-55 chars) | path (string, 4-223 chars) | url (string, 87-315 chars) | code (string, 75-104k chars) | code_tokens (list) | docstring (string, 1-46.9k chars) | docstring_tokens (list) | language (string, 1 class) | partition (string, 3 classes) | avg_line_len (float64, 7.91-980) |
---|---|---|---|---|---|---|---|---|---|
LudovicRousseau/pyscard | smartcard/wx/ReaderToolbar.py | https://github.com/LudovicRousseau/pyscard/blob/62e675028086c75656444cc21d563d9f08ebf8e7/smartcard/wx/ReaderToolbar.py#L45-L56 | def update(self, observable, handlers):
"""Toolbar ReaderObserver callback that is notified when
readers are added or removed."""
addedreaders, removedreaders = handlers
for reader in addedreaders:
item = self.Append(str(reader))
self.SetClientData(item, reader)
for reader in removedreaders:
item = self.FindString(str(reader))
if wx.NOT_FOUND != item:
self.Delete(item)
selection = self.GetSelection() | [
"def",
"update",
"(",
"self",
",",
"observable",
",",
"handlers",
")",
":",
"addedreaders",
",",
"removedreaders",
"=",
"handlers",
"for",
"reader",
"in",
"addedreaders",
":",
"item",
"=",
"self",
".",
"Append",
"(",
"str",
"(",
"reader",
")",
")",
"self",
".",
"SetClientData",
"(",
"item",
",",
"reader",
")",
"for",
"reader",
"in",
"removedreaders",
":",
"item",
"=",
"self",
".",
"FindString",
"(",
"str",
"(",
"reader",
")",
")",
"if",
"wx",
".",
"NOT_FOUND",
"!=",
"item",
":",
"self",
".",
"Delete",
"(",
"item",
")",
"selection",
"=",
"self",
".",
"GetSelection",
"(",
")"
]
| Toolbar ReaderObserver callback that is notified when
readers are added or removed. | [
"Toolbar",
"ReaderObserver",
"callback",
"that",
"is",
"notified",
"when",
"readers",
"are",
"added",
"or",
"removed",
"."
]
| python | train | 42 |
maroba/findiff | findiff/coefs.py | https://github.com/maroba/findiff/blob/5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac/findiff/coefs.py#L131-L136 | def _build_rhs(p, q, deriv):
"""The right hand side of the equation system matrix"""
b = [0 for _ in range(p+q+1)]
b[deriv] = math.factorial(deriv)
return np.array(b) | [
"def",
"_build_rhs",
"(",
"p",
",",
"q",
",",
"deriv",
")",
":",
"b",
"=",
"[",
"0",
"for",
"_",
"in",
"range",
"(",
"p",
"+",
"q",
"+",
"1",
")",
"]",
"b",
"[",
"deriv",
"]",
"=",
"math",
".",
"factorial",
"(",
"deriv",
")",
"return",
"np",
".",
"array",
"(",
"b",
")"
]
| The right hand side of the equation system matrix | [
"The",
"right",
"hand",
"side",
"of",
"the",
"equation",
"system",
"matrix"
]
| python | train | 29.666667 |
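The `_build_rhs` helper in the findiff row above only builds the right-hand side; it is meant to be solved against a Taylor/Vandermonde-style matrix over the stencil offsets. A self-contained sketch of that pairing (NumPy and math only; the matrix construction is an assumption about findiff's surrounding code, not taken from it):

```python
import math
import numpy as np

# Solve V @ c = b for finite-difference weights c on the offsets -p .. q.
p, q, deriv = 1, 1, 1
offsets = np.arange(-p, q + 1)

# Row k of the assumed system matrix holds offsets**k.
V = np.vstack([offsets ** k for k in range(p + q + 1)])

# Right-hand side exactly as in _build_rhs: zeros except deriv! at index deriv.
b = np.zeros(p + q + 1)
b[deriv] = math.factorial(deriv)

coeffs = np.linalg.solve(V, b)
print(coeffs)  # central first-derivative weights: [-0.5, 0.0, 0.5]
```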
orbingol/NURBS-Python | geomdl/_exchange.py | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/_exchange.py#L102-L150 | def import_surf_mesh(file_name):
""" Generates a NURBS surface object from a mesh file.
:param file_name: input mesh file
:type file_name: str
:return: a NURBS surface
:rtype: NURBS.Surface
"""
raw_content = read_file(file_name)
raw_content = raw_content.split("\n")
content = []
for rc in raw_content:
temp = rc.strip().split()
content.append(temp)
# 1st line defines the dimension and it must be 3
if int(content[0][0]) != 3:
raise TypeError("Input mesh '" + str(file_name) + "' must be 3-dimensional")
# Create a NURBS surface instance and fill with the data read from mesh file
surf = shortcuts.generate_surface(rational=True)
# 2nd line is the degrees
surf.degree_u = int(content[1][0])
surf.degree_v = int(content[1][1])
# 3rd line is the number of weighted control points in u and v directions
dim_u = int(content[2][0])
dim_v = int(content[2][1])
# Starting from 6th line, we have the weighted control points
ctrlpts_end = 5 + (dim_u * dim_v)
ctrlpts_mesh = content[5:ctrlpts_end]
# mesh files have the control points in u-row order format
ctrlpts = compatibility.flip_ctrlpts_u(ctrlpts_mesh, dim_u, dim_v)
# mesh files store control points in format (x, y, z, w)
ctrlptsw = compatibility.generate_ctrlptsw(ctrlpts)
# Set control points
surf.set_ctrlpts(ctrlptsw, dim_u, dim_v)
# 4th and 5th lines are knot vectors
surf.knotvector_u = [float(u) for u in content[3]]
surf.knotvector_v = [float(v) for v in content[4]]
# Return the surface instance
return surf | [
"def",
"import_surf_mesh",
"(",
"file_name",
")",
":",
"raw_content",
"=",
"read_file",
"(",
"file_name",
")",
"raw_content",
"=",
"raw_content",
".",
"split",
"(",
"\"\\n\"",
")",
"content",
"=",
"[",
"]",
"for",
"rc",
"in",
"raw_content",
":",
"temp",
"=",
"rc",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"content",
".",
"append",
"(",
"temp",
")",
"# 1st line defines the dimension and it must be 3",
"if",
"int",
"(",
"content",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"!=",
"3",
":",
"raise",
"TypeError",
"(",
"\"Input mesh '\"",
"+",
"str",
"(",
"file_name",
")",
"+",
"\"' must be 3-dimensional\"",
")",
"# Create a NURBS surface instance and fill with the data read from mesh file",
"surf",
"=",
"shortcuts",
".",
"generate_surface",
"(",
"rational",
"=",
"True",
")",
"# 2nd line is the degrees",
"surf",
".",
"degree_u",
"=",
"int",
"(",
"content",
"[",
"1",
"]",
"[",
"0",
"]",
")",
"surf",
".",
"degree_v",
"=",
"int",
"(",
"content",
"[",
"1",
"]",
"[",
"1",
"]",
")",
"# 3rd line is the number of weighted control points in u and v directions",
"dim_u",
"=",
"int",
"(",
"content",
"[",
"2",
"]",
"[",
"0",
"]",
")",
"dim_v",
"=",
"int",
"(",
"content",
"[",
"2",
"]",
"[",
"1",
"]",
")",
"# Starting from 6th line, we have the weighted control points",
"ctrlpts_end",
"=",
"5",
"+",
"(",
"dim_u",
"*",
"dim_v",
")",
"ctrlpts_mesh",
"=",
"content",
"[",
"5",
":",
"ctrlpts_end",
"]",
"# mesh files have the control points in u-row order format",
"ctrlpts",
"=",
"compatibility",
".",
"flip_ctrlpts_u",
"(",
"ctrlpts_mesh",
",",
"dim_u",
",",
"dim_v",
")",
"# mesh files store control points in format (x, y, z, w)",
"ctrlptsw",
"=",
"compatibility",
".",
"generate_ctrlptsw",
"(",
"ctrlpts",
")",
"# Set control points",
"surf",
".",
"set_ctrlpts",
"(",
"ctrlptsw",
",",
"dim_u",
",",
"dim_v",
")",
"# 4th and 5th lines are knot vectors",
"surf",
".",
"knotvector_u",
"=",
"[",
"float",
"(",
"u",
")",
"for",
"u",
"in",
"content",
"[",
"3",
"]",
"]",
"surf",
".",
"knotvector_v",
"=",
"[",
"float",
"(",
"v",
")",
"for",
"v",
"in",
"content",
"[",
"4",
"]",
"]",
"# Return the surface instance",
"return",
"surf"
]
| Generates a NURBS surface object from a mesh file.
:param file_name: input mesh file
:type file_name: str
:return: a NURBS surface
:rtype: NURBS.Surface | [
"Generates",
"a",
"NURBS",
"surface",
"object",
"from",
"a",
"mesh",
"file",
"."
]
| python | train | 32.428571 |
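The comments inside `import_surf_mesh` spell out the expected mesh layout line by line. The snippet below writes a minimal file in that layout as read from those comments; the degree-(1, 1) surface data and the `surface.msh` name are illustrative assumptions, not geomdl fixtures:

```python
# Line 1: dimension (must be 3), line 2: degrees, line 3: control-point counts,
# lines 4-5: knot vectors in u and v, remaining lines: weighted points "x y z w".
mesh_lines = [
    "3",          # 3-dimensional data
    "1 1",        # degree_u degree_v
    "2 2",        # number of control points in u and v
    "0 0 1 1",    # knot vector u (2 control points + degree 1 + 1 = 4 knots)
    "0 0 1 1",    # knot vector v
    "0 0 0 1",    # control points in u-row order, each with weight w = 1
    "1 0 0 1",
    "0 1 0 1",
    "1 1 1 1",
]
with open("surface.msh", "w") as handle:
    handle.write("\n".join(mesh_lines))

# import_surf_mesh("surface.msh") should then return a degree (1, 1) NURBS surface.
```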
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L9165-L9182 | def attitude_quaternion_encode(self, time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed):
'''
The attitude in the aeronautical frame (right-handed, Z-down, X-front,
Y-right), expressed as quaternion. Quaternion order is
w, x, y, z and a zero rotation would be expressed as
(1 0 0 0).
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
q1 : Quaternion component 1, w (1 in null-rotation) (float)
q2 : Quaternion component 2, x (0 in null-rotation) (float)
q3 : Quaternion component 3, y (0 in null-rotation) (float)
q4 : Quaternion component 4, z (0 in null-rotation) (float)
rollspeed : Roll angular speed (rad/s) (float)
pitchspeed : Pitch angular speed (rad/s) (float)
yawspeed : Yaw angular speed (rad/s) (float)
'''
return MAVLink_attitude_quaternion_message(time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed) | [
"def",
"attitude_quaternion_encode",
"(",
"self",
",",
"time_boot_ms",
",",
"q1",
",",
"q2",
",",
"q3",
",",
"q4",
",",
"rollspeed",
",",
"pitchspeed",
",",
"yawspeed",
")",
":",
"return",
"MAVLink_attitude_quaternion_message",
"(",
"time_boot_ms",
",",
"q1",
",",
"q2",
",",
"q3",
",",
"q4",
",",
"rollspeed",
",",
"pitchspeed",
",",
"yawspeed",
")"
]
| The attitude in the aeronautical frame (right-handed, Z-down, X-front,
Y-right), expressed as quaternion. Quaternion order is
w, x, y, z and a zero rotation would be expressed as
(1 0 0 0).
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
q1 : Quaternion component 1, w (1 in null-rotation) (float)
q2 : Quaternion component 2, x (0 in null-rotation) (float)
q3 : Quaternion component 3, y (0 in null-rotation) (float)
q4 : Quaternion component 4, z (0 in null-rotation) (float)
rollspeed : Roll angular speed (rad/s) (float)
pitchspeed : Pitch angular speed (rad/s) (float)
yawspeed : Yaw angular speed (rad/s) (float) | [
"The",
"attitude",
"in",
"the",
"aeronautical",
"frame",
"(",
"right",
"-",
"handed",
"Z",
"-",
"down",
"X",
"-",
"front",
"Y",
"-",
"right",
")",
"expressed",
"as",
"quaternion",
".",
"Quaternion",
"order",
"is",
"w",
"x",
"y",
"z",
"and",
"a",
"zero",
"rotation",
"would",
"be",
"expressed",
"as",
"(",
"1",
"0",
"0",
"0",
")",
"."
]
| python | train | 68.444444 |
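The docstring above fixes the quaternion convention: order (w, x, y, z) with (1, 0, 0, 0) as the null rotation. A quick numeric check of that convention, independent of pymavlink and assuming the usual aerospace ZYX Euler sequence:

```python
import math

def euler_to_quaternion(roll, pitch, yaw):
    # Returns (w, x, y, z); zero angles should give the null rotation (1, 0, 0, 0).
    cr, sr = math.cos(roll / 2.0), math.sin(roll / 2.0)
    cp, sp = math.cos(pitch / 2.0), math.sin(pitch / 2.0)
    cy, sy = math.cos(yaw / 2.0), math.sin(yaw / 2.0)
    w = cr * cp * cy + sr * sp * sy
    x = sr * cp * cy - cr * sp * sy
    y = cr * sp * cy + sr * cp * sy
    z = cr * cp * sy - sr * sp * cy
    return w, x, y, z

print(euler_to_quaternion(0.0, 0.0, 0.0))  # (1.0, 0.0, 0.0, 0.0)
```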
sbg/sevenbridges-python | sevenbridges/models/marker.py | https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/models/marker.py#L96-L117 | def save(self, inplace=True):
"""
Saves all modification to the marker on the server.
:param inplace Apply edits on the current instance or get a new one.
:return: Marker instance.
"""
modified_data = self._modified_data()
if bool(modified_data):
extra = {
'resource': self.__class__.__name__,
'query': {
'id': self.id,
'modified_data': modified_data
}
}
logger.info('Saving marker', extra=extra)
data = self._api.patch(url=self._URL['get'].format(id=self.id),
data=modified_data).json()
marker = Marker(api=self._api, **data)
return marker
else:
raise ResourceNotModified() | [
"def",
"save",
"(",
"self",
",",
"inplace",
"=",
"True",
")",
":",
"modified_data",
"=",
"self",
".",
"_modified_data",
"(",
")",
"if",
"bool",
"(",
"modified_data",
")",
":",
"extra",
"=",
"{",
"'resource'",
":",
"self",
".",
"__class__",
".",
"__name__",
",",
"'query'",
":",
"{",
"'id'",
":",
"self",
".",
"id",
",",
"'modified_data'",
":",
"modified_data",
"}",
"}",
"logger",
".",
"info",
"(",
"'Saving marker'",
",",
"extra",
"=",
"extra",
")",
"data",
"=",
"self",
".",
"_api",
".",
"patch",
"(",
"url",
"=",
"self",
".",
"_URL",
"[",
"'get'",
"]",
".",
"format",
"(",
"id",
"=",
"self",
".",
"id",
")",
",",
"data",
"=",
"modified_data",
")",
".",
"json",
"(",
")",
"marker",
"=",
"Marker",
"(",
"api",
"=",
"self",
".",
"_api",
",",
"*",
"*",
"data",
")",
"return",
"marker",
"else",
":",
"raise",
"ResourceNotModified",
"(",
")"
]
| Saves all modification to the marker on the server.
:param inplace Apply edits on the current instance or get a new one.
:return: Marker instance. | [
"Saves",
"all",
"modification",
"to",
"the",
"marker",
"on",
"the",
"server",
".",
":",
"param",
"inplace",
"Apply",
"edits",
"on",
"the",
"current",
"instance",
"or",
"get",
"a",
"new",
"one",
".",
":",
"return",
":",
"Marker",
"instance",
"."
]
| python | train | 37.454545 |
ray-project/ray | python/ray/rllib/optimizers/multi_gpu_impl.py | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/optimizers/multi_gpu_impl.py#L118-L225 | def load_data(self, sess, inputs, state_inputs):
"""Bulk loads the specified inputs into device memory.
The shape of the inputs must conform to the shapes of the input
placeholders this optimizer was constructed with.
The data is split equally across all the devices. If the data is not
evenly divisible by the batch size, excess data will be discarded.
Args:
sess: TensorFlow session.
inputs: List of arrays matching the input placeholders, of shape
[BATCH_SIZE, ...].
state_inputs: List of RNN input arrays. These arrays have size
[BATCH_SIZE / MAX_SEQ_LEN, ...].
Returns:
The number of tuples loaded per device.
"""
if log_once("load_data"):
logger.info(
"Training on concatenated sample batches:\n\n{}\n".format(
summarize({
"placeholders": self.loss_inputs,
"inputs": inputs,
"state_inputs": state_inputs
})))
feed_dict = {}
assert len(self.loss_inputs) == len(inputs + state_inputs), \
(self.loss_inputs, inputs, state_inputs)
# Let's suppose we have the following input data, and 2 devices:
# 1 2 3 4 5 6 7 <- state inputs shape
# A A A B B B C C C D D D E E E F F F G G G <- inputs shape
# The data is truncated and split across devices as follows:
# |---| seq len = 3
# |---------------------------------| seq batch size = 6 seqs
# |----------------| per device batch size = 9 tuples
if len(state_inputs) > 0:
smallest_array = state_inputs[0]
seq_len = len(inputs[0]) // len(state_inputs[0])
self._loaded_max_seq_len = seq_len
else:
smallest_array = inputs[0]
self._loaded_max_seq_len = 1
sequences_per_minibatch = (
self.max_per_device_batch_size // self._loaded_max_seq_len * len(
self.devices))
if sequences_per_minibatch < 1:
logger.warn(
("Target minibatch size is {}, however the rollout sequence "
"length is {}, hence the minibatch size will be raised to "
"{}.").format(self.max_per_device_batch_size,
self._loaded_max_seq_len,
self._loaded_max_seq_len * len(self.devices)))
sequences_per_minibatch = 1
if len(smallest_array) < sequences_per_minibatch:
# Dynamically shrink the batch size if insufficient data
sequences_per_minibatch = make_divisible_by(
len(smallest_array), len(self.devices))
if log_once("data_slicing"):
logger.info(
("Divided {} rollout sequences, each of length {}, among "
"{} devices.").format(
len(smallest_array), self._loaded_max_seq_len,
len(self.devices)))
if sequences_per_minibatch < len(self.devices):
raise ValueError(
"Must load at least 1 tuple sequence per device. Try "
"increasing `sgd_minibatch_size` or reducing `max_seq_len` "
"to ensure that at least one sequence fits per device.")
self._loaded_per_device_batch_size = (sequences_per_minibatch // len(
self.devices) * self._loaded_max_seq_len)
if len(state_inputs) > 0:
# First truncate the RNN state arrays to the sequences_per_minib.
state_inputs = [
make_divisible_by(arr, sequences_per_minibatch)
for arr in state_inputs
]
# Then truncate the data inputs to match
inputs = [arr[:len(state_inputs[0]) * seq_len] for arr in inputs]
assert len(state_inputs[0]) * seq_len == len(inputs[0]), \
(len(state_inputs[0]), sequences_per_minibatch, seq_len,
len(inputs[0]))
for ph, arr in zip(self.loss_inputs, inputs + state_inputs):
feed_dict[ph] = arr
truncated_len = len(inputs[0])
else:
for ph, arr in zip(self.loss_inputs, inputs + state_inputs):
truncated_arr = make_divisible_by(arr, sequences_per_minibatch)
feed_dict[ph] = truncated_arr
truncated_len = len(truncated_arr)
sess.run([t.init_op for t in self._towers], feed_dict=feed_dict)
self.num_tuples_loaded = truncated_len
tuples_per_device = truncated_len // len(self.devices)
assert tuples_per_device > 0, "No data loaded?"
assert tuples_per_device % self._loaded_per_device_batch_size == 0
return tuples_per_device | [
"def",
"load_data",
"(",
"self",
",",
"sess",
",",
"inputs",
",",
"state_inputs",
")",
":",
"if",
"log_once",
"(",
"\"load_data\"",
")",
":",
"logger",
".",
"info",
"(",
"\"Training on concatenated sample batches:\\n\\n{}\\n\"",
".",
"format",
"(",
"summarize",
"(",
"{",
"\"placeholders\"",
":",
"self",
".",
"loss_inputs",
",",
"\"inputs\"",
":",
"inputs",
",",
"\"state_inputs\"",
":",
"state_inputs",
"}",
")",
")",
")",
"feed_dict",
"=",
"{",
"}",
"assert",
"len",
"(",
"self",
".",
"loss_inputs",
")",
"==",
"len",
"(",
"inputs",
"+",
"state_inputs",
")",
",",
"(",
"self",
".",
"loss_inputs",
",",
"inputs",
",",
"state_inputs",
")",
"# Let's suppose we have the following input data, and 2 devices:",
"# 1 2 3 4 5 6 7 <- state inputs shape",
"# A A A B B B C C C D D D E E E F F F G G G <- inputs shape",
"# The data is truncated and split across devices as follows:",
"# |---| seq len = 3",
"# |---------------------------------| seq batch size = 6 seqs",
"# |----------------| per device batch size = 9 tuples",
"if",
"len",
"(",
"state_inputs",
")",
">",
"0",
":",
"smallest_array",
"=",
"state_inputs",
"[",
"0",
"]",
"seq_len",
"=",
"len",
"(",
"inputs",
"[",
"0",
"]",
")",
"//",
"len",
"(",
"state_inputs",
"[",
"0",
"]",
")",
"self",
".",
"_loaded_max_seq_len",
"=",
"seq_len",
"else",
":",
"smallest_array",
"=",
"inputs",
"[",
"0",
"]",
"self",
".",
"_loaded_max_seq_len",
"=",
"1",
"sequences_per_minibatch",
"=",
"(",
"self",
".",
"max_per_device_batch_size",
"//",
"self",
".",
"_loaded_max_seq_len",
"*",
"len",
"(",
"self",
".",
"devices",
")",
")",
"if",
"sequences_per_minibatch",
"<",
"1",
":",
"logger",
".",
"warn",
"(",
"(",
"\"Target minibatch size is {}, however the rollout sequence \"",
"\"length is {}, hence the minibatch size will be raised to \"",
"\"{}.\"",
")",
".",
"format",
"(",
"self",
".",
"max_per_device_batch_size",
",",
"self",
".",
"_loaded_max_seq_len",
",",
"self",
".",
"_loaded_max_seq_len",
"*",
"len",
"(",
"self",
".",
"devices",
")",
")",
")",
"sequences_per_minibatch",
"=",
"1",
"if",
"len",
"(",
"smallest_array",
")",
"<",
"sequences_per_minibatch",
":",
"# Dynamically shrink the batch size if insufficient data",
"sequences_per_minibatch",
"=",
"make_divisible_by",
"(",
"len",
"(",
"smallest_array",
")",
",",
"len",
"(",
"self",
".",
"devices",
")",
")",
"if",
"log_once",
"(",
"\"data_slicing\"",
")",
":",
"logger",
".",
"info",
"(",
"(",
"\"Divided {} rollout sequences, each of length {}, among \"",
"\"{} devices.\"",
")",
".",
"format",
"(",
"len",
"(",
"smallest_array",
")",
",",
"self",
".",
"_loaded_max_seq_len",
",",
"len",
"(",
"self",
".",
"devices",
")",
")",
")",
"if",
"sequences_per_minibatch",
"<",
"len",
"(",
"self",
".",
"devices",
")",
":",
"raise",
"ValueError",
"(",
"\"Must load at least 1 tuple sequence per device. Try \"",
"\"increasing `sgd_minibatch_size` or reducing `max_seq_len` \"",
"\"to ensure that at least one sequence fits per device.\"",
")",
"self",
".",
"_loaded_per_device_batch_size",
"=",
"(",
"sequences_per_minibatch",
"//",
"len",
"(",
"self",
".",
"devices",
")",
"*",
"self",
".",
"_loaded_max_seq_len",
")",
"if",
"len",
"(",
"state_inputs",
")",
">",
"0",
":",
"# First truncate the RNN state arrays to the sequences_per_minib.",
"state_inputs",
"=",
"[",
"make_divisible_by",
"(",
"arr",
",",
"sequences_per_minibatch",
")",
"for",
"arr",
"in",
"state_inputs",
"]",
"# Then truncate the data inputs to match",
"inputs",
"=",
"[",
"arr",
"[",
":",
"len",
"(",
"state_inputs",
"[",
"0",
"]",
")",
"*",
"seq_len",
"]",
"for",
"arr",
"in",
"inputs",
"]",
"assert",
"len",
"(",
"state_inputs",
"[",
"0",
"]",
")",
"*",
"seq_len",
"==",
"len",
"(",
"inputs",
"[",
"0",
"]",
")",
",",
"(",
"len",
"(",
"state_inputs",
"[",
"0",
"]",
")",
",",
"sequences_per_minibatch",
",",
"seq_len",
",",
"len",
"(",
"inputs",
"[",
"0",
"]",
")",
")",
"for",
"ph",
",",
"arr",
"in",
"zip",
"(",
"self",
".",
"loss_inputs",
",",
"inputs",
"+",
"state_inputs",
")",
":",
"feed_dict",
"[",
"ph",
"]",
"=",
"arr",
"truncated_len",
"=",
"len",
"(",
"inputs",
"[",
"0",
"]",
")",
"else",
":",
"for",
"ph",
",",
"arr",
"in",
"zip",
"(",
"self",
".",
"loss_inputs",
",",
"inputs",
"+",
"state_inputs",
")",
":",
"truncated_arr",
"=",
"make_divisible_by",
"(",
"arr",
",",
"sequences_per_minibatch",
")",
"feed_dict",
"[",
"ph",
"]",
"=",
"truncated_arr",
"truncated_len",
"=",
"len",
"(",
"truncated_arr",
")",
"sess",
".",
"run",
"(",
"[",
"t",
".",
"init_op",
"for",
"t",
"in",
"self",
".",
"_towers",
"]",
",",
"feed_dict",
"=",
"feed_dict",
")",
"self",
".",
"num_tuples_loaded",
"=",
"truncated_len",
"tuples_per_device",
"=",
"truncated_len",
"//",
"len",
"(",
"self",
".",
"devices",
")",
"assert",
"tuples_per_device",
">",
"0",
",",
"\"No data loaded?\"",
"assert",
"tuples_per_device",
"%",
"self",
".",
"_loaded_per_device_batch_size",
"==",
"0",
"return",
"tuples_per_device"
]
| Bulk loads the specified inputs into device memory.
The shape of the inputs must conform to the shapes of the input
placeholders this optimizer was constructed with.
The data is split equally across all the devices. If the data is not
evenly divisible by the batch size, excess data will be discarded.
Args:
sess: TensorFlow session.
inputs: List of arrays matching the input placeholders, of shape
[BATCH_SIZE, ...].
state_inputs: List of RNN input arrays. These arrays have size
[BATCH_SIZE / MAX_SEQ_LEN, ...].
Returns:
The number of tuples loaded per device. | [
"Bulk",
"loads",
"the",
"specified",
"inputs",
"into",
"device",
"memory",
"."
]
| python | train | 44.305556 |
AASHE/django-constant-contact | django_constant_contact/models.py | https://github.com/AASHE/django-constant-contact/blob/2a37f00ee62531804414b35637d0dad5992d5822/django_constant_contact/models.py#L107-L162 | def update_email_marketing_campaign(self, email_marketing_campaign,
name, email_content, from_email,
from_name, reply_to_email, subject,
text_content, address,
is_view_as_webpage_enabled=False,
view_as_web_page_link_text='',
view_as_web_page_text='',
is_permission_reminder_enabled=False,
permission_reminder_text=''):
"""Update a Constant Contact email marketing campaign.
Returns the updated EmailMarketingCampaign object.
"""
url = self.api.join(
'/'.join([self.EMAIL_MARKETING_CAMPAIGN_URL,
str(email_marketing_campaign.constant_contact_id)]))
inlined_email_content = self.inline_css(email_content)
minified_email_content = html_minify(inlined_email_content)
worked_around_email_content = work_around(minified_email_content)
data = {
'name': name,
'subject': subject,
'from_name': from_name,
'from_email': from_email,
'reply_to_email': reply_to_email,
'email_content': worked_around_email_content,
'email_content_format': 'HTML',
'text_content': text_content,
'message_footer': {
'organization_name': address['organization_name'],
'address_line_1': address['address_line_1'],
'address_line_2': address['address_line_2'],
'address_line_3': address['address_line_3'],
'city': address['city'],
'state': address['state'],
'international_state': address['international_state'],
'postal_code': address['postal_code'],
'country': address['country']
},
'is_view_as_webpage_enabled': is_view_as_webpage_enabled,
'view_as_web_page_link_text': view_as_web_page_link_text,
'view_as_web_page_text': view_as_web_page_text,
'is_permission_reminder_enabled': is_permission_reminder_enabled,
'permission_reminder_text': permission_reminder_text
}
response = url.put(data=json.dumps(data),
headers={'content-type': 'application/json'})
self.handle_response_status(response)
email_marketing_campaign.data = response.json()
email_marketing_campaign.save()
return email_marketing_campaign | [
"def",
"update_email_marketing_campaign",
"(",
"self",
",",
"email_marketing_campaign",
",",
"name",
",",
"email_content",
",",
"from_email",
",",
"from_name",
",",
"reply_to_email",
",",
"subject",
",",
"text_content",
",",
"address",
",",
"is_view_as_webpage_enabled",
"=",
"False",
",",
"view_as_web_page_link_text",
"=",
"''",
",",
"view_as_web_page_text",
"=",
"''",
",",
"is_permission_reminder_enabled",
"=",
"False",
",",
"permission_reminder_text",
"=",
"''",
")",
":",
"url",
"=",
"self",
".",
"api",
".",
"join",
"(",
"'/'",
".",
"join",
"(",
"[",
"self",
".",
"EMAIL_MARKETING_CAMPAIGN_URL",
",",
"str",
"(",
"email_marketing_campaign",
".",
"constant_contact_id",
")",
"]",
")",
")",
"inlined_email_content",
"=",
"self",
".",
"inline_css",
"(",
"email_content",
")",
"minified_email_content",
"=",
"html_minify",
"(",
"inlined_email_content",
")",
"worked_around_email_content",
"=",
"work_around",
"(",
"minified_email_content",
")",
"data",
"=",
"{",
"'name'",
":",
"name",
",",
"'subject'",
":",
"subject",
",",
"'from_name'",
":",
"from_name",
",",
"'from_email'",
":",
"from_email",
",",
"'reply_to_email'",
":",
"reply_to_email",
",",
"'email_content'",
":",
"worked_around_email_content",
",",
"'email_content_format'",
":",
"'HTML'",
",",
"'text_content'",
":",
"text_content",
",",
"'message_footer'",
":",
"{",
"'organization_name'",
":",
"address",
"[",
"'organization_name'",
"]",
",",
"'address_line_1'",
":",
"address",
"[",
"'address_line_1'",
"]",
",",
"'address_line_2'",
":",
"address",
"[",
"'address_line_2'",
"]",
",",
"'address_line_3'",
":",
"address",
"[",
"'address_line_3'",
"]",
",",
"'city'",
":",
"address",
"[",
"'city'",
"]",
",",
"'state'",
":",
"address",
"[",
"'state'",
"]",
",",
"'international_state'",
":",
"address",
"[",
"'international_state'",
"]",
",",
"'postal_code'",
":",
"address",
"[",
"'postal_code'",
"]",
",",
"'country'",
":",
"address",
"[",
"'country'",
"]",
"}",
",",
"'is_view_as_webpage_enabled'",
":",
"is_view_as_webpage_enabled",
",",
"'view_as_web_page_link_text'",
":",
"view_as_web_page_link_text",
",",
"'view_as_web_page_text'",
":",
"view_as_web_page_text",
",",
"'is_permission_reminder_enabled'",
":",
"is_permission_reminder_enabled",
",",
"'permission_reminder_text'",
":",
"permission_reminder_text",
"}",
"response",
"=",
"url",
".",
"put",
"(",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
",",
"headers",
"=",
"{",
"'content-type'",
":",
"'application/json'",
"}",
")",
"self",
".",
"handle_response_status",
"(",
"response",
")",
"email_marketing_campaign",
".",
"data",
"=",
"response",
".",
"json",
"(",
")",
"email_marketing_campaign",
".",
"save",
"(",
")",
"return",
"email_marketing_campaign"
]
| Update a Constant Contact email marketing campaign.
Returns the updated EmailMarketingCampaign object. | [
"Update",
"a",
"Constant",
"Contact",
"email",
"marketing",
"campaign",
".",
"Returns",
"the",
"updated",
"EmailMarketingCampaign",
"object",
"."
]
| python | train | 47.125 |
gwastro/pycbc-glue | pycbc_glue/ligolw/utils/process.py | https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/utils/process.py#L185-L201 | def get_process_params(xmldoc, program, param, require_unique_program = True):
"""
Return a list of the values stored in the process_params table for
params named param for the program(s) named program. The values
are returned as Python native types, not as the strings appearing
in the XML document. If require_unique_program is True (default),
then the document must contain exactly one program with the
requested name, otherwise ValueError is raised. If
require_unique_program is not True, then there must be at least one
program with the requested name otherwise ValueError is raised.
"""
process_ids = lsctables.ProcessTable.get_table(xmldoc).get_ids_by_program(program)
if len(process_ids) < 1:
raise ValueError("process table must contain at least one program named '%s'" % program)
elif require_unique_program and len(process_ids) != 1:
raise ValueError("process table must contain exactly one program named '%s'" % program)
return [row.pyvalue for row in lsctables.ProcessParamsTable.get_table(xmldoc) if (row.process_id in process_ids) and (row.param == param)] | [
"def",
"get_process_params",
"(",
"xmldoc",
",",
"program",
",",
"param",
",",
"require_unique_program",
"=",
"True",
")",
":",
"process_ids",
"=",
"lsctables",
".",
"ProcessTable",
".",
"get_table",
"(",
"xmldoc",
")",
".",
"get_ids_by_program",
"(",
"program",
")",
"if",
"len",
"(",
"process_ids",
")",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"process table must contain at least one program named '%s'\"",
"%",
"program",
")",
"elif",
"require_unique_program",
"and",
"len",
"(",
"process_ids",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"process table must contain exactly one program named '%s'\"",
"%",
"program",
")",
"return",
"[",
"row",
".",
"pyvalue",
"for",
"row",
"in",
"lsctables",
".",
"ProcessParamsTable",
".",
"get_table",
"(",
"xmldoc",
")",
"if",
"(",
"row",
".",
"process_id",
"in",
"process_ids",
")",
"and",
"(",
"row",
".",
"param",
"==",
"param",
")",
"]"
]
| Return a list of the values stored in the process_params table for
params named param for the program(s) named program. The values
are returned as Python native types, not as the strings appearing
in the XML document. If require_unique_program is True (default),
then the document must contain exactly one program with the
requested name, otherwise ValueError is raised. If
require_unique_program is not True, then there must be at least one
program with the requested name otherwise ValueError is raised. | [
"Return",
"a",
"list",
"of",
"the",
"values",
"stored",
"in",
"the",
"process_params",
"table",
"for",
"params",
"named",
"param",
"for",
"the",
"program",
"(",
"s",
")",
"named",
"program",
".",
"The",
"values",
"are",
"returned",
"as",
"Python",
"native",
"types",
"not",
"as",
"the",
"strings",
"appearing",
"in",
"the",
"XML",
"document",
".",
"If",
"require_unique_program",
"is",
"True",
"(",
"default",
")",
"then",
"the",
"document",
"must",
"contain",
"exactly",
"one",
"program",
"with",
"the",
"requested",
"name",
"otherwise",
"ValueError",
"is",
"raised",
".",
"If",
"require_unique_program",
"is",
"not",
"True",
"then",
"there",
"must",
"be",
"at",
"least",
"one",
"program",
"with",
"the",
"requested",
"name",
"otherwise",
"ValueError",
"is",
"raised",
"."
]
| python | train | 63.294118 |
CalebBell/thermo | thermo/volume.py | https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/volume.py#L2360-L2378 | def load_all_methods(self):
r'''Method which picks out coefficients for the specified chemical
from the various dictionaries and DataFrames storing it. All data is
stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,
and :obj:`all_methods` as a set of methods for which the data exists for.
Called on initialization only. See the source code for the variables at
which the coefficients are stored. The coefficients can safely be
altered once the class is initialized. This method can be called again
to reset the parameters.
'''
methods = []
if self.CASRN in CRC_inorg_s_const_data.index:
methods.append(CRC_INORG_S)
self.CRC_INORG_S_Vm = float(CRC_inorg_s_const_data.at[self.CASRN, 'Vm'])
# if all((self.Tt, self.Vml_Tt, self.MW)):
# self.rhol_Tt = Vm_to_rho(self.Vml_Tt, self.MW)
# methods.append(GOODMAN)
self.all_methods = set(methods) | [
"def",
"load_all_methods",
"(",
"self",
")",
":",
"methods",
"=",
"[",
"]",
"if",
"self",
".",
"CASRN",
"in",
"CRC_inorg_s_const_data",
".",
"index",
":",
"methods",
".",
"append",
"(",
"CRC_INORG_S",
")",
"self",
".",
"CRC_INORG_S_Vm",
"=",
"float",
"(",
"CRC_inorg_s_const_data",
".",
"at",
"[",
"self",
".",
"CASRN",
",",
"'Vm'",
"]",
")",
"# if all((self.Tt, self.Vml_Tt, self.MW)):",
"# self.rhol_Tt = Vm_to_rho(self.Vml_Tt, self.MW)",
"# methods.append(GOODMAN)",
"self",
".",
"all_methods",
"=",
"set",
"(",
"methods",
")"
]
| r'''Method which picks out coefficients for the specified chemical
from the various dictionaries and DataFrames storing it. All data is
stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,
and :obj:`all_methods` as a set of methods for which the data exists for.
Called on initialization only. See the source code for the variables at
which the coefficients are stored. The coefficients can safely be
altered once the class is initialized. This method can be called again
to reset the parameters. | [
"r",
"Method",
"which",
"picks",
"out",
"coefficients",
"for",
"the",
"specified",
"chemical",
"from",
"the",
"various",
"dictionaries",
"and",
"DataFrames",
"storing",
"it",
".",
"All",
"data",
"is",
"stored",
"as",
"attributes",
".",
"This",
"method",
"also",
"sets",
":",
"obj",
":",
"Tmin",
":",
"obj",
":",
"Tmax",
"and",
":",
"obj",
":",
"all_methods",
"as",
"a",
"set",
"of",
"methods",
"for",
"which",
"the",
"data",
"exists",
"for",
"."
]
| python | valid | 52 |
aewallin/allantools | examples/noise-color_and_PSD.py | https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/examples/noise-color_and_PSD.py#L7-L18 | def many_psds(k=2,fs=1.0, b0=1.0, N=1024):
""" compute average of many PSDs """
psd=[]
for j in range(k):
print j
x = noise.white(N=2*4096,b0=b0,fs=fs)
f, tmp = noise.numpy_psd(x,fs)
if j==0:
psd = tmp
else:
psd = psd + tmp
return f, psd/k | [
"def",
"many_psds",
"(",
"k",
"=",
"2",
",",
"fs",
"=",
"1.0",
",",
"b0",
"=",
"1.0",
",",
"N",
"=",
"1024",
")",
":",
"psd",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"k",
")",
":",
"print",
"j",
"x",
"=",
"noise",
".",
"white",
"(",
"N",
"=",
"2",
"*",
"4096",
",",
"b0",
"=",
"b0",
",",
"fs",
"=",
"fs",
")",
"f",
",",
"tmp",
"=",
"noise",
".",
"numpy_psd",
"(",
"x",
",",
"fs",
")",
"if",
"j",
"==",
"0",
":",
"psd",
"=",
"tmp",
"else",
":",
"psd",
"=",
"psd",
"+",
"tmp",
"return",
"f",
",",
"psd",
"/",
"k"
]
| compute average of many PSDs | [
"compute",
"average",
"of",
"many",
"PSDs"
]
| python | train | 25.666667 |
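`many_psds` above averages the periodograms of repeated white-noise draws (note it is Python 2 code: `print j`, and it leans on the example-local `noise` helper). A NumPy-only sketch of the same averaging idea; the scaling that puts the flat level near `b0` is an assumption about the PSD convention, not taken from allantools:

```python
import numpy as np

def averaged_white_psd(k=2, fs=1.0, b0=1.0, n=4096, seed=0):
    # Average k one-sided periodograms of white noise with target PSD level b0.
    rng = np.random.default_rng(seed)
    psd_sum = np.zeros(n // 2 + 1)
    for _ in range(k):
        x = rng.normal(scale=np.sqrt(b0 * fs / 2.0), size=n)  # white noise draw
        spec = np.fft.rfft(x)
        psd_sum += (2.0 / (fs * n)) * np.abs(spec) ** 2       # one-sided periodogram
    freqs = np.fft.rfftfreq(n, d=1.0 / fs)
    return freqs, psd_sum / k

f, psd = averaged_white_psd(k=8)
print(np.mean(psd[1:-1]))  # hovers around b0 = 1.0
```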
Terrance/SkPy | skpy/conn.py | https://github.com/Terrance/SkPy/blob/0f9489c94e8ec4d3effab4314497428872a80ad1/skpy/conn.py#L877-L885 | def ping(self, timeout=12):
"""
Send a keep-alive request for the endpoint.
Args:
timeout (int): maximum amount of time for the endpoint to stay active
"""
self.conn("POST", "{0}/users/ME/endpoints/{1}/active".format(self.conn.msgsHost, self.id),
auth=SkypeConnection.Auth.RegToken, json={"timeout": timeout}) | [
"def",
"ping",
"(",
"self",
",",
"timeout",
"=",
"12",
")",
":",
"self",
".",
"conn",
"(",
"\"POST\"",
",",
"\"{0}/users/ME/endpoints/{1}/active\"",
".",
"format",
"(",
"self",
".",
"conn",
".",
"msgsHost",
",",
"self",
".",
"id",
")",
",",
"auth",
"=",
"SkypeConnection",
".",
"Auth",
".",
"RegToken",
",",
"json",
"=",
"{",
"\"timeout\"",
":",
"timeout",
"}",
")"
]
| Send a keep-alive request for the endpoint.
Args:
timeout (int): maximum amount of time for the endpoint to stay active | [
"Send",
"a",
"keep",
"-",
"alive",
"request",
"for",
"the",
"endpoint",
"."
]
| python | test | 41.333333 |
iotile/coretools | iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/jar.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/jar.py#L80-L90 | def jarFlags(target, source, env, for_signature):
"""If we have a manifest, make sure that the 'm'
flag is specified."""
jarflags = env.subst('$JARFLAGS', target=target, source=source)
for src in source:
contents = src.get_text_contents()
if contents[:16] == "Manifest-Version":
if not 'm' in jarflags:
return jarflags + 'm'
break
return jarflags | [
"def",
"jarFlags",
"(",
"target",
",",
"source",
",",
"env",
",",
"for_signature",
")",
":",
"jarflags",
"=",
"env",
".",
"subst",
"(",
"'$JARFLAGS'",
",",
"target",
"=",
"target",
",",
"source",
"=",
"source",
")",
"for",
"src",
"in",
"source",
":",
"contents",
"=",
"src",
".",
"get_text_contents",
"(",
")",
"if",
"contents",
"[",
":",
"16",
"]",
"==",
"\"Manifest-Version\"",
":",
"if",
"not",
"'m'",
"in",
"jarflags",
":",
"return",
"jarflags",
"+",
"'m'",
"break",
"return",
"jarflags"
]
| If we have a manifest, make sure that the 'm'
flag is specified. | [
"If",
"we",
"have",
"a",
"manifest",
"make",
"sure",
"that",
"the",
"m",
"flag",
"is",
"specified",
"."
]
| python | train | 37.454545 |
stevelittlefish/littlefish | littlefish/colourutil.py | https://github.com/stevelittlefish/littlefish/blob/6deee7f81fab30716c743efe2e94e786c6e17016/littlefish/colourutil.py#L115-L143 | def blend_html_colour_to_white(html_colour, alpha):
"""
:param html_colour: Colour string like FF552B or #334455
:param alpha: Alpha value
:return: Html colour alpha blended onto white
"""
html_colour = html_colour.upper()
has_hash = False
if html_colour[0] == '#':
has_hash = True
html_colour = html_colour[1:]
r_str = html_colour[0:2]
g_str = html_colour[2:4]
b_str = html_colour[4:6]
r = int(r_str, 16)
g = int(g_str, 16)
b = int(b_str, 16)
r = int(alpha * r + (1 - alpha) * 255)
g = int(alpha * g + (1 - alpha) * 255)
b = int(alpha * b + (1 - alpha) * 255)
out = '{:02X}{:02X}{:02X}'.format(r, g, b)
if has_hash:
out = '#' + out
return out | [
"def",
"blend_html_colour_to_white",
"(",
"html_colour",
",",
"alpha",
")",
":",
"html_colour",
"=",
"html_colour",
".",
"upper",
"(",
")",
"has_hash",
"=",
"False",
"if",
"html_colour",
"[",
"0",
"]",
"==",
"'#'",
":",
"has_hash",
"=",
"True",
"html_colour",
"=",
"html_colour",
"[",
"1",
":",
"]",
"r_str",
"=",
"html_colour",
"[",
"0",
":",
"2",
"]",
"g_str",
"=",
"html_colour",
"[",
"2",
":",
"4",
"]",
"b_str",
"=",
"html_colour",
"[",
"4",
":",
"6",
"]",
"r",
"=",
"int",
"(",
"r_str",
",",
"16",
")",
"g",
"=",
"int",
"(",
"g_str",
",",
"16",
")",
"b",
"=",
"int",
"(",
"b_str",
",",
"16",
")",
"r",
"=",
"int",
"(",
"alpha",
"*",
"r",
"+",
"(",
"1",
"-",
"alpha",
")",
"*",
"255",
")",
"g",
"=",
"int",
"(",
"alpha",
"*",
"g",
"+",
"(",
"1",
"-",
"alpha",
")",
"*",
"255",
")",
"b",
"=",
"int",
"(",
"alpha",
"*",
"b",
"+",
"(",
"1",
"-",
"alpha",
")",
"*",
"255",
")",
"out",
"=",
"'{:02X}{:02X}{:02X}'",
".",
"format",
"(",
"r",
",",
"g",
",",
"b",
")",
"if",
"has_hash",
":",
"out",
"=",
"'#'",
"+",
"out",
"return",
"out"
]
| :param html_colour: Colour string like FF552B or #334455
:param alpha: Alpha value
:return: Html colour alpha blended onto white | [
":",
"param",
"html_colour",
":",
"Colour",
"string",
"like",
"FF552B",
"or",
"#334455",
":",
"param",
"alpha",
":",
"Alpha",
"value",
":",
"return",
":",
"Html",
"colour",
"alpha",
"blended",
"onto",
"white"
]
| python | test | 25.103448 |
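`blend_html_colour_to_white` in the row above is plain integer arithmetic, so its output is easy to check by hand. A usage sketch; the `littlefish.colourutil` import path is inferred from the file path in the row and may differ in the installed package:

```python
from littlefish.colourutil import blend_html_colour_to_white

# Pure red at 50% opacity over white: 255 stays 255, 0 blends up to 127 (0x7F).
print(blend_html_colour_to_white('#FF0000', 0.5))   # '#FF7F7F'
print(blend_html_colour_to_white('0000FF', 0.25))   # 'BFBFFF' (no leading '#' in, none out)
```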
samjabrahams/anchorhub | anchorhub/builtin/github/writer.py | https://github.com/samjabrahams/anchorhub/blob/5ade359b08297d4003a5f477389c01de9e634b54/anchorhub/builtin/github/writer.py#L12-L44 | def make_github_markdown_writer(opts):
"""
Creates a Writer object used for parsing and writing Markdown files with
a GitHub style anchor transformation
opts is a namespace object containing runtime options. It should
generally include the following attributes:
* 'open': a string corresponding to the opening portion of the wrapper
identifier. Built-in AnchorHub usage defaults this to '{'
* 'close: a string corresponding ot the closing portion of the wrapper
identifier. Built-in AnchorHub usage defaults this to '}'
* 'wrapper_regex': An escaped regular expression that matches tags
located inside of wrappers
:param opts: namespace object, usually created from command-line
arguments, that is used to pass runtime options to concrete
WriterStrategy objects.
:return: A Writer object designed for parsing, modifying, and writing
AnchorHub tags to converted anchors in Markdown files using GitHub style
anchors
"""
assert hasattr(opts, 'wrapper_regex')
atx = MarkdownATXWriterStrategy(opts, 'ATX headers')
setext = MarkdownSetextWriterStrategy(opts, 'Setext headers')
inline = MarkdownInlineLinkWriterStrategy(opts, 'inline links')
ref = MarkdownReferenceLinkWriterStrategy(opts, 'reference links')
code_block_switch = ghswitches.code_block_switch
strategies = [atx, setext, inline, ref]
switches = [code_block_switch]
return Writer(strategies, switches=switches) | [
"def",
"make_github_markdown_writer",
"(",
"opts",
")",
":",
"assert",
"hasattr",
"(",
"opts",
",",
"'wrapper_regex'",
")",
"atx",
"=",
"MarkdownATXWriterStrategy",
"(",
"opts",
",",
"'ATX headers'",
")",
"setext",
"=",
"MarkdownSetextWriterStrategy",
"(",
"opts",
",",
"'Setext headers'",
")",
"inline",
"=",
"MarkdownInlineLinkWriterStrategy",
"(",
"opts",
",",
"'inline links'",
")",
"ref",
"=",
"MarkdownReferenceLinkWriterStrategy",
"(",
"opts",
",",
"'reference links'",
")",
"code_block_switch",
"=",
"ghswitches",
".",
"code_block_switch",
"strategies",
"=",
"[",
"atx",
",",
"setext",
",",
"inline",
",",
"ref",
"]",
"switches",
"=",
"[",
"code_block_switch",
"]",
"return",
"Writer",
"(",
"strategies",
",",
"switches",
"=",
"switches",
")"
]
| Creates a Writer object used for parsing and writing Markdown files with
a GitHub style anchor transformation
opts is a namespace object containing runtime options. It should
generally include the following attributes:
* 'open': a string corresponding to the opening portion of the wrapper
identifier. Built-in AnchorHub usage defaults this to '{'
* 'close: a string corresponding ot the closing portion of the wrapper
identifier. Built-in AnchorHub usage defaults this to '}'
* 'wrapper_regex': An escaped regular expression that matches tags
located inside of wrappers
:param opts: namespace object, usually created from command-line
arguments, that is used to pass runtime options to concrete
WriterStrategy objects.
:return: A Writer object designed for parsing, modifying, and writing
AnchorHub tags to converted anchors in Markdown files using GitHub style
anchors | [
"Creates",
"a",
"Writer",
"object",
"used",
"for",
"parsing",
"and",
"writing",
"Markdown",
"files",
"with",
"a",
"GitHub",
"style",
"anchor",
"transformation"
]
| python | train | 45.818182 |
ipfs/py-ipfs-api | ipfsapi/multipart.py | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/multipart.py#L244-L265 | def gen_chunks(self, gen):
"""Generates byte chunks of a given size.
Takes a bytes generator and yields chunks of a maximum of
``chunk_size`` bytes.
Parameters
----------
gen : generator
The bytes generator that produces the bytes
"""
for data in gen:
size = len(data)
if size < self.chunk_size:
yield data
else:
mv = buffer(data)
offset = 0
while offset < size:
nb = min(self.chunk_size, size - offset)
yield mv[offset:offset + nb]
offset += nb | [
"def",
"gen_chunks",
"(",
"self",
",",
"gen",
")",
":",
"for",
"data",
"in",
"gen",
":",
"size",
"=",
"len",
"(",
"data",
")",
"if",
"size",
"<",
"self",
".",
"chunk_size",
":",
"yield",
"data",
"else",
":",
"mv",
"=",
"buffer",
"(",
"data",
")",
"offset",
"=",
"0",
"while",
"offset",
"<",
"size",
":",
"nb",
"=",
"min",
"(",
"self",
".",
"chunk_size",
",",
"size",
"-",
"offset",
")",
"yield",
"mv",
"[",
"offset",
":",
"offset",
"+",
"nb",
"]",
"offset",
"+=",
"nb"
]
| Generates byte chunks of a given size.
Takes a bytes generator and yields chunks of a maximum of
``chunk_size`` bytes.
Parameters
----------
gen : generator
The bytes generator that produces the bytes | [
"Generates",
"byte",
"chunks",
"of",
"a",
"given",
"size",
"."
]
| python | train | 30.090909 |
intuition-io/intuition | intuition/data/universe.py | https://github.com/intuition-io/intuition/blob/cd517e6b3b315a743eb4d0d0dc294e264ab913ce/intuition/data/universe.py#L44-L49 | def _load_market_scheme(self):
''' Load market yaml description '''
try:
self.scheme = yaml.load(open(self.scheme_path, 'r'))
except Exception, error:
raise LoadMarketSchemeFailed(reason=error) | [
"def",
"_load_market_scheme",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"scheme",
"=",
"yaml",
".",
"load",
"(",
"open",
"(",
"self",
".",
"scheme_path",
",",
"'r'",
")",
")",
"except",
"Exception",
",",
"error",
":",
"raise",
"LoadMarketSchemeFailed",
"(",
"reason",
"=",
"error",
")"
]
| Load market yaml description | [
"Load",
"market",
"yaml",
"description"
]
| python | train | 39.333333 |
lehins/python-wepay | wepay/calls/checkout.py | https://github.com/lehins/python-wepay/blob/414d25a1a8d0ecb22a3ddd1f16c60b805bb52a1f/wepay/calls/checkout.py#L165-L187 | def __capture(self, checkout_id, **kwargs):
"""Call documentation: `/checkout/capture
<https://www.wepay.com/developer/reference/checkout#capture>`_, plus
extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
"""
params = {
'checkout_id': checkout_id
}
return self.make_call(self.__capture, params, kwargs) | [
"def",
"__capture",
"(",
"self",
",",
"checkout_id",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"{",
"'checkout_id'",
":",
"checkout_id",
"}",
"return",
"self",
".",
"make_call",
"(",
"self",
".",
"__capture",
",",
"params",
",",
"kwargs",
")"
]
| Call documentation: `/checkout/capture
<https://www.wepay.com/developer/reference/checkout#capture>`_, plus
extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay` | [
"Call",
"documentation",
":",
"/",
"checkout",
"/",
"capture",
"<https",
":",
"//",
"www",
".",
"wepay",
".",
"com",
"/",
"developer",
"/",
"reference",
"/",
"checkout#capture",
">",
"_",
"plus",
"extra",
"keyword",
"parameters",
":",
":",
"keyword",
"str",
"access_token",
":",
"will",
"be",
"used",
"instead",
"of",
"instance",
"s",
"access_token",
"with",
"batch_mode",
"=",
"True",
"will",
"set",
"authorization",
"param",
"to",
"it",
"s",
"value",
"."
]
| python | train | 36.173913 |
3DLIRIOUS/MeshLabXML | meshlabxml/util.py | https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/util.py#L90-L104 | def make_list(var, num_terms=1):
""" Make a variable a list if it is not already
If variable is not a list it will make it a list of the correct length with
all terms identical.
"""
if not isinstance(var, list):
if isinstance(var, tuple):
var = list(var)
else:
var = [var]
#if len(var) == 1:
for _ in range(1, num_terms):
var.append(var[0])
return var | [
"def",
"make_list",
"(",
"var",
",",
"num_terms",
"=",
"1",
")",
":",
"if",
"not",
"isinstance",
"(",
"var",
",",
"list",
")",
":",
"if",
"isinstance",
"(",
"var",
",",
"tuple",
")",
":",
"var",
"=",
"list",
"(",
"var",
")",
"else",
":",
"var",
"=",
"[",
"var",
"]",
"#if len(var) == 1:",
"for",
"_",
"in",
"range",
"(",
"1",
",",
"num_terms",
")",
":",
"var",
".",
"append",
"(",
"var",
"[",
"0",
"]",
")",
"return",
"var"
]
| Make a variable a list if it is not already
If variable is not a list it will make it a list of the correct length with
all terms identical. | [
"Make",
"a",
"variable",
"a",
"list",
"if",
"it",
"is",
"not",
"already"
]
| python | test | 28.933333 |
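`make_list` above turns a scalar (or tuple) into a list and then pads to `num_terms` entries by repeating the first element; with the default `num_terms=1` an existing list is returned unchanged. A short usage sketch; the `meshlabxml.util` import path is inferred from the row's file path:

```python
from meshlabxml.util import make_list

print(make_list(5, num_terms=3))   # [5, 5, 5] (scalar repeated to the requested length)
print(make_list([1, 2, 3]))        # [1, 2, 3] (already a list, returned unchanged)
```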
rkhleics/wagtailmenus | wagtailmenus/templatetags/menu_tags.py | https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/templatetags/menu_tags.py#L84-L113 | def section_menu(
context, show_section_root=True, show_multiple_levels=True,
apply_active_classes=True, allow_repeating_parents=True,
max_levels=settings.DEFAULT_SECTION_MENU_MAX_LEVELS,
template='', sub_menu_template='', sub_menu_templates=None,
use_specific=settings.DEFAULT_SECTION_MENU_USE_SPECIFIC,
use_absolute_page_urls=False, add_sub_menus_inline=None, **kwargs
):
"""Render a section menu for the current section."""
validate_supplied_values('section_menu', max_levels=max_levels,
use_specific=use_specific)
if not show_multiple_levels:
max_levels = 1
menu_class = settings.objects.SECTION_MENU_CLASS
return menu_class.render_from_tag(
context=context,
max_levels=max_levels,
use_specific=use_specific,
apply_active_classes=apply_active_classes,
allow_repeating_parents=allow_repeating_parents,
use_absolute_page_urls=use_absolute_page_urls,
add_sub_menus_inline=add_sub_menus_inline,
template_name=template,
sub_menu_template_name=sub_menu_template,
sub_menu_template_names=split_if_string(sub_menu_templates),
show_section_root=show_section_root,
**kwargs
) | [
"def",
"section_menu",
"(",
"context",
",",
"show_section_root",
"=",
"True",
",",
"show_multiple_levels",
"=",
"True",
",",
"apply_active_classes",
"=",
"True",
",",
"allow_repeating_parents",
"=",
"True",
",",
"max_levels",
"=",
"settings",
".",
"DEFAULT_SECTION_MENU_MAX_LEVELS",
",",
"template",
"=",
"''",
",",
"sub_menu_template",
"=",
"''",
",",
"sub_menu_templates",
"=",
"None",
",",
"use_specific",
"=",
"settings",
".",
"DEFAULT_SECTION_MENU_USE_SPECIFIC",
",",
"use_absolute_page_urls",
"=",
"False",
",",
"add_sub_menus_inline",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"validate_supplied_values",
"(",
"'section_menu'",
",",
"max_levels",
"=",
"max_levels",
",",
"use_specific",
"=",
"use_specific",
")",
"if",
"not",
"show_multiple_levels",
":",
"max_levels",
"=",
"1",
"menu_class",
"=",
"settings",
".",
"objects",
".",
"SECTION_MENU_CLASS",
"return",
"menu_class",
".",
"render_from_tag",
"(",
"context",
"=",
"context",
",",
"max_levels",
"=",
"max_levels",
",",
"use_specific",
"=",
"use_specific",
",",
"apply_active_classes",
"=",
"apply_active_classes",
",",
"allow_repeating_parents",
"=",
"allow_repeating_parents",
",",
"use_absolute_page_urls",
"=",
"use_absolute_page_urls",
",",
"add_sub_menus_inline",
"=",
"add_sub_menus_inline",
",",
"template_name",
"=",
"template",
",",
"sub_menu_template_name",
"=",
"sub_menu_template",
",",
"sub_menu_template_names",
"=",
"split_if_string",
"(",
"sub_menu_templates",
")",
",",
"show_section_root",
"=",
"show_section_root",
",",
"*",
"*",
"kwargs",
")"
]
| Render a section menu for the current section. | [
"Render",
"a",
"section",
"menu",
"for",
"the",
"current",
"section",
"."
]
| python | train | 40.766667 |
orbingol/NURBS-Python | geomdl/exchange.py | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/exchange.py#L303-L336 | def export_yaml(obj, file_name):
""" Exports curves and surfaces in YAML format.
.. note::
Requires `ruamel.yaml <https://pypi.org/project/ruamel.yaml/>`_ package.
YAML format is also used by the `geomdl command-line application <https://github.com/orbingol/geomdl-cli>`_
as a way to input shape data from the command line.
:param obj: input geometry
:type obj: abstract.SplineGeometry, multi.AbstractContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
"""
def callback(data):
# Ref: https://yaml.readthedocs.io/en/latest/example.html#output-of-dump-as-a-string
stream = StringIO()
yaml = YAML()
yaml.dump(data, stream)
return stream.getvalue()
# Check if it is possible to import 'ruamel.yaml'
try:
from ruamel.yaml import YAML
except ImportError:
raise exch.GeomdlException("Please install 'ruamel.yaml' package to use YAML format: pip install ruamel.yaml")
# Export data
exported_data = exch.export_dict_str(obj=obj, callback=callback)
# Write to file
return exch.write_file(file_name, exported_data) | [
"def",
"export_yaml",
"(",
"obj",
",",
"file_name",
")",
":",
"def",
"callback",
"(",
"data",
")",
":",
"# Ref: https://yaml.readthedocs.io/en/latest/example.html#output-of-dump-as-a-string",
"stream",
"=",
"StringIO",
"(",
")",
"yaml",
"=",
"YAML",
"(",
")",
"yaml",
".",
"dump",
"(",
"data",
",",
"stream",
")",
"return",
"stream",
".",
"getvalue",
"(",
")",
"# Check if it is possible to import 'ruamel.yaml'",
"try",
":",
"from",
"ruamel",
".",
"yaml",
"import",
"YAML",
"except",
"ImportError",
":",
"raise",
"exch",
".",
"GeomdlException",
"(",
"\"Please install 'ruamel.yaml' package to use YAML format: pip install ruamel.yaml\"",
")",
"# Export data",
"exported_data",
"=",
"exch",
".",
"export_dict_str",
"(",
"obj",
"=",
"obj",
",",
"callback",
"=",
"callback",
")",
"# Write to file",
"return",
"exch",
".",
"write_file",
"(",
"file_name",
",",
"exported_data",
")"
]
| Exports curves and surfaces in YAML format.
.. note::
Requires `ruamel.yaml <https://pypi.org/project/ruamel.yaml/>`_ package.
YAML format is also used by the `geomdl command-line application <https://github.com/orbingol/geomdl-cli>`_
as a way to input shape data from the command line.
:param obj: input geometry
:type obj: abstract.SplineGeometry, multi.AbstractContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file | [
"Exports",
"curves",
"and",
"surfaces",
"in",
"YAML",
"format",
"."
]
| python | train | 35.117647 |
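`export_yaml` accepts any spline geometry, so a minimal B-spline curve is enough to exercise it. The sketch assumes `geomdl` and `ruamel.yaml` are installed; the curve data and output filename are made up for illustration:

```python
from geomdl import BSpline
from geomdl import exchange

crv = BSpline.Curve()
crv.degree = 3
crv.ctrlpts = [[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [2.0, -1.0, 0.0], [3.0, 0.0, 0.0]]
crv.knotvector = [0, 0, 0, 0, 1, 1, 1, 1]  # clamped: 4 control points + degree 3 + 1 knots

exchange.export_yaml(crv, "curve.yaml")  # writes the curve description in YAML form
```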
PyHDI/Pyverilog | pyverilog/vparser/parser.py | https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L2177-L2180 | def p_single_statement_systemcall(self, p):
'single_statement : systemcall SEMICOLON'
p[0] = SingleStatement(p[1], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | [
"def",
"p_single_statement_systemcall",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"SingleStatement",
"(",
"p",
"[",
"1",
"]",
",",
"lineno",
"=",
"p",
".",
"lineno",
"(",
"1",
")",
")",
"p",
".",
"set_lineno",
"(",
"0",
",",
"p",
".",
"lineno",
"(",
"1",
")",
")"
]
| single_statement : systemcall SEMICOLON | [
"single_statement",
":",
"systemcall",
"SEMICOLON"
]
| python | train | 46 |
StackStorm/pybind | pybind/slxos/v17s_1_02/routing_system/evpn_config/evpn/evpn_instance/vlan/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/routing_system/evpn_config/evpn/evpn_instance/vlan/__init__.py#L94-L115 | def _set_vlan_add(self, v, load=False):
"""
Setter method for vlan_add, mapped from YANG variable /routing_system/evpn_config/evpn/evpn_instance/vlan/vlan_add (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlan_add is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlan_add() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=vlan_add.vlan_add, is_container='container', presence=False, yang_name="vlan-add", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Add/Remove VLANs from EVPN Instance', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vlan_add must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=vlan_add.vlan_add, is_container='container', presence=False, yang_name="vlan-add", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Add/Remove VLANs from EVPN Instance', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__vlan_add = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_vlan_add",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"vlan_add",
".",
"vlan_add",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"vlan-add\"",
",",
"rest_name",
"=",
"\"\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Add/Remove VLANs from EVPN Instance'",
",",
"u'cli-drop-node-name'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-bgp'",
",",
"defining_module",
"=",
"'brocade-bgp'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"vlan_add must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=vlan_add.vlan_add, is_container='container', presence=False, yang_name=\"vlan-add\", rest_name=\"\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Add/Remove VLANs from EVPN Instance', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__vlan_add",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
]
| Setter method for vlan_add, mapped from YANG variable /routing_system/evpn_config/evpn/evpn_instance/vlan/vlan_add (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlan_add is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlan_add() directly. | [
"Setter",
"method",
"for",
"vlan_add",
"mapped",
"from",
"YANG",
"variable",
"/",
"routing_system",
"/",
"evpn_config",
"/",
"evpn",
"/",
"evpn_instance",
"/",
"vlan",
"/",
"vlan_add",
"(",
"container",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_vlan_add",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_vlan_add",
"()",
"directly",
"."
]
| python | train | 75.227273 |
chainer/chainerui | chainerui/tasks/collect_results.py | https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/tasks/collect_results.py#L32-L59 | def collect_results(project, force=False):
"""collect_results."""
if not project.crawlable:
return project
now = datetime.datetime.now()
if (now - project.updated_at).total_seconds() < 4 and (not force):
return project
result_paths = []
if os.path.isdir(project.path_name):
result_paths.extend(_list_result_paths(project.path_name))
registered_results = db.session.query(Result.path_name).filter_by(
project_id=project.id
).all()
registered_paths = {r.path_name for r in registered_results}
for result_path in result_paths:
if result_path not in registered_paths:
_register_result(project.id, result_path)
project.updated_at = datetime.datetime.now()
db.session.commit()
return project | [
"def",
"collect_results",
"(",
"project",
",",
"force",
"=",
"False",
")",
":",
"if",
"not",
"project",
".",
"crawlable",
":",
"return",
"project",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"if",
"(",
"now",
"-",
"project",
".",
"updated_at",
")",
".",
"total_seconds",
"(",
")",
"<",
"4",
"and",
"(",
"not",
"force",
")",
":",
"return",
"project",
"result_paths",
"=",
"[",
"]",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"project",
".",
"path_name",
")",
":",
"result_paths",
".",
"extend",
"(",
"_list_result_paths",
"(",
"project",
".",
"path_name",
")",
")",
"registered_results",
"=",
"db",
".",
"session",
".",
"query",
"(",
"Result",
".",
"path_name",
")",
".",
"filter_by",
"(",
"project_id",
"=",
"project",
".",
"id",
")",
".",
"all",
"(",
")",
"registered_paths",
"=",
"{",
"r",
".",
"path_name",
"for",
"r",
"in",
"registered_results",
"}",
"for",
"result_path",
"in",
"result_paths",
":",
"if",
"result_path",
"not",
"in",
"registered_paths",
":",
"_register_result",
"(",
"project",
".",
"id",
",",
"result_path",
")",
"project",
".",
"updated_at",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"return",
"project"
]
| collect_results. | [
"collect_results",
"."
]
| python | train | 27.5 |
knipknap/SpiffWorkflow | SpiffWorkflow/util/weakmethod.py | https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/util/weakmethod.py#L117-L132 | def ref(function, callback=None):
"""
Returns a weak reference to the given method or function.
If the callback argument is not None, it is called as soon
as the referenced function is garbage deleted.
:type function: callable
:param function: The function to reference.
:type callback: callable
:param callback: Called when the function dies.
"""
try:
function.__func__
except AttributeError:
return _WeakMethodFree(function, callback)
return _WeakMethodBound(function, callback) | [
"def",
"ref",
"(",
"function",
",",
"callback",
"=",
"None",
")",
":",
"try",
":",
"function",
".",
"__func__",
"except",
"AttributeError",
":",
"return",
"_WeakMethodFree",
"(",
"function",
",",
"callback",
")",
"return",
"_WeakMethodBound",
"(",
"function",
",",
"callback",
")"
]
| Returns a weak reference to the given method or function.
If the callback argument is not None, it is called as soon
as the referenced function is garbage deleted.
:type function: callable
:param function: The function to reference.
:type callback: callable
:param callback: Called when the function dies. | [
"Returns",
"a",
"weak",
"reference",
"to",
"the",
"given",
"method",
"or",
"function",
".",
"If",
"the",
"callback",
"argument",
"is",
"not",
"None",
"it",
"is",
"called",
"as",
"soon",
"as",
"the",
"referenced",
"function",
"is",
"garbage",
"deleted",
"."
]
| python | valid | 33.25 |
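The ref() helper above can be exercised roughly as follows. This is a hedged sketch: it assumes the module path SpiffWorkflow.util.weakmethod from this record, and Reporter/on_dead are illustrative names rather than anything in the source. Only creation of the weak reference and the death callback are shown, since the exact interface of the returned wrapper is not spelled out here.

    from SpiffWorkflow.util.weakmethod import ref

    class Reporter(object):
        def report(self):
            return 'reported'

    def on_dead(weak_method):
        # invoked once the bound method's owner has been garbage collected
        print('referent died')

    reporter = Reporter()
    weak = ref(reporter.report, on_dead)  # weak reference to a bound method
    del reporter                          # dropping the owner lets on_dead fire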
apple/turicreate | src/external/xgboost/python-package/xgboost/core.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L774-L824 | def predict(self, data, output_margin=False, ntree_limit=0, pred_leaf=False):
"""
Predict with data.
NOTE: This function is not thread safe.
For each booster object, predict can only be called from one thread.
If you want to run prediction using multiple thread, call bst.copy() to make copies
of model object and then call predict
Parameters
----------
data : DMatrix
The dmatrix storing the input.
output_margin : bool
Whether to output the raw untransformed margin value.
ntree_limit : int
Limit number of trees in the prediction; defaults to 0 (use all trees).
pred_leaf : bool
When this option is on, the output will be a matrix of (nsample, ntrees)
with each record indicating the predicted leaf index of each sample in each tree.
Note that the leaf index of a tree is unique per tree, so you may find leaf 1
in both tree 1 and tree 0.
Returns
-------
prediction : numpy array
"""
option_mask = 0x00
if output_margin:
option_mask |= 0x01
if pred_leaf:
option_mask |= 0x02
self._validate_features(data)
length = ctypes.c_ulong()
preds = ctypes.POINTER(ctypes.c_float)()
_check_call(_LIB.XGBoosterPredict(self.handle, data.handle,
option_mask, ntree_limit,
ctypes.byref(length),
ctypes.byref(preds)))
preds = ctypes2numpy(preds, length.value, np.float32)
if pred_leaf:
preds = preds.astype(np.int32)
nrow = data.num_row()
if preds.size != nrow and preds.size % nrow == 0:
preds = preds.reshape(nrow, preds.size / nrow)
return preds | [
"def",
"predict",
"(",
"self",
",",
"data",
",",
"output_margin",
"=",
"False",
",",
"ntree_limit",
"=",
"0",
",",
"pred_leaf",
"=",
"False",
")",
":",
"option_mask",
"=",
"0x00",
"if",
"output_margin",
":",
"option_mask",
"|=",
"0x01",
"if",
"pred_leaf",
":",
"option_mask",
"|=",
"0x02",
"self",
".",
"_validate_features",
"(",
"data",
")",
"length",
"=",
"ctypes",
".",
"c_ulong",
"(",
")",
"preds",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_float",
")",
"(",
")",
"_check_call",
"(",
"_LIB",
".",
"XGBoosterPredict",
"(",
"self",
".",
"handle",
",",
"data",
".",
"handle",
",",
"option_mask",
",",
"ntree_limit",
",",
"ctypes",
".",
"byref",
"(",
"length",
")",
",",
"ctypes",
".",
"byref",
"(",
"preds",
")",
")",
")",
"preds",
"=",
"ctypes2numpy",
"(",
"preds",
",",
"length",
".",
"value",
",",
"np",
".",
"float32",
")",
"if",
"pred_leaf",
":",
"preds",
"=",
"preds",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"nrow",
"=",
"data",
".",
"num_row",
"(",
")",
"if",
"preds",
".",
"size",
"!=",
"nrow",
"and",
"preds",
".",
"size",
"%",
"nrow",
"==",
"0",
":",
"preds",
"=",
"preds",
".",
"reshape",
"(",
"nrow",
",",
"preds",
".",
"size",
"/",
"nrow",
")",
"return",
"preds"
]
| Predict with data.
NOTE: This function is not thread safe.
For each booster object, predict can only be called from one thread.
If you want to run prediction using multiple thread, call bst.copy() to make copies
of model object and then call predict
Parameters
----------
data : DMatrix
The dmatrix storing the input.
output_margin : bool
Whether to output the raw untransformed margin value.
ntree_limit : int
Limit number of trees in the prediction; defaults to 0 (use all trees).
pred_leaf : bool
When this option is on, the output will be a matrix of (nsample, ntrees)
with each record indicating the predicted leaf index of each sample in each tree.
Note that the leaf index of a tree is unique per tree, so you may find leaf 1
in both tree 1 and tree 0.
Returns
-------
prediction : numpy array | [
"Predict",
"with",
"data",
"."
]
| python | train | 37.117647 |
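A hedged usage sketch for the predict() call documented above, using the standard xgboost training entry points (DMatrix, train); the data, parameters and round count are illustrative, not taken from the source.

    import numpy as np
    import xgboost as xgb

    X, y = np.random.rand(100, 5), np.random.randint(2, size=100)
    dtrain = xgb.DMatrix(X, label=y)
    bst = xgb.train({'objective': 'binary:logistic'}, dtrain, num_boost_round=10)

    probs = bst.predict(dtrain)                         # transformed predictions
    margins = bst.predict(dtrain, output_margin=True)   # raw, untransformed margins
    leaves = bst.predict(dtrain, pred_leaf=True)        # (nsample, ntrees) leaf indices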
ekzhu/datasketch | datasketch/lshforest.py | https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lshforest.py#L62-L68 | def index(self):
'''
Index all the keys added so far and make them searchable.
'''
for i, hashtable in enumerate(self.hashtables):
self.sorted_hashtables[i] = [H for H in hashtable.keys()]
self.sorted_hashtables[i].sort() | [
"def",
"index",
"(",
"self",
")",
":",
"for",
"i",
",",
"hashtable",
"in",
"enumerate",
"(",
"self",
".",
"hashtables",
")",
":",
"self",
".",
"sorted_hashtables",
"[",
"i",
"]",
"=",
"[",
"H",
"for",
"H",
"in",
"hashtable",
".",
"keys",
"(",
")",
"]",
"self",
".",
"sorted_hashtables",
"[",
"i",
"]",
".",
"sort",
"(",
")"
]
| Index all the keys added so far and make them searchable. | [
"Index",
"all",
"the",
"keys",
"added",
"so",
"far",
"and",
"make",
"them",
"searchable",
"."
]
| python | test | 38.714286 |
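A short usage sketch for index(), assuming datasketch's public MinHash/MinHashLSHForest API; keys and documents are illustrative. Note that index() must be called after the last add() and before any query.

    from datasketch import MinHash, MinHashLSHForest

    forest = MinHashLSHForest(num_perm=128)
    for key, words in [('d1', ['cat', 'dog']), ('d2', ['cat', 'fish'])]:
        m = MinHash(num_perm=128)
        for w in words:
            m.update(w.encode('utf8'))
        forest.add(key, m)

    forest.index()                    # make the added keys searchable

    query = MinHash(num_perm=128)
    query.update('cat'.encode('utf8'))
    print(forest.query(query, 2))     # top-2 approximate neighbours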
DataBiosphere/dsub | dsub/providers/local.py | https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/providers/local.py#L641-L699 | def _get_task_from_task_dir(self, job_id, user_id, task_id, task_attempt):
"""Return a Task object with this task's info."""
# We need to be very careful about how we read and interpret the contents
# of the task directory. The directory could be changing because a new
# task is being created. The directory could be changing because a task
# is ending.
#
# If the meta.yaml does not exist, the task does not yet exist.
# If the meta.yaml exists, it means the task is scheduled. It does not mean
# it is yet running.
# If the task.pid file exists, it means that the runner.sh was started.
task_dir = self._task_directory(job_id, task_id, task_attempt)
job_descriptor = self._read_task_metadata(task_dir)
if not job_descriptor:
return None
# If we read up an old task, the user-id will not be in the job_descriptor.
if not job_descriptor.job_metadata.get('user-id'):
job_descriptor.job_metadata['user-id'] = user_id
# Get the pid of the runner
pid = -1
try:
with open(os.path.join(task_dir, 'task.pid'), 'r') as f:
pid = int(f.readline().strip())
except (IOError, OSError):
pass
# Get the script contents
script = None
script_name = job_descriptor.job_metadata.get('script-name')
if script_name:
script = self._read_script(task_dir, script_name)
# Read the files written by the runner.sh.
# For new tasks, these may not have been written yet.
end_time = self._get_end_time_from_task_dir(task_dir)
last_update = self._get_last_update_time_from_task_dir(task_dir)
events = self._get_events_from_task_dir(task_dir)
status = self._get_status_from_task_dir(task_dir)
log_detail = self._get_log_detail_from_task_dir(task_dir)
# If the status file is not yet written, then mark the task as pending
if not status:
status = 'RUNNING'
log_detail = ['Pending']
return LocalTask(
task_status=status,
events=events,
log_detail=log_detail,
job_descriptor=job_descriptor,
end_time=end_time,
last_update=last_update,
pid=pid,
script=script) | [
"def",
"_get_task_from_task_dir",
"(",
"self",
",",
"job_id",
",",
"user_id",
",",
"task_id",
",",
"task_attempt",
")",
":",
"# We need to be very careful about how we read and interpret the contents",
"# of the task directory. The directory could be changing because a new",
"# task is being created. The directory could be changing because a task",
"# is ending.",
"#",
"# If the meta.yaml does not exist, the task does not yet exist.",
"# If the meta.yaml exists, it means the task is scheduled. It does not mean",
"# it is yet running.",
"# If the task.pid file exists, it means that the runner.sh was started.",
"task_dir",
"=",
"self",
".",
"_task_directory",
"(",
"job_id",
",",
"task_id",
",",
"task_attempt",
")",
"job_descriptor",
"=",
"self",
".",
"_read_task_metadata",
"(",
"task_dir",
")",
"if",
"not",
"job_descriptor",
":",
"return",
"None",
"# If we read up an old task, the user-id will not be in the job_descriptor.",
"if",
"not",
"job_descriptor",
".",
"job_metadata",
".",
"get",
"(",
"'user-id'",
")",
":",
"job_descriptor",
".",
"job_metadata",
"[",
"'user-id'",
"]",
"=",
"user_id",
"# Get the pid of the runner",
"pid",
"=",
"-",
"1",
"try",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"task_dir",
",",
"'task.pid'",
")",
",",
"'r'",
")",
"as",
"f",
":",
"pid",
"=",
"int",
"(",
"f",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
")",
"except",
"(",
"IOError",
",",
"OSError",
")",
":",
"pass",
"# Get the script contents",
"script",
"=",
"None",
"script_name",
"=",
"job_descriptor",
".",
"job_metadata",
".",
"get",
"(",
"'script-name'",
")",
"if",
"script_name",
":",
"script",
"=",
"self",
".",
"_read_script",
"(",
"task_dir",
",",
"script_name",
")",
"# Read the files written by the runner.sh.",
"# For new tasks, these may not have been written yet.",
"end_time",
"=",
"self",
".",
"_get_end_time_from_task_dir",
"(",
"task_dir",
")",
"last_update",
"=",
"self",
".",
"_get_last_update_time_from_task_dir",
"(",
"task_dir",
")",
"events",
"=",
"self",
".",
"_get_events_from_task_dir",
"(",
"task_dir",
")",
"status",
"=",
"self",
".",
"_get_status_from_task_dir",
"(",
"task_dir",
")",
"log_detail",
"=",
"self",
".",
"_get_log_detail_from_task_dir",
"(",
"task_dir",
")",
"# If the status file is not yet written, then mark the task as pending",
"if",
"not",
"status",
":",
"status",
"=",
"'RUNNING'",
"log_detail",
"=",
"[",
"'Pending'",
"]",
"return",
"LocalTask",
"(",
"task_status",
"=",
"status",
",",
"events",
"=",
"events",
",",
"log_detail",
"=",
"log_detail",
",",
"job_descriptor",
"=",
"job_descriptor",
",",
"end_time",
"=",
"end_time",
",",
"last_update",
"=",
"last_update",
",",
"pid",
"=",
"pid",
",",
"script",
"=",
"script",
")"
]
| Return a Task object with this task's info. | [
"Return",
"a",
"Task",
"object",
"with",
"this",
"task",
"s",
"info",
"."
]
| python | valid | 35.949153 |
openego/eTraGo | etrago/tools/utilities.py | https://github.com/openego/eTraGo/blob/2a8fc6d4368d0e9abe6fe0d0c39baf66ea0126b9/etrago/tools/utilities.py#L57-L75 | def buses_of_vlvl(network, voltage_level):
""" Get bus-ids of given voltage level(s).
Parameters
----------
network : :class:`pypsa.Network
Overall container of PyPSA
voltage_level: list
Returns
-------
list
List containing bus-ids.
"""
mask = network.buses.v_nom.isin(voltage_level)
df = network.buses[mask]
return df.index | [
"def",
"buses_of_vlvl",
"(",
"network",
",",
"voltage_level",
")",
":",
"mask",
"=",
"network",
".",
"buses",
".",
"v_nom",
".",
"isin",
"(",
"voltage_level",
")",
"df",
"=",
"network",
".",
"buses",
"[",
"mask",
"]",
"return",
"df",
".",
"index"
]
| Get bus-ids of given voltage level(s).
Parameters
----------
network : :class:`pypsa.Network
Overall container of PyPSA
voltage_level: list
Returns
-------
list
List containing bus-ids. | [
"Get",
"bus",
"-",
"ids",
"of",
"given",
"voltage",
"level",
"(",
"s",
")",
"."
]
| python | train | 19.684211 |
jrosebr1/imutils | imutils/convenience.py | https://github.com/jrosebr1/imutils/blob/4430083199793bd66db64e574379cbe18414d420/imutils/convenience.py#L304-L319 | def adjust_brightness_contrast(image, brightness=0., contrast=0.):
"""
Adjust the brightness and/or contrast of an image
:param image: OpenCV BGR image
:param contrast: Float, contrast adjustment with 0 meaning no change
:param brightness: Float, brightness adjustment with 0 meaning no change
"""
beta = 0
# See the OpenCV docs for more info on the `beta` parameter to addWeighted
# https://docs.opencv.org/3.4.2/d2/de8/group__core__array.html#gafafb2513349db3bcff51f54ee5592a19
return cv2.addWeighted(image,
1 + float(contrast) / 100.,
image,
beta,
float(brightness)) | [
"def",
"adjust_brightness_contrast",
"(",
"image",
",",
"brightness",
"=",
"0.",
",",
"contrast",
"=",
"0.",
")",
":",
"beta",
"=",
"0",
"# See the OpenCV docs for more info on the `beta` parameter to addWeighted",
"# https://docs.opencv.org/3.4.2/d2/de8/group__core__array.html#gafafb2513349db3bcff51f54ee5592a19",
"return",
"cv2",
".",
"addWeighted",
"(",
"image",
",",
"1",
"+",
"float",
"(",
"contrast",
")",
"/",
"100.",
",",
"image",
",",
"beta",
",",
"float",
"(",
"brightness",
")",
")"
]
| Adjust the brightness and/or contrast of an image
:param image: OpenCV BGR image
:param contrast: Float, contrast adjustment with 0 meaning no change
:param brightness: Float, brightness adjustment with 0 meaning no change | [
"Adjust",
"the",
"brightness",
"and",
"/",
"or",
"contrast",
"of",
"an",
"image"
]
| python | train | 43.9375 |
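A hedged usage sketch, assuming the helper is re-exported at the imutils package level (it lives in imutils.convenience); file names are illustrative.

    import cv2
    import imutils

    image = cv2.imread('example.jpg')                                    # illustrative path
    brighter = imutils.adjust_brightness_contrast(image, brightness=30.)
    punchier = imutils.adjust_brightness_contrast(image, contrast=25.)
    cv2.imwrite('adjusted.jpg', punchier)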
cloudsmith-io/cloudsmith-cli | cloudsmith_cli/core/api/packages.py | https://github.com/cloudsmith-io/cloudsmith-cli/blob/5bc245ca5d0bfa85380be48e7c206b4c86cc6c8e/cloudsmith_cli/core/api/packages.py#L122-L142 | def get_package_status(owner, repo, identifier):
"""Get the status for a package in a repository."""
client = get_packages_api()
with catch_raise_api_exception():
data, _, headers = client.packages_status_with_http_info(
owner=owner, repo=repo, identifier=identifier
)
ratelimits.maybe_rate_limit(client, headers)
# pylint: disable=no-member
# Pylint detects the returned value as a tuple
return (
data.is_sync_completed,
data.is_sync_failed,
data.sync_progress,
data.status_str,
data.stage_str,
data.status_reason,
) | [
"def",
"get_package_status",
"(",
"owner",
",",
"repo",
",",
"identifier",
")",
":",
"client",
"=",
"get_packages_api",
"(",
")",
"with",
"catch_raise_api_exception",
"(",
")",
":",
"data",
",",
"_",
",",
"headers",
"=",
"client",
".",
"packages_status_with_http_info",
"(",
"owner",
"=",
"owner",
",",
"repo",
"=",
"repo",
",",
"identifier",
"=",
"identifier",
")",
"ratelimits",
".",
"maybe_rate_limit",
"(",
"client",
",",
"headers",
")",
"# pylint: disable=no-member",
"# Pylint detects the returned value as a tuple",
"return",
"(",
"data",
".",
"is_sync_completed",
",",
"data",
".",
"is_sync_failed",
",",
"data",
".",
"sync_progress",
",",
"data",
".",
"status_str",
",",
"data",
".",
"stage_str",
",",
"data",
".",
"status_reason",
",",
")"
]
| Get the status for a package in a repository. | [
"Get",
"the",
"status",
"for",
"a",
"package",
"in",
"a",
"repository",
"."
]
| python | train | 28.952381 |
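Since the call returns a 6-tuple, callers usually unpack it in one go. A hedged sketch, assuming the CLI's API client and credentials are already configured; the owner, repo and identifier values are illustrative.

    from cloudsmith_cli.core.api.packages import get_package_status

    (completed, failed, progress,
     status, stage, reason) = get_package_status('acme', 'main-repo', 'pkg-1234')

    if failed:
        print('sync failed during %s: %s' % (stage, reason))
    elif not completed:
        print('sync %s%% complete (%s)' % (progress, status))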
saltstack/salt | salt/modules/kubernetesmod.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L1467-L1510 | def __read_and_render_yaml_file(source,
template,
saltenv):
'''
Read a yaml file and, if needed, renders that using the specifieds
templating. Returns the python objects defined inside of the file.
'''
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
raise CommandExecutionError(
'Source file \'{0}\' not found'.format(source))
with salt.utils.files.fopen(sfn, 'r') as src:
contents = src.read()
if template:
if template in salt.utils.templates.TEMPLATE_REGISTRY:
# TODO: should we allow user to set also `context` like # pylint: disable=fixme
# `file.managed` does?
# Apply templating
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
contents,
from_str=True,
to_str=True,
saltenv=saltenv,
grains=__grains__,
pillar=__pillar__,
salt=__salt__,
opts=__opts__)
if not data['result']:
# Failed to render the template
raise CommandExecutionError(
'Failed to render file path with error: '
'{0}'.format(data['data'])
)
contents = data['data'].encode('utf-8')
else:
raise CommandExecutionError(
'Unknown template specified: {0}'.format(
template))
return salt.utils.yaml.safe_load(contents) | [
"def",
"__read_and_render_yaml_file",
"(",
"source",
",",
"template",
",",
"saltenv",
")",
":",
"sfn",
"=",
"__salt__",
"[",
"'cp.cache_file'",
"]",
"(",
"source",
",",
"saltenv",
")",
"if",
"not",
"sfn",
":",
"raise",
"CommandExecutionError",
"(",
"'Source file \\'{0}\\' not found'",
".",
"format",
"(",
"source",
")",
")",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"sfn",
",",
"'r'",
")",
"as",
"src",
":",
"contents",
"=",
"src",
".",
"read",
"(",
")",
"if",
"template",
":",
"if",
"template",
"in",
"salt",
".",
"utils",
".",
"templates",
".",
"TEMPLATE_REGISTRY",
":",
"# TODO: should we allow user to set also `context` like # pylint: disable=fixme",
"# `file.managed` does?",
"# Apply templating",
"data",
"=",
"salt",
".",
"utils",
".",
"templates",
".",
"TEMPLATE_REGISTRY",
"[",
"template",
"]",
"(",
"contents",
",",
"from_str",
"=",
"True",
",",
"to_str",
"=",
"True",
",",
"saltenv",
"=",
"saltenv",
",",
"grains",
"=",
"__grains__",
",",
"pillar",
"=",
"__pillar__",
",",
"salt",
"=",
"__salt__",
",",
"opts",
"=",
"__opts__",
")",
"if",
"not",
"data",
"[",
"'result'",
"]",
":",
"# Failed to render the template",
"raise",
"CommandExecutionError",
"(",
"'Failed to render file path with error: '",
"'{0}'",
".",
"format",
"(",
"data",
"[",
"'data'",
"]",
")",
")",
"contents",
"=",
"data",
"[",
"'data'",
"]",
".",
"encode",
"(",
"'utf-8'",
")",
"else",
":",
"raise",
"CommandExecutionError",
"(",
"'Unknown template specified: {0}'",
".",
"format",
"(",
"template",
")",
")",
"return",
"salt",
".",
"utils",
".",
"yaml",
".",
"safe_load",
"(",
"contents",
")"
]
| Read a yaml file and, if needed, renders that using the specifieds
templating. Returns the python objects defined inside of the file. | [
"Read",
"a",
"yaml",
"file",
"and",
"if",
"needed",
"renders",
"that",
"using",
"the",
"specifieds",
"templating",
".",
"Returns",
"the",
"python",
"objects",
"defined",
"inside",
"of",
"the",
"file",
"."
]
| python | train | 37.5 |
DataDog/integrations-core | openstack_controller/datadog_checks/openstack_controller/api.py | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/openstack_controller/datadog_checks/openstack_controller/api.py#L598-L623 | def _get_valid_endpoint(resp, name, entry_type):
"""
Parse the service catalog returned by the Identity API for an endpoint matching
the Nova service with the requested version
Sends a CRITICAL service check when no viable candidates are found in the Catalog
"""
catalog = resp.get('token', {}).get('catalog', [])
for entry in catalog:
if (
entry.get('name')
and entry.get('type')
and entry.get('name') == name
and entry.get('type') == entry_type
):
# Collect any endpoints on the public or internal interface
valid_endpoints = {}
for ep in entry.get('endpoints'):
interface = ep.get('interface', '')
if interface in ['public', 'internal']:
valid_endpoints[interface] = ep.get('url')
if valid_endpoints:
# Favor public endpoints over internal
return valid_endpoints.get('public', valid_endpoints.get('internal'))
return None | [
"def",
"_get_valid_endpoint",
"(",
"resp",
",",
"name",
",",
"entry_type",
")",
":",
"catalog",
"=",
"resp",
".",
"get",
"(",
"'token'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'catalog'",
",",
"[",
"]",
")",
"for",
"entry",
"in",
"catalog",
":",
"if",
"(",
"entry",
".",
"get",
"(",
"'name'",
")",
"and",
"entry",
".",
"get",
"(",
"'type'",
")",
"and",
"entry",
".",
"get",
"(",
"'name'",
")",
"==",
"name",
"and",
"entry",
".",
"get",
"(",
"'type'",
")",
"==",
"entry_type",
")",
":",
"# Collect any endpoints on the public or internal interface",
"valid_endpoints",
"=",
"{",
"}",
"for",
"ep",
"in",
"entry",
".",
"get",
"(",
"'endpoints'",
")",
":",
"interface",
"=",
"ep",
".",
"get",
"(",
"'interface'",
",",
"''",
")",
"if",
"interface",
"in",
"[",
"'public'",
",",
"'internal'",
"]",
":",
"valid_endpoints",
"[",
"interface",
"]",
"=",
"ep",
".",
"get",
"(",
"'url'",
")",
"if",
"valid_endpoints",
":",
"# Favor public endpoints over internal",
"return",
"valid_endpoints",
".",
"get",
"(",
"'public'",
",",
"valid_endpoints",
".",
"get",
"(",
"'internal'",
")",
")",
"return",
"None"
]
| Parse the service catalog returned by the Identity API for an endpoint matching
the Nova service with the requested version
Sends a CRITICAL service check when no viable candidates are found in the Catalog | [
"Parse",
"the",
"service",
"catalog",
"returned",
"by",
"the",
"Identity",
"API",
"for",
"an",
"endpoint",
"matching",
"the",
"Nova",
"service",
"with",
"the",
"requested",
"version",
"Sends",
"a",
"CRITICAL",
"service",
"check",
"when",
"no",
"viable",
"candidates",
"are",
"found",
"in",
"the",
"Catalog"
]
| python | train | 43.115385 |
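A self-contained sketch of the Keystone v3 catalog shape this parser expects; the URLs are illustrative, and the helper is assumed to be in scope (in the check it is a private parsing helper rather than public API).

    resp = {
        'token': {'catalog': [{
            'name': 'nova',
            'type': 'compute',
            'endpoints': [
                {'interface': 'internal', 'url': 'http://10.0.0.5:8774/v2.1'},
                {'interface': 'public', 'url': 'https://compute.example.com/v2.1'},
            ],
        }]},
    }

    # Public endpoints win over internal ones; with only an internal entry the
    # internal URL is returned, and None if the service is absent from the catalog.
    print(_get_valid_endpoint(resp, 'nova', 'compute'))
    # -> https://compute.example.com/v2.1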
todstoychev/signal-dispatcher | signal_dispatcher/signal_dispatcher.py | https://github.com/todstoychev/signal-dispatcher/blob/77131d119045973d65434abbcd6accdfa9cc327a/signal_dispatcher/signal_dispatcher.py#L75-L84 | def signal_alias_exists(alias: str) -> bool:
"""
Checks if signal alias exists.
:param alias: Signal alias.
:return:
"""
if SignalDispatcher.signals.get(alias):
return True
return False | [
"def",
"signal_alias_exists",
"(",
"alias",
":",
"str",
")",
"->",
"bool",
":",
"if",
"SignalDispatcher",
".",
"signals",
".",
"get",
"(",
"alias",
")",
":",
"return",
"True",
"return",
"False"
]
| Checks if signal alias exists.
:param alias: Signal alias.
:return: | [
"Checks",
"if",
"signal",
"alias",
"exists",
".",
":",
"param",
"alias",
":",
"Signal",
"alias",
".",
":",
"return",
":"
]
| python | train | 24.5 |
Neurita/boyle | boyle/nifti/storage.py | https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/storage.py#L160-L176 | def hdfpath_to_nifti1image(file_path, h5path):
"""Returns a nibabel Nifti1Image from a HDF5 group datasets
Parameters
----------
file_path: string
HDF5 file path
h5path:
HDF5 group path in file_path
Returns
-------
nibabel Nifti1Image
"""
with h5py.File(file_path, 'r') as f:
return hdfgroup_to_nifti1image(f[h5path]) | [
"def",
"hdfpath_to_nifti1image",
"(",
"file_path",
",",
"h5path",
")",
":",
"with",
"h5py",
".",
"File",
"(",
"file_path",
",",
"'r'",
")",
"as",
"f",
":",
"return",
"hdfgroup_to_nifti1image",
"(",
"f",
"[",
"h5path",
"]",
")"
]
| Returns a nibabel Nifti1Image from a HDF5 group datasets
Parameters
----------
file_path: string
HDF5 file path
h5path:
HDF5 group path in file_path
Returns
-------
nibabel Nifti1Image | [
"Returns",
"a",
"nibabel",
"Nifti1Image",
"from",
"a",
"HDF5",
"group",
"datasets"
]
| python | valid | 21.882353 |
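A minimal usage sketch; the HDF5 file and group path are illustrative, and the import assumes the module path shown for this record.

    from boyle.nifti.storage import hdfpath_to_nifti1image

    img = hdfpath_to_nifti1image('subject01.hdf5', '/mri/t1')  # illustrative paths
    print(img.shape, img.get_data_dtype())                     # standard nibabel accessors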
ssalentin/plip | plip/plipcmd.py | https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/plipcmd.py#L131-L177 | def main(inputstructs, inputpdbids):
"""Main function. Calls functions for processing, report generation and visualization."""
pdbid, pdbpath = None, None
# #@todo For multiprocessing, implement better stacktracing for errors
# Print title and version
title = "* Protein-Ligand Interaction Profiler v%s *" % __version__
write_message('\n' + '*' * len(title) + '\n')
write_message(title)
write_message('\n' + '*' * len(title) + '\n\n')
outputprefix = config.OUTPUTFILENAME
if inputstructs is not None: # Process PDB file(s)
num_structures = len(inputstructs)
inputstructs = remove_duplicates(inputstructs)
read_from_stdin = False
for inputstruct in inputstructs:
if inputstruct == '-':
inputstruct = sys.stdin.read()
read_from_stdin = True
if config.RAWSTRING:
if sys.version_info < (3,):
inputstruct = bytes(inputstruct).decode('unicode_escape')
else:
inputstruct = bytes(inputstruct, 'utf8').decode('unicode_escape')
else:
if os.path.getsize(inputstruct) == 0:
sysexit(2, 'Empty PDB file\n') # Exit if input file is empty
if num_structures > 1:
basename = inputstruct.split('.')[-2].split('/')[-1]
config.OUTPATH = '/'.join([config.BASEPATH, basename])
outputprefix = 'report'
process_pdb(inputstruct, config.OUTPATH, as_string=read_from_stdin, outputprefix=outputprefix)
else: # Try to fetch the current PDB structure(s) directly from the RCBS server
num_pdbids = len(inputpdbids)
inputpdbids = remove_duplicates(inputpdbids)
for inputpdbid in inputpdbids:
pdbpath, pdbid = download_structure(inputpdbid)
if num_pdbids > 1:
config.OUTPATH = '/'.join([config.BASEPATH, pdbid[1:3].upper(), pdbid.upper()])
outputprefix = 'report'
process_pdb(pdbpath, config.OUTPATH, outputprefix=outputprefix)
if (pdbid is not None or inputstructs is not None) and config.BASEPATH is not None:
if config.BASEPATH in ['.', './']:
write_message('\nFinished analysis. Find the result files in the working directory.\n\n')
else:
write_message('\nFinished analysis. Find the result files in %s\n\n' % config.BASEPATH) | [
"def",
"main",
"(",
"inputstructs",
",",
"inputpdbids",
")",
":",
"pdbid",
",",
"pdbpath",
"=",
"None",
",",
"None",
"# #@todo For multiprocessing, implement better stacktracing for errors",
"# Print title and version",
"title",
"=",
"\"* Protein-Ligand Interaction Profiler v%s *\"",
"%",
"__version__",
"write_message",
"(",
"'\\n'",
"+",
"'*'",
"*",
"len",
"(",
"title",
")",
"+",
"'\\n'",
")",
"write_message",
"(",
"title",
")",
"write_message",
"(",
"'\\n'",
"+",
"'*'",
"*",
"len",
"(",
"title",
")",
"+",
"'\\n\\n'",
")",
"outputprefix",
"=",
"config",
".",
"OUTPUTFILENAME",
"if",
"inputstructs",
"is",
"not",
"None",
":",
"# Process PDB file(s)",
"num_structures",
"=",
"len",
"(",
"inputstructs",
")",
"inputstructs",
"=",
"remove_duplicates",
"(",
"inputstructs",
")",
"read_from_stdin",
"=",
"False",
"for",
"inputstruct",
"in",
"inputstructs",
":",
"if",
"inputstruct",
"==",
"'-'",
":",
"inputstruct",
"=",
"sys",
".",
"stdin",
".",
"read",
"(",
")",
"read_from_stdin",
"=",
"True",
"if",
"config",
".",
"RAWSTRING",
":",
"if",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
")",
":",
"inputstruct",
"=",
"bytes",
"(",
"inputstruct",
")",
".",
"decode",
"(",
"'unicode_escape'",
")",
"else",
":",
"inputstruct",
"=",
"bytes",
"(",
"inputstruct",
",",
"'utf8'",
")",
".",
"decode",
"(",
"'unicode_escape'",
")",
"else",
":",
"if",
"os",
".",
"path",
".",
"getsize",
"(",
"inputstruct",
")",
"==",
"0",
":",
"sysexit",
"(",
"2",
",",
"'Empty PDB file\\n'",
")",
"# Exit if input file is empty",
"if",
"num_structures",
">",
"1",
":",
"basename",
"=",
"inputstruct",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"2",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"config",
".",
"OUTPATH",
"=",
"'/'",
".",
"join",
"(",
"[",
"config",
".",
"BASEPATH",
",",
"basename",
"]",
")",
"outputprefix",
"=",
"'report'",
"process_pdb",
"(",
"inputstruct",
",",
"config",
".",
"OUTPATH",
",",
"as_string",
"=",
"read_from_stdin",
",",
"outputprefix",
"=",
"outputprefix",
")",
"else",
":",
"# Try to fetch the current PDB structure(s) directly from the RCBS server",
"num_pdbids",
"=",
"len",
"(",
"inputpdbids",
")",
"inputpdbids",
"=",
"remove_duplicates",
"(",
"inputpdbids",
")",
"for",
"inputpdbid",
"in",
"inputpdbids",
":",
"pdbpath",
",",
"pdbid",
"=",
"download_structure",
"(",
"inputpdbid",
")",
"if",
"num_pdbids",
">",
"1",
":",
"config",
".",
"OUTPATH",
"=",
"'/'",
".",
"join",
"(",
"[",
"config",
".",
"BASEPATH",
",",
"pdbid",
"[",
"1",
":",
"3",
"]",
".",
"upper",
"(",
")",
",",
"pdbid",
".",
"upper",
"(",
")",
"]",
")",
"outputprefix",
"=",
"'report'",
"process_pdb",
"(",
"pdbpath",
",",
"config",
".",
"OUTPATH",
",",
"outputprefix",
"=",
"outputprefix",
")",
"if",
"(",
"pdbid",
"is",
"not",
"None",
"or",
"inputstructs",
"is",
"not",
"None",
")",
"and",
"config",
".",
"BASEPATH",
"is",
"not",
"None",
":",
"if",
"config",
".",
"BASEPATH",
"in",
"[",
"'.'",
",",
"'./'",
"]",
":",
"write_message",
"(",
"'\\nFinished analysis. Find the result files in the working directory.\\n\\n'",
")",
"else",
":",
"write_message",
"(",
"'\\nFinished analysis. Find the result files in %s\\n\\n'",
"%",
"config",
".",
"BASEPATH",
")"
]
| Main function. Calls functions for processing, report generation and visualization. | [
"Main",
"function",
".",
"Calls",
"functions",
"for",
"processing",
"report",
"generation",
"and",
"visualization",
"."
]
| python | train | 52.148936 |
ros-infrastructure/ros_buildfarm | ros_buildfarm/status_page.py | https://github.com/ros-infrastructure/ros_buildfarm/blob/c63ad85b21470f3262086fcd987528a0efc0cf6d/ros_buildfarm/status_page.py#L489-L511 | def get_jenkins_job_urls(
rosdistro_name, jenkins_url, release_build_name, targets):
"""
Get the Jenkins job urls for each target.
The placeholder {pkg} needs to be replaced with the ROS package name.
:return: a dict indexed by targets containing a string
"""
urls = {}
for target in targets:
view_name = get_release_view_name(
rosdistro_name, release_build_name,
target.os_name, target.os_code_name, target.arch)
base_url = jenkins_url + '/view/%s/job/%s__{pkg}__' % \
(view_name, view_name)
if target.arch == 'source':
urls[target] = base_url + '%s_%s__source' % \
(target.os_name, target.os_code_name)
else:
urls[target] = base_url + '%s_%s_%s__binary' % \
(target.os_name, target.os_code_name, target.arch)
return urls | [
"def",
"get_jenkins_job_urls",
"(",
"rosdistro_name",
",",
"jenkins_url",
",",
"release_build_name",
",",
"targets",
")",
":",
"urls",
"=",
"{",
"}",
"for",
"target",
"in",
"targets",
":",
"view_name",
"=",
"get_release_view_name",
"(",
"rosdistro_name",
",",
"release_build_name",
",",
"target",
".",
"os_name",
",",
"target",
".",
"os_code_name",
",",
"target",
".",
"arch",
")",
"base_url",
"=",
"jenkins_url",
"+",
"'/view/%s/job/%s__{pkg}__'",
"%",
"(",
"view_name",
",",
"view_name",
")",
"if",
"target",
".",
"arch",
"==",
"'source'",
":",
"urls",
"[",
"target",
"]",
"=",
"base_url",
"+",
"'%s_%s__source'",
"%",
"(",
"target",
".",
"os_name",
",",
"target",
".",
"os_code_name",
")",
"else",
":",
"urls",
"[",
"target",
"]",
"=",
"base_url",
"+",
"'%s_%s_%s__binary'",
"%",
"(",
"target",
".",
"os_name",
",",
"target",
".",
"os_code_name",
",",
"target",
".",
"arch",
")",
"return",
"urls"
]
| Get the Jenkins job urls for each target.
The placeholder {pkg} needs to be replaced with the ROS package name.
:return: a dict indexed by targets containing a string | [
"Get",
"the",
"Jenkins",
"job",
"urls",
"for",
"each",
"target",
"."
]
| python | valid | 37.652174 |
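The returned templates keep a literal {pkg} placeholder, so callers substitute a package name per job. A hedged sketch with illustrative rosdistro/build-file/Jenkins values; targets is assumed to be an iterable of objects exposing os_name, os_code_name and arch, as in the function above.

    from ros_buildfarm.status_page import get_jenkins_job_urls

    urls = get_jenkins_job_urls(
        'melodic', 'https://build.ros.org', 'default', targets)  # targets built elsewhere

    for target, template in urls.items():
        print(target, template.format(pkg='geometry2'))          # fill the {pkg} placeholder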
IEMLdev/ieml | ieml/distance/order.py | https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/distance/order.py#L25-L46 | def count_id(w0):
"""
0 -> no terms idd
1 -> most term idd are shared in root morphem
2 -> most term idd are shared in flexing morphem
3 -> most term idd are shared root <-> flexing (crossed)
:param w0:
:param w1:
:return:
"""
def f(w1):
count = [set(w0.root).intersection(w1.root),
set(w0.flexing).intersection(w1.flexing),
set(w0.root).intersection(w1.flexing) | set(w1.root).intersection(w0.flexing)]
if any(count):
return max((1,2,3), key=lambda i: len(count[i - 1]))
else:
return 0
return f | [
"def",
"count_id",
"(",
"w0",
")",
":",
"def",
"f",
"(",
"w1",
")",
":",
"count",
"=",
"[",
"set",
"(",
"w0",
".",
"root",
")",
".",
"intersection",
"(",
"w1",
".",
"root",
")",
",",
"set",
"(",
"w0",
".",
"flexing",
")",
".",
"intersection",
"(",
"w1",
".",
"flexing",
")",
",",
"set",
"(",
"w0",
".",
"root",
")",
".",
"intersection",
"(",
"w1",
".",
"flexing",
")",
"|",
"set",
"(",
"w1",
".",
"root",
")",
".",
"intersection",
"(",
"w0",
".",
"flexing",
")",
"]",
"if",
"any",
"(",
"count",
")",
":",
"return",
"max",
"(",
"(",
"1",
",",
"2",
",",
"3",
")",
",",
"key",
"=",
"lambda",
"i",
":",
"len",
"(",
"count",
"[",
"i",
"-",
"1",
"]",
")",
")",
"else",
":",
"return",
"0",
"return",
"f"
]
| 0 -> no terms idd
1 -> most term idd are shared in root morphem
2 -> most term idd are shared in flexing morphem
3 -> most term idd are shared root <-> flexing (crossed)
:param w0:
:param w1:
:return: | [
"0",
"-",
">",
"no",
"terms",
"idd",
"1",
"-",
">",
"most",
"term",
"idd",
"are",
"shared",
"in",
"root",
"morphem",
"2",
"-",
">",
"most",
"term",
"idd",
"are",
"shared",
"in",
"flexing",
"morphem",
"3",
"-",
">",
"most",
"term",
"idd",
"are",
"shared",
"root",
"<",
"-",
">",
"flexing",
"(",
"crossed",
")",
":",
"param",
"w0",
":",
":",
"param",
"w1",
":",
":",
"return",
":"
]
| python | test | 27.409091 |
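A worked sketch of the return codes, using a namedtuple stand-in for the word objects (only .root and .flexing are read); real IEML words would normally be passed, and the import path is assumed from this record.

    from collections import namedtuple
    from ieml.distance.order import count_id   # assumed module path

    Word = namedtuple('Word', ['root', 'flexing'])
    w0 = Word(root=('a', 'b'), flexing=('x',))

    f = count_id(w0)
    print(f(Word(root=('a', 'c'), flexing=('y',))))  # 1: overlap in the root morphemes
    print(f(Word(root=('x', 'z'), flexing=('q',))))  # 3: 'x' crosses root <-> flexing
    print(f(Word(root=('m',), flexing=('n',))))      # 0: no term shared at all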
craffel/mir_eval | mir_eval/melody.py | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/melody.py#L358-L426 | def voicing_measures(ref_voicing, est_voicing):
"""Compute the voicing recall and false alarm rates given two voicing
indicator sequences, one as reference (truth) and the other as the estimate
(prediction). The sequences must be of the same length.
Examples
--------
>>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')
>>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')
>>> (ref_v, ref_c,
... est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time,
... ref_freq,
... est_time,
... est_freq)
>>> recall, false_alarm = mir_eval.melody.voicing_measures(ref_v,
... est_v)
Parameters
----------
ref_voicing : np.ndarray
Reference boolean voicing array
est_voicing : np.ndarray
Estimated boolean voicing array
Returns
-------
vx_recall : float
Voicing recall rate, the fraction of voiced frames in ref
indicated as voiced in est
vx_false_alarm : float
Voicing false alarm rate, the fraction of unvoiced frames in ref
indicated as voiced in est
"""
validate_voicing(ref_voicing, est_voicing)
ref_voicing = ref_voicing.astype(bool)
est_voicing = est_voicing.astype(bool)
# When input arrays are empty, return 0 by special case
if ref_voicing.size == 0 or est_voicing.size == 0:
return 0.
# How voicing is computed
# | ref_v | !ref_v |
# -------|-------|--------|
# est_v | TP | FP |
# -------|-------|------- |
# !est_v | FN | TN |
# -------------------------
TP = (ref_voicing*est_voicing).sum()
FP = ((ref_voicing == 0)*est_voicing).sum()
FN = (ref_voicing*(est_voicing == 0)).sum()
TN = ((ref_voicing == 0)*(est_voicing == 0)).sum()
# Voicing recall = fraction of voiced frames according the reference that
# are declared as voiced by the estimate
if TP + FN == 0:
vx_recall = 0.
else:
vx_recall = TP/float(TP + FN)
# Voicing false alarm = fraction of unvoiced frames according to the
# reference that are declared as voiced by the estimate
if FP + TN == 0:
vx_false_alm = 0.
else:
vx_false_alm = FP/float(FP + TN)
return vx_recall, vx_false_alm | [
"def",
"voicing_measures",
"(",
"ref_voicing",
",",
"est_voicing",
")",
":",
"validate_voicing",
"(",
"ref_voicing",
",",
"est_voicing",
")",
"ref_voicing",
"=",
"ref_voicing",
".",
"astype",
"(",
"bool",
")",
"est_voicing",
"=",
"est_voicing",
".",
"astype",
"(",
"bool",
")",
"# When input arrays are empty, return 0 by special case",
"if",
"ref_voicing",
".",
"size",
"==",
"0",
"or",
"est_voicing",
".",
"size",
"==",
"0",
":",
"return",
"0.",
"# How voicing is computed",
"# | ref_v | !ref_v |",
"# -------|-------|--------|",
"# est_v | TP | FP |",
"# -------|-------|------- |",
"# !est_v | FN | TN |",
"# -------------------------",
"TP",
"=",
"(",
"ref_voicing",
"*",
"est_voicing",
")",
".",
"sum",
"(",
")",
"FP",
"=",
"(",
"(",
"ref_voicing",
"==",
"0",
")",
"*",
"est_voicing",
")",
".",
"sum",
"(",
")",
"FN",
"=",
"(",
"ref_voicing",
"*",
"(",
"est_voicing",
"==",
"0",
")",
")",
".",
"sum",
"(",
")",
"TN",
"=",
"(",
"(",
"ref_voicing",
"==",
"0",
")",
"*",
"(",
"est_voicing",
"==",
"0",
")",
")",
".",
"sum",
"(",
")",
"# Voicing recall = fraction of voiced frames according the reference that",
"# are declared as voiced by the estimate",
"if",
"TP",
"+",
"FN",
"==",
"0",
":",
"vx_recall",
"=",
"0.",
"else",
":",
"vx_recall",
"=",
"TP",
"/",
"float",
"(",
"TP",
"+",
"FN",
")",
"# Voicing false alarm = fraction of unvoiced frames according to the",
"# reference that are declared as voiced by the estimate",
"if",
"FP",
"+",
"TN",
"==",
"0",
":",
"vx_false_alm",
"=",
"0.",
"else",
":",
"vx_false_alm",
"=",
"FP",
"/",
"float",
"(",
"FP",
"+",
"TN",
")",
"return",
"vx_recall",
",",
"vx_false_alm"
]
| Compute the voicing recall and false alarm rates given two voicing
indicator sequences, one as reference (truth) and the other as the estimate
(prediction). The sequences must be of the same length.
Examples
--------
>>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')
>>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')
>>> (ref_v, ref_c,
... est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time,
... ref_freq,
... est_time,
... est_freq)
>>> recall, false_alarm = mir_eval.melody.voicing_measures(ref_v,
... est_v)
Parameters
----------
ref_voicing : np.ndarray
Reference boolean voicing array
est_voicing : np.ndarray
Estimated boolean voicing array
Returns
-------
vx_recall : float
Voicing recall rate, the fraction of voiced frames in ref
indicated as voiced in est
vx_false_alarm : float
Voicing false alarm rate, the fraction of unvoiced frames in ref
indicated as voiced in est | [
"Compute",
"the",
"voicing",
"recall",
"and",
"false",
"alarm",
"rates",
"given",
"two",
"voicing",
"indicator",
"sequences",
"one",
"as",
"reference",
"(",
"truth",
")",
"and",
"the",
"other",
"as",
"the",
"estimate",
"(",
"prediction",
")",
".",
"The",
"sequences",
"must",
"be",
"of",
"the",
"same",
"length",
"."
]
| python | train | 35.028986 |
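A tiny numeric check of the definitions above, with exactly one TP, FN, FP and TN frame; it assumes mir_eval is importable, as in the docstring's own examples.

    import numpy as np
    import mir_eval

    ref_v = np.array([True, True, False, False])
    est_v = np.array([True, False, True, False])

    recall, false_alarm = mir_eval.melody.voicing_measures(ref_v, est_v)
    print(recall, false_alarm)   # 0.5 0.5 -> TP/(TP+FN) and FP/(FP+TN) with all counts equal to 1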
PmagPy/PmagPy | pmagpy/builder2.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/builder2.py#L110-L119 | def find_or_create_by_name(self, item_name, items_list, item_type):
"""
See if item with item_name exists in item_list.
If not, create that item.
Either way, return an item of type item_type.
"""
item = self.find_by_name(item_name, items_list)
if not item:
item = self.data_lists[item_type][2](item_name, None)
return item | [
"def",
"find_or_create_by_name",
"(",
"self",
",",
"item_name",
",",
"items_list",
",",
"item_type",
")",
":",
"item",
"=",
"self",
".",
"find_by_name",
"(",
"item_name",
",",
"items_list",
")",
"if",
"not",
"item",
":",
"item",
"=",
"self",
".",
"data_lists",
"[",
"item_type",
"]",
"[",
"2",
"]",
"(",
"item_name",
",",
"None",
")",
"return",
"item"
]
| See if item with item_name exists in item_list.
If not, create that item.
Either way, return an item of type item_type. | [
"See",
"if",
"item",
"with",
"item_name",
"exists",
"in",
"item_list",
".",
"If",
"not",
"create",
"that",
"item",
".",
"Either",
"way",
"return",
"an",
"item",
"of",
"type",
"item_type",
"."
]
| python | train | 38.9 |
scott-griffiths/bitstring | bitstring.py | https://github.com/scott-griffiths/bitstring/blob/ab40ae7f0b43fe223a39b63cbc0529b09f3ef653/bitstring.py#L1472-L1477 | def _readintbe(self, length, start):
"""Read bits and interpret as a big-endian signed int."""
if length % 8:
raise InterpretError("Big-endian integers must be whole-byte. "
"Length = {0} bits.", length)
return self._readint(length, start) | [
"def",
"_readintbe",
"(",
"self",
",",
"length",
",",
"start",
")",
":",
"if",
"length",
"%",
"8",
":",
"raise",
"InterpretError",
"(",
"\"Big-endian integers must be whole-byte. \"",
"\"Length = {0} bits.\"",
",",
"length",
")",
"return",
"self",
".",
"_readint",
"(",
"length",
",",
"start",
")"
]
| Read bits and interpret as a big-endian signed int. | [
"Read",
"bits",
"and",
"interpret",
"as",
"a",
"big",
"-",
"endian",
"signed",
"int",
"."
]
| python | train | 50.5 |
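This private reader backs the public big-endian integer interpretations; a hedged sketch using bitstring's documented intbe property and read token, with illustrative values.

    import bitstring

    s = bitstring.BitArray(hex='0xfff0')
    print(s.intbe)                      # -16: all 16 bits, big-endian, signed

    stream = bitstring.ConstBitStream(hex='0x1234abcd')
    print(stream.read('intbe:16'))      # 4660 (0x1234); a non-whole-byte length would raise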
GNS3/gns3-server | gns3server/compute/dynamips/__init__.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/__init__.py#L328-L395 | def create_nio(self, node, nio_settings):
"""
Creates a new NIO.
:param node: Dynamips node instance
:param nio_settings: information to create the NIO
:returns: a NIO object
"""
nio = None
if nio_settings["type"] == "nio_udp":
lport = nio_settings["lport"]
rhost = nio_settings["rhost"]
rport = nio_settings["rport"]
try:
info = socket.getaddrinfo(rhost, rport, socket.AF_UNSPEC, socket.SOCK_DGRAM, 0, socket.AI_PASSIVE)
if not info:
raise DynamipsError("getaddrinfo returns an empty list on {}:{}".format(rhost, rport))
for res in info:
af, socktype, proto, _, sa = res
with socket.socket(af, socktype, proto) as sock:
sock.connect(sa)
except OSError as e:
raise DynamipsError("Could not create an UDP connection to {}:{}: {}".format(rhost, rport, e))
nio = NIOUDP(node, lport, rhost, rport, nio_settings.get("filters", {}))
elif nio_settings["type"] == "nio_generic_ethernet":
ethernet_device = nio_settings["ethernet_device"]
if sys.platform.startswith("win"):
# replace the interface name by the GUID on Windows
windows_interfaces = interfaces()
npf_interface = None
for interface in windows_interfaces:
if interface["name"] == ethernet_device:
npf_interface = interface["id"]
if not npf_interface:
raise DynamipsError("Could not find interface {} on this host".format(ethernet_device))
else:
ethernet_device = npf_interface
if not is_interface_up(ethernet_device):
raise aiohttp.web.HTTPConflict(text="Ethernet interface {} is down".format(ethernet_device))
nio = NIOGenericEthernet(node.hypervisor, ethernet_device)
elif nio_settings["type"] == "nio_linux_ethernet":
if sys.platform.startswith("win"):
raise DynamipsError("This NIO type is not supported on Windows")
ethernet_device = nio_settings["ethernet_device"]
nio = NIOLinuxEthernet(node.hypervisor, ethernet_device)
elif nio_settings["type"] == "nio_tap":
tap_device = nio_settings["tap_device"]
nio = NIOTAP(node.hypervisor, tap_device)
if not is_interface_up(tap_device):
# test after the TAP interface has been created (if it doesn't exist yet)
raise aiohttp.web.HTTPConflict(text="TAP interface {} is down".format(tap_device))
elif nio_settings["type"] == "nio_unix":
local_file = nio_settings["local_file"]
remote_file = nio_settings["remote_file"]
nio = NIOUNIX(node.hypervisor, local_file, remote_file)
elif nio_settings["type"] == "nio_vde":
control_file = nio_settings["control_file"]
local_file = nio_settings["local_file"]
nio = NIOVDE(node.hypervisor, control_file, local_file)
elif nio_settings["type"] == "nio_null":
nio = NIONull(node.hypervisor)
else:
raise aiohttp.web.HTTPConflict(text="NIO of type {} is not supported".format(nio_settings["type"]))
yield from nio.create()
return nio | [
"def",
"create_nio",
"(",
"self",
",",
"node",
",",
"nio_settings",
")",
":",
"nio",
"=",
"None",
"if",
"nio_settings",
"[",
"\"type\"",
"]",
"==",
"\"nio_udp\"",
":",
"lport",
"=",
"nio_settings",
"[",
"\"lport\"",
"]",
"rhost",
"=",
"nio_settings",
"[",
"\"rhost\"",
"]",
"rport",
"=",
"nio_settings",
"[",
"\"rport\"",
"]",
"try",
":",
"info",
"=",
"socket",
".",
"getaddrinfo",
"(",
"rhost",
",",
"rport",
",",
"socket",
".",
"AF_UNSPEC",
",",
"socket",
".",
"SOCK_DGRAM",
",",
"0",
",",
"socket",
".",
"AI_PASSIVE",
")",
"if",
"not",
"info",
":",
"raise",
"DynamipsError",
"(",
"\"getaddrinfo returns an empty list on {}:{}\"",
".",
"format",
"(",
"rhost",
",",
"rport",
")",
")",
"for",
"res",
"in",
"info",
":",
"af",
",",
"socktype",
",",
"proto",
",",
"_",
",",
"sa",
"=",
"res",
"with",
"socket",
".",
"socket",
"(",
"af",
",",
"socktype",
",",
"proto",
")",
"as",
"sock",
":",
"sock",
".",
"connect",
"(",
"sa",
")",
"except",
"OSError",
"as",
"e",
":",
"raise",
"DynamipsError",
"(",
"\"Could not create an UDP connection to {}:{}: {}\"",
".",
"format",
"(",
"rhost",
",",
"rport",
",",
"e",
")",
")",
"nio",
"=",
"NIOUDP",
"(",
"node",
",",
"lport",
",",
"rhost",
",",
"rport",
",",
"nio_settings",
".",
"get",
"(",
"\"filters\"",
",",
"{",
"}",
")",
")",
"elif",
"nio_settings",
"[",
"\"type\"",
"]",
"==",
"\"nio_generic_ethernet\"",
":",
"ethernet_device",
"=",
"nio_settings",
"[",
"\"ethernet_device\"",
"]",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"\"win\"",
")",
":",
"# replace the interface name by the GUID on Windows",
"windows_interfaces",
"=",
"interfaces",
"(",
")",
"npf_interface",
"=",
"None",
"for",
"interface",
"in",
"windows_interfaces",
":",
"if",
"interface",
"[",
"\"name\"",
"]",
"==",
"ethernet_device",
":",
"npf_interface",
"=",
"interface",
"[",
"\"id\"",
"]",
"if",
"not",
"npf_interface",
":",
"raise",
"DynamipsError",
"(",
"\"Could not find interface {} on this host\"",
".",
"format",
"(",
"ethernet_device",
")",
")",
"else",
":",
"ethernet_device",
"=",
"npf_interface",
"if",
"not",
"is_interface_up",
"(",
"ethernet_device",
")",
":",
"raise",
"aiohttp",
".",
"web",
".",
"HTTPConflict",
"(",
"text",
"=",
"\"Ethernet interface {} is down\"",
".",
"format",
"(",
"ethernet_device",
")",
")",
"nio",
"=",
"NIOGenericEthernet",
"(",
"node",
".",
"hypervisor",
",",
"ethernet_device",
")",
"elif",
"nio_settings",
"[",
"\"type\"",
"]",
"==",
"\"nio_linux_ethernet\"",
":",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"\"win\"",
")",
":",
"raise",
"DynamipsError",
"(",
"\"This NIO type is not supported on Windows\"",
")",
"ethernet_device",
"=",
"nio_settings",
"[",
"\"ethernet_device\"",
"]",
"nio",
"=",
"NIOLinuxEthernet",
"(",
"node",
".",
"hypervisor",
",",
"ethernet_device",
")",
"elif",
"nio_settings",
"[",
"\"type\"",
"]",
"==",
"\"nio_tap\"",
":",
"tap_device",
"=",
"nio_settings",
"[",
"\"tap_device\"",
"]",
"nio",
"=",
"NIOTAP",
"(",
"node",
".",
"hypervisor",
",",
"tap_device",
")",
"if",
"not",
"is_interface_up",
"(",
"tap_device",
")",
":",
"# test after the TAP interface has been created (if it doesn't exist yet)",
"raise",
"aiohttp",
".",
"web",
".",
"HTTPConflict",
"(",
"text",
"=",
"\"TAP interface {} is down\"",
".",
"format",
"(",
"tap_device",
")",
")",
"elif",
"nio_settings",
"[",
"\"type\"",
"]",
"==",
"\"nio_unix\"",
":",
"local_file",
"=",
"nio_settings",
"[",
"\"local_file\"",
"]",
"remote_file",
"=",
"nio_settings",
"[",
"\"remote_file\"",
"]",
"nio",
"=",
"NIOUNIX",
"(",
"node",
".",
"hypervisor",
",",
"local_file",
",",
"remote_file",
")",
"elif",
"nio_settings",
"[",
"\"type\"",
"]",
"==",
"\"nio_vde\"",
":",
"control_file",
"=",
"nio_settings",
"[",
"\"control_file\"",
"]",
"local_file",
"=",
"nio_settings",
"[",
"\"local_file\"",
"]",
"nio",
"=",
"NIOVDE",
"(",
"node",
".",
"hypervisor",
",",
"control_file",
",",
"local_file",
")",
"elif",
"nio_settings",
"[",
"\"type\"",
"]",
"==",
"\"nio_null\"",
":",
"nio",
"=",
"NIONull",
"(",
"node",
".",
"hypervisor",
")",
"else",
":",
"raise",
"aiohttp",
".",
"web",
".",
"HTTPConflict",
"(",
"text",
"=",
"\"NIO of type {} is not supported\"",
".",
"format",
"(",
"nio_settings",
"[",
"\"type\"",
"]",
")",
")",
"yield",
"from",
"nio",
".",
"create",
"(",
")",
"return",
"nio"
]
| Creates a new NIO.
:param node: Dynamips node instance
:param nio_settings: information to create the NIO
:returns: a NIO object | [
"Creates",
"a",
"new",
"NIO",
"."
]
| python | train | 50.294118 |
Galarzaa90/tibia.py | tibiapy/house.py | https://github.com/Galarzaa90/tibia.py/blob/02ba1a8f1e18177ef5c7dcd44affc8d761d59e12/tibiapy/house.py#L120-L171 | def from_content(cls, content):
"""Parses a Tibia.com response into a House object.
Parameters
----------
content: :class:`str`
HTML content of the page.
Returns
-------
:class:`House`
The house contained in the page, or None if the house doesn't exist.
Raises
------
InvalidContent
If the content is not the house section on Tibia.com
"""
parsed_content = parse_tibiacom_content(content)
image_column, desc_column, *_ = parsed_content.find_all('td')
if "Error" in image_column.text:
return None
image = image_column.find('img')
for br in desc_column.find_all("br"):
br.replace_with("\n")
description = desc_column.text.replace("\u00a0", " ").replace("\n\n","\n")
lines = description.splitlines()
try:
name, beds, info, state, *_ = lines
except ValueError:
raise InvalidContent("content does is not from the house section of Tibia.com")
house = cls(name.strip())
house.image_url = image["src"]
house.id = int(id_regex.search(house.image_url).group(1))
m = bed_regex.search(beds)
if m:
house.type = HouseType.GUILDHALL if m.group("type") in ["guildhall", "clanhall"] else HouseType.HOUSE
beds_word = m.group("beds")
if beds_word == "no":
house.beds = 0
else:
house.beds = parse_number_words(beds_word)
m = info_regex.search(info)
if m:
house.world = m.group("world")
house.rent = int(m.group("rent"))
house.size = int(m.group("size"))
house._parse_status(state)
return house | [
"def",
"from_content",
"(",
"cls",
",",
"content",
")",
":",
"parsed_content",
"=",
"parse_tibiacom_content",
"(",
"content",
")",
"image_column",
",",
"desc_column",
",",
"",
"*",
"_",
"=",
"parsed_content",
".",
"find_all",
"(",
"'td'",
")",
"if",
"\"Error\"",
"in",
"image_column",
".",
"text",
":",
"return",
"None",
"image",
"=",
"image_column",
".",
"find",
"(",
"'img'",
")",
"for",
"br",
"in",
"desc_column",
".",
"find_all",
"(",
"\"br\"",
")",
":",
"br",
".",
"replace_with",
"(",
"\"\\n\"",
")",
"description",
"=",
"desc_column",
".",
"text",
".",
"replace",
"(",
"\"\\u00a0\"",
",",
"\" \"",
")",
".",
"replace",
"(",
"\"\\n\\n\"",
",",
"\"\\n\"",
")",
"lines",
"=",
"description",
".",
"splitlines",
"(",
")",
"try",
":",
"name",
",",
"beds",
",",
"info",
",",
"state",
",",
"",
"*",
"_",
"=",
"lines",
"except",
"ValueError",
":",
"raise",
"InvalidContent",
"(",
"\"content does is not from the house section of Tibia.com\"",
")",
"house",
"=",
"cls",
"(",
"name",
".",
"strip",
"(",
")",
")",
"house",
".",
"image_url",
"=",
"image",
"[",
"\"src\"",
"]",
"house",
".",
"id",
"=",
"int",
"(",
"id_regex",
".",
"search",
"(",
"house",
".",
"image_url",
")",
".",
"group",
"(",
"1",
")",
")",
"m",
"=",
"bed_regex",
".",
"search",
"(",
"beds",
")",
"if",
"m",
":",
"house",
".",
"type",
"=",
"HouseType",
".",
"GUILDHALL",
"if",
"m",
".",
"group",
"(",
"\"type\"",
")",
"in",
"[",
"\"guildhall\"",
",",
"\"clanhall\"",
"]",
"else",
"HouseType",
".",
"HOUSE",
"beds_word",
"=",
"m",
".",
"group",
"(",
"\"beds\"",
")",
"if",
"beds_word",
"==",
"\"no\"",
":",
"house",
".",
"beds",
"=",
"0",
"else",
":",
"house",
".",
"beds",
"=",
"parse_number_words",
"(",
"beds_word",
")",
"m",
"=",
"info_regex",
".",
"search",
"(",
"info",
")",
"if",
"m",
":",
"house",
".",
"world",
"=",
"m",
".",
"group",
"(",
"\"world\"",
")",
"house",
".",
"rent",
"=",
"int",
"(",
"m",
".",
"group",
"(",
"\"rent\"",
")",
")",
"house",
".",
"size",
"=",
"int",
"(",
"m",
".",
"group",
"(",
"\"size\"",
")",
")",
"house",
".",
"_parse_status",
"(",
"state",
")",
"return",
"house"
]
| Parses a Tibia.com response into a House object.
Parameters
----------
content: :class:`str`
HTML content of the page.
Returns
-------
:class:`House`
The house contained in the page, or None if the house doesn't exist.
Raises
------
InvalidContent
If the content is not the house section on Tibia.com | [
"Parses",
"a",
"Tibia",
".",
"com",
"response",
"into",
"a",
"House",
"object",
"."
]
| python | train | 33.846154 |
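A hedged usage sketch: fetch a house page and parse it. The URL and its query parameters are illustrative, and House is assumed to be re-exported at the tibiapy package level.

    import requests
    from tibiapy import House   # assumed package-level export

    url = ('https://www.tibia.com/community/?subtopic=houses'
           '&page=view&houseid=55302&world=Antica')              # illustrative house/world
    house = House.from_content(requests.get(url).text)
    if house is not None:
        print(house.name, house.world, house.rent, house.size)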
trendels/rhino | rhino/mapper.py | https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/mapper.py#L629-L673 | def path(self, target, args, kw):
"""Build a URL path fragment for a resource or route.
Possible values for `target`:
A string that does not start with a '.' and does not contain ':'.
: Looks up the route of the same name on this mapper and returns it's
path.
A string of the form 'a:b', 'a:b:c', etc.
: Follows the route to nested mappers by splitting off consecutive
segments. Returns the path of the route found by looking up the
final segment on the last mapper.
A `Route` object
: Returns the path for the route.
A resource that was added previously
: Looks up the first route that points to this resource and
returns its path.
"""
if type(target) in string_types:
if ':' in target:
# Build path a nested route name
prefix, rest = target.split(':', 1)
route = self.named_routes[prefix]
prefix_params = route._pop_params(args, kw)
prefix_path = route.path([], prefix_params)
next_mapper = route.resource
return prefix_path + next_mapper.path(rest, args, kw)
else:
# Build path for a named route
return self.named_routes[target].path(args, kw)
elif isinstance(target, Route):
# Build path for a route instance, used by build_url('.')
for route in self.routes:
if route is target:
return route.path(args, kw)
raise InvalidArgumentError("Route '%s' not found in this %s object." % (target, self.__class__.__name__))
else:
# Build path for resource by object id
target_id = id(target)
if target_id in self._lookup:
return self._lookup[target_id].path(args, kw)
raise InvalidArgumentError("No Route found for target '%s' in this %s object." % (target, self.__class__.__name__)) | [
"def",
"path",
"(",
"self",
",",
"target",
",",
"args",
",",
"kw",
")",
":",
"if",
"type",
"(",
"target",
")",
"in",
"string_types",
":",
"if",
"':'",
"in",
"target",
":",
"# Build path a nested route name",
"prefix",
",",
"rest",
"=",
"target",
".",
"split",
"(",
"':'",
",",
"1",
")",
"route",
"=",
"self",
".",
"named_routes",
"[",
"prefix",
"]",
"prefix_params",
"=",
"route",
".",
"_pop_params",
"(",
"args",
",",
"kw",
")",
"prefix_path",
"=",
"route",
".",
"path",
"(",
"[",
"]",
",",
"prefix_params",
")",
"next_mapper",
"=",
"route",
".",
"resource",
"return",
"prefix_path",
"+",
"next_mapper",
".",
"path",
"(",
"rest",
",",
"args",
",",
"kw",
")",
"else",
":",
"# Build path for a named route",
"return",
"self",
".",
"named_routes",
"[",
"target",
"]",
".",
"path",
"(",
"args",
",",
"kw",
")",
"elif",
"isinstance",
"(",
"target",
",",
"Route",
")",
":",
"# Build path for a route instance, used by build_url('.')",
"for",
"route",
"in",
"self",
".",
"routes",
":",
"if",
"route",
"is",
"target",
":",
"return",
"route",
".",
"path",
"(",
"args",
",",
"kw",
")",
"raise",
"InvalidArgumentError",
"(",
"\"Route '%s' not found in this %s object.\"",
"%",
"(",
"target",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"else",
":",
"# Build path for resource by object id",
"target_id",
"=",
"id",
"(",
"target",
")",
"if",
"target_id",
"in",
"self",
".",
"_lookup",
":",
"return",
"self",
".",
"_lookup",
"[",
"target_id",
"]",
".",
"path",
"(",
"args",
",",
"kw",
")",
"raise",
"InvalidArgumentError",
"(",
"\"No Route found for target '%s' in this %s object.\"",
"%",
"(",
"target",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")"
]
| Build a URL path fragment for a resource or route.
Possible values for `target`:
A string that does not start with a '.' and does not contain ':'.
        : Looks up the route of the same name on this mapper and returns its
path.
A string of the form 'a:b', 'a:b:c', etc.
: Follows the route to nested mappers by splitting off consecutive
segments. Returns the path of the route found by looking up the
final segment on the last mapper.
A `Route` object
: Returns the path for the route.
A resource that was added previously
: Looks up the first route that points to this resource and
returns its path. | [
"Build",
"a",
"URL",
"path",
"fragment",
"for",
"a",
"resource",
"or",
"route",
"."
]
| python | train | 44.644444 |
south-coast-science/scs_core | src/scs_core/gas/pid_datum.py | https://github.com/south-coast-science/scs_core/blob/a4152b0bbed6acbbf257e1bba6a912f6ebe578e5/src/scs_core/gas/pid_datum.py#L44-L58 | def __we_c(cls, calib, tc, temp, we_v):
"""
Compute weC from sensor temperature compensation of weV
"""
offset_v = calib.pid_elc_mv / 1000.0
response_v = we_v - offset_v # remove electronic zero
response_c = tc.correct(temp, response_v) # correct the response component
if response_c is None:
return None
we_c = response_c + offset_v # replace electronic zero
return we_c | [
"def",
"__we_c",
"(",
"cls",
",",
"calib",
",",
"tc",
",",
"temp",
",",
"we_v",
")",
":",
"offset_v",
"=",
"calib",
".",
"pid_elc_mv",
"/",
"1000.0",
"response_v",
"=",
"we_v",
"-",
"offset_v",
"# remove electronic zero",
"response_c",
"=",
"tc",
".",
"correct",
"(",
"temp",
",",
"response_v",
")",
"# correct the response component",
"if",
"response_c",
"is",
"None",
":",
"return",
"None",
"we_c",
"=",
"response_c",
"+",
"offset_v",
"# replace electronic zero",
"return",
"we_c"
]
| Compute weC from sensor temperature compensation of weV | [
"Compute",
"weC",
"from",
"sensor",
"temperature",
"compensation",
"of",
"weV"
]
| python | train | 31.8 |
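
The compensation above is an offset-remove / correct / offset-restore pattern, so it can be sanity-checked in isolation. A minimal sketch with made-up numbers (the real calib and tc objects come from scs_core and are not reproduced here; the 1.05 gain merely stands in for tc.correct):

pid_elc_mv = 280.0             # hypothetical electronic-zero calibration, mV
offset_v = pid_elc_mv / 1000.0 # 0.28 V
we_v = 0.291                   # raw working-electrode voltage, V
response_v = we_v - offset_v   # 0.011 V of actual sensor response
response_c = response_v * 1.05 # stand-in for tc.correct(temp, response_v)
we_c = response_c + offset_v   # restore the electronic zero
print(round(we_c, 5))          # 0.29155
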
sentinel-hub/sentinelhub-py | sentinelhub/geometry.py | https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/geometry.py#L206-L212 | def middle(self):
""" Returns the middle point of the bounding box
:return: middle point
:rtype: (float, float)
"""
return (self.min_x + self.max_x) / 2, (self.min_y + self.max_y) / 2 | [
"def",
"middle",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"min_x",
"+",
"self",
".",
"max_x",
")",
"/",
"2",
",",
"(",
"self",
".",
"min_y",
"+",
"self",
".",
"max_y",
")",
"/",
"2"
]
| Returns the middle point of the bounding box
:return: middle point
:rtype: (float, float) | [
"Returns",
"the",
"middle",
"point",
"of",
"the",
"bounding",
"box"
]
| python | train | 31.142857 |
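
A quick usage sketch, assuming the usual sentinelhub imports and the (min_x, min_y, max_x, max_y) coordinate order used by the class; whether middle is exposed as a property or as a plain method depends on the release, recent versions make it a property:

from sentinelhub import BBox, CRS   # assumed public imports

bbox = BBox((14.00, 45.00, 14.20, 45.10), crs=CRS.WGS84)
mid_x, mid_y = bbox.middle          # midpoint of the x and y extents
print(mid_x, mid_y)                 # roughly 14.1 45.05
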
nagius/snmp_passpersist | snmp_passpersist.py | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L222-L224 | def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label) | [
"def",
"add_ip",
"(",
"self",
",",
"oid",
",",
"value",
",",
"label",
"=",
"None",
")",
":",
"self",
".",
"add_oid_entry",
"(",
"oid",
",",
"'IPADDRESS'",
",",
"value",
",",
"label",
"=",
"label",
")"
]
| Short helper to add an IP address value to the MIB subtree. | [
"Short",
"helper",
"to",
"add",
"an",
"IP",
"address",
"value",
"to",
"the",
"MIB",
"subtree",
"."
]
| python | train | 53.333333 |
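
A hedged usage sketch following the project's documented pass-persist pattern; the base OID, the relative OID and the polling interval are placeholders, and the start() call signature is an assumption based on the project's examples:

import snmp_passpersist as snmp

pp = snmp.PassPersist(".1.3.6.1.3.53.8")        # placeholder base OID

def update():
    # OIDs passed to the helpers are relative to the base OID above
    pp.add_ip("1.0", "192.0.2.10", label="management address")

pp.start(update, 30)                             # refresh every 30 seconds
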
ksbg/sparklanes | sparklanes/_submit/submit.py | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_submit/submit.py#L127-L155 | def __validate_and_fix_spark_args(spark_args):
"""
Prepares spark arguments. In the command-line script, they are passed as for example
`-s master=local[4] deploy-mode=client verbose`, which would be passed to spark-submit as
`--master local[4] --deploy-mode client --verbose`
Parameters
----------
spark_args (List): List of spark arguments
Returns
-------
fixed_args (List): List of fixed and validated spark arguments
"""
pattern = re.compile(r'[\w\-_]+=.+')
fixed_args = []
for arg in spark_args:
if arg not in SPARK_SUBMIT_FLAGS:
if not pattern.match(arg):
raise SystemExit('Spark argument `%s` does not seem to be in the correct format '
'`ARG_NAME=ARG_VAL`, and is also not recognized to be one of the'
'valid spark-submit flags (%s).' % (arg, str(SPARK_SUBMIT_FLAGS)))
eq_pos = arg.find('=')
fixed_args.append('--' + arg[:eq_pos])
fixed_args.append(arg[eq_pos + 1:])
else:
fixed_args.append('--' + arg)
return fixed_args | [
"def",
"__validate_and_fix_spark_args",
"(",
"spark_args",
")",
":",
"pattern",
"=",
"re",
".",
"compile",
"(",
"r'[\\w\\-_]+=.+'",
")",
"fixed_args",
"=",
"[",
"]",
"for",
"arg",
"in",
"spark_args",
":",
"if",
"arg",
"not",
"in",
"SPARK_SUBMIT_FLAGS",
":",
"if",
"not",
"pattern",
".",
"match",
"(",
"arg",
")",
":",
"raise",
"SystemExit",
"(",
"'Spark argument `%s` does not seem to be in the correct format '",
"'`ARG_NAME=ARG_VAL`, and is also not recognized to be one of the'",
"'valid spark-submit flags (%s).'",
"%",
"(",
"arg",
",",
"str",
"(",
"SPARK_SUBMIT_FLAGS",
")",
")",
")",
"eq_pos",
"=",
"arg",
".",
"find",
"(",
"'='",
")",
"fixed_args",
".",
"append",
"(",
"'--'",
"+",
"arg",
"[",
":",
"eq_pos",
"]",
")",
"fixed_args",
".",
"append",
"(",
"arg",
"[",
"eq_pos",
"+",
"1",
":",
"]",
")",
"else",
":",
"fixed_args",
".",
"append",
"(",
"'--'",
"+",
"arg",
")",
"return",
"fixed_args"
]
| Prepares spark arguments. In the command-line script, they are passed as for example
`-s master=local[4] deploy-mode=client verbose`, which would be passed to spark-submit as
`--master local[4] --deploy-mode client --verbose`
Parameters
----------
spark_args (List): List of spark arguments
Returns
-------
fixed_args (List): List of fixed and validated spark arguments | [
"Prepares",
"spark",
"arguments",
".",
"In",
"the",
"command",
"-",
"line",
"script",
"they",
"are",
"passed",
"as",
"for",
"example",
"-",
"s",
"master",
"=",
"local",
"[",
"4",
"]",
"deploy",
"-",
"mode",
"=",
"client",
"verbose",
"which",
"would",
"be",
"passed",
"to",
"spark",
"-",
"submit",
"as",
"--",
"master",
"local",
"[",
"4",
"]",
"--",
"deploy",
"-",
"mode",
"client",
"--",
"verbose"
]
| python | train | 38.655172 |
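
Because the helper is pure string handling, its effect is easy to show with the docstring's own example (assuming "verbose" is listed in SPARK_SUBMIT_FLAGS):

spark_args = ["master=local[4]", "deploy-mode=client", "verbose"]
# __validate_and_fix_spark_args(spark_args) would return:
# ["--master", "local[4]", "--deploy-mode", "client", "--verbose"]
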
mitsei/dlkit | dlkit/json_/authorization/queries.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authorization/queries.py#L230-L240 | def match_agent_id(self, agent_id, match):
"""Matches the agent identified by the given ``Id``.
arg: agent_id (osid.id.Id): the Id of the ``Agent``
arg: match (boolean): ``true`` if a positive match, ``false``
for a negative match
raise: NullArgument - ``agent_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
self._add_match('agentId', str(agent_id), bool(match)) | [
"def",
"match_agent_id",
"(",
"self",
",",
"agent_id",
",",
"match",
")",
":",
"self",
".",
"_add_match",
"(",
"'agentId'",
",",
"str",
"(",
"agent_id",
")",
",",
"bool",
"(",
"match",
")",
")"
]
| Matches the agent identified by the given ``Id``.
arg: agent_id (osid.id.Id): the Id of the ``Agent``
arg: match (boolean): ``true`` if a positive match, ``false``
for a negative match
raise: NullArgument - ``agent_id`` is ``null``
*compliance: mandatory -- This method must be implemented.* | [
"Matches",
"the",
"agent",
"identified",
"by",
"the",
"given",
"Id",
"."
]
| python | train | 42.454545 |
brandjon/simplestruct | simplestruct/type.py | https://github.com/brandjon/simplestruct/blob/f2bba77278838b5904fd72b35741da162f337c37/simplestruct/type.py#L61-L106 | def checktype_seq(self, seq, kind, *, unique=False, **kargs):
"""Raise TypeError if seq is not a sequence of elements satisfying
kind. Optionally require elements to be unique.
As a special case, a string is considered to be an atomic value
rather than a sequence of single-character strings. (Thus,
checktype_seq('foo', str) will fail.)
"""
exp = self.str_kind(kind)
# Make sure we have a sequence.
try:
iterator = iter(seq)
# Generators aren't sequences. This avoids a confusing bug
# where we consume a generator by type-checking it, and leave
# only an exhausted iterator for the user code.
len(seq)
except TypeError:
got = self.str_valtype(seq)
raise TypeError('Expected sequence of {}; '
'got {} instead of sequence'.format(exp, got))
if isinstance(seq, str):
raise TypeError('Expected sequence of {}; got single str '
'(strings do not count as character '
'sequences)'.format(exp))
for i, item in enumerate(iterator):
# Depend on checktype() to check individual elements,
# but generate an error message that includes the position
# of the failure.
try:
self.checktype(item, kind, **kargs)
except TypeError:
got = self.str_valtype(item)
raise TypeError('Expected sequence of {}; '
'got sequence with {} at position {}'.format(
exp, got, i)) from None
if unique:
seen = []
for i, item in enumerate(seq):
if item in seen:
raise TypeError('Duplicate element {} at '
'position {}'.format(repr(item), i))
seen.append(item) | [
"def",
"checktype_seq",
"(",
"self",
",",
"seq",
",",
"kind",
",",
"*",
",",
"unique",
"=",
"False",
",",
"*",
"*",
"kargs",
")",
":",
"exp",
"=",
"self",
".",
"str_kind",
"(",
"kind",
")",
"# Make sure we have a sequence.",
"try",
":",
"iterator",
"=",
"iter",
"(",
"seq",
")",
"# Generators aren't sequences. This avoids a confusing bug",
"# where we consume a generator by type-checking it, and leave",
"# only an exhausted iterator for the user code.",
"len",
"(",
"seq",
")",
"except",
"TypeError",
":",
"got",
"=",
"self",
".",
"str_valtype",
"(",
"seq",
")",
"raise",
"TypeError",
"(",
"'Expected sequence of {}; '",
"'got {} instead of sequence'",
".",
"format",
"(",
"exp",
",",
"got",
")",
")",
"if",
"isinstance",
"(",
"seq",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"'Expected sequence of {}; got single str '",
"'(strings do not count as character '",
"'sequences)'",
".",
"format",
"(",
"exp",
")",
")",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"iterator",
")",
":",
"# Depend on checktype() to check individual elements,",
"# but generate an error message that includes the position",
"# of the failure.",
"try",
":",
"self",
".",
"checktype",
"(",
"item",
",",
"kind",
",",
"*",
"*",
"kargs",
")",
"except",
"TypeError",
":",
"got",
"=",
"self",
".",
"str_valtype",
"(",
"item",
")",
"raise",
"TypeError",
"(",
"'Expected sequence of {}; '",
"'got sequence with {} at position {}'",
".",
"format",
"(",
"exp",
",",
"got",
",",
"i",
")",
")",
"from",
"None",
"if",
"unique",
":",
"seen",
"=",
"[",
"]",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"seq",
")",
":",
"if",
"item",
"in",
"seen",
":",
"raise",
"TypeError",
"(",
"'Duplicate element {} at '",
"'position {}'",
".",
"format",
"(",
"repr",
"(",
"item",
")",
",",
"i",
")",
")",
"seen",
".",
"append",
"(",
"item",
")"
]
| Raise TypeError if seq is not a sequence of elements satisfying
kind. Optionally require elements to be unique.
As a special case, a string is considered to be an atomic value
rather than a sequence of single-character strings. (Thus,
checktype_seq('foo', str) will fail.) | [
"Raise",
"TypeError",
"if",
"seq",
"is",
"not",
"a",
"sequence",
"of",
"elements",
"satisfying",
"kind",
".",
"Optionally",
"require",
"elements",
"to",
"be",
"unique",
".",
"As",
"a",
"special",
"case",
"a",
"string",
"is",
"considered",
"to",
"be",
"an",
"atomic",
"value",
"rather",
"than",
"a",
"sequence",
"of",
"single",
"-",
"character",
"strings",
".",
"(",
"Thus",
"checktype_seq",
"(",
"foo",
"str",
")",
"will",
"fail",
".",
")"
]
| python | train | 43.304348 |
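
The behaviour can be summarised with a few calls; checker below stands for whatever object exposes this method (its construction is not shown in the snippet, so treat the name as an assumption):

checker.checktype_seq([1, 2, 3], int)                   # ok: sequence of ints
checker.checktype_seq([1.0, 2.5], float, unique=True)   # ok: unique floats
checker.checktype_seq('foo', str)                       # TypeError: a single str is not a character sequence
checker.checktype_seq([1, 2, 1], int, unique=True)      # TypeError: duplicate element at position 2
checker.checktype_seq((x for x in [1, 2]), int)         # TypeError: generators are not sequences
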
aliyun/aliyun-log-python-sdk | aliyun/log/logclient_operator.py | https://github.com/aliyun/aliyun-log-python-sdk/blob/ac383db0a16abf1e5ef7df36074374184b43516e/aliyun/log/logclient_operator.py#L780-L889 | def transform_data(from_client, from_project, from_logstore, from_time,
to_time=None,
to_client=None, to_project=None, to_logstore=None,
shard_list=None,
config=None,
batch_size=None, compress=None,
cg_name=None, c_name=None,
cg_heartbeat_interval=None, cg_data_fetch_interval=None, cg_in_order=None,
cg_worker_pool_size=None
):
"""
transform data from one logstore to another one (could be the same or in different region), the time is log received time on server side.
"""
if not config:
logger.info("transform_data: config is not configured, use copy data by default.")
return copy_data(from_client, from_project, from_logstore, from_time, to_time=to_time,
to_client=to_client, to_project=to_project, to_logstore=to_logstore,
shard_list=shard_list,
batch_size=batch_size, compress=compress)
to_client = to_client or from_client
# increase the timeout to 2 min at least
from_client.timeout = max(from_client.timeout, 120)
to_client.timeout = max(to_client.timeout, 120)
to_project = to_project or from_project
to_logstore = to_logstore or from_logstore
if not cg_name:
# batch mode
to_time = to_time or "end"
cpu_count = multiprocessing.cpu_count() * 2
shards = from_client.list_shards(from_project, from_logstore).get_shards_info()
current_shards = [str(shard['shardID']) for shard in shards]
target_shards = _parse_shard_list(shard_list, current_shards)
worker_size = min(cpu_count, len(target_shards))
result = dict()
total_count = 0
total_removed = 0
with ProcessPoolExecutor(max_workers=worker_size) as pool:
futures = [pool.submit(transform_worker, from_client, from_project, from_logstore, shard,
from_time, to_time, config,
to_client, to_project, to_logstore,
batch_size=batch_size, compress=compress)
for shard in target_shards]
for future in as_completed(futures):
if future.exception():
logger.error("get error when transforming data: {0}".format(future.exception()))
else:
partition, count, removed, processed, failed = future.result()
total_count += count
total_removed += removed
if count:
result[partition] = {"total_count": count, "transformed":
processed, "removed": removed, "failed": failed}
return LogResponse({}, {"total_count": total_count, "shards": result})
else:
# consumer group mode
c_name = c_name or "transform_data_{0}".format(multiprocessing.current_process().pid)
cg_heartbeat_interval = cg_heartbeat_interval or 20
cg_data_fetch_interval = cg_data_fetch_interval or 2
cg_in_order = False if cg_in_order is None else cg_in_order
cg_worker_pool_size = cg_worker_pool_size or 3
option = LogHubConfig(from_client._endpoint, from_client._accessKeyId, from_client._accessKey,
from_project, from_logstore, cg_name,
c_name, cursor_position=CursorPosition.SPECIAL_TIMER_CURSOR,
cursor_start_time=from_time,
cursor_end_time=to_time,
heartbeat_interval=cg_heartbeat_interval, data_fetch_interval=cg_data_fetch_interval,
in_order=cg_in_order,
worker_pool_size=cg_worker_pool_size)
TransformDataConsumer.set_transform_options(config, to_client, to_project, to_logstore)
result = {"total_count": 0, "shards": {}}
l = RLock()
def status_updator(shard_id, count=0, removed=0, processed=0, failed=0):
logger.info("status update is called, shard: {0}, count: {1}, removed: {2}, processed: {3}, failed: {4}".format(shard_id, count, removed, processed, failed))
with l:
result["total_count"] += count
if shard_id in result["shards"]:
data = result["shards"][shard_id]
result["shards"][shard_id] = {"total_count": data["total_count"] + count, "transformed": data["transformed"] + processed, "removed": data["removed"] + removed, "failed": data["failed"] + failed}
else:
result["shards"][shard_id] = {"total_count": count, "transformed": processed, "removed": removed, "failed": failed}
worker = ConsumerWorker(TransformDataConsumer, consumer_option=option, args=(status_updator, ) )
worker.start()
try:
while worker.is_alive():
worker.join(timeout=60)
logger.info("transform_data: worker exit unexpected, try to shutdown it")
worker.shutdown()
except KeyboardInterrupt:
logger.info("transform_data: *** try to exit **** ")
print("try to stop transforming data.")
worker.shutdown()
worker.join(timeout=120)
return LogResponse({}, result) | [
"def",
"transform_data",
"(",
"from_client",
",",
"from_project",
",",
"from_logstore",
",",
"from_time",
",",
"to_time",
"=",
"None",
",",
"to_client",
"=",
"None",
",",
"to_project",
"=",
"None",
",",
"to_logstore",
"=",
"None",
",",
"shard_list",
"=",
"None",
",",
"config",
"=",
"None",
",",
"batch_size",
"=",
"None",
",",
"compress",
"=",
"None",
",",
"cg_name",
"=",
"None",
",",
"c_name",
"=",
"None",
",",
"cg_heartbeat_interval",
"=",
"None",
",",
"cg_data_fetch_interval",
"=",
"None",
",",
"cg_in_order",
"=",
"None",
",",
"cg_worker_pool_size",
"=",
"None",
")",
":",
"if",
"not",
"config",
":",
"logger",
".",
"info",
"(",
"\"transform_data: config is not configured, use copy data by default.\"",
")",
"return",
"copy_data",
"(",
"from_client",
",",
"from_project",
",",
"from_logstore",
",",
"from_time",
",",
"to_time",
"=",
"to_time",
",",
"to_client",
"=",
"to_client",
",",
"to_project",
"=",
"to_project",
",",
"to_logstore",
"=",
"to_logstore",
",",
"shard_list",
"=",
"shard_list",
",",
"batch_size",
"=",
"batch_size",
",",
"compress",
"=",
"compress",
")",
"to_client",
"=",
"to_client",
"or",
"from_client",
"# increase the timeout to 2 min at least",
"from_client",
".",
"timeout",
"=",
"max",
"(",
"from_client",
".",
"timeout",
",",
"120",
")",
"to_client",
".",
"timeout",
"=",
"max",
"(",
"to_client",
".",
"timeout",
",",
"120",
")",
"to_project",
"=",
"to_project",
"or",
"from_project",
"to_logstore",
"=",
"to_logstore",
"or",
"from_logstore",
"if",
"not",
"cg_name",
":",
"# batch mode",
"to_time",
"=",
"to_time",
"or",
"\"end\"",
"cpu_count",
"=",
"multiprocessing",
".",
"cpu_count",
"(",
")",
"*",
"2",
"shards",
"=",
"from_client",
".",
"list_shards",
"(",
"from_project",
",",
"from_logstore",
")",
".",
"get_shards_info",
"(",
")",
"current_shards",
"=",
"[",
"str",
"(",
"shard",
"[",
"'shardID'",
"]",
")",
"for",
"shard",
"in",
"shards",
"]",
"target_shards",
"=",
"_parse_shard_list",
"(",
"shard_list",
",",
"current_shards",
")",
"worker_size",
"=",
"min",
"(",
"cpu_count",
",",
"len",
"(",
"target_shards",
")",
")",
"result",
"=",
"dict",
"(",
")",
"total_count",
"=",
"0",
"total_removed",
"=",
"0",
"with",
"ProcessPoolExecutor",
"(",
"max_workers",
"=",
"worker_size",
")",
"as",
"pool",
":",
"futures",
"=",
"[",
"pool",
".",
"submit",
"(",
"transform_worker",
",",
"from_client",
",",
"from_project",
",",
"from_logstore",
",",
"shard",
",",
"from_time",
",",
"to_time",
",",
"config",
",",
"to_client",
",",
"to_project",
",",
"to_logstore",
",",
"batch_size",
"=",
"batch_size",
",",
"compress",
"=",
"compress",
")",
"for",
"shard",
"in",
"target_shards",
"]",
"for",
"future",
"in",
"as_completed",
"(",
"futures",
")",
":",
"if",
"future",
".",
"exception",
"(",
")",
":",
"logger",
".",
"error",
"(",
"\"get error when transforming data: {0}\"",
".",
"format",
"(",
"future",
".",
"exception",
"(",
")",
")",
")",
"else",
":",
"partition",
",",
"count",
",",
"removed",
",",
"processed",
",",
"failed",
"=",
"future",
".",
"result",
"(",
")",
"total_count",
"+=",
"count",
"total_removed",
"+=",
"removed",
"if",
"count",
":",
"result",
"[",
"partition",
"]",
"=",
"{",
"\"total_count\"",
":",
"count",
",",
"\"transformed\"",
":",
"processed",
",",
"\"removed\"",
":",
"removed",
",",
"\"failed\"",
":",
"failed",
"}",
"return",
"LogResponse",
"(",
"{",
"}",
",",
"{",
"\"total_count\"",
":",
"total_count",
",",
"\"shards\"",
":",
"result",
"}",
")",
"else",
":",
"# consumer group mode",
"c_name",
"=",
"c_name",
"or",
"\"transform_data_{0}\"",
".",
"format",
"(",
"multiprocessing",
".",
"current_process",
"(",
")",
".",
"pid",
")",
"cg_heartbeat_interval",
"=",
"cg_heartbeat_interval",
"or",
"20",
"cg_data_fetch_interval",
"=",
"cg_data_fetch_interval",
"or",
"2",
"cg_in_order",
"=",
"False",
"if",
"cg_in_order",
"is",
"None",
"else",
"cg_in_order",
"cg_worker_pool_size",
"=",
"cg_worker_pool_size",
"or",
"3",
"option",
"=",
"LogHubConfig",
"(",
"from_client",
".",
"_endpoint",
",",
"from_client",
".",
"_accessKeyId",
",",
"from_client",
".",
"_accessKey",
",",
"from_project",
",",
"from_logstore",
",",
"cg_name",
",",
"c_name",
",",
"cursor_position",
"=",
"CursorPosition",
".",
"SPECIAL_TIMER_CURSOR",
",",
"cursor_start_time",
"=",
"from_time",
",",
"cursor_end_time",
"=",
"to_time",
",",
"heartbeat_interval",
"=",
"cg_heartbeat_interval",
",",
"data_fetch_interval",
"=",
"cg_data_fetch_interval",
",",
"in_order",
"=",
"cg_in_order",
",",
"worker_pool_size",
"=",
"cg_worker_pool_size",
")",
"TransformDataConsumer",
".",
"set_transform_options",
"(",
"config",
",",
"to_client",
",",
"to_project",
",",
"to_logstore",
")",
"result",
"=",
"{",
"\"total_count\"",
":",
"0",
",",
"\"shards\"",
":",
"{",
"}",
"}",
"l",
"=",
"RLock",
"(",
")",
"def",
"status_updator",
"(",
"shard_id",
",",
"count",
"=",
"0",
",",
"removed",
"=",
"0",
",",
"processed",
"=",
"0",
",",
"failed",
"=",
"0",
")",
":",
"logger",
".",
"info",
"(",
"\"status update is called, shard: {0}, count: {1}, removed: {2}, processed: {3}, failed: {4}\"",
".",
"format",
"(",
"shard_id",
",",
"count",
",",
"removed",
",",
"processed",
",",
"failed",
")",
")",
"with",
"l",
":",
"result",
"[",
"\"total_count\"",
"]",
"+=",
"count",
"if",
"shard_id",
"in",
"result",
"[",
"\"shards\"",
"]",
":",
"data",
"=",
"result",
"[",
"\"shards\"",
"]",
"[",
"shard_id",
"]",
"result",
"[",
"\"shards\"",
"]",
"[",
"shard_id",
"]",
"=",
"{",
"\"total_count\"",
":",
"data",
"[",
"\"total_count\"",
"]",
"+",
"count",
",",
"\"transformed\"",
":",
"data",
"[",
"\"transformed\"",
"]",
"+",
"processed",
",",
"\"removed\"",
":",
"data",
"[",
"\"removed\"",
"]",
"+",
"removed",
",",
"\"failed\"",
":",
"data",
"[",
"\"failed\"",
"]",
"+",
"failed",
"}",
"else",
":",
"result",
"[",
"\"shards\"",
"]",
"[",
"shard_id",
"]",
"=",
"{",
"\"total_count\"",
":",
"count",
",",
"\"transformed\"",
":",
"processed",
",",
"\"removed\"",
":",
"removed",
",",
"\"failed\"",
":",
"failed",
"}",
"worker",
"=",
"ConsumerWorker",
"(",
"TransformDataConsumer",
",",
"consumer_option",
"=",
"option",
",",
"args",
"=",
"(",
"status_updator",
",",
")",
")",
"worker",
".",
"start",
"(",
")",
"try",
":",
"while",
"worker",
".",
"is_alive",
"(",
")",
":",
"worker",
".",
"join",
"(",
"timeout",
"=",
"60",
")",
"logger",
".",
"info",
"(",
"\"transform_data: worker exit unexpected, try to shutdown it\"",
")",
"worker",
".",
"shutdown",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"logger",
".",
"info",
"(",
"\"transform_data: *** try to exit **** \"",
")",
"print",
"(",
"\"try to stop transforming data.\"",
")",
"worker",
".",
"shutdown",
"(",
")",
"worker",
".",
"join",
"(",
"timeout",
"=",
"120",
")",
"return",
"LogResponse",
"(",
"{",
"}",
",",
"result",
")"
]
| transform data from one logstore to another one (could be the same or in different region), the time is log received time on server side. | [
"transform",
"data",
"from",
"one",
"logstore",
"to",
"another",
"one",
"(",
"could",
"be",
"the",
"same",
"or",
"in",
"different",
"region",
")",
"the",
"time",
"is",
"log",
"received",
"time",
"on",
"server",
"side",
"."
]
| python | train | 48.818182 |
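
A hedged invocation sketch for the batch branch. The LogClient construction follows the SDK's usual signature, but the endpoint, keys and names are placeholders; with config=None the call simply falls back to copy_data, as the first branch above shows:

from aliyun.log import LogClient   # assumed import path

client = LogClient("cn-hangzhou.log.aliyuncs.com", "<access_key_id>", "<access_key>")
res = transform_data(client, "source_project", "source_logstore",
                     from_time="2019-01-01 00:00:00", to_time="end",
                     to_logstore="target_logstore", config=None)
# res is the LogResponse assembled above, wrapping total_count and per-shard stats
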
hanguokai/youku | youku/youku_videos.py | https://github.com/hanguokai/youku/blob/b2df060c7dccfad990bcfa289fff68bb77d1e69b/youku/youku_videos.py#L21-L31 | def find_video_by_id(self, video_id):
"""doc: http://open.youku.com/docs/doc?id=44
"""
url = 'https://openapi.youku.com/v2/videos/show_basic.json'
params = {
'client_id': self.client_id,
'video_id': video_id
}
r = requests.get(url, params=params)
check_error(r)
return r.json() | [
"def",
"find_video_by_id",
"(",
"self",
",",
"video_id",
")",
":",
"url",
"=",
"'https://openapi.youku.com/v2/videos/show_basic.json'",
"params",
"=",
"{",
"'client_id'",
":",
"self",
".",
"client_id",
",",
"'video_id'",
":",
"video_id",
"}",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"params",
"=",
"params",
")",
"check_error",
"(",
"r",
")",
"return",
"r",
".",
"json",
"(",
")"
]
| doc: http://open.youku.com/docs/doc?id=44 | [
"doc",
":",
"http",
":",
"//",
"open",
".",
"youku",
".",
"com",
"/",
"docs",
"/",
"doc?id",
"=",
"44"
]
| python | train | 32.272727 |
markovmodel/msmtools | msmtools/analysis/sparse/committor.py | https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/analysis/sparse/committor.py#L104-L175 | def backward_committor(T, A, B):
r"""Backward committor between given sets.
The backward committor u(x) between sets A and B is the
probability for the chain starting in x to have come from A last
rather than from B.
Parameters
----------
T : (M, M) ndarray
Transition matrix
A : array_like
List of integer state labels for set A
B : array_like
List of integer state labels for set B
Returns
-------
u : (M, ) ndarray
        Vector of backward committor probabilities
Notes
-----
    The backward committor is a solution to the following
boundary-value problem
.. math::
\sum_j K_{ij} \pi_{j} u_{j}=0 for i in X\(A u B) (I)
u_{i}=1 for i \in A (II)
u_{i}=0 for i \in B (III)
with adjoint of the generator matrix K=(D_pi(P-I))'.
"""
X = set(range(T.shape[0]))
A = set(A)
B = set(B)
AB = A.intersection(B)
notAB = X.difference(A).difference(B)
if len(AB) > 0:
raise ValueError("Sets A and B have to be disjoint")
pi = stationary_distribution(T)
L = T - eye(T.shape[0], T.shape[0])
D = diags([pi, ], [0, ])
K = (D.dot(L)).T
"""Assemble left-hand side W for linear system"""
"""Equation (I)"""
W = 1.0 * K
"""Equation (II)"""
W = W.todok()
W[list(A), :] = 0.0
W.tocsr()
W = W + coo_matrix((np.ones(len(A)), (list(A), list(A))), shape=W.shape).tocsr()
"""Equation (III)"""
W = W.todok()
W[list(B), :] = 0.0
W.tocsr()
W = W + coo_matrix((np.ones(len(B)), (list(B), list(B))), shape=W.shape).tocsr()
"""Assemble right-hand side r for linear system"""
"""Equation (I)+(III)"""
r = np.zeros(T.shape[0])
"""Equation (II)"""
r[list(A)] = 1.0
u = spsolve(W, r)
return u | [
"def",
"backward_committor",
"(",
"T",
",",
"A",
",",
"B",
")",
":",
"X",
"=",
"set",
"(",
"range",
"(",
"T",
".",
"shape",
"[",
"0",
"]",
")",
")",
"A",
"=",
"set",
"(",
"A",
")",
"B",
"=",
"set",
"(",
"B",
")",
"AB",
"=",
"A",
".",
"intersection",
"(",
"B",
")",
"notAB",
"=",
"X",
".",
"difference",
"(",
"A",
")",
".",
"difference",
"(",
"B",
")",
"if",
"len",
"(",
"AB",
")",
">",
"0",
":",
"raise",
"ValueError",
"(",
"\"Sets A and B have to be disjoint\"",
")",
"pi",
"=",
"stationary_distribution",
"(",
"T",
")",
"L",
"=",
"T",
"-",
"eye",
"(",
"T",
".",
"shape",
"[",
"0",
"]",
",",
"T",
".",
"shape",
"[",
"0",
"]",
")",
"D",
"=",
"diags",
"(",
"[",
"pi",
",",
"]",
",",
"[",
"0",
",",
"]",
")",
"K",
"=",
"(",
"D",
".",
"dot",
"(",
"L",
")",
")",
".",
"T",
"\"\"\"Assemble left-hand side W for linear system\"\"\"",
"\"\"\"Equation (I)\"\"\"",
"W",
"=",
"1.0",
"*",
"K",
"\"\"\"Equation (II)\"\"\"",
"W",
"=",
"W",
".",
"todok",
"(",
")",
"W",
"[",
"list",
"(",
"A",
")",
",",
":",
"]",
"=",
"0.0",
"W",
".",
"tocsr",
"(",
")",
"W",
"=",
"W",
"+",
"coo_matrix",
"(",
"(",
"np",
".",
"ones",
"(",
"len",
"(",
"A",
")",
")",
",",
"(",
"list",
"(",
"A",
")",
",",
"list",
"(",
"A",
")",
")",
")",
",",
"shape",
"=",
"W",
".",
"shape",
")",
".",
"tocsr",
"(",
")",
"\"\"\"Equation (III)\"\"\"",
"W",
"=",
"W",
".",
"todok",
"(",
")",
"W",
"[",
"list",
"(",
"B",
")",
",",
":",
"]",
"=",
"0.0",
"W",
".",
"tocsr",
"(",
")",
"W",
"=",
"W",
"+",
"coo_matrix",
"(",
"(",
"np",
".",
"ones",
"(",
"len",
"(",
"B",
")",
")",
",",
"(",
"list",
"(",
"B",
")",
",",
"list",
"(",
"B",
")",
")",
")",
",",
"shape",
"=",
"W",
".",
"shape",
")",
".",
"tocsr",
"(",
")",
"\"\"\"Assemble right-hand side r for linear system\"\"\"",
"\"\"\"Equation (I)+(III)\"\"\"",
"r",
"=",
"np",
".",
"zeros",
"(",
"T",
".",
"shape",
"[",
"0",
"]",
")",
"\"\"\"Equation (II)\"\"\"",
"r",
"[",
"list",
"(",
"A",
")",
"]",
"=",
"1.0",
"u",
"=",
"spsolve",
"(",
"W",
",",
"r",
")",
"return",
"u"
]
| r"""Backward committor between given sets.
The backward committor u(x) between sets A and B is the
probability for the chain starting in x to have come from A last
rather than from B.
Parameters
----------
T : (M, M) ndarray
Transition matrix
A : array_like
List of integer state labels for set A
B : array_like
List of integer state labels for set B
Returns
-------
u : (M, ) ndarray
        Vector of backward committor probabilities
Notes
-----
    The backward committor is a solution to the following
boundary-value problem
.. math::
\sum_j K_{ij} \pi_{j} u_{j}=0 for i in X\(A u B) (I)
u_{i}=1 for i \in A (II)
u_{i}=0 for i \in B (III)
with adjoint of the generator matrix K=(D_pi(P-I))'. | [
"r",
"Backward",
"committor",
"between",
"given",
"sets",
"."
]
| python | train | 25.305556 |
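
A small numerical sketch with a three-state chain. It calls the sparse implementation above directly; the import path mirrors this file's location and is an internal detail, the public entry point in msmtools is the higher-level committor API:

import numpy as np
from scipy.sparse import csr_matrix
from msmtools.analysis.sparse.committor import backward_committor

T = csr_matrix(np.array([[0.80, 0.15, 0.05],
                         [0.10, 0.80, 0.10],
                         [0.05, 0.15, 0.80]]))
u = backward_committor(T, [0], [2])
# u[0] == 1.0 and u[2] == 0.0 by boundary conditions (II) and (III);
# u[1] is the probability that state 1 was last reached from A={0} rather than B={2}
print(u)
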
litl/rauth | rauth/service.py | https://github.com/litl/rauth/blob/a6d887d7737cf21ec896a8104f25c2754c694011/rauth/service.py#L261-L292 | def get_raw_access_token(self,
request_token,
request_token_secret,
method='GET',
**kwargs):
'''
Returns a Requests' response over the
:attr:`rauth.OAuth1Service.access_token_url`.
        Use this if you need the full `Response` object.
:param request_token: The request token as returned by
:meth:`get_request_token`.
:type request_token: str
:param request_token_secret: The request token secret as returned by
:meth:`get_request_token`.
:type request_token_secret: str
:param method: A string representation of the HTTP method to be
used, defaults to `GET`.
:type method: str
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict
'''
# ensure we've set the access_token_url
if self.access_token_url is None:
raise TypeError('access_token_url must not be None')
session = self.get_session((request_token, request_token_secret))
self.access_token_response = session.request(method,
self.access_token_url,
**kwargs)
return self.access_token_response | [
"def",
"get_raw_access_token",
"(",
"self",
",",
"request_token",
",",
"request_token_secret",
",",
"method",
"=",
"'GET'",
",",
"*",
"*",
"kwargs",
")",
":",
"# ensure we've set the access_token_url",
"if",
"self",
".",
"access_token_url",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"'access_token_url must not be None'",
")",
"session",
"=",
"self",
".",
"get_session",
"(",
"(",
"request_token",
",",
"request_token_secret",
")",
")",
"self",
".",
"access_token_response",
"=",
"session",
".",
"request",
"(",
"method",
",",
"self",
".",
"access_token_url",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"access_token_response"
]
| Returns a Requests' response over the
:attr:`rauth.OAuth1Service.access_token_url`.
        Use this if you need the full `Response` object.
:param request_token: The request token as returned by
:meth:`get_request_token`.
:type request_token: str
:param request_token_secret: The request token secret as returned by
:meth:`get_request_token`.
:type request_token_secret: str
:param method: A string representation of the HTTP method to be
used, defaults to `GET`.
:type method: str
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict | [
"Returns",
"a",
"Requests",
"response",
"over",
"the",
":",
"attr",
":",
"rauth",
".",
"OAuth1Service",
".",
"access_token_url",
"."
]
| python | train | 42.84375 |
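
A hedged sketch of where this call sits in an OAuth 1.0a flow; the service URLs, keys and verifier are placeholders, and passing the verifier via data= simply forwards it to Requests as the docstring describes:

from rauth import OAuth1Service

service = OAuth1Service(
    consumer_key='<key>',
    consumer_secret='<secret>',
    request_token_url='https://api.example.com/oauth/request_token',
    access_token_url='https://api.example.com/oauth/access_token',
    authorize_url='https://api.example.com/oauth/authorize',
    base_url='https://api.example.com/')

request_token, request_token_secret = service.get_request_token()
# ... the user authorizes and an oauth_verifier is obtained out of band ...
response = service.get_raw_access_token(request_token, request_token_secret,
                                        method='POST',
                                        data={'oauth_verifier': '<verifier>'})
print(response.status_code, response.text)
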
googlemaps/google-maps-services-python | googlemaps/places.py | https://github.com/googlemaps/google-maps-services-python/blob/7ed40b4d8df63479794c46ce29d03ed6083071d7/googlemaps/places.py#L175-L249 | def places_nearby(client, location=None, radius=None, keyword=None,
language=None, min_price=None, max_price=None, name=None,
open_now=False, rank_by=None, type=None, page_token=None):
"""
Performs nearby search for places.
    :param location: The latitude/longitude value around which you wish to
        retrieve place information.
:type location: string, dict, list, or tuple
:param radius: Distance in meters within which to bias results.
:type radius: int
:param region: The region code, optional parameter.
See more @ https://developers.google.com/places/web-service/search
:type region: string
:param keyword: A term to be matched against all content that Google has
indexed for this place.
:type keyword: string
:param language: The language in which to return results.
:type language: string
:param min_price: Restricts results to only those places with no less than
this price level. Valid values are in the range from 0
(most affordable) to 4 (most expensive).
:type min_price: int
:param max_price: Restricts results to only those places with no greater
than this price level. Valid values are in the range
from 0 (most affordable) to 4 (most expensive).
:type max_price: int
:param name: One or more terms to be matched against the names of places.
:type name: string or list of strings
:param open_now: Return only those places that are open for business at
the time the query is sent.
:type open_now: bool
:param rank_by: Specifies the order in which results are listed.
Possible values are: prominence (default), distance
:type rank_by: string
:param type: Restricts the results to places matching the specified type.
The full list of supported types is available here:
https://developers.google.com/places/supported_types
:type type: string
:param page_token: Token from a previous search that when provided will
returns the next page of results for the same search.
:type page_token: string
:rtype: result dict with the following keys:
status: status code
results: list of places
html_attributions: set of attributions which must be displayed
next_page_token: token for retrieving the next page of results
"""
if not location and not page_token:
raise ValueError("either a location or page_token arg is required")
if rank_by == "distance":
if not (keyword or name or type):
raise ValueError("either a keyword, name, or type arg is required "
"when rank_by is set to distance")
elif radius is not None:
raise ValueError("radius cannot be specified when rank_by is set to "
"distance")
return _places(client, "nearby", location=location, radius=radius,
keyword=keyword, language=language, min_price=min_price,
max_price=max_price, name=name, open_now=open_now,
rank_by=rank_by, type=type, page_token=page_token) | [
"def",
"places_nearby",
"(",
"client",
",",
"location",
"=",
"None",
",",
"radius",
"=",
"None",
",",
"keyword",
"=",
"None",
",",
"language",
"=",
"None",
",",
"min_price",
"=",
"None",
",",
"max_price",
"=",
"None",
",",
"name",
"=",
"None",
",",
"open_now",
"=",
"False",
",",
"rank_by",
"=",
"None",
",",
"type",
"=",
"None",
",",
"page_token",
"=",
"None",
")",
":",
"if",
"not",
"location",
"and",
"not",
"page_token",
":",
"raise",
"ValueError",
"(",
"\"either a location or page_token arg is required\"",
")",
"if",
"rank_by",
"==",
"\"distance\"",
":",
"if",
"not",
"(",
"keyword",
"or",
"name",
"or",
"type",
")",
":",
"raise",
"ValueError",
"(",
"\"either a keyword, name, or type arg is required \"",
"\"when rank_by is set to distance\"",
")",
"elif",
"radius",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"radius cannot be specified when rank_by is set to \"",
"\"distance\"",
")",
"return",
"_places",
"(",
"client",
",",
"\"nearby\"",
",",
"location",
"=",
"location",
",",
"radius",
"=",
"radius",
",",
"keyword",
"=",
"keyword",
",",
"language",
"=",
"language",
",",
"min_price",
"=",
"min_price",
",",
"max_price",
"=",
"max_price",
",",
"name",
"=",
"name",
",",
"open_now",
"=",
"open_now",
",",
"rank_by",
"=",
"rank_by",
",",
"type",
"=",
"type",
",",
"page_token",
"=",
"page_token",
")"
]
| Performs nearby search for places.
    :param location: The latitude/longitude value around which you wish to
        retrieve place information.
:type location: string, dict, list, or tuple
:param radius: Distance in meters within which to bias results.
:type radius: int
:param region: The region code, optional parameter.
See more @ https://developers.google.com/places/web-service/search
:type region: string
:param keyword: A term to be matched against all content that Google has
indexed for this place.
:type keyword: string
:param language: The language in which to return results.
:type language: string
:param min_price: Restricts results to only those places with no less than
this price level. Valid values are in the range from 0
(most affordable) to 4 (most expensive).
:type min_price: int
:param max_price: Restricts results to only those places with no greater
than this price level. Valid values are in the range
from 0 (most affordable) to 4 (most expensive).
:type max_price: int
:param name: One or more terms to be matched against the names of places.
:type name: string or list of strings
:param open_now: Return only those places that are open for business at
the time the query is sent.
:type open_now: bool
:param rank_by: Specifies the order in which results are listed.
Possible values are: prominence (default), distance
:type rank_by: string
:param type: Restricts the results to places matching the specified type.
The full list of supported types is available here:
https://developers.google.com/places/supported_types
:type type: string
:param page_token: Token from a previous search that when provided will
returns the next page of results for the same search.
:type page_token: string
:rtype: result dict with the following keys:
status: status code
results: list of places
html_attributions: set of attributions which must be displayed
next_page_token: token for retrieving the next page of results | [
"Performs",
"nearby",
"search",
"for",
"places",
"."
]
| python | train | 43.44 |
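
A short usage sketch through the public client (API key and coordinates are placeholders). The second call illustrates the rank_by='distance' rule enforced above: it needs a keyword, name or type and must omit radius:

import googlemaps

gmaps = googlemaps.Client(key='<YOUR_API_KEY>')

nearby = gmaps.places_nearby(location=(52.5200, 13.4050), radius=500,
                             type='cafe', open_now=True)
for place in nearby.get('results', []):
    print(place['name'])

closest = gmaps.places_nearby(location=(52.5200, 13.4050),
                              rank_by='distance', keyword='coffee')
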
tus/tus-py-client | tusclient/uploader.py | https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L274-L292 | def upload(self, stop_at=None):
"""
Perform file upload.
        Performs continuous upload of chunks of the file. The size uploaded at each cycle is
the value of the attribute 'chunk_size'.
:Args:
- stop_at (Optional[int]):
Determines at what offset value the upload should stop. If not specified this
defaults to the file size.
"""
self.stop_at = stop_at or self.file_size
while self.offset < self.stop_at:
self.upload_chunk()
else:
if self.log_func:
self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at)) | [
"def",
"upload",
"(",
"self",
",",
"stop_at",
"=",
"None",
")",
":",
"self",
".",
"stop_at",
"=",
"stop_at",
"or",
"self",
".",
"file_size",
"while",
"self",
".",
"offset",
"<",
"self",
".",
"stop_at",
":",
"self",
".",
"upload_chunk",
"(",
")",
"else",
":",
"if",
"self",
".",
"log_func",
":",
"self",
".",
"log_func",
"(",
"\"maximum upload specified({} bytes) has been reached\"",
".",
"format",
"(",
"self",
".",
"stop_at",
")",
")"
]
| Perform file upload.
        Performs continuous upload of chunks of the file. The size uploaded at each cycle is
the value of the attribute 'chunk_size'.
:Args:
- stop_at (Optional[int]):
Determines at what offset value the upload should stop. If not specified this
defaults to the file size. | [
"Perform",
"file",
"upload",
"."
]
| python | train | 35.473684 |
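
A usage sketch following the tuspy client pattern; the server URL and file path are placeholders, and chunk_size / stop_at map directly onto the attributes used in the method above:

from tusclient import client

tus_client = client.TusClient('https://tusd.tusdemo.net/files/')
uploader = tus_client.uploader('path/to/file.bin', chunk_size=2 * 1024 * 1024)

uploader.upload()                              # upload the whole file, chunk by chunk
# uploader.upload(stop_at=10 * 1024 * 1024)    # or stop after the first ~10 MB
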
PaulHancock/Aegean | AegeanTools/catalogs.py | https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/catalogs.py#L714-L782 | def writeDB(filename, catalog, meta=None):
"""
Output an sqlite3 database containing one table for each source type
Parameters
----------
filename : str
Output filename
catalog : list
List of sources of type :class:`AegeanTools.models.OutputSource`,
:class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`.
meta : dict
Meta data to be written to table `meta`
Returns
-------
None
"""
def sqlTypes(obj, names):
"""
Return the sql type corresponding to each named parameter in obj
"""
types = []
for n in names:
val = getattr(obj, n)
if isinstance(val, bool):
types.append("BOOL")
elif isinstance(val, (int, np.int64, np.int32)):
types.append("INT")
elif isinstance(val, (float, np.float64, np.float32)): # float32 is bugged and claims not to be a float
types.append("FLOAT")
elif isinstance(val, six.string_types):
types.append("VARCHAR")
else:
log.warning("Column {0} is of unknown type {1}".format(n, type(n)))
log.warning("Using VARCHAR")
types.append("VARCHAR")
return types
if os.path.exists(filename):
log.warning("overwriting {0}".format(filename))
os.remove(filename)
conn = sqlite3.connect(filename)
db = conn.cursor()
# determine the column names by inspecting the catalog class
for t, tn in zip(classify_catalog(catalog), ["components", "islands", "simples"]):
if len(t) < 1:
continue #don't write empty tables
col_names = t[0].names
col_types = sqlTypes(t[0], col_names)
stmnt = ','.join(["{0} {1}".format(a, b) for a, b in zip(col_names, col_types)])
db.execute('CREATE TABLE {0} ({1})'.format(tn, stmnt))
stmnt = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(tn, ','.join(col_names), ','.join(['?' for i in col_names]))
# expend the iterators that are created by python 3+
data = list(map(nulls, list(r.as_list() for r in t)))
db.executemany(stmnt, data)
log.info("Created table {0}".format(tn))
# metadata add some meta data
db.execute("CREATE TABLE meta (key VARCHAR, val VARCHAR)")
for k in meta:
db.execute("INSERT INTO meta (key, val) VALUES (?,?)", (k, meta[k]))
conn.commit()
log.info(db.execute("SELECT name FROM sqlite_master WHERE type='table';").fetchall())
conn.close()
log.info("Wrote file {0}".format(filename))
return | [
"def",
"writeDB",
"(",
"filename",
",",
"catalog",
",",
"meta",
"=",
"None",
")",
":",
"def",
"sqlTypes",
"(",
"obj",
",",
"names",
")",
":",
"\"\"\"\n Return the sql type corresponding to each named parameter in obj\n \"\"\"",
"types",
"=",
"[",
"]",
"for",
"n",
"in",
"names",
":",
"val",
"=",
"getattr",
"(",
"obj",
",",
"n",
")",
"if",
"isinstance",
"(",
"val",
",",
"bool",
")",
":",
"types",
".",
"append",
"(",
"\"BOOL\"",
")",
"elif",
"isinstance",
"(",
"val",
",",
"(",
"int",
",",
"np",
".",
"int64",
",",
"np",
".",
"int32",
")",
")",
":",
"types",
".",
"append",
"(",
"\"INT\"",
")",
"elif",
"isinstance",
"(",
"val",
",",
"(",
"float",
",",
"np",
".",
"float64",
",",
"np",
".",
"float32",
")",
")",
":",
"# float32 is bugged and claims not to be a float",
"types",
".",
"append",
"(",
"\"FLOAT\"",
")",
"elif",
"isinstance",
"(",
"val",
",",
"six",
".",
"string_types",
")",
":",
"types",
".",
"append",
"(",
"\"VARCHAR\"",
")",
"else",
":",
"log",
".",
"warning",
"(",
"\"Column {0} is of unknown type {1}\"",
".",
"format",
"(",
"n",
",",
"type",
"(",
"n",
")",
")",
")",
"log",
".",
"warning",
"(",
"\"Using VARCHAR\"",
")",
"types",
".",
"append",
"(",
"\"VARCHAR\"",
")",
"return",
"types",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"log",
".",
"warning",
"(",
"\"overwriting {0}\"",
".",
"format",
"(",
"filename",
")",
")",
"os",
".",
"remove",
"(",
"filename",
")",
"conn",
"=",
"sqlite3",
".",
"connect",
"(",
"filename",
")",
"db",
"=",
"conn",
".",
"cursor",
"(",
")",
"# determine the column names by inspecting the catalog class",
"for",
"t",
",",
"tn",
"in",
"zip",
"(",
"classify_catalog",
"(",
"catalog",
")",
",",
"[",
"\"components\"",
",",
"\"islands\"",
",",
"\"simples\"",
"]",
")",
":",
"if",
"len",
"(",
"t",
")",
"<",
"1",
":",
"continue",
"#don't write empty tables",
"col_names",
"=",
"t",
"[",
"0",
"]",
".",
"names",
"col_types",
"=",
"sqlTypes",
"(",
"t",
"[",
"0",
"]",
",",
"col_names",
")",
"stmnt",
"=",
"','",
".",
"join",
"(",
"[",
"\"{0} {1}\"",
".",
"format",
"(",
"a",
",",
"b",
")",
"for",
"a",
",",
"b",
"in",
"zip",
"(",
"col_names",
",",
"col_types",
")",
"]",
")",
"db",
".",
"execute",
"(",
"'CREATE TABLE {0} ({1})'",
".",
"format",
"(",
"tn",
",",
"stmnt",
")",
")",
"stmnt",
"=",
"'INSERT INTO {0} ({1}) VALUES ({2})'",
".",
"format",
"(",
"tn",
",",
"','",
".",
"join",
"(",
"col_names",
")",
",",
"','",
".",
"join",
"(",
"[",
"'?'",
"for",
"i",
"in",
"col_names",
"]",
")",
")",
"# expend the iterators that are created by python 3+",
"data",
"=",
"list",
"(",
"map",
"(",
"nulls",
",",
"list",
"(",
"r",
".",
"as_list",
"(",
")",
"for",
"r",
"in",
"t",
")",
")",
")",
"db",
".",
"executemany",
"(",
"stmnt",
",",
"data",
")",
"log",
".",
"info",
"(",
"\"Created table {0}\"",
".",
"format",
"(",
"tn",
")",
")",
"# metadata add some meta data",
"db",
".",
"execute",
"(",
"\"CREATE TABLE meta (key VARCHAR, val VARCHAR)\"",
")",
"for",
"k",
"in",
"meta",
":",
"db",
".",
"execute",
"(",
"\"INSERT INTO meta (key, val) VALUES (?,?)\"",
",",
"(",
"k",
",",
"meta",
"[",
"k",
"]",
")",
")",
"conn",
".",
"commit",
"(",
")",
"log",
".",
"info",
"(",
"db",
".",
"execute",
"(",
"\"SELECT name FROM sqlite_master WHERE type='table';\"",
")",
".",
"fetchall",
"(",
")",
")",
"conn",
".",
"close",
"(",
")",
"log",
".",
"info",
"(",
"\"Wrote file {0}\"",
".",
"format",
"(",
"filename",
")",
")",
"return"
]
| Output an sqlite3 database containing one table for each source type
Parameters
----------
filename : str
Output filename
catalog : list
List of sources of type :class:`AegeanTools.models.OutputSource`,
:class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`.
meta : dict
Meta data to be written to table `meta`
Returns
-------
None | [
"Output",
"an",
"sqlite3",
"database",
"containing",
"one",
"table",
"for",
"each",
"source",
"type"
]
| python | train | 37.57971 |
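
Since the output is an ordinary sqlite3 file, the result can be inspected with the standard library alone. A reading-back sketch using the components/islands/simples/meta layout created above, assuming writeDB('catalog.db', catalog, meta=meta) was called first (the filename is a placeholder):

import sqlite3

conn = sqlite3.connect('catalog.db')
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
print(cur.fetchall())               # e.g. [('components',), ('meta',)]
cur.execute("SELECT key, val FROM meta")
print(dict(cur.fetchall()))         # the meta dict that was passed to writeDB
conn.close()
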
henzk/django-productline | django_productline/features/staticfiles/urls.py | https://github.com/henzk/django-productline/blob/24ff156924c1a8c07b99cbb8a1de0a42b8d81f60/django_productline/features/staticfiles/urls.py#L4-L26 | def refine_get_urls(original):
"""
serve static files (and media files also)
in production the webserver should serve requested
static files itself and never let requests to /static/*
and /media/* get to the django application.
"""
def get_urls():
from django.conf.urls import url
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.static import serve
if settings.DEBUG:
return staticfiles_urlpatterns() + [
url(r'^media/(?P<path>.*)$', serve, {
'document_root': settings.MEDIA_ROOT,
}),
] + original()
else:
return original()
return get_urls | [
"def",
"refine_get_urls",
"(",
"original",
")",
":",
"def",
"get_urls",
"(",
")",
":",
"from",
"django",
".",
"conf",
".",
"urls",
"import",
"url",
"from",
"django",
".",
"conf",
"import",
"settings",
"from",
"django",
".",
"contrib",
".",
"staticfiles",
".",
"urls",
"import",
"staticfiles_urlpatterns",
"from",
"django",
".",
"views",
".",
"static",
"import",
"serve",
"if",
"settings",
".",
"DEBUG",
":",
"return",
"staticfiles_urlpatterns",
"(",
")",
"+",
"[",
"url",
"(",
"r'^media/(?P<path>.*)$'",
",",
"serve",
",",
"{",
"'document_root'",
":",
"settings",
".",
"MEDIA_ROOT",
",",
"}",
")",
",",
"]",
"+",
"original",
"(",
")",
"else",
":",
"return",
"original",
"(",
")",
"return",
"get_urls"
]
| serve static files (and media files also)
in production the webserver should serve requested
static files itself and never let requests to /static/*
and /media/* get to the django application. | [
"serve",
"static",
"files",
"(",
"and",
"media",
"files",
"also",
")"
]
| python | train | 32.956522 |
apache/incubator-heron | heron/instance/src/python/instance/st_heron_instance.py | https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/instance/src/python/instance/st_heron_instance.py#L118-L142 | def handle_new_tuple_set_2(self, hts2):
"""Called when new HeronTupleSet2 arrives
Convert(Assemble) HeronTupleSet2(raw byte array) to HeronTupleSet
See more at GitHub PR #1421
:param tuple_msg_set: HeronTupleSet2 type
"""
if self.my_pplan_helper is None or self.my_instance is None:
Log.error("Got tuple set when no instance assigned yet")
else:
hts = tuple_pb2.HeronTupleSet()
if hts2.HasField('control'):
hts.control.CopyFrom(hts2.control)
else:
hdts = tuple_pb2.HeronDataTupleSet()
hdts.stream.CopyFrom(hts2.data.stream)
try:
for trunk in hts2.data.tuples:
added_tuple = hdts.tuples.add()
added_tuple.ParseFromString(trunk)
except Exception:
Log.exception('Fail to deserialize HeronDataTuple')
hts.data.CopyFrom(hdts)
self.in_stream.offer(hts)
if self.my_pplan_helper.is_topology_running():
self.my_instance.py_class.process_incoming_tuples() | [
"def",
"handle_new_tuple_set_2",
"(",
"self",
",",
"hts2",
")",
":",
"if",
"self",
".",
"my_pplan_helper",
"is",
"None",
"or",
"self",
".",
"my_instance",
"is",
"None",
":",
"Log",
".",
"error",
"(",
"\"Got tuple set when no instance assigned yet\"",
")",
"else",
":",
"hts",
"=",
"tuple_pb2",
".",
"HeronTupleSet",
"(",
")",
"if",
"hts2",
".",
"HasField",
"(",
"'control'",
")",
":",
"hts",
".",
"control",
".",
"CopyFrom",
"(",
"hts2",
".",
"control",
")",
"else",
":",
"hdts",
"=",
"tuple_pb2",
".",
"HeronDataTupleSet",
"(",
")",
"hdts",
".",
"stream",
".",
"CopyFrom",
"(",
"hts2",
".",
"data",
".",
"stream",
")",
"try",
":",
"for",
"trunk",
"in",
"hts2",
".",
"data",
".",
"tuples",
":",
"added_tuple",
"=",
"hdts",
".",
"tuples",
".",
"add",
"(",
")",
"added_tuple",
".",
"ParseFromString",
"(",
"trunk",
")",
"except",
"Exception",
":",
"Log",
".",
"exception",
"(",
"'Fail to deserialize HeronDataTuple'",
")",
"hts",
".",
"data",
".",
"CopyFrom",
"(",
"hdts",
")",
"self",
".",
"in_stream",
".",
"offer",
"(",
"hts",
")",
"if",
"self",
".",
"my_pplan_helper",
".",
"is_topology_running",
"(",
")",
":",
"self",
".",
"my_instance",
".",
"py_class",
".",
"process_incoming_tuples",
"(",
")"
]
| Called when new HeronTupleSet2 arrives
Convert(Assemble) HeronTupleSet2(raw byte array) to HeronTupleSet
See more at GitHub PR #1421
:param tuple_msg_set: HeronTupleSet2 type | [
"Called",
"when",
"new",
"HeronTupleSet2",
"arrives",
"Convert",
"(",
"Assemble",
")",
"HeronTupleSet2",
"(",
"raw",
"byte",
"array",
")",
"to",
"HeronTupleSet",
"See",
"more",
"at",
"GitHub",
"PR",
"#1421",
":",
"param",
"tuple_msg_set",
":",
"HeronTupleSet2",
"type"
]
| python | valid | 39.64 |
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/ext/ipy_inputhook.py | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/ext/ipy_inputhook.py#L138-L152 | def set_inputhook(self, callback):
"""Set PyOS_InputHook to callback and return the previous one."""
# On platforms with 'readline' support, it's all too likely to
# have a KeyboardInterrupt signal delivered *even before* an
# initial ``try:`` clause in the callback can be executed, so
# we need to disable CTRL+C in this situation.
ignore_CTRL_C()
self._callback = callback
self._callback_pyfunctype = self.PYFUNC(callback)
pyos_inputhook_ptr = self.get_pyos_inputhook()
original = self.get_pyos_inputhook_as_func()
pyos_inputhook_ptr.value = \
ctypes.cast(self._callback_pyfunctype, ctypes.c_void_p).value
self._installed = True
return original | [
"def",
"set_inputhook",
"(",
"self",
",",
"callback",
")",
":",
"# On platforms with 'readline' support, it's all too likely to",
"# have a KeyboardInterrupt signal delivered *even before* an",
"# initial ``try:`` clause in the callback can be executed, so",
"# we need to disable CTRL+C in this situation.",
"ignore_CTRL_C",
"(",
")",
"self",
".",
"_callback",
"=",
"callback",
"self",
".",
"_callback_pyfunctype",
"=",
"self",
".",
"PYFUNC",
"(",
"callback",
")",
"pyos_inputhook_ptr",
"=",
"self",
".",
"get_pyos_inputhook",
"(",
")",
"original",
"=",
"self",
".",
"get_pyos_inputhook_as_func",
"(",
")",
"pyos_inputhook_ptr",
".",
"value",
"=",
"ctypes",
".",
"cast",
"(",
"self",
".",
"_callback_pyfunctype",
",",
"ctypes",
".",
"c_void_p",
")",
".",
"value",
"self",
".",
"_installed",
"=",
"True",
"return",
"original"
]
| Set PyOS_InputHook to callback and return the previous one. | [
"Set",
"PyOS_InputHook",
"to",
"callback",
"and",
"return",
"the",
"previous",
"one",
"."
]
| python | train | 49.933333 |
prompt-toolkit/pymux | pymux/key_bindings.py | https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/key_bindings.py#L85-L195 | def _load_builtins(self):
"""
Fill the Registry with the hard coded key bindings.
"""
pymux = self.pymux
kb = KeyBindings()
# Create filters.
has_prefix = HasPrefix(pymux)
waits_for_confirmation = WaitsForConfirmation(pymux)
prompt_or_command_focus = has_focus(COMMAND) | has_focus(PROMPT)
display_pane_numbers = Condition(lambda: pymux.display_pane_numbers)
in_scroll_buffer_not_searching = InScrollBufferNotSearching(pymux)
@kb.add(Keys.Any, filter=has_prefix)
def _(event):
" Ignore unknown Ctrl-B prefixed key sequences. "
pymux.get_client_state().has_prefix = False
@kb.add('c-c', filter=prompt_or_command_focus & ~has_prefix)
@kb.add('c-g', filter=prompt_or_command_focus & ~has_prefix)
# @kb.add('backspace', filter=has_focus(COMMAND) & ~has_prefix &
# Condition(lambda: cli.buffers[COMMAND].text == ''))
def _(event):
" Leave command mode. "
pymux.leave_command_mode(append_to_history=False)
@kb.add('y', filter=waits_for_confirmation)
@kb.add('Y', filter=waits_for_confirmation)
def _(event):
"""
Confirm command.
"""
client_state = pymux.get_client_state()
command = client_state.confirm_command
client_state.confirm_command = None
client_state.confirm_text = None
pymux.handle_command(command)
@kb.add('n', filter=waits_for_confirmation)
@kb.add('N', filter=waits_for_confirmation)
@kb.add('c-c' , filter=waits_for_confirmation)
def _(event):
"""
Cancel command.
"""
client_state = pymux.get_client_state()
client_state.confirm_command = None
client_state.confirm_text = None
@kb.add('c-c', filter=in_scroll_buffer_not_searching)
@kb.add('enter', filter=in_scroll_buffer_not_searching)
@kb.add('q', filter=in_scroll_buffer_not_searching)
def _(event):
" Exit scroll buffer. "
pane = pymux.arrangement.get_active_pane()
pane.exit_scroll_buffer()
@kb.add(' ', filter=in_scroll_buffer_not_searching)
def _(event):
" Enter selection mode when pressing space in copy mode. "
event.current_buffer.start_selection(selection_type=SelectionType.CHARACTERS)
@kb.add('enter', filter=in_scroll_buffer_not_searching & has_selection)
def _(event):
" Copy selection when pressing Enter. "
clipboard_data = event.current_buffer.copy_selection()
event.app.clipboard.set_data(clipboard_data)
@kb.add('v', filter=in_scroll_buffer_not_searching & has_selection)
def _(event):
" Toggle between selection types. "
types = [SelectionType.LINES, SelectionType.BLOCK, SelectionType.CHARACTERS]
selection_state = event.current_buffer.selection_state
try:
index = types.index(selection_state.type)
except ValueError: # Not in list.
index = 0
selection_state.type = types[(index + 1) % len(types)]
@Condition
def popup_displayed():
return self.pymux.get_client_state().display_popup
@kb.add('q', filter=popup_displayed, eager=True)
def _(event):
" Quit pop-up dialog. "
self.pymux.get_client_state().display_popup = False
@kb.add(Keys.Any, eager=True, filter=display_pane_numbers)
def _(event):
" When the pane numbers are shown. Any key press should hide them. "
pymux.display_pane_numbers = False
@Condition
def clock_displayed():
" "
pane = pymux.arrangement.get_active_pane()
return pane.clock_mode
@kb.add(Keys.Any, eager=True, filter=clock_displayed)
def _(event):
" When the clock is displayed. Any key press should hide it. "
pane = pymux.arrangement.get_active_pane()
pane.clock_mode = False
return kb | [
"def",
"_load_builtins",
"(",
"self",
")",
":",
"pymux",
"=",
"self",
".",
"pymux",
"kb",
"=",
"KeyBindings",
"(",
")",
"# Create filters.",
"has_prefix",
"=",
"HasPrefix",
"(",
"pymux",
")",
"waits_for_confirmation",
"=",
"WaitsForConfirmation",
"(",
"pymux",
")",
"prompt_or_command_focus",
"=",
"has_focus",
"(",
"COMMAND",
")",
"|",
"has_focus",
"(",
"PROMPT",
")",
"display_pane_numbers",
"=",
"Condition",
"(",
"lambda",
":",
"pymux",
".",
"display_pane_numbers",
")",
"in_scroll_buffer_not_searching",
"=",
"InScrollBufferNotSearching",
"(",
"pymux",
")",
"@",
"kb",
".",
"add",
"(",
"Keys",
".",
"Any",
",",
"filter",
"=",
"has_prefix",
")",
"def",
"_",
"(",
"event",
")",
":",
"\" Ignore unknown Ctrl-B prefixed key sequences. \"",
"pymux",
".",
"get_client_state",
"(",
")",
".",
"has_prefix",
"=",
"False",
"@",
"kb",
".",
"add",
"(",
"'c-c'",
",",
"filter",
"=",
"prompt_or_command_focus",
"&",
"~",
"has_prefix",
")",
"@",
"kb",
".",
"add",
"(",
"'c-g'",
",",
"filter",
"=",
"prompt_or_command_focus",
"&",
"~",
"has_prefix",
")",
"# @kb.add('backspace', filter=has_focus(COMMAND) & ~has_prefix &",
"# Condition(lambda: cli.buffers[COMMAND].text == ''))",
"def",
"_",
"(",
"event",
")",
":",
"\" Leave command mode. \"",
"pymux",
".",
"leave_command_mode",
"(",
"append_to_history",
"=",
"False",
")",
"@",
"kb",
".",
"add",
"(",
"'y'",
",",
"filter",
"=",
"waits_for_confirmation",
")",
"@",
"kb",
".",
"add",
"(",
"'Y'",
",",
"filter",
"=",
"waits_for_confirmation",
")",
"def",
"_",
"(",
"event",
")",
":",
"\"\"\"\n Confirm command.\n \"\"\"",
"client_state",
"=",
"pymux",
".",
"get_client_state",
"(",
")",
"command",
"=",
"client_state",
".",
"confirm_command",
"client_state",
".",
"confirm_command",
"=",
"None",
"client_state",
".",
"confirm_text",
"=",
"None",
"pymux",
".",
"handle_command",
"(",
"command",
")",
"@",
"kb",
".",
"add",
"(",
"'n'",
",",
"filter",
"=",
"waits_for_confirmation",
")",
"@",
"kb",
".",
"add",
"(",
"'N'",
",",
"filter",
"=",
"waits_for_confirmation",
")",
"@",
"kb",
".",
"add",
"(",
"'c-c'",
",",
"filter",
"=",
"waits_for_confirmation",
")",
"def",
"_",
"(",
"event",
")",
":",
"\"\"\"\n Cancel command.\n \"\"\"",
"client_state",
"=",
"pymux",
".",
"get_client_state",
"(",
")",
"client_state",
".",
"confirm_command",
"=",
"None",
"client_state",
".",
"confirm_text",
"=",
"None",
"@",
"kb",
".",
"add",
"(",
"'c-c'",
",",
"filter",
"=",
"in_scroll_buffer_not_searching",
")",
"@",
"kb",
".",
"add",
"(",
"'enter'",
",",
"filter",
"=",
"in_scroll_buffer_not_searching",
")",
"@",
"kb",
".",
"add",
"(",
"'q'",
",",
"filter",
"=",
"in_scroll_buffer_not_searching",
")",
"def",
"_",
"(",
"event",
")",
":",
"\" Exit scroll buffer. \"",
"pane",
"=",
"pymux",
".",
"arrangement",
".",
"get_active_pane",
"(",
")",
"pane",
".",
"exit_scroll_buffer",
"(",
")",
"@",
"kb",
".",
"add",
"(",
"' '",
",",
"filter",
"=",
"in_scroll_buffer_not_searching",
")",
"def",
"_",
"(",
"event",
")",
":",
"\" Enter selection mode when pressing space in copy mode. \"",
"event",
".",
"current_buffer",
".",
"start_selection",
"(",
"selection_type",
"=",
"SelectionType",
".",
"CHARACTERS",
")",
"@",
"kb",
".",
"add",
"(",
"'enter'",
",",
"filter",
"=",
"in_scroll_buffer_not_searching",
"&",
"has_selection",
")",
"def",
"_",
"(",
"event",
")",
":",
"\" Copy selection when pressing Enter. \"",
"clipboard_data",
"=",
"event",
".",
"current_buffer",
".",
"copy_selection",
"(",
")",
"event",
".",
"app",
".",
"clipboard",
".",
"set_data",
"(",
"clipboard_data",
")",
"@",
"kb",
".",
"add",
"(",
"'v'",
",",
"filter",
"=",
"in_scroll_buffer_not_searching",
"&",
"has_selection",
")",
"def",
"_",
"(",
"event",
")",
":",
"\" Toggle between selection types. \"",
"types",
"=",
"[",
"SelectionType",
".",
"LINES",
",",
"SelectionType",
".",
"BLOCK",
",",
"SelectionType",
".",
"CHARACTERS",
"]",
"selection_state",
"=",
"event",
".",
"current_buffer",
".",
"selection_state",
"try",
":",
"index",
"=",
"types",
".",
"index",
"(",
"selection_state",
".",
"type",
")",
"except",
"ValueError",
":",
"# Not in list.",
"index",
"=",
"0",
"selection_state",
".",
"type",
"=",
"types",
"[",
"(",
"index",
"+",
"1",
")",
"%",
"len",
"(",
"types",
")",
"]",
"@",
"Condition",
"def",
"popup_displayed",
"(",
")",
":",
"return",
"self",
".",
"pymux",
".",
"get_client_state",
"(",
")",
".",
"display_popup",
"@",
"kb",
".",
"add",
"(",
"'q'",
",",
"filter",
"=",
"popup_displayed",
",",
"eager",
"=",
"True",
")",
"def",
"_",
"(",
"event",
")",
":",
"\" Quit pop-up dialog. \"",
"self",
".",
"pymux",
".",
"get_client_state",
"(",
")",
".",
"display_popup",
"=",
"False",
"@",
"kb",
".",
"add",
"(",
"Keys",
".",
"Any",
",",
"eager",
"=",
"True",
",",
"filter",
"=",
"display_pane_numbers",
")",
"def",
"_",
"(",
"event",
")",
":",
"\" When the pane numbers are shown. Any key press should hide them. \"",
"pymux",
".",
"display_pane_numbers",
"=",
"False",
"@",
"Condition",
"def",
"clock_displayed",
"(",
")",
":",
"\" \"",
"pane",
"=",
"pymux",
".",
"arrangement",
".",
"get_active_pane",
"(",
")",
"return",
"pane",
".",
"clock_mode",
"@",
"kb",
".",
"add",
"(",
"Keys",
".",
"Any",
",",
"eager",
"=",
"True",
",",
"filter",
"=",
"clock_displayed",
")",
"def",
"_",
"(",
"event",
")",
":",
"\" When the clock is displayed. Any key press should hide it. \"",
"pane",
"=",
"pymux",
".",
"arrangement",
".",
"get_active_pane",
"(",
")",
"pane",
".",
"clock_mode",
"=",
"False",
"return",
"kb"
]
| Fill the Registry with the hard coded key bindings. | [
"Fill",
"the",
"Registry",
"with",
"the",
"hard",
"coded",
"key",
"bindings",
"."
]
| python | train | 37.288288 |
neithere/monk | monk/manipulation.py | https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/manipulation.py#L108-L139 | def normalize_list_of_dicts(value, default_key, default_value=UNDEFINED):
"""
Converts given value to a list of dictionaries as follows:
* ``[{...}]`` → ``[{...}]``
* ``{...}`` → ``[{...}]``
* ``'xyz'`` → ``[{default_key: 'xyz'}]``
* ``None`` → ``[{default_key: default_value}]`` (if specified)
* ``None`` → ``[]``
:param default_value:
only Unicode, i.e. `str` in Python 3.x and **only** `unicode` in Python 2.x
"""
if value is None:
if default_value is UNDEFINED:
return []
value = default_value
if isinstance(value, dict):
return [value]
if isinstance(value, text_type):
return [{default_key: value}]
if isinstance(value, list):
if not all(isinstance(x, dict) for x in value):
def _fix(x):
return {default_key: x} if isinstance(x, text_type) else x
return list(map(_fix, value))
return value | [
"def",
"normalize_list_of_dicts",
"(",
"value",
",",
"default_key",
",",
"default_value",
"=",
"UNDEFINED",
")",
":",
"if",
"value",
"is",
"None",
":",
"if",
"default_value",
"is",
"UNDEFINED",
":",
"return",
"[",
"]",
"value",
"=",
"default_value",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"return",
"[",
"value",
"]",
"if",
"isinstance",
"(",
"value",
",",
"text_type",
")",
":",
"return",
"[",
"{",
"default_key",
":",
"value",
"}",
"]",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"if",
"not",
"all",
"(",
"isinstance",
"(",
"x",
",",
"dict",
")",
"for",
"x",
"in",
"value",
")",
":",
"def",
"_fix",
"(",
"x",
")",
":",
"return",
"{",
"default_key",
":",
"x",
"}",
"if",
"isinstance",
"(",
"x",
",",
"text_type",
")",
"else",
"x",
"return",
"list",
"(",
"map",
"(",
"_fix",
",",
"value",
")",
")",
"return",
"value"
]
| Converts given value to a list of dictionaries as follows:
* ``[{...}]`` → ``[{...}]``
* ``{...}`` → ``[{...}]``
* ``'xyz'`` → ``[{default_key: 'xyz'}]``
* ``None`` → ``[{default_key: default_value}]`` (if specified)
* ``None`` → ``[]``
:param default_value:
only Unicode, i.e. `str` in Python 3.x and **only** `unicode` in Python 2.x | [
"Converts",
"given",
"value",
"to",
"a",
"list",
"of",
"dictionaries",
"as",
"follows",
":"
]
| python | train | 29.28125 |
SCIP-Interfaces/PySCIPOpt | examples/unfinished/tsptw.py | https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/unfinished/tsptw.py#L52-L92 | def mtz2tw(n,c,e,l):
"""mtz: model for the traveling salesman problem with time windows
(based on Miller-Tucker-Zemlin's one-index potential formulation, stronger constraints)
Parameters:
- n: number of nodes
- c[i,j]: cost for traversing arc (i,j)
- e[i]: earliest date for visiting node i
- l[i]: latest date for visiting node i
Returns a model, ready to be solved.
"""
model = Model("tsptw - mtz-strong")
x,u = {},{}
for i in range(1,n+1):
u[i] = model.addVar(lb=e[i], ub=l[i], vtype="C", name="u(%s)"%i)
for j in range(1,n+1):
if i != j:
x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j))
for i in range(1,n+1):
model.addCons(quicksum(x[i,j] for j in range(1,n+1) if j != i) == 1, "Out(%s)"%i)
model.addCons(quicksum(x[j,i] for j in range(1,n+1) if j != i) == 1, "In(%s)"%i)
for j in range(2,n+1):
if i != j:
M1 = max(l[i] + c[i,j] - e[j], 0)
M2 = max(l[i] + min(-c[j,i], e[j]-e[i]) - e[j], 0)
model.addCons(u[i] + c[i,j] - M1*(1-x[i,j]) + M2*x[j,i] <= u[j], "LiftedMTZ(%s,%s)"%(i,j))
for i in range(2,n+1):
model.addCons(e[i] + quicksum(max(e[j]+c[j,i]-e[i],0) * x[j,i] for j in range(1,n+1) if i != j) \
<= u[i], "LiftedLB(%s)"%i)
model.addCons(u[i] <= l[i] - \
quicksum(max(l[i]-l[j]+c[i,j],0) * x[i,j] for j in range(2,n+1) if i != j), \
"LiftedUB(%s)"%i)
model.setObjective(quicksum(c[i,j]*x[i,j] for (i,j) in x), "minimize")
model.data = x,u
return model | [
"def",
"mtz2tw",
"(",
"n",
",",
"c",
",",
"e",
",",
"l",
")",
":",
"model",
"=",
"Model",
"(",
"\"tsptw - mtz-strong\"",
")",
"x",
",",
"u",
"=",
"{",
"}",
",",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"n",
"+",
"1",
")",
":",
"u",
"[",
"i",
"]",
"=",
"model",
".",
"addVar",
"(",
"lb",
"=",
"e",
"[",
"i",
"]",
",",
"ub",
"=",
"l",
"[",
"i",
"]",
",",
"vtype",
"=",
"\"C\"",
",",
"name",
"=",
"\"u(%s)\"",
"%",
"i",
")",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"n",
"+",
"1",
")",
":",
"if",
"i",
"!=",
"j",
":",
"x",
"[",
"i",
",",
"j",
"]",
"=",
"model",
".",
"addVar",
"(",
"vtype",
"=",
"\"B\"",
",",
"name",
"=",
"\"x(%s,%s)\"",
"%",
"(",
"i",
",",
"j",
")",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"n",
"+",
"1",
")",
":",
"model",
".",
"addCons",
"(",
"quicksum",
"(",
"x",
"[",
"i",
",",
"j",
"]",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"n",
"+",
"1",
")",
"if",
"j",
"!=",
"i",
")",
"==",
"1",
",",
"\"Out(%s)\"",
"%",
"i",
")",
"model",
".",
"addCons",
"(",
"quicksum",
"(",
"x",
"[",
"j",
",",
"i",
"]",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"n",
"+",
"1",
")",
"if",
"j",
"!=",
"i",
")",
"==",
"1",
",",
"\"In(%s)\"",
"%",
"i",
")",
"for",
"j",
"in",
"range",
"(",
"2",
",",
"n",
"+",
"1",
")",
":",
"if",
"i",
"!=",
"j",
":",
"M1",
"=",
"max",
"(",
"l",
"[",
"i",
"]",
"+",
"c",
"[",
"i",
",",
"j",
"]",
"-",
"e",
"[",
"j",
"]",
",",
"0",
")",
"M2",
"=",
"max",
"(",
"l",
"[",
"i",
"]",
"+",
"min",
"(",
"-",
"c",
"[",
"j",
",",
"i",
"]",
",",
"e",
"[",
"j",
"]",
"-",
"e",
"[",
"i",
"]",
")",
"-",
"e",
"[",
"j",
"]",
",",
"0",
")",
"model",
".",
"addCons",
"(",
"u",
"[",
"i",
"]",
"+",
"c",
"[",
"i",
",",
"j",
"]",
"-",
"M1",
"*",
"(",
"1",
"-",
"x",
"[",
"i",
",",
"j",
"]",
")",
"+",
"M2",
"*",
"x",
"[",
"j",
",",
"i",
"]",
"<=",
"u",
"[",
"j",
"]",
",",
"\"LiftedMTZ(%s,%s)\"",
"%",
"(",
"i",
",",
"j",
")",
")",
"for",
"i",
"in",
"range",
"(",
"2",
",",
"n",
"+",
"1",
")",
":",
"model",
".",
"addCons",
"(",
"e",
"[",
"i",
"]",
"+",
"quicksum",
"(",
"max",
"(",
"e",
"[",
"j",
"]",
"+",
"c",
"[",
"j",
",",
"i",
"]",
"-",
"e",
"[",
"i",
"]",
",",
"0",
")",
"*",
"x",
"[",
"j",
",",
"i",
"]",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"n",
"+",
"1",
")",
"if",
"i",
"!=",
"j",
")",
"<=",
"u",
"[",
"i",
"]",
",",
"\"LiftedLB(%s)\"",
"%",
"i",
")",
"model",
".",
"addCons",
"(",
"u",
"[",
"i",
"]",
"<=",
"l",
"[",
"i",
"]",
"-",
"quicksum",
"(",
"max",
"(",
"l",
"[",
"i",
"]",
"-",
"l",
"[",
"j",
"]",
"+",
"c",
"[",
"i",
",",
"j",
"]",
",",
"0",
")",
"*",
"x",
"[",
"i",
",",
"j",
"]",
"for",
"j",
"in",
"range",
"(",
"2",
",",
"n",
"+",
"1",
")",
"if",
"i",
"!=",
"j",
")",
",",
"\"LiftedUB(%s)\"",
"%",
"i",
")",
"model",
".",
"setObjective",
"(",
"quicksum",
"(",
"c",
"[",
"i",
",",
"j",
"]",
"*",
"x",
"[",
"i",
",",
"j",
"]",
"for",
"(",
"i",
",",
"j",
")",
"in",
"x",
")",
",",
"\"minimize\"",
")",
"model",
".",
"data",
"=",
"x",
",",
"u",
"return",
"model"
]
| mtz: model for the traveling salesman problem with time windows
(based on Miller-Tucker-Zemlin's one-index potential formulation, stronger constraints)
Parameters:
- n: number of nodes
- c[i,j]: cost for traversing arc (i,j)
- e[i]: earliest date for visiting node i
- l[i]: latest date for visiting node i
Returns a model, ready to be solved. | [
"mtz",
":",
"model",
"for",
"the",
"traveling",
"salesman",
"problem",
"with",
"time",
"windows",
"(",
"based",
"on",
"Miller",
"-",
"Tucker",
"-",
"Zemlin",
"s",
"one",
"-",
"index",
"potential",
"formulation",
"stronger",
"constraints",
")",
"Parameters",
":",
"-",
"n",
":",
"number",
"of",
"nodes",
"-",
"c",
"[",
"i",
"j",
"]",
":",
"cost",
"for",
"traversing",
"arc",
"(",
"i",
"j",
")",
"-",
"e",
"[",
"i",
"]",
":",
"earliest",
"date",
"for",
"visiting",
"node",
"i",
"-",
"l",
"[",
"i",
"]",
":",
"latest",
"date",
"for",
"visiting",
"node",
"i",
"Returns",
"a",
"model",
"ready",
"to",
"be",
"solved",
"."
]
| python | train | 40 |
googleapis/gax-python | google/gax/errors.py | https://github.com/googleapis/gax-python/blob/309aedfcfd48e4c8fa22dd60e9c84c3cc71bb20e/google/gax/errors.py#L73-L90 | def create_error(msg, cause=None):
"""Creates a ``GaxError`` or subclass.
Attributes:
msg (string): describes the error that occurred.
cause (Exception, optional): the exception raised by a lower
layer of the RPC stack (for example, gRPC) that caused this
exception, or None if this exception originated in GAX.
Returns:
.GaxError: The exception that wraps ``cause``.
"""
status_code = config.exc_to_code(cause)
status_name = config.NAME_STATUS_CODES.get(status_code)
if status_name == 'INVALID_ARGUMENT':
return InvalidArgumentError(msg, cause=cause)
else:
return GaxError(msg, cause=cause) | [
"def",
"create_error",
"(",
"msg",
",",
"cause",
"=",
"None",
")",
":",
"status_code",
"=",
"config",
".",
"exc_to_code",
"(",
"cause",
")",
"status_name",
"=",
"config",
".",
"NAME_STATUS_CODES",
".",
"get",
"(",
"status_code",
")",
"if",
"status_name",
"==",
"'INVALID_ARGUMENT'",
":",
"return",
"InvalidArgumentError",
"(",
"msg",
",",
"cause",
"=",
"cause",
")",
"else",
":",
"return",
"GaxError",
"(",
"msg",
",",
"cause",
"=",
"cause",
")"
]
| Creates a ``GaxError`` or subclass.
Attributes:
msg (string): describes the error that occurred.
cause (Exception, optional): the exception raised by a lower
layer of the RPC stack (for example, gRPC) that caused this
exception, or None if this exception originated in GAX.
Returns:
.GaxError: The exception that wraps ``cause``. | [
"Creates",
"a",
"GaxError",
"or",
"subclass",
"."
]
| python | train | 37.333333 |
iotile/coretools | iotilecore/iotile/core/utilities/intelhex/__init__.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/utilities/intelhex/__init__.py#L1071-L1096 | def bin2hex(fin, fout, offset=0):
"""Simple bin-to-hex convertor.
@return 0 if all OK
@param fin input bin file (filename or file-like object)
@param fout output hex file (filename or file-like object)
@param offset starting address offset for loading bin
"""
h = IntelHex()
try:
h.loadbin(fin, offset)
except IOError:
e = sys.exc_info()[1] # current exception
txt = 'ERROR: unable to load bin file:', str(e)
print(txt)
return 1
try:
h.tofile(fout, format='hex')
except IOError:
e = sys.exc_info()[1] # current exception
txt = "ERROR: Could not write to file: %s: %s" % (fout, str(e))
print(txt)
return 1
return 0 | [
"def",
"bin2hex",
"(",
"fin",
",",
"fout",
",",
"offset",
"=",
"0",
")",
":",
"h",
"=",
"IntelHex",
"(",
")",
"try",
":",
"h",
".",
"loadbin",
"(",
"fin",
",",
"offset",
")",
"except",
"IOError",
":",
"e",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
"# current exception",
"txt",
"=",
"'ERROR: unable to load bin file:'",
",",
"str",
"(",
"e",
")",
"print",
"(",
"txt",
")",
"return",
"1",
"try",
":",
"h",
".",
"tofile",
"(",
"fout",
",",
"format",
"=",
"'hex'",
")",
"except",
"IOError",
":",
"e",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
"# current exception",
"txt",
"=",
"\"ERROR: Could not write to file: %s: %s\"",
"%",
"(",
"fout",
",",
"str",
"(",
"e",
")",
")",
"print",
"(",
"txt",
")",
"return",
"1",
"return",
"0"
]
| Simple bin-to-hex convertor.
@return 0 if all OK
@param fin input bin file (filename or file-like object)
@param fout output hex file (filename or file-like object)
@param offset starting address offset for loading bin | [
"Simple",
"bin",
"-",
"to",
"-",
"hex",
"convertor",
".",
"@return",
"0",
"if",
"all",
"OK"
]
| python | train | 28.615385 |
explosion/thinc | examples/text-pair/glove_mwe_multipool_siamese.py | https://github.com/explosion/thinc/blob/90129be5f0d6c665344245a7c37dbe1b8afceea2/examples/text-pair/glove_mwe_multipool_siamese.py#L46-L64 | def track_progress(**context):
"""Print training progress. Called after each epoch."""
model = context["model"]
train_X = context["train_X"]
dev_X = context["dev_X"]
dev_y = context["dev_y"]
n_train = len(train_X)
trainer = context["trainer"]
def each_epoch():
global epoch_train_acc, epoch
with model.use_params(trainer.optimizer.averages):
avg_acc = model.evaluate_logloss(dev_X, dev_y)
stats = (avg_acc, float(epoch_train_acc) / n_train, trainer.dropout)
print("%.3f dev acc, %.3f train acc, %.4f drop" % stats)
epoch_train_acc = 0.0
epoch += 1
return each_epoch | [
"def",
"track_progress",
"(",
"*",
"*",
"context",
")",
":",
"model",
"=",
"context",
"[",
"\"model\"",
"]",
"train_X",
"=",
"context",
"[",
"\"train_X\"",
"]",
"dev_X",
"=",
"context",
"[",
"\"dev_X\"",
"]",
"dev_y",
"=",
"context",
"[",
"\"dev_y\"",
"]",
"n_train",
"=",
"len",
"(",
"train_X",
")",
"trainer",
"=",
"context",
"[",
"\"trainer\"",
"]",
"def",
"each_epoch",
"(",
")",
":",
"global",
"epoch_train_acc",
",",
"epoch",
"with",
"model",
".",
"use_params",
"(",
"trainer",
".",
"optimizer",
".",
"averages",
")",
":",
"avg_acc",
"=",
"model",
".",
"evaluate_logloss",
"(",
"dev_X",
",",
"dev_y",
")",
"stats",
"=",
"(",
"avg_acc",
",",
"float",
"(",
"epoch_train_acc",
")",
"/",
"n_train",
",",
"trainer",
".",
"dropout",
")",
"print",
"(",
"\"%.3f dev acc, %.3f train acc, %.4f drop\"",
"%",
"stats",
")",
"epoch_train_acc",
"=",
"0.0",
"epoch",
"+=",
"1",
"return",
"each_epoch"
]
| Print training progress. Called after each epoch. | [
"Print",
"training",
"progress",
".",
"Called",
"after",
"each",
"epoch",
"."
]
| python | train | 33.947368 |
opencobra/memote | memote/utils.py | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/utils.py#L71-L130 | def annotate(title, format_type, message=None, data=None, metric=1.0):
"""
Annotate a test case with info that should be displayed in the reports.
Parameters
----------
title : str
A human-readable descriptive title of the test case.
format_type : str
A string that determines how the result data is formatted in the
report. It is expected not to be None.
* 'number' : 'data' is a single number which can be an integer or
float and should be represented as such.
* 'count' : 'data' is a list, set or tuple. Choosing 'count' will
display the length of that list e.g. number of metabolites without
formula.
* 'percent' : Instead of 'data' the content of 'metric' ought to be
displayed e.g. percentage of metabolites without charge.
'metric' is expected to be a floating point number.
        * 'raw' : 'data' ought to be displayed "as is" without formatting.
This option is appropriate for single strings or a boolean output.
message : str
A short written explanation that states and possibly explains the test
result.
data
Raw data which the test case generates and assesses. Can be of the
following types: list, set, tuple, string, float, integer, and boolean.
metric: float
A value x in the range of 0 <= x <= 1 which represents the fraction of
'data' to the total in the model. For example, if 'data' are all
metabolites without formula, 'metric' should be the fraction of
metabolites without formula from the total of metabolites in the model.
Returns
-------
function
The decorated function, now extended by the attribute 'annotation'.
Notes
-----
Adds "annotation" attribute to the function object, which stores values for
predefined keys as a dictionary.
"""
if format_type not in TYPES:
raise ValueError(
"Invalid type. Expected one of: {}.".format(", ".join(TYPES)))
def decorator(func):
func.annotation = dict(
title=title,
summary=extended_summary(func),
message=message,
data=data,
format_type=format_type,
metric=metric)
return func
return decorator | [
"def",
"annotate",
"(",
"title",
",",
"format_type",
",",
"message",
"=",
"None",
",",
"data",
"=",
"None",
",",
"metric",
"=",
"1.0",
")",
":",
"if",
"format_type",
"not",
"in",
"TYPES",
":",
"raise",
"ValueError",
"(",
"\"Invalid type. Expected one of: {}.\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"TYPES",
")",
")",
")",
"def",
"decorator",
"(",
"func",
")",
":",
"func",
".",
"annotation",
"=",
"dict",
"(",
"title",
"=",
"title",
",",
"summary",
"=",
"extended_summary",
"(",
"func",
")",
",",
"message",
"=",
"message",
",",
"data",
"=",
"data",
",",
"format_type",
"=",
"format_type",
",",
"metric",
"=",
"metric",
")",
"return",
"func",
"return",
"decorator"
]
| Annotate a test case with info that should be displayed in the reports.
Parameters
----------
title : str
A human-readable descriptive title of the test case.
format_type : str
A string that determines how the result data is formatted in the
report. It is expected not to be None.
* 'number' : 'data' is a single number which can be an integer or
float and should be represented as such.
* 'count' : 'data' is a list, set or tuple. Choosing 'count' will
display the length of that list e.g. number of metabolites without
formula.
* 'percent' : Instead of 'data' the content of 'metric' ought to be
displayed e.g. percentage of metabolites without charge.
'metric' is expected to be a floating point number.
        * 'raw' : 'data' ought to be displayed "as is" without formatting.
This option is appropriate for single strings or a boolean output.
message : str
A short written explanation that states and possibly explains the test
result.
data
Raw data which the test case generates and assesses. Can be of the
following types: list, set, tuple, string, float, integer, and boolean.
metric: float
A value x in the range of 0 <= x <= 1 which represents the fraction of
'data' to the total in the model. For example, if 'data' are all
metabolites without formula, 'metric' should be the fraction of
metabolites without formula from the total of metabolites in the model.
Returns
-------
function
The decorated function, now extended by the attribute 'annotation'.
Notes
-----
Adds "annotation" attribute to the function object, which stores values for
predefined keys as a dictionary. | [
"Annotate",
"a",
"test",
"case",
"with",
"info",
"that",
"should",
"be",
"displayed",
"in",
"the",
"reports",
"."
]
| python | train | 38 |
nickmilon/Hellas | Hellas/Athens.py | https://github.com/nickmilon/Hellas/blob/542e4778692fbec90753942946f20100412ec9ee/Hellas/Athens.py#L37-L66 | def haversine(lon1, lat1, lon2, lat2, earth_radius=6357000):
"""Calculate the great circle distance between two points on earth in Kilometers
on the earth (specified in decimal degrees)
.. seealso:: :func:`distance_points`
:param float lon1: longitude of first place (decimal degrees)
:param float lat1: latitude of first place (decimal degrees)
:param float lon2: longitude of second place (decimal degrees)
:param float lat2: latitude of second place (decimal degrees)
:param earth_radius: earth_radius (use 6367 for KM 6367000 for meters 3956 for miles
- http://stackoverflow.com/questions/5283900/what-earth-radius-should-i-use-to-calculate-distances-near-the-poles
:Example:
>>> London_long=-0.126 ; London_lat=51.50; Paris_long = 2.350; Paris_lat = 48.856
>>> haversine(London_long, London_lat, Paris_long, Paris_lat)
342.55375272454864
:returns: float distance in Kilometers
"""
# convert decimal degrees to radiant
lon1, lat1, lon2, lat2 = list(map(math.radians, [lon1, lat1, lon2, lat2]))
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
c = 2 * math.asin(math.sqrt(a))
distance = earth_radius * c # 6371 # Radius of earth in kilometers. Use 3956 for miles
return distance | [
"def",
"haversine",
"(",
"lon1",
",",
"lat1",
",",
"lon2",
",",
"lat2",
",",
"earth_radius",
"=",
"6357000",
")",
":",
"# convert decimal degrees to radiant",
"lon1",
",",
"lat1",
",",
"lon2",
",",
"lat2",
"=",
"list",
"(",
"map",
"(",
"math",
".",
"radians",
",",
"[",
"lon1",
",",
"lat1",
",",
"lon2",
",",
"lat2",
"]",
")",
")",
"# haversine formula",
"dlon",
"=",
"lon2",
"-",
"lon1",
"dlat",
"=",
"lat2",
"-",
"lat1",
"a",
"=",
"math",
".",
"sin",
"(",
"dlat",
"/",
"2",
")",
"**",
"2",
"+",
"math",
".",
"cos",
"(",
"lat1",
")",
"*",
"math",
".",
"cos",
"(",
"lat2",
")",
"*",
"math",
".",
"sin",
"(",
"dlon",
"/",
"2",
")",
"**",
"2",
"c",
"=",
"2",
"*",
"math",
".",
"asin",
"(",
"math",
".",
"sqrt",
"(",
"a",
")",
")",
"distance",
"=",
"earth_radius",
"*",
"c",
"# 6371 # Radius of earth in kilometers. Use 3956 for miles",
"return",
"distance"
]
| Calculate the great circle distance between two points on earth in Kilometers
on the earth (specified in decimal degrees)
.. seealso:: :func:`distance_points`
:param float lon1: longitude of first place (decimal degrees)
:param float lat1: latitude of first place (decimal degrees)
:param float lon2: longitude of second place (decimal degrees)
:param float lat2: latitude of second place (decimal degrees)
:param earth_radius: earth_radius (use 6367 for KM 6367000 for meters 3956 for miles
- http://stackoverflow.com/questions/5283900/what-earth-radius-should-i-use-to-calculate-distances-near-the-poles
:Example:
>>> London_long=-0.126 ; London_lat=51.50; Paris_long = 2.350; Paris_lat = 48.856
>>> haversine(London_long, London_lat, Paris_long, Paris_lat)
342.55375272454864
:returns: float distance in Kilometers | [
"Calculate",
"the",
"great",
"circle",
"distance",
"between",
"two",
"points",
"on",
"earth",
"in",
"Kilometers",
"on",
"the",
"earth",
"(",
"specified",
"in",
"decimal",
"degrees",
")"
]
| python | train | 45.3 |
knagra/farnsworth | threads/views.py | https://github.com/knagra/farnsworth/blob/1b6589f0d9fea154f0a1e2231ed906764ed26d26/threads/views.py#L193-L216 | def list_user_threads_view(request, targetUsername):
''' View of threads a user has created. '''
targetUser = get_object_or_404(User, username=targetUsername)
targetProfile = get_object_or_404(UserProfile, user=targetUser)
threads = Thread.objects.filter(owner=targetProfile)
page_name = "{0}'s Threads".format(targetUser.get_full_name())
create_form = ThreadForm(
request.POST if "submit_thread_form" in request.POST else None,
profile=UserProfile.objects.get(user=request.user),
prefix="create",
)
if create_form.is_valid():
thread = create_form.save()
return HttpResponseRedirect(reverse("threads:view_thread", kwargs={"pk": thread.pk}))
elif request.method == "POST":
messages.add_message(request, messages.ERROR, MESSAGES['THREAD_ERROR'])
return render_to_response('list_threads.html', {
'page_name': page_name,
'threads': threads,
"create_form": create_form,
'targetUsername': targetUsername,
}, context_instance=RequestContext(request)) | [
"def",
"list_user_threads_view",
"(",
"request",
",",
"targetUsername",
")",
":",
"targetUser",
"=",
"get_object_or_404",
"(",
"User",
",",
"username",
"=",
"targetUsername",
")",
"targetProfile",
"=",
"get_object_or_404",
"(",
"UserProfile",
",",
"user",
"=",
"targetUser",
")",
"threads",
"=",
"Thread",
".",
"objects",
".",
"filter",
"(",
"owner",
"=",
"targetProfile",
")",
"page_name",
"=",
"\"{0}'s Threads\"",
".",
"format",
"(",
"targetUser",
".",
"get_full_name",
"(",
")",
")",
"create_form",
"=",
"ThreadForm",
"(",
"request",
".",
"POST",
"if",
"\"submit_thread_form\"",
"in",
"request",
".",
"POST",
"else",
"None",
",",
"profile",
"=",
"UserProfile",
".",
"objects",
".",
"get",
"(",
"user",
"=",
"request",
".",
"user",
")",
",",
"prefix",
"=",
"\"create\"",
",",
")",
"if",
"create_form",
".",
"is_valid",
"(",
")",
":",
"thread",
"=",
"create_form",
".",
"save",
"(",
")",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"\"threads:view_thread\"",
",",
"kwargs",
"=",
"{",
"\"pk\"",
":",
"thread",
".",
"pk",
"}",
")",
")",
"elif",
"request",
".",
"method",
"==",
"\"POST\"",
":",
"messages",
".",
"add_message",
"(",
"request",
",",
"messages",
".",
"ERROR",
",",
"MESSAGES",
"[",
"'THREAD_ERROR'",
"]",
")",
"return",
"render_to_response",
"(",
"'list_threads.html'",
",",
"{",
"'page_name'",
":",
"page_name",
",",
"'threads'",
":",
"threads",
",",
"\"create_form\"",
":",
"create_form",
",",
"'targetUsername'",
":",
"targetUsername",
",",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
")"
]
| View of threads a user has created. | [
"View",
"of",
"threads",
"a",
"user",
"has",
"created",
"."
]
| python | train | 43.916667 |
simpleai-team/simpleai | simpleai/search/local.py | https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/local.py#L21-L38 | def beam(problem, beam_size=100, iterations_limit=0, viewer=None):
'''
Beam search.
beam_size is the size of the beam.
If iterations_limit is specified, the algorithm will end after that
number of iterations. Else, it will continue until it can't find a
better node than the current one.
Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.value,
and SearchProblem.generate_random_state.
'''
return _local_search(problem,
_all_expander,
iterations_limit=iterations_limit,
fringe_size=beam_size,
random_initial_states=True,
stop_when_no_better=iterations_limit==0,
viewer=viewer) | [
"def",
"beam",
"(",
"problem",
",",
"beam_size",
"=",
"100",
",",
"iterations_limit",
"=",
"0",
",",
"viewer",
"=",
"None",
")",
":",
"return",
"_local_search",
"(",
"problem",
",",
"_all_expander",
",",
"iterations_limit",
"=",
"iterations_limit",
",",
"fringe_size",
"=",
"beam_size",
",",
"random_initial_states",
"=",
"True",
",",
"stop_when_no_better",
"=",
"iterations_limit",
"==",
"0",
",",
"viewer",
"=",
"viewer",
")"
]
| Beam search.
beam_size is the size of the beam.
If iterations_limit is specified, the algorithm will end after that
number of iterations. Else, it will continue until it can't find a
better node than the current one.
Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.value,
and SearchProblem.generate_random_state. | [
"Beam",
"search",
"."
]
| python | train | 42.722222 |
waqasbhatti/astrobase | astrobase/timeutils.py | https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/timeutils.py#L294-L388 | def get_epochs_given_midtimes_and_period(
t_mid,
period,
err_t_mid=None,
t0_fixed=None,
t0_percentile=None,
verbose=False
):
'''This calculates the future epochs for a transit, given a period and a
starting epoch
The equation used is::
t_mid = period*epoch + t0
Default behavior if no kwargs are used is to define `t0` as the median
finite time of the passed `t_mid` array.
Only one of `err_t_mid` or `t0_fixed` should be passed.
Parameters
----------
t_mid : np.array
A np.array of transit mid-time measurements
period : float
The period used to calculate epochs, per the equation above. For typical
use cases, a period precise to ~1e-5 days is sufficient to get correct
epochs.
err_t_mid : None or np.array
If provided, contains the errors of the transit mid-time
measurements. The zero-point epoch is then set equal to the average of
the transit times, weighted as `1/err_t_mid^2` . This minimizes the
covariance between the transit epoch and the period (e.g., Gibson et
al. 2013). For standard O-C analysis this is the best method.
t0_fixed : None or float:
If provided, use this t0 as the starting epoch. (Overrides all others).
t0_percentile : None or float
If provided, use this percentile of `t_mid` to define `t0`.
Returns
-------
tuple
        This is of the form `(integer_epoch_array, t0)`.
`integer_epoch_array` is an array of integer epochs (float-type),
of length equal to the number of *finite* mid-times passed.
'''
kwargarr = np.array([isinstance(err_t_mid,np.ndarray),
t0_fixed,
t0_percentile])
if not _single_true(kwargarr) and not np.all(~kwargarr.astype(bool)):
raise AssertionError(
'can have at most one of err_t_mid, t0_fixed, t0_percentile')
t_mid = t_mid[np.isfinite(t_mid)]
N_midtimes = len(t_mid)
if t0_fixed:
t0 = t0_fixed
elif isinstance(err_t_mid,np.ndarray):
# get the weighted average. then round it to the nearest transit epoch.
t0_avg = np.average(t_mid, weights=1/err_t_mid**2)
t0_options = np.arange(min(t_mid), max(t_mid)+period, period)
t0 = t0_options[np.argmin(np.abs(t0_options - t0_avg))]
else:
if not t0_percentile:
# if there are an odd number of times, take the median time as
# epoch=0. elif there are an even number of times, take the lower
# of the two middle times as epoch=0.
if N_midtimes % 2 == 1:
t0 = np.median(t_mid)
else:
t0 = t_mid[int(N_midtimes/2)]
else:
t0 = np.sort(t_mid)[int(N_midtimes*t0_percentile/100)]
epoch = (t_mid - t0)/period
# do not convert numpy entries to actual ints, because np.nan is float type
int_epoch = np.round(epoch, 0)
if verbose:
LOGINFO('epochs before rounding')
LOGINFO('\n{:s}'.format(repr(epoch)))
LOGINFO('epochs after rounding')
LOGINFO('\n{:s}'.format(repr(int_epoch)))
return int_epoch, t0 | [
"def",
"get_epochs_given_midtimes_and_period",
"(",
"t_mid",
",",
"period",
",",
"err_t_mid",
"=",
"None",
",",
"t0_fixed",
"=",
"None",
",",
"t0_percentile",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"kwargarr",
"=",
"np",
".",
"array",
"(",
"[",
"isinstance",
"(",
"err_t_mid",
",",
"np",
".",
"ndarray",
")",
",",
"t0_fixed",
",",
"t0_percentile",
"]",
")",
"if",
"not",
"_single_true",
"(",
"kwargarr",
")",
"and",
"not",
"np",
".",
"all",
"(",
"~",
"kwargarr",
".",
"astype",
"(",
"bool",
")",
")",
":",
"raise",
"AssertionError",
"(",
"'can have at most one of err_t_mid, t0_fixed, t0_percentile'",
")",
"t_mid",
"=",
"t_mid",
"[",
"np",
".",
"isfinite",
"(",
"t_mid",
")",
"]",
"N_midtimes",
"=",
"len",
"(",
"t_mid",
")",
"if",
"t0_fixed",
":",
"t0",
"=",
"t0_fixed",
"elif",
"isinstance",
"(",
"err_t_mid",
",",
"np",
".",
"ndarray",
")",
":",
"# get the weighted average. then round it to the nearest transit epoch.",
"t0_avg",
"=",
"np",
".",
"average",
"(",
"t_mid",
",",
"weights",
"=",
"1",
"/",
"err_t_mid",
"**",
"2",
")",
"t0_options",
"=",
"np",
".",
"arange",
"(",
"min",
"(",
"t_mid",
")",
",",
"max",
"(",
"t_mid",
")",
"+",
"period",
",",
"period",
")",
"t0",
"=",
"t0_options",
"[",
"np",
".",
"argmin",
"(",
"np",
".",
"abs",
"(",
"t0_options",
"-",
"t0_avg",
")",
")",
"]",
"else",
":",
"if",
"not",
"t0_percentile",
":",
"# if there are an odd number of times, take the median time as",
"# epoch=0. elif there are an even number of times, take the lower",
"# of the two middle times as epoch=0.",
"if",
"N_midtimes",
"%",
"2",
"==",
"1",
":",
"t0",
"=",
"np",
".",
"median",
"(",
"t_mid",
")",
"else",
":",
"t0",
"=",
"t_mid",
"[",
"int",
"(",
"N_midtimes",
"/",
"2",
")",
"]",
"else",
":",
"t0",
"=",
"np",
".",
"sort",
"(",
"t_mid",
")",
"[",
"int",
"(",
"N_midtimes",
"*",
"t0_percentile",
"/",
"100",
")",
"]",
"epoch",
"=",
"(",
"t_mid",
"-",
"t0",
")",
"/",
"period",
"# do not convert numpy entries to actual ints, because np.nan is float type",
"int_epoch",
"=",
"np",
".",
"round",
"(",
"epoch",
",",
"0",
")",
"if",
"verbose",
":",
"LOGINFO",
"(",
"'epochs before rounding'",
")",
"LOGINFO",
"(",
"'\\n{:s}'",
".",
"format",
"(",
"repr",
"(",
"epoch",
")",
")",
")",
"LOGINFO",
"(",
"'epochs after rounding'",
")",
"LOGINFO",
"(",
"'\\n{:s}'",
".",
"format",
"(",
"repr",
"(",
"int_epoch",
")",
")",
")",
"return",
"int_epoch",
",",
"t0"
]
| This calculates the future epochs for a transit, given a period and a
starting epoch
The equation used is::
t_mid = period*epoch + t0
Default behavior if no kwargs are used is to define `t0` as the median
finite time of the passed `t_mid` array.
Only one of `err_t_mid` or `t0_fixed` should be passed.
Parameters
----------
t_mid : np.array
A np.array of transit mid-time measurements
period : float
The period used to calculate epochs, per the equation above. For typical
use cases, a period precise to ~1e-5 days is sufficient to get correct
epochs.
err_t_mid : None or np.array
If provided, contains the errors of the transit mid-time
measurements. The zero-point epoch is then set equal to the average of
the transit times, weighted as `1/err_t_mid^2` . This minimizes the
covariance between the transit epoch and the period (e.g., Gibson et
al. 2013). For standard O-C analysis this is the best method.
t0_fixed : None or float:
If provided, use this t0 as the starting epoch. (Overrides all others).
t0_percentile : None or float
If provided, use this percentile of `t_mid` to define `t0`.
Returns
-------
tuple
        This is of the form `(integer_epoch_array, t0)`.
`integer_epoch_array` is an array of integer epochs (float-type),
of length equal to the number of *finite* mid-times passed. | [
"This",
"calculates",
"the",
"future",
"epochs",
"for",
"a",
"transit",
"given",
"a",
"period",
"and",
"a",
"starting",
"epoch"
]
| python | valid | 33.2 |
pappasam/latexbuild | latexbuild/__init__.py | https://github.com/pappasam/latexbuild/blob/596a2a0a4c42eaa5eb9503d64f9073ad5d0640d5/latexbuild/__init__.py#L40-L56 | def build_html(path_jinja2, template_name, path_outfile, template_kwargs=None):
'''Helper function for building an html from a latex jinja2 template
:param path_jinja2: the root directory for latex jinja2 templates
:param template_name: the relative path, to path_jinja2, to the desired
jinja2 Latex template
:param path_outfile: the full path to the desired final output file
Must contain the same file extension as files generated by
cmd_wo_infile, otherwise the process will fail
:param template_kwargs: a dictionary of key/values for jinja2 variables
'''
latex_template_object = LatexBuild(
path_jinja2,
template_name,
template_kwargs,
)
return latex_template_object.build_html(path_outfile) | [
"def",
"build_html",
"(",
"path_jinja2",
",",
"template_name",
",",
"path_outfile",
",",
"template_kwargs",
"=",
"None",
")",
":",
"latex_template_object",
"=",
"LatexBuild",
"(",
"path_jinja2",
",",
"template_name",
",",
"template_kwargs",
",",
")",
"return",
"latex_template_object",
".",
"build_html",
"(",
"path_outfile",
")"
]
| Helper function for building an html from a latex jinja2 template
:param path_jinja2: the root directory for latex jinja2 templates
:param template_name: the relative path, to path_jinja2, to the desired
jinja2 Latex template
:param path_outfile: the full path to the desired final output file
Must contain the same file extension as files generated by
cmd_wo_infile, otherwise the process will fail
:param template_kwargs: a dictionary of key/values for jinja2 variables | [
"Helper",
"function",
"for",
"building",
"an",
"html",
"from",
"a",
"latex",
"jinja2",
"template"
]
| python | train | 46.117647 |
inveniosoftware/kwalitee | kwalitee/hooks.py | https://github.com/inveniosoftware/kwalitee/blob/9124f8f55b15547fef08c6c43cabced314e70674/kwalitee/hooks.py#L285-L306 | def run(command, raw_output=False):
"""Run a command using subprocess.
:param command: command line to be run
:type command: str
:param raw_output: does not attempt to convert the output as unicode
:type raw_output: bool
:return: error code, output (``stdout``) and error (``stderr``)
:rtype: tuple
"""
p = Popen(command.split(), stdout=PIPE, stderr=PIPE)
(stdout, stderr) = p.communicate()
# On python 3, subprocess.Popen returns bytes objects.
if not raw_output:
return (
p.returncode,
[line.rstrip() for line in stdout.decode("utf-8").splitlines()],
[line.rstrip() for line in stderr.decode("utf-8").splitlines()]
)
else:
return (p.returncode, stdout, stderr) | [
"def",
"run",
"(",
"command",
",",
"raw_output",
"=",
"False",
")",
":",
"p",
"=",
"Popen",
"(",
"command",
".",
"split",
"(",
")",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
")",
"(",
"stdout",
",",
"stderr",
")",
"=",
"p",
".",
"communicate",
"(",
")",
"# On python 3, subprocess.Popen returns bytes objects.",
"if",
"not",
"raw_output",
":",
"return",
"(",
"p",
".",
"returncode",
",",
"[",
"line",
".",
"rstrip",
"(",
")",
"for",
"line",
"in",
"stdout",
".",
"decode",
"(",
"\"utf-8\"",
")",
".",
"splitlines",
"(",
")",
"]",
",",
"[",
"line",
".",
"rstrip",
"(",
")",
"for",
"line",
"in",
"stderr",
".",
"decode",
"(",
"\"utf-8\"",
")",
".",
"splitlines",
"(",
")",
"]",
")",
"else",
":",
"return",
"(",
"p",
".",
"returncode",
",",
"stdout",
",",
"stderr",
")"
]
| Run a command using subprocess.
:param command: command line to be run
:type command: str
:param raw_output: does not attempt to convert the output as unicode
:type raw_output: bool
:return: error code, output (``stdout``) and error (``stderr``)
:rtype: tuple | [
"Run",
"a",
"command",
"using",
"subprocess",
"."
]
| python | train | 34.318182 |
annayqho/TheCannon | TheCannon/normalization.py | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/normalization.py#L380-L398 | def _cont_norm_running_quantile_regions(wl, fluxes, ivars, q, delta_lambda,
ranges, verbose=True):
""" Perform continuum normalization using running quantile, for spectrum
that comes in chunks
"""
print("contnorm.py: continuum norm using running quantile")
print("Taking spectra in %s chunks" % len(ranges))
nstars = fluxes.shape[0]
norm_fluxes = np.zeros(fluxes.shape)
norm_ivars = np.zeros(ivars.shape)
for chunk in ranges:
start = chunk[0]
stop = chunk[1]
output = _cont_norm_running_quantile(
wl[start:stop], fluxes[:,start:stop],
ivars[:,start:stop], q, delta_lambda)
norm_fluxes[:,start:stop] = output[0]
norm_ivars[:,start:stop] = output[1]
return norm_fluxes, norm_ivars | [
"def",
"_cont_norm_running_quantile_regions",
"(",
"wl",
",",
"fluxes",
",",
"ivars",
",",
"q",
",",
"delta_lambda",
",",
"ranges",
",",
"verbose",
"=",
"True",
")",
":",
"print",
"(",
"\"contnorm.py: continuum norm using running quantile\"",
")",
"print",
"(",
"\"Taking spectra in %s chunks\"",
"%",
"len",
"(",
"ranges",
")",
")",
"nstars",
"=",
"fluxes",
".",
"shape",
"[",
"0",
"]",
"norm_fluxes",
"=",
"np",
".",
"zeros",
"(",
"fluxes",
".",
"shape",
")",
"norm_ivars",
"=",
"np",
".",
"zeros",
"(",
"ivars",
".",
"shape",
")",
"for",
"chunk",
"in",
"ranges",
":",
"start",
"=",
"chunk",
"[",
"0",
"]",
"stop",
"=",
"chunk",
"[",
"1",
"]",
"output",
"=",
"_cont_norm_running_quantile",
"(",
"wl",
"[",
"start",
":",
"stop",
"]",
",",
"fluxes",
"[",
":",
",",
"start",
":",
"stop",
"]",
",",
"ivars",
"[",
":",
",",
"start",
":",
"stop",
"]",
",",
"q",
",",
"delta_lambda",
")",
"norm_fluxes",
"[",
":",
",",
"start",
":",
"stop",
"]",
"=",
"output",
"[",
"0",
"]",
"norm_ivars",
"[",
":",
",",
"start",
":",
"stop",
"]",
"=",
"output",
"[",
"1",
"]",
"return",
"norm_fluxes",
",",
"norm_ivars"
]
| Perform continuum normalization using running quantile, for spectrum
that comes in chunks | [
"Perform",
"continuum",
"normalization",
"using",
"running",
"quantile",
"for",
"spectrum",
"that",
"comes",
"in",
"chunks"
]
| python | train | 42.736842 |
carlosp420/dataset-creator | dataset_creator/utils.py | https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/utils.py#L59-L72 | def convert_nexus_to_format(dataset_as_nexus, dataset_format):
"""
Converts nexus format to Phylip and Fasta using Biopython tools.
:param dataset_as_nexus:
:param dataset_format:
:return:
"""
fake_handle = StringIO(dataset_as_nexus)
nexus_al = AlignIO.parse(fake_handle, 'nexus')
tmp_file = make_random_filename()
AlignIO.write(nexus_al, tmp_file, dataset_format)
dataset_as_fasta = read_and_delete_tmp_file(tmp_file)
return dataset_as_fasta | [
"def",
"convert_nexus_to_format",
"(",
"dataset_as_nexus",
",",
"dataset_format",
")",
":",
"fake_handle",
"=",
"StringIO",
"(",
"dataset_as_nexus",
")",
"nexus_al",
"=",
"AlignIO",
".",
"parse",
"(",
"fake_handle",
",",
"'nexus'",
")",
"tmp_file",
"=",
"make_random_filename",
"(",
")",
"AlignIO",
".",
"write",
"(",
"nexus_al",
",",
"tmp_file",
",",
"dataset_format",
")",
"dataset_as_fasta",
"=",
"read_and_delete_tmp_file",
"(",
"tmp_file",
")",
"return",
"dataset_as_fasta"
]
| Converts nexus format to Phylip and Fasta using Biopython tools.
:param dataset_as_nexus:
:param dataset_format:
:return: | [
"Converts",
"nexus",
"format",
"to",
"Phylip",
"and",
"Fasta",
"using",
"Biopython",
"tools",
"."
]
| python | train | 34.142857 |
boriel/zxbasic | zxbparser.py | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbparser.py#L912-L917 | def p_statement_draw_attr(p):
""" statement : DRAW attr_list expr COMMA expr
"""
p[0] = make_sentence('DRAW',
make_typecast(TYPE.integer, p[3], p.lineno(4)),
make_typecast(TYPE.integer, p[5], p.lineno(4)), p[2]) | [
"def",
"p_statement_draw_attr",
"(",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"make_sentence",
"(",
"'DRAW'",
",",
"make_typecast",
"(",
"TYPE",
".",
"integer",
",",
"p",
"[",
"3",
"]",
",",
"p",
".",
"lineno",
"(",
"4",
")",
")",
",",
"make_typecast",
"(",
"TYPE",
".",
"integer",
",",
"p",
"[",
"5",
"]",
",",
"p",
".",
"lineno",
"(",
"4",
")",
")",
",",
"p",
"[",
"2",
"]",
")"
]
| statement : DRAW attr_list expr COMMA expr | [
"statement",
":",
"DRAW",
"attr_list",
"expr",
"COMMA",
"expr"
]
| python | train | 44.666667 |
obulpathi/cdn-fastly-python | fastly/__init__.py | https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L286-L294 | def content_edge_check(self, url):
"""Retrieve headers and MD5 hash of the content for a particular url from each Fastly edge server."""
prefixes = ["http://", "https://"]
for prefix in prefixes:
if url.startswith(prefix):
url = url[len(prefix):]
break
content = self._fetch("/content/edge_check/%s" % url)
return content | [
"def",
"content_edge_check",
"(",
"self",
",",
"url",
")",
":",
"prefixes",
"=",
"[",
"\"http://\"",
",",
"\"https://\"",
"]",
"for",
"prefix",
"in",
"prefixes",
":",
"if",
"url",
".",
"startswith",
"(",
"prefix",
")",
":",
"url",
"=",
"url",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
"break",
"content",
"=",
"self",
".",
"_fetch",
"(",
"\"/content/edge_check/%s\"",
"%",
"url",
")",
"return",
"content"
]
| Retrieve headers and MD5 hash of the content for a particular url from each Fastly edge server. | [
"Retrieve",
"headers",
"and",
"MD5",
"hash",
"of",
"the",
"content",
"for",
"a",
"particular",
"url",
"from",
"each",
"Fastly",
"edge",
"server",
"."
]
| python | train | 37.111111 |
bitprophet/ssh | ssh/transport.py | https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/transport.py#L1466-L1482 | def _compute_key(self, id, nbytes):
"id is 'A' - 'F' for the various keys used by ssh"
m = Message()
m.add_mpint(self.K)
m.add_bytes(self.H)
m.add_byte(id)
m.add_bytes(self.session_id)
out = sofar = SHA.new(str(m)).digest()
while len(out) < nbytes:
m = Message()
m.add_mpint(self.K)
m.add_bytes(self.H)
m.add_bytes(sofar)
digest = SHA.new(str(m)).digest()
out += digest
sofar += digest
return out[:nbytes] | [
"def",
"_compute_key",
"(",
"self",
",",
"id",
",",
"nbytes",
")",
":",
"m",
"=",
"Message",
"(",
")",
"m",
".",
"add_mpint",
"(",
"self",
".",
"K",
")",
"m",
".",
"add_bytes",
"(",
"self",
".",
"H",
")",
"m",
".",
"add_byte",
"(",
"id",
")",
"m",
".",
"add_bytes",
"(",
"self",
".",
"session_id",
")",
"out",
"=",
"sofar",
"=",
"SHA",
".",
"new",
"(",
"str",
"(",
"m",
")",
")",
".",
"digest",
"(",
")",
"while",
"len",
"(",
"out",
")",
"<",
"nbytes",
":",
"m",
"=",
"Message",
"(",
")",
"m",
".",
"add_mpint",
"(",
"self",
".",
"K",
")",
"m",
".",
"add_bytes",
"(",
"self",
".",
"H",
")",
"m",
".",
"add_bytes",
"(",
"sofar",
")",
"digest",
"=",
"SHA",
".",
"new",
"(",
"str",
"(",
"m",
")",
")",
".",
"digest",
"(",
")",
"out",
"+=",
"digest",
"sofar",
"+=",
"digest",
"return",
"out",
"[",
":",
"nbytes",
"]"
]
| id is 'A' - 'F' for the various keys used by ssh | [
"id",
"is",
"A",
"-",
"F",
"for",
"the",
"various",
"keys",
"used",
"by",
"ssh"
]
| python | train | 32.058824 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/namespace_range.py | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L283-L303 | def make_datastore_query(self, cursor=None):
"""Returns a datastore.Query that generates all namespaces in the range.
Args:
cursor: start cursor for the query.
Returns:
A datastore.Query instance that generates db.Keys for each namespace in
the NamespaceRange.
"""
filters = {}
filters['__key__ >= '] = _key_for_namespace(
self.namespace_start, self.app)
filters['__key__ <= '] = _key_for_namespace(
self.namespace_end, self.app)
return datastore.Query('__namespace__',
filters=filters,
keys_only=True,
cursor=cursor,
_app=self.app) | [
"def",
"make_datastore_query",
"(",
"self",
",",
"cursor",
"=",
"None",
")",
":",
"filters",
"=",
"{",
"}",
"filters",
"[",
"'__key__ >= '",
"]",
"=",
"_key_for_namespace",
"(",
"self",
".",
"namespace_start",
",",
"self",
".",
"app",
")",
"filters",
"[",
"'__key__ <= '",
"]",
"=",
"_key_for_namespace",
"(",
"self",
".",
"namespace_end",
",",
"self",
".",
"app",
")",
"return",
"datastore",
".",
"Query",
"(",
"'__namespace__'",
",",
"filters",
"=",
"filters",
",",
"keys_only",
"=",
"True",
",",
"cursor",
"=",
"cursor",
",",
"_app",
"=",
"self",
".",
"app",
")"
]
| Returns a datastore.Query that generates all namespaces in the range.
Args:
cursor: start cursor for the query.
Returns:
A datastore.Query instance that generates db.Keys for each namespace in
the NamespaceRange. | [
"Returns",
"a",
"datastore",
".",
"Query",
"that",
"generates",
"all",
"namespaces",
"in",
"the",
"range",
"."
]
| python | train | 32.809524 |
mottosso/be | be/vendor/requests/packages/urllib3/util/retry.py | https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/vendor/requests/packages/urllib3/util/retry.py#L158-L167 | def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value) | [
"def",
"get_backoff_time",
"(",
"self",
")",
":",
"if",
"self",
".",
"_observed_errors",
"<=",
"1",
":",
"return",
"0",
"backoff_value",
"=",
"self",
".",
"backoff_factor",
"*",
"(",
"2",
"**",
"(",
"self",
".",
"_observed_errors",
"-",
"1",
")",
")",
"return",
"min",
"(",
"self",
".",
"BACKOFF_MAX",
",",
"backoff_value",
")"
]
| Formula for computing the current backoff
:rtype: float | [
"Formula",
"for",
"computing",
"the",
"current",
"backoff"
]
| python | train | 30.1 |
numenta/htmresearch | htmresearch/frameworks/layers/physical_objects.py | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_objects.py#L316-L337 | def plot(self, numPoints=100):
"""
Specific plotting method for cylinders.
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# generate cylinder
x = np.linspace(- self.radius, self.radius, numPoints)
z = np.linspace(- self.height / 2., self.height / 2., numPoints)
Xc, Zc = np.meshgrid(x, z)
Yc = np.sqrt(self.radius ** 2 - Xc ** 2)
# plot
ax.plot_surface(Xc, Yc, Zc, alpha=0.2, rstride=20, cstride=10)
ax.plot_surface(Xc, -Yc, Zc, alpha=0.2, rstride=20, cstride=10)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
plt.title("{}".format(self))
return fig, ax | [
"def",
"plot",
"(",
"self",
",",
"numPoints",
"=",
"100",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
",",
"projection",
"=",
"'3d'",
")",
"# generate cylinder",
"x",
"=",
"np",
".",
"linspace",
"(",
"-",
"self",
".",
"radius",
",",
"self",
".",
"radius",
",",
"numPoints",
")",
"z",
"=",
"np",
".",
"linspace",
"(",
"-",
"self",
".",
"height",
"/",
"2.",
",",
"self",
".",
"height",
"/",
"2.",
",",
"numPoints",
")",
"Xc",
",",
"Zc",
"=",
"np",
".",
"meshgrid",
"(",
"x",
",",
"z",
")",
"Yc",
"=",
"np",
".",
"sqrt",
"(",
"self",
".",
"radius",
"**",
"2",
"-",
"Xc",
"**",
"2",
")",
"# plot",
"ax",
".",
"plot_surface",
"(",
"Xc",
",",
"Yc",
",",
"Zc",
",",
"alpha",
"=",
"0.2",
",",
"rstride",
"=",
"20",
",",
"cstride",
"=",
"10",
")",
"ax",
".",
"plot_surface",
"(",
"Xc",
",",
"-",
"Yc",
",",
"Zc",
",",
"alpha",
"=",
"0.2",
",",
"rstride",
"=",
"20",
",",
"cstride",
"=",
"10",
")",
"ax",
".",
"set_xlabel",
"(",
"\"X\"",
")",
"ax",
".",
"set_ylabel",
"(",
"\"Y\"",
")",
"ax",
".",
"set_zlabel",
"(",
"\"Z\"",
")",
"plt",
".",
"title",
"(",
"\"{}\"",
".",
"format",
"(",
"self",
")",
")",
"return",
"fig",
",",
"ax"
]
| Specific plotting method for cylinders. | [
"Specific",
"plotting",
"method",
"for",
"cylinders",
"."
]
| python | train | 28.954545 |
LISE-B26/pylabcontrol | build/lib/pylabcontrol/src/instruments/instrument_dummy.py | https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/instruments/instrument_dummy.py#L139-L159 | def run(self):
"""
this is the actual execution of the instrument thread: continuously read values from the probes
"""
eta = self.settings['noise_strength']
gamma = 2 * np.pi * self.settings['noise_bandwidth']
dt = 1. / self.settings['update frequency']
control = self.settings['control']
self._state = self._output
while self._stop is False:
A = -gamma * dt
noise = np.sqrt(2*gamma*eta)*np.random.randn()
self._state *= (1. + A)
self._state += noise + control
self._output = self._state
self.msleep(int(1e3 / self.settings['update frequency'])) | [
"def",
"run",
"(",
"self",
")",
":",
"eta",
"=",
"self",
".",
"settings",
"[",
"'noise_strength'",
"]",
"gamma",
"=",
"2",
"*",
"np",
".",
"pi",
"*",
"self",
".",
"settings",
"[",
"'noise_bandwidth'",
"]",
"dt",
"=",
"1.",
"/",
"self",
".",
"settings",
"[",
"'update frequency'",
"]",
"control",
"=",
"self",
".",
"settings",
"[",
"'control'",
"]",
"self",
".",
"_state",
"=",
"self",
".",
"_output",
"while",
"self",
".",
"_stop",
"is",
"False",
":",
"A",
"=",
"-",
"gamma",
"*",
"dt",
"noise",
"=",
"np",
".",
"sqrt",
"(",
"2",
"*",
"gamma",
"*",
"eta",
")",
"*",
"np",
".",
"random",
".",
"randn",
"(",
")",
"self",
".",
"_state",
"*=",
"(",
"1.",
"+",
"A",
")",
"self",
".",
"_state",
"+=",
"noise",
"+",
"control",
"self",
".",
"_output",
"=",
"self",
".",
"_state",
"self",
".",
"msleep",
"(",
"int",
"(",
"1e3",
"/",
"self",
".",
"settings",
"[",
"'update frequency'",
"]",
")",
")"
]
| this is the actual execution of the instrument thread: continuously read values from the probes | [
"this",
"is",
"the",
"actual",
"execution",
"of",
"the",
"instrument",
"thread",
":",
"continuously",
"read",
"values",
"from",
"the",
"probes"
]
| python | train | 32.142857 |
AshleySetter/optoanalysis | optoanalysis/optoanalysis/optoanalysis.py | https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/optoanalysis.py#L4233-L4278 | def get_wigner(z, freq, sample_freq, histbins=200, show_plot=False):
"""
Calculates an approximation to the wigner quasi-probability distribution
by splitting the z position array into slices of the length of one period
of the motion. This slice is then associated with phase from -180 to 180
degrees. These slices are then histogramed in order to get a distribution
of counts of where the particle is observed at each phase. The 2d array
containing the counts varying with position and phase is then passed through
the inverse radon transformation using the Simultaneous Algebraic
Reconstruction Technique approximation from the scikit-image package.
Parameters
----------
z : ndarray
trace of z motion
freq : float
frequency of motion
sample_freq : float
sample frequency of the z array
histbins : int, optional (default=200)
number of bins to use in histogramming data for each phase
show_plot : bool, optional (default=False)
Whether or not to plot the phase distribution
Returns
-------
iradon_output : ndarray
2d array of size (histbins x histbins)
bin_centres : ndarray
positions of the bin centres
"""
phase, phase_slices = extract_slices(z, freq, sample_freq, show_plot=False)
counts_array, bin_edges = histogram_phase(phase_slices, phase, histbins, show_plot=show_plot)
diff = bin_edges[1] - bin_edges[0]
bin_centres = bin_edges[:-1] + diff
iradon_output = _iradon_sart(counts_array, theta=phase)
#_plt.imshow(iradon_output, extent=[bin_centres[0], bin_centres[-1], bin_centres[0], bin_centres[-1]])
#_plt.show()
return iradon_output, bin_centres | [
"def",
"get_wigner",
"(",
"z",
",",
"freq",
",",
"sample_freq",
",",
"histbins",
"=",
"200",
",",
"show_plot",
"=",
"False",
")",
":",
"phase",
",",
"phase_slices",
"=",
"extract_slices",
"(",
"z",
",",
"freq",
",",
"sample_freq",
",",
"show_plot",
"=",
"False",
")",
"counts_array",
",",
"bin_edges",
"=",
"histogram_phase",
"(",
"phase_slices",
",",
"phase",
",",
"histbins",
",",
"show_plot",
"=",
"show_plot",
")",
"diff",
"=",
"bin_edges",
"[",
"1",
"]",
"-",
"bin_edges",
"[",
"0",
"]",
"bin_centres",
"=",
"bin_edges",
"[",
":",
"-",
"1",
"]",
"+",
"diff",
"iradon_output",
"=",
"_iradon_sart",
"(",
"counts_array",
",",
"theta",
"=",
"phase",
")",
"#_plt.imshow(iradon_output, extent=[bin_centres[0], bin_centres[-1], bin_centres[0], bin_centres[-1]])",
"#_plt.show()",
"return",
"iradon_output",
",",
"bin_centres"
]
| Calculates an approximation to the wigner quasi-probability distribution
by splitting the z position array into slices of the length of one period
of the motion. This slice is then associated with phase from -180 to 180
degrees. These slices are then histogramed in order to get a distribution
of counts of where the particle is observed at each phase. The 2d array
containing the counts varying with position and phase is then passed through
the inverse radon transformation using the Simultaneous Algebraic
Reconstruction Technique approximation from the scikit-image package.
Parameters
----------
z : ndarray
trace of z motion
freq : float
frequency of motion
sample_freq : float
sample frequency of the z array
histbins : int, optional (default=200)
number of bins to use in histogramming data for each phase
show_plot : bool, optional (default=False)
Whether or not to plot the phase distribution
Returns
-------
iradon_output : ndarray
2d array of size (histbins x histbins)
bin_centres : ndarray
positions of the bin centres | [
"Calculates",
"an",
"approximation",
"to",
"the",
"wigner",
"quasi",
"-",
"probability",
"distribution",
"by",
"splitting",
"the",
"z",
"position",
"array",
"into",
"slices",
"of",
"the",
"length",
"of",
"one",
"period",
"of",
"the",
"motion",
".",
"This",
"slice",
"is",
"then",
"associated",
"with",
"phase",
"from",
"-",
"180",
"to",
"180",
"degrees",
".",
"These",
"slices",
"are",
"then",
"histogramed",
"in",
"order",
"to",
"get",
"a",
"distribution",
"of",
"counts",
"of",
"where",
"the",
"particle",
"is",
"observed",
"at",
"each",
"phase",
".",
"The",
"2d",
"array",
"containing",
"the",
"counts",
"varying",
"with",
"position",
"and",
"phase",
"is",
"then",
"passed",
"through",
"the",
"inverse",
"radon",
"transformation",
"using",
"the",
"Simultaneous",
"Algebraic",
"Reconstruction",
"Technique",
"approximation",
"from",
"the",
"scikit",
"-",
"image",
"package",
"."
]
| python | train | 36.891304 |
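The `get_wigner` record above reconstructs a 2-D distribution from phase-binned position histograms with the SART inverse Radon transform. Below is a minimal standalone sketch of that reconstruction step; it is not the optoanalysis implementation, and the function name, array shapes, and angle grid are illustrative assumptions.

```python
# Sketch only: reconstruct a 2-D distribution from phase-binned histograms
# via SART, as the docstring above describes. Shapes and angles are made up.
import numpy as np
from skimage.transform import iradon_sart

def reconstruct_from_phase_histograms(counts_array, phase_degrees):
    # counts_array: (histbins, n_projections); each column is one phase bin.
    return iradon_sart(counts_array.astype(float), theta=phase_degrees)

counts = np.random.poisson(5.0, size=(200, 72)).astype(float)   # fake data
angles = np.linspace(-180.0, 180.0, 72, endpoint=False)
image = reconstruct_from_phase_histograms(counts, angles)
print(image.shape)   # (200, 200)
```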
lawsie/guizero | guizero/event.py | https://github.com/lawsie/guizero/blob/84c7f0b314fa86f9fc88eb11c9a0f6c4b57155e2/guizero/event.py#L177-L190 | def set_event(self, ref, tk_event, callback):
"""
Sets a callback for this widget against a ref (reference) for a
tk_event, setting the callback to None will remove it.
"""
# has an EventCallback been created for this tk event
if tk_event not in self._event_callbacks:
self._event_callbacks[tk_event] = EventCallback(self._widget, self._tks, tk_event)
# assign this ref to this event callback
self._refs[ref] = self._event_callbacks[tk_event]
# set up the callback
self._refs[ref].set_callback(ref, callback) | [
"def",
"set_event",
"(",
"self",
",",
"ref",
",",
"tk_event",
",",
"callback",
")",
":",
"# has an EventCallback been created for this tk event",
"if",
"tk_event",
"not",
"in",
"self",
".",
"_event_callbacks",
":",
"self",
".",
"_event_callbacks",
"[",
"tk_event",
"]",
"=",
"EventCallback",
"(",
"self",
".",
"_widget",
",",
"self",
".",
"_tks",
",",
"tk_event",
")",
"# assign this ref to this event callback",
"self",
".",
"_refs",
"[",
"ref",
"]",
"=",
"self",
".",
"_event_callbacks",
"[",
"tk_event",
"]",
"# set up the callback",
"self",
".",
"_refs",
"[",
"ref",
"]",
".",
"set_callback",
"(",
"ref",
",",
"callback",
")"
]
| Sets a callback for this widget against a ref (reference) for a
tk_event, setting the callback to None will remove it. | [
"Sets",
"a",
"callback",
"for",
"this",
"widget",
"against",
"a",
"ref",
"(",
"reference",
")",
"for",
"a",
"tk_event",
"setting",
"the",
"callback",
"to",
"None",
"will",
"remove",
"it",
"."
]
| python | train | 42.071429 |
spotify/luigi | luigi/contrib/s3.py | https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/s3.py#L177-L195 | def exists(self, path):
"""
Does provided path exist on S3?
"""
(bucket, key) = self._path_to_bucket_and_key(path)
# root always exists
if self._is_root(key):
return True
# file
if self._exists(bucket, key):
return True
if self.isdir(path):
return True
logger.debug('Path %s does not exist', path)
return False | [
"def",
"exists",
"(",
"self",
",",
"path",
")",
":",
"(",
"bucket",
",",
"key",
")",
"=",
"self",
".",
"_path_to_bucket_and_key",
"(",
"path",
")",
"# root always exists",
"if",
"self",
".",
"_is_root",
"(",
"key",
")",
":",
"return",
"True",
"# file",
"if",
"self",
".",
"_exists",
"(",
"bucket",
",",
"key",
")",
":",
"return",
"True",
"if",
"self",
".",
"isdir",
"(",
"path",
")",
":",
"return",
"True",
"logger",
".",
"debug",
"(",
"'Path %s does not exist'",
",",
"path",
")",
"return",
"False"
]
| Does provided path exist on S3? | [
"Does",
"provided",
"path",
"exist",
"on",
"S3?"
]
| python | train | 22.105263 |
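A hedged usage sketch for the `exists()` method above; the bucket name and key are placeholders, and it assumes luigi's `S3Client` can pick up AWS credentials from the usual boto3 sources.

```python
# Hypothetical usage (bucket/key are placeholders, credentials come from the
# environment); exists() is True for real keys and for "directory" prefixes.
from luigi.contrib.s3 import S3Client

client = S3Client()
print(client.exists("s3://my-example-bucket/raw/2021-01-01.csv"))

# The bucket/key split the method relies on can be illustrated with urlparse:
from urllib.parse import urlparse
parsed = urlparse("s3://my-example-bucket/raw/2021-01-01.csv")
bucket, key = parsed.netloc, parsed.path.lstrip("/")
print(bucket, key)
```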
hvac/hvac | hvac/api/system_backend/key.py | https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/system_backend/key.py#L7-L20 | def read_root_generation_progress(self):
"""Read the configuration and process of the current root generation attempt.
Supported methods:
GET: /sys/generate-root/attempt. Produces: 200 application/json
:return: The JSON response of the request.
:rtype: dict
"""
api_path = '/v1/sys/generate-root/attempt'
response = self._adapter.get(
url=api_path,
)
return response.json() | [
"def",
"read_root_generation_progress",
"(",
"self",
")",
":",
"api_path",
"=",
"'/v1/sys/generate-root/attempt'",
"response",
"=",
"self",
".",
"_adapter",
".",
"get",
"(",
"url",
"=",
"api_path",
",",
")",
"return",
"response",
".",
"json",
"(",
")"
]
| Read the configuration and process of the current root generation attempt.
Supported methods:
GET: /sys/generate-root/attempt. Produces: 200 application/json
:return: The JSON response of the request.
:rtype: dict | [
"Read",
"the",
"configuration",
"and",
"process",
"of",
"the",
"current",
"root",
"generation",
"attempt",
"."
]
| python | train | 32.714286 |
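A usage sketch for the method above; the Vault address and token are placeholders, and it assumes a recent hvac client where the system-backend key methods are exposed under `client.sys`.

```python
# Hypothetical usage; URL and token are placeholders.
import hvac

client = hvac.Client(url="https://vault.example.com:8200", token="s.placeholder")
status = client.sys.read_root_generation_progress()
print(status.get("started"), status.get("progress"))
```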
klmitch/turnstile | turnstile/control.py | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/control.py#L82-L98 | def get_limits(self, limit_sum=None):
"""
Gets the current limit data if it is different from the data
indicated by limit_sum. The db argument is used for hydrating
the limit objects. Raises a NoChangeException if the
limit_sum represents no change, otherwise returns a tuple
consisting of the current limit_sum and a list of Limit
objects.
"""
with self.limit_lock:
# Any changes?
if limit_sum and self.limit_sum == limit_sum:
raise NoChangeException()
# Return a tuple of the limits and limit sum
return (self.limit_sum, self.limit_data) | [
"def",
"get_limits",
"(",
"self",
",",
"limit_sum",
"=",
"None",
")",
":",
"with",
"self",
".",
"limit_lock",
":",
"# Any changes?",
"if",
"limit_sum",
"and",
"self",
".",
"limit_sum",
"==",
"limit_sum",
":",
"raise",
"NoChangeException",
"(",
")",
"# Return a tuple of the limits and limit sum",
"return",
"(",
"self",
".",
"limit_sum",
",",
"self",
".",
"limit_data",
")"
]
| Gets the current limit data if it is different from the data
indicated by limit_sum. The db argument is used for hydrating
the limit objects. Raises a NoChangeException if the
limit_sum represents no change, otherwise returns a tuple
consisting of the current limit_sum and a list of Limit
objects. | [
"Gets",
"the",
"current",
"limit",
"data",
"if",
"it",
"is",
"different",
"from",
"the",
"data",
"indicated",
"by",
"limit_sum",
".",
"The",
"db",
"argument",
"is",
"used",
"for",
"hydrating",
"the",
"limit",
"objects",
".",
"Raises",
"a",
"NoChangeException",
"if",
"the",
"limit_sum",
"represents",
"no",
"change",
"otherwise",
"returns",
"a",
"tuple",
"consisting",
"of",
"the",
"current",
"limit_sum",
"and",
"a",
"list",
"of",
"Limit",
"objects",
"."
]
| python | train | 39 |
connectordb/connectordb-python | connectordb/_connection.py | https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_connection.py#L117-L121 | def ping(self):
"""Attempts to ping the server using current credentials, and responds with the path of the currently
authenticated device"""
return self.handleresult(self.r.get(self.url,
params={"q": "this"})).text | [
"def",
"ping",
"(",
"self",
")",
":",
"return",
"self",
".",
"handleresult",
"(",
"self",
".",
"r",
".",
"get",
"(",
"self",
".",
"url",
",",
"params",
"=",
"{",
"\"q\"",
":",
"\"this\"",
"}",
")",
")",
".",
"text"
]
| Attempts to ping the server using current credentials, and responds with the path of the currently
authenticated device | [
"Attempts",
"to",
"ping",
"the",
"server",
"using",
"current",
"credentials",
"and",
"responds",
"with",
"the",
"path",
"of",
"the",
"currently",
"authenticated",
"device"
]
| python | test | 55.8 |
iotile/coretools | iotilegateway/iotilegateway/device.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilegateway/iotilegateway/device.py#L110-L120 | def get_config(self, name, default=_MISSING):
"""Get a configuration setting from this DeviceAdapter.
See :meth:`AbstractDeviceAdapter.get_config`.
"""
val = self._config.get(name, default)
if val is _MISSING:
raise ArgumentError("DeviceAdapter config {} did not exist and no default".format(name))
return val | [
"def",
"get_config",
"(",
"self",
",",
"name",
",",
"default",
"=",
"_MISSING",
")",
":",
"val",
"=",
"self",
".",
"_config",
".",
"get",
"(",
"name",
",",
"default",
")",
"if",
"val",
"is",
"_MISSING",
":",
"raise",
"ArgumentError",
"(",
"\"DeviceAdapter config {} did not exist and no default\"",
".",
"format",
"(",
"name",
")",
")",
"return",
"val"
]
| Get a configuration setting from this DeviceAdapter.
See :meth:`AbstractDeviceAdapter.get_config`. | [
"Get",
"a",
"configuration",
"setting",
"from",
"this",
"DeviceAdapter",
"."
]
| python | train | 32.909091 |
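The method above relies on a module-level `_MISSING` sentinel so that `None` can still be a legitimate default. A standalone sketch of that pattern (not iotile code; the class and key names are invented):

```python
_MISSING = object()   # unique sentinel: distinguishes "no default" from None

class ConfigHolder:
    def __init__(self, config=None):
        self._config = dict(config or {})

    def get_config(self, name, default=_MISSING):
        value = self._config.get(name, default)
        if value is _MISSING:
            raise KeyError(f"config {name!r} does not exist and no default was given")
        return value

holder = ConfigHolder({"probe_required": True})
print(holder.get_config("probe_required"))          # True
print(holder.get_config("timeout", default=None))   # None is a valid default
```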
quantumlib/Cirq | cirq/google/sim/xmon_stepper.py | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/google/sim/xmon_stepper.py#L304-L339 | def simulate_w(self,
index: int,
half_turns: float,
axis_half_turns: float):
"""Simulate a single qubit rotation gate about a X + b Y.
The gate simulated is U = exp(-i pi/2 W half_turns)
where W = cos(pi axis_half_turns) X + sin(pi axis_half_turns) Y
Args:
index: The qubit to act on.
half_turns: The amount of the overall rotation, see the formula
above.
axis_half_turns: The angle between the pauli X and Y operators,
see the formula above.
"""
args = self._shard_num_args({
'index': index,
'half_turns': half_turns,
'axis_half_turns': axis_half_turns
})
if index >= self._num_shard_qubits:
# W gate spans shards.
self._pool.map(_clear_scratch, args)
self._pool.map(_w_between_shards, args)
self._pool.map(_copy_scratch_to_state, args)
else:
# W gate is within a shard.
self._pool.map(_w_within_shard, args)
# Normalize after every w.
norm_squared = np.sum(self._pool.map(_norm_squared, args))
args = self._shard_num_args({
'norm_squared': norm_squared
})
self._pool.map(_renorm, args) | [
"def",
"simulate_w",
"(",
"self",
",",
"index",
":",
"int",
",",
"half_turns",
":",
"float",
",",
"axis_half_turns",
":",
"float",
")",
":",
"args",
"=",
"self",
".",
"_shard_num_args",
"(",
"{",
"'index'",
":",
"index",
",",
"'half_turns'",
":",
"half_turns",
",",
"'axis_half_turns'",
":",
"axis_half_turns",
"}",
")",
"if",
"index",
">=",
"self",
".",
"_num_shard_qubits",
":",
"# W gate spans shards.",
"self",
".",
"_pool",
".",
"map",
"(",
"_clear_scratch",
",",
"args",
")",
"self",
".",
"_pool",
".",
"map",
"(",
"_w_between_shards",
",",
"args",
")",
"self",
".",
"_pool",
".",
"map",
"(",
"_copy_scratch_to_state",
",",
"args",
")",
"else",
":",
"# W gate is within a shard.",
"self",
".",
"_pool",
".",
"map",
"(",
"_w_within_shard",
",",
"args",
")",
"# Normalize after every w.",
"norm_squared",
"=",
"np",
".",
"sum",
"(",
"self",
".",
"_pool",
".",
"map",
"(",
"_norm_squared",
",",
"args",
")",
")",
"args",
"=",
"self",
".",
"_shard_num_args",
"(",
"{",
"'norm_squared'",
":",
"norm_squared",
"}",
")",
"self",
".",
"_pool",
".",
"map",
"(",
"_renorm",
",",
"args",
")"
]
| Simulate a single qubit rotation gate about a X + b Y.
The gate simulated is U = exp(-i pi/2 W half_turns)
where W = cos(pi axis_half_turns) X + sin(pi axis_half_turns) Y
Args:
index: The qubit to act on.
half_turns: The amount of the overall rotation, see the formula
above.
axis_half_turns: The angle between the pauli X and Y operators,
see the formula above. | [
"Simulate",
"a",
"single",
"qubit",
"rotation",
"gate",
"about",
"a",
"X",
"+",
"b",
"Y",
"."
]
| python | train | 36.277778 |
ray-project/ray | python/ray/rllib/agents/dqn/dqn_policy_graph.py | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/agents/dqn/dqn_policy_graph.py#L679-L700 | def _scope_vars(scope, trainable_only=False):
"""
Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as
trainable.
Returns
-------
vars: [tf.Variable]
list of variables in `scope`.
"""
return tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES
if trainable_only else tf.GraphKeys.VARIABLES,
scope=scope if isinstance(scope, str) else scope.name) | [
"def",
"_scope_vars",
"(",
"scope",
",",
"trainable_only",
"=",
"False",
")",
":",
"return",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"TRAINABLE_VARIABLES",
"if",
"trainable_only",
"else",
"tf",
".",
"GraphKeys",
".",
"VARIABLES",
",",
"scope",
"=",
"scope",
"if",
"isinstance",
"(",
"scope",
",",
"str",
")",
"else",
"scope",
".",
"name",
")"
]
| Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as
trainable.
Returns
-------
vars: [tf.Variable]
list of variables in `scope`. | [
"Get",
"variables",
"inside",
"a",
"scope",
"The",
"scope",
"can",
"be",
"specified",
"as",
"a",
"string"
]
| python | train | 27.636364 |
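A small sketch of what the helper above returns, written against the TF1-style graph API (under TensorFlow 2 the same calls live in `tf.compat.v1`); the scope and variable names are made up.

```python
# TF1-style sketch; names are illustrative.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

with tf.variable_scope("q_func"):
    w = tf.get_variable("w", shape=[4, 2])
    b = tf.get_variable("b", shape=[2], trainable=False)

trainable = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="q_func")
print([v.name for v in trainable])   # only q_func/w:0 is trainable
```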
ga4gh/ga4gh-server | ga4gh/server/datamodel/__init__.py | https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/__init__.py#L68-L90 | def getFileHandle(self, dataFile, openMethod):
"""
Returns handle associated to the filename. If the file is
already opened, update its priority in the cache and return
its handle. Otherwise, open the file using openMethod, store
it in the cache and return the corresponding handle.
"""
if dataFile in self._memoTable:
handle = self._memoTable[dataFile]
self._update(dataFile, handle)
return handle
else:
try:
handle = openMethod(dataFile)
except ValueError:
raise exceptions.FileOpenFailedException(dataFile)
self._memoTable[dataFile] = handle
self._add(dataFile, handle)
if len(self._memoTable) > self._maxCacheSize:
dataFile = self._removeLru()
del self._memoTable[dataFile]
return handle | [
"def",
"getFileHandle",
"(",
"self",
",",
"dataFile",
",",
"openMethod",
")",
":",
"if",
"dataFile",
"in",
"self",
".",
"_memoTable",
":",
"handle",
"=",
"self",
".",
"_memoTable",
"[",
"dataFile",
"]",
"self",
".",
"_update",
"(",
"dataFile",
",",
"handle",
")",
"return",
"handle",
"else",
":",
"try",
":",
"handle",
"=",
"openMethod",
"(",
"dataFile",
")",
"except",
"ValueError",
":",
"raise",
"exceptions",
".",
"FileOpenFailedException",
"(",
"dataFile",
")",
"self",
".",
"_memoTable",
"[",
"dataFile",
"]",
"=",
"handle",
"self",
".",
"_add",
"(",
"dataFile",
",",
"handle",
")",
"if",
"len",
"(",
"self",
".",
"_memoTable",
")",
">",
"self",
".",
"_maxCacheSize",
":",
"dataFile",
"=",
"self",
".",
"_removeLru",
"(",
")",
"del",
"self",
".",
"_memoTable",
"[",
"dataFile",
"]",
"return",
"handle"
]
| Returns handle associated to the filename. If the file is
already opened, update its priority in the cache and return
its handle. Otherwise, open the file using openMethod, store
it in the cache and return the corresponding handle. | [
"Returns",
"handle",
"associated",
"to",
"the",
"filename",
".",
"If",
"the",
"file",
"is",
"already",
"opened",
"update",
"its",
"priority",
"in",
"the",
"cache",
"and",
"return",
"its",
"handle",
".",
"Otherwise",
"open",
"the",
"file",
"using",
"openMethod",
"store",
"it",
"in",
"the",
"cache",
"and",
"return",
"the",
"corresponding",
"handle",
"."
]
| python | train | 39.391304 |
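The docstring above describes a least-recently-used cache of open file handles. A dependency-free sketch of the same idea (not the ga4gh implementation; names are invented):

```python
from collections import OrderedDict

class FileHandleCache:
    """Keep at most `maxsize` open handles, evicting the least recently used."""
    def __init__(self, maxsize=8):
        self.maxsize = maxsize
        self._handles = OrderedDict()

    def get(self, path, open_method=open):
        if path in self._handles:
            self._handles.move_to_end(path)          # refresh priority
            return self._handles[path]
        handle = open_method(path)                   # may raise on bad input
        self._handles[path] = handle
        if len(self._handles) > self.maxsize:
            _, oldest = self._handles.popitem(last=False)   # evict LRU entry
            oldest.close()
        return handle
```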
tanghaibao/jcvi | jcvi/formats/fastq.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L238-L267 | def uniq(args):
"""
%prog uniq fastqfile
Retain only first instance of duplicate reads. Duplicate is defined as
having the same read name.
"""
p = OptionParser(uniq.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
fw = must_open(opts.outfile, "w")
nduplicates = nreads = 0
seen = set()
for rec in iter_fastq(fastqfile):
nreads += 1
if rec is None:
break
name = rec.name
if name in seen:
nduplicates += 1
continue
seen.add(name)
print(rec, file=fw)
logging.debug("Removed duplicate reads: {}".\
format(percentage(nduplicates, nreads))) | [
"def",
"uniq",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"uniq",
".",
"__doc__",
")",
"p",
".",
"set_outfile",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"fastqfile",
",",
"=",
"args",
"fw",
"=",
"must_open",
"(",
"opts",
".",
"outfile",
",",
"\"w\"",
")",
"nduplicates",
"=",
"nreads",
"=",
"0",
"seen",
"=",
"set",
"(",
")",
"for",
"rec",
"in",
"iter_fastq",
"(",
"fastqfile",
")",
":",
"nreads",
"+=",
"1",
"if",
"rec",
"is",
"None",
":",
"break",
"name",
"=",
"rec",
".",
"name",
"if",
"name",
"in",
"seen",
":",
"nduplicates",
"+=",
"1",
"continue",
"seen",
".",
"add",
"(",
"name",
")",
"print",
"(",
"rec",
",",
"file",
"=",
"fw",
")",
"logging",
".",
"debug",
"(",
"\"Removed duplicate reads: {}\"",
".",
"format",
"(",
"percentage",
"(",
"nduplicates",
",",
"nreads",
")",
")",
")"
]
| %prog uniq fastqfile
Retain only first instance of duplicate reads. Duplicate is defined as
having the same read name. | [
"%prog",
"uniq",
"fastqfile"
]
| python | train | 25.066667 |
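A dependency-free sketch of the same de-duplication idea for a plain, uncompressed FASTQ with four lines per record (not the jcvi implementation; file names are placeholders):

```python
def dedup_fastq(in_path, out_path):
    seen = set()
    kept = total = 0
    with open(in_path) as fin, open(out_path, "w") as fout:
        while True:
            record = [fin.readline() for _ in range(4)]   # @name, seq, +, qual
            if not record[0]:
                break
            total += 1
            name = record[0].split()[0]
            if name in seen:           # keep only the first occurrence
                continue
            seen.add(name)
            kept += 1
            fout.writelines(record)
    print(f"kept {kept} of {total} reads")

dedup_fastq("reads.fastq", "reads.uniq.fastq")
```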
cjdrake/pyeda | pyeda/boolalg/expr.py | https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/expr.py#L242-L245 | def expr2dimacscnf(ex):
"""Convert an expression into an equivalent DIMACS CNF."""
litmap, nvars, clauses = ex.encode_cnf()
return litmap, DimacsCNF(nvars, clauses) | [
"def",
"expr2dimacscnf",
"(",
"ex",
")",
":",
"litmap",
",",
"nvars",
",",
"clauses",
"=",
"ex",
".",
"encode_cnf",
"(",
")",
"return",
"litmap",
",",
"DimacsCNF",
"(",
"nvars",
",",
"clauses",
")"
]
| Convert an expression into an equivalent DIMACS CNF. | [
"Convert",
"an",
"expression",
"into",
"an",
"equivalent",
"DIMACS",
"CNF",
"."
]
| python | train | 43.25 |
jtwhite79/pyemu | pyemu/utils/helpers.py | https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/utils/helpers.py#L601-L713 | def kl_setup(num_eig,sr,struct,prefixes,
factors_file="kl_factors.dat",islog=True, basis_file=None,
tpl_dir="."):
"""setup a karhuenen-Loeve based parameterization for a given
geostatistical structure.
Parameters
----------
num_eig : int
number of basis vectors to retain in the reduced basis
sr : flopy.reference.SpatialReference
struct : str or pyemu.geostats.Geostruct
geostatistical structure (or file containing one)
array_dict : dict
a dict of arrays to setup as KL-based parameters. The key becomes the
parameter name prefix. The total number of parameters is
len(array_dict) * num_eig
basis_file : str
the name of the PEST-format binary file where the reduced basis will be saved
tpl_file : str
the name of the template file to make. The template
file is a csv file with the parameter names, the
original factor values,and the template entries.
The original values can be used to set the parval1
entries in the control file
Returns
-------
back_array_dict : dict
a dictionary of back transformed arrays. This is useful to see
how much "smoothing" is taking place compared to the original
arrays.
Note
----
requires flopy
Example
-------
``>>>import flopy``
``>>>import pyemu``
``>>>m = flopy.modflow.Modflow.load("mymodel.nam")``
``>>>a_dict = {"hk":m.lpf.hk[0].array}``
``>>>ba_dict = pyemu.helpers.kl_setup(10,m.sr,"struct.dat",a_dict)``
"""
try:
import flopy
except Exception as e:
raise Exception("error import flopy: {0}".format(str(e)))
assert isinstance(sr,flopy.utils.SpatialReference)
# for name,array in array_dict.items():
# assert isinstance(array,np.ndarray)
# assert array.shape[0] == sr.nrow
# assert array.shape[1] == sr.ncol
# assert len(name) + len(str(num_eig)) <= 12,"name too long:{0}".\
# format(name)
if isinstance(struct,str):
assert os.path.exists(struct)
gs = pyemu.utils.read_struct_file(struct)
else:
gs = struct
names = []
for i in range(sr.nrow):
names.extend(["i{0:04d}j{1:04d}".format(i,j) for j in range(sr.ncol)])
cov = gs.covariance_matrix(sr.xcentergrid.flatten(),
sr.ycentergrid.flatten(),
names=names)
eig_names = ["eig_{0:04d}".format(i) for i in range(cov.shape[0])]
trunc_basis = cov.u
trunc_basis.col_names = eig_names
#trunc_basis.col_names = [""]
if basis_file is not None:
trunc_basis.to_binary(basis_file)
trunc_basis = trunc_basis[:,:num_eig]
eig_names = eig_names[:num_eig]
pp_df = pd.DataFrame({"name":eig_names},index=eig_names)
pp_df.loc[:,"x"] = -1.0 * sr.ncol
pp_df.loc[:,"y"] = -1.0 * sr.nrow
pp_df.loc[:,"zone"] = -999
pp_df.loc[:,"parval1"] = 1.0
pyemu.pp_utils.write_pp_file(os.path.join("temp.dat"),pp_df)
eigen_basis_to_factor_file(sr.nrow,sr.ncol,trunc_basis,factors_file=factors_file,islog=islog)
dfs = []
for prefix in prefixes:
tpl_file = os.path.join(tpl_dir,"{0}.dat_kl.tpl".format(prefix))
df = pyemu.pp_utils.pilot_points_to_tpl("temp.dat",tpl_file,prefix)
shutil.copy2("temp.dat",tpl_file.replace(".tpl",""))
df.loc[:,"tpl_file"] = tpl_file
df.loc[:,"in_file"] = tpl_file.replace(".tpl","")
df.loc[:,"prefix"] = prefix
df.loc[:,"pargp"] = "kl_{0}".format(prefix)
dfs.append(df)
#arr = pyemu.geostats.fac2real(df,factors_file=factors_file,out_file=None)
df = pd.concat(dfs)
df.loc[:,"parubnd"] = 10.0
df.loc[:,"parlbnd"] = 0.1
return pd.concat(dfs) | [
"def",
"kl_setup",
"(",
"num_eig",
",",
"sr",
",",
"struct",
",",
"prefixes",
",",
"factors_file",
"=",
"\"kl_factors.dat\"",
",",
"islog",
"=",
"True",
",",
"basis_file",
"=",
"None",
",",
"tpl_dir",
"=",
"\".\"",
")",
":",
"try",
":",
"import",
"flopy",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"\"error import flopy: {0}\"",
".",
"format",
"(",
"str",
"(",
"e",
")",
")",
")",
"assert",
"isinstance",
"(",
"sr",
",",
"flopy",
".",
"utils",
".",
"SpatialReference",
")",
"# for name,array in array_dict.items():",
"# assert isinstance(array,np.ndarray)",
"# assert array.shape[0] == sr.nrow",
"# assert array.shape[1] == sr.ncol",
"# assert len(name) + len(str(num_eig)) <= 12,\"name too long:{0}\".\\",
"# format(name)",
"if",
"isinstance",
"(",
"struct",
",",
"str",
")",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"struct",
")",
"gs",
"=",
"pyemu",
".",
"utils",
".",
"read_struct_file",
"(",
"struct",
")",
"else",
":",
"gs",
"=",
"struct",
"names",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"sr",
".",
"nrow",
")",
":",
"names",
".",
"extend",
"(",
"[",
"\"i{0:04d}j{1:04d}\"",
".",
"format",
"(",
"i",
",",
"j",
")",
"for",
"j",
"in",
"range",
"(",
"sr",
".",
"ncol",
")",
"]",
")",
"cov",
"=",
"gs",
".",
"covariance_matrix",
"(",
"sr",
".",
"xcentergrid",
".",
"flatten",
"(",
")",
",",
"sr",
".",
"ycentergrid",
".",
"flatten",
"(",
")",
",",
"names",
"=",
"names",
")",
"eig_names",
"=",
"[",
"\"eig_{0:04d}\"",
".",
"format",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"cov",
".",
"shape",
"[",
"0",
"]",
")",
"]",
"trunc_basis",
"=",
"cov",
".",
"u",
"trunc_basis",
".",
"col_names",
"=",
"eig_names",
"#trunc_basis.col_names = [\"\"]",
"if",
"basis_file",
"is",
"not",
"None",
":",
"trunc_basis",
".",
"to_binary",
"(",
"basis_file",
")",
"trunc_basis",
"=",
"trunc_basis",
"[",
":",
",",
":",
"num_eig",
"]",
"eig_names",
"=",
"eig_names",
"[",
":",
"num_eig",
"]",
"pp_df",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"\"name\"",
":",
"eig_names",
"}",
",",
"index",
"=",
"eig_names",
")",
"pp_df",
".",
"loc",
"[",
":",
",",
"\"x\"",
"]",
"=",
"-",
"1.0",
"*",
"sr",
".",
"ncol",
"pp_df",
".",
"loc",
"[",
":",
",",
"\"y\"",
"]",
"=",
"-",
"1.0",
"*",
"sr",
".",
"nrow",
"pp_df",
".",
"loc",
"[",
":",
",",
"\"zone\"",
"]",
"=",
"-",
"999",
"pp_df",
".",
"loc",
"[",
":",
",",
"\"parval1\"",
"]",
"=",
"1.0",
"pyemu",
".",
"pp_utils",
".",
"write_pp_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"\"temp.dat\"",
")",
",",
"pp_df",
")",
"eigen_basis_to_factor_file",
"(",
"sr",
".",
"nrow",
",",
"sr",
".",
"ncol",
",",
"trunc_basis",
",",
"factors_file",
"=",
"factors_file",
",",
"islog",
"=",
"islog",
")",
"dfs",
"=",
"[",
"]",
"for",
"prefix",
"in",
"prefixes",
":",
"tpl_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tpl_dir",
",",
"\"{0}.dat_kl.tpl\"",
".",
"format",
"(",
"prefix",
")",
")",
"df",
"=",
"pyemu",
".",
"pp_utils",
".",
"pilot_points_to_tpl",
"(",
"\"temp.dat\"",
",",
"tpl_file",
",",
"prefix",
")",
"shutil",
".",
"copy2",
"(",
"\"temp.dat\"",
",",
"tpl_file",
".",
"replace",
"(",
"\".tpl\"",
",",
"\"\"",
")",
")",
"df",
".",
"loc",
"[",
":",
",",
"\"tpl_file\"",
"]",
"=",
"tpl_file",
"df",
".",
"loc",
"[",
":",
",",
"\"in_file\"",
"]",
"=",
"tpl_file",
".",
"replace",
"(",
"\".tpl\"",
",",
"\"\"",
")",
"df",
".",
"loc",
"[",
":",
",",
"\"prefix\"",
"]",
"=",
"prefix",
"df",
".",
"loc",
"[",
":",
",",
"\"pargp\"",
"]",
"=",
"\"kl_{0}\"",
".",
"format",
"(",
"prefix",
")",
"dfs",
".",
"append",
"(",
"df",
")",
"#arr = pyemu.geostats.fac2real(df,factors_file=factors_file,out_file=None)",
"df",
"=",
"pd",
".",
"concat",
"(",
"dfs",
")",
"df",
".",
"loc",
"[",
":",
",",
"\"parubnd\"",
"]",
"=",
"10.0",
"df",
".",
"loc",
"[",
":",
",",
"\"parlbnd\"",
"]",
"=",
"0.1",
"return",
"pd",
".",
"concat",
"(",
"dfs",
")"
]
| setup a karhuenen-Loeve based parameterization for a given
geostatistical structure.
Parameters
----------
num_eig : int
number of basis vectors to retain in the reduced basis
sr : flopy.reference.SpatialReference
struct : str or pyemu.geostats.Geostruct
geostatistical structure (or file containing one)
array_dict : dict
a dict of arrays to setup as KL-based parameters. The key becomes the
parameter name prefix. The total number of parameters is
len(array_dict) * num_eig
basis_file : str
the name of the PEST-format binary file where the reduced basis will be saved
tpl_file : str
the name of the template file to make. The template
file is a csv file with the parameter names, the
original factor values,and the template entries.
The original values can be used to set the parval1
entries in the control file
Returns
-------
back_array_dict : dict
a dictionary of back transformed arrays. This is useful to see
how much "smoothing" is taking place compared to the original
arrays.
Note
----
requires flopy
Example
-------
``>>>import flopy``
``>>>import pyemu``
``>>>m = flopy.modflow.Modflow.load("mymodel.nam")``
``>>>a_dict = {"hk":m.lpf.hk[0].array}``
``>>>ba_dict = pyemu.helpers.kl_setup(10,m.sr,"struct.dat",a_dict)`` | [
"setup",
"a",
"karhuenen",
"-",
"Loeve",
"based",
"parameterization",
"for",
"a",
"given",
"geostatistical",
"structure",
"."
]
| python | train | 32.902655 |
chaoss/grimoirelab-elk | grimoire_elk/enriched/mediawiki.py | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/mediawiki.py#L109-L116 | def get_review_sh(self, revision, item):
""" Add sorting hat enrichment fields for the author of the revision """
identity = self.get_sh_identity(revision)
update = parser.parse(item[self.get_field_date()])
erevision = self.get_item_sh_fields(identity, update)
return erevision | [
"def",
"get_review_sh",
"(",
"self",
",",
"revision",
",",
"item",
")",
":",
"identity",
"=",
"self",
".",
"get_sh_identity",
"(",
"revision",
")",
"update",
"=",
"parser",
".",
"parse",
"(",
"item",
"[",
"self",
".",
"get_field_date",
"(",
")",
"]",
")",
"erevision",
"=",
"self",
".",
"get_item_sh_fields",
"(",
"identity",
",",
"update",
")",
"return",
"erevision"
]
| Add sorting hat enrichment fields for the author of the revision | [
"Add",
"sorting",
"hat",
"enrichment",
"fields",
"for",
"the",
"author",
"of",
"the",
"revision"
]
| python | train | 39 |
HIPS/autograd | examples/generative_adversarial_net.py | https://github.com/HIPS/autograd/blob/e3b525302529d7490769d5c0bcfc7457e24e3b3e/examples/generative_adversarial_net.py#L59-L91 | def adam_minimax(grad_both, init_params_max, init_params_min, callback=None, num_iters=100,
step_size_max=0.001, step_size_min=0.001, b1=0.9, b2=0.999, eps=10**-8):
"""Adam modified to do minimiax optimization, for instance to help with
training generative adversarial networks."""
x_max, unflatten_max = flatten(init_params_max)
x_min, unflatten_min = flatten(init_params_min)
m_max = np.zeros(len(x_max))
v_max = np.zeros(len(x_max))
m_min = np.zeros(len(x_min))
v_min = np.zeros(len(x_min))
for i in range(num_iters):
g_max_uf, g_min_uf = grad_both(unflatten_max(x_max),
unflatten_min(x_min), i)
g_max, _ = flatten(g_max_uf)
g_min, _ = flatten(g_min_uf)
if callback: callback(unflatten_max(x_max), unflatten_min(x_min), i,
unflatten_max(g_max), unflatten_min(g_min))
m_max = (1 - b1) * g_max + b1 * m_max # First moment estimate.
v_max = (1 - b2) * (g_max**2) + b2 * v_max # Second moment estimate.
mhat_max = m_max / (1 - b1**(i + 1)) # Bias correction.
vhat_max = v_max / (1 - b2**(i + 1))
x_max = x_max + step_size_max * mhat_max / (np.sqrt(vhat_max) + eps)
m_min = (1 - b1) * g_min + b1 * m_min # First moment estimate.
v_min = (1 - b2) * (g_min**2) + b2 * v_min # Second moment estimate.
mhat_min = m_min / (1 - b1**(i + 1)) # Bias correction.
vhat_min = v_min / (1 - b2**(i + 1))
x_min = x_min - step_size_min * mhat_min / (np.sqrt(vhat_min) + eps)
return unflatten_max(x_max), unflatten_min(x_min) | [
"def",
"adam_minimax",
"(",
"grad_both",
",",
"init_params_max",
",",
"init_params_min",
",",
"callback",
"=",
"None",
",",
"num_iters",
"=",
"100",
",",
"step_size_max",
"=",
"0.001",
",",
"step_size_min",
"=",
"0.001",
",",
"b1",
"=",
"0.9",
",",
"b2",
"=",
"0.999",
",",
"eps",
"=",
"10",
"**",
"-",
"8",
")",
":",
"x_max",
",",
"unflatten_max",
"=",
"flatten",
"(",
"init_params_max",
")",
"x_min",
",",
"unflatten_min",
"=",
"flatten",
"(",
"init_params_min",
")",
"m_max",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"x_max",
")",
")",
"v_max",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"x_max",
")",
")",
"m_min",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"x_min",
")",
")",
"v_min",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"x_min",
")",
")",
"for",
"i",
"in",
"range",
"(",
"num_iters",
")",
":",
"g_max_uf",
",",
"g_min_uf",
"=",
"grad_both",
"(",
"unflatten_max",
"(",
"x_max",
")",
",",
"unflatten_min",
"(",
"x_min",
")",
",",
"i",
")",
"g_max",
",",
"_",
"=",
"flatten",
"(",
"g_max_uf",
")",
"g_min",
",",
"_",
"=",
"flatten",
"(",
"g_min_uf",
")",
"if",
"callback",
":",
"callback",
"(",
"unflatten_max",
"(",
"x_max",
")",
",",
"unflatten_min",
"(",
"x_min",
")",
",",
"i",
",",
"unflatten_max",
"(",
"g_max",
")",
",",
"unflatten_min",
"(",
"g_min",
")",
")",
"m_max",
"=",
"(",
"1",
"-",
"b1",
")",
"*",
"g_max",
"+",
"b1",
"*",
"m_max",
"# First moment estimate.",
"v_max",
"=",
"(",
"1",
"-",
"b2",
")",
"*",
"(",
"g_max",
"**",
"2",
")",
"+",
"b2",
"*",
"v_max",
"# Second moment estimate.",
"mhat_max",
"=",
"m_max",
"/",
"(",
"1",
"-",
"b1",
"**",
"(",
"i",
"+",
"1",
")",
")",
"# Bias correction.",
"vhat_max",
"=",
"v_max",
"/",
"(",
"1",
"-",
"b2",
"**",
"(",
"i",
"+",
"1",
")",
")",
"x_max",
"=",
"x_max",
"+",
"step_size_max",
"*",
"mhat_max",
"/",
"(",
"np",
".",
"sqrt",
"(",
"vhat_max",
")",
"+",
"eps",
")",
"m_min",
"=",
"(",
"1",
"-",
"b1",
")",
"*",
"g_min",
"+",
"b1",
"*",
"m_min",
"# First moment estimate.",
"v_min",
"=",
"(",
"1",
"-",
"b2",
")",
"*",
"(",
"g_min",
"**",
"2",
")",
"+",
"b2",
"*",
"v_min",
"# Second moment estimate.",
"mhat_min",
"=",
"m_min",
"/",
"(",
"1",
"-",
"b1",
"**",
"(",
"i",
"+",
"1",
")",
")",
"# Bias correction.",
"vhat_min",
"=",
"v_min",
"/",
"(",
"1",
"-",
"b2",
"**",
"(",
"i",
"+",
"1",
")",
")",
"x_min",
"=",
"x_min",
"-",
"step_size_min",
"*",
"mhat_min",
"/",
"(",
"np",
".",
"sqrt",
"(",
"vhat_min",
")",
"+",
"eps",
")",
"return",
"unflatten_max",
"(",
"x_max",
")",
",",
"unflatten_min",
"(",
"x_min",
")"
]
| Adam modified to do minimiax optimization, for instance to help with
training generative adversarial networks. | [
"Adam",
"modified",
"to",
"do",
"minimiax",
"optimization",
"for",
"instance",
"to",
"help",
"with",
"training",
"generative",
"adversarial",
"networks",
"."
]
| python | train | 49.454545 |
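The routine above is two interleaved Adam updates with opposite signs for the maximizing and minimizing players. A standalone NumPy sketch of a single Adam step with the same default hyperparameters (not the autograd example code):

```python
import numpy as np

def adam_step(x, g, m, v, i, step_size=0.001, b1=0.9, b2=0.999, eps=1e-8, maximize=False):
    m = (1 - b1) * g + b1 * m            # first-moment estimate
    v = (1 - b2) * (g ** 2) + b2 * v     # second-moment estimate
    mhat = m / (1 - b1 ** (i + 1))       # bias correction
    vhat = v / (1 - b2 ** (i + 1))
    update = step_size * mhat / (np.sqrt(vhat) + eps)
    return (x + update if maximize else x - update), m, v
```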
saltstack/salt | salt/utils/thin.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/thin.py#L298-L320 | def _get_supported_py_config(tops, extended_cfg):
'''
Based on the Salt SSH configuration, create a YAML configuration
for the supported Python interpreter versions. This is then written into the thin.tgz
archive and then verified by salt.client.ssh.ssh_py_shim.get_executable()
Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces.
:return:
'''
pymap = []
for py_ver, tops in _six.iteritems(copy.deepcopy(tops)):
py_ver = int(py_ver)
if py_ver == 2:
pymap.append('py2:2:7')
elif py_ver == 3:
pymap.append('py3:3:0')
for ns, cfg in _six.iteritems(copy.deepcopy(extended_cfg) or {}):
pymap.append('{}:{}:{}'.format(ns, *cfg.get('py-version')))
pymap.append('')
return salt.utils.stringutils.to_bytes(os.linesep.join(pymap)) | [
"def",
"_get_supported_py_config",
"(",
"tops",
",",
"extended_cfg",
")",
":",
"pymap",
"=",
"[",
"]",
"for",
"py_ver",
",",
"tops",
"in",
"_six",
".",
"iteritems",
"(",
"copy",
".",
"deepcopy",
"(",
"tops",
")",
")",
":",
"py_ver",
"=",
"int",
"(",
"py_ver",
")",
"if",
"py_ver",
"==",
"2",
":",
"pymap",
".",
"append",
"(",
"'py2:2:7'",
")",
"elif",
"py_ver",
"==",
"3",
":",
"pymap",
".",
"append",
"(",
"'py3:3:0'",
")",
"for",
"ns",
",",
"cfg",
"in",
"_six",
".",
"iteritems",
"(",
"copy",
".",
"deepcopy",
"(",
"extended_cfg",
")",
"or",
"{",
"}",
")",
":",
"pymap",
".",
"append",
"(",
"'{}:{}:{}'",
".",
"format",
"(",
"ns",
",",
"*",
"cfg",
".",
"get",
"(",
"'py-version'",
")",
")",
")",
"pymap",
".",
"append",
"(",
"''",
")",
"return",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_bytes",
"(",
"os",
".",
"linesep",
".",
"join",
"(",
"pymap",
")",
")"
]
| Based on the Salt SSH configuration, create a YAML configuration
for the supported Python interpreter versions. This is then written into the thin.tgz
archive and then verified by salt.client.ssh.ssh_py_shim.get_executable()
Note: Minimum default of 2.x versions is 2.7 and 3.x is 3.0, unless specified in namespaces.
:return: | [
"Based",
"on",
"the",
"Salt",
"SSH",
"configuration",
"create",
"a",
"YAML",
"configuration",
"for",
"the",
"supported",
"Python",
"interpreter",
"versions",
".",
"This",
"is",
"then",
"written",
"into",
"the",
"thin",
".",
"tgz",
"archive",
"and",
"then",
"verified",
"by",
"salt",
".",
"client",
".",
"ssh",
".",
"ssh_py_shim",
".",
"get_executable",
"()"
]
| python | train | 36.826087 |
bcbio/bcbio-nextgen | bcbio/cwl/tool.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L137-L154 | def _run_bunny(args):
"""Run CWL with rabix bunny.
"""
main_file, json_file, project_name = _get_main_and_json(args.directory)
work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "bunny_work"))
flags = ["-b", work_dir]
log_file = os.path.join(work_dir, "%s-bunny.log" % project_name)
if os.path.exists(work_dir):
caches = [os.path.join(work_dir, d) for d in os.listdir(work_dir)
if os.path.isdir(os.path.join(work_dir, d))]
if caches:
flags += ["--cache-dir", max(caches, key=os.path.getmtime)]
if args.no_container:
_remove_bcbiovm_path()
flags += ["--no-container"]
cmd = ["rabix"] + flags + [main_file, json_file]
with utils.chdir(work_dir):
_run_tool(cmd, not args.no_container, work_dir, log_file) | [
"def",
"_run_bunny",
"(",
"args",
")",
":",
"main_file",
",",
"json_file",
",",
"project_name",
"=",
"_get_main_and_json",
"(",
"args",
".",
"directory",
")",
"work_dir",
"=",
"utils",
".",
"safe_makedir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"\"bunny_work\"",
")",
")",
"flags",
"=",
"[",
"\"-b\"",
",",
"work_dir",
"]",
"log_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"\"%s-bunny.log\"",
"%",
"project_name",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"work_dir",
")",
":",
"caches",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"d",
")",
"for",
"d",
"in",
"os",
".",
"listdir",
"(",
"work_dir",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"d",
")",
")",
"]",
"if",
"caches",
":",
"flags",
"+=",
"[",
"\"--cache-dir\"",
",",
"max",
"(",
"caches",
",",
"key",
"=",
"os",
".",
"path",
".",
"getmtime",
")",
"]",
"if",
"args",
".",
"no_container",
":",
"_remove_bcbiovm_path",
"(",
")",
"flags",
"+=",
"[",
"\"--no-container\"",
"]",
"cmd",
"=",
"[",
"\"rabix\"",
"]",
"+",
"flags",
"+",
"[",
"main_file",
",",
"json_file",
"]",
"with",
"utils",
".",
"chdir",
"(",
"work_dir",
")",
":",
"_run_tool",
"(",
"cmd",
",",
"not",
"args",
".",
"no_container",
",",
"work_dir",
",",
"log_file",
")"
]
| Run CWL with rabix bunny. | [
"Run",
"CWL",
"with",
"rabix",
"bunny",
"."
]
| python | train | 44.388889 |
saltstack/salt | salt/modules/netaddress.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netaddress.py#L59-L68 | def cidr_netmask(cidr):
'''
Get the netmask address associated with a CIDR address.
CLI example::
salt myminion netaddress.cidr_netmask 192.168.0.0/20
'''
ips = netaddr.IPNetwork(cidr)
return six.text_type(ips.netmask) | [
"def",
"cidr_netmask",
"(",
"cidr",
")",
":",
"ips",
"=",
"netaddr",
".",
"IPNetwork",
"(",
"cidr",
")",
"return",
"six",
".",
"text_type",
"(",
"ips",
".",
"netmask",
")"
]
| Get the netmask address associated with a CIDR address.
CLI example::
salt myminion netaddress.cidr_netmask 192.168.0.0/20 | [
"Get",
"the",
"netmask",
"address",
"associated",
"with",
"a",
"CIDR",
"address",
"."
]
| python | train | 24.3 |
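The execution-module function above is a thin wrapper around netaddr; the direct library call looks like this (addresses are illustrative):

```python
import netaddr

net = netaddr.IPNetwork("192.168.0.0/20")
print(net.netmask)     # 255.255.240.0
print(net.broadcast)   # 192.168.15.255
```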
dereneaton/ipyrad | ipyrad/assemble/util.py | https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/util.py#L865-L886 | def clustdealer(pairdealer, optim):
""" return optim clusters given iterators, and whether it got all or not"""
ccnt = 0
chunk = []
while ccnt < optim:
## try refreshing taker, else quit
try:
taker = itertools.takewhile(lambda x: x[0] != "//\n", pairdealer)
oneclust = ["".join(taker.next())]
except StopIteration:
#LOGGER.debug('last chunk %s', chunk)
return 1, chunk
## load one cluster
while 1:
try:
oneclust.append("".join(taker.next()))
except StopIteration:
break
chunk.append("".join(oneclust))
ccnt += 1
return 0, chunk | [
"def",
"clustdealer",
"(",
"pairdealer",
",",
"optim",
")",
":",
"ccnt",
"=",
"0",
"chunk",
"=",
"[",
"]",
"while",
"ccnt",
"<",
"optim",
":",
"## try refreshing taker, else quit",
"try",
":",
"taker",
"=",
"itertools",
".",
"takewhile",
"(",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
"!=",
"\"//\\n\"",
",",
"pairdealer",
")",
"oneclust",
"=",
"[",
"\"\"",
".",
"join",
"(",
"taker",
".",
"next",
"(",
")",
")",
"]",
"except",
"StopIteration",
":",
"#LOGGER.debug('last chunk %s', chunk)",
"return",
"1",
",",
"chunk",
"## load one cluster",
"while",
"1",
":",
"try",
":",
"oneclust",
".",
"append",
"(",
"\"\"",
".",
"join",
"(",
"taker",
".",
"next",
"(",
")",
")",
")",
"except",
"StopIteration",
":",
"break",
"chunk",
".",
"append",
"(",
"\"\"",
".",
"join",
"(",
"oneclust",
")",
")",
"ccnt",
"+=",
"1",
"return",
"0",
",",
"chunk"
]
| return optim clusters given iterators, and whether it got all or not | [
"return",
"optim",
"clusters",
"given",
"iterators",
"and",
"whether",
"it",
"got",
"all",
"or",
"not"
]
| python | valid | 31.227273 |
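A simplified standalone sketch of the same chunking idea for a single "//"-delimited stream (the real helper above works on a paired iterator); the function and file names are invented:

```python
import itertools

def take_clusters(lines, optim):
    """Pull up to `optim` '//'-delimited clusters; return (exhausted_flag, clusters)."""
    chunk = []
    for _ in range(optim):
        cluster = list(itertools.takewhile(lambda x: x != "//\n", lines))
        if not cluster:
            return 1, chunk            # stream exhausted
        chunk.append("".join(cluster))
    return 0, chunk

with open("clusters.txt") as handle:
    done, clusters = take_clusters(handle, optim=100)
```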