Dataset schema (column: type, observed range):

repo: string, lengths 7-55
path: string, lengths 4-223
url: string, lengths 87-315
code: string, lengths 75-104k
code_tokens: list
docstring: string, lengths 1-46.9k
docstring_tokens: list
language: string, 1 distinct value
partition: string, 3 distinct values
avg_line_len: float64, 7.91-980

Sample rows:

repo: gwww/elkm1
path: elkm1_lib/message.py
url: https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L212-L215
code:

    def _zp_decode(self, msg):
        """ZP: Zone partitions."""
        zone_partitions = [ord(x)-0x31 for x in msg[4:4+Max.ZONES.value]]
        return {'zone_partitions': zone_partitions}

docstring: ZP: Zone partitions.
language: python
partition: train
avg_line_len: 46
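
As a hedged aside, the decoding idiom in this row can be tried standalone; the message literal and zone count below are invented stand-ins for the ELK-M1 protocol values (Max.ZONES.value in the library):

    # Hypothetical ZP message: four header characters, then one character
    # per zone, each offset from '1' (0x31).
    msg = "16ZP123123"
    num_zones = 6  # stands in for Max.ZONES.value
    zone_partitions = [ord(x) - 0x31 for x in msg[4:4 + num_zones]]
    print(zone_partitions)  # [0, 1, 2, 0, 1, 2]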

repo: pypa/pipenv
path: pipenv/vendor/yaspin/core.py
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/yaspin/core.py#L251-L264
code:

    def hide(self):
        """Hide the spinner to allow for custom writing to the terminal."""
        thr_is_alive = self._spin_thread and self._spin_thread.is_alive()

        if thr_is_alive and not self._hide_spin.is_set():
            # set the hidden spinner flag
            self._hide_spin.set()

            # clear the current line
            sys.stdout.write("\r")
            self._clear_line()

            # flush the stdout buffer so the current line can be rewritten to
            sys.stdout.flush()

docstring: Hide the spinner to allow for custom writing to the terminal.
language: python
partition: train
avg_line_len: 35.785714
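
The carriage-return trick this method relies on can be shown in isolation; a minimal sketch, assuming a terminal that honors "\r":

    import sys
    import time

    # "\r" returns the cursor to the start of the line, so the next write
    # overwrites the spinner frame instead of appending to it.
    sys.stdout.write('working...')
    sys.stdout.flush()
    time.sleep(0.5)
    sys.stdout.write('\r' + ' ' * len('working...') + '\r')  # blank the line
    sys.stdout.write('done\n')
    sys.stdout.flush()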

repo: confluentinc/confluent-kafka-python
path: examples/adminapi.py
url: https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/examples/adminapi.py#L120-L140
code:

    def example_alter_configs(a, args):
        """ Alter configs atomically, replacing non-specified
        configuration properties with their default values.
        """

        resources = []
        for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]):
            resource = ConfigResource(restype, resname)
            resources.append(resource)
            for k, v in [conf.split('=') for conf in configs.split(',')]:
                resource.set_config(k, v)

        fs = a.alter_configs(resources)

        # Wait for operation to finish.
        for res, f in fs.items():
            try:
                f.result()  # empty, but raises exception on failure
                print("{} configuration successfully altered".format(res))
            except Exception:
                raise

docstring: Alter configs atomically, replacing non-specified configuration properties with their default values.
language: python
partition: train
avg_line_len: 34.619048
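
The zip(args[0::3], args[1::3], args[2::3]) idiom above groups a flat argument list into (restype, resname, configs) triples; a standalone sketch with made-up values:

    # Group a flat CLI argument list into (restype, resname, configs) triples,
    # then parse the comma-separated key=value config string.
    args = ['topic', 'mytopic', 'retention.ms=60000,cleanup.policy=compact']
    for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]):
        pairs = dict(conf.split('=') for conf in configs.split(','))
        print(restype, resname, pairs)
    # topic mytopic {'retention.ms': '60000', 'cleanup.policy': 'compact'}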

repo: push-things/django-th
path: django_th/views_userservices.py
url: https://github.com/push-things/django-th/blob/86c999d16bcf30b6224206e5b40824309834ac8c/django_th/views_userservices.py#L22-L35
code:

    def renew_service(request, pk):
        """
        renew an existing service
        :param request object
        :param pk: the primary key of the service to renew
        :type pk: int
        """
        default_provider.load_services()
        service = get_object_or_404(ServicesActivated, pk=pk)
        service_name = str(service.name)
        service_object = default_provider.get_service(service_name)
        lets_auth = getattr(service_object, 'auth')
        getattr(service_object, 'reset_failed')(pk=pk)
        return redirect(lets_auth(request))

docstring: renew an existing service :param request object :param pk: the primary key of the service to renew :type pk: int
language: python
partition: train
avg_line_len: 36.714286

repo: KrzyHonk/bpmn-python
path: bpmn_python/bpmn_diagram_export.py
url: https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_diagram_export.py#L400-L414
code:

    def export_flow_di_data(params, plane):
        """
        Creates a new BPMNEdge XML element for given edge parameters and adds it to 'plane' element.

        :param params: dictionary with edge parameters,
        :param plane: object of Element class, representing BPMN XML 'BPMNPlane' element (root for edge DI data).
        """
        output_flow = eTree.SubElement(plane, BpmnDiagramGraphExport.bpmndi_namespace + consts.Consts.bpmn_edge)
        output_flow.set(consts.Consts.id, params[consts.Consts.id] + "_gui")
        output_flow.set(consts.Consts.bpmn_element, params[consts.Consts.id])
        waypoints = params[consts.Consts.waypoints]
        for waypoint in waypoints:
            waypoint_element = eTree.SubElement(output_flow, "omgdi:waypoint")
            waypoint_element.set(consts.Consts.x, waypoint[0])
            waypoint_element.set(consts.Consts.y, waypoint[1])

docstring: Creates a new BPMNEdge XML element for given edge parameters and adds it to 'plane' element. :param params: dictionary with edge parameters, :param plane: object of Element class, representing BPMN XML 'BPMNPlane' element (root for edge DI data).
language: python
partition: train
avg_line_len: 58.733333
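
A minimal standalone sketch of the same ElementTree pattern, with simplified attribute names standing in for the consts.Consts values used above:

    import xml.etree.ElementTree as eTree

    # Build a BPMNEdge-like element and attach one child per waypoint.
    plane = eTree.Element('BPMNPlane')
    edge = eTree.SubElement(plane, 'BPMNEdge')
    edge.set('id', 'flow_1_gui')
    edge.set('bpmnElement', 'flow_1')
    for x, y in [('10', '20'), ('30', '40')]:
        wp = eTree.SubElement(edge, 'omgdi:waypoint')  # prefix written verbatim
        wp.set('x', x)
        wp.set('y', y)
    print(eTree.tostring(plane).decode())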

repo: agoragames/leaderboard-python
path: leaderboard/leaderboard.py
url: https://github.com/agoragames/leaderboard-python/blob/ec309859b197a751ac0322374b36d134d8c5522f/leaderboard/leaderboard.py#L462-L481
code:

    def rank_for_in(self, leaderboard_name, member):
        '''
        Retrieve the rank for a member in the named leaderboard.

        @param leaderboard_name [String] Name of the leaderboard.
        @param member [String] Member name.
        @return the rank for a member in the leaderboard.
        '''
        if self.order == self.ASC:
            try:
                return self.redis_connection.zrank(
                    leaderboard_name, member) + 1
            except:
                return None
        else:
            try:
                return self.redis_connection.zrevrank(
                    leaderboard_name, member) + 1
            except:
                return None

docstring: Retrieve the rank for a member in the named leaderboard. @param leaderboard_name [String] Name of the leaderboard. @param member [String] Member name. @return the rank for a member in the leaderboard.
language: python
partition: train
avg_line_len: 33.65

repo: bskinn/opan
path: opan/utils/symm.py
url: https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L223-L300
code:

    def geom_symm_match(g, atwts, ax, theta, do_refl):
        """ [Revised match factor calculation]

        .. todo:: Complete geom_symm_match docstring
        """

        # Imports
        import numpy as np
        from scipy import linalg as spla

        # Convert g and atwts to n-D vectors
        g = make_nd_vec(g, nd=None, t=np.float64, norm=False)
        atwts = make_nd_vec(atwts, nd=None, t=np.float64, norm=False)

        # Ensure proper dimensionality
        if not g.shape[0] == 3 * atwts.shape[0]:
            raise ValueError("Size of 'g' is not 3*size of 'atwts'")
        ## end if

        # Calculate transformed geometry
        gx = symm_op(g, ax, theta, do_refl)

        # Push g to a column vector
        g = g.reshape((g.shape[0],1))

        # Augment g and gx with imaginary atomic weights
        ex_wts = atwts.repeat(3,axis=0).T.reshape((atwts.shape[0]*3,1)) * 1.j
        g = np.add(g, ex_wts)
        gx = np.add(gx, ex_wts)

    ##    # Define calc as the outer product of the augmented vectors
    ##    calc = np.dot(g.reshape((g.shape[0],1)), \
    ##                np.reciprocal(gx.reshape((1,gx.shape[0]))))
    ##
    ##    # Calculate the complex magnitude of each element and take log10,
    ##    # then abs again
    ##    calc = np.abs(np.log10(np.abs(calc)))

        # Expand g and gx as column vectors of coordinates
        calc_g = g.reshape((g.shape[0] // 3, 3))
        calc_gx = gx.reshape((gx.shape[0] // 3, 3))

    ##    # Expand each into a square matrix of identical column vectors
    ##    calc_g = calc_g.repeat(g.shape[0], axis=1)
    ##    calc_gx = gx.repeat(gx.shape[0], axis=1)

        # Calc is the absolute distance between the calc-ed values,
        # scaled by the maximum of the individual atom distances or unity.
        # Calculate the unscaled distances
        calc = [[spla.norm(np.subtract(calc_g[i,:], calc_gx[j,:])) \
                    for j in range(calc_gx.shape[0])] \
                    for i in range(calc_g.shape[0])]

        # Calculate the scale factors
        scale_g = np.array([spla.norm(calc_g[i,:]) for i in \
                    range(calc_g.shape[0])]).reshape((calc_g.shape[0],1)) \
                    .repeat(calc_g.shape[0], axis=1)
        scale_gx = np.array([spla.norm(calc_gx[j,:]) for j in \
                    range(calc_g.shape[0])]).reshape((1,calc_gx.shape[0])) \
                    .repeat(calc_gx.shape[0], axis=0)
        scale = np.maximum(np.maximum(scale_g, scale_gx),
                           np.ones_like(scale_g, dtype=np.float64))

        # Scale calc
        calc = np.divide(calc, scale)

        # Take the minimum of each row
        mins = np.min(calc, axis=1)

        # Take the maximum of the minima for the final factor
        fac = np.max(mins)

        # Using the atomic weights for checking matching can result in 'fac'
        # being greater than unity. Return the minimum of fac and unity.
        fac = min(fac, 1.0)

        return fac

docstring: [Revised match factor calculation] .. todo:: Complete geom_symm_match docstring
language: python
partition: train
avg_line_len: 35.192308

repo: PythonCharmers/python-future
path: src/future/types/newdict.py
url: https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/types/newdict.py#L41-L53
code:

    def items(self):
        """
        On Python 2.7+:
            D.items() -> a set-like object providing a view on D's items
        On Python 2.6:
            D.items() -> an iterator over D's items
        """
        if ver == (2, 7):
            return self.viewitems()
        elif ver == (2, 6):
            return self.iteritems()
        elif ver >= (3, 0):
            return self.items()

docstring: On Python 2.7+: D.items() -> a set-like object providing a view on D's items On Python 2.6: D.items() -> an iterator over D's items
language: python
partition: train
avg_line_len: 29.692308

repo: mozilla/Marketplace.Python
path: marketplace/client.py
url: https://github.com/mozilla/Marketplace.Python/blob/88176b12201f766b6b96bccc1e4c3e82f0676283/marketplace/client.py#L183-L204
code:

    def create_screenshot(self, app_id, filename, position=1):
        """Add a screenshot to the web app identified by by ``app_id``.

        Screenshots are ordered by ``position``.

        :returns: HttpResponse:
            * status_code (int) 201 is successful
            * content (dict) containing screenshot data
        """
        # prepare file for upload
        with open(filename, 'rb') as s_file:
            s_content = s_file.read()
        s_encoded = b64encode(s_content)
        url = self.url('create_screenshot') % app_id

        mtype, encoding = mimetypes.guess_type(filename)
        if mtype is None:
            mtype = 'image/jpeg'

        data = {'position': position,
                'file': {'type': mtype,
                         'data': s_encoded}}
        return self.conn.fetch('POST', url, data)

docstring: Add a screenshot to the web app identified by by ``app_id``. Screenshots are ordered by ``position``. :returns: HttpResponse: * status_code (int) 201 is successful * content (dict) containing screenshot data
language: python
partition: train
avg_line_len: 36.863636

repo: mfitzp/padua
path: padua/normalization.py
url: https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/normalization.py#L4-L22
code:

    def subtract_column_median(df, prefix='Intensity '):
        """
        Apply column-wise normalisation to expression columns.

        Default is median transform to expression columns beginning with Intensity

        :param df:
        :param prefix: The column prefix for expression columns
        :return:
        """
        df = df.copy()
        df.replace([np.inf, -np.inf], np.nan, inplace=True)
        mask = [l.startswith(prefix) for l in df.columns.values]
        df.iloc[:, mask] = df.iloc[:, mask] - df.iloc[:, mask].median(axis=0)
        return df

docstring: Apply column-wise normalisation to expression columns. Default is median transform to expression columns beginning with Intensity :param df: :param prefix: The column prefix for expression columns :return:
language: python
partition: train
avg_line_len: 26.631579
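
The median-centering step can be checked on a tiny hypothetical frame (column names invented to match the 'Intensity ' prefix):

    import numpy as np
    import pandas as pd

    # Median-center only the columns whose names carry the expression prefix.
    df = pd.DataFrame({'Intensity A': [1.0, 2.0, 3.0], 'Score': [7, 8, 9]})
    mask = [c.startswith('Intensity ') for c in df.columns.values]
    df.iloc[:, mask] = df.iloc[:, mask] - df.iloc[:, mask].median(axis=0)
    print(df['Intensity A'].tolist())  # [-1.0, 0.0, 1.0]; 'Score' untouched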

repo: restran/mountains
path: mountains/http/__init__.py
url: https://github.com/restran/mountains/blob/a97fee568b112f4e10d878f815d0db3dd0a98d74/mountains/http/__init__.py#L75-L119
code:

    def read_request_from_str(data, **params):
        """
        Read request headers from a string, then format the string
        according to the format-string template.
        :param data:
        :param params:
        :return:
        """
        method, uri = None, None
        headers = {}
        host = ''
        try:
            split_list = data.split('\n\n')
            headers_text = split_list[0]
            body = '\n\n'.join(split_list[1:])
        except:
            headers_text = data
            body = ''

        body = force_bytes(body)
        for k, v in params.items():
            body = body.replace(b'{%s}' % force_bytes(k), force_bytes(v))

        header_list = headers_text.split('\n')
        for i, line in enumerate(header_list):
            line = line.strip()
            if line.strip() == '':
                continue

            line = line.format(**params)
            if i == 0:
                # at most 3 parts
                split_line = line.strip().split(' ')
                method, uri, _ = split_line[0], ' '.join(split_line[1:-1]), split_line[-1]
            else:
                # at most 2 parts
                header, value = line.split(':', 1)
                header = header.strip()
                value = value.strip()
                headers[header] = value
                if header.lower() == 'host':
                    host = value

        return headers, method, uri, host, body

docstring: Read request headers from a string, then format the string according to the format-string template. :param data: :param params: :return:
language: python
partition: train
avg_line_len: 25.777778

repo: ethan92429/onshapepy
path: onshapepy/core/client.py
url: https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/core/client.py#L298-L325
code:

    def create_assembly_instance(self, assembly_uri, part_uri, configuration):
        '''
        Insert a configurable part into an assembly.

        Args:
            - assembly (dict): eid, wid, and did of the assembly into which will be inserted
            - part (dict): eid and did of the configurable part
            - configuration (dict): the configuration

        Returns:
            - requests.Response: Onshape response data
        '''
        payload = {
            "documentId": part_uri["did"],
            "elementId": part_uri["eid"],
            # could be added if needed:
            # "partId": "String",
            # "featureId": "String",
            # "microversionId": "String",
            "versionId": part_uri["wvm"],
            # "microversionId": "String",
            "isAssembly": False,
            "isWholePartStudio": True,
            "configuration": self.encode_configuration(part_uri["did"], part_uri["eid"], configuration)
        }
        return self._api.request(
            'post',
            '/api/assemblies/d/' + assembly_uri["did"] +
            '/' + assembly_uri["wvm_type"] +
            '/' + assembly_uri["wvm"] +
            '/e/' + assembly_uri["eid"] + '/instances',
            body=payload)

docstring: Insert a configurable part into an assembly. Args: - assembly (dict): eid, wid, and did of the assembly into which will be inserted - part (dict): eid and did of the configurable part - configuration (dict): the configuration Returns: - requests.Response: Onshape response data
language: python
partition: train
avg_line_len: 41.714286

repo: mardix/Juice
path: juice/utils.py
url: https://github.com/mardix/Juice/blob/7afa8d4238868235dfcdae82272bd77958dd416a/juice/utils.py#L67-L75
code:

    def md5_string(s):
        """
        Shortcut to create md5 hash
        :param s:
        :return:
        """
        m = hashlib.md5()
        m.update(s)
        return str(m.hexdigest())

docstring: Shortcut to create md5 hash :param s: :return:
language: python
partition: train
avg_line_len: 17
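
Note the snippet assumes a Python 2 byte string; on Python 3, hashlib requires bytes. A hedged variant that encodes str input first:

    import hashlib

    def md5_string(s):
        # hashlib needs bytes on Python 3, so encode str input first.
        if isinstance(s, str):
            s = s.encode('utf-8')
        m = hashlib.md5()
        m.update(s)
        return m.hexdigest()

    print(md5_string('hello'))  # 5d41402abc4e2c0f4b14da132a74909b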

repo: Esri/ArcREST
path: src/arcrest/opendata/_web.py
url: https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/opendata/_web.py#L238-L279
code:

    def _process_response(self, resp, out_folder=None):
        """ processes the response object"""
        CHUNK = 4056
        maintype = self._mainType(resp)
        contentDisposition = resp.headers.get('content-disposition')
        contentEncoding = resp.headers.get('content-encoding')
        contentType = resp.headers.get('content-type')
        contentLength = resp.headers.get('content-length')
        if maintype.lower() in ('image', 'application/x-zip-compressed') or \
           contentType == 'application/x-zip-compressed' or \
           (contentDisposition is not None and \
            contentDisposition.lower().find('attachment;') > -1):
            fname = self._get_file_name(
                contentDisposition=contentDisposition,
                url=resp.geturl())
            if out_folder is None:
                out_folder = tempfile.gettempdir()
            if contentLength is not None:
                max_length = int(contentLength)
                if max_length < CHUNK:
                    CHUNK = max_length
            file_name = os.path.join(out_folder, fname)
            with open(file_name, 'wb') as writer:
                for data in self._chunk(response=resp):
                    writer.write(data)
                    del data
            del writer
            return file_name
        else:
            read = ""
            for data in self._chunk(response=resp, size=4096):
                if self.PY3 == True:
                    read += data.decode('utf-8')
                else:
                    read += data
                del data
            try:
                return json.loads(read.strip())
            except:
                return read
        return None

docstring: processes the response object
language: python
partition: train
avg_line_len: 40.904762

repo: StellarCN/py-stellar-base
path: stellar_base/builder.py
url: https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/builder.py#L746-L758
code:

    def sign(self, secret=None):
        """Sign the generated :class:`TransactionEnvelope
        <stellar_base.transaction_envelope.TransactionEnvelope>` from
        the list of this builder's operations.

        :param str secret: The secret seed to use if a key pair or secret was
            not provided when this class was originaly instantiated, or if
            another key is being utilized to sign the transaction envelope.

        """
        keypair = self.keypair if not secret else Keypair.from_seed(secret)
        self.gen_te()
        self.te.sign(keypair)

docstring: Sign the generated :class:`TransactionEnvelope <stellar_base.transaction_envelope.TransactionEnvelope>` from the list of this builder's operations. :param str secret: The secret seed to use if a key pair or secret was not provided when this class was originaly instantiated, or if another key is being utilized to sign the transaction envelope.
language: python
partition: train
avg_line_len: 43.230769

repo: JensAstrup/pyOutlook
path: pyOutlook/core/contact.py
url: https://github.com/JensAstrup/pyOutlook/blob/f4ca9d4a8629c0a41f78102ce84fab702a841167/pyOutlook/core/contact.py#L82-L84
code:

    def api_representation(self):
        """ Returns the JSON formatting required by Outlook's API for contacts """
        return dict(EmailAddress=dict(Name=self.name, Address=self.email))

docstring: Returns the JSON formatting required by Outlook's API for contacts
language: python
partition: train
avg_line_len: 61.666667
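
The resulting JSON shape, shown concretely with placeholder contact values:

    # Placeholder name and address; only the nesting mirrors the method above.
    contact = dict(EmailAddress=dict(Name='Ada Lovelace', Address='ada@example.com'))
    print(contact)
    # {'EmailAddress': {'Name': 'Ada Lovelace', 'Address': 'ada@example.com'}}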

repo: deepmind/pysc2
path: pysc2/lib/renderer_human.py
url: https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/renderer_human.py#L1287-L1334
code:

    def draw_mini_map(self, surf):
      """Draw the minimap."""
      if (self._render_rgb and
          self._obs.observation.HasField("render_data") and
          self._obs.observation.render_data.HasField("minimap")):
        # Draw the rendered version.
        surf.blit_np_array(features.Feature.unpack_rgb_image(
            self._obs.observation.render_data.minimap))
      else:  # Render it manually from feature layer data.
        hmap_feature = features.MINIMAP_FEATURES.height_map
        hmap = hmap_feature.unpack(self._obs.observation)
        if not hmap.any():
          hmap = hmap + 100  # pylint: disable=g-no-augmented-assignment
        hmap_color = hmap_feature.color(hmap)

        creep_feature = features.MINIMAP_FEATURES.creep
        creep = creep_feature.unpack(self._obs.observation)
        creep_mask = creep > 0
        creep_color = creep_feature.color(creep)

        if self._obs.observation.player_common.player_id in (0, 16):  # observer
          # If we're the observer, show the absolute since otherwise all player
          # units are friendly, making it pretty boring.
          player_feature = features.MINIMAP_FEATURES.player_id
        else:
          player_feature = features.MINIMAP_FEATURES.player_relative
        player_data = player_feature.unpack(self._obs.observation)
        player_mask = player_data > 0
        player_color = player_feature.color(player_data)

        visibility = features.MINIMAP_FEATURES.visibility_map.unpack(
            self._obs.observation)
        visibility_fade = np.array([[0.5] * 3, [0.75] * 3, [1] * 3])

        # Compose and color the different layers.
        out = hmap_color * 0.6
        out[creep_mask, :] = (0.4 * out[creep_mask, :] +
                              0.6 * creep_color[creep_mask, :])
        out[player_mask, :] = player_color[player_mask, :]
        out *= visibility_fade[visibility]

        # Render the bit of the composited layers that actually correspond to the
        # map. This isn't all of it on non-square maps.
        shape = self._map_size.scale_max_size(
            self._feature_minimap_px).floor()
        surf.blit_np_array(out[:shape.y, :shape.x, :])

      surf.draw_rect(colors.white * 0.8, self._camera, 1)  # Camera
      pygame.draw.rect(surf.surf, colors.red, surf.surf.get_rect(), 1)

docstring: Draw the minimap.
language: python
partition: train
avg_line_len: 45.5625

repo: bkg/django-spillway
path: spillway/query.py
url: https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/query.py#L248-L263
code:

    def warp(self, srid=None, format=None, geom=None):
        """Returns a new RasterQuerySet with possibly warped/converted rasters.

        Keyword args:
        format -- raster file extension format as str
        geom -- geometry for masking or spatial subsetting
        srid -- spatial reference identifier as int for warping to
        """
        clone = self._clone()
        for obj in clone:
            obj.convert(format, geom)
            if srid:
                fp = tempfile.NamedTemporaryFile(suffix='.%s' % format or '')
                with obj.raster() as r, r.warp(srid, fp.name) as w:
                    obj.image.file = fp
        return clone

docstring: Returns a new RasterQuerySet with possibly warped/converted rasters. Keyword args: format -- raster file extension format as str geom -- geometry for masking or spatial subsetting srid -- spatial reference identifier as int for warping to
language: python
partition: train
avg_line_len: 40.75

repo: saltstack/salt
path: salt/modules/boto_iam.py
url: https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_iam.py#L331-L352
code:

    def get_user(user_name=None, region=None, key=None, keyid=None, profile=None):
        '''
        Get user information.

        .. versionadded:: 2015.8.0

        CLI Example:

        .. code-block:: bash

            salt myminion boto_iam.get_user myuser
        '''
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        try:
            info = conn.get_user(user_name)
            if not info:
                return False
            return info
        except boto.exception.BotoServerError as e:
            log.debug(e)
            log.error('Failed to get IAM user %s info.', user_name)
            return False

docstring: Get user information. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt myminion boto_iam.get_user myuser
language: python
partition: train
avg_line_len: 25.772727

repo: Tivix/django-common
path: django_common/management/commands/generate_secret_key.py
url: https://github.com/Tivix/django-common/blob/407d208121011a8425139e541629554114d96c18/django_common/management/commands/generate_secret_key.py#L19-L27
code:

    def add_arguments(self, parser):
        """
        Define optional arguments with default values
        """
        parser.add_argument('--length', default=self.length, type=int,
                            help=_('SECRET_KEY length default=%d' % self.length))
        parser.add_argument('--alphabet', default=self.allowed_chars, type=str,
                            help=_('alphabet to use default=%s' % self.allowed_chars))

docstring: Define optional arguments with default values
language: python
partition: train
avg_line_len: 47
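
A self-contained argparse sketch mirroring these options (defaults here are invented, not the command's real ones):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--length', default=50, type=int,
                        help='SECRET_KEY length default=50')
    parser.add_argument('--alphabet', default='abcdef123', type=str,
                        help='alphabet to use default=abcdef123')
    args = parser.parse_args(['--length', '64'])
    print(args.length, args.alphabet)  # 64 abcdef123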

repo: yymao/generic-catalog-reader
path: GCR/base.py
url: https://github.com/yymao/generic-catalog-reader/blob/bc6267ac41b9f68106ed6065184469ac13fdc0b6/GCR/base.py#L151-L160
code:

    def first_available(self, *quantities):
        """
        Return the first available quantity in the input arguments.
        Return `None` if none of them is available.
        """
        for i, q in enumerate(quantities):
            if self.has_quantity(q):
                if i:
                    warnings.warn('{} not available; using {} instead'.format(quantities[0], q))
                return q

docstring: Return the first available quantity in the input arguments. Return `None` if none of them is available.
language: python
partition: train
avg_line_len: 39.8
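
The same fallback pattern, sketched standalone with a plain set standing in for the catalog's available quantities:

    import warnings

    available = {'stellar_mass'}  # stand-in for catalog.has_quantity()

    def first_available(*quantities):
        for i, q in enumerate(quantities):
            if q in available:
                if i:  # warn only when falling back past the first choice
                    warnings.warn('{} not available; using {} instead'.format(
                        quantities[0], q))
                return q

    print(first_available('halo_mass', 'stellar_mass'))  # stellar_mass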

repo: jmbeach/KEP.py
path: src/keppy/device.py
url: https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/device.py#L15-L22
code:

    def set_driver_simulated(self):
        """Sets the device driver type to simulated"""
        self._device_dict["servermain.MULTIPLE_TYPES_DEVICE_DRIVER"] = "Simulator"
        if self._is_sixteen_bit:
            self._device_dict["servermain.DEVICE_MODEL"] = 0
        else:
            self._device_dict["servermain.DEVICE_MODEL"] = 1
        self._device_dict["servermain.DEVICE_ID_OCTAL"] = 1

docstring: Sets the device driver type to simulated
language: python
partition: train
avg_line_len: 48.875

repo: theislab/scvelo
path: scvelo/tools/rank_velocity_genes.py
url: https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/tools/rank_velocity_genes.py#L39-L52
code:

    def select_groups(adata, groups='all', key='louvain'):
        """Get subset of groups in adata.obs[key].
        """
        strings_to_categoricals(adata)
        if isinstance(groups, list) and isinstance(groups[0], int):
            groups = [str(n) for n in groups]
        categories = adata.obs[key].cat.categories
        groups_masks = np.array([categories[i] == adata.obs[key].values
                                 for i, name in enumerate(categories)])
        if groups == 'all':
            groups = categories.values
        else:
            groups_ids = [np.where(categories.values == name)[0][0] for name in groups]
            groups_masks = groups_masks[groups_ids]
            groups = categories[groups_ids].values
        return groups, groups_masks

docstring: Get subset of groups in adata.obs[key].
language: python
partition: train
avg_line_len: 47.357143

repo: inveniosoftware/invenio-logging
path: invenio_logging/sentry6.py
url: https://github.com/inveniosoftware/invenio-logging/blob/59ee171ad4f9809f62a822964b5c68e5be672dd8/invenio_logging/sentry6.py#L21-L35
code:

    def get_user_info(self, request):
        """Implement custom getter."""
        if not current_user.is_authenticated:
            return {}

        user_info = {
            'id': current_user.get_id(),
        }

        if 'SENTRY_USER_ATTRS' in current_app.config:
            for attr in current_app.config['SENTRY_USER_ATTRS']:
                if hasattr(current_user, attr):
                    user_info[attr] = getattr(current_user, attr)

        return user_info

docstring: Implement custom getter.
language: python
partition: train
avg_line_len: 30.666667

repo: psd-tools/psd-tools
path: src/psd_tools/utils.py
url: https://github.com/psd-tools/psd-tools/blob/4952b57bcf1cf2c1f16fd9d6d51d4fa0b53bce4e/src/psd_tools/utils.py#L69-L81
code:

    def read_length_block(fp, fmt='I', padding=1):
        """
        Read a block of data with a length marker at the beginning.

        :param fp: file-like
        :param fmt: format of the length marker
        :return: bytes object
        """
        length = read_fmt(fmt, fp)[0]
        data = fp.read(length)
        assert len(data) == length, (len(data), length)
        read_padding(fp, length, padding)
        return data

docstring: Read a block of data with a length marker at the beginning. :param fp: file-like :param fmt: format of the length marker :return: bytes object
language: python
partition: train
avg_line_len: 29
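
A standalone sketch of a length-prefixed read, assuming a big-endian 4-byte marker; the library's read_fmt/read_padding helpers are replaced with struct calls here:

    import io
    import struct

    # A 4-byte big-endian length marker followed by the payload.
    buf = io.BytesIO(struct.pack('>I', 5) + b'hello')
    length = struct.unpack('>I', buf.read(4))[0]
    data = buf.read(length)
    assert len(data) == length
    print(data)  # b'hello'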

repo: archman/beamline
path: beamline/lattice.py
url: https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/lattice.py#L382-L388
code:

    def scanStoVars(self, strline):
        """ scan input string line, replace sto parameters with calculated results.
        """
        for wd in strline.split():
            if wd in self.stodict:
                strline = strline.replace(wd, str(self.stodict[wd]))
        return strline

docstring: scan input string line, replace sto parameters with calculated results.
language: python
partition: train
avg_line_len: 40.428571
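
The word-wise substitution can be tried standalone (the lattice line and stored values below are invented):

    # Whitespace-delimited words matching a stored name get substituted.
    stodict = {'KQ': 0.8}
    strline = 'QUAD, K1 = KQ'
    for wd in strline.split():
        if wd in stodict:
            strline = strline.replace(wd, str(stodict[wd]))
    print(strline)  # QUAD, K1 = 0.8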

repo: google/grr
path: grr/client/grr_response_client/client_utils_osx_linux.py
url: https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_utils_osx_linux.py#L233-L249
code:

    def Write(self, grr_message):
      """Write the message into the transaction log."""
      grr_message = grr_message.SerializeToString()
      try:
        with io.open(self.logfile, "wb") as fd:
          fd.write(grr_message)
      except (IOError, OSError):
        # Check if we're missing directories and try to create them.
        if not os.path.isdir(os.path.dirname(self.logfile)):
          try:
            os.makedirs(os.path.dirname(self.logfile))
            with io.open(self.logfile, "wb") as fd:
              fd.write(grr_message)
          except (IOError, OSError):
            logging.exception("Couldn't write nanny transaction log to %s",
                              self.logfile)

docstring: Write the message into the transaction log.
language: python
partition: train
avg_line_len: 38.882353
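
The write-then-create-directories-then-retry pattern, sketched standalone with a hypothetical log path:

    import io
    import os

    logfile = '/tmp/nanny_demo/transaction.log'  # hypothetical path
    try:
        with io.open(logfile, 'wb') as fd:
            fd.write(b'message')
    except (IOError, OSError):
        # Parent directory is missing: create it, then retry once.
        if not os.path.isdir(os.path.dirname(logfile)):
            os.makedirs(os.path.dirname(logfile))
            with io.open(logfile, 'wb') as fd:
                fd.write(b'message')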

repo: cloudnull/turbolift
path: turbolift/authentication/utils.py
url: https://github.com/cloudnull/turbolift/blob/da33034e88959226529ce762e2895e6f6356c448/turbolift/authentication/utils.py#L155-L224
code:

    def parse_reqtype(self):
        """Return the authentication body."""
        if self.job_args['os_auth_version'] == 'v1.0':
            return dict()
        else:
            setup = {
                'username': self.job_args.get('os_user')
            }

            # Check if any prefix items are set. A prefix should be a
            # dictionary with keys matching the os_* credential type.
            prefixes = self.job_args.get('os_prefix')

            if self.job_args.get('os_token') is not None:
                auth_body = {
                    'auth': {
                        'token': {
                            'id': self.job_args.get('os_token')
                        }
                    }
                }
                if not self.job_args.get('os_tenant'):
                    raise exceptions.AuthenticationProblem(
                        'To use token auth you must specify the tenant id. Set'
                        ' the tenant ID with [ --os-tenant ]'
                    )
            elif self.job_args.get('os_password') is not None:
                setup['password'] = self.job_args.get('os_password')
                if prefixes:
                    prefix = prefixes.get('os_password')
                    if not prefix:
                        raise NotImplementedError(
                            'the `password` method is not implemented for this'
                            ' auth plugin'
                        )
                else:
                    prefix = 'passwordCredentials'
                auth_body = {
                    'auth': {
                        prefix: setup
                    }
                }
            elif self.job_args.get('os_apikey') is not None:
                setup['apiKey'] = self.job_args.get('os_apikey')
                if prefixes:
                    prefix = prefixes.get('os_apikey')
                    if not prefix:
                        raise NotImplementedError(
                            'the `apikey` method is not implemented for this'
                            ' auth plugin'
                        )
                else:
                    prefix = 'apiKeyCredentials'
                auth_body = {
                    'auth': {
                        prefix: setup
                    }
                }
            else:
                raise exceptions.AuthenticationProblem(
                    'No Password, APIKey, or Token Specified'
                )

            if self.job_args.get('os_tenant'):
                auth = auth_body['auth']
                auth['tenantName'] = self.job_args.get('os_tenant')

            LOG.debug('AUTH Request body: [ %s ]', auth_body)
            return auth_body

docstring: Return the authentication body.
language: python
partition: train
avg_line_len: 38.114286
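
For reference, the password branch with no prefix overrides produces a body of this shape (all values are placeholders):

    # Placeholder credentials; tenantName is added only when os_tenant is set.
    auth_body = {
        'auth': {
            'passwordCredentials': {
                'username': 'demo-user',
                'password': 'demo-pass',
            },
            'tenantName': 'demo-tenant',
        }
    }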

repo: pandas-dev/pandas
path: pandas/core/frame.py
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2723-L2747
code:

    def set_value(self, index, col, value, takeable=False):
        """
        Put single value at passed column and index.

        .. deprecated:: 0.21.0
            Use .at[] or .iat[] accessors instead.

        Parameters
        ----------
        index : row label
        col : column label
        value : scalar
        takeable : interpret the index/col as indexers, default False

        Returns
        -------
        DataFrame
            If label pair is contained, will be reference to calling DataFrame,
            otherwise a new object.
        """
        warnings.warn("set_value is deprecated and will be removed "
                      "in a future release. Please use "
                      ".at[] or .iat[] accessors instead", FutureWarning,
                      stacklevel=2)
        return self._set_value(index, col, value, takeable=takeable)
[ "def", "set_value", "(", "self", ",", "index", ",", "col", ",", "value", ",", "takeable", "=", "False", ")", ":", "warnings", ".", "warn", "(", "\"set_value is deprecated and will be removed \"", "\"in a future release. Please use \"", "\".at[] or .iat[] accessors instead\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "_set_value", "(", "index", ",", "col", ",", "value", ",", "takeable", "=", "takeable", ")" ]
Put single value at passed column and index. .. deprecated:: 0.21.0 Use .at[] or .iat[] accessors instead. Parameters ---------- index : row label col : column label value : scalar takeable : interpret the index/col as indexers, default False Returns ------- DataFrame If label pair is contained, will be reference to calling DataFrame, otherwise a new object.
[ "Put", "single", "value", "at", "passed", "column", "and", "index", "." ]
python
train
33.92
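A minimal sketch of the migration the deprecation warning above asks for, using a made-up two-row DataFrame; .at is the label-based and .iat the position-based single-value accessor (assumes pandas is installed).

import pandas as pd

df = pd.DataFrame({'a': [1, 2]}, index=['x', 'y'])

# df.set_value('x', 'a', 10)  # deprecated spelling
df.at['x', 'a'] = 10          # label-based replacement
df.iat[1, 0] = 20             # position-based replacement
print(df)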
SwissDataScienceCenter/renku-python
renku/cli/env.py
https://github.com/SwissDataScienceCenter/renku-python/blob/691644d695b055a01e0ca22b2620e55bbd928c0d/renku/cli/env.py#L29-L41
def env(config, endpoint): """Print RENKU environment variables. Run this command to configure your Renku client: $ eval "$(renku env)" """ access_token = config['endpoints'][endpoint]['token']['access_token'] click.echo('export {0}={1}'.format('RENKU_ENDPOINT', endpoint)) click.echo('export {0}={1}'.format('RENKU_ACCESS_TOKEN', access_token)) click.echo('# Run this command to configure your Renku client:') click.echo('# eval "$(renku env)"')
[ "def", "env", "(", "config", ",", "endpoint", ")", ":", "access_token", "=", "config", "[", "'endpoints'", "]", "[", "endpoint", "]", "[", "'token'", "]", "[", "'access_token'", "]", "click", ".", "echo", "(", "'export {0}={1}'", ".", "format", "(", "'RENKU_ENDPOINT'", ",", "endpoint", ")", ")", "click", ".", "echo", "(", "'export {0}={1}'", ".", "format", "(", "'RENKU_ACCESS_TOKEN'", ",", "access_token", ")", ")", "click", ".", "echo", "(", "'# Run this command to configure your Renku client:'", ")", "click", ".", "echo", "(", "'# eval \"$(renku env)\"'", ")" ]
Print RENKU environment variables. Run this command to configure your Renku client: $ eval "$(renku env)"
[ "Print", "RENKU", "environment", "variables", "." ]
python
train
36.692308
tanghaibao/goatools
goatools/grouper/hdrgos.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/hdrgos.py#L69-L88
def _init_hdrgos(self, hdrgos_dflt, hdrgos_usr=None, add_dflt=True):
        """Initialize GO group headers."""
        # Use default GO group header values
        if (hdrgos_usr is None or hdrgos_usr is False) and not self.sections:
            return set(hdrgos_dflt)
        # Get GO group headers provided by user
        hdrgos_init = set()
        if hdrgos_usr:
            chk_goids(hdrgos_usr, "User-provided GO group headers")
            hdrgos_init |= set(hdrgos_usr)
        if self.sections:
            self._chk_sections(self.sections)
            hdrgos_sec = set([hg for _, hdrgos in self.sections for hg in hdrgos])
            chk_goids(hdrgos_sec, "User-provided GO group headers in sections")
            hdrgos_init |= hdrgos_sec
        # Add default depth-01 GOs to headers, if desired
        if add_dflt:
            return set(hdrgos_init).union(hdrgos_dflt)
        # Return user-provided GO grouping headers
        return hdrgos_init
[ "def", "_init_hdrgos", "(", "self", ",", "hdrgos_dflt", ",", "hdrgos_usr", "=", "None", ",", "add_dflt", "=", "True", ")", ":", "# Use default GO group header values", "if", "(", "hdrgos_usr", "is", "None", "or", "hdrgos_usr", "is", "False", ")", "and", "not", "self", ".", "sections", ":", "return", "set", "(", "hdrgos_dflt", ")", "# Get GO group headers provided by user", "hdrgos_init", "=", "set", "(", ")", "if", "hdrgos_usr", ":", "chk_goids", "(", "hdrgos_usr", ",", "\"User-provided GO group headers\"", ")", "hdrgos_init", "|=", "set", "(", "hdrgos_usr", ")", "if", "self", ".", "sections", ":", "self", ".", "_chk_sections", "(", "self", ".", "sections", ")", "hdrgos_sec", "=", "set", "(", "[", "hg", "for", "_", ",", "hdrgos", "in", "self", ".", "sections", "for", "hg", "in", "hdrgos", "]", ")", "chk_goids", "(", "hdrgos_sec", ",", "\"User-provided GO group headers in sections\"", ")", "hdrgos_init", "|=", "hdrgos_sec", "# Add default depth-01 GOs to headers, if desired", "if", "add_dflt", ":", "return", "set", "(", "hdrgos_init", ")", ".", "union", "(", "hdrgos_dflt", ")", "# Return user-provided GO grouping headers", "return", "hdrgos_init" ]
Initialize GO group headers.
[ "Initialize", "GO", "group", "headers", "." ]
python
train
46.8
pgmpy/pgmpy
pgmpy/readwrite/XMLBIF.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/readwrite/XMLBIF.py#L430-L444
def write_xmlbif(self, filename): """ Write the xml data into the file. Parameters ---------- filename: Name of the file. Examples ------- >>> writer = XMLBIFWriter(model) >>> writer.write_xmlbif(test_file) """ with open(filename, 'w') as fout: fout.write(self.__str__())
[ "def", "write_xmlbif", "(", "self", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "fout", ":", "fout", ".", "write", "(", "self", ".", "__str__", "(", ")", ")" ]
Write the xml data into the file. Parameters ---------- filename: Name of the file. Examples ------- >>> writer = XMLBIFWriter(model) >>> writer.write_xmlbif(test_file)
[ "Write", "the", "xml", "data", "into", "the", "file", "." ]
python
train
23.933333
python-constraint/python-constraint
constraint/__init__.py
https://github.com/python-constraint/python-constraint/blob/e23fe9852cddddf1c3e258e03f2175df24b4c702/constraint/__init__.py#L373-L386
def getSolution(self, domains, constraints, vconstraints): """ Return one solution for the given problem @param domains: Dictionary mapping variables to their domains @type domains: dict @param constraints: List of pairs of (constraint, variables) @type constraints: list @param vconstraints: Dictionary mapping variables to a list of constraints affecting the given variables. @type vconstraints: dict """ msg = "%s is an abstract class" % self.__class__.__name__ raise NotImplementedError(msg)
[ "def", "getSolution", "(", "self", ",", "domains", ",", "constraints", ",", "vconstraints", ")", ":", "msg", "=", "\"%s is an abstract class\"", "%", "self", ".", "__class__", ".", "__name__", "raise", "NotImplementedError", "(", "msg", ")" ]
Return one solution for the given problem @param domains: Dictionary mapping variables to their domains @type domains: dict @param constraints: List of pairs of (constraint, variables) @type constraints: list @param vconstraints: Dictionary mapping variables to a list of constraints affecting the given variables. @type vconstraints: dict
[ "Return", "one", "solution", "for", "the", "given", "problem" ]
python
train
43.071429
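Since getSolution() above is abstract, a subclass must supply the search itself. The brute-force solver below is an illustrative stand-in, not python-constraint's own code, and it treats each constraint as a plain callable on the assignment dict (the library's real Constraint objects have a richer call signature).

from itertools import product

class TinySolver:
    def getSolution(self, domains, constraints, vconstraints):
        variables = list(domains)
        # Enumerate the full cross product of the domains.
        for values in product(*(domains[v] for v in variables)):
            assignment = dict(zip(variables, values))
            if all(check(assignment) for check, _ in constraints):
                return assignment
        return None

solver = TinySolver()
print(solver.getSolution({'x': [1, 2], 'y': [1, 2]},
                         [(lambda a: a['x'] < a['y'], ('x', 'y'))], {}))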
limpyd/redis-limpyd
limpyd/indexes.py
https://github.com/limpyd/redis-limpyd/blob/3c745dde1390a0bd09690b77a089dcc08c6c7e43/limpyd/indexes.py#L1112-L1128
def _extract_value_from_storage(self, string): """Taking a string that was a member of the zset, extract the value and pk Parameters ---------- string: str The member extracted from the sorted set Returns ------- tuple Tuple with the value and the pk, extracted from the string """ parts = string.split(self.separator) pk = parts.pop() return self.separator.join(parts), pk
[ "def", "_extract_value_from_storage", "(", "self", ",", "string", ")", ":", "parts", "=", "string", ".", "split", "(", "self", ".", "separator", ")", "pk", "=", "parts", ".", "pop", "(", ")", "return", "self", ".", "separator", ".", "join", "(", "parts", ")", ",", "pk" ]
Taking a string that was a member of the zset, extract the value and pk Parameters ---------- string: str The member extracted from the sorted set Returns ------- tuple Tuple with the value and the pk, extracted from the string
[ "Taking", "a", "string", "that", "was", "a", "member", "of", "the", "zset", "extract", "the", "value", "and", "pk" ]
python
train
27.764706
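A standalone restatement of the storage convention the record above relies on: the sorted-set member is the indexed value and the pk joined by a separator, and only the last segment is the pk, so the value itself may contain the separator. The separator choice below is a placeholder.

separator = ':'

def extract(member):
    parts = member.split(separator)
    pk = parts.pop()
    return separator.join(parts), pk

print(extract('foo:bar:42'))  # ('foo:bar', '42')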
catherinedevlin/ddl-generator
ddlgenerator/reshape.py
https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L18-L34
def clean_key_name(key): """ Makes ``key`` a valid and appropriate SQL column name: 1. Replaces illegal characters in column names with ``_`` 2. Prevents name from beginning with a digit (prepends ``_``) 3. Lowercases name. If you want case-sensitive table or column names, you are a bad person and you should feel bad. """ result = _illegal_in_column_name.sub("_", key.strip()) if result[0].isdigit(): result = '_%s' % result if result.upper() in sql_reserved_words: result = '_%s' % key return result.lower()
[ "def", "clean_key_name", "(", "key", ")", ":", "result", "=", "_illegal_in_column_name", ".", "sub", "(", "\"_\"", ",", "key", ".", "strip", "(", ")", ")", "if", "result", "[", "0", "]", ".", "isdigit", "(", ")", ":", "result", "=", "'_%s'", "%", "result", "if", "result", ".", "upper", "(", ")", "in", "sql_reserved_words", ":", "result", "=", "'_%s'", "%", "key", "return", "result", ".", "lower", "(", ")" ]
Makes ``key`` a valid and appropriate SQL column name: 1. Replaces illegal characters in column names with ``_`` 2. Prevents name from beginning with a digit (prepends ``_``) 3. Lowercases name. If you want case-sensitive table or column names, you are a bad person and you should feel bad.
[ "Makes", "key", "a", "valid", "and", "appropriate", "SQL", "column", "name", ":" ]
python
train
32.823529
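A self-contained sketch of the record above; the regex and the reserved-word set are hypothetical stand-ins for the module-level _illegal_in_column_name and sql_reserved_words it references.

import re

_illegal_in_column_name = re.compile(r'[^a-zA-Z0-9_$]')  # stand-in pattern
sql_reserved_words = {'SELECT', 'TABLE', 'ORDER'}        # stand-in subset

def clean_key_name(key):
    result = _illegal_in_column_name.sub('_', key.strip())
    if result[0].isdigit():
        result = '_%s' % result
    if result.upper() in sql_reserved_words:
        result = '_%s' % key
    return result.lower()

print(clean_key_name('2nd value'))  # _2nd_value
print(clean_key_name('Order'))      # _order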
NoviceLive/intellicoder
intellicoder/synthesizers.py
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/synthesizers.py#L162-L171
def c_struct(self): """Get the struct of the module.""" member = '\n'.join(self.c_member_funcs(True)) if self.opts.windll: return 'struct {{\n{}{} }} {};\n'.format( self._c_dll_base(), member, self.name ) return 'typedef\nstruct {2} {{\n{0}\n{1}}}\n{3};\n'.format( self._c_dll_base(), member, *self._c_struct_names() )
[ "def", "c_struct", "(", "self", ")", ":", "member", "=", "'\\n'", ".", "join", "(", "self", ".", "c_member_funcs", "(", "True", ")", ")", "if", "self", ".", "opts", ".", "windll", ":", "return", "'struct {{\\n{}{} }} {};\\n'", ".", "format", "(", "self", ".", "_c_dll_base", "(", ")", ",", "member", ",", "self", ".", "name", ")", "return", "'typedef\\nstruct {2} {{\\n{0}\\n{1}}}\\n{3};\\n'", ".", "format", "(", "self", ".", "_c_dll_base", "(", ")", ",", "member", ",", "*", "self", ".", "_c_struct_names", "(", ")", ")" ]
Get the struct of the module.
[ "Get", "the", "struct", "of", "the", "module", "." ]
python
train
40.1
eaton-lab/toytree
toytree/MultiDrawing.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/MultiDrawing.py#L170-L206
def add_tip_labels_to_axes(self): """ Add text offset from tips of tree with correction for orientation, and fixed_order which is usually used in multitree plotting. """ # get tip-coords and replace if using fixed_order if self.style.orient in ("up", "down"): ypos = np.zeros(self.ntips) xpos = np.arange(self.ntips) if self.style.orient in ("right", "left"): xpos = np.zeros(self.ntips) ypos = np.arange(self.ntips) # pop fill from color dict if using color if self.style.tip_labels_colors: self.style.tip_labels_style.pop("fill") # fill anchor shift if None # (Toytrees fill this at draw() normally when tip_labels != None) if self.style.tip_labels_style["-toyplot-anchor-shift"] is None: self.style.tip_labels_style["-toyplot-anchor-shift"] = "15px" # add tip names to coordinates calculated above self.axes.text( xpos, ypos, self.tip_labels, angle=(0 if self.style.orient in ("right", "left") else -90), style=self.style.tip_labels_style, color=self.style.tip_labels_colors, ) # get stroke-width for aligned tip-label lines (optional) # copy stroke-width from the edge_style unless user set it if not self.style.edge_align_style.get("stroke-width"): self.style.edge_align_style['stroke-width'] = ( self.style.edge_style['stroke-width'])
[ "def", "add_tip_labels_to_axes", "(", "self", ")", ":", "# get tip-coords and replace if using fixed_order", "if", "self", ".", "style", ".", "orient", "in", "(", "\"up\"", ",", "\"down\"", ")", ":", "ypos", "=", "np", ".", "zeros", "(", "self", ".", "ntips", ")", "xpos", "=", "np", ".", "arange", "(", "self", ".", "ntips", ")", "if", "self", ".", "style", ".", "orient", "in", "(", "\"right\"", ",", "\"left\"", ")", ":", "xpos", "=", "np", ".", "zeros", "(", "self", ".", "ntips", ")", "ypos", "=", "np", ".", "arange", "(", "self", ".", "ntips", ")", "# pop fill from color dict if using color", "if", "self", ".", "style", ".", "tip_labels_colors", ":", "self", ".", "style", ".", "tip_labels_style", ".", "pop", "(", "\"fill\"", ")", "# fill anchor shift if None ", "# (Toytrees fill this at draw() normally when tip_labels != None)", "if", "self", ".", "style", ".", "tip_labels_style", "[", "\"-toyplot-anchor-shift\"", "]", "is", "None", ":", "self", ".", "style", ".", "tip_labels_style", "[", "\"-toyplot-anchor-shift\"", "]", "=", "\"15px\"", "# add tip names to coordinates calculated above", "self", ".", "axes", ".", "text", "(", "xpos", ",", "ypos", ",", "self", ".", "tip_labels", ",", "angle", "=", "(", "0", "if", "self", ".", "style", ".", "orient", "in", "(", "\"right\"", ",", "\"left\"", ")", "else", "-", "90", ")", ",", "style", "=", "self", ".", "style", ".", "tip_labels_style", ",", "color", "=", "self", ".", "style", ".", "tip_labels_colors", ",", ")", "# get stroke-width for aligned tip-label lines (optional)", "# copy stroke-width from the edge_style unless user set it", "if", "not", "self", ".", "style", ".", "edge_align_style", ".", "get", "(", "\"stroke-width\"", ")", ":", "self", ".", "style", ".", "edge_align_style", "[", "'stroke-width'", "]", "=", "(", "self", ".", "style", ".", "edge_style", "[", "'stroke-width'", "]", ")" ]
Add text offset from tips of tree with correction for orientation, and fixed_order which is usually used in multitree plotting.
[ "Add", "text", "offset", "from", "tips", "of", "tree", "with", "correction", "for", "orientation", "and", "fixed_order", "which", "is", "usually", "used", "in", "multitree", "plotting", "." ]
python
train
41.27027
tijme/not-your-average-web-crawler
nyawc/Queue.py
https://github.com/tijme/not-your-average-web-crawler/blob/d77c14e1616c541bb3980f649a7e6f8ed02761fb/nyawc/Queue.py#L181-L192
def get_progress(self): """Get the progress of the queue in percentage (float). Returns: float: The 'finished' progress in percentage. """ count_remaining = len(self.items_queued) + len(self.items_in_progress) percentage_remaining = 100 / self.count_total * count_remaining return 100 - percentage_remaining
[ "def", "get_progress", "(", "self", ")", ":", "count_remaining", "=", "len", "(", "self", ".", "items_queued", ")", "+", "len", "(", "self", ".", "items_in_progress", ")", "percentage_remaining", "=", "100", "/", "self", ".", "count_total", "*", "count_remaining", "return", "100", "-", "percentage_remaining" ]
Get the progress of the queue in percentage (float). Returns: float: The 'finished' progress in percentage.
[ "Get", "the", "progress", "of", "the", "queue", "in", "percentage", "(", "float", ")", "." ]
python
train
30
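A worked instance of the percentage arithmetic in get_progress(), with made-up queue sizes: ten items total, three still queued and one in progress leaves the queue 60% finished.

count_total = 10
count_remaining = 3 + 1                            # queued + in progress
percentage_remaining = 100 / count_total * count_remaining
print(100 - percentage_remaining)                  # 60.0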
google/grr
grr/core/grr_response_core/lib/utils.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/utils.py#L572-L626
def NormalizePath(path, sep="/"):
  """A sane implementation of os.path.normpath.

  The standard implementation treats leading / and // as different, leading to
  incorrect normal forms.

  NOTE: It's ok to use a relative path here (without leading /) but any /../
  will still be removed anchoring the path at the top level (e.g.
  foo/../../../../bar => bar).

  Args:
    path: The path to normalize.
    sep: Separator used.

  Returns:
    A normalized path. In this context normalized means that all input paths
    that would result in the system opening the same physical file will
    produce the same normalized path.
  """
  if not path:
    return sep
  path = SmartUnicode(path)

  path_list = path.split(sep)

  # This is a relative path and the first element is . or ..
  if path_list[0] in [".", "..", ""]:
    path_list.pop(0)

  # Deliberately begin at index 1 to preserve a single leading /
  i = 0

  while True:
    list_len = len(path_list)

    # We begin at the last known good position so we never iterate over path
    # elements which are already examined
    for i in range(i, len(path_list)):
      # Remove /./ form
      if path_list[i] == "." or not path_list[i]:
        path_list.pop(i)
        break

      # Remove /../ form
      elif path_list[i] == "..":
        path_list.pop(i)
        # Anchor at the top level
        if (i == 1 and path_list[0]) or i > 1:
          i -= 1
          path_list.pop(i)
        break

    # If we didn't alter the path so far we can quit
    if len(path_list) == list_len:
      return sep + sep.join(path_list)
[ "def", "NormalizePath", "(", "path", ",", "sep", "=", "\"/\"", ")", ":", "if", "not", "path", ":", "return", "sep", "path", "=", "SmartUnicode", "(", "path", ")", "path_list", "=", "path", ".", "split", "(", "sep", ")", "# This is a relative path and the first element is . or ..", "if", "path_list", "[", "0", "]", "in", "[", "\".\"", ",", "\"..\"", ",", "\"\"", "]", ":", "path_list", ".", "pop", "(", "0", ")", "# Deliberately begin at index 1 to preserve a single leading /", "i", "=", "0", "while", "True", ":", "list_len", "=", "len", "(", "path_list", ")", "# We begin at the last known good position so we never iterate over path", "# elements which are already examined", "for", "i", "in", "range", "(", "i", ",", "len", "(", "path_list", ")", ")", ":", "# Remove /./ form", "if", "path_list", "[", "i", "]", "==", "\".\"", "or", "not", "path_list", "[", "i", "]", ":", "path_list", ".", "pop", "(", "i", ")", "break", "# Remove /../ form", "elif", "path_list", "[", "i", "]", "==", "\"..\"", ":", "path_list", ".", "pop", "(", "i", ")", "# Anchor at the top level", "if", "(", "i", "==", "1", "and", "path_list", "[", "0", "]", ")", "or", "i", ">", "1", ":", "i", "-=", "1", "path_list", ".", "pop", "(", "i", ")", "break", "# If we didnt alter the path so far we can quit", "if", "len", "(", "path_list", ")", "==", "list_len", ":", "return", "sep", "+", "sep", ".", "join", "(", "path_list", ")" ]
A sane implementation of os.path.normpath.

The standard implementation treats leading / and // as different, leading to
incorrect normal forms.

NOTE: It's ok to use a relative path here (without leading /) but any /../
will still be removed anchoring the path at the top level (e.g.
foo/../../../../bar => bar).

Args:
  path: The path to normalize.
  sep: Separator used.

Returns:
  A normalized path. In this context normalized means that all input paths
  that would result in the system opening the same physical file will
  produce the same normalized path.
[ "A", "sane", "implementation", "of", "os", ".", "path", ".", "normpath", "." ]
python
train
27.890909
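Illustrative inputs and the normal forms NormalizePath() gives them, taken from the behaviour its docstring and loop describe; the cases themselves are made up.

cases = [
    ('//foo//bar', '/foo/bar'),          # duplicate separators collapse
    ('/foo/./bar', '/foo/bar'),          # /./ components vanish
    ('/foo/../bar', '/bar'),             # /../ removes its parent
    ('foo/../../../../bar', '/bar'),     # '..' anchors at the top level
]
for raw, expected in cases:
    print('%-22s -> %s' % (raw, expected))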
opencivicdata/pupa
pupa/utils/topsort.py
https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/utils/topsort.py#L105-L145
def cycles(self):
        """
        Fairly expensive cycle detection algorithm. This method
        will return the shortest unique cycles that were detected.

        Debug usage may look something like:

        print("The following cycles were found:")
        for cycle in network.cycles():
            print("    ", " -> ".join(cycle))

        """
        def walk_node(node, seen):
            """
            Walk each top-level node we know about, and recurse
            along the graph.
            """
            if node in seen:
                yield (node,)
                return
            seen.add(node)
            for edge in self.edges[node]:
                for cycle in walk_node(edge, set(seen)):
                    yield (node,) + cycle

        # First, let's get an iterable of all known cycles.
        cycles = chain.from_iterable(
            (walk_node(node, set()) for node in self.nodes))

        shortest = set()

        # Now, let's go through and sift through the cycles, finding
        # the shortest unique cycle known, ignoring cycles which contain
        # already known cycles.
        for cycle in sorted(cycles, key=len):
            for el in shortest:
                if set(el).issubset(set(cycle)):
                    break
            else:
                shortest.add(cycle)

        # And return that unique list.
        return shortest
[ "def", "cycles", "(", "self", ")", ":", "def", "walk_node", "(", "node", ",", "seen", ")", ":", "\"\"\"\n Walk each top-level node we know about, and recurse\n along the graph.\n \"\"\"", "if", "node", "in", "seen", ":", "yield", "(", "node", ",", ")", "return", "seen", ".", "add", "(", "node", ")", "for", "edge", "in", "self", ".", "edges", "[", "node", "]", ":", "for", "cycle", "in", "walk_node", "(", "edge", ",", "set", "(", "seen", ")", ")", ":", "yield", "(", "node", ",", ")", "+", "cycle", "# First, let's get a iterable of all known cycles.", "cycles", "=", "chain", ".", "from_iterable", "(", "(", "walk_node", "(", "node", ",", "set", "(", ")", ")", "for", "node", "in", "self", ".", "nodes", ")", ")", "shortest", "=", "set", "(", ")", "# Now, let's go through and sift through the cycles, finding", "# the shortest unique cycle known, ignoring cycles which contain", "# already known cycles.", "for", "cycle", "in", "sorted", "(", "cycles", ",", "key", "=", "len", ")", ":", "for", "el", "in", "shortest", ":", "if", "set", "(", "el", ")", ".", "issubset", "(", "set", "(", "cycle", ")", ")", ":", "break", "else", ":", "shortest", ".", "add", "(", "cycle", ")", "# And return that unique list.", "return", "shortest" ]
Fairly expensive cycle detection algorithm. This method will return the shortest unique cycles that were detected. Debug usage may look something like: print("The following cycles were found:") for cycle in network.cycles(): print(" ", " -> ".join(cycle))
[ "Fairly", "expensive", "cycle", "detection", "algorithm", ".", "This", "method", "will", "return", "the", "shortest", "unique", "cycles", "that", "were", "detected", "." ]
python
train
33.02439
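A tiny self-contained re-creation of the walk in cycles() above, assuming the graph's edges live in a plain adjacency dict rather than on self.edges; the three-node loop is made up.

edges = {'a': {'b'}, 'b': {'c'}, 'c': {'a'}, 'd': set()}

def walk_node(node, seen):
    if node in seen:
        yield (node,)
        return
    seen.add(node)
    for edge in edges.get(node, ()):
        for cycle in walk_node(edge, set(seen)):
            yield (node,) + cycle

for cycle in walk_node('a', set()):
    print(' -> '.join(cycle))   # a -> b -> c -> a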
singularityhub/sregistry-cli
sregistry/main/workers/tasks.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/workers/tasks.py#L28-L59
def download_task(url, headers, destination, download_type='layer'):
    '''download an image layer (.tar.gz) to a specified download folder.
       This task is done by using local versions of the same core
       stream/download functions that are used by the parent client.

       Parameters
       ==========
       url: the url of the layer to download
       headers: headers to send along with the download request
       destination: the file path to download the layer to
       download_type: a label used in log messages (defaults to 'layer')

    '''
    # Update the user on what we are doing
    bot.verbose("Downloading %s from %s" % (download_type, url))

    # Step 1: Download the layer atomically
    file_name = "%s.%s" % (destination,
                           next(tempfile._get_candidate_names()))
    tar_download = download(url, file_name, headers=headers)

    try:
        shutil.move(tar_download, destination)
    except Exception:
        msg = "Cannot move layer %s," % tar_download
        msg += " was there a problem with download?"
        bot.error(msg)
        sys.exit(1)
    return destination
[ "def", "download_task", "(", "url", ",", "headers", ",", "destination", ",", "download_type", "=", "'layer'", ")", ":", "# Update the user what we are doing", "bot", ".", "verbose", "(", "\"Downloading %s from %s\"", "%", "(", "download_type", ",", "url", ")", ")", "# Step 1: Download the layer atomically", "file_name", "=", "\"%s.%s\"", "%", "(", "destination", ",", "next", "(", "tempfile", ".", "_get_candidate_names", "(", ")", ")", ")", "tar_download", "=", "download", "(", "url", ",", "file_name", ",", "headers", "=", "headers", ")", "try", ":", "shutil", ".", "move", "(", "tar_download", ",", "destination", ")", "except", "Exception", ":", "msg", "=", "\"Cannot untar layer %s,\"", "%", "tar_download", "msg", "+=", "\" was there a problem with download?\"", "bot", ".", "error", "(", "msg", ")", "sys", ".", "exit", "(", "1", ")", "return", "destination" ]
download an image layer (.tar.gz) to a specified download folder.
This task is done by using local versions of the same core
stream/download functions that are used by the parent client.

Parameters
==========
url: the url of the layer to download
headers: headers to send along with the download request
destination: the file path to download the layer to
download_type: a label used in log messages (defaults to 'layer')
[ "download", "an", "image", "layer", "(", ".", "tar", ".", "gz", ")", "to", "a", "specified", "download", "folder", ".", "This", "task", "is", "done", "by", "using", "local", "versions", "of", "the", "same", "core", "stream", "/", "download", "functions", "that", "are", "used", "by", "the", "parent", "client", "." ]
python
test
34.90625
michael-lazar/rtv
rtv/packages/praw/objects.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/objects.py#L781-L790
def mute_modmail_author(self, _unmute=False): """Mute the sender of this modmail message. :param _unmute: Unmute the user instead. Please use :meth:`unmute_modmail_author` instead of setting this directly. """ path = 'unmute_sender' if _unmute else 'mute_sender' return self.reddit_session.request_json( self.reddit_session.config[path], data={'id': self.fullname})
[ "def", "mute_modmail_author", "(", "self", ",", "_unmute", "=", "False", ")", ":", "path", "=", "'unmute_sender'", "if", "_unmute", "else", "'mute_sender'", "return", "self", ".", "reddit_session", ".", "request_json", "(", "self", ".", "reddit_session", ".", "config", "[", "path", "]", ",", "data", "=", "{", "'id'", ":", "self", ".", "fullname", "}", ")" ]
Mute the sender of this modmail message. :param _unmute: Unmute the user instead. Please use :meth:`unmute_modmail_author` instead of setting this directly.
[ "Mute", "the", "sender", "of", "this", "modmail", "message", "." ]
python
train
42.2
mozilla/treeherder
treeherder/intermittents_commenter/commenter.py
https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/intermittents_commenter/commenter.py#L252-L300
def get_bug_stats(self, startday, endday): """Get all intermittent failures per specified date range and repository, returning a dict of bug_id's with total, repository and platform totals if totals are greater than or equal to the threshold. eg: { "1206327": { "total": 5, "per_repository": { "fx-team": 2, "mozilla-inbound": 3 }, "per_platform": { "osx-10-10": 4, "b2g-emu-ics": 1 } }, ... } """ # Min required failures per bug in order to post a comment threshold = 1 if self.weekly_mode else 15 bug_ids = (BugJobMap.failures.by_date(startday, endday) .values('bug_id') .annotate(total=Count('bug_id')) .filter(total__gte=threshold) .values_list('bug_id', flat=True)) bugs = (BugJobMap.failures.by_date(startday, endday) .filter(bug_id__in=bug_ids) .values('job__repository__name', 'job__machine_platform__platform', 'bug_id')) bug_map = dict() for bug in bugs: platform = bug['job__machine_platform__platform'] repo = bug['job__repository__name'] bug_id = bug['bug_id'] if bug_id in bug_map: bug_map[bug_id]['total'] += 1 bug_map[bug_id]['per_platform'][platform] += 1 bug_map[bug_id]['per_repository'][repo] += 1 else: bug_map[bug_id] = {} bug_map[bug_id]['total'] = 1 bug_map[bug_id]['per_platform'] = Counter([platform]) bug_map[bug_id]['per_repository'] = Counter([repo]) return bug_map, bug_ids
[ "def", "get_bug_stats", "(", "self", ",", "startday", ",", "endday", ")", ":", "# Min required failures per bug in order to post a comment", "threshold", "=", "1", "if", "self", ".", "weekly_mode", "else", "15", "bug_ids", "=", "(", "BugJobMap", ".", "failures", ".", "by_date", "(", "startday", ",", "endday", ")", ".", "values", "(", "'bug_id'", ")", ".", "annotate", "(", "total", "=", "Count", "(", "'bug_id'", ")", ")", ".", "filter", "(", "total__gte", "=", "threshold", ")", ".", "values_list", "(", "'bug_id'", ",", "flat", "=", "True", ")", ")", "bugs", "=", "(", "BugJobMap", ".", "failures", ".", "by_date", "(", "startday", ",", "endday", ")", ".", "filter", "(", "bug_id__in", "=", "bug_ids", ")", ".", "values", "(", "'job__repository__name'", ",", "'job__machine_platform__platform'", ",", "'bug_id'", ")", ")", "bug_map", "=", "dict", "(", ")", "for", "bug", "in", "bugs", ":", "platform", "=", "bug", "[", "'job__machine_platform__platform'", "]", "repo", "=", "bug", "[", "'job__repository__name'", "]", "bug_id", "=", "bug", "[", "'bug_id'", "]", "if", "bug_id", "in", "bug_map", ":", "bug_map", "[", "bug_id", "]", "[", "'total'", "]", "+=", "1", "bug_map", "[", "bug_id", "]", "[", "'per_platform'", "]", "[", "platform", "]", "+=", "1", "bug_map", "[", "bug_id", "]", "[", "'per_repository'", "]", "[", "repo", "]", "+=", "1", "else", ":", "bug_map", "[", "bug_id", "]", "=", "{", "}", "bug_map", "[", "bug_id", "]", "[", "'total'", "]", "=", "1", "bug_map", "[", "bug_id", "]", "[", "'per_platform'", "]", "=", "Counter", "(", "[", "platform", "]", ")", "bug_map", "[", "bug_id", "]", "[", "'per_repository'", "]", "=", "Counter", "(", "[", "repo", "]", ")", "return", "bug_map", ",", "bug_ids" ]
Get all intermittent failures per specified date range and repository, returning a dict of bug_id's with total, repository and platform totals if totals are greater than or equal to the threshold. eg: { "1206327": { "total": 5, "per_repository": { "fx-team": 2, "mozilla-inbound": 3 }, "per_platform": { "osx-10-10": 4, "b2g-emu-ics": 1 } }, ... }
[ "Get", "all", "intermittent", "failures", "per", "specified", "date", "range", "and", "repository", "returning", "a", "dict", "of", "bug_id", "s", "with", "total", "repository", "and", "platform", "totals", "if", "totals", "are", "greater", "than", "or", "equal", "to", "the", "threshold", ".", "eg", ":", "{", "1206327", ":", "{", "total", ":", "5", "per_repository", ":", "{", "fx", "-", "team", ":", "2", "mozilla", "-", "inbound", ":", "3", "}", "per_platform", ":", "{", "osx", "-", "10", "-", "10", ":", "4", "b2g", "-", "emu", "-", "ics", ":", "1", "}", "}", "...", "}" ]
python
train
40.857143
sawcordwell/pymdptoolbox
src/experimental/mdpsql.py
https://github.com/sawcordwell/pymdptoolbox/blob/7c96789cc80e280437005c12065cf70266c11636/src/experimental/mdpsql.py#L50-L88
def exampleRand(S, A):
    """WARNING: This will delete a database with the same name as 'db'."""
    db = "MDP-%sx%s.db" % (S, A)
    if os.path.exists(db):
        os.remove(db)
    conn = sqlite3.connect(db)
    with conn:
        c = conn.cursor()
        cmd = '''
            CREATE TABLE info (name TEXT, value INTEGER);
            INSERT INTO info VALUES('states', %s);
            INSERT INTO info VALUES('actions', %s);''' % (S, A)
        c.executescript(cmd)
        for a in range(1, A+1):
            cmd = '''
                CREATE TABLE transition%s (row INTEGER, col INTEGER, prob REAL);
                CREATE TABLE reward%s (state INTEGER PRIMARY KEY ASC, val REAL);
                ''' % (a, a)
            c.executescript(cmd)
            cmd = "INSERT INTO reward%s(val) VALUES(?)" % a
            c.executemany(cmd, zip(random(S).tolist()))
            for s in xrange(1, S+1):
                # to be usefully represented as a sparse matrix, the number of
                # nonzero entries should be less than 1/3 of dimension of the
                # matrix, so S/3
                n = randint(1, S//3)
                # timeit [90894] * 20330
                # ==> 10000 loops, best of 3: 141 us per loop
                # timeit (90894*np.ones(20330, dtype=int)).tolist()
                # ==> 1000 loops, best of 3: 548 us per loop
                col = (permutation(arange(1,S+1))[0:n]).tolist()
                val = random(n)
                val = (val / val.sum()).tolist()
                cmd = "INSERT INTO transition%s VALUES(?, ?, ?)" % a
                c.executemany(cmd, zip([s] * n, col, val))
            cmd = "CREATE UNIQUE INDEX Pidx%s ON transition%s (row, col);" % (a, a)
            c.execute(cmd)
    # return the name of the database
    return db
[ "def", "exampleRand", "(", "S", ",", "A", ")", ":", "db", "=", "\"MDP-%sx%s.db\"", "%", "(", "S", ",", "A", ")", "if", "os", ".", "path", ".", "exists", "(", "db", ")", ":", "os", ".", "remove", "(", "db", ")", "conn", "=", "sqlite3", ".", "connect", "(", "db", ")", "with", "conn", ":", "c", "=", "conn", ".", "cursor", "(", ")", "cmd", "=", "'''\n CREATE TABLE info (name TEXT, value INTEGER);\n INSERT INTO info VALUES('states', %s);\n INSERT INTO info VALUES('actions', %s);'''", "%", "(", "S", ",", "A", ")", "c", ".", "executescript", "(", "cmd", ")", "for", "a", "in", "range", "(", "1", ",", "A", "+", "1", ")", ":", "cmd", "=", "'''\n CREATE TABLE transition%s (row INTEGER, col INTEGER, prob REAL);\n CREATE TABLE reward%s (state INTEGER PRIMARY KEY ASC, val REAL);\n '''", "%", "(", "a", ",", "a", ")", "c", ".", "executescript", "(", "cmd", ")", "cmd", "=", "\"INSERT INTO reward%s(val) VALUES(?)\"", "%", "a", "c", ".", "executemany", "(", "cmd", ",", "zip", "(", "random", "(", "S", ")", ".", "tolist", "(", ")", ")", ")", "for", "s", "in", "xrange", "(", "1", ",", "S", "+", "1", ")", ":", "# to be usefully represented as a sparse matrix, the number of", "# nonzero entries should be less than 1/3 of dimesion of the", "# matrix, so S/3", "n", "=", "randint", "(", "1", ",", "S", "//", "3", ")", "# timeit [90894] * 20330", "# ==> 10000 loops, best of 3: 141 us per loop", "# timeit (90894*np.ones(20330, dtype=int)).tolist()", "# ==> 1000 loops, best of 3: 548 us per loop", "col", "=", "(", "permutation", "(", "arange", "(", "1", ",", "S", "+", "1", ")", ")", "[", "0", ":", "n", "]", ")", ".", "tolist", "(", ")", "val", "=", "random", "(", "n", ")", "val", "=", "(", "val", "/", "val", ".", "sum", "(", ")", ")", ".", "tolist", "(", ")", "cmd", "=", "\"INSERT INTO transition%s VALUES(?, ?, ?)\"", "%", "a", "c", ".", "executemany", "(", "cmd", ",", "zip", "(", "[", "s", "]", "*", "n", ",", "col", ",", "val", ")", ")", "cmd", "=", "\"CREATE UNIQUE INDEX Pidx%s ON transition%s (row, col);\"", "%", "(", "a", ",", "a", ")", "c", ".", "execute", "(", "cmd", ")", "# return the name of teh database", "return", "db" ]
WARNING: This will delete a database with the same name as 'db'.
[ "WARNING", ":", "This", "will", "delete", "a", "database", "with", "the", "same", "name", "as", "db", "." ]
python
train
45.102564
poldracklab/niworkflows
niworkflows/interfaces/freesurfer.py
https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/interfaces/freesurfer.py#L402-L441
def grow_mask(anat, aseg, ants_segs=None, ww=7, zval=2.0, bw=4):
    """
    Grow mask including pixels that have a high likelihood.

    GM tissue parameters are sampled in image patches of ``ww`` size.

    This is inspired by mindboggle's solution to the problem:
    https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/segment.py#L1660

    """
    selem = sim.ball(bw)

    if ants_segs is None:
        ants_segs = np.zeros_like(aseg, dtype=np.uint8)

    aseg[aseg == 42] = 3  # Collapse both hemispheres
    gm = anat.copy()
    gm[aseg != 3] = 0

    refined = refine_aseg(aseg)
    newrefmask = sim.binary_dilation(refined, selem) - refined
    indices = np.argwhere(newrefmask > 0)
    for pixel in indices:
        # When ATROPOS identified the pixel as GM, set and carry on
        if ants_segs[tuple(pixel)] == 2:
            refined[tuple(pixel)] = 1
            continue

        window = gm[
            pixel[0] - ww:pixel[0] + ww,
            pixel[1] - ww:pixel[1] + ww,
            pixel[2] - ww:pixel[2] + ww
        ]
        if np.any(window > 0):
            mu = window[window > 0].mean()
            sigma = max(window[window > 0].std(), 1.e-5)
            zstat = abs(anat[tuple(pixel)] - mu) / sigma
            refined[tuple(pixel)] = int(zstat < zval)

    refined = sim.binary_opening(refined, selem)
    return refined
[ "def", "grow_mask", "(", "anat", ",", "aseg", ",", "ants_segs", "=", "None", ",", "ww", "=", "7", ",", "zval", "=", "2.0", ",", "bw", "=", "4", ")", ":", "selem", "=", "sim", ".", "ball", "(", "bw", ")", "if", "ants_segs", "is", "None", ":", "ants_segs", "=", "np", ".", "zeros_like", "(", "aseg", ",", "dtype", "=", "np", ".", "uint8", ")", "aseg", "[", "aseg", "==", "42", "]", "=", "3", "# Collapse both hemispheres", "gm", "=", "anat", ".", "copy", "(", ")", "gm", "[", "aseg", "!=", "3", "]", "=", "0", "refined", "=", "refine_aseg", "(", "aseg", ")", "newrefmask", "=", "sim", ".", "binary_dilation", "(", "refined", ",", "selem", ")", "-", "refined", "indices", "=", "np", ".", "argwhere", "(", "newrefmask", ">", "0", ")", "for", "pixel", "in", "indices", ":", "# When ATROPOS identified the pixel as GM, set and carry on", "if", "ants_segs", "[", "tuple", "(", "pixel", ")", "]", "==", "2", ":", "refined", "[", "tuple", "(", "pixel", ")", "]", "=", "1", "continue", "window", "=", "gm", "[", "pixel", "[", "0", "]", "-", "ww", ":", "pixel", "[", "0", "]", "+", "ww", ",", "pixel", "[", "1", "]", "-", "ww", ":", "pixel", "[", "1", "]", "+", "ww", ",", "pixel", "[", "2", "]", "-", "ww", ":", "pixel", "[", "2", "]", "+", "ww", "]", "if", "np", ".", "any", "(", "window", ">", "0", ")", ":", "mu", "=", "window", "[", "window", ">", "0", "]", ".", "mean", "(", ")", "sigma", "=", "max", "(", "window", "[", "window", ">", "0", "]", ".", "std", "(", ")", ",", "1.e-5", ")", "zstat", "=", "abs", "(", "anat", "[", "tuple", "(", "pixel", ")", "]", "-", "mu", ")", "/", "sigma", "refined", "[", "tuple", "(", "pixel", ")", "]", "=", "int", "(", "zstat", "<", "zval", ")", "refined", "=", "sim", ".", "binary_opening", "(", "refined", ",", "selem", ")", "return", "refined" ]
Grow mask including pixels that have a high likelihood.
GM tissue parameters are sampled in image patches of ``ww`` size.

This is inspired by mindboggle's solution to the problem:
https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/segment.py#L1660
[ "Grow", "mask", "including", "pixels", "that", "have", "a", "high", "likelihood", ".", "GM", "tissue", "parameters", "are", "sampled", "in", "image", "patches", "of", "ww", "size", "." ]
python
train
33.05
HumanCellAtlas/dcp-cli
hca/cli.py
https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/cli.py#L61-L81
def check_if_release_is_current(log): """Warns the user if their release is behind the latest PyPi __version__.""" if __version__ == '0.0.0': return client = xmlrpclib.ServerProxy('https://pypi.python.org/pypi') latest_pypi_version = client.package_releases('hca') latest_version_nums = [int(i) for i in latest_pypi_version[0].split('.')] this_version_nums = [int(i) for i in __version__.split('.')] for i in range(max([len(latest_version_nums), len(this_version_nums)])): try: if this_version_nums[i] < latest_version_nums[i]: log.warning('WARNING: Python (pip) package "hca" is not up-to-date!\n' 'You have hca version: ' + str(__version__) + '\n' 'Please use the latest hca version: ' + str(latest_pypi_version[0])) # handles the odd case where a user's current __version__ is higher than PyPi's elif this_version_nums[i] > latest_version_nums[i]: break # if 4.2 compared to 4.3.1, this handles the missing element except IndexError: pass
[ "def", "check_if_release_is_current", "(", "log", ")", ":", "if", "__version__", "==", "'0.0.0'", ":", "return", "client", "=", "xmlrpclib", ".", "ServerProxy", "(", "'https://pypi.python.org/pypi'", ")", "latest_pypi_version", "=", "client", ".", "package_releases", "(", "'hca'", ")", "latest_version_nums", "=", "[", "int", "(", "i", ")", "for", "i", "in", "latest_pypi_version", "[", "0", "]", ".", "split", "(", "'.'", ")", "]", "this_version_nums", "=", "[", "int", "(", "i", ")", "for", "i", "in", "__version__", ".", "split", "(", "'.'", ")", "]", "for", "i", "in", "range", "(", "max", "(", "[", "len", "(", "latest_version_nums", ")", ",", "len", "(", "this_version_nums", ")", "]", ")", ")", ":", "try", ":", "if", "this_version_nums", "[", "i", "]", "<", "latest_version_nums", "[", "i", "]", ":", "log", ".", "warning", "(", "'WARNING: Python (pip) package \"hca\" is not up-to-date!\\n'", "'You have hca version: '", "+", "str", "(", "__version__", ")", "+", "'\\n'", "'Please use the latest hca version: '", "+", "str", "(", "latest_pypi_version", "[", "0", "]", ")", ")", "# handles the odd case where a user's current __version__ is higher than PyPi's", "elif", "this_version_nums", "[", "i", "]", ">", "latest_version_nums", "[", "i", "]", ":", "break", "# if 4.2 compared to 4.3.1, this handles the missing element", "except", "IndexError", ":", "pass" ]
Warns the user if their release is behind the latest PyPi __version__.
[ "Warns", "the", "user", "if", "their", "release", "is", "behind", "the", "latest", "PyPi", "__version__", "." ]
python
train
53.857143
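A worked instance of the element-wise comparison above with the versions 4.2 (local) and 4.3.1 (PyPI); the IndexError guard covers the missing third element, and the break after the print is only to end the sketch.

this_version_nums = [4, 2]
latest_version_nums = [4, 3, 1]
for i in range(max(len(this_version_nums), len(latest_version_nums))):
    try:
        if this_version_nums[i] < latest_version_nums[i]:
            print('out of date at position', i)   # fires at i == 1
            break
        elif this_version_nums[i] > latest_version_nums[i]:
            break
    except IndexError:
        pass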
saltstack/salt
salt/modules/nix.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nix.py#L143-L183
def install(*pkgs, **kwargs):
    '''
    Installs a single or multiple packages via nix

    :type pkgs: list(str)
    :param pkgs: packages to install

    :param bool attributes: Pass the list of packages or single package as
        attributes, not package names.
        default: False

    :return: Installed packages. Example element: ``gcc-3.3.2``
    :rtype: list(str)

    .. code-block:: bash

        salt '*' nix.install package [package2 ...]
        salt '*' nix.install attributes=True attr.name [attr.name2 ...]
    '''
    attributes = kwargs.get('attributes', False)

    if not pkgs:
        return "Please specify a package or packages to install"

    cmd = _quietnix()
    cmd.append('--install')

    if attributes:
        cmd.extend(_zip_flatten('--attr', pkgs))
    else:
        cmd.extend(pkgs)

    out = _run(cmd)

    installs = list(itertools.chain.from_iterable(
        [s.split()[1:] for s in out['stderr'].splitlines()
         if s.startswith('installing')]
    ))

    return [_strip_quotes(s) for s in installs]
[ "def", "install", "(", "*", "pkgs", ",", "*", "*", "kwargs", ")", ":", "attributes", "=", "kwargs", ".", "get", "(", "'attributes'", ",", "False", ")", "if", "not", "pkgs", ":", "return", "\"Plese specify a package or packages to upgrade\"", "cmd", "=", "_quietnix", "(", ")", "cmd", ".", "append", "(", "'--install'", ")", "if", "kwargs", ".", "get", "(", "'attributes'", ",", "False", ")", ":", "cmd", ".", "extend", "(", "_zip_flatten", "(", "'--attr'", ",", "pkgs", ")", ")", "else", ":", "cmd", ".", "extend", "(", "pkgs", ")", "out", "=", "_run", "(", "cmd", ")", "installs", "=", "list", "(", "itertools", ".", "chain", ".", "from_iterable", "(", "[", "s", ".", "split", "(", ")", "[", "1", ":", "]", "for", "s", "in", "out", "[", "'stderr'", "]", ".", "splitlines", "(", ")", "if", "s", ".", "startswith", "(", "'installing'", ")", "]", ")", ")", "return", "[", "_strip_quotes", "(", "s", ")", "for", "s", "in", "installs", "]" ]
Installs a single or multiple packages via nix

:type pkgs: list(str)
:param pkgs: packages to install

:param bool attributes: Pass the list of packages or single package as
    attributes, not package names.
    default: False

:return: Installed packages. Example element: ``gcc-3.3.2``
:rtype: list(str)

.. code-block:: bash

    salt '*' nix.install package [package2 ...]
    salt '*' nix.install attributes=True attr.name [attr.name2 ...]
[ "Installs", "a", "single", "or", "multiple", "packages", "via", "nix" ]
python
train
25.390244
Qiskit/qiskit-terra
qiskit/tools/qi/qi.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/tools/qi/qi.py#L337-L344
def random_unitary_matrix(dim, seed=None): """Deprecated in 0.8+ """ warnings.warn('The random_unitary_matrix() function in qiskit.tools.qi has been ' 'deprecated and will be removed in the future. Instead use ' 'the function in qiskit.quantum_info.random', DeprecationWarning) return random.random_unitary(dim, seed).data
[ "def", "random_unitary_matrix", "(", "dim", ",", "seed", "=", "None", ")", ":", "warnings", ".", "warn", "(", "'The random_unitary_matrix() function in qiskit.tools.qi has been '", "'deprecated and will be removed in the future. Instead use '", "'the function in qiskit.quantum_info.random'", ",", "DeprecationWarning", ")", "return", "random", ".", "random_unitary", "(", "dim", ",", "seed", ")", ".", "data" ]
Deprecated in 0.8+
[ "Deprecated", "in", "0", ".", "8", "+" ]
python
test
48.125
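The replacement path named in the deprecation warning above, sketched under the assumption of a Qiskit install where qiskit.quantum_info exposes random_unitary returning an Operator with a .data ndarray.

from qiskit.quantum_info import random_unitary

op = random_unitary(2, seed=42)   # 2x2 random unitary as an Operator
matrix = op.data                  # the underlying complex ndarray
print(matrix.shape)               # (2, 2)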
data61/clkhash
clkhash/randomnames.py
https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/randomnames.py#L32-L43
def load_csv_data(resource_name): # type: (str) -> List[str] """ Loads first column of specified CSV file from package data. """ data_bytes = pkgutil.get_data('clkhash', 'data/{}'.format(resource_name)) if data_bytes is None: raise ValueError("No data resource found with name {}".format(resource_name)) else: data = data_bytes.decode('utf8') reader = csv.reader(data.splitlines()) next(reader, None) # skip the headers return [row[0] for row in reader]
[ "def", "load_csv_data", "(", "resource_name", ")", ":", "# type: (str) -> List[str]", "data_bytes", "=", "pkgutil", ".", "get_data", "(", "'clkhash'", ",", "'data/{}'", ".", "format", "(", "resource_name", ")", ")", "if", "data_bytes", "is", "None", ":", "raise", "ValueError", "(", "\"No data resource found with name {}\"", ".", "format", "(", "resource_name", ")", ")", "else", ":", "data", "=", "data_bytes", ".", "decode", "(", "'utf8'", ")", "reader", "=", "csv", ".", "reader", "(", "data", ".", "splitlines", "(", ")", ")", "next", "(", "reader", ",", "None", ")", "# skip the headers", "return", "[", "row", "[", "0", "]", "for", "row", "in", "reader", "]" ]
Loads first column of specified CSV file from package data.
[ "Loads", "first", "column", "of", "specified", "CSV", "file", "from", "package", "data", "." ]
python
train
42.25
Riffstation/flask-philo
flask_philo/db/postgresql/types.py
https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/db/postgresql/types.py#L76-L90
def _convert(self, value): """Returns a PasswordHash from the given string. PasswordHash instances or None values will return unchanged. Strings will be hashed and the resulting PasswordHash returned. Any other input will result in a TypeError. """ if isinstance(value, PasswordHash): return value elif isinstance(value, str): value = value.encode('utf-8') return PasswordHash.new(value, self.rounds) elif value is not None: raise TypeError( 'Cannot convert {} to a PasswordHash'.format(type(value)))
[ "def", "_convert", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "PasswordHash", ")", ":", "return", "value", "elif", "isinstance", "(", "value", ",", "str", ")", ":", "value", "=", "value", ".", "encode", "(", "'utf-8'", ")", "return", "PasswordHash", ".", "new", "(", "value", ",", "self", ".", "rounds", ")", "elif", "value", "is", "not", "None", ":", "raise", "TypeError", "(", "'Cannot convert {} to a PasswordHash'", ".", "format", "(", "type", "(", "value", ")", ")", ")" ]
Returns a PasswordHash from the given string. PasswordHash instances or None values will return unchanged. Strings will be hashed and the resulting PasswordHash returned. Any other input will result in a TypeError.
[ "Returns", "a", "PasswordHash", "from", "the", "given", "string", "." ]
python
train
41
titusz/epubcheck
src/epubcheck/utils.py
https://github.com/titusz/epubcheck/blob/7adde81543d3ae7385ab7062adb76e1414d49c2e/src/epubcheck/utils.py#L52-L61
def generate_sample_json(): """Generate sample json data for testing""" check = EpubCheck(samples.EPUB3_VALID) with open(samples.RESULT_VALID, 'wb') as jsonfile: jsonfile.write(check._stdout) check = EpubCheck(samples.EPUB3_INVALID) with open(samples.RESULT_INVALID, 'wb') as jsonfile: jsonfile.write(check._stdout)
[ "def", "generate_sample_json", "(", ")", ":", "check", "=", "EpubCheck", "(", "samples", ".", "EPUB3_VALID", ")", "with", "open", "(", "samples", ".", "RESULT_VALID", ",", "'wb'", ")", "as", "jsonfile", ":", "jsonfile", ".", "write", "(", "check", ".", "_stdout", ")", "check", "=", "EpubCheck", "(", "samples", ".", "EPUB3_INVALID", ")", "with", "open", "(", "samples", ".", "RESULT_INVALID", ",", "'wb'", ")", "as", "jsonfile", ":", "jsonfile", ".", "write", "(", "check", ".", "_stdout", ")" ]
Generate sample json data for testing
[ "Generate", "sample", "json", "data", "for", "testing" ]
python
train
34.4
facebook/codemod
codemod/query.py
https://github.com/facebook/codemod/blob/78bb627792fc8a5253baa9cd9d8160533b16fd85/codemod/query.py#L126-L167
def generate_patches(self):
        """
        Generates a list of patches for each file underneath
        self.root_directory that satisfy the given query conditions,
        where patches for each file are suggested by self.suggestor.
        """
        start_pos = self.start_position or Position(None, None)
        end_pos = self.end_position or Position(None, None)

        path_list = Query._walk_directory(self.root_directory)

        path_list = Query._sublist(path_list, start_pos.path, end_pos.path)
        path_list = (
            path for path in path_list
            if Query._path_looks_like_code(path) and
            (self.path_filter(path)) or
            (self.inc_extensionless and helpers.is_extensionless(path))
        )
        for path in path_list:
            try:
                lines = list(open(path))
            except (IOError, UnicodeDecodeError):
                # If we can't open the file--perhaps it's a symlink whose
                # destination no longer exists--then short-circuit.
                continue

            for patch in self.suggestor(lines):
                if path == start_pos.path:
                    if patch.start_line_number < start_pos.line_number:
                        continue  # suggestion is pre-start_pos
                if path == end_pos.path:
                    if patch.end_line_number >= end_pos.line_number:
                        break  # suggestion is post-end_pos

                old_lines = lines[
                    patch.start_line_number:patch.end_line_number]
                if patch.new_lines is None or patch.new_lines != old_lines:
                    patch.path = path
                    yield patch
                    # re-open file, in case contents changed
                    lines[:] = list(open(path))
[ "def", "generate_patches", "(", "self", ")", ":", "start_pos", "=", "self", ".", "start_position", "or", "Position", "(", "None", ",", "None", ")", "end_pos", "=", "self", ".", "end_position", "or", "Position", "(", "None", ",", "None", ")", "path_list", "=", "Query", ".", "_walk_directory", "(", "self", ".", "root_directory", ")", "path_list", "=", "Query", ".", "_sublist", "(", "path_list", ",", "start_pos", ".", "path", ",", "end_pos", ".", "path", ")", "path_list", "=", "(", "path", "for", "path", "in", "path_list", "if", "Query", ".", "_path_looks_like_code", "(", "path", ")", "and", "(", "self", ".", "path_filter", "(", "path", ")", ")", "or", "(", "self", ".", "inc_extensionless", "and", "helpers", ".", "is_extensionless", "(", "path", ")", ")", ")", "for", "path", "in", "path_list", ":", "try", ":", "lines", "=", "list", "(", "open", "(", "path", ")", ")", "except", "(", "IOError", ",", "UnicodeDecodeError", ")", ":", "# If we can't open the file--perhaps it's a symlink whose", "# destination no loner exists--then short-circuit.", "continue", "for", "patch", "in", "self", ".", "suggestor", "(", "lines", ")", ":", "if", "path", "==", "start_pos", ".", "path", ":", "if", "patch", ".", "start_line_number", "<", "start_pos", ".", "line_number", ":", "continue", "# suggestion is pre-start_pos", "if", "path", "==", "end_pos", ".", "path", ":", "if", "patch", ".", "end_line_number", ">=", "end_pos", ".", "line_number", ":", "break", "# suggestion is post-end_pos", "old_lines", "=", "lines", "[", "patch", ".", "start_line_number", ":", "patch", ".", "end_line_number", "]", "if", "patch", ".", "new_lines", "is", "None", "or", "patch", ".", "new_lines", "!=", "old_lines", ":", "patch", ".", "path", "=", "path", "yield", "patch", "# re-open file, in case contents changed", "lines", "[", ":", "]", "=", "list", "(", "open", "(", "path", ")", ")" ]
Generates a list of patches for each file underneath
self.root_directory that satisfy the given query conditions,
where patches for each file are suggested by self.suggestor.
[ "Generates", "a", "list", "of", "patches", "for", "each", "file", "underneath", "self", ".", "root_directory", "that", "satisfy", "the", "given", "query", "conditions", "where", "patches", "for", "each", "file", "are", "suggested", "by", "self", ".", "suggestor", "." ]
python
train
42.928571
gmr/tornado-elasticsearch
tornado_elasticsearch.py
https://github.com/gmr/tornado-elasticsearch/blob/fafe0de680277ce6faceb7449ded0b33822438d0/tornado_elasticsearch.py#L349-L372
def exists(self, index, id, doc_type='_all', params=None):
        """
        Returns a boolean indicating whether or not a given document exists
        in Elasticsearch.
        `<http://elasticsearch.org/guide/reference/api/get/>`_

        :arg index: The name of the index
        :arg id: The document ID
        :arg doc_type: The type of the document (uses `_all` by default to
            fetch the first document matching the ID across all types)
        :arg parent: The ID of the parent document
        :arg preference: Specify the node or shard the operation should be
            performed on (default: random)
        :arg realtime: Specify whether to perform the operation in realtime or
            search mode
        :arg refresh: Refresh the shard containing the document before
            performing the operation
        :arg routing: Specific routing value
        """
        try:
            self.transport.perform_request(
                'HEAD', _make_path(index, doc_type, id), params=params)
        except exceptions.NotFoundError:
            raise gen.Return(False)
        raise gen.Return(True)
[ "def", "exists", "(", "self", ",", "index", ",", "id", ",", "doc_type", "=", "'_all'", ",", "params", "=", "None", ")", ":", "try", ":", "self", ".", "transport", ".", "perform_request", "(", "'HEAD'", ",", "_make_path", "(", "index", ",", "doc_type", ",", "id", ")", ",", "params", "=", "params", ")", "except", "exceptions", ".", "NotFoundError", ":", "return", "gen", ".", "Return", "(", "False", ")", "raise", "gen", ".", "Return", "(", "True", ")" ]
Returns a boolean indicating whether or not a given document exists
in Elasticsearch.
`<http://elasticsearch.org/guide/reference/api/get/>`_

:arg index: The name of the index
:arg id: The document ID
:arg doc_type: The type of the document (uses `_all` by default to
    fetch the first document matching the ID across all types)
:arg parent: The ID of the parent document
:arg preference: Specify the node or shard the operation should be
    performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
    search mode
:arg refresh: Refresh the shard containing the document before
    performing the operation
:arg routing: Specific routing value
[ "Returns", "a", "boolean", "indicating", "whether", "or", "not", "a", "given", "document", "exists", "in", "Elasticsearch", ".", "<http", ":", "//", "elasticsearch", ".", "org", "/", "guide", "/", "reference", "/", "api", "/", "get", "/", ">", "_" ]
python
test
45.791667
pschmitt/pykeepass
pykeepass/kdbx_parsing/kdbx4.py
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/kdbx4.py#L64-L79
def compute_header_hmac_hash(context): """Compute HMAC-SHA256 hash of header. Used to prevent header tampering.""" return hmac.new( hashlib.sha512( b'\xff' * 8 + hashlib.sha512( context._.header.value.dynamic_header.master_seed.data + context.transformed_key + b'\x01' ).digest() ).digest(), context._.header.data, hashlib.sha256 ).digest()
[ "def", "compute_header_hmac_hash", "(", "context", ")", ":", "return", "hmac", ".", "new", "(", "hashlib", ".", "sha512", "(", "b'\\xff'", "*", "8", "+", "hashlib", ".", "sha512", "(", "context", ".", "_", ".", "header", ".", "value", ".", "dynamic_header", ".", "master_seed", ".", "data", "+", "context", ".", "transformed_key", "+", "b'\\x01'", ")", ".", "digest", "(", ")", ")", ".", "digest", "(", ")", ",", "context", ".", "_", ".", "header", ".", "data", ",", "hashlib", ".", "sha256", ")", ".", "digest", "(", ")" ]
Compute HMAC-SHA256 hash of header. Used to prevent header tampering.
[ "Compute", "HMAC", "-", "SHA256", "hash", "of", "header", ".", "Used", "to", "prevent", "header", "tampering", "." ]
python
train
28.625
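A standalone restatement of the HMAC recipe above with dummy byte strings; in a real KDBX4 file the seed, key, and header bytes come from the parsed container, and b'\xff' * 8 corresponds to the uint64-max block index the format reserves for the header.

import hashlib
import hmac

master_seed = b'\x00' * 32        # placeholder for the file's master seed
transformed_key = b'\x11' * 32    # placeholder for the derived key
header_bytes = b'raw-header-bytes'

hmac_key = hashlib.sha512(master_seed + transformed_key + b'\x01').digest()
block_key = hashlib.sha512(b'\xff' * 8 + hmac_key).digest()
print(hmac.new(block_key, header_bytes, hashlib.sha256).hexdigest())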
axel-events/axel
axel/axel.py
https://github.com/axel-events/axel/blob/08a663347ef21614b96f92f60f4de57a502db73c/axel/axel.py#L254-L259
def clear(self): """ Discards all registered handlers and cached results """ with self._hlock: self.handlers.clear() with self._mlock: self.memoize.clear()
[ "def", "clear", "(", "self", ")", ":", "with", "self", ".", "_hlock", ":", "self", ".", "handlers", ".", "clear", "(", ")", "with", "self", ".", "_mlock", ":", "self", ".", "memoize", ".", "clear", "(", ")" ]
Discards all registered handlers and cached results
[ "Discards", "all", "registered", "handlers", "and", "cached", "results" ]
python
train
33.833333
Esri/ArcREST
src/arcrest/manageportal/administration.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageportal/administration.py#L459-L488
def generateCertificate(self,
                            alias,
                            commonName,
                            organizationalUnit,
                            city, state, country,
                            keyalg="RSA",
                            keysize=1024,
                            sigalg="SHA256withRSA",
                            validity=90
                            ):
        """
        Use this operation to create a self-signed certificate or as a
        starting point for getting a production-ready CA-signed
        certificate. The portal will generate a certificate for you and
        store it in its keystore.
        """
        params = {"f" : "json",
                  "alias" : alias,
                  "commonName" : commonName,
                  "organizationalUnit" : organizationalUnit,
                  "city" : city,
                  "state" : state,
                  "country" : country,
                  "keyalg" : keyalg,
                  "keysize" : keysize,
                  "sigalg" : sigalg,
                  "validity" : validity
                  }
        url = self._url + "/SSLCertificate/generateCertificate"
        return self._post(url=url,
                          param_dict=params,
                          proxy_port=self._proxy_port,
                          proxy_url=self._proxy_url)
[ "def", "generateCertificate", "(", "self", ",", "alias", ",", "commonName", ",", "organizationalUnit", ",", "city", ",", "state", ",", "country", ",", "keyalg", "=", "\"RSA\"", ",", "keysize", "=", "1024", ",", "sigalg", "=", "\"SHA256withRSA\"", ",", "validity", "=", "90", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"alias\"", ":", "alias", ",", "\"commonName\"", ":", "commonName", ",", "\"organizationalUnit\"", ":", "organizationalUnit", ",", "\"city\"", ":", "city", ",", "\"state\"", ":", "state", ",", "\"country\"", ":", "country", ",", "\"keyalg\"", ":", "keyalg", ",", "\"keysize\"", ":", "keysize", ",", "\"sigalg\"", ":", "sigalg", ",", "\"validity\"", ":", "validity", "}", "url", "=", "self", ".", "_url", "+", "\"/SSLCertificate/ generateCertificate\"", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "proxy_port", "=", "self", ".", "_proxy_port", ",", "proxy_url", "=", "self", ".", "_proxy_url", ")" ]
Use this operation to create a self-signed certificate or as a starting point for getting a production-ready CA-signed certificate. The portal will generate a certificate for you and store it in its keystore.
[ "Use", "this", "operation", "to", "create", "a", "self", "-", "signed", "certificate", "or", "as", "a", "starting", "point", "for", "getting", "a", "production", "-", "ready", "CA", "-", "signed", "certificate", ".", "The", "portal", "will", "generate", "a", "certificate", "for", "you", "and", "store", "it", "in", "its", "keystore", "." ]
python
train
42.233333
tanghaibao/jcvi
jcvi/formats/fasta.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fasta.py#L1934-L1958
def uniq(args): """ %prog uniq fasta uniq.fasta remove fasta records that are the same """ p = OptionParser(uniq.__doc__) p.add_option("--seq", default=False, action="store_true", help="Uniqify the sequences [default: %default]") p.add_option("-t", "--trimname", dest="trimname", action="store_true", default=False, help="turn on the defline trim to first space [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(p.print_help()) fastafile, uniqfastafile = args fw = must_open(uniqfastafile, "w") seq = opts.seq for rec in _uniq_rec(fastafile, seq=seq): if opts.trimname: rec.description = "" SeqIO.write([rec], fw, "fasta")
[ "def", "uniq", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "uniq", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--seq\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Uniqify the sequences [default: %default]\"", ")", "p", ".", "add_option", "(", "\"-t\"", ",", "\"--trimname\"", ",", "dest", "=", "\"trimname\"", ",", "action", "=", "\"store_true\"", ",", "default", "=", "False", ",", "help", "=", "\"turn on the defline trim to first space [default: %default]\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "p", ".", "print_help", "(", ")", ")", "fastafile", ",", "uniqfastafile", "=", "args", "fw", "=", "must_open", "(", "uniqfastafile", ",", "\"w\"", ")", "seq", "=", "opts", ".", "seq", "for", "rec", "in", "_uniq_rec", "(", "fastafile", ",", "seq", "=", "seq", ")", ":", "if", "opts", ".", "trimname", ":", "rec", ".", "description", "=", "\"\"", "SeqIO", ".", "write", "(", "[", "rec", "]", ",", "fw", ",", "\"fasta\"", ")" ]
%prog uniq fasta uniq.fasta remove fasta records that are the same
[ "%prog", "uniq", "fasta", "uniq", ".", "fasta" ]
python
train
30.32
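A minimal invocation sketch for the action above (file names are hypothetical):

from jcvi.formats.fasta import uniq

# dedupe by sequence content rather than by record name
uniq(["input.fasta", "dedup.fasta", "--seq"])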
ThreatConnect-Inc/tcex
tcex/tcex_ti/tcex_ti.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti/tcex_ti.py#L356-L371
def signature(self, name, file_name, file_type, file_content, owner=None, **kwargs): """ Create the Signature TI object. Args: owner: file_content: file_name: file_type: name: **kwargs: Return: """ return Signature(self.tcex, name, file_name, file_type, file_content, owner=owner, **kwargs)
[ "def", "signature", "(", "self", ",", "name", ",", "file_name", ",", "file_type", ",", "file_content", ",", "owner", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "Signature", "(", "self", ".", "tcex", ",", "name", ",", "file_name", ",", "file_type", ",", "file_content", ",", "owner", "=", "owner", ",", "*", "*", "kwargs", ")" ]
Create the Signature TI object. Args: owner: file_content: file_name: file_type: name: **kwargs: Return:
[ "Create", "the", "Signature", "TI", "object", "." ]
python
train
24.875
fastai/fastai
old/fastai/structured.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/old/fastai/structured.py#L178-L235
def fix_missing(df, col, name, na_dict): """ Fill missing data in a column of df with the median, and add a {name}_na column which specifies if the data was missing. Parameters: ----------- df: The data frame that will be changed. col: The column of data to fix by filling in missing data. name: The name of the new filled column in df. na_dict: A dictionary of values to create na's of and the value to insert. If name is not a key of na_dict the median will fill any missing data. Also if name is not a key of na_dict and there is no missing data in col, then no {name}_na column is not created. Examples: --------- >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> fix_missing(df, df['col1'], 'col1', {}) >>> df col1 col2 col1_na 0 1 5 False 1 2 2 True 2 3 2 False >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> fix_missing(df, df['col2'], 'col2', {}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> fix_missing(df, df['col1'], 'col1', {'col1' : 500}) >>> df col1 col2 col1_na 0 1 5 False 1 500 2 True 2 3 2 False """ if is_numeric_dtype(col): if pd.isnull(col).sum() or (name in na_dict): df[name+'_na'] = pd.isnull(col) filler = na_dict[name] if name in na_dict else col.median() df[name] = col.fillna(filler) na_dict[name] = filler return na_dict
[ "def", "fix_missing", "(", "df", ",", "col", ",", "name", ",", "na_dict", ")", ":", "if", "is_numeric_dtype", "(", "col", ")", ":", "if", "pd", ".", "isnull", "(", "col", ")", ".", "sum", "(", ")", "or", "(", "name", "in", "na_dict", ")", ":", "df", "[", "name", "+", "'_na'", "]", "=", "pd", ".", "isnull", "(", "col", ")", "filler", "=", "na_dict", "[", "name", "]", "if", "name", "in", "na_dict", "else", "col", ".", "median", "(", ")", "df", "[", "name", "]", "=", "col", ".", "fillna", "(", "filler", ")", "na_dict", "[", "name", "]", "=", "filler", "return", "na_dict" ]
Fill missing data in a column of df with the median, and add a {name}_na column
which specifies if the data was missing.

Parameters:
-----------
df: The data frame that will be changed.
col: The column of data to fix by filling in missing data.
name: The name of the new filled column in df.
na_dict: A dictionary of values to create na's of and the value to insert. If
    name is not a key of na_dict the median will fill any missing data. Also
    if name is not a key of na_dict and there is no missing data in col,
    then the {name}_na column is not created.

Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]})
>>> df
   col1 col2
0     1    5
1   nan    2
2     3    2
>>> fix_missing(df, df['col1'], 'col1', {})
>>> df
   col1 col2 col1_na
0     1    5   False
1     2    2    True
2     3    2   False
>>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]})
>>> df
   col1 col2
0     1    5
1   nan    2
2     3    2
>>> fix_missing(df, df['col2'], 'col2', {})
>>> df
   col1 col2
0     1    5
1   nan    2
2     3    2
>>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]})
>>> df
   col1 col2
0     1    5
1   nan    2
2     3    2
>>> fix_missing(df, df['col1'], 'col1', {'col1' : 500})
>>> df
   col1 col2 col1_na
0     1    5   False
1   500    2    True
2     3    2   False
[ "Fill", "missing", "data", "in", "a", "column", "of", "df", "with", "the", "median", "and", "add", "a", "{", "name", "}", "_na", "column", "which", "specifies", "if", "the", "data", "was", "missing", ".", "Parameters", ":", "-----------", "df", ":", "The", "data", "frame", "that", "will", "be", "changed", ".", "col", ":", "The", "column", "of", "data", "to", "fix", "by", "filling", "in", "missing", "data", ".", "name", ":", "The", "name", "of", "the", "new", "filled", "column", "in", "df", ".", "na_dict", ":", "A", "dictionary", "of", "values", "to", "create", "na", "s", "of", "and", "the", "value", "to", "insert", ".", "If", "name", "is", "not", "a", "key", "of", "na_dict", "the", "median", "will", "fill", "any", "missing", "data", ".", "Also", "if", "name", "is", "not", "a", "key", "of", "na_dict", "and", "there", "is", "no", "missing", "data", "in", "col", "then", "no", "{", "name", "}", "_na", "column", "is", "not", "created", ".", "Examples", ":", "---------", ">>>", "df", "=", "pd", ".", "DataFrame", "(", "{", "col1", ":", "[", "1", "np", ".", "NaN", "3", "]", "col2", ":", "[", "5", "2", "2", "]", "}", ")", ">>>", "df", "col1", "col2", "0", "1", "5", "1", "nan", "2", "2", "3", "2", ">>>", "fix_missing", "(", "df", "df", "[", "col1", "]", "col1", "{}", ")", ">>>", "df", "col1", "col2", "col1_na", "0", "1", "5", "False", "1", "2", "2", "True", "2", "3", "2", "False", ">>>", "df", "=", "pd", ".", "DataFrame", "(", "{", "col1", ":", "[", "1", "np", ".", "NaN", "3", "]", "col2", ":", "[", "5", "2", "2", "]", "}", ")", ">>>", "df", "col1", "col2", "0", "1", "5", "1", "nan", "2", "2", "3", "2", ">>>", "fix_missing", "(", "df", "df", "[", "col2", "]", "col2", "{}", ")", ">>>", "df", "col1", "col2", "0", "1", "5", "1", "nan", "2", "2", "3", "2", ">>>", "df", "=", "pd", ".", "DataFrame", "(", "{", "col1", ":", "[", "1", "np", ".", "NaN", "3", "]", "col2", ":", "[", "5", "2", "2", "]", "}", ")", ">>>", "df", "col1", "col2", "0", "1", "5", "1", "nan", "2", "2", "3", "2", ">>>", "fix_missing", "(", "df", "df", "[", "col1", "]", "col1", "{", "col1", ":", "500", "}", ")", ">>>", "df", "col1", "col2", "col1_na", "0", "1", "5", "False", "1", "500", "2", "True", "2", "3", "2", "False" ]
python
train
31.741379
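A runnable sketch of the behavior documented above (the import path follows the old fastai layout shown in this record):

import numpy as np
import pandas as pd
from fastai.structured import fix_missing

df = pd.DataFrame({'col1': [1, np.nan, 3]})
na_dict = fix_missing(df, df['col1'], 'col1', {})
# df['col1'] -> [1.0, 2.0, 3.0]; df['col1_na'] -> [False, True, False]
# na_dict -> {'col1': 2.0}, the median used as the filler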
uw-it-aca/uw-restclients
restclients/uwnetid/subscription.py
https://github.com/uw-it-aca/uw-restclients/blob/e12dcd32bf5296b6ebdf71798031594afb7852cb/restclients/uwnetid/subscription.py#L16-L31
def get_email_forwarding(netid): """ Return a restclients.models.uwnetid.UwEmailForwarding object for the given uwnetid """ subscriptions = get_netid_subscriptions(netid, Subscription.SUBS_CODE_U_FORWARDING) for subscription in subscriptions: if subscription.subscription_code == Subscription.SUBS_CODE_U_FORWARDING: return_obj = UwEmailForwarding() if subscription.data_value: return_obj.fwd = subscription.data_value return_obj.permitted = subscription.permitted return_obj.status = subscription.status_name return return_obj return None
[ "def", "get_email_forwarding", "(", "netid", ")", ":", "subscriptions", "=", "get_netid_subscriptions", "(", "netid", ",", "Subscription", ".", "SUBS_CODE_U_FORWARDING", ")", "for", "subscription", "in", "subscriptions", ":", "if", "subscription", ".", "subscription_code", "==", "Subscription", ".", "SUBS_CODE_U_FORWARDING", ":", "return_obj", "=", "UwEmailForwarding", "(", ")", "if", "subscription", ".", "data_value", ":", "return_obj", ".", "fwd", "=", "subscription", ".", "data_value", "return_obj", ".", "permitted", "=", "subscription", ".", "permitted", "return_obj", ".", "status", "=", "subscription", ".", "status_name", "return", "return_obj", "return", "None" ]
Return a restclients.models.uwnetid.UwEmailForwarding object for the given uwnetid
[ "Return", "a", "restclients", ".", "models", ".", "uwnetid", ".", "UwEmailForwarding", "object", "for", "the", "given", "uwnetid" ]
python
train
39.75
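A usage sketch, assuming the client is configured against a live or mock UWNetID service ('javerage' is a hypothetical netid):

from restclients.uwnetid.subscription import get_email_forwarding

forwarding = get_email_forwarding('javerage')
if forwarding is not None and forwarding.permitted:
    print(forwarding.fwd, forwarding.status)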
chaoss/grimoirelab-perceval
perceval/backends/core/mbox.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/mbox.py#L238-L247
def _copy_mbox(self, mbox): """Copy the contents of a mbox to a temporary file""" tmp_path = tempfile.mktemp(prefix='perceval_') with mbox.container as f_in: with open(tmp_path, mode='wb') as f_out: for l in f_in: f_out.write(l) return tmp_path
[ "def", "_copy_mbox", "(", "self", ",", "mbox", ")", ":", "tmp_path", "=", "tempfile", ".", "mktemp", "(", "prefix", "=", "'perceval_'", ")", "with", "mbox", ".", "container", "as", "f_in", ":", "with", "open", "(", "tmp_path", ",", "mode", "=", "'wb'", ")", "as", "f_out", ":", "for", "l", "in", "f_in", ":", "f_out", ".", "write", "(", "l", ")", "return", "tmp_path" ]
Copy the contents of a mbox to a temporary file
[ "Copy", "the", "contents", "of", "a", "mbox", "to", "a", "temporary", "file" ]
python
test
31.7
daviddrysdale/python-phonenumbers
python/phonenumbers/phonenumberutil.py
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/phonenumberutil.py#L1709-L1769
def invalid_example_number(region_code): """Gets an invalid number for the specified region. This is useful for unit-testing purposes, where you want to test what will happen with an invalid number. Note that the number that is returned will always be able to be parsed and will have the correct country code. It may also be a valid *short* number/code for this region. Validity checking such numbers is handled with shortnumberinfo. Arguments: region_code -- The region for which an example number is needed. Returns an invalid number for the specified region. Returns None when an unsupported region or the region 001 (Earth) is passed in. """ if not _is_valid_region_code(region_code): return None # We start off with a valid fixed-line number since every country # supports this. Alternatively we could start with a different number # type, since fixed-line numbers typically have a wide breadth of valid # number lengths and we may have to make it very short before we get an # invalid number. metadata = PhoneMetadata.metadata_for_region(region_code.upper()) desc = _number_desc_by_type(metadata, PhoneNumberType.FIXED_LINE) if desc is None or desc.example_number is None: # This shouldn't happen; we have a test for this. return None # pragma no cover example_number = desc.example_number # Try and make the number invalid. We do this by changing the length. We # try reducing the length of the number, since currently no region has a # number that is the same length as MIN_LENGTH_FOR_NSN. This is probably # quicker than making the number longer, which is another # alternative. We could also use the possible number pattern to extract # the possible lengths of the number to make this faster, but this # method is only for unit-testing so simplicity is preferred to # performance. We don't want to return a number that can't be parsed, # so we check the number is long enough. We try all possible lengths # because phone number plans often have overlapping prefixes so the # number 123456 might be valid as a fixed-line number, and 12345 as a # mobile number. It would be faster to loop in a different order, but we # prefer numbers that look closer to real numbers (and it gives us a # variety of different lengths for the resulting phone numbers - # otherwise they would all be MIN_LENGTH_FOR_NSN digits long.) phone_number_length = len(example_number) - 1 while phone_number_length >= _MIN_LENGTH_FOR_NSN: number_to_try = example_number[:phone_number_length] try: possibly_valid_number = parse(number_to_try, region_code) if not is_valid_number(possibly_valid_number): return possibly_valid_number except NumberParseException: # pragma no cover # Shouldn't happen: we have already checked the length, we know # example numbers have only valid digits, and we know the region # code is fine. pass phone_number_length -= 1 # We have a test to check that this doesn't happen for any of our # supported regions. return None
[ "def", "invalid_example_number", "(", "region_code", ")", ":", "if", "not", "_is_valid_region_code", "(", "region_code", ")", ":", "return", "None", "# We start off with a valid fixed-line number since every country", "# supports this. Alternatively we could start with a different number", "# type, since fixed-line numbers typically have a wide breadth of valid", "# number lengths and we may have to make it very short before we get an", "# invalid number.", "metadata", "=", "PhoneMetadata", ".", "metadata_for_region", "(", "region_code", ".", "upper", "(", ")", ")", "desc", "=", "_number_desc_by_type", "(", "metadata", ",", "PhoneNumberType", ".", "FIXED_LINE", ")", "if", "desc", "is", "None", "or", "desc", ".", "example_number", "is", "None", ":", "# This shouldn't happen; we have a test for this.", "return", "None", "# pragma no cover", "example_number", "=", "desc", ".", "example_number", "# Try and make the number invalid. We do this by changing the length. We", "# try reducing the length of the number, since currently no region has a", "# number that is the same length as MIN_LENGTH_FOR_NSN. This is probably", "# quicker than making the number longer, which is another", "# alternative. We could also use the possible number pattern to extract", "# the possible lengths of the number to make this faster, but this", "# method is only for unit-testing so simplicity is preferred to", "# performance. We don't want to return a number that can't be parsed,", "# so we check the number is long enough. We try all possible lengths", "# because phone number plans often have overlapping prefixes so the", "# number 123456 might be valid as a fixed-line number, and 12345 as a", "# mobile number. It would be faster to loop in a different order, but we", "# prefer numbers that look closer to real numbers (and it gives us a", "# variety of different lengths for the resulting phone numbers -", "# otherwise they would all be MIN_LENGTH_FOR_NSN digits long.)", "phone_number_length", "=", "len", "(", "example_number", ")", "-", "1", "while", "phone_number_length", ">=", "_MIN_LENGTH_FOR_NSN", ":", "number_to_try", "=", "example_number", "[", ":", "phone_number_length", "]", "try", ":", "possibly_valid_number", "=", "parse", "(", "number_to_try", ",", "region_code", ")", "if", "not", "is_valid_number", "(", "possibly_valid_number", ")", ":", "return", "possibly_valid_number", "except", "NumberParseException", ":", "# pragma no cover", "# Shouldn't happen: we have already checked the length, we know", "# example numbers have only valid digits, and we know the region", "# code is fine.", "pass", "phone_number_length", "-=", "1", "# We have a test to check that this doesn't happen for any of our", "# supported regions.", "return", "None" ]
Gets an invalid number for the specified region. This is useful for unit-testing purposes, where you want to test what will happen with an invalid number. Note that the number that is returned will always be able to be parsed and will have the correct country code. It may also be a valid *short* number/code for this region. Validity checking such numbers is handled with shortnumberinfo. Arguments: region_code -- The region for which an example number is needed. Returns an invalid number for the specified region. Returns None when an unsupported region or the region 001 (Earth) is passed in.
[ "Gets", "an", "invalid", "number", "for", "the", "specified", "region", "." ]
python
train
52.245902
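The helper lives in phonenumberutil and, in current releases, is re-exported at package level (an assumption here); a short check-style example with an arbitrary region code:

import phonenumbers

number = phonenumbers.invalid_example_number("GB")
if number is not None:
    # parseable, with the correct country code, but never valid
    assert not phonenumbers.is_valid_number(number)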
pavoni/pyvera
pyvera/__init__.py
https://github.com/pavoni/pyvera/blob/e05e3d13f76153444787d31948feb5419d77a8c8/pyvera/__init__.py#L137-L154
def get_device_by_name(self, device_name): """Search the list of connected devices by name. device_name param is the string name of the device """ # Find the device for the vera device name we are interested in found_device = None for device in self.get_devices(): if device.name == device_name: found_device = device # found the first (and should be only) one so we will finish break if found_device is None: logger.debug('Did not find device with {}'.format(device_name)) return found_device
[ "def", "get_device_by_name", "(", "self", ",", "device_name", ")", ":", "# Find the device for the vera device name we are interested in", "found_device", "=", "None", "for", "device", "in", "self", ".", "get_devices", "(", ")", ":", "if", "device", ".", "name", "==", "device_name", ":", "found_device", "=", "device", "# found the first (and should be only) one so we will finish", "break", "if", "found_device", "is", "None", ":", "logger", ".", "debug", "(", "'Did not find device with {}'", ".", "format", "(", "device_name", ")", ")", "return", "found_device" ]
Search the list of connected devices by name. device_name param is the string name of the device
[ "Search", "the", "list", "of", "connected", "devices", "by", "name", "." ]
python
train
33.888889
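A lookup sketch, assuming a reachable Vera hub (the address and device name are hypothetical):

import pyvera

controller = pyvera.VeraController("http://192.168.1.161:3480")
device = controller.get_device_by_name("Kitchen Light")
if device is not None:
    print(device.name)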
RedisJSON/rejson-py
rejson/client.py
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L229-L234
def jsonarrtrim(self, name, path, start, stop): """ Trim the array JSON value under ``path`` at key ``name`` to the inclusive range given by ``start`` and ``stop`` """ return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
[ "def", "jsonarrtrim", "(", "self", ",", "name", ",", "path", ",", "start", ",", "stop", ")", ":", "return", "self", ".", "execute_command", "(", "'JSON.ARRTRIM'", ",", "name", ",", "str_path", "(", "path", ")", ",", "start", ",", "stop", ")" ]
Trim the array JSON value under ``path`` at key ``name`` to the inclusive range given by ``start`` and ``stop``
[ "Trim", "the", "array", "JSON", "value", "under", "path", "at", "key", "name", "to", "the", "inclusive", "range", "given", "by", "start", "and", "stop" ]
python
train
47
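A round-trip sketch against a local Redis with the ReJSON module loaded (the key name is arbitrary):

from rejson import Client, Path

rj = Client(host='localhost', port=6379, decode_responses=True)
rj.jsonset('scores', Path.rootPath(), [10, 20, 30, 40, 50])
rj.jsonarrtrim('scores', Path.rootPath(), 1, 3)   # inclusive range
rj.jsonget('scores', Path.rootPath())             # [20, 30, 40]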
nugget/python-insteonplm
insteonplm/tools.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/tools.py#L346-L350
def kpl_set_on_mask(self, address, group, mask): """Set the on mask of a KPL button.""" addr = Address(address) device = self.plm.devices[addr.id] device.states[group].set_on_mask(mask)
[ "def", "kpl_set_on_mask", "(", "self", ",", "address", ",", "group", ",", "mask", ")", ":", "addr", "=", "Address", "(", "address", ")", "device", "=", "self", ".", "plm", ".", "devices", "[", "addr", ".", "id", "]", "device", ".", "states", "[", "group", "]", ".", "set_on_mask", "(", "mask", ")" ]
Set the on mask of a KPL button.
[ "Set", "the", "on", "mask", "of", "a", "KPL", "button", "." ]
python
train
42.4
pyviz/holoviews
holoviews/plotting/bokeh/path.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/bokeh/path.py#L250-L267
def _init_glyph(self, plot, mapping, properties): """ Returns a Bokeh glyph object. """ plot_method = properties.pop('plot_method', None) properties = mpl_to_bokeh(properties) data = dict(properties, **mapping) if self._has_holes: plot_method = 'multi_polygons' elif plot_method is None: plot_method = self._plot_methods.get('single') renderer = getattr(plot, plot_method)(**data) if self.colorbar: for k, v in list(self.handles.items()): if not k.endswith('color_mapper'): continue self._draw_colorbar(plot, v, k[:-12]) return renderer, renderer.glyph
[ "def", "_init_glyph", "(", "self", ",", "plot", ",", "mapping", ",", "properties", ")", ":", "plot_method", "=", "properties", ".", "pop", "(", "'plot_method'", ",", "None", ")", "properties", "=", "mpl_to_bokeh", "(", "properties", ")", "data", "=", "dict", "(", "properties", ",", "*", "*", "mapping", ")", "if", "self", ".", "_has_holes", ":", "plot_method", "=", "'multi_polygons'", "elif", "plot_method", "is", "None", ":", "plot_method", "=", "self", ".", "_plot_methods", ".", "get", "(", "'single'", ")", "renderer", "=", "getattr", "(", "plot", ",", "plot_method", ")", "(", "*", "*", "data", ")", "if", "self", ".", "colorbar", ":", "for", "k", ",", "v", "in", "list", "(", "self", ".", "handles", ".", "items", "(", ")", ")", ":", "if", "not", "k", ".", "endswith", "(", "'color_mapper'", ")", ":", "continue", "self", ".", "_draw_colorbar", "(", "plot", ",", "v", ",", "k", "[", ":", "-", "12", "]", ")", "return", "renderer", ",", "renderer", ".", "glyph" ]
Returns a Bokeh glyph object.
[ "Returns", "a", "Bokeh", "glyph", "object", "." ]
python
train
39.5
tipsi/aiozk
aiozk/protocol/primitives.py
https://github.com/tipsi/aiozk/blob/96d2f543de248c6d993b5bfe6621167dd1eb8223/aiozk/protocol/primitives.py#L91-L108
def parse(cls, buff, offset): """ Given a buffer and offset, returns the parsed value and new offset. Parses the ``size_primitive`` first to determine how many more bytes to consume to extract the value. """ size, offset = cls.size_primitive.parse(buff, offset) if size == -1: return None, offset var_struct = struct.Struct("!%ds" % size) value = var_struct.unpack_from(buff, offset)[0] value = cls.parse_value(value) offset += var_struct.size return value, offset
[ "def", "parse", "(", "cls", ",", "buff", ",", "offset", ")", ":", "size", ",", "offset", "=", "cls", ".", "size_primitive", ".", "parse", "(", "buff", ",", "offset", ")", "if", "size", "==", "-", "1", ":", "return", "None", ",", "offset", "var_struct", "=", "struct", ".", "Struct", "(", "\"!%ds\"", "%", "size", ")", "value", "=", "var_struct", ".", "unpack_from", "(", "buff", ",", "offset", ")", "[", "0", "]", "value", "=", "cls", ".", "parse_value", "(", "value", ")", "offset", "+=", "var_struct", ".", "size", "return", "value", ",", "offset" ]
Given a buffer and offset, returns the parsed value and new offset. Parses the ``size_primitive`` first to determine how many more bytes to consume to extract the value.
[ "Given", "a", "buffer", "and", "offset", "returns", "the", "parsed", "value", "and", "new", "offset", "." ]
python
train
31.055556
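The length-prefixed layout the method expects can be illustrated without the class machinery; a self-contained sketch of the same parse logic, assuming a 4-byte signed length like the Int size primitive:

import struct

def parse_length_prefixed(buff, offset):
    # a 4-byte signed big-endian length, then that many raw bytes; -1 means null
    size, = struct.unpack_from("!i", buff, offset)
    offset += 4
    if size == -1:
        return None, offset
    var_struct = struct.Struct("!%ds" % size)
    value = var_struct.unpack_from(buff, offset)[0]
    return value, offset + var_struct.size

buff = struct.pack("!i", 5) + b"hello"
print(parse_length_prefixed(buff, 0))   # (b'hello', 9)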
crackinglandia/pype32
pype32/pype32.py
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L838-L847
def isPe32(self): """ Determines if the current L{PE} instance is a PE32 file. @rtype: bool @return: C{True} if the current L{PE} instance is a PE32 file. Otherwise, returns C{False}. """ if self.ntHeaders.optionalHeader.magic.value == consts.PE32: return True return False
[ "def", "isPe32", "(", "self", ")", ":", "if", "self", ".", "ntHeaders", ".", "optionalHeader", ".", "magic", ".", "value", "==", "consts", ".", "PE32", ":", "return", "True", "return", "False" ]
Determines if the current L{PE} instance is a PE32 file. @rtype: bool @return: C{True} if the current L{PE} instance is a PE32 file. Otherwise, returns C{False}.
[ "Determines", "if", "the", "current", "L", "{", "PE", "}", "instance", "is", "a", "PE32", "file", "." ]
python
train
34.1
dmort27/panphon
panphon/permissive.py
https://github.com/dmort27/panphon/blob/17eaa482e3edb211f3a8138137d76e4b9246d201/panphon/permissive.py#L125-L142
def fts_match(self, fts_mask, segment): """Evaluates whether a set of features 'match' a segment (are a subset of that segment's features) Args: fts_mask (list): list of (value, feature) tuples segment (unicode): IPA string corresponding to segment (consonant or vowel) Returns: bool: None if `segment` cannot be parsed; True if the feature values of `fts_mask` are a subset of those for `segment` """ fts_mask = set(fts_mask) fts_seg = self.fts(segment) if fts_seg: return fts_seg <= fts_mask else: return None
[ "def", "fts_match", "(", "self", ",", "fts_mask", ",", "segment", ")", ":", "fts_mask", "=", "set", "(", "fts_mask", ")", "fts_seg", "=", "self", ".", "fts", "(", "segment", ")", "if", "fts_seg", ":", "return", "fts_seg", "<=", "fts_mask", "else", ":", "return", "None" ]
Evaluates whether a set of features 'match' a segment (are a subset of that segment's features) Args: fts_mask (list): list of (value, feature) tuples segment (unicode): IPA string corresponding to segment (consonant or vowel) Returns: bool: None if `segment` cannot be parsed; True if the feature values of `fts_mask` are a subset of those for `segment`
[ "Evaluates", "whether", "a", "set", "of", "features", "match", "a", "segment", "(", "are", "a", "subset", "of", "that", "segment", "s", "features", ")" ]
python
train
37.555556
pantsbuild/pants
src/python/pants/build_graph/build_graph.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/build_graph/build_graph.py#L658-L680
def sort_targets(targets): """ :API: public :return: the targets that `targets` depend on sorted from most dependent to least. """ roots, inverted_deps = invert_dependencies(targets) ordered = [] visited = set() def topological_sort(target): if target not in visited: visited.add(target) if target in inverted_deps: for dep in inverted_deps[target]: topological_sort(dep) ordered.append(target) for root in roots: topological_sort(root) return ordered
[ "def", "sort_targets", "(", "targets", ")", ":", "roots", ",", "inverted_deps", "=", "invert_dependencies", "(", "targets", ")", "ordered", "=", "[", "]", "visited", "=", "set", "(", ")", "def", "topological_sort", "(", "target", ")", ":", "if", "target", "not", "in", "visited", ":", "visited", ".", "add", "(", "target", ")", "if", "target", "in", "inverted_deps", ":", "for", "dep", "in", "inverted_deps", "[", "target", "]", ":", "topological_sort", "(", "dep", ")", "ordered", ".", "append", "(", "target", ")", "for", "root", "in", "roots", ":", "topological_sort", "(", "root", ")", "return", "ordered" ]
:API: public :return: the targets that `targets` depend on sorted from most dependent to least.
[ ":", "API", ":", "public" ]
python
train
21.652174
SiLab-Bonn/pyBAR
pybar/run_manager.py
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/run_manager.py#L580-L602
def run_primlist(self, primlist, skip_remaining=False): '''Runs runs from a primlist. Parameters ---------- primlist : string Filename of primlist. skip_remaining : bool If True, skip remaining runs, if a run does not exit with status FINISHED. Note ---- Primlist is a text file of the following format (comment line by adding '#'): <module name (containing class) or class (in either case use dot notation)>; <scan parameter>=<value>; <another scan parameter>=<another value> ''' runlist = self.open_primlist(primlist) for index, run in enumerate(runlist): logging.info('Progressing with run %i out of %i...', index + 1, len(runlist)) join = self.run_run(run, use_thread=True) status = join() if skip_remaining and not status == run_status.finished: logging.error('Exited run %i with status %s: Skipping all remaining runs.', run.run_number, status) break
[ "def", "run_primlist", "(", "self", ",", "primlist", ",", "skip_remaining", "=", "False", ")", ":", "runlist", "=", "self", ".", "open_primlist", "(", "primlist", ")", "for", "index", ",", "run", "in", "enumerate", "(", "runlist", ")", ":", "logging", ".", "info", "(", "'Progressing with run %i out of %i...'", ",", "index", "+", "1", ",", "len", "(", "runlist", ")", ")", "join", "=", "self", ".", "run_run", "(", "run", ",", "use_thread", "=", "True", ")", "status", "=", "join", "(", ")", "if", "skip_remaining", "and", "not", "status", "==", "run_status", ".", "finished", ":", "logging", ".", "error", "(", "'Exited run %i with status %s: Skipping all remaining runs.'", ",", "run", ".", "run_number", ",", "status", ")", "break" ]
Runs runs from a primlist. Parameters ---------- primlist : string Filename of primlist. skip_remaining : bool If True, skip remaining runs, if a run does not exit with status FINISHED. Note ---- Primlist is a text file of the following format (comment line by adding '#'): <module name (containing class) or class (in either case use dot notation)>; <scan parameter>=<value>; <another scan parameter>=<another value>
[ "Runs", "runs", "from", "a", "primlist", "." ]
python
train
45.043478
johnnoone/json-spec
src/jsonspec/operations/bases.py
https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/operations/bases.py#L28-L45
def check(self, pointer, expected, raise_onerror=False): """Check if value exists in the object. :param pointer: the path to search in :param expected: the expected value :param raise_onerror: should raise on error? :return: boolean """ obj = self.document for token in Pointer(pointer): try: obj = token.extract(obj, bypass_ref=True) except ExtractError as error: if raise_onerror: raise Error(*error.args) logger.exception(error) return False return obj == expected
[ "def", "check", "(", "self", ",", "pointer", ",", "expected", ",", "raise_onerror", "=", "False", ")", ":", "obj", "=", "self", ".", "document", "for", "token", "in", "Pointer", "(", "pointer", ")", ":", "try", ":", "obj", "=", "token", ".", "extract", "(", "obj", ",", "bypass_ref", "=", "True", ")", "except", "ExtractError", "as", "error", ":", "if", "raise_onerror", ":", "raise", "Error", "(", "*", "error", ".", "args", ")", "logger", ".", "exception", "(", "error", ")", "return", "False", "return", "obj", "==", "expected" ]
Check if value exists in the object.

:param pointer: the path to search in
:param expected: the expected value
:param raise_onerror: should raise on error?
:return: boolean
[ "Check", "if", "value", "exists", "in", "the", "object", "." ]
python
train
34.944444
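A usage sketch via the module-level wrapper (assuming json-spec exposes one mirroring this method, with the document as first argument):

from jsonspec.operations import check

document = {'foo': {'bar': 42}}
check(document, '/foo/bar', 42)   # True
check(document, '/foo/baz', 42)   # False: extraction fails and is swallowed by default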
Capitains/MyCapytain
MyCapytain/resources/texts/remote/cts.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/texts/remote/cts.py#L262-L271
def lastId(self): """ Children passage :rtype: str :returns: Last child of the graph. Shortcut to self.childIds[-1] """ if self._last is False: # Request the next urn self._last = self.childIds[-1] return self._last
[ "def", "lastId", "(", "self", ")", ":", "if", "self", ".", "_last", "is", "False", ":", "# Request the next urn", "self", ".", "_last", "=", "self", ".", "childIds", "[", "-", "1", "]", "return", "self", ".", "_last" ]
Children passage

:rtype: str
:returns: Last child of the graph. Shortcut to self.childIds[-1]
[ "Children", "passage" ]
python
train
28.8
GoogleCloudPlatform/datastore-ndb-python
ndb/context.py
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/context.py#L908-L928
def call_on_commit(self, callback): """Call a callback upon successful commit of a transaction. If not in a transaction, the callback is called immediately. In a transaction, multiple callbacks may be registered and will be called once the transaction commits, in the order in which they were registered. If the transaction fails, the callbacks will not be called. If the callback raises an exception, it bubbles up normally. This means: If the callback is called immediately, any exception it raises will bubble up immediately. If the call is postponed until commit, remaining callbacks will be skipped and the exception will bubble up through the transaction() call. (However, the transaction is already committed at that point.) """ if not self.in_transaction(): callback() else: self._on_commit_queue.append(callback)
[ "def", "call_on_commit", "(", "self", ",", "callback", ")", ":", "if", "not", "self", ".", "in_transaction", "(", ")", ":", "callback", "(", ")", "else", ":", "self", ".", "_on_commit_queue", ".", "append", "(", "callback", ")" ]
Call a callback upon successful commit of a transaction. If not in a transaction, the callback is called immediately. In a transaction, multiple callbacks may be registered and will be called once the transaction commits, in the order in which they were registered. If the transaction fails, the callbacks will not be called. If the callback raises an exception, it bubbles up normally. This means: If the callback is called immediately, any exception it raises will bubble up immediately. If the call is postponed until commit, remaining callbacks will be skipped and the exception will bubble up through the transaction() call. (However, the transaction is already committed at that point.)
[ "Call", "a", "callback", "upon", "successful", "commit", "of", "a", "transaction", "." ]
python
train
41.904762
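A usage sketch inside a transaction, assuming the classic GAE runtime (the logging call is illustrative):

import logging
from google.appengine.ext import ndb

@ndb.transactional
def update():
    ctx = ndb.get_context()
    ctx.call_on_commit(lambda: logging.info('committed'))
    # ... datastore writes here ...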
pycampers/zproc
zproc/exceptions.py
https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/exceptions.py#L63-L83
def signal_to_exception(sig: signal.Signals) -> SignalException: """ Convert a ``signal.Signals`` to a ``SignalException``. This allows for natural, pythonic signal handling with the use of try-except blocks. .. code-block:: python import signal import zproc zproc.signal_to_exception(signal.SIGTERM) try: ... except zproc.SignalException as e: print("encountered:", e) finally: zproc.exception_to_signal(signal.SIGTERM) """ signal.signal(sig, _sig_exc_handler) return SignalException(sig)
[ "def", "signal_to_exception", "(", "sig", ":", "signal", ".", "Signals", ")", "->", "SignalException", ":", "signal", ".", "signal", "(", "sig", ",", "_sig_exc_handler", ")", "return", "SignalException", "(", "sig", ")" ]
Convert a ``signal.Signals`` to a ``SignalException``.

This allows for natural, pythonic signal handling with the use of try-except blocks.

.. code-block:: python

    import signal
    import zproc

    zproc.signal_to_exception(signal.SIGTERM)
    try:
        ...
    except zproc.SignalException as e:
        print("encountered:", e)
    finally:
        zproc.exception_to_signal(signal.SIGTERM)
[ "Convert", "a", "signal", ".", "Signals", "to", "a", "SignalException", "." ]
python
train
27.904762
jgorset/facepy
facepy/graph_api.py
https://github.com/jgorset/facepy/blob/1be3ee21389fb2db543927a2f4ffa949faec4242/facepy/graph_api.py#L209-L341
def _query(self, method, path, data=None, page=False, retry=0):
    """
    Fetch an object from the Graph API and parse the output, returning a tuple where the first item
    is the object yielded by the Graph API and the second is the URL for the next page of results, or
    ``None`` if results have been exhausted.

    :param method: A string describing the HTTP method.
    :param path: A string describing the object in the Graph API.
    :param data: A dictionary of HTTP GET parameters (for GET requests) or POST data (for POST requests).
    :param page: A boolean describing whether to return an iterator that iterates over each page of results.
    :param retry: An integer describing how many times the request may be retried.
    """
    if(data):
        data = dict(
            (k.replace('_sqbro_', '['), v) for k, v in data.items())
        data = dict(
            (k.replace('_sqbrc_', ']'), v) for k, v in data.items())
        data = dict(
            (k.replace('__', ':'), v) for k, v in data.items())
    data = data or {}

    def load(method, url, data):
        for key in data:
            value = data[key]
            if isinstance(value, (list, dict, set)):
                data[key] = json.dumps(value)
        try:
            if method in ['GET', 'DELETE']:
                response = self.session.request(
                    method,
                    url,
                    params=data,
                    allow_redirects=True,
                    verify=self.verify_ssl_certificate,
                    timeout=self.timeout
                )
            if method in ['POST', 'PUT']:
                files = {}
                for key in data:
                    if hasattr(data[key], 'read'):
                        files[key] = data[key]
                for key in files:
                    data.pop(key)
                response = self.session.request(
                    method,
                    url,
                    data=data,
                    files=files,
                    verify=self.verify_ssl_certificate,
                    timeout=self.timeout
                )
            if 500 <= response.status_code < 600:
                # Facebook 5XX errors usually come with helpful messages
                # as a JSON object describing the problem with the request.
                # If this is the case, an error will be raised and we just
                # need to re-raise it. This is most likely to happen
                # with the Ads API.
                # This will raise an exception if a JSON-like error object
                # comes in the response.
                self._parse(response.content)
                # If Facebook does not provide any JSON-formatted error
                # but just a plain-text, useless error, we'll just inform
                # about a Facebook Internal error occurred.
                raise FacebookError(
                    'Internal Facebook error occurred',
                    response.status_code
                )
        except requests.RequestException as exception:
            raise HTTPError(exception)
        result = self._parse(response.content)
        if isinstance(result, dict):
            result['headers'] = response.headers
        try:
            next_url = result['paging']['next']
        except (KeyError, TypeError):
            next_url = None
        return result, next_url

    def load_with_retry(method, url, data):
        remaining_retries = retry
        while True:
            try:
                return load(method, url, data)
            except FacepyError as e:
                log.warn("Exception on %s: %s, retries remaining: %s",
                         url, e, remaining_retries,
                         )
                if remaining_retries > 0:
                    remaining_retries -= 1
                else:
                    raise

    def paginate(method, url, data):
        while url:
            result, url = load_with_retry(method, url, data)
            # Reset pagination parameters.
            for key in ['offset', 'until', 'since']:
                if key in data:
                    del data[key]
            yield result

    # Convert option lists to comma-separated values.
    for key in data:
        if isinstance(data[key], (list, set, tuple)) and all([isinstance(item, six.string_types) for item in data[key]]):
            data[key] = ','.join(data[key])

    # Support absolute paths too
    if not path.startswith('/'):
        if six.PY2:
            path = '/' + six.text_type(path.decode('utf-8'))
        else:
            path = '/' + path

    url = self._get_url(path)

    if self.oauth_token:
        data['access_token'] = self.oauth_token

    if self.appsecret and self.oauth_token:
        data['appsecret_proof'] = self._generate_appsecret_proof()

    if page:
        return paginate(method, url, data)
    else:
        return load_with_retry(method, url, data)[0]
[ "def", "_query", "(", "self", ",", "method", ",", "path", ",", "data", "=", "None", ",", "page", "=", "False", ",", "retry", "=", "0", ")", ":", "if", "(", "data", ")", ":", "data", "=", "dict", "(", "(", "k", ".", "replace", "(", "'_sqbro_'", ",", "'['", ")", ",", "v", ")", "for", "k", ",", "v", "in", "data", ".", "items", "(", ")", ")", "data", "=", "dict", "(", "(", "k", ".", "replace", "(", "'_sqbrc_'", ",", "']'", ")", ",", "v", ")", "for", "k", ",", "v", "in", "data", ".", "items", "(", ")", ")", "data", "=", "dict", "(", "(", "k", ".", "replace", "(", "'__'", ",", "':'", ")", ",", "v", ")", "for", "k", ",", "v", "in", "data", ".", "items", "(", ")", ")", "data", "=", "data", "or", "{", "}", "def", "load", "(", "method", ",", "url", ",", "data", ")", ":", "for", "key", "in", "data", ":", "value", "=", "data", "[", "key", "]", "if", "isinstance", "(", "value", ",", "(", "list", ",", "dict", ",", "set", ")", ")", ":", "data", "[", "key", "]", "=", "json", ".", "dumps", "(", "value", ")", "try", ":", "if", "method", "in", "[", "'GET'", ",", "'DELETE'", "]", ":", "response", "=", "self", ".", "session", ".", "request", "(", "method", ",", "url", ",", "params", "=", "data", ",", "allow_redirects", "=", "True", ",", "verify", "=", "self", ".", "verify_ssl_certificate", ",", "timeout", "=", "self", ".", "timeout", ")", "if", "method", "in", "[", "'POST'", ",", "'PUT'", "]", ":", "files", "=", "{", "}", "for", "key", "in", "data", ":", "if", "hasattr", "(", "data", "[", "key", "]", ",", "'read'", ")", ":", "files", "[", "key", "]", "=", "data", "[", "key", "]", "for", "key", "in", "files", ":", "data", ".", "pop", "(", "key", ")", "response", "=", "self", ".", "session", ".", "request", "(", "method", ",", "url", ",", "data", "=", "data", ",", "files", "=", "files", ",", "verify", "=", "self", ".", "verify_ssl_certificate", ",", "timeout", "=", "self", ".", "timeout", ")", "if", "500", "<=", "response", ".", "status_code", "<", "600", ":", "# Facebook 5XX errors usually come with helpful messages", "# as a JSON object describing the problem with the request.", "# If this is the case, an error will be raised and we just", "# need to re-raise it. This is most likely to happen", "# with the Ads API.", "# This will raise an exception if a JSON-like error object", "# comes in the response.", "self", ".", "_parse", "(", "response", ".", "content", ")", "# If Facebook does not provide any JSON-formatted error", "# but just a plain-text, useless error, we'll just inform", "# about a Facebook Internal error occurred.", "raise", "FacebookError", "(", "'Internal Facebook error occurred'", ",", "response", ".", "status_code", ")", "except", "requests", ".", "RequestException", "as", "exception", ":", "raise", "HTTPError", "(", "exception", ")", "result", "=", "self", ".", "_parse", "(", "response", ".", "content", ")", "if", "isinstance", "(", "result", ",", "dict", ")", ":", "result", "[", "'headers'", "]", "=", "response", ".", "headers", "try", ":", "next_url", "=", "result", "[", "'paging'", "]", "[", "'next'", "]", "except", "(", "KeyError", ",", "TypeError", ")", ":", "next_url", "=", "None", "return", "result", ",", "next_url", "def", "load_with_retry", "(", "method", ",", "url", ",", "data", ")", ":", "remaining_retries", "=", "retry", "while", "True", ":", "try", ":", "return", "load", "(", "method", ",", "url", ",", "data", ")", "except", "FacepyError", "as", "e", ":", "log", ".", "warn", "(", "\"Exception on %s: %s, retries remaining: %s\"", ",", "url", ",", "e", ",", "remaining_retries", ",", ")", "if", "remaining_retries", ">", "0", ":", "remaining_retries", "-=", "1", "else", ":", "raise", "def", "paginate", "(", "method", ",", "url", ",", "data", ")", ":", "while", "url", ":", "result", ",", "url", "=", "load_with_retry", "(", "method", ",", "url", ",", "data", ")", "# Reset pagination parameters.", "for", "key", "in", "[", "'offset'", ",", "'until'", ",", "'since'", "]", ":", "if", "key", "in", "data", ":", "del", "data", "[", "key", "]", "yield", "result", "# Convert option lists to comma-separated values.", "for", "key", "in", "data", ":", "if", "isinstance", "(", "data", "[", "key", "]", ",", "(", "list", ",", "set", ",", "tuple", ")", ")", "and", "all", "(", "[", "isinstance", "(", "item", ",", "six", ".", "string_types", ")", "for", "item", "in", "data", "[", "key", "]", "]", ")", ":", "data", "[", "key", "]", "=", "','", ".", "join", "(", "data", "[", "key", "]", ")", "# Support absolute paths too", "if", "not", "path", ".", "startswith", "(", "'/'", ")", ":", "if", "six", ".", "PY2", ":", "path", "=", "'/'", "+", "six", ".", "text_type", "(", "path", ".", "decode", "(", "'utf-8'", ")", ")", "else", ":", "path", "=", "'/'", "+", "path", "url", "=", "self", ".", "_get_url", "(", "path", ")", "if", "self", ".", "oauth_token", ":", "data", "[", "'access_token'", "]", "=", "self", ".", "oauth_token", "if", "self", ".", "appsecret", "and", "self", ".", "oauth_token", ":", "data", "[", "'appsecret_proof'", "]", "=", "self", ".", "_generate_appsecret_proof", "(", ")", "if", "page", ":", "return", "paginate", "(", "method", ",", "url", ",", "data", ")", "else", ":", "return", "load_with_retry", "(", "method", ",", "url", ",", "data", ")", "[", "0", "]" ]
Fetch an object from the Graph API and parse the output, returning a tuple where the first item is the object yielded by the Graph API and the second is the URL for the next page of results, or ``None`` if results have been exhausted. :param method: A string describing the HTTP method. :param path: A string describing the object in the Graph API. :param data: A dictionary of HTTP GET parameters (for GET requests) or POST data (for POST requests). :param page: A boolean describing whether to return an iterator that iterates over each page of results. :param retry: An integer describing how many times the request may be retried.
[ "Fetch", "an", "object", "from", "the", "Graph", "API", "and", "parse", "the", "output", "returning", "a", "tuple", "where", "the", "first", "item", "is", "the", "object", "yielded", "by", "the", "Graph", "API", "and", "the", "second", "is", "the", "URL", "for", "the", "next", "page", "of", "results", "or", "None", "if", "results", "have", "been", "exhausted", "." ]
python
train
38.969925
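Since _query is private, a sketch through the public wrappers that delegate to it (the token is hypothetical):

from facepy import GraphAPI

graph = GraphAPI('<access token>')
me = graph.get('me', retry=3)              # GET via _query, with up to 3 retries
posts = graph.get('me/posts', page=True)   # returns a generator over result pages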
gc3-uzh-ch/elasticluster
elasticluster/providers/ec2_boto.py
https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/ec2_boto.py#L144-L154
def to_vars_dict(self): """ Return local state which is relevant for the cluster setup process. """ return { 'aws_access_key_id': self._access_key, 'aws_secret_access_key': self._secret_key, 'aws_region': self._region_name, 'aws_vpc_name': (self._vpc or ''), 'aws_vpc_id': (self._vpc_id or ''), }
[ "def", "to_vars_dict", "(", "self", ")", ":", "return", "{", "'aws_access_key_id'", ":", "self", ".", "_access_key", ",", "'aws_secret_access_key'", ":", "self", ".", "_secret_key", ",", "'aws_region'", ":", "self", ".", "_region_name", ",", "'aws_vpc_name'", ":", "(", "self", ".", "_vpc", "or", "''", ")", ",", "'aws_vpc_id'", ":", "(", "self", ".", "_vpc_id", "or", "''", ")", ",", "}" ]
Return local state which is relevant for the cluster setup process.
[ "Return", "local", "state", "which", "is", "relevant", "for", "the", "cluster", "setup", "process", "." ]
python
train
38.727273
ibis-project/ibis
ibis/clickhouse/operations.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/clickhouse/operations.py#L214-L219
def _sign(translator, expr): """Workaround for missing sign function""" op = expr.op() arg, = op.args arg_ = translator.translate(arg) return 'intDivOrZero({0}, abs({0}))'.format(arg_)
[ "def", "_sign", "(", "translator", ",", "expr", ")", ":", "op", "=", "expr", ".", "op", "(", ")", "arg", ",", "=", "op", ".", "args", "arg_", "=", "translator", ".", "translate", "(", "arg", ")", "return", "'intDivOrZero({0}, abs({0}))'", ".", "format", "(", "arg_", ")" ]
Workaround for missing sign function
[ "Workaround", "for", "missing", "sign", "function" ]
python
train
33.166667
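The emulation relies on an arithmetic identity; a plain-Python check of the same trick for integer inputs:

def sign_via_intdiv(x):
    # mirrors ClickHouse intDivOrZero(x, abs(x)): division by zero yields 0
    return 0 if x == 0 else x // abs(x)

assert [sign_via_intdiv(v) for v in (-7, 0, 4)] == [-1, 0, 1]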
swistakm/graceful
src/graceful/authentication.py
https://github.com/swistakm/graceful/blob/d4678cb6349a5c843a5e58002fc80140821609e4/src/graceful/authentication.py#L77-L81
def get_user( self, identified_with, identifier, req, resp, resource, uri_kwargs ): """Return default user object.""" return self.user
[ "def", "get_user", "(", "self", ",", "identified_with", ",", "identifier", ",", "req", ",", "resp", ",", "resource", ",", "uri_kwargs", ")", ":", "return", "self", ".", "user" ]
Return default user object.
[ "Return", "default", "user", "object", "." ]
python
train
31.6
twilio/twilio-python
twilio/base/page.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/base/page.py#L51-L61
def process_response(self, response): """ Load a JSON response. :param Response response: The HTTP response. :return dict: The JSON-loaded content. """ if response.status_code != 200: raise TwilioException('Unable to fetch page', response) return json.loads(response.text)
[ "def", "process_response", "(", "self", ",", "response", ")", ":", "if", "response", ".", "status_code", "!=", "200", ":", "raise", "TwilioException", "(", "'Unable to fetch page'", ",", "response", ")", "return", "json", ".", "loads", "(", "response", ".", "text", ")" ]
Load a JSON response. :param Response response: The HTTP response. :return dict: The JSON-loaded content.
[ "Load", "a", "JSON", "response", "." ]
python
train
30.181818
senaite/senaite.core
bika/lims/browser/dashboard/dashboard.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/dashboard/dashboard.py#L129-L157
def get_dashboard_panels_visibility_by_section(section_name): """ Return a list of pairs as values that represents the role-permission view relation for the panel section passed in. :param section_name: the panels section id. :return: a list of tuples. """ registry_info = get_dashboard_registry_record() if section_name not in registry_info: # Registry hasn't been set, do it at least for this section registry_info = \ setup_dashboard_panels_visibility_registry(section_name) pairs = registry_info.get(section_name) pairs = get_strings(pairs) if pairs is None: # In the registry, but with None value? setup_dashboard_panels_visibility_registry(section_name) return get_dashboard_panels_visibility_by_section(section_name) pairs = pairs.split(',') if len(pairs) == 0 or len(pairs) % 2 != 0: # Non-valid or malformed value setup_dashboard_panels_visibility_registry(section_name) return get_dashboard_panels_visibility_by_section(section_name) result = [ (pairs[i], pairs[i + 1]) for i in range(len(pairs)) if i % 2 == 0] return result
[ "def", "get_dashboard_panels_visibility_by_section", "(", "section_name", ")", ":", "registry_info", "=", "get_dashboard_registry_record", "(", ")", "if", "section_name", "not", "in", "registry_info", ":", "# Registry hasn't been set, do it at least for this section", "registry_info", "=", "setup_dashboard_panels_visibility_registry", "(", "section_name", ")", "pairs", "=", "registry_info", ".", "get", "(", "section_name", ")", "pairs", "=", "get_strings", "(", "pairs", ")", "if", "pairs", "is", "None", ":", "# In the registry, but with None value?", "setup_dashboard_panels_visibility_registry", "(", "section_name", ")", "return", "get_dashboard_panels_visibility_by_section", "(", "section_name", ")", "pairs", "=", "pairs", ".", "split", "(", "','", ")", "if", "len", "(", "pairs", ")", "==", "0", "or", "len", "(", "pairs", ")", "%", "2", "!=", "0", ":", "# Non-valid or malformed value", "setup_dashboard_panels_visibility_registry", "(", "section_name", ")", "return", "get_dashboard_panels_visibility_by_section", "(", "section_name", ")", "result", "=", "[", "(", "pairs", "[", "i", "]", ",", "pairs", "[", "i", "+", "1", "]", ")", "for", "i", "in", "range", "(", "len", "(", "pairs", ")", ")", "if", "i", "%", "2", "==", "0", "]", "return", "result" ]
Return a list of pairs as values that represents the role-permission view relation for the panel section passed in. :param section_name: the panels section id. :return: a list of tuples.
[ "Return", "a", "list", "of", "pairs", "as", "values", "that", "represents", "the", "role", "-", "permission", "view", "relation", "for", "the", "panel", "section", "passed", "in", ".", ":", "param", "section_name", ":", "the", "panels", "section", "id", ".", ":", "return", ":", "a", "list", "of", "tuples", "." ]
python
train
39.793103
googleapis/google-cloud-python
datastore/google/cloud/datastore/key.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/datastore/google/cloud/datastore/key.py#L629-L656
def _to_legacy_path(dict_path): """Convert a tuple of ints and strings in a legacy "Path". .. note: This assumes, but does not verify, that each entry in ``dict_path`` is valid (i.e. doesn't have more than one key out of "name" / "id"). :type dict_path: list :param dict_path: The "structured" path for a key, i.e. it is a list of dictionaries, each of which has "kind" and one of "name" / "id" as keys. :rtype: :class:`._app_engine_key_pb2.Path` :returns: The legacy path corresponding to ``dict_path``. """ elements = [] for part in dict_path: element_kwargs = {"type": part["kind"]} if "id" in part: element_kwargs["id"] = part["id"] elif "name" in part: element_kwargs["name"] = part["name"] element = _app_engine_key_pb2.Path.Element(**element_kwargs) elements.append(element) return _app_engine_key_pb2.Path(element=elements)
[ "def", "_to_legacy_path", "(", "dict_path", ")", ":", "elements", "=", "[", "]", "for", "part", "in", "dict_path", ":", "element_kwargs", "=", "{", "\"type\"", ":", "part", "[", "\"kind\"", "]", "}", "if", "\"id\"", "in", "part", ":", "element_kwargs", "[", "\"id\"", "]", "=", "part", "[", "\"id\"", "]", "elif", "\"name\"", "in", "part", ":", "element_kwargs", "[", "\"name\"", "]", "=", "part", "[", "\"name\"", "]", "element", "=", "_app_engine_key_pb2", ".", "Path", ".", "Element", "(", "*", "*", "element_kwargs", ")", "elements", ".", "append", "(", "element", ")", "return", "_app_engine_key_pb2", ".", "Path", "(", "element", "=", "elements", ")" ]
Convert a tuple of ints and strings in a legacy "Path".

.. note:

    This assumes, but does not verify, that each entry in
    ``dict_path`` is valid (i.e. doesn't have more than one
    key out of "name" / "id").

:type dict_path: list
:param dict_path: The "structured" path for a key, i.e. it is a list of
                  dictionaries, each of which has "kind" and one of
                  "name" / "id" as keys.

:rtype: :class:`._app_engine_key_pb2.Path`
:returns: The legacy path corresponding to ``dict_path``.
[ "Convert", "a", "tuple", "of", "ints", "and", "strings", "in", "a", "legacy", "Path", "." ]
python
train
35.035714
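A sketch of the conversion on a two-element path (the helper is private; the values are hypothetical):

path = [{'kind': 'Parent', 'name': 'alpha'}, {'kind': 'Child', 'id': 42}]
legacy = _to_legacy_path(path)
# legacy.element[0].type == 'Parent', legacy.element[0].name == 'alpha'
# legacy.element[1].type == 'Child',  legacy.element[1].id == 42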
jleclanche/fireplace
fireplace/game.py
https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/game.py#L153-L158
def trigger(self, source, actions, event_args): """ Perform actions as a result of an event listener (TRIGGER) """ type = BlockType.TRIGGER return self.action_block(source, actions, type, event_args=event_args)
[ "def", "trigger", "(", "self", ",", "source", ",", "actions", ",", "event_args", ")", ":", "type", "=", "BlockType", ".", "TRIGGER", "return", "self", ".", "action_block", "(", "source", ",", "actions", ",", "type", ",", "event_args", "=", "event_args", ")" ]
Perform actions as a result of an event listener (TRIGGER)
[ "Perform", "actions", "as", "a", "result", "of", "an", "event", "listener", "(", "TRIGGER", ")" ]
python
train
35.833333
sander76/aio-powerview-api
aiopvapi/helpers/powerview_util.py
https://github.com/sander76/aio-powerview-api/blob/08b6ac747aba9de19842359a981a7ff1292f5a6c/aiopvapi/helpers/powerview_util.py#L32-L40
async def create_scene(self, scene_name, room_id) -> Scene: """Create a scene and return the scene object. :raises PvApiError when something is wrong with the hub. """ _raw = await self._scenes_entry_point.create_scene(room_id, scene_name) result = Scene(_raw, self.request) self.scenes.append(result) return result
[ "async", "def", "create_scene", "(", "self", ",", "scene_name", ",", "room_id", ")", "->", "Scene", ":", "_raw", "=", "await", "self", ".", "_scenes_entry_point", ".", "create_scene", "(", "room_id", ",", "scene_name", ")", "result", "=", "Scene", "(", "_raw", ",", "self", ".", "request", ")", "self", ".", "scenes", ".", "append", "(", "result", ")", "return", "result" ]
Create a scene and return the scene object.

:raises PvApiError when something is wrong with the hub.
[ "Create", "a", "scene", "and", "returns", "the", "scene", "object", "." ]
python
train
40.555556
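A hedged usage sketch; it assumes `pv_util` is an already-initialized helper exposing the coroutine above, since constructing one requires a reachable PowerView hub:

import asyncio

async def add_evening_scene(pv_util, room_id):
    # PvApiError propagates if something is wrong with the hub.
    scene = await pv_util.create_scene("Evening", room_id)
    print(scene)       # the new Scene is also appended to pv_util.scenes

# asyncio.get_event_loop().run_until_complete(add_evening_scene(pv_util, 12345))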
umich-brcf-bioinf/Jacquard
jacquard/utils/vcf.py
https://github.com/umich-brcf-bioinf/Jacquard/blob/83dd61dd2b5e4110468493beec7bc121e6cb3cd1/jacquard/utils/vcf.py#L313-L326
def _join_info_fields(self): """Updates info attribute from info dict.""" if self.info_dict: info_fields = [] if len(self.info_dict) > 1: self.info_dict.pop(".", None) for field, value in self.info_dict.items(): if field == value: info_fields.append(value) else: info_fields.append("=".join([field, value])) self.info = ";".join(info_fields) else: self.info = "."
[ "def", "_join_info_fields", "(", "self", ")", ":", "if", "self", ".", "info_dict", ":", "info_fields", "=", "[", "]", "if", "len", "(", "self", ".", "info_dict", ")", ">", "1", ":", "self", ".", "info_dict", ".", "pop", "(", "\".\"", ",", "None", ")", "for", "field", ",", "value", "in", "self", ".", "info_dict", ".", "items", "(", ")", ":", "if", "field", "==", "value", ":", "info_fields", ".", "append", "(", "value", ")", "else", ":", "info_fields", ".", "append", "(", "\"=\"", ".", "join", "(", "[", "field", ",", "value", "]", ")", ")", "self", ".", "info", "=", "\";\"", ".", "join", "(", "info_fields", ")", "else", ":", "self", ".", "info", "=", "\".\"" ]
Updates info attribute from info dict.
[ "Updates", "info", "attribute", "from", "info", "dict", "." ]
python
test
37.285714
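The same joining rule as a standalone function, making the three cases visible: bare flags (where field == value) pass through, key=value pairs are serialized, and a lone "." survives only when it is the sole entry:

def join_info_fields(info_dict):
    # Mirrors the method above, operating on a plain dict.
    if not info_dict:
        return "."
    if len(info_dict) > 1:
        info_dict.pop(".", None)
    fields = [v if k == v else "=".join([k, v]) for k, v in info_dict.items()]
    return ";".join(fields)

print(join_info_fields({"DP": "42", "SOMATIC": "SOMATIC"}))  # DP=42;SOMATIC
print(join_info_fields({".": "."}))                          # .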
oauthlib/oauthlib
oauthlib/oauth2/rfc6749/tokens.py
https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth2/rfc6749/tokens.py#L300-L340
def create_token(self, request, refresh_token=False, **kwargs): """ Create a BearerToken, by default without refresh token. :param request: OAuthlib request. :type request: oauthlib.common.Request :param refresh_token: """ if "save_token" in kwargs: warnings.warn("`save_token` has been deprecated, it was not called internally." "If you do, call `request_validator.save_token()` instead.", DeprecationWarning) if callable(self.expires_in): expires_in = self.expires_in(request) else: expires_in = self.expires_in request.expires_in = expires_in token = { 'access_token': self.token_generator(request), 'expires_in': expires_in, 'token_type': 'Bearer', } # If provided, include - this is optional in some cases https://tools.ietf.org/html/rfc6749#section-3.3 but # there is currently no mechanism to coordinate issuing a token for only a subset of the requested scopes so # all tokens issued are for the entire set of requested scopes. if request.scopes is not None: token['scope'] = ' '.join(request.scopes) if refresh_token: if (request.refresh_token and not self.request_validator.rotate_refresh_token(request)): token['refresh_token'] = request.refresh_token else: token['refresh_token'] = self.refresh_token_generator(request) token.update(request.extra_credentials or {}) return OAuth2Token(token)
[ "def", "create_token", "(", "self", ",", "request", ",", "refresh_token", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "\"save_token\"", "in", "kwargs", ":", "warnings", ".", "warn", "(", "\"`save_token` has been deprecated, it was not called internally.\"", "\"If you do, call `request_validator.save_token()` instead.\"", ",", "DeprecationWarning", ")", "if", "callable", "(", "self", ".", "expires_in", ")", ":", "expires_in", "=", "self", ".", "expires_in", "(", "request", ")", "else", ":", "expires_in", "=", "self", ".", "expires_in", "request", ".", "expires_in", "=", "expires_in", "token", "=", "{", "'access_token'", ":", "self", ".", "token_generator", "(", "request", ")", ",", "'expires_in'", ":", "expires_in", ",", "'token_type'", ":", "'Bearer'", ",", "}", "# If provided, include - this is optional in some cases https://tools.ietf.org/html/rfc6749#section-3.3 but", "# there is currently no mechanism to coordinate issuing a token for only a subset of the requested scopes so", "# all tokens issued are for the entire set of requested scopes.", "if", "request", ".", "scopes", "is", "not", "None", ":", "token", "[", "'scope'", "]", "=", "' '", ".", "join", "(", "request", ".", "scopes", ")", "if", "refresh_token", ":", "if", "(", "request", ".", "refresh_token", "and", "not", "self", ".", "request_validator", ".", "rotate_refresh_token", "(", "request", ")", ")", ":", "token", "[", "'refresh_token'", "]", "=", "request", ".", "refresh_token", "else", ":", "token", "[", "'refresh_token'", "]", "=", "self", ".", "refresh_token_generator", "(", "request", ")", "token", ".", "update", "(", "request", ".", "extra_credentials", "or", "{", "}", ")", "return", "OAuth2Token", "(", "token", ")" ]
Create a BearerToken, by default without refresh token. :param request: OAuthlib request. :type request: oauthlib.common.Request :param refresh_token:
[ "Create", "a", "BearerToken", "by", "default", "without", "refresh", "token", "." ]
python
train
39.853659
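The shape of the resulting token, sketched as a plain dict; the values are illustrative placeholders (borrowed from the RFC 6749 examples), not output from a real run:

# Hypothetical result for a request with scopes and refresh_token=True:
token = {
    "access_token": "2YotnFZFEjr1zCsicMWpAA",   # from token_generator(request)
    "expires_in": 3600,                         # int, or expires_in(request) if callable
    "token_type": "Bearer",
    "scope": "read write",                      # only when request.scopes is not None
    "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",  # reused or regenerated per rotate_refresh_token()
}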
pantsbuild/pants
src/python/pants/build_graph/bundle_mixin.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/build_graph/bundle_mixin.py#L58-L80
def symlink_bundles(self, app, bundle_dir): """For each bundle in the given app, symlinks relevant matched paths. Validates that at least one path was matched by a bundle. """ for bundle_counter, bundle in enumerate(app.bundles): count = 0 for path, relpath in bundle.filemap.items(): bundle_path = os.path.join(bundle_dir, relpath) count += 1 if os.path.exists(bundle_path): continue if os.path.isfile(path): safe_mkdir(os.path.dirname(bundle_path)) os.symlink(path, bundle_path) elif os.path.isdir(path): safe_mkdir(bundle_path) if count == 0: raise TargetDefinitionException(app.target, 'Bundle index {} of "bundles" field ' 'does not match any files.'.format(bundle_counter))
[ "def", "symlink_bundles", "(", "self", ",", "app", ",", "bundle_dir", ")", ":", "for", "bundle_counter", ",", "bundle", "in", "enumerate", "(", "app", ".", "bundles", ")", ":", "count", "=", "0", "for", "path", ",", "relpath", "in", "bundle", ".", "filemap", ".", "items", "(", ")", ":", "bundle_path", "=", "os", ".", "path", ".", "join", "(", "bundle_dir", ",", "relpath", ")", "count", "+=", "1", "if", "os", ".", "path", ".", "exists", "(", "bundle_path", ")", ":", "continue", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "safe_mkdir", "(", "os", ".", "path", ".", "dirname", "(", "bundle_path", ")", ")", "os", ".", "symlink", "(", "path", ",", "bundle_path", ")", "elif", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "safe_mkdir", "(", "bundle_path", ")", "if", "count", "==", "0", ":", "raise", "TargetDefinitionException", "(", "app", ".", "target", ",", "'Bundle index {} of \"bundles\" field '", "'does not match any files.'", ".", "format", "(", "bundle_counter", ")", ")" ]
For each bundle in the given app, symlinks relevant matched paths. Validates that at least one path was matched by a bundle.
[ "For", "each", "bundle", "in", "the", "given", "app", "symlinks", "relevant", "matched", "paths", "." ]
python
train
37.478261
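The symlinking core, reduced to a runnable sketch over a temporary directory (POSIX symlink semantics assumed; os.symlink may need extra privileges on Windows):

import os
import tempfile

bundle_dir = tempfile.mkdtemp()
src = os.path.join(tempfile.mkdtemp(), "payload.txt")
with open(src, "w") as f:
    f.write("payload")

filemap = {src: "files/payload.txt"}    # absolute source -> bundle-relative path
for path, relpath in filemap.items():
    bundle_path = os.path.join(bundle_dir, relpath)
    if not os.path.exists(bundle_path) and os.path.isfile(path):
        os.makedirs(os.path.dirname(bundle_path))  # roughly what safe_mkdir does
        os.symlink(path, bundle_path)

print(open(os.path.join(bundle_dir, "files/payload.txt")).read())  # payload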
spyder-ide/spyder
spyder/utils/syntaxhighlighters.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/syntaxhighlighters.py#L1220-L1230
def highlightBlock(self, text): """ Actually highlight the block""" # Note that an undefined blockstate is equal to -1, so the first block # will have the correct behaviour of starting at 0. if self._allow_highlight: start = self.previousBlockState() + 1 end = start + len(text) for i, (fmt, letter) in enumerate(self._charlist[start:end]): self.setFormat(i, 1, fmt) self.setCurrentBlockState(end) self.highlight_spaces(text)
[ "def", "highlightBlock", "(", "self", ",", "text", ")", ":", "# Note that an undefined blockstate is equal to -1, so the first block\r", "# will have the correct behaviour of starting at 0.\r", "if", "self", ".", "_allow_highlight", ":", "start", "=", "self", ".", "previousBlockState", "(", ")", "+", "1", "end", "=", "start", "+", "len", "(", "text", ")", "for", "i", ",", "(", "fmt", ",", "letter", ")", "in", "enumerate", "(", "self", ".", "_charlist", "[", "start", ":", "end", "]", ")", ":", "self", ".", "setFormat", "(", "i", ",", "1", ",", "fmt", ")", "self", ".", "setCurrentBlockState", "(", "end", ")", "self", ".", "highlight_spaces", "(", "text", ")" ]
Actually highlight the block
[ "Actually", "highlight", "the", "block" ]
python
train
48.454545
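The index arithmetic, simulated without Qt: the stored block state is the end offset of the block's text in a document-wide character list, so `previousBlockState() + 1` steps over the newline that separates consecutive blocks. A self-contained check:

document = "def f():\npass"
charlist = list(document)   # stands in for the per-character (fmt, letter) list
prev_state = -1             # an undefined Qt block state is -1

for text in document.split("\n"):
    start = prev_state + 1  # +1 skips the separating newline
    end = start + len(text)
    assert "".join(charlist[start:end]) == text
    prev_state = end        # what setCurrentBlockState(end) records

print("offsets line up")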
brandonxiang/geojson-python-utils
geojson_utils/geojson_utils.py
https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L216-L236
def point_distance(point1, point2): """ calculate the distance between two points on the sphere like Google Maps reference http://www.movable-type.co.uk/scripts/latlong.html Keyword arguments: point1 -- point one geojson object point2 -- point two geojson object return distance """ lon1 = point1['coordinates'][0] lat1 = point1['coordinates'][1] lon2 = point2['coordinates'][0] lat2 = point2['coordinates'][1] deg_lat = number2radius(lat2 - lat1) deg_lon = number2radius(lon2 - lon1) a = math.pow(math.sin(deg_lat / 2), 2) + math.cos(number2radius(lat1)) * \ math.cos(number2radius(lat2)) * math.pow(math.sin(deg_lon / 2), 2) c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)) return (6371 * c) * 1000
[ "def", "point_distance", "(", "point1", ",", "point2", ")", ":", "lon1", "=", "point1", "[", "'coordinates'", "]", "[", "0", "]", "lat1", "=", "point1", "[", "'coordinates'", "]", "[", "1", "]", "lon2", "=", "point2", "[", "'coordinates'", "]", "[", "0", "]", "lat2", "=", "point2", "[", "'coordinates'", "]", "[", "1", "]", "deg_lat", "=", "number2radius", "(", "lat2", "-", "lat1", ")", "deg_lon", "=", "number2radius", "(", "lon2", "-", "lon1", ")", "a", "=", "math", ".", "pow", "(", "math", ".", "sin", "(", "deg_lat", "/", "2", ")", ",", "2", ")", "+", "math", ".", "cos", "(", "number2radius", "(", "lat1", ")", ")", "*", "math", ".", "cos", "(", "number2radius", "(", "lat2", ")", ")", "*", "math", ".", "pow", "(", "math", ".", "sin", "(", "deg_lon", "/", "2", ")", ",", "2", ")", "c", "=", "2", "*", "math", ".", "atan2", "(", "math", ".", "sqrt", "(", "a", ")", ",", "math", ".", "sqrt", "(", "1", "-", "a", ")", ")", "return", "(", "6371", "*", "c", ")", "*", "1000" ]
calculate the distance between two points on the sphere like Google Maps reference http://www.movable-type.co.uk/scripts/latlong.html Keyword arguments: point1 -- point one geojson object point2 -- point two geojson object return distance
[ "calculate", "the", "distance", "between", "two", "points", "on", "the", "sphere", "like", "google", "map", "reference", "http", ":", "//", "www", ".", "movable", "-", "type", ".", "co", ".", "uk", "/", "scripts", "/", "latlong", ".", "html" ]
python
train
36.238095
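A self-contained version for quick checks; `number2radius` is assumed to be a plain degrees-to-radians helper, which matches how the function above uses it:

import math

def number2radius(deg):
    return deg * math.pi / 180.0

def point_distance(p1, p2):
    lon1, lat1 = p1["coordinates"]
    lon2, lat2 = p2["coordinates"]
    dlat = number2radius(lat2 - lat1)
    dlon = number2radius(lon2 - lon1)
    a = (math.sin(dlat / 2) ** 2
         + math.cos(number2radius(lat1)) * math.cos(number2radius(lat2))
         * math.sin(dlon / 2) ** 2)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return 6371 * c * 1000  # metres, using a 6371 km mean Earth radius

paris = {"coordinates": [2.3522, 48.8566]}
london = {"coordinates": [-0.1276, 51.5072]}
print(round(point_distance(paris, london) / 1000))  # ~344 (km)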
project-ncl/pnc-cli
pnc_cli/swagger_client/apis/buildconfigurations_api.py
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/swagger_client/apis/buildconfigurations_api.py#L1431-L1459
def get_dependencies(self, id, **kwargs): """ Get the direct dependencies of the specified configuration This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_dependencies(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Build configuration id (required) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: BuildConfigurationPage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_dependencies_with_http_info(id, **kwargs) else: (data) = self.get_dependencies_with_http_info(id, **kwargs) return data
[ "def", "get_dependencies", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'callback'", ")", ":", "return", "self", ".", "get_dependencies_with_http_info", "(", "id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "get_dependencies_with_http_info", "(", "id", ",", "*", "*", "kwargs", ")", "return", "data" ]
Get the direct dependencies of the specified configuration This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_dependencies(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Build configuration id (required) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: BuildConfigurationPage If the method is called asynchronously, returns the request thread.
[ "Get", "the", "direct", "dependencies", "of", "the", "specified", "configuration", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "define", "a", "callback", "function", "to", "be", "invoked", "when", "receiving", "the", "response", ".", ">>>", "def", "callback_function", "(", "response", ")", ":", ">>>", "pprint", "(", "response", ")", ">>>", ">>>", "thread", "=", "api", ".", "get_dependencies", "(", "id", "callback", "=", "callback_function", ")" ]
python
train
41.448276
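Both calling styles the docstring describes, sketched with placeholder values; an authenticated `api` client and a live PNC server are required, so the calls are left commented:

def on_done(response):
    # Invoked with the BuildConfigurationPage once the request completes.
    print(response)

# Synchronous -- returns the page directly:
# page = api.get_dependencies(42, page_index=0, page_size=20)

# Asynchronous -- returns the request thread immediately:
# thread = api.get_dependencies(42, callback=on_done)
# thread.join()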
carpedm20/fbchat
fbchat/_client.py
https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L680-L693
def searchForUsers(self, name, limit=10): """ Find and get user by his/her name :param name: Name of the user :param limit: The max. amount of users to fetch :return: :class:`models.User` objects, ordered by relevance :rtype: list :raises: FBchatException if request failed """ params = {"search": name, "limit": limit} j = self.graphql_request(GraphQL(query=GraphQL.SEARCH_USER, params=params)) return [User._from_graphql(node) for node in j[name]["users"]["nodes"]]
[ "def", "searchForUsers", "(", "self", ",", "name", ",", "limit", "=", "10", ")", ":", "params", "=", "{", "\"search\"", ":", "name", ",", "\"limit\"", ":", "limit", "}", "j", "=", "self", ".", "graphql_request", "(", "GraphQL", "(", "query", "=", "GraphQL", ".", "SEARCH_USER", ",", "params", "=", "params", ")", ")", "return", "[", "User", ".", "_from_graphql", "(", "node", ")", "for", "node", "in", "j", "[", "name", "]", "[", "\"users\"", "]", "[", "\"nodes\"", "]", "]" ]
Find and get user by his/her name :param name: Name of the user :param limit: The max. amount of users to fetch :return: :class:`models.User` objects, ordered by relevance :rtype: list :raises: FBchatException if request failed
[ "Find", "and", "get", "user", "by", "his", "/", "her", "name" ]
python
train
38.857143
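A hedged usage sketch; the `Client` login signature and the `uid`/`name` attributes on the returned `User` objects are recalled from this library's README and should be treated as assumptions:

from fbchat import Client

# client = Client("<email>", "<password>")     # logs in; needs real credentials
# for user in client.searchForUsers("Tom", limit=5):
#     print(user.uid, user.name)               # results are ordered by relevance
# client.logout()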
zetaops/zengine
zengine/engine.py
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/engine.py#L608-L643
def _load_activity(self, activity): """ Iterates through all the enabled `~zengine.settings.ACTIVITY_MODULES_IMPORT_PATHS` to find the given path. """ fpths = [] full_path = '' errors = [] paths = settings.ACTIVITY_MODULES_IMPORT_PATHS number_of_paths = len(paths) for index_no in range(number_of_paths): full_path = "%s.%s" % (paths[index_no], activity) for look4kls in (0, 1): try: self.current.log.info("try to load from %s[%s]" % (full_path, look4kls)) kls, cls_name, cls_method = self._import_object(full_path, look4kls) if cls_method: self.current.log.info("WILLCall %s(current).%s()" % (kls, cls_method)) self.wf_activities[activity] = lambda crnt: getattr(kls(crnt), cls_method)() else: self.wf_activities[activity] = kls return except (ImportError, AttributeError): fpths.append(full_path) errmsg = "{activity} not found under these paths:\n\n >>> {paths} \n\n" \ "Error Messages:\n {errors}" errors.append("\n========================================================>\n" "| PATH | %s" "\n========================================================>\n\n" "%s" % (full_path, traceback.format_exc())) assert index_no != number_of_paths - 1, errmsg.format(activity=activity, paths='\n >>> '.join( set(fpths)), errors='\n\n'.join(errors) ) except: self.current.log.exception("Cannot found the %s" % activity)
[ "def", "_load_activity", "(", "self", ",", "activity", ")", ":", "fpths", "=", "[", "]", "full_path", "=", "''", "errors", "=", "[", "]", "paths", "=", "settings", ".", "ACTIVITY_MODULES_IMPORT_PATHS", "number_of_paths", "=", "len", "(", "paths", ")", "for", "index_no", "in", "range", "(", "number_of_paths", ")", ":", "full_path", "=", "\"%s.%s\"", "%", "(", "paths", "[", "index_no", "]", ",", "activity", ")", "for", "look4kls", "in", "(", "0", ",", "1", ")", ":", "try", ":", "self", ".", "current", ".", "log", ".", "info", "(", "\"try to load from %s[%s]\"", "%", "(", "full_path", ",", "look4kls", ")", ")", "kls", ",", "cls_name", ",", "cls_method", "=", "self", ".", "_import_object", "(", "full_path", ",", "look4kls", ")", "if", "cls_method", ":", "self", ".", "current", ".", "log", ".", "info", "(", "\"WILLCall %s(current).%s()\"", "%", "(", "kls", ",", "cls_method", ")", ")", "self", ".", "wf_activities", "[", "activity", "]", "=", "lambda", "crnt", ":", "getattr", "(", "kls", "(", "crnt", ")", ",", "cls_method", ")", "(", ")", "else", ":", "self", ".", "wf_activities", "[", "activity", "]", "=", "kls", "return", "except", "(", "ImportError", ",", "AttributeError", ")", ":", "fpths", ".", "append", "(", "full_path", ")", "errmsg", "=", "\"{activity} not found under these paths:\\n\\n >>> {paths} \\n\\n\"", "\"Error Messages:\\n {errors}\"", "errors", ".", "append", "(", "\"\\n========================================================>\\n\"", "\"| PATH | %s\"", "\"\\n========================================================>\\n\\n\"", "\"%s\"", "%", "(", "full_path", ",", "traceback", ".", "format_exc", "(", ")", ")", ")", "assert", "index_no", "!=", "number_of_paths", "-", "1", ",", "errmsg", ".", "format", "(", "activity", "=", "activity", ",", "paths", "=", "'\\n >>> '", ".", "join", "(", "set", "(", "fpths", ")", ")", ",", "errors", "=", "'\\n\\n'", ".", "join", "(", "errors", ")", ")", "except", ":", "self", ".", "current", ".", "log", ".", "exception", "(", "\"Cannot found the %s\"", "%", "activity", ")" ]
Iterates through all the enabled `~zengine.settings.ACTIVITY_MODULES_IMPORT_PATHS` to find the given path.
[ "Iterates", "trough", "the", "all", "enabled", "~zengine", ".", "settings", ".", "ACTIVITY_MODULES_IMPORT_PATHS", "to", "find", "the", "given", "path", "." ]
python
train
58.888889
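The import-and-resolve core, reduced to a standalone sketch with importlib; the real method additionally distinguishes module-level callables from class methods (the `look4kls` loop) and builds a much richer error report:

import importlib

def load_activity_sketch(paths, activity):
    errors = []
    for base in paths:
        full_path = "%s.%s" % (base, activity)
        module_path, _, attr = full_path.rpartition(".")
        try:
            return getattr(importlib.import_module(module_path), attr)
        except (ImportError, AttributeError) as exc:
            errors.append("%s: %s" % (full_path, exc))
    raise ImportError("%s not found:\n%s" % (activity, "\n".join(errors)))

# load_activity_sketch(["myproj.activities", "zengine.lib.activities"], "send_mail")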
Qiskit/qiskit-api-py
IBMQuantumExperience/IBMQuantumExperience.py
https://github.com/Qiskit/qiskit-api-py/blob/2ab240110fb7e653254e44c4833f3643e8ae7f0f/IBMQuantumExperience/IBMQuantumExperience.py#L734-L769
def get_job(self, id_job, hub=None, group=None, project=None, access_token=None, user_id=None): """ Get the information about a job, by its id """ if access_token: self.req.credential.set_token(access_token) if user_id: self.req.credential.set_user_id(user_id) if not self.check_credentials(): respond = {} respond["status"] = 'Error' respond["error"] = "Not credentials valid" return respond if not id_job: respond = {} respond["status"] = 'Error' respond["error"] = "Job ID not specified" return respond url = get_job_url(self.config, hub, group, project) url += '/' + id_job job = self.req.get(url) if 'qasms' in job: for qasm in job['qasms']: if ('result' in qasm) and ('data' in qasm['result']): qasm['data'] = qasm['result']['data'] del qasm['result']['data'] for key in qasm['result']: qasm['data'][key] = qasm['result'][key] del qasm['result'] return job
[ "def", "get_job", "(", "self", ",", "id_job", ",", "hub", "=", "None", ",", "group", "=", "None", ",", "project", "=", "None", ",", "access_token", "=", "None", ",", "user_id", "=", "None", ")", ":", "if", "access_token", ":", "self", ".", "req", ".", "credential", ".", "set_token", "(", "access_token", ")", "if", "user_id", ":", "self", ".", "req", ".", "credential", ".", "set_user_id", "(", "user_id", ")", "if", "not", "self", ".", "check_credentials", "(", ")", ":", "respond", "=", "{", "}", "respond", "[", "\"status\"", "]", "=", "'Error'", "respond", "[", "\"error\"", "]", "=", "\"Not credentials valid\"", "return", "respond", "if", "not", "id_job", ":", "respond", "=", "{", "}", "respond", "[", "\"status\"", "]", "=", "'Error'", "respond", "[", "\"error\"", "]", "=", "\"Job ID not specified\"", "return", "respond", "url", "=", "get_job_url", "(", "self", ".", "config", ",", "hub", ",", "group", ",", "project", ")", "url", "+=", "'/'", "+", "id_job", "job", "=", "self", ".", "req", ".", "get", "(", "url", ")", "if", "'qasms'", "in", "job", ":", "for", "qasm", "in", "job", "[", "'qasms'", "]", ":", "if", "(", "'result'", "in", "qasm", ")", "and", "(", "'data'", "in", "qasm", "[", "'result'", "]", ")", ":", "qasm", "[", "'data'", "]", "=", "qasm", "[", "'result'", "]", "[", "'data'", "]", "del", "qasm", "[", "'result'", "]", "[", "'data'", "]", "for", "key", "in", "qasm", "[", "'result'", "]", ":", "qasm", "[", "'data'", "]", "[", "key", "]", "=", "qasm", "[", "'result'", "]", "[", "key", "]", "del", "qasm", "[", "'result'", "]", "return", "job" ]
Get the information about a job, by its id
[ "Get", "the", "information", "about", "a", "job", "by", "its", "id" ]
python
train
33.25
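The result-flattening step at the end of `get_job`, isolated into a runnable demo: each qasm's nested 'result' dict is folded up into a top-level 'data' dict.

def flatten_qasm_results(job):
    # Equivalent to the loop above: hoist result['data'], then merge the rest.
    for qasm in job.get("qasms", []):
        if "result" in qasm and "data" in qasm["result"]:
            qasm["data"] = qasm["result"].pop("data")
            qasm["data"].update(qasm.pop("result"))
    return job

job = {"qasms": [{"result": {"data": {"counts": {"00": 512}}, "time": 1.2}}]}
print(flatten_qasm_results(job))
# {'qasms': [{'data': {'counts': {'00': 512}, 'time': 1.2}}]}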
dag/flask-zodb
flask_zodb.py
https://github.com/dag/flask-zodb/blob/c5451aba28cd5b303c71654b7ef2b62edb08afe8/flask_zodb.py#L48-L57
def close_db(self, exception): """Added as a `~flask.Flask.teardown_request` to applications to commit the transaction and disconnect ZODB if it was used during the request.""" if self.is_connected: if exception is None and not transaction.isDoomed(): transaction.commit() else: transaction.abort() self.connection.close()
[ "def", "close_db", "(", "self", ",", "exception", ")", ":", "if", "self", ".", "is_connected", ":", "if", "exception", "is", "None", "and", "not", "transaction", ".", "isDoomed", "(", ")", ":", "transaction", ".", "commit", "(", ")", "else", ":", "transaction", ".", "abort", "(", ")", "self", ".", "connection", ".", "close", "(", ")" ]
Added as a `~flask.Flask.teardown_request` to applications to commit the transaction and disconnect ZODB if it was used during the request.
[ "Added", "as", "a", "~flask", ".", "Flask", ".", "teardown_request", "to", "applications", "to", "commit", "the", "transaction", "and", "disconnect", "ZODB", "if", "it", "was", "used", "during", "the", "request", "." ]
python
train
41.3
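How such a hook is wired into an application, as a hedged sketch; this shows the registration pattern only, not the extension's actual code, and the ZODB connection bookkeeping is omitted:

import transaction
from flask import Flask

app = Flask(__name__)

@app.teardown_request
def close_db(exception):
    # Commit on clean requests; abort on errors or doomed transactions.
    if exception is None and not transaction.isDoomed():
        transaction.commit()
    else:
        transaction.abort()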
stanfordnlp/stanza
stanza/research/learner.py
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/learner.py#L153-L164
def load(self, infile): ''' Deserialize a model from a stored file. By default, unpickle an entire object. If `dump` is overridden to use a different storage format, `load` should be as well. :param file infile: A file-like object from which to retrieve the serialized model. ''' model = pickle.load(infile) self.__dict__.update(model.__dict__)
[ "def", "load", "(", "self", ",", "infile", ")", ":", "model", "=", "pickle", ".", "load", "(", "infile", ")", "self", ".", "__dict__", ".", "update", "(", "model", ".", "__dict__", ")" ]
Deserialize a model from a stored file. By default, unpickle an entire object. If `dump` is overridden to use a different storage format, `load` should be as well. :param file infile: A file-like object from which to retrieve the serialized model.
[ "Deserialize", "a", "model", "from", "a", "stored", "file", "." ]
python
train
34.333333
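A round-trip demo of the `dump`/`load` pairing the docstring alludes to, on a toy learner (the `dump` counterpart is assumed to be a plain pickle.dump, as the docstring suggests):

import io
import pickle

class Learner(object):
    def dump(self, outfile):
        pickle.dump(self, outfile)

    def load(self, infile):
        model = pickle.load(infile)
        self.__dict__.update(model.__dict__)

trained = Learner()
trained.weights = [0.1, 0.2]
buf = io.BytesIO()
trained.dump(buf)
buf.seek(0)

fresh = Learner()
fresh.load(buf)
print(fresh.weights)  # [0.1, 0.2]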
mila/pyoo
pyoo.py
https://github.com/mila/pyoo/blob/1e024999f608c87ea72cd443e39c89eb0ba3cc62/pyoo.py#L1776-L1786
def date_to_number(self, date): """ Converts a date or datetime instance to a corresponding float value. """ if isinstance(date, datetime.datetime): delta = date - self._null_date elif isinstance(date, datetime.date): delta = date - self._null_date.date() else: raise TypeError(date) return delta.days + delta.seconds / (24.0 * 60 * 60)
[ "def", "date_to_number", "(", "self", ",", "date", ")", ":", "if", "isinstance", "(", "date", ",", "datetime", ".", "datetime", ")", ":", "delta", "=", "date", "-", "self", ".", "_null_date", "elif", "isinstance", "(", "date", ",", "datetime", ".", "date", ")", ":", "delta", "=", "date", "-", "self", ".", "_null_date", ".", "date", "(", ")", "else", ":", "raise", "TypeError", "(", "date", ")", "return", "delta", ".", "days", "+", "delta", ".", "seconds", "/", "(", "24.0", "*", "60", "*", "60", ")" ]
Converts a date or datetime instance to a corresponding float value.
[ "Converts", "a", "date", "or", "datetime", "instance", "to", "a", "corresponding", "float", "value", "." ]
python
train
38
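A standalone check of the conversion, assuming the common spreadsheet null date of 1899-12-30 (the real class presumably reads `_null_date` from the document's settings):

import datetime

null_date = datetime.datetime(1899, 12, 30)

def date_to_number(date):
    if isinstance(date, datetime.datetime):
        delta = date - null_date
    elif isinstance(date, datetime.date):
        delta = date - null_date.date()
    else:
        raise TypeError(date)
    return delta.days + delta.seconds / (24.0 * 60 * 60)

print(date_to_number(datetime.date(1900, 1, 1)))          # 2
print(date_to_number(datetime.datetime(1900, 1, 1, 12)))  # 2.5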
jam31118/vis
vis/layout.py
https://github.com/jam31118/vis/blob/965ebec102c539b323d5756fef04153ac71e50d9/vis/layout.py#L59-L71
def get_text_position_and_inner_alignment(ax, pos, scale=default_text_relative_padding, with_transAxes_kwargs=True): """Return text position and its alignment in its bounding box. The returned position is given in Axes coordinate, as defined in matplotlib documentation on transformation. The returned alignment is given in a dictionary, which can be put as a fontdict to a text-relevant method. """ xy = get_text_position_in_ax_coord(ax, pos, scale=scale) alignment_fontdict = get_text_alignment(pos) if with_transAxes_kwargs: alignment_fontdict = {**alignment_fontdict, **{'transform': ax.transAxes}} return xy, alignment_fontdict
[ "def", "get_text_position_and_inner_alignment", "(", "ax", ",", "pos", ",", "scale", "=", "default_text_relative_padding", ",", "with_transAxes_kwargs", "=", "True", ")", ":", "xy", "=", "get_text_position_in_ax_coord", "(", "ax", ",", "pos", ",", "scale", "=", "scale", ")", "alignment_fontdict", "=", "get_text_alignment", "(", "pos", ")", "if", "with_transAxes_kwargs", ":", "alignment_fontdict", "=", "{", "*", "*", "alignment_fontdict", ",", "*", "*", "{", "'transform'", ":", "ax", ".", "transAxes", "}", "}", "return", "xy", ",", "alignment_fontdict" ]
Return text position and its alignment in its bounding box. The returned position is given in Axes coordinate, as defined in matplotlib documentation on transformation. The returned alignment is given in a dictionary, which can be put as a fontdict to a text-relevant method.
[ "Return", "text", "position", "and", "its", "alignment", "in", "its", "bounding", "box", ".", "The", "returned", "position", "is", "given", "in", "Axes", "coordinate", "as", "defined", "in", "matplotlib", "documentation", "on", "transformation", "." ]
python
train
50.692308
Telefonica/toolium
toolium/config_parser.py
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/config_parser.py#L75-L82
def update_properties(self, new_properties): """ Update config property values Property name must be equal to 'Section_option' of config property :param new_properties: dict with new property values """ [self._update_property_from_dict(section, option, new_properties) for section in self.sections() for option in self.options(section)]
[ "def", "update_properties", "(", "self", ",", "new_properties", ")", ":", "[", "self", ".", "_update_property_from_dict", "(", "section", ",", "option", ",", "new_properties", ")", "for", "section", "in", "self", ".", "sections", "(", ")", "for", "option", "in", "self", ".", "options", "(", "section", ")", "]" ]
Update config property values Property name must be equal to 'Section_option' of config property :param new_properties: dict with new property values
[ "Update", "config", "properties", "values", "Property", "name", "must", "be", "equal", "to", "Section_option", "of", "config", "property" ]
python
train
47.875
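The 'Section_option' convention in action with a plain ConfigParser; the loop body here is a guess at what the private `_update_property_from_dict` helper does:

from configparser import ConfigParser

config = ConfigParser()
config.read_dict({"Driver": {"type": "firefox"}})

new_properties = {"Driver_type": "chrome"}   # '<Section>_<option>' keys
for section in config.sections():
    for option in config.options(section):
        key = "%s_%s" % (section, option)
        if key in new_properties:
            config.set(section, option, new_properties[key])

print(config.get("Driver", "type"))          # chrome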
Azure/blobxfer
blobxfer/operations/upload.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/operations/upload.py#L369-L397
def _process_transfer(self, ud, ase, offsets, data): # type: (Uploader, blobxfer.models.upload.Descriptor, # blobxfer.models.azure.StorageEntity, # blobxfer.models.upload.Offsets, bytes) -> None """Process transfer instructions :param Uploader self: this :param blobxfer.models.upload.Descriptor ud: upload descriptor :param blobxfer.models.azure.StorageEntity ase: Storage entity :param blobxfer.models.upload.Offsets offsets: offsets :param bytes data: data to upload """ # issue put range self._put_data(ud, ase, offsets, data) # accounting with self._transfer_lock: if ud.local_path.use_stdin: self._upload_bytes_total += offsets.num_bytes elif offsets.chunk_num == 0: self._upload_bytes_total += ase.size self._upload_bytes_sofar += offsets.num_bytes self._transfer_set.remove( blobxfer.operations.upload.Uploader.create_unique_transfer_id( ud.local_path, ase, offsets)) ud.complete_offset_upload(offsets.chunk_num) # add descriptor back to upload queue only for append blobs if ud.entity.mode == blobxfer.models.azure.StorageModes.Append: self._upload_queue.put(ud) # update progress bar self._update_progress_bar(stdin=ud.local_path.use_stdin)
[ "def", "_process_transfer", "(", "self", ",", "ud", ",", "ase", ",", "offsets", ",", "data", ")", ":", "# type: (Uploader, blobxfer.models.upload.Descriptor,", "# blobxfer.models.azure.StorageEntity,", "# blobxfer.models.upload.Offsets, bytes) -> None", "# issue put range", "self", ".", "_put_data", "(", "ud", ",", "ase", ",", "offsets", ",", "data", ")", "# accounting", "with", "self", ".", "_transfer_lock", ":", "if", "ud", ".", "local_path", ".", "use_stdin", ":", "self", ".", "_upload_bytes_total", "+=", "offsets", ".", "num_bytes", "elif", "offsets", ".", "chunk_num", "==", "0", ":", "self", ".", "_upload_bytes_total", "+=", "ase", ".", "size", "self", ".", "_upload_bytes_sofar", "+=", "offsets", ".", "num_bytes", "self", ".", "_transfer_set", ".", "remove", "(", "blobxfer", ".", "operations", ".", "upload", ".", "Uploader", ".", "create_unique_transfer_id", "(", "ud", ".", "local_path", ",", "ase", ",", "offsets", ")", ")", "ud", ".", "complete_offset_upload", "(", "offsets", ".", "chunk_num", ")", "# add descriptor back to upload queue only for append blobs", "if", "ud", ".", "entity", ".", "mode", "==", "blobxfer", ".", "models", ".", "azure", ".", "StorageModes", ".", "Append", ":", "self", ".", "_upload_queue", ".", "put", "(", "ud", ")", "# update progress bar", "self", ".", "_update_progress_bar", "(", "stdin", "=", "ud", ".", "local_path", ".", "use_stdin", ")" ]
Process transfer instructions :param Uploader self: this :param blobxfer.models.upload.Descriptor ud: upload descriptor :param blobxfer.models.azure.StorageEntity ase: Storage entity :param blobxfer.models.upload.Offsets offsets: offsets :param bytes data: data to upload
[ "Process", "transfer", "instructions", ":", "param", "Uploader", "self", ":", "this", ":", "param", "blobxfer", ".", "models", ".", "upload", ".", "Descriptor", "ud", ":", "upload", "descriptor", ":", "param", "blobxfer", ".", "models", ".", "azure", ".", "StorageEntity", "ase", ":", "Storage", "entity", ":", "param", "blobxfer", ".", "models", ".", "upload", ".", "Offsets", "offsets", ":", "offsets", ":", "param", "bytes", "data", ":", "data", "to", "upload" ]
python
train
48.827586
mayfield/shellish
shellish/layout/table.py
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L207-L238
def attach_arguments(cls, parser, prefix='--', skip_formats=False, format_excludes=None, format_title=None, format_desc=None, skip_render=False, render_excludes=None, render_title=None, render_desc=None, skip_filters=False, filter_excludes=None, filter_title=None, filter_desc=None): """ Attach argparse arguments to an argparse parser/group with table options. These are renderer options and filtering options with the ability to turn off headers and footers. The return value is a function that parses an argparse.Namespace object into keyword arguments for a layout.Table constructor. """ convs = [] if not skip_formats: attach = cls.attach_format_arguments convs.append(attach(parser, prefix, format_excludes, format_title, format_desc)) if not skip_render: attach = cls.attach_render_arguments convs.append(attach(parser, prefix, render_excludes, render_title, render_desc)) if not skip_filters: attach = cls.attach_filter_arguments convs.append(attach(parser, prefix, filter_excludes, filter_title, filter_desc)) def argparse_ns_to_table_opts(ns): options = {} for conv in convs: options.update(conv(ns)) return options return argparse_ns_to_table_opts
[ "def", "attach_arguments", "(", "cls", ",", "parser", ",", "prefix", "=", "'--'", ",", "skip_formats", "=", "False", ",", "format_excludes", "=", "None", ",", "format_title", "=", "None", ",", "format_desc", "=", "None", ",", "skip_render", "=", "False", ",", "render_excludes", "=", "None", ",", "render_title", "=", "None", ",", "render_desc", "=", "None", ",", "skip_filters", "=", "False", ",", "filter_excludes", "=", "None", ",", "filter_title", "=", "None", ",", "filter_desc", "=", "None", ")", ":", "convs", "=", "[", "]", "if", "not", "skip_formats", ":", "attach", "=", "cls", ".", "attach_format_arguments", "convs", ".", "append", "(", "attach", "(", "parser", ",", "prefix", ",", "format_excludes", ",", "format_title", ",", "format_desc", ")", ")", "if", "not", "skip_render", ":", "attach", "=", "cls", ".", "attach_render_arguments", "convs", ".", "append", "(", "attach", "(", "parser", ",", "prefix", ",", "render_excludes", ",", "render_title", ",", "render_desc", ")", ")", "if", "not", "skip_filters", ":", "attach", "=", "cls", ".", "attach_filter_arguments", "convs", ".", "append", "(", "attach", "(", "parser", ",", "prefix", ",", "filter_excludes", ",", "filter_title", ",", "filter_desc", ")", ")", "def", "argparse_ns_to_table_opts", "(", "ns", ")", ":", "options", "=", "{", "}", "for", "conv", "in", "convs", ":", "options", ".", "update", "(", "conv", "(", "ns", ")", ")", "return", "options", "return", "argparse_ns_to_table_opts" ]
Attach argparse arguments to an argparse parser/group with table options. These are renderer options and filtering options with the ability to turn off headers and footers. The return value is a function that parses an argparse.Namespace object into keyword arguments for a layout.Table constructor.
[ "Attach", "argparse", "arguments", "to", "an", "argparse", "parser", "/", "group", "with", "table", "options", ".", "These", "are", "renderer", "options", "and", "filtering", "options", "with", "the", "ability", "to", "turn", "off", "headers", "and", "footers", ".", "The", "return", "value", "is", "function", "that", "parses", "an", "argparse", ".", "Namespace", "object", "into", "keyword", "arguments", "for", "a", "layout", ".", "Table", "constructor", "." ]
python
train
49.5625
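The compose-converters pattern in isolation, as a toy sketch: each attach step registers its flags and hands back a namespace-to-kwargs function, and the final wrapper merges their outputs (flag names here are invented for the demo):

import argparse

def attach_render_arguments(parser):
    parser.add_argument("--table-width", type=int)
    def conv(ns):
        return {} if ns.table_width is None else {"width": ns.table_width}
    return conv

def attach_filter_arguments(parser):
    parser.add_argument("--filter")
    def conv(ns):
        return {} if ns.filter is None else {"filters": [ns.filter]}
    return conv

parser = argparse.ArgumentParser()
convs = [attach_render_arguments(parser), attach_filter_arguments(parser)]

ns = parser.parse_args(["--table-width", "80"])
options = {}
for conv in convs:
    options.update(conv(ns))
print(options)  # {'width': 80}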