column              type      values
repo                string    lengths 7 to 55
path                string    lengths 4 to 223
url                 string    lengths 87 to 315
code                string    lengths 75 to 104k
code_tokens         list
docstring           string    lengths 1 to 46.9k
docstring_tokens    list
language            string    1 distinct value
partition           string    3 distinct values
avg_line_len        float64   7.91 to 980
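The rows below follow this schema, one field per line in column order (repo, path, url, code, code_tokens, docstring, docstring_tokens, language, partition, avg_line_len). As a hedged sketch only, records with these columns could be loaded and inspected with the Hugging Face datasets library; the file name "train.jsonl" is a placeholder, not the actual source of this dump.

# Sketch: load a CodeSearchNet-style export with the columns listed above.
# "train.jsonl" is a hypothetical path; substitute the real export file.
from datasets import load_dataset

ds = load_dataset("json", data_files={"train": "train.jsonl"})["train"]

for row in ds.select(range(3)):
    # Each record pairs a function's source with its docstring and token lists.
    print(row["repo"], row["path"], row["partition"])
    print(row["url"])
    print("code tokens:", len(row["code_tokens"]),
          "docstring tokens:", len(row["docstring_tokens"]),
          "avg line len:", row["avg_line_len"])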
brian-rose/climlab
climlab/process/process.py
https://github.com/brian-rose/climlab/blob/eae188a2ae9308229b8cbb8fe0b65f51b50ee1e6/climlab/process/process.py#L642-L660
def lat_bounds(self): """Latitude of grid interfaces (degrees North) :getter: Returns the bounds of axis ``'lat'`` if availible in the process's domains. :type: array :raises: :exc:`ValueError` if no ``'lat'`` axis can be found. """ try: for domname, dom in self.domains.items(): try: thislat = dom.axes['lat'].bounds except: pass return thislat except: raise ValueError('Can\'t resolve a lat axis.')
[ "def", "lat_bounds", "(", "self", ")", ":", "try", ":", "for", "domname", ",", "dom", "in", "self", ".", "domains", ".", "items", "(", ")", ":", "try", ":", "thislat", "=", "dom", ".", "axes", "[", "'lat'", "]", ".", "bounds", "except", ":", "pass", "return", "thislat", "except", ":", "raise", "ValueError", "(", "'Can\\'t resolve a lat axis.'", ")" ]
Latitude of grid interfaces (degrees North) :getter: Returns the bounds of axis ``'lat'`` if availible in the process's domains. :type: array :raises: :exc:`ValueError` if no ``'lat'`` axis can be found.
[ "Latitude", "of", "grid", "interfaces", "(", "degrees", "North", ")" ]
python
train
31.368421
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/batch_v2alpha1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/batch_v2alpha1_api.py#L755-L779
def patch_namespaced_cron_job(self, name, namespace, body, **kwargs): # noqa: E501 """patch_namespaced_cron_job # noqa: E501 partially update the specified CronJob # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_cron_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the CronJob (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param UNKNOWN_BASE_TYPE body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V2alpha1CronJob If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs) # noqa: E501 else: (data) = self.patch_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs) # noqa: E501 return data
[ "def", "patch_namespaced_cron_job", "(", "self", ",", "name", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "patch_namespaced_cron_job_with_http_info", "(", "name", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "patch_namespaced_cron_job_with_http_info", "(", "name", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
patch_namespaced_cron_job # noqa: E501 partially update the specified CronJob # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_cron_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the CronJob (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param UNKNOWN_BASE_TYPE body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V2alpha1CronJob If the method is called asynchronously, returns the request thread.
[ "patch_namespaced_cron_job", "#", "noqa", ":", "E501" ]
python
train
60.64
woolfson-group/isambard
isambard/add_ons/filesystem.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/add_ons/filesystem.py#L489-L504
def current_codes_from_pdb(): """ Get list of all PDB codes currently listed in the PDB. Returns ------- pdb_codes : list(str) List of PDB codes (in lower case). """ url = 'http://www.rcsb.org/pdb/rest/getCurrent' r = requests.get(url) if r.status_code == 200: pdb_codes = [x.lower() for x in r.text.split('"') if len(x) == 4] else: print('Request for {0} failed with status code {1}'.format(url, r.status_code)) return return pdb_codes
[ "def", "current_codes_from_pdb", "(", ")", ":", "url", "=", "'http://www.rcsb.org/pdb/rest/getCurrent'", "r", "=", "requests", ".", "get", "(", "url", ")", "if", "r", ".", "status_code", "==", "200", ":", "pdb_codes", "=", "[", "x", ".", "lower", "(", ")", "for", "x", "in", "r", ".", "text", ".", "split", "(", "'\"'", ")", "if", "len", "(", "x", ")", "==", "4", "]", "else", ":", "print", "(", "'Request for {0} failed with status code {1}'", ".", "format", "(", "url", ",", "r", ".", "status_code", ")", ")", "return", "return", "pdb_codes" ]
Get list of all PDB codes currently listed in the PDB. Returns ------- pdb_codes : list(str) List of PDB codes (in lower case).
[ "Get", "list", "of", "all", "PDB", "codes", "currently", "listed", "in", "the", "PDB", "." ]
python
train
30.875
mardix/Mocha
mocha/contrib/auth/decorators.py
https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/contrib/auth/decorators.py#L152-L166
def accepts_admin_roles(func): """ Decorator that accepts only admin roles :param func: :return: """ if inspect.isclass(func): apply_function_to_members(func, accepts_admin_roles) return func else: @functools.wraps(func) def decorator(*args, **kwargs): return accepts_roles(*ROLES_ADMIN)(func)(*args, **kwargs) return decorator
[ "def", "accepts_admin_roles", "(", "func", ")", ":", "if", "inspect", ".", "isclass", "(", "func", ")", ":", "apply_function_to_members", "(", "func", ",", "accepts_admin_roles", ")", "return", "func", "else", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "decorator", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "accepts_roles", "(", "*", "ROLES_ADMIN", ")", "(", "func", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "decorator" ]
Decorator that accepts only admin roles :param func: :return:
[ "Decorator", "that", "accepts", "only", "admin", "roles", ":", "param", "func", ":", ":", "return", ":" ]
python
train
26.266667
lesscpy/lesscpy
lesscpy/plib/identifier.py
https://github.com/lesscpy/lesscpy/blob/51e392fb4a3cd4ccfb6175e0e42ce7d2f6b78126/lesscpy/plib/identifier.py#L146-L155
def raw(self, clean=False): """Raw identifier. args: clean (bool): clean name returns: str """ if clean: return ''.join(''.join(p) for p in self.parsed).replace('?', ' ') return '%'.join('%'.join(p) for p in self.parsed).strip().strip('%')
[ "def", "raw", "(", "self", ",", "clean", "=", "False", ")", ":", "if", "clean", ":", "return", "''", ".", "join", "(", "''", ".", "join", "(", "p", ")", "for", "p", "in", "self", ".", "parsed", ")", ".", "replace", "(", "'?'", ",", "' '", ")", "return", "'%'", ".", "join", "(", "'%'", ".", "join", "(", "p", ")", "for", "p", "in", "self", ".", "parsed", ")", ".", "strip", "(", ")", ".", "strip", "(", "'%'", ")" ]
Raw identifier. args: clean (bool): clean name returns: str
[ "Raw", "identifier", ".", "args", ":", "clean", "(", "bool", ")", ":", "clean", "name", "returns", ":", "str" ]
python
valid
31.4
hyperledger-archives/indy-anoncreds
anoncreds/protocol/issuer.py
https://github.com/hyperledger-archives/indy-anoncreds/blob/9d9cda3d505c312257d99a13d74d8f05dac3091a/anoncreds/protocol/issuer.py#L134-L146
async def issueClaims(self, allClaimRequest: Dict[ID, ClaimRequest]) -> \ Dict[ID, Claims]: """ Issue claims for the given users and schemas. :param allClaimRequest: a map of schema ID to a claim request containing prover ID and prover-generated values :return: The claims (both primary and non-revocation) """ res = {} for schemaId, claimReq in allClaimRequest.items(): res[schemaId] = await self.issueClaim(schemaId, claimReq) return res
[ "async", "def", "issueClaims", "(", "self", ",", "allClaimRequest", ":", "Dict", "[", "ID", ",", "ClaimRequest", "]", ")", "->", "Dict", "[", "ID", ",", "Claims", "]", ":", "res", "=", "{", "}", "for", "schemaId", ",", "claimReq", "in", "allClaimRequest", ".", "items", "(", ")", ":", "res", "[", "schemaId", "]", "=", "await", "self", ".", "issueClaim", "(", "schemaId", ",", "claimReq", ")", "return", "res" ]
Issue claims for the given users and schemas. :param allClaimRequest: a map of schema ID to a claim request containing prover ID and prover-generated values :return: The claims (both primary and non-revocation)
[ "Issue", "claims", "for", "the", "given", "users", "and", "schemas", "." ]
python
train
40.307692
gwastro/pycbc
pycbc/noise/gaussian.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/noise/gaussian.py#L75-L127
def noise_from_psd(length, delta_t, psd, seed=None): """ Create noise with a given psd. Return noise with a given psd. Note that if unique noise is desired a unique seed should be provided. Parameters ---------- length : int The length of noise to generate in samples. delta_t : float The time step of the noise. psd : FrequencySeries The noise weighting to color the noise. seed : {0, int} The seed to generate the noise. Returns -------- noise : TimeSeries A TimeSeries containing gaussian noise colored by the given psd. """ noise_ts = TimeSeries(zeros(length), delta_t=delta_t) if seed is None: seed = numpy.random.randint(2**32) randomness = lal.gsl_rng("ranlux", seed) N = int (1.0 / delta_t / psd.delta_f) n = N//2+1 stride = N//2 if n > len(psd): raise ValueError("PSD not compatible with requested delta_t") psd = (psd[0:n]).lal() psd.data.data[n-1] = 0 segment = TimeSeries(zeros(N), delta_t=delta_t).lal() length_generated = 0 SimNoise(segment, 0, psd, randomness) while (length_generated < length): if (length_generated + stride) < length: noise_ts.data[length_generated:length_generated+stride] = segment.data.data[0:stride] else: noise_ts.data[length_generated:length] = segment.data.data[0:length-length_generated] length_generated += stride SimNoise(segment, stride, psd, randomness) return noise_ts
[ "def", "noise_from_psd", "(", "length", ",", "delta_t", ",", "psd", ",", "seed", "=", "None", ")", ":", "noise_ts", "=", "TimeSeries", "(", "zeros", "(", "length", ")", ",", "delta_t", "=", "delta_t", ")", "if", "seed", "is", "None", ":", "seed", "=", "numpy", ".", "random", ".", "randint", "(", "2", "**", "32", ")", "randomness", "=", "lal", ".", "gsl_rng", "(", "\"ranlux\"", ",", "seed", ")", "N", "=", "int", "(", "1.0", "/", "delta_t", "/", "psd", ".", "delta_f", ")", "n", "=", "N", "//", "2", "+", "1", "stride", "=", "N", "//", "2", "if", "n", ">", "len", "(", "psd", ")", ":", "raise", "ValueError", "(", "\"PSD not compatible with requested delta_t\"", ")", "psd", "=", "(", "psd", "[", "0", ":", "n", "]", ")", ".", "lal", "(", ")", "psd", ".", "data", ".", "data", "[", "n", "-", "1", "]", "=", "0", "segment", "=", "TimeSeries", "(", "zeros", "(", "N", ")", ",", "delta_t", "=", "delta_t", ")", ".", "lal", "(", ")", "length_generated", "=", "0", "SimNoise", "(", "segment", ",", "0", ",", "psd", ",", "randomness", ")", "while", "(", "length_generated", "<", "length", ")", ":", "if", "(", "length_generated", "+", "stride", ")", "<", "length", ":", "noise_ts", ".", "data", "[", "length_generated", ":", "length_generated", "+", "stride", "]", "=", "segment", ".", "data", ".", "data", "[", "0", ":", "stride", "]", "else", ":", "noise_ts", ".", "data", "[", "length_generated", ":", "length", "]", "=", "segment", ".", "data", ".", "data", "[", "0", ":", "length", "-", "length_generated", "]", "length_generated", "+=", "stride", "SimNoise", "(", "segment", ",", "stride", ",", "psd", ",", "randomness", ")", "return", "noise_ts" ]
Create noise with a given psd. Return noise with a given psd. Note that if unique noise is desired a unique seed should be provided. Parameters ---------- length : int The length of noise to generate in samples. delta_t : float The time step of the noise. psd : FrequencySeries The noise weighting to color the noise. seed : {0, int} The seed to generate the noise. Returns -------- noise : TimeSeries A TimeSeries containing gaussian noise colored by the given psd.
[ "Create", "noise", "with", "a", "given", "psd", "." ]
python
train
28.207547
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L1043-L1126
def get_host_info_dict_from_describe_dict(self, describe_dict): ''' Parses the dictionary returned by the API call into a flat list of parameters. This method should be used only when 'describe' is used directly because Boto doesn't provide specific classes. ''' # I really don't agree with prefixing everything with 'ec2' # because EC2, RDS and ElastiCache are different services. # I'm just following the pattern used until now to not break any # compatibility. host_info = {} for key in describe_dict: value = describe_dict[key] key = self.to_safe('ec2_' + self.uncammelize(key)) # Handle complex types # Target: Memcached Cache Clusters if key == 'ec2_configuration_endpoint' and value: host_info['ec2_configuration_endpoint_address'] = value['Address'] host_info['ec2_configuration_endpoint_port'] = value['Port'] # Target: Cache Nodes and Redis Cache Clusters (single node) if key == 'ec2_endpoint' and value: host_info['ec2_endpoint_address'] = value['Address'] host_info['ec2_endpoint_port'] = value['Port'] # Target: Redis Replication Groups if key == 'ec2_node_groups' and value: host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] replica_count = 0 for node in value[0]['NodeGroupMembers']: if node['CurrentRole'] == 'primary': host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address'] host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] elif node['CurrentRole'] == 'replica': host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address'] host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port'] host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId'] replica_count += 1 # Target: Redis Replication Groups if key == 'ec2_member_clusters' and value: host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) # Target: All Cache Clusters elif key == 'ec2_cache_parameter_group': host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']]) host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] # Target: Almost everything elif key == 'ec2_security_groups': # Skip if SecurityGroups is None # (it is possible to have the key defined but no value in it). if value is not None: sg_ids = [] for sg in value: sg_ids.append(sg['SecurityGroupId']) host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) # Target: Everything # Preserve booleans and integers elif type(value) in [int, bool]: host_info[key] = value # Target: Everything # Sanitize string values elif isinstance(value, six.string_types): host_info[key] = value.strip() # Target: Everything # Replace None by an empty string elif type(value) == type(None): host_info[key] = '' else: # Remove non-processed complex types pass return host_info
[ "def", "get_host_info_dict_from_describe_dict", "(", "self", ",", "describe_dict", ")", ":", "# I really don't agree with prefixing everything with 'ec2'", "# because EC2, RDS and ElastiCache are different services.", "# I'm just following the pattern used until now to not break any", "# compatibility.", "host_info", "=", "{", "}", "for", "key", "in", "describe_dict", ":", "value", "=", "describe_dict", "[", "key", "]", "key", "=", "self", ".", "to_safe", "(", "'ec2_'", "+", "self", ".", "uncammelize", "(", "key", ")", ")", "# Handle complex types", "# Target: Memcached Cache Clusters", "if", "key", "==", "'ec2_configuration_endpoint'", "and", "value", ":", "host_info", "[", "'ec2_configuration_endpoint_address'", "]", "=", "value", "[", "'Address'", "]", "host_info", "[", "'ec2_configuration_endpoint_port'", "]", "=", "value", "[", "'Port'", "]", "# Target: Cache Nodes and Redis Cache Clusters (single node)", "if", "key", "==", "'ec2_endpoint'", "and", "value", ":", "host_info", "[", "'ec2_endpoint_address'", "]", "=", "value", "[", "'Address'", "]", "host_info", "[", "'ec2_endpoint_port'", "]", "=", "value", "[", "'Port'", "]", "# Target: Redis Replication Groups", "if", "key", "==", "'ec2_node_groups'", "and", "value", ":", "host_info", "[", "'ec2_endpoint_address'", "]", "=", "value", "[", "0", "]", "[", "'PrimaryEndpoint'", "]", "[", "'Address'", "]", "host_info", "[", "'ec2_endpoint_port'", "]", "=", "value", "[", "0", "]", "[", "'PrimaryEndpoint'", "]", "[", "'Port'", "]", "replica_count", "=", "0", "for", "node", "in", "value", "[", "0", "]", "[", "'NodeGroupMembers'", "]", ":", "if", "node", "[", "'CurrentRole'", "]", "==", "'primary'", ":", "host_info", "[", "'ec2_primary_cluster_address'", "]", "=", "node", "[", "'ReadEndpoint'", "]", "[", "'Address'", "]", "host_info", "[", "'ec2_primary_cluster_port'", "]", "=", "node", "[", "'ReadEndpoint'", "]", "[", "'Port'", "]", "host_info", "[", "'ec2_primary_cluster_id'", "]", "=", "node", "[", "'CacheClusterId'", "]", "elif", "node", "[", "'CurrentRole'", "]", "==", "'replica'", ":", "host_info", "[", "'ec2_replica_cluster_address_'", "+", "str", "(", "replica_count", ")", "]", "=", "node", "[", "'ReadEndpoint'", "]", "[", "'Address'", "]", "host_info", "[", "'ec2_replica_cluster_port_'", "+", "str", "(", "replica_count", ")", "]", "=", "node", "[", "'ReadEndpoint'", "]", "[", "'Port'", "]", "host_info", "[", "'ec2_replica_cluster_id_'", "+", "str", "(", "replica_count", ")", "]", "=", "node", "[", "'CacheClusterId'", "]", "replica_count", "+=", "1", "# Target: Redis Replication Groups", "if", "key", "==", "'ec2_member_clusters'", "and", "value", ":", "host_info", "[", "'ec2_member_clusters'", "]", "=", "','", ".", "join", "(", "[", "str", "(", "i", ")", "for", "i", "in", "value", "]", ")", "# Target: All Cache Clusters", "elif", "key", "==", "'ec2_cache_parameter_group'", ":", "host_info", "[", "\"ec2_cache_node_ids_to_reboot\"", "]", "=", "','", ".", "join", "(", "[", "str", "(", "i", ")", "for", "i", "in", "value", "[", "'CacheNodeIdsToReboot'", "]", "]", ")", "host_info", "[", "'ec2_cache_parameter_group_name'", "]", "=", "value", "[", "'CacheParameterGroupName'", "]", "host_info", "[", "'ec2_cache_parameter_apply_status'", "]", "=", "value", "[", "'ParameterApplyStatus'", "]", "# Target: Almost everything", "elif", "key", "==", "'ec2_security_groups'", ":", "# Skip if SecurityGroups is None", "# (it is possible to have the key defined but no value in it).", "if", "value", "is", "not", "None", ":", "sg_ids", "=", "[", "]", "for", 
"sg", "in", "value", ":", "sg_ids", ".", "append", "(", "sg", "[", "'SecurityGroupId'", "]", ")", "host_info", "[", "\"ec2_security_group_ids\"", "]", "=", "','", ".", "join", "(", "[", "str", "(", "i", ")", "for", "i", "in", "sg_ids", "]", ")", "# Target: Everything", "# Preserve booleans and integers", "elif", "type", "(", "value", ")", "in", "[", "int", ",", "bool", "]", ":", "host_info", "[", "key", "]", "=", "value", "# Target: Everything", "# Sanitize string values", "elif", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "host_info", "[", "key", "]", "=", "value", ".", "strip", "(", ")", "# Target: Everything", "# Replace None by an empty string", "elif", "type", "(", "value", ")", "==", "type", "(", "None", ")", ":", "host_info", "[", "key", "]", "=", "''", "else", ":", "# Remove non-processed complex types", "pass", "return", "host_info" ]
Parses the dictionary returned by the API call into a flat list of parameters. This method should be used only when 'describe' is used directly because Boto doesn't provide specific classes.
[ "Parses", "the", "dictionary", "returned", "by", "the", "API", "call", "into", "a", "flat", "list", "of", "parameters", ".", "This", "method", "should", "be", "used", "only", "when", "describe", "is", "used", "directly", "because", "Boto", "doesn", "t", "provide", "specific", "classes", "." ]
python
train
47.5
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L23682-L23720
def move_to(self, location): """Changes the location of this medium. Some medium types may support changing the storage unit location by simply changing the value of the associated property. In this case the operation is performed immediately, and @a progress is returning a @c null reference. Otherwise on success there is a progress object returned, which signals progress and completion of the operation. This distinction is necessary because for some formats the operation is very fast, while for others it can be very slow (moving the image file by copying all data), and in the former case it'd be a waste of resources to create a progress object which will immediately signal completion. When setting a location for a medium which corresponds to a/several regular file(s) in the host's file system, the given file name may be either relative to the :py:func:`IVirtualBox.home_folder` VirtualBox home folder or absolute. Note that if the given location specification does not contain the file extension part then a proper default extension will be automatically appended by the implementation depending on the medium type. in location of type str New location. return progress of type :class:`IProgress` Progress object to track the operation completion. raises :class:`OleErrorNotimpl` The operation is not implemented yet. raises :class:`VBoxErrorNotSupported` Medium format does not support changing the location. """ if not isinstance(location, basestring): raise TypeError("location can only be an instance of type basestring") progress = self._call("moveTo", in_p=[location]) progress = IProgress(progress) return progress
[ "def", "move_to", "(", "self", ",", "location", ")", ":", "if", "not", "isinstance", "(", "location", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"location can only be an instance of type basestring\"", ")", "progress", "=", "self", ".", "_call", "(", "\"moveTo\"", ",", "in_p", "=", "[", "location", "]", ")", "progress", "=", "IProgress", "(", "progress", ")", "return", "progress" ]
Changes the location of this medium. Some medium types may support changing the storage unit location by simply changing the value of the associated property. In this case the operation is performed immediately, and @a progress is returning a @c null reference. Otherwise on success there is a progress object returned, which signals progress and completion of the operation. This distinction is necessary because for some formats the operation is very fast, while for others it can be very slow (moving the image file by copying all data), and in the former case it'd be a waste of resources to create a progress object which will immediately signal completion. When setting a location for a medium which corresponds to a/several regular file(s) in the host's file system, the given file name may be either relative to the :py:func:`IVirtualBox.home_folder` VirtualBox home folder or absolute. Note that if the given location specification does not contain the file extension part then a proper default extension will be automatically appended by the implementation depending on the medium type. in location of type str New location. return progress of type :class:`IProgress` Progress object to track the operation completion. raises :class:`OleErrorNotimpl` The operation is not implemented yet. raises :class:`VBoxErrorNotSupported` Medium format does not support changing the location.
[ "Changes", "the", "location", "of", "this", "medium", ".", "Some", "medium", "types", "may", "support", "changing", "the", "storage", "unit", "location", "by", "simply", "changing", "the", "value", "of", "the", "associated", "property", ".", "In", "this", "case", "the", "operation", "is", "performed", "immediately", "and", "@a", "progress", "is", "returning", "a", "@c", "null", "reference", ".", "Otherwise", "on", "success", "there", "is", "a", "progress", "object", "returned", "which", "signals", "progress", "and", "completion", "of", "the", "operation", ".", "This", "distinction", "is", "necessary", "because", "for", "some", "formats", "the", "operation", "is", "very", "fast", "while", "for", "others", "it", "can", "be", "very", "slow", "(", "moving", "the", "image", "file", "by", "copying", "all", "data", ")", "and", "in", "the", "former", "case", "it", "d", "be", "a", "waste", "of", "resources", "to", "create", "a", "progress", "object", "which", "will", "immediately", "signal", "completion", ".", "When", "setting", "a", "location", "for", "a", "medium", "which", "corresponds", "to", "a", "/", "several", "regular", "file", "(", "s", ")", "in", "the", "host", "s", "file", "system", "the", "given", "file", "name", "may", "be", "either", "relative", "to", "the", ":", "py", ":", "func", ":", "IVirtualBox", ".", "home_folder", "VirtualBox", "home", "folder", "or", "absolute", ".", "Note", "that", "if", "the", "given", "location", "specification", "does", "not", "contain", "the", "file", "extension", "part", "then", "a", "proper", "default", "extension", "will", "be", "automatically", "appended", "by", "the", "implementation", "depending", "on", "the", "medium", "type", "." ]
python
train
49.102564
google/textfsm
textfsm/parser.py
https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L814-L859
def _ValidateFSM(self): """Checks state names and destinations for validity. Each destination state must exist, be a valid name and not be a reserved name. There must be a 'Start' state and if 'EOF' or 'End' states are specified, they must be empty. Returns: True if FSM is valid. Raises: TextFSMTemplateError: If any state definitions are invalid. """ # Must have 'Start' state. if 'Start' not in self.states: raise TextFSMTemplateError("Missing state 'Start'.") # 'End/EOF' state (if specified) must be empty. if self.states.get('End'): raise TextFSMTemplateError("Non-Empty 'End' state.") if self.states.get('EOF'): raise TextFSMTemplateError("Non-Empty 'EOF' state.") # Remove 'End' state. if 'End' in self.states: del self.states['End'] self.state_list.remove('End') # Ensure jump states are all valid. for state in self.states: for rule in self.states[state]: if rule.line_op == 'Error': continue if not rule.new_state or rule.new_state in ('End', 'EOF'): continue if rule.new_state not in self.states: raise TextFSMTemplateError( "State '%s' not found, referenced in state '%s'" % (rule.new_state, state)) return True
[ "def", "_ValidateFSM", "(", "self", ")", ":", "# Must have 'Start' state.", "if", "'Start'", "not", "in", "self", ".", "states", ":", "raise", "TextFSMTemplateError", "(", "\"Missing state 'Start'.\"", ")", "# 'End/EOF' state (if specified) must be empty.", "if", "self", ".", "states", ".", "get", "(", "'End'", ")", ":", "raise", "TextFSMTemplateError", "(", "\"Non-Empty 'End' state.\"", ")", "if", "self", ".", "states", ".", "get", "(", "'EOF'", ")", ":", "raise", "TextFSMTemplateError", "(", "\"Non-Empty 'EOF' state.\"", ")", "# Remove 'End' state.", "if", "'End'", "in", "self", ".", "states", ":", "del", "self", ".", "states", "[", "'End'", "]", "self", ".", "state_list", ".", "remove", "(", "'End'", ")", "# Ensure jump states are all valid.", "for", "state", "in", "self", ".", "states", ":", "for", "rule", "in", "self", ".", "states", "[", "state", "]", ":", "if", "rule", ".", "line_op", "==", "'Error'", ":", "continue", "if", "not", "rule", ".", "new_state", "or", "rule", ".", "new_state", "in", "(", "'End'", ",", "'EOF'", ")", ":", "continue", "if", "rule", ".", "new_state", "not", "in", "self", ".", "states", ":", "raise", "TextFSMTemplateError", "(", "\"State '%s' not found, referenced in state '%s'\"", "%", "(", "rule", ".", "new_state", ",", "state", ")", ")", "return", "True" ]
Checks state names and destinations for validity. Each destination state must exist, be a valid name and not be a reserved name. There must be a 'Start' state and if 'EOF' or 'End' states are specified, they must be empty. Returns: True if FSM is valid. Raises: TextFSMTemplateError: If any state definitions are invalid.
[ "Checks", "state", "names", "and", "destinations", "for", "validity", "." ]
python
train
28.086957
rbw/pysnow
pysnow/params_builder.py
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/params_builder.py#L70-L79
def display_value(self, value): """Sets `sysparm_display_value` :param value: Bool or 'all' """ if not (isinstance(value, bool) or value == 'all'): raise InvalidUsage("Display value can be of type bool or value 'all'") self._sysparms['sysparm_display_value'] = value
[ "def", "display_value", "(", "self", ",", "value", ")", ":", "if", "not", "(", "isinstance", "(", "value", ",", "bool", ")", "or", "value", "==", "'all'", ")", ":", "raise", "InvalidUsage", "(", "\"Display value can be of type bool or value 'all'\"", ")", "self", ".", "_sysparms", "[", "'sysparm_display_value'", "]", "=", "value" ]
Sets `sysparm_display_value` :param value: Bool or 'all'
[ "Sets", "sysparm_display_value" ]
python
train
31.3
hyperledger/indy-node
environment/vagrant/sandbox/DevelopmentEnvironment/common/indypool.py
https://github.com/hyperledger/indy-node/blob/8fabd364eaf7d940a56df2911d9215b1e512a2de/environment/vagrant/sandbox/DevelopmentEnvironment/common/indypool.py#L174-L242
def startIndyPool(**kwargs): '''Start the indy_pool docker container iff it is not already running. See <indy-sdk>/ci/indy-pool.dockerfile for details. Idempotent. Simply ensures that the indy_pool container is up and running.''' # TODO: Decide if we need a separate docker container for testing and one for # development. The indy_sdk tests setup and teardown "indy_pool" on # ports 9701-9708. Perhaps we need an "indy_dev_pool" on 9709-9716? I'm # not quite sure where all of our dependencies are on port 9701-9708. # If the test harness (mocks) are hardcoding 9701-9708, then an # indy_dev_pool on different ports will not work. print("Starting indy_pool ...") # Check if indy_pool is running if containerIsRunning("indy_pool"): print("... already running") exit(0) else: # If the container already exists and isn't running, force remove it and # readd it. This is brute force, but is sufficient and simple. container = getContainer("indy_pool") if container: container.remove(force=True) # Build and run indy_pool if it is not already running # Build the indy_pool image from the dockerfile in: # /vagrant/indy-sdk/ci/indy-pool.dockerfile # # In shell using docker cli: # cd /vagrant/indy-sdk # sudo docker build -f ci/indy-pool.dockerfile -t indy_pool . # # NOTE: https://jira.hyperledger.org/browse/IS-406 prevents indy_pool from # starting on the `rc` branch. Apply the patch in the Jira issue to # overcome this problem. try: # indy-sdk won't be in /vagrant if the indy-sdk is cloned to a directory outside # the Vagrant project. Regardless of where indy-sdk is cloned, it will be found # in /src/indy-sdk in the Vagrant VM. image = getImage(path="/src/indy-sdk", dockerfile="ci/indy-pool.dockerfile", tag="indy_pool") except TypeError as exc: image = getImage(path="/vagrant/indy-sdk", dockerfile="ci/indy-pool.dockerfile", tag="indy_pool") except: print("Failed to find indy-pool.dockerfile in /vagrant/indy-sdk or /src/indy-sdk") # Run a container using the image # # In shell using docker cli: # sudo docker run -itd -p 9701-9708:9701-9708 indy_pool # # NOTE: {'2222/tcp': 3333} is sufficient. A tuple of (address, port) if you # want to specify the host interface. container = runContainer(image, ports={ '9701/tcp': ('0.0.0.0', 9701), '9702/tcp': ('0.0.0.0', 9702), '9703/tcp': ('0.0.0.0', 9703), '9704/tcp': ('0.0.0.0', 9704), '9705/tcp': ('0.0.0.0', 9705), '9706/tcp': ('0.0.0.0', 9706), '9707/tcp': ('0.0.0.0', 9707), '9708/tcp': ('0.0.0.0', 9708) }, detach=True, name="indy_pool" ) print("...started")
[ "def", "startIndyPool", "(", "*", "*", "kwargs", ")", ":", "# TODO: Decide if we need a separate docker container for testing and one for", "# development. The indy_sdk tests setup and teardown \"indy_pool\" on", "# ports 9701-9708. Perhaps we need an \"indy_dev_pool\" on 9709-9716? I'm", "# not quite sure where all of our dependencies are on port 9701-9708.", "# If the test harness (mocks) are hardcoding 9701-9708, then an", "# indy_dev_pool on different ports will not work.", "print", "(", "\"Starting indy_pool ...\"", ")", "# Check if indy_pool is running", "if", "containerIsRunning", "(", "\"indy_pool\"", ")", ":", "print", "(", "\"... already running\"", ")", "exit", "(", "0", ")", "else", ":", "# If the container already exists and isn't running, force remove it and", "# readd it. This is brute force, but is sufficient and simple.", "container", "=", "getContainer", "(", "\"indy_pool\"", ")", "if", "container", ":", "container", ".", "remove", "(", "force", "=", "True", ")", "# Build and run indy_pool if it is not already running", "# Build the indy_pool image from the dockerfile in:", "# /vagrant/indy-sdk/ci/indy-pool.dockerfile", "# ", "# In shell using docker cli:", "# cd /vagrant/indy-sdk", "# sudo docker build -f ci/indy-pool.dockerfile -t indy_pool .", "#", "# NOTE: https://jira.hyperledger.org/browse/IS-406 prevents indy_pool from", "# starting on the `rc` branch. Apply the patch in the Jira issue to", "# overcome this problem.", "try", ":", "# indy-sdk won't be in /vagrant if the indy-sdk is cloned to a directory outside", "# the Vagrant project. Regardless of where indy-sdk is cloned, it will be found", "# in /src/indy-sdk in the Vagrant VM.", "image", "=", "getImage", "(", "path", "=", "\"/src/indy-sdk\"", ",", "dockerfile", "=", "\"ci/indy-pool.dockerfile\"", ",", "tag", "=", "\"indy_pool\"", ")", "except", "TypeError", "as", "exc", ":", "image", "=", "getImage", "(", "path", "=", "\"/vagrant/indy-sdk\"", ",", "dockerfile", "=", "\"ci/indy-pool.dockerfile\"", ",", "tag", "=", "\"indy_pool\"", ")", "except", ":", "print", "(", "\"Failed to find indy-pool.dockerfile in /vagrant/indy-sdk or /src/indy-sdk\"", ")", "# Run a container using the image", "#", "# In shell using docker cli:", "# sudo docker run -itd -p 9701-9708:9701-9708 indy_pool", "#", "# NOTE: {'2222/tcp': 3333} is sufficient. A tuple of (address, port) if you", "# want to specify the host interface. ", "container", "=", "runContainer", "(", "image", ",", "ports", "=", "{", "'9701/tcp'", ":", "(", "'0.0.0.0'", ",", "9701", ")", ",", "'9702/tcp'", ":", "(", "'0.0.0.0'", ",", "9702", ")", ",", "'9703/tcp'", ":", "(", "'0.0.0.0'", ",", "9703", ")", ",", "'9704/tcp'", ":", "(", "'0.0.0.0'", ",", "9704", ")", ",", "'9705/tcp'", ":", "(", "'0.0.0.0'", ",", "9705", ")", ",", "'9706/tcp'", ":", "(", "'0.0.0.0'", ",", "9706", ")", ",", "'9707/tcp'", ":", "(", "'0.0.0.0'", ",", "9707", ")", ",", "'9708/tcp'", ":", "(", "'0.0.0.0'", ",", "9708", ")", "}", ",", "detach", "=", "True", ",", "name", "=", "\"indy_pool\"", ")", "print", "(", "\"...started\"", ")" ]
Start the indy_pool docker container iff it is not already running. See <indy-sdk>/ci/indy-pool.dockerfile for details. Idempotent. Simply ensures that the indy_pool container is up and running.
[ "Start", "the", "indy_pool", "docker", "container", "iff", "it", "is", "not", "already", "running", ".", "See", "<indy", "-", "sdk", ">", "/", "ci", "/", "indy", "-", "pool", ".", "dockerfile", "for", "details", ".", "Idempotent", ".", "Simply", "ensures", "that", "the", "indy_pool", "container", "is", "up", "and", "running", "." ]
python
train
41.130435
johnbywater/eventsourcing
eventsourcing/infrastructure/eventstore.py
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/eventstore.py#L23-L27
def get_domain_events(self, originator_id, gt=None, gte=None, lt=None, lte=None, limit=None, is_ascending=True, page_size=None): """ Returns domain events for given entity ID. """
[ "def", "get_domain_events", "(", "self", ",", "originator_id", ",", "gt", "=", "None", ",", "gte", "=", "None", ",", "lt", "=", "None", ",", "lte", "=", "None", ",", "limit", "=", "None", ",", "is_ascending", "=", "True", ",", "page_size", "=", "None", ")", ":" ]
Returns domain events for given entity ID.
[ "Returns", "domain", "events", "for", "given", "entity", "ID", "." ]
python
train
45
mapnik/Cascadenik
cascadenik/compile.py
https://github.com/mapnik/Cascadenik/blob/82f66859340a31dfcb24af127274f262d4f3ad85/cascadenik/compile.py#L104-L107
def url2fs(url): """ encode a URL to be safe as a filename """ uri, extension = posixpath.splitext(url) return safe64.dir(uri) + extension
[ "def", "url2fs", "(", "url", ")", ":", "uri", ",", "extension", "=", "posixpath", ".", "splitext", "(", "url", ")", "return", "safe64", ".", "dir", "(", "uri", ")", "+", "extension" ]
encode a URL to be safe as a filename
[ "encode", "a", "URL", "to", "be", "safe", "as", "a", "filename" ]
python
train
36.75
google/textfsm
textfsm/texttable.py
https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/texttable.py#L612-L620
def _SetTable(self, table): """Sets table, with column headers and separators.""" if not isinstance(table, TextTable): raise TypeError('Not an instance of TextTable.') self.Reset() self._table = copy.deepcopy(table._table) # pylint: disable=W0212 # Point parent table of each row back ourselves. for row in self: row.table = self
[ "def", "_SetTable", "(", "self", ",", "table", ")", ":", "if", "not", "isinstance", "(", "table", ",", "TextTable", ")", ":", "raise", "TypeError", "(", "'Not an instance of TextTable.'", ")", "self", ".", "Reset", "(", ")", "self", ".", "_table", "=", "copy", ".", "deepcopy", "(", "table", ".", "_table", ")", "# pylint: disable=W0212", "# Point parent table of each row back ourselves.", "for", "row", "in", "self", ":", "row", ".", "table", "=", "self" ]
Sets table, with column headers and separators.
[ "Sets", "table", "with", "column", "headers", "and", "separators", "." ]
python
train
39.888889
secdev/scapy
scapy/arch/windows/__init__.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/arch/windows/__init__.py#L686-L694
def dev_from_name(self, name): """Return the first pcap device name for a given Windows device name. """ try: return next(iface for iface in six.itervalues(self) if (iface.name == name or iface.description == name)) except (StopIteration, RuntimeError): raise ValueError("Unknown network interface %r" % name)
[ "def", "dev_from_name", "(", "self", ",", "name", ")", ":", "try", ":", "return", "next", "(", "iface", "for", "iface", "in", "six", ".", "itervalues", "(", "self", ")", "if", "(", "iface", ".", "name", "==", "name", "or", "iface", ".", "description", "==", "name", ")", ")", "except", "(", "StopIteration", ",", "RuntimeError", ")", ":", "raise", "ValueError", "(", "\"Unknown network interface %r\"", "%", "name", ")" ]
Return the first pcap device name for a given Windows device name.
[ "Return", "the", "first", "pcap", "device", "name", "for", "a", "given", "Windows", "device", "name", "." ]
python
train
43.222222
revelc/pyaccumulo
pyaccumulo/__init__.py
https://github.com/revelc/pyaccumulo/blob/8adcf535bb82ba69c749efce785c9efc487e85de/pyaccumulo/__init__.py#L271-L280
def add_mutations_and_flush(self, table, muts): """ Add mutations to a table without the need to create and manage a batch writer. """ if not isinstance(muts, list) and not isinstance(muts, tuple): muts = [muts] cells = {} for mut in muts: cells.setdefault(mut.row, []).extend(mut.updates) self.client.updateAndFlush(self.login, table, cells)
[ "def", "add_mutations_and_flush", "(", "self", ",", "table", ",", "muts", ")", ":", "if", "not", "isinstance", "(", "muts", ",", "list", ")", "and", "not", "isinstance", "(", "muts", ",", "tuple", ")", ":", "muts", "=", "[", "muts", "]", "cells", "=", "{", "}", "for", "mut", "in", "muts", ":", "cells", ".", "setdefault", "(", "mut", ".", "row", ",", "[", "]", ")", ".", "extend", "(", "mut", ".", "updates", ")", "self", ".", "client", ".", "updateAndFlush", "(", "self", ".", "login", ",", "table", ",", "cells", ")" ]
Add mutations to a table without the need to create and manage a batch writer.
[ "Add", "mutations", "to", "a", "table", "without", "the", "need", "to", "create", "and", "manage", "a", "batch", "writer", "." ]
python
train
41.3
CI-WATER/gsshapy
gsshapy/lib/spn_chunk.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/spn_chunk.py#L27-L43
def sjuncChunk(key, chunk): """ Parse Super Junction (SJUNC) Chunk Method """ schunk = chunk[0].strip().split() result = {'sjuncNumber': schunk[1], 'groundSurfaceElev': schunk[2], 'invertElev': schunk[3], 'manholeSA': schunk[4], 'inletCode': schunk[5], 'linkOrCellI': schunk[6], 'nodeOrCellJ': schunk[7], 'weirSideLength': schunk[8], 'orificeDiameter': schunk[9]} return result
[ "def", "sjuncChunk", "(", "key", ",", "chunk", ")", ":", "schunk", "=", "chunk", "[", "0", "]", ".", "strip", "(", ")", ".", "split", "(", ")", "result", "=", "{", "'sjuncNumber'", ":", "schunk", "[", "1", "]", ",", "'groundSurfaceElev'", ":", "schunk", "[", "2", "]", ",", "'invertElev'", ":", "schunk", "[", "3", "]", ",", "'manholeSA'", ":", "schunk", "[", "4", "]", ",", "'inletCode'", ":", "schunk", "[", "5", "]", ",", "'linkOrCellI'", ":", "schunk", "[", "6", "]", ",", "'nodeOrCellJ'", ":", "schunk", "[", "7", "]", ",", "'weirSideLength'", ":", "schunk", "[", "8", "]", ",", "'orificeDiameter'", ":", "schunk", "[", "9", "]", "}", "return", "result" ]
Parse Super Junction (SJUNC) Chunk Method
[ "Parse", "Super", "Junction", "(", "SJUNC", ")", "Chunk", "Method" ]
python
train
29.352941
fossasia/AYABInterface
AYABInterface/communication/hardware_messages.py
https://github.com/fossasia/AYABInterface/blob/e2065eed8daf17b2936f6ca5e488c9bfb850914e/AYABInterface/communication/hardware_messages.py#L258-L261
def _init(self): """Read the success byte.""" self._api_version = self._file.read(1)[0] self._firmware_version = FirmwareVersion(*self._file.read(2))
[ "def", "_init", "(", "self", ")", ":", "self", ".", "_api_version", "=", "self", ".", "_file", ".", "read", "(", "1", ")", "[", "0", "]", "self", ".", "_firmware_version", "=", "FirmwareVersion", "(", "*", "self", ".", "_file", ".", "read", "(", "2", ")", ")" ]
Read the success byte.
[ "Read", "the", "success", "byte", "." ]
python
train
42.5
soasme/rio
rio/models/utils.py
https://github.com/soasme/rio/blob/f722eb0ff4b0382bceaff77737f0b87cb78429e7/rio/models/utils.py#L105-L121
def get_instance_by_slug(model, slug, **kwargs): """Get an instance by slug. :param model: a string, model name in rio.models :param slug: a string used to query by `slug`. This requires there is a slug field in model definition. :return: None or a SQLAlchemy Model instance. """ try: model = get_model(model) except ImportError: return None query_params = dict(kwargs) query_params['slug'] = slug return model.query.filter_by(**query_params).first()
[ "def", "get_instance_by_slug", "(", "model", ",", "slug", ",", "*", "*", "kwargs", ")", ":", "try", ":", "model", "=", "get_model", "(", "model", ")", "except", "ImportError", ":", "return", "None", "query_params", "=", "dict", "(", "kwargs", ")", "query_params", "[", "'slug'", "]", "=", "slug", "return", "model", ".", "query", ".", "filter_by", "(", "*", "*", "query_params", ")", ".", "first", "(", ")" ]
Get an instance by slug. :param model: a string, model name in rio.models :param slug: a string used to query by `slug`. This requires there is a slug field in model definition. :return: None or a SQLAlchemy Model instance.
[ "Get", "an", "instance", "by", "slug", "." ]
python
train
30
casacore/python-casacore
casacore/util/substitute.py
https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/util/substitute.py#L47-L59
def getvariable(name): """Get the value of a local variable somewhere in the call stack.""" import inspect fr = inspect.currentframe() try: while fr: fr = fr.f_back vars = fr.f_locals if name in vars: return vars[name] except: pass return None
[ "def", "getvariable", "(", "name", ")", ":", "import", "inspect", "fr", "=", "inspect", ".", "currentframe", "(", ")", "try", ":", "while", "fr", ":", "fr", "=", "fr", ".", "f_back", "vars", "=", "fr", ".", "f_locals", "if", "name", "in", "vars", ":", "return", "vars", "[", "name", "]", "except", ":", "pass", "return", "None" ]
Get the value of a local variable somewhere in the call stack.
[ "Get", "the", "value", "of", "a", "local", "variable", "somewhere", "in", "the", "call", "stack", "." ]
python
train
24.846154
ph4r05/monero-serialize
monero_serialize/xmrserialize.py
https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrserialize.py#L659-L671
async def _dump_message_field(self, writer, msg, field, fvalue=None): """ Dumps a message field to the writer. Field is defined by the message field specification. :param writer: :param msg: :param field: :param fvalue: :return: """ fname, ftype, params = field[0], field[1], field[2:] fvalue = getattr(msg, fname, None) if fvalue is None else fvalue await self.dump_field(writer, fvalue, ftype, params)
[ "async", "def", "_dump_message_field", "(", "self", ",", "writer", ",", "msg", ",", "field", ",", "fvalue", "=", "None", ")", ":", "fname", ",", "ftype", ",", "params", "=", "field", "[", "0", "]", ",", "field", "[", "1", "]", ",", "field", "[", "2", ":", "]", "fvalue", "=", "getattr", "(", "msg", ",", "fname", ",", "None", ")", "if", "fvalue", "is", "None", "else", "fvalue", "await", "self", ".", "dump_field", "(", "writer", ",", "fvalue", ",", "ftype", ",", "params", ")" ]
Dumps a message field to the writer. Field is defined by the message field specification. :param writer: :param msg: :param field: :param fvalue: :return:
[ "Dumps", "a", "message", "field", "to", "the", "writer", ".", "Field", "is", "defined", "by", "the", "message", "field", "specification", "." ]
python
train
36.923077
nicolargo/glances
glances/plugins/glances_processlist.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_processlist.py#L477-L565
def __msg_curse_sum(self, ret, sep_char='_', mmm=None, args=None): """ Build the sum message (only when filter is on) and add it to the ret dict. * ret: list of string where the message is added * sep_char: define the line separation char * mmm: display min, max, mean or current (if mmm=None) * args: Glances args """ ret.append(self.curse_new_line()) if mmm is None: ret.append(self.curse_add_line(sep_char * 69)) ret.append(self.curse_new_line()) # CPU percent sum msg = self.layout_stat['cpu'].format(self.__sum_stats('cpu_percent', mmm=mmm)) ret.append(self.curse_add_line(msg, decoration=self.__mmm_deco(mmm))) # MEM percent sum msg = self.layout_stat['mem'].format(self.__sum_stats('memory_percent', mmm=mmm)) ret.append(self.curse_add_line(msg, decoration=self.__mmm_deco(mmm))) # VIRT and RES memory sum if 'memory_info' in self.stats[0] and self.stats[0]['memory_info'] is not None and self.stats[0]['memory_info'] != '': # VMS msg = self.layout_stat['virt'].format(self.auto_unit(self.__sum_stats('memory_info', indice=1, mmm=mmm), low_precision=False)) ret.append(self.curse_add_line(msg, decoration=self.__mmm_deco(mmm), optional=True)) # RSS msg = self.layout_stat['res'].format(self.auto_unit(self.__sum_stats('memory_info', indice=0, mmm=mmm), low_precision=False)) ret.append(self.curse_add_line(msg, decoration=self.__mmm_deco(mmm), optional=True)) else: msg = self.layout_header['virt'].format('') ret.append(self.curse_add_line(msg)) msg = self.layout_header['res'].format('') ret.append(self.curse_add_line(msg)) # PID msg = self.layout_header['pid'].format('', width=self.__max_pid_size()) ret.append(self.curse_add_line(msg)) # USER msg = self.layout_header['user'].format('') ret.append(self.curse_add_line(msg)) # TIME+ msg = self.layout_header['time'].format('') ret.append(self.curse_add_line(msg, optional=True)) # THREAD msg = self.layout_header['thread'].format('') ret.append(self.curse_add_line(msg)) # NICE msg = self.layout_header['nice'].format('') ret.append(self.curse_add_line(msg)) # STATUS msg = self.layout_header['status'].format('') ret.append(self.curse_add_line(msg)) # IO read/write if 'io_counters' in self.stats[0] and mmm is None: # IO read io_rs = int((self.__sum_stats('io_counters', 0) - self.__sum_stats('io_counters', indice=2, mmm=mmm)) / self.stats[0]['time_since_update']) if io_rs == 0: msg = self.layout_stat['ior'].format('0') else: msg = self.layout_stat['ior'].format(self.auto_unit(io_rs, low_precision=True)) ret.append(self.curse_add_line(msg, decoration=self.__mmm_deco(mmm), optional=True, additional=True)) # IO write io_ws = int((self.__sum_stats('io_counters', 1) - self.__sum_stats('io_counters', indice=3, mmm=mmm)) / self.stats[0]['time_since_update']) if io_ws == 0: msg = self.layout_stat['iow'].format('0') else: msg = self.layout_stat['iow'].format(self.auto_unit(io_ws, low_precision=True)) ret.append(self.curse_add_line(msg, decoration=self.__mmm_deco(mmm), optional=True, additional=True)) else: msg = self.layout_header['ior'].format('') ret.append(self.curse_add_line(msg, optional=True, additional=True)) msg = self.layout_header['iow'].format('') ret.append(self.curse_add_line(msg, optional=True, additional=True)) if mmm is None: msg = ' < {}'.format('current') ret.append(self.curse_add_line(msg, optional=True)) else: msg = ' < {}'.format(mmm) ret.append(self.curse_add_line(msg, optional=True)) msg = ' (\'M\' to reset)' ret.append(self.curse_add_line(msg, optional=True))
[ "def", "__msg_curse_sum", "(", "self", ",", "ret", ",", "sep_char", "=", "'_'", ",", "mmm", "=", "None", ",", "args", "=", "None", ")", ":", "ret", ".", "append", "(", "self", ".", "curse_new_line", "(", ")", ")", "if", "mmm", "is", "None", ":", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "sep_char", "*", "69", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_new_line", "(", ")", ")", "# CPU percent sum", "msg", "=", "self", ".", "layout_stat", "[", "'cpu'", "]", ".", "format", "(", "self", ".", "__sum_stats", "(", "'cpu_percent'", ",", "mmm", "=", "mmm", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "decoration", "=", "self", ".", "__mmm_deco", "(", "mmm", ")", ")", ")", "# MEM percent sum", "msg", "=", "self", ".", "layout_stat", "[", "'mem'", "]", ".", "format", "(", "self", ".", "__sum_stats", "(", "'memory_percent'", ",", "mmm", "=", "mmm", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "decoration", "=", "self", ".", "__mmm_deco", "(", "mmm", ")", ")", ")", "# VIRT and RES memory sum", "if", "'memory_info'", "in", "self", ".", "stats", "[", "0", "]", "and", "self", ".", "stats", "[", "0", "]", "[", "'memory_info'", "]", "is", "not", "None", "and", "self", ".", "stats", "[", "0", "]", "[", "'memory_info'", "]", "!=", "''", ":", "# VMS", "msg", "=", "self", ".", "layout_stat", "[", "'virt'", "]", ".", "format", "(", "self", ".", "auto_unit", "(", "self", ".", "__sum_stats", "(", "'memory_info'", ",", "indice", "=", "1", ",", "mmm", "=", "mmm", ")", ",", "low_precision", "=", "False", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "decoration", "=", "self", ".", "__mmm_deco", "(", "mmm", ")", ",", "optional", "=", "True", ")", ")", "# RSS", "msg", "=", "self", ".", "layout_stat", "[", "'res'", "]", ".", "format", "(", "self", ".", "auto_unit", "(", "self", ".", "__sum_stats", "(", "'memory_info'", ",", "indice", "=", "0", ",", "mmm", "=", "mmm", ")", ",", "low_precision", "=", "False", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "decoration", "=", "self", ".", "__mmm_deco", "(", "mmm", ")", ",", "optional", "=", "True", ")", ")", "else", ":", "msg", "=", "self", ".", "layout_header", "[", "'virt'", "]", ".", "format", "(", "''", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "msg", "=", "self", ".", "layout_header", "[", "'res'", "]", ".", "format", "(", "''", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "# PID", "msg", "=", "self", ".", "layout_header", "[", "'pid'", "]", ".", "format", "(", "''", ",", "width", "=", "self", ".", "__max_pid_size", "(", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "# USER", "msg", "=", "self", ".", "layout_header", "[", "'user'", "]", ".", "format", "(", "''", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "# TIME+", "msg", "=", "self", ".", "layout_header", "[", "'time'", "]", ".", "format", "(", "''", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "optional", "=", "True", ")", ")", "# THREAD", "msg", "=", "self", ".", "layout_header", "[", "'thread'", "]", ".", "format", "(", "''", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "# NICE", "msg", "=", "self", ".", "layout_header", "[", "'nice'", "]", ".", "format", "(", "''", ")", "ret", ".", 
"append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "# STATUS", "msg", "=", "self", ".", "layout_header", "[", "'status'", "]", ".", "format", "(", "''", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "# IO read/write", "if", "'io_counters'", "in", "self", ".", "stats", "[", "0", "]", "and", "mmm", "is", "None", ":", "# IO read", "io_rs", "=", "int", "(", "(", "self", ".", "__sum_stats", "(", "'io_counters'", ",", "0", ")", "-", "self", ".", "__sum_stats", "(", "'io_counters'", ",", "indice", "=", "2", ",", "mmm", "=", "mmm", ")", ")", "/", "self", ".", "stats", "[", "0", "]", "[", "'time_since_update'", "]", ")", "if", "io_rs", "==", "0", ":", "msg", "=", "self", ".", "layout_stat", "[", "'ior'", "]", ".", "format", "(", "'0'", ")", "else", ":", "msg", "=", "self", ".", "layout_stat", "[", "'ior'", "]", ".", "format", "(", "self", ".", "auto_unit", "(", "io_rs", ",", "low_precision", "=", "True", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "decoration", "=", "self", ".", "__mmm_deco", "(", "mmm", ")", ",", "optional", "=", "True", ",", "additional", "=", "True", ")", ")", "# IO write", "io_ws", "=", "int", "(", "(", "self", ".", "__sum_stats", "(", "'io_counters'", ",", "1", ")", "-", "self", ".", "__sum_stats", "(", "'io_counters'", ",", "indice", "=", "3", ",", "mmm", "=", "mmm", ")", ")", "/", "self", ".", "stats", "[", "0", "]", "[", "'time_since_update'", "]", ")", "if", "io_ws", "==", "0", ":", "msg", "=", "self", ".", "layout_stat", "[", "'iow'", "]", ".", "format", "(", "'0'", ")", "else", ":", "msg", "=", "self", ".", "layout_stat", "[", "'iow'", "]", ".", "format", "(", "self", ".", "auto_unit", "(", "io_ws", ",", "low_precision", "=", "True", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "decoration", "=", "self", ".", "__mmm_deco", "(", "mmm", ")", ",", "optional", "=", "True", ",", "additional", "=", "True", ")", ")", "else", ":", "msg", "=", "self", ".", "layout_header", "[", "'ior'", "]", ".", "format", "(", "''", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "optional", "=", "True", ",", "additional", "=", "True", ")", ")", "msg", "=", "self", ".", "layout_header", "[", "'iow'", "]", ".", "format", "(", "''", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "optional", "=", "True", ",", "additional", "=", "True", ")", ")", "if", "mmm", "is", "None", ":", "msg", "=", "' < {}'", ".", "format", "(", "'current'", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "optional", "=", "True", ")", ")", "else", ":", "msg", "=", "' < {}'", ".", "format", "(", "mmm", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "optional", "=", "True", ")", ")", "msg", "=", "' (\\'M\\' to reset)'", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "optional", "=", "True", ")", ")" ]
Build the sum message (only when filter is on) and add it to the ret dict. * ret: list of string where the message is added * sep_char: define the line separation char * mmm: display min, max, mean or current (if mmm=None) * args: Glances args
[ "Build", "the", "sum", "message", "(", "only", "when", "filter", "is", "on", ")", "and", "add", "it", "to", "the", "ret", "dict", "." ]
python
train
51.382022
anntzer/mplcursors
lib/mplcursors/_mplcursors.py
https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L73-L79
def _iter_axes_subartists(ax): r"""Yield all child `Artist`\s (*not* `Container`\s) of *ax*.""" yield from ax.collections yield from ax.images yield from ax.lines yield from ax.patches yield from ax.texts
[ "def", "_iter_axes_subartists", "(", "ax", ")", ":", "yield", "from", "ax", ".", "collections", "yield", "from", "ax", ".", "images", "yield", "from", "ax", ".", "lines", "yield", "from", "ax", ".", "patches", "yield", "from", "ax", ".", "texts" ]
r"""Yield all child `Artist`\s (*not* `Container`\s) of *ax*.
[ "r", "Yield", "all", "child", "Artist", "\\", "s", "(", "*", "not", "*", "Container", "\\", "s", ")", "of", "*", "ax", "*", "." ]
python
train
31.714286
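A quick way to see what the private helper above yields for a simple Axes. This sketch assumes matplotlib is installed and that _iter_axes_subartists is importable from the mplcursors._mplcursors module named in the record's path:

import matplotlib
matplotlib.use("Agg")  # headless backend so the example runs anywhere
import matplotlib.pyplot as plt
from mplcursors._mplcursors import _iter_axes_subartists  # private helper shown above

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])      # adds a Line2D to ax.lines
ax.text(0.5, 0.5, "label")   # adds a Text to ax.texts
# Containers (e.g. the result of ax.bar) are deliberately not yielded.
print([type(a).__name__ for a in _iter_axes_subartists(ax)])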
refenv/cijoe
modules/cij/fio.py
https://github.com/refenv/cijoe/blob/21d7b2ed4ff68e0a1457e7df2db27f6334f1a379/modules/cij/fio.py#L32-L44
def pkill(): """Kill all of FIO processes""" if env(): return 1 cmd = ["ps -aux | grep fio | grep -v grep"] status, _, _ = cij.ssh.command(cmd, shell=True, echo=False) if not status: status, _, _ = cij.ssh.command(["pkill -f fio"], shell=True) if status: return 1 return 0
[ "def", "pkill", "(", ")", ":", "if", "env", "(", ")", ":", "return", "1", "cmd", "=", "[", "\"ps -aux | grep fio | grep -v grep\"", "]", "status", ",", "_", ",", "_", "=", "cij", ".", "ssh", ".", "command", "(", "cmd", ",", "shell", "=", "True", ",", "echo", "=", "False", ")", "if", "not", "status", ":", "status", ",", "_", ",", "_", "=", "cij", ".", "ssh", ".", "command", "(", "[", "\"pkill -f fio\"", "]", ",", "shell", "=", "True", ")", "if", "status", ":", "return", "1", "return", "0" ]
Kill all FIO processes
[ "Kill", "all", "FIO", "processes" ]
python
valid
24.769231
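The helper above checks for running fio processes over SSH before killing them. A rough local-only sketch of the same check-then-kill pattern, using subprocess instead of cij.ssh (pgrep/pkill assumed available on the host):

import subprocess

def pkill_fio():
    # Return 0 on success, 1 on failure, mirroring the convention above.
    found = subprocess.run("pgrep -f fio", shell=True).returncode == 0
    if found:
        if subprocess.run("pkill -f fio", shell=True).returncode != 0:
            return 1
    return 0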
jic-dtool/dtoolcore
dtoolcore/storagebroker.py
https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/storagebroker.py#L448-L451
def get_size_in_bytes(self, handle): """Return the size in bytes.""" fpath = self._fpath_from_handle(handle) return os.stat(fpath).st_size
[ "def", "get_size_in_bytes", "(", "self", ",", "handle", ")", ":", "fpath", "=", "self", ".", "_fpath_from_handle", "(", "handle", ")", "return", "os", ".", "stat", "(", "fpath", ")", ".", "st_size" ]
Return the size in bytes.
[ "Return", "the", "size", "in", "bytes", "." ]
python
train
39.75
fake-name/ChromeController
ChromeController/Generator/Generated.py
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L6830-L6866
def Runtime_compileScript(self, expression, sourceURL, persistScript, **kwargs ): """ Function path: Runtime.compileScript Domain: Runtime Method name: compileScript Parameters: Required arguments: 'expression' (type: string) -> Expression to compile. 'sourceURL' (type: string) -> Source url to be set for the script. 'persistScript' (type: boolean) -> Specifies whether the compiled script should be persisted. Optional arguments: 'executionContextId' (type: ExecutionContextId) -> Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page. Returns: 'scriptId' (type: ScriptId) -> Id of the script. 'exceptionDetails' (type: ExceptionDetails) -> Exception details. Description: Compiles expression. """ assert isinstance(expression, (str,) ), "Argument 'expression' must be of type '['str']'. Received type: '%s'" % type( expression) assert isinstance(sourceURL, (str,) ), "Argument 'sourceURL' must be of type '['str']'. Received type: '%s'" % type( sourceURL) assert isinstance(persistScript, (bool,) ), "Argument 'persistScript' must be of type '['bool']'. Received type: '%s'" % type( persistScript) expected = ['executionContextId'] passed_keys = list(kwargs.keys()) assert all([(key in expected) for key in passed_keys] ), "Allowed kwargs are ['executionContextId']. Passed kwargs: %s" % passed_keys subdom_funcs = self.synchronous_command('Runtime.compileScript', expression=expression, sourceURL=sourceURL, persistScript= persistScript, **kwargs) return subdom_funcs
[ "def", "Runtime_compileScript", "(", "self", ",", "expression", ",", "sourceURL", ",", "persistScript", ",", "*", "*", "kwargs", ")", ":", "assert", "isinstance", "(", "expression", ",", "(", "str", ",", ")", ")", ",", "\"Argument 'expression' must be of type '['str']'. Received type: '%s'\"", "%", "type", "(", "expression", ")", "assert", "isinstance", "(", "sourceURL", ",", "(", "str", ",", ")", ")", ",", "\"Argument 'sourceURL' must be of type '['str']'. Received type: '%s'\"", "%", "type", "(", "sourceURL", ")", "assert", "isinstance", "(", "persistScript", ",", "(", "bool", ",", ")", ")", ",", "\"Argument 'persistScript' must be of type '['bool']'. Received type: '%s'\"", "%", "type", "(", "persistScript", ")", "expected", "=", "[", "'executionContextId'", "]", "passed_keys", "=", "list", "(", "kwargs", ".", "keys", "(", ")", ")", "assert", "all", "(", "[", "(", "key", "in", "expected", ")", "for", "key", "in", "passed_keys", "]", ")", ",", "\"Allowed kwargs are ['executionContextId']. Passed kwargs: %s\"", "%", "passed_keys", "subdom_funcs", "=", "self", ".", "synchronous_command", "(", "'Runtime.compileScript'", ",", "expression", "=", "expression", ",", "sourceURL", "=", "sourceURL", ",", "persistScript", "=", "persistScript", ",", "*", "*", "kwargs", ")", "return", "subdom_funcs" ]
Function path: Runtime.compileScript Domain: Runtime Method name: compileScript Parameters: Required arguments: 'expression' (type: string) -> Expression to compile. 'sourceURL' (type: string) -> Source url to be set for the script. 'persistScript' (type: boolean) -> Specifies whether the compiled script should be persisted. Optional arguments: 'executionContextId' (type: ExecutionContextId) -> Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page. Returns: 'scriptId' (type: ScriptId) -> Id of the script. 'exceptionDetails' (type: ExceptionDetails) -> Exception details. Description: Compiles expression.
[ "Function", "path", ":", "Runtime", ".", "compileScript", "Domain", ":", "Runtime", "Method", "name", ":", "compileScript", "Parameters", ":", "Required", "arguments", ":", "expression", "(", "type", ":", "string", ")", "-", ">", "Expression", "to", "compile", ".", "sourceURL", "(", "type", ":", "string", ")", "-", ">", "Source", "url", "to", "be", "set", "for", "the", "script", ".", "persistScript", "(", "type", ":", "boolean", ")", "-", ">", "Specifies", "whether", "the", "compiled", "script", "should", "be", "persisted", ".", "Optional", "arguments", ":", "executionContextId", "(", "type", ":", "ExecutionContextId", ")", "-", ">", "Specifies", "in", "which", "execution", "context", "to", "perform", "script", "run", ".", "If", "the", "parameter", "is", "omitted", "the", "evaluation", "will", "be", "performed", "in", "the", "context", "of", "the", "inspected", "page", ".", "Returns", ":", "scriptId", "(", "type", ":", "ScriptId", ")", "-", ">", "Id", "of", "the", "script", ".", "exceptionDetails", "(", "type", ":", "ExceptionDetails", ")", "-", ">", "Exception", "details", ".", "Description", ":", "Compiles", "expression", "." ]
python
train
45.351351
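Most of the generated wrapper above is argument validation before the command is dispatched over the DevTools protocol. A minimal standalone sketch of that validation pattern (the function and its return value here are illustrative, not part of ChromeController):

def compile_script(expression, source_url, persist_script, **kwargs):
    # Required arguments must have the documented types.
    assert isinstance(expression, str), "expression must be a str"
    assert isinstance(source_url, str), "source_url must be a str"
    assert isinstance(persist_script, bool), "persist_script must be a bool"
    # Only whitelisted optional arguments are accepted.
    allowed = {"executionContextId"}
    unexpected = set(kwargs) - allowed
    if unexpected:
        raise TypeError("unexpected kwargs: %s" % sorted(unexpected))
    # A real implementation would forward this to the DevTools endpoint.
    return {"method": "Runtime.compileScript",
            "params": dict(expression=expression, sourceURL=source_url,
                           persistScript=persist_script, **kwargs)}

print(compile_script("1 + 1", "snippet.js", False))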
csparpa/pyowm
pyowm/alertapi30/alert_manager.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/alertapi30/alert_manager.py#L105-L118
def get_trigger(self, trigger_id): """ Retrieves the named trigger from the Weather Alert API. :param trigger_id: the ID of the trigger :type trigger_id: str :return: a `pyowm.alertapi30.trigger.Trigger` instance """ assert isinstance(trigger_id, str), "Value must be a string" status, data = self.http_client.get_json( NAMED_TRIGGER_URI % trigger_id, params={'appid': self.API_key}, headers={'Content-Type': 'application/json'}) return self.trigger_parser.parse_dict(data)
[ "def", "get_trigger", "(", "self", ",", "trigger_id", ")", ":", "assert", "isinstance", "(", "trigger_id", ",", "str", ")", ",", "\"Value must be a string\"", "status", ",", "data", "=", "self", ".", "http_client", ".", "get_json", "(", "NAMED_TRIGGER_URI", "%", "trigger_id", ",", "params", "=", "{", "'appid'", ":", "self", ".", "API_key", "}", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", ")", "return", "self", ".", "trigger_parser", ".", "parse_dict", "(", "data", ")" ]
Retrieves the named trigger from the Weather Alert API. :param trigger_id: the ID of the trigger :type trigger_id: str :return: a `pyowm.alertapi30.trigger.Trigger` instance
[ "Retrieves", "the", "named", "trigger", "from", "the", "Weather", "Alert", "API", "." ]
python
train
40.642857
plivo/sharq
sharq/queue.py
https://github.com/plivo/sharq/blob/32bbfbdcbbaa8e154271ffd125ac4500382f3d19/sharq/queue.py#L63-L66
def _load_config(self): """Read the configuration file and load it into memory.""" self._config = ConfigParser.SafeConfigParser() self._config.read(self.config_path)
[ "def", "_load_config", "(", "self", ")", ":", "self", ".", "_config", "=", "ConfigParser", ".", "SafeConfigParser", "(", ")", "self", ".", "_config", ".", "read", "(", "self", ".", "config_path", ")" ]
Read the configuration file and load it into memory.
[ "Read", "the", "configuration", "file", "and", "load", "it", "into", "memory", "." ]
python
train
46.5
pypa/pipenv
pipenv/patched/notpip/_vendor/cachecontrol/controller.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/cachecontrol/controller.py#L329-L367
def update_cached_response(self, request, response): """On a 304 we will get a new set of headers that we want to update our cached value with, assuming we have one. This should only ever be called when we've sent an ETag and gotten a 304 as the response. """ cache_url = self.cache_url(request.url) cached_response = self.serializer.loads(request, self.cache.get(cache_url)) if not cached_response: # we didn't have a cached response return response # Lets update our headers with the headers from the new request: # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1 # # The server isn't supposed to send headers that would make # the cached body invalid. But... just in case, we'll be sure # to strip out ones we know that might be problmatic due to # typical assumptions. excluded_headers = ["content-length"] cached_response.headers.update( dict( (k, v) for k, v in response.headers.items() if k.lower() not in excluded_headers ) ) # we want a 200 b/c we have content via the cache cached_response.status = 200 # update our cache self.cache.set(cache_url, self.serializer.dumps(request, cached_response)) return cached_response
[ "def", "update_cached_response", "(", "self", ",", "request", ",", "response", ")", ":", "cache_url", "=", "self", ".", "cache_url", "(", "request", ".", "url", ")", "cached_response", "=", "self", ".", "serializer", ".", "loads", "(", "request", ",", "self", ".", "cache", ".", "get", "(", "cache_url", ")", ")", "if", "not", "cached_response", ":", "# we didn't have a cached response", "return", "response", "# Lets update our headers with the headers from the new request:", "# http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1", "#", "# The server isn't supposed to send headers that would make", "# the cached body invalid. But... just in case, we'll be sure", "# to strip out ones we know that might be problmatic due to", "# typical assumptions.", "excluded_headers", "=", "[", "\"content-length\"", "]", "cached_response", ".", "headers", ".", "update", "(", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "response", ".", "headers", ".", "items", "(", ")", "if", "k", ".", "lower", "(", ")", "not", "in", "excluded_headers", ")", ")", "# we want a 200 b/c we have content via the cache", "cached_response", ".", "status", "=", "200", "# update our cache", "self", ".", "cache", ".", "set", "(", "cache_url", ",", "self", ".", "serializer", ".", "dumps", "(", "request", ",", "cached_response", ")", ")", "return", "cached_response" ]
On a 304 we will get a new set of headers that we want to update our cached value with, assuming we have one. This should only ever be called when we've sent an ETag and gotten a 304 as the response.
[ "On", "a", "304", "we", "will", "get", "a", "new", "set", "of", "headers", "that", "we", "want", "to", "update", "our", "cached", "value", "with", "assuming", "we", "have", "one", "." ]
python
train
36.076923
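The core of the 304 handling above is merging fresh headers into the cached response while dropping ones (like Content-Length) that could invalidate the stored body. A small standalone sketch of just that merge step, on plain dicts:

def merge_headers(cached, fresh, excluded=("content-length",)):
    # Update cached headers with the fresh ones, skipping excluded names.
    merged = dict(cached)
    merged.update(
        (k, v) for k, v in fresh.items() if k.lower() not in excluded
    )
    return merged

cached = {"Content-Type": "text/html", "Content-Length": "120", "ETag": "abc"}
fresh = {"Content-Length": "0", "Date": "Mon, 01 Jan 2024 00:00:00 GMT"}
print(merge_headers(cached, fresh))  # Content-Length stays "120", Date is added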
coursera-dl/coursera-dl
coursera/api.py
https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/api.py#L304-L322
def create(session, course_name): """ Create an instance using a session and a course_name. @param session: Requests session. @type session: requests.Session @param course_name: Course name (slug) from course json. @type course_name: str @return: Instance of OnDemandCourseMaterialItems @rtype: OnDemandCourseMaterialItems """ dom = get_page(session, OPENCOURSE_ONDEMAND_COURSE_MATERIALS, json=True, class_name=course_name) return OnDemandCourseMaterialItemsV1( dom['linked']['onDemandCourseMaterialItems.v1'])
[ "def", "create", "(", "session", ",", "course_name", ")", ":", "dom", "=", "get_page", "(", "session", ",", "OPENCOURSE_ONDEMAND_COURSE_MATERIALS", ",", "json", "=", "True", ",", "class_name", "=", "course_name", ")", "return", "OnDemandCourseMaterialItemsV1", "(", "dom", "[", "'linked'", "]", "[", "'onDemandCourseMaterialItems.v1'", "]", ")" ]
Create an instance using a session and a course_name. @param session: Requests session. @type session: requests.Session @param course_name: Course name (slug) from course json. @type course_name: str @return: Instance of OnDemandCourseMaterialItems @rtype: OnDemandCourseMaterialItems
[ "Create", "an", "instance", "using", "a", "session", "and", "a", "course_name", "." ]
python
train
33.789474
python-wink/python-wink
src/pywink/devices/air_conditioner.py
https://github.com/python-wink/python-wink/blob/cf8bdce8c6518f30b91b23aa7aa32e89c2ce48da/src/pywink/devices/air_conditioner.py#L45-L56
def set_schedule_enabled(self, state): """ :param state: a boolean True (on) or False (off) :return: nothing """ desired_state = {"schedule_enabled": state} response = self.api_interface.set_device_state(self, { "desired_state": desired_state }) self._update_state_from_response(response)
[ "def", "set_schedule_enabled", "(", "self", ",", "state", ")", ":", "desired_state", "=", "{", "\"schedule_enabled\"", ":", "state", "}", "response", "=", "self", ".", "api_interface", ".", "set_device_state", "(", "self", ",", "{", "\"desired_state\"", ":", "desired_state", "}", ")", "self", ".", "_update_state_from_response", "(", "response", ")" ]
:param state: a boolean True (on) or False (off) :return: nothing
[ ":", "param", "state", ":", "a", "boolean", "True", "(", "on", ")", "or", "False", "(", "off", ")", ":", "return", ":", "nothing" ]
python
train
29.583333
7sDream/zhihu-py3
zhihu/client.py
https://github.com/7sDream/zhihu-py3/blob/bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc/zhihu/client.py#L101-L138
def login_in_terminal(self, need_captcha=False, use_getpass=True): """不使用cookies,在终端中根据提示登陆知乎 :param bool need_captcha: 是否要求输入验证码,如果登录失败请设为 True :param bool use_getpass: 是否使用安全模式输入密码,默认为 True, 如果在某些 Windows IDE 中无法正常输入密码,请把此参数设置为 False 试试 :return: 如果成功返回cookies字符串 :rtype: str """ print('====== zhihu login =====') email = input('email: ') if use_getpass: password = getpass.getpass('password: ') else: password = input("password: ") if need_captcha: captcha_data = self.get_captcha() with open('captcha.gif', 'wb') as f: f.write(captcha_data) print('please check captcha.gif for captcha') captcha = input('captcha: ') os.remove('captcha.gif') else: captcha = None print('====== logging.... =====') code, msg, cookies = self.login(email, password, captcha) if code == 0: print('login successfully') else: print('login failed, reason: {0}'.format(msg)) return cookies
[ "def", "login_in_terminal", "(", "self", ",", "need_captcha", "=", "False", ",", "use_getpass", "=", "True", ")", ":", "print", "(", "'====== zhihu login ====='", ")", "email", "=", "input", "(", "'email: '", ")", "if", "use_getpass", ":", "password", "=", "getpass", ".", "getpass", "(", "'password: '", ")", "else", ":", "password", "=", "input", "(", "\"password: \"", ")", "if", "need_captcha", ":", "captcha_data", "=", "self", ".", "get_captcha", "(", ")", "with", "open", "(", "'captcha.gif'", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "captcha_data", ")", "print", "(", "'please check captcha.gif for captcha'", ")", "captcha", "=", "input", "(", "'captcha: '", ")", "os", ".", "remove", "(", "'captcha.gif'", ")", "else", ":", "captcha", "=", "None", "print", "(", "'====== logging.... ====='", ")", "code", ",", "msg", ",", "cookies", "=", "self", ".", "login", "(", "email", ",", "password", ",", "captcha", ")", "if", "code", "==", "0", ":", "print", "(", "'login successfully'", ")", "else", ":", "print", "(", "'login failed, reason: {0}'", ".", "format", "(", "msg", ")", ")", "return", "cookies" ]
Log in to Zhihu from the terminal following the prompts, without using cookies :param bool need_captcha: whether a captcha must be entered; set to True if login fails :param bool use_getpass: whether to read the password in secure mode (getpass), default True; if the password cannot be entered normally in some Windows IDEs, try setting this parameter to False :return: the cookies string if login succeeds :rtype: str
[ "Log", "in", "to", "Zhihu", "from", "the", "terminal", "following", "the", "prompts", "without", "using", "cookies" ]
python
train
29.5
tensorlayer/tensorlayer
tensorlayer/visualize.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/visualize.py#L53-L81
def read_images(img_list, path='', n_threads=10, printable=True): """Returns all images in list by given path and name of each image file. Parameters ------------- img_list : list of str The image file names. path : str The image folder path. n_threads : int The number of threads to read image. printable : boolean Whether to print information when reading images. Returns ------- list of numpy.array The images. """ imgs = [] for idx in range(0, len(img_list), n_threads): b_imgs_list = img_list[idx:idx + n_threads] b_imgs = tl.prepro.threading_data(b_imgs_list, fn=read_image, path=path) # tl.logging.info(b_imgs.shape) imgs.extend(b_imgs) if printable: tl.logging.info('read %d from %s' % (len(imgs), path)) return imgs
[ "def", "read_images", "(", "img_list", ",", "path", "=", "''", ",", "n_threads", "=", "10", ",", "printable", "=", "True", ")", ":", "imgs", "=", "[", "]", "for", "idx", "in", "range", "(", "0", ",", "len", "(", "img_list", ")", ",", "n_threads", ")", ":", "b_imgs_list", "=", "img_list", "[", "idx", ":", "idx", "+", "n_threads", "]", "b_imgs", "=", "tl", ".", "prepro", ".", "threading_data", "(", "b_imgs_list", ",", "fn", "=", "read_image", ",", "path", "=", "path", ")", "# tl.logging.info(b_imgs.shape)", "imgs", ".", "extend", "(", "b_imgs", ")", "if", "printable", ":", "tl", ".", "logging", ".", "info", "(", "'read %d from %s'", "%", "(", "len", "(", "imgs", ")", ",", "path", ")", ")", "return", "imgs" ]
Returns all images in list by given path and name of each image file. Parameters ------------- img_list : list of str The image file names. path : str The image folder path. n_threads : int The number of threads to read image. printable : boolean Whether to print information when reading images. Returns ------- list of numpy.array The images.
[ "Returns", "all", "images", "in", "list", "by", "given", "path", "and", "name", "of", "each", "image", "file", "." ]
python
valid
29.206897
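read_images above walks the file list in slices of n_threads and hands each slice to threading_data. The chunking itself is easy to miss; a tiny standalone illustration of that loop:

def chunked(items, size):
    # Yield successive fixed-size slices, like the idx/n_threads loop above.
    for start in range(0, len(items), size):
        yield items[start:start + size]

names = ["a.png", "b.png", "c.png", "d.png", "e.png"]
for batch in chunked(names, 2):
    print(batch)   # ['a.png', 'b.png'], ['c.png', 'd.png'], ['e.png']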
angr/angr
angr/analyses/bindiff.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/bindiff.py#L380-L388
def identical_blocks(self): """ :returns: A list of block matches which appear to be identical """ identical_blocks = [] for (block_a, block_b) in self._block_matches: if self.blocks_probably_identical(block_a, block_b): identical_blocks.append((block_a, block_b)) return identical_blocks
[ "def", "identical_blocks", "(", "self", ")", ":", "identical_blocks", "=", "[", "]", "for", "(", "block_a", ",", "block_b", ")", "in", "self", ".", "_block_matches", ":", "if", "self", ".", "blocks_probably_identical", "(", "block_a", ",", "block_b", ")", ":", "identical_blocks", ".", "append", "(", "(", "block_a", ",", "block_b", ")", ")", "return", "identical_blocks" ]
:returns: A list of block matches which appear to be identical
[ ":", "returns", ":", "A", "list", "of", "block", "matches", "which", "appear", "to", "be", "identical" ]
python
train
39.555556
pydot/pydot-ng
pydot_ng/__init__.py
https://github.com/pydot/pydot-ng/blob/16f39800b6f5dc28d291a4d7763bbac04b9efe72/pydot_ng/__init__.py#L111-L146
def get_fobj(fname, mode='w+'): """Obtain a proper file object. Parameters ---------- fname : string, file object, file descriptor If a string or file descriptor, then we create a file object. If *fname* is a file object, then we do nothing and ignore the specified *mode* parameter. mode : str The mode of the file to be opened. Returns ------- fobj : file object The file object. close : bool If *fname* was a string, then *close* will be *True* to signify that the file object should be closed after writing to it. Otherwise, *close* will be *False* signifying that the user, in essence, created the file object already and that subsequent operations should not close it. """ if is_string_like(fname): fobj = open(fname, mode) close = True elif hasattr(fname, 'write'): # fname is a file-like object, perhaps a StringIO (for example) fobj = fname close = False else: # assume it is a file descriptor fobj = os.fdopen(fname, mode) close = False return fobj, close
[ "def", "get_fobj", "(", "fname", ",", "mode", "=", "'w+'", ")", ":", "if", "is_string_like", "(", "fname", ")", ":", "fobj", "=", "open", "(", "fname", ",", "mode", ")", "close", "=", "True", "elif", "hasattr", "(", "fname", ",", "'write'", ")", ":", "# fname is a file-like object, perhaps a StringIO (for example)", "fobj", "=", "fname", "close", "=", "False", "else", ":", "# assume it is a file descriptor", "fobj", "=", "os", ".", "fdopen", "(", "fname", ",", "mode", ")", "close", "=", "False", "return", "fobj", ",", "close" ]
Obtain a proper file object. Parameters ---------- fname : string, file object, file descriptor If a string or file descriptor, then we create a file object. If *fname* is a file object, then we do nothing and ignore the specified *mode* parameter. mode : str The mode of the file to be opened. Returns ------- fobj : file object The file object. close : bool If *fname* was a string, then *close* will be *True* to signify that the file object should be closed after writing to it. Otherwise, *close* will be *False* signifying that the user, in essence, created the file object already and that subsequent operations should not close it.
[ "Obtain", "a", "proper", "file", "object", "." ]
python
train
31.472222
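The helper above dispatches on whether it was given a filename, a file-like object, or a file descriptor, and tells the caller whether it owns the handle. A condensed standalone sketch of that dispatch and of how the close flag is meant to be used:

import io
import os

def open_target(target, mode="w+"):
    # str -> open a real file (caller should close); file-like -> use as-is;
    # anything else is treated as an OS-level file descriptor.
    if isinstance(target, str):
        return open(target, mode), True
    if hasattr(target, "write"):
        return target, False
    return os.fdopen(target, mode), False

fobj, close = open_target(io.StringIO())
fobj.write("digraph { a -> b }")
if close:            # False here: the caller created the StringIO itself
    fobj.close()
print(fobj.getvalue())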
ska-sa/katcp-python
katcp/client.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/client.py#L505-L528
def handle_message(self, msg): """Handle a message from the server. Parameters ---------- msg : Message object The Message to dispatch to the handler methods. """ # log messages received so that no one else has to if self._logger.isEnabledFor(logging.DEBUG): self._logger.debug( "received from {}: {}" .format(self.bind_address_string, repr(str(msg)))) if msg.mtype == Message.INFORM: return self.handle_inform(msg) elif msg.mtype == Message.REPLY: return self.handle_reply(msg) elif msg.mtype == Message.REQUEST: return self.handle_request(msg) else: self._logger.error("Unexpected message type from server ['%s']." % (msg,))
[ "def", "handle_message", "(", "self", ",", "msg", ")", ":", "# log messages received so that no one else has to", "if", "self", ".", "_logger", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ":", "self", ".", "_logger", ".", "debug", "(", "\"received from {}: {}\"", ".", "format", "(", "self", ".", "bind_address_string", ",", "repr", "(", "str", "(", "msg", ")", ")", ")", ")", "if", "msg", ".", "mtype", "==", "Message", ".", "INFORM", ":", "return", "self", ".", "handle_inform", "(", "msg", ")", "elif", "msg", ".", "mtype", "==", "Message", ".", "REPLY", ":", "return", "self", ".", "handle_reply", "(", "msg", ")", "elif", "msg", ".", "mtype", "==", "Message", ".", "REQUEST", ":", "return", "self", ".", "handle_request", "(", "msg", ")", "else", ":", "self", ".", "_logger", ".", "error", "(", "\"Unexpected message type from server ['%s'].\"", "%", "(", "msg", ",", ")", ")" ]
Handle a message from the server. Parameters ---------- msg : Message object The Message to dispatch to the handler methods.
[ "Handle", "a", "message", "from", "the", "server", "." ]
python
train
34.541667
skyfielders/python-skyfield
skyfield/contrib/iosurvey.py
https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/contrib/iosurvey.py#L11-L29
def get_summary(url, spk=True): ''' simple function to retrieve the header of a BSP file and return SPK object''' # connect to file at URL bspurl = urllib2.urlopen(url) # retrieve the "tip" of a file at URL bsptip = bspurl.read(10**5) # first 100kB # save data in fake file object (in-memory) bspstr = StringIO(bsptip) # load into DAF object daf = DAF(bspstr) # return either SPK or DAF object if spk: # make a SPK object spk = SPK(daf) # return representation return spk else: # return representation return daf
[ "def", "get_summary", "(", "url", ",", "spk", "=", "True", ")", ":", "# connect to file at URL", "bspurl", "=", "urllib2", ".", "urlopen", "(", "url", ")", "# retrieve the \"tip\" of a file at URL", "bsptip", "=", "bspurl", ".", "read", "(", "10", "**", "5", ")", "# first 100kB", "# save data in fake file object (in-memory)", "bspstr", "=", "StringIO", "(", "bsptip", ")", "# load into DAF object", "daf", "=", "DAF", "(", "bspstr", ")", "# return either SPK or DAF object", "if", "spk", ":", "# make a SPK object", "spk", "=", "SPK", "(", "daf", ")", "# return representation ", "return", "spk", "else", ":", "# return representation ", "return", "daf" ]
simple function to retrieve the header of a BSP file and return SPK object
[ "simple", "function", "to", "retrieve", "the", "header", "of", "a", "BSP", "file", "and", "return", "SPK", "object" ]
python
train
30.526316
googleapis/google-cloud-python
securitycenter/google/cloud/securitycenter_v1/gapic/security_center_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/securitycenter/google/cloud/securitycenter_v1/gapic/security_center_client.py#L104-L110
def asset_path(cls, organization, asset): """Return a fully-qualified asset string.""" return google.api_core.path_template.expand( "organizations/{organization}/assets/{asset}", organization=organization, asset=asset, )
[ "def", "asset_path", "(", "cls", ",", "organization", ",", "asset", ")", ":", "return", "google", ".", "api_core", ".", "path_template", ".", "expand", "(", "\"organizations/{organization}/assets/{asset}\"", ",", "organization", "=", "organization", ",", "asset", "=", "asset", ",", ")" ]
Return a fully-qualified asset string.
[ "Return", "a", "fully", "-", "qualified", "asset", "string", "." ]
python
train
39.142857
networks-lab/metaknowledge
metaknowledge/citation.py
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/citation.py#L243-L261
def Extra(self): """ Returns any `V`, `P`, `DOI` or `misc` values as a string. These are all the values not returned by [ID()](#metaknowledge.citation.Citation.ID), they are separated by `' ,'`. # Returns `str` > A string containing the data not in the ID of the `Citation`. """ extraTags = ['V', 'P', 'DOI', 'misc'] retVal = "" for tag in extraTags: if getattr(self, tag): retVal += getattr(self, tag) + ', ' if len(retVal) > 2: return retVal[:-2] else: return retVal
[ "def", "Extra", "(", "self", ")", ":", "extraTags", "=", "[", "'V'", ",", "'P'", ",", "'DOI'", ",", "'misc'", "]", "retVal", "=", "\"\"", "for", "tag", "in", "extraTags", ":", "if", "getattr", "(", "self", ",", "tag", ")", ":", "retVal", "+=", "getattr", "(", "self", ",", "tag", ")", "+", "', '", "if", "len", "(", "retVal", ")", ">", "2", ":", "return", "retVal", "[", ":", "-", "2", "]", "else", ":", "return", "retVal" ]
Returns any `V`, `P`, `DOI` or `misc` values as a string. These are all the values not returned by [ID()](#metaknowledge.citation.Citation.ID), they are separated by `' ,'`. # Returns `str` > A string containing the data not in the ID of the `Citation`.
[ "Returns", "any", "V", "P", "DOI", "or", "misc", "values", "as", "a", "string", ".", "These", "are", "all", "the", "values", "not", "returned", "by", "[", "ID", "()", "]", "(", "#metaknowledge", ".", "citation", ".", "Citation", ".", "ID", ")", "they", "are", "separated", "by", "." ]
python
train
31.210526
materialsproject/pymatgen
pymatgen/io/abinit/works.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/works.py#L1311-L1361
def merge_ddb_files(self, delete_source_ddbs=True, only_dfpt_tasks=True, exclude_tasks=None, include_tasks=None): """ This method is called when all the q-points have been computed. It runs `mrgddb` in sequential on the local machine to produce the final DDB file in the outdir of the `Work`. Args: delete_source_ddbs: True if input DDB should be removed once final DDB is created. only_dfpt_tasks: False to merge all DDB files produced by the tasks of the work Useful e.g. for finite stress corrections in which the stress in the initial configuration should be merged in the final DDB. exclude_tasks: List of tasks that should be excluded when merging the partial DDB files. include_tasks: List of tasks that should be included when merging the partial DDB files. Mutually exclusive with exclude_tasks. Returns: path to the output DDB file """ if exclude_tasks: my_tasks = [task for task in self if task not in exclude_tasks] elif include_tasks: my_tasks = [task for task in self if task in include_tasks] else: my_tasks = [task for task in self] if only_dfpt_tasks: ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in my_tasks \ if isinstance(task, DfptTask)])) else: ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in my_tasks])) self.history.info("Will call mrgddb to merge %s DDB files:" % len(ddb_files)) # DDB files are always produces so this should never happen! if not ddb_files: raise RuntimeError("Cannot find any DDB file to merge by the task of " % self) # Final DDB file will be produced in the outdir of the work. out_ddb = self.outdir.path_in("out_DDB") if len(ddb_files) == 1: # Avoid the merge. Just copy the DDB file to the outdir of the work. shutil.copy(ddb_files[0], out_ddb) else: # Call mrgddb desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime()) mrgddb = wrappers.Mrgddb(manager=self[0].manager, verbose=0) mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc, delete_source_ddbs=delete_source_ddbs) return out_ddb
[ "def", "merge_ddb_files", "(", "self", ",", "delete_source_ddbs", "=", "True", ",", "only_dfpt_tasks", "=", "True", ",", "exclude_tasks", "=", "None", ",", "include_tasks", "=", "None", ")", ":", "if", "exclude_tasks", ":", "my_tasks", "=", "[", "task", "for", "task", "in", "self", "if", "task", "not", "in", "exclude_tasks", "]", "elif", "include_tasks", ":", "my_tasks", "=", "[", "task", "for", "task", "in", "self", "if", "task", "in", "include_tasks", "]", "else", ":", "my_tasks", "=", "[", "task", "for", "task", "in", "self", "]", "if", "only_dfpt_tasks", ":", "ddb_files", "=", "list", "(", "filter", "(", "None", ",", "[", "task", ".", "outdir", ".", "has_abiext", "(", "\"DDB\"", ")", "for", "task", "in", "my_tasks", "if", "isinstance", "(", "task", ",", "DfptTask", ")", "]", ")", ")", "else", ":", "ddb_files", "=", "list", "(", "filter", "(", "None", ",", "[", "task", ".", "outdir", ".", "has_abiext", "(", "\"DDB\"", ")", "for", "task", "in", "my_tasks", "]", ")", ")", "self", ".", "history", ".", "info", "(", "\"Will call mrgddb to merge %s DDB files:\"", "%", "len", "(", "ddb_files", ")", ")", "# DDB files are always produces so this should never happen!", "if", "not", "ddb_files", ":", "raise", "RuntimeError", "(", "\"Cannot find any DDB file to merge by the task of \"", "%", "self", ")", "# Final DDB file will be produced in the outdir of the work.", "out_ddb", "=", "self", ".", "outdir", ".", "path_in", "(", "\"out_DDB\"", ")", "if", "len", "(", "ddb_files", ")", "==", "1", ":", "# Avoid the merge. Just copy the DDB file to the outdir of the work.", "shutil", ".", "copy", "(", "ddb_files", "[", "0", "]", ",", "out_ddb", ")", "else", ":", "# Call mrgddb", "desc", "=", "\"DDB file merged by %s on %s\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "time", ".", "asctime", "(", ")", ")", "mrgddb", "=", "wrappers", ".", "Mrgddb", "(", "manager", "=", "self", "[", "0", "]", ".", "manager", ",", "verbose", "=", "0", ")", "mrgddb", ".", "merge", "(", "self", ".", "outdir", ".", "path", ",", "ddb_files", ",", "out_ddb", "=", "out_ddb", ",", "description", "=", "desc", ",", "delete_source_ddbs", "=", "delete_source_ddbs", ")", "return", "out_ddb" ]
This method is called when all the q-points have been computed. It runs `mrgddb` in sequential on the local machine to produce the final DDB file in the outdir of the `Work`. Args: delete_source_ddbs: True if input DDB should be removed once final DDB is created. only_dfpt_tasks: False to merge all DDB files produced by the tasks of the work Useful e.g. for finite stress corrections in which the stress in the initial configuration should be merged in the final DDB. exclude_tasks: List of tasks that should be excluded when merging the partial DDB files. include_tasks: List of tasks that should be included when merging the partial DDB files. Mutually exclusive with exclude_tasks. Returns: path to the output DDB file
[ "This", "method", "is", "called", "when", "all", "the", "q", "-", "points", "have", "been", "computed", ".", "It", "runs", "mrgddb", "in", "sequential", "on", "the", "local", "machine", "to", "produce", "the", "final", "DDB", "file", "in", "the", "outdir", "of", "the", "Work", "." ]
python
train
48.862745
resync/resync
resync/list_base_with_index.py
https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/list_base_with_index.py#L272-L350
def write(self, basename='/tmp/sitemap.xml'): """Write one or a set of sitemap files to disk. resources is a ResourceContainer that may be an ResourceList or a ChangeList. This may be a generator so data is read as needed and length is determined at the end. basename is used as the name of the single sitemap file or the sitemapindex for a set of sitemap files. Uses self.max_sitemap_entries to determine whether the resource_list can be written as one sitemap. If there are more entries and self.allow_multifile is set True then a set of sitemap files, with an sitemapindex, will be written. """ # Access resources through iterator only resources_iter = iter(self.resources) (chunk, nxt) = self.get_resources_chunk(resources_iter) s = self.new_sitemap() if (nxt is not None): # Have more than self.max_sitemap_entries => sitemapindex if (not self.allow_multifile): raise ListBaseIndexError( "Too many entries for a single sitemap but multifile disabled") # Work out URI of sitemapindex so that we can link up to # it from the individual sitemap files try: index_uri = self.mapper.dst_to_src(basename) except MapperError as e: raise ListBaseIndexError( "Cannot map sitemapindex filename to URI (%s)" % str(e)) # Use iterator over all resources and count off sets of # max_sitemap_entries to go into each sitemap, store the # names of the sitemaps as we go. Copy md from self into # the index and use this for all chunks also index = ListBase(md=self.md.copy(), ln=list(self.ln)) index.capability_name = self.capability_name index.default_capability() while (len(chunk) > 0): file = self.part_name(basename, len(index)) # Check that we can map the filename of this sitemap into # URI space for the sitemapindex try: uri = self.mapper.dst_to_src(file) except MapperError as e: raise ListBaseIndexError( "Cannot map sitemap filename to URI (%s)" % str(e)) self.logger.info("Writing sitemap %s..." % (file)) f = open(file, 'w') chunk.index = index_uri chunk.md = index.md s.resources_as_xml(chunk, fh=f) f.close() # Record information about this sitemap for index r = Resource(uri=uri, timestamp=os.stat(file).st_mtime, md5=Hashes(['md5'], file).md5) index.add(r) # Get next chunk (chunk, nxt) = self.get_resources_chunk(resources_iter, nxt) self.logger.info("Wrote %d sitemaps" % (len(index))) f = open(basename, 'w') self.logger.info("Writing sitemapindex %s..." % (basename)) s.resources_as_xml(index, sitemapindex=True, fh=f) f.close() self.logger.info("Wrote sitemapindex %s" % (basename)) elif self.sitemapindex: f = open(basename, 'w') self.logger.info("Writing sitemapindex %s..." % (basename)) s.resources_as_xml(chunk, sitemapindex=True, fh=f) f.close() self.logger.info("Wrote sitemapindex %s" % (basename)) else: f = open(basename, 'w') self.logger.info("Writing sitemap %s..." % (basename)) s.resources_as_xml(chunk, fh=f) f.close() self.logger.info("Wrote sitemap %s" % (basename))
[ "def", "write", "(", "self", ",", "basename", "=", "'/tmp/sitemap.xml'", ")", ":", "# Access resources through iterator only", "resources_iter", "=", "iter", "(", "self", ".", "resources", ")", "(", "chunk", ",", "nxt", ")", "=", "self", ".", "get_resources_chunk", "(", "resources_iter", ")", "s", "=", "self", ".", "new_sitemap", "(", ")", "if", "(", "nxt", "is", "not", "None", ")", ":", "# Have more than self.max_sitemap_entries => sitemapindex", "if", "(", "not", "self", ".", "allow_multifile", ")", ":", "raise", "ListBaseIndexError", "(", "\"Too many entries for a single sitemap but multifile disabled\"", ")", "# Work out URI of sitemapindex so that we can link up to", "# it from the individual sitemap files", "try", ":", "index_uri", "=", "self", ".", "mapper", ".", "dst_to_src", "(", "basename", ")", "except", "MapperError", "as", "e", ":", "raise", "ListBaseIndexError", "(", "\"Cannot map sitemapindex filename to URI (%s)\"", "%", "str", "(", "e", ")", ")", "# Use iterator over all resources and count off sets of", "# max_sitemap_entries to go into each sitemap, store the", "# names of the sitemaps as we go. Copy md from self into", "# the index and use this for all chunks also", "index", "=", "ListBase", "(", "md", "=", "self", ".", "md", ".", "copy", "(", ")", ",", "ln", "=", "list", "(", "self", ".", "ln", ")", ")", "index", ".", "capability_name", "=", "self", ".", "capability_name", "index", ".", "default_capability", "(", ")", "while", "(", "len", "(", "chunk", ")", ">", "0", ")", ":", "file", "=", "self", ".", "part_name", "(", "basename", ",", "len", "(", "index", ")", ")", "# Check that we can map the filename of this sitemap into", "# URI space for the sitemapindex", "try", ":", "uri", "=", "self", ".", "mapper", ".", "dst_to_src", "(", "file", ")", "except", "MapperError", "as", "e", ":", "raise", "ListBaseIndexError", "(", "\"Cannot map sitemap filename to URI (%s)\"", "%", "str", "(", "e", ")", ")", "self", ".", "logger", ".", "info", "(", "\"Writing sitemap %s...\"", "%", "(", "file", ")", ")", "f", "=", "open", "(", "file", ",", "'w'", ")", "chunk", ".", "index", "=", "index_uri", "chunk", ".", "md", "=", "index", ".", "md", "s", ".", "resources_as_xml", "(", "chunk", ",", "fh", "=", "f", ")", "f", ".", "close", "(", ")", "# Record information about this sitemap for index", "r", "=", "Resource", "(", "uri", "=", "uri", ",", "timestamp", "=", "os", ".", "stat", "(", "file", ")", ".", "st_mtime", ",", "md5", "=", "Hashes", "(", "[", "'md5'", "]", ",", "file", ")", ".", "md5", ")", "index", ".", "add", "(", "r", ")", "# Get next chunk", "(", "chunk", ",", "nxt", ")", "=", "self", ".", "get_resources_chunk", "(", "resources_iter", ",", "nxt", ")", "self", ".", "logger", ".", "info", "(", "\"Wrote %d sitemaps\"", "%", "(", "len", "(", "index", ")", ")", ")", "f", "=", "open", "(", "basename", ",", "'w'", ")", "self", ".", "logger", ".", "info", "(", "\"Writing sitemapindex %s...\"", "%", "(", "basename", ")", ")", "s", ".", "resources_as_xml", "(", "index", ",", "sitemapindex", "=", "True", ",", "fh", "=", "f", ")", "f", ".", "close", "(", ")", "self", ".", "logger", ".", "info", "(", "\"Wrote sitemapindex %s\"", "%", "(", "basename", ")", ")", "elif", "self", ".", "sitemapindex", ":", "f", "=", "open", "(", "basename", ",", "'w'", ")", "self", ".", "logger", ".", "info", "(", "\"Writing sitemapindex %s...\"", "%", "(", "basename", ")", ")", "s", ".", "resources_as_xml", "(", "chunk", ",", "sitemapindex", "=", "True", ",", "fh", "=", "f", ")", "f", ".", "close", "(", ")", 
"self", ".", "logger", ".", "info", "(", "\"Wrote sitemapindex %s\"", "%", "(", "basename", ")", ")", "else", ":", "f", "=", "open", "(", "basename", ",", "'w'", ")", "self", ".", "logger", ".", "info", "(", "\"Writing sitemap %s...\"", "%", "(", "basename", ")", ")", "s", ".", "resources_as_xml", "(", "chunk", ",", "fh", "=", "f", ")", "f", ".", "close", "(", ")", "self", ".", "logger", ".", "info", "(", "\"Wrote sitemap %s\"", "%", "(", "basename", ")", ")" ]
Write one or a set of sitemap files to disk. resources is a ResourceContainer that may be a ResourceList or a ChangeList. This may be a generator so data is read as needed and length is determined at the end. basename is used as the name of the single sitemap file or the sitemapindex for a set of sitemap files. Uses self.max_sitemap_entries to determine whether the resource_list can be written as one sitemap. If there are more entries and self.allow_multifile is set True then a set of sitemap files, with a sitemapindex, will be written.
[ "Write", "one", "or", "a", "set", "of", "sitemap", "files", "to", "disk", "." ]
python
train
48.189873
angr/angr
angr/utils/library.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/utils/library.py#L5-L36
def get_function_name(s): """ Get the function name from a C-style function declaration string. :param str s: A C-style function declaration string. :return: The function name. :rtype: str """ s = s.strip() if s.startswith("__attribute__"): # Remove "__attribute__ ((foobar))" if "))" not in s: raise ValueError("__attribute__ is present, but I cannot find double-right parenthesis in the function " "declaration string.") s = s[s.index("))") + 2 : ].strip() if '(' not in s: raise ValueError("Cannot find any left parenthesis in the function declaration string.") func_name = s[:s.index('(')].strip() for i, ch in enumerate(reversed(func_name)): if ch == ' ': pos = len(func_name) - 1 - i break else: raise ValueError('Cannot find any space in the function declaration string.') func_name = func_name[pos + 1 : ] return func_name
[ "def", "get_function_name", "(", "s", ")", ":", "s", "=", "s", ".", "strip", "(", ")", "if", "s", ".", "startswith", "(", "\"__attribute__\"", ")", ":", "# Remove \"__attribute__ ((foobar))\"", "if", "\"))\"", "not", "in", "s", ":", "raise", "ValueError", "(", "\"__attribute__ is present, but I cannot find double-right parenthesis in the function \"", "\"declaration string.\"", ")", "s", "=", "s", "[", "s", ".", "index", "(", "\"))\"", ")", "+", "2", ":", "]", ".", "strip", "(", ")", "if", "'('", "not", "in", "s", ":", "raise", "ValueError", "(", "\"Cannot find any left parenthesis in the function declaration string.\"", ")", "func_name", "=", "s", "[", ":", "s", ".", "index", "(", "'('", ")", "]", ".", "strip", "(", ")", "for", "i", ",", "ch", "in", "enumerate", "(", "reversed", "(", "func_name", ")", ")", ":", "if", "ch", "==", "' '", ":", "pos", "=", "len", "(", "func_name", ")", "-", "1", "-", "i", "break", "else", ":", "raise", "ValueError", "(", "'Cannot find any space in the function declaration string.'", ")", "func_name", "=", "func_name", "[", "pos", "+", "1", ":", "]", "return", "func_name" ]
Get the function name from a C-style function declaration string. :param str s: A C-style function declaration string. :return: The function name. :rtype: str
[ "Get", "the", "function", "name", "from", "a", "C", "-", "style", "function", "declaration", "string", "." ]
python
train
30.8125
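Two quick calls showing what the parser above extracts, including the __attribute__ stripping path (assuming the function is importable from angr.utils.library, as the record's path suggests):

from angr.utils.library import get_function_name  # module named in the record's path

print(get_function_name("int strlen(const char *s)"))
# -> strlen
print(get_function_name("__attribute__ ((noreturn)) void exit_group(int status)"))
# -> exit_group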
Kronuz/pyScss
scss/cssdefs.py
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/cssdefs.py#L242-L267
def cancel_base_units(units, to_remove): """Given a list of units, remove a specified number of each base unit. Arguments: units: an iterable of units to_remove: a mapping of base_unit => count, such as that returned from count_base_units Returns a 2-tuple of (factor, remaining_units). """ # Copy the dict since we're about to mutate it to_remove = to_remove.copy() remaining_units = [] total_factor = Fraction(1) for unit in units: factor, base_unit = get_conversion_factor(unit) if not to_remove.get(base_unit, 0): remaining_units.append(unit) continue total_factor *= factor to_remove[base_unit] -= 1 return total_factor, remaining_units
[ "def", "cancel_base_units", "(", "units", ",", "to_remove", ")", ":", "# Copy the dict since we're about to mutate it", "to_remove", "=", "to_remove", ".", "copy", "(", ")", "remaining_units", "=", "[", "]", "total_factor", "=", "Fraction", "(", "1", ")", "for", "unit", "in", "units", ":", "factor", ",", "base_unit", "=", "get_conversion_factor", "(", "unit", ")", "if", "not", "to_remove", ".", "get", "(", "base_unit", ",", "0", ")", ":", "remaining_units", ".", "append", "(", "unit", ")", "continue", "total_factor", "*=", "factor", "to_remove", "[", "base_unit", "]", "-=", "1", "return", "total_factor", ",", "remaining_units" ]
Given a list of units, remove a specified number of each base unit. Arguments: units: an iterable of units to_remove: a mapping of base_unit => count, such as that returned from count_base_units Returns a 2-tuple of (factor, remaining_units).
[ "Given", "a", "list", "of", "units", "remove", "a", "specified", "number", "of", "each", "base", "unit", "." ]
python
train
28.692308
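cancel_base_units relies on get_conversion_factor from the same module; a self-contained sketch with a stand-in conversion table makes the cancellation easier to follow (the table values here are illustrative, not pyScss's):

from fractions import Fraction

# Stand-in for get_conversion_factor: unit -> (factor to base unit, base unit).
CONVERSIONS = {"cm": (Fraction(1, 100), "m"), "m": (Fraction(1), "m"), "s": (Fraction(1), "s")}

def cancel(units, to_remove):
    # Remove one occurrence of each requested base unit, accumulating the factor.
    to_remove = dict(to_remove)
    remaining, factor = [], Fraction(1)
    for unit in units:
        f, base = CONVERSIONS[unit]
        if not to_remove.get(base, 0):
            remaining.append(unit)
            continue
        factor *= f
        to_remove[base] -= 1
    return factor, remaining

print(cancel(["cm", "s"], {"m": 1}))   # (Fraction(1, 100), ['s'])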
BlackEarth/bxml
bxml/xt.py
https://github.com/BlackEarth/bxml/blob/8fbea5dad7fadc7b854ddbeff6ecfb55aaceeb77/bxml/xt.py#L53-L59
def Element(self, elem, **params): """Ensure that the input element is immutable by the transformation. Returns a single element.""" res = self.__call__(deepcopy(elem), **params) if len(res) > 0: return res[0] else: return None
[ "def", "Element", "(", "self", ",", "elem", ",", "*", "*", "params", ")", ":", "res", "=", "self", ".", "__call__", "(", "deepcopy", "(", "elem", ")", ",", "*", "*", "params", ")", "if", "len", "(", "res", ")", ">", "0", ":", "return", "res", "[", "0", "]", "else", ":", "return", "None" ]
Ensure that the input element is immutable by the transformation. Returns a single element.
[ "Ensure", "that", "the", "input", "element", "is", "immutable", "by", "the", "transformation", ".", "Returns", "a", "single", "element", "." ]
python
train
39.714286
osrg/ryu
ryu/services/protocols/bgp/core_managers/table_manager.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/core_managers/table_manager.py#L433-L449
def re_install_net_ctrl_paths(self, vrf_table): """Re-installs paths from NC with current BGP policy. Iterates over known paths from NC installed in `vrf4_table` and adds new path with path attributes as per current VRF configuration. """ assert vrf_table for dest in vrf_table.values(): for path in dest.known_path_list: if path.source is None: vrf_table.insert_vrf_path( nlri=path.nlri, next_hop=path.nexthop, gen_lbl=True ) LOG.debug('Re-installed NC paths with current policy for table %s.', vrf_table)
[ "def", "re_install_net_ctrl_paths", "(", "self", ",", "vrf_table", ")", ":", "assert", "vrf_table", "for", "dest", "in", "vrf_table", ".", "values", "(", ")", ":", "for", "path", "in", "dest", ".", "known_path_list", ":", "if", "path", ".", "source", "is", "None", ":", "vrf_table", ".", "insert_vrf_path", "(", "nlri", "=", "path", ".", "nlri", ",", "next_hop", "=", "path", ".", "nexthop", ",", "gen_lbl", "=", "True", ")", "LOG", ".", "debug", "(", "'Re-installed NC paths with current policy for table %s.'", ",", "vrf_table", ")" ]
Re-installs paths from NC with current BGP policy. Iterates over known paths from NC installed in `vrf4_table` and adds new path with path attributes as per current VRF configuration.
[ "Re", "-", "installs", "paths", "from", "NC", "with", "current", "BGP", "policy", "." ]
python
train
41.470588
etingof/pyasn1
pyasn1/type/univ.py
https://github.com/etingof/pyasn1/blob/25cf116ef8d11bb0e08454c0f3635c9f4002c2d6/pyasn1/type/univ.py#L2165-L2208
def setComponentByName(self, name, value=noValue, verifyConstraints=True, matchTags=True, matchConstraints=True): """Assign |ASN.1| type component by name. Equivalent to Python :class:`dict` item assignment operation (e.g. `[]`). Parameters ---------- name: :class:`str` |ASN.1| type component name Keyword Args ------------ value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative A Python value to initialize |ASN.1| component with (if *componentType* is set) or ASN.1 value object to assign to |ASN.1| component. verifyConstraints: :class:`bool` If `False`, skip constraints validation matchTags: :class:`bool` If `False`, skip component tags matching matchConstraints: :class:`bool` If `False`, skip component constraints matching Returns ------- self """ if self._componentTypeLen: idx = self.componentType.getPositionByName(name) else: try: idx = self._dynamicNames.getPositionByName(name) except KeyError: raise error.PyAsn1Error('Name %s not found' % (name,)) return self.setComponentByPosition( idx, value, verifyConstraints, matchTags, matchConstraints )
[ "def", "setComponentByName", "(", "self", ",", "name", ",", "value", "=", "noValue", ",", "verifyConstraints", "=", "True", ",", "matchTags", "=", "True", ",", "matchConstraints", "=", "True", ")", ":", "if", "self", ".", "_componentTypeLen", ":", "idx", "=", "self", ".", "componentType", ".", "getPositionByName", "(", "name", ")", "else", ":", "try", ":", "idx", "=", "self", ".", "_dynamicNames", ".", "getPositionByName", "(", "name", ")", "except", "KeyError", ":", "raise", "error", ".", "PyAsn1Error", "(", "'Name %s not found'", "%", "(", "name", ",", ")", ")", "return", "self", ".", "setComponentByPosition", "(", "idx", ",", "value", ",", "verifyConstraints", ",", "matchTags", ",", "matchConstraints", ")" ]
Assign |ASN.1| type component by name. Equivalent to Python :class:`dict` item assignment operation (e.g. `[]`). Parameters ---------- name: :class:`str` |ASN.1| type component name Keyword Args ------------ value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative A Python value to initialize |ASN.1| component with (if *componentType* is set) or ASN.1 value object to assign to |ASN.1| component. verifyConstraints: :class:`bool` If `False`, skip constraints validation matchTags: :class:`bool` If `False`, skip component tags matching matchConstraints: :class:`bool` If `False`, skip component constraints matching Returns ------- self
[ "Assign", "|ASN", ".", "1|", "type", "component", "by", "name", "." ]
python
train
32.704545
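A small usage sketch of setComponentByName on a Sequence with a declared componentType (the field names and types here are chosen for illustration):

from pyasn1.type import char, namedtype, univ

class Record(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('id', univ.Integer()),
        namedtype.NamedType('name', char.UTF8String()),
    )

rec = Record()
rec.setComponentByName('id', 1)            # name resolved to a position via componentType
rec.setComponentByName('name', 'example')
print(rec.prettyPrint())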
sckott/pygbif
pygbif/registry/datasets.py
https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/registry/datasets.py#L88-L143
def dataset_suggest(q=None, type=None, keyword=None, owningOrg=None, publishingOrg=None, hostingOrg=None, publishingCountry=None, decade=None, limit = 100, offset = None, **kwargs): ''' Search that returns up to 20 matching datasets. Results are ordered by relevance. :param q: [str] Query term(s) for full text search. The value for this parameter can be a simple word or a phrase. Wildcards can be added to the simple word parameters only, e.g. ``q=*puma*`` :param type: [str] Type of dataset, options include OCCURRENCE, etc. :param keyword: [str] Keyword to search by. Datasets can be tagged by keywords, which you can search on. The search is done on the merged collection of tags, the dataset keywordCollections and temporalCoverages. SEEMS TO NOT BE WORKING ANYMORE AS OF 2016-09-02. :param owningOrg: [str] Owning organization. A uuid string. See :func:`~pygbif.registry.organizations` :param publishingOrg: [str] Publishing organization. A uuid string. See :func:`~pygbif.registry.organizations` :param hostingOrg: [str] Hosting organization. A uuid string. See :func:`~pygbif.registry.organizations` :param publishingCountry: [str] Publishing country. :param decade: [str] Decade, e.g., 1980. Filters datasets by their temporal coverage broken down to decades. Decades are given as a full year, e.g. 1880, 1960, 2000, etc, and will return datasets wholly contained in the decade as well as those that cover the entire decade or more. Facet by decade to get the break down, e.g. ``/search?facet=DECADE&facet_only=true`` (see example below) :param limit: [int] Number of results to return. Default: ``300`` :param offset: [int] Record to start at. Default: ``0`` :return: A dictionary References: http://www.gbif.org/developer/registry#datasetSearch Usage:: from pygbif import registry registry.dataset_suggest(q="Amazon", type="OCCURRENCE") # Suggest datasets tagged with keyword "france". registry.dataset_suggest(keyword="france") # Suggest datasets owned by the organization with key # "07f617d0-c688-11d8-bf62-b8a03c50a862" (UK NBN). registry.dataset_suggest(owningOrg="07f617d0-c688-11d8-bf62-b8a03c50a862") # Fulltext search for all datasets having the word "amsterdam" somewhere in # its metadata (title, description, etc). registry.dataset_suggest(q="amsterdam") # Limited search registry.dataset_suggest(type="OCCURRENCE", limit=2) registry.dataset_suggest(type="OCCURRENCE", limit=2, offset=10) # Return just descriptions registry.dataset_suggest(type="OCCURRENCE", limit = 5, description=True) # Search by decade registry.dataset_suggest(decade=1980, limit = 30) ''' url = gbif_baseurl + 'dataset/suggest' args = {'q': q, 'type': type, 'keyword': keyword, 'publishingOrg': publishingOrg, 'hostingOrg': hostingOrg, 'owningOrg': owningOrg, 'decade': decade, 'publishingCountry': publishingCountry, 'limit': limit, 'offset': offset} out = gbif_GET(url, args, **kwargs) return out
[ "def", "dataset_suggest", "(", "q", "=", "None", ",", "type", "=", "None", ",", "keyword", "=", "None", ",", "owningOrg", "=", "None", ",", "publishingOrg", "=", "None", ",", "hostingOrg", "=", "None", ",", "publishingCountry", "=", "None", ",", "decade", "=", "None", ",", "limit", "=", "100", ",", "offset", "=", "None", ",", "*", "*", "kwargs", ")", ":", "url", "=", "gbif_baseurl", "+", "'dataset/suggest'", "args", "=", "{", "'q'", ":", "q", ",", "'type'", ":", "type", ",", "'keyword'", ":", "keyword", ",", "'publishingOrg'", ":", "publishingOrg", ",", "'hostingOrg'", ":", "hostingOrg", ",", "'owningOrg'", ":", "owningOrg", ",", "'decade'", ":", "decade", ",", "'publishingCountry'", ":", "publishingCountry", ",", "'limit'", ":", "limit", ",", "'offset'", ":", "offset", "}", "out", "=", "gbif_GET", "(", "url", ",", "args", ",", "*", "*", "kwargs", ")", "return", "out" ]
Search that returns up to 20 matching datasets. Results are ordered by relevance. :param q: [str] Query term(s) for full text search. The value for this parameter can be a simple word or a phrase. Wildcards can be added to the simple word parameters only, e.g. ``q=*puma*`` :param type: [str] Type of dataset, options include OCCURRENCE, etc. :param keyword: [str] Keyword to search by. Datasets can be tagged by keywords, which you can search on. The search is done on the merged collection of tags, the dataset keywordCollections and temporalCoverages. SEEMS TO NOT BE WORKING ANYMORE AS OF 2016-09-02. :param owningOrg: [str] Owning organization. A uuid string. See :func:`~pygbif.registry.organizations` :param publishingOrg: [str] Publishing organization. A uuid string. See :func:`~pygbif.registry.organizations` :param hostingOrg: [str] Hosting organization. A uuid string. See :func:`~pygbif.registry.organizations` :param publishingCountry: [str] Publishing country. :param decade: [str] Decade, e.g., 1980. Filters datasets by their temporal coverage broken down to decades. Decades are given as a full year, e.g. 1880, 1960, 2000, etc, and will return datasets wholly contained in the decade as well as those that cover the entire decade or more. Facet by decade to get the break down, e.g. ``/search?facet=DECADE&facet_only=true`` (see example below) :param limit: [int] Number of results to return. Default: ``300`` :param offset: [int] Record to start at. Default: ``0`` :return: A dictionary References: http://www.gbif.org/developer/registry#datasetSearch Usage:: from pygbif import registry registry.dataset_suggest(q="Amazon", type="OCCURRENCE") # Suggest datasets tagged with keyword "france". registry.dataset_suggest(keyword="france") # Suggest datasets owned by the organization with key # "07f617d0-c688-11d8-bf62-b8a03c50a862" (UK NBN). registry.dataset_suggest(owningOrg="07f617d0-c688-11d8-bf62-b8a03c50a862") # Fulltext search for all datasets having the word "amsterdam" somewhere in # its metadata (title, description, etc). registry.dataset_suggest(q="amsterdam") # Limited search registry.dataset_suggest(type="OCCURRENCE", limit=2) registry.dataset_suggest(type="OCCURRENCE", limit=2, offset=10) # Return just descriptions registry.dataset_suggest(type="OCCURRENCE", limit = 5, description=True) # Search by decade registry.dataset_suggest(decade=1980, limit = 30)
[ "Search", "that", "returns", "up", "to", "20", "matching", "datasets", ".", "Results", "are", "ordered", "by", "relevance", "." ]
python
train
52.553571
marshmallow-code/apispec
src/apispec/ext/marshmallow/openapi.py
https://github.com/marshmallow-code/apispec/blob/e92ceffd12b2e392b8d199ed314bd2a7e6512dff/src/apispec/ext/marshmallow/openapi.py#L159-L186
def field2type_and_format(self, field): """Return the dictionary of OpenAPI type and format based on the field type :param Field field: A marshmallow field. :rtype: dict """ # If this type isn't directly in the field mapping then check the # hierarchy until we find something that does. for field_class in type(field).__mro__: if field_class in self.field_mapping: type_, fmt = self.field_mapping[field_class] break else: warnings.warn( "Field of type {} does not inherit from marshmallow.Field.".format( type(field) ), UserWarning, ) type_, fmt = "string", None ret = {"type": type_} if fmt: ret["format"] = fmt return ret
[ "def", "field2type_and_format", "(", "self", ",", "field", ")", ":", "# If this type isn't directly in the field mapping then check the", "# hierarchy until we find something that does.", "for", "field_class", "in", "type", "(", "field", ")", ".", "__mro__", ":", "if", "field_class", "in", "self", ".", "field_mapping", ":", "type_", ",", "fmt", "=", "self", ".", "field_mapping", "[", "field_class", "]", "break", "else", ":", "warnings", ".", "warn", "(", "\"Field of type {} does not inherit from marshmallow.Field.\"", ".", "format", "(", "type", "(", "field", ")", ")", ",", "UserWarning", ",", ")", "type_", ",", "fmt", "=", "\"string\"", ",", "None", "ret", "=", "{", "\"type\"", ":", "type_", "}", "if", "fmt", ":", "ret", "[", "\"format\"", "]", "=", "fmt", "return", "ret" ]
Return the dictionary of OpenAPI type and format based on the field type :param Field field: A marshmallow field. :rtype: dict
[ "Return", "the", "dictionary", "of", "OpenAPI", "type", "and", "format", "based", "on", "the", "field", "type" ]
python
train
30.571429
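The converter above resolves a field's OpenAPI type by walking the class MRO until it hits a mapped ancestor. A small self-contained sketch of that lookup, using an illustrative three-entry mapping rather than the plugin's real table (marshmallow itself is assumed to be installed)::

    from marshmallow import fields

    # Illustrative mapping in the same shape as the converter's field_mapping.
    field_mapping = {
        fields.Integer: ("integer", "int32"),
        fields.String: ("string", None),
        fields.DateTime: ("string", "date-time"),
    }

    def lookup(field):
        # Walk the MRO so subclasses (fields.Email inherits from fields.String)
        # fall back to the closest mapped ancestor, as the method above does.
        for field_class in type(field).__mro__:
            if field_class in field_mapping:
                type_, fmt = field_mapping[field_class]
                break
        else:
            type_, fmt = "string", None
        ret = {"type": type_}
        if fmt:
            ret["format"] = fmt
        return ret

    print(lookup(fields.DateTime()))  # {'type': 'string', 'format': 'date-time'}
    print(lookup(fields.Email()))     # {'type': 'string'} via the String ancestor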
cloudera/impyla
impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py
https://github.com/cloudera/impyla/blob/547fa2ba3b6151e2a98b3544301471a643212dc3/impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py#L5355-L5363
def list_privileges(self, principal_name, principal_type, hiveObject): """ Parameters: - principal_name - principal_type - hiveObject """ self.send_list_privileges(principal_name, principal_type, hiveObject) return self.recv_list_privileges()
[ "def", "list_privileges", "(", "self", ",", "principal_name", ",", "principal_type", ",", "hiveObject", ")", ":", "self", ".", "send_list_privileges", "(", "principal_name", ",", "principal_type", ",", "hiveObject", ")", "return", "self", ".", "recv_list_privileges", "(", ")" ]
Parameters: - principal_name - principal_type - hiveObject
[ "Parameters", ":", "-", "principal_name", "-", "principal_type", "-", "hiveObject" ]
python
train
29.888889
joeyespo/gitpress
gitpress/helpers.py
https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/helpers.py#L13-L28
def remove_directory(directory, show_warnings=True): """Deletes a directory and its contents. Returns a list of errors in form (function, path, excinfo).""" errors = [] def onerror(function, path, excinfo): if show_warnings: print 'Cannot delete %s: %s' % (os.path.relpath(directory), excinfo[1]) errors.append((function, path, excinfo)) if os.path.exists(directory): if not os.path.isdir(directory): raise NotADirectoryError(directory) shutil.rmtree(directory, onerror=onerror) return errors
[ "def", "remove_directory", "(", "directory", ",", "show_warnings", "=", "True", ")", ":", "errors", "=", "[", "]", "def", "onerror", "(", "function", ",", "path", ",", "excinfo", ")", ":", "if", "show_warnings", ":", "print", "'Cannot delete %s: %s'", "%", "(", "os", ".", "path", ".", "relpath", "(", "directory", ")", ",", "excinfo", "[", "1", "]", ")", "errors", ".", "append", "(", "(", "function", ",", "path", ",", "excinfo", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "directory", ")", ":", "raise", "NotADirectoryError", "(", "directory", ")", "shutil", ".", "rmtree", "(", "directory", ",", "onerror", "=", "onerror", ")", "return", "errors" ]
Deletes a directory and its contents. Returns a list of errors in form (function, path, excinfo).
[ "Deletes", "a", "directory", "and", "its", "contents", ".", "Returns", "a", "list", "of", "errors", "in", "form", "(", "function", "path", "excinfo", ")", "." ]
python
train
35
saltstack/salt
salt/serializers/python.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/serializers/python.py#L25-L43
def serialize(obj, **options): ''' Serialize Python data to a Python string representation (via pprint.format) :param obj: the data structure to serialize :param options: options given to pprint.format ''' #round-trip this through JSON to avoid OrderedDict types # there's probably a more performant way to do this... # TODO remove json round-trip when all dataset will use # serializers return pprint.pformat( salt.utils.json.loads( salt.utils.json.dumps(obj, _json_module=_json), _json_module=_json ), **options )
[ "def", "serialize", "(", "obj", ",", "*", "*", "options", ")", ":", "#round-trip this through JSON to avoid OrderedDict types", "# there's probably a more performant way to do this...", "# TODO remove json round-trip when all dataset will use", "# serializers", "return", "pprint", ".", "pformat", "(", "salt", ".", "utils", ".", "json", ".", "loads", "(", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "obj", ",", "_json_module", "=", "_json", ")", ",", "_json_module", "=", "_json", ")", ",", "*", "*", "options", ")" ]
Serialize Python data to a Python string representation (via pprint.format) :param obj: the data structure to serialize :param options: options given to pprint.format
[ "Serialize", "Python", "data", "to", "a", "Python", "string", "representation", "(", "via", "pprint", ".", "format", ")" ]
python
train
31.105263
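A short call of the serializer documented above, assuming a SaltStack installation so that ``salt.serializers.python`` is importable; the sample dict is arbitrary::

    from salt.serializers.python import serialize

    data = {'names': ['a', 'b'], 'count': 2}
    print(serialize(data))
    # -> {'count': 2, 'names': ['a', 'b']}
    # pprint.pformat output after the JSON round-trip has replaced any
    # OrderedDict values with plain dicts and lists.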
MakerReduxCorp/PLOD
PLOD/__init__.py
https://github.com/MakerReduxCorp/PLOD/blob/707502cd928e5be6bd5e46d7f6de7da0e188cf1e/PLOD/__init__.py#L610-L640
def gte(self, key, value, includeMissing=False): '''Return entries where the key's value is greater or equal (>=). Example of use: >>> test = [ ... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 }, ... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]}, ... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]}, ... {"name": "Bill", "age": 19, "income": 29000 }, ... ] >>> print PLOD(test).gte("age", 19).returnString() [ {age: 20, income: 15000, name: 'Joe' , wigs: [1, 2, 3]}, {age: 19, income: 29000, name: 'Bill', wigs: None } ] .. versionadded:: 0.1.1 :param key: The dictionary key (or cascading list of keys) that should be the basis of comparison. :param value: The value to compare with. :param includeMissing: Defaults to False. If True, then entries missing the key are also included. :returns: self ''' (self.table, self.index_track) = internal.select(self.table, self.index_track, key, self.GREATERorEQUAL, value, includeMissing) return self
[ "def", "gte", "(", "self", ",", "key", ",", "value", ",", "includeMissing", "=", "False", ")", ":", "(", "self", ".", "table", ",", "self", ".", "index_track", ")", "=", "internal", ".", "select", "(", "self", ".", "table", ",", "self", ".", "index_track", ",", "key", ",", "self", ".", "GREATERorEQUAL", ",", "value", ",", "includeMissing", ")", "return", "self" ]
Return entries where the key's value is greater or equal (>=). Example of use: >>> test = [ ... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 }, ... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]}, ... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]}, ... {"name": "Bill", "age": 19, "income": 29000 }, ... ] >>> print PLOD(test).gte("age", 19).returnString() [ {age: 20, income: 15000, name: 'Joe' , wigs: [1, 2, 3]}, {age: 19, income: 29000, name: 'Bill', wigs: None } ] .. versionadded:: 0.1.1 :param key: The dictionary key (or cascading list of keys) that should be the basis of comparison. :param value: The value to compare with. :param includeMissing: Defaults to False. If True, then entries missing the key are also included. :returns: self
[ "Return", "entries", "where", "the", "key", "s", "value", "is", "greater", "or", "equal", "(", ">", "=", ")", "." ]
python
train
40.387097
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAARP/QAUser.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAUser.py#L475-L499
def sync(self):
        """Sync with the database based on the account username/password
        """
        if self.wechat_id is not None:
            res = self.client.find_one({'wechat_id': self.wechat_id})
        else:
            res = self.client.find_one(
                {
                    'username': self.username,
                    'password': self.password
                }
            )
        if res is None:
            if self.client.find_one({'username': self.username}) is None:
                self.client.insert_one(self.message)
                return self
            else:
                # username already exists but the password does not match
                raise RuntimeError('账户名已存在且账户密码不匹配')
        else:
            self.reload(res)
            return self
[ "def", "sync", "(", "self", ")", ":", "if", "self", ".", "wechat_id", "is", "not", "None", ":", "res", "=", "self", ".", "client", ".", "find_one", "(", "{", "'wechat_id'", ":", "self", ".", "wechat_id", "}", ")", "else", ":", "res", "=", "self", ".", "client", ".", "find_one", "(", "{", "'username'", ":", "self", ".", "username", ",", "'password'", ":", "self", ".", "password", "}", ")", "if", "res", "is", "None", ":", "if", "self", ".", "client", ".", "find_one", "(", "{", "'username'", ":", "self", ".", "username", "}", ")", "is", "None", ":", "self", ".", "client", ".", "insert_one", "(", "self", ".", "message", ")", "return", "self", "else", ":", "raise", "RuntimeError", "(", "'账户名已存在且账户密码不匹配')", "", "else", ":", "self", ".", "reload", "(", "res", ")", "return", "self" ]
Sync with the database based on the account username/password
[ "基于账户", "/", "密码去sync数据库" ]
python
train
26.28
ThreatConnect-Inc/tcex
tcex/tcex_ti/tcex_ti.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti/tcex_ti.py#L295-L312
def email(self, name, to, from_addr, subject, body, header, owner=None, **kwargs): """ Create the Email TI object. Args: owner: to: from_addr: name: subject: header: body: **kwargs: Return: """ return Email(self.tcex, name, to, from_addr, subject, body, header, owner=owner, **kwargs)
[ "def", "email", "(", "self", ",", "name", ",", "to", ",", "from_addr", ",", "subject", ",", "body", ",", "header", ",", "owner", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "Email", "(", "self", ".", "tcex", ",", "name", ",", "to", ",", "from_addr", ",", "subject", ",", "body", ",", "header", ",", "owner", "=", "owner", ",", "*", "*", "kwargs", ")" ]
Create the Email TI object. Args: owner: to: from_addr: name: subject: header: body: **kwargs: Return:
[ "Create", "the", "Email", "TI", "object", "." ]
python
train
23
42matters/bottle-couchbase
bottle_couchbase.py
https://github.com/42matters/bottle-couchbase/blob/79e8c347e0ec4e1daab5da2a844fe8aa763019e2/bottle_couchbase.py#L69-L98
def get_client(self, initial_timeout=0.1, next_timeout=30): """ Wait until a client instance is available :param float initial_timeout: how long to wait initially for an existing client to complete :param float next_timeout: if the pool could not obtain a client during the initial timeout, and we have allocated the maximum available number of clients, wait this long until we can retrieve another one :return: A connection object """ try: return self._test_client(self._q.get(True, initial_timeout)) except Empty: try: self._lock.acquire() if self._clients_in_use >= self._max_clients: raise _ClientUnavailableError("Too many clients in use") return self._test_client(self._make_client()) except NetworkError: if not self._tolerate_error: raise except _ClientUnavailableError as e: try: return self._test_client(self._q.get(True, next_timeout)) except Empty: raise e finally: self._lock.release()
[ "def", "get_client", "(", "self", ",", "initial_timeout", "=", "0.1", ",", "next_timeout", "=", "30", ")", ":", "try", ":", "return", "self", ".", "_test_client", "(", "self", ".", "_q", ".", "get", "(", "True", ",", "initial_timeout", ")", ")", "except", "Empty", ":", "try", ":", "self", ".", "_lock", ".", "acquire", "(", ")", "if", "self", ".", "_clients_in_use", ">=", "self", ".", "_max_clients", ":", "raise", "_ClientUnavailableError", "(", "\"Too many clients in use\"", ")", "return", "self", ".", "_test_client", "(", "self", ".", "_make_client", "(", ")", ")", "except", "NetworkError", ":", "if", "not", "self", ".", "_tolerate_error", ":", "raise", "except", "_ClientUnavailableError", "as", "e", ":", "try", ":", "return", "self", ".", "_test_client", "(", "self", ".", "_q", ".", "get", "(", "True", ",", "next_timeout", ")", ")", "except", "Empty", ":", "raise", "e", "finally", ":", "self", ".", "_lock", ".", "release", "(", ")" ]
Wait until a client instance is available :param float initial_timeout: how long to wait initially for an existing client to complete :param float next_timeout: if the pool could not obtain a client during the initial timeout, and we have allocated the maximum available number of clients, wait this long until we can retrieve another one :return: A connection object
[ "Wait", "until", "a", "client", "instance", "is", "available", ":", "param", "float", "initial_timeout", ":", "how", "long", "to", "wait", "initially", "for", "an", "existing", "client", "to", "complete", ":", "param", "float", "next_timeout", ":", "if", "the", "pool", "could", "not", "obtain", "a", "client", "during", "the", "initial", "timeout", "and", "we", "have", "allocated", "the", "maximum", "available", "number", "of", "clients", "wait", "this", "long", "until", "we", "can", "retrieve", "another", "one" ]
python
train
40.866667
katerina7479/pypdflite
pypdflite/pdflite.py
https://github.com/katerina7479/pypdflite/blob/ac2501f30d6619eae9dea5644717575ca9263d0a/pypdflite/pdflite.py#L294-L304
def _output_to_file(self): """ Save to filepath specified on init. (Will throw an error if the document is already open). """ f = open(self.filepath, 'wb') if not f: raise Exception('Unable to create output file: ', self.filepath) f.write(self.session.buffer) f.close()
[ "def", "_output_to_file", "(", "self", ")", ":", "f", "=", "open", "(", "self", ".", "filepath", ",", "'wb'", ")", "if", "not", "f", ":", "raise", "Exception", "(", "'Unable to create output file: '", ",", "self", ".", "filepath", ")", "f", ".", "write", "(", "self", ".", "session", ".", "buffer", ")", "f", ".", "close", "(", ")" ]
Save to filepath specified on init. (Will throw an error if the document is already open).
[ "Save", "to", "filepath", "specified", "on", "init", ".", "(", "Will", "throw", "an", "error", "if", "the", "document", "is", "already", "open", ")", "." ]
python
test
32.181818
wummel/linkchecker
linkcheck/logger/text.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/logger/text.py#L195-L198
def write_info (self, url_data): """Write url_data.info.""" self.write(self.part("info") + self.spaces("info")) self.writeln(self.wrap(url_data.info, 65), color=self.colorinfo)
[ "def", "write_info", "(", "self", ",", "url_data", ")", ":", "self", ".", "write", "(", "self", ".", "part", "(", "\"info\"", ")", "+", "self", ".", "spaces", "(", "\"info\"", ")", ")", "self", ".", "writeln", "(", "self", ".", "wrap", "(", "url_data", ".", "info", ",", "65", ")", ",", "color", "=", "self", ".", "colorinfo", ")" ]
Write url_data.info.
[ "Write", "url_data", ".", "info", "." ]
python
train
49.25
hughsie/python-appstream
appstream/store.py
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/store.py#L74-L79
def get_components(self): """ Returns all the applications from the store """ components = [] for app_id in self.components: components.append(self.components[app_id]) return components
[ "def", "get_components", "(", "self", ")", ":", "components", "=", "[", "]", "for", "app_id", "in", "self", ".", "components", ":", "components", ".", "append", "(", "self", ".", "components", "[", "app_id", "]", ")", "return", "components" ]
Returns all the applications from the store
[ "Returns", "all", "the", "applications", "from", "the", "store" ]
python
train
37.333333
Vital-Fernandez/dazer
bin/lib/Math_Libraries/lnr_script.py
https://github.com/Vital-Fernandez/dazer/blob/3c9ae8ae6d40ea33f22cc20dc11365d6d6e65244/bin/lib/Math_Libraries/lnr_script.py#L470-L535
def mle(x1, x2, x1err=[], x2err=[], cerr=[], s_int=True, po=(1,0,0.1), verbose=False, logify=True, full_output=False): """ Maximum Likelihood Estimation of best-fit parameters Parameters ---------- x1, x2 : float arrays the independent and dependent variables. x1err, x2err : float arrays (optional) measurement uncertainties on independent and dependent variables. Any of the two, or both, can be supplied. cerr : float array (same size as x1) covariance on the measurement errors s_int : boolean (default True) whether to include intrinsic scatter in the MLE. po : tuple of floats initial guess for free parameters. If s_int is True, then po must have 3 elements; otherwise it can have two (for the zero point and the slope) verbose : boolean (default False) verbose? logify : boolean (default True) whether to convert the values to log10's. This is to calculate the best-fit power law. Note that the result is given for the equation log(y)=a+b*log(x) -- i.e., the zero point must be converted to 10**a if logify=True full_output : boolean (default False) numpy.optimize.fmin's full_output argument Returns ------- a : float Maximum Likelihood Estimate of the zero point. Note that if logify=True, the power-law intercept is 10**a b : float Maximum Likelihood Estimate of the slope s : float (optional, if s_int=True) Maximum Likelihood Estimate of the intrinsic scatter """ from scipy import optimize n = len(x1) if len(x2) != n: raise ValueError('x1 and x2 must have same length') if len(x1err) == 0: x1err = numpy.ones(n) if len(x2err) == 0: x2err = numpy.ones(n) if logify: x1, x2, x1err, x2err = to_log(x1, x2, x1err, x2err) f = lambda a, b: a + b * x1 if s_int: w = lambda b, s: numpy.sqrt(b**2 * x1err**2 + x2err**2 + s**2) loglike = lambda p: 2 * sum(numpy.log(w(p[1],p[2]))) + \ sum(((x2 - f(p[0],p[1])) / w(p[1],p[2])) ** 2) + \ numpy.log(n * numpy.sqrt(2*numpy.pi)) / 2 else: w = lambda b: numpy.sqrt(b**2 * x1err**2 + x2err**2) loglike = lambda p: sum(numpy.log(w(p[1]))) + \ sum(((x2 - f(p[0],p[1])) / w(p[1])) ** 2) / 2 + \ numpy.log(n * numpy.sqrt(2*numpy.pi)) / 2 po = po[:2] out = optimize.fmin(loglike, po, disp=verbose, full_output=full_output) return out
[ "def", "mle", "(", "x1", ",", "x2", ",", "x1err", "=", "[", "]", ",", "x2err", "=", "[", "]", ",", "cerr", "=", "[", "]", ",", "s_int", "=", "True", ",", "po", "=", "(", "1", ",", "0", ",", "0.1", ")", ",", "verbose", "=", "False", ",", "logify", "=", "True", ",", "full_output", "=", "False", ")", ":", "from", "scipy", "import", "optimize", "n", "=", "len", "(", "x1", ")", "if", "len", "(", "x2", ")", "!=", "n", ":", "raise", "ValueError", "(", "'x1 and x2 must have same length'", ")", "if", "len", "(", "x1err", ")", "==", "0", ":", "x1err", "=", "numpy", ".", "ones", "(", "n", ")", "if", "len", "(", "x2err", ")", "==", "0", ":", "x2err", "=", "numpy", ".", "ones", "(", "n", ")", "if", "logify", ":", "x1", ",", "x2", ",", "x1err", ",", "x2err", "=", "to_log", "(", "x1", ",", "x2", ",", "x1err", ",", "x2err", ")", "f", "=", "lambda", "a", ",", "b", ":", "a", "+", "b", "*", "x1", "if", "s_int", ":", "w", "=", "lambda", "b", ",", "s", ":", "numpy", ".", "sqrt", "(", "b", "**", "2", "*", "x1err", "**", "2", "+", "x2err", "**", "2", "+", "s", "**", "2", ")", "loglike", "=", "lambda", "p", ":", "2", "*", "sum", "(", "numpy", ".", "log", "(", "w", "(", "p", "[", "1", "]", ",", "p", "[", "2", "]", ")", ")", ")", "+", "sum", "(", "(", "(", "x2", "-", "f", "(", "p", "[", "0", "]", ",", "p", "[", "1", "]", ")", ")", "/", "w", "(", "p", "[", "1", "]", ",", "p", "[", "2", "]", ")", ")", "**", "2", ")", "+", "numpy", ".", "log", "(", "n", "*", "numpy", ".", "sqrt", "(", "2", "*", "numpy", ".", "pi", ")", ")", "/", "2", "else", ":", "w", "=", "lambda", "b", ":", "numpy", ".", "sqrt", "(", "b", "**", "2", "*", "x1err", "**", "2", "+", "x2err", "**", "2", ")", "loglike", "=", "lambda", "p", ":", "sum", "(", "numpy", ".", "log", "(", "w", "(", "p", "[", "1", "]", ")", ")", ")", "+", "sum", "(", "(", "(", "x2", "-", "f", "(", "p", "[", "0", "]", ",", "p", "[", "1", "]", ")", ")", "/", "w", "(", "p", "[", "1", "]", ")", ")", "**", "2", ")", "/", "2", "+", "numpy", ".", "log", "(", "n", "*", "numpy", ".", "sqrt", "(", "2", "*", "numpy", ".", "pi", ")", ")", "/", "2", "po", "=", "po", "[", ":", "2", "]", "out", "=", "optimize", ".", "fmin", "(", "loglike", ",", "po", ",", "disp", "=", "verbose", ",", "full_output", "=", "full_output", ")", "return", "out" ]
Maximum Likelihood Estimation of best-fit parameters Parameters ---------- x1, x2 : float arrays the independent and dependent variables. x1err, x2err : float arrays (optional) measurement uncertainties on independent and dependent variables. Any of the two, or both, can be supplied. cerr : float array (same size as x1) covariance on the measurement errors s_int : boolean (default True) whether to include intrinsic scatter in the MLE. po : tuple of floats initial guess for free parameters. If s_int is True, then po must have 3 elements; otherwise it can have two (for the zero point and the slope) verbose : boolean (default False) verbose? logify : boolean (default True) whether to convert the values to log10's. This is to calculate the best-fit power law. Note that the result is given for the equation log(y)=a+b*log(x) -- i.e., the zero point must be converted to 10**a if logify=True full_output : boolean (default False) numpy.optimize.fmin's full_output argument Returns ------- a : float Maximum Likelihood Estimate of the zero point. Note that if logify=True, the power-law intercept is 10**a b : float Maximum Likelihood Estimate of the slope s : float (optional, if s_int=True) Maximum Likelihood Estimate of the intrinsic scatter
[ "Maximum", "Likelihood", "Estimation", "of", "best", "-", "fit", "parameters" ]
python
train
42.681818
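The ``mle`` docstring above lists inputs and outputs but no call; a minimal sketch under stated assumptions — ``mle`` and its ``to_log`` helper are importable from the module in the record's path (packaging may differ), and the synthetic power-law data below is purely illustrative::

    import numpy
    from lnr_script import mle  # module name per the record's path; adjust to your install

    rng = numpy.random.RandomState(0)
    x1 = 10 ** rng.uniform(0.0, 2.0, 200)                 # x in [1, 100]
    x2 = 3.0 * x1 ** 1.5 * rng.lognormal(0.0, 0.05, 200)  # y = 3 * x**1.5 with scatter

    # Fit without intrinsic scatter; with logify=True the result describes
    # log10(y) = a + b * log10(x), so the power-law amplitude is 10**a.
    a, b = mle(x1, x2, x1err=0.05 * x1, x2err=0.05 * x2,
               s_int=False, po=(0.0, 1.0), logify=True)
    # 10**a should land near 3 and b near 1.5 for this input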
klen/muffin-jade
muffin_jade.py
https://github.com/klen/muffin-jade/blob/3ddd6bf27fac03edc0bef3b0840bcd2e278babb3/muffin_jade.py#L46-L57
def setup(self, app): """ Setup the plugin from an application. """ super().setup(app) if isinstance(self.cfg.template_folders, str): self.cfg.template_folders = [self.cfg.template_folders] else: self.cfg.template_folders = list(self.cfg.template_folders) self.ctx_provider(lambda: {'app': self.app}) self.env = Environment(debug=app.cfg.DEBUG, **self.cfg)
[ "def", "setup", "(", "self", ",", "app", ")", ":", "super", "(", ")", ".", "setup", "(", "app", ")", "if", "isinstance", "(", "self", ".", "cfg", ".", "template_folders", ",", "str", ")", ":", "self", ".", "cfg", ".", "template_folders", "=", "[", "self", ".", "cfg", ".", "template_folders", "]", "else", ":", "self", ".", "cfg", ".", "template_folders", "=", "list", "(", "self", ".", "cfg", ".", "template_folders", ")", "self", ".", "ctx_provider", "(", "lambda", ":", "{", "'app'", ":", "self", ".", "app", "}", ")", "self", ".", "env", "=", "Environment", "(", "debug", "=", "app", ".", "cfg", ".", "DEBUG", ",", "*", "*", "self", ".", "cfg", ")" ]
Setup the plugin from an application.
[ "Setup", "the", "plugin", "from", "an", "application", "." ]
python
train
35
osfclient/osfclient
osfclient/models/core.py
https://github.com/osfclient/osfclient/blob/44b9a87e8c1ae6b63cdecd27a924af3fc2bf94cf/osfclient/models/core.py#L62-L73
def _follow_next(self, url): """Follow the 'next' link on paginated results.""" response = self._json(self._get(url), 200) data = response['data'] next_url = self._get_attribute(response, 'links', 'next') while next_url is not None: response = self._json(self._get(next_url), 200) data.extend(response['data']) next_url = self._get_attribute(response, 'links', 'next') return data
[ "def", "_follow_next", "(", "self", ",", "url", ")", ":", "response", "=", "self", ".", "_json", "(", "self", ".", "_get", "(", "url", ")", ",", "200", ")", "data", "=", "response", "[", "'data'", "]", "next_url", "=", "self", ".", "_get_attribute", "(", "response", ",", "'links'", ",", "'next'", ")", "while", "next_url", "is", "not", "None", ":", "response", "=", "self", ".", "_json", "(", "self", ".", "_get", "(", "next_url", ")", ",", "200", ")", "data", ".", "extend", "(", "response", "[", "'data'", "]", ")", "next_url", "=", "self", ".", "_get_attribute", "(", "response", ",", "'links'", ",", "'next'", ")", "return", "data" ]
Follow the 'next' link on paginated results.
[ "Follow", "the", "next", "link", "on", "paginated", "results", "." ]
python
valid
37.916667
halcy/Mastodon.py
mastodon/Mastodon.py
https://github.com/halcy/Mastodon.py/blob/35c43562dd3d34d6ebf7a0f757c09e8fcccc957c/mastodon/Mastodon.py#L190-L242
def create_app(client_name, scopes=__DEFAULT_SCOPES, redirect_uris=None, website=None, to_file=None, api_base_url=__DEFAULT_BASE_URL, request_timeout=__DEFAULT_TIMEOUT, session=None): """ Create a new app with given `client_name` and `scopes` (The basic scropse are "read", "write", "follow" and "push" - more granular scopes are available, please refere to Mastodon documentation for which). Specify `redirect_uris` if you want users to be redirected to a certain page after authenticating in an oauth flow. You can specify multiple URLs by passing a list. Note that if you wish to use OAuth authentication with redirects, the redirect URI must be one of the URLs specified here. Specify `to_file` to persist your apps info to a file so you can use them in the constructor. Specify `api_base_url` if you want to register an app on an instance different from the flagship one. Specify `website` to give a website for your app. Specify `session` with a requests.Session for it to be used instead of the deafult. Presently, app registration is open by default, but this is not guaranteed to be the case for all future mastodon instances or even the flagship instance in the future. Returns `client_id` and `client_secret`, both as strings. """ api_base_url = Mastodon.__protocolize(api_base_url) request_data = { 'client_name': client_name, 'scopes': " ".join(scopes) } try: if redirect_uris is not None: if isinstance(redirect_uris, (list, tuple)): redirect_uris = "\n".join(list(redirect_uris)) request_data['redirect_uris'] = redirect_uris else: request_data['redirect_uris'] = 'urn:ietf:wg:oauth:2.0:oob' if website is not None: request_data['website'] = website if session: ret = session.post(api_base_url + '/api/v1/apps', data=request_data, timeout=request_timeout) response = ret.json() else: response = requests.post(api_base_url + '/api/v1/apps', data=request_data, timeout=request_timeout) response = response.json() except Exception as e: raise MastodonNetworkError("Could not complete request: %s" % e) if to_file is not None: with open(to_file, 'w') as secret_file: secret_file.write(response['client_id'] + '\n') secret_file.write(response['client_secret'] + '\n') return (response['client_id'], response['client_secret'])
[ "def", "create_app", "(", "client_name", ",", "scopes", "=", "__DEFAULT_SCOPES", ",", "redirect_uris", "=", "None", ",", "website", "=", "None", ",", "to_file", "=", "None", ",", "api_base_url", "=", "__DEFAULT_BASE_URL", ",", "request_timeout", "=", "__DEFAULT_TIMEOUT", ",", "session", "=", "None", ")", ":", "api_base_url", "=", "Mastodon", ".", "__protocolize", "(", "api_base_url", ")", "request_data", "=", "{", "'client_name'", ":", "client_name", ",", "'scopes'", ":", "\" \"", ".", "join", "(", "scopes", ")", "}", "try", ":", "if", "redirect_uris", "is", "not", "None", ":", "if", "isinstance", "(", "redirect_uris", ",", "(", "list", ",", "tuple", ")", ")", ":", "redirect_uris", "=", "\"\\n\"", ".", "join", "(", "list", "(", "redirect_uris", ")", ")", "request_data", "[", "'redirect_uris'", "]", "=", "redirect_uris", "else", ":", "request_data", "[", "'redirect_uris'", "]", "=", "'urn:ietf:wg:oauth:2.0:oob'", "if", "website", "is", "not", "None", ":", "request_data", "[", "'website'", "]", "=", "website", "if", "session", ":", "ret", "=", "session", ".", "post", "(", "api_base_url", "+", "'/api/v1/apps'", ",", "data", "=", "request_data", ",", "timeout", "=", "request_timeout", ")", "response", "=", "ret", ".", "json", "(", ")", "else", ":", "response", "=", "requests", ".", "post", "(", "api_base_url", "+", "'/api/v1/apps'", ",", "data", "=", "request_data", ",", "timeout", "=", "request_timeout", ")", "response", "=", "response", ".", "json", "(", ")", "except", "Exception", "as", "e", ":", "raise", "MastodonNetworkError", "(", "\"Could not complete request: %s\"", "%", "e", ")", "if", "to_file", "is", "not", "None", ":", "with", "open", "(", "to_file", ",", "'w'", ")", "as", "secret_file", ":", "secret_file", ".", "write", "(", "response", "[", "'client_id'", "]", "+", "'\\n'", ")", "secret_file", ".", "write", "(", "response", "[", "'client_secret'", "]", "+", "'\\n'", ")", "return", "(", "response", "[", "'client_id'", "]", ",", "response", "[", "'client_secret'", "]", ")" ]
Create a new app with given `client_name` and `scopes` (The basic scropse are "read", "write", "follow" and "push" - more granular scopes are available, please refere to Mastodon documentation for which). Specify `redirect_uris` if you want users to be redirected to a certain page after authenticating in an oauth flow. You can specify multiple URLs by passing a list. Note that if you wish to use OAuth authentication with redirects, the redirect URI must be one of the URLs specified here. Specify `to_file` to persist your apps info to a file so you can use them in the constructor. Specify `api_base_url` if you want to register an app on an instance different from the flagship one. Specify `website` to give a website for your app. Specify `session` with a requests.Session for it to be used instead of the deafult. Presently, app registration is open by default, but this is not guaranteed to be the case for all future mastodon instances or even the flagship instance in the future. Returns `client_id` and `client_secret`, both as strings.
[ "Create", "a", "new", "app", "with", "given", "client_name", "and", "scopes", "(", "The", "basic", "scropse", "are", "read", "write", "follow", "and", "push", "-", "more", "granular", "scopes", "are", "available", "please", "refere", "to", "Mastodon", "documentation", "for", "which", ")", "." ]
python
train
50.962264
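A minimal usage sketch for ``Mastodon.create_app`` as documented above, assuming Mastodon.py is installed; the app name, instance URL and credential file are placeholders::

    from mastodon import Mastodon

    client_id, client_secret = Mastodon.create_app(
        "my-example-app",
        scopes=["read", "write"],
        api_base_url="https://mastodon.example",     # placeholder instance
        to_file="my_example_app_clientcred.secret",  # credentials are also returned
    )
    print(client_id, client_secret)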
aleju/imgaug
imgaug/augmentables/lines.py
https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/lines.py#L1530-L1552
def on(self, image): """ Project bounding boxes from one image to a new one. Parameters ---------- image : ndarray or tuple of int The new image onto which to project. Either an image with shape ``(H,W,[C])`` or a tuple denoting such an image shape. Returns ------- line_strings : imgaug.augmentables.lines.LineStrings Object containing all projected line strings. """ shape = normalize_shape(image) if shape[0:2] == self.shape[0:2]: return self.deepcopy() line_strings = [ls.project(self.shape, shape) for ls in self.line_strings] return self.deepcopy(line_strings=line_strings, shape=shape)
[ "def", "on", "(", "self", ",", "image", ")", ":", "shape", "=", "normalize_shape", "(", "image", ")", "if", "shape", "[", "0", ":", "2", "]", "==", "self", ".", "shape", "[", "0", ":", "2", "]", ":", "return", "self", ".", "deepcopy", "(", ")", "line_strings", "=", "[", "ls", ".", "project", "(", "self", ".", "shape", ",", "shape", ")", "for", "ls", "in", "self", ".", "line_strings", "]", "return", "self", ".", "deepcopy", "(", "line_strings", "=", "line_strings", ",", "shape", "=", "shape", ")" ]
Project bounding boxes from one image to a new one. Parameters ---------- image : ndarray or tuple of int The new image onto which to project. Either an image with shape ``(H,W,[C])`` or a tuple denoting such an image shape. Returns ------- line_strings : imgaug.augmentables.lines.LineStrings Object containing all projected line strings.
[ "Project", "bounding", "boxes", "from", "one", "image", "to", "a", "new", "one", "." ]
python
valid
33.086957
jeffrimko/Qprompt
lib/qprompt.py
https://github.com/jeffrimko/Qprompt/blob/1887c53656dfecac49e0650e0f912328801cbb83/lib/qprompt.py#L674-L691
def _guess_name(desc, taken=None): """Attempts to guess the menu entry name from the function name.""" taken = taken or [] name = "" # Try to find the shortest name based on the given description. for word in desc.split(): c = word[0].lower() if not c.isalnum(): continue name += c if name not in taken: break # If name is still taken, add a number postfix. count = 2 while name in taken: name = name + str(count) count += 1 return name
[ "def", "_guess_name", "(", "desc", ",", "taken", "=", "None", ")", ":", "taken", "=", "taken", "or", "[", "]", "name", "=", "\"\"", "# Try to find the shortest name based on the given description.", "for", "word", "in", "desc", ".", "split", "(", ")", ":", "c", "=", "word", "[", "0", "]", ".", "lower", "(", ")", "if", "not", "c", ".", "isalnum", "(", ")", ":", "continue", "name", "+=", "c", "if", "name", "not", "in", "taken", ":", "break", "# If name is still taken, add a number postfix.", "count", "=", "2", "while", "name", "in", "taken", ":", "name", "=", "name", "+", "str", "(", "count", ")", "count", "+=", "1", "return", "name" ]
Attempts to guess the menu entry name from the function name.
[ "Attempts", "to", "guess", "the", "menu", "entry", "name", "from", "the", "function", "name", "." ]
python
train
29.277778
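A small illustration of the name-guessing rule above; ``_guess_name`` is module-private, so the import is shown only to make the sketch runnable::

    from qprompt import _guess_name  # private helper, imported for illustration

    taken = []
    for desc in ["Open File", "Open Folder", "Quit"]:
        name = _guess_name(desc, taken)
        taken.append(name)
    print(taken)  # -> ['o', 'of', 'q']: first letters accumulate until unique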
halcy/Mastodon.py
mastodon/Mastodon.py
https://github.com/halcy/Mastodon.py/blob/35c43562dd3d34d6ebf7a0f757c09e8fcccc957c/mastodon/Mastodon.py#L52-L63
def bigger_version(version_string_a, version_string_b): """Returns the bigger version of two version strings.""" major_a, minor_a, patch_a = parse_version_string(version_string_a) major_b, minor_b, patch_b = parse_version_string(version_string_b) if major_a > major_b: return version_string_a elif major_a == major_b and minor_a > minor_b: return version_string_a elif major_a == major_b and minor_a == minor_b and patch_a > patch_b: return version_string_a return version_string_b
[ "def", "bigger_version", "(", "version_string_a", ",", "version_string_b", ")", ":", "major_a", ",", "minor_a", ",", "patch_a", "=", "parse_version_string", "(", "version_string_a", ")", "major_b", ",", "minor_b", ",", "patch_b", "=", "parse_version_string", "(", "version_string_b", ")", "if", "major_a", ">", "major_b", ":", "return", "version_string_a", "elif", "major_a", "==", "major_b", "and", "minor_a", ">", "minor_b", ":", "return", "version_string_a", "elif", "major_a", "==", "major_b", "and", "minor_a", "==", "minor_b", "and", "patch_a", ">", "patch_b", ":", "return", "version_string_a", "return", "version_string_b" ]
Returns the bigger version of two version strings.
[ "Returns", "the", "bigger", "version", "of", "two", "version", "strings", "." ]
python
train
43.583333
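The comparison above is easiest to read with concrete inputs; this sketch assumes ``bigger_version`` (and ``parse_version_string``, taken to yield integer components) can be imported from the ``mastodon.Mastodon`` module::

    from mastodon.Mastodon import bigger_version

    print(bigger_version("2.4.3", "2.4.10"))  # -> 2.4.10 (patch 10 beats 3 numerically)
    print(bigger_version("3.0.0", "2.9.9"))   # -> 3.0.0  (major version wins first)
    print(bigger_version("1.2.3", "1.2.3"))   # -> 1.2.3  (ties fall through to the second argument)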
rochacbruno/python-pagseguro
examples/flask/flask_seguro/cart.py
https://github.com/rochacbruno/python-pagseguro/blob/18a9ca3301783cb323e838574b59f9ddffa9a593/examples/flask/flask_seguro/cart.py#L49-L59
def update(self): """ Remove items in cart """ subtotal = float(0) total = float(0) for product in self.items: subtotal += float(product["price"]) if subtotal > 0: total = subtotal + self.extra_amount self.subtotal = subtotal self.total = total
[ "def", "update", "(", "self", ")", ":", "subtotal", "=", "float", "(", "0", ")", "total", "=", "float", "(", "0", ")", "for", "product", "in", "self", ".", "items", ":", "subtotal", "+=", "float", "(", "product", "[", "\"price\"", "]", ")", "if", "subtotal", ">", "0", ":", "total", "=", "subtotal", "+", "self", ".", "extra_amount", "self", ".", "subtotal", "=", "subtotal", "self", ".", "total", "=", "total" ]
Remove items in cart
[ "Remove", "items", "in", "cart" ]
python
train
28.636364
inveniosoftware/invenio-records-files
invenio_records_files/api.py
https://github.com/inveniosoftware/invenio-records-files/blob/c410eba986ea43be7e97082d5dcbbdc19ccec39c/invenio_records_files/api.py#L150-L156
def flush(self): """Flush changes to record.""" files = self.dumps() # Do not create `_files` when there has not been `_files` field before # and the record still has no files attached. if files or '_files' in self.record: self.record['_files'] = files
[ "def", "flush", "(", "self", ")", ":", "files", "=", "self", ".", "dumps", "(", ")", "# Do not create `_files` when there has not been `_files` field before", "# and the record still has no files attached.", "if", "files", "or", "'_files'", "in", "self", ".", "record", ":", "self", ".", "record", "[", "'_files'", "]", "=", "files" ]
Flush changes to record.
[ "Flush", "changes", "to", "record", "." ]
python
train
42.571429
openstack/pyghmi
pyghmi/ipmi/private/session.py
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/session.py#L626-L632
def _add_request_entry(self, entry=()): """This function record the request with netfn, sequence number and command, which will be used in parse_ipmi_payload. :param entry: a set of netfn, sequence number and command. """ if not self._lookup_request_entry(entry): self.request_entry.append(entry)
[ "def", "_add_request_entry", "(", "self", ",", "entry", "=", "(", ")", ")", ":", "if", "not", "self", ".", "_lookup_request_entry", "(", "entry", ")", ":", "self", ".", "request_entry", ".", "append", "(", "entry", ")" ]
This function record the request with netfn, sequence number and command, which will be used in parse_ipmi_payload. :param entry: a set of netfn, sequence number and command.
[ "This", "function", "record", "the", "request", "with", "netfn", "sequence", "number", "and", "command", "which", "will", "be", "used", "in", "parse_ipmi_payload", ".", ":", "param", "entry", ":", "a", "set", "of", "netfn", "sequence", "number", "and", "command", "." ]
python
train
48.857143
hotdoc/hotdoc
hotdoc/utils/loggable.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/utils/loggable.py#L207-L210
def register_warning_code(code, exception_type, domain='core'): """Register a new warning code""" Logger._warning_code_to_exception[code] = (exception_type, domain) Logger._domain_codes[domain].add(code)
[ "def", "register_warning_code", "(", "code", ",", "exception_type", ",", "domain", "=", "'core'", ")", ":", "Logger", ".", "_warning_code_to_exception", "[", "code", "]", "=", "(", "exception_type", ",", "domain", ")", "Logger", ".", "_domain_codes", "[", "domain", "]", ".", "add", "(", "code", ")" ]
Register a new warning code
[ "Register", "a", "new", "warning", "code" ]
python
train
56
projectshift/shift-schema
shiftschema/validators/digits.py
https://github.com/projectshift/shift-schema/blob/07787b540d3369bb37217ffbfbe629118edaf0eb/shiftschema/validators/digits.py#L24-L42
def validate(self, value, model=None, context=None): """ Validate Perform value validation and return result :param value: value to check, cast to string :param model: parent model being validated :param context: object or None, validation context :return: shiftschema.results.SimpleResult """ import re value = str(value) match = re.match(r'^\d+', value) if not match or value != match.group(): return Error(self.not_digital) # success otherwise return Error()
[ "def", "validate", "(", "self", ",", "value", ",", "model", "=", "None", ",", "context", "=", "None", ")", ":", "import", "re", "value", "=", "str", "(", "value", ")", "match", "=", "re", ".", "match", "(", "r'^\\d+'", ",", "value", ")", "if", "not", "match", "or", "value", "!=", "match", ".", "group", "(", ")", ":", "return", "Error", "(", "self", ".", "not_digital", ")", "# success otherwise", "return", "Error", "(", ")" ]
Validate Perform value validation and return result :param value: value to check, cast to string :param model: parent model being validated :param context: object or None, validation context :return: shiftschema.results.SimpleResult
[ "Validate", "Perform", "value", "validation", "and", "return", "result" ]
python
train
32.263158
ministryofjustice/money-to-prisoners-common
mtp_common/build_tasks/executor.py
https://github.com/ministryofjustice/money-to-prisoners-common/blob/33c43a2912cb990d9148da7c8718f480f07d90a1/mtp_common/build_tasks/executor.py#L56-L69
def register(self, *dependencies, default=False, hidden=False, ignore_return_code=False): """ Decorates a callable to turn it into a task """ def outer(func): task = Task(func, *dependencies, default=default, hidden=hidden, ignore_return_code=ignore_return_code) overidden_task = self._tasks.pop(task.name, None) if overidden_task: self._overidden_tasks[task.name].append(overidden_task) self[task.name] = task return task return outer
[ "def", "register", "(", "self", ",", "*", "dependencies", ",", "default", "=", "False", ",", "hidden", "=", "False", ",", "ignore_return_code", "=", "False", ")", ":", "def", "outer", "(", "func", ")", ":", "task", "=", "Task", "(", "func", ",", "*", "dependencies", ",", "default", "=", "default", ",", "hidden", "=", "hidden", ",", "ignore_return_code", "=", "ignore_return_code", ")", "overidden_task", "=", "self", ".", "_tasks", ".", "pop", "(", "task", ".", "name", ",", "None", ")", "if", "overidden_task", ":", "self", ".", "_overidden_tasks", "[", "task", ".", "name", "]", ".", "append", "(", "overidden_task", ")", "self", "[", "task", ".", "name", "]", "=", "task", "return", "task", "return", "outer" ]
Decorates a callable to turn it into a task
[ "Decorates", "a", "callable", "to", "turn", "it", "into", "a", "task" ]
python
train
38.571429
hobu/mgrs
mgrs/core.py
https://github.com/hobu/mgrs/blob/759b3aba86779318854c73b8843ea956acb5eb3f/mgrs/core.py#L77-L82
def check_error(result, func, cargs): "Error checking proper value returns" if result != 0: msg = 'Error in "%s": %s' % (func.__name__, get_errors(result) ) raise RTreeError(msg) return
[ "def", "check_error", "(", "result", ",", "func", ",", "cargs", ")", ":", "if", "result", "!=", "0", ":", "msg", "=", "'Error in \"%s\": %s'", "%", "(", "func", ".", "__name__", ",", "get_errors", "(", "result", ")", ")", "raise", "RTreeError", "(", "msg", ")", "return" ]
Error checking proper value returns
[ "Error", "checking", "proper", "value", "returns" ]
python
train
34.666667
diging/tethne
tethne/readers/zotero.py
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/zotero.py#L234-L240
def handle_link(self, value): """ rdf:link rdf:resource points to the resource described by a record. """ for s, p, o in self.graph.triples((value, None, None)): if p == LINK_ELEM: return unicode(o).replace('file://', '')
[ "def", "handle_link", "(", "self", ",", "value", ")", ":", "for", "s", ",", "p", ",", "o", "in", "self", ".", "graph", ".", "triples", "(", "(", "value", ",", "None", ",", "None", ")", ")", ":", "if", "p", "==", "LINK_ELEM", ":", "return", "unicode", "(", "o", ")", ".", "replace", "(", "'file://'", ",", "''", ")" ]
rdf:link rdf:resource points to the resource described by a record.
[ "rdf", ":", "link", "rdf", ":", "resource", "points", "to", "the", "resource", "described", "by", "a", "record", "." ]
python
train
39.285714
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/geoff.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/geoff.py#L70-L118
def graph2geoff(graph, edge_rel_name, encoder=None): """ Get the `graph` as Geoff string. The edges between the nodes have relationship name `edge_rel_name`. The code below shows a simple example:: # create a graph import networkx as nx G = nx.Graph() G.add_nodes_from([1, 2, 3]) G.add_edge(1, 2) G.add_edge(2, 3) # get the geoff string geoff_string = graph2geoff(G, 'LINKS_TO') If the properties are not json encodable, please pass a custom JSON encoder class. See `JSONEncoder <http://docs.python.org/2/library/json.html#json.JSONEncoder/>`_. Parameters ---------- graph : Graph or DiGraph a NetworkX Graph or a DiGraph edge_rel_name : str relationship name between the nodes encoder: JSONEncoder or None JSONEncoder object. Defaults to JSONEncoder. Returns ------- geoff : str a Geoff string """ if encoder is None: encoder = json.JSONEncoder() is_digraph = isinstance(graph, nx.DiGraph) lines = [] lapp = lines.append for node_name, properties in graph.nodes(data=True): lapp(node2geoff(node_name, properties, encoder)) for from_node, to_node, properties in graph.edges(data=True): lapp(edge2geoff(from_node, to_node, properties, edge_rel_name, encoder)) if not is_digraph: lapp(edge2geoff(to_node, from_node, properties, edge_rel_name, encoder)) return '\n'.join(lines)
[ "def", "graph2geoff", "(", "graph", ",", "edge_rel_name", ",", "encoder", "=", "None", ")", ":", "if", "encoder", "is", "None", ":", "encoder", "=", "json", ".", "JSONEncoder", "(", ")", "is_digraph", "=", "isinstance", "(", "graph", ",", "nx", ".", "DiGraph", ")", "lines", "=", "[", "]", "lapp", "=", "lines", ".", "append", "for", "node_name", ",", "properties", "in", "graph", ".", "nodes", "(", "data", "=", "True", ")", ":", "lapp", "(", "node2geoff", "(", "node_name", ",", "properties", ",", "encoder", ")", ")", "for", "from_node", ",", "to_node", ",", "properties", "in", "graph", ".", "edges", "(", "data", "=", "True", ")", ":", "lapp", "(", "edge2geoff", "(", "from_node", ",", "to_node", ",", "properties", ",", "edge_rel_name", ",", "encoder", ")", ")", "if", "not", "is_digraph", ":", "lapp", "(", "edge2geoff", "(", "to_node", ",", "from_node", ",", "properties", ",", "edge_rel_name", ",", "encoder", ")", ")", "return", "'\\n'", ".", "join", "(", "lines", ")" ]
Get the `graph` as Geoff string. The edges between the nodes have relationship name `edge_rel_name`. The code below shows a simple example:: # create a graph import networkx as nx G = nx.Graph() G.add_nodes_from([1, 2, 3]) G.add_edge(1, 2) G.add_edge(2, 3) # get the geoff string geoff_string = graph2geoff(G, 'LINKS_TO') If the properties are not json encodable, please pass a custom JSON encoder class. See `JSONEncoder <http://docs.python.org/2/library/json.html#json.JSONEncoder/>`_. Parameters ---------- graph : Graph or DiGraph a NetworkX Graph or a DiGraph edge_rel_name : str relationship name between the nodes encoder: JSONEncoder or None JSONEncoder object. Defaults to JSONEncoder. Returns ------- geoff : str a Geoff string
[ "Get", "the", "graph", "as", "Geoff", "string", ".", "The", "edges", "between", "the", "nodes", "have", "relationship", "name", "edge_rel_name", ".", "The", "code", "below", "shows", "a", "simple", "example", "::" ]
python
train
30.367347
timothyb0912/pylogit
pylogit/uneven_logit.py
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/uneven_logit.py#L118-L238
def _uneven_utility_transform(systematic_utilities, alt_IDs, rows_to_alts, shape_params, intercept_params, intercept_ref_pos=None, *args, **kwargs): """ Parameters ---------- systematic_utilities : 1D ndarray. All elements should be ints, floats, or longs. Should contain the systematic utilities of each observation per available alternative. Note that this vector is formed by the dot product of the design matrix with the vector of utility coefficients. alt_IDs : 1D ndarray. All elements should be ints. There should be one row per obervation per available alternative for the given observation. Elements denote the alternative corresponding to the given row of the design matrix. rows_to_alts : 2D scipy sparse matrix. There should be one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. All elements should be zeros or ones. shape_params : None or 1D ndarray. If an array, each element should be an int, float, or long. There should be one value per shape parameter of the model being used. intercept_params : None or 1D ndarray. If an array, each element should be an int, float, or long. If J is the total number of possible alternatives for the dataset being modeled, there should be J-1 elements in the array. intercept_ref_pos : int, or None, optional. Specifies the index of the alternative, in the ordered array of unique alternatives, that is not having its intercept parameter estimated (in order to ensure identifiability). Should only be None if `intercept_params` is None. Returns ------- transformed_utilities : 2D ndarray. Should have shape `(systematic_utilities.shape[0], 1)`. The returned array contains the transformed utility values for this model. All elements will be ints, longs, or floats. """ # Convert the shape parameters back into their 'natural parametrization' natural_shapes = np.exp(shape_params) natural_shapes[np.isposinf(natural_shapes)] = max_comp_value # Figure out what shape values correspond to each row of the # systematic utilities long_natural_shapes = rows_to_alts.dot(natural_shapes) # Get the exponentiated neative utilities exp_neg_utilities = np.exp(-1 * systematic_utilities) # Get the log of 1 + exponentiated negative utilities log_1_plus_exp_neg_utilitiles = np.log1p(exp_neg_utilities) # Guard against overflow. Underflow not a problem since we add one to a # near zero number and log of one will evaluate to zero inf_idx = np.isinf(log_1_plus_exp_neg_utilitiles) log_1_plus_exp_neg_utilitiles[inf_idx] = -1 * systematic_utilities[inf_idx] # Get the exponentiated (negative utilities times the shape parameter) exp_neg_shape_utilities = np.exp(-1 * long_natural_shapes * systematic_utilities) # Get the log of 1 + exponentiated (negative utiltiies times the shape) log_1_plus_exp_neg_shape_utilities = np.log1p(exp_neg_shape_utilities) ########## # Guard against overflow ########## # Check for any values which have gone off to positive infinity inf_idx = np.isinf(log_1_plus_exp_neg_shape_utilities) # Replace those values with an approximation of the true values by ignoring # the "1." The idea is that 1 + infinity ~ infinity so the effect of the +1 # on the log is minimal. 
if np.any(inf_idx): log_1_plus_exp_neg_shape_utilities[inf_idx] =\ -1 * long_natural_shapes[inf_idx] * systematic_utilities[inf_idx] # Calculate the transformed utility values transformed_utilities = (systematic_utilities + log_1_plus_exp_neg_utilitiles - log_1_plus_exp_neg_shape_utilities) # Perform a final guard against numbers that are too large to deal with transformed_utilities[np.isposinf(transformed_utilities)] = max_comp_value transformed_utilities[np.isneginf(transformed_utilities)] = -max_comp_value transformed_utilities[np.isneginf(systematic_utilities)] = -max_comp_value # Account for the outside intercept parameters if there are any. if intercept_params is not None and intercept_ref_pos is not None: # Get a list of all the indices (or row indices) corresponding to the # alternatives whose intercept parameters are being estimated. needed_idxs = range(rows_to_alts.shape[1]) needed_idxs.remove(intercept_ref_pos) if len(intercept_params.shape) > 1 and intercept_params.shape[1] > 1: # Get an array of zeros with shape # (num_possible_alternatives, num_parameter_samples) all_intercepts = np.zeros((rows_to_alts.shape[1], intercept_params.shape[1])) # For alternatives having their intercept estimated, replace the # zeros with the current value of the estimated intercepts all_intercepts[needed_idxs, :] = intercept_params else: # Get an array of zeros with shape (num_possible_alternatives,) all_intercepts = np.zeros(rows_to_alts.shape[1]) # For alternatives having their intercept estimated, replace the # zeros with the current value of the estimated intercepts all_intercepts[needed_idxs] = intercept_params # Add the intercept values to f(x, beta, c) transformed_utilities += rows_to_alts.dot(all_intercepts) # Be sure to return a 2D array since other functions will be expecting that if len(transformed_utilities.shape) == 1: transformed_utilities = transformed_utilities[:, np.newaxis] return transformed_utilities
[ "def", "_uneven_utility_transform", "(", "systematic_utilities", ",", "alt_IDs", ",", "rows_to_alts", ",", "shape_params", ",", "intercept_params", ",", "intercept_ref_pos", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Convert the shape parameters back into their 'natural parametrization'", "natural_shapes", "=", "np", ".", "exp", "(", "shape_params", ")", "natural_shapes", "[", "np", ".", "isposinf", "(", "natural_shapes", ")", "]", "=", "max_comp_value", "# Figure out what shape values correspond to each row of the", "# systematic utilities", "long_natural_shapes", "=", "rows_to_alts", ".", "dot", "(", "natural_shapes", ")", "# Get the exponentiated neative utilities", "exp_neg_utilities", "=", "np", ".", "exp", "(", "-", "1", "*", "systematic_utilities", ")", "# Get the log of 1 + exponentiated negative utilities", "log_1_plus_exp_neg_utilitiles", "=", "np", ".", "log1p", "(", "exp_neg_utilities", ")", "# Guard against overflow. Underflow not a problem since we add one to a", "# near zero number and log of one will evaluate to zero", "inf_idx", "=", "np", ".", "isinf", "(", "log_1_plus_exp_neg_utilitiles", ")", "log_1_plus_exp_neg_utilitiles", "[", "inf_idx", "]", "=", "-", "1", "*", "systematic_utilities", "[", "inf_idx", "]", "# Get the exponentiated (negative utilities times the shape parameter)", "exp_neg_shape_utilities", "=", "np", ".", "exp", "(", "-", "1", "*", "long_natural_shapes", "*", "systematic_utilities", ")", "# Get the log of 1 + exponentiated (negative utiltiies times the shape)", "log_1_plus_exp_neg_shape_utilities", "=", "np", ".", "log1p", "(", "exp_neg_shape_utilities", ")", "##########", "# Guard against overflow", "##########", "# Check for any values which have gone off to positive infinity", "inf_idx", "=", "np", ".", "isinf", "(", "log_1_plus_exp_neg_shape_utilities", ")", "# Replace those values with an approximation of the true values by ignoring", "# the \"1.\" The idea is that 1 + infinity ~ infinity so the effect of the +1", "# on the log is minimal.", "if", "np", ".", "any", "(", "inf_idx", ")", ":", "log_1_plus_exp_neg_shape_utilities", "[", "inf_idx", "]", "=", "-", "1", "*", "long_natural_shapes", "[", "inf_idx", "]", "*", "systematic_utilities", "[", "inf_idx", "]", "# Calculate the transformed utility values", "transformed_utilities", "=", "(", "systematic_utilities", "+", "log_1_plus_exp_neg_utilitiles", "-", "log_1_plus_exp_neg_shape_utilities", ")", "# Perform a final guard against numbers that are too large to deal with", "transformed_utilities", "[", "np", ".", "isposinf", "(", "transformed_utilities", ")", "]", "=", "max_comp_value", "transformed_utilities", "[", "np", ".", "isneginf", "(", "transformed_utilities", ")", "]", "=", "-", "max_comp_value", "transformed_utilities", "[", "np", ".", "isneginf", "(", "systematic_utilities", ")", "]", "=", "-", "max_comp_value", "# Account for the outside intercept parameters if there are any.", "if", "intercept_params", "is", "not", "None", "and", "intercept_ref_pos", "is", "not", "None", ":", "# Get a list of all the indices (or row indices) corresponding to the", "# alternatives whose intercept parameters are being estimated.", "needed_idxs", "=", "range", "(", "rows_to_alts", ".", "shape", "[", "1", "]", ")", "needed_idxs", ".", "remove", "(", "intercept_ref_pos", ")", "if", "len", "(", "intercept_params", ".", "shape", ")", ">", "1", "and", "intercept_params", ".", "shape", "[", "1", "]", ">", "1", ":", "# Get an array of zeros with shape", "# (num_possible_alternatives, 
num_parameter_samples)", "all_intercepts", "=", "np", ".", "zeros", "(", "(", "rows_to_alts", ".", "shape", "[", "1", "]", ",", "intercept_params", ".", "shape", "[", "1", "]", ")", ")", "# For alternatives having their intercept estimated, replace the", "# zeros with the current value of the estimated intercepts", "all_intercepts", "[", "needed_idxs", ",", ":", "]", "=", "intercept_params", "else", ":", "# Get an array of zeros with shape (num_possible_alternatives,)", "all_intercepts", "=", "np", ".", "zeros", "(", "rows_to_alts", ".", "shape", "[", "1", "]", ")", "# For alternatives having their intercept estimated, replace the", "# zeros with the current value of the estimated intercepts", "all_intercepts", "[", "needed_idxs", "]", "=", "intercept_params", "# Add the intercept values to f(x, beta, c)", "transformed_utilities", "+=", "rows_to_alts", ".", "dot", "(", "all_intercepts", ")", "# Be sure to return a 2D array since other functions will be expecting that", "if", "len", "(", "transformed_utilities", ".", "shape", ")", "==", "1", ":", "transformed_utilities", "=", "transformed_utilities", "[", ":", ",", "np", ".", "newaxis", "]", "return", "transformed_utilities" ]
Parameters ---------- systematic_utilities : 1D ndarray. All elements should be ints, floats, or longs. Should contain the systematic utilities of each observation per available alternative. Note that this vector is formed by the dot product of the design matrix with the vector of utility coefficients. alt_IDs : 1D ndarray. All elements should be ints. There should be one row per observation per available alternative for the given observation. Elements denote the alternative corresponding to the given row of the design matrix. rows_to_alts : 2D scipy sparse matrix. There should be one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. All elements should be zeros or ones. shape_params : None or 1D ndarray. If an array, each element should be an int, float, or long. There should be one value per shape parameter of the model being used. intercept_params : None or 1D ndarray. If an array, each element should be an int, float, or long. If J is the total number of possible alternatives for the dataset being modeled, there should be J-1 elements in the array. intercept_ref_pos : int, or None, optional. Specifies the index of the alternative, in the ordered array of unique alternatives, that is not having its intercept parameter estimated (in order to ensure identifiability). Should only be None if `intercept_params` is None. Returns ------- transformed_utilities : 2D ndarray. Should have shape `(systematic_utilities.shape[0], 1)`. The returned array contains the transformed utility values for this model. All elements will be ints, longs, or floats.
[ "Parameters", "----------", "systematic_utilities", ":", "1D", "ndarray", ".", "All", "elements", "should", "be", "ints", "floats", "or", "longs", ".", "Should", "contain", "the", "systematic", "utilities", "of", "each", "observation", "per", "available", "alternative", ".", "Note", "that", "this", "vector", "is", "formed", "by", "the", "dot", "product", "of", "the", "design", "matrix", "with", "the", "vector", "of", "utility", "coefficients", ".", "alt_IDs", ":", "1D", "ndarray", ".", "All", "elements", "should", "be", "ints", ".", "There", "should", "be", "one", "row", "per", "obervation", "per", "available", "alternative", "for", "the", "given", "observation", ".", "Elements", "denote", "the", "alternative", "corresponding", "to", "the", "given", "row", "of", "the", "design", "matrix", ".", "rows_to_alts", ":", "2D", "scipy", "sparse", "matrix", ".", "There", "should", "be", "one", "row", "per", "observation", "per", "available", "alternative", "and", "one", "column", "per", "possible", "alternative", ".", "This", "matrix", "maps", "the", "rows", "of", "the", "design", "matrix", "to", "the", "possible", "alternatives", "for", "this", "dataset", ".", "All", "elements", "should", "be", "zeros", "or", "ones", ".", "shape_params", ":", "None", "or", "1D", "ndarray", ".", "If", "an", "array", "each", "element", "should", "be", "an", "int", "float", "or", "long", ".", "There", "should", "be", "one", "value", "per", "shape", "parameter", "of", "the", "model", "being", "used", ".", "intercept_params", ":", "None", "or", "1D", "ndarray", ".", "If", "an", "array", "each", "element", "should", "be", "an", "int", "float", "or", "long", ".", "If", "J", "is", "the", "total", "number", "of", "possible", "alternatives", "for", "the", "dataset", "being", "modeled", "there", "should", "be", "J", "-", "1", "elements", "in", "the", "array", ".", "intercept_ref_pos", ":", "int", "or", "None", "optional", ".", "Specifies", "the", "index", "of", "the", "alternative", "in", "the", "ordered", "array", "of", "unique", "alternatives", "that", "is", "not", "having", "its", "intercept", "parameter", "estimated", "(", "in", "order", "to", "ensure", "identifiability", ")", ".", "Should", "only", "be", "None", "if", "intercept_params", "is", "None", "." ]
python
train
50.181818
acrisci/i3ipc-python
examples/stop-application-on-unfocus.py
https://github.com/acrisci/i3ipc-python/blob/243d353434cdd2a93a9ca917c6bbf07b865c39af/examples/stop-application-on-unfocus.py#L31-L40
def stop_cont(self, cont=True): """Send SIGSTOP/SIGCONT to processes called <name> """ for proc in psutil.process_iter(): if proc.name() == self.process_name: sig = psutil.signal.SIGCONT if cont else psutil.signal.SIGSTOP proc.send_signal(sig) if self.debug: sig = 'CONT' if cont else 'STOP' print("Sent SIG%s to process %d" % (sig, proc.pid))
[ "def", "stop_cont", "(", "self", ",", "cont", "=", "True", ")", ":", "for", "proc", "in", "psutil", ".", "process_iter", "(", ")", ":", "if", "proc", ".", "name", "(", ")", "==", "self", ".", "process_name", ":", "sig", "=", "psutil", ".", "signal", ".", "SIGCONT", "if", "cont", "else", "psutil", ".", "signal", ".", "SIGSTOP", "proc", ".", "send_signal", "(", "sig", ")", "if", "self", ".", "debug", ":", "sig", "=", "'CONT'", "if", "cont", "else", "'STOP'", "print", "(", "\"Sent SIG%s to process %d\"", "%", "(", "sig", ",", "proc", ".", "pid", ")", ")" ]
Send SIGSTOP/SIGCONT to processes called <name>
[ "Send", "SIGSTOP", "/", "SIGCONT", "to", "processes", "called", "<name", ">" ]
python
train
45.8
h2oai/h2o-3
scripts/addjavamessage2ignore.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/scripts/addjavamessage2ignore.py#L87-L101
def load_dict(): """ Load java messages that can be ignored pickle file into a dict structure g_ok_java_messages. :return: none """ global g_load_java_message_filename global g_ok_java_messages if os.path.isfile(g_load_java_message_filename): # only load dict from file if it exists. with open(g_load_java_message_filename,'rb') as ofile: g_ok_java_messages = pickle.load(ofile) else: # no previous java messages to be excluded are found g_ok_java_messages["general"] = []
[ "def", "load_dict", "(", ")", ":", "global", "g_load_java_message_filename", "global", "g_ok_java_messages", "if", "os", ".", "path", ".", "isfile", "(", "g_load_java_message_filename", ")", ":", "# only load dict from file if it exists.", "with", "open", "(", "g_load_java_message_filename", ",", "'rb'", ")", "as", "ofile", ":", "g_ok_java_messages", "=", "pickle", ".", "load", "(", "ofile", ")", "else", ":", "# no previous java messages to be excluded are found", "g_ok_java_messages", "[", "\"general\"", "]", "=", "[", "]" ]
Load java messages that can be ignored from a pickle file into a dict structure g_ok_java_messages. :return: none
[ "Load", "java", "messages", "that", "can", "be", "ignored", "pickle", "file", "into", "a", "dict", "structure", "g_ok_java_messages", "." ]
python
test
35.6
lsst-sqre/lsst-projectmeta-kit
lsstprojectmeta/git/timestamp.py
https://github.com/lsst-sqre/lsst-projectmeta-kit/blob/ac8d4ff65bb93d8fdeb1b46ae6eb5d7414f1ae14/lsstprojectmeta/git/timestamp.py#L35-L85
def read_git_commit_timestamp_for_file(filepath, repo_path=None, repo=None): """Obtain the timestamp for the most recent commit to a given file in a Git repository. Parameters ---------- filepath : `str` Absolute or repository-relative path for a file. repo_path : `str`, optional Path to the Git repository. Leave as `None` to use the current working directory or if a ``repo`` argument is provided. repo : `git.Repo`, optional A `git.Repo` instance. Returns ------- commit_timestamp : `datetime.datetime` The datetime of the most recent commit to the given file. Raises ------ IOError Raised if the ``filepath`` does not exist in the Git repository. """ logger = logging.getLogger(__name__) if repo is None: repo = git.repo.base.Repo(path=repo_path, search_parent_directories=True) repo_path = repo.working_tree_dir head_commit = repo.head.commit # filepath relative to the repo path logger.debug('Using Git repo at %r', repo_path) filepath = os.path.relpath( os.path.abspath(filepath), start=repo_path) logger.debug('Repo-relative filepath is %r', filepath) # Most recent commit datetime of the given file. # Don't use head_commit.iter_parents because then it skips the # commit of a file that's added but never modified. for commit in head_commit.iter_items(repo, head_commit, [filepath], skip=0): return commit.committed_datetime # Only get here if git could not find the file path in the history raise IOError('File {} not found'.format(filepath))
[ "def", "read_git_commit_timestamp_for_file", "(", "filepath", ",", "repo_path", "=", "None", ",", "repo", "=", "None", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "if", "repo", "is", "None", ":", "repo", "=", "git", ".", "repo", ".", "base", ".", "Repo", "(", "path", "=", "repo_path", ",", "search_parent_directories", "=", "True", ")", "repo_path", "=", "repo", ".", "working_tree_dir", "head_commit", "=", "repo", ".", "head", ".", "commit", "# filepath relative to the repo path", "logger", ".", "debug", "(", "'Using Git repo at %r'", ",", "repo_path", ")", "filepath", "=", "os", ".", "path", ".", "relpath", "(", "os", ".", "path", ".", "abspath", "(", "filepath", ")", ",", "start", "=", "repo_path", ")", "logger", ".", "debug", "(", "'Repo-relative filepath is %r'", ",", "filepath", ")", "# Most recent commit datetime of the given file.", "# Don't use head_commit.iter_parents because then it skips the", "# commit of a file that's added but never modified.", "for", "commit", "in", "head_commit", ".", "iter_items", "(", "repo", ",", "head_commit", ",", "[", "filepath", "]", ",", "skip", "=", "0", ")", ":", "return", "commit", ".", "committed_datetime", "# Only get here if git could not find the file path in the history", "raise", "IOError", "(", "'File {} not found'", ".", "format", "(", "filepath", ")", ")" ]
Obtain the timestamp for the most recent commit to a given file in a Git repository. Parameters ---------- filepath : `str` Absolute or repository-relative path for a file. repo_path : `str`, optional Path to the Git repository. Leave as `None` to use the current working directory or if a ``repo`` argument is provided. repo : `git.Repo`, optional A `git.Repo` instance. Returns ------- commit_timestamp : `datetime.datetime` The datetime of the most recent commit to the given file. Raises ------ IOError Raised if the ``filepath`` does not exist in the Git repository.
[ "Obtain", "the", "timestamp", "for", "the", "most", "recent", "commit", "to", "a", "given", "file", "in", "a", "Git", "repository", "." ]
python
valid
34.509804
pydata/xarray
xarray/core/dataarray.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/dataarray.py#L687-L760
def copy(self, deep=True, data=None): """Returns a copy of this array. If `deep=True`, a deep copy is made of the data array. Otherwise, a shallow copy is made, so each variable in the new array's dataset is also a variable in this array's dataset. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, optional Whether the data array and its coordinates are loaded into memory and copied onto the new object. Default is True. data : array_like, optional Data to use in the new object. Must have same shape as original. When `data` is used, `deep` is ignored for all data variables, and only used for coords. Returns ------- object : DataArray New object with dimensions, attributes, coordinates, name, encoding, and optionally data copied from original. Examples -------- Shallow versus deep copy >>> array = xr.DataArray([1, 2, 3], dims='x', ... coords={'x': ['a', 'b', 'c']}) >>> array.copy() <xarray.DataArray (x: 3)> array([1, 2, 3]) Coordinates: * x (x) <U1 'a' 'b' 'c' >>> array_0 = array.copy(deep=False) >>> array_0[0] = 7 >>> array_0 <xarray.DataArray (x: 3)> array([7, 2, 3]) Coordinates: * x (x) <U1 'a' 'b' 'c' >>> array <xarray.DataArray (x: 3)> array([7, 2, 3]) Coordinates: * x (x) <U1 'a' 'b' 'c' Changing the data using the ``data`` argument maintains the structure of the original object, but with the new data. Original object is unaffected. >>> array.copy(data=[0.1, 0.2, 0.3]) <xarray.DataArray (x: 3)> array([ 0.1, 0.2, 0.3]) Coordinates: * x (x) <U1 'a' 'b' 'c' >>> array <xarray.DataArray (x: 3)> array([1, 2, 3]) Coordinates: * x (x) <U1 'a' 'b' 'c' See also -------- pandas.DataFrame.copy """ variable = self.variable.copy(deep=deep, data=data) coords = OrderedDict((k, v.copy(deep=deep)) for k, v in self._coords.items()) return self._replace(variable, coords)
[ "def", "copy", "(", "self", ",", "deep", "=", "True", ",", "data", "=", "None", ")", ":", "variable", "=", "self", ".", "variable", ".", "copy", "(", "deep", "=", "deep", ",", "data", "=", "data", ")", "coords", "=", "OrderedDict", "(", "(", "k", ",", "v", ".", "copy", "(", "deep", "=", "deep", ")", ")", "for", "k", ",", "v", "in", "self", ".", "_coords", ".", "items", "(", ")", ")", "return", "self", ".", "_replace", "(", "variable", ",", "coords", ")" ]
Returns a copy of this array. If `deep=True`, a deep copy is made of the data array. Otherwise, a shallow copy is made, so each variable in the new array's dataset is also a variable in this array's dataset. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, optional Whether the data array and its coordinates are loaded into memory and copied onto the new object. Default is True. data : array_like, optional Data to use in the new object. Must have same shape as original. When `data` is used, `deep` is ignored for all data variables, and only used for coords. Returns ------- object : DataArray New object with dimensions, attributes, coordinates, name, encoding, and optionally data copied from original. Examples -------- Shallow versus deep copy >>> array = xr.DataArray([1, 2, 3], dims='x', ... coords={'x': ['a', 'b', 'c']}) >>> array.copy() <xarray.DataArray (x: 3)> array([1, 2, 3]) Coordinates: * x (x) <U1 'a' 'b' 'c' >>> array_0 = array.copy(deep=False) >>> array_0[0] = 7 >>> array_0 <xarray.DataArray (x: 3)> array([7, 2, 3]) Coordinates: * x (x) <U1 'a' 'b' 'c' >>> array <xarray.DataArray (x: 3)> array([7, 2, 3]) Coordinates: * x (x) <U1 'a' 'b' 'c' Changing the data using the ``data`` argument maintains the structure of the original object, but with the new data. Original object is unaffected. >>> array.copy(data=[0.1, 0.2, 0.3]) <xarray.DataArray (x: 3)> array([ 0.1, 0.2, 0.3]) Coordinates: * x (x) <U1 'a' 'b' 'c' >>> array <xarray.DataArray (x: 3)> array([1, 2, 3]) Coordinates: * x (x) <U1 'a' 'b' 'c' See also -------- pandas.DataFrame.copy
[ "Returns", "a", "copy", "of", "this", "array", "." ]
python
train
32.567568
CivicSpleen/ambry
ambry/bundle/bundle.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/bundle.py#L1163-L1273
def get_source(self, source, clean=False, callback=None): """ Download a file from a URL and return it wrapped in a row-generating acessor object. :param spec: A SourceSpec that describes the source to fetch. :param account_accessor: A callable to return the username and password to use for access FTP and S3 URLs. :param clean: Delete files in cache and re-download. :param callback: A callback, called while reading files in download. signatire is f(read_len, total_len) :return: a SourceFile object. """ from fs.zipfs import ZipOpenError import os from ambry_sources.sources import ( GoogleSource, CsvSource, TsvSource, FixedSource, ExcelSource, PartitionSource, SourceError, DelayedOpen, DelayedDownload, ShapefileSource, SocrataSource ) from ambry_sources import extract_file_from_zip spec = source.spec cache_fs = self.library.download_cache account_accessor = self.library.account_accessor # FIXME. urltype should be moved to reftype. url_type = spec.get_urltype() def do_download(): from ambry_sources.fetch import download return download(spec.url, cache_fs, account_accessor, clean=clean, logger=self.logger, callback=callback) if url_type == 'file': from fs.opener import fsopen syspath = spec.url.replace('file://', '') cache_path = syspath.strip('/') cache_fs.makedir(os.path.dirname(cache_path), recursive=True, allow_recreate=True) if os.path.isabs(syspath): # FIXME! Probably should not be with open(syspath) as f: cache_fs.setcontents(cache_path, f) else: cache_fs.setcontents(cache_path, self.source_fs.getcontents(syspath)) elif url_type not in ('gs', 'socrata'): # FIXME. Need to clean up the logic for gs types. try: cache_path, download_time = do_download() spec.download_time = download_time except Exception as e: from ambry_sources.exceptions import DownloadError raise DownloadError("Failed to download {}; {}".format(spec.url, e)) else: cache_path, download_time = None, None if url_type == 'zip': try: fstor = extract_file_from_zip(cache_fs, cache_path, spec.url, spec.file) except ZipOpenError: # Try it again cache_fs.remove(cache_path) cache_path, spec.download_time = do_download() fstor = extract_file_from_zip(cache_fs, cache_path, spec.url, spec.file) file_type = spec.get_filetype(fstor.path) elif url_type == 'gs': fstor = get_gs(spec.url, spec.segment, account_accessor) file_type = 'gs' elif url_type == 'socrata': spec.encoding = 'utf8' spec.header_lines = [0] spec.start_line = 1 url = SocrataSource.download_url(spec) fstor = DelayedDownload(url, cache_fs) file_type = 'socrata' else: fstor = DelayedOpen(cache_fs, cache_path, 'rb') file_type = spec.get_filetype(fstor.path) spec.filetype = file_type TYPE_TO_SOURCE_MAP = { 'gs': GoogleSource, 'csv': CsvSource, 'tsv': TsvSource, 'fixed': FixedSource, 'txt': FixedSource, 'xls': ExcelSource, 'xlsx': ExcelSource, 'partition': PartitionSource, 'shape': ShapefileSource, 'socrata': SocrataSource } cls = TYPE_TO_SOURCE_MAP.get(file_type) if cls is None: raise SourceError( "Failed to determine file type for source '{}'; unknown type '{}' " .format(spec.name, file_type)) return cls(spec, fstor)
[ "def", "get_source", "(", "self", ",", "source", ",", "clean", "=", "False", ",", "callback", "=", "None", ")", ":", "from", "fs", ".", "zipfs", "import", "ZipOpenError", "import", "os", "from", "ambry_sources", ".", "sources", "import", "(", "GoogleSource", ",", "CsvSource", ",", "TsvSource", ",", "FixedSource", ",", "ExcelSource", ",", "PartitionSource", ",", "SourceError", ",", "DelayedOpen", ",", "DelayedDownload", ",", "ShapefileSource", ",", "SocrataSource", ")", "from", "ambry_sources", "import", "extract_file_from_zip", "spec", "=", "source", ".", "spec", "cache_fs", "=", "self", ".", "library", ".", "download_cache", "account_accessor", "=", "self", ".", "library", ".", "account_accessor", "# FIXME. urltype should be moved to reftype.", "url_type", "=", "spec", ".", "get_urltype", "(", ")", "def", "do_download", "(", ")", ":", "from", "ambry_sources", ".", "fetch", "import", "download", "return", "download", "(", "spec", ".", "url", ",", "cache_fs", ",", "account_accessor", ",", "clean", "=", "clean", ",", "logger", "=", "self", ".", "logger", ",", "callback", "=", "callback", ")", "if", "url_type", "==", "'file'", ":", "from", "fs", ".", "opener", "import", "fsopen", "syspath", "=", "spec", ".", "url", ".", "replace", "(", "'file://'", ",", "''", ")", "cache_path", "=", "syspath", ".", "strip", "(", "'/'", ")", "cache_fs", ".", "makedir", "(", "os", ".", "path", ".", "dirname", "(", "cache_path", ")", ",", "recursive", "=", "True", ",", "allow_recreate", "=", "True", ")", "if", "os", ".", "path", ".", "isabs", "(", "syspath", ")", ":", "# FIXME! Probably should not be", "with", "open", "(", "syspath", ")", "as", "f", ":", "cache_fs", ".", "setcontents", "(", "cache_path", ",", "f", ")", "else", ":", "cache_fs", ".", "setcontents", "(", "cache_path", ",", "self", ".", "source_fs", ".", "getcontents", "(", "syspath", ")", ")", "elif", "url_type", "not", "in", "(", "'gs'", ",", "'socrata'", ")", ":", "# FIXME. 
Need to clean up the logic for gs types.", "try", ":", "cache_path", ",", "download_time", "=", "do_download", "(", ")", "spec", ".", "download_time", "=", "download_time", "except", "Exception", "as", "e", ":", "from", "ambry_sources", ".", "exceptions", "import", "DownloadError", "raise", "DownloadError", "(", "\"Failed to download {}; {}\"", ".", "format", "(", "spec", ".", "url", ",", "e", ")", ")", "else", ":", "cache_path", ",", "download_time", "=", "None", ",", "None", "if", "url_type", "==", "'zip'", ":", "try", ":", "fstor", "=", "extract_file_from_zip", "(", "cache_fs", ",", "cache_path", ",", "spec", ".", "url", ",", "spec", ".", "file", ")", "except", "ZipOpenError", ":", "# Try it again", "cache_fs", ".", "remove", "(", "cache_path", ")", "cache_path", ",", "spec", ".", "download_time", "=", "do_download", "(", ")", "fstor", "=", "extract_file_from_zip", "(", "cache_fs", ",", "cache_path", ",", "spec", ".", "url", ",", "spec", ".", "file", ")", "file_type", "=", "spec", ".", "get_filetype", "(", "fstor", ".", "path", ")", "elif", "url_type", "==", "'gs'", ":", "fstor", "=", "get_gs", "(", "spec", ".", "url", ",", "spec", ".", "segment", ",", "account_accessor", ")", "file_type", "=", "'gs'", "elif", "url_type", "==", "'socrata'", ":", "spec", ".", "encoding", "=", "'utf8'", "spec", ".", "header_lines", "=", "[", "0", "]", "spec", ".", "start_line", "=", "1", "url", "=", "SocrataSource", ".", "download_url", "(", "spec", ")", "fstor", "=", "DelayedDownload", "(", "url", ",", "cache_fs", ")", "file_type", "=", "'socrata'", "else", ":", "fstor", "=", "DelayedOpen", "(", "cache_fs", ",", "cache_path", ",", "'rb'", ")", "file_type", "=", "spec", ".", "get_filetype", "(", "fstor", ".", "path", ")", "spec", ".", "filetype", "=", "file_type", "TYPE_TO_SOURCE_MAP", "=", "{", "'gs'", ":", "GoogleSource", ",", "'csv'", ":", "CsvSource", ",", "'tsv'", ":", "TsvSource", ",", "'fixed'", ":", "FixedSource", ",", "'txt'", ":", "FixedSource", ",", "'xls'", ":", "ExcelSource", ",", "'xlsx'", ":", "ExcelSource", ",", "'partition'", ":", "PartitionSource", ",", "'shape'", ":", "ShapefileSource", ",", "'socrata'", ":", "SocrataSource", "}", "cls", "=", "TYPE_TO_SOURCE_MAP", ".", "get", "(", "file_type", ")", "if", "cls", "is", "None", ":", "raise", "SourceError", "(", "\"Failed to determine file type for source '{}'; unknown type '{}' \"", ".", "format", "(", "spec", ".", "name", ",", "file_type", ")", ")", "return", "cls", "(", "spec", ",", "fstor", ")" ]
Download a file from a URL and return it wrapped in a row-generating accessor object. :param spec: A SourceSpec that describes the source to fetch. :param account_accessor: A callable to return the username and password to use for accessing FTP and S3 URLs. :param clean: Delete files in cache and re-download. :param callback: A callback, called while reading files in download. Signature is f(read_len, total_len) :return: a SourceFile object.
[ "Download", "a", "file", "from", "a", "URL", "and", "return", "it", "wrapped", "in", "a", "row", "-", "generating", "acessor", "object", "." ]
python
train
36.36036
cloud9ers/gurumate
environment/lib/python2.7/site-packages/nose/twistedtools.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/nose/twistedtools.py#L66-L83
def stop_reactor(): """Stop the reactor and join the reactor thread until it stops. Call this function in teardown at the module or package level to reset the twisted system after your tests. You *must* do this if you mix tests using these tools and tests using twisted.trial. """ global _twisted_thread def stop_reactor(): '''Helper for calling stop from withing the thread.''' reactor.stop() reactor.callFromThread(stop_reactor) reactor_thread.join() for p in reactor.getDelayedCalls(): if p.active(): p.cancel() _twisted_thread = None
[ "def", "stop_reactor", "(", ")", ":", "global", "_twisted_thread", "def", "stop_reactor", "(", ")", ":", "'''Helper for calling stop from withing the thread.'''", "reactor", ".", "stop", "(", ")", "reactor", ".", "callFromThread", "(", "stop_reactor", ")", "reactor_thread", ".", "join", "(", ")", "for", "p", "in", "reactor", ".", "getDelayedCalls", "(", ")", ":", "if", "p", ".", "active", "(", ")", ":", "p", ".", "cancel", "(", ")", "_twisted_thread", "=", "None" ]
Stop the reactor and join the reactor thread until it stops. Call this function in teardown at the module or package level to reset the twisted system after your tests. You *must* do this if you mix tests using these tools and tests using twisted.trial.
[ "Stop", "the", "reactor", "and", "join", "the", "reactor", "thread", "until", "it", "stops", ".", "Call", "this", "function", "in", "teardown", "at", "the", "module", "or", "package", "level", "to", "reset", "the", "twisted", "system", "after", "your", "tests", ".", "You", "*", "must", "*", "do", "this", "if", "you", "mix", "tests", "using", "these", "tools", "and", "tests", "using", "twisted", ".", "trial", "." ]
python
test
33.444444
jim-easterbrook/pywws
src/pywws/sqlite3data.py
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/sqlite3data.py#L516-L520
def items(self): """D.items() -> a set-like object providing a view on D's items""" keycol = self._keycol for row in self.__iter__(): yield (row[keycol], dict(row))
[ "def", "items", "(", "self", ")", ":", "keycol", "=", "self", ".", "_keycol", "for", "row", "in", "self", ".", "__iter__", "(", ")", ":", "yield", "(", "row", "[", "keycol", "]", ",", "dict", "(", "row", ")", ")" ]
D.items() -> a set-like object providing a view on D's items
[ "D", ".", "items", "()", "-", ">", "a", "set", "-", "like", "object", "providing", "a", "view", "on", "D", "s", "items" ]
python
train
39.2
AoiKuiyuyou/AoikI18n
src/aoiki18n/aoiki18n_.py
https://github.com/AoiKuiyuyou/AoikI18n/blob/8d60ea6a2be24e533a9cf92b433a8cfdb67f813e/src/aoiki18n/aoiki18n_.py#L78-L88
def yaml_force_unicode(): """ Force pyyaml to return unicode values. """ #/ ## modified from |http://stackoverflow.com/a/2967461| if sys.version_info[0] == 2: def construct_func(self, node): return self.construct_scalar(node) yaml.Loader.add_constructor(U('tag:yaml.org,2002:str'), construct_func) yaml.SafeLoader.add_constructor(U('tag:yaml.org,2002:str'), construct_func)
[ "def", "yaml_force_unicode", "(", ")", ":", "#/", "## modified from |http://stackoverflow.com/a/2967461|", "if", "sys", ".", "version_info", "[", "0", "]", "==", "2", ":", "def", "construct_func", "(", "self", ",", "node", ")", ":", "return", "self", ".", "construct_scalar", "(", "node", ")", "yaml", ".", "Loader", ".", "add_constructor", "(", "U", "(", "'tag:yaml.org,2002:str'", ")", ",", "construct_func", ")", "yaml", ".", "SafeLoader", ".", "add_constructor", "(", "U", "(", "'tag:yaml.org,2002:str'", ")", ",", "construct_func", ")" ]
Force pyyaml to return unicode values.
[ "Force", "pyyaml", "to", "return", "unicode", "values", "." ]
python
train
42.090909
cokelaer/spectrum
src/spectrum/window.py
https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/src/spectrum/window.py#L638-L676
def window_blackman(N, alpha=0.16): r"""Blackman window :param N: window length .. math:: a_0 - a_1 \cos(\frac{2\pi n}{N-1}) +a_2 \cos(\frac{4\pi n }{N-1}) with .. math:: a_0 = (1-\alpha)/2, a_1=0.5, a_2=\alpha/2 \rm{\;and\; \alpha}=0.16 When :math:`\alpha=0.16`, this is the unqualified Blackman window with :math:`a_0=0.48` and :math:`a_2=0.08`. .. plot:: :width: 80% :include-source: from spectrum import window_visu window_visu(64, 'blackman') .. note:: Although Numpy implements a blackman window for :math:`\alpha=0.16`, this implementation is valid for any :math:`\alpha`. .. seealso:: numpy.blackman, :func:`create_window`, :class:`Window` """ a0 = (1. - alpha)/2. a1 = 0.5 a2 = alpha/2. if (N == 1): win = array([1.]) else: k = arange(0, N)/float(N-1.) win = a0 - a1 * cos (2 * pi * k) + a2 * cos (4 * pi * k) return win
[ "def", "window_blackman", "(", "N", ",", "alpha", "=", "0.16", ")", ":", "a0", "=", "(", "1.", "-", "alpha", ")", "/", "2.", "a1", "=", "0.5", "a2", "=", "alpha", "/", "2.", "if", "(", "N", "==", "1", ")", ":", "win", "=", "array", "(", "[", "1.", "]", ")", "else", ":", "k", "=", "arange", "(", "0", ",", "N", ")", "/", "float", "(", "N", "-", "1.", ")", "win", "=", "a0", "-", "a1", "*", "cos", "(", "2", "*", "pi", "*", "k", ")", "+", "a2", "*", "cos", "(", "4", "*", "pi", "*", "k", ")", "return", "win" ]
r"""Blackman window :param N: window length .. math:: a_0 - a_1 \cos(\frac{2\pi n}{N-1}) +a_2 \cos(\frac{4\pi n }{N-1}) with .. math:: a_0 = (1-\alpha)/2, a_1=0.5, a_2=\alpha/2 \rm{\;and\; \alpha}=0.16 When :math:`\alpha=0.16`, this is the unqualified Blackman window with :math:`a_0=0.48` and :math:`a_2=0.08`. .. plot:: :width: 80% :include-source: from spectrum import window_visu window_visu(64, 'blackman') .. note:: Although Numpy implements a blackman window for :math:`\alpha=0.16`, this implementation is valid for any :math:`\alpha`. .. seealso:: numpy.blackman, :func:`create_window`, :class:`Window`
[ "r", "Blackman", "window" ]
python
valid
24.230769
caktus/django-treenav
treenav/admin.py
https://github.com/caktus/django-treenav/blob/8f81619a8598790d1c2dc7bf77ba9d8e9e9564e6/treenav/admin.py#L93-L101
def clean_cache(self, request): """ Remove all MenuItems from Cache. """ treenav.delete_cache() self.message_user(request, _('Cache menuitem cache cleaned successfully.')) info = self.model._meta.app_label, self.model._meta.model_name changelist_url = reverse('admin:%s_%s_changelist' % info, current_app=self.admin_site.name) return redirect(changelist_url)
[ "def", "clean_cache", "(", "self", ",", "request", ")", ":", "treenav", ".", "delete_cache", "(", ")", "self", ".", "message_user", "(", "request", ",", "_", "(", "'Cache menuitem cache cleaned successfully.'", ")", ")", "info", "=", "self", ".", "model", ".", "_meta", ".", "app_label", ",", "self", ".", "model", ".", "_meta", ".", "model_name", "changelist_url", "=", "reverse", "(", "'admin:%s_%s_changelist'", "%", "info", ",", "current_app", "=", "self", ".", "admin_site", ".", "name", ")", "return", "redirect", "(", "changelist_url", ")" ]
Remove all MenuItems from Cache.
[ "Remove", "all", "MenuItems", "from", "Cache", "." ]
python
train
46
allenai/allennlp
allennlp/models/semantic_parsing/wikitables/wikitables_semantic_parser.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/models/semantic_parsing/wikitables/wikitables_semantic_parser.py#L513-L620
def _create_grammar_state(self, world: WikiTablesWorld, possible_actions: List[ProductionRule], linking_scores: torch.Tensor, entity_types: torch.Tensor) -> LambdaGrammarStatelet: """ This method creates the LambdaGrammarStatelet object that's used for decoding. Part of creating that is creating the `valid_actions` dictionary, which contains embedded representations of all of the valid actions. So, we create that here as well. The way we represent the valid expansions is a little complicated: we use a dictionary of `action types`, where the key is the action type (like "global", "linked", or whatever your model is expecting), and the value is a tuple representing all actions of that type. The tuple is (input tensor, output tensor, action id). The input tensor has the representation that is used when `selecting` actions, for all actions of this type. The output tensor has the representation that is used when feeding the action to the next step of the decoder (this could just be the same as the input tensor). The action ids are a list of indices into the main action list for each batch instance. The inputs to this method are for a `single instance in the batch`; none of the tensors we create here are batched. We grab the global action ids from the input ``ProductionRules``, and we use those to embed the valid actions for every non-terminal type. We use the input ``linking_scores`` for non-global actions. Parameters ---------- world : ``WikiTablesWorld`` From the input to ``forward`` for a single batch instance. possible_actions : ``List[ProductionRule]`` From the input to ``forward`` for a single batch instance. linking_scores : ``torch.Tensor`` Assumed to have shape ``(num_entities, num_question_tokens)`` (i.e., there is no batch dimension). entity_types : ``torch.Tensor`` Assumed to have shape ``(num_entities,)`` (i.e., there is no batch dimension). """ # TODO(mattg): Move the "valid_actions" construction to another method. action_map = {} for action_index, action in enumerate(possible_actions): action_string = action[0] action_map[action_string] = action_index entity_map = {} for entity_index, entity in enumerate(world.table_graph.entities): entity_map[entity] = entity_index valid_actions = world.get_valid_actions() translated_valid_actions: Dict[str, Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]] = {} for key, action_strings in valid_actions.items(): translated_valid_actions[key] = {} # `key` here is a non-terminal from the grammar, and `action_strings` are all the valid # productions of that non-terminal. We'll first split those productions by global vs. # linked action. action_indices = [action_map[action_string] for action_string in action_strings] production_rule_arrays = [(possible_actions[index], index) for index in action_indices] global_actions = [] linked_actions = [] for production_rule_array, action_index in production_rule_arrays: if production_rule_array[1]: global_actions.append((production_rule_array[2], action_index)) else: linked_actions.append((production_rule_array[0], action_index)) # Then we get the embedded representations of the global actions. 
global_action_tensors, global_action_ids = zip(*global_actions) global_action_tensor = torch.cat(global_action_tensors, dim=0) global_input_embeddings = self._action_embedder(global_action_tensor) if self._add_action_bias: global_action_biases = self._action_biases(global_action_tensor) global_input_embeddings = torch.cat([global_input_embeddings, global_action_biases], dim=-1) global_output_embeddings = self._output_action_embedder(global_action_tensor) translated_valid_actions[key]['global'] = (global_input_embeddings, global_output_embeddings, list(global_action_ids)) # Then the representations of the linked actions. if linked_actions: linked_rules, linked_action_ids = zip(*linked_actions) entities = [rule.split(' -> ')[1] for rule in linked_rules] entity_ids = [entity_map[entity] for entity in entities] # (num_linked_actions, num_question_tokens) entity_linking_scores = linking_scores[entity_ids] # (num_linked_actions,) entity_type_tensor = entity_types[entity_ids] # (num_linked_actions, entity_type_embedding_dim) entity_type_embeddings = self._entity_type_decoder_embedding(entity_type_tensor) translated_valid_actions[key]['linked'] = (entity_linking_scores, entity_type_embeddings, list(linked_action_ids)) # Lastly, we need to also create embedded representations of context-specific actions. In # this case, those are only variable productions, like "r -> x". Note that our language # only permits one lambda at a time, so we don't need to worry about how nested lambdas # might impact this. context_actions = {} for action_id, action in enumerate(possible_actions): if action[0].endswith(" -> x"): input_embedding = self._action_embedder(action[2]) if self._add_action_bias: input_bias = self._action_biases(action[2]) input_embedding = torch.cat([input_embedding, input_bias], dim=-1) output_embedding = self._output_action_embedder(action[2]) context_actions[action[0]] = (input_embedding, output_embedding, action_id) return LambdaGrammarStatelet([START_SYMBOL], {}, translated_valid_actions, context_actions, type_declaration.is_nonterminal)
[ "def", "_create_grammar_state", "(", "self", ",", "world", ":", "WikiTablesWorld", ",", "possible_actions", ":", "List", "[", "ProductionRule", "]", ",", "linking_scores", ":", "torch", ".", "Tensor", ",", "entity_types", ":", "torch", ".", "Tensor", ")", "->", "LambdaGrammarStatelet", ":", "# TODO(mattg): Move the \"valid_actions\" construction to another method.", "action_map", "=", "{", "}", "for", "action_index", ",", "action", "in", "enumerate", "(", "possible_actions", ")", ":", "action_string", "=", "action", "[", "0", "]", "action_map", "[", "action_string", "]", "=", "action_index", "entity_map", "=", "{", "}", "for", "entity_index", ",", "entity", "in", "enumerate", "(", "world", ".", "table_graph", ".", "entities", ")", ":", "entity_map", "[", "entity", "]", "=", "entity_index", "valid_actions", "=", "world", ".", "get_valid_actions", "(", ")", "translated_valid_actions", ":", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "Tuple", "[", "torch", ".", "Tensor", ",", "torch", ".", "Tensor", ",", "List", "[", "int", "]", "]", "]", "]", "=", "{", "}", "for", "key", ",", "action_strings", "in", "valid_actions", ".", "items", "(", ")", ":", "translated_valid_actions", "[", "key", "]", "=", "{", "}", "# `key` here is a non-terminal from the grammar, and `action_strings` are all the valid", "# productions of that non-terminal. We'll first split those productions by global vs.", "# linked action.", "action_indices", "=", "[", "action_map", "[", "action_string", "]", "for", "action_string", "in", "action_strings", "]", "production_rule_arrays", "=", "[", "(", "possible_actions", "[", "index", "]", ",", "index", ")", "for", "index", "in", "action_indices", "]", "global_actions", "=", "[", "]", "linked_actions", "=", "[", "]", "for", "production_rule_array", ",", "action_index", "in", "production_rule_arrays", ":", "if", "production_rule_array", "[", "1", "]", ":", "global_actions", ".", "append", "(", "(", "production_rule_array", "[", "2", "]", ",", "action_index", ")", ")", "else", ":", "linked_actions", ".", "append", "(", "(", "production_rule_array", "[", "0", "]", ",", "action_index", ")", ")", "# Then we get the embedded representations of the global actions.", "global_action_tensors", ",", "global_action_ids", "=", "zip", "(", "*", "global_actions", ")", "global_action_tensor", "=", "torch", ".", "cat", "(", "global_action_tensors", ",", "dim", "=", "0", ")", "global_input_embeddings", "=", "self", ".", "_action_embedder", "(", "global_action_tensor", ")", "if", "self", ".", "_add_action_bias", ":", "global_action_biases", "=", "self", ".", "_action_biases", "(", "global_action_tensor", ")", "global_input_embeddings", "=", "torch", ".", "cat", "(", "[", "global_input_embeddings", ",", "global_action_biases", "]", ",", "dim", "=", "-", "1", ")", "global_output_embeddings", "=", "self", ".", "_output_action_embedder", "(", "global_action_tensor", ")", "translated_valid_actions", "[", "key", "]", "[", "'global'", "]", "=", "(", "global_input_embeddings", ",", "global_output_embeddings", ",", "list", "(", "global_action_ids", ")", ")", "# Then the representations of the linked actions.", "if", "linked_actions", ":", "linked_rules", ",", "linked_action_ids", "=", "zip", "(", "*", "linked_actions", ")", "entities", "=", "[", "rule", ".", "split", "(", "' -> '", ")", "[", "1", "]", "for", "rule", "in", "linked_rules", "]", "entity_ids", "=", "[", "entity_map", "[", "entity", "]", "for", "entity", "in", "entities", "]", "# (num_linked_actions, num_question_tokens)", 
"entity_linking_scores", "=", "linking_scores", "[", "entity_ids", "]", "# (num_linked_actions,)", "entity_type_tensor", "=", "entity_types", "[", "entity_ids", "]", "# (num_linked_actions, entity_type_embedding_dim)", "entity_type_embeddings", "=", "self", ".", "_entity_type_decoder_embedding", "(", "entity_type_tensor", ")", "translated_valid_actions", "[", "key", "]", "[", "'linked'", "]", "=", "(", "entity_linking_scores", ",", "entity_type_embeddings", ",", "list", "(", "linked_action_ids", ")", ")", "# Lastly, we need to also create embedded representations of context-specific actions. In", "# this case, those are only variable productions, like \"r -> x\". Note that our language", "# only permits one lambda at a time, so we don't need to worry about how nested lambdas", "# might impact this.", "context_actions", "=", "{", "}", "for", "action_id", ",", "action", "in", "enumerate", "(", "possible_actions", ")", ":", "if", "action", "[", "0", "]", ".", "endswith", "(", "\" -> x\"", ")", ":", "input_embedding", "=", "self", ".", "_action_embedder", "(", "action", "[", "2", "]", ")", "if", "self", ".", "_add_action_bias", ":", "input_bias", "=", "self", ".", "_action_biases", "(", "action", "[", "2", "]", ")", "input_embedding", "=", "torch", ".", "cat", "(", "[", "input_embedding", ",", "input_bias", "]", ",", "dim", "=", "-", "1", ")", "output_embedding", "=", "self", ".", "_output_action_embedder", "(", "action", "[", "2", "]", ")", "context_actions", "[", "action", "[", "0", "]", "]", "=", "(", "input_embedding", ",", "output_embedding", ",", "action_id", ")", "return", "LambdaGrammarStatelet", "(", "[", "START_SYMBOL", "]", ",", "{", "}", ",", "translated_valid_actions", ",", "context_actions", ",", "type_declaration", ".", "is_nonterminal", ")" ]
This method creates the LambdaGrammarStatelet object that's used for decoding. Part of creating that is creating the `valid_actions` dictionary, which contains embedded representations of all of the valid actions. So, we create that here as well. The way we represent the valid expansions is a little complicated: we use a dictionary of `action types`, where the key is the action type (like "global", "linked", or whatever your model is expecting), and the value is a tuple representing all actions of that type. The tuple is (input tensor, output tensor, action id). The input tensor has the representation that is used when `selecting` actions, for all actions of this type. The output tensor has the representation that is used when feeding the action to the next step of the decoder (this could just be the same as the input tensor). The action ids are a list of indices into the main action list for each batch instance. The inputs to this method are for a `single instance in the batch`; none of the tensors we create here are batched. We grab the global action ids from the input ``ProductionRules``, and we use those to embed the valid actions for every non-terminal type. We use the input ``linking_scores`` for non-global actions. Parameters ---------- world : ``WikiTablesWorld`` From the input to ``forward`` for a single batch instance. possible_actions : ``List[ProductionRule]`` From the input to ``forward`` for a single batch instance. linking_scores : ``torch.Tensor`` Assumed to have shape ``(num_entities, num_question_tokens)`` (i.e., there is no batch dimension). entity_types : ``torch.Tensor`` Assumed to have shape ``(num_entities,)`` (i.e., there is no batch dimension).
[ "This", "method", "creates", "the", "LambdaGrammarStatelet", "object", "that", "s", "used", "for", "decoding", ".", "Part", "of", "creating", "that", "is", "creating", "the", "valid_actions", "dictionary", "which", "contains", "embedded", "representations", "of", "all", "of", "the", "valid", "actions", ".", "So", "we", "create", "that", "here", "as", "well", "." ]
python
train
61.203704
numenta/nupic
src/nupic/encoders/category.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/category.py#L172-L189
def closenessScores(self, expValues, actValues, fractional=True,): """ See the function description in base.py kwargs will have the keyword "fractional", which is ignored by this encoder """ expValue = expValues[0] actValue = actValues[0] if expValue == actValue: closeness = 1.0 else: closeness = 0.0 if not fractional: closeness = 1.0 - closeness return numpy.array([closeness])
[ "def", "closenessScores", "(", "self", ",", "expValues", ",", "actValues", ",", "fractional", "=", "True", ",", ")", ":", "expValue", "=", "expValues", "[", "0", "]", "actValue", "=", "actValues", "[", "0", "]", "if", "expValue", "==", "actValue", ":", "closeness", "=", "1.0", "else", ":", "closeness", "=", "0.0", "if", "not", "fractional", ":", "closeness", "=", "1.0", "-", "closeness", "return", "numpy", ".", "array", "(", "[", "closeness", "]", ")" ]
See the function description in base.py kwargs will have the keyword "fractional", which is ignored by this encoder
[ "See", "the", "function", "description", "in", "base", ".", "py" ]
python
valid
23.444444
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_log.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_log.py#L104-L123
def log_status(self): '''show download status''' if self.download_filename is None: print("No download") return dt = time.time() - self.download_start speed = os.path.getsize(self.download_filename) / (1000.0 * dt) m = self.entries.get(self.download_lognum, None) if m is None: size = 0 else: size = m.size highest = max(self.download_set) diff = set(range(highest)).difference(self.download_set) print("Downloading %s - %u/%u bytes %.1f kbyte/s (%u retries %u missing)" % (self.download_filename, os.path.getsize(self.download_filename), size, speed, self.retries, len(diff)))
[ "def", "log_status", "(", "self", ")", ":", "if", "self", ".", "download_filename", "is", "None", ":", "print", "(", "\"No download\"", ")", "return", "dt", "=", "time", ".", "time", "(", ")", "-", "self", ".", "download_start", "speed", "=", "os", ".", "path", ".", "getsize", "(", "self", ".", "download_filename", ")", "/", "(", "1000.0", "*", "dt", ")", "m", "=", "self", ".", "entries", ".", "get", "(", "self", ".", "download_lognum", ",", "None", ")", "if", "m", "is", "None", ":", "size", "=", "0", "else", ":", "size", "=", "m", ".", "size", "highest", "=", "max", "(", "self", ".", "download_set", ")", "diff", "=", "set", "(", "range", "(", "highest", ")", ")", ".", "difference", "(", "self", ".", "download_set", ")", "print", "(", "\"Downloading %s - %u/%u bytes %.1f kbyte/s (%u retries %u missing)\"", "%", "(", "self", ".", "download_filename", ",", "os", ".", "path", ".", "getsize", "(", "self", ".", "download_filename", ")", ",", "size", ",", "speed", ",", "self", ".", "retries", ",", "len", "(", "diff", ")", ")", ")" ]
show download status
[ "show", "download", "status" ]
python
train
55.55
paksu/pytelegraf
telegraf/client.py
https://github.com/paksu/pytelegraf/blob/a5a326bd99902768be2bf10da7dde2dfa165c013/telegraf/client.py#L13-L29
def metric(self, measurement_name, values, tags=None, timestamp=None): """ Append global tags configured for the client to the tags given then converts the data into InfluxDB Line protocol and sends to to socket """ if not measurement_name or values in (None, {}): # Don't try to send empty data return tags = tags or {} # Do a shallow merge of the metric tags and global tags all_tags = dict(self.tags, **tags) # Create a metric line from the input and then send it to socket line = Line(measurement_name, values, all_tags, timestamp) self.send(line.to_line_protocol())
[ "def", "metric", "(", "self", ",", "measurement_name", ",", "values", ",", "tags", "=", "None", ",", "timestamp", "=", "None", ")", ":", "if", "not", "measurement_name", "or", "values", "in", "(", "None", ",", "{", "}", ")", ":", "# Don't try to send empty data", "return", "tags", "=", "tags", "or", "{", "}", "# Do a shallow merge of the metric tags and global tags", "all_tags", "=", "dict", "(", "self", ".", "tags", ",", "*", "*", "tags", ")", "# Create a metric line from the input and then send it to socket", "line", "=", "Line", "(", "measurement_name", ",", "values", ",", "all_tags", ",", "timestamp", ")", "self", ".", "send", "(", "line", ".", "to_line_protocol", "(", ")", ")" ]
Append global tags configured for the client to the tags given, then converts the data into InfluxDB Line protocol and sends it to the socket
[ "Append", "global", "tags", "configured", "for", "the", "client", "to", "the", "tags", "given", "then", "converts", "the", "data", "into", "InfluxDB", "Line", "protocol", "and", "sends", "to", "to", "socket" ]
python
train
39.352941
ensime/ensime-vim
ensime_shared/client.py
https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/client.py#L235-L239
def shutdown_server(self): """Shut down server if it is alive.""" self.log.debug('shutdown_server: in') if self.ensime and self.toggle_teardown: self.ensime.stop()
[ "def", "shutdown_server", "(", "self", ")", ":", "self", ".", "log", ".", "debug", "(", "'shutdown_server: in'", ")", "if", "self", ".", "ensime", "and", "self", ".", "toggle_teardown", ":", "self", ".", "ensime", ".", "stop", "(", ")" ]
Shut down server if it is alive.
[ "Shut", "down", "server", "if", "it", "is", "alive", "." ]
python
train
39
heuer/segno
segno/__init__.py
https://github.com/heuer/segno/blob/64d912a2bd17d0b5ff3e8b5d37098edfc663c2b3/segno/__init__.py#L180-L218
def make_sequence(content, error=None, version=None, mode=None, mask=None, encoding=None, boost_error=True, symbol_count=None): """\ Creates a sequence of QR Codes. If the content fits into one QR Code and neither ``version`` nor ``symbol_count`` is provided, this function may return a sequence with one QR Code which does not use the Structured Append mode. Otherwise a sequence of 2 .. n (max. n = 16) QR Codes is returned which use the Structured Append mode. The Structured Append mode allows to split the content over a number (max. 16) QR Codes. The Structured Append mode isn't available for Micro QR Codes, therefor the returned sequence contains QR Codes, only. Since this function returns an iterable object, it may be used as follows: .. code-block:: python for i, qrcode in enumerate(segno.make_sequence(data, symbol_count=2)): qrcode.save('seq-%d.svg' % i, scale=10, color='darkblue') The returned number of QR Codes is determined by the `version` or `symbol_count` parameter See :py:func:`make` for a description of the other parameters. :param int symbol_count: Number of symbols. :rtype: QRCodeSequence """ return QRCodeSequence(map(QRCode, encoder.encode_sequence(content, error=error, version=version, mode=mode, mask=mask, encoding=encoding, boost_error=boost_error, symbol_count=symbol_count)))
[ "def", "make_sequence", "(", "content", ",", "error", "=", "None", ",", "version", "=", "None", ",", "mode", "=", "None", ",", "mask", "=", "None", ",", "encoding", "=", "None", ",", "boost_error", "=", "True", ",", "symbol_count", "=", "None", ")", ":", "return", "QRCodeSequence", "(", "map", "(", "QRCode", ",", "encoder", ".", "encode_sequence", "(", "content", ",", "error", "=", "error", ",", "version", "=", "version", ",", "mode", "=", "mode", ",", "mask", "=", "mask", ",", "encoding", "=", "encoding", ",", "boost_error", "=", "boost_error", ",", "symbol_count", "=", "symbol_count", ")", ")", ")" ]
\ Creates a sequence of QR Codes. If the content fits into one QR Code and neither ``version`` nor ``symbol_count`` is provided, this function may return a sequence with one QR Code which does not use the Structured Append mode. Otherwise a sequence of 2 .. n (max. n = 16) QR Codes is returned which use the Structured Append mode. The Structured Append mode allows splitting the content over a number (max. 16) of QR Codes. The Structured Append mode isn't available for Micro QR Codes, therefore the returned sequence contains QR Codes only. Since this function returns an iterable object, it may be used as follows: .. code-block:: python for i, qrcode in enumerate(segno.make_sequence(data, symbol_count=2)): qrcode.save('seq-%d.svg' % i, scale=10, color='darkblue') The returned number of QR Codes is determined by the `version` or `symbol_count` parameter. See :py:func:`make` for a description of the other parameters. :param int symbol_count: Number of symbols. :rtype: QRCodeSequence
[ "\\", "Creates", "a", "sequence", "of", "QR", "Codes", "." ]
python
train
43.74359
stlehmann/pyads
pyads/pyads_ex.py
https://github.com/stlehmann/pyads/blob/44bd84394db2785332ac44b2948373916bea0f02/pyads/pyads_ex.py#L338-L382
def adsSyncWriteReqEx(port, address, index_group, index_offset, value, plc_data_type): # type: (int, AmsAddr, int, int, Any, Type) -> None """Send data synchronous to an ADS-device. :param int port: local AMS port as returned by adsPortOpenEx() :param pyads.structs.AmsAddr address: local or remote AmsAddr :param int indexGroup: PLC storage area, according to the INDEXGROUP constants :param int index_offset: PLC storage address :param value: value to write to the storage address of the PLC :param int plc_data_type: type of the data given to the PLC, according to PLCTYPE constants """ sync_write_request = _adsDLL.AdsSyncWriteReqEx ams_address_pointer = ctypes.pointer(address.amsAddrStruct()) index_group_c = ctypes.c_ulong(index_group) index_offset_c = ctypes.c_ulong(index_offset) if plc_data_type == PLCTYPE_STRING: data = ctypes.c_char_p(value.encode("utf-8")) data_pointer = data # type: Union[ctypes.c_char_p, ctypes.pointer] data_length = len(data_pointer.value) + 1 # type: ignore else: if type(plc_data_type).__name__ == "PyCArrayType": data = plc_data_type(*value) else: data = plc_data_type(value) data_pointer = ctypes.pointer(data) data_length = ctypes.sizeof(data) error_code = sync_write_request( port, ams_address_pointer, index_group_c, index_offset_c, data_length, data_pointer, ) if error_code: raise ADSError(error_code)
[ "def", "adsSyncWriteReqEx", "(", "port", ",", "address", ",", "index_group", ",", "index_offset", ",", "value", ",", "plc_data_type", ")", ":", "# type: (int, AmsAddr, int, int, Any, Type) -> None", "sync_write_request", "=", "_adsDLL", ".", "AdsSyncWriteReqEx", "ams_address_pointer", "=", "ctypes", ".", "pointer", "(", "address", ".", "amsAddrStruct", "(", ")", ")", "index_group_c", "=", "ctypes", ".", "c_ulong", "(", "index_group", ")", "index_offset_c", "=", "ctypes", ".", "c_ulong", "(", "index_offset", ")", "if", "plc_data_type", "==", "PLCTYPE_STRING", ":", "data", "=", "ctypes", ".", "c_char_p", "(", "value", ".", "encode", "(", "\"utf-8\"", ")", ")", "data_pointer", "=", "data", "# type: Union[ctypes.c_char_p, ctypes.pointer]", "data_length", "=", "len", "(", "data_pointer", ".", "value", ")", "+", "1", "# type: ignore", "else", ":", "if", "type", "(", "plc_data_type", ")", ".", "__name__", "==", "\"PyCArrayType\"", ":", "data", "=", "plc_data_type", "(", "*", "value", ")", "else", ":", "data", "=", "plc_data_type", "(", "value", ")", "data_pointer", "=", "ctypes", ".", "pointer", "(", "data", ")", "data_length", "=", "ctypes", ".", "sizeof", "(", "data", ")", "error_code", "=", "sync_write_request", "(", "port", ",", "ams_address_pointer", ",", "index_group_c", ",", "index_offset_c", ",", "data_length", ",", "data_pointer", ",", ")", "if", "error_code", ":", "raise", "ADSError", "(", "error_code", ")" ]
Send data synchronously to an ADS-device. :param int port: local AMS port as returned by adsPortOpenEx() :param pyads.structs.AmsAddr address: local or remote AmsAddr :param int indexGroup: PLC storage area, according to the INDEXGROUP constants :param int index_offset: PLC storage address :param value: value to write to the storage address of the PLC :param int plc_data_type: type of the data given to the PLC, according to PLCTYPE constants
[ "Send", "data", "synchronous", "to", "an", "ADS", "-", "device", "." ]
python
valid
34.133333
IDSIA/sacred
sacred/arg_parser.py
https://github.com/IDSIA/sacred/blob/72633776bed9b5bddf93ae7d215188e61970973a/sacred/arg_parser.py#L169-L199
def format_usage(program_name, description, commands=None, options=()): """ Construct the usage text. Parameters ---------- program_name : str Usually the name of the python file that contains the experiment. description : str description of this experiment (usually the docstring). commands : dict[str, func] Dictionary of supported commands. Each entry should be a tuple of (name, function). options : list[sacred.commandline_options.CommandLineOption] A list of all supported commandline options. Returns ------- str The complete formatted usage text for this experiment. It adheres to the structure required by ``docopt``. """ usage = USAGE_TEMPLATE.format( program_name=cmd_quote(program_name), description=description.strip() if description else '', options=_format_options_usage(options), arguments=_format_arguments_usage(options), commands=_format_command_usage(commands) ) return usage
[ "def", "format_usage", "(", "program_name", ",", "description", ",", "commands", "=", "None", ",", "options", "=", "(", ")", ")", ":", "usage", "=", "USAGE_TEMPLATE", ".", "format", "(", "program_name", "=", "cmd_quote", "(", "program_name", ")", ",", "description", "=", "description", ".", "strip", "(", ")", "if", "description", "else", "''", ",", "options", "=", "_format_options_usage", "(", "options", ")", ",", "arguments", "=", "_format_arguments_usage", "(", "options", ")", ",", "commands", "=", "_format_command_usage", "(", "commands", ")", ")", "return", "usage" ]
Construct the usage text. Parameters ---------- program_name : str Usually the name of the python file that contains the experiment. description : str description of this experiment (usually the docstring). commands : dict[str, func] Dictionary of supported commands. Each entry should be a tuple of (name, function). options : list[sacred.commandline_options.CommandLineOption] A list of all supported commandline options. Returns ------- str The complete formatted usage text for this experiment. It adheres to the structure required by ``docopt``.
[ "Construct", "the", "usage", "text", "." ]
python
train
34.580645
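
Usage sketch for format_usage — a minimal example under the assumption that calling it directly with a plain dict of commands and the empty default options is representative; the program name and command function are made up.

from sacred.arg_parser import format_usage

def run_experiment():
    """Train the model with the current configuration."""

usage = format_usage(
    program_name="my_experiment.py",
    description="A toy experiment.",
    commands={"run": run_experiment},
)
print(usage)  # docopt-compatible usage text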
google/grr
grr/server/grr_response_server/aff4.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4.py#L600-L627
def Copy(self, old_urn, new_urn, age=NEWEST_TIME, limit=None, update_timestamps=False): """Make a copy of one AFF4 object to a different URN.""" new_urn = rdfvalue.RDFURN(new_urn) if update_timestamps and age != NEWEST_TIME: raise ValueError( "Can't update timestamps unless reading the latest version.") values = {} for predicate, value, ts in data_store.DB.ResolvePrefix( old_urn, AFF4_PREFIXES, timestamp=self.ParseAgeSpecification(age), limit=limit): if update_timestamps: values.setdefault(predicate, []).append((value, None)) else: values.setdefault(predicate, []).append((value, ts)) if values: with data_store.DB.GetMutationPool() as pool: pool.MultiSet(new_urn, values, replace=False) self._UpdateChildIndex(new_urn, pool)
[ "def", "Copy", "(", "self", ",", "old_urn", ",", "new_urn", ",", "age", "=", "NEWEST_TIME", ",", "limit", "=", "None", ",", "update_timestamps", "=", "False", ")", ":", "new_urn", "=", "rdfvalue", ".", "RDFURN", "(", "new_urn", ")", "if", "update_timestamps", "and", "age", "!=", "NEWEST_TIME", ":", "raise", "ValueError", "(", "\"Can't update timestamps unless reading the latest version.\"", ")", "values", "=", "{", "}", "for", "predicate", ",", "value", ",", "ts", "in", "data_store", ".", "DB", ".", "ResolvePrefix", "(", "old_urn", ",", "AFF4_PREFIXES", ",", "timestamp", "=", "self", ".", "ParseAgeSpecification", "(", "age", ")", ",", "limit", "=", "limit", ")", ":", "if", "update_timestamps", ":", "values", ".", "setdefault", "(", "predicate", ",", "[", "]", ")", ".", "append", "(", "(", "value", ",", "None", ")", ")", "else", ":", "values", ".", "setdefault", "(", "predicate", ",", "[", "]", ")", ".", "append", "(", "(", "value", ",", "ts", ")", ")", "if", "values", ":", "with", "data_store", ".", "DB", ".", "GetMutationPool", "(", ")", "as", "pool", ":", "pool", ".", "MultiSet", "(", "new_urn", ",", "values", ",", "replace", "=", "False", ")", "self", ".", "_UpdateChildIndex", "(", "new_urn", ",", "pool", ")" ]
Make a copy of one AFF4 object to a different URN.
[ "Make", "a", "copy", "of", "one", "AFF4", "object", "to", "a", "different", "URN", "." ]
python
train
31.571429
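
Usage sketch for Copy — assumes the method is reachable through the module-level aff4.FACTORY and that a GRR data store has already been initialized; the client URNs are placeholders.

from grr_response_core.lib import rdfvalue
from grr_response_server import aff4

old_urn = rdfvalue.RDFURN("aff4:/C.1234567890abcdef/analysis/results")
new_urn = rdfvalue.RDFURN("aff4:/C.1234567890abcdef/analysis/results_copy")

# copy only the newest version of every attribute to the new URN
aff4.FACTORY.Copy(old_urn, new_urn, age=aff4.NEWEST_TIME)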
greyli/flask-ckeditor
flask_ckeditor/__init__.py
https://github.com/greyli/flask-ckeditor/blob/a8a1aa0d5736271762700d06fe9dbc0f8ed43aec/flask_ckeditor/__init__.py#L198-L238
def uploader(func): """This method only used for CKEditor under version 4.5, in newer version, you should use ``upload_success()`` and ``upload_fail()`` instead. Decorated the view function that handle the file upload. The upload view must return the uploaded image's url. For example:: from flask import send_from_directory app.config['CKEDITOR_FILE_UPLOADER'] = 'upload' # this value can be endpoint or url @app.route('/files/<filename>') def uploaded_files(filename): path = '/the/uploaded/directory' return send_from_directory(path, filename) @app.route('/upload', methods=['POST']) @ckeditor.uploader def upload(): f = request.files.get('upload') f.save(os.path.join('/the/uploaded/directory', f.filename)) url = url_for('uploaded_files', filename=f.filename) return url .. versionadded:: 0.3 """ @wraps(func) def wrapper(*args, **kwargs): func_num = request.args.get('CKEditorFuncNum') # ckeditor = request.args.get('CKEditor') # language code used for error message, not used yet. # lang_code = request.args.get('langCode') # the error message to display when upload failed. message = current_app.config['CKEDITOR_UPLOAD_ERROR_MESSAGE'] url = func(*args, **kwargs) return Markup('''<script type="text/javascript"> window.parent.CKEDITOR.tools.callFunction(%s, "%s", "%s");</script>''' % (func_num, url, message)) return wrapper
[ "def", "uploader", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "func_num", "=", "request", ".", "args", ".", "get", "(", "'CKEditorFuncNum'", ")", "# ckeditor = request.args.get('CKEditor')", "# language code used for error message, not used yet.", "# lang_code = request.args.get('langCode')", "# the error message to display when upload failed.", "message", "=", "current_app", ".", "config", "[", "'CKEDITOR_UPLOAD_ERROR_MESSAGE'", "]", "url", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "Markup", "(", "'''<script type=\"text/javascript\">\n window.parent.CKEDITOR.tools.callFunction(%s, \"%s\", \"%s\");</script>'''", "%", "(", "func_num", ",", "url", ",", "message", ")", ")", "return", "wrapper" ]
This method is only used for CKEditor under version 4.5; in newer versions,
you should use ``upload_success()`` and ``upload_fail()`` instead.

Decorates the view function that handles the file upload. The upload
view must return the uploaded image's url. For example::

    from flask import send_from_directory

    app.config['CKEDITOR_FILE_UPLOADER'] = 'upload'  # this value can be endpoint or url

    @app.route('/files/<filename>')
    def uploaded_files(filename):
        path = '/the/uploaded/directory'
        return send_from_directory(path, filename)

    @app.route('/upload', methods=['POST'])
    @ckeditor.uploader
    def upload():
        f = request.files.get('upload')
        f.save(os.path.join('/the/uploaded/directory', f.filename))
        url = url_for('uploaded_files', filename=f.filename)
        return url

.. versionadded:: 0.3
[ "This", "method", "is", "only", "used", "for", "CKEditor", "under", "version", "4", ".", "5", "in", "newer", "versions", "you", "should", "use", "upload_success", "()", "and", "upload_fail", "()", "instead", "." ]
python
train
40.487805
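
The docstring above already shows the legacy decorator pattern, so here is a hedged sketch of the newer upload_success()/upload_fail() route it points to for CKEditor 4.5+; the upload directory and route names are placeholders.

import os
from flask import Flask, request, send_from_directory, url_for
from flask_ckeditor import CKEditor, upload_fail, upload_success

app = Flask(__name__)
app.config['CKEDITOR_FILE_UPLOADER'] = 'upload'
ckeditor = CKEditor(app)
UPLOAD_DIR = '/the/uploaded/directory'

@app.route('/files/<path:filename>')
def uploaded_files(filename):
    return send_from_directory(UPLOAD_DIR, filename)

@app.route('/upload', methods=['POST'])
def upload():
    f = request.files.get('upload')
    if f is None:
        return upload_fail(message='No file was uploaded.')
    f.save(os.path.join(UPLOAD_DIR, f.filename))
    return upload_success(url_for('uploaded_files', filename=f.filename),
                          filename=f.filename)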
yueyoum/social-oauth
example/_bottle.py
https://github.com/yueyoum/social-oauth/blob/80600ea737355b20931c8a0b5223f5b68175d930/example/_bottle.py#L949-L957
def params(self): """ A :class:`FormsDict` with the combined values of :attr:`query` and :attr:`forms`. File uploads are stored in :attr:`files`. """ params = FormsDict() for key, value in self.query.iterallitems(): params[key] = value for key, value in self.forms.iterallitems(): params[key] = value return params
[ "def", "params", "(", "self", ")", ":", "params", "=", "FormsDict", "(", ")", "for", "key", ",", "value", "in", "self", ".", "query", ".", "iterallitems", "(", ")", ":", "params", "[", "key", "]", "=", "value", "for", "key", ",", "value", "in", "self", ".", "forms", ".", "iterallitems", "(", ")", ":", "params", "[", "key", "]", "=", "value", "return", "params" ]
A :class:`FormsDict` with the combined values of :attr:`query` and :attr:`forms`. File uploads are stored in :attr:`files`.
[ "A", ":", "class", ":", "FormsDict", "with", "the", "combined", "values", "of", ":", "attr", ":", "query", "and", ":", "attr", ":", "forms", ".", "File", "uploads", "are", "stored", "in", ":", "attr", ":", "files", "." ]
python
train
42.444444
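
Usage sketch for the params property — the vendored _bottle.py mirrors upstream Bottle here, so the example is written against the regular bottle package; the route and field names are made up.

from bottle import request, route, run

@route('/search', method=['GET', 'POST'])
def search():
    # request.params merges request.query and request.forms;
    # a POSTed form field overrides a query-string field of the same name
    term = request.params.get('q', '')
    return 'searched for: %s' % term

# run(host='localhost', port=8080)  # start the dev server to try it out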
asmodehn/filefinder2
filefinder2/_encoding_utils.py
https://github.com/asmodehn/filefinder2/blob/3f0b211ce11a34562e2a2160e039ae5290b68d6b/filefinder2/_encoding_utils.py#L126-L152
def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True): """Converts a bytes string with python source code to unicode. Unicode strings are passed through unchanged. Byte strings are checked for the python source file encoding cookie to determine encoding. txt can be either a bytes buffer or a string containing the source code. """ if isinstance(txt, six.text_type): return txt if isinstance(txt, six.binary_type): buffer = io.BytesIO(txt) else: buffer = txt try: encoding, _ = detect_encoding(buffer.readline) except SyntaxError: encoding = "ascii" buffer.seek(0) newline_decoder = io.IncrementalNewlineDecoder(None, True) text = io.TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True) text.mode = 'r' if skip_encoding_cookie: return u"".join(strip_encoding_cookie(text)) else: return text.read()
[ "def", "source_to_unicode", "(", "txt", ",", "errors", "=", "'replace'", ",", "skip_encoding_cookie", "=", "True", ")", ":", "if", "isinstance", "(", "txt", ",", "six", ".", "text_type", ")", ":", "return", "txt", "if", "isinstance", "(", "txt", ",", "six", ".", "binary_type", ")", ":", "buffer", "=", "io", ".", "BytesIO", "(", "txt", ")", "else", ":", "buffer", "=", "txt", "try", ":", "encoding", ",", "_", "=", "detect_encoding", "(", "buffer", ".", "readline", ")", "except", "SyntaxError", ":", "encoding", "=", "\"ascii\"", "buffer", ".", "seek", "(", "0", ")", "newline_decoder", "=", "io", ".", "IncrementalNewlineDecoder", "(", "None", ",", "True", ")", "text", "=", "io", ".", "TextIOWrapper", "(", "buffer", ",", "encoding", ",", "errors", "=", "errors", ",", "line_buffering", "=", "True", ")", "text", ".", "mode", "=", "'r'", "if", "skip_encoding_cookie", ":", "return", "u\"\"", ".", "join", "(", "strip_encoding_cookie", "(", "text", ")", ")", "else", ":", "return", "text", ".", "read", "(", ")" ]
Converts a bytes string with python source code to unicode. Unicode strings are passed through unchanged. Byte strings are checked for the python source file encoding cookie to determine encoding. txt can be either a bytes buffer or a string containing the source code.
[ "Converts", "a", "bytes", "string", "with", "python", "source", "code", "to", "unicode", ".", "Unicode", "strings", "are", "passed", "through", "unchanged", ".", "Byte", "strings", "are", "checked", "for", "the", "python", "source", "file", "encoding", "cookie", "to", "determine", "encoding", ".", "txt", "can", "be", "either", "a", "bytes", "buffer", "or", "a", "string", "containing", "the", "source", "code", "." ]
python
train
34.444444
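
Usage sketch for source_to_unicode — the byte string below carries an encoding cookie declaring latin-1 so the cookie-detection path is exercised; the import path follows the record's path field.

from filefinder2._encoding_utils import source_to_unicode

src = b"# -*- coding: latin-1 -*-\nname = '\xe9l\xe8ve'\n"
text = source_to_unicode(src)  # decoded as latin-1, cookie line stripped
print(text)                    # name = 'élève'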
PMEAL/OpenPNM
openpnm/core/Base.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/core/Base.py#L1633-L1673
def _parse_mode(self, mode, allowed=None, single=False): r""" This private method is for checking the \'mode\' used in the calling method. Parameters ---------- mode : string or list of strings The mode(s) to be parsed allowed : list of strings A list containing the allowed modes. This list is defined by the calling method. If any of the received modes are not in the allowed list an exception is raised. single : boolean (default is False) Indicates if only a single mode is allowed. If this argument is True than a string is returned rather than a list of strings, which makes it easier to work with in the caller method. Returns ------- A list containing the received modes as strings, checked to ensure they are all within the allowed set (if provoided). Also, if the ``single`` argument was True, then a string is returned. """ if type(mode) is str: mode = [mode] for item in mode: if (allowed is not None) and (item not in allowed): raise Exception('\'mode\' must be one of the following: ' + allowed.__str__()) # Remove duplicates, if any [mode.remove(L) for L in mode if mode.count(L) > 1] if single: if len(mode) > 1: raise Exception('Multiple modes received when only one mode ' + 'allowed') else: mode = mode[0] return mode
[ "def", "_parse_mode", "(", "self", ",", "mode", ",", "allowed", "=", "None", ",", "single", "=", "False", ")", ":", "if", "type", "(", "mode", ")", "is", "str", ":", "mode", "=", "[", "mode", "]", "for", "item", "in", "mode", ":", "if", "(", "allowed", "is", "not", "None", ")", "and", "(", "item", "not", "in", "allowed", ")", ":", "raise", "Exception", "(", "'\\'mode\\' must be one of the following: '", "+", "allowed", ".", "__str__", "(", ")", ")", "# Remove duplicates, if any", "[", "mode", ".", "remove", "(", "L", ")", "for", "L", "in", "mode", "if", "mode", ".", "count", "(", "L", ")", ">", "1", "]", "if", "single", ":", "if", "len", "(", "mode", ")", ">", "1", ":", "raise", "Exception", "(", "'Multiple modes received when only one mode '", "+", "'allowed'", ")", "else", ":", "mode", "=", "mode", "[", "0", "]", "return", "mode" ]
r"""
This private method is for checking the \'mode\' used in the calling
method.

Parameters
----------
mode : string or list of strings
    The mode(s) to be parsed
allowed : list of strings
    A list containing the allowed modes. This list is defined by the
    calling method. If any of the received modes are not in the allowed
    list an exception is raised.
single : boolean (default is False)
    Indicates if only a single mode is allowed. If this argument is True
    then a string is returned rather than a list of strings, which makes
    it easier to work with in the caller method.

Returns
-------
A list containing the received modes as strings, checked to ensure they
are all within the allowed set (if provided). Also, if the ``single``
argument was True, then a string is returned.
[ "r", "This", "private", "method", "is", "for", "checking", "the", "\\", "mode", "\\", "used", "in", "the", "calling", "method", "." ]
python
train
39.219512
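
Usage sketch for _parse_mode — it is a private method, so this only illustrates its behaviour on a Base-derived object such as a Cubic network; the allowed-mode list is made up.

import openpnm as op

pn = op.network.Cubic(shape=[3, 3, 3])

# a single allowed mode with single=True comes back as a plain string
mode = pn._parse_mode('union', allowed=['union', 'intersection'], single=True)
assert mode == 'union'

# duplicates are removed and a list is returned when single is False
modes = pn._parse_mode(['union', 'union'], allowed=['union', 'intersection'])
assert modes == ['union']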
rogerhil/thegamesdb
thegamesdb/api.py
https://github.com/rogerhil/thegamesdb/blob/795314215f9ee73697c7520dea4ddecfb23ca8e6/thegamesdb/api.py#L80-L107
def _get_response(self, url, **params): """ Giving a service path and optional specific arguments, returns the response string. """ data = urlencode(params) url = "%s?%s" % (url, data) headers = {'User-Agent': self.get_random_agent()} request = Request(url, headers=headers, method='GET') def open_request(request, attempts, err=None): if attempts > self.request_attempts: raise attempts += 1 try: with urlopen(request, timeout=self.timeout) as response: return response.read() except HTTPError as err: if err.getcode() < 500: raise print("HTTPError occurred while trying to request the url " "%s. %s. Trying again in %s seconds..." % (url, err, self.seconds_between_attempts)) time.sleep(self.seconds_between_attempts) return open_request(request, attempts, err) attempts = 0 self.last_response = open_request(request, attempts) return self.last_response
[ "def", "_get_response", "(", "self", ",", "url", ",", "*", "*", "params", ")", ":", "data", "=", "urlencode", "(", "params", ")", "url", "=", "\"%s?%s\"", "%", "(", "url", ",", "data", ")", "headers", "=", "{", "'User-Agent'", ":", "self", ".", "get_random_agent", "(", ")", "}", "request", "=", "Request", "(", "url", ",", "headers", "=", "headers", ",", "method", "=", "'GET'", ")", "def", "open_request", "(", "request", ",", "attempts", ",", "err", "=", "None", ")", ":", "if", "attempts", ">", "self", ".", "request_attempts", ":", "raise", "attempts", "+=", "1", "try", ":", "with", "urlopen", "(", "request", ",", "timeout", "=", "self", ".", "timeout", ")", "as", "response", ":", "return", "response", ".", "read", "(", ")", "except", "HTTPError", "as", "err", ":", "if", "err", ".", "getcode", "(", ")", "<", "500", ":", "raise", "print", "(", "\"HTTPError occurred while trying to request the url \"", "\"%s. %s. Trying again in %s seconds...\"", "%", "(", "url", ",", "err", ",", "self", ".", "seconds_between_attempts", ")", ")", "time", ".", "sleep", "(", "self", ".", "seconds_between_attempts", ")", "return", "open_request", "(", "request", ",", "attempts", ",", "err", ")", "attempts", "=", "0", "self", ".", "last_response", "=", "open_request", "(", "request", ",", "attempts", ")", "return", "self", ".", "last_response" ]
Given a service path and optional specific arguments, returns the response string.
[ "Giving", "a", "service", "path", "and", "optional", "specific", "arguments", "returns", "the", "response", "string", "." ]
python
train
41.857143
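
Usage sketch for _get_response — the top-level client class name is an assumption (the record shows only api.py, not the class definition), and the endpoint URL and query parameters are placeholders.

from thegamesdb import TheGamesDb  # assumed class name, not confirmed by the record

api = TheGamesDb()
body = api._get_response(
    'http://thegamesdb.net/api/GetGamesList.php',  # hypothetical endpoint
    name='mario',
)
print(body[:200])  # raw response bytes; also cached on api.last_response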