repo:              string (lengths 7 to 54)
path:              string (lengths 4 to 192)
url:               string (lengths 87 to 284)
code:              string (lengths 78 to 104k)
code_tokens:       list
docstring:         string (lengths 1 to 46.9k)
docstring_tokens:  list
language:          string (1 class)
partition:         string (3 classes)
Arvedui/picuplib
picuplib/upload.py
https://github.com/Arvedui/picuplib/blob/c8a5d1542dbd421e84afd5ee81fe76efec89fb95/picuplib/upload.py#L265-L289
def compose_post(apikey, resize, rotation, noexif):
    """ composes basic post requests """
    check_rotation(rotation)
    check_resize(resize)

    post_data = {
        'formatliste': ('', 'og'),
        'userdrehung': ('', rotation),
        'apikey': ('', apikey)
    }

    if resize and 'x' in resize:
        width, height = [x.strip() for x in resize.split('x')]
        post_data['udefb'] = ('', width)
        post_data['udefh'] = ('', height)
    elif resize and '%' in resize:
        precentage = resize.strip().strip('%')
        post_data['udefp'] = precentage

    if noexif:
        post_data['noexif'] = ('', '')

    return post_data
[ "def", "compose_post", "(", "apikey", ",", "resize", ",", "rotation", ",", "noexif", ")", ":", "check_rotation", "(", "rotation", ")", "check_resize", "(", "resize", ")", "post_data", "=", "{", "'formatliste'", ":", "(", "''", ",", "'og'", ")", ",", "'userdrehung'", ":", "(", "''", ",", "rotation", ")", ",", "'apikey'", ":", "(", "''", ",", "apikey", ")", "}", "if", "resize", "and", "'x'", "in", "resize", ":", "width", ",", "height", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "resize", ".", "split", "(", "'x'", ")", "]", "post_data", "[", "'udefb'", "]", "=", "(", "''", ",", "width", ")", "post_data", "[", "'udefh'", "]", "=", "(", "''", ",", "height", ")", "elif", "resize", "and", "'%'", "in", "resize", ":", "precentage", "=", "resize", ".", "strip", "(", ")", ".", "strip", "(", "'%'", ")", "post_data", "[", "'udefp'", "]", "=", "precentage", "if", "noexif", ":", "post_data", "[", "'noexif'", "]", "=", "(", "''", ",", "''", ")", "return", "post_data" ]
composes basic post requests
[ "composes", "basic", "post", "requests" ]
python
train
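A minimal usage sketch for the record above; the API key is a placeholder and the rotation string is a guess at a value check_rotation would accept:

# Hypothetical call; apikey and rotation values are placeholders.
post_data = compose_post(apikey='my-api-key', resize='800x600', rotation='00', noexif=True)
# The ('', value) tuples match the shape requests expects for multipart
# uploads passed via its `files=` argument.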
asifpy/django-crudbuilder
crudbuilder/views.py
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/views.py#L191-L207
def generate_delete_view(self):
    """Generate class based view for DeleteView"""
    name = model_class_form(self.model + 'DeleteView')
    delete_args = dict(
        model=self.get_model_class,
        template_name=self.get_template('delete'),
        permissions=self.view_permission('delete'),
        permission_required=self.check_permission_required,
        login_required=self.check_login_required,
        success_url=reverse_lazy('{}-{}-list'.format(
            self.app, self.custom_postfix_url)),
        custom_postfix_url=self.custom_postfix_url
    )
    delete_class = type(name, (CrudBuilderMixin, DeleteView), delete_args)
    self.classes[name] = delete_class
    return delete_class
[ "def", "generate_delete_view", "(", "self", ")", ":", "name", "=", "model_class_form", "(", "self", ".", "model", "+", "'DeleteView'", ")", "delete_args", "=", "dict", "(", "model", "=", "self", ".", "get_model_class", ",", "template_name", "=", "self", ".", "get_template", "(", "'delete'", ")", ",", "permissions", "=", "self", ".", "view_permission", "(", "'delete'", ")", ",", "permission_required", "=", "self", ".", "check_permission_required", ",", "login_required", "=", "self", ".", "check_login_required", ",", "success_url", "=", "reverse_lazy", "(", "'{}-{}-list'", ".", "format", "(", "self", ".", "app", ",", "self", ".", "custom_postfix_url", ")", ")", ",", "custom_postfix_url", "=", "self", ".", "custom_postfix_url", ")", "delete_class", "=", "type", "(", "name", ",", "(", "CrudBuilderMixin", ",", "DeleteView", ")", ",", "delete_args", ")", "self", ".", "classes", "[", "name", "]", "=", "delete_class", "return", "delete_class" ]
Generate class based view for DeleteView
[ "Generate", "class", "based", "view", "for", "DeleteView" ]
python
train
jamieleshaw/lurklib
lurklib/channel.py
https://github.com/jamieleshaw/lurklib/blob/a861f35d880140422103dd78ec3239814e85fd7e/lurklib/channel.py#L198-L220
def exceptlist(self, channel):
    """
    Get the channel exceptlist.
    Required arguments:
    * channel - Channel of which to get the exceptlist for.
    """
    with self.lock:
        self.is_in_channel(channel)
        self.send('MODE %s e' % channel)
        excepts = []
        while self.readable():
            msg = self._recv(expected_replies=('348', '349'))
            if msg[0] == '348':
                exceptmask, who, timestamp = msg[2].split()[1:]
                excepts.append((self._from_(exceptmask), who,
                                self._m_time.localtime(int(timestamp))))
            elif msg[0] == '349':
                break
        return excepts
[ "def", "exceptlist", "(", "self", ",", "channel", ")", ":", "with", "self", ".", "lock", ":", "self", ".", "is_in_channel", "(", "channel", ")", "self", ".", "send", "(", "'MODE %s e'", "%", "channel", ")", "excepts", "=", "[", "]", "while", "self", ".", "readable", "(", ")", ":", "msg", "=", "self", ".", "_recv", "(", "expected_replies", "=", "(", "'348'", ",", "'349'", ")", ")", "if", "msg", "[", "0", "]", "==", "'348'", ":", "exceptmask", ",", "who", ",", "timestamp", "=", "msg", "[", "2", "]", ".", "split", "(", ")", "[", "1", ":", "]", "excepts", ".", "append", "(", "(", "self", ".", "_from_", "(", "exceptmask", ")", ",", "who", ",", "self", ".", "_m_time", ".", "localtime", "(", "int", "(", "timestamp", ")", ")", ")", ")", "elif", "msg", "[", "0", "]", "==", "'349'", ":", "break", "return", "excepts" ]
Get the channel exceptlist.
Required arguments:
* channel - Channel of which to get the exceptlist for.
[ "Get", "the", "channel", "exceptlist", ".", "Required", "arguments", ":", "*", "channel", "-", "Channel", "of", "which", "to", "get", "the", "exceptlist", "for", "." ]
python
train
ARMmbed/yotta
yotta/lib/github_access.py
https://github.com/ARMmbed/yotta/blob/56bc1e56c602fa20307b23fe27518e9cd6c11af1/yotta/lib/github_access.py#L51-L110
def _handleAuth(fn):
    ''' Decorator to re-try API calls after asking the user for authentication. '''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        # if yotta is being run noninteractively, then we never retry, but we
        # do call auth.authorizeUser, so that a login URL can be displayed:
        interactive = globalconf.get('interactive')

        def retryWithAuthOrRaise(original_exception):
            # in all cases ask for auth, so that in non-interactive mode a
            # login URL is displayed
            auth.authorizeUser(provider='github', interactive=interactive)
            if not interactive:
                raise original_exception
            else:
                logger.debug('trying with authtoken: %s',
                             settings.getProperty('github', 'authtoken'))
                return fn(*args, **kwargs)

        # authorised requests have a higher rate limit, but display a warning
        # message in this case, as the user might not expect the requirement to
        # auth:
        def handleRateLimitExceeded(original_exception):
            if not _userAuthedWithGithub():
                logger.warning('github rate limit for anonymous requests exceeded: you must log in')
                return retryWithAuthOrRaise(original_exception)
            else:
                raise original_exception

        try:
            return fn(*args, **kwargs)
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 403:
                # 403 = rate limit exceeded
                return handleRateLimitExceeded(e)
            if e.response.status_code == 401:
                # 401 = unauthorised
                return retryWithAuthOrRaise(e)
            raise
        except github.BadCredentialsException as e:
            logger.debug("github: bad credentials")
            return retryWithAuthOrRaise(e)
        except github.UnknownObjectException as e:
            logger.debug("github: unknown object")
            # some endpoints return 404 if the user doesn't have access, maybe
            # it would be better to prompt for another username and password,
            # and store multiple tokens that we can try for each request....
            # but for now we assume that if the user is logged in then a 404
            # really is a 404
            if not _userAuthedWithGithub():
                logger.info('failed to fetch Github object, re-trying with authentication...')
                return retryWithAuthOrRaise(e)
            raise
        except github.RateLimitExceededException as e:
            return handleRateLimitExceeded(e)
        except github.GithubException as e:
            if e.status == 403:
                # 403 = rate limit exceeded
                return handleRateLimitExceeded(e)
            raise
    return wrapped
[ "def", "_handleAuth", "(", "fn", ")", ":", "@", "functools", ".", "wraps", "(", "fn", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# if yotta is being run noninteractively, then we never retry, but we", "# do call auth.authorizeUser, so that a login URL can be displayed:", "interactive", "=", "globalconf", ".", "get", "(", "'interactive'", ")", "def", "retryWithAuthOrRaise", "(", "original_exception", ")", ":", "# in all cases ask for auth, so that in non-interactive mode a", "# login URL is displayed", "auth", ".", "authorizeUser", "(", "provider", "=", "'github'", ",", "interactive", "=", "interactive", ")", "if", "not", "interactive", ":", "raise", "original_exception", "else", ":", "logger", ".", "debug", "(", "'trying with authtoken: %s'", ",", "settings", ".", "getProperty", "(", "'github'", ",", "'authtoken'", ")", ")", "return", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# authorised requests have a higher rate limit, but display a warning", "# message in this case, as the user might not expect the requirement to", "# auth:", "def", "handleRateLimitExceeded", "(", "original_exception", ")", ":", "if", "not", "_userAuthedWithGithub", "(", ")", ":", "logger", ".", "warning", "(", "'github rate limit for anonymous requests exceeded: you must log in'", ")", "return", "retryWithAuthOrRaise", "(", "original_exception", ")", "else", ":", "raise", "original_exception", "try", ":", "return", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "requests", ".", "exceptions", ".", "HTTPError", "as", "e", ":", "if", "e", ".", "response", ".", "status_code", "==", "403", ":", "# 403 = rate limit exceeded", "return", "handleRateLimitExceeded", "(", "e", ")", "if", "e", ".", "response", ".", "status_code", "==", "401", ":", "# 401 = unauthorised", "return", "retryWithAuthOrRaise", "(", "e", ")", "raise", "except", "github", ".", "BadCredentialsException", "as", "e", ":", "logger", ".", "debug", "(", "\"github: bad credentials\"", ")", "return", "retryWithAuthOrRaise", "(", "e", ")", "except", "github", ".", "UnknownObjectException", "as", "e", ":", "logger", ".", "debug", "(", "\"github: unknown object\"", ")", "# some endpoints return 404 if the user doesn't have access, maybe", "# it would be better to prompt for another username and password,", "# and store multiple tokens that we can try for each request....", "# but for now we assume that if the user is logged in then a 404", "# really is a 404", "if", "not", "_userAuthedWithGithub", "(", ")", ":", "logger", ".", "info", "(", "'failed to fetch Github object, re-trying with authentication...'", ")", "return", "retryWithAuthOrRaise", "(", "e", ")", "raise", "except", "github", ".", "RateLimitExceededException", "as", "e", ":", "return", "handleRateLimitExceeded", "(", "e", ")", "except", "github", ".", "GithubException", "as", "e", ":", "if", "e", ".", "status", "==", "403", ":", "# 403 = rate limit exceeded", "return", "handleRateLimitExceeded", "(", "e", ")", "raise", "return", "wrapped" ]
Decorator to re-try API calls after asking the user for authentication.
[ "Decorator", "to", "re", "-", "try", "API", "calls", "after", "asking", "the", "user", "for", "authentication", "." ]
python
valid
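How such a decorator is typically applied, sketched with a hypothetical helper function:

@_handleAuth
def _getTags(repo):
    # any PyGithub or requests call in here may raise 401/403; the decorator
    # prompts for GitHub auth and retries (body and name are hypothetical)
    return repo.get_tags()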
pytroll/python-geotiepoints
geotiepoints/__init__.py
https://github.com/pytroll/python-geotiepoints/blob/7c5cc8a887f8534cc2839c716c2c560aeaf77659/geotiepoints/__init__.py#L54-L71
def metop20kmto1km(lons20km, lats20km):
    """Getting 1km geolocation for metop avhrr from 20km tiepoints.
    """
    cols20km = np.array([0] + list(range(4, 2048, 20)) + [2047])
    cols1km = np.arange(2048)
    lines = lons20km.shape[0]
    rows20km = np.arange(lines)
    rows1km = np.arange(lines)

    along_track_order = 1
    cross_track_order = 3

    satint = SatelliteInterpolator((lons20km, lats20km),
                                   (rows20km, cols20km),
                                   (rows1km, cols1km),
                                   along_track_order,
                                   cross_track_order)
    return satint.interpolate()
[ "def", "metop20kmto1km", "(", "lons20km", ",", "lats20km", ")", ":", "cols20km", "=", "np", ".", "array", "(", "[", "0", "]", "+", "list", "(", "range", "(", "4", ",", "2048", ",", "20", ")", ")", "+", "[", "2047", "]", ")", "cols1km", "=", "np", ".", "arange", "(", "2048", ")", "lines", "=", "lons20km", ".", "shape", "[", "0", "]", "rows20km", "=", "np", ".", "arange", "(", "lines", ")", "rows1km", "=", "np", ".", "arange", "(", "lines", ")", "along_track_order", "=", "1", "cross_track_order", "=", "3", "satint", "=", "SatelliteInterpolator", "(", "(", "lons20km", ",", "lats20km", ")", ",", "(", "rows20km", ",", "cols20km", ")", ",", "(", "rows1km", ",", "cols1km", ")", ",", "along_track_order", ",", "cross_track_order", ")", "return", "satint", ".", "interpolate", "(", ")" ]
Getting 1km geolocation for metop avhrr from 20km tiepoints.
[ "Getting", "1km", "geolocation", "for", "metop", "avhrr", "from", "20km", "tiepoints", "." ]
python
train
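Shape sketch for the record above: cols20km holds 105 tiepoint columns (0, 4, 24, ..., 2044, 2047), so the inputs should be (lines, 105) arrays; the dummy data below and the assumption that interpolate() returns a lon/lat pair are illustrative:

import numpy as np

lines = 12
lons20km = np.zeros((lines, 105))  # 105 = 1 + len(range(4, 2048, 20)) + 1
lats20km = np.zeros((lines, 105))
# lons1km, lats1km = metop20kmto1km(lons20km, lats20km)  # each assumed (lines, 2048)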
xeroc/python-graphenelib
graphenestorage/masterpassword.py
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenestorage/masterpassword.py#L141-L153
def _get_encrypted_masterpassword(self):
    """ Obtain the encrypted masterkey

        .. note:: The encrypted masterkey is checksummed, so that we can
            figure out that a provided password is correct or not. The
            checksum is only 4 bytes long!
    """
    if not self.unlocked():
        raise WalletLocked
    aes = AESCipher(self.password)
    return "{}${}".format(
        self._derive_checksum(self.masterkey), aes.encrypt(self.masterkey)
    )
[ "def", "_get_encrypted_masterpassword", "(", "self", ")", ":", "if", "not", "self", ".", "unlocked", "(", ")", ":", "raise", "WalletLocked", "aes", "=", "AESCipher", "(", "self", ".", "password", ")", "return", "\"{}${}\"", ".", "format", "(", "self", ".", "_derive_checksum", "(", "self", ".", "masterkey", ")", ",", "aes", ".", "encrypt", "(", "self", ".", "masterkey", ")", ")" ]
Obtain the encrypted masterkey

.. note:: The encrypted masterkey is checksummed, so that we can
    figure out that a provided password is correct or not. The
    checksum is only 4 bytes long!
[ "Obtain", "the", "encrypted", "masterkey" ]
python
valid
kumar303/mohawk
mohawk/bewit.py
https://github.com/kumar303/mohawk/blob/037be67ccf50ae704705e67add44e02737a65d21/mohawk/bewit.py#L21-L61
def get_bewit(resource):
    """
    Returns a bewit identifier for the resource as a string.

    :param resource:
        Resource to generate a bewit for
    :type resource: `mohawk.base.Resource`
    """
    if resource.method != 'GET':
        raise ValueError('bewits can only be generated for GET requests')
    if resource.nonce != '':
        raise ValueError('bewits must use an empty nonce')
    mac = calculate_mac(
        'bewit',
        resource,
        None,
    )

    if isinstance(mac, six.binary_type):
        mac = mac.decode('ascii')

    if resource.ext is None:
        ext = ''
    else:
        validate_header_attr(resource.ext, name='ext')
        ext = resource.ext

    # b64encode works only with bytes in python3, but all of our parameters are
    # in unicode, so we need to encode them. The cleanest way to do this that
    # works in both python 2 and 3 is to use string formatting to get a
    # unicode string, and then explicitly encode it to bytes.
    inner_bewit = u"{id}\\{exp}\\{mac}\\{ext}".format(
        id=resource.credentials['id'],
        exp=resource.timestamp,
        mac=mac,
        ext=ext,
    )
    inner_bewit_bytes = inner_bewit.encode('ascii')
    bewit_bytes = urlsafe_b64encode(inner_bewit_bytes)
    # Now decode the resulting bytes back to a unicode string
    return bewit_bytes.decode('ascii')
[ "def", "get_bewit", "(", "resource", ")", ":", "if", "resource", ".", "method", "!=", "'GET'", ":", "raise", "ValueError", "(", "'bewits can only be generated for GET requests'", ")", "if", "resource", ".", "nonce", "!=", "''", ":", "raise", "ValueError", "(", "'bewits must use an empty nonce'", ")", "mac", "=", "calculate_mac", "(", "'bewit'", ",", "resource", ",", "None", ",", ")", "if", "isinstance", "(", "mac", ",", "six", ".", "binary_type", ")", ":", "mac", "=", "mac", ".", "decode", "(", "'ascii'", ")", "if", "resource", ".", "ext", "is", "None", ":", "ext", "=", "''", "else", ":", "validate_header_attr", "(", "resource", ".", "ext", ",", "name", "=", "'ext'", ")", "ext", "=", "resource", ".", "ext", "# b64encode works only with bytes in python3, but all of our parameters are", "# in unicode, so we need to encode them. The cleanest way to do this that", "# works in both python 2 and 3 is to use string formatting to get a", "# unicode string, and then explicitly encode it to bytes.", "inner_bewit", "=", "u\"{id}\\\\{exp}\\\\{mac}\\\\{ext}\"", ".", "format", "(", "id", "=", "resource", ".", "credentials", "[", "'id'", "]", ",", "exp", "=", "resource", ".", "timestamp", ",", "mac", "=", "mac", ",", "ext", "=", "ext", ",", ")", "inner_bewit_bytes", "=", "inner_bewit", ".", "encode", "(", "'ascii'", ")", "bewit_bytes", "=", "urlsafe_b64encode", "(", "inner_bewit_bytes", ")", "# Now decode the resulting bytes back to a unicode string", "return", "bewit_bytes", ".", "decode", "(", "'ascii'", ")" ]
Returns a bewit identifier for the resource as a string.

:param resource:
    Resource to generate a bewit for
:type resource: `mohawk.base.Resource`
[ "Returns", "a", "bewit", "identifier", "for", "the", "resource", "as", "a", "string", "." ]
python
train
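The inverse direction, as a stdlib-only sketch of the inner format shown above; parse_bewit here is illustrative, not necessarily mohawk's own API:

from base64 import urlsafe_b64decode

def parse_bewit(bewit):
    # inner layout is id\exp\mac\ext, separated by single backslashes
    inner = urlsafe_b64decode(bewit.encode('ascii')).decode('ascii')
    client_id, exp, mac, ext = inner.split('\\')
    return client_id, exp, mac, ext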
TaurusOlson/incisive
incisive/core.py
https://github.com/TaurusOlson/incisive/blob/25bb9f53495985c1416c82e26f54158df4050cb0/incisive/core.py#L55-L88
def read_csv(filename, delimiter=",", skip=0, guess_type=True, has_header=True, use_types={}):
    """Read a CSV file

    Usage
    -----
    >>> data = read_csv(filename, delimiter=delimiter, skip=skip,
            guess_type=guess_type, has_header=True, use_types={})

    # Use specific types
    >>> types = {"sepal.length": int, "petal.width": float}
    >>> data = read_csv(filename, guess_type=guess_type, use_types=types)

    keywords
    :has_header:
        Determine whether the file has a header or not
    """
    with open(filename, 'r') as f:
        # Skip the n first lines
        if has_header:
            header = f.readline().strip().split(delimiter)
        else:
            header = None

        for i in range(skip):
            f.readline()

        for line in csv.DictReader(f, delimiter=delimiter, fieldnames=header):
            if use_types:
                yield apply_types(use_types, guess_type, line)
            elif guess_type:
                yield dmap(determine_type, line)
            else:
                yield line
[ "def", "read_csv", "(", "filename", ",", "delimiter", "=", "\",\"", ",", "skip", "=", "0", ",", "guess_type", "=", "True", ",", "has_header", "=", "True", ",", "use_types", "=", "{", "}", ")", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "# Skip the n first lines", "if", "has_header", ":", "header", "=", "f", ".", "readline", "(", ")", ".", "strip", "(", ")", ".", "split", "(", "delimiter", ")", "else", ":", "header", "=", "None", "for", "i", "in", "range", "(", "skip", ")", ":", "f", ".", "readline", "(", ")", "for", "line", "in", "csv", ".", "DictReader", "(", "f", ",", "delimiter", "=", "delimiter", ",", "fieldnames", "=", "header", ")", ":", "if", "use_types", ":", "yield", "apply_types", "(", "use_types", ",", "guess_type", ",", "line", ")", "elif", "guess_type", ":", "yield", "dmap", "(", "determine_type", ",", "line", ")", "else", ":", "yield", "line" ]
Read a CSV file

Usage
-----
>>> data = read_csv(filename, delimiter=delimiter, skip=skip,
        guess_type=guess_type, has_header=True, use_types={})

# Use specific types
>>> types = {"sepal.length": int, "petal.width": float}
>>> data = read_csv(filename, guess_type=guess_type, use_types=types)

keywords
:has_header:
    Determine whether the file has a header or not
[ "Read", "a", "CSV", "file", "Usage", "-----", ">>>", "data", "=", "read_csv", "(", "filename", "delimiter", "=", "delimiter", "skip", "=", "skip", "guess_type", "=", "guess_type", "has_header", "=", "True", "use_types", "=", "{}", ")" ]
python
valid
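Usage sketch following the docstring's own example (the file name is hypothetical); note the function is a generator, so rows stream lazily:

types = {"sepal.length": int, "petal.width": float}
for row in read_csv("iris.csv", use_types=types):
    print(row["sepal.length"], row["petal.width"])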
emilydolson/avida-spatial-tools
avidaspatial/visualizations.py
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/visualizations.py#L163-L179
def plot_phens(phen_grid, **kwargs):
    """
    Plots circles colored according to the values in phen_grid.
    -1 serves as a sentinel value, indicating that a circle should not
    be plotted in that location.
    """
    denom, palette = get_kwargs(phen_grid, kwargs, True)
    grid = color_grid(phen_grid, palette, denom)

    for i in range(len(grid)):
        for j in range(len(grid[i])):
            if grid[i][j] != -1 and tuple(grid[i][j]) != -1:
                plt.gca().add_patch(plt.Circle((j, i),
                                    radius=.3, lw=1, ec="black",
                                    facecolor=grid[i][j], zorder=2))
[ "def", "plot_phens", "(", "phen_grid", ",", "*", "*", "kwargs", ")", ":", "denom", ",", "palette", "=", "get_kwargs", "(", "phen_grid", ",", "kwargs", ",", "True", ")", "grid", "=", "color_grid", "(", "phen_grid", ",", "palette", ",", "denom", ")", "for", "i", "in", "range", "(", "len", "(", "grid", ")", ")", ":", "for", "j", "in", "range", "(", "len", "(", "grid", "[", "i", "]", ")", ")", ":", "if", "grid", "[", "i", "]", "[", "j", "]", "!=", "-", "1", "and", "tuple", "(", "grid", "[", "i", "]", "[", "j", "]", ")", "!=", "-", "1", ":", "plt", ".", "gca", "(", ")", ".", "add_patch", "(", "plt", ".", "Circle", "(", "(", "j", ",", "i", ")", ",", "radius", "=", ".3", ",", "lw", "=", "1", ",", "ec", "=", "\"black\"", ",", "facecolor", "=", "grid", "[", "i", "]", "[", "j", "]", ",", "zorder", "=", "2", ")", ")" ]
Plots circles colored according to the values in phen_grid. -1 serves as a sentinel value, indicating that a circle should not be plotted in that location.
[ "Plots", "circles", "colored", "according", "to", "the", "values", "in", "phen_grid", "." ]
python
train
frictionlessdata/tableschema-sql-py
tableschema_sql/storage.py
https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/storage.py#L109-L142
def delete(self, bucket=None, ignore=False):
    """https://github.com/frictionlessdata/tableschema-sql-py#storage
    """

    # Make lists
    buckets = bucket
    if isinstance(bucket, six.string_types):
        buckets = [bucket]
    elif bucket is None:
        buckets = reversed(self.buckets)

    # Iterate
    tables = []
    for bucket in buckets:

        # Check existent
        if bucket not in self.buckets:
            if not ignore:
                message = 'Bucket "%s" doesn\'t exist.' % bucket
                raise tableschema.exceptions.StorageError(message)
            return

        # Remove from buckets
        if bucket in self.__descriptors:
            del self.__descriptors[bucket]

        # Add table to tables
        table = self.__get_table(bucket)
        tables.append(table)

    # Drop tables, update metadata
    self.__metadata.drop_all(tables=tables)
    self.__metadata.clear()
    self.__reflect()
[ "def", "delete", "(", "self", ",", "bucket", "=", "None", ",", "ignore", "=", "False", ")", ":", "# Make lists", "buckets", "=", "bucket", "if", "isinstance", "(", "bucket", ",", "six", ".", "string_types", ")", ":", "buckets", "=", "[", "bucket", "]", "elif", "bucket", "is", "None", ":", "buckets", "=", "reversed", "(", "self", ".", "buckets", ")", "# Iterate", "tables", "=", "[", "]", "for", "bucket", "in", "buckets", ":", "# Check existent", "if", "bucket", "not", "in", "self", ".", "buckets", ":", "if", "not", "ignore", ":", "message", "=", "'Bucket \"%s\" doesn\\'t exist.'", "%", "bucket", "raise", "tableschema", ".", "exceptions", ".", "StorageError", "(", "message", ")", "return", "# Remove from buckets", "if", "bucket", "in", "self", ".", "__descriptors", ":", "del", "self", ".", "__descriptors", "[", "bucket", "]", "# Add table to tables", "table", "=", "self", ".", "__get_table", "(", "bucket", ")", "tables", ".", "append", "(", "table", ")", "# Drop tables, update metadata", "self", ".", "__metadata", ".", "drop_all", "(", "tables", "=", "tables", ")", "self", ".", "__metadata", ".", "clear", "(", ")", "self", ".", "__reflect", "(", ")" ]
https://github.com/frictionlessdata/tableschema-sql-py#storage
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "tableschema", "-", "sql", "-", "py#storage" ]
python
train
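Usage sketch against an already-connected Storage instance; the bucket name is hypothetical:

storage.delete('articles')   # drop a single bucket; raises StorageError if missing
storage.delete(ignore=True)  # drop every known bucket, tolerating absent ones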
calvinku96/labreporthelper
labreporthelper/bestfit/curvefit.py
https://github.com/calvinku96/labreporthelper/blob/4d436241f389c02eb188c313190df62ab28c3763/labreporthelper/bestfit/curvefit.py#L33-L42
def do_bestfit(self):
    """
    Do bestfit
    """
    self.check_important_variables()
    x = np.array(self.args["x"])
    y = np.array(self.args["y"])
    p = self.args.get("params", np.ones(self.args["num_vars"]))
    self.fit_args, self.cov = opt.curve_fit(self.args["func"], x, y, p)
    return self.fit_args
[ "def", "do_bestfit", "(", "self", ")", ":", "self", ".", "check_important_variables", "(", ")", "x", "=", "np", ".", "array", "(", "self", ".", "args", "[", "\"x\"", "]", ")", "y", "=", "np", ".", "array", "(", "self", ".", "args", "[", "\"y\"", "]", ")", "p", "=", "self", ".", "args", ".", "get", "(", "\"params\"", ",", "np", ".", "ones", "(", "self", ".", "args", "[", "\"num_vars\"", "]", ")", ")", "self", ".", "fit_args", ",", "self", ".", "cov", "=", "opt", ".", "curve_fit", "(", "self", ".", "args", "[", "\"func\"", "]", ",", "x", ",", "y", ",", "p", ")", "return", "self", ".", "fit_args" ]
Do bestfit
[ "Do", "bestfit" ]
python
train
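A self-contained sketch of the underlying scipy call, with a made-up linear model and synthetic data:

import numpy as np
import scipy.optimize as opt

def model(x, a, b):  # hypothetical fit function
    return a * x + b

x = np.linspace(0.0, 10.0, 50)
y = model(x, 2.0, 1.0) + np.random.normal(scale=0.1, size=x.size)
fit_args, cov = opt.curve_fit(model, x, y, p0=np.ones(2))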
CivicSpleen/ambry
ambry/__init__.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/__init__.py#L27-L30
def config(path=None, root=None, db=None):
    """Return the default run_config object for this installation."""
    import ambry.run
    return ambry.run.load(path=path, root=root, db=db)
[ "def", "config", "(", "path", "=", "None", ",", "root", "=", "None", ",", "db", "=", "None", ")", ":", "import", "ambry", ".", "run", "return", "ambry", ".", "run", ".", "load", "(", "path", "=", "path", ",", "root", "=", "root", ",", "db", "=", "db", ")" ]
Return the default run_config object for this installation.
[ "Return", "the", "default", "run_config", "object", "for", "this", "installation", "." ]
python
train
jhuapl-boss/intern
intern/service/boss/volume.py
https://github.com/jhuapl-boss/intern/blob/d8fc6df011d8f212c87e6a1fd4cc21cfb5d103ed/intern/service/boss/volume.py#L149-L171
def get_ids_in_region(
        self, resource, resolution,
        x_range, y_range, z_range, time_range=[0, 1]):
    """Get all ids in the region defined by x_range, y_range, z_range.

    Args:
        resource (intern.resource.Resource): An annotation channel.
        resolution (int): 0 indicates native resolution.
        x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
        y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
        z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
        time_range (optional [list[int]]): time range such as [30, 40] which means t>=30 and t<40.  Defaults to [0, 1].

    Returns:
        (list[int]): Example: [1, 2, 25].

    Raises:
        requests.HTTPError
        TypeError: if resource is not an annotation channel.
    """
    return self.service.get_ids_in_region(
        resource, resolution,
        x_range, y_range, z_range, time_range,
        self.url_prefix, self.auth, self.session, self.session_send_opts)
[ "def", "get_ids_in_region", "(", "self", ",", "resource", ",", "resolution", ",", "x_range", ",", "y_range", ",", "z_range", ",", "time_range", "=", "[", "0", ",", "1", "]", ")", ":", "return", "self", ".", "service", ".", "get_ids_in_region", "(", "resource", ",", "resolution", ",", "x_range", ",", "y_range", ",", "z_range", ",", "time_range", ",", "self", ".", "url_prefix", ",", "self", ".", "auth", ",", "self", ".", "session", ",", "self", ".", "session_send_opts", ")" ]
Get all ids in the region defined by x_range, y_range, z_range.

Args:
    resource (intern.resource.Resource): An annotation channel.
    resolution (int): 0 indicates native resolution.
    x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
    y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
    z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
    time_range (optional [list[int]]): time range such as [30, 40] which means t>=30 and t<40.  Defaults to [0, 1].

Returns:
    (list[int]): Example: [1, 2, 25].

Raises:
    requests.HTTPError
    TypeError: if resource is not an annotation channel.
[ "Get", "all", "ids", "in", "the", "region", "defined", "by", "x_range", "y_range", "z_range", "." ]
python
train
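Usage sketch with the ranges from the docstring; the service and channel objects are assumed to be configured elsewhere:

ids = volume.get_ids_in_region(annotation_channel, 0, [10, 20], [10, 20], [10, 20])
# ids is a list such as [1, 2, 25]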
sharibarboza/py_zap
py_zap/py_zap.py
https://github.com/sharibarboza/py_zap/blob/ce90853efcad66d3e28b8f1ac910f275349d016c/py_zap/py_zap.py#L427-L449
def get_averages(self):
    """Get the broadcast network averages for that day.

    Returns a dictionary:
        key: network name
        value: sub-dictionary with 'viewers', 'rating', and 'share' as keys
    """
    networks = [unescape_html(n.string) for n in self.soup.find_all('td', width='77')]
    table = self.soup.find_all('td', style=re.compile('^font'))

    # Each element is a list split as [rating, share]
    rateshares = [r.string.split('/') for r in table[:5] if r.string]
    viewers = [v.string for v in table[5:] if v.string]
    averages = {}

    # Load the averages dict
    for index, network in enumerate(networks):
        viewer = convert_float(unescape_html(viewers[index]))
        rating = convert_float(unescape_html(rateshares[index][0]))
        share = convert_float(unescape_html(rateshares[index][1]))
        averages[network] = {'viewer': viewer, 'rating': rating, 'share': share}

    return averages
[ "def", "get_averages", "(", "self", ")", ":", "networks", "=", "[", "unescape_html", "(", "n", ".", "string", ")", "for", "n", "in", "self", ".", "soup", ".", "find_all", "(", "'td'", ",", "width", "=", "'77'", ")", "]", "table", "=", "self", ".", "soup", ".", "find_all", "(", "'td'", ",", "style", "=", "re", ".", "compile", "(", "'^font'", ")", ")", "# Each element is a list split as [rating, share]", "rateshares", "=", "[", "r", ".", "string", ".", "split", "(", "'/'", ")", "for", "r", "in", "table", "[", ":", "5", "]", "if", "r", ".", "string", "]", "viewers", "=", "[", "v", ".", "string", "for", "v", "in", "table", "[", "5", ":", "]", "if", "v", ".", "string", "]", "averages", "=", "{", "}", "# Load the averages dict", "for", "index", ",", "network", "in", "enumerate", "(", "networks", ")", ":", "viewer", "=", "convert_float", "(", "unescape_html", "(", "viewers", "[", "index", "]", ")", ")", "rating", "=", "convert_float", "(", "unescape_html", "(", "rateshares", "[", "index", "]", "[", "0", "]", ")", ")", "share", "=", "convert_float", "(", "unescape_html", "(", "rateshares", "[", "index", "]", "[", "1", "]", ")", ")", "averages", "[", "network", "]", "=", "{", "'viewer'", ":", "viewer", ",", "'rating'", ":", "rating", ",", "'share'", ":", "share", "}", "return", "averages" ]
Get the broadcast network averages for that day.

Returns a dictionary:
    key: network name
    value: sub-dictionary with 'viewers', 'rating', and 'share' as keys
[ "Get", "the", "broadcast", "network", "averages", "for", "that", "day", "." ]
python
train
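Consumption sketch of the returned mapping (the parser object is hypothetical); note the code stores the key 'viewer', singular, even though the docstring says 'viewers':

averages = ratings.get_averages()
for network, stats in averages.items():
    print(network, stats['viewer'], stats['rating'], stats['share'])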
minhhoit/yacms
yacms/utils/sites.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/utils/sites.py#L80-L96
def has_site_permission(user):
    """
    Checks if a staff user has staff-level access for the current site.
    The actual permission lookup occurs in ``SitePermissionMiddleware``
    which then marks the request with the ``has_site_permission`` flag,
    so that we only query the db once per request, so this function
    serves as the entry point for everything else to check access. We
    also fall back to an ``is_staff`` check if the middleware is not
    installed, to ease migration.
    """
    mw = "yacms.core.middleware.SitePermissionMiddleware"
    if mw not in get_middleware_setting():
        from warnings import warn
        warn(mw + " missing from settings.MIDDLEWARE - per site"
                  "permissions not applied")
        return user.is_staff and user.is_active
    return getattr(user, "has_site_permission", False)
[ "def", "has_site_permission", "(", "user", ")", ":", "mw", "=", "\"yacms.core.middleware.SitePermissionMiddleware\"", "if", "mw", "not", "in", "get_middleware_setting", "(", ")", ":", "from", "warnings", "import", "warn", "warn", "(", "mw", "+", "\" missing from settings.MIDDLEWARE - per site\"", "\"permissions not applied\"", ")", "return", "user", ".", "is_staff", "and", "user", ".", "is_active", "return", "getattr", "(", "user", ",", "\"has_site_permission\"", ",", "False", ")" ]
Checks if a staff user has staff-level access for the current site. The actual permission lookup occurs in ``SitePermissionMiddleware`` which then marks the request with the ``has_site_permission`` flag, so that we only query the db once per request, so this function serves as the entry point for everything else to check access. We also fall back to an ``is_staff`` check if the middleware is not installed, to ease migration.
[ "Checks", "if", "a", "staff", "user", "has", "staff", "-", "level", "access", "for", "the", "current", "site", ".", "The", "actual", "permission", "lookup", "occurs", "in", "SitePermissionMiddleware", "which", "then", "marks", "the", "request", "with", "the", "has_site_permission", "flag", "so", "that", "we", "only", "query", "the", "db", "once", "per", "request", "so", "this", "function", "serves", "as", "the", "entry", "point", "for", "everything", "else", "to", "check", "access", ".", "We", "also", "fall", "back", "to", "an", "is_staff", "check", "if", "the", "middleware", "is", "not", "installed", "to", "ease", "migration", "." ]
python
train
blockstack/blockstack-core
blockstack/lib/atlas.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2156-L2174
def atlas_peer_set_zonefile_status(peer_hostport, zonefile_hash, present, zonefile_bits=None, peer_table=None, con=None, path=None):
    """
    Mark a zonefile as being present or absent on a peer.
    Use this method to update our knowledge of what other peers have,
    based on when we try to ask them for zonefiles (i.e. a peer can lie
    about what zonefiles it has, and if it advertizes the availability
    of a zonefile but doesn't deliver, then we need to remember not to
    ask it again).
    """
    if zonefile_bits is None:
        zonefile_bits = atlasdb_get_zonefile_bits(zonefile_hash, con=con, path=path)

    with AtlasPeerTableLocked(peer_table) as ptbl:
        if ptbl.has_key(peer_hostport):
            peer_inv = atlas_peer_get_zonefile_inventory(peer_hostport, peer_table=ptbl)
            peer_inv = atlas_inventory_flip_zonefile_bits(peer_inv, zonefile_bits, present)
            atlas_peer_set_zonefile_inventory(peer_hostport, peer_inv, peer_table=ptbl)

    return
[ "def", "atlas_peer_set_zonefile_status", "(", "peer_hostport", ",", "zonefile_hash", ",", "present", ",", "zonefile_bits", "=", "None", ",", "peer_table", "=", "None", ",", "con", "=", "None", ",", "path", "=", "None", ")", ":", "if", "zonefile_bits", "is", "None", ":", "zonefile_bits", "=", "atlasdb_get_zonefile_bits", "(", "zonefile_hash", ",", "con", "=", "con", ",", "path", "=", "path", ")", "with", "AtlasPeerTableLocked", "(", "peer_table", ")", "as", "ptbl", ":", "if", "ptbl", ".", "has_key", "(", "peer_hostport", ")", ":", "peer_inv", "=", "atlas_peer_get_zonefile_inventory", "(", "peer_hostport", ",", "peer_table", "=", "ptbl", ")", "peer_inv", "=", "atlas_inventory_flip_zonefile_bits", "(", "peer_inv", ",", "zonefile_bits", ",", "present", ")", "atlas_peer_set_zonefile_inventory", "(", "peer_hostport", ",", "peer_inv", ",", "peer_table", "=", "ptbl", ")", "return" ]
Mark a zonefile as being present or absent on a peer. Use this method to update our knowledge of what other peers have, based on when we try to ask them for zonefiles (i.e. a peer can lie about what zonefiles it has, and if it advertizes the availability of a zonefile but doesn't deliver, then we need to remember not to ask it again).
[ "Mark", "a", "zonefile", "as", "being", "present", "or", "absent", "on", "a", "peer", ".", "Use", "this", "method", "to", "update", "our", "knowledge", "of", "what", "other", "peers", "have", "based", "on", "when", "we", "try", "to", "ask", "them", "for", "zonefiles", "(", "i", ".", "e", ".", "a", "peer", "can", "lie", "about", "what", "zonefiles", "it", "has", "and", "if", "it", "advertizes", "the", "availability", "of", "a", "zonefile", "but", "doesn", "t", "deliver", "then", "we", "need", "to", "remember", "not", "to", "ask", "it", "again", ")", "." ]
python
train
quantumlib/Cirq
cirq/google/programs.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/google/programs.py#L235-L266
def unpack_results(
        data: bytes,
        repetitions: int,
        key_sizes: Sequence[Tuple[str, int]]
) -> Dict[str, np.ndarray]:
    """Unpack data from a bitstring into individual measurement results.

    Args:
        data: Packed measurement results, in the form <rep0><rep1>...
            where each repetition is <key0_0>..<key0_{size0-1}><key1_0>...
            with bits packed in little-endian order in each byte.
        repetitions: number of repetitions.
        key_sizes: Keys and sizes of the measurements in the data.

    Returns:
        Dict mapping measurement key to a 2D array of boolean results. Each
        array has shape (repetitions, size) with size for that measurement.
    """
    bits_per_rep = sum(size for _, size in key_sizes)
    total_bits = repetitions * bits_per_rep

    byte_arr = np.frombuffer(data, dtype='uint8').reshape((len(data), 1))
    bits = np.unpackbits(byte_arr, axis=1)[:, ::-1].reshape(-1).astype(bool)
    bits = bits[:total_bits].reshape((repetitions, bits_per_rep))

    results = {}
    ofs = 0
    for key, size in key_sizes:
        results[key] = bits[:, ofs:ofs + size]
        ofs += size

    return results
[ "def", "unpack_results", "(", "data", ":", "bytes", ",", "repetitions", ":", "int", ",", "key_sizes", ":", "Sequence", "[", "Tuple", "[", "str", ",", "int", "]", "]", ")", "->", "Dict", "[", "str", ",", "np", ".", "ndarray", "]", ":", "bits_per_rep", "=", "sum", "(", "size", "for", "_", ",", "size", "in", "key_sizes", ")", "total_bits", "=", "repetitions", "*", "bits_per_rep", "byte_arr", "=", "np", ".", "frombuffer", "(", "data", ",", "dtype", "=", "'uint8'", ")", ".", "reshape", "(", "(", "len", "(", "data", ")", ",", "1", ")", ")", "bits", "=", "np", ".", "unpackbits", "(", "byte_arr", ",", "axis", "=", "1", ")", "[", ":", ",", ":", ":", "-", "1", "]", ".", "reshape", "(", "-", "1", ")", ".", "astype", "(", "bool", ")", "bits", "=", "bits", "[", ":", "total_bits", "]", ".", "reshape", "(", "(", "repetitions", ",", "bits_per_rep", ")", ")", "results", "=", "{", "}", "ofs", "=", "0", "for", "key", ",", "size", "in", "key_sizes", ":", "results", "[", "key", "]", "=", "bits", "[", ":", ",", "ofs", ":", "ofs", "+", "size", "]", "ofs", "+=", "size", "return", "results" ]
Unpack data from a bitstring into individual measurement results.

Args:
    data: Packed measurement results, in the form <rep0><rep1>...
        where each repetition is <key0_0>..<key0_{size0-1}><key1_0>...
        with bits packed in little-endian order in each byte.
    repetitions: number of repetitions.
    key_sizes: Keys and sizes of the measurements in the data.

Returns:
    Dict mapping measurement key to a 2D array of boolean results. Each
    array has shape (repetitions, size) with size for that measurement.
[ "Unpack", "data", "from", "a", "bitstring", "into", "individual", "measurement", "results", "." ]
python
train
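A round-trip sketch of the little-endian packing the function expects, built with numpy only (key names and sizes made up):

import numpy as np

# two reps, two keys of 3 and 5 bits each -> 8 bits (one byte) per rep
bits = np.array([[1, 0, 1, 0, 1, 1, 0, 0],
                 [0, 1, 0, 1, 0, 0, 1, 1]], dtype=np.uint8)
# reverse each byte's bits so bit 0 lands in the least significant position
packed = np.packbits(bits[:, ::-1], axis=1).tobytes()
out = unpack_results(packed, repetitions=2, key_sizes=[('a', 3), ('b', 5)])
# out['a'].shape == (2, 3) and out['b'].shape == (2, 5)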
boriel/zxbasic
outfmt/tzx.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/outfmt/tzx.py#L109-L112
def standard_bytes_header(self, title, addr, length):
    """ Generates a standard header block of CODE type
    """
    self.save_header(self.HEADER_TYPE_CODE, title, length, param1=addr, param2=32768)
[ "def", "standard_bytes_header", "(", "self", ",", "title", ",", "addr", ",", "length", ")", ":", "self", ".", "save_header", "(", "self", ".", "HEADER_TYPE_CODE", ",", "title", ",", "length", ",", "param1", "=", "addr", ",", "param2", "=", "32768", ")" ]
Generates a standard header block of CODE type
[ "Generates", "a", "standard", "header", "block", "of", "CODE", "type" ]
python
train
UCBerkeleySETI/blimpy
blimpy/waterfall.py
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/waterfall.py#L269-L285
def __write_to_fil_light(self, filename_out, *args, **kwargs):
    """ Write data to .fil file.

    Args:
        filename_out (str): Name of output file
    """
    n_bytes = self.header[b'nbits'] / 8
    with open(filename_out, "wb") as fileh:
        fileh.write(generate_sigproc_header(self))  # generate_sigproc_header comes from sigproc.py
        j = self.data
        if n_bytes == 4:
            np.float32(j.ravel()).tofile(fileh)
        elif n_bytes == 2:
            np.int16(j.ravel()).tofile(fileh)
        elif n_bytes == 1:
            np.int8(j.ravel()).tofile(fileh)
[ "def", "__write_to_fil_light", "(", "self", ",", "filename_out", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "n_bytes", "=", "self", ".", "header", "[", "b'nbits'", "]", "/", "8", "with", "open", "(", "filename_out", ",", "\"wb\"", ")", "as", "fileh", ":", "fileh", ".", "write", "(", "generate_sigproc_header", "(", "self", ")", ")", "#generate_sigproc_header comes from sigproc.py", "j", "=", "self", ".", "data", "if", "n_bytes", "==", "4", ":", "np", ".", "float32", "(", "j", ".", "ravel", "(", ")", ")", ".", "tofile", "(", "fileh", ")", "elif", "n_bytes", "==", "2", ":", "np", ".", "int16", "(", "j", ".", "ravel", "(", ")", ")", ".", "tofile", "(", "fileh", ")", "elif", "n_bytes", "==", "1", ":", "np", ".", "int8", "(", "j", ".", "ravel", "(", ")", ")", ".", "tofile", "(", "fileh", ")" ]
Write data to .fil file. Args: filename_out (str): Name of output file
[ "Write", "data", "to", ".", "fil", "file", "." ]
python
test
akoumjian/datefinder
datefinder/__init__.py
https://github.com/akoumjian/datefinder/blob/612e8b71e57b1083e1224412ba8fb8bce3810bdd/datefinder/__init__.py#L188-L215
def find_dates(text, source=False, index=False, strict=False, base_date=None):
    """
    Extract datetime strings from text

    :param text:
        A string that contains one or more natural language or literal
        datetime strings
    :type text: str|unicode
    :param source:
        Return the original string segment
    :type source: boolean
    :param index:
        Return the indices where the datetime string was located in text
    :type index: boolean
    :param strict:
        Only return datetimes with complete date information. For example:
        `July 2016` or `Monday` will not return datetimes.
        `May 16, 2015` will return datetimes.
    :type strict: boolean
    :param base_date:
        Set a default base datetime when parsing incomplete dates
    :type base_date: datetime

    :return: Returns a generator that produces :mod:`datetime.datetime` objects,
        or a tuple with the source text and index, if requested
    """
    date_finder = DateFinder(base_date=base_date)
    return date_finder.find_dates(text, source=source, index=index, strict=strict)
[ "def", "find_dates", "(", "text", ",", "source", "=", "False", ",", "index", "=", "False", ",", "strict", "=", "False", ",", "base_date", "=", "None", ")", ":", "date_finder", "=", "DateFinder", "(", "base_date", "=", "base_date", ")", "return", "date_finder", ".", "find_dates", "(", "text", ",", "source", "=", "source", ",", "index", "=", "index", ",", "strict", "=", "strict", ")" ]
Extract datetime strings from text

:param text:
    A string that contains one or more natural language or literal
    datetime strings
:type text: str|unicode
:param source:
    Return the original string segment
:type source: boolean
:param index:
    Return the indices where the datetime string was located in text
:type index: boolean
:param strict:
    Only return datetimes with complete date information. For example:
    `July 2016` or `Monday` will not return datetimes.
    `May 16, 2015` will return datetimes.
:type strict: boolean
:param base_date:
    Set a default base datetime when parsing incomplete dates
:type base_date: datetime

:return: Returns a generator that produces :mod:`datetime.datetime` objects,
    or a tuple with the source text and index, if requested
[ "Extract", "datetime", "strings", "from", "text" ]
python
train
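Usage sketch drawn from the documented behaviour:

for dt in find_dates("The meeting moved from July 2016 to May 16, 2015."):
    print(dt)  # with the default strict=False both spans may parse;
               # with strict=True only the complete date would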
openpermissions/koi
koi/keygen.py
https://github.com/openpermissions/koi/blob/d721f8e1dfa8f07ad265d9dec32e8aaf80a9f281/koi/keygen.py#L179-L192
def check_key_cert_match(keyfile, certfile):
    """
    check if the ssl key matches the certificate

    :param keyfile: file path to the ssl key
    :param certfile: file path to the ssl certificate
    :returns: true or false
    """
    key_modulus = subprocess.check_output(
        'openssl rsa -noout -modulus -in {}'.format(keyfile), shell=True)
    cert_modulus = subprocess.check_output(
        'openssl x509 -noout -modulus -in {}'.format(certfile), shell=True)
    return key_modulus == cert_modulus
[ "def", "check_key_cert_match", "(", "keyfile", ",", "certfile", ")", ":", "key_modulus", "=", "subprocess", ".", "check_output", "(", "'openssl rsa -noout -modulus -in {}'", ".", "format", "(", "keyfile", ")", ",", "shell", "=", "True", ")", "cert_modulus", "=", "subprocess", ".", "check_output", "(", "'openssl x509 -noout -modulus -in {}'", ".", "format", "(", "certfile", ")", ",", "shell", "=", "True", ")", "return", "key_modulus", "==", "cert_modulus" ]
check if the ssl key matches the certificate

:param keyfile: file path to the ssl key
:param certfile: file path to the ssl certificate
:returns: true or false
[ "check", "if", "the", "ssl", "key", "matches", "the", "certificate", ":", "param", "keyfile", ":", "file", "path", "to", "the", "ssl", "key", ":", "param", "certfile", ":", "file", "path", "to", "the", "ssl", "certificate", ":", "returns", ":", "true", "or", "false" ]
python
train
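Usage sketch (paths hypothetical); the function shells out to the openssl binary, which must be on PATH:

if not check_key_cert_match('/etc/ssl/private/server.key', '/etc/ssl/certs/server.crt'):
    raise SystemExit('TLS key does not match certificate')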
rkhleics/wagtailmodeladmin
wagtailmodeladmin/templatetags/wagtailmodeladmin_tags.py
https://github.com/rkhleics/wagtailmodeladmin/blob/7fddc853bab2ff3868b8c7a03329308c55f16358/wagtailmodeladmin/templatetags/wagtailmodeladmin_tags.py#L88-L103
def result_list(context):
    """
    Displays the headers and data list together
    """
    view = context['view']
    object_list = context['object_list']
    headers = list(result_headers(view))
    num_sorted_fields = 0
    for h in headers:
        if h['sortable'] and h['sorted']:
            num_sorted_fields += 1
    context.update({
        'result_headers': headers,
        'num_sorted_fields': num_sorted_fields,
        'results': list(results(view, object_list))})
    return context
[ "def", "result_list", "(", "context", ")", ":", "view", "=", "context", "[", "'view'", "]", "object_list", "=", "context", "[", "'object_list'", "]", "headers", "=", "list", "(", "result_headers", "(", "view", ")", ")", "num_sorted_fields", "=", "0", "for", "h", "in", "headers", ":", "if", "h", "[", "'sortable'", "]", "and", "h", "[", "'sorted'", "]", ":", "num_sorted_fields", "+=", "1", "context", ".", "update", "(", "{", "'result_headers'", ":", "headers", ",", "'num_sorted_fields'", ":", "num_sorted_fields", ",", "'results'", ":", "list", "(", "results", "(", "view", ",", "object_list", ")", ")", "}", ")", "return", "context" ]
Displays the headers and data list together
[ "Displays", "the", "headers", "and", "data", "list", "together" ]
python
train
googlefonts/ufo2ft
Lib/ufo2ft/featureWriters/ast.py
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/featureWriters/ast.py#L145-L181
def addLookupReferences(
    feature, lookups, script=None, languages=None, exclude_dflt=False
):
    """Add references to named lookups to the feature's statements.
    If `script` (str) and `languages` (sequence of str) are provided,
    only register the lookup for the given script and languages,
    optionally with `exclude_dflt` directive.
    Otherwise add a global reference which will be registered for all
    the scripts and languages in the feature file's `languagesystems`
    statements.
    """
    assert lookups

    if not script:
        for lookup in lookups:
            feature.statements.append(ast.LookupReferenceStatement(lookup))
        return

    feature.statements.append(ast.ScriptStatement(script))
    if exclude_dflt:
        for language in languages or ("dflt",):
            feature.statements.append(
                ast.LanguageStatement(language, include_default=False)
            )
            for lookup in lookups:
                feature.statements.append(ast.LookupReferenceStatement(lookup))
    else:
        feature.statements.append(
            ast.LanguageStatement("dflt", include_default=True)
        )
        for lookup in lookups:
            feature.statements.append(ast.LookupReferenceStatement(lookup))
        for language in languages or ():
            if language == "dflt":
                continue
            feature.statements.append(
                ast.LanguageStatement(language, include_default=True)
            )
[ "def", "addLookupReferences", "(", "feature", ",", "lookups", ",", "script", "=", "None", ",", "languages", "=", "None", ",", "exclude_dflt", "=", "False", ")", ":", "assert", "lookups", "if", "not", "script", ":", "for", "lookup", "in", "lookups", ":", "feature", ".", "statements", ".", "append", "(", "ast", ".", "LookupReferenceStatement", "(", "lookup", ")", ")", "return", "feature", ".", "statements", ".", "append", "(", "ast", ".", "ScriptStatement", "(", "script", ")", ")", "if", "exclude_dflt", ":", "for", "language", "in", "languages", "or", "(", "\"dflt\"", ",", ")", ":", "feature", ".", "statements", ".", "append", "(", "ast", ".", "LanguageStatement", "(", "language", ",", "include_default", "=", "False", ")", ")", "for", "lookup", "in", "lookups", ":", "feature", ".", "statements", ".", "append", "(", "ast", ".", "LookupReferenceStatement", "(", "lookup", ")", ")", "else", ":", "feature", ".", "statements", ".", "append", "(", "ast", ".", "LanguageStatement", "(", "\"dflt\"", ",", "include_default", "=", "True", ")", ")", "for", "lookup", "in", "lookups", ":", "feature", ".", "statements", ".", "append", "(", "ast", ".", "LookupReferenceStatement", "(", "lookup", ")", ")", "for", "language", "in", "languages", "or", "(", ")", ":", "if", "language", "==", "\"dflt\"", ":", "continue", "feature", ".", "statements", ".", "append", "(", "ast", ".", "LanguageStatement", "(", "language", ",", "include_default", "=", "True", ")", ")" ]
Add references to named lookups to the feature's statements. If `script` (str) and `languages` (sequence of str) are provided, only register the lookup for the given script and languages, optionally with `exclude_dflt` directive. Otherwise add a global reference which will be registered for all the scripts and languages in the feature file's `languagesystems` statements.
[ "Add", "references", "to", "named", "lookups", "to", "the", "feature", "s", "statements", ".", "If", "script", "(", "str", ")", "and", "languages", "(", "sequence", "of", "str", ")", "are", "provided", "only", "register", "the", "lookup", "for", "the", "given", "script", "and", "languages", "optionally", "with", "exclude_dflt", "directive", ".", "Otherwise", "add", "a", "global", "reference", "which", "will", "be", "registered", "for", "all", "the", "scripts", "and", "languages", "in", "the", "feature", "file", "s", "languagesystems", "statements", "." ]
python
train
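Call sketch; the feature and lookup objects are assumed to have been built with feaLib's ast module elsewhere, and the names are hypothetical:

# registers kern_lookup under latn/dflt and latn/TRK
addLookupReferences(kern_feature, [kern_lookup], script="latn", languages=("TRK",))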
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L4088-L4116
def ekgc(selidx, row, element, lenout=_default_len_out):
    """
    Return an element of an entry in a column of character type in a specified
    row.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekgc_c.html

    :param selidx: Index of parent column in SELECT clause.
    :type selidx: int
    :param row: Row to fetch from.
    :type row: int
    :param element: Index of element, within column entry, to fetch.
    :type element: int
    :param lenout: Maximum length of column element.
    :type lenout: int
    :return:
            Character string element of column entry,
            Flag indicating whether column entry was null.
    :rtype: tuple
    """
    selidx = ctypes.c_int(selidx)
    row = ctypes.c_int(row)
    element = ctypes.c_int(element)
    lenout = ctypes.c_int(lenout)
    null = ctypes.c_int()
    found = ctypes.c_int()
    cdata = stypes.stringToCharP(lenout)
    libspice.ekgc_c(selidx, row, element, lenout, cdata,
                    ctypes.byref(null), ctypes.byref(found))
    return stypes.toPythonString(cdata), null.value, bool(found.value)
[ "def", "ekgc", "(", "selidx", ",", "row", ",", "element", ",", "lenout", "=", "_default_len_out", ")", ":", "selidx", "=", "ctypes", ".", "c_int", "(", "selidx", ")", "row", "=", "ctypes", ".", "c_int", "(", "row", ")", "element", "=", "ctypes", ".", "c_int", "(", "element", ")", "lenout", "=", "ctypes", ".", "c_int", "(", "lenout", ")", "null", "=", "ctypes", ".", "c_int", "(", ")", "found", "=", "ctypes", ".", "c_int", "(", ")", "cdata", "=", "stypes", ".", "stringToCharP", "(", "lenout", ")", "libspice", ".", "ekgc_c", "(", "selidx", ",", "row", ",", "element", ",", "lenout", ",", "cdata", ",", "ctypes", ".", "byref", "(", "null", ")", ",", "ctypes", ".", "byref", "(", "found", ")", ")", "return", "stypes", ".", "toPythonString", "(", "cdata", ")", ",", "null", ".", "value", ",", "bool", "(", "found", ".", "value", ")" ]
Return an element of an entry in a column of character type in a specified
row.

http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekgc_c.html

:param selidx: Index of parent column in SELECT clause.
:type selidx: int
:param row: Row to fetch from.
:type row: int
:param element: Index of element, within column entry, to fetch.
:type element: int
:param lenout: Maximum length of column element.
:type lenout: int
:return:
        Character string element of column entry,
        Flag indicating whether column entry was null.
:rtype: tuple
[ "Return", "an", "element", "of", "an", "entry", "in", "a", "column", "of", "character", "type", "in", "a", "specified", "row", "." ]
python
train
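Usage sketch; a prior EK query is assumed to have selected rows (and the needed kernels to be loaded), so n_rows below is hypothetical context:

# read string element 0 of each matched row from the first SELECT column
for row in range(n_rows):
    value, null, found = ekgc(0, row, 0)
    if found and not null:
        print(value)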
reingart/pyafipws
wsctg.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wsctg.py#L206-L212
def __analizar_errores(self, ret):
    "Comprueba y extrae errores si existen en la respuesta XML"
    if 'arrayErrores' in ret:
        errores = ret['arrayErrores'] or []
        self.Errores = [err['error'] for err in errores]
        self.ErrCode = ' '.join(self.Errores)
        self.ErrMsg = '\n'.join(self.Errores)
[ "def", "__analizar_errores", "(", "self", ",", "ret", ")", ":", "if", "'arrayErrores'", "in", "ret", ":", "errores", "=", "ret", "[", "'arrayErrores'", "]", "or", "[", "]", "self", ".", "Errores", "=", "[", "err", "[", "'error'", "]", "for", "err", "in", "errores", "]", "self", ".", "ErrCode", "=", "' '", ".", "join", "(", "self", ".", "Errores", ")", "self", ".", "ErrMsg", "=", "'\\n'", ".", "join", "(", "self", ".", "Errores", ")" ]
Checks for and extracts errors if present in the XML response
[ "Comprueba", "y", "extrae", "errores", "si", "existen", "en", "la", "respuesta", "XML" ]
python
train
phoebe-project/phoebe2
phoebe/backend/universe.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/backend/universe.py#L1560-L1588
def _fill_teffs(self, mesh=None, ignore_effects=False, **kwargs):
    r"""
    requires _fill_loggs and _fill_gravs to have been called

    Calculate local temperature of a Star.
    """
    logger.debug("{}._fill_teffs".format(self.component))
    if mesh is None:
        mesh = self.mesh

    # Now we can compute the local temperatures.
    # see PHOEBE Legacy scientific reference eq 5.23
    teffs = self.instantaneous_tpole*mesh.gravs.for_computations**0.25

    if not ignore_effects:
        for feature in self.features:
            if feature.proto_coords:
                teffs = feature.process_teffs(teffs, mesh.roche_coords_for_computations,
                                              s=self.polar_direction_xyz, t=self.time)
            else:
                teffs = feature.process_teffs(teffs, mesh.coords_for_computations,
                                              s=self.polar_direction_xyz, t=self.time)

    mesh.update_columns(teffs=teffs)

    if not self.needs_recompute_instantaneous:
        logger.debug("{}._fill_teffs: copying teffs to standard mesh".format(self.component))
        theta = 0.0
        self._standard_meshes[theta].update_columns(teffs=teffs)
[ "def", "_fill_teffs", "(", "self", ",", "mesh", "=", "None", ",", "ignore_effects", "=", "False", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "debug", "(", "\"{}._fill_teffs\"", ".", "format", "(", "self", ".", "component", ")", ")", "if", "mesh", "is", "None", ":", "mesh", "=", "self", ".", "mesh", "# Now we can compute the local temperatures.", "# see PHOEBE Legacy scientific reference eq 5.23", "teffs", "=", "self", ".", "instantaneous_tpole", "*", "mesh", ".", "gravs", ".", "for_computations", "**", "0.25", "if", "not", "ignore_effects", ":", "for", "feature", "in", "self", ".", "features", ":", "if", "feature", ".", "proto_coords", ":", "teffs", "=", "feature", ".", "process_teffs", "(", "teffs", ",", "mesh", ".", "roche_coords_for_computations", ",", "s", "=", "self", ".", "polar_direction_xyz", ",", "t", "=", "self", ".", "time", ")", "else", ":", "teffs", "=", "feature", ".", "process_teffs", "(", "teffs", ",", "mesh", ".", "coords_for_computations", ",", "s", "=", "self", ".", "polar_direction_xyz", ",", "t", "=", "self", ".", "time", ")", "mesh", ".", "update_columns", "(", "teffs", "=", "teffs", ")", "if", "not", "self", ".", "needs_recompute_instantaneous", ":", "logger", ".", "debug", "(", "\"{}._fill_teffs: copying teffs to standard mesh\"", ".", "format", "(", "self", ".", "component", ")", ")", "theta", "=", "0.0", "self", ".", "_standard_meshes", "[", "theta", "]", ".", "update_columns", "(", "teffs", "=", "teffs", ")" ]
r""" requires _fill_loggs and _fill_gravs to have been called Calculate local temperature of a Star.
[ "r" ]
python
train
aliyun/aliyun-odps-python-sdk
odps/df/expr/collections.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/df/expr/collections.py#L1296-L1343
def extract_kv(expr, columns=None, kv_delim=':', item_delim=',',
               dtype='float', fill_value=None):
    """
    Extract values in key-value represented columns into standalone columns.
    New column names will be the name of the key-value column followed by
    an underscore and the key.

    :param DataFrame expr: input DataFrame
    :param columns: the key-value columns to be extracted.
    :param str kv_delim: delimiter between key and value.
    :param str item_delim: delimiter between key-value pairs.
    :param str dtype: type of value columns to generate.
    :param fill_value: default value for missing key-value pairs.

    :return: extracted data frame
    :rtype: DataFrame

    :Example:

    >>> df
        name                     kv
    0  name1  k1=1.0,k2=3.0,k5=10.0
    1  name2          k2=3.0,k3=5.1
    2  name3          k1=7.1,k7=8.2
    3  name4          k2=1.2,k3=1.5
    4  name5          k2=1.0,k9=1.1
    >>> table = df.extract_kv(columns=['A', 'B'], kv_delim='=')
    >>> table
        name  kv_k1  kv_k2  kv_k3  kv_k5  kv_k7  kv_k9
    0  name1    1.0    3.0    Nan   10.0    Nan    Nan
    1  name2    Nan    3.0    5.1    Nan    Nan    Nan
    2  name3    7.1    Nan    Nan    Nan    8.2    Nan
    3  name4    Nan    1.2    1.5    Nan    Nan    Nan
    4  name5    Nan    1.0    Nan    Nan    Nan    1.1
    """
    if columns is None:
        columns = [expr._get_field(c) for c in expr.schema.names]
        intact_cols = []
    else:
        columns = [expr._get_field(c) for c in utils.to_list(columns)]
        name_set = set([c.name for c in columns])
        intact_cols = [expr._get_field(c) for c in expr.schema.names
                       if c not in name_set]

    column_type = types.validate_data_type(dtype)
    if any(not isinstance(c.dtype, types.String) for c in columns):
        raise ExpressionError('Key-value columns must be strings.')

    schema = DynamicSchema.from_lists([c.name for c in intact_cols],
                                      [c.dtype for c in intact_cols])
    return ExtractKVCollectionExpr(_input=expr, _columns=columns,
                                   _intact=intact_cols, _schema=schema,
                                   _column_type=column_type,
                                   _default=fill_value,
                                   _kv_delimiter=kv_delim,
                                   _item_delimiter=item_delim)
[ "def", "extract_kv", "(", "expr", ",", "columns", "=", "None", ",", "kv_delim", "=", "':'", ",", "item_delim", "=", "','", ",", "dtype", "=", "'float'", ",", "fill_value", "=", "None", ")", ":", "if", "columns", "is", "None", ":", "columns", "=", "[", "expr", ".", "_get_field", "(", "c", ")", "for", "c", "in", "expr", ".", "schema", ".", "names", "]", "intact_cols", "=", "[", "]", "else", ":", "columns", "=", "[", "expr", ".", "_get_field", "(", "c", ")", "for", "c", "in", "utils", ".", "to_list", "(", "columns", ")", "]", "name_set", "=", "set", "(", "[", "c", ".", "name", "for", "c", "in", "columns", "]", ")", "intact_cols", "=", "[", "expr", ".", "_get_field", "(", "c", ")", "for", "c", "in", "expr", ".", "schema", ".", "names", "if", "c", "not", "in", "name_set", "]", "column_type", "=", "types", ".", "validate_data_type", "(", "dtype", ")", "if", "any", "(", "not", "isinstance", "(", "c", ".", "dtype", ",", "types", ".", "String", ")", "for", "c", "in", "columns", ")", ":", "raise", "ExpressionError", "(", "'Key-value columns must be strings.'", ")", "schema", "=", "DynamicSchema", ".", "from_lists", "(", "[", "c", ".", "name", "for", "c", "in", "intact_cols", "]", ",", "[", "c", ".", "dtype", "for", "c", "in", "intact_cols", "]", ")", "return", "ExtractKVCollectionExpr", "(", "_input", "=", "expr", ",", "_columns", "=", "columns", ",", "_intact", "=", "intact_cols", ",", "_schema", "=", "schema", ",", "_column_type", "=", "column_type", ",", "_default", "=", "fill_value", ",", "_kv_delimiter", "=", "kv_delim", ",", "_item_delimiter", "=", "item_delim", ")" ]
Extract values in key-value represented columns into standalone columns. New column names will be the name of the key-value column followed by an underscore and the key. :param DataFrame expr: input DataFrame :param columns: the key-value columns to be extracted. :param str kv_delim: delimiter between key and value. :param str item_delim: delimiter between key-value pairs. :param str dtype: type of value columns to generate. :param fill_value: default value for missing key-value pairs. :return: extracted data frame :rtype: DataFrame :Example: >>> df name kv 0 name1 k1=1.0,k2=3.0,k5=10.0 1 name2 k2=3.0,k3=5.1 2 name3 k1=7.1,k7=8.2 3 name4 k2=1.2,k3=1.5 4 name5 k2=1.0,k9=1.1 >>> table = df.extract_kv(columns=['A', 'B'], kv_delim='=') >>> table name kv_k1 kv_k2 kv_k3 kv_k5 kv_k7 kv_k9 0 name1 1.0 3.0 Nan 10.0 Nan Nan 1 name2 Nan 3.0 5.1 Nan Nan Nan 2 name3 7.1 Nan Nan Nan 8.2 Nan 3 name4 Nan 1.2 1.5 Nan Nan Nan 4 name5 Nan 1.0 Nan Nan Nan 1.1
[ "Extract", "values", "in", "key", "-", "value", "represented", "columns", "into", "standalone", "columns", ".", "New", "column", "names", "will", "be", "the", "name", "of", "the", "key", "-", "value", "column", "followed", "by", "an", "underscore", "and", "the", "key", "." ]
python
train
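The extract_kv record above widens key-value strings into per-key columns. A self-contained pandas sketch of the same transformation, assuming float values and no escaping in the delimiters (extract_kv_sketch and its arguments are hypothetical names, not the ODPS API):

import pandas as pd

def extract_kv_sketch(df, column, kv_delim='=', item_delim=','):
    # parse each "k1=1.0,k2=3.0" cell into a dict, then widen to columns
    parsed = df[column].apply(lambda s: {
        k: float(v)
        for k, v in (item.split(kv_delim) for item in s.split(item_delim))})
    wide = pd.DataFrame(list(parsed)).add_prefix(column + '_')
    return df.drop(columns=[column]).join(wide)

df = pd.DataFrame({'name': ['n1', 'n2'],
                   'kv': ['k1=1.0,k2=3.0', 'k2=5.1']})
print(extract_kv_sketch(df, 'kv'))  # columns: name, kv_k1, kv_k2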
caesar0301/relogger
relogger/config_parser.py
https://github.com/caesar0301/relogger/blob/40b722ad2115ac6a179e2cc4eb0c88333f5114de/relogger/config_parser.py#L137-L147
def _assemble_flowtable(self, values): """ generate a flowtable from a tuple of descriptors. """ values = map(lambda x: [] if x is None else x, values) src = values[0] + values[1] dst = values[2] + values[3] thistable = dict() for s in src: thistable[s] = dst return thistable
[ "def", "_assemble_flowtable", "(", "self", ",", "values", ")", ":", "values", "=", "map", "(", "lambda", "x", ":", "[", "]", "if", "x", "is", "None", "else", "x", ",", "values", ")", "src", "=", "values", "[", "0", "]", "+", "values", "[", "1", "]", "dst", "=", "values", "[", "2", "]", "+", "values", "[", "3", "]", "thistable", "=", "dict", "(", ")", "for", "s", "in", "src", ":", "thistable", "[", "s", "]", "=", "dst", "return", "thistable" ]
generate a flowtable from a tuple of descriptors.
[ "generate", "a", "flowtable", "from", "a", "tuple", "of", "descriptors", "." ]
python
train
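Note that map() in the record above returns an iterator on Python 3, so the subsequent indexing only works on Python 2. A Python 3-safe sketch of the same logic (names are illustrative):

def assemble_flowtable_sketch(values):
    # use a list comprehension, since map() is lazy on Python 3
    values = [[] if v is None else v for v in values]
    src = values[0] + values[1]
    dst = values[2] + values[3]
    return {s: dst for s in src}

print(assemble_flowtable_sketch((['a'], None, ['x'], ['y'])))
# {'a': ['x', 'y']}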
tensorflow/hub
tensorflow_hub/feature_column.py
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/feature_column.py#L204-L245
def _check_module_is_image_embedding(module_spec):
  """Raises ValueError if `module_spec` is not usable as image embedding.

  Args:
    module_spec: A `_ModuleSpec` to test.

  Raises:
    ValueError: if `module_spec` default signature is not compatible with
      mapping an "images" input to a Tensor(float32, shape=(_,K)).
  """
  issues = []

  # Find issues with "default" signature inputs. The common signatures for
  # image models prescribe a specific name; we trust it if we find it
  # and if we can do the necessary inference of input shapes from it.
  input_info_dict = module_spec.get_input_info_dict()
  if (list(input_info_dict.keys()) != ["images"] or
      input_info_dict["images"].dtype != tf.float32):
    issues.append("Module 'default' signature must require a single input, "
                  "which must have type float32 and name 'images'.")
  else:
    try:
      image_util.get_expected_image_size(module_spec)
    except ValueError as e:
      issues.append("Module does not support hub.get_expected_image_size(); "
                    "original error was:\n" + str(e))  # Raised again below.

  # Find issues with "default" signature outputs. We test that the dtype and
  # shape is appropriate for use in input_layer().
  output_info_dict = module_spec.get_output_info_dict()
  if "default" not in output_info_dict:
    issues.append("Module 'default' signature must have a 'default' output.")
  else:
    output_type = output_info_dict["default"].dtype
    output_shape = output_info_dict["default"].get_shape()
    if not (output_type == tf.float32 and output_shape.ndims == 2 and
            output_shape.dims[1].value):
      issues.append("Module 'default' signature must have a 'default' output "
                    "of tf.Tensor(shape=(_,K), dtype=float32).")

  if issues:
    raise ValueError("Module is not usable as image embedding: %r" % issues)
[ "def", "_check_module_is_image_embedding", "(", "module_spec", ")", ":", "issues", "=", "[", "]", "# Find issues with \"default\" signature inputs. The common signatures for", "# image models prescribe a specific name; we trust it if we find it", "# and if we can do the necessary inference of input shapes from it.", "input_info_dict", "=", "module_spec", ".", "get_input_info_dict", "(", ")", "if", "(", "list", "(", "input_info_dict", ".", "keys", "(", ")", ")", "!=", "[", "\"images\"", "]", "or", "input_info_dict", "[", "\"images\"", "]", ".", "dtype", "!=", "tf", ".", "float32", ")", ":", "issues", ".", "append", "(", "\"Module 'default' signature must require a single input, \"", "\"which must have type float32 and name 'images'.\"", ")", "else", ":", "try", ":", "image_util", ".", "get_expected_image_size", "(", "module_spec", ")", "except", "ValueError", "as", "e", ":", "issues", ".", "append", "(", "\"Module does not support hub.get_expected_image_size(); \"", "\"original error was:\\n\"", "+", "str", "(", "e", ")", ")", "# Raised again below.", "# Find issues with \"default\" signature outputs. We test that the dtype and", "# shape is appropriate for use in input_layer().", "output_info_dict", "=", "module_spec", ".", "get_output_info_dict", "(", ")", "if", "\"default\"", "not", "in", "output_info_dict", ":", "issues", ".", "append", "(", "\"Module 'default' signature must have a 'default' output.\"", ")", "else", ":", "output_type", "=", "output_info_dict", "[", "\"default\"", "]", ".", "dtype", "output_shape", "=", "output_info_dict", "[", "\"default\"", "]", ".", "get_shape", "(", ")", "if", "not", "(", "output_type", "==", "tf", ".", "float32", "and", "output_shape", ".", "ndims", "==", "2", "and", "output_shape", ".", "dims", "[", "1", "]", ".", "value", ")", ":", "issues", ".", "append", "(", "\"Module 'default' signature must have a 'default' output \"", "\"of tf.Tensor(shape=(_,K), dtype=float32).\"", ")", "if", "issues", ":", "raise", "ValueError", "(", "\"Module is not usable as image embedding: %r\"", "%", "issues", ")" ]
Raises ValueError if `module_spec` is not usable as image embedding.

Args:
  module_spec: A `_ModuleSpec` to test.

Raises:
  ValueError: if `module_spec` default signature is not compatible with
    mapping an "images" input to a Tensor(float32, shape=(_,K)).
[ "Raises", "ValueError", "if", "module_spec", "is", "not", "usable", "as", "image", "embedding", "." ]
python
train
jsvine/spectra
spectra/grapefruit.py
https://github.com/jsvine/spectra/blob/2269a0ae9b5923154b15bd661fb81179608f7ec2/spectra/grapefruit.py#L1799-L1822
def ComplementaryColor(self, mode='ryb'): '''Create a new instance which is the complementary color of this one. Parameters: :mode: Select which color wheel to use for the generation (ryb/rgb). Returns: A grapefruit.Color instance. >>> Color.NewFromHsl(30, 1, 0.5).ComplementaryColor(mode='rgb') (0.0, 0.5, 1.0, 1.0) >>> Color.NewFromHsl(30, 1, 0.5).ComplementaryColor(mode='rgb').hsl (210, 1, 0.5) ''' h, s, l = self.__hsl if mode == 'ryb': h = Color.RgbToRyb(h) h = (h+180)%360 if mode == 'ryb': h = Color.RybToRgb(h) return Color((h, s, l), 'hsl', self.__a, self.__wref)
[ "def", "ComplementaryColor", "(", "self", ",", "mode", "=", "'ryb'", ")", ":", "h", ",", "s", ",", "l", "=", "self", ".", "__hsl", "if", "mode", "==", "'ryb'", ":", "h", "=", "Color", ".", "RgbToRyb", "(", "h", ")", "h", "=", "(", "h", "+", "180", ")", "%", "360", "if", "mode", "==", "'ryb'", ":", "h", "=", "Color", ".", "RybToRgb", "(", "h", ")", "return", "Color", "(", "(", "h", ",", "s", ",", "l", ")", ",", "'hsl'", ",", "self", ".", "__a", ",", "self", ".", "__wref", ")" ]
Create a new instance which is the complementary color of this one. Parameters: :mode: Select which color wheel to use for the generation (ryb/rgb). Returns: A grapefruit.Color instance. >>> Color.NewFromHsl(30, 1, 0.5).ComplementaryColor(mode='rgb') (0.0, 0.5, 1.0, 1.0) >>> Color.NewFromHsl(30, 1, 0.5).ComplementaryColor(mode='rgb').hsl (210, 1, 0.5)
[ "Create", "a", "new", "instance", "which", "is", "the", "complementary", "color", "of", "this", "one", "." ]
python
train
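The core of the complement computation in the record above is a 180-degree hue rotation, with an optional RGB-to-RYB wheel conversion around it. A tiny runnable sketch of just the rotation, matching the doctest (hue 30 maps to 210):

def complementary_hue(h):
    # rotate the hue halfway around the color wheel
    return (h + 180) % 360

print(complementary_hue(30))  # 210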
CalebBell/thermo
thermo/volume.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/volume.py#L1025-L1083
def calculate(self, T, method):
        r'''Method to calculate low-pressure liquid molar volume at temperature
        `T` with a given method. This method has no exception handling; see
        `T_dependent_property` for that.

        Parameters
        ----------
        T : float
            Temperature at which to calculate molar volume, [K]
        method : str
            Name of the method to use

        Returns
        -------
        Vm : float
            Molar volume of the liquid at T and a low pressure, [m^3/mol]
        '''
        if method == RACKETT:
            Vm = Rackett(T, self.Tc, self.Pc, self.Zc)
        elif method == YAMADA_GUNN:
            Vm = Yamada_Gunn(T, self.Tc, self.Pc, self.omega)
        elif method == BHIRUD_NORMAL:
            Vm = Bhirud_normal(T, self.Tc, self.Pc, self.omega)
        elif method == TOWNSEND_HALES:
            Vm = Townsend_Hales(T, self.Tc, self.Vc, self.omega)
        elif method == HTCOSTALD:
            Vm = COSTALD(T, self.Tc, self.Vc, self.omega)
        elif method == YEN_WOODS_SAT:
            Vm = Yen_Woods_saturation(T, self.Tc, self.Vc, self.Zc)
        elif method == MMSNM0:
            Vm = SNM0(T, self.Tc, self.Vc, self.omega)
        elif method == MMSNM0FIT:
            Vm = SNM0(T, self.Tc, self.Vc, self.omega, self.SNM0_delta_SRK)
        elif method == CAMPBELL_THODOS:
            Vm = Campbell_Thodos(T, self.Tb, self.Tc, self.Pc, self.MW,
                                 self.dipole)
        elif method == HTCOSTALDFIT:
            Vm = COSTALD(T, self.Tc, self.COSTALD_Vchar, self.COSTALD_omega_SRK)
        elif method == RACKETTFIT:
            Vm = Rackett(T, self.Tc, self.Pc, self.RACKETT_Z_RA)
        elif method == PERRYDIPPR:
            A, B, C, D = self.DIPPR_coeffs
            Vm = 1./EQ105(T, A, B, C, D)
        elif method == CRC_INORG_L:
            rho = CRC_inorganic(T, self.CRC_INORG_L_rho, self.CRC_INORG_L_k,
                                self.CRC_INORG_L_Tm)
            Vm = rho_to_Vm(rho, self.CRC_INORG_L_MW)
        elif method == VDI_PPDS:
            A, B, C, D = self.VDI_PPDS_coeffs
            tau = 1. - T/self.VDI_PPDS_Tc
            rho = self.VDI_PPDS_rhoc + A*tau**0.35 + B*tau**(2/3.) + C*tau + D*tau**(4/3.)
            Vm = rho_to_Vm(rho, self.VDI_PPDS_MW)
        elif method == CRC_INORG_L_CONST:
            Vm = self.CRC_INORG_L_CONST_Vm
        elif method == COOLPROP:
            Vm = 1./CoolProp_T_dependent_property(T, self.CASRN, 'DMOLAR', 'l')
        elif method in self.tabular_data:
            Vm = self.interpolate(T, method)
        return Vm
[ "def", "calculate", "(", "self", ",", "T", ",", "method", ")", ":", "if", "method", "==", "RACKETT", ":", "Vm", "=", "Rackett", "(", "T", ",", "self", ".", "Tc", ",", "self", ".", "Pc", ",", "self", ".", "Zc", ")", "elif", "method", "==", "YAMADA_GUNN", ":", "Vm", "=", "Yamada_Gunn", "(", "T", ",", "self", ".", "Tc", ",", "self", ".", "Pc", ",", "self", ".", "omega", ")", "elif", "method", "==", "BHIRUD_NORMAL", ":", "Vm", "=", "Bhirud_normal", "(", "T", ",", "self", ".", "Tc", ",", "self", ".", "Pc", ",", "self", ".", "omega", ")", "elif", "method", "==", "TOWNSEND_HALES", ":", "Vm", "=", "Townsend_Hales", "(", "T", ",", "self", ".", "Tc", ",", "self", ".", "Vc", ",", "self", ".", "omega", ")", "elif", "method", "==", "HTCOSTALD", ":", "Vm", "=", "COSTALD", "(", "T", ",", "self", ".", "Tc", ",", "self", ".", "Vc", ",", "self", ".", "omega", ")", "elif", "method", "==", "YEN_WOODS_SAT", ":", "Vm", "=", "Yen_Woods_saturation", "(", "T", ",", "self", ".", "Tc", ",", "self", ".", "Vc", ",", "self", ".", "Zc", ")", "elif", "method", "==", "MMSNM0", ":", "Vm", "=", "SNM0", "(", "T", ",", "self", ".", "Tc", ",", "self", ".", "Vc", ",", "self", ".", "omega", ")", "elif", "method", "==", "MMSNM0FIT", ":", "Vm", "=", "SNM0", "(", "T", ",", "self", ".", "Tc", ",", "self", ".", "Vc", ",", "self", ".", "omega", ",", "self", ".", "SNM0_delta_SRK", ")", "elif", "method", "==", "CAMPBELL_THODOS", ":", "Vm", "=", "Campbell_Thodos", "(", "T", ",", "self", ".", "Tb", ",", "self", ".", "Tc", ",", "self", ".", "Pc", ",", "self", ".", "MW", ",", "self", ".", "dipole", ")", "elif", "method", "==", "HTCOSTALDFIT", ":", "Vm", "=", "COSTALD", "(", "T", ",", "self", ".", "Tc", ",", "self", ".", "COSTALD_Vchar", ",", "self", ".", "COSTALD_omega_SRK", ")", "elif", "method", "==", "RACKETTFIT", ":", "Vm", "=", "Rackett", "(", "T", ",", "self", ".", "Tc", ",", "self", ".", "Pc", ",", "self", ".", "RACKETT_Z_RA", ")", "elif", "method", "==", "PERRYDIPPR", ":", "A", ",", "B", ",", "C", ",", "D", "=", "self", ".", "DIPPR_coeffs", "Vm", "=", "1.", "/", "EQ105", "(", "T", ",", "A", ",", "B", ",", "C", ",", "D", ")", "elif", "method", "==", "CRC_INORG_L", ":", "rho", "=", "CRC_inorganic", "(", "T", ",", "self", ".", "CRC_INORG_L_rho", ",", "self", ".", "CRC_INORG_L_k", ",", "self", ".", "CRC_INORG_L_Tm", ")", "Vm", "=", "rho_to_Vm", "(", "rho", ",", "self", ".", "CRC_INORG_L_MW", ")", "elif", "method", "==", "VDI_PPDS", ":", "A", ",", "B", ",", "C", ",", "D", "=", "self", ".", "VDI_PPDS_coeffs", "tau", "=", "1.", "-", "T", "/", "self", ".", "VDI_PPDS_Tc", "rho", "=", "self", ".", "VDI_PPDS_rhoc", "+", "A", "*", "tau", "**", "0.35", "+", "B", "*", "tau", "**", "(", "2", "/", "3.", ")", "+", "C", "*", "tau", "+", "D", "*", "tau", "**", "(", "4", "/", "3.", ")", "Vm", "=", "rho_to_Vm", "(", "rho", ",", "self", ".", "VDI_PPDS_MW", ")", "elif", "method", "==", "CRC_INORG_L_CONST", ":", "Vm", "=", "self", ".", "CRC_INORG_L_CONST_Vm", "elif", "method", "==", "COOLPROP", ":", "Vm", "=", "1.", "/", "CoolProp_T_dependent_property", "(", "T", ",", "self", ".", "CASRN", ",", "'DMOLAR'", ",", "'l'", ")", "elif", "method", "in", "self", ".", "tabular_data", ":", "Vm", "=", "self", ".", "interpolate", "(", "T", ",", "method", ")", "return", "Vm" ]
r'''Method to calculate low-pressure liquid molar volume at temperature
`T` with a given method. This method has no exception handling; see
`T_dependent_property` for that.

Parameters
----------
T : float
    Temperature at which to calculate molar volume, [K]
method : str
    Name of the method to use

Returns
-------
Vm : float
    Molar volume of the liquid at T and a low pressure, [m^3/mol]
[ "r", "Method", "to", "calculate", "low", "-", "pressure", "liquid", "molar", "volume", "at", "tempearture", "T", "with", "a", "given", "method", "." ]
python
valid
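calculate() above is a dispatch over named correlations. As one concrete example, here is a sketch of the Rackett method it can dispatch to, written from the standard Rackett (1970) form; the constant and the water-like inputs are illustrative assumptions, not thermo's exact implementation:

R = 8.314462618  # J/(mol*K)

def rackett_sketch(T, Tc, Pc, Zc):
    # Rackett: Vm = (R*Tc/Pc) * Zc**(1 + (1 - T/Tc)**(2/7))
    return R * Tc / Pc * Zc ** (1.0 + (1.0 - T / Tc) ** (2.0 / 7.0))

# water-like inputs: Tc=647.1 K, Pc=22.06e6 Pa, Zc=0.229
print(rackett_sketch(300.0, 647.1, 22.06e6, 0.229))  # roughly 1.6e-5 m^3/mol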
pandas-dev/pandas
pandas/core/nanops.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L616-L679
def nanvar(values, axis=None, skipna=True, ddof=1, mask=None): """ Compute the variance along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanvar(s) 1.0 """ values = com.values_from_object(values) dtype = values.dtype if mask is None: mask = isna(values) if is_any_int_dtype(values): values = values.astype('f8') values[mask] = np.nan if is_float_dtype(values): count, d = _get_counts_nanvar(mask, axis, ddof, values.dtype) else: count, d = _get_counts_nanvar(mask, axis, ddof) if skipna: values = values.copy() np.putmask(values, mask, 0) # xref GH10242 # Compute variance via two-pass algorithm, which is stable against # cancellation errors and relatively accurate for small numbers of # observations. # # See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count if axis is not None: avg = np.expand_dims(avg, axis) sqr = _ensure_numeric((avg - values) ** 2) np.putmask(sqr, mask, 0) result = sqr.sum(axis=axis, dtype=np.float64) / d # Return variance as np.float64 (the datatype used in the accumulator), # unless we were dealing with a float array, in which case use the same # precision as the original values array. if is_float_dtype(dtype): result = result.astype(dtype) return _wrap_results(result, values.dtype)
[ "def", "nanvar", "(", "values", ",", "axis", "=", "None", ",", "skipna", "=", "True", ",", "ddof", "=", "1", ",", "mask", "=", "None", ")", ":", "values", "=", "com", ".", "values_from_object", "(", "values", ")", "dtype", "=", "values", ".", "dtype", "if", "mask", "is", "None", ":", "mask", "=", "isna", "(", "values", ")", "if", "is_any_int_dtype", "(", "values", ")", ":", "values", "=", "values", ".", "astype", "(", "'f8'", ")", "values", "[", "mask", "]", "=", "np", ".", "nan", "if", "is_float_dtype", "(", "values", ")", ":", "count", ",", "d", "=", "_get_counts_nanvar", "(", "mask", ",", "axis", ",", "ddof", ",", "values", ".", "dtype", ")", "else", ":", "count", ",", "d", "=", "_get_counts_nanvar", "(", "mask", ",", "axis", ",", "ddof", ")", "if", "skipna", ":", "values", "=", "values", ".", "copy", "(", ")", "np", ".", "putmask", "(", "values", ",", "mask", ",", "0", ")", "# xref GH10242", "# Compute variance via two-pass algorithm, which is stable against", "# cancellation errors and relatively accurate for small numbers of", "# observations.", "#", "# See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance", "avg", "=", "_ensure_numeric", "(", "values", ".", "sum", "(", "axis", "=", "axis", ",", "dtype", "=", "np", ".", "float64", ")", ")", "/", "count", "if", "axis", "is", "not", "None", ":", "avg", "=", "np", ".", "expand_dims", "(", "avg", ",", "axis", ")", "sqr", "=", "_ensure_numeric", "(", "(", "avg", "-", "values", ")", "**", "2", ")", "np", ".", "putmask", "(", "sqr", ",", "mask", ",", "0", ")", "result", "=", "sqr", ".", "sum", "(", "axis", "=", "axis", ",", "dtype", "=", "np", ".", "float64", ")", "/", "d", "# Return variance as np.float64 (the datatype used in the accumulator),", "# unless we were dealing with a float array, in which case use the same", "# precision as the original values array.", "if", "is_float_dtype", "(", "dtype", ")", ":", "result", "=", "result", ".", "astype", "(", "dtype", ")", "return", "_wrap_results", "(", "result", ",", "values", ".", "dtype", ")" ]
Compute the variance along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanvar(s) 1.0
[ "Compute", "the", "variance", "along", "given", "axis", "while", "ignoring", "NaNs" ]
python
train
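A stripped-down sketch of the two-pass NaN-aware variance used above, for the 1-D skipna case only (no axis handling, no dtype bookkeeping):

import numpy as np

def nanvar_sketch(values, ddof=1):
    mask = np.isnan(values)
    count = (~mask).sum()
    vals = np.where(mask, 0.0, values)
    avg = vals.sum() / count                      # first pass: the mean
    sqr = np.where(mask, 0.0, (vals - avg) ** 2)  # second pass: deviations
    return sqr.sum() / (count - ddof)

print(nanvar_sketch(np.array([1.0, np.nan, 2.0, 3.0])))  # 1.0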
ARMmbed/mbed-cloud-sdk-python
scripts/generate_ci_config.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/scripts/generate_ci_config.py#L322-L363
def new_deploy(py_ver: PyVer, release_target: ReleaseTarget): """Job for deploying package to pypi""" cache_file = f'app_{py_ver.name}.tar' template = yaml.safe_load(f""" machine: image: circleci/classic:201710-02 steps: - attach_workspace: at: {cache_dir} - checkout - run: name: Install prerequisites command: sudo pip install awscli - run: name: Load docker image layer cache command: docker load -i {cache_dir}/{cache_file} - run: name: Start a named container command: docker run --name=SDK {py_ver.tag} - run: name: Extract the documentation command: 'docker cp SDK:/build/built_docs ./built_docs' - run: name: Upload the documentation command: >- aws s3 sync --delete --cache-control max-age=3600 built_docs s3://mbed-cloud-sdk-python - run: name: Tag and release command: >- docker run --env-file=scripts/templates/envvars.env -e TWINE_REPOSITORY={release_target.twine_repo} {py_ver.tag} sh -c "source .venv/bin/activate && python scripts/tag_and_release.py --mode={release_target.mode}" - run: name: Start the release party! command: >- docker run --env-file=scripts/templates/envvars.env {py_ver.tag} sh -c "source .venv/bin/activate && python scripts/notify.py" """) return deploy_name(py_ver, release_target), template
[ "def", "new_deploy", "(", "py_ver", ":", "PyVer", ",", "release_target", ":", "ReleaseTarget", ")", ":", "cache_file", "=", "f'app_{py_ver.name}.tar'", "template", "=", "yaml", ".", "safe_load", "(", "f\"\"\"\n machine:\n image: circleci/classic:201710-02\n steps:\n - attach_workspace:\n at: {cache_dir}\n - checkout\n - run:\n name: Install prerequisites\n command: sudo pip install awscli\n - run:\n name: Load docker image layer cache\n command: docker load -i {cache_dir}/{cache_file}\n - run:\n name: Start a named container\n command: docker run --name=SDK {py_ver.tag}\n - run:\n name: Extract the documentation\n command: 'docker cp SDK:/build/built_docs ./built_docs'\n - run:\n name: Upload the documentation\n command: >-\n aws s3 sync --delete --cache-control\n max-age=3600 built_docs s3://mbed-cloud-sdk-python\n - run:\n name: Tag and release\n command: >-\n docker run --env-file=scripts/templates/envvars.env\n -e TWINE_REPOSITORY={release_target.twine_repo}\n {py_ver.tag}\n sh -c \"source .venv/bin/activate && python scripts/tag_and_release.py --mode={release_target.mode}\"\n - run:\n name: Start the release party!\n command: >-\n docker run --env-file=scripts/templates/envvars.env\n {py_ver.tag}\n sh -c \"source .venv/bin/activate && python scripts/notify.py\"\n \"\"\"", ")", "return", "deploy_name", "(", "py_ver", ",", "release_target", ")", ",", "template" ]
Job for deploying package to pypi
[ "Job", "for", "deploying", "package", "to", "pypi" ]
python
train
saltstack/salt
salt/utils/master.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/master.py#L371-L397
def get_minion_grains(self): ''' Get grains data for the targeted minions, either by fetching the cached minion data on the master, or by fetching the grains directly on the minion. By default, this function tries hard to get the grains data: - Try to get the cached minion grains if the master has minion_data_cache: True - If the grains data for the minion is cached, use it. - If there is no cached grains data for a minion, then try to get the minion grains directly from the minion. ''' minion_grains = {} minion_ids = self._tgt_to_list() if not minion_ids: return {} if any(arg for arg in [self.use_cached_grains, self.grains_fallback]): log.debug('Getting cached minion data.') cached_minion_grains, cached_minion_pillars = self._get_cached_minion_data(*minion_ids) else: cached_minion_grains = {} log.debug('Getting minion grain data for: %s', minion_ids) minion_grains = self._get_minion_grains( *minion_ids, cached_grains=cached_minion_grains) return minion_grains
[ "def", "get_minion_grains", "(", "self", ")", ":", "minion_grains", "=", "{", "}", "minion_ids", "=", "self", ".", "_tgt_to_list", "(", ")", "if", "not", "minion_ids", ":", "return", "{", "}", "if", "any", "(", "arg", "for", "arg", "in", "[", "self", ".", "use_cached_grains", ",", "self", ".", "grains_fallback", "]", ")", ":", "log", ".", "debug", "(", "'Getting cached minion data.'", ")", "cached_minion_grains", ",", "cached_minion_pillars", "=", "self", ".", "_get_cached_minion_data", "(", "*", "minion_ids", ")", "else", ":", "cached_minion_grains", "=", "{", "}", "log", ".", "debug", "(", "'Getting minion grain data for: %s'", ",", "minion_ids", ")", "minion_grains", "=", "self", ".", "_get_minion_grains", "(", "*", "minion_ids", ",", "cached_grains", "=", "cached_minion_grains", ")", "return", "minion_grains" ]
Get grains data for the targeted minions, either by fetching the cached minion data on the master, or by fetching the grains directly on the minion. By default, this function tries hard to get the grains data: - Try to get the cached minion grains if the master has minion_data_cache: True - If the grains data for the minion is cached, use it. - If there is no cached grains data for a minion, then try to get the minion grains directly from the minion.
[ "Get", "grains", "data", "for", "the", "targeted", "minions", "either", "by", "fetching", "the", "cached", "minion", "data", "on", "the", "master", "or", "by", "fetching", "the", "grains", "directly", "on", "the", "minion", "." ]
python
train
mdickinson/bigfloat
bigfloat/core.py
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L2556-L2569
def copysign(x, y, context=None): """ Return a new BigFloat object with the magnitude of x but the sign of y. """ return _apply_function_in_current_context( BigFloat, mpfr.mpfr_copysign, ( BigFloat._implicit_convert(x), BigFloat._implicit_convert(y), ), context, )
[ "def", "copysign", "(", "x", ",", "y", ",", "context", "=", "None", ")", ":", "return", "_apply_function_in_current_context", "(", "BigFloat", ",", "mpfr", ".", "mpfr_copysign", ",", "(", "BigFloat", ".", "_implicit_convert", "(", "x", ")", ",", "BigFloat", ".", "_implicit_convert", "(", "y", ")", ",", ")", ",", "context", ",", ")" ]
Return a new BigFloat object with the magnitude of x but the sign of y.
[ "Return", "a", "new", "BigFloat", "object", "with", "the", "magnitude", "of", "x", "but", "the", "sign", "of", "y", "." ]
python
train
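The semantics mirror the standard library's math.copysign, which makes a quick mental model for the record above (the BigFloat version additionally returns a BigFloat under the active precision context):

import math

print(math.copysign(3.0, -1.0))  # -3.0: magnitude of x, sign of y
print(math.copysign(-3.0, 1.0))  #  3.0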
Locu/chronology
jia/scheduler/views.py
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/jia/scheduler/views.py#L18-L57
def schedule(): """HTTP endpoint for scheduling tasks If a task with the same code already exists, the one with the shorter interval will be made active. """ code = request.form['code'] interval = int(request.form['interval']) task_id = binascii.b2a_hex(os.urandom(5)) new_task = Task(id=task_id) new_task.active = True new_task.code = code new_task.interval = interval # TODO(derek): Assert there is only one other_task other_task = Task.query.filter_by(code=code, active=True).first() if other_task: if other_task.interval <= new_task.interval: new_task.active = False else: other_task.active = False other_task.save() current_app.scheduler.cancel(other_task.id) if new_task.active: print current_app.scheduler.schedule current_app.scheduler.schedule({ 'id': task_id, 'code': new_task.code, 'interval': new_task.interval }) new_task.save() return json.dumps({ 'status': 'success', 'id': task_id, })
[ "def", "schedule", "(", ")", ":", "code", "=", "request", ".", "form", "[", "'code'", "]", "interval", "=", "int", "(", "request", ".", "form", "[", "'interval'", "]", ")", "task_id", "=", "binascii", ".", "b2a_hex", "(", "os", ".", "urandom", "(", "5", ")", ")", "new_task", "=", "Task", "(", "id", "=", "task_id", ")", "new_task", ".", "active", "=", "True", "new_task", ".", "code", "=", "code", "new_task", ".", "interval", "=", "interval", "# TODO(derek): Assert there is only one other_task", "other_task", "=", "Task", ".", "query", ".", "filter_by", "(", "code", "=", "code", ",", "active", "=", "True", ")", ".", "first", "(", ")", "if", "other_task", ":", "if", "other_task", ".", "interval", "<=", "new_task", ".", "interval", ":", "new_task", ".", "active", "=", "False", "else", ":", "other_task", ".", "active", "=", "False", "other_task", ".", "save", "(", ")", "current_app", ".", "scheduler", ".", "cancel", "(", "other_task", ".", "id", ")", "if", "new_task", ".", "active", ":", "print", "current_app", ".", "scheduler", ".", "schedule", "current_app", ".", "scheduler", ".", "schedule", "(", "{", "'id'", ":", "task_id", ",", "'code'", ":", "new_task", ".", "code", ",", "'interval'", ":", "new_task", ".", "interval", "}", ")", "new_task", ".", "save", "(", ")", "return", "json", ".", "dumps", "(", "{", "'status'", ":", "'success'", ",", "'id'", ":", "task_id", ",", "}", ")" ]
HTTP endpoint for scheduling tasks If a task with the same code already exists, the one with the shorter interval will be made active.
[ "HTTP", "endpoint", "for", "scheduling", "tasks" ]
python
train
Jajcus/pyxmpp2
pyxmpp2/ext/muc/muc.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/muc/muc.py#L251-L262
def error(self,stanza): """ Called when an error stanza is received. :Parameters: - `stanza`: the stanza received. :Types: - `stanza`: `pyxmpp.stanza.Stanza` """ err=stanza.get_error() self.__logger.debug("Error from: %r Condition: %r" % (stanza.get_from(),err.get_condition))
[ "def", "error", "(", "self", ",", "stanza", ")", ":", "err", "=", "stanza", ".", "get_error", "(", ")", "self", ".", "__logger", ".", "debug", "(", "\"Error from: %r Condition: %r\"", "%", "(", "stanza", ".", "get_from", "(", ")", ",", "err", ".", "get_condition", ")", ")" ]
Called when an error stanza is received. :Parameters: - `stanza`: the stanza received. :Types: - `stanza`: `pyxmpp.stanza.Stanza`
[ "Called", "when", "an", "error", "stanza", "is", "received", "." ]
python
valid
tanghaibao/jcvi
jcvi/graphics/grabseeds.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/graphics/grabseeds.py#L125-L200
def calibrate(args):
    """
    %prog calibrate calibrate.JPG boxsize

    Calibrate pixel-cm ratio and color adjustment.
    - `calibrate.JPG` is the photo containing a colorchecker
    - `boxsize` is the measured size for the boxes on printed colorchecker,
    in square centimeter (cm2) units
    """
    xargs = args[2:]
    p = OptionParser(calibrate.__doc__)
    opts, args, iopts = add_seeds_options(p, args)
    if len(args) != 2:
        sys.exit(not p.print_help())

    imagefile, boxsize = args
    boxsize = float(boxsize)

    # Read in color checker
    colorcheckerfile = op.join(datadir, "colorchecker.txt")
    colorchecker = []
    expected = 0
    for row in open(colorcheckerfile):
        boxes = row.split()
        colorchecker.append(boxes)
        expected += len(boxes)

    folder = op.split(imagefile)[0]
    objects = seeds([imagefile, "--outdir={0}".format(folder)] + xargs)
    nseeds = len(objects)
    logging.debug("Found {0} boxes (expected={1})".format(nseeds, expected))
    assert expected - 4 <= nseeds <= expected + 4, \
        "Number of boxes drastically different from {0}".format(expected)

    # Calculate pixel-cm ratio
    boxes = [t.area for t in objects]
    reject = reject_outliers(boxes)
    retained_boxes = [b for r, b in zip(reject, boxes) if not r]
    mbox = np.median(retained_boxes)  # in pixels
    pixel_cm_ratio = (mbox / boxsize) ** .5
    logging.debug("Median box size: {0} pixels. Measured box size: {1} cm2".\
                format(mbox, boxsize))
    logging.debug("Pixel-cm ratio: {0}".format(pixel_cm_ratio))

    xs = [t.x for t in objects]
    ys = [t.y for t in objects]
    idx_xs = get_kmeans(xs, 6)
    idx_ys = get_kmeans(ys, 4)
    for xi, yi, s in zip(idx_xs, idx_ys, objects):
        s.rank = (yi, xi)

    objects.sort(key=lambda x: x.rank)

    colormap = []
    for s in objects:
        x, y = s.rank
        observed, expected = s.rgb, rgb_to_triplet(colorchecker[x][y])
        colormap.append((np.array(observed), np.array(expected)))

    # Color transfer
    tr0 = np.eye(3).flatten()
    print("Initial distance:", total_error(tr0, colormap), file=sys.stderr)
    tr = fmin(total_error, tr0, args=(colormap,))
    tr.resize((3, 3))
    print("RGB linear transform:\n", tr, file=sys.stderr)
    calib = {"PixelCMratio": pixel_cm_ratio,
             "RGBtransform": tr.tolist()}

    jsonfile = op.join(folder, "calibrate.json")
    fw = must_open(jsonfile, "w")
    print(json.dumps(calib, indent=4), file=fw)
    fw.close()
    logging.debug("Calibration specs written to `{0}`.".format(jsonfile))

    return jsonfile
[ "def", "calibrate", "(", "args", ")", ":", "xargs", "=", "args", "[", "2", ":", "]", "p", "=", "OptionParser", "(", "calibrate", ".", "__doc__", ")", "opts", ",", "args", ",", "iopts", "=", "add_seeds_options", "(", "p", ",", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "imagefile", ",", "boxsize", "=", "args", "boxsize", "=", "float", "(", "boxsize", ")", "# Read in color checker", "colorcheckerfile", "=", "op", ".", "join", "(", "datadir", ",", "\"colorchecker.txt\"", ")", "colorchecker", "=", "[", "]", "expected", "=", "0", "for", "row", "in", "open", "(", "colorcheckerfile", ")", ":", "boxes", "=", "row", ".", "split", "(", ")", "colorchecker", ".", "append", "(", "boxes", ")", "expected", "+=", "len", "(", "boxes", ")", "folder", "=", "op", ".", "split", "(", "imagefile", ")", "[", "0", "]", "objects", "=", "seeds", "(", "[", "imagefile", ",", "\"--outdir={0}\"", ".", "format", "(", "folder", ")", "]", "+", "xargs", ")", "nseeds", "=", "len", "(", "objects", ")", "logging", ".", "debug", "(", "\"Found {0} boxes (expected={1})\"", ".", "format", "(", "nseeds", ",", "expected", ")", ")", "assert", "expected", "-", "4", "<=", "nseeds", "<=", "expected", "+", "4", ",", "\"Number of boxes drastically different from {0}\"", ".", "format", "(", "expected", ")", "# Calculate pixel-cm ratio", "boxes", "=", "[", "t", ".", "area", "for", "t", "in", "objects", "]", "reject", "=", "reject_outliers", "(", "boxes", ")", "retained_boxes", "=", "[", "b", "for", "r", ",", "b", "in", "zip", "(", "reject", ",", "boxes", ")", "if", "not", "r", "]", "mbox", "=", "np", ".", "median", "(", "retained_boxes", ")", "# in pixels", "pixel_cm_ratio", "=", "(", "mbox", "/", "boxsize", ")", "**", ".5", "logging", ".", "debug", "(", "\"Median box size: {0} pixels. 
Measured box size: {1} cm2\"", ".", "format", "(", "mbox", ",", "boxsize", ")", ")", "logging", ".", "debug", "(", "\"Pixel-cm ratio: {0}\"", ".", "format", "(", "pixel_cm_ratio", ")", ")", "xs", "=", "[", "t", ".", "x", "for", "t", "in", "objects", "]", "ys", "=", "[", "t", ".", "y", "for", "t", "in", "objects", "]", "idx_xs", "=", "get_kmeans", "(", "xs", ",", "6", ")", "idx_ys", "=", "get_kmeans", "(", "ys", ",", "4", ")", "for", "xi", ",", "yi", ",", "s", "in", "zip", "(", "idx_xs", ",", "idx_ys", ",", "objects", ")", ":", "s", ".", "rank", "=", "(", "yi", ",", "xi", ")", "objects", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", ".", "rank", ")", "colormap", "=", "[", "]", "for", "s", "in", "objects", ":", "x", ",", "y", "=", "s", ".", "rank", "observed", ",", "expected", "=", "s", ".", "rgb", ",", "rgb_to_triplet", "(", "colorchecker", "[", "x", "]", "[", "y", "]", ")", "colormap", ".", "append", "(", "(", "np", ".", "array", "(", "observed", ")", ",", "np", ".", "array", "(", "expected", ")", ")", ")", "# Color transfer", "tr0", "=", "np", ".", "eye", "(", "3", ")", ".", "flatten", "(", ")", "print", "(", "\"Initial distance:\"", ",", "total_error", "(", "tr0", ",", "colormap", ")", ",", "file", "=", "sys", ".", "stderr", ")", "tr", "=", "fmin", "(", "total_error", ",", "tr0", ",", "args", "=", "(", "colormap", ",", ")", ")", "tr", ".", "resize", "(", "(", "3", ",", "3", ")", ")", "print", "(", "\"RGB linear transform:\\n\"", ",", "tr", ",", "file", "=", "sys", ".", "stderr", ")", "calib", "=", "{", "\"PixelCMratio\"", ":", "pixel_cm_ratio", ",", "\"RGBtransform\"", ":", "tr", ".", "tolist", "(", ")", "}", "jsonfile", "=", "op", ".", "join", "(", "folder", ",", "\"calibrate.json\"", ")", "fw", "=", "must_open", "(", "jsonfile", ",", "\"w\"", ")", "print", "(", "json", ".", "dumps", "(", "calib", ",", "indent", "=", "4", ")", ",", "file", "=", "fw", ")", "fw", ".", "close", "(", ")", "logging", ".", "debug", "(", "\"Calibration specs written to `{0}`.\"", ".", "format", "(", "jsonfile", ")", ")", "return", "jsonfile" ]
%prog calibrate calibrate.JPG boxsize

Calibrate pixel-cm ratio and color adjustment.
- `calibrate.JPG` is the photo containing a colorchecker
- `boxsize` is the measured size for the boxes on printed colorchecker,
in square centimeter (cm2) units
[ "%prog", "calibrate", "calibrate", ".", "JPG", "boxsize" ]
python
train
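The pixel-to-physical calibration above reduces to one line of arithmetic: the median box area in pixels over the known area in cm2, square-rooted to get a linear pixels-per-cm factor. A runnable sketch with made-up box areas:

import numpy as np

def pixel_cm_ratio_sketch(box_areas_px, boxsize_cm2):
    mbox = np.median(box_areas_px)      # robust to a few bad detections
    return (mbox / boxsize_cm2) ** 0.5  # area ratio -> linear ratio

print(pixel_cm_ratio_sketch([10000, 10100, 9900], 4.0))  # 50.0 px per cm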
BerkeleyAutomation/perception
perception/image.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/image.py#L3249-L3268
def mask_binary(self, binary_im): """Create a new image by zeroing out data at locations where binary_im == 0.0. Parameters ---------- binary_im : :obj:`BinaryImage` A BinaryImage of the same size as this image, with pixel values of either zero or one. Wherever this image has zero pixels, we'll zero out the pixels of the new image. Returns ------- :obj:`Image` A new Image of the same type, masked by the given binary image. """ data = np.copy(self._data) ind = np.where(binary_im.data == 0) data[ind[0], ind[1], :] = 0 return SegmentationImage(data, self._frame)
[ "def", "mask_binary", "(", "self", ",", "binary_im", ")", ":", "data", "=", "np", ".", "copy", "(", "self", ".", "_data", ")", "ind", "=", "np", ".", "where", "(", "binary_im", ".", "data", "==", "0", ")", "data", "[", "ind", "[", "0", "]", ",", "ind", "[", "1", "]", ",", ":", "]", "=", "0", "return", "SegmentationImage", "(", "data", ",", "self", ".", "_frame", ")" ]
Create a new image by zeroing out data at locations where binary_im == 0.0. Parameters ---------- binary_im : :obj:`BinaryImage` A BinaryImage of the same size as this image, with pixel values of either zero or one. Wherever this image has zero pixels, we'll zero out the pixels of the new image. Returns ------- :obj:`Image` A new Image of the same type, masked by the given binary image.
[ "Create", "a", "new", "image", "by", "zeroing", "out", "data", "at", "locations", "where", "binary_im", "==", "0", ".", "0", "." ]
python
train
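The masking above is plain boolean indexing on the first two axes. A self-contained numpy sketch, returning a bare array rather than a SegmentationImage:

import numpy as np

def mask_binary_sketch(image, binary):
    out = image.copy()
    out[binary == 0] = 0  # zero every pixel where the mask is zero
    return out

img = np.arange(12).reshape(2, 2, 3)
mask = np.array([[1, 0], [0, 1]])
print(mask_binary_sketch(img, mask))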
romanz/trezor-agent
libagent/gpg/agent.py
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/agent.py#L129-L143
def handle_getinfo(self, conn, args): """Handle some of the GETINFO messages.""" result = None if args[0] == b'version': result = self.version elif args[0] == b's2k_count': # Use highest number of S2K iterations. # https://www.gnupg.org/documentation/manuals/gnupg/OpenPGP-Options.html # https://tools.ietf.org/html/rfc4880#section-3.7.1.3 result = '{}'.format(64 << 20).encode('ascii') else: log.warning('Unknown GETINFO command: %s', args) if result: keyring.sendline(conn, b'D ' + result)
[ "def", "handle_getinfo", "(", "self", ",", "conn", ",", "args", ")", ":", "result", "=", "None", "if", "args", "[", "0", "]", "==", "b'version'", ":", "result", "=", "self", ".", "version", "elif", "args", "[", "0", "]", "==", "b's2k_count'", ":", "# Use highest number of S2K iterations.", "# https://www.gnupg.org/documentation/manuals/gnupg/OpenPGP-Options.html", "# https://tools.ietf.org/html/rfc4880#section-3.7.1.3", "result", "=", "'{}'", ".", "format", "(", "64", "<<", "20", ")", ".", "encode", "(", "'ascii'", ")", "else", ":", "log", ".", "warning", "(", "'Unknown GETINFO command: %s'", ",", "args", ")", "if", "result", ":", "keyring", ".", "sendline", "(", "conn", ",", "b'D '", "+", "result", ")" ]
Handle some of the GETINFO messages.
[ "Handle", "some", "of", "the", "GETINFO", "messages", "." ]
python
train
kata198/indexedredis
IndexedRedis/fields/chain.py
https://github.com/kata198/indexedredis/blob/f9c85adcf5218dac25acb06eedc63fc2950816fa/IndexedRedis/fields/chain.py#L81-L92
def _toStorage(self, value): ''' _toStorage - Convert the value to a string representation for storage. @param value - The value of the item to convert @return A string value suitable for storing. ''' for chainedField in self.chainedFields: value = chainedField.toStorage(value) return value
[ "def", "_toStorage", "(", "self", ",", "value", ")", ":", "for", "chainedField", "in", "self", ".", "chainedFields", ":", "value", "=", "chainedField", ".", "toStorage", "(", "value", ")", "return", "value" ]
_toStorage - Convert the value to a string representation for storage. @param value - The value of the item to convert @return A string value suitable for storing.
[ "_toStorage", "-", "Convert", "the", "value", "to", "a", "string", "representation", "for", "storage", "." ]
python
valid
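The chained conversion above is a left fold: each field's toStorage feeds the next. A sketch with plain callables standing in for the field objects:

from functools import reduce

def chain_to_storage_sketch(value, converters):
    # apply each conversion in order, threading the result through
    return reduce(lambda v, f: f(v), converters, value)

print(chain_to_storage_sketch(42, [str, str.encode]))  # b'42'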
cltk/cltk
cltk/text_reuse/automata.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/text_reuse/automata.py#L240-L257
def complete_automaton(self): """ Adds missing transition states such that δ(q, u) is defined for every state q and any u ∈ S """ self.term_state = object() self.Q.add(self.term_state) for tv in self.Q: for u in self.S: try: self.transition[tv][u] except: self.add_transition(tv, u, self.term_state) for u in self.S: self.add_transition(self.term_state, u, self.term_state)
[ "def", "complete_automaton", "(", "self", ")", ":", "self", ".", "term_state", "=", "object", "(", ")", "self", ".", "Q", ".", "add", "(", "self", ".", "term_state", ")", "for", "tv", "in", "self", ".", "Q", ":", "for", "u", "in", "self", ".", "S", ":", "try", ":", "self", ".", "transition", "[", "tv", "]", "[", "u", "]", "except", ":", "self", ".", "add_transition", "(", "tv", ",", "u", ",", "self", ".", "term_state", ")", "for", "u", "in", "self", ".", "S", ":", "self", ".", "add_transition", "(", "self", ".", "term_state", ",", "u", ",", "self", ".", "term_state", ")" ]
Adds missing transition states such that δ(q, u) is defined for every state q and any u ∈ S
[ "Adds", "missing", "transition", "states", "such", "that", "δ", "(", "q", "u", ")", "is", "defined", "for", "every", "state", "q", "and", "any", "u", "∈", "S" ]
python
train
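A compact sketch of the same completion idea on a dict-of-dicts transition table: add a sink state and route every undefined (state, symbol) pair to it (the names here are illustrative, not the cltk API):

def complete_sketch(states, alphabet, transition):
    sink = object()
    states = set(states) | {sink}
    for q in states:
        row = transition.setdefault(q, {})
        for u in alphabet:
            row.setdefault(u, sink)  # only fill in the missing entries
    return states, transition

states, trans = complete_sketch({'q0'}, {'a', 'b'}, {'q0': {'a': 'q0'}})
print(trans['q0']['a'], len(states))  # q0 2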
stefankoegl/kdtree
kdtree.py
https://github.com/stefankoegl/kdtree/blob/587edc7056d7735177ad56a84ad5abccdea91693/kdtree.py#L576-L611
def create(point_list=None, dimensions=None, axis=0, sel_axis=None): """ Creates a kd-tree from a list of points All points in the list must be of the same dimensionality. If no point_list is given, an empty tree is created. The number of dimensions has to be given instead. If both a point_list and dimensions are given, the numbers must agree. Axis is the axis on which the root-node should split. sel_axis(axis) is used when creating subnodes of a node. It receives the axis of the parent node and returns the axis of the child node. """ if not point_list and not dimensions: raise ValueError('either point_list or dimensions must be provided') elif point_list: dimensions = check_dimensionality(point_list, dimensions) # by default cycle through the axis sel_axis = sel_axis or (lambda prev_axis: (prev_axis+1) % dimensions) if not point_list: return KDNode(sel_axis=sel_axis, axis=axis, dimensions=dimensions) # Sort point list and choose median as pivot element point_list = list(point_list) point_list.sort(key=lambda point: point[axis]) median = len(point_list) // 2 loc = point_list[median] left = create(point_list[:median], dimensions, sel_axis(axis)) right = create(point_list[median + 1:], dimensions, sel_axis(axis)) return KDNode(loc, left, right, axis=axis, sel_axis=sel_axis, dimensions=dimensions)
[ "def", "create", "(", "point_list", "=", "None", ",", "dimensions", "=", "None", ",", "axis", "=", "0", ",", "sel_axis", "=", "None", ")", ":", "if", "not", "point_list", "and", "not", "dimensions", ":", "raise", "ValueError", "(", "'either point_list or dimensions must be provided'", ")", "elif", "point_list", ":", "dimensions", "=", "check_dimensionality", "(", "point_list", ",", "dimensions", ")", "# by default cycle through the axis", "sel_axis", "=", "sel_axis", "or", "(", "lambda", "prev_axis", ":", "(", "prev_axis", "+", "1", ")", "%", "dimensions", ")", "if", "not", "point_list", ":", "return", "KDNode", "(", "sel_axis", "=", "sel_axis", ",", "axis", "=", "axis", ",", "dimensions", "=", "dimensions", ")", "# Sort point list and choose median as pivot element", "point_list", "=", "list", "(", "point_list", ")", "point_list", ".", "sort", "(", "key", "=", "lambda", "point", ":", "point", "[", "axis", "]", ")", "median", "=", "len", "(", "point_list", ")", "//", "2", "loc", "=", "point_list", "[", "median", "]", "left", "=", "create", "(", "point_list", "[", ":", "median", "]", ",", "dimensions", ",", "sel_axis", "(", "axis", ")", ")", "right", "=", "create", "(", "point_list", "[", "median", "+", "1", ":", "]", ",", "dimensions", ",", "sel_axis", "(", "axis", ")", ")", "return", "KDNode", "(", "loc", ",", "left", ",", "right", ",", "axis", "=", "axis", ",", "sel_axis", "=", "sel_axis", ",", "dimensions", "=", "dimensions", ")" ]
Creates a kd-tree from a list of points All points in the list must be of the same dimensionality. If no point_list is given, an empty tree is created. The number of dimensions has to be given instead. If both a point_list and dimensions are given, the numbers must agree. Axis is the axis on which the root-node should split. sel_axis(axis) is used when creating subnodes of a node. It receives the axis of the parent node and returns the axis of the child node.
[ "Creates", "a", "kd", "-", "tree", "from", "a", "list", "of", "points" ]
python
train
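Usage of create() above is direct; with the three points below sorted on axis 0, the root holds the median point (3, 4). The attribute names on the returned node (axis, data) are assumptions about the library's KDNode:

import kdtree  # assuming the module above is importable under this name

tree = kdtree.create([(1, 2), (3, 4), (5, 6)])  # root splits on axis 0
empty = kdtree.create(dimensions=2)             # empty tree needs dimensions
print(tree.axis, tree.data)                     # 0 (3, 4): the median pivot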
ergo/ziggurat_foundations
ziggurat_foundations/models/services/resource_tree_postgres.py
https://github.com/ergo/ziggurat_foundations/blob/9eeec894d08e8d7defa60ddc04b63f69cd4cbeba/ziggurat_foundations/models/services/resource_tree_postgres.py#L298-L315
def shift_ordering_up(cls, parent_id, position, db_session=None, *args, **kwargs): """ Shifts ordering to "open a gap" for node insertion, begins the shift from given position :param parent_id: :param position: :param db_session: :return: """ db_session = get_db_session(db_session) query = db_session.query(cls.model) query = query.filter(cls.model.parent_id == parent_id) query = query.filter(cls.model.ordering >= position) query.update( {cls.model.ordering: cls.model.ordering + 1}, synchronize_session=False ) db_session.flush()
[ "def", "shift_ordering_up", "(", "cls", ",", "parent_id", ",", "position", ",", "db_session", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "db_session", "=", "get_db_session", "(", "db_session", ")", "query", "=", "db_session", ".", "query", "(", "cls", ".", "model", ")", "query", "=", "query", ".", "filter", "(", "cls", ".", "model", ".", "parent_id", "==", "parent_id", ")", "query", "=", "query", ".", "filter", "(", "cls", ".", "model", ".", "ordering", ">=", "position", ")", "query", ".", "update", "(", "{", "cls", ".", "model", ".", "ordering", ":", "cls", ".", "model", ".", "ordering", "+", "1", "}", ",", "synchronize_session", "=", "False", ")", "db_session", ".", "flush", "(", ")" ]
Shifts ordering to "open a gap" for node insertion, begins the shift from given position :param parent_id: :param position: :param db_session: :return:
[ "Shifts", "ordering", "to", "open", "a", "gap", "for", "node", "insertion", "begins", "the", "shift", "from", "given", "position" ]
python
train
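The query above compiles to a single bulk UPDATE. An in-memory sketch of the same shift, with the equivalent SQL in a comment (table and column names in the comment are illustrative):

def shift_ordering_up_sketch(rows, position):
    # UPDATE resources SET ordering = ordering + 1
    #  WHERE parent_id = :pid AND ordering >= :position
    for row in rows:
        if row['ordering'] >= position:
            row['ordering'] += 1
    return rows

rows = [{'ordering': 1}, {'ordering': 2}, {'ordering': 3}]
print(shift_ordering_up_sketch(rows, 2))  # orderings become 1, 3, 4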
google/grr
grr/server/grr_response_server/databases/mysql_flows.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_flows.py#L1240-L1268
def WriteFlowResults(self, results, cursor=None): """Writes flow results for a given flow.""" query = ("INSERT INTO flow_results " "(client_id, flow_id, hunt_id, timestamp, payload, type, tag) " "VALUES ") templates = [] args = [] for r in results: templates.append("(%s, %s, %s, FROM_UNIXTIME(%s), %s, %s, %s)") args.append(db_utils.ClientIDToInt(r.client_id)) args.append(db_utils.FlowIDToInt(r.flow_id)) if r.hunt_id: args.append(db_utils.HuntIDToInt(r.hunt_id)) else: args.append(0) args.append( mysql_utils.RDFDatetimeToTimestamp(rdfvalue.RDFDatetime.Now())) args.append(r.payload.SerializeToString()) args.append(compatibility.GetName(r.payload.__class__)) args.append(r.tag) query += ",".join(templates) try: cursor.execute(query, args) except MySQLdb.IntegrityError as e: raise db.AtLeastOneUnknownFlowError( [(r.client_id, r.flow_id) for r in results], cause=e)
[ "def", "WriteFlowResults", "(", "self", ",", "results", ",", "cursor", "=", "None", ")", ":", "query", "=", "(", "\"INSERT INTO flow_results \"", "\"(client_id, flow_id, hunt_id, timestamp, payload, type, tag) \"", "\"VALUES \"", ")", "templates", "=", "[", "]", "args", "=", "[", "]", "for", "r", "in", "results", ":", "templates", ".", "append", "(", "\"(%s, %s, %s, FROM_UNIXTIME(%s), %s, %s, %s)\"", ")", "args", ".", "append", "(", "db_utils", ".", "ClientIDToInt", "(", "r", ".", "client_id", ")", ")", "args", ".", "append", "(", "db_utils", ".", "FlowIDToInt", "(", "r", ".", "flow_id", ")", ")", "if", "r", ".", "hunt_id", ":", "args", ".", "append", "(", "db_utils", ".", "HuntIDToInt", "(", "r", ".", "hunt_id", ")", ")", "else", ":", "args", ".", "append", "(", "0", ")", "args", ".", "append", "(", "mysql_utils", ".", "RDFDatetimeToTimestamp", "(", "rdfvalue", ".", "RDFDatetime", ".", "Now", "(", ")", ")", ")", "args", ".", "append", "(", "r", ".", "payload", ".", "SerializeToString", "(", ")", ")", "args", ".", "append", "(", "compatibility", ".", "GetName", "(", "r", ".", "payload", ".", "__class__", ")", ")", "args", ".", "append", "(", "r", ".", "tag", ")", "query", "+=", "\",\"", ".", "join", "(", "templates", ")", "try", ":", "cursor", ".", "execute", "(", "query", ",", "args", ")", "except", "MySQLdb", ".", "IntegrityError", "as", "e", ":", "raise", "db", ".", "AtLeastOneUnknownFlowError", "(", "[", "(", "r", ".", "client_id", ",", "r", ".", "flow_id", ")", "for", "r", "in", "results", "]", ",", "cause", "=", "e", ")" ]
Writes flow results for a given flow.
[ "Writes", "flow", "results", "for", "a", "given", "flow", "." ]
python
train
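The INSERT above is assembled by repeating a parenthesized placeholder group per row. A sketch of just that placeholder construction:

def values_clause_sketch(n_rows, n_cols):
    # build "(%s, %s, ...), (%s, ...)" for a batched parameterized INSERT
    row = "(" + ", ".join(["%s"] * n_cols) + ")"
    return ", ".join([row] * n_rows)

print(values_clause_sketch(2, 3))  # (%s, %s, %s), (%s, %s, %s)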
DistrictDataLabs/yellowbrick
yellowbrick/style/palettes.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/style/palettes.py#L418-L423
def as_hex(self): """ Return a color palette with hex codes instead of RGB values. """ hex = [mpl.colors.rgb2hex(rgb) for rgb in self] return ColorPalette(hex)
[ "def", "as_hex", "(", "self", ")", ":", "hex", "=", "[", "mpl", ".", "colors", ".", "rgb2hex", "(", "rgb", ")", "for", "rgb", "in", "self", "]", "return", "ColorPalette", "(", "hex", ")" ]
Return a color palette with hex codes instead of RGB values.
[ "Return", "a", "color", "palette", "with", "hex", "codes", "instead", "of", "RGB", "values", "." ]
python
train
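The conversion above leans on matplotlib's rgb2hex, which maps an (r, g, b) triple in [0, 1] to a '#rrggbb' string:

import matplotlib as mpl

print(mpl.colors.rgb2hex((1.0, 0.0, 0.0)))  # '#ff0000'
print(mpl.colors.rgb2hex((0.0, 0.5, 1.0)))  # '#0080ff'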
juicer/juicer
juicer/utils/Remotes.py
https://github.com/juicer/juicer/blob/0c9f0fd59e293d45df6b46e81f675d33221c600d/juicer/utils/Remotes.py#L32-L54
def assemble_remotes(resource): """ Using the specified input resource, assemble a list of rpm URLS. This function will, when given a remote package url, directory index, or a combination of the two in a local input file, do all the work required to turn that input into a list of only remote package URLs. """ resource_type = classify_resource_type(resource) if resource_type is None: juicer.utils.Log.log_debug("Could not classify or find the input resource.") return [] elif resource_type == REMOTE_PKG_TYPE: return [resource] elif resource_type == REMOTE_INDEX_TYPE: return parse_directory_index(resource) elif resource_type == REMOTE_INPUT_FILE_TYPE: # Later on this could examine the excluded data for directory # indexes and iterate over those too. remote_packages, excluded_data = parse_input_file(resource) return remote_packages
[ "def", "assemble_remotes", "(", "resource", ")", ":", "resource_type", "=", "classify_resource_type", "(", "resource", ")", "if", "resource_type", "is", "None", ":", "juicer", ".", "utils", ".", "Log", ".", "log_debug", "(", "\"Could not classify or find the input resource.\"", ")", "return", "[", "]", "elif", "resource_type", "==", "REMOTE_PKG_TYPE", ":", "return", "[", "resource", "]", "elif", "resource_type", "==", "REMOTE_INDEX_TYPE", ":", "return", "parse_directory_index", "(", "resource", ")", "elif", "resource_type", "==", "REMOTE_INPUT_FILE_TYPE", ":", "# Later on this could examine the excluded data for directory", "# indexes and iterate over those too.", "remote_packages", ",", "excluded_data", "=", "parse_input_file", "(", "resource", ")", "return", "remote_packages" ]
Using the specified input resource, assemble a list of rpm URLS. This function will, when given a remote package url, directory index, or a combination of the two in a local input file, do all the work required to turn that input into a list of only remote package URLs.
[ "Using", "the", "specified", "input", "resource", "assemble", "a", "list", "of", "rpm", "URLS", "." ]
python
train
5monkeys/djedi-cms
djedi/templatetags/template.py
https://github.com/5monkeys/djedi-cms/blob/3c077edfda310717b9cdb4f2ee14e78723c94894/djedi/templatetags/template.py#L12-L72
def lazy_tag(self, func=None, takes_context=None, name=None, node_class=None): """ A tag function decorator, injected on Django's template tag library, similar to simple_tag(). The decorated function gets called when the template node tree is built and should return another function, responsible for the output, that later will be called within the rendering phase. Note: if decorated with takes_context=True, context will not be available in the init phase. @register.lazy_tag(takes_context=True) def x(context, a, b, c=True, d=False): # Init phase (no context) def render(context): # Render phase return u'Content of argument a: %s' % a return render """ def dec(func): params, varargs, varkw, defaults = getargspec(func) class SimpleNode(Node): def __init__(self, takes_context, args, kwargs): self.takes_context = takes_context self.args = args self.kwargs = kwargs resolved_args, resolved_kwargs = self.get_resolved_arguments(Context({})) self.resolved_args = resolved_args self.resolved_kwargs = resolved_kwargs self.render_func = func(*resolved_args, **resolved_kwargs) def get_resolved_arguments(self, context): resolved_args = [var.resolve(context) for var in self.args] if self.takes_context: resolved_args = [context] + resolved_args resolved_kwargs = dict((k, v.resolve(context)) for k, v in self.kwargs.items()) return resolved_args, resolved_kwargs def render(self, context): return self.render_func(context) function_name = (name or getattr(func, '_decorated_function', func).__name__) compile_func = partial(generic_tag_compiler, params=params, varargs=varargs, varkw=varkw, defaults=defaults, name=function_name, takes_context=takes_context, node_class=node_class or SimpleNode) compile_func.__doc__ = func.__doc__ self.tag(function_name, compile_func) return func if func is None: return dec # @register.lazy_tag(...) elif callable(func): return dec(func) # @register.lazy_tag else: raise TemplateSyntaxError("Invalid arguments provided to lazy_tag")
[ "def", "lazy_tag", "(", "self", ",", "func", "=", "None", ",", "takes_context", "=", "None", ",", "name", "=", "None", ",", "node_class", "=", "None", ")", ":", "def", "dec", "(", "func", ")", ":", "params", ",", "varargs", ",", "varkw", ",", "defaults", "=", "getargspec", "(", "func", ")", "class", "SimpleNode", "(", "Node", ")", ":", "def", "__init__", "(", "self", ",", "takes_context", ",", "args", ",", "kwargs", ")", ":", "self", ".", "takes_context", "=", "takes_context", "self", ".", "args", "=", "args", "self", ".", "kwargs", "=", "kwargs", "resolved_args", ",", "resolved_kwargs", "=", "self", ".", "get_resolved_arguments", "(", "Context", "(", "{", "}", ")", ")", "self", ".", "resolved_args", "=", "resolved_args", "self", ".", "resolved_kwargs", "=", "resolved_kwargs", "self", ".", "render_func", "=", "func", "(", "*", "resolved_args", ",", "*", "*", "resolved_kwargs", ")", "def", "get_resolved_arguments", "(", "self", ",", "context", ")", ":", "resolved_args", "=", "[", "var", ".", "resolve", "(", "context", ")", "for", "var", "in", "self", ".", "args", "]", "if", "self", ".", "takes_context", ":", "resolved_args", "=", "[", "context", "]", "+", "resolved_args", "resolved_kwargs", "=", "dict", "(", "(", "k", ",", "v", ".", "resolve", "(", "context", ")", ")", "for", "k", ",", "v", "in", "self", ".", "kwargs", ".", "items", "(", ")", ")", "return", "resolved_args", ",", "resolved_kwargs", "def", "render", "(", "self", ",", "context", ")", ":", "return", "self", ".", "render_func", "(", "context", ")", "function_name", "=", "(", "name", "or", "getattr", "(", "func", ",", "'_decorated_function'", ",", "func", ")", ".", "__name__", ")", "compile_func", "=", "partial", "(", "generic_tag_compiler", ",", "params", "=", "params", ",", "varargs", "=", "varargs", ",", "varkw", "=", "varkw", ",", "defaults", "=", "defaults", ",", "name", "=", "function_name", ",", "takes_context", "=", "takes_context", ",", "node_class", "=", "node_class", "or", "SimpleNode", ")", "compile_func", ".", "__doc__", "=", "func", ".", "__doc__", "self", ".", "tag", "(", "function_name", ",", "compile_func", ")", "return", "func", "if", "func", "is", "None", ":", "return", "dec", "# @register.lazy_tag(...)", "elif", "callable", "(", "func", ")", ":", "return", "dec", "(", "func", ")", "# @register.lazy_tag", "else", ":", "raise", "TemplateSyntaxError", "(", "\"Invalid arguments provided to lazy_tag\"", ")" ]
A tag function decorator, injected on Django's template tag library, similar to simple_tag(). The decorated function gets called when the template node tree is built and should return another function, responsible for the output, that later will be called within the rendering phase. Note: if decorated with takes_context=True, context will not be available in the init phase. @register.lazy_tag(takes_context=True) def x(context, a, b, c=True, d=False): # Init phase (no context) def render(context): # Render phase return u'Content of argument a: %s' % a return render
[ "A", "tag", "function", "decorator", "injected", "on", "Django", "s", "template", "tag", "library", "similar", "to", "simple_tag", "()", ".", "The", "decorated", "function", "gets", "called", "when", "the", "template", "node", "tree", "is", "built", "and", "should", "return", "another", "function", "responsible", "for", "the", "output", "that", "later", "will", "be", "called", "within", "the", "rendering", "phase", "." ]
python
train
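A hedged, template-side sketch to complement the docstring example in the lazy_tag record above; the tag library name "my_tags" is illustrative, not from the source:

# Template-side counterpart to the docstring example (a sketch only):
#
#   {% load my_tags %}
#   {% x "alpha" "beta" c=False %}
#
# x() runs once when the node tree is compiled, with its arguments
# resolved against an empty Context (as SimpleNode.__init__ shows);
# the returned render(context) then runs on every template render,
# where the real context is available.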
asifpy/django-crudbuilder
crudbuilder/formset.py
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/formset.py#L31-L49
def get_factory_kwargs(self): """ Returns the keyword arguments for calling the formset factory """ kwargs = {} kwargs.update({ 'can_delete': self.can_delete, 'extra': self.extra, 'exclude': self.exclude, 'fields': self.fields, 'formfield_callback': self.formfield_callback, 'fk_name': self.fk_name, }) if self.formset_class: kwargs['formset'] = self.formset_class if self.child_form: kwargs['form'] = self.child_form return kwargs
[ "def", "get_factory_kwargs", "(", "self", ")", ":", "kwargs", "=", "{", "}", "kwargs", ".", "update", "(", "{", "'can_delete'", ":", "self", ".", "can_delete", ",", "'extra'", ":", "self", ".", "extra", ",", "'exclude'", ":", "self", ".", "exclude", ",", "'fields'", ":", "self", ".", "fields", ",", "'formfield_callback'", ":", "self", ".", "formfield_callback", ",", "'fk_name'", ":", "self", ".", "fk_name", ",", "}", ")", "if", "self", ".", "formset_class", ":", "kwargs", "[", "'formset'", "]", "=", "self", ".", "formset_class", "if", "self", ".", "child_form", ":", "kwargs", "[", "'form'", "]", "=", "self", ".", "child_form", "return", "kwargs" ]
Returns the keyword arguments for calling the formset factory
[ "Returns", "the", "keyword", "arguments", "for", "calling", "the", "formset", "factory" ]
python
train
barrust/mediawiki
mediawiki/mediawikipage.py
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawikipage.py#L181-L200
def html(self): """ str: HTML representation of the page Note: Not settable Warning: This can be slow for very large pages """ if self._html is False: self._html = None query_params = { "prop": "revisions", "rvprop": "content", "rvlimit": 1, "rvparse": "", "titles": self.title, } request = self.mediawiki.wiki_request(query_params) page = request["query"]["pages"][self.pageid] self._html = page["revisions"][0]["*"] return self._html
[ "def", "html", "(", "self", ")", ":", "if", "self", ".", "_html", "is", "False", ":", "self", ".", "_html", "=", "None", "query_params", "=", "{", "\"prop\"", ":", "\"revisions\"", ",", "\"rvprop\"", ":", "\"content\"", ",", "\"rvlimit\"", ":", "1", ",", "\"rvparse\"", ":", "\"\"", ",", "\"titles\"", ":", "self", ".", "title", ",", "}", "request", "=", "self", ".", "mediawiki", ".", "wiki_request", "(", "query_params", ")", "page", "=", "request", "[", "\"query\"", "]", "[", "\"pages\"", "]", "[", "self", ".", "pageid", "]", "self", ".", "_html", "=", "page", "[", "\"revisions\"", "]", "[", "0", "]", "[", "\"*\"", "]", "return", "self", ".", "_html" ]
str: HTML representation of the page Note: Not settable Warning: This can be slow for very large pages
[ "str", ":", "HTML", "representation", "of", "the", "page" ]
python
train
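A hedged usage sketch for the `html` property above, assuming the pymediawiki package this repo provides is installed and a network connection is available:

from mediawiki import MediaWiki

wiki = MediaWiki()                  # defaults to the English Wikipedia API
page = wiki.page("Python (programming language)")
markup = page.html                  # first access issues the revisions query shown above
print(markup[:200])                 # later accesses reuse the cached value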
secdev/scapy
scapy/packet.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/packet.py#L1573-L1585
def bind_layers(lower, upper, __fval=None, **fval): """Bind 2 layers on some specific fields' values. It makes the packet being built # noqa: E501 and dissected when the arguments are present. This functions calls both bind_bottom_up and bind_top_down, with all passed arguments. # noqa: E501 Please have a look at their docs: - help(bind_bottom_up) - help(bind_top_down) """ if __fval is not None: fval.update(__fval) bind_top_down(lower, upper, **fval) bind_bottom_up(lower, upper, **fval)
[ "def", "bind_layers", "(", "lower", ",", "upper", ",", "__fval", "=", "None", ",", "*", "*", "fval", ")", ":", "if", "__fval", "is", "not", "None", ":", "fval", ".", "update", "(", "__fval", ")", "bind_top_down", "(", "lower", ",", "upper", ",", "*", "*", "fval", ")", "bind_bottom_up", "(", "lower", ",", "upper", ",", "*", "*", "fval", ")" ]
Bind 2 layers on some specific fields' values. It makes the packet being built # noqa: E501 and dissected when the arguments are present. This functions calls both bind_bottom_up and bind_top_down, with all passed arguments. # noqa: E501 Please have a look at their docs: - help(bind_bottom_up) - help(bind_top_down)
[ "Bind", "2", "layers", "on", "some", "specific", "fields", "values", ".", "It", "makes", "the", "packet", "being", "built", "#", "noqa", ":", "E501", "and", "dissected", "when", "the", "arguments", "are", "present", "." ]
python
train
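A minimal sketch of bind_layers() in use, binding a hypothetical custom layer to UDP port 9999 (the layer name and port are illustrative):

from scapy.all import IP, UDP, Packet, bind_layers
from scapy.fields import ShortField

class MyProto(Packet):
    name = "MyProto"
    fields_desc = [ShortField("value", 0)]

# One call wires both directions, build (top-down) and dissect
# (bottom-up), exactly as the function body above shows.
bind_layers(UDP, MyProto, dport=9999)

pkt = IP() / UDP(dport=9999) / MyProto(value=7)
assert MyProto in IP(bytes(pkt))    # re-dissection now yields MyProto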
apache/incubator-heron
heron/tools/common/src/python/access/tracker_access.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/common/src/python/access/tracker_access.py#L75-L82
def get_topology_info(*args): """Synced API call to get topology information""" instance = tornado.ioloop.IOLoop.instance() try: return instance.run_sync(lambda: API.get_topology_info(*args)) except Exception: Log.debug(traceback.format_exc()) raise
[ "def", "get_topology_info", "(", "*", "args", ")", ":", "instance", "=", "tornado", ".", "ioloop", ".", "IOLoop", ".", "instance", "(", ")", "try", ":", "return", "instance", ".", "run_sync", "(", "lambda", ":", "API", ".", "get_topology_info", "(", "*", "args", ")", ")", "except", "Exception", ":", "Log", ".", "debug", "(", "traceback", ".", "format_exc", "(", ")", ")", "raise" ]
Synced API call to get topology information
[ "Synced", "API", "call", "to", "get", "topology", "information" ]
python
valid
synw/dataswim
dataswim/charts/seaborn.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/charts/seaborn.py#L14-L26
def residual_(self, label=None, style=None, opts=None): """ Returns a Seaborn models residuals chart """ color, _ = self._get_color_size(style) try: fig = sns.residplot(self.df[self.x], self.df[self.y], lowess=True, color=color) fig = self._set_with_height(fig, opts) return fig except Exception as e: self.err(e, self.residual_, "Can not draw models residuals chart")
[ "def", "residual_", "(", "self", ",", "label", "=", "None", ",", "style", "=", "None", ",", "opts", "=", "None", ")", ":", "color", ",", "_", "=", "self", ".", "_get_color_size", "(", "style", ")", "try", ":", "fig", "=", "sns", ".", "residplot", "(", "self", ".", "df", "[", "self", ".", "x", "]", ",", "self", ".", "df", "[", "self", ".", "y", "]", ",", "lowess", "=", "True", ",", "color", "=", "color", ")", "fig", "=", "self", ".", "_set_with_height", "(", "fig", ",", "opts", ")", "return", "fig", "except", "Exception", "as", "e", ":", "self", ".", "err", "(", "e", ",", "self", ".", "residual_", ",", "\"Can not draw models residuals chart\"", ")" ]
Returns a Seaborn models residuals chart
[ "Returns", "a", "Seaborn", "models", "residuals", "chart" ]
python
train
martinmcbride/pysound
pysound/soundfile.py
https://github.com/martinmcbride/pysound/blob/253c8f712ad475318350e5a8ba21f6fefd7a3de2/pysound/soundfile.py#L9-L23
def save(params, filename, source): ''' Write a sequence of samples as a WAV file Currently a 16 bit mono file ''' writer = wave.open(filename, 'wb'); # Set the WAV file parameters, currently default values writer.setnchannels(1) writer.setsampwidth(2) writer.setframerate(params.sample_rate) data_out = array.array('h') for x in source: data_out.append(int(x * 32766)) writer.writeframes(data_out.tostring()) writer.close()
[ "def", "save", "(", "params", ",", "filename", ",", "source", ")", ":", "writer", "=", "wave", ".", "open", "(", "filename", ",", "'wb'", ")", "# Set the WAV file parameters, currently default values", "writer", ".", "setnchannels", "(", "1", ")", "writer", ".", "setsampwidth", "(", "2", ")", "writer", ".", "setframerate", "(", "params", ".", "sample_rate", ")", "data_out", "=", "array", ".", "array", "(", "'h'", ")", "for", "x", "in", "source", ":", "data_out", ".", "append", "(", "int", "(", "x", "*", "32766", ")", ")", "writer", ".", "writeframes", "(", "data_out", ".", "tostring", "(", ")", ")", "writer", ".", "close", "(", ")" ]
Write a sequence of samples as a WAV file Currently a 16 bit mono file
[ "Write", "a", "sequence", "of", "samples", "as", "a", "WAV", "file", "Currently", "a", "16", "bit", "mono", "file" ]
python
train
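A hedged example of the save() function above writing one second of a 440 Hz sine; SimpleNamespace stands in for whatever params object the library uses, since save() only reads params.sample_rate (note the code uses array.tostring(), so it targets older Pythons):

import math
from types import SimpleNamespace

params = SimpleNamespace(sample_rate=44100)   # stand-in params object
tone = (0.5 * math.sin(2 * math.pi * 440 * n / params.sample_rate)
        for n in range(params.sample_rate))   # floats in [-1, 1]
save(params, "tone.wav", tone)                # scaled to 16-bit mono, per the code above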
vsergeev/python-periphery
periphery/spi.py
https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/spi.py#L131-L175
def transfer(self, data): """Shift out `data` and return shifted in data. Args: data (bytes, bytearray, list): a byte array or list of 8-bit integers to shift out. Returns: bytes, bytearray, list: data shifted in. Raises: SPIError: if an I/O or OS error occurs. TypeError: if `data` type is invalid. ValueError: if data is not valid bytes. """ if not isinstance(data, (bytes, bytearray, list)): raise TypeError("Invalid data type, should be bytes, bytearray, or list.") # Create mutable array try: buf = array.array('B', data) except OverflowError: raise ValueError("Invalid data bytes.") buf_addr, buf_len = buf.buffer_info() # Prepare transfer structure spi_xfer = _CSpiIocTransfer() spi_xfer.tx_buf = buf_addr spi_xfer.rx_buf = buf_addr spi_xfer.len = buf_len # Transfer try: fcntl.ioctl(self._fd, SPI._SPI_IOC_MESSAGE_1, spi_xfer) except OSError as e: raise SPIError(e.errno, "SPI transfer: " + e.strerror) # Return shifted out data with the same type as shifted in data if isinstance(data, bytes): return bytes(bytearray(buf)) elif isinstance(data, bytearray): return bytearray(buf) elif isinstance(data, list): return buf.tolist()
[ "def", "transfer", "(", "self", ",", "data", ")", ":", "if", "not", "isinstance", "(", "data", ",", "(", "bytes", ",", "bytearray", ",", "list", ")", ")", ":", "raise", "TypeError", "(", "\"Invalid data type, should be bytes, bytearray, or list.\"", ")", "# Create mutable array", "try", ":", "buf", "=", "array", ".", "array", "(", "'B'", ",", "data", ")", "except", "OverflowError", ":", "raise", "ValueError", "(", "\"Invalid data bytes.\"", ")", "buf_addr", ",", "buf_len", "=", "buf", ".", "buffer_info", "(", ")", "# Prepare transfer structure", "spi_xfer", "=", "_CSpiIocTransfer", "(", ")", "spi_xfer", ".", "tx_buf", "=", "buf_addr", "spi_xfer", ".", "rx_buf", "=", "buf_addr", "spi_xfer", ".", "len", "=", "buf_len", "# Transfer", "try", ":", "fcntl", ".", "ioctl", "(", "self", ".", "_fd", ",", "SPI", ".", "_SPI_IOC_MESSAGE_1", ",", "spi_xfer", ")", "except", "OSError", "as", "e", ":", "raise", "SPIError", "(", "e", ".", "errno", ",", "\"SPI transfer: \"", "+", "e", ".", "strerror", ")", "# Return shifted out data with the same type as shifted in data", "if", "isinstance", "(", "data", ",", "bytes", ")", ":", "return", "bytes", "(", "bytearray", "(", "buf", ")", ")", "elif", "isinstance", "(", "data", ",", "bytearray", ")", ":", "return", "bytearray", "(", "buf", ")", "elif", "isinstance", "(", "data", ",", "list", ")", ":", "return", "buf", ".", "tolist", "(", ")" ]
Shift out `data` and return shifted in data. Args: data (bytes, bytearray, list): a byte array or list of 8-bit integers to shift out. Returns: bytes, bytearray, list: data shifted in. Raises: SPIError: if an I/O or OS error occurs. TypeError: if `data` type is invalid. ValueError: if data is not valid bytes.
[ "Shift", "out", "data", "and", "return", "shifted", "in", "data", "." ]
python
train
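A usage sketch for transfer(); the device path, mode, and clock rate are illustrative and require real spidev hardware:

from periphery import SPI

spi = SPI("/dev/spidev0.0", 0, 1000000)       # mode 0, 1 MHz
rx = spi.transfer([0x9f, 0x00, 0x00, 0x00])   # full-duplex shift of 4 bytes
print(rx)                                     # list in, list out (see code above)
spi.close()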
bruziev/security_interface
security_interface/api.py
https://github.com/bruziev/security_interface/blob/ec1f30c8ac051291694b0099caa0a7fde97ddfe6/security_interface/api.py#L63-L76
async def check_permission(self, identity, permission):
        """
        Works like :func:`Security.can`, but when the check fails a
        :func:`ForbiddenError` exception is raised.

        :param identity: Claim
        :param permission: Permission
        :return: Checked claim
        :raise: :func:`ForbiddenError`
        """
        await self.check_authorized(identity)
        allowed = await self.can(identity, permission)
        if not allowed:
            raise ForbiddenError()
[ "async", "def", "check_permission", "(", "self", ",", "identity", ",", "permission", ")", ":", "await", "self", ".", "check_authorized", "(", "identity", ")", "allowed", "=", "await", "self", ".", "can", "(", "identity", ",", "permission", ")", "if", "not", "allowed", ":", "raise", "ForbiddenError", "(", ")" ]
Works like :func:`Security.can`, but when the check fails a
:func:`ForbiddenError` exception is raised.

:param identity: Claim
:param permission: Permission
:return: Checked claim
:raise: :func:`ForbiddenError`
[ "Works", "like", ":", "func", ":", "Security", ".", "can", "but", "when", "check", "is", "failed", ":", "func", ":", "ForbiddenError", "exception", "is", "raised", "." ]
python
train
spyder-ide/spyder-kernels
spyder_kernels/console/kernel.py
https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L65-L87
def get_namespace_view(self):
        """
        Return the namespace view

        This is a dictionary with the following structure

        {'a': {'color': '#800000', 'size': 1, 'type': 'str', 'view': '1'}}

        Here:
        * 'a' is the variable name
        * 'color' is the color used to show it
        * 'size' and 'type' are self-evident
        * and 'view' is its value or the text shown in the last column
        """
        from spyder_kernels.utils.nsview import make_remote_view
        settings = self.namespace_view_settings
        if settings:
            ns = self._get_current_namespace()
            view = repr(make_remote_view(ns, settings, EXCLUDED_NAMES))
            return view
        else:
            return repr(None)
[ "def", "get_namespace_view", "(", "self", ")", ":", "from", "spyder_kernels", ".", "utils", ".", "nsview", "import", "make_remote_view", "settings", "=", "self", ".", "namespace_view_settings", "if", "settings", ":", "ns", "=", "self", ".", "_get_current_namespace", "(", ")", "view", "=", "repr", "(", "make_remote_view", "(", "ns", ",", "settings", ",", "EXCLUDED_NAMES", ")", ")", "return", "view", "else", ":", "return", "repr", "(", "None", ")" ]
Return the namespace view

This is a dictionary with the following structure

{'a': {'color': '#800000', 'size': 1, 'type': 'str', 'view': '1'}}

Here:
* 'a' is the variable name
* 'color' is the color used to show it
* 'size' and 'type' are self-evident
* and 'view' is its value or the text shown in the last column
[ "Return", "the", "namespace", "view" ]
python
train
numba/llvmlite
llvmlite/binding/module.py
https://github.com/numba/llvmlite/blob/fcadf8af11947f3fd041c5d6526c5bf231564883/llvmlite/binding/module.py#L80-L88
def get_function(self, name): """ Get a ValueRef pointing to the function named *name*. NameError is raised if the symbol isn't found. """ p = ffi.lib.LLVMPY_GetNamedFunction(self, _encode_string(name)) if not p: raise NameError(name) return ValueRef(p, 'function', dict(module=self))
[ "def", "get_function", "(", "self", ",", "name", ")", ":", "p", "=", "ffi", ".", "lib", ".", "LLVMPY_GetNamedFunction", "(", "self", ",", "_encode_string", "(", "name", ")", ")", "if", "not", "p", ":", "raise", "NameError", "(", "name", ")", "return", "ValueRef", "(", "p", ",", "'function'", ",", "dict", "(", "module", "=", "self", ")", ")" ]
Get a ValueRef pointing to the function named *name*. NameError is raised if the symbol isn't found.
[ "Get", "a", "ValueRef", "pointing", "to", "the", "function", "named", "*", "name", "*", ".", "NameError", "is", "raised", "if", "the", "symbol", "isn", "t", "found", "." ]
python
train
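A short sketch around get_function(), using llvmlite's documented parse_assembly() entry point:

import llvmlite.binding as llvm

llvm.initialize()
mod = llvm.parse_assembly("define i32 @answer() {\n  ret i32 42\n}")
fn = mod.get_function("answer")     # ValueRef wrapper on success
print(fn.name)
try:
    mod.get_function("missing")
except NameError as exc:
    print("not found:", exc)        # raised per the code above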
archman/beamline
beamline/lattice.py
https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/lattice.py#L103-L117
def rinse_rpnexp(self, rpnexp, rpndict): """ replace valid keyword of rpnexp from rpndict e.g. rpnexp = 'b a /', rpndict = {'b': 10} then after rinsing, rpnexp = '10 a /' return rinsed rpnexp """ for wd in rpnexp.split(): if wd in rpndict: try: val = float(rpndict[wd]) rpnexp = rpnexp.replace(wd, str(val)) except: pass return rpnexp
[ "def", "rinse_rpnexp", "(", "self", ",", "rpnexp", ",", "rpndict", ")", ":", "for", "wd", "in", "rpnexp", ".", "split", "(", ")", ":", "if", "wd", "in", "rpndict", ":", "try", ":", "val", "=", "float", "(", "rpndict", "[", "wd", "]", ")", "rpnexp", "=", "rpnexp", ".", "replace", "(", "wd", ",", "str", "(", "val", ")", ")", "except", ":", "pass", "return", "rpnexp" ]
replace valid keyword of rpnexp from rpndict e.g. rpnexp = 'b a /', rpndict = {'b': 10} then after rinsing, rpnexp = '10 a /' return rinsed rpnexp
[ "replace", "valid", "keyword", "of", "rpnexp", "from", "rpndict", "e", ".", "g", ".", "rpnexp", "=", "b", "a", "/", "rpndict", "=", "{", "b", ":", "10", "}", "then", "after", "rinsing", "rpnexp", "=", "10", "a", "/" ]
python
train
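The substitution behavior above, restated as a free function for a quick check; the enclosing class needs a lattice file to construct, so this is a behavioral sketch rather than the repo's API:

def rinse_rpnexp(rpnexp, rpndict):
    # Only tokens whose rpndict value parses as a float are replaced.
    for wd in rpnexp.split():
        if wd in rpndict:
            try:
                rpnexp = rpnexp.replace(wd, str(float(rpndict[wd])))
            except (TypeError, ValueError):
                pass
    return rpnexp

print(rinse_rpnexp("b a /", {"b": 10}))   # -> '10.0 a /'; 'a' is left alone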
jbittel/django-mama-cas
mama_cas/request.py
https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/request.py#L19-L24
def ns(self, prefix, tag): """ Given a prefix and an XML tag, output the qualified name for proper namespace handling on output. """ return etree.QName(self.prefixes[prefix], tag)
[ "def", "ns", "(", "self", ",", "prefix", ",", "tag", ")", ":", "return", "etree", ".", "QName", "(", "self", ".", "prefixes", "[", "prefix", "]", ",", "tag", ")" ]
Given a prefix and an XML tag, output the qualified name for proper namespace handling on output.
[ "Given", "a", "prefix", "and", "an", "XML", "tag", "output", "the", "qualified", "name", "for", "proper", "namespace", "handling", "on", "output", "." ]
python
train
Kozea/wdb
client/wdb/__init__.py
https://github.com/Kozea/wdb/blob/6af7901b02e866d76f8b0a697a8c078e5b70d1aa/client/wdb/__init__.py#L389-L410
def set_trace(self, frame=None, break_=True): """Break at current state""" # We are already tracing, do nothing trace_log.info( 'Setting trace %s (stepping %s) (current_trace: %s)' % ( pretty_frame(frame or sys._getframe().f_back), self.stepping, sys.gettrace() ) ) if self.stepping or self.closed: return self.reset() trace = ( self.trace_dispatch if trace_log.level >= 30 else self.trace_debug_dispatch ) trace_frame = frame = frame or sys._getframe().f_back while frame: frame.f_trace = trace frame = frame.f_back self.state = Step(trace_frame) if break_ else Running(trace_frame) sys.settrace(trace)
[ "def", "set_trace", "(", "self", ",", "frame", "=", "None", ",", "break_", "=", "True", ")", ":", "# We are already tracing, do nothing", "trace_log", ".", "info", "(", "'Setting trace %s (stepping %s) (current_trace: %s)'", "%", "(", "pretty_frame", "(", "frame", "or", "sys", ".", "_getframe", "(", ")", ".", "f_back", ")", ",", "self", ".", "stepping", ",", "sys", ".", "gettrace", "(", ")", ")", ")", "if", "self", ".", "stepping", "or", "self", ".", "closed", ":", "return", "self", ".", "reset", "(", ")", "trace", "=", "(", "self", ".", "trace_dispatch", "if", "trace_log", ".", "level", ">=", "30", "else", "self", ".", "trace_debug_dispatch", ")", "trace_frame", "=", "frame", "=", "frame", "or", "sys", ".", "_getframe", "(", ")", ".", "f_back", "while", "frame", ":", "frame", ".", "f_trace", "=", "trace", "frame", "=", "frame", ".", "f_back", "self", ".", "state", "=", "Step", "(", "trace_frame", ")", "if", "break_", "else", "Running", "(", "trace_frame", ")", "sys", ".", "settrace", "(", "trace", ")" ]
Break at current state
[ "Break", "at", "current", "state" ]
python
train
lago-project/lago
lago/utils.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/utils.py#L123-L199
def _run_command( command, input_data=None, stdin=None, out_pipe=subprocess.PIPE, err_pipe=subprocess.PIPE, env=None, uuid=None, **kwargs ): """ Runs a command Args: command(list of str): args of the command to execute, including the command itself as command[0] as `['ls', '-l']` input_data(str): If passed, will feed that data to the subprocess through stdin out_pipe(int or file): File descriptor as passed to :ref:subprocess.Popen to use as stdout stdin(int or file): File descriptor as passed to :ref:subprocess.Popen to use as stdin err_pipe(int or file): File descriptor as passed to :ref:subprocess.Popen to use as stderr env(dict of str:str): If set, will use the given dict as env for the subprocess uuid(uuid): If set the command will be logged with the given uuid converted to string, otherwise, a uuid v4 will be generated. **kwargs: Any other keyword args passed will be passed to the :ref:subprocess.Popen call Returns: lago.utils.CommandStatus: result of the interactive execution """ # add libexec to PATH if needed if uuid is None: uuid = uuid_m.uuid4() if constants.LIBEXEC_DIR not in os.environ['PATH'].split(':'): os.environ['PATH' ] = '%s:%s' % (constants.LIBEXEC_DIR, os.environ['PATH']) if input_data and not stdin: kwargs['stdin'] = subprocess.PIPE elif stdin: kwargs['stdin'] = stdin if env is None: env = os.environ.copy() else: env['PATH'] = ':'.join( list( set( env.get('PATH', '').split(':') + os.environ['PATH'] .split(':') ), ), ) popen = subprocess.Popen( ' '.join('"%s"' % arg for arg in command), stdout=out_pipe, stderr=err_pipe, shell=True, env=env, **kwargs ) out, err = popen.communicate(input_data) LOGGER.debug( '%s: command exit with return code: %d', str(uuid), popen.returncode ) if out: LOGGER.debug('%s: command stdout: %s', str(uuid), out) if err: LOGGER.debug('%s: command stderr: %s', str(uuid), err) return CommandStatus(popen.returncode, out, err)
[ "def", "_run_command", "(", "command", ",", "input_data", "=", "None", ",", "stdin", "=", "None", ",", "out_pipe", "=", "subprocess", ".", "PIPE", ",", "err_pipe", "=", "subprocess", ".", "PIPE", ",", "env", "=", "None", ",", "uuid", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# add libexec to PATH if needed", "if", "uuid", "is", "None", ":", "uuid", "=", "uuid_m", ".", "uuid4", "(", ")", "if", "constants", ".", "LIBEXEC_DIR", "not", "in", "os", ".", "environ", "[", "'PATH'", "]", ".", "split", "(", "':'", ")", ":", "os", ".", "environ", "[", "'PATH'", "]", "=", "'%s:%s'", "%", "(", "constants", ".", "LIBEXEC_DIR", ",", "os", ".", "environ", "[", "'PATH'", "]", ")", "if", "input_data", "and", "not", "stdin", ":", "kwargs", "[", "'stdin'", "]", "=", "subprocess", ".", "PIPE", "elif", "stdin", ":", "kwargs", "[", "'stdin'", "]", "=", "stdin", "if", "env", "is", "None", ":", "env", "=", "os", ".", "environ", ".", "copy", "(", ")", "else", ":", "env", "[", "'PATH'", "]", "=", "':'", ".", "join", "(", "list", "(", "set", "(", "env", ".", "get", "(", "'PATH'", ",", "''", ")", ".", "split", "(", "':'", ")", "+", "os", ".", "environ", "[", "'PATH'", "]", ".", "split", "(", "':'", ")", ")", ",", ")", ",", ")", "popen", "=", "subprocess", ".", "Popen", "(", "' '", ".", "join", "(", "'\"%s\"'", "%", "arg", "for", "arg", "in", "command", ")", ",", "stdout", "=", "out_pipe", ",", "stderr", "=", "err_pipe", ",", "shell", "=", "True", ",", "env", "=", "env", ",", "*", "*", "kwargs", ")", "out", ",", "err", "=", "popen", ".", "communicate", "(", "input_data", ")", "LOGGER", ".", "debug", "(", "'%s: command exit with return code: %d'", ",", "str", "(", "uuid", ")", ",", "popen", ".", "returncode", ")", "if", "out", ":", "LOGGER", ".", "debug", "(", "'%s: command stdout: %s'", ",", "str", "(", "uuid", ")", ",", "out", ")", "if", "err", ":", "LOGGER", ".", "debug", "(", "'%s: command stderr: %s'", ",", "str", "(", "uuid", ")", ",", "err", ")", "return", "CommandStatus", "(", "popen", ".", "returncode", ",", "out", ",", "err", ")" ]
Runs a command Args: command(list of str): args of the command to execute, including the command itself as command[0] as `['ls', '-l']` input_data(str): If passed, will feed that data to the subprocess through stdin out_pipe(int or file): File descriptor as passed to :ref:subprocess.Popen to use as stdout stdin(int or file): File descriptor as passed to :ref:subprocess.Popen to use as stdin err_pipe(int or file): File descriptor as passed to :ref:subprocess.Popen to use as stderr env(dict of str:str): If set, will use the given dict as env for the subprocess uuid(uuid): If set the command will be logged with the given uuid converted to string, otherwise, a uuid v4 will be generated. **kwargs: Any other keyword args passed will be passed to the :ref:subprocess.Popen call Returns: lago.utils.CommandStatus: result of the interactive execution
[ "Runs", "a", "command" ]
python
train
googleapis/oauth2client
oauth2client/file.py
https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/file.py#L61-L72
def _create_file_if_needed(self): """Create an empty file if necessary. This method will not initialize the file. Instead it implements a simple version of "touch" to ensure the file has been created. """ if not os.path.exists(self._filename): old_umask = os.umask(0o177) try: open(self._filename, 'a+b').close() finally: os.umask(old_umask)
[ "def", "_create_file_if_needed", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "_filename", ")", ":", "old_umask", "=", "os", ".", "umask", "(", "0o177", ")", "try", ":", "open", "(", "self", ".", "_filename", ",", "'a+b'", ")", ".", "close", "(", ")", "finally", ":", "os", ".", "umask", "(", "old_umask", ")" ]
Create an empty file if necessary. This method will not initialize the file. Instead it implements a simple version of "touch" to ensure the file has been created.
[ "Create", "an", "empty", "file", "if", "necessary", "." ]
python
valid
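The umask-guarded "touch" used above, as a standalone sketch; masking with 0o177 means the file is created owner read/write only, whatever the caller's umask:

import os

def touch_private(path):
    if not os.path.exists(path):
        old_umask = os.umask(0o177)       # strip group/other permission bits
        try:
            open(path, 'a+b').close()     # create without truncating
        finally:
            os.umask(old_umask)           # always restore the previous umask

touch_private('credentials.dat')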
tonysimpson/nanomsg-python
_nanomsg_ctypes/__init__.py
https://github.com/tonysimpson/nanomsg-python/blob/3acd9160f90f91034d4a43ce603aaa19fbaf1f2e/_nanomsg_ctypes/__init__.py#L247-L264
def nn_recv(socket, *args): "receive a message" if len(args) == 1: flags, = args pointer = ctypes.c_void_p() rtn = _nn_recv(socket, ctypes.byref(pointer), ctypes.c_size_t(-1), flags) if rtn < 0: return rtn, None else: return rtn, _create_message(pointer.value, rtn) elif len(args) == 2: msg_buf, flags = args mv_buf = memoryview(msg_buf) if mv_buf.readonly: raise TypeError('Writable buffer is required') rtn = _nn_recv(socket, ctypes.addressof(msg_buf), len(mv_buf), flags) return rtn, msg_buf
[ "def", "nn_recv", "(", "socket", ",", "*", "args", ")", ":", "if", "len", "(", "args", ")", "==", "1", ":", "flags", ",", "=", "args", "pointer", "=", "ctypes", ".", "c_void_p", "(", ")", "rtn", "=", "_nn_recv", "(", "socket", ",", "ctypes", ".", "byref", "(", "pointer", ")", ",", "ctypes", ".", "c_size_t", "(", "-", "1", ")", ",", "flags", ")", "if", "rtn", "<", "0", ":", "return", "rtn", ",", "None", "else", ":", "return", "rtn", ",", "_create_message", "(", "pointer", ".", "value", ",", "rtn", ")", "elif", "len", "(", "args", ")", "==", "2", ":", "msg_buf", ",", "flags", "=", "args", "mv_buf", "=", "memoryview", "(", "msg_buf", ")", "if", "mv_buf", ".", "readonly", ":", "raise", "TypeError", "(", "'Writable buffer is required'", ")", "rtn", "=", "_nn_recv", "(", "socket", ",", "ctypes", ".", "addressof", "(", "msg_buf", ")", ",", "len", "(", "mv_buf", ")", ",", "flags", ")", "return", "rtn", ",", "msg_buf" ]
receive a message
[ "receive", "a", "message" ]
python
train
pantsbuild/pants
src/python/pants/cache/local_artifact_cache.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/cache/local_artifact_cache.py#L54-L87
def store_and_use_artifact(self, cache_key, src, results_dir=None): """Store and then extract the artifact from the given `src` iterator for the given cache_key. :param cache_key: Cache key for the artifact. :param src: Iterator over binary data to store for the artifact. :param str results_dir: The path to the expected destination of the artifact extraction: will be cleared both before extraction, and after a failure to extract. """ with self._tmpfile(cache_key, 'read') as tmp: for chunk in src: tmp.write(chunk) tmp.close() tarball = self._store_tarball(cache_key, tmp.name) artifact = self._artifact(tarball) # NOTE(mateo): The two clean=True args passed in this method are likely safe, since the cache will by # definition be dealing with unique results_dir, as opposed to the stable vt.results_dir (aka 'current'). # But if by chance it's passed the stable results_dir, safe_makedir(clean=True) will silently convert it # from a symlink to a real dir and cause mysterious 'Operation not permitted' errors until the workdir is cleaned. if results_dir is not None: safe_mkdir(results_dir, clean=True) try: artifact.extract() except Exception: # Do our best to clean up after a failed artifact extraction. If a results_dir has been # specified, it is "expected" to represent the output destination of the extracted # artifact, and so removing it should clear any partially extracted state. if results_dir is not None: safe_mkdir(results_dir, clean=True) safe_delete(tarball) raise return True
[ "def", "store_and_use_artifact", "(", "self", ",", "cache_key", ",", "src", ",", "results_dir", "=", "None", ")", ":", "with", "self", ".", "_tmpfile", "(", "cache_key", ",", "'read'", ")", "as", "tmp", ":", "for", "chunk", "in", "src", ":", "tmp", ".", "write", "(", "chunk", ")", "tmp", ".", "close", "(", ")", "tarball", "=", "self", ".", "_store_tarball", "(", "cache_key", ",", "tmp", ".", "name", ")", "artifact", "=", "self", ".", "_artifact", "(", "tarball", ")", "# NOTE(mateo): The two clean=True args passed in this method are likely safe, since the cache will by", "# definition be dealing with unique results_dir, as opposed to the stable vt.results_dir (aka 'current').", "# But if by chance it's passed the stable results_dir, safe_makedir(clean=True) will silently convert it", "# from a symlink to a real dir and cause mysterious 'Operation not permitted' errors until the workdir is cleaned.", "if", "results_dir", "is", "not", "None", ":", "safe_mkdir", "(", "results_dir", ",", "clean", "=", "True", ")", "try", ":", "artifact", ".", "extract", "(", ")", "except", "Exception", ":", "# Do our best to clean up after a failed artifact extraction. If a results_dir has been", "# specified, it is \"expected\" to represent the output destination of the extracted", "# artifact, and so removing it should clear any partially extracted state.", "if", "results_dir", "is", "not", "None", ":", "safe_mkdir", "(", "results_dir", ",", "clean", "=", "True", ")", "safe_delete", "(", "tarball", ")", "raise", "return", "True" ]
Store and then extract the artifact from the given `src` iterator for the given cache_key. :param cache_key: Cache key for the artifact. :param src: Iterator over binary data to store for the artifact. :param str results_dir: The path to the expected destination of the artifact extraction: will be cleared both before extraction, and after a failure to extract.
[ "Store", "and", "then", "extract", "the", "artifact", "from", "the", "given", "src", "iterator", "for", "the", "given", "cache_key", "." ]
python
train
camptocamp/anthem
anthem/lyrics/records.py
https://github.com/camptocamp/anthem/blob/6800730764d31a2edced12049f823fefb367e9ad/anthem/lyrics/records.py#L31-L42
def create_or_update(ctx, model, xmlid, values): """ Create or update a record matching xmlid with values """ if isinstance(model, basestring): model = ctx.env[model] record = ctx.env.ref(xmlid, raise_if_not_found=False) if record: record.update(values) else: record = model.create(values) add_xmlid(ctx, record, xmlid) return record
[ "def", "create_or_update", "(", "ctx", ",", "model", ",", "xmlid", ",", "values", ")", ":", "if", "isinstance", "(", "model", ",", "basestring", ")", ":", "model", "=", "ctx", ".", "env", "[", "model", "]", "record", "=", "ctx", ".", "env", ".", "ref", "(", "xmlid", ",", "raise_if_not_found", "=", "False", ")", "if", "record", ":", "record", ".", "update", "(", "values", ")", "else", ":", "record", "=", "model", ".", "create", "(", "values", ")", "add_xmlid", "(", "ctx", ",", "record", ",", "xmlid", ")", "return", "record" ]
Create or update a record matching xmlid with values
[ "Create", "or", "update", "a", "record", "matching", "xmlid", "with", "values" ]
python
train
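A hedged sketch of create_or_update() inside an anthem song; the model, xmlid, and values are illustrative:

from anthem.lyrics.records import create_or_update

def setup_partners(ctx):
    # Creates the record on first run; later runs find it via the xmlid
    # and update it in place, keeping the song idempotent.
    create_or_update(
        ctx, 'res.partner', '__setup__.partner_acme',
        {'name': 'ACME', 'is_company': True},
    )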
lesscpy/lesscpy
lesscpy/lessc/parser.py
https://github.com/lesscpy/lesscpy/blob/51e392fb4a3cd4ccfb6175e0e42ce7d2f6b78126/lesscpy/lessc/parser.py#L651-L669
def p_media_query_value(self, p): """ media_query_value : number | variable | word | color | expression """ if utility.is_variable(p[1]): var = self.scope.variables(''.join(p[1])) if var: value = var.value[0] if hasattr(value, 'parse'): p[1] = value.parse(self.scope) else: p[1] = value if isinstance(p[1], Expression): p[0] = p[1].parse(self.scope) else: p[0] = p[1]
[ "def", "p_media_query_value", "(", "self", ",", "p", ")", ":", "if", "utility", ".", "is_variable", "(", "p", "[", "1", "]", ")", ":", "var", "=", "self", ".", "scope", ".", "variables", "(", "''", ".", "join", "(", "p", "[", "1", "]", ")", ")", "if", "var", ":", "value", "=", "var", ".", "value", "[", "0", "]", "if", "hasattr", "(", "value", ",", "'parse'", ")", ":", "p", "[", "1", "]", "=", "value", ".", "parse", "(", "self", ".", "scope", ")", "else", ":", "p", "[", "1", "]", "=", "value", "if", "isinstance", "(", "p", "[", "1", "]", ",", "Expression", ")", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", ".", "parse", "(", "self", ".", "scope", ")", "else", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]" ]
media_query_value : number | variable | word | color | expression
[ "media_query_value", ":", "number", "|", "variable", "|", "word", "|", "color", "|", "expression" ]
python
valid
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L360-L388
def print_summary(self): """ Print per-class summary for each snapshot. """ # Emit class summaries for each snapshot classlist = self.tracked_classes fobj = self.stream fobj.write('---- SUMMARY '+'-'*66+'\n') for snapshot in self.snapshots: self.annotate_snapshot(snapshot) fobj.write('%-35s %11s %12s %12s %5s\n' % ( trunc(snapshot.desc, 35), 'active', pp(snapshot.asizeof_total), 'average', 'pct' )) for classname in classlist: info = snapshot.classes.get(classname) fobj.write(' %-33s %11d %12s %12s %4d%%\n' % ( trunc(classname, 33), info['active'], pp(info['sum']), pp(info['avg']), info['pct'] )) fobj.write('-'*79+'\n')
[ "def", "print_summary", "(", "self", ")", ":", "# Emit class summaries for each snapshot", "classlist", "=", "self", ".", "tracked_classes", "fobj", "=", "self", ".", "stream", "fobj", ".", "write", "(", "'---- SUMMARY '", "+", "'-'", "*", "66", "+", "'\\n'", ")", "for", "snapshot", "in", "self", ".", "snapshots", ":", "self", ".", "annotate_snapshot", "(", "snapshot", ")", "fobj", ".", "write", "(", "'%-35s %11s %12s %12s %5s\\n'", "%", "(", "trunc", "(", "snapshot", ".", "desc", ",", "35", ")", ",", "'active'", ",", "pp", "(", "snapshot", ".", "asizeof_total", ")", ",", "'average'", ",", "'pct'", ")", ")", "for", "classname", "in", "classlist", ":", "info", "=", "snapshot", ".", "classes", ".", "get", "(", "classname", ")", "fobj", ".", "write", "(", "' %-33s %11d %12s %12s %4d%%\\n'", "%", "(", "trunc", "(", "classname", ",", "33", ")", ",", "info", "[", "'active'", "]", ",", "pp", "(", "info", "[", "'sum'", "]", ")", ",", "pp", "(", "info", "[", "'avg'", "]", ")", ",", "info", "[", "'pct'", "]", ")", ")", "fobj", ".", "write", "(", "'-'", "*", "79", "+", "'\\n'", ")" ]
Print per-class summary for each snapshot.
[ "Print", "per", "-", "class", "summary", "for", "each", "snapshot", "." ]
python
train
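The upstream Pympler workflow that ends in print_summary(); this repo vendors pympler, so the upstream import path is shown:

from pympler.classtracker import ClassTracker

class Widget:
    pass

tracker = ClassTracker()
tracker.track_class(Widget)
widgets = [Widget() for _ in range(100)]
tracker.create_snapshot('after alloc')
tracker.stats.print_summary()       # one block per snapshot, one row per class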
dsoprea/NsqSpinner
nsq/consumer.py
https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/consumer.py#L41-L153
def __send_rdy(self, connection, command):
        """Determine the RDY value, and set it. It can either be a static value,
        a callback, or None. If it's None, we'll calculate the value based on
        our limits and connection counts.

        The documentation recommends starting with (1), but since we are
        always dealing directly with *nsqd* servers by now, we'll always have
        a valid count to work with. Since we derive this count off a set of
        servers that will always be up-to-date, we have everything we need,
        here, going forward.
        """

        if self.__consumer.original_rdy is None:
            node_count = self.__consumer.get_node_count_for_topic(
                connection.context.topic)

            self.__logger_rdy.debug("Calculating RDY: max_in_flight=(%d) "
                                    "node_count=(%d)",
                                    self.__consumer.max_in_flight, node_count)

            if self.__consumer.max_in_flight >= node_count:
                # Calculate the RDY based on the max_in_flight and total number
                # of servers. We always round up, or else we'd run the risk of
                # not facilitating some servers.
                rdy_this = int(math.ceil(
                    float(self.__consumer.max_in_flight) /
                    float(node_count)))

                self.__logger_rdy.debug("Assigning RDY based on max_in_flight "
                                        "(%d) and node count (%d) (optimal): "
                                        "(%d)",
                                        self.__consumer.max_in_flight,
                                        node_count, rdy_this)
            else:
                # We have two possible scenarios:
                # (1) The client is starting up, and the total RDY count is
                #     already accounted for.
                # (2) The client is already started, and another connection has
                #     a (0) RDY count.
                #
                # In the case of (1), we'll take an RDY of (0). In the case of
                # (2) We'll send an RDY of (1) on their behalf, before we
                # assume a (0) for ourself.

                # Look for existing connections that have a (0) RDY (which
                # would've only been set to (0) intentionally).

                self.__logger_rdy.debug("(max_in_flight > nodes). Doing RDY "
                                        "election.")

                sleeping_connections = [
                    c
                    for (c, info)
                    in self.__consumer.connection_context.items()
                    if info['rdy_count'] == 0]

                self.__logger_rdy.debug("Current sleeping_connections: %s",
                                        sleeping_connections)

                if sleeping_connections:
                    elected_connection = random.choice(sleeping_connections)
                    self.__logger_rdy.debug("Sending RDY of (1) on: [%s]",
                                            elected_connection)

                    command_elected = nsq.command.Command(elected_connection)
                    command_elected.rdy(1)
                else:
                    self.__logger.debug("No sleeping connections. We got the "
                                        "short stick: [%s]", connection)

                rdy_this = 0
        else:
            try:
                rdy_this = self.__consumer.original_rdy(
                    connection.node,
                    self.__consumer.connection_count,
                    self.__consumer)

                self.__logger_rdy.debug("Using RDY from callback: (%d)",
                                        rdy_this)
            except TypeError:
                rdy_this = self.__consumer.original_rdy
                self.__logger_rdy.debug("Using static RDY: (%d)", rdy_this)

        # Make sure that the aggregate set of RDY counts doesn't exceed the
        # max. This constrains the previous value, above.
        rdy_this = min(rdy_this +
                       self.__get_total_rdy_count(),
                       self.__consumer.max_in_flight)

        # Make sure we don't exceed the maximum specified by the server. This
        # only works because we're running greenlets, not threads. At any given
        # time, only one greenlet is running, and we can make sure to
        # distribute the remainder of (max_in_flight / nodes) across a subset
        # of the nodes (they don't all have to have an even slice of
        # max_in_flight).
        server_features = self.__consumer.identify.server_features
        max_rdy_count = server_features['max_rdy_count']

        rdy_this = min(max_rdy_count, rdy_this)

        self.__logger_rdy.debug("Final RDY (max_in_flight=(%d) "
                                "max_rdy_count=(%d)): (%d)",
                                self.__consumer.max_in_flight, max_rdy_count,
                                rdy_this)

        if rdy_this > 0:
            command.rdy(rdy_this)
        else:
            self.__logger_rdy.info("This connection will go to sleep (not "
                                   "enough RDY to go around).")

        return rdy_this
[ "def", "__send_rdy", "(", "self", ",", "connection", ",", "command", ")", ":", "if", "self", ".", "__consumer", ".", "original_rdy", "is", "None", ":", "node_count", "=", "self", ".", "__consumer", ".", "get_node_count_for_topic", "(", "connection", ".", "context", ".", "topic", ")", "self", ".", "__logger_rdy", ".", "debug", "(", "\"Calculating RDY: max_in_flight=(%d) \"", "\"node_count=(%d)\"", ",", "self", ".", "__consumer", ".", "max_in_flight", ",", "node_count", ")", "if", "self", ".", "__consumer", ".", "max_in_flight", ">=", "node_count", ":", "# Calculate the RDY based on the max_in_flight and total number ", "# of servers. We always round up, or else we'd run the risk of ", "# not facilitating some servers.", "rdy_this", "=", "int", "(", "math", ".", "ceil", "(", "float", "(", "self", ".", "__consumer", ".", "max_in_flight", ")", "/", "float", "(", "node_count", ")", ")", ")", "self", ".", "__logger_rdy", ".", "debug", "(", "\"Assigning RDY based on max_in_flight \"", "\"(%d) and node count (%d) (optimal): \"", "\"(%d)\"", ",", "self", ".", "__consumer", ".", "max_in_flight", ",", "node_count", ",", "rdy_this", ")", "else", ":", "# We have two possible scenarios:", "# (1) The client is starting up, and the total RDY count is ", "# already accounted for.", "# (2) The client is already started, and another connection has", "# a (0) RDY count.", "#", "# In the case of (1), we'll take an RDY of (0). In the case of", "# (2) We'll send an RDY of (1) on their behalf, before we ", "# assume a (0) for ourself.", "# Look for existing connections that have a (0) RDY (which ", "# would've only been set to (0) intentionally).", "self", ".", "__logger_rdy", ".", "debug", "(", "\"(max_in_flight > nodes). Doing RDY \"", "\"election.\"", ")", "sleeping_connections", "=", "[", "c", "for", "(", "c", ",", "info", ")", "in", "self", ".", "__consumer", ".", "connection_context", ".", "items", "(", ")", "if", "info", "[", "'rdy_count'", "]", "==", "0", "]", "self", ".", "__logger_rdy", ".", "debug", "(", "\"Current sleeping_connections: %s\"", ",", "sleeping_connections", ")", "if", "sleeping_connections", ":", "elected_connection", "=", "random", ".", "choice", "(", "sleeping_connections", ")", "self", ".", "__logger_rdy", ".", "debug", "(", "\"Sending RDY of (1) on: [%s]\"", ",", "elected_connection", ")", "command_elected", "=", "nsq", ".", "command", ".", "Command", "(", "elected_connection", ")", "command_elected", ".", "rdy", "(", "1", ")", "else", ":", "self", ".", "__logger", ".", "debug", "(", "\"No sleeping connections. We got the \"", "\"short stick: [%s]\"", ",", "connection", ")", "rdy_this", "=", "0", "else", ":", "try", ":", "rdy_this", "=", "self", ".", "__consumer", ".", "original_rdy", "(", "connection", ".", "node", ",", "self", ".", "__consumer", ".", "connection_count", ",", "self", ".", "__consumer", ")", "self", ".", "__logger_rdy", ".", "debug", "(", "\"Using RDY from callback: (%d)\"", ",", "rdy_this", ")", "except", "TypeError", ":", "rdy_this", "=", "self", ".", "__consumer", ".", "original_rdy", "self", ".", "__logger_rdy", ".", "debug", "(", "\"Using static RDY: (%d)\"", ",", "rdy_this", ")", "# Make sure that the aggregate set of RDY counts doesn't exceed the ", "# max. This constrains the previous value, above.", "rdy_this", "=", "min", "(", "rdy_this", "+", "self", ".", "__get_total_rdy_count", "(", ")", ",", "self", ".", "__consumer", ".", "max_in_flight", ")", "# Make sure we don't exceed the maximum specified by the server. 
This ", "# only works because we're running greenlets, not threads. At any given ", "# time, only one greenlet is running, and we can make sure to ", "# distribute the remainder of (max_in_flight / nodes) across a subset ", "# of the nodes (they don't all have to have an even slice of ", "# max_in_flight).", "server_features", "=", "self", ".", "__consumer", ".", "identify", ".", "server_features", "max_rdy_count", "=", "server_features", "[", "'max_rdy_count'", "]", "rdy_this", "=", "min", "(", "max_rdy_count", ",", "rdy_this", ")", "self", ".", "__logger_rdy", ".", "debug", "(", "\"Final RDY (max_in_flight=(%d) \"", "\"max_rdy_count=(%d)): (%d)\"", ",", "self", ".", "__consumer", ".", "max_in_flight", ",", "max_rdy_count", ",", "rdy_this", ")", "if", "rdy_this", ">", "0", ":", "command", ".", "rdy", "(", "rdy_this", ")", "else", ":", "self", ".", "__logger_rdy", ".", "info", "(", "\"This connection will go to sleep (not \"", "\"enough RDY to go around).\"", ")", "return", "rdy_this" ]
Determine the RDY value, and set it. It can either be a static value,
a callback, or None. If it's None, we'll calculate the value
based on our limits and connection counts.

The documentation recommends starting with (1), but since we are
always dealing directly with *nsqd* servers by now, we'll always have
a valid count to work with. Since we derive this count off a set of
servers that will always be up-to-date, we have everything we need,
here, going forward.
[ "Determine", "the", "RDY", "value", "and", "set", "it", ".", "It", "can", "either", "be", "a", "static", "value", "a", "callback", "or", "None", ".", "If", "it", "s", "None", "we", "ll", "calculate", "the", "value", "based", "on", "our", "limits", "and", "connection", "counts", "." ]
python
train
phoebe-project/phoebe2
phoebe/dependencies/autofig/axes.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/dependencies/autofig/axes.py#L1249-L1267
def _process_dimension_kwargs(direction, kwargs): """ process kwargs for AxDimension instances by stripping off the prefix for the appropriate direction """ acceptable_keys = ['unit', 'pad', 'lim', 'label'] # if direction in ['s']: # acceptable_keys += ['mode'] processed_kwargs = {} for k,v in kwargs.items(): if k.startswith(direction): processed_key = k.lstrip(direction) else: processed_key = k if processed_key in acceptable_keys: processed_kwargs[processed_key] = v return processed_kwargs
[ "def", "_process_dimension_kwargs", "(", "direction", ",", "kwargs", ")", ":", "acceptable_keys", "=", "[", "'unit'", ",", "'pad'", ",", "'lim'", ",", "'label'", "]", "# if direction in ['s']:", "# acceptable_keys += ['mode']", "processed_kwargs", "=", "{", "}", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "if", "k", ".", "startswith", "(", "direction", ")", ":", "processed_key", "=", "k", ".", "lstrip", "(", "direction", ")", "else", ":", "processed_key", "=", "k", "if", "processed_key", "in", "acceptable_keys", ":", "processed_kwargs", "[", "processed_key", "]", "=", "v", "return", "processed_kwargs" ]
process kwargs for AxDimension instances by stripping off the prefix for the appropriate direction
[ "process", "kwargs", "for", "AxDimension", "instances", "by", "stripping", "off", "the", "prefix", "for", "the", "appropriate", "direction" ]
python
train
biocore/mustached-octo-ironman
moi/group.py
https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/group.py#L165-L202
def callback(self, msg): """Accept a message that was published, process and forward Parameters ---------- msg : tuple, (str, str, str) The message sent over the line. The `tuple` is of the form: (message_type, channel, payload). Notes ----- This method only handles messages where `message_type` is "message". Raises ------ ValueError If the channel is not known. """ message_type, channel, payload = msg if message_type != 'message': return try: payload = self._decode(payload) except ValueError: # unable to decode so we cannot handle the message return if channel == self.group_pubsub: action_f = self.action elif channel in self._listening_to: action_f = self.job_action else: raise ValueError("Callback triggered unexpectedly by %s" % channel) for verb, args in payload.items(): action_f(verb, args)
[ "def", "callback", "(", "self", ",", "msg", ")", ":", "message_type", ",", "channel", ",", "payload", "=", "msg", "if", "message_type", "!=", "'message'", ":", "return", "try", ":", "payload", "=", "self", ".", "_decode", "(", "payload", ")", "except", "ValueError", ":", "# unable to decode so we cannot handle the message", "return", "if", "channel", "==", "self", ".", "group_pubsub", ":", "action_f", "=", "self", ".", "action", "elif", "channel", "in", "self", ".", "_listening_to", ":", "action_f", "=", "self", ".", "job_action", "else", ":", "raise", "ValueError", "(", "\"Callback triggered unexpectedly by %s\"", "%", "channel", ")", "for", "verb", ",", "args", "in", "payload", ".", "items", "(", ")", ":", "action_f", "(", "verb", ",", "args", ")" ]
Accept a message that was published, process and forward Parameters ---------- msg : tuple, (str, str, str) The message sent over the line. The `tuple` is of the form: (message_type, channel, payload). Notes ----- This method only handles messages where `message_type` is "message". Raises ------ ValueError If the channel is not known.
[ "Accept", "a", "message", "that", "was", "published", "process", "and", "forward" ]
python
train
ellmetha/django-machina
machina/apps/forum_conversation/views.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_conversation/views.py#L459-L476
def get_poll_option_formset_kwargs(self): """ Returns the keyword arguments for instantiating the poll option formset. """ kwargs = { 'prefix': 'poll', } if self.request.method in ('POST', 'PUT'): kwargs.update({ 'data': self.request.POST, 'files': self.request.FILES, }) else: topic = self.get_topic() poll_option_queryset = TopicPollOption.objects.filter(poll__topic=topic) kwargs.update({ 'queryset': poll_option_queryset, }) return kwargs
[ "def", "get_poll_option_formset_kwargs", "(", "self", ")", ":", "kwargs", "=", "{", "'prefix'", ":", "'poll'", ",", "}", "if", "self", ".", "request", ".", "method", "in", "(", "'POST'", ",", "'PUT'", ")", ":", "kwargs", ".", "update", "(", "{", "'data'", ":", "self", ".", "request", ".", "POST", ",", "'files'", ":", "self", ".", "request", ".", "FILES", ",", "}", ")", "else", ":", "topic", "=", "self", ".", "get_topic", "(", ")", "poll_option_queryset", "=", "TopicPollOption", ".", "objects", ".", "filter", "(", "poll__topic", "=", "topic", ")", "kwargs", ".", "update", "(", "{", "'queryset'", ":", "poll_option_queryset", ",", "}", ")", "return", "kwargs" ]
Returns the keyword arguments for instantiating the poll option formset.
[ "Returns", "the", "keyword", "arguments", "for", "instantiating", "the", "poll", "option", "formset", "." ]
python
train
coldfix/udiskie
udiskie/udisks2.py
https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/udisks2.py#L462-L469
def luks_cleartext_holder(self): """Get wrapper to the unlocked luks cleartext device.""" if not self.is_luks: return None for device in self._daemon: if device.luks_cleartext_slave == self: return device return None
[ "def", "luks_cleartext_holder", "(", "self", ")", ":", "if", "not", "self", ".", "is_luks", ":", "return", "None", "for", "device", "in", "self", ".", "_daemon", ":", "if", "device", ".", "luks_cleartext_slave", "==", "self", ":", "return", "device", "return", "None" ]
Get wrapper to the unlocked luks cleartext device.
[ "Get", "wrapper", "to", "the", "unlocked", "luks", "cleartext", "device", "." ]
python
train
Robin8Put/pmes
balance/handler.py
https://github.com/Robin8Put/pmes/blob/338bec94162098f05b75bad035417317e1252fd2/balance/handler.py#L840-L931
async def confirmbalance(self, *args, **kwargs): """ Confirm balance after trading Accepts: - message (signed dictionary): - "txid" - str - "coinid" - str - "amount" - int Returns: - "address" - str - "coinid" - str - "amount" - int - "uid" - int - "unconfirmed" - int (0 by default) - "deposit" - int (0 by default) Verified: True """ # Get data from request if kwargs.get("message"): kwargs = json.loads(kwargs.get("message", "{}")) txid = kwargs.get("txid") coinid = kwargs.get("coinid") buyer_address = kwargs.get("buyer_address") cid = kwargs.get("cid") address = kwargs.get("buyer_address") try: coinid = coinid.replace("TEST", "") except: pass # Check if required fields exists if not all([coinid, cid, buyer_address, txid]): return {"error":400, "reason": "Confirm balance. Missed required fields"} if not coinid in settings.bridges.keys(): return await self.error_400("Confirm balance. Invalid coinid: %s" % coinid) # Get offers price self.account.blockchain.setendpoint(settings.bridges[coinid]) offer = await self.account.blockchain.getoffer(cid=cid, buyer_address=buyer_address) # Get offers price for updating balance amount = int(offer["price"]) coinid = "PUT" # Get sellers account history_database = self.client[settings.HISTORY] history_collection = history_database[coinid] history = await history_collection.find_one({"txid":txid}) try: account = await self.account.getaccountdata(public_key=history["public_key"]) except: return await self.error_404("Confirm balance. Not found current deal.") # Connect to balance database database = self.client[self.collection] balance_collection = database[coinid] # Try to update balance if exists balance = await balance_collection.find_one({"uid":account["id"]}) # Decrement unconfirmed submitted = int(balance["amount_frozen"]) - int(amount) if submitted < 0: return await self.error_400("Not enough frozen amount.") decremented = await balance_collection.find_one_and_update( {"uid":account["id"]}, {"$set":{"amount_frozen": str(submitted)}}) difference = int(balance["amount_active"]) + int(amount) updated = await balance_collection.find_one_and_update( {"uid":account["id"]}, {"$set":{"amount_active":str(difference)}}) if not updated: return {"error":404, "reason":"Confirm balance. Not found current transaction id"} # Delete transaction id field await history_collection.find_one_and_update({"txid":txid}, {"$unset":{"txid":1}}) if int(account["level"]) == 2: await self.account.updatelevel(**{"id":account["id"], "level":3}) return {i:updated[i] for i in updated if i != "_id" and i != "txid"}
[ "async", "def", "confirmbalance", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Get data from request", "if", "kwargs", ".", "get", "(", "\"message\"", ")", ":", "kwargs", "=", "json", ".", "loads", "(", "kwargs", ".", "get", "(", "\"message\"", ",", "\"{}\"", ")", ")", "txid", "=", "kwargs", ".", "get", "(", "\"txid\"", ")", "coinid", "=", "kwargs", ".", "get", "(", "\"coinid\"", ")", "buyer_address", "=", "kwargs", ".", "get", "(", "\"buyer_address\"", ")", "cid", "=", "kwargs", ".", "get", "(", "\"cid\"", ")", "address", "=", "kwargs", ".", "get", "(", "\"buyer_address\"", ")", "try", ":", "coinid", "=", "coinid", ".", "replace", "(", "\"TEST\"", ",", "\"\"", ")", "except", ":", "pass", "# Check if required fields exists", "if", "not", "all", "(", "[", "coinid", ",", "cid", ",", "buyer_address", ",", "txid", "]", ")", ":", "return", "{", "\"error\"", ":", "400", ",", "\"reason\"", ":", "\"Confirm balance. Missed required fields\"", "}", "if", "not", "coinid", "in", "settings", ".", "bridges", ".", "keys", "(", ")", ":", "return", "await", "self", ".", "error_400", "(", "\"Confirm balance. Invalid coinid: %s\"", "%", "coinid", ")", "# Get offers price\t", "self", ".", "account", ".", "blockchain", ".", "setendpoint", "(", "settings", ".", "bridges", "[", "coinid", "]", ")", "offer", "=", "await", "self", ".", "account", ".", "blockchain", ".", "getoffer", "(", "cid", "=", "cid", ",", "buyer_address", "=", "buyer_address", ")", "# Get offers price for updating balance", "amount", "=", "int", "(", "offer", "[", "\"price\"", "]", ")", "coinid", "=", "\"PUT\"", "# Get sellers account", "history_database", "=", "self", ".", "client", "[", "settings", ".", "HISTORY", "]", "history_collection", "=", "history_database", "[", "coinid", "]", "history", "=", "await", "history_collection", ".", "find_one", "(", "{", "\"txid\"", ":", "txid", "}", ")", "try", ":", "account", "=", "await", "self", ".", "account", ".", "getaccountdata", "(", "public_key", "=", "history", "[", "\"public_key\"", "]", ")", "except", ":", "return", "await", "self", ".", "error_404", "(", "\"Confirm balance. Not found current deal.\"", ")", "# Connect to balance database", "database", "=", "self", ".", "client", "[", "self", ".", "collection", "]", "balance_collection", "=", "database", "[", "coinid", "]", "# Try to update balance if exists", "balance", "=", "await", "balance_collection", ".", "find_one", "(", "{", "\"uid\"", ":", "account", "[", "\"id\"", "]", "}", ")", "# Decrement unconfirmed", "submitted", "=", "int", "(", "balance", "[", "\"amount_frozen\"", "]", ")", "-", "int", "(", "amount", ")", "if", "submitted", "<", "0", ":", "return", "await", "self", ".", "error_400", "(", "\"Not enough frozen amount.\"", ")", "decremented", "=", "await", "balance_collection", ".", "find_one_and_update", "(", "{", "\"uid\"", ":", "account", "[", "\"id\"", "]", "}", ",", "{", "\"$set\"", ":", "{", "\"amount_frozen\"", ":", "str", "(", "submitted", ")", "}", "}", ")", "difference", "=", "int", "(", "balance", "[", "\"amount_active\"", "]", ")", "+", "int", "(", "amount", ")", "updated", "=", "await", "balance_collection", ".", "find_one_and_update", "(", "{", "\"uid\"", ":", "account", "[", "\"id\"", "]", "}", ",", "{", "\"$set\"", ":", "{", "\"amount_active\"", ":", "str", "(", "difference", ")", "}", "}", ")", "if", "not", "updated", ":", "return", "{", "\"error\"", ":", "404", ",", "\"reason\"", ":", "\"Confirm balance. 
Not found current transaction id\"", "}", "# Delete transaction id field", "await", "history_collection", ".", "find_one_and_update", "(", "{", "\"txid\"", ":", "txid", "}", ",", "{", "\"$unset\"", ":", "{", "\"txid\"", ":", "1", "}", "}", ")", "if", "int", "(", "account", "[", "\"level\"", "]", ")", "==", "2", ":", "await", "self", ".", "account", ".", "updatelevel", "(", "*", "*", "{", "\"id\"", ":", "account", "[", "\"id\"", "]", ",", "\"level\"", ":", "3", "}", ")", "return", "{", "i", ":", "updated", "[", "i", "]", "for", "i", "in", "updated", "if", "i", "!=", "\"_id\"", "and", "i", "!=", "\"txid\"", "}" ]
Confirm balance after trading Accepts: - message (signed dictionary): - "txid" - str - "coinid" - str - "amount" - int Returns: - "address" - str - "coinid" - str - "amount" - int - "uid" - int - "unconfirmed" - int (0 by default) - "deposit" - int (0 by default) Verified: True
[ "Confirm", "balance", "after", "trading" ]
python
train
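A hypothetical request payload for the confirmbalance handler above, built from the exact fields the code reads; every value is a placeholder, and `handler` stands in for an instance of the owning service class.

import json

# Field names come from the handler above; the values below are invented.
message = json.dumps({
    "txid": "f3a1c0de",           # placeholder transaction id recorded in history
    "coinid": "PUTTEST",          # the "TEST" suffix is stripped before validation
    "buyer_address": "0xBUYER",   # placeholder address used for the offer lookup
    "cid": 42,                    # placeholder content id of the offer
})

# result = await handler.confirmbalance(message=message)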
Qiskit/qiskit-terra
qiskit/quantum_info/operators/base_operator.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/quantum_info/operators/base_operator.py#L75-L85
def _atol(self, atol):
        """Set the absolute tolerance parameter for float comparisons."""
        # NOTE: that this overrides the class value so applies to all
        # instances of the class.
        max_tol = self.__class__.MAX_TOL
        if atol < 0:
            raise QiskitError("Invalid atol: must be non-negative.")
        if atol > max_tol:
            raise QiskitError(
                "Invalid atol: must be less than {}.".format(max_tol))
        self.__class__.ATOL = atol
[ "def", "_atol", "(", "self", ",", "atol", ")", ":", "# NOTE: that this overrides the class value so applies to all", "# instances of the class.", "max_tol", "=", "self", ".", "__class__", ".", "MAX_TOL", "if", "atol", "<", "0", ":", "raise", "QiskitError", "(", "\"Invalid atol: must be non-negative.\"", ")", "if", "atol", ">", "max_tol", ":", "raise", "QiskitError", "(", "\"Invalid atol: must be less than {}.\"", ".", "format", "(", "max_tol", ")", ")", "self", ".", "__class__", ".", "ATOL", "=", "atol" ]
Set the absolute tolerance parameter for float comparisons.
[ "Set", "the", "absolute", "tolerence", "parameter", "for", "float", "comparisons", "." ]
python
test
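A short sketch of how the validation in _atol behaves, assuming the method is exposed as the setter of an `atol` property on operator classes; the values are illustrative.

import numpy as np
from qiskit.quantum_info import Operator

op = Operator(np.eye(2))
op.atol = 1e-10     # accepted: non-negative and below MAX_TOL
# op.atol = -1.0    # would raise QiskitError("Invalid atol: must be non-negative.")

As the NOTE in the code says, the assignment rewrites a class attribute, so the new tolerance applies to every instance of the class, not just `op`.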
kisom/pypcapfile
pcapfile/protocols/linklayer/ethernet.py
https://github.com/kisom/pypcapfile/blob/67520cfbb6c2e9ab3e7c181a8012ddc56ec5cad8/pcapfile/protocols/linklayer/ethernet.py#L57-L65
def strip_ethernet(packet): """ Strip the Ethernet frame from a packet. """ if not isinstance(packet, Ethernet): packet = Ethernet(packet) payload = packet.payload return payload
[ "def", "strip_ethernet", "(", "packet", ")", ":", "if", "not", "isinstance", "(", "packet", ",", "Ethernet", ")", ":", "packet", "=", "Ethernet", "(", "packet", ")", "payload", "=", "packet", ".", "payload", "return", "payload" ]
Strip the Ethernet frame from a packet.
[ "Strip", "the", "Ethernet", "frame", "from", "a", "packet", "." ]
python
valid
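A usage sketch for strip_ethernet, assuming `raw_frame` holds the bytes of one captured Ethernet frame (for example, a packet loaded with pcapfile's savefile reader); placeholder bytes would not parse.

from pcapfile.protocols.linklayer.ethernet import Ethernet, strip_ethernet

payload = strip_ethernet(raw_frame)                  # raw bytes are wrapped for you
payload_again = strip_ethernet(Ethernet(raw_frame))  # passing an Ethernet instance is equivalent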
ladybug-tools/ladybug
ladybug/datacollection.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/datacollection.py#L977-L993
def filter_by_doys(self, doys): """Filter the Data Collection based on a list of days of the year (as integers). Args: doys: A List of days of the year [1..365] Return: A new Data Collection with filtered data """ _filt_values = [] _filt_datetimes = [] for i, d in enumerate(self.datetimes): if d in doys: _filt_datetimes.append(d) _filt_values.append(self._values[i]) _filt_header = self.header.duplicate() return DailyCollection(_filt_header, _filt_values, _filt_datetimes)
[ "def", "filter_by_doys", "(", "self", ",", "doys", ")", ":", "_filt_values", "=", "[", "]", "_filt_datetimes", "=", "[", "]", "for", "i", ",", "d", "in", "enumerate", "(", "self", ".", "datetimes", ")", ":", "if", "d", "in", "doys", ":", "_filt_datetimes", ".", "append", "(", "d", ")", "_filt_values", ".", "append", "(", "self", ".", "_values", "[", "i", "]", ")", "_filt_header", "=", "self", ".", "header", ".", "duplicate", "(", ")", "return", "DailyCollection", "(", "_filt_header", ",", "_filt_values", ",", "_filt_datetimes", ")" ]
Filter the Data Collection based on a list of days of the year (as integers). Args: doys: A List of days of the year [1..365] Return: A new Data Collection with filtered data
[ "Filter", "the", "Data", "Collection", "based", "on", "a", "list", "of", "days", "of", "the", "year", "(", "as", "integers", ")", "." ]
python
train
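A hypothetical call to filter_by_doys against an existing DailyCollection named `collection`, keeping only the first week of the year; per the docstring, days of the year are plain integers, which is also what the membership test in the loop compares against.

first_week = collection.filter_by_doys(list(range(1, 8)))  # doys 1 through 7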
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_speech.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_speech.py#L109-L122
def cmd_speech(self, args): '''speech commands''' usage = "usage: speech <say>" if len(args) < 1: print(usage) return if args[0] == "say": if len(args) < 2: print("usage: speech say <text to say>") return self.say(" ".join(args[1::])) if args[0] == "list_voices": self.list_voices()
[ "def", "cmd_speech", "(", "self", ",", "args", ")", ":", "usage", "=", "\"usage: speech <say>\"", "if", "len", "(", "args", ")", "<", "1", ":", "print", "(", "usage", ")", "return", "if", "args", "[", "0", "]", "==", "\"say\"", ":", "if", "len", "(", "args", ")", "<", "2", ":", "print", "(", "\"usage: speech say <text to say>\"", ")", "return", "self", ".", "say", "(", "\" \"", ".", "join", "(", "args", "[", "1", ":", ":", "]", ")", ")", "if", "args", "[", "0", "]", "==", "\"list_voices\"", ":", "self", ".", "list_voices", "(", ")" ]
speech commands
[ "speech", "commands" ]
python
train
CTPUG/wafer
wafer/schedule/admin.py
https://github.com/CTPUG/wafer/blob/a20af3c399267f76373dc342f4d542a9bc457c35/wafer/schedule/admin.py#L72-L86
def validate_items(all_items): """Find errors in the schedule. Check for: - pending / rejected talks in the schedule - items with both talks and pages assigned - items with neither talks nor pages assigned """ validation = [] for item in all_items: if item.talk is not None and item.page is not None: validation.append(item) elif item.talk is None and item.page is None: validation.append(item) elif item.talk and item.talk.status not in [ACCEPTED, CANCELLED]: validation.append(item) return validation
[ "def", "validate_items", "(", "all_items", ")", ":", "validation", "=", "[", "]", "for", "item", "in", "all_items", ":", "if", "item", ".", "talk", "is", "not", "None", "and", "item", ".", "page", "is", "not", "None", ":", "validation", ".", "append", "(", "item", ")", "elif", "item", ".", "talk", "is", "None", "and", "item", ".", "page", "is", "None", ":", "validation", ".", "append", "(", "item", ")", "elif", "item", ".", "talk", "and", "item", ".", "talk", ".", "status", "not", "in", "[", "ACCEPTED", ",", "CANCELLED", "]", ":", "validation", ".", "append", "(", "item", ")", "return", "validation" ]
Find errors in the schedule. Check for: - pending / rejected talks in the schedule - items with both talks and pages assigned - items with neither talks nor pages assigned
[ "Find", "errors", "in", "the", "schedule", ".", "Check", "for", ":", "-", "pending", "/", "rejected", "talks", "in", "the", "schedule", "-", "items", "with", "both", "talks", "and", "pages", "assigned", "-", "items", "with", "neither", "talks", "nor", "pages", "assigned" ]
python
train
Erotemic/utool
utool/util_autogen.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_autogen.py#L897-L913
def remove_codeblock_syntax_sentinals(code_text):
    r"""
    Removes template comments and vim sentinels

    Args:
        code_text (str):

    Returns:
        str: code_text_
    """
    flags = re.MULTILINE | re.DOTALL
    code_text_ = code_text
    code_text_ = re.sub(r'^ *# *REM [^\n]*$\n?', '', code_text_, flags=flags)
    code_text_ = re.sub(r'^ *# STARTBLOCK *$\n', '', code_text_, flags=flags)
    code_text_ = re.sub(r'^ *# ENDBLOCK *$\n?', '', code_text_, flags=flags)
    code_text_ = code_text_.rstrip()
    return code_text_
[ "def", "remove_codeblock_syntax_sentinals", "(", "code_text", ")", ":", "flags", "=", "re", ".", "MULTILINE", "|", "re", ".", "DOTALL", "code_text_", "=", "code_text", "code_text_", "=", "re", ".", "sub", "(", "r'^ *# *REM [^\\n]*$\\n?'", ",", "''", ",", "code_text_", ",", "flags", "=", "flags", ")", "code_text_", "=", "re", ".", "sub", "(", "r'^ *# STARTBLOCK *$\\n'", ",", "''", ",", "code_text_", ",", "flags", "=", "flags", ")", "code_text_", "=", "re", ".", "sub", "(", "r'^ *# ENDBLOCK *$\\n?'", ",", "''", ",", "code_text_", ",", "flags", "=", "flags", ")", "code_text_", "=", "code_text_", ".", "rstrip", "(", ")", "return", "code_text_" ]
r""" Removes template comments and vim sentinals Args: code_text (str): Returns: str: code_text_
[ "r", "Removes", "template", "comments", "and", "vim", "sentinals" ]
python
train
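A small worked example of the stripping behavior, using sentinel lines in the exact forms the three regular expressions above match; only the middle code line survives.

from utool.util_autogen import remove_codeblock_syntax_sentinals

code_text = (
    "# STARTBLOCK\n"
    "x = 1  # kept\n"
    "# REM template-only comment, stripped\n"
    "# ENDBLOCK\n"
)
print(remove_codeblock_syntax_sentinals(code_text))
# -> x = 1  # kept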
spyder-ide/spyder
spyder/plugins/help/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/help/plugin.py#L608-L613
def _update_lock_icon(self): """Update locked state icon""" icon = ima.icon('lock') if self.locked else ima.icon('lock_open') self.locked_button.setIcon(icon) tip = _("Unlock") if self.locked else _("Lock") self.locked_button.setToolTip(tip)
[ "def", "_update_lock_icon", "(", "self", ")", ":", "icon", "=", "ima", ".", "icon", "(", "'lock'", ")", "if", "self", ".", "locked", "else", "ima", ".", "icon", "(", "'lock_open'", ")", "self", ".", "locked_button", ".", "setIcon", "(", "icon", ")", "tip", "=", "_", "(", "\"Unlock\"", ")", "if", "self", ".", "locked", "else", "_", "(", "\"Lock\"", ")", "self", ".", "locked_button", ".", "setToolTip", "(", "tip", ")" ]
Update locked state icon
[ "Update", "locked", "state", "icon" ]
python
train
amelchio/eternalegypt
examples/sms.py
https://github.com/amelchio/eternalegypt/blob/895e0b235ceaf7f61458c620237c3ad397780e98/examples/sms.py#L15-L26
async def send_message(): """Example of sending a message.""" jar = aiohttp.CookieJar(unsafe=True) websession = aiohttp.ClientSession(cookie_jar=jar) modem = eternalegypt.Modem(hostname=sys.argv[1], websession=websession) await modem.login(password=sys.argv[2]) await modem.sms(phone=sys.argv[3], message=sys.argv[4]) await modem.logout() await websession.close()
[ "async", "def", "send_message", "(", ")", ":", "jar", "=", "aiohttp", ".", "CookieJar", "(", "unsafe", "=", "True", ")", "websession", "=", "aiohttp", ".", "ClientSession", "(", "cookie_jar", "=", "jar", ")", "modem", "=", "eternalegypt", ".", "Modem", "(", "hostname", "=", "sys", ".", "argv", "[", "1", "]", ",", "websession", "=", "websession", ")", "await", "modem", ".", "login", "(", "password", "=", "sys", ".", "argv", "[", "2", "]", ")", "await", "modem", ".", "sms", "(", "phone", "=", "sys", ".", "argv", "[", "3", "]", ",", "message", "=", "sys", ".", "argv", "[", "4", "]", ")", "await", "modem", ".", "logout", "(", ")", "await", "websession", ".", "close", "(", ")" ]
Example of sending a message.
[ "Example", "of", "sending", "a", "message", "." ]
python
test
scanny/python-pptx
pptx/shapes/placeholder.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/shapes/placeholder.py#L279-L296
def insert_chart(self, chart_type, chart_data): """ Return a |PlaceholderGraphicFrame| object containing a new chart of *chart_type* depicting *chart_data* and having the same position and size as this placeholder. *chart_type* is one of the :ref:`XlChartType` enumeration values. *chart_data* is a |ChartData| object populated with the categories and series values for the chart. Note that the new |Chart| object is not returned directly. The chart object may be accessed using the :attr:`~.PlaceholderGraphicFrame.chart` property of the returned |PlaceholderGraphicFrame| object. """ rId = self.part.add_chart_part(chart_type, chart_data) graphicFrame = self._new_chart_graphicFrame( rId, self.left, self.top, self.width, self.height ) self._replace_placeholder_with(graphicFrame) return PlaceholderGraphicFrame(graphicFrame, self._parent)
[ "def", "insert_chart", "(", "self", ",", "chart_type", ",", "chart_data", ")", ":", "rId", "=", "self", ".", "part", ".", "add_chart_part", "(", "chart_type", ",", "chart_data", ")", "graphicFrame", "=", "self", ".", "_new_chart_graphicFrame", "(", "rId", ",", "self", ".", "left", ",", "self", ".", "top", ",", "self", ".", "width", ",", "self", ".", "height", ")", "self", ".", "_replace_placeholder_with", "(", "graphicFrame", ")", "return", "PlaceholderGraphicFrame", "(", "graphicFrame", ",", "self", ".", "_parent", ")" ]
Return a |PlaceholderGraphicFrame| object containing a new chart of *chart_type* depicting *chart_data* and having the same position and size as this placeholder. *chart_type* is one of the :ref:`XlChartType` enumeration values. *chart_data* is a |ChartData| object populated with the categories and series values for the chart. Note that the new |Chart| object is not returned directly. The chart object may be accessed using the :attr:`~.PlaceholderGraphicFrame.chart` property of the returned |PlaceholderGraphicFrame| object.
[ "Return", "a", "|PlaceholderGraphicFrame|", "object", "containing", "a", "new", "chart", "of", "*", "chart_type", "*", "depicting", "*", "chart_data", "*", "and", "having", "the", "same", "position", "and", "size", "as", "this", "placeholder", ".", "*", "chart_type", "*", "is", "one", "of", "the", ":", "ref", ":", "XlChartType", "enumeration", "values", ".", "*", "chart_data", "*", "is", "a", "|ChartData|", "object", "populated", "with", "the", "categories", "and", "series", "values", "for", "the", "chart", ".", "Note", "that", "the", "new", "|Chart|", "object", "is", "not", "returned", "directly", ".", "The", "chart", "object", "may", "be", "accessed", "using", "the", ":", "attr", ":", "~", ".", "PlaceholderGraphicFrame", ".", "chart", "property", "of", "the", "returned", "|PlaceholderGraphicFrame|", "object", "." ]
python
train
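A usage sketch for insert_chart built on the standard python-pptx API; the slide layout and the placeholder idx of 10 are assumptions that depend on the template in use.

from pptx import Presentation
from pptx.chart.data import CategoryChartData
from pptx.enum.chart import XL_CHART_TYPE

prs = Presentation()
slide = prs.slides.add_slide(prs.slide_layouts[5])

chart_data = CategoryChartData()
chart_data.categories = ['East', 'West']
chart_data.add_series('Sales', (19.2, 21.4))

placeholder = slide.placeholders[10]  # hypothetical idx of a chart-capable placeholder
graphic_frame = placeholder.insert_chart(XL_CHART_TYPE.COLUMN_CLUSTERED, chart_data)
chart = graphic_frame.chart  # the new Chart object, reached as the docstring describes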
Amsterdam/authorization_django
authorization_django/middleware.py
https://github.com/Amsterdam/authorization_django/blob/71da52b38a7f5a16a2bde8f8ea97b3c11ccb1be1/authorization_django/middleware.py#L50-L254
def authorization_middleware(get_response): """ Django middleware to parse incoming access tokens, validate them and set an authorization function on the request. The decision to use a generic middleware rather than an AuthenticationMiddleware is explicitly made, because inctances of the latter come with a number of assumptions (i.e. that user.is_authorized() exists, or that request.user uses the User model). Example usage: :: request.is_authorized_for() :param get_response: callable that creates the response object :return: response :todo: Two things needs to be done when we can completely remove the Django JWT plugin: - Nested function 'middleware' allows both 'JWT' (not IANA-registered) and 'Bearer' as Authorization header prefix; JWT should not be accepted. - The Django JWT middleware does not include the authz claim, so this plugin does not fail if it is not present; this behavior is wrong when we no longer use the Django JWT plugin. """ middleware_settings = settings() logger = _create_logger(middleware_settings) min_scope = middleware_settings['MIN_SCOPE'] def get_token_subject(sub): return sub def always_ok(*args, **kwargs): return True def authorize_function(scopes, token_signature, x_unique_id=None): """ Creates a partial around :func:`levels.is_authorized` that wraps the current user's scopes. :return func: """ log_msg_scopes = 'Granted access (needed: {}, granted: {}, token: {})' def is_authorized(*needed_scopes): granted_scopes = set(scopes) needed_scopes = set(needed_scopes) result = needed_scopes.issubset(granted_scopes) if result: msg = log_msg_scopes.format(needed_scopes, granted_scopes, token_signature) if x_unique_id: msg += ' X-Unique-ID: {}'.format(x_unique_id) logger.info(msg) return result return is_authorized def authorize_forced_anonymous(_): """ Authorize function for routes that are forced anonymous""" raise Exception( 'Should not call is_authorized_for in anonymous routes') def insufficient_scope(): """Returns an HttpResponse object with a 401.""" msg = 'Bearer realm="datapunt", error="insufficient_scope"' response = http.HttpResponse('Unauthorized', status=401) response['WWW-Authenticate'] = msg return response def expired_token(): """ Returns an HttpResponse object with a 401 """ msg = 'Bearer realm="datapunt", error="expired_token"' response = http.HttpResponse('Unauthorized', status=401) response['WWW-Authenticate'] = msg return response def invalid_token(): """ Returns an HttpResponse object with a 401 """ msg = 'Bearer realm="datapunt", error="invalid_token"' response = http.HttpResponse('Unauthorized', status=401) response['WWW-Authenticate'] = msg return response def invalid_request(): """ Returns an HttpResponse object with a 400 """ msg = ( "Bearer realm=\"datapunt\", error=\"invalid_request\", " "error_description=\"Invalid Authorization header format; " "should be: 'Bearer [token]'\"") response = http.HttpResponse('Bad Request', status=400) response['WWW-Authenticate'] = msg return response def token_data(authorization): """ Get the token data present in the given authorization header. 
""" try: prefix, token = authorization.split() except ValueError: logger.warning( 'Invalid Authorization header: {}'.format(authorization)) raise _AuthorizationHeaderError(invalid_request()) if prefix.lower() != 'bearer': logger.warning( 'Invalid Authorization header: {}'.format(authorization)) raise _AuthorizationHeaderError(invalid_request()) try: header = jwt.get_unverified_header(token) except jwt.ExpiredSignatureError: logger.info("Expired token") raise _AuthorizationHeaderError(expired_token()) except (jwt.InvalidTokenError, jwt.DecodeError): logger.exception("API authz problem: JWT decode error while reading header") raise _AuthorizationHeaderError(invalid_token()) if 'kid' not in header: logger.exception("Did not get a valid key identifier") raise _AuthorizationHeaderError(invalid_token()) keys = middleware_settings['JWKS'].verifiers if header['kid'] not in keys: logger.exception("Unknown key identifier: {}".format(header['kid'])) raise _AuthorizationHeaderError(invalid_token()) key = keys[header['kid']] try: decoded = jwt.decode(token, key=key.key, algorithms=(key.alg,)) except jwt.InvalidTokenError: logger.exception('API authz problem: could not decode access ' 'token {}'.format(token)) raise _AuthorizationHeaderError(invalid_token()) if 'scopes' not in decoded: logger.warning('API authz problem: access token misses ' 'authz and scopes claim: {}'.format(token)) raise _AuthorizationHeaderError(invalid_token()) else: scopes = decoded['scopes'] if 'sub' in decoded: sub = decoded['sub'] else: sub = None token_signature = token.split('.')[2] return scopes, token_signature, sub def middleware(request): """ Parses the Authorization header, decodes and validates the JWT and adds the is_authorized_for function to the request. """ request_path = request.path forced_anonymous = any( request_path.startswith(route) for route in middleware_settings['FORCED_ANONYMOUS_ROUTES']) if middleware_settings['ALWAYS_OK']: logger.warning('API authz DISABLED') request.is_authorized_for = always_ok request.get_token_subject = 'ALWAYS_OK' return get_response(request) is_options = request.method == 'OPTIONS' if forced_anonymous or is_options: authz_func = authorize_forced_anonymous subject = None else: authorization = request.META.get('HTTP_AUTHORIZATION') token_signature = '' sub = None if authorization: try: scopes, token_signature, sub = token_data(authorization) except _AuthorizationHeaderError as e: return e.response else: scopes = [] x_unique_id = request.META.get('HTTP_X_UNIQUE_ID') authz_func = authorize_function(scopes, token_signature, x_unique_id) subject = get_token_subject(sub) if len(min_scope) > 0 and not authz_func(min_scope): return insufficient_scope() request.is_authorized_for = authz_func request.get_token_subject = subject response = get_response(request) return response return middleware
[ "def", "authorization_middleware", "(", "get_response", ")", ":", "middleware_settings", "=", "settings", "(", ")", "logger", "=", "_create_logger", "(", "middleware_settings", ")", "min_scope", "=", "middleware_settings", "[", "'MIN_SCOPE'", "]", "def", "get_token_subject", "(", "sub", ")", ":", "return", "sub", "def", "always_ok", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "True", "def", "authorize_function", "(", "scopes", ",", "token_signature", ",", "x_unique_id", "=", "None", ")", ":", "\"\"\" Creates a partial around :func:`levels.is_authorized`\n that wraps the current user's scopes.\n\n :return func:\n \"\"\"", "log_msg_scopes", "=", "'Granted access (needed: {}, granted: {}, token: {})'", "def", "is_authorized", "(", "*", "needed_scopes", ")", ":", "granted_scopes", "=", "set", "(", "scopes", ")", "needed_scopes", "=", "set", "(", "needed_scopes", ")", "result", "=", "needed_scopes", ".", "issubset", "(", "granted_scopes", ")", "if", "result", ":", "msg", "=", "log_msg_scopes", ".", "format", "(", "needed_scopes", ",", "granted_scopes", ",", "token_signature", ")", "if", "x_unique_id", ":", "msg", "+=", "' X-Unique-ID: {}'", ".", "format", "(", "x_unique_id", ")", "logger", ".", "info", "(", "msg", ")", "return", "result", "return", "is_authorized", "def", "authorize_forced_anonymous", "(", "_", ")", ":", "\"\"\" Authorize function for routes that are forced anonymous\"\"\"", "raise", "Exception", "(", "'Should not call is_authorized_for in anonymous routes'", ")", "def", "insufficient_scope", "(", ")", ":", "\"\"\"Returns an HttpResponse object with a 401.\"\"\"", "msg", "=", "'Bearer realm=\"datapunt\", error=\"insufficient_scope\"'", "response", "=", "http", ".", "HttpResponse", "(", "'Unauthorized'", ",", "status", "=", "401", ")", "response", "[", "'WWW-Authenticate'", "]", "=", "msg", "return", "response", "def", "expired_token", "(", ")", ":", "\"\"\" Returns an HttpResponse object with a 401\n \"\"\"", "msg", "=", "'Bearer realm=\"datapunt\", error=\"expired_token\"'", "response", "=", "http", ".", "HttpResponse", "(", "'Unauthorized'", ",", "status", "=", "401", ")", "response", "[", "'WWW-Authenticate'", "]", "=", "msg", "return", "response", "def", "invalid_token", "(", ")", ":", "\"\"\" Returns an HttpResponse object with a 401\n \"\"\"", "msg", "=", "'Bearer realm=\"datapunt\", error=\"invalid_token\"'", "response", "=", "http", ".", "HttpResponse", "(", "'Unauthorized'", ",", "status", "=", "401", ")", "response", "[", "'WWW-Authenticate'", "]", "=", "msg", "return", "response", "def", "invalid_request", "(", ")", ":", "\"\"\" Returns an HttpResponse object with a 400\n \"\"\"", "msg", "=", "(", "\"Bearer realm=\\\"datapunt\\\", error=\\\"invalid_request\\\", \"", "\"error_description=\\\"Invalid Authorization header format; \"", "\"should be: 'Bearer [token]'\\\"\"", ")", "response", "=", "http", ".", "HttpResponse", "(", "'Bad Request'", ",", "status", "=", "400", ")", "response", "[", "'WWW-Authenticate'", "]", "=", "msg", "return", "response", "def", "token_data", "(", "authorization", ")", ":", "\"\"\" Get the token data present in the given authorization header.\n \"\"\"", "try", ":", "prefix", ",", "token", "=", "authorization", ".", "split", "(", ")", "except", "ValueError", ":", "logger", ".", "warning", "(", "'Invalid Authorization header: {}'", ".", "format", "(", "authorization", ")", ")", "raise", "_AuthorizationHeaderError", "(", "invalid_request", "(", ")", ")", "if", "prefix", ".", "lower", "(", ")", "!=", "'bearer'", ":", 
"logger", ".", "warning", "(", "'Invalid Authorization header: {}'", ".", "format", "(", "authorization", ")", ")", "raise", "_AuthorizationHeaderError", "(", "invalid_request", "(", ")", ")", "try", ":", "header", "=", "jwt", ".", "get_unverified_header", "(", "token", ")", "except", "jwt", ".", "ExpiredSignatureError", ":", "logger", ".", "info", "(", "\"Expired token\"", ")", "raise", "_AuthorizationHeaderError", "(", "expired_token", "(", ")", ")", "except", "(", "jwt", ".", "InvalidTokenError", ",", "jwt", ".", "DecodeError", ")", ":", "logger", ".", "exception", "(", "\"API authz problem: JWT decode error while reading header\"", ")", "raise", "_AuthorizationHeaderError", "(", "invalid_token", "(", ")", ")", "if", "'kid'", "not", "in", "header", ":", "logger", ".", "exception", "(", "\"Did not get a valid key identifier\"", ")", "raise", "_AuthorizationHeaderError", "(", "invalid_token", "(", ")", ")", "keys", "=", "middleware_settings", "[", "'JWKS'", "]", ".", "verifiers", "if", "header", "[", "'kid'", "]", "not", "in", "keys", ":", "logger", ".", "exception", "(", "\"Unknown key identifier: {}\"", ".", "format", "(", "header", "[", "'kid'", "]", ")", ")", "raise", "_AuthorizationHeaderError", "(", "invalid_token", "(", ")", ")", "key", "=", "keys", "[", "header", "[", "'kid'", "]", "]", "try", ":", "decoded", "=", "jwt", ".", "decode", "(", "token", ",", "key", "=", "key", ".", "key", ",", "algorithms", "=", "(", "key", ".", "alg", ",", ")", ")", "except", "jwt", ".", "InvalidTokenError", ":", "logger", ".", "exception", "(", "'API authz problem: could not decode access '", "'token {}'", ".", "format", "(", "token", ")", ")", "raise", "_AuthorizationHeaderError", "(", "invalid_token", "(", ")", ")", "if", "'scopes'", "not", "in", "decoded", ":", "logger", ".", "warning", "(", "'API authz problem: access token misses '", "'authz and scopes claim: {}'", ".", "format", "(", "token", ")", ")", "raise", "_AuthorizationHeaderError", "(", "invalid_token", "(", ")", ")", "else", ":", "scopes", "=", "decoded", "[", "'scopes'", "]", "if", "'sub'", "in", "decoded", ":", "sub", "=", "decoded", "[", "'sub'", "]", "else", ":", "sub", "=", "None", "token_signature", "=", "token", ".", "split", "(", "'.'", ")", "[", "2", "]", "return", "scopes", ",", "token_signature", ",", "sub", "def", "middleware", "(", "request", ")", ":", "\"\"\" Parses the Authorization header, decodes and validates the JWT and\n adds the is_authorized_for function to the request.\n \"\"\"", "request_path", "=", "request", ".", "path", "forced_anonymous", "=", "any", "(", "request_path", ".", "startswith", "(", "route", ")", "for", "route", "in", "middleware_settings", "[", "'FORCED_ANONYMOUS_ROUTES'", "]", ")", "if", "middleware_settings", "[", "'ALWAYS_OK'", "]", ":", "logger", ".", "warning", "(", "'API authz DISABLED'", ")", "request", ".", "is_authorized_for", "=", "always_ok", "request", ".", "get_token_subject", "=", "'ALWAYS_OK'", "return", "get_response", "(", "request", ")", "is_options", "=", "request", ".", "method", "==", "'OPTIONS'", "if", "forced_anonymous", "or", "is_options", ":", "authz_func", "=", "authorize_forced_anonymous", "subject", "=", "None", "else", ":", "authorization", "=", "request", ".", "META", ".", "get", "(", "'HTTP_AUTHORIZATION'", ")", "token_signature", "=", "''", "sub", "=", "None", "if", "authorization", ":", "try", ":", "scopes", ",", "token_signature", ",", "sub", "=", "token_data", "(", "authorization", ")", "except", "_AuthorizationHeaderError", "as", "e", ":", "return", "e", ".", 
"response", "else", ":", "scopes", "=", "[", "]", "x_unique_id", "=", "request", ".", "META", ".", "get", "(", "'HTTP_X_UNIQUE_ID'", ")", "authz_func", "=", "authorize_function", "(", "scopes", ",", "token_signature", ",", "x_unique_id", ")", "subject", "=", "get_token_subject", "(", "sub", ")", "if", "len", "(", "min_scope", ")", ">", "0", "and", "not", "authz_func", "(", "min_scope", ")", ":", "return", "insufficient_scope", "(", ")", "request", ".", "is_authorized_for", "=", "authz_func", "request", ".", "get_token_subject", "=", "subject", "response", "=", "get_response", "(", "request", ")", "return", "response", "return", "middleware" ]
Django middleware to parse incoming access tokens, validate them and
set an authorization function on the request.

The decision to use a generic middleware rather than an
AuthenticationMiddleware is explicitly made, because instances of the
latter come with a number of assumptions (i.e. that user.is_authorized()
exists, or that request.user uses the User model).

Example usage:

::

    request.is_authorized_for()

:param get_response: callable that creates the response object
:return: response
:todo:
    Two things need to be done when we can completely remove the
    Django JWT plugin:

    - Nested function 'middleware' allows both 'JWT' (not IANA-registered)
      and 'Bearer' as Authorization header prefix; JWT should not be
      accepted.
    - The Django JWT middleware does not include the authz claim, so this
      plugin does not fail if it is not present; this behavior is wrong
      when we no longer use the Django JWT plugin.
[ "Django", "middleware", "to", "parse", "incoming", "access", "tokens", "validate", "them", "and", "set", "an", "authorization", "function", "on", "the", "request", "." ]
python
train
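A hedged wiring sketch for the middleware above: the dotted path is inferred from this entry's module location, and the scope string is invented for illustration.

# settings.py
MIDDLEWARE = [
    # ...
    'authorization_django.middleware.authorization_middleware',  # assumed dotted path
]

# views.py: the middleware attaches is_authorized_for to every request
from django.http import HttpResponse, HttpResponseForbidden

def my_view(request):
    if not request.is_authorized_for('EXAMPLE/SCOPE'):  # hypothetical scope name
        return HttpResponseForbidden()
    return HttpResponse('ok')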
6809/MC6809
MC6809/components/mc6809_addressing.py
https://github.com/6809/MC6809/blob/6ba2f5106df46689017b5d0b6d84d43b7ee6a240/MC6809/components/mc6809_addressing.py#L76-L196
def get_ea_indexed(self): """ Calculate the address for all indexed addressing modes """ addr, postbyte = self.read_pc_byte() # log.debug("\tget_ea_indexed(): postbyte: $%02x (%s) from $%04x", # postbyte, byte2bit_string(postbyte), addr # ) rr = (postbyte >> 5) & 3 try: register_str = self.INDEX_POSTBYTE2STR[rr] except KeyError: raise RuntimeError("Register $%x doesn't exists! (postbyte: $%x)" % (rr, postbyte)) register_obj = self.register_str2object[register_str] register_value = register_obj.value # log.debug("\t%02x == register %s: value $%x", # rr, register_obj.name, register_value # ) if not is_bit_set(postbyte, bit=7): # bit 7 == 0 # EA = n, R - use 5-bit offset from post-byte offset = signed5(postbyte & 0x1f) ea = register_value + offset # log.debug( # "\tget_ea_indexed(): bit 7 == 0: reg.value: $%04x -> ea=$%04x + $%02x = $%04x", # register_value, register_value, offset, ea # ) return ea addr_mode = postbyte & 0x0f self.cycles += 1 offset = None # TODO: Optimized this, maybe use a dict mapping... if addr_mode == 0x0: # log.debug("\t0000 0x0 | ,R+ | increment by 1") ea = register_value register_obj.increment(1) elif addr_mode == 0x1: # log.debug("\t0001 0x1 | ,R++ | increment by 2") ea = register_value register_obj.increment(2) self.cycles += 1 elif addr_mode == 0x2: # log.debug("\t0010 0x2 | ,R- | decrement by 1") register_obj.decrement(1) ea = register_obj.value elif addr_mode == 0x3: # log.debug("\t0011 0x3 | ,R-- | decrement by 2") register_obj.decrement(2) ea = register_obj.value self.cycles += 1 elif addr_mode == 0x4: # log.debug("\t0100 0x4 | ,R | No offset") ea = register_value elif addr_mode == 0x5: # log.debug("\t0101 0x5 | B, R | B register offset") offset = signed8(self.accu_b.value) elif addr_mode == 0x6: # log.debug("\t0110 0x6 | A, R | A register offset") offset = signed8(self.accu_a.value) elif addr_mode == 0x8: # log.debug("\t1000 0x8 | n, R | 8 bit offset") offset = signed8(self.read_pc_byte()[1]) elif addr_mode == 0x9: # log.debug("\t1001 0x9 | n, R | 16 bit offset") offset = signed16(self.read_pc_word()[1]) self.cycles += 1 elif addr_mode == 0xa: # log.debug("\t1010 0xa | illegal, set ea=0") ea = 0 elif addr_mode == 0xb: # log.debug("\t1011 0xb | D, R | D register offset") # D - 16 bit concatenated reg. (A + B) offset = signed16(self.accu_d.value) # FIXME: signed16() ok? 
self.cycles += 1 elif addr_mode == 0xc: # log.debug("\t1100 0xc | n, PCR | 8 bit offset from program counter") __, value = self.read_pc_byte() value_signed = signed8(value) ea = self.program_counter.value + value_signed # log.debug("\tea = pc($%x) + $%x = $%x (dez.: %i + %i = %i)", # self.program_counter, value_signed, ea, # self.program_counter, value_signed, ea, # ) elif addr_mode == 0xd: # log.debug("\t1101 0xd | n, PCR | 16 bit offset from program counter") __, value = self.read_pc_word() value_signed = signed16(value) ea = self.program_counter.value + value_signed self.cycles += 1 # log.debug("\tea = pc($%x) + $%x = $%x (dez.: %i + %i = %i)", # self.program_counter, value_signed, ea, # self.program_counter, value_signed, ea, # ) elif addr_mode == 0xe: # log.error("\tget_ea_indexed(): illegal address mode, use 0xffff") ea = 0xffff # illegal elif addr_mode == 0xf: # log.debug("\t1111 0xf | [n] | 16 bit address - extended indirect") __, ea = self.read_pc_word() else: raise RuntimeError("Illegal indexed addressing mode: $%x" % addr_mode) if offset is not None: ea = register_value + offset # log.debug("\t$%x + $%x = $%x (dez: %i + %i = %i)", # register_value, offset, ea, # register_value, offset, ea # ) ea = ea & 0xffff if is_bit_set(postbyte, bit=4): # bit 4 is 1 -> Indirect # log.debug("\tIndirect addressing: get new ea from $%x", ea) ea = self.memory.read_word(ea) # log.debug("\tIndirect addressing: new ea is $%x", ea) # log.debug("\tget_ea_indexed(): return ea=$%x", ea) return ea
[ "def", "get_ea_indexed", "(", "self", ")", ":", "addr", ",", "postbyte", "=", "self", ".", "read_pc_byte", "(", ")", "# log.debug(\"\\tget_ea_indexed(): postbyte: $%02x (%s) from $%04x\",", "# postbyte, byte2bit_string(postbyte), addr", "# )", "rr", "=", "(", "postbyte", ">>", "5", ")", "&", "3", "try", ":", "register_str", "=", "self", ".", "INDEX_POSTBYTE2STR", "[", "rr", "]", "except", "KeyError", ":", "raise", "RuntimeError", "(", "\"Register $%x doesn't exists! (postbyte: $%x)\"", "%", "(", "rr", ",", "postbyte", ")", ")", "register_obj", "=", "self", ".", "register_str2object", "[", "register_str", "]", "register_value", "=", "register_obj", ".", "value", "# log.debug(\"\\t%02x == register %s: value $%x\",", "# rr, register_obj.name, register_value", "# )", "if", "not", "is_bit_set", "(", "postbyte", ",", "bit", "=", "7", ")", ":", "# bit 7 == 0", "# EA = n, R - use 5-bit offset from post-byte", "offset", "=", "signed5", "(", "postbyte", "&", "0x1f", ")", "ea", "=", "register_value", "+", "offset", "# log.debug(", "# \"\\tget_ea_indexed(): bit 7 == 0: reg.value: $%04x -> ea=$%04x + $%02x = $%04x\",", "# register_value, register_value, offset, ea", "# )", "return", "ea", "addr_mode", "=", "postbyte", "&", "0x0f", "self", ".", "cycles", "+=", "1", "offset", "=", "None", "# TODO: Optimized this, maybe use a dict mapping...", "if", "addr_mode", "==", "0x0", ":", "# log.debug(\"\\t0000 0x0 | ,R+ | increment by 1\")", "ea", "=", "register_value", "register_obj", ".", "increment", "(", "1", ")", "elif", "addr_mode", "==", "0x1", ":", "# log.debug(\"\\t0001 0x1 | ,R++ | increment by 2\")", "ea", "=", "register_value", "register_obj", ".", "increment", "(", "2", ")", "self", ".", "cycles", "+=", "1", "elif", "addr_mode", "==", "0x2", ":", "# log.debug(\"\\t0010 0x2 | ,R- | decrement by 1\")", "register_obj", ".", "decrement", "(", "1", ")", "ea", "=", "register_obj", ".", "value", "elif", "addr_mode", "==", "0x3", ":", "# log.debug(\"\\t0011 0x3 | ,R-- | decrement by 2\")", "register_obj", ".", "decrement", "(", "2", ")", "ea", "=", "register_obj", ".", "value", "self", ".", "cycles", "+=", "1", "elif", "addr_mode", "==", "0x4", ":", "# log.debug(\"\\t0100 0x4 | ,R | No offset\")", "ea", "=", "register_value", "elif", "addr_mode", "==", "0x5", ":", "# log.debug(\"\\t0101 0x5 | B, R | B register offset\")", "offset", "=", "signed8", "(", "self", ".", "accu_b", ".", "value", ")", "elif", "addr_mode", "==", "0x6", ":", "# log.debug(\"\\t0110 0x6 | A, R | A register offset\")", "offset", "=", "signed8", "(", "self", ".", "accu_a", ".", "value", ")", "elif", "addr_mode", "==", "0x8", ":", "# log.debug(\"\\t1000 0x8 | n, R | 8 bit offset\")", "offset", "=", "signed8", "(", "self", ".", "read_pc_byte", "(", ")", "[", "1", "]", ")", "elif", "addr_mode", "==", "0x9", ":", "# log.debug(\"\\t1001 0x9 | n, R | 16 bit offset\")", "offset", "=", "signed16", "(", "self", ".", "read_pc_word", "(", ")", "[", "1", "]", ")", "self", ".", "cycles", "+=", "1", "elif", "addr_mode", "==", "0xa", ":", "# log.debug(\"\\t1010 0xa | illegal, set ea=0\")", "ea", "=", "0", "elif", "addr_mode", "==", "0xb", ":", "# log.debug(\"\\t1011 0xb | D, R | D register offset\")", "# D - 16 bit concatenated reg. 
(A + B)", "offset", "=", "signed16", "(", "self", ".", "accu_d", ".", "value", ")", "# FIXME: signed16() ok?", "self", ".", "cycles", "+=", "1", "elif", "addr_mode", "==", "0xc", ":", "# log.debug(\"\\t1100 0xc | n, PCR | 8 bit offset from program counter\")", "__", ",", "value", "=", "self", ".", "read_pc_byte", "(", ")", "value_signed", "=", "signed8", "(", "value", ")", "ea", "=", "self", ".", "program_counter", ".", "value", "+", "value_signed", "# log.debug(\"\\tea = pc($%x) + $%x = $%x (dez.: %i + %i = %i)\",", "# self.program_counter, value_signed, ea,", "# self.program_counter, value_signed, ea,", "# )", "elif", "addr_mode", "==", "0xd", ":", "# log.debug(\"\\t1101 0xd | n, PCR | 16 bit offset from program counter\")", "__", ",", "value", "=", "self", ".", "read_pc_word", "(", ")", "value_signed", "=", "signed16", "(", "value", ")", "ea", "=", "self", ".", "program_counter", ".", "value", "+", "value_signed", "self", ".", "cycles", "+=", "1", "# log.debug(\"\\tea = pc($%x) + $%x = $%x (dez.: %i + %i = %i)\",", "# self.program_counter, value_signed, ea,", "# self.program_counter, value_signed, ea,", "# )", "elif", "addr_mode", "==", "0xe", ":", "# log.error(\"\\tget_ea_indexed(): illegal address mode, use 0xffff\")", "ea", "=", "0xffff", "# illegal", "elif", "addr_mode", "==", "0xf", ":", "# log.debug(\"\\t1111 0xf | [n] | 16 bit address - extended indirect\")", "__", ",", "ea", "=", "self", ".", "read_pc_word", "(", ")", "else", ":", "raise", "RuntimeError", "(", "\"Illegal indexed addressing mode: $%x\"", "%", "addr_mode", ")", "if", "offset", "is", "not", "None", ":", "ea", "=", "register_value", "+", "offset", "# log.debug(\"\\t$%x + $%x = $%x (dez: %i + %i = %i)\",", "# register_value, offset, ea,", "# register_value, offset, ea", "# )", "ea", "=", "ea", "&", "0xffff", "if", "is_bit_set", "(", "postbyte", ",", "bit", "=", "4", ")", ":", "# bit 4 is 1 -> Indirect", "# log.debug(\"\\tIndirect addressing: get new ea from $%x\", ea)", "ea", "=", "self", ".", "memory", ".", "read_word", "(", "ea", ")", "# log.debug(\"\\tIndirect addressing: new ea is $%x\", ea)", "# log.debug(\"\\tget_ea_indexed(): return ea=$%x\", ea)", "return", "ea" ]
Calculate the address for all indexed addressing modes
[ "Calculate", "the", "address", "for", "all", "indexed", "addressing", "modes" ]
python
train
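The 5-bit offset branch in get_ea_indexed above depends on two's-complement decoding of the low post-byte bits; this standalone sketch shows what a signed5 helper has to compute, and is not the emulator's own implementation.

def signed5(value):
    """Interpret the low 5 bits as a two's-complement integer in [-16, 15]."""
    return value - 0x20 if value & 0x10 else value

assert signed5(0x0f) == 15    # %01111
assert signed5(0x10) == -16   # %10000
assert signed5(0x1f) == -1    # %11111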
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/lib/mp_util.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/lib/mp_util.py#L207-L214
def wxToPIL(wimg): '''convert a wxImage to a PIL Image''' from PIL import Image (w,h) = wimg.GetSize() d = wimg.GetData() pimg = Image.new("RGB", (w,h), color=1) pimg.fromstring(d) return pimg
[ "def", "wxToPIL", "(", "wimg", ")", ":", "from", "PIL", "import", "Image", "(", "w", ",", "h", ")", "=", "wimg", ".", "GetSize", "(", ")", "d", "=", "wimg", ".", "GetData", "(", ")", "pimg", "=", "Image", ".", "new", "(", "\"RGB\"", ",", "(", "w", ",", "h", ")", ",", "color", "=", "1", ")", "pimg", ".", "fromstring", "(", "d", ")", "return", "pimg" ]
convert a wxImage to a PIL Image
[ "convert", "a", "wxImage", "to", "a", "PIL", "Image" ]
python
train
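Image.fromstring was removed from modern Pillow, so the call above only runs on old PIL; a hedged present-day equivalent of the same conversion, assuming the same packed 24-bit RGB layout wxPython returns:

from PIL import Image

def wx_to_pil(wimg):
    w, h = wimg.GetSize()
    return Image.frombytes("RGB", (w, h), bytes(wimg.GetData()))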
crytic/slither
slither/printers/functions/cfg.py
https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/printers/functions/cfg.py#L14-L25
def output(self, original_filename): """ _filename is not used Args: _filename(string) """ for contract in self.contracts: for function in contract.functions + contract.modifiers: filename = "{}-{}-{}.dot".format(original_filename, contract.name, function.full_name) self.info('Export {}'.format(filename)) function.slithir_cfg_to_dot(filename)
[ "def", "output", "(", "self", ",", "original_filename", ")", ":", "for", "contract", "in", "self", ".", "contracts", ":", "for", "function", "in", "contract", ".", "functions", "+", "contract", ".", "modifiers", ":", "filename", "=", "\"{}-{}-{}.dot\"", ".", "format", "(", "original_filename", ",", "contract", ".", "name", ",", "function", ".", "full_name", ")", "self", ".", "info", "(", "'Export {}'", ".", "format", "(", "filename", ")", ")", "function", ".", "slithir_cfg_to_dot", "(", "filename", ")" ]
_filename is not used Args: _filename(string)
[ "_filename", "is", "not", "used", "Args", ":", "_filename", "(", "string", ")" ]
python
train
dmckeone/frosty
frosty/freezers.py
https://github.com/dmckeone/frosty/blob/868d81e72b6c8e354af3697531c20f116cd1fc9a/frosty/freezers.py#L195-L219
def resolve_freezer(freezer):
    """
    Locate the appropriate freezer given FREEZER or string input from the programmer.

    :param freezer: FREEZER constant or string for the freezer that is requested.  (None = FREEZER.DEFAULT)
    :return:
    """
    # Set default freezer if there was none
    if not freezer:
        return _Default()

    # Allow character based lookups as well
    if isinstance(freezer, six.string_types):
        cls = _freezer_lookup(freezer)
        return cls()

    # Allow plain class definition lookups (we instantiate the class)
    if freezer.__class__ == type.__class__:
        return freezer()

    # Warn when a custom freezer implementation is used.
    if freezer not in FREEZER.ALL:
        warn(u"Using custom freezer implementation: {0}".format(freezer))

    return freezer
[ "def", "resolve_freezer", "(", "freezer", ")", ":", "# Set default freezer if there was none", "if", "not", "freezer", ":", "return", "_Default", "(", ")", "# Allow character based lookups as well", "if", "isinstance", "(", "freezer", ",", "six", ".", "string_types", ")", ":", "cls", "=", "_freezer_lookup", "(", "freezer", ")", "return", "cls", "(", ")", "# Allow plain class definition lookups (we instantiate the class)", "if", "freezer", ".", "__class__", "==", "type", ".", "__class__", ":", "return", "freezer", "(", ")", "# Warn when a custom freezer implementation is used.", "if", "freezer", "not", "in", "FREEZER", ".", "ALL", ":", "warn", "(", "u\"Using custom freezer implelmentation: {0}\"", ".", "format", "(", "freezer", ")", ")", "return", "freezer" ]
Locate the appropriate freezer given FREEZER or string input from the programmer. :param freezer: FREEZER constant or string for the freezer that is requested. (None = FREEZER.DEFAULT) :return:
[ "Locate", "the", "appropriate", "freezer", "given", "FREEZER", "or", "string", "input", "from", "the", "programmer", "." ]
python
train
SeattleTestbed/seash
modules/factoids/__init__.py
https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/modules/factoids/__init__.py#L23-L65
def initialize():
  """
  <Purpose>
    Used to print random seash factoid when user runs seash.

  <Arguments>
    None

  <Side Effects>
    Prints random factoid onto the screen.

  <Exceptions>
    UserError: Error during generating path to "factoid.txt" file or
               Error while opening, reading or closing "factoid.txt" file.

  <Return>
    None
  """
  # Global 'factoids' list will be used to store factoids, fetched from a file.
  global factoids

  # Path to "factoid.txt" file is created.
  try:
    current_path = os.getcwd()
    file_path = os.path.join(current_path, "modules", "factoids", "factoid.txt")
  except OSError, error:
    raise seash_exceptions.InitializeError("Error during initializing factoids module: '" + str(error) + "'.")

  # We have to fetch list of factoids from "factoid.txt" file.
  try:
    file_object = open(file_path, 'r')
    factoids_temp = file_object.readlines()
    file_object.close()
  except IOError, error:
    raise seash_exceptions.InitializeError("Error during initializing factoids module: '" + str(error) + "'.")

  # Newline characters in a list, read from a file are removed.
  for factoid in factoids_temp:
    factoids.append(factoid.strip('\n'))

  # A random factoid is printed every time user runs seash.
  print random.choice(factoids)+"\n"
[ "def", "initialize", "(", ")", ":", "# Global 'factoids' list will be used to store factoids, fetched from a file.\r", "global", "factoids", "# Path to \"factoid.txt\" file is created.\r", "try", ":", "current_path", "=", "os", ".", "getcwd", "(", ")", "file_path", "=", "os", ".", "path", ".", "join", "(", "current_path", ",", "\"modules\"", ",", "\"factoids\"", ",", "\"factoid.txt\"", ")", "except", "OSError", ",", "error", ":", "raise", "seash_exceptions", ".", "InitializeError", "(", "\"Error during initializing factoids module: '\"", "+", "str", "(", "error", ")", "+", "\"'.\"", ")", "# We have to fatch list of factoids from \"factoid.txt\" file.\r", "try", ":", "file_object", "=", "open", "(", "file_path", ",", "'r'", ")", "factoids_temp", "=", "file_object", ".", "readlines", "(", ")", "file_object", ".", "close", "(", ")", "except", "IOError", ",", "error", ":", "raise", "seash_exceptions", ".", "InitializeError", "(", "\"Error during initializing factoids module: '\"", "+", "str", "(", "error", ")", "+", "\"'.\"", ")", "# Newline characters in a list, read from a file are removed.\r", "for", "factoid", "in", "factoids_temp", ":", "factoids", ".", "append", "(", "factoid", ".", "strip", "(", "'\\n'", ")", ")", "# A random factoid is printed every time user runs seash.\r", "print", "random", ".", "choice", "(", "factoids", ")", "+", "\"\\n\"" ]
<Purpose> Used to print random seash factoid when user runs seash. <Arguments> None <Side Effects> Prints random factoid onto the screen. <Exceptions> UserError: Error during generating path to "factoid.txt" file or Error while opening, reading or closing "factoid.txt" file. <Return> None
[ "<Purpose", ">", "Used", "to", "print", "random", "seash", "factoid", "when", "user", "runs", "seash", ".", "<Arguments", ">", "None", "<Side", "Effects", ">", "Prints", "random", "factoid", "onto", "the", "screen", ".", "<Exceptions", ">", "UserError", ":", "Error", "during", "generating", "path", "to", "factoid", ".", "txt", "file", "or", "Error", "while", "opening", "reading", "or", "closing", "factoid", ".", "txt", "file", ".", "<Return", ">", "None" ]
python
train
mnick/scikit-tensor
sktensor/cp.py
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/cp.py#L190-L205
def _init(init, X, N, rank, dtype):
    """
    Initialization for CP models
    """
    Uinit = [None for _ in range(N)]
    if isinstance(init, list):
        Uinit = init
    elif init == 'random':
        for n in range(1, N):
            Uinit[n] = array(rand(X.shape[n], rank), dtype=dtype)
    elif init == 'nvecs':
        for n in range(1, N):
            Uinit[n] = array(nvecs(X, n, rank), dtype=dtype)
    else:
        raise ValueError('Unknown option (init=%s)' % str(init))
    return Uinit
[ "def", "_init", "(", "init", ",", "X", ",", "N", ",", "rank", ",", "dtype", ")", ":", "Uinit", "=", "[", "None", "for", "_", "in", "range", "(", "N", ")", "]", "if", "isinstance", "(", "init", ",", "list", ")", ":", "Uinit", "=", "init", "elif", "init", "==", "'random'", ":", "for", "n", "in", "range", "(", "1", ",", "N", ")", ":", "Uinit", "[", "n", "]", "=", "array", "(", "rand", "(", "X", ".", "shape", "[", "n", "]", ",", "rank", ")", ",", "dtype", "=", "dtype", ")", "elif", "init", "==", "'nvecs'", ":", "for", "n", "in", "range", "(", "1", ",", "N", ")", ":", "Uinit", "[", "n", "]", "=", "array", "(", "nvecs", "(", "X", ",", "n", ",", "rank", ")", ",", "dtype", "=", "dtype", ")", "else", ":", "raise", "'Unknown option (init=%s)'", "%", "str", "(", "init", ")", "return", "Uinit" ]
Initialization for CP models
[ "Initialization", "for", "CP", "models" ]
python
train
zimeon/iiif
iiif/flask_utils.py
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L68-L98
def identifiers(config): """Show list of identifiers for this prefix. Handles both the case of local file based identifiers and also image generators. Arguments: config - configuration object in which: config.klass_name - 'gen' if a generator function config.generator_dir - directory for generator code config.image_dir - directory for images Returns: ids - a list of ids """ ids = [] if (config.klass_name == 'gen'): for generator in os.listdir(config.generator_dir): if (generator == '__init__.py'): continue (gid, ext) = os.path.splitext(generator) if (ext == '.py' and os.path.isfile(os.path.join(config.generator_dir, generator))): ids.append(gid) else: for image_file in os.listdir(config.image_dir): (iid, ext) = os.path.splitext(image_file) if (ext in ['.jpg', '.png', '.tif'] and os.path.isfile(os.path.join(config.image_dir, image_file))): ids.append(iid) return ids
[ "def", "identifiers", "(", "config", ")", ":", "ids", "=", "[", "]", "if", "(", "config", ".", "klass_name", "==", "'gen'", ")", ":", "for", "generator", "in", "os", ".", "listdir", "(", "config", ".", "generator_dir", ")", ":", "if", "(", "generator", "==", "'__init__.py'", ")", ":", "continue", "(", "gid", ",", "ext", ")", "=", "os", ".", "path", ".", "splitext", "(", "generator", ")", "if", "(", "ext", "==", "'.py'", "and", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "config", ".", "generator_dir", ",", "generator", ")", ")", ")", ":", "ids", ".", "append", "(", "gid", ")", "else", ":", "for", "image_file", "in", "os", ".", "listdir", "(", "config", ".", "image_dir", ")", ":", "(", "iid", ",", "ext", ")", "=", "os", ".", "path", ".", "splitext", "(", "image_file", ")", "if", "(", "ext", "in", "[", "'.jpg'", ",", "'.png'", ",", "'.tif'", "]", "and", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "config", ".", "image_dir", ",", "image_file", ")", ")", ")", ":", "ids", ".", "append", "(", "iid", ")", "return", "ids" ]
Show list of identifiers for this prefix. Handles both the case of local file based identifiers and also image generators. Arguments: config - configuration object in which: config.klass_name - 'gen' if a generator function config.generator_dir - directory for generator code config.image_dir - directory for images Returns: ids - a list of ids
[ "Show", "list", "of", "identifiers", "for", "this", "prefix", "." ]
python
train
edx/edx-django-utils
edx_django_utils/cache/utils.py
https://github.com/edx/edx-django-utils/blob/16cb4ac617e53c572bf68ccd19d24afeff1ca769/edx_django_utils/cache/utils.py#L240-L250
def _set_request_cache_if_django_cache_hit(key, django_cached_response): """ Sets the value in the request cache if the django cached response was a hit. Args: key (string) django_cached_response (CachedResponse) """ if django_cached_response.is_found: DEFAULT_REQUEST_CACHE.set(key, django_cached_response.value)
[ "def", "_set_request_cache_if_django_cache_hit", "(", "key", ",", "django_cached_response", ")", ":", "if", "django_cached_response", ".", "is_found", ":", "DEFAULT_REQUEST_CACHE", ".", "set", "(", "key", ",", "django_cached_response", ".", "value", ")" ]
Sets the value in the request cache if the django cached response was a hit. Args: key (string) django_cached_response (CachedResponse)
[ "Sets", "the", "value", "in", "the", "request", "cache", "if", "the", "django", "cached", "response", "was", "a", "hit", "." ]
python
train
fangpenlin/pyramid-handy
pyramid_handy/tweens/api_headers.py
https://github.com/fangpenlin/pyramid-handy/blob/e3cbc19224ab1f0a14aab556990bceabd2d1f658/pyramid_handy/tweens/api_headers.py#L62-L73
def api_headers_tween_factory(handler, registry): """This tween provides necessary API headers """ def api_headers_tween(request): response = handler(request) set_version(request, response) set_req_guid(request, response) return response return api_headers_tween
[ "def", "api_headers_tween_factory", "(", "handler", ",", "registry", ")", ":", "def", "api_headers_tween", "(", "request", ")", ":", "response", "=", "handler", "(", "request", ")", "set_version", "(", "request", ",", "response", ")", "set_req_guid", "(", "request", ",", "response", ")", "return", "response", "return", "api_headers_tween" ]
This tween provides necessary API headers
[ "This", "tween", "provides", "necessary", "API", "headers" ]
python
train
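A registration sketch using Pyramid's standard add_tween hook; the dotted name is taken directly from this entry's module path.

def includeme(config):
    config.add_tween('pyramid_handy.tweens.api_headers.api_headers_tween_factory')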
AirtestProject/Poco
poco/utils/net/transport/simple_wss.py
https://github.com/AirtestProject/Poco/blob/2c559a586adf3fd11ee81cabc446d4d3f6f2d119/poco/utils/net/transport/simple_wss.py#L341-L353
def sendFragmentStart(self, data): """ Send the start of a data fragment stream to a websocket client. Subsequent data should be sent using sendFragment(). A fragment stream is completed when sendFragmentEnd() is called. If data is a unicode object then the frame is sent as Text. If the data is a bytearray object then the frame is sent as Binary. """ opcode = BINARY if _check_unicode(data): opcode = TEXT self._sendMessage(True, opcode, data)
[ "def", "sendFragmentStart", "(", "self", ",", "data", ")", ":", "opcode", "=", "BINARY", "if", "_check_unicode", "(", "data", ")", ":", "opcode", "=", "TEXT", "self", ".", "_sendMessage", "(", "True", ",", "opcode", ",", "data", ")" ]
Send the start of a data fragment stream to a websocket client. Subsequent data should be sent using sendFragment(). A fragment stream is completed when sendFragmentEnd() is called. If data is a unicode object then the frame is sent as Text. If the data is a bytearray object then the frame is sent as Binary.
[ "Send", "the", "start", "of", "a", "data", "fragment", "stream", "to", "a", "websocket", "client", ".", "Subsequent", "data", "should", "be", "sent", "using", "sendFragment", "()", ".", "A", "fragment", "stream", "is", "completed", "when", "sendFragmentEnd", "()", "is", "called", "." ]
python
train
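A usage sketch of the fragment stream protocol the docstring describes; `client` is assumed to be a connected handler instance, and sendFragment and sendFragmentEnd are the companion methods the docstring names.

client.sendFragmentStart(u'first chunk, opcode chosen by payload type ')
client.sendFragment(u'middle chunk ')
client.sendFragmentEnd(u'final chunk')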
phoebe-project/phoebe2
phoebe/dependencies/autofig/call.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/dependencies/autofig/call.py#L1422-L1441
def interpolate_at_i(self, i, unit=None):
        """
        access the interpolated value at a given value of i
        (independent-variable)
        """
        if isinstance(self.call.i._value, float):
            if self.call.i._value==i:
                return self._to_unit(self._value, unit)
            else:
                return None

        # we can't call i._value here because that may point to a string, and
        # we want this to resolve the array
        i_value = self.call.i.get_value(linebreak=False, sort_by_indep=False)
        if len(i_value) != len(self._value):
            raise ValueError("length mismatch with independent-variable")

        sort_inds = i_value.argsort()
        indep_value = i_value[sort_inds]
        this_value = self._value[sort_inds]
        return self._to_unit(np.interp(i, indep_value, this_value, left=np.nan, right=np.nan), unit)
[ "def", "interpolate_at_i", "(", "self", ",", "i", ",", "unit", "=", "None", ")", ":", "if", "isinstance", "(", "self", ".", "call", ".", "i", ".", "_value", ",", "float", ")", ":", "if", "self", ".", "call", ".", "i", ".", "_value", "==", "i", ":", "return", "self", ".", "_to_unit", "(", "self", ".", "_value", ",", "unit", ")", "else", ":", "return", "None", "# we can't call i._value here because that may point to a string, and", "# we want this to resolve the array", "i_value", "=", "self", ".", "call", ".", "i", ".", "get_value", "(", "linebreak", "=", "False", ",", "sort_by_indep", "=", "False", ")", "if", "len", "(", "i_value", ")", "!=", "len", "(", "self", ".", "_value", ")", ":", "raise", "ValueError", "(", "\"length mismatch with independent-variable\"", ")", "sort_inds", "=", "i_value", ".", "argsort", "(", ")", "indep_value", "=", "i_value", "[", "sort_inds", "]", "this_value", "=", "self", ".", "_value", "[", "sort_inds", "]", "return", "self", ".", "_to_unit", "(", "np", ".", "interp", "(", "i", ",", "indep_value", ",", "this_value", ",", "left", "=", "np", ".", "nan", ",", "right", "=", "np", ".", "nan", ")", ",", "unit", ")" ]
access the interpolated value at a given value of i
(independent-variable)
[ "access", "the", "interpolated", "value", "at", "a", "give", "value", "of", "i", "(", "independent", "-", "variable", ")" ]
python
train
django-extensions/django-extensions
django_extensions/management/commands/syncdata.py
https://github.com/django-extensions/django-extensions/blob/7e0bef97ea6cb7f9eea5e2528e3a985a83a7b9b8/django_extensions/management/commands/syncdata.py#L44-L70
def remove_objects_not_in(self, objects_to_keep, verbosity): """ Delete all the objects in the database that are not in objects_to_keep. - objects_to_keep: A map where the keys are classes, and the values are a set of the objects of that class we should keep. """ for class_ in objects_to_keep.keys(): current = class_.objects.all() current_ids = set([x.pk for x in current]) keep_ids = set([x.pk for x in objects_to_keep[class_]]) remove_these_ones = current_ids.difference(keep_ids) if remove_these_ones: for obj in current: if obj.pk in remove_these_ones: obj.delete() if verbosity >= 2: print("Deleted object: %s" % six.u(obj)) if verbosity > 0 and remove_these_ones: num_deleted = len(remove_these_ones) if num_deleted > 1: type_deleted = six.u(class_._meta.verbose_name_plural) else: type_deleted = six.u(class_._meta.verbose_name) print("Deleted %s %s" % (str(num_deleted), type_deleted))
[ "def", "remove_objects_not_in", "(", "self", ",", "objects_to_keep", ",", "verbosity", ")", ":", "for", "class_", "in", "objects_to_keep", ".", "keys", "(", ")", ":", "current", "=", "class_", ".", "objects", ".", "all", "(", ")", "current_ids", "=", "set", "(", "[", "x", ".", "pk", "for", "x", "in", "current", "]", ")", "keep_ids", "=", "set", "(", "[", "x", ".", "pk", "for", "x", "in", "objects_to_keep", "[", "class_", "]", "]", ")", "remove_these_ones", "=", "current_ids", ".", "difference", "(", "keep_ids", ")", "if", "remove_these_ones", ":", "for", "obj", "in", "current", ":", "if", "obj", ".", "pk", "in", "remove_these_ones", ":", "obj", ".", "delete", "(", ")", "if", "verbosity", ">=", "2", ":", "print", "(", "\"Deleted object: %s\"", "%", "six", ".", "u", "(", "obj", ")", ")", "if", "verbosity", ">", "0", "and", "remove_these_ones", ":", "num_deleted", "=", "len", "(", "remove_these_ones", ")", "if", "num_deleted", ">", "1", ":", "type_deleted", "=", "six", ".", "u", "(", "class_", ".", "_meta", ".", "verbose_name_plural", ")", "else", ":", "type_deleted", "=", "six", ".", "u", "(", "class_", ".", "_meta", ".", "verbose_name", ")", "print", "(", "\"Deleted %s %s\"", "%", "(", "str", "(", "num_deleted", ")", ",", "type_deleted", ")", ")" ]
Delete all the objects in the database that are not in objects_to_keep.
- objects_to_keep: A map where the keys are classes, and the values are a
  set of the objects of that class we should keep.
[ "Delete", "all", "the", "objects", "in", "the", "database", "that", "are", "not", "in", "objects_to_keep", ".", "-", "objects_to_keep", ":", "A", "map", "where", "the", "keys", "are", "classes", "and", "the", "values", "are", "a", "set", "of", "the", "objects", "of", "that", "class", "we", "should", "keep", "." ]
python
train
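The deletion logic in `remove_objects_not_in` is a set difference on primary keys. A hedged sketch of the same keep-set pattern, with a plain dict standing in for `class_.objects.all()`:

# Compute the ids to drop as a set difference, then delete them.
current = {1: 'a', 2: 'b', 3: 'c'}
keep_ids = {1, 3}
remove_these_ones = set(current) - keep_ids
for pk in remove_these_ones:
    del current[pk]
print(current)  # {1: 'a', 3: 'c'}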
HDI-Project/RDT
rdt/transformers/datetime.py
https://github.com/HDI-Project/RDT/blob/b28fdd671a1d7fbd14983eefe0cfbd8d87ded92a/rdt/transformers/datetime.py#L35-L48
def transform(self, col):
    """Prepare the transformer to convert data and return the processed table.

    Args:
        col(pandas.DataFrame): Data to transform.

    Returns:
        pandas.DataFrame
    """
    out = pd.DataFrame()
    out[self.col_name] = self.safe_datetime_cast(col)
    out[self.col_name] = self.to_timestamp(out)
    return out
[ "def", "transform", "(", "self", ",", "col", ")", ":", "out", "=", "pd", ".", "DataFrame", "(", ")", "out", "[", "self", ".", "col_name", "]", "=", "self", ".", "safe_datetime_cast", "(", "col", ")", "out", "[", "self", ".", "col_name", "]", "=", "self", ".", "to_timestamp", "(", "out", ")", "return", "out" ]
Prepare the transformer to convert data and return the processed table.

Args:
    col(pandas.DataFrame): Data to transform.

Returns:
    pandas.DataFrame
[ "Prepare", "the", "transformer", "to", "convert", "data", "and", "return", "the", "processed", "table", "." ]
python
train
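The `transform` above delegates to `safe_datetime_cast` and `to_timestamp`, which are not shown in this record. A rough standalone sketch of the likely effect, assuming the helpers cast to datetime and then to integer nanosecond timestamps:

import pandas as pd

col = pd.Series(['2019-01-01', '2019-06-15'])
out = pd.DataFrame()
# Cast strings to datetime64, coercing unparseable values to NaT.
out['date'] = pd.to_datetime(col, errors='coerce')
# Store as integer nanoseconds since the epoch (no NaT present here).
out['date'] = out['date'].astype('int64')
print(out)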
Alir3z4/django-databrowse
django_databrowse/sites.py
https://github.com/Alir3z4/django-databrowse/blob/4469495cd47a0da506ddf4e8cc752c2f453e0339/django_databrowse/sites.py#L150-L164
def root(self, request, url):
    """
    Handles main URL routing for the databrowse app.

    `url` is the remainder of the URL -- e.g. 'comments/comment/'.
    """
    self.root_url = request.path[:len(request.path) - len(url)]
    url = url.rstrip('/')  # Trim trailing slash, if it exists.

    if url == '':
        return self.index(request)
    elif '/' in url:
        return self.model_page(request, *url.split('/', 2))

    raise http.Http404('The requested databrowse page does not exist.')
[ "def", "root", "(", "self", ",", "request", ",", "url", ")", ":", "self", ".", "root_url", "=", "request", ".", "path", "[", ":", "len", "(", "request", ".", "path", ")", "-", "len", "(", "url", ")", "]", "url", "=", "url", ".", "rstrip", "(", "'/'", ")", "# Trim trailing slash, if it exists.", "if", "url", "==", "''", ":", "return", "self", ".", "index", "(", "request", ")", "elif", "'/'", "in", "url", ":", "return", "self", ".", "model_page", "(", "request", ",", "*", "url", ".", "split", "(", "'/'", ",", "2", ")", ")", "raise", "http", ".", "Http404", "(", "'The requested databrowse page does not exist.'", ")" ]
Handles main URL routing for the databrowse app. `url` is the remainder of the URL -- e.g. 'comments/comment/'.
[ "Handles", "main", "URL", "routing", "for", "the", "databrowse", "app", "." ]
python
train
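The routing in `root` is a simple dispatch on the trimmed URL. A standalone sketch of just that dispatch (plain return values and `KeyError` stand in for Django's request handling and `Http404`):

def route(url):
    url = url.rstrip('/')  # trim trailing slash, as in the original
    if url == '':
        return 'index'
    elif '/' in url:
        return ('model_page',) + tuple(url.split('/', 2))
    raise KeyError('no such page: %r' % url)

print(route(''))                    # 'index'
print(route('comments/comment/'))  # ('model_page', 'comments', 'comment')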
twitterdev/search-tweets-python
searchtweets/utils.py
https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/utils.py#L100-L140
def write_result_stream(result_stream, filename_prefix=None,
                        results_per_file=None, **kwargs):
    """
    Wraps a ``ResultStream`` object to save it to a file. This function will
    still return all data from the result stream as a generator that
    wraps the ``write_ndjson`` method.

    Args:
        result_stream (ResultStream): the unstarted ResultStream object
        filename_prefix (str or None): the base name for file writing
        results_per_file (int or None): the maximum number of tweets to write
            per file. Defaults to having no max, which means one file. Multiple
            files will be named by datetime, according to
            ``<prefix>_YYY-mm-ddTHH_MM_SS.json``.
    """
    if isinstance(result_stream, types.GeneratorType):
        stream = result_stream
    else:
        stream = result_stream.stream()

    file_time_formatter = "%Y-%m-%dT%H_%M_%S"
    if filename_prefix is None:
        filename_prefix = "twitter_search_results"

    if results_per_file:
        logger.info("chunking result stream to files with {} tweets per file"
                    .format(results_per_file))
        chunked_stream = partition(stream, results_per_file, pad_none=True)
        for chunk in chunked_stream:
            chunk = filter(lambda x: x is not None, chunk)
            curr_datetime = (datetime.datetime.utcnow()
                             .strftime(file_time_formatter))
            _filename = "{}_{}.json".format(filename_prefix, curr_datetime)
            yield from write_ndjson(_filename, chunk)
    else:
        curr_datetime = (datetime.datetime.utcnow()
                         .strftime(file_time_formatter))
        _filename = "{}.json".format(filename_prefix)
        yield from write_ndjson(_filename, stream)
[ "def", "write_result_stream", "(", "result_stream", ",", "filename_prefix", "=", "None", ",", "results_per_file", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "result_stream", ",", "types", ".", "GeneratorType", ")", ":", "stream", "=", "result_stream", "else", ":", "stream", "=", "result_stream", ".", "stream", "(", ")", "file_time_formatter", "=", "\"%Y-%m-%dT%H_%M_%S\"", "if", "filename_prefix", "is", "None", ":", "filename_prefix", "=", "\"twitter_search_results\"", "if", "results_per_file", ":", "logger", ".", "info", "(", "\"chunking result stream to files with {} tweets per file\"", ".", "format", "(", "results_per_file", ")", ")", "chunked_stream", "=", "partition", "(", "stream", ",", "results_per_file", ",", "pad_none", "=", "True", ")", "for", "chunk", "in", "chunked_stream", ":", "chunk", "=", "filter", "(", "lambda", "x", ":", "x", "is", "not", "None", ",", "chunk", ")", "curr_datetime", "=", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "file_time_formatter", ")", ")", "_filename", "=", "\"{}_{}.json\"", ".", "format", "(", "filename_prefix", ",", "curr_datetime", ")", "yield", "from", "write_ndjson", "(", "_filename", ",", "chunk", ")", "else", ":", "curr_datetime", "=", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "file_time_formatter", ")", ")", "_filename", "=", "\"{}.json\"", ".", "format", "(", "filename_prefix", ")", "yield", "from", "write_ndjson", "(", "_filename", ",", "stream", ")" ]
Wraps a ``ResultStream`` object to save it to a file. This function will
still return all data from the result stream as a generator that
wraps the ``write_ndjson`` method.

Args:
    result_stream (ResultStream): the unstarted ResultStream object
    filename_prefix (str or None): the base name for file writing
    results_per_file (int or None): the maximum number of tweets to write
        per file. Defaults to having no max, which means one file. Multiple
        files will be named by datetime, according to
        ``<prefix>_YYY-mm-ddTHH_MM_SS.json``.
[ "Wraps", "a", "ResultStream", "object", "to", "save", "it", "to", "a", "file", ".", "This", "function", "will", "still", "return", "all", "data", "from", "the", "result", "stream", "as", "a", "generator", "that", "wraps", "the", "write_ndjson", "method", "." ]
python
train
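`write_result_stream` relies on a `partition` helper with `pad_none=True`; its implementation is not shown here. One plausible sketch of such padded chunking using `itertools.zip_longest`:

from itertools import zip_longest

def partition(iterable, chunk_size, pad_none=False):
    # Pass the same iterator chunk_size times so zip_longest groups items.
    args = [iter(iterable)] * chunk_size
    chunks = zip_longest(*args)  # pads the last chunk with None
    if pad_none:
        return chunks
    return (tuple(x for x in chunk if x is not None) for chunk in chunks)

for chunk in partition(range(5), 2, pad_none=True):
    print([x for x in chunk if x is not None])  # [0, 1] / [2, 3] / [4]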
beregond/jsonmodels
jsonmodels/utilities.py
https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/utilities.py#L93-L112
def is_ecma_regex(regex):
    """Check if given regex is of type ECMA 262 or not.

    :rtype: bool
    """
    parts = regex.split('/')

    if len(parts) == 1:
        return False

    if len(parts) < 3:
        raise ValueError('Given regex isn\'t ECMA regex nor Python regex.')
    parts.pop()
    parts.append('')

    raw_regex = '/'.join(parts)
    if raw_regex.startswith('/') and raw_regex.endswith('/'):
        return True

    return False
[ "def", "is_ecma_regex", "(", "regex", ")", ":", "parts", "=", "regex", ".", "split", "(", "'/'", ")", "if", "len", "(", "parts", ")", "==", "1", ":", "return", "False", "if", "len", "(", "parts", ")", "<", "3", ":", "raise", "ValueError", "(", "'Given regex isn\\'t ECMA regex nor Python regex.'", ")", "parts", ".", "pop", "(", ")", "parts", ".", "append", "(", "''", ")", "raw_regex", "=", "'/'", ".", "join", "(", "parts", ")", "if", "raw_regex", ".", "startswith", "(", "'/'", ")", "and", "raw_regex", ".", "endswith", "(", "'/'", ")", ":", "return", "True", "return", "False" ]
Check if given regex is of type ECMA 262 or not.

:rtype: bool
[ "Check", "if", "given", "regex", "is", "of", "type", "ECMA", "262", "or", "not", "." ]
python
train
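A short usage example for `is_ecma_regex`, assuming it is importable from `jsonmodels.utilities` as the record's path suggests:

from jsonmodels.utilities import is_ecma_regex

print(is_ecma_regex('^[a-z]+$'))     # False -- plain Python regex
print(is_ecma_regex('/^[a-z]+$/i'))  # True  -- ECMA 262 style, with flags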