Dataset columns:

    column             type       range / values
    -----------------  ---------  ------------------
    repo               string     7-55 chars
    path               string     4-223 chars
    url                string     87-315 chars
    code               string     75-104k chars
    code_tokens        sequence   n/a
    docstring          string     1-46.9k chars
    docstring_tokens   sequence   n/a
    language           string     1 distinct value
    partition          string     3 distinct values
    avg_line_len       float64    7.91-980
lablup/backend.ai-client-py
src/ai/backend/client/cli/files.py
https://github.com/lablup/backend.ai-client-py/blob/a063d774fea6f4350b89498c40d3c837ec3029a7/src/ai/backend/client/cli/files.py#L17-L35
def upload(sess_id_or_alias, files):
    """
    Upload files to user's home folder.

    \b
    SESSID: Session ID or its alias given when creating the session.
    FILES: Path to upload.
    """
    if len(files) < 1:
        return
    with Session() as session:
        try:
            print_wait('Uploading files...')
            kernel = session.Kernel(sess_id_or_alias)
            kernel.upload(files, show_progress=True)
            print_done('Uploaded.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
[ "def", "upload", "(", "sess_id_or_alias", ",", "files", ")", ":", "if", "len", "(", "files", ")", "<", "1", ":", "return", "with", "Session", "(", ")", "as", "session", ":", "try", ":", "print_wait", "(", "'Uploading files...'", ")", "kernel", "=", "session", ".", "Kernel", "(", "sess_id_or_alias", ")", "kernel", ".", "upload", "(", "files", ",", "show_progress", "=", "True", ")", "print_done", "(", "'Uploaded.'", ")", "except", "Exception", "as", "e", ":", "print_error", "(", "e", ")", "sys", ".", "exit", "(", "1", ")" ]
Upload files to user's home folder. \b SESSID: Session ID or its alias given when creating the session. FILES: Path to upload.
[ "Upload", "files", "to", "user", "s", "home", "folder", "." ]
python
train
27.894737
dshean/pygeotools
pygeotools/lib/geolib.py
https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L331-L340
def lon360to180(lon):
    """Convert longitude from (0, 360) to (-180, 180)
    """
    if np.any(lon > 360.0) or np.any(lon < 0.0):
        print("Warning: lon outside expected range")
        lon = wraplon(lon)
    #lon[lon > 180.0] -= 360.0
    #lon180 = (lon+180) - np.floor((lon+180)/360)*360 - 180
    lon = lon - (lon.astype(int) // 180) * 360.0
    return lon
[ "def", "lon360to180", "(", "lon", ")", ":", "if", "np", ".", "any", "(", "lon", ">", "360.0", ")", "or", "np", ".", "any", "(", "lon", "<", "0.0", ")", ":", "print", "(", "\"Warning: lon outside expected range\"", ")", "lon", "=", "wraplon", "(", "lon", ")", "#lon[lon > 180.0] -= 360.0", "#lon180 = (lon+180) - np.floor((lon+180)/360)*360 - 180", "lon", "=", "lon", "-", "(", "lon", ".", "astype", "(", "int", ")", "/", "180", ")", "*", "360.0", "return", "lon" ]
Convert longitude from (0, 360) to (-180, 180)
[ "Convert", "longitude", "from", "(", "0", "360", ")", "to", "(", "-", "180", "180", ")" ]
python
train
35.3
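An aside on the record above: `lon.astype(int) // 180` makes the Python 2 integer-division behaviour the original relied on explicit, and the commented-out line hints at a branch-free equivalent, the standard modulo identity. A minimal numpy sketch of that identity (an illustration, not the pygeotools implementation):

import numpy as np

def lon360to180_mod(lon):
    # Hypothetical helper: branch-free wrap of (0, 360) longitudes into [-180, 180)
    lon = np.asarray(lon, dtype=float)
    return (lon + 180.0) % 360.0 - 180.0

print(lon360to180_mod([0.0, 90.0, 180.0, 350.0]))  # [0., 90., -180., -10.]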
WebarchivCZ/WA-KAT
src/wa_kat/templates/static/js/Lib/site-packages/components/conspect_handler.py
https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/templates/static/js/Lib/site-packages/components/conspect_handler.py#L212-L224
def show_error(cls, error=True):
    """
    Show `error` around the conspect elements.

    If the `error` is ``False``, hide it.
    """
    if error:
        cls.input_el.style.border = "2px solid red"
        cls.conspect_el.style.border = "2px solid red"
        cls.subconspect_el.style.border = "2px solid red"
    else:
        cls.input_el.style.border = "0"
        cls.conspect_el.style.border = "0"
        cls.subconspect_el.style.border = "0"
[ "def", "show_error", "(", "cls", ",", "error", "=", "True", ")", ":", "if", "error", ":", "cls", ".", "input_el", ".", "style", ".", "border", "=", "\"2px solid red\"", "cls", ".", "conspect_el", ".", "style", ".", "border", "=", "\"2px solid red\"", "cls", ".", "subconspect_el", ".", "style", ".", "border", "=", "\"2px solid red\"", "else", ":", "cls", ".", "input_el", ".", "style", ".", "border", "=", "\"0\"", "cls", ".", "conspect_el", ".", "style", ".", "border", "=", "\"0\"", "cls", ".", "subconspect_el", ".", "style", ".", "border", "=", "\"0\"" ]
Show `error` around the conspect elements. If the `error` is ``False``, hide it.
[ "Show", "error", "around", "the", "conspect", "elements", ".", "If", "the", "error", "is", "False", "hide", "it", "." ]
python
train
37.769231
GNS3/gns3-server
gns3server/compute/qemu/qemu_vm.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/qemu/qemu_vm.py#L1275-L1288
def read_stdout(self):
    """
    Reads the standard output of the QEMU process.
    Only use when the process has been stopped or has crashed.
    """
    output = ""
    if self._stdout_file:
        try:
            with open(self._stdout_file, "rb") as file:
                output = file.read().decode("utf-8", errors="replace")
        except OSError as e:
            log.warning("Could not read {}: {}".format(self._stdout_file, e))
    return output
[ "def", "read_stdout", "(", "self", ")", ":", "output", "=", "\"\"", "if", "self", ".", "_stdout_file", ":", "try", ":", "with", "open", "(", "self", ".", "_stdout_file", ",", "\"rb\"", ")", "as", "file", ":", "output", "=", "file", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ",", "errors", "=", "\"replace\"", ")", "except", "OSError", "as", "e", ":", "log", ".", "warning", "(", "\"Could not read {}: {}\"", ".", "format", "(", "self", ".", "_stdout_file", ",", "e", ")", ")", "return", "output" ]
Reads the standard output of the QEMU process. Only use when the process has been stopped or has crashed.
[ "Reads", "the", "standard", "output", "of", "the", "QEMU", "process", ".", "Only", "use", "when", "the", "process", "has", "been", "stopped", "or", "has", "crashed", "." ]
python
train
35.357143
mathiasertl/django-ca
ca/django_ca/models.py
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/models.py#L272-L274
def issuer(self):
    """The certificate issuer field as :py:class:`~django_ca.subject.Subject`."""
    return Subject([(s.oid, s.value) for s in self.x509.issuer])
[ "def", "issuer", "(", "self", ")", ":", "return", "Subject", "(", "[", "(", "s", ".", "oid", ",", "s", ".", "value", ")", "for", "s", "in", "self", ".", "x509", ".", "issuer", "]", ")" ]
The certificate issuer field as :py:class:`~django_ca.subject.Subject`.
[ "The", "certificate", "issuer", "field", "as", ":", "py", ":", "class", ":", "~django_ca", ".", "subject", ".", "Subject", "." ]
python
train
56.666667
kytos/kytos-utils
kytos/cli/commands/napps/parser.py
https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/cli/commands/napps/parser.py#L57-L61
def call(subcommand, args):
    """Call a subcommand passing the args."""
    args['<napp>'] = parse_napps(args['<napp>'])
    func = getattr(NAppsAPI, subcommand)
    func(args)
[ "def", "call", "(", "subcommand", ",", "args", ")", ":", "args", "[", "'<napp>'", "]", "=", "parse_napps", "(", "args", "[", "'<napp>'", "]", ")", "func", "=", "getattr", "(", "NAppsAPI", ",", "subcommand", ")", "func", "(", "args", ")" ]
Call a subcommand passing the args.
[ "Call", "a", "subcommand", "passing", "the", "args", "." ]
python
train
34.8
SiLab-Bonn/pyBAR
pybar/daq/readout_utils.py
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/daq/readout_utils.py#L428-L499
def interpret_pixel_data(data, dc, pixel_array, invert=True):
    '''Takes the pixel raw data and interprets them. This includes consistency checks
    and pixel/data matching. The data has to come from one double column only but can
    have more than one pixel bit (e.g. TDAC = 5 bit).

    Parameters
    ----------
    data : numpy.ndarray
        The raw data words.
    dc : int
        The double column where the data is from.
    pixel_array : numpy.ma.ndarray
        The masked numpy.ndarray to be filled. The mask is set to zero for pixels with valid data.
    invert : boolean
        Invert the read pixel data.
    '''
    # data validity cut, VR has to follow an AR
    index_value = np.where(is_address_record(data))[0] + 1  # assume value record follows address record
    index_value = index_value[is_value_record(data[index_value])]  # delete all non value records
    index_address = index_value - 1  # calculate address record indices that are followed by an value record

    # create the pixel address/value arrays
    address = get_address_record_address(data[index_address])
    value = get_value_record(data[index_address + 1])

    # split array for each bit in pixel data, split is done on decreasing address values
    address_split = np.array_split(address, np.where(np.diff(address.astype(np.int32)) < 0)[0] + 1)
    value_split = np.array_split(value, np.where(np.diff(address.astype(np.int32)) < 0)[0] + 1)

    if len(address_split) > 5:
        pixel_array.mask[dc * 2, :] = True
        pixel_array.mask[dc * 2 + 1, :] = True
        logging.warning('Invalid pixel data for DC %d', dc)
        return

    mask = np.empty_like(pixel_array.data)  # BUG in numpy: pixel_array is de-masked if not .data is used
    mask[:] = len(address_split)

    for bit, (bit_address, bit_value) in enumerate(zip(address_split, value_split)):  # loop over all bits of the pixel data
        # error output, pixel data is often corrupt for FE-I4A
        if len(bit_address) == 0:
            logging.warning('No pixel data for DC %d', dc)
            continue
        if len(bit_address) != 42:
            logging.warning('Some pixel data missing for DC %d', dc)
        if np.any(bit_address > 672):
            raise RuntimeError('Pixel data corrupt for DC %d' % dc)

        # set pixel that occurred in the data stream
        pixel = []
        for i in bit_address:
            pixel.extend(range(i - 15, i + 1))
        pixel = np.array(pixel)

        # create bit set array
        value_new = bit_value.view(np.uint8)  # interpret 32 bit numpy array as uint8 to be able to use bit unpacking; byte unpacking is not supported yet
        if invert:
            value_new = np.invert(value_new)  # read back values are inverted
        value_new = np.insert(value_new[::4], np.arange(len(value_new[1::4])), value_new[1::4])  # delete 0 padding
        value_bit = np.unpackbits(value_new, axis=0)

        if len(address_split) == 5:  # detect TDAC data, here the bit order is flipped
            bit_set = len(address_split) - bit - 1
        else:
            bit_set = bit

        pixel_array.data[dc * 2, pixel[pixel >= 336] - 336] = np.bitwise_or(pixel_array.data[dc * 2, pixel[pixel >= 336] - 336], np.left_shift(value_bit[pixel >= 336], bit_set))
        pixel_array.data[dc * 2 + 1, pixel[pixel < 336]] = np.bitwise_or(pixel_array.data[dc * 2 + 1, pixel[pixel < 336]], np.left_shift(value_bit[pixel < 336], bit_set)[::-1])

        mask[dc * 2, pixel[pixel >= 336] - 336] = mask[dc * 2, pixel[pixel >= 336] - 336] - 1
        mask[dc * 2 + 1, pixel[pixel < 336]] = mask[dc * 2 + 1, pixel[pixel < 336]] - 1

    pixel_array.mask[np.equal(mask, 0)] = False
[ "def", "interpret_pixel_data", "(", "data", ",", "dc", ",", "pixel_array", ",", "invert", "=", "True", ")", ":", "# data validity cut, VR has to follow an AR\r", "index_value", "=", "np", ".", "where", "(", "is_address_record", "(", "data", ")", ")", "[", "0", "]", "+", "1", "# assume value record follows address record\r", "index_value", "=", "index_value", "[", "is_value_record", "(", "data", "[", "index_value", "]", ")", "]", "# delete all non value records\r", "index_address", "=", "index_value", "-", "1", "# calculate address record indices that are followed by an value record\r", "# create the pixel address/value arrays\r", "address", "=", "get_address_record_address", "(", "data", "[", "index_address", "]", ")", "value", "=", "get_value_record", "(", "data", "[", "index_address", "+", "1", "]", ")", "# split array for each bit in pixel data, split is done on decreasing address values\r", "address_split", "=", "np", ".", "array_split", "(", "address", ",", "np", ".", "where", "(", "np", ".", "diff", "(", "address", ".", "astype", "(", "np", ".", "int32", ")", ")", "<", "0", ")", "[", "0", "]", "+", "1", ")", "value_split", "=", "np", ".", "array_split", "(", "value", ",", "np", ".", "where", "(", "np", ".", "diff", "(", "address", ".", "astype", "(", "np", ".", "int32", ")", ")", "<", "0", ")", "[", "0", "]", "+", "1", ")", "if", "len", "(", "address_split", ")", ">", "5", ":", "pixel_array", ".", "mask", "[", "dc", "*", "2", ",", ":", "]", "=", "True", "pixel_array", ".", "mask", "[", "dc", "*", "2", "+", "1", ",", ":", "]", "=", "True", "logging", ".", "warning", "(", "'Invalid pixel data for DC %d'", ",", "dc", ")", "return", "mask", "=", "np", ".", "empty_like", "(", "pixel_array", ".", "data", ")", "# BUG in numpy: pixel_array is de-masked if not .data is used\r", "mask", "[", ":", "]", "=", "len", "(", "address_split", ")", "for", "bit", ",", "(", "bit_address", ",", "bit_value", ")", "in", "enumerate", "(", "zip", "(", "address_split", ",", "value_split", ")", ")", ":", "# loop over all bits of the pixel data\r", "# error output, pixel data is often corrupt for FE-I4A\r", "if", "len", "(", "bit_address", ")", "==", "0", ":", "logging", ".", "warning", "(", "'No pixel data for DC %d'", ",", "dc", ")", "continue", "if", "len", "(", "bit_address", ")", "!=", "42", ":", "logging", ".", "warning", "(", "'Some pixel data missing for DC %d'", ",", "dc", ")", "if", "(", "np", ".", "any", "(", "bit_address", ">", "672", ")", ")", ":", "RuntimeError", "(", "'Pixel data corrupt for DC %d'", ",", "dc", ")", "# set pixel that occurred in the data stream\r", "pixel", "=", "[", "]", "for", "i", "in", "bit_address", ":", "pixel", ".", "extend", "(", "range", "(", "i", "-", "15", ",", "i", "+", "1", ")", ")", "pixel", "=", "np", ".", "array", "(", "pixel", ")", "# create bit set array\r", "value_new", "=", "bit_value", ".", "view", "(", "np", ".", "uint8", ")", "# interpret 32 bit numpy array as uint8 to be able to use bit unpacking; byte unpacking is not supported yet\r", "if", "invert", ":", "value_new", "=", "np", ".", "invert", "(", "value_new", ")", "# read back values are inverted\r", "value_new", "=", "np", ".", "insert", "(", "value_new", "[", ":", ":", "4", "]", ",", "np", ".", "arange", "(", "len", "(", "value_new", "[", "1", ":", ":", "4", "]", ")", ")", ",", "value_new", "[", "1", ":", ":", "4", "]", ")", "# delete 0 padding\r", "value_bit", "=", "np", ".", "unpackbits", "(", "value_new", ",", "axis", "=", "0", ")", "if", "len", "(", "address_split", ")", "==", "5", ":", "# 
detect TDAC data, here the bit order is flipped\r", "bit_set", "=", "len", "(", "address_split", ")", "-", "bit", "-", "1", "else", ":", "bit_set", "=", "bit", "pixel_array", ".", "data", "[", "dc", "*", "2", ",", "pixel", "[", "pixel", ">=", "336", "]", "-", "336", "]", "=", "np", ".", "bitwise_or", "(", "pixel_array", ".", "data", "[", "dc", "*", "2", ",", "pixel", "[", "pixel", ">=", "336", "]", "-", "336", "]", ",", "np", ".", "left_shift", "(", "value_bit", "[", "pixel", ">=", "336", "]", ",", "bit_set", ")", ")", "pixel_array", ".", "data", "[", "dc", "*", "2", "+", "1", ",", "pixel", "[", "pixel", "<", "336", "]", "]", "=", "np", ".", "bitwise_or", "(", "pixel_array", ".", "data", "[", "dc", "*", "2", "+", "1", ",", "pixel", "[", "pixel", "<", "336", "]", "]", ",", "np", ".", "left_shift", "(", "value_bit", "[", "pixel", "<", "336", "]", ",", "bit_set", ")", "[", ":", ":", "-", "1", "]", ")", "mask", "[", "dc", "*", "2", ",", "pixel", "[", "pixel", ">=", "336", "]", "-", "336", "]", "=", "mask", "[", "dc", "*", "2", ",", "pixel", "[", "pixel", ">=", "336", "]", "-", "336", "]", "-", "1", "mask", "[", "dc", "*", "2", "+", "1", ",", "pixel", "[", "pixel", "<", "336", "]", "]", "=", "mask", "[", "dc", "*", "2", "+", "1", ",", "pixel", "[", "pixel", "<", "336", "]", "]", "-", "1", "pixel_array", ".", "mask", "[", "np", ".", "equal", "(", "mask", ",", "0", ")", "]", "=", "False" ]
Takes the pixel raw data and interprets them. This includes consistency checks and pixel/data matching. The data has to come from one double column only but can have more than one pixel bit (e.g. TDAC = 5 bit). Parameters ---------- data : numpy.ndarray The raw data words. dc : int The double column where the data is from. pixel_array : numpy.ma.ndarray The masked numpy.ndarray to be filled. The mask is set to zero for pixels with valid data. invert : boolean Invert the read pixel data.
[ "Takes", "the", "pixel", "raw", "data", "and", "interprets", "them", ".", "This", "includes", "consistency", "checks", "and", "pixel", "/", "data", "matching", ".", "The", "data", "has", "to", "come", "from", "one", "double", "column", "only", "but", "can", "have", "more", "than", "one", "pixel", "bit", "(", "e", ".", "g", ".", "TDAC", "=", "5", "bit", ")", ".", "Parameters", "----------", "data", ":", "numpy", ".", "ndarray", "The", "raw", "data", "words", ".", "dc", ":", "int", "The", "double", "column", "where", "the", "data", "is", "from", ".", "pixel_array", ":", "numpy", ".", "ma", ".", "ndarray", "The", "masked", "numpy", ".", "ndarrays", "to", "be", "filled", ".", "The", "masked", "is", "set", "to", "zero", "for", "pixels", "with", "valid", "data", ".", "invert", ":", "boolean", "Invert", "the", "read", "pixel", "data", "." ]
python
train
51.194444
willemarcel/osmcha
osmcha/changeset.py
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L227-L232
def get_area(self, geojson):
    """Read the first feature from the geojson and return it as a Polygon
    object.
    """
    geojson = json.load(open(geojson, 'r'))
    self.area = Polygon(geojson['features'][0]['geometry']['coordinates'][0])
[ "def", "get_area", "(", "self", ",", "geojson", ")", ":", "geojson", "=", "json", ".", "load", "(", "open", "(", "geojson", ",", "'r'", ")", ")", "self", ".", "area", "=", "Polygon", "(", "geojson", "[", "'features'", "]", "[", "0", "]", "[", "'geometry'", "]", "[", "'coordinates'", "]", "[", "0", "]", ")" ]
Read the first feature from the geojson and return it as a Polygon object.
[ "Read", "the", "first", "feature", "from", "the", "geojson", "and", "return", "it", "as", "a", "Polygon", "object", "." ]
python
valid
43.166667
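One nit on the record above: `json.load(open(geojson, 'r'))` never closes the file handle. A context-manager variant, sketched under the assumption that `Polygon` is the shapely constructor the module imports:

import json
from shapely.geometry import Polygon  # assumption: the same Polygon used in the record above

def polygon_from_geojson(path):
    # Close the handle deterministically instead of leaving it to the garbage collector
    with open(path, 'r') as fh:
        geojson = json.load(fh)
    return Polygon(geojson['features'][0]['geometry']['coordinates'][0])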
numenta/nupic
src/nupic/swarming/hypersearch/extended_logger.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch/extended_logger.py#L47-L56
def debug(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'DEBUG'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
    """
    self._baseLogger.debug(self, self.getExtendedMsg(msg), *args, **kwargs)
[ "def", "debug", "(", "self", ",", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_baseLogger", ".", "debug", "(", "self", ",", "self", ".", "getExtendedMsg", "(", "msg", ")", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Log 'msg % args' with severity 'DEBUG'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
[ "Log", "msg", "%", "args", "with", "severity", "DEBUG", "." ]
python
valid
33.6
scott-griffiths/bitstring
bitstring.py
https://github.com/scott-griffiths/bitstring/blob/ab40ae7f0b43fe223a39b63cbc0529b09f3ef653/bitstring.py#L2183-L2188
def _ilshift(self, n):
    """Shift bits by n to the left in place. Return self."""
    assert 0 < n <= self.len
    self._append(Bits(n))
    self._truncatestart(n)
    return self
[ "def", "_ilshift", "(", "self", ",", "n", ")", ":", "assert", "0", "<", "n", "<=", "self", ".", "len", "self", ".", "_append", "(", "Bits", "(", "n", ")", ")", "self", ".", "_truncatestart", "(", "n", ")", "return", "self" ]
Shift bits by n to the left in place. Return self.
[ "Shift", "bits", "by", "n", "to", "the", "left", "in", "place", ".", "Return", "self", "." ]
python
train
32.666667
HDI-Project/ballet
ballet/util/fs.py
https://github.com/HDI-Project/ballet/blob/6f4d4b87b8234cb6bb38b9e9484a58ef8fe8fdb2/ballet/util/fs.py#L82-L110
def synctree(src, dst, onexist=None):
    """Recursively sync files at directory src to dst

    This is more or less equivalent to::

       cp -n -R ${src}/ ${dst}/

    If a file at the same path exists in src and dst, it is NOT overwritten
    in dst. Pass ``onexist`` in order to raise an error on such conditions.

    Args:
        src (path-like): source directory
        dst (path-like): destination directory, does not need to exist
        onexist (callable): function to call if file exists at destination,
            takes the full path to destination file as only argument
    """
    src = pathlib.Path(src).resolve()
    dst = pathlib.Path(dst).resolve()

    if not src.is_dir():
        raise ValueError

    if dst.exists() and not dst.is_dir():
        raise ValueError

    if onexist is None:
        def onexist():
            pass

    _synctree(src, dst, onexist)
[ "def", "synctree", "(", "src", ",", "dst", ",", "onexist", "=", "None", ")", ":", "src", "=", "pathlib", ".", "Path", "(", "src", ")", ".", "resolve", "(", ")", "dst", "=", "pathlib", ".", "Path", "(", "dst", ")", ".", "resolve", "(", ")", "if", "not", "src", ".", "is_dir", "(", ")", ":", "raise", "ValueError", "if", "dst", ".", "exists", "(", ")", "and", "not", "dst", ".", "is_dir", "(", ")", ":", "raise", "ValueError", "if", "onexist", "is", "None", ":", "def", "onexist", "(", ")", ":", "pass", "_synctree", "(", "src", ",", "dst", ",", "onexist", ")" ]
Recursively sync files at directory src to dst This is more or less equivalent to:: cp -n -R ${src}/ ${dst}/ If a file at the same path exists in src and dst, it is NOT overwritten in dst. Pass ``onexist`` in order to raise an error on such conditions. Args: src (path-like): source directory dst (path-like): destination directory, does not need to exist onexist (callable): function to call if file exists at destination, takes the full path to destination file as only argument
[ "Recursively", "sync", "files", "at", "directory", "src", "to", "dst" ]
python
train
29.310345
elliterate/capybara.py
capybara/__init__.py
https://github.com/elliterate/capybara.py/blob/0c6ae449cc37e4445ec3cd6af95674533beedc6c/capybara/__init__.py#L170-L189
def current_session():
    """
    Returns the :class:`Session` for the current driver and app, instantiating one if needed.

    Returns:
        Session: The :class:`Session` for the current driver and app.
    """
    driver = current_driver or default_driver
    session_key = "{driver}:{session}:{app}".format(
        driver=driver, session=session_name, app=str(id(app)))
    session = _session_pool.get(session_key, None)
    if session is None:
        from capybara.session import Session
        session = Session(driver, app)
        _session_pool[session_key] = session
    return session
[ "def", "current_session", "(", ")", ":", "driver", "=", "current_driver", "or", "default_driver", "session_key", "=", "\"{driver}:{session}:{app}\"", ".", "format", "(", "driver", "=", "driver", ",", "session", "=", "session_name", ",", "app", "=", "str", "(", "id", "(", "app", ")", ")", ")", "session", "=", "_session_pool", ".", "get", "(", "session_key", ",", "None", ")", "if", "session", "is", "None", ":", "from", "capybara", ".", "session", "import", "Session", "session", "=", "Session", "(", "driver", ",", "app", ")", "_session_pool", "[", "session_key", "]", "=", "session", "return", "session" ]
Returns the :class:`Session` for the current driver and app, instantiating one if needed. Returns: Session: The :class:`Session` for the current driver and app.
[ "Returns", "the", ":", "class", ":", "Session", "for", "the", "current", "driver", "and", "app", "instantiating", "one", "if", "needed", "." ]
python
test
29.3
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/gloo/preprocessor.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/gloo/preprocessor.py#L31-L61
def merge_includes(code):
    """Merge all includes recursively."""
    pattern = '\#\s*include\s*"(?P<filename>[a-zA-Z0-9\_\-\.\/]+)"'
    regex = re.compile(pattern)
    includes = []

    def replace(match):
        filename = match.group("filename")
        if filename not in includes:
            includes.append(filename)
            path = glsl.find(filename)
            if not path:
                logger.critical('"%s" not found' % filename)
                raise RuntimeError("File not found", filename)
            text = '\n// --- start of "%s" ---\n' % filename
            with open(path) as fh:
                text += fh.read()
            text += '// --- end of "%s" ---\n' % filename
            return text
        return ''

    # Limit recursion to depth 10
    for i in range(10):
        if re.search(regex, code):
            code = re.sub(regex, replace, code)
        else:
            break

    return code
[ "def", "merge_includes", "(", "code", ")", ":", "pattern", "=", "'\\#\\s*include\\s*\"(?P<filename>[a-zA-Z0-9\\_\\-\\.\\/]+)\"'", "regex", "=", "re", ".", "compile", "(", "pattern", ")", "includes", "=", "[", "]", "def", "replace", "(", "match", ")", ":", "filename", "=", "match", ".", "group", "(", "\"filename\"", ")", "if", "filename", "not", "in", "includes", ":", "includes", ".", "append", "(", "filename", ")", "path", "=", "glsl", ".", "find", "(", "filename", ")", "if", "not", "path", ":", "logger", ".", "critical", "(", "'\"%s\" not found'", "%", "filename", ")", "raise", "RuntimeError", "(", "\"File not found\"", ",", "filename", ")", "text", "=", "'\\n// --- start of \"%s\" ---\\n'", "%", "filename", "with", "open", "(", "path", ")", "as", "fh", ":", "text", "+=", "fh", ".", "read", "(", ")", "text", "+=", "'// --- end of \"%s\" ---\\n'", "%", "filename", "return", "text", "return", "''", "# Limit recursion to depth 10", "for", "i", "in", "range", "(", "10", ")", ":", "if", "re", ".", "search", "(", "regex", ",", "code", ")", ":", "code", "=", "re", ".", "sub", "(", "regex", ",", "replace", ",", "code", ")", "else", ":", "break", "return", "code" ]
Merge all includes recursively.
[ "Merge", "all", "includes", "recursively", "." ]
python
train
29.322581
juju/python-libjuju
juju/application.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/application.py#L104-L131
async def add_unit(self, count=1, to=None):
    """Add one or more units to this application.

    :param int count: Number of units to add
    :param str to: Placement directive, e.g.::
        '23' - machine 23
        'lxc:7' - new lxc container on machine 7
        '24/lxc/3' - lxc container 3 or machine 24

        If None, a new machine is provisioned.

    """
    app_facade = client.ApplicationFacade.from_connection(self.connection)

    log.debug(
        'Adding %s unit%s to %s',
        count, '' if count == 1 else 's', self.name)

    result = await app_facade.AddUnits(
        application=self.name,
        placement=parse_placement(to) if to else None,
        num_units=count,
    )

    return await asyncio.gather(*[
        asyncio.ensure_future(self.model._wait_for_new('unit', unit_id))
        for unit_id in result.units
    ])
[ "async", "def", "add_unit", "(", "self", ",", "count", "=", "1", ",", "to", "=", "None", ")", ":", "app_facade", "=", "client", ".", "ApplicationFacade", ".", "from_connection", "(", "self", ".", "connection", ")", "log", ".", "debug", "(", "'Adding %s unit%s to %s'", ",", "count", ",", "''", "if", "count", "==", "1", "else", "'s'", ",", "self", ".", "name", ")", "result", "=", "await", "app_facade", ".", "AddUnits", "(", "application", "=", "self", ".", "name", ",", "placement", "=", "parse_placement", "(", "to", ")", "if", "to", "else", "None", ",", "num_units", "=", "count", ",", ")", "return", "await", "asyncio", ".", "gather", "(", "*", "[", "asyncio", ".", "ensure_future", "(", "self", ".", "model", ".", "_wait_for_new", "(", "'unit'", ",", "unit_id", ")", ")", "for", "unit_id", "in", "result", ".", "units", "]", ")" ]
Add one or more units to this application. :param int count: Number of units to add :param str to: Placement directive, e.g.:: '23' - machine 23 'lxc:7' - new lxc container on machine 7 '24/lxc/3' - lxc container 3 or machine 24 If None, a new machine is provisioned.
[ "Add", "one", "or", "more", "units", "to", "this", "application", "." ]
python
train
32.642857
HEPData/hepdata-validator
hepdata_validator/__init__.py
https://github.com/HEPData/hepdata-validator/blob/d0b0cab742a009c8f0e8aac9f8c8e434a524d43c/hepdata_validator/__init__.py#L88-L96
def add_validation_message(self, message):
    """
    Adds a message to the messages dict

    :param message:
    """
    if message.file not in self.messages:
        self.messages[message.file] = []
    self.messages[message.file].append(message)
[ "def", "add_validation_message", "(", "self", ",", "message", ")", ":", "if", "message", ".", "file", "not", "in", "self", ".", "messages", ":", "self", ".", "messages", "[", "message", ".", "file", "]", "=", "[", "]", "self", ".", "messages", "[", "message", ".", "file", "]", ".", "append", "(", "message", ")" ]
Adds a message to the messages dict :param message:
[ "Adds", "a", "message", "to", "the", "messages", "dict", ":", "param", "message", ":" ]
python
train
30
Gandi/gandi.cli
gandi/cli/modules/forward.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/forward.py#L47-L70
def update(cls, domain, source, dest_add, dest_del):
    """Update a domain mail forward destinations."""
    result = None
    if dest_add or dest_del:
        current_destinations = cls.get_destinations(domain, source)
        fwds = current_destinations[:]
        if dest_add:
            for dest in dest_add:
                if dest not in fwds:
                    fwds.append(dest)
        if dest_del:
            for dest in dest_del:
                if dest in fwds:
                    fwds.remove(dest)
        if ((len(current_destinations) != len(fwds))
                or (current_destinations != fwds)):
            cls.echo('Updating mail forward %s@%s' % (source, domain))
            options = {'destinations': fwds}
            result = cls.call('domain.forward.update', domain, source, options)
    return result
[ "def", "update", "(", "cls", ",", "domain", ",", "source", ",", "dest_add", ",", "dest_del", ")", ":", "result", "=", "None", "if", "dest_add", "or", "dest_del", ":", "current_destinations", "=", "cls", ".", "get_destinations", "(", "domain", ",", "source", ")", "fwds", "=", "current_destinations", "[", ":", "]", "if", "dest_add", ":", "for", "dest", "in", "dest_add", ":", "if", "dest", "not", "in", "fwds", ":", "fwds", ".", "append", "(", "dest", ")", "if", "dest_del", ":", "for", "dest", "in", "dest_del", ":", "if", "dest", "in", "fwds", ":", "fwds", ".", "remove", "(", "dest", ")", "if", "(", "(", "len", "(", "current_destinations", ")", "!=", "len", "(", "fwds", ")", ")", "or", "(", "current_destinations", "!=", "fwds", ")", ")", ":", "cls", ".", "echo", "(", "'Updating mail forward %s@%s'", "%", "(", "source", ",", "domain", ")", ")", "options", "=", "{", "'destinations'", ":", "fwds", "}", "result", "=", "cls", ".", "call", "(", "'domain.forward.update'", ",", "domain", ",", "source", ",", "options", ")", "return", "result" ]
Update a domain mail forward destinations.
[ "Update", "a", "domain", "mail", "forward", "destinations", "." ]
python
train
38.5
tensorflow/mesh
mesh_tensorflow/placement_mesh_impl.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/placement_mesh_impl.py#L185-L202
def Print(self, x, data, message, **kwargs):  # pylint: disable=invalid-name
    """call tf.Print.

    Args:
      x: a LaidOutTensor
      data: a list of LaidOutTensor
      message: a string
      **kwargs: keyword arguments to tf.print

    Returns:
      a LaidOutTensor
    """
    tf.logging.info("PlacementMeshImpl::Print")
    new_slices = x.tensor_list[:]
    with tf.device(self._devices[0]):
        new_slices[0] = tf.Print(
            new_slices[0], [t for d in data for t in d.tensor_list],
            message, **kwargs)
    return self.LaidOutTensor(new_slices)
[ "def", "Print", "(", "self", ",", "x", ",", "data", ",", "message", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=invalid-name", "tf", ".", "logging", ".", "info", "(", "\"PlacementMeshImpl::Print\"", ")", "new_slices", "=", "x", ".", "tensor_list", "[", ":", "]", "with", "tf", ".", "device", "(", "self", ".", "_devices", "[", "0", "]", ")", ":", "new_slices", "[", "0", "]", "=", "tf", ".", "Print", "(", "new_slices", "[", "0", "]", ",", "[", "t", "for", "d", "in", "data", "for", "t", "in", "d", ".", "tensor_list", "]", ",", "message", ",", "*", "*", "kwargs", ")", "return", "self", ".", "LaidOutTensor", "(", "new_slices", ")" ]
call tf.Print. Args: x: a LaidOutTensor data: a list of LaidOutTensor message: a string **kwargs: keyword arguments to tf.print Returns: a LaidOutTensor
[ "call", "tf", ".", "Print", "." ]
python
train
30.888889
jrabbit/hitman
hitman.py
https://github.com/jrabbit/hitman/blob/407351cb729956e2e1673d0aae741e1fa5f61b31/hitman.py#L348-L362
def directory():
    """Construct hitman_dir from os name"""
    home = os.path.expanduser('~')
    if platform.system() == 'Linux':
        hitman_dir = os.path.join(home, '.hitman')
    elif platform.system() == 'Darwin':
        hitman_dir = os.path.join(home, 'Library', 'Application Support', 'hitman')
    elif platform.system() == 'Windows':
        hitman_dir = os.path.join(os.environ['appdata'], 'hitman')
    else:
        hitman_dir = os.path.join(home, '.hitman')
    if not os.path.isdir(hitman_dir):
        os.mkdir(hitman_dir)
    return hitman_dir
[ "def", "directory", "(", ")", ":", "home", "=", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", "if", "platform", ".", "system", "(", ")", "==", "'Linux'", ":", "hitman_dir", "=", "os", ".", "path", ".", "join", "(", "home", ",", "'.hitman'", ")", "elif", "platform", ".", "system", "(", ")", "==", "'Darwin'", ":", "hitman_dir", "=", "os", ".", "path", ".", "join", "(", "home", ",", "'Library'", ",", "'Application Support'", ",", "'hitman'", ")", "elif", "platform", ".", "system", "(", ")", "==", "'Windows'", ":", "hitman_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'appdata'", "]", ",", "'hitman'", ")", "else", ":", "hitman_dir", "=", "os", ".", "path", ".", "join", "(", "home", ",", "'.hitman'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "hitman_dir", ")", ":", "os", ".", "mkdir", "(", "hitman_dir", ")", "return", "hitman_dir" ]
Construct hitman_dir from os name
[ "Construct", "hitman_dir", "from", "os", "name" ]
python
train
39
tango-controls/pytango
tango/encoded_attribute.py
https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/encoded_attribute.py#L311-L355
def __EncodedAttribute_generic_encode_rgb24(self, rgb24, width=0, height=0, quality=0, format=_ImageFormat.RawImage):
    """Internal usage only"""
    if not is_seq(rgb24):
        raise TypeError("Expected sequence (str, numpy.ndarray, list, tuple "
                        "or bytearray) as first argument")

    is_str = is_pure_str(rgb24)
    if is_str:
        if not width or not height:
            raise ValueError("When giving a string as data, you must also "
                             "supply width and height")

    if np and isinstance(rgb24, np.ndarray):
        if rgb24.ndim != 3:
            if not width or not height:
                raise ValueError("When giving a non 2D numpy array, width and "
                                 "height must be supplied")
            if rgb24.nbytes / 3 != width * height:
                raise ValueError("numpy array size mismatch")
        else:
            if rgb24.itemsize != 1:
                raise TypeError("Expected numpy array with itemsize == 1")
        if not rgb24.flags.c_contiguous:
            raise TypeError("Currently, only contiguous, aligned numpy arrays "
                            "are supported")
        if not rgb24.flags.aligned:
            raise TypeError("Currently, only contiguous, aligned numpy arrays "
                            "are supported")

    if not is_str and (not width or not height):
        height = len(rgb24)
        if height < 1:
            raise IndexError("Expected sequence with at least one row")
        row0 = rgb24[0]
        if not is_seq(row0):
            raise IndexError("Expected sequence (str, numpy.ndarray, list, tuple or "
                             "bytearray) inside a sequence")
        width = len(row0)
        if is_pure_str(row0) or type(row0) == bytearray:
            width /= 3

    if format == _ImageFormat.RawImage:
        self._encode_rgb24(rgb24, width, height)
    elif format == _ImageFormat.JpegImage:
        self._encode_jpeg_rgb24(rgb24, width, height, quality)
[ "def", "__EncodedAttribute_generic_encode_rgb24", "(", "self", ",", "rgb24", ",", "width", "=", "0", ",", "height", "=", "0", ",", "quality", "=", "0", ",", "format", "=", "_ImageFormat", ".", "RawImage", ")", ":", "if", "not", "is_seq", "(", "rgb24", ")", ":", "raise", "TypeError", "(", "\"Expected sequence (str, numpy.ndarray, list, tuple \"", "\"or bytearray) as first argument\"", ")", "is_str", "=", "is_pure_str", "(", "rgb24", ")", "if", "is_str", ":", "if", "not", "width", "or", "not", "height", ":", "raise", "ValueError", "(", "\"When giving a string as data, you must also \"", "\"supply width and height\"", ")", "if", "np", "and", "isinstance", "(", "rgb24", ",", "np", ".", "ndarray", ")", ":", "if", "rgb24", ".", "ndim", "!=", "3", ":", "if", "not", "width", "or", "not", "height", ":", "raise", "ValueError", "(", "\"When giving a non 2D numpy array, width and \"", "\"height must be supplied\"", ")", "if", "rgb24", ".", "nbytes", "/", "3", "!=", "width", "*", "height", ":", "raise", "ValueError", "(", "\"numpy array size mismatch\"", ")", "else", ":", "if", "rgb24", ".", "itemsize", "!=", "1", ":", "raise", "TypeError", "(", "\"Expected numpy array with itemsize == 1\"", ")", "if", "not", "rgb24", ".", "flags", ".", "c_contiguous", ":", "raise", "TypeError", "(", "\"Currently, only contiguous, aligned numpy arrays \"", "\"are supported\"", ")", "if", "not", "rgb24", ".", "flags", ".", "aligned", ":", "raise", "TypeError", "(", "\"Currently, only contiguous, aligned numpy arrays \"", "\"are supported\"", ")", "if", "not", "is_str", "and", "(", "not", "width", "or", "not", "height", ")", ":", "height", "=", "len", "(", "rgb24", ")", "if", "height", "<", "1", ":", "raise", "IndexError", "(", "\"Expected sequence with at least one row\"", ")", "row0", "=", "rgb24", "[", "0", "]", "if", "not", "is_seq", "(", "row0", ")", ":", "raise", "IndexError", "(", "\"Expected sequence (str, numpy.ndarray, list, tuple or \"", "\"bytearray) inside a sequence\"", ")", "width", "=", "len", "(", "row0", ")", "if", "is_pure_str", "(", "row0", ")", "or", "type", "(", "row0", ")", "==", "bytearray", ":", "width", "/=", "3", "if", "format", "==", "_ImageFormat", ".", "RawImage", ":", "self", ".", "_encode_rgb24", "(", "rgb24", ",", "width", ",", "height", ")", "elif", "format", "==", "_ImageFormat", ".", "JpegImage", ":", "self", ".", "_encode_jpeg_rgb24", "(", "rgb24", ",", "width", ",", "height", ",", "quality", ")" ]
Internal usage only
[ "Internal", "usage", "only" ]
python
train
43.911111
pytroll/satpy
satpy/readers/utils.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/utils.py#L105-L124
def _lonlat_from_geos_angle(x, y, geos_area):
    """Get lons and lats from x, y in projection coordinates."""
    h = (geos_area.proj_dict['h'] + geos_area.proj_dict['a']) / 1000
    b__ = (geos_area.proj_dict['a'] / geos_area.proj_dict['b']) ** 2

    sd = np.sqrt((h * np.cos(x) * np.cos(y)) ** 2 -
                 (np.cos(y)**2 + b__ * np.sin(y)**2) *
                 (h**2 - (geos_area.proj_dict['a'] / 1000)**2))
    # sd = 0

    sn = (h * np.cos(x) * np.cos(y) - sd) / (np.cos(y)**2 + b__ * np.sin(y)**2)

    s1 = h - sn * np.cos(x) * np.cos(y)
    s2 = sn * np.sin(x) * np.cos(y)
    s3 = -sn * np.sin(y)
    sxy = np.sqrt(s1**2 + s2**2)

    lons = np.rad2deg(np.arctan2(s2, s1)) + geos_area.proj_dict.get('lon_0', 0)
    lats = np.rad2deg(-np.arctan2(b__ * s3, sxy))

    return lons, lats
[ "def", "_lonlat_from_geos_angle", "(", "x", ",", "y", ",", "geos_area", ")", ":", "h", "=", "(", "geos_area", ".", "proj_dict", "[", "'h'", "]", "+", "geos_area", ".", "proj_dict", "[", "'a'", "]", ")", "/", "1000", "b__", "=", "(", "geos_area", ".", "proj_dict", "[", "'a'", "]", "/", "geos_area", ".", "proj_dict", "[", "'b'", "]", ")", "**", "2", "sd", "=", "np", ".", "sqrt", "(", "(", "h", "*", "np", ".", "cos", "(", "x", ")", "*", "np", ".", "cos", "(", "y", ")", ")", "**", "2", "-", "(", "np", ".", "cos", "(", "y", ")", "**", "2", "+", "b__", "*", "np", ".", "sin", "(", "y", ")", "**", "2", ")", "*", "(", "h", "**", "2", "-", "(", "geos_area", ".", "proj_dict", "[", "'a'", "]", "/", "1000", ")", "**", "2", ")", ")", "# sd = 0", "sn", "=", "(", "h", "*", "np", ".", "cos", "(", "x", ")", "*", "np", ".", "cos", "(", "y", ")", "-", "sd", ")", "/", "(", "np", ".", "cos", "(", "y", ")", "**", "2", "+", "b__", "*", "np", ".", "sin", "(", "y", ")", "**", "2", ")", "s1", "=", "h", "-", "sn", "*", "np", ".", "cos", "(", "x", ")", "*", "np", ".", "cos", "(", "y", ")", "s2", "=", "sn", "*", "np", ".", "sin", "(", "x", ")", "*", "np", ".", "cos", "(", "y", ")", "s3", "=", "-", "sn", "*", "np", ".", "sin", "(", "y", ")", "sxy", "=", "np", ".", "sqrt", "(", "s1", "**", "2", "+", "s2", "**", "2", ")", "lons", "=", "np", ".", "rad2deg", "(", "np", ".", "arctan2", "(", "s2", ",", "s1", ")", ")", "+", "geos_area", ".", "proj_dict", ".", "get", "(", "'lon_0'", ",", "0", ")", "lats", "=", "np", ".", "rad2deg", "(", "-", "np", ".", "arctan2", "(", "b__", "*", "s3", ",", "sxy", ")", ")", "return", "lons", ",", "lats" ]
Get lons and lats from x, y in projection coordinates.
[ "Get", "lons", "and", "lats", "from", "x", "y", "in", "projection", "coordinates", "." ]
python
train
39.15
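For readability, the arithmetic in the record above transcribes to the following (a direct rendering of the code, with h the satellite distance from the Earth's centre in km, a and b_e the ellipsoid semi-axes in km, and x, y the scan angles):

\begin{aligned}
b &= (a / b_e)^2,\\
s_d &= \sqrt{(h\cos x\cos y)^2 - (\cos^2 y + b\sin^2 y)\,(h^2 - a^2)},\\
s_n &= \frac{h\cos x\cos y - s_d}{\cos^2 y + b\sin^2 y},\\
s_1 &= h - s_n\cos x\cos y,\quad s_2 = s_n\sin x\cos y,\quad s_3 = -s_n\sin y,\\
\lambda &= \operatorname{atan2}(s_2, s_1) + \lambda_0,\qquad
\varphi = -\operatorname{atan2}\!\left(b\,s_3,\ \sqrt{s_1^2 + s_2^2}\right).
\end{aligned}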
instaloader/instaloader
instaloader/instaloadercontext.py
https://github.com/instaloader/instaloader/blob/87d877e650cd8020b04b8b51be120599a441fd5b/instaloader/instaloadercontext.py#L268-L276
def _dump_query_timestamps(self, current_time: float):
    """Output the number of GraphQL queries grouped by their query_hash within the last time."""
    windows = [10, 11, 15, 20, 30, 60]
    print("GraphQL requests:", file=sys.stderr)
    for query_hash, times in self._graphql_query_timestamps.items():
        print(" {}".format(query_hash), file=sys.stderr)
        for window in windows:
            reqs_in_sliding_window = sum(t > current_time - window * 60 for t in times)
            print(" last {} minutes: {} requests".format(window, reqs_in_sliding_window), file=sys.stderr)
[ "def", "_dump_query_timestamps", "(", "self", ",", "current_time", ":", "float", ")", ":", "windows", "=", "[", "10", ",", "11", ",", "15", ",", "20", ",", "30", ",", "60", "]", "print", "(", "\"GraphQL requests:\"", ",", "file", "=", "sys", ".", "stderr", ")", "for", "query_hash", ",", "times", "in", "self", ".", "_graphql_query_timestamps", ".", "items", "(", ")", ":", "print", "(", "\" {}\"", ".", "format", "(", "query_hash", ")", ",", "file", "=", "sys", ".", "stderr", ")", "for", "window", "in", "windows", ":", "reqs_in_sliding_window", "=", "sum", "(", "t", ">", "current_time", "-", "window", "*", "60", "for", "t", "in", "times", ")", "print", "(", "\" last {} minutes: {} requests\"", ".", "format", "(", "window", ",", "reqs_in_sliding_window", ")", ",", "file", "=", "sys", ".", "stderr", ")" ]
Output the number of GraphQL queries grouped by their query_hash within the last time.
[ "Output", "the", "number", "of", "GraphQL", "queries", "grouped", "by", "their", "query_hash", "within", "the", "last", "time", "." ]
python
train
68.666667
CityOfZion/neo-python-core
neocore/Cryptography/ECCurve.py
https://github.com/CityOfZion/neo-python-core/blob/786c02cc2f41712d70b1f064ae3d67f86167107f/neocore/Cryptography/ECCurve.py#L864-L870
def secp256k1():
    """
    create the secp256k1 curve
    """
    GFp = FiniteField(2 ** 256 - 2 ** 32 - 977)  # This is P from below... aka FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
    ec = EllipticCurve(GFp, 0, 7)
    return ECDSA(ec,
                 ec.point(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
                          0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8),
                 2 ** 256 - 432420386565659656852420866394968145599)
[ "def", "secp256k1", "(", ")", ":", "GFp", "=", "FiniteField", "(", "2", "**", "256", "-", "2", "**", "32", "-", "977", ")", "# This is P from below... aka FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", "ec", "=", "EllipticCurve", "(", "GFp", ",", "0", ",", "7", ")", "return", "ECDSA", "(", "ec", ",", "ec", ".", "point", "(", "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", ",", "0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", ")", ",", "2", "**", "256", "-", "432420386565659656852420866394968145599", ")" ]
create the secp256k1 curve
[ "create", "the", "secp256k1", "curve" ]
python
train
68.285714
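Spelling out the constants in the record above (a transcription of the code, not new material): the curve is the short-Weierstrass form

y^2 = x^3 + 7 \ \text{over}\ \mathbb{F}_p,\qquad
p = 2^{256} - 2^{32} - 977,\qquad
n = 2^{256} - 432420386565659656852420866394968145599,

where n is the group order passed as the last ECDSA argument and the two `ec.point(...)` coordinates are the standard generator G.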
gnosis/gnosis-py
gnosis/safe/safe_service.py
https://github.com/gnosis/gnosis-py/blob/2a9a5d75a375fc9813ac04df133e6910c82f9d49/gnosis/safe/safe_service.py#L575-L588
def estimate_tx_operational_gas(self, safe_address: str, data_bytes_length: int):
    """
    Estimates the gas for the verification of the signatures and other safe related tasks
    before and after executing a transaction.
    Calculation will be the sum of:
      - Base cost of 15000 gas
      - 100 of gas per word of `data_bytes`
      - Validate the signatures 5000 * threshold (ecrecover for ecdsa ~= 4K gas)
    :param safe_address: Address of the safe
    :param data_bytes_length: Length of the data (in bytes, so `len(HexBytes('0x12'))` would be `1`)
    :return: gas costs per signature * threshold of Safe
    """
    threshold = self.retrieve_threshold(safe_address)
    return 15000 + data_bytes_length // 32 * 100 + 5000 * threshold
[ "def", "estimate_tx_operational_gas", "(", "self", ",", "safe_address", ":", "str", ",", "data_bytes_length", ":", "int", ")", ":", "threshold", "=", "self", ".", "retrieve_threshold", "(", "safe_address", ")", "return", "15000", "+", "data_bytes_length", "//", "32", "*", "100", "+", "5000", "*", "threshold" ]
Estimates the gas for the verification of the signatures and other safe related tasks before and after executing a transaction. Calculation will be the sum of: - Base cost of 15000 gas - 100 of gas per word of `data_bytes` - Validate the signatures 5000 * threshold (ecrecover for ecdsa ~= 4K gas) :param safe_address: Address of the safe :param data_bytes_length: Length of the data (in bytes, so `len(HexBytes('0x12'))` would be `1`) :return: gas costs per signature * threshold of Safe
[ "Estimates", "the", "gas", "for", "the", "verification", "of", "the", "signatures", "and", "other", "safe", "related", "tasks", "before", "and", "after", "executing", "a", "transaction", ".", "Calculation", "will", "be", "the", "sum", "of", ":", "-", "Base", "cost", "of", "15000", "gas", "-", "100", "of", "gas", "per", "word", "of", "data_bytes", "-", "Validate", "the", "signatures", "5000", "*", "threshold", "(", "ecrecover", "for", "ecdsa", "~", "=", "4K", "gas", ")", ":", "param", "safe_address", ":", "Address", "of", "the", "safe", ":", "param", "data_bytes_length", ":", "Length", "of", "the", "data", "(", "in", "bytes", "so", "len", "(", "HexBytes", "(", "0x12", "))", "would", "be", "1", ":", "return", ":", "gas", "costs", "per", "signature", "*", "threshold", "of", "Safe" ]
python
test
56.285714
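The cost model in the docstring above is easy to check by hand. A sketch with the on-chain threshold lookup replaced by a plain parameter (hypothetical numbers, not part of gnosis-py):

def operational_gas(data_bytes_length: int, threshold: int) -> int:
    # Base 15000, plus 100 gas per whole 32-byte word, plus 5000 per required signature
    return 15000 + data_bytes_length // 32 * 100 + 5000 * threshold

# 68 bytes -> 2 whole words -> 200 gas; threshold 2 -> 10000 gas
assert operational_gas(68, 2) == 15000 + 200 + 10000 == 25200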
theolind/pymysensors
mysensors/gateway_mqtt.py
https://github.com/theolind/pymysensors/blob/a139ab6e2f6b71ebaf37282f69bfd0f7fe6193b6/mysensors/gateway_mqtt.py#L34-L49
def _handle_subscription(self, topics):
    """Handle subscription of topics."""
    if not isinstance(topics, list):
        topics = [topics]
    for topic in topics:
        topic_levels = topic.split('/')
        try:
            qos = int(topic_levels[-2])
        except ValueError:
            qos = 0
        try:
            _LOGGER.debug('Subscribing to: %s, qos: %s', topic, qos)
            self._sub_callback(topic, self.recv, qos)
        except Exception as exception:  # pylint: disable=broad-except
            _LOGGER.exception(
                'Subscribe to %s failed: %s', topic, exception)
[ "def", "_handle_subscription", "(", "self", ",", "topics", ")", ":", "if", "not", "isinstance", "(", "topics", ",", "list", ")", ":", "topics", "=", "[", "topics", "]", "for", "topic", "in", "topics", ":", "topic_levels", "=", "topic", ".", "split", "(", "'/'", ")", "try", ":", "qos", "=", "int", "(", "topic_levels", "[", "-", "2", "]", ")", "except", "ValueError", ":", "qos", "=", "0", "try", ":", "_LOGGER", ".", "debug", "(", "'Subscribing to: %s, qos: %s'", ",", "topic", ",", "qos", ")", "self", ".", "_sub_callback", "(", "topic", ",", "self", ".", "recv", ",", "qos", ")", "except", "Exception", "as", "exception", ":", "# pylint: disable=broad-except", "_LOGGER", ".", "exception", "(", "'Subscribe to %s failed: %s'", ",", "topic", ",", "exception", ")" ]
Handle subscription of topics.
[ "Handle", "subscription", "of", "topics", "." ]
python
train
40.9375
SmokinCaterpillar/pypet
examples/example_24_large_scale_brian2_simulation/clusternet.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/examples/example_24_large_scale_brian2_simulation/clusternet.py#L287-L325
def pre_build(self, traj, brian_list, network_dict):
    """Pre-builds the connections.

    Pre-build is only performed if none of the relevant parameters is
    explored and the relevant neuron groups exist.

    :param traj: Trajectory container

    :param brian_list:

        List of objects passed to BRIAN network constructor.

        Adds: Connections, amount depends on clustering

    :param network_dict:

        Dictionary of elements shared among the components

        Expects:

        'neurons_i': Inhibitory neuron group

        'neurons_e': Excitatory neuron group

        Adds: Connections, amount depends on clustering

    """
    self._pre_build = not _explored_parameters_in_group(traj, traj.parameters.connections)
    self._pre_build = (self._pre_build and
                       'neurons_i' in network_dict and
                       'neurons_e' in network_dict)

    if self._pre_build:
        self._build_connections(traj, brian_list, network_dict)
[ "def", "pre_build", "(", "self", ",", "traj", ",", "brian_list", ",", "network_dict", ")", ":", "self", ".", "_pre_build", "=", "not", "_explored_parameters_in_group", "(", "traj", ",", "traj", ".", "parameters", ".", "connections", ")", "self", ".", "_pre_build", "=", "(", "self", ".", "_pre_build", "and", "'neurons_i'", "in", "network_dict", "and", "'neurons_e'", "in", "network_dict", ")", "if", "self", ".", "_pre_build", ":", "self", ".", "_build_connections", "(", "traj", ",", "brian_list", ",", "network_dict", ")" ]
Pre-builds the connections. Pre-build is only performed if none of the relevant parameters is explored and the relevant neuron groups exist. :param traj: Trajectory container :param brian_list: List of objects passed to BRIAN network constructor. Adds: Connections, amount depends on clustering :param network_dict: Dictionary of elements shared among the components Expects: 'neurons_i': Inhibitory neuron group 'neurons_e': Excitatory neuron group Adds: Connections, amount depends on clustering
[ "Pre", "-", "builds", "the", "connections", "." ]
python
test
26.487179
mitsei/dlkit
dlkit/json_/resource/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/objects.py#L185-L190
def _init_map(self, record_types=None, **kwargs):
    """Initialize form map"""
    osid_objects.OsidObjectForm._init_map(self, record_types=record_types)
    self._my_map['assignedBinIds'] = [str(kwargs['bin_id'])]
    self._my_map['group'] = self._group_default
    self._my_map['avatarId'] = self._avatar_default
[ "def", "_init_map", "(", "self", ",", "record_types", "=", "None", ",", "*", "*", "kwargs", ")", ":", "osid_objects", ".", "OsidObjectForm", ".", "_init_map", "(", "self", ",", "record_types", "=", "record_types", ")", "self", ".", "_my_map", "[", "'assignedBinIds'", "]", "=", "[", "str", "(", "kwargs", "[", "'bin_id'", "]", ")", "]", "self", ".", "_my_map", "[", "'group'", "]", "=", "self", ".", "_group_default", "self", ".", "_my_map", "[", "'avatarId'", "]", "=", "self", ".", "_avatar_default" ]
Initialize form map
[ "Initialize", "form", "map" ]
python
train
55
widdowquinn/pyani
pyani/pyani_tools.py
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_tools.py#L33-L37
def add_tot_length(self, qname, sname, value, sym=True):
    """Add a total length value to self.alignment_lengths."""
    self.alignment_lengths.loc[qname, sname] = value
    if sym:
        self.alignment_lengths.loc[sname, qname] = value
[ "def", "add_tot_length", "(", "self", ",", "qname", ",", "sname", ",", "value", ",", "sym", "=", "True", ")", ":", "self", ".", "alignment_lengths", ".", "loc", "[", "qname", ",", "sname", "]", "=", "value", "if", "sym", ":", "self", ".", "alignment_lengths", ".", "loc", "[", "sname", ",", "qname", "]", "=", "value" ]
Add a total length value to self.alignment_lengths.
[ "Add", "a", "total", "length", "value", "to", "self", ".", "alignment_lengths", "." ]
python
train
50.4
GNS3/gns3-server
gns3server/compute/docker/__init__.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/docker/__init__.py#L117-L167
def http_query(self, method, path, data={}, params={}, timeout=300):
    """
    Make a query to the docker daemon

    :param method: HTTP method
    :param path: Endpoint in API
    :param data: Dictionary with the body. Will be transformed to a JSON
    :param params: Parameters added as a query arg
    :param timeout: Timeout

    :returns: HTTP response
    """
    data = json.dumps(data)
    if timeout is None:
        timeout = 60 * 60 * 24 * 31  # One month timeout

    if path == 'version':
        url = "http://docker/v1.12/" + path  # API of docker v1.0
    else:
        url = "http://docker/v" + DOCKER_MINIMUM_API_VERSION + "/" + path
    try:
        if path != "version":  # version is used by check connection
            yield from self._check_connection()
        if self._session is None or self._session.closed:
            connector = self.connector()
            self._session = aiohttp.ClientSession(connector=connector)
        response = yield from self._session.request(
            method,
            url,
            params=params,
            data=data,
            headers={"content-type": "application/json", },
            timeout=timeout
        )
    except (aiohttp.ClientResponseError, aiohttp.ClientOSError) as e:
        raise DockerError("Docker has returned an error: {}".format(str(e)))
    except (asyncio.TimeoutError):
        raise DockerError("Docker timeout " + method + " " + path)
    if response.status >= 300:
        body = yield from response.read()
        try:
            body = json.loads(body.decode("utf-8"))["message"]
        except ValueError:
            pass
        log.debug("Query Docker %s %s params=%s data=%s Response: %s",
                  method, path, params, data, body)
        if response.status == 304:
            raise DockerHttp304Error("Docker has returned an error: {} {}".format(response.status, body))
        elif response.status == 404:
            raise DockerHttp404Error("Docker has returned an error: {} {}".format(response.status, body))
        else:
            raise DockerError("Docker has returned an error: {} {}".format(response.status, body))
    return response
[ "def", "http_query", "(", "self", ",", "method", ",", "path", ",", "data", "=", "{", "}", ",", "params", "=", "{", "}", ",", "timeout", "=", "300", ")", ":", "data", "=", "json", ".", "dumps", "(", "data", ")", "if", "timeout", "is", "None", ":", "timeout", "=", "60", "*", "60", "*", "24", "*", "31", "# One month timeout", "if", "path", "==", "'version'", ":", "url", "=", "\"http://docker/v1.12/\"", "+", "path", "# API of docker v1.0", "else", ":", "url", "=", "\"http://docker/v\"", "+", "DOCKER_MINIMUM_API_VERSION", "+", "\"/\"", "+", "path", "try", ":", "if", "path", "!=", "\"version\"", ":", "# version is use by check connection", "yield", "from", "self", ".", "_check_connection", "(", ")", "if", "self", ".", "_session", "is", "None", "or", "self", ".", "_session", ".", "closed", ":", "connector", "=", "self", ".", "connector", "(", ")", "self", ".", "_session", "=", "aiohttp", ".", "ClientSession", "(", "connector", "=", "connector", ")", "response", "=", "yield", "from", "self", ".", "_session", ".", "request", "(", "method", ",", "url", ",", "params", "=", "params", ",", "data", "=", "data", ",", "headers", "=", "{", "\"content-type\"", ":", "\"application/json\"", ",", "}", ",", "timeout", "=", "timeout", ")", "except", "(", "aiohttp", ".", "ClientResponseError", ",", "aiohttp", ".", "ClientOSError", ")", "as", "e", ":", "raise", "DockerError", "(", "\"Docker has returned an error: {}\"", ".", "format", "(", "str", "(", "e", ")", ")", ")", "except", "(", "asyncio", ".", "TimeoutError", ")", ":", "raise", "DockerError", "(", "\"Docker timeout \"", "+", "method", "+", "\" \"", "+", "path", ")", "if", "response", ".", "status", ">=", "300", ":", "body", "=", "yield", "from", "response", ".", "read", "(", ")", "try", ":", "body", "=", "json", ".", "loads", "(", "body", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "\"message\"", "]", "except", "ValueError", ":", "pass", "log", ".", "debug", "(", "\"Query Docker %s %s params=%s data=%s Response: %s\"", ",", "method", ",", "path", ",", "params", ",", "data", ",", "body", ")", "if", "response", ".", "status", "==", "304", ":", "raise", "DockerHttp304Error", "(", "\"Docker has returned an error: {} {}\"", ".", "format", "(", "response", ".", "status", ",", "body", ")", ")", "elif", "response", ".", "status", "==", "404", ":", "raise", "DockerHttp404Error", "(", "\"Docker has returned an error: {} {}\"", ".", "format", "(", "response", ".", "status", ",", "body", ")", ")", "else", ":", "raise", "DockerError", "(", "\"Docker has returned an error: {} {}\"", ".", "format", "(", "response", ".", "status", ",", "body", ")", ")", "return", "response" ]
Make a query to the docker daemon :param method: HTTP method :param path: Endpoint in API :param data: Dictionary with the body. Will be transformed to a JSON :param params: Parameters added as a query arg :param timeout: Timeout :returns: HTTP response
[ "Make", "a", "query", "to", "the", "docker", "daemon" ]
python
train
45.098039
KelSolaar/Umbra
umbra/components/factory/script_editor/script_editor.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/script_editor.py#L3099-L3114
def get_editorTab(self, editor):
    """
    Returns the **Script_Editor_tabWidget** Widget tab associated with the given editor.

    :param editor: Editor to search tab for.
    :type editor: Editor
    :return: Tab index.
    :rtype: Editor
    """
    for i in range(self.Script_Editor_tabWidget.count()):
        if not self.get_widget(i) == editor:
            continue

        LOGGER.debug("> Editor '{0}': Tab index '{1}'.".format(editor, i))
        return i
[ "def", "get_editorTab", "(", "self", ",", "editor", ")", ":", "for", "i", "in", "range", "(", "self", ".", "Script_Editor_tabWidget", ".", "count", "(", ")", ")", ":", "if", "not", "self", ".", "get_widget", "(", "i", ")", "==", "editor", ":", "continue", "LOGGER", ".", "debug", "(", "\"> Editor '{0}': Tab index '{1}'.\"", ".", "format", "(", "editor", ",", "i", ")", ")", "return", "i" ]
Returns the **Script_Editor_tabWidget** Widget tab associated with the given editor.

:param editor: Editor to search tab for.
:type editor: Editor
:return: Tab index.
:rtype: int
[ "Returns", "the", "**", "Script_Editor_tabWidget", "**", "Widget", "tab", "associated", "with", "the", "given", "editor", "." ]
python
train
31.375
joelfrederico/SciSalt
scisalt/numpy/functions.py
https://github.com/joelfrederico/SciSalt/blob/7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f/scisalt/numpy/functions.py#L7-L22
def gaussian(x, mu, sigma): """ Gaussian function of the form :math:`\\frac{1}{\\sqrt{2 \\pi}\\sigma} e^{-\\frac{(x-\\mu)^2}{2\\sigma^2}}`. .. versionadded:: 1.5 Parameters ---------- x : float Function variable :math:`x`. mu : float Mean of the Gaussian function. sigma : float Standard deviation of the Gaussian function. """ return _np.exp(-(x-mu)**2/(2*sigma**2)) / (_np.sqrt(2*_np.pi) * sigma)
[ "def", "gaussian", "(", "x", ",", "mu", ",", "sigma", ")", ":", "return", "_np", ".", "exp", "(", "-", "(", "x", "-", "mu", ")", "**", "2", "/", "(", "2", "*", "sigma", "**", "2", ")", ")", "/", "(", "_np", ".", "sqrt", "(", "2", "*", "_np", ".", "pi", ")", "*", "sigma", ")" ]
Gaussian function of the form :math:`\\frac{1}{\\sqrt{2 \\pi}\\sigma} e^{-\\frac{(x-\\mu)^2}{2\\sigma^2}}`. .. versionadded:: 1.5 Parameters ---------- x : float Function variable :math:`x`. mu : float Mean of the Gaussian function. sigma : float Standard deviation of the Gaussian function.
[ "Gaussian", "function", "of", "the", "form", ":", "math", ":", "\\\\", "frac", "{", "1", "}", "{", "\\\\", "sqrt", "{", "2", "\\\\", "pi", "}", "\\\\", "sigma", "}", "e^", "{", "-", "\\\\", "frac", "{", "(", "x", "-", "\\\\", "mu", ")", "^2", "}", "{", "2", "\\\\", "sigma^2", "}}", "." ]
python
valid
28.0625
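A quick numerical sanity check of the formula, assuming only NumPy: the density should integrate to roughly 1 and peak at 1/(sqrt(2*pi)*sigma).

import numpy as np

def gaussian(x, mu, sigma):
    return np.exp(-(x - mu)**2 / (2 * sigma**2)) / (np.sqrt(2 * np.pi) * sigma)

x = np.linspace(-20.0, 20.0, 20001)
dx = x[1] - x[0]
print(np.sum(gaussian(x, mu=1.0, sigma=2.0)) * dx)  # ~1.0: normalised density
print(gaussian(1.0, 1.0, 2.0))                      # peak = 1/(2*sqrt(2*pi)) ~ 0.1995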
tensorflow/probability
tensorflow_probability/python/distributions/lkj.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/lkj.py#L59-L63
def _replicate(n, tensor): """Replicate the input tensor n times along a new (major) dimension.""" # TODO(axch) Does this already exist somewhere? Should it get contributed? multiples = tf.concat([[n], tf.ones_like(tensor.shape)], axis=0) return tf.tile(tf.expand_dims(tensor, axis=0), multiples)
[ "def", "_replicate", "(", "n", ",", "tensor", ")", ":", "# TODO(axch) Does this already exist somewhere? Should it get contributed?", "multiples", "=", "tf", ".", "concat", "(", "[", "[", "n", "]", ",", "tf", ".", "ones_like", "(", "tensor", ".", "shape", ")", "]", ",", "axis", "=", "0", ")", "return", "tf", ".", "tile", "(", "tf", ".", "expand_dims", "(", "tensor", ",", "axis", "=", "0", ")", ",", "multiples", ")" ]
Replicate the input tensor n times along a new (major) dimension.
[ "Replicate", "the", "input", "tensor", "n", "times", "along", "a", "new", "(", "major", ")", "dimension", "." ]
python
test
60.2
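The helper builds its ``multiples`` vector from the input's shape so it works for any rank; for a concrete rank-2 tensor the same expand_dims/tile idiom looks like this (a TF 2.x eager-mode sketch, not the library's own test):

import tensorflow as tf

t = tf.constant([[1., 2.], [3., 4.]])              # shape (2, 2)
replicated = tf.tile(tf.expand_dims(t, axis=0), [3, 1, 1])
print(replicated.shape)                            # (3, 2, 2)
print(bool(tf.reduce_all(replicated[1] == t)))     # True: each slice is a copy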
facelessuser/pyspelling
pyspelling/filters/ooxml.py
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/ooxml.py#L93-L114
def determine_file_type(self, z): """Determine file type.""" content = z.read('[Content_Types].xml') with io.BytesIO(content) as b: encoding = self._analyze_file(b) if encoding is None: encoding = 'utf-8' b.seek(0) text = b.read().decode(encoding) soup = bs4.BeautifulSoup(text, 'xml') for o in soup.find_all('Override'): name = o.attrs.get('PartName') for k, v in MIMEMAP.items(): if name.startswith('/{}/'.format(k)): self.type = v break if self.type: break self.filepattern = DOC_PARAMS[self.type]['filepattern'] self.namespaces = DOC_PARAMS[self.type]['namespaces'] self.captures = sv.compile(DOC_PARAMS[self.type]['captures'], DOC_PARAMS[self.type]['namespaces'])
[ "def", "determine_file_type", "(", "self", ",", "z", ")", ":", "content", "=", "z", ".", "read", "(", "'[Content_Types].xml'", ")", "with", "io", ".", "BytesIO", "(", "content", ")", "as", "b", ":", "encoding", "=", "self", ".", "_analyze_file", "(", "b", ")", "if", "encoding", "is", "None", ":", "encoding", "=", "'utf-8'", "b", ".", "seek", "(", "0", ")", "text", "=", "b", ".", "read", "(", ")", ".", "decode", "(", "encoding", ")", "soup", "=", "bs4", ".", "BeautifulSoup", "(", "text", ",", "'xml'", ")", "for", "o", "in", "soup", ".", "find_all", "(", "'Override'", ")", ":", "name", "=", "o", ".", "attrs", ".", "get", "(", "'PartName'", ")", "for", "k", ",", "v", "in", "MIMEMAP", ".", "items", "(", ")", ":", "if", "name", ".", "startswith", "(", "'/{}/'", ".", "format", "(", "k", ")", ")", ":", "self", ".", "type", "=", "v", "break", "if", "self", ".", "type", ":", "break", "self", ".", "filepattern", "=", "DOC_PARAMS", "[", "self", ".", "type", "]", "[", "'filepattern'", "]", "self", ".", "namespaces", "=", "DOC_PARAMS", "[", "self", ".", "type", "]", "[", "'namespaces'", "]", "self", ".", "captures", "=", "sv", ".", "compile", "(", "DOC_PARAMS", "[", "self", ".", "type", "]", "[", "'captures'", "]", ",", "DOC_PARAMS", "[", "self", ".", "type", "]", "[", "'namespaces'", "]", ")" ]
Determine file type.
[ "Determine", "file", "type", "." ]
python
train
41.818182
philipsoutham/py-mysql2pgsql
mysql2pgsql/lib/postgres_db_writer.py
https://github.com/philipsoutham/py-mysql2pgsql/blob/66dc2a3a3119263b3fe77300fb636346509787ef/mysql2pgsql/lib/postgres_db_writer.py#L144-L154
def write_table(self, table): """Send DDL to create the specified `table` :Parameters: - `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write. Returns None """ table_sql, serial_key_sql = super(PostgresDbWriter, self).write_table(table) for sql in serial_key_sql + table_sql: self.execute(sql)
[ "def", "write_table", "(", "self", ",", "table", ")", ":", "table_sql", ",", "serial_key_sql", "=", "super", "(", "PostgresDbWriter", ",", "self", ")", ".", "write_table", "(", "table", ")", "for", "sql", "in", "serial_key_sql", "+", "table_sql", ":", "self", ".", "execute", "(", "sql", ")" ]
Send DDL to create the specified `table` :Parameters: - `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write. Returns None
[ "Send", "DDL", "to", "create", "the", "specified", "table" ]
python
test
39.454545
projectatomic/osbs-client
osbs/build/plugins_configuration.py
https://github.com/projectatomic/osbs-client/blob/571fe035dab3a7c02e1dccd5d65ffd75be750458/osbs/build/plugins_configuration.py#L485-L492
def render_pull_base_image(self): """Configure pull_base_image""" phase = 'prebuild_plugins' plugin = 'pull_base_image' if self.user_params.parent_images_digests.value: self.pt.set_plugin_arg(phase, plugin, 'parent_images_digests', self.user_params.parent_images_digests.value)
[ "def", "render_pull_base_image", "(", "self", ")", ":", "phase", "=", "'prebuild_plugins'", "plugin", "=", "'pull_base_image'", "if", "self", ".", "user_params", ".", "parent_images_digests", ".", "value", ":", "self", ".", "pt", ".", "set_plugin_arg", "(", "phase", ",", "plugin", ",", "'parent_images_digests'", ",", "self", ".", "user_params", ".", "parent_images_digests", ".", "value", ")" ]
Configure pull_base_image
[ "Configure", "pull_base_image" ]
python
train
43.75
HIPS/autograd
examples/data.py
https://github.com/HIPS/autograd/blob/e3b525302529d7490769d5c0bcfc7457e24e3b3e/examples/data.py#L53-L67
def make_pinwheel(radial_std, tangential_std, num_classes, num_per_class, rate, rs=npr.RandomState(0)): """Based on code by Ryan P. Adams.""" rads = np.linspace(0, 2*np.pi, num_classes, endpoint=False) features = rs.randn(num_classes*num_per_class, 2) \ * np.array([radial_std, tangential_std]) features[:, 0] += 1 labels = np.repeat(np.arange(num_classes), num_per_class) angles = rads[labels] + rate * np.exp(features[:,0]) rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)]) rotations = np.reshape(rotations.T, (-1, 2, 2)) return np.einsum('ti,tij->tj', features, rotations)
[ "def", "make_pinwheel", "(", "radial_std", ",", "tangential_std", ",", "num_classes", ",", "num_per_class", ",", "rate", ",", "rs", "=", "npr", ".", "RandomState", "(", "0", ")", ")", ":", "rads", "=", "np", ".", "linspace", "(", "0", ",", "2", "*", "np", ".", "pi", ",", "num_classes", ",", "endpoint", "=", "False", ")", "features", "=", "rs", ".", "randn", "(", "num_classes", "*", "num_per_class", ",", "2", ")", "*", "np", ".", "array", "(", "[", "radial_std", ",", "tangential_std", "]", ")", "features", "[", ":", ",", "0", "]", "+=", "1", "labels", "=", "np", ".", "repeat", "(", "np", ".", "arange", "(", "num_classes", ")", ",", "num_per_class", ")", "angles", "=", "rads", "[", "labels", "]", "+", "rate", "*", "np", ".", "exp", "(", "features", "[", ":", ",", "0", "]", ")", "rotations", "=", "np", ".", "stack", "(", "[", "np", ".", "cos", "(", "angles", ")", ",", "-", "np", ".", "sin", "(", "angles", ")", ",", "np", ".", "sin", "(", "angles", ")", ",", "np", ".", "cos", "(", "angles", ")", "]", ")", "rotations", "=", "np", ".", "reshape", "(", "rotations", ".", "T", ",", "(", "-", "1", ",", "2", ",", "2", ")", ")", "return", "np", ".", "einsum", "(", "'ti,tij->tj'", ",", "features", ",", "rotations", ")" ]
Based on code by Ryan P. Adams.
[ "Based", "on", "code", "by", "Ryan", "P", ".", "Adams", "." ]
python
train
44.266667
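A usage sketch, assuming ``make_pinwheel`` above is importable; the parameter values are illustrative, not taken from the examples:

data = make_pinwheel(radial_std=0.3, tangential_std=0.05,
                     num_classes=3, num_per_class=100, rate=0.4)
print(data.shape)   # (300, 2): three spiral arms of 100 two-dimensional points
# each arm is a rotated, rate-warped Gaussian blob centred at radius 1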
Unity-Technologies/ml-agents
ml-agents/mlagents/trainers/barracuda.py
https://github.com/Unity-Technologies/ml-agents/blob/37d139af636e4a2351751fbf0f2fca5a9ed7457f/ml-agents/mlagents/trainers/barracuda.py#L63-L73
def fuse_batchnorm_weights(gamma, beta, mean, var, epsilon): # https://github.com/Tencent/ncnn/blob/master/src/layer/batchnorm.cpp """ float sqrt_var = sqrt(var_data[i]); a_data[i] = bias_data[i] - slope_data[i] * mean_data[i] / sqrt_var; b_data[i] = slope_data[i] / sqrt_var; ... ptr[i] = b * ptr[i] + a; """ scale = gamma / np.sqrt(var + epsilon) bias = beta - gamma * mean / np.sqrt(var + epsilon) return [scale, bias]
[ "def", "fuse_batchnorm_weights", "(", "gamma", ",", "beta", ",", "mean", ",", "var", ",", "epsilon", ")", ":", "# https://github.com/Tencent/ncnn/blob/master/src/layer/batchnorm.cpp", "scale", "=", "gamma", "/", "np", ".", "sqrt", "(", "var", "+", "epsilon", ")", "bias", "=", "beta", "-", "gamma", "*", "mean", "/", "np", ".", "sqrt", "(", "var", "+", "epsilon", ")", "return", "[", "scale", ",", "bias", "]" ]
float sqrt_var = sqrt(var_data[i]); a_data[i] = bias_data[i] - slope_data[i] * mean_data[i] / sqrt_var; b_data[i] = slope_data[i] / sqrt_var; ... ptr[i] = b * ptr[i] + a;
[ "float", "sqrt_var", "=", "sqrt", "(", "var_data", "[", "i", "]", ")", ";", "a_data", "[", "i", "]", "=", "bias_data", "[", "i", "]", "-", "slope_data", "[", "i", "]", "*", "mean_data", "[", "i", "]", "/", "sqrt_var", ";", "b_data", "[", "i", "]", "=", "slope_data", "[", "i", "]", "/", "sqrt_var", ";", "...", "ptr", "[", "i", "]", "=", "b", "*", "ptr", "[", "i", "]", "+", "a", ";" ]
python
train
42.454545
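A NumPy check (an illustrative sketch, not part of the exporter) that the fused affine form ``scale * x + bias`` reproduces the batch-norm expression quoted in the docstring:

import numpy as np

def fuse_batchnorm_weights(gamma, beta, mean, var, epsilon):
    scale = gamma / np.sqrt(var + epsilon)
    bias = beta - gamma * mean / np.sqrt(var + epsilon)
    return [scale, bias]

rng = np.random.default_rng(0)
gamma, beta = rng.normal(size=4), rng.normal(size=4)
mean, var = rng.normal(size=4), rng.uniform(0.1, 2.0, size=4)
x = rng.normal(size=(8, 4))

scale, bias = fuse_batchnorm_weights(gamma, beta, mean, var, 1e-5)
print(np.allclose(scale * x + bias,
                  gamma * (x - mean) / np.sqrt(var + 1e-5) + beta))  # True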
mcrute/pydora
pydora/utils.py
https://github.com/mcrute/pydora/blob/d9e353e7f19da741dcf372246b4d5640cb788488/pydora/utils.py#L178-L192
def iterate_forever(func, *args, **kwargs):
    """Iterate over a finite iterator forever

    When the iterator is exhausted, the function is called again to
    generate a new iterator and iteration continues.
    """
    output = func(*args, **kwargs)

    while True:
        try:
            playlist_item = next(output)
            playlist_item.prepare_playback()
            yield playlist_item
        except StopIteration:
            output = func(*args, **kwargs)
[ "def", "iterate_forever", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "output", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "while", "True", ":", "try", ":", "playlist_item", "=", "next", "(", "output", ")", "playlist_item", ".", "prepare_playback", "(", ")", "yield", "playlist_item", "except", "StopIteration", ":", "output", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Iterate over a finite iterator forever

When the iterator is exhausted, the function is called again to generate a
new iterator and iteration continues.
[ "Iterate", "over", "a", "finite", "iterator", "forever" ]
python
valid
30.333333
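Stripped of the pydora-specific ``prepare_playback()`` call, the restart-on-exhaustion behaviour can be demonstrated with any finite iterator (a simplified sketch):

import itertools

def iterate_forever(func, *args, **kwargs):
    output = func(*args, **kwargs)
    while True:
        try:
            yield next(output)
        except StopIteration:
            # exhausted: ask func for a fresh iterator and keep going
            output = func(*args, **kwargs)

print(list(itertools.islice(iterate_forever(lambda: iter([1, 2, 3])), 8)))
# [1, 2, 3, 1, 2, 3, 1, 2]
# caveat: if func ever returns an empty iterator this loops forever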
miguelgrinberg/Flask-SocketIO
flask_socketio/namespace.py
https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/namespace.py#L28-L34
def emit(self, event, data=None, room=None, include_self=True, namespace=None, callback=None): """Emit a custom event to one or more connected clients.""" return self.socketio.emit(event, data, room=room, include_self=include_self, namespace=namespace or self.namespace, callback=callback)
[ "def", "emit", "(", "self", ",", "event", ",", "data", "=", "None", ",", "room", "=", "None", ",", "include_self", "=", "True", ",", "namespace", "=", "None", ",", "callback", "=", "None", ")", ":", "return", "self", ".", "socketio", ".", "emit", "(", "event", ",", "data", ",", "room", "=", "room", ",", "include_self", "=", "include_self", ",", "namespace", "=", "namespace", "or", "self", ".", "namespace", ",", "callback", "=", "callback", ")" ]
Emit a custom event to one or more connected clients.
[ "Emit", "a", "custom", "event", "to", "one", "or", "more", "connected", "clients", "." ]
python
train
59.142857
thumbor-community/redis
tc_redis/storages/redis_storage.py
https://github.com/thumbor-community/redis/blob/e434c151b2d32b2209ce9935493258ee29fb1d1d/tc_redis/storages/redis_storage.py#L28-L38
def get_storage(self): '''Get the storage instance. :return Redis: Redis instance ''' if self.storage: return self.storage self.storage = self.reconnect_redis() return self.storage
[ "def", "get_storage", "(", "self", ")", ":", "if", "self", ".", "storage", ":", "return", "self", ".", "storage", "self", ".", "storage", "=", "self", ".", "reconnect_redis", "(", ")", "return", "self", ".", "storage" ]
Get the storage instance. :return Redis: Redis instance
[ "Get", "the", "storage", "instance", "." ]
python
train
21.181818
librosa/librosa
librosa/display.py
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/display.py#L352-L354
def __envelope(x, hop):
    '''Compute the max-envelope of x at a stride/frame length of hop'''
    return util.frame(x, hop_length=hop, frame_length=hop).max(axis=0)
[ "def", "__envelope", "(", "x", ",", "hop", ")", ":", "return", "util", ".", "frame", "(", "x", ",", "hop_length", "=", "hop", ",", "frame_length", "=", "hop", ")", ".", "max", "(", "axis", "=", "0", ")" ]
Compute the max-envelope of x at a stride/frame length of hop
[ "Compute", "the", "max", "-", "envelope", "of", "x", "at", "a", "stride", "/", "frame", "length", "of", "h" ]
python
test
54
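``librosa.util.frame`` slices the signal into non-overlapping windows here (frame_length == hop_length); a NumPy-only sketch of the same max-envelope idea, under that assumption:

import numpy as np

def envelope(x, hop):
    n = (len(x) // hop) * hop          # drop the ragged tail, as framing does
    return x[:n].reshape(-1, hop).max(axis=1)

t = np.linspace(0, 1, 8000)
x = np.abs(np.sin(2 * np.pi * 5 * t) * np.exp(-3 * t))   # decaying tone
print(envelope(x, hop=400).shape)    # (20,): coarse peak values
print(envelope(x, hop=400)[:3])      # the first few local maxima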
fake-name/ChromeController
ChromeController/Generator/Generated.py
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L4211-L4232
def CSS_setMediaText(self, styleSheetId, range, text): """ Function path: CSS.setMediaText Domain: CSS Method name: setMediaText Parameters: Required arguments: 'styleSheetId' (type: StyleSheetId) -> No description 'range' (type: SourceRange) -> No description 'text' (type: string) -> No description Returns: 'media' (type: CSSMedia) -> The resulting CSS media rule after modification. Description: Modifies the rule selector. """ assert isinstance(text, (str,) ), "Argument 'text' must be of type '['str']'. Received type: '%s'" % type( text) subdom_funcs = self.synchronous_command('CSS.setMediaText', styleSheetId= styleSheetId, range=range, text=text) return subdom_funcs
[ "def", "CSS_setMediaText", "(", "self", ",", "styleSheetId", ",", "range", ",", "text", ")", ":", "assert", "isinstance", "(", "text", ",", "(", "str", ",", ")", ")", ",", "\"Argument 'text' must be of type '['str']'. Received type: '%s'\"", "%", "type", "(", "text", ")", "subdom_funcs", "=", "self", ".", "synchronous_command", "(", "'CSS.setMediaText'", ",", "styleSheetId", "=", "styleSheetId", ",", "range", "=", "range", ",", "text", "=", "text", ")", "return", "subdom_funcs" ]
Function path: CSS.setMediaText Domain: CSS Method name: setMediaText Parameters: Required arguments: 'styleSheetId' (type: StyleSheetId) -> No description 'range' (type: SourceRange) -> No description 'text' (type: string) -> No description Returns: 'media' (type: CSSMedia) -> The resulting CSS media rule after modification. Description: Modifies the rule selector.
[ "Function", "path", ":", "CSS", ".", "setMediaText", "Domain", ":", "CSS", "Method", "name", ":", "setMediaText", "Parameters", ":", "Required", "arguments", ":", "styleSheetId", "(", "type", ":", "StyleSheetId", ")", "-", ">", "No", "description", "range", "(", "type", ":", "SourceRange", ")", "-", ">", "No", "description", "text", "(", "type", ":", "string", ")", "-", ">", "No", "description", "Returns", ":", "media", "(", "type", ":", "CSSMedia", ")", "-", ">", "The", "resulting", "CSS", "media", "rule", "after", "modification", ".", "Description", ":", "Modifies", "the", "rule", "selector", "." ]
python
train
33.136364
apache/incubator-mxnet
example/cnn_text_classification/data_helpers.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/cnn_text_classification/data_helpers.py#L153-L165
def batch_iter(data, batch_size, num_epochs): """Generates a batch iterator for a dataset.""" data = np.array(data) data_size = len(data) num_batches_per_epoch = int(len(data)/batch_size) + 1 for epoch in range(num_epochs): # Shuffle the data at each epoch shuffle_indices = np.random.permutation(np.arange(data_size)) shuffled_data = data[shuffle_indices] for batch_num in range(num_batches_per_epoch): start_index = batch_num * batch_size end_index = min((batch_num + 1) * batch_size, data_size) yield shuffled_data[start_index:end_index]
[ "def", "batch_iter", "(", "data", ",", "batch_size", ",", "num_epochs", ")", ":", "data", "=", "np", ".", "array", "(", "data", ")", "data_size", "=", "len", "(", "data", ")", "num_batches_per_epoch", "=", "int", "(", "len", "(", "data", ")", "/", "batch_size", ")", "+", "1", "for", "epoch", "in", "range", "(", "num_epochs", ")", ":", "# Shuffle the data at each epoch", "shuffle_indices", "=", "np", ".", "random", ".", "permutation", "(", "np", ".", "arange", "(", "data_size", ")", ")", "shuffled_data", "=", "data", "[", "shuffle_indices", "]", "for", "batch_num", "in", "range", "(", "num_batches_per_epoch", ")", ":", "start_index", "=", "batch_num", "*", "batch_size", "end_index", "=", "min", "(", "(", "batch_num", "+", "1", ")", "*", "batch_size", ",", "data_size", ")", "yield", "shuffled_data", "[", "start_index", ":", "end_index", "]" ]
Generates a batch iterator for a dataset.
[ "Generates", "a", "batch", "iterator", "for", "a", "dataset", "." ]
python
train
47.384615
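A usage sketch, assuming ``batch_iter`` above is importable. One caveat worth noting: ``num_batches_per_epoch = int(len(data)/batch_size) + 1`` yields an empty trailing batch whenever the length divides evenly by the batch size, so callers may want to skip zero-length arrays; this demo picks sizes that do not divide.

import numpy as np

np.random.seed(0)   # make the shuffle reproducible
for batch in batch_iter(list(range(10)), batch_size=4, num_epochs=1):
    print(batch)    # two shuffled batches of 4, then the 2 leftover items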
Qiskit/qiskit-terra
qiskit/pulse/ops.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/pulse/ops.py#L54-L64
def shift(schedule: ScheduleComponent, time: int, name: str = None) -> Schedule: """Return schedule shifted by `time`. Args: schedule: The schedule to shift time: The time to shift by name: Name of shifted schedule. Defaults to name of `schedule` """ if name is None: name = schedule.name return union((time, schedule), name=name)
[ "def", "shift", "(", "schedule", ":", "ScheduleComponent", ",", "time", ":", "int", ",", "name", ":", "str", "=", "None", ")", "->", "Schedule", ":", "if", "name", "is", "None", ":", "name", "=", "schedule", ".", "name", "return", "union", "(", "(", "time", ",", "schedule", ")", ",", "name", "=", "name", ")" ]
Return schedule shifted by `time`. Args: schedule: The schedule to shift time: The time to shift by name: Name of shifted schedule. Defaults to name of `schedule`
[ "Return", "schedule", "shifted", "by", "time", "." ]
python
test
33.909091
saltstack/salt
salt/modules/vsphere.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L4164-L4198
def _get_dvportgroup_dict(pg_ref):
    '''
    Returns a dictionary with distributed virtual portgroup data

    pg_ref
        Portgroup reference
    '''
    props = salt.utils.vmware.get_properties_of_managed_object(
        pg_ref, ['name', 'config.description', 'config.numPorts',
                 'config.type', 'config.defaultPortConfig'])
    pg_dict = {'name': props['name'],
               'description': props.get('config.description'),
               'num_ports': props['config.numPorts'],
               'type': props['config.type']}
    if props['config.defaultPortConfig']:
        dpg = props['config.defaultPortConfig']
        if dpg.vlan and \
           isinstance(dpg.vlan, vim.VmwareDistributedVirtualSwitchVlanIdSpec):
            pg_dict.update({'vlan_id': dpg.vlan.vlanId})
        pg_dict.update({'out_shaping':
                        _get_dvportgroup_out_shaping(
                            props['name'],
                            props['config.defaultPortConfig'])})
        pg_dict.update({'security_policy':
                        _get_dvportgroup_security_policy(
                            props['name'],
                            props['config.defaultPortConfig'])})
        pg_dict.update({'teaming':
                        _get_dvportgroup_teaming(
                            props['name'],
                            props['config.defaultPortConfig'])})
    return pg_dict
[ "def", "_get_dvportgroup_dict", "(", "pg_ref", ")", ":", "props", "=", "salt", ".", "utils", ".", "vmware", ".", "get_properties_of_managed_object", "(", "pg_ref", ",", "[", "'name'", ",", "'config.description'", ",", "'config.numPorts'", ",", "'config.type'", ",", "'config.defaultPortConfig'", "]", ")", "pg_dict", "=", "{", "'name'", ":", "props", "[", "'name'", "]", ",", "'description'", ":", "props", ".", "get", "(", "'config.description'", ")", ",", "'num_ports'", ":", "props", "[", "'config.numPorts'", "]", ",", "'type'", ":", "props", "[", "'config.type'", "]", "}", "if", "props", "[", "'config.defaultPortConfig'", "]", ":", "dpg", "=", "props", "[", "'config.defaultPortConfig'", "]", "if", "dpg", ".", "vlan", "and", "isinstance", "(", "dpg", ".", "vlan", ",", "vim", ".", "VmwareDistributedVirtualSwitchVlanIdSpec", ")", ":", "pg_dict", ".", "update", "(", "{", "'vlan_id'", ":", "dpg", ".", "vlan", ".", "vlanId", "}", ")", "pg_dict", ".", "update", "(", "{", "'out_shaping'", ":", "_get_dvportgroup_out_shaping", "(", "props", "[", "'name'", "]", ",", "props", "[", "'config.defaultPortConfig'", "]", ")", "}", ")", "pg_dict", ".", "update", "(", "{", "'security_policy'", ":", "_get_dvportgroup_security_policy", "(", "props", "[", "'name'", "]", ",", "props", "[", "'config.defaultPortConfig'", "]", ")", "}", ")", "pg_dict", ".", "update", "(", "{", "'teaming'", ":", "_get_dvportgroup_teaming", "(", "props", "[", "'name'", "]", ",", "props", "[", "'config.defaultPortConfig'", "]", ")", "}", ")", "return", "pg_dict" ]
Returns a dictionary with distributed virtual portgroup data

pg_ref
    Portgroup reference
[ "Returns", "a", "dictionary", "with", "a", "distributed", "virutal", "portgroup", "data" ]
python
train
40.342857
PyCQA/pylint
pylint/checkers/design_analysis.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/design_analysis.py#L299-L304
def open(self): """initialize visit variables""" self.stats = self.linter.add_stats() self._returns = [] self._branches = defaultdict(int) self._stmts = []
[ "def", "open", "(", "self", ")", ":", "self", ".", "stats", "=", "self", ".", "linter", ".", "add_stats", "(", ")", "self", ".", "_returns", "=", "[", "]", "self", ".", "_branches", "=", "defaultdict", "(", "int", ")", "self", ".", "_stmts", "=", "[", "]" ]
initialize visit variables
[ "initialize", "visit", "variables" ]
python
test
31.666667
BlackEarth/bl
bl/zip.py
https://github.com/BlackEarth/bl/blob/edf6f37dac718987260b90ad0e7f7fe084a7c1a3/bl/zip.py#L20-L30
def write(self, fn=None): """copy the zip file from its filename to the given filename.""" fn = fn or self.fn if not os.path.exists(os.path.dirname(fn)): os.makedirs(os.path.dirname(fn)) f = open(self.fn, 'rb') b = f.read() f.close() f = open(fn, 'wb') f.write(b) f.close()
[ "def", "write", "(", "self", ",", "fn", "=", "None", ")", ":", "fn", "=", "fn", "or", "self", ".", "fn", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "fn", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "fn", ")", ")", "f", "=", "open", "(", "self", ".", "fn", ",", "'rb'", ")", "b", "=", "f", ".", "read", "(", ")", "f", ".", "close", "(", ")", "f", "=", "open", "(", "fn", ",", "'wb'", ")", "f", ".", "write", "(", "b", ")", "f", ".", "close", "(", ")" ]
copy the zip file from its filename to the given filename.
[ "copy", "the", "zip", "file", "from", "its", "filename", "to", "the", "given", "filename", "." ]
python
train
31.545455
EconForge/dolo
dolo/numeric/extern/qz.py
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/extern/qz.py#L6-L18
def qzordered(A,B,crit=1.0): "Eigenvalues bigger than crit are sorted in the top-left." TOL = 1e-10 def select(alpha, beta): return alpha**2>crit*beta**2 [S,T,alpha,beta,U,V] = ordqz(A,B,output='real',sort=select) eigval = abs(numpy.diag(S)/numpy.diag(T)) return [S,T,U,V,eigval]
[ "def", "qzordered", "(", "A", ",", "B", ",", "crit", "=", "1.0", ")", ":", "TOL", "=", "1e-10", "def", "select", "(", "alpha", ",", "beta", ")", ":", "return", "alpha", "**", "2", ">", "crit", "*", "beta", "**", "2", "[", "S", ",", "T", ",", "alpha", ",", "beta", ",", "U", ",", "V", "]", "=", "ordqz", "(", "A", ",", "B", ",", "output", "=", "'real'", ",", "sort", "=", "select", ")", "eigval", "=", "abs", "(", "numpy", ".", "diag", "(", "S", ")", "/", "numpy", ".", "diag", "(", "T", ")", ")", "return", "[", "S", ",", "T", ",", "U", ",", "V", ",", "eigval", "]" ]
Eigenvalues bigger than crit are sorted in the top-left.
[ "Eigenvalues", "bigger", "than", "crit", "are", "sorted", "in", "the", "top", "-", "left", "." ]
python
train
23.384615
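A small check with SciPy's ``ordqz`` directly (illustrative matrices): for a diagonal pencil the generalized eigenvalues are just the diagonal ratios, and the sort callback moves the large ones to the top-left block.

import numpy as np
from scipy.linalg import ordqz

A = np.diag([2.0, 0.5])
B = np.eye(2)
# keep generalized eigenvalues with |alpha| > |beta| (i.e. modulus > 1) on top
S, T, alpha, beta, U, V = ordqz(A, B, output='real',
                                sort=lambda a, b: np.abs(a) > np.abs(b))
print(np.abs(np.diag(S) / np.diag(T)))   # [2.  0.5] -- big eigenvalue first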
tensorflow/cleverhans
cleverhans/utils_tf.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L526-L531
def get_available_gpus(): """ Returns a list of string names of all available GPUs """ local_device_protos = device_lib.list_local_devices() return [x.name for x in local_device_protos if x.device_type == 'GPU']
[ "def", "get_available_gpus", "(", ")", ":", "local_device_protos", "=", "device_lib", ".", "list_local_devices", "(", ")", "return", "[", "x", ".", "name", "for", "x", "in", "local_device_protos", "if", "x", ".", "device_type", "==", "'GPU'", "]" ]
Returns a list of string names of all available GPUs
[ "Returns", "a", "list", "of", "string", "names", "of", "all", "available", "GPUs" ]
python
train
36
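An aside, not this module's code: in TF 2.x the same information is exposed without going through ``device_lib``.

import tensorflow as tf

print([d.name for d in tf.config.list_physical_devices('GPU')])
# e.g. ['/physical_device:GPU:0'], or [] on a CPU-only machine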
Chilipp/model-organization
model_organization/config.py
https://github.com/Chilipp/model-organization/blob/694d1219c7ed7e1b2b17153afa11bdc21169bca2/model_organization/config.py#L45-L83
def get_configdir(name):
    """
    Return the string representing the configuration directory.

    The directory is chosen as follows:

    1. If the ``name.upper() + CONFIGDIR`` environment variable is
       supplied, choose that.
    2a. On Linux, choose `$HOME/.config/<name>`.
    2b. On other platforms, choose `$HOME/.<name>`.
    3. If the chosen directory exists, use it as the configuration
       directory.
    4. Otherwise, create the directory and use it.

    Notes
    -----
    This function is taken from the matplotlib [1] module

    References
    ----------
    [1]: http://matplotlib.org/api/"""
    configdir = os.environ.get('%sCONFIGDIR' % name.upper())
    if configdir is not None:
        return os.path.abspath(configdir)

    p = None
    h = _get_home()
    if ((sys.platform.startswith('linux') or
         sys.platform.startswith('darwin')) and h is not None):
        p = os.path.join(h, '.config/' + name)
    elif h is not None:
        p = os.path.join(h, '.' + name)

    if not os.path.exists(p):
        os.makedirs(p)
    return p
[ "def", "get_configdir", "(", "name", ")", ":", "configdir", "=", "os", ".", "environ", ".", "get", "(", "'%sCONFIGDIR'", "%", "name", ".", "upper", "(", ")", ")", "if", "configdir", "is", "not", "None", ":", "return", "os", ".", "path", ".", "abspath", "(", "configdir", ")", "p", "=", "None", "h", "=", "_get_home", "(", ")", "if", "(", "(", "sys", ".", "platform", ".", "startswith", "(", "'linux'", ")", "or", "sys", ".", "platform", ".", "startswith", "(", "'darwin'", ")", ")", "and", "h", "is", "not", "None", ")", ":", "p", "=", "os", ".", "path", ".", "join", "(", "h", ",", "'.config/'", "+", "name", ")", "elif", "h", "is", "not", "None", ":", "p", "=", "os", ".", "path", ".", "join", "(", "h", ",", "'.'", "+", "name", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "p", ")", ":", "os", ".", "makedirs", "(", "p", ")", "return", "p" ]
Return the string representing the configuration directory.

The directory is chosen as follows:

1. If the ``name.upper() + CONFIGDIR`` environment variable is
   supplied, choose that.
2a. On Linux, choose `$HOME/.config/<name>`.
2b. On other platforms, choose `$HOME/.<name>`.
3. If the chosen directory exists, use it as the configuration
   directory.
4. Otherwise, create the directory and use it.

Notes
-----
This function is taken from the matplotlib [1] module

References
----------
[1]: http://matplotlib.org/api/
[ "Return", "the", "string", "representing", "the", "configuration", "directory", "." ]
python
train
26.179487
materials-data-facility/toolbox
mdf_toolbox/search_helper.py
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L606-L645
def exclude_range(self, field, start="*", stop="*", inclusive=True, new_group=False): """Exclude a ``field:[some range]`` term from the query. Matches will not have any ``value`` in the range in the ``field``. Arguments: field (str): The field to check for the value. The field must be namespaced according to Elasticsearch rules using the dot syntax. For example, ``"mdf.source_name"`` is the ``source_name`` field of the ``mdf`` dictionary. start (str or int): The starting value, or ``None`` for no lower bound. **Default:** ``None``. stop (str or int): The ending value, or ``None`` for no upper bound. **Default:** ``None``. inclusive (bool): If ``True``, the ``start`` and ``stop`` values will be excluded from the search. If ``False``, the ``start`` and ``stop`` values will not be excluded from the search. **Default:** ``True``. new_group (bool): If ``True``, will separate the term into a new parenthetical group. If ``False``, will not. **Default:** ``False``. Returns: SearchHelper: Self """ # Accept None as * if start is None: start = "*" if stop is None: stop = "*" # *-* is the same as field doesn't exist if start == "*" and stop == "*": return self.match_not_exists(field, new_group=new_group) if inclusive: value = "[" + str(start) + " TO " + str(stop) + "]" else: value = "{" + str(start) + " TO " + str(stop) + "}" return self.exclude_field(field, value, new_group=new_group)
[ "def", "exclude_range", "(", "self", ",", "field", ",", "start", "=", "\"*\"", ",", "stop", "=", "\"*\"", ",", "inclusive", "=", "True", ",", "new_group", "=", "False", ")", ":", "# Accept None as *", "if", "start", "is", "None", ":", "start", "=", "\"*\"", "if", "stop", "is", "None", ":", "stop", "=", "\"*\"", "# *-* is the same as field doesn't exist", "if", "start", "==", "\"*\"", "and", "stop", "==", "\"*\"", ":", "return", "self", ".", "match_not_exists", "(", "field", ",", "new_group", "=", "new_group", ")", "if", "inclusive", ":", "value", "=", "\"[\"", "+", "str", "(", "start", ")", "+", "\" TO \"", "+", "str", "(", "stop", ")", "+", "\"]\"", "else", ":", "value", "=", "\"{\"", "+", "str", "(", "start", ")", "+", "\" TO \"", "+", "str", "(", "stop", ")", "+", "\"}\"", "return", "self", ".", "exclude_field", "(", "field", ",", "value", ",", "new_group", "=", "new_group", ")" ]
Exclude a ``field:[some range]`` term from the query. Matches will not have any ``value`` in the range in the ``field``. Arguments: field (str): The field to check for the value. The field must be namespaced according to Elasticsearch rules using the dot syntax. For example, ``"mdf.source_name"`` is the ``source_name`` field of the ``mdf`` dictionary. start (str or int): The starting value, or ``None`` for no lower bound. **Default:** ``None``. stop (str or int): The ending value, or ``None`` for no upper bound. **Default:** ``None``. inclusive (bool): If ``True``, the ``start`` and ``stop`` values will be excluded from the search. If ``False``, the ``start`` and ``stop`` values will not be excluded from the search. **Default:** ``True``. new_group (bool): If ``True``, will separate the term into a new parenthetical group. If ``False``, will not. **Default:** ``False``. Returns: SearchHelper: Self
[ "Exclude", "a", "field", ":", "[", "some", "range", "]", "term", "from", "the", "query", ".", "Matches", "will", "not", "have", "any", "value", "in", "the", "range", "in", "the", "field", "." ]
python
train
45.9
gem/oq-engine
openquake/hmtk/faults/fault_models.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/faults/fault_models.py#L509-L557
def generate_fault_source_model(self): ''' Creates a resulting `openquake.hmtk` fault source set. :returns: source_model - list of instances of either the :class: `openquake.hmtk.sources.simple_fault_source.mtkSimpleFaultSource` or :class: `openquake.hmtk.sources.complex_fault_source.mtkComplexFaultSource` model_weight - Corresponding weights for each source model ''' source_model = [] model_weight = [] for iloc in range(0, self.get_number_mfd_models()): model_mfd = EvenlyDiscretizedMFD( self.mfd[0][iloc].min_mag, self.mfd[0][iloc].bin_width, self.mfd[0][iloc].occur_rates.tolist()) if isinstance(self.geometry, ComplexFaultGeometry): # Complex fault class source = mtkComplexFaultSource( self.id, self.name, self.trt, self.geometry.surface, self.mfd[2][iloc], self.rupt_aspect_ratio, model_mfd, self.rake) source.fault_edges = self.geometry.trace else: # Simple Fault source source = mtkSimpleFaultSource( self.id, self.name, self.trt, self.geometry.surface, self.geometry.dip, self.geometry.upper_depth, self.geometry.lower_depth, self.mfd[2][iloc], self.rupt_aspect_ratio, model_mfd, self.rake) source.fault_trace = self.geometry.trace source_model.append(source) model_weight.append(self.mfd[1][iloc]) return source_model, model_weight
[ "def", "generate_fault_source_model", "(", "self", ")", ":", "source_model", "=", "[", "]", "model_weight", "=", "[", "]", "for", "iloc", "in", "range", "(", "0", ",", "self", ".", "get_number_mfd_models", "(", ")", ")", ":", "model_mfd", "=", "EvenlyDiscretizedMFD", "(", "self", ".", "mfd", "[", "0", "]", "[", "iloc", "]", ".", "min_mag", ",", "self", ".", "mfd", "[", "0", "]", "[", "iloc", "]", ".", "bin_width", ",", "self", ".", "mfd", "[", "0", "]", "[", "iloc", "]", ".", "occur_rates", ".", "tolist", "(", ")", ")", "if", "isinstance", "(", "self", ".", "geometry", ",", "ComplexFaultGeometry", ")", ":", "# Complex fault class", "source", "=", "mtkComplexFaultSource", "(", "self", ".", "id", ",", "self", ".", "name", ",", "self", ".", "trt", ",", "self", ".", "geometry", ".", "surface", ",", "self", ".", "mfd", "[", "2", "]", "[", "iloc", "]", ",", "self", ".", "rupt_aspect_ratio", ",", "model_mfd", ",", "self", ".", "rake", ")", "source", ".", "fault_edges", "=", "self", ".", "geometry", ".", "trace", "else", ":", "# Simple Fault source", "source", "=", "mtkSimpleFaultSource", "(", "self", ".", "id", ",", "self", ".", "name", ",", "self", ".", "trt", ",", "self", ".", "geometry", ".", "surface", ",", "self", ".", "geometry", ".", "dip", ",", "self", ".", "geometry", ".", "upper_depth", ",", "self", ".", "geometry", ".", "lower_depth", ",", "self", ".", "mfd", "[", "2", "]", "[", "iloc", "]", ",", "self", ".", "rupt_aspect_ratio", ",", "model_mfd", ",", "self", ".", "rake", ")", "source", ".", "fault_trace", "=", "self", ".", "geometry", ".", "trace", "source_model", ".", "append", "(", "source", ")", "model_weight", ".", "append", "(", "self", ".", "mfd", "[", "1", "]", "[", "iloc", "]", ")", "return", "source_model", ",", "model_weight" ]
Creates a resulting `openquake.hmtk` fault source set. :returns: source_model - list of instances of either the :class: `openquake.hmtk.sources.simple_fault_source.mtkSimpleFaultSource` or :class: `openquake.hmtk.sources.complex_fault_source.mtkComplexFaultSource` model_weight - Corresponding weights for each source model
[ "Creates", "a", "resulting", "openquake", ".", "hmtk", "fault", "source", "set", "." ]
python
train
38.918367
ioos/compliance-checker
compliance_checker/cf/cf.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/cf/cf.py#L1442-L1478
def _check_flag_meanings(self, ds, name): ''' Check a variable's flag_meanings attribute for compliance under CF - flag_meanings exists - flag_meanings is a string - flag_meanings elements are valid strings :param netCDF4.Dataset ds: An open netCDF dataset :param str name: Variable name :rtype: compliance_checker.base.Result ''' variable = ds.variables[name] flag_meanings = getattr(variable, 'flag_meanings', None) valid_meanings = TestCtx(BaseCheck.HIGH, self.section_titles['3.5']) valid_meanings.assert_true(flag_meanings is not None, "{}'s flag_meanings attribute is required for flag variables".format(name)) valid_meanings.assert_true(isinstance(flag_meanings, basestring), "{}'s flag_meanings attribute must be a string".format(name)) # We can't perform any additional checks if it's not a string if not isinstance(flag_meanings, basestring): return valid_meanings.to_result() valid_meanings.assert_true(len(flag_meanings) > 0, "{}'s flag_meanings can't be empty".format(name)) flag_regx = regex.compile(r"^[0-9A-Za-z_\-.+@]+$") meanings = flag_meanings.split() for meaning in meanings: if flag_regx.match(meaning) is None: valid_meanings.assert_true(False, "{}'s flag_meanings attribute defined an illegal flag meaning ".format(name)+\ "{}".format(meaning)) return valid_meanings.to_result()
[ "def", "_check_flag_meanings", "(", "self", ",", "ds", ",", "name", ")", ":", "variable", "=", "ds", ".", "variables", "[", "name", "]", "flag_meanings", "=", "getattr", "(", "variable", ",", "'flag_meanings'", ",", "None", ")", "valid_meanings", "=", "TestCtx", "(", "BaseCheck", ".", "HIGH", ",", "self", ".", "section_titles", "[", "'3.5'", "]", ")", "valid_meanings", ".", "assert_true", "(", "flag_meanings", "is", "not", "None", ",", "\"{}'s flag_meanings attribute is required for flag variables\"", ".", "format", "(", "name", ")", ")", "valid_meanings", ".", "assert_true", "(", "isinstance", "(", "flag_meanings", ",", "basestring", ")", ",", "\"{}'s flag_meanings attribute must be a string\"", ".", "format", "(", "name", ")", ")", "# We can't perform any additional checks if it's not a string", "if", "not", "isinstance", "(", "flag_meanings", ",", "basestring", ")", ":", "return", "valid_meanings", ".", "to_result", "(", ")", "valid_meanings", ".", "assert_true", "(", "len", "(", "flag_meanings", ")", ">", "0", ",", "\"{}'s flag_meanings can't be empty\"", ".", "format", "(", "name", ")", ")", "flag_regx", "=", "regex", ".", "compile", "(", "r\"^[0-9A-Za-z_\\-.+@]+$\"", ")", "meanings", "=", "flag_meanings", ".", "split", "(", ")", "for", "meaning", "in", "meanings", ":", "if", "flag_regx", ".", "match", "(", "meaning", ")", "is", "None", ":", "valid_meanings", ".", "assert_true", "(", "False", ",", "\"{}'s flag_meanings attribute defined an illegal flag meaning \"", ".", "format", "(", "name", ")", "+", "\"{}\"", ".", "format", "(", "meaning", ")", ")", "return", "valid_meanings", ".", "to_result", "(", ")" ]
Check a variable's flag_meanings attribute for compliance under CF - flag_meanings exists - flag_meanings is a string - flag_meanings elements are valid strings :param netCDF4.Dataset ds: An open netCDF dataset :param str name: Variable name :rtype: compliance_checker.base.Result
[ "Check", "a", "variable", "s", "flag_meanings", "attribute", "for", "compliance", "under", "CF" ]
python
train
45.189189
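The core validity rule is the character-class pattern applied to each whitespace-separated meaning. Isolated with the stdlib ``re`` module (the checker itself uses the third-party ``regex`` package), a sketch:

import re

FLAG_RE = re.compile(r"^[0-9A-Za-z_\-.+@]+$")

def illegal_flag_meanings(flag_meanings):
    # return the words in a CF flag_meanings string that break the pattern
    return [m for m in flag_meanings.split() if FLAG_RE.match(m) is None]

print(illegal_flag_meanings("good_data probably_good bad/data"))
# ['bad/data'] -- '/' is outside the allowed character set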
CalebBell/ht
ht/boiling_nucleic.py
https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/boiling_nucleic.py#L340-L494
def Stephan_Abdelsalam(rhol, rhog, mul, kl, Cpl, Hvap, sigma, Tsat, Te=None, q=None, kw=401, rhow=8.96, Cpw=384, angle=None, correlation='general'): r'''Calculates heat transfer coefficient for a evaporator operating in the nucleate boiling regime according to [2]_ as presented in [1]_. Five variants are possible. Either heat flux or excess temperature is required. The forms for `Te` are not shown here, but are similar to those of the other functions. .. math:: h = 0.23X_1^{0.674} X_2^{0.35} X_3^{0.371} X_5^{0.297} X_8^{-1.73} k_L/d_B X1 = \frac{q D_d}{K_L T_{sat}} X2 = \frac{\alpha^2 \rho_L}{\sigma D_d} X3 = \frac{C_{p,L} T_{sat} D_d^2}{\alpha^2} X4 = \frac{H_{vap} D_d^2}{\alpha^2} X5 = \frac{\rho_V}{\rho_L} X6 = \frac{C_{p,l} \mu_L}{k_L} X7 = \frac{\rho_W C_{p,W} k_W}{\rho_L C_{p,L} k_L} X8 = \frac{\rho_L-\rho_V}{\rho_L} D_b = 0.0146\theta\sqrt{\frac{2\sigma}{g(\rho_L-\rho_g)}} Respectively, the following four correlations are for water, hydrocarbons, cryogenic fluids, and refrigerants. .. math:: h = 0.246\times 10^7 X1^{0.673} X4^{-1.58} X3^{1.26}X8^{5.22}k_L/d_B h = 0.0546 X5^{0.335} X1^{0.67} X8^{-4.33} X4^{0.248}k_L/d_B h = 4.82 X1^{0.624} X7^{0.117} X3^{0.374} X4^{-0.329}X5^{0.257} k_L/d_B h = 207 X1^{0.745} X5^{0.581} X6^{0.533} k_L/d_B Parameters ---------- rhol : float Density of the liquid [kg/m^3] rhog : float Density of the produced gas [kg/m^3] mul : float Viscosity of liquid [Pa*s] kl : float Thermal conductivity of liquid [W/m/K] Cpl : float Heat capacity of liquid [J/kg/K] Hvap : float Heat of vaporization of the fluid at P, [J/kg] sigma : float Surface tension of liquid [N/m] Tsat : float Saturation temperature at operating pressure [Pa] Te : float, optional Excess wall temperature, [K] q : float, optional Heat flux, [W/m^2] kw : float, optional Thermal conductivity of wall (only for cryogenics) [W/m/K] rhow : float, optional Density of the wall (only for cryogenics) [kg/m^3] Cpw : float, optional Heat capacity of wall (only for cryogenics) [J/kg/K] angle : float, optional Contact angle of bubble with wall [degrees] correlation : str, optional Any of 'general', 'water', 'hydrocarbon', 'cryogenic', or 'refrigerant' Returns ------- h : float Heat transfer coefficient [W/m^2/K] Notes ----- If cryogenic correlation is selected, metal properties are used. Default values are the properties of copper at STP. The angle is selected automatically if a correlation is selected; if angle is provided anyway, the automatic selection is ignored. A IndexError exception is raised if the correlation is not in the dictionary _angles_Stephan_Abdelsalam. Examples -------- Example is from [3]_ and matches. >>> Stephan_Abdelsalam(Te=16.2, Tsat=437.5, Cpl=2730., kl=0.086, mul=156E-6, ... sigma=0.0082, Hvap=272E3, rhol=567, rhog=18.09, angle=35) 26722.441071108373 References ---------- .. [1] Cao, Eduardo. Heat Transfer in Process Engineering. McGraw Hill Professional, 2009. .. [2] Stephan, K., and M. Abdelsalam. "Heat-Transfer Correlations for Natural Convection Boiling." International Journal of Heat and Mass Transfer 23, no. 1 (January 1980): 73-87. doi:10.1016/0017-9310(80)90140-4. .. [3] Serth, R. W., Process Heat Transfer: Principles, Applications and Rules of Thumb. 2E. Amsterdam: Academic Press, 2014. 
''' if Te is None and q is None: raise Exception('Either q or Te is needed for this correlation') angle = _angles_Stephan_Abdelsalam[correlation] db = 0.0146*angle*(2*sigma/g/(rhol-rhog))**0.5 diffusivity_L = kl/rhol/Cpl if Te: X1 = db/kl/Tsat*Te else: X1 = db/kl/Tsat*q X2 = diffusivity_L**2*rhol/sigma/db X3 = Hvap*db**2/diffusivity_L**2 X4 = Hvap*db**2/diffusivity_L**2 X5 = rhog/rhol X6 = Cpl*mul/kl X7 = rhow*Cpw*kw/(rhol*Cpl*kl) X8 = (rhol-rhog)/rhol if correlation == 'general': if Te: h = (0.23*X1**0.674*X2**0.35*X3**0.371*X5**0.297*X8**-1.73*kl/db)**(1/0.326) else: h = (0.23*X1**0.674*X2**0.35*X3**0.371*X5**0.297*X8**-1.73*kl/db) elif correlation == 'water': if Te: h = (0.246E7*X1**0.673*X4**-1.58*X3**1.26*X8**5.22*kl/db)**(1/0.327) else: h = (0.246E7*X1**0.673*X4**-1.58*X3**1.26*X8**5.22*kl/db) elif correlation == 'hydrocarbon': if Te: h = (0.0546*X5**0.335*X1**0.67*X8**-4.33*X4**0.248*kl/db)**(1/0.33) else: h = (0.0546*X5**0.335*X1**0.67*X8**-4.33*X4**0.248*kl/db) elif correlation == 'cryogenic': if Te: h = (4.82*X1**0.624*X7**0.117*X3**0.374*X4**-0.329*X5**0.257*kl/db)**(1/0.376) else: h = (4.82*X1**0.624*X7**0.117*X3**0.374*X4**-0.329*X5**0.257*kl/db) else: if Te: h = (207*X1**0.745*X5**0.581*X6**0.533*kl/db)**(1/0.255) else: h = (207*X1**0.745*X5**0.581*X6**0.533*kl/db) return h
[ "def", "Stephan_Abdelsalam", "(", "rhol", ",", "rhog", ",", "mul", ",", "kl", ",", "Cpl", ",", "Hvap", ",", "sigma", ",", "Tsat", ",", "Te", "=", "None", ",", "q", "=", "None", ",", "kw", "=", "401", ",", "rhow", "=", "8.96", ",", "Cpw", "=", "384", ",", "angle", "=", "None", ",", "correlation", "=", "'general'", ")", ":", "if", "Te", "is", "None", "and", "q", "is", "None", ":", "raise", "Exception", "(", "'Either q or Te is needed for this correlation'", ")", "angle", "=", "_angles_Stephan_Abdelsalam", "[", "correlation", "]", "db", "=", "0.0146", "*", "angle", "*", "(", "2", "*", "sigma", "/", "g", "/", "(", "rhol", "-", "rhog", ")", ")", "**", "0.5", "diffusivity_L", "=", "kl", "/", "rhol", "/", "Cpl", "if", "Te", ":", "X1", "=", "db", "/", "kl", "/", "Tsat", "*", "Te", "else", ":", "X1", "=", "db", "/", "kl", "/", "Tsat", "*", "q", "X2", "=", "diffusivity_L", "**", "2", "*", "rhol", "/", "sigma", "/", "db", "X3", "=", "Hvap", "*", "db", "**", "2", "/", "diffusivity_L", "**", "2", "X4", "=", "Hvap", "*", "db", "**", "2", "/", "diffusivity_L", "**", "2", "X5", "=", "rhog", "/", "rhol", "X6", "=", "Cpl", "*", "mul", "/", "kl", "X7", "=", "rhow", "*", "Cpw", "*", "kw", "/", "(", "rhol", "*", "Cpl", "*", "kl", ")", "X8", "=", "(", "rhol", "-", "rhog", ")", "/", "rhol", "if", "correlation", "==", "'general'", ":", "if", "Te", ":", "h", "=", "(", "0.23", "*", "X1", "**", "0.674", "*", "X2", "**", "0.35", "*", "X3", "**", "0.371", "*", "X5", "**", "0.297", "*", "X8", "**", "-", "1.73", "*", "kl", "/", "db", ")", "**", "(", "1", "/", "0.326", ")", "else", ":", "h", "=", "(", "0.23", "*", "X1", "**", "0.674", "*", "X2", "**", "0.35", "*", "X3", "**", "0.371", "*", "X5", "**", "0.297", "*", "X8", "**", "-", "1.73", "*", "kl", "/", "db", ")", "elif", "correlation", "==", "'water'", ":", "if", "Te", ":", "h", "=", "(", "0.246E7", "*", "X1", "**", "0.673", "*", "X4", "**", "-", "1.58", "*", "X3", "**", "1.26", "*", "X8", "**", "5.22", "*", "kl", "/", "db", ")", "**", "(", "1", "/", "0.327", ")", "else", ":", "h", "=", "(", "0.246E7", "*", "X1", "**", "0.673", "*", "X4", "**", "-", "1.58", "*", "X3", "**", "1.26", "*", "X8", "**", "5.22", "*", "kl", "/", "db", ")", "elif", "correlation", "==", "'hydrocarbon'", ":", "if", "Te", ":", "h", "=", "(", "0.0546", "*", "X5", "**", "0.335", "*", "X1", "**", "0.67", "*", "X8", "**", "-", "4.33", "*", "X4", "**", "0.248", "*", "kl", "/", "db", ")", "**", "(", "1", "/", "0.33", ")", "else", ":", "h", "=", "(", "0.0546", "*", "X5", "**", "0.335", "*", "X1", "**", "0.67", "*", "X8", "**", "-", "4.33", "*", "X4", "**", "0.248", "*", "kl", "/", "db", ")", "elif", "correlation", "==", "'cryogenic'", ":", "if", "Te", ":", "h", "=", "(", "4.82", "*", "X1", "**", "0.624", "*", "X7", "**", "0.117", "*", "X3", "**", "0.374", "*", "X4", "**", "-", "0.329", "*", "X5", "**", "0.257", "*", "kl", "/", "db", ")", "**", "(", "1", "/", "0.376", ")", "else", ":", "h", "=", "(", "4.82", "*", "X1", "**", "0.624", "*", "X7", "**", "0.117", "*", "X3", "**", "0.374", "*", "X4", "**", "-", "0.329", "*", "X5", "**", "0.257", "*", "kl", "/", "db", ")", "else", ":", "if", "Te", ":", "h", "=", "(", "207", "*", "X1", "**", "0.745", "*", "X5", "**", "0.581", "*", "X6", "**", "0.533", "*", "kl", "/", "db", ")", "**", "(", "1", "/", "0.255", ")", "else", ":", "h", "=", "(", "207", "*", "X1", "**", "0.745", "*", "X5", "**", "0.581", "*", "X6", "**", "0.533", "*", "kl", "/", "db", ")", "return", "h" ]
r'''Calculates heat transfer coefficient for a evaporator operating in the nucleate boiling regime according to [2]_ as presented in [1]_. Five variants are possible. Either heat flux or excess temperature is required. The forms for `Te` are not shown here, but are similar to those of the other functions. .. math:: h = 0.23X_1^{0.674} X_2^{0.35} X_3^{0.371} X_5^{0.297} X_8^{-1.73} k_L/d_B X1 = \frac{q D_d}{K_L T_{sat}} X2 = \frac{\alpha^2 \rho_L}{\sigma D_d} X3 = \frac{C_{p,L} T_{sat} D_d^2}{\alpha^2} X4 = \frac{H_{vap} D_d^2}{\alpha^2} X5 = \frac{\rho_V}{\rho_L} X6 = \frac{C_{p,l} \mu_L}{k_L} X7 = \frac{\rho_W C_{p,W} k_W}{\rho_L C_{p,L} k_L} X8 = \frac{\rho_L-\rho_V}{\rho_L} D_b = 0.0146\theta\sqrt{\frac{2\sigma}{g(\rho_L-\rho_g)}} Respectively, the following four correlations are for water, hydrocarbons, cryogenic fluids, and refrigerants. .. math:: h = 0.246\times 10^7 X1^{0.673} X4^{-1.58} X3^{1.26}X8^{5.22}k_L/d_B h = 0.0546 X5^{0.335} X1^{0.67} X8^{-4.33} X4^{0.248}k_L/d_B h = 4.82 X1^{0.624} X7^{0.117} X3^{0.374} X4^{-0.329}X5^{0.257} k_L/d_B h = 207 X1^{0.745} X5^{0.581} X6^{0.533} k_L/d_B Parameters ---------- rhol : float Density of the liquid [kg/m^3] rhog : float Density of the produced gas [kg/m^3] mul : float Viscosity of liquid [Pa*s] kl : float Thermal conductivity of liquid [W/m/K] Cpl : float Heat capacity of liquid [J/kg/K] Hvap : float Heat of vaporization of the fluid at P, [J/kg] sigma : float Surface tension of liquid [N/m] Tsat : float Saturation temperature at operating pressure [Pa] Te : float, optional Excess wall temperature, [K] q : float, optional Heat flux, [W/m^2] kw : float, optional Thermal conductivity of wall (only for cryogenics) [W/m/K] rhow : float, optional Density of the wall (only for cryogenics) [kg/m^3] Cpw : float, optional Heat capacity of wall (only for cryogenics) [J/kg/K] angle : float, optional Contact angle of bubble with wall [degrees] correlation : str, optional Any of 'general', 'water', 'hydrocarbon', 'cryogenic', or 'refrigerant' Returns ------- h : float Heat transfer coefficient [W/m^2/K] Notes ----- If cryogenic correlation is selected, metal properties are used. Default values are the properties of copper at STP. The angle is selected automatically if a correlation is selected; if angle is provided anyway, the automatic selection is ignored. A IndexError exception is raised if the correlation is not in the dictionary _angles_Stephan_Abdelsalam. Examples -------- Example is from [3]_ and matches. >>> Stephan_Abdelsalam(Te=16.2, Tsat=437.5, Cpl=2730., kl=0.086, mul=156E-6, ... sigma=0.0082, Hvap=272E3, rhol=567, rhog=18.09, angle=35) 26722.441071108373 References ---------- .. [1] Cao, Eduardo. Heat Transfer in Process Engineering. McGraw Hill Professional, 2009. .. [2] Stephan, K., and M. Abdelsalam. "Heat-Transfer Correlations for Natural Convection Boiling." International Journal of Heat and Mass Transfer 23, no. 1 (January 1980): 73-87. doi:10.1016/0017-9310(80)90140-4. .. [3] Serth, R. W., Process Heat Transfer: Principles, Applications and Rules of Thumb. 2E. Amsterdam: Academic Press, 2014.
[ "r", "Calculates", "heat", "transfer", "coefficient", "for", "a", "evaporator", "operating", "in", "the", "nucleate", "boiling", "regime", "according", "to", "[", "2", "]", "_", "as", "presented", "in", "[", "1", "]", "_", ".", "Five", "variants", "are", "possible", "." ]
python
train
33.935484
openstack/horizon
horizon/workflows/views.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/workflows/views.py#L121-L127
def get_template_names(self): """Returns the template name to use for this request.""" if self.request.is_ajax(): template = self.ajax_template_name else: template = self.template_name return template
[ "def", "get_template_names", "(", "self", ")", ":", "if", "self", ".", "request", ".", "is_ajax", "(", ")", ":", "template", "=", "self", ".", "ajax_template_name", "else", ":", "template", "=", "self", ".", "template_name", "return", "template" ]
Returns the template name to use for this request.
[ "Returns", "the", "template", "name", "to", "use", "for", "this", "request", "." ]
python
train
35.714286
redhat-openstack/python-tripleo-helper
tripleohelper/server.py
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/server.py#L155-L177
def rhsm_register(self, rhsm):
    """Register the host on the RHSM.

    :param rhsm: a dict of parameters (login, password, pool_id)
    """
    # Get rhsm credentials
    login = rhsm.get('login')
    password = rhsm.get('password', os.environ.get('RHN_PW'))
    pool_id = rhsm.get('pool_id')

    # Ensure the RHEL beta channels are disabled
    self.run('rm /etc/pki/product/69.pem', ignore_error=True)
    custom_log = 'subscription-manager register --username %s --password *******' % login
    self.run(
        'subscription-manager register --username %s --password "%s"' % (
            login, password),
        success_status=(0, 64),
        custom_log=custom_log,
        retry=3)
    if pool_id:
        self.run('subscription-manager attach --pool %s' % pool_id)
    else:
        self.run('subscription-manager attach --auto')
    self.rhsm_active = True
[ "def", "rhsm_register", "(", "self", ",", "rhsm", ")", ":", "# Get rhsm credentials", "login", "=", "rhsm", ".", "get", "(", "'login'", ")", "password", "=", "rhsm", ".", "get", "(", "'password'", ",", "os", ".", "environ", ".", "get", "(", "'RHN_PW'", ")", ")", "pool_id", "=", "rhsm", ".", "get", "(", "'pool_id'", ")", "# Ensure the RHEL beta channel are disabled", "self", ".", "run", "(", "'rm /etc/pki/product/69.pem'", ",", "ignore_error", "=", "True", ")", "custom_log", "=", "'subscription-manager register --username %s --password *******'", "%", "login", "self", ".", "run", "(", "'subscription-manager register --username %s --password \"%s\"'", "%", "(", "login", ",", "password", ")", ",", "success_status", "=", "(", "0", ",", "64", ")", ",", "custom_log", "=", "custom_log", ",", "retry", "=", "3", ")", "if", "pool_id", ":", "self", ".", "run", "(", "'subscription-manager attach --pool %s'", "%", "pool_id", ")", "else", ":", "self", ".", "run", "(", "'subscription-manager attach --auto'", ")", "self", ".", "rhsm_active", "=", "True" ]
Register the host on the RHSM. :param rhsm: a dict of parameters (login, password, pool_id)
[ "Register", "the", "host", "on", "the", "RHSM", "." ]
python
train
40.521739
tgbugs/pyontutils
ilxutils/ilxutils/mydifflib.py
https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/mydifflib.py#L7-L24
def diff(s1, s2): ''' --word-diff=porcelain clone''' delta = difflib.Differ().compare(s1.split(), s2.split()) difflist = [] fullline = '' for line in delta: if line[0] == '?': continue elif line[0] == ' ': fullline += line.strip() + ' ' else: if fullline: difflist.append(fullline[:-1]) fullline = '' difflist.append(line) if fullline: difflist.append(fullline[:-1]) return [l[:] for l in '\n'.join(difflist).splitlines() if l]
[ "def", "diff", "(", "s1", ",", "s2", ")", ":", "delta", "=", "difflib", ".", "Differ", "(", ")", ".", "compare", "(", "s1", ".", "split", "(", ")", ",", "s2", ".", "split", "(", ")", ")", "difflist", "=", "[", "]", "fullline", "=", "''", "for", "line", "in", "delta", ":", "if", "line", "[", "0", "]", "==", "'?'", ":", "continue", "elif", "line", "[", "0", "]", "==", "' '", ":", "fullline", "+=", "line", ".", "strip", "(", ")", "+", "' '", "else", ":", "if", "fullline", ":", "difflist", ".", "append", "(", "fullline", "[", ":", "-", "1", "]", ")", "fullline", "=", "''", "difflist", ".", "append", "(", "line", ")", "if", "fullline", ":", "difflist", ".", "append", "(", "fullline", "[", ":", "-", "1", "]", ")", "return", "[", "l", "[", ":", "]", "for", "l", "in", "'\\n'", ".", "join", "(", "difflist", ")", ".", "splitlines", "(", ")", "if", "l", "]" ]
--word-diff=porcelain clone
[ "--", "word", "-", "diff", "=", "porcelain", "clone" ]
python
train
30.555556
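Assuming the ``diff`` helper above is in scope (it needs only ``difflib``), unchanged words are folded onto shared lines while changed words get their own +/- lines:

for line in diff("the quick brown fox", "the quick red fox jumps"):
    print(line)
# the quick
# - brown
# + red
# fox
# + jumps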
modin-project/modin
modin/pandas/base.py
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L2561-L2615
def sort_values( self, by, axis=0, ascending=True, inplace=False, kind="quicksort", na_position="last", ): """Sorts by a column/row or list of columns/rows. Args: by: A list of labels for the axis to sort over. axis: The axis to sort. ascending: Sort in ascending or descending order. inplace: If true, do the operation inplace. kind: How to sort. na_position: Where to put np.nan values. Returns: A sorted DataFrame. """ axis = self._get_axis_number(axis) if not is_list_like(by): by = [by] # Currently, sort_values will just reindex based on the sorted values. # TODO create a more efficient way to sort if axis == 0: broadcast_value_dict = {col: self[col] for col in by} broadcast_values = pandas.DataFrame(broadcast_value_dict, index=self.index) new_index = broadcast_values.sort_values( by=by, axis=axis, ascending=ascending, kind=kind, na_position=na_position, ).index return self.reindex(index=new_index, copy=not inplace) else: broadcast_value_list = [ self[row :: len(self.index)]._to_pandas() for row in by ] index_builder = list(zip(broadcast_value_list, by)) broadcast_values = pandas.concat( [row for row, idx in index_builder], copy=False ) broadcast_values.columns = self.columns new_columns = broadcast_values.sort_values( by=by, axis=axis, ascending=ascending, kind=kind, na_position=na_position, ).columns return self.reindex(columns=new_columns, copy=not inplace)
[ "def", "sort_values", "(", "self", ",", "by", ",", "axis", "=", "0", ",", "ascending", "=", "True", ",", "inplace", "=", "False", ",", "kind", "=", "\"quicksort\"", ",", "na_position", "=", "\"last\"", ",", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "not", "is_list_like", "(", "by", ")", ":", "by", "=", "[", "by", "]", "# Currently, sort_values will just reindex based on the sorted values.\r", "# TODO create a more efficient way to sort\r", "if", "axis", "==", "0", ":", "broadcast_value_dict", "=", "{", "col", ":", "self", "[", "col", "]", "for", "col", "in", "by", "}", "broadcast_values", "=", "pandas", ".", "DataFrame", "(", "broadcast_value_dict", ",", "index", "=", "self", ".", "index", ")", "new_index", "=", "broadcast_values", ".", "sort_values", "(", "by", "=", "by", ",", "axis", "=", "axis", ",", "ascending", "=", "ascending", ",", "kind", "=", "kind", ",", "na_position", "=", "na_position", ",", ")", ".", "index", "return", "self", ".", "reindex", "(", "index", "=", "new_index", ",", "copy", "=", "not", "inplace", ")", "else", ":", "broadcast_value_list", "=", "[", "self", "[", "row", ":", ":", "len", "(", "self", ".", "index", ")", "]", ".", "_to_pandas", "(", ")", "for", "row", "in", "by", "]", "index_builder", "=", "list", "(", "zip", "(", "broadcast_value_list", ",", "by", ")", ")", "broadcast_values", "=", "pandas", ".", "concat", "(", "[", "row", "for", "row", ",", "idx", "in", "index_builder", "]", ",", "copy", "=", "False", ")", "broadcast_values", ".", "columns", "=", "self", ".", "columns", "new_columns", "=", "broadcast_values", ".", "sort_values", "(", "by", "=", "by", ",", "axis", "=", "axis", ",", "ascending", "=", "ascending", ",", "kind", "=", "kind", ",", "na_position", "=", "na_position", ",", ")", ".", "columns", "return", "self", ".", "reindex", "(", "columns", "=", "new_columns", ",", "copy", "=", "not", "inplace", ")" ]
Sorts by a column/row or list of columns/rows.

Args:
    by: A list of labels for the axis to sort over.
    axis: The axis to sort.
    ascending: Sort in ascending or descending order.
    inplace: If true, do the operation inplace.
    kind: How to sort.
    na_position: Where to put np.nan values.

Returns:
    A sorted DataFrame.
[ "Sorts", "by", "a", "column", "/", "row", "or", "list", "of", "columns", "/", "rows", ".", "Args", ":", "by", ":", "A", "list", "of", "labels", "for", "the", "axis", "to", "sort", "over", ".", "axis", ":", "The", "axis", "to", "sort", ".", "ascending", ":", "Sort", "in", "ascending", "or", "descending", "order", ".", "inplace", ":", "If", "true", "do", "the", "operation", "inplace", ".", "kind", ":", "How", "to", "sort", ".", "na_position", ":", "Where", "to", "put", "np", ".", "nan", "values", ".", "Returns", ":", "A", "sorted", "DataFrame", "." ]
python
train
36.018182
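Usage mirrors the pandas API that modin replicates; a small hedged example with made-up data:

import modin.pandas as pd

df = pd.DataFrame({'a': [3, 1, 2], 'b': ['x', 'y', 'z']})
sorted_df = df.sort_values(by='a')   # rows reindexed by the sorted 'a' values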
zwischenloesung/ardu-report-lib
libardurep/datastore.py
https://github.com/zwischenloesung/ardu-report-lib/blob/51bd4a07e036065aafcb1273b151bea3fdfa50fa/libardurep/datastore.py#L190-L205
def get_json(self, prettyprint=False, translate=True):
    """
    Get the data in JSON form
    """
    j = []
    if translate:
        d = self.get_translated_data()
    else:
        d = self.data
    for k in d:
        j.append(d[k])
    if prettyprint:
        j = json.dumps(j, indent=2, separators=(',',': '))
    else:
        j = json.dumps(j)
    return j
[ "def", "get_json", "(", "self", ",", "prettyprint", "=", "False", ",", "translate", "=", "True", ")", ":", "j", "=", "[", "]", "if", "translate", ":", "d", "=", "self", ".", "get_translated_data", "(", ")", "else", ":", "d", "=", "self", ".", "data", "for", "k", "in", "d", ":", "j", ".", "append", "(", "d", "[", "k", "]", ")", "if", "prettyprint", ":", "j", "=", "json", ".", "dumps", "(", "j", ",", "indent", "=", "2", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", "else", ":", "j", "=", "json", ".", "dumps", "(", "j", ")", "return", "j" ]
Get the data in JSON form
[ "Get", "the", "data", "in", "JSON", "form" ]
python
valid
25.75
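A hedged usage sketch for the record above; `store` stands in for a hypothetical DataStore instance that already holds reported values.

print(store.get_json(prettyprint=True))                     # indented JSON array
print(store.get_json(prettyprint=False, translate=False))   # compact output, untranslated keys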
google/apitools
apitools/base/py/http_wrapper.py
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/http_wrapper.py#L313-L356
def MakeRequest(http, http_request, retries=7, max_retry_wait=60,
                redirections=5,
                retry_func=HandleExceptionsAndRebuildHttpConnections,
                check_response_func=CheckResponse):
    """Send http_request via the given http, performing error/retry handling.

    Args:
      http: An httplib2.Http instance, or a http multiplexer that delegates to
          an underlying http, for example, HTTPMultiplexer.
      http_request: A Request to send.
      retries: (int, default 7) Number of retries to attempt on retryable
          replies (such as 429 or 5XX).
      max_retry_wait: (int, default 60) Maximum number of seconds to wait
          when retrying.
      redirections: (int, default 5) Number of redirects to follow.
      retry_func: Function to handle retries on exceptions. Argument is an
          ExceptionRetryArgs tuple.
      check_response_func: Function to validate the HTTP response.
          Arguments are (Response, response content, url).

    Raises:
      InvalidDataFromServerError: if there is no response after retries.

    Returns:
      A Response object.
    """
    retry = 0
    first_req_time = time.time()
    while True:
        try:
            return _MakeRequestNoRetry(
                http, http_request, redirections=redirections,
                check_response_func=check_response_func)
        # retry_func will consume the exception types it handles and raise.
        # pylint: disable=broad-except
        except Exception as e:
            retry += 1
            if retry >= retries:
                raise
            else:
                total_wait_sec = time.time() - first_req_time
                retry_func(ExceptionRetryArgs(http, http_request, e, retry,
                                              max_retry_wait, total_wait_sec))
[ "def", "MakeRequest", "(", "http", ",", "http_request", ",", "retries", "=", "7", ",", "max_retry_wait", "=", "60", ",", "redirections", "=", "5", ",", "retry_func", "=", "HandleExceptionsAndRebuildHttpConnections", ",", "check_response_func", "=", "CheckResponse", ")", ":", "retry", "=", "0", "first_req_time", "=", "time", ".", "time", "(", ")", "while", "True", ":", "try", ":", "return", "_MakeRequestNoRetry", "(", "http", ",", "http_request", ",", "redirections", "=", "redirections", ",", "check_response_func", "=", "check_response_func", ")", "# retry_func will consume the exception types it handles and raise.", "# pylint: disable=broad-except", "except", "Exception", "as", "e", ":", "retry", "+=", "1", "if", "retry", ">=", "retries", ":", "raise", "else", ":", "total_wait_sec", "=", "time", ".", "time", "(", ")", "-", "first_req_time", "retry_func", "(", "ExceptionRetryArgs", "(", "http", ",", "http_request", ",", "e", ",", "retry", ",", "max_retry_wait", ",", "total_wait_sec", ")", ")" ]
Send http_request via the given http, performing error/retry handling.

Args:
  http: An httplib2.Http instance, or a http multiplexer that delegates to
      an underlying http, for example, HTTPMultiplexer.
  http_request: A Request to send.
  retries: (int, default 7) Number of retries to attempt on retryable
      replies (such as 429 or 5XX).
  max_retry_wait: (int, default 60) Maximum number of seconds to wait
      when retrying.
  redirections: (int, default 5) Number of redirects to follow.
  retry_func: Function to handle retries on exceptions. Argument is an
      ExceptionRetryArgs tuple.
  check_response_func: Function to validate the HTTP response.
      Arguments are (Response, response content, url).

Raises:
  InvalidDataFromServerError: if there is no response after retries.

Returns:
  A Response object.
[ "Send", "http_request", "via", "the", "given", "http", "performing", "error", "/", "retry", "handling", "." ]
python
train
40.681818
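A sketch of a call site, assuming the surrounding apitools module context; the Request construction shown is an assumption for illustration, not a documented signature.

import httplib2

http = httplib2.Http()
request = Request(url='https://www.example.com/api')  # hypothetical construction of the apitools Request wrapper
response = MakeRequest(http, request, retries=3, max_retry_wait=30)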
ekmmetering/ekmmeters
ekmmeters.py
https://github.com/ekmmetering/ekmmeters/blob/b3748bdf30263bfa46ea40157bdf8df2522e1904/ekmmeters.py#L3503-L3513
def makeAB(self):
    """ Munge A and B reads into single serial block with only unique fields."""
    for fld in self.m_blk_a:
        compare_fld = fld.upper()
        if not "RESERVED" in compare_fld and not "CRC" in compare_fld:
            self.m_req[fld] = self.m_blk_a[fld]
    for fld in self.m_blk_b:
        compare_fld = fld.upper()
        if not "RESERVED" in compare_fld and not "CRC" in compare_fld:
            self.m_req[fld] = self.m_blk_b[fld]
    pass
[ "def", "makeAB", "(", "self", ")", ":", "for", "fld", "in", "self", ".", "m_blk_a", ":", "compare_fld", "=", "fld", ".", "upper", "(", ")", "if", "not", "\"RESERVED\"", "in", "compare_fld", "and", "not", "\"CRC\"", "in", "compare_fld", ":", "self", ".", "m_req", "[", "fld", "]", "=", "self", ".", "m_blk_a", "[", "fld", "]", "for", "fld", "in", "self", ".", "m_blk_b", ":", "compare_fld", "=", "fld", ".", "upper", "(", ")", "if", "not", "\"RESERVED\"", "in", "compare_fld", "and", "not", "\"CRC\"", "in", "compare_fld", ":", "self", ".", "m_req", "[", "fld", "]", "=", "self", ".", "m_blk_b", "[", "fld", "]", "pass" ]
Munge A and B reads into single serial block with only unique fields.
[ "Munge", "A", "and", "B", "reads", "into", "single", "serial", "block", "with", "only", "unique", "fields", "." ]
python
test
45.545455
Damgaard/PyImgur
pyimgur/__init__.py
https://github.com/Damgaard/PyImgur/blob/606f17078d24158632f807430f8d0b9b3cd8b312/pyimgur/__init__.py#L66-L70
def _get_album_or_image(json, imgur):
    """Return a gallery image/album depending on what the json represents."""
    if json['is_album']:
        return Gallery_album(json, imgur, has_fetched=False)
    return Gallery_image(json, imgur)
[ "def", "_get_album_or_image", "(", "json", ",", "imgur", ")", ":", "if", "json", "[", "'is_album'", "]", ":", "return", "Gallery_album", "(", "json", ",", "imgur", ",", "has_fetched", "=", "False", ")", "return", "Gallery_image", "(", "json", ",", "imgur", ")" ]
Return a gallery image/album depending on what the json represents.
[ "Return", "a", "gallery", "image", "/", "album", "depending", "on", "what", "the", "json", "represents", "." ]
python
train
46.8
ensime/ensime-vim
ensime_shared/client.py
https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/client.py#L248-L267
def send_at_position(self, what, useSelection, where="range"):
    """Ask the server to perform an operation on a range (sometimes named point)

    `what` is used as the prefix for the typehint.

    If `useSelection` is `False` the range is calculated based on the word
    under the cursor. Current selection start and end are used as the range
    otherwise.

    `where` defines the name of the property holding the range info within
    the request. Default value is 'range' but 'point' is sometimes used
    """
    self.log.debug('send_at_position: in')
    b, e = self.editor.selection_pos() if useSelection else self.editor.word_under_cursor_pos()
    self.log.debug('useSelection: {}, beg: {}, end: {}'.format(useSelection, b, e))
    beg = self.get_position(b[0], b[1])
    end = self.get_position(e[0], e[1])
    self.send_request(
        {"typehint": what + "AtPointReq",
         "file": self.editor.path(),
         where: {"from": beg, "to": end}})
[ "def", "send_at_position", "(", "self", ",", "what", ",", "useSelection", ",", "where", "=", "\"range\"", ")", ":", "self", ".", "log", ".", "debug", "(", "'send_at_position: in'", ")", "b", ",", "e", "=", "self", ".", "editor", ".", "selection_pos", "(", ")", "if", "useSelection", "else", "self", ".", "editor", ".", "word_under_cursor_pos", "(", ")", "self", ".", "log", ".", "debug", "(", "'useSelection: {}, beg: {}, end: {}'", ".", "format", "(", "useSelection", ",", "b", ",", "e", ")", ")", "beg", "=", "self", ".", "get_position", "(", "b", "[", "0", "]", ",", "b", "[", "1", "]", ")", "end", "=", "self", ".", "get_position", "(", "e", "[", "0", "]", ",", "e", "[", "1", "]", ")", "self", ".", "send_request", "(", "{", "\"typehint\"", ":", "what", "+", "\"AtPointReq\"", ",", "\"file\"", ":", "self", ".", "editor", ".", "path", "(", ")", ",", "where", ":", "{", "\"from\"", ":", "beg", ",", "\"to\"", ":", "end", "}", "}", ")" ]
Ask the server to perform an operation on a range (sometimes named point)

`what` is used as the prefix for the typehint.

If `useSelection` is `False` the range is calculated based on the word
under the cursor. Current selection start and end are used as the range
otherwise.

`where` defines the name of the property holding the range info within
the request. Default value is 'range' but 'point' is sometimes used
[ "Ask", "the", "server", "to", "perform", "an", "operation", "on", "a", "range", "(", "sometimes", "named", "point", ")" ]
python
train
50.25
spacetelescope/drizzlepac
drizzlepac/pixtosky.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/pixtosky.py#L102-L187
def xy2rd(input,x=None,y=None,coords=None, coordfile=None,colnames=None,separator=None,
          hms=True, precision=6,output=None,verbose=True):
    """ Primary interface to perform coordinate transformations from
    pixel to sky coordinates using STWCS and full distortion models
    read from the input image header.
    """
    single_coord = False
    # Only use value provided in `coords` if nothing has been specified for coordfile
    if coords is not None and coordfile is None:
        coordfile = coords
        warnings.simplefilter('always',DeprecationWarning)
        warnings.warn("Please update calling code to pass in `coordfile` instead of `coords`.",
                      category=DeprecationWarning)
        warnings.simplefilter('default',DeprecationWarning)

    if coordfile is not None:
        if colnames in blank_list:
            colnames = ['c1','c2']
        # Determine columns which contain pixel positions
        cols = util.parse_colnames(colnames,coordfile)
        # read in columns from input coordinates file
        xyvals = np.loadtxt(coordfile,usecols=cols,delimiter=separator)
        if xyvals.ndim == 1:  # only 1 entry in coordfile
            xlist = [xyvals[0].copy()]
            ylist = [xyvals[1].copy()]
        else:
            xlist = xyvals[:,0].copy()
            ylist = xyvals[:,1].copy()
        del xyvals
    else:
        if isinstance(x, np.ndarray):
            xlist = x.tolist()
            ylist = y.tolist()
        elif not isinstance(x,list):
            xlist = [x]
            ylist = [y]
            single_coord = True
        else:
            xlist = x
            ylist = y

    # start by reading in WCS+distortion info for input image
    inwcs = wcsutil.HSTWCS(input)
    if inwcs.wcs.is_unity():
        print("####\nNo valid WCS found in {}.\n Results may be invalid.\n####\n".format(input))

    # Now, convert pixel coordinates into sky coordinates
    dra,ddec = inwcs.all_pix2world(xlist,ylist,1)

    # convert to HH:MM:SS.S format, if specified
    if hms:
        ra,dec = wcs_functions.ddtohms(dra,ddec,precision=precision)
        rastr = ra
        decstr = dec
    else:
        # add formatting based on precision here...
        rastr = []
        decstr = []
        fmt = "%."+repr(precision)+"f"
        for r,d in zip(dra,ddec):
            rastr.append(fmt%r)
            decstr.append(fmt%d)
        ra = dra
        dec = ddec

    if verbose or (not verbose and util.is_blank(output)):
        print('# Coordinate transformations for ',input)
        print('# X Y RA Dec\n')
        for x,y,r,d in zip(xlist,ylist,rastr,decstr):
            print("%.4f %.4f %s %s"%(x,y,r,d))

    # Create output file, if specified
    if output:
        f = open(output,mode='w')
        f.write("# Coordinates converted from %s\n"%input)
        for r,d in zip(rastr,decstr):
            f.write('%s %s\n'%(r,d))
        f.close()
        print('Wrote out results to: ',output)

    if single_coord:
        ra = ra[0]
        dec = dec[0]

    return ra,dec
[ "def", "xy2rd", "(", "input", ",", "x", "=", "None", ",", "y", "=", "None", ",", "coords", "=", "None", ",", "coordfile", "=", "None", ",", "colnames", "=", "None", ",", "separator", "=", "None", ",", "hms", "=", "True", ",", "precision", "=", "6", ",", "output", "=", "None", ",", "verbose", "=", "True", ")", ":", "single_coord", "=", "False", "# Only use value provided in `coords` if nothing has been specified for coordfile", "if", "coords", "is", "not", "None", "and", "coordfile", "is", "None", ":", "coordfile", "=", "coords", "warnings", ".", "simplefilter", "(", "'always'", ",", "DeprecationWarning", ")", "warnings", ".", "warn", "(", "\"Please update calling code to pass in `coordfile` instead of `coords`.\"", ",", "category", "=", "DeprecationWarning", ")", "warnings", ".", "simplefilter", "(", "'default'", ",", "DeprecationWarning", ")", "if", "coordfile", "is", "not", "None", ":", "if", "colnames", "in", "blank_list", ":", "colnames", "=", "[", "'c1'", ",", "'c2'", "]", "# Determine columns which contain pixel positions", "cols", "=", "util", ".", "parse_colnames", "(", "colnames", ",", "coordfile", ")", "# read in columns from input coordinates file", "xyvals", "=", "np", ".", "loadtxt", "(", "coordfile", ",", "usecols", "=", "cols", ",", "delimiter", "=", "separator", ")", "if", "xyvals", ".", "ndim", "==", "1", ":", "# only 1 entry in coordfile", "xlist", "=", "[", "xyvals", "[", "0", "]", ".", "copy", "(", ")", "]", "ylist", "=", "[", "xyvals", "[", "1", "]", ".", "copy", "(", ")", "]", "else", ":", "xlist", "=", "xyvals", "[", ":", ",", "0", "]", ".", "copy", "(", ")", "ylist", "=", "xyvals", "[", ":", ",", "1", "]", ".", "copy", "(", ")", "del", "xyvals", "else", ":", "if", "isinstance", "(", "x", ",", "np", ".", "ndarray", ")", ":", "xlist", "=", "x", ".", "tolist", "(", ")", "ylist", "=", "y", ".", "tolist", "(", ")", "elif", "not", "isinstance", "(", "x", ",", "list", ")", ":", "xlist", "=", "[", "x", "]", "ylist", "=", "[", "y", "]", "single_coord", "=", "True", "else", ":", "xlist", "=", "x", "ylist", "=", "y", "# start by reading in WCS+distortion info for input image", "inwcs", "=", "wcsutil", ".", "HSTWCS", "(", "input", ")", "if", "inwcs", ".", "wcs", ".", "is_unity", "(", ")", ":", "print", "(", "\"####\\nNo valid WCS found in {}.\\n Results may be invalid.\\n####\\n\"", ".", "format", "(", "input", ")", ")", "# Now, convert pixel coordinates into sky coordinates", "dra", ",", "ddec", "=", "inwcs", ".", "all_pix2world", "(", "xlist", ",", "ylist", ",", "1", ")", "# convert to HH:MM:SS.S format, if specified", "if", "hms", ":", "ra", ",", "dec", "=", "wcs_functions", ".", "ddtohms", "(", "dra", ",", "ddec", ",", "precision", "=", "precision", ")", "rastr", "=", "ra", "decstr", "=", "dec", "else", ":", "# add formatting based on precision here...", "rastr", "=", "[", "]", "decstr", "=", "[", "]", "fmt", "=", "\"%.\"", "+", "repr", "(", "precision", ")", "+", "\"f\"", "for", "r", ",", "d", "in", "zip", "(", "dra", ",", "ddec", ")", ":", "rastr", ".", "append", "(", "fmt", "%", "r", ")", "decstr", ".", "append", "(", "fmt", "%", "d", ")", "ra", "=", "dra", "dec", "=", "ddec", "if", "verbose", "or", "(", "not", "verbose", "and", "util", ".", "is_blank", "(", "output", ")", ")", ":", "print", "(", "'# Coordinate transformations for '", ",", "input", ")", "print", "(", "'# X Y RA Dec\\n'", ")", "for", "x", ",", "y", ",", "r", ",", "d", "in", "zip", "(", "xlist", ",", "ylist", ",", "rastr", ",", "decstr", ")", ":", "print", "(", "\"%.4f %.4f %s %s\"", "%", "(", "x", ",", 
"y", ",", "r", ",", "d", ")", ")", "# Create output file, if specified", "if", "output", ":", "f", "=", "open", "(", "output", ",", "mode", "=", "'w'", ")", "f", ".", "write", "(", "\"# Coordinates converted from %s\\n\"", "%", "input", ")", "for", "r", ",", "d", "in", "zip", "(", "rastr", ",", "decstr", ")", ":", "f", ".", "write", "(", "'%s %s\\n'", "%", "(", "r", ",", "d", ")", ")", "f", ".", "close", "(", ")", "print", "(", "'Wrote out results to: '", ",", "output", ")", "if", "single_coord", ":", "ra", "=", "ra", "[", "0", "]", "dec", "=", "dec", "[", "0", "]", "return", "ra", ",", "dec" ]
Primary interface to perform coordinate transformations from pixel to sky coordinates using STWCS and full distortion models read from the input image header.
[ "Primary", "interface", "to", "perform", "coordinate", "transformations", "from", "pixel", "to", "sky", "coordinates", "using", "STWCS", "and", "full", "distortion", "models", "read", "from", "the", "input", "image", "header", "." ]
python
train
34.895349
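A hedged invocation sketch for the record above; the FITS filename is a placeholder, and the call assumes an image whose header carries WCS plus distortion information.

# Convert a single pixel position to (RA, Dec); a scalar x/y pair sets
# single_coord internally, so scalars come back rather than lists.
ra, dec = xy2rd('input_flt.fits', x=1024.0, y=1024.0,
                hms=True, precision=6, verbose=False)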
manns/pyspread
pyspread/src/lib/vlc.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L4134-L4152
def libvlc_media_add_option(p_md, psz_options):
    '''Add an option to the media.
    This option will be used to determine how the media_player will
    read the media. This allows to use VLC's advanced
    reading/streaming options on a per-media basis.
    @note: The options are listed in 'vlc --long-help'
    from the command line, e.g. "-sout-all". Keep in mind that available
    options and their semantics vary across LibVLC versions and builds.
    @warning: Not all options affect L{Media} objects:
    Specifically, due to architectural issues most audio and video options,
    such as text renderer options, have no effects on an individual media.
    These options must be set through L{libvlc_new}() instead.
    @param p_md: the media descriptor.
    @param psz_options: the options (as a string).
    '''
    f = _Cfunctions.get('libvlc_media_add_option', None) or \
        _Cfunction('libvlc_media_add_option', ((1,), (1,),), None,
                   None, Media, ctypes.c_char_p)
    return f(p_md, psz_options)
[ "def", "libvlc_media_add_option", "(", "p_md", ",", "psz_options", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_media_add_option'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_media_add_option'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ",", ")", ",", "None", ",", "None", ",", "Media", ",", "ctypes", ".", "c_char_p", ")", "return", "f", "(", "p_md", ",", "psz_options", ")" ]
Add an option to the media.
This option will be used to determine how the media_player will
read the media. This allows to use VLC's advanced
reading/streaming options on a per-media basis.
@note: The options are listed in 'vlc --long-help'
from the command line, e.g. "-sout-all". Keep in mind that available
options and their semantics vary across LibVLC versions and builds.
@warning: Not all options affect L{Media} objects:
Specifically, due to architectural issues most audio and video options,
such as text renderer options, have no effects on an individual media.
These options must be set through L{libvlc_new}() instead.
@param p_md: the media descriptor.
@param psz_options: the options (as a string).
[ "Add", "an", "option", "to", "the", "media", ".", "This", "option", "will", "be", "used", "to", "determine", "how", "the", "media_player", "will", "read", "the", "media", ".", "This", "allows", "to", "use", "VLC", "s", "advanced", "reading", "/", "streaming", "options", "on", "a", "per", "-", "media", "basis", "." ]
python
train
53.526316
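A hedged, high-level usage sketch for the binding above, going through the object wrapper that python-vlc layers over this ctypes function; the media path is a placeholder.

import vlc  # the bindings module this function belongs to

instance = vlc.Instance()
media = instance.media_new('video.mp4')   # placeholder file name
media.add_option('sout-all')              # delegates to libvlc_media_add_option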
shad7/tvrenamer
tvrenamer/core/episode.py
https://github.com/shad7/tvrenamer/blob/7fb59cb02669357e73b7acb92dcb6d74fdff4654/tvrenamer/core/episode.py#L237-L249
def rename(self):
    """Renames media file to formatted name.

    After parsing data from initial media filename and searching for
    additional data using a data service, a formatted filename will be
    generated and the media file will be renamed to the generated name
    and optionally relocated.
    """
    renamer.execute(self.original, self.out_location)
    if cfg.CONF.move_files_enabled:
        LOG.debug('relocated: %s', self)
    else:
        LOG.debug('renamed: %s', self)
[ "def", "rename", "(", "self", ")", ":", "renamer", ".", "execute", "(", "self", ".", "original", ",", "self", ".", "out_location", ")", "if", "cfg", ".", "CONF", ".", "move_files_enabled", ":", "LOG", ".", "debug", "(", "'relocated: %s'", ",", "self", ")", "else", ":", "LOG", ".", "debug", "(", "'renamed: %s'", ",", "self", ")" ]
Renames media file to formatted name.

After parsing data from initial media filename and searching for
additional data using a data service, a formatted filename will be
generated and the media file will be renamed to the generated name
and optionally relocated.
[ "Renames", "media", "file", "to", "formatted", "name", "." ]
python
train
40.538462
flatangle/flatlib
flatlib/tools/planetarytime.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/tools/planetarytime.py#L98-L101
def getHourTable(date, pos):
    """ Returns an HourTable object. """
    table = hourTable(date, pos)
    return HourTable(table, date)
[ "def", "getHourTable", "(", "date", ",", "pos", ")", ":", "table", "=", "hourTable", "(", "date", ",", "pos", ")", "return", "HourTable", "(", "table", ",", "date", ")" ]
Returns an HourTable object.
[ "Returns", "an", "HourTable", "object", "." ]
python
train
33.25
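A usage sketch assuming flatlib's standard Datetime and GeoPos helpers; their argument formats follow flatlib's documentation, and the exact values are placeholders.

from flatlib.datetime import Datetime
from flatlib.geopos import GeoPos

date = Datetime('2015/03/13', '17:00', '+00:00')
pos = GeoPos('38n32', '8w54')
hour_table = getHourTable(date, pos)   # planetary-hour table for that date and place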
tapilab/brandelion
brandelion/cli/collect.py
https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/collect.py#L160-L168
def fetch_list_members(list_url):
    """ Get all members of the list specified by the given url. E.g.,
    https://twitter.com/lore77/lists/libri-cultura-education """
    match = re.match(r'.+twitter\.com\/(.+)\/lists\/(.+)', list_url)
    if not match:
        print('cannot parse list url %s' % list_url)
        return []
    screen_name, slug = match.groups()
    print('collecting list %s/%s' % (screen_name, slug))
    return twutil.collect.list_members(slug, screen_name)
[ "def", "fetch_list_members", "(", "list_url", ")", ":", "match", "=", "re", ".", "match", "(", "r'.+twitter\\.com\\/(.+)\\/lists\\/(.+)'", ",", "list_url", ")", "if", "not", "match", ":", "print", "(", "'cannot parse list url %s'", "%", "list_url", ")", "return", "[", "]", "screen_name", ",", "slug", "=", "match", ".", "groups", "(", ")", "print", "(", "'collecting list %s/%s'", "%", "(", "screen_name", ",", "slug", ")", ")", "return", "twutil", ".", "collect", ".", "list_members", "(", "slug", ",", "screen_name", ")" ]
Get all members of the list specified by the given url. E.g., https://twitter.com/lore77/lists/libri-cultura-education
[ "Get", "all", "members", "of", "the", "list", "specified", "by", "the", "given", "url", ".", "E", ".", "g", ".", "https", ":", "//", "twitter", ".", "com", "/", "lore77", "/", "lists", "/", "libri", "-", "cultura", "-", "education" ]
python
train
52
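The URL parsing step in isolation, runnable as-is; the input is the example URL from the docstring above.

import re  # same module fetch_list_members relies on

match = re.match(r'.+twitter\.com\/(.+)\/lists\/(.+)',
                 'https://twitter.com/lore77/lists/libri-cultura-education')
screen_name, slug = match.groups()
print(screen_name, slug)   # -> lore77 libri-cultura-education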
projectatomic/atomic-reactor
atomic_reactor/plugins/post_koji_upload.py
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/plugins/post_koji_upload.py#L380-L467
def get_output(self, buildroot_id):
    """
    Build the 'output' section of the metadata.

    :return: list, Output instances
    """

    def add_buildroot_id(output):
        logfile, metadata = output
        metadata.update({'buildroot_id': buildroot_id})
        return Output(file=logfile, metadata=metadata)

    def add_log_type(output, arch):
        logfile, metadata = output
        metadata.update({'type': 'log', 'arch': arch})
        return Output(file=logfile, metadata=metadata)

    arch = os.uname()[4]
    output_files = [add_log_type(add_buildroot_id(metadata), arch)
                    for metadata in self.get_logs()]

    # Parent of squashed built image is base image
    image_id = self.workflow.builder.image_id
    parent_id = None
    if not self.workflow.builder.base_from_scratch:
        parent_id = self.workflow.builder.base_image_inspect['Id']

    # Read config from the registry using v2 schema 2 digest
    registries = self.workflow.push_conf.docker_registries
    if registries:
        config = copy.deepcopy(registries[0].config)
    else:
        config = {}

    # We don't need container_config section
    if config and 'container_config' in config:
        del config['container_config']

    repositories, typed_digests = self.get_repositories_and_digests()
    tags = set(image.tag for image in self.workflow.tag_conf.images)
    metadata, output = self.get_image_output()
    metadata.update({
        'arch': arch,
        'type': 'docker-image',
        'components': self.get_image_components(),
        'extra': {
            'image': {
                'arch': arch,
            },
            'docker': {
                'id': image_id,
                'parent_id': parent_id,
                'repositories': repositories,
                'layer_sizes': self.workflow.layer_sizes,
                'tags': list(tags),
                'config': config,
                'digests': typed_digests
            },
        },
    })
    if self.workflow.builder.base_from_scratch:
        del metadata['extra']['docker']['parent_id']
    if not config:
        del metadata['extra']['docker']['config']
    if not typed_digests:
        del metadata['extra']['docker']['digests']

    # Add the 'docker save' image to the output
    image = add_buildroot_id(output)
    output_files.append(image)

    # add operator manifests to output
    operator_manifests_path = (self.workflow.postbuild_results
                               .get(PLUGIN_EXPORT_OPERATOR_MANIFESTS_KEY))
    if operator_manifests_path:
        operator_manifests_file = open(operator_manifests_path)
        manifests_metadata = self.get_output_metadata(operator_manifests_path,
                                                      OPERATOR_MANIFESTS_ARCHIVE)
        operator_manifests_output = Output(file=operator_manifests_file,
                                           metadata=manifests_metadata)
        # We use log type here until a more appropriate type name is supported by koji
        operator_manifests_output.metadata.update({'arch': arch, 'type': 'log'})
        operator_manifests = add_buildroot_id(operator_manifests_output)
        output_files.append(operator_manifests)

    return output_files
[ "def", "get_output", "(", "self", ",", "buildroot_id", ")", ":", "def", "add_buildroot_id", "(", "output", ")", ":", "logfile", ",", "metadata", "=", "output", "metadata", ".", "update", "(", "{", "'buildroot_id'", ":", "buildroot_id", "}", ")", "return", "Output", "(", "file", "=", "logfile", ",", "metadata", "=", "metadata", ")", "def", "add_log_type", "(", "output", ",", "arch", ")", ":", "logfile", ",", "metadata", "=", "output", "metadata", ".", "update", "(", "{", "'type'", ":", "'log'", ",", "'arch'", ":", "arch", "}", ")", "return", "Output", "(", "file", "=", "logfile", ",", "metadata", "=", "metadata", ")", "arch", "=", "os", ".", "uname", "(", ")", "[", "4", "]", "output_files", "=", "[", "add_log_type", "(", "add_buildroot_id", "(", "metadata", ")", ",", "arch", ")", "for", "metadata", "in", "self", ".", "get_logs", "(", ")", "]", "# Parent of squashed built image is base image", "image_id", "=", "self", ".", "workflow", ".", "builder", ".", "image_id", "parent_id", "=", "None", "if", "not", "self", ".", "workflow", ".", "builder", ".", "base_from_scratch", ":", "parent_id", "=", "self", ".", "workflow", ".", "builder", ".", "base_image_inspect", "[", "'Id'", "]", "# Read config from the registry using v2 schema 2 digest", "registries", "=", "self", ".", "workflow", ".", "push_conf", ".", "docker_registries", "if", "registries", ":", "config", "=", "copy", ".", "deepcopy", "(", "registries", "[", "0", "]", ".", "config", ")", "else", ":", "config", "=", "{", "}", "# We don't need container_config section", "if", "config", "and", "'container_config'", "in", "config", ":", "del", "config", "[", "'container_config'", "]", "repositories", ",", "typed_digests", "=", "self", ".", "get_repositories_and_digests", "(", ")", "tags", "=", "set", "(", "image", ".", "tag", "for", "image", "in", "self", ".", "workflow", ".", "tag_conf", ".", "images", ")", "metadata", ",", "output", "=", "self", ".", "get_image_output", "(", ")", "metadata", ".", "update", "(", "{", "'arch'", ":", "arch", ",", "'type'", ":", "'docker-image'", ",", "'components'", ":", "self", ".", "get_image_components", "(", ")", ",", "'extra'", ":", "{", "'image'", ":", "{", "'arch'", ":", "arch", ",", "}", ",", "'docker'", ":", "{", "'id'", ":", "image_id", ",", "'parent_id'", ":", "parent_id", ",", "'repositories'", ":", "repositories", ",", "'layer_sizes'", ":", "self", ".", "workflow", ".", "layer_sizes", ",", "'tags'", ":", "list", "(", "tags", ")", ",", "'config'", ":", "config", ",", "'digests'", ":", "typed_digests", "}", ",", "}", ",", "}", ")", "if", "self", ".", "workflow", ".", "builder", ".", "base_from_scratch", ":", "del", "metadata", "[", "'extra'", "]", "[", "'docker'", "]", "[", "'parent_id'", "]", "if", "not", "config", ":", "del", "metadata", "[", "'extra'", "]", "[", "'docker'", "]", "[", "'config'", "]", "if", "not", "typed_digests", ":", "del", "metadata", "[", "'extra'", "]", "[", "'docker'", "]", "[", "'digests'", "]", "# Add the 'docker save' image to the output", "image", "=", "add_buildroot_id", "(", "output", ")", "output_files", ".", "append", "(", "image", ")", "# add operator manifests to output", "operator_manifests_path", "=", "(", "self", ".", "workflow", ".", "postbuild_results", ".", "get", "(", "PLUGIN_EXPORT_OPERATOR_MANIFESTS_KEY", ")", ")", "if", "operator_manifests_path", ":", "operator_manifests_file", "=", "open", "(", "operator_manifests_path", ")", "manifests_metadata", "=", "self", ".", "get_output_metadata", "(", "operator_manifests_path", ",", "OPERATOR_MANIFESTS_ARCHIVE", 
")", "operator_manifests_output", "=", "Output", "(", "file", "=", "operator_manifests_file", ",", "metadata", "=", "manifests_metadata", ")", "# We use log type here until a more appropriate type name is supported by koji", "operator_manifests_output", ".", "metadata", ".", "update", "(", "{", "'arch'", ":", "arch", ",", "'type'", ":", "'log'", "}", ")", "operator_manifests", "=", "add_buildroot_id", "(", "operator_manifests_output", ")", "output_files", ".", "append", "(", "operator_manifests", ")", "return", "output_files" ]
Build the 'output' section of the metadata.

:return: list, Output instances
[ "Build", "the", "output", "section", "of", "the", "metadata", "." ]
python
train
39.238636
pandas-dev/pandas
pandas/core/indexing.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L1275-L1366
def _convert_to_indexer(self, obj, axis=None, is_setter=False,
                        raise_missing=False):
    """
    Convert indexing key into something we can use to do actual fancy
    indexing on an ndarray

    Examples
    ix[:5] -> slice(0, 5)
    ix[[1,2,3]] -> [1,2,3]
    ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)

    Going by Zen of Python?
    'In the face of ambiguity, refuse the temptation to guess.'
    raise AmbiguousIndexError with integer labels?
    - No, prefer label-based indexing
    """
    if axis is None:
        axis = self.axis or 0

    labels = self.obj._get_axis(axis)

    if isinstance(obj, slice):
        return self._convert_slice_indexer(obj, axis)

    # try to find out correct indexer, if not type correct raise
    try:
        obj = self._convert_scalar_indexer(obj, axis)
    except TypeError:
        # but we will allow setting
        if is_setter:
            pass

    # see if we are positional in nature
    is_int_index = labels.is_integer()
    is_int_positional = is_integer(obj) and not is_int_index

    # if we are a label return me
    try:
        return labels.get_loc(obj)
    except LookupError:
        if isinstance(obj, tuple) and isinstance(labels, MultiIndex):
            if is_setter and len(obj) == labels.nlevels:
                return {'key': obj}
            raise
    except TypeError:
        pass
    except (ValueError):
        if not is_int_positional:
            raise

    # a positional
    if is_int_positional:

        # if we are setting and its not a valid location
        # its an insert which fails by definition
        if is_setter:

            # always valid
            if self.name == 'loc':
                return {'key': obj}

            # a positional
            if (obj >= self.obj.shape[axis] and
                    not isinstance(labels, MultiIndex)):
                raise ValueError("cannot set by positional indexing with "
                                 "enlargement")

        return obj

    if is_nested_tuple(obj, labels):
        return labels.get_locs(obj)
    elif is_list_like_indexer(obj):
        if com.is_bool_indexer(obj):
            obj = check_bool_indexer(labels, obj)
            inds, = obj.nonzero()
            return inds
        else:
            # When setting, missing keys are not allowed, even with .loc:
            kwargs = {'raise_missing': True if is_setter else raise_missing}
            return self._get_listlike_indexer(obj, axis, **kwargs)[1]
    else:
        try:
            return labels.get_loc(obj)
        except LookupError:
            # allow a not found key only if we are a setter
            if not is_list_like_indexer(obj) and is_setter:
                return {'key': obj}
            raise
[ "def", "_convert_to_indexer", "(", "self", ",", "obj", ",", "axis", "=", "None", ",", "is_setter", "=", "False", ",", "raise_missing", "=", "False", ")", ":", "if", "axis", "is", "None", ":", "axis", "=", "self", ".", "axis", "or", "0", "labels", "=", "self", ".", "obj", ".", "_get_axis", "(", "axis", ")", "if", "isinstance", "(", "obj", ",", "slice", ")", ":", "return", "self", ".", "_convert_slice_indexer", "(", "obj", ",", "axis", ")", "# try to find out correct indexer, if not type correct raise", "try", ":", "obj", "=", "self", ".", "_convert_scalar_indexer", "(", "obj", ",", "axis", ")", "except", "TypeError", ":", "# but we will allow setting", "if", "is_setter", ":", "pass", "# see if we are positional in nature", "is_int_index", "=", "labels", ".", "is_integer", "(", ")", "is_int_positional", "=", "is_integer", "(", "obj", ")", "and", "not", "is_int_index", "# if we are a label return me", "try", ":", "return", "labels", ".", "get_loc", "(", "obj", ")", "except", "LookupError", ":", "if", "isinstance", "(", "obj", ",", "tuple", ")", "and", "isinstance", "(", "labels", ",", "MultiIndex", ")", ":", "if", "is_setter", "and", "len", "(", "obj", ")", "==", "labels", ".", "nlevels", ":", "return", "{", "'key'", ":", "obj", "}", "raise", "except", "TypeError", ":", "pass", "except", "(", "ValueError", ")", ":", "if", "not", "is_int_positional", ":", "raise", "# a positional", "if", "is_int_positional", ":", "# if we are setting and its not a valid location", "# its an insert which fails by definition", "if", "is_setter", ":", "# always valid", "if", "self", ".", "name", "==", "'loc'", ":", "return", "{", "'key'", ":", "obj", "}", "# a positional", "if", "(", "obj", ">=", "self", ".", "obj", ".", "shape", "[", "axis", "]", "and", "not", "isinstance", "(", "labels", ",", "MultiIndex", ")", ")", ":", "raise", "ValueError", "(", "\"cannot set by positional indexing with \"", "\"enlargement\"", ")", "return", "obj", "if", "is_nested_tuple", "(", "obj", ",", "labels", ")", ":", "return", "labels", ".", "get_locs", "(", "obj", ")", "elif", "is_list_like_indexer", "(", "obj", ")", ":", "if", "com", ".", "is_bool_indexer", "(", "obj", ")", ":", "obj", "=", "check_bool_indexer", "(", "labels", ",", "obj", ")", "inds", ",", "=", "obj", ".", "nonzero", "(", ")", "return", "inds", "else", ":", "# When setting, missing keys are not allowed, even with .loc:", "kwargs", "=", "{", "'raise_missing'", ":", "True", "if", "is_setter", "else", "raise_missing", "}", "return", "self", ".", "_get_listlike_indexer", "(", "obj", ",", "axis", ",", "*", "*", "kwargs", ")", "[", "1", "]", "else", ":", "try", ":", "return", "labels", ".", "get_loc", "(", "obj", ")", "except", "LookupError", ":", "# allow a not found key only if we are a setter", "if", "not", "is_list_like_indexer", "(", "obj", ")", "and", "is_setter", ":", "return", "{", "'key'", ":", "obj", "}", "raise" ]
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray

Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)

Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
[ "Convert", "indexing", "key", "into", "something", "we", "can", "use", "to", "do", "actual", "fancy", "indexing", "on", "an", "ndarray" ]
python
train
32.815217
xeroc/python-graphenelib
graphenecommon/chain.py
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenecommon/chain.py#L229-L245
def sign(self, tx=None, wifs=[]):
    """ Sign a provided transaction with the provided key(s)

    :param dict tx: The transaction to be signed and returned
    :param string wifs: One or many wif keys to use for signing a
        transaction. If not present, the keys will be loaded from the
        wallet as defined in "missing_signatures" key of the
        transactions.
    """
    if tx:
        txbuffer = self.transactionbuilder_class(tx, blockchain_instance=self)
    else:
        txbuffer = self.txbuffer
    txbuffer.appendWif(wifs)
    txbuffer.appendMissingSignatures()
    txbuffer.sign()
    return txbuffer.json()
[ "def", "sign", "(", "self", ",", "tx", "=", "None", ",", "wifs", "=", "[", "]", ")", ":", "if", "tx", ":", "txbuffer", "=", "self", ".", "transactionbuilder_class", "(", "tx", ",", "blockchain_instance", "=", "self", ")", "else", ":", "txbuffer", "=", "self", ".", "txbuffer", "txbuffer", ".", "appendWif", "(", "wifs", ")", "txbuffer", ".", "appendMissingSignatures", "(", ")", "txbuffer", ".", "sign", "(", ")", "return", "txbuffer", ".", "json", "(", ")" ]
Sign a provided transaction with the provided key(s)

:param dict tx: The transaction to be signed and returned
:param string wifs: One or many wif keys to use for signing a
    transaction. If not present, the keys will be loaded from the
    wallet as defined in "missing_signatures" key of the transactions.
[ "Sign", "a", "provided", "transaction", "with", "the", "provided", "key", "(", "s", ")" ]
python
valid
40.941176
fastai/fastai
fastai/vision/data.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/data.py#L173-L180
def normalize(self, stats:Collection[Tensor]=None, do_x:bool=True, do_y:bool=False)->None:
    "Add normalize transform using `stats` (defaults to `DataBunch.batch_stats`)"
    if getattr(self,'norm',False): raise Exception('Can not call normalize twice')
    if stats is None: self.stats = self.batch_stats()
    else:             self.stats = stats
    self.norm,self.denorm = normalize_funcs(*self.stats, do_x=do_x, do_y=do_y)
    self.add_tfm(self.norm)
    return self
[ "def", "normalize", "(", "self", ",", "stats", ":", "Collection", "[", "Tensor", "]", "=", "None", ",", "do_x", ":", "bool", "=", "True", ",", "do_y", ":", "bool", "=", "False", ")", "->", "None", ":", "if", "getattr", "(", "self", ",", "'norm'", ",", "False", ")", ":", "raise", "Exception", "(", "'Can not call normalize twice'", ")", "if", "stats", "is", "None", ":", "self", ".", "stats", "=", "self", ".", "batch_stats", "(", ")", "else", ":", "self", ".", "stats", "=", "stats", "self", ".", "norm", ",", "self", ".", "denorm", "=", "normalize_funcs", "(", "*", "self", ".", "stats", ",", "do_x", "=", "do_x", ",", "do_y", "=", "do_y", ")", "self", ".", "add_tfm", "(", "self", ".", "norm", ")", "return", "self" ]
Add normalize transform using `stats` (defaults to `DataBunch.batch_stats`)
[ "Add", "normalize", "transform", "using", "stats", "(", "defaults", "to", "DataBunch", ".", "batch_stats", ")" ]
python
train
61.75
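Typical fastai v1 usage, hedged: this assumes an ImageDataBunch and the imagenet_stats constant that ships with fastai.vision; the data path is a placeholder.

from fastai.vision import ImageDataBunch, imagenet_stats

data = ImageDataBunch.from_folder('path/to/images')   # placeholder path
data.normalize(imagenet_stats)   # or data.normalize() to fall back to batch_stats()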
googledatalab/pydatalab
solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py#L37-L66
def parse_arguments(argv):
    """Parse command line arguments.

    Args:
      argv: list of command line arguments, including program name.

    Returns:
      An argparse Namespace object.
    """
    parser = argparse.ArgumentParser(
        description='Runs Preprocessing on structured CSV data.')
    parser.add_argument('--input-file-pattern',
                        type=str,
                        required=True,
                        help='Input CSV file names. May contain a file pattern')
    parser.add_argument('--output-dir',
                        type=str,
                        required=True,
                        help='Google Cloud Storage which to place outputs.')
    parser.add_argument('--schema-file',
                        type=str,
                        required=True,
                        help=('BigQuery json schema file'))

    args = parser.parse_args(args=argv[1:])

    # Make sure the output folder exists if local folder.
    file_io.recursive_create_dir(args.output_dir)

    return args
[ "def", "parse_arguments", "(", "argv", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Runs Preprocessing on structured CSV data.'", ")", "parser", ".", "add_argument", "(", "'--input-file-pattern'", ",", "type", "=", "str", ",", "required", "=", "True", ",", "help", "=", "'Input CSV file names. May contain a file pattern'", ")", "parser", ".", "add_argument", "(", "'--output-dir'", ",", "type", "=", "str", ",", "required", "=", "True", ",", "help", "=", "'Google Cloud Storage which to place outputs.'", ")", "parser", ".", "add_argument", "(", "'--schema-file'", ",", "type", "=", "str", ",", "required", "=", "True", ",", "help", "=", "(", "'BigQuery json schema file'", ")", ")", "args", "=", "parser", ".", "parse_args", "(", "args", "=", "argv", "[", "1", ":", "]", ")", "# Make sure the output folder exists if local folder.", "file_io", ".", "recursive_create_dir", "(", "args", ".", "output_dir", ")", "return", "args" ]
Parse command line arguments.

Args:
  argv: list of command line arguments, including program name.

Returns:
  An argparse Namespace object.
[ "Parse", "command", "line", "arguments", "." ]
python
train
32.2
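Invoking the parser directly; all file names below are placeholders. argv[0] is skipped, mirroring sys.argv, and argparse exposes the dashed flags as underscored attributes.

args = parse_arguments([
    'preprocess.py',                        # program name, ignored
    '--input-file-pattern', 'data/*.csv',
    '--output-dir', '/tmp/preprocess_out',  # created if it does not exist
    '--schema-file', 'schema.json',
])
print(args.input_file_pattern, args.output_dir, args.schema_file)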
miLibris/flask-rest-jsonapi
flask_rest_jsonapi/api.py
https://github.com/miLibris/flask-rest-jsonapi/blob/ecc8f2cd2b54cc0bfae7acd6cffcda0ba1140c43/flask_rest_jsonapi/api.py#L35-L59
def init_app(self, app=None, blueprint=None, additional_blueprints=None):
    """Update flask application with our api

    :param Application app: a flask application
    """
    if app is not None:
        self.app = app

    if blueprint is not None:
        self.blueprint = blueprint

    for resource in self.resources:
        self.route(resource['resource'],
                   resource['view'],
                   *resource['urls'],
                   url_rule_options=resource['url_rule_options'])

    if self.blueprint is not None:
        self.app.register_blueprint(self.blueprint)

    if additional_blueprints is not None:
        for blueprint in additional_blueprints:
            self.app.register_blueprint(blueprint)

    self.app.config.setdefault('PAGE_SIZE', 30)
[ "def", "init_app", "(", "self", ",", "app", "=", "None", ",", "blueprint", "=", "None", ",", "additional_blueprints", "=", "None", ")", ":", "if", "app", "is", "not", "None", ":", "self", ".", "app", "=", "app", "if", "blueprint", "is", "not", "None", ":", "self", ".", "blueprint", "=", "blueprint", "for", "resource", "in", "self", ".", "resources", ":", "self", ".", "route", "(", "resource", "[", "'resource'", "]", ",", "resource", "[", "'view'", "]", ",", "*", "resource", "[", "'urls'", "]", ",", "url_rule_options", "=", "resource", "[", "'url_rule_options'", "]", ")", "if", "self", ".", "blueprint", "is", "not", "None", ":", "self", ".", "app", ".", "register_blueprint", "(", "self", ".", "blueprint", ")", "if", "additional_blueprints", "is", "not", "None", ":", "for", "blueprint", "in", "additional_blueprints", ":", "self", ".", "app", ".", "register_blueprint", "(", "blueprint", ")", "self", ".", "app", ".", "config", ".", "setdefault", "(", "'PAGE_SIZE'", ",", "30", ")" ]
Update flask application with our api

:param Application app: a flask application
[ "Update", "flask", "application", "with", "our", "api" ]
python
train
33.36
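A minimal Flask wiring sketch, assuming the Api class from this module supports deferred initialization (the usual init_app pattern); resource registration is assumed to have happened elsewhere.

from flask import Flask

app = Flask(__name__)
api = Api()            # flask_rest_jsonapi.Api instance
api.init_app(app)      # registers routes and defaults PAGE_SIZE to 30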
bdcht/grandalf
grandalf/layouts.py
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L222-L238
def _medianindex(self,v):
    """
    find new position of vertex v according to adjacency in layer l+dir.
    position is given by the median value of adjacent positions.
    median heuristic is proven to achieve at most 3 times the minimum
    of crossings (while barycenter achieves in theory the order of |V|)
    """
    assert self.prevlayer()!=None
    N = self._neighbors(v)
    g=self.layout.grx
    pos = [g[x].pos for x in N]
    lp = len(pos)
    if lp==0: return []
    pos.sort()
    pos = pos[::self.layout.dirh]
    i,j = divmod(lp-1,2)
    return [pos[i]] if j==0 else [pos[i],pos[i+j]]
[ "def", "_medianindex", "(", "self", ",", "v", ")", ":", "assert", "self", ".", "prevlayer", "(", ")", "!=", "None", "N", "=", "self", ".", "_neighbors", "(", "v", ")", "g", "=", "self", ".", "layout", ".", "grx", "pos", "=", "[", "g", "[", "x", "]", ".", "pos", "for", "x", "in", "N", "]", "lp", "=", "len", "(", "pos", ")", "if", "lp", "==", "0", ":", "return", "[", "]", "pos", ".", "sort", "(", ")", "pos", "=", "pos", "[", ":", ":", "self", ".", "layout", ".", "dirh", "]", "i", ",", "j", "=", "divmod", "(", "lp", "-", "1", ",", "2", ")", "return", "[", "pos", "[", "i", "]", "]", "if", "j", "==", "0", "else", "[", "pos", "[", "i", "]", ",", "pos", "[", "i", "+", "j", "]", "]" ]
find new position of vertex v according to adjacency in layer l+dir. position is given by the median value of adjacent positions. median heuristic is proven to achieve at most 3 times the minimum of crossings (while barycenter achieves in theory the order of |V|)
[ "find", "new", "position", "of", "vertex", "v", "according", "to", "adjacency", "in", "layer", "l", "+", "dir", ".", "position", "is", "given", "by", "the", "median", "value", "of", "adjacent", "positions", ".", "median", "heuristic", "is", "proven", "to", "achieve", "at", "most", "3", "times", "the", "minimum", "of", "crossings", "(", "while", "barycenter", "achieves", "in", "theory", "the", "order", "of", "|V|", ")" ]
python
train
38.235294
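The median selection from the record above, lifted into a standalone sketch (assuming dirh == 1, i.e. no direction flip) so the odd/even behavior is visible.

def median_of_positions(pos, dirh=1):
    # mirrors the core of _medianindex: sort, optionally reverse, take median
    pos = sorted(pos)[::dirh]
    i, j = divmod(len(pos) - 1, 2)
    return [pos[i]] if j == 0 else [pos[i], pos[i + j]]

print(median_of_positions([4, 1, 7]))     # odd count  -> [4]
print(median_of_positions([4, 1, 7, 2]))  # even count -> [2, 4]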
user-cont/conu
conu/backend/docker/skopeo.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/docker/skopeo.py#L23-L65
def transport_param(image):
    """
    Parse DockerImage info into skopeo parameter

    :param image: DockerImage
    :return: string. skopeo parameter specifying image
    """
    transports = {SkopeoTransport.CONTAINERS_STORAGE: "containers-storage:",
                  SkopeoTransport.DIRECTORY: "dir:",
                  SkopeoTransport.DOCKER: "docker://",
                  SkopeoTransport.DOCKER_ARCHIVE: "docker-archive",
                  SkopeoTransport.DOCKER_DAEMON: "docker-daemon:",
                  SkopeoTransport.OCI: "oci:",
                  SkopeoTransport.OSTREE: "ostree:"}

    transport = image.transport
    tag = image.tag
    repository = image.name
    path = image.path

    if not transport:
        transport = SkopeoTransport.DOCKER
    command = transports[transport]

    path_required = [SkopeoTransport.DIRECTORY, SkopeoTransport.DOCKER_ARCHIVE, SkopeoTransport.OCI]
    if transport in path_required and path is None:
        raise ValueError(transports[transport] + " path is required to be specified")

    if transport == SkopeoTransport.DIRECTORY:
        return command + path
    if transport == SkopeoTransport.DOCKER_ARCHIVE:
        command += path
        if repository is None:
            return command
        command += ":"
    if transport in [SkopeoTransport.CONTAINERS_STORAGE, SkopeoTransport.DOCKER,
                     SkopeoTransport.DOCKER_ARCHIVE, transport.DOCKER_DAEMON]:
        return command + repository + ":" + tag
    if transport == SkopeoTransport.OCI:
        return command + path + ":" + tag
    if transport == SkopeoTransport.OSTREE:
        return command + repository + ("@" + path if path else "")

    raise ConuException("This transport is not supported")
[ "def", "transport_param", "(", "image", ")", ":", "transports", "=", "{", "SkopeoTransport", ".", "CONTAINERS_STORAGE", ":", "\"containers-storage:\"", ",", "SkopeoTransport", ".", "DIRECTORY", ":", "\"dir:\"", ",", "SkopeoTransport", ".", "DOCKER", ":", "\"docker://\"", ",", "SkopeoTransport", ".", "DOCKER_ARCHIVE", ":", "\"docker-archive\"", ",", "SkopeoTransport", ".", "DOCKER_DAEMON", ":", "\"docker-daemon:\"", ",", "SkopeoTransport", ".", "OCI", ":", "\"oci:\"", ",", "SkopeoTransport", ".", "OSTREE", ":", "\"ostree:\"", "}", "transport", "=", "image", ".", "transport", "tag", "=", "image", ".", "tag", "repository", "=", "image", ".", "name", "path", "=", "image", ".", "path", "if", "not", "transport", ":", "transport", "=", "SkopeoTransport", ".", "DOCKER", "command", "=", "transports", "[", "transport", "]", "path_required", "=", "[", "SkopeoTransport", ".", "DIRECTORY", ",", "SkopeoTransport", ".", "DOCKER_ARCHIVE", ",", "SkopeoTransport", ".", "OCI", "]", "if", "transport", "in", "path_required", "and", "path", "is", "None", ":", "raise", "ValueError", "(", "transports", "[", "transport", "]", "+", "\" path is required to be specified\"", ")", "if", "transport", "==", "SkopeoTransport", ".", "DIRECTORY", ":", "return", "command", "+", "path", "if", "transport", "==", "SkopeoTransport", ".", "DOCKER_ARCHIVE", ":", "command", "+=", "path", "if", "repository", "is", "None", ":", "return", "command", "command", "+=", "\":\"", "if", "transport", "in", "[", "SkopeoTransport", ".", "CONTAINERS_STORAGE", ",", "SkopeoTransport", ".", "DOCKER", ",", "SkopeoTransport", ".", "DOCKER_ARCHIVE", ",", "transport", ".", "DOCKER_DAEMON", "]", ":", "return", "command", "+", "repository", "+", "\":\"", "+", "tag", "if", "transport", "==", "SkopeoTransport", ".", "OCI", ":", "return", "command", "+", "path", "+", "\":\"", "+", "tag", "if", "transport", "==", "SkopeoTransport", ".", "OSTREE", ":", "return", "command", "+", "repository", "+", "(", "\"@\"", "+", "path", "if", "path", "else", "\"\"", ")", "raise", "ConuException", "(", "\"This transport is not supported\"", ")" ]
Parse DockerImage info into skopeo parameter

:param image: DockerImage
:return: string. skopeo parameter specifying image
[ "Parse", "DockerImage", "info", "into", "skopeo", "parameter" ]
python
train
39.395349
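A behavioral sketch, assuming the conu module context above (SkopeoTransport and friends in scope); _FakeImage is a hypothetical stub exposing only the four attributes the function reads.

class _FakeImage:
    transport = None     # falls back to SkopeoTransport.DOCKER
    tag = 'latest'
    name = 'fedora'
    path = None

print(transport_param(_FakeImage()))   # expected: docker://fedora:latest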
titusjan/argos
argos/application.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/application.py#L314-L351
def addNewMainWindow(self, settings=None, inspectorFullName=None):
    """ Creates and shows a new MainWindow.

        If inspectorFullName is set, it will set the identifier from that name.
        If the inspector identifier is not found in the registry, a KeyError is raised.
    """
    mainWindow = MainWindow(self)
    self.mainWindows.append(mainWindow)

    self.windowActionGroup.addAction(mainWindow.activateWindowAction)
    self.repopulateAllWindowMenus()

    if settings:
        mainWindow.readViewSettings(settings)

    if inspectorFullName:
        inspectorId = nameToIdentifier(inspectorFullName)
        mainWindow.setInspectorById(inspectorId)

    if mainWindow.inspectorRegItem: # can be None at start
        inspectorId = mainWindow.inspectorRegItem.identifier
        mainWindow.getInspectorActionById(inspectorId).setChecked(True)
        logger.info("Created new window with inspector: {}"
                    .format(mainWindow.inspectorRegItem.fullName))
    else:
        logger.info("Created new window without inspector")

    mainWindow.drawInspectorContents(reason=UpdateReason.NEW_MAIN_WINDOW)
    mainWindow.show()

    if sys.platform.startswith('darwin'):
        # Calling raise before the QApplication.exec_ only shows the last window
        # that was added. Therefore we also call activeWindow. However, this may not
        # always be desirable. TODO: make optional?
        mainWindow.raise_()
        pass

    return mainWindow
[ "def", "addNewMainWindow", "(", "self", ",", "settings", "=", "None", ",", "inspectorFullName", "=", "None", ")", ":", "mainWindow", "=", "MainWindow", "(", "self", ")", "self", ".", "mainWindows", ".", "append", "(", "mainWindow", ")", "self", ".", "windowActionGroup", ".", "addAction", "(", "mainWindow", ".", "activateWindowAction", ")", "self", ".", "repopulateAllWindowMenus", "(", ")", "if", "settings", ":", "mainWindow", ".", "readViewSettings", "(", "settings", ")", "if", "inspectorFullName", ":", "inspectorId", "=", "nameToIdentifier", "(", "inspectorFullName", ")", "mainWindow", ".", "setInspectorById", "(", "inspectorId", ")", "if", "mainWindow", ".", "inspectorRegItem", ":", "# can be None at start", "inspectorId", "=", "mainWindow", ".", "inspectorRegItem", ".", "identifier", "mainWindow", ".", "getInspectorActionById", "(", "inspectorId", ")", ".", "setChecked", "(", "True", ")", "logger", ".", "info", "(", "\"Created new window with inspector: {}\"", ".", "format", "(", "mainWindow", ".", "inspectorRegItem", ".", "fullName", ")", ")", "else", ":", "logger", ".", "info", "(", "\"Created new window without inspector\"", ")", "mainWindow", ".", "drawInspectorContents", "(", "reason", "=", "UpdateReason", ".", "NEW_MAIN_WINDOW", ")", "mainWindow", ".", "show", "(", ")", "if", "sys", ".", "platform", ".", "startswith", "(", "'darwin'", ")", ":", "# Calling raise before the QApplication.exec_ only shows the last window", "# that was added. Therefore we also call activeWindow. However, this may not", "# always be desirable. TODO: make optional?", "mainWindow", ".", "raise_", "(", ")", "pass", "return", "mainWindow" ]
Creates and shows a new MainWindow. If inspectorFullName is set, it will set the identifier from that name. If the inspector identifier is not found in the registry, a KeyError is raised.
[ "Creates", "and", "shows", "a", "new", "MainWindow", "." ]
python
train
40.973684
JoseAntFer/pyny3d
pyny3d/geoms.py
https://github.com/JoseAntFer/pyny3d/blob/fb81684935a24f7e50c975cb4383c81a63ab56df/pyny3d/geoms.py#L1293-L1303
def mirror(self, axes='x'):
    """
    Generates a symmetry of the Polyhedron with respect to global axes.

    :param axes: 'x', 'y', 'z', 'xy', 'xz', 'yz'...
    :type axes: str
    :returns: ``pyny.Polyhedron``
    """
    polygon = np.array([[0,0], [0,1], [1,1]])
    space = Space(Place(polygon, polyhedra=self))
    return space.mirror(axes, inplace=False)[0].polyhedra[0]
[ "def", "mirror", "(", "self", ",", "axes", "=", "'x'", ")", ":", "polygon", "=", "np", ".", "array", "(", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "1", "]", ",", "[", "1", ",", "1", "]", "]", ")", "space", "=", "Space", "(", "Place", "(", "polygon", ",", "polyhedra", "=", "self", ")", ")", "return", "space", ".", "mirror", "(", "axes", ",", "inplace", "=", "False", ")", "[", "0", "]", ".", "polyhedra", "[", "0", "]" ]
Generates a symmetry of the Polyhedron with respect to global axes.

:param axes: 'x', 'y', 'z', 'xy', 'xz', 'yz'...
:type axes: str
:returns: ``pyny.Polyhedron``
[ "Generates", "a", "symmetry", "of", "the", "Polyhedron", "with", "respect", "to", "global", "axes", ".", ":", "param", "axes", ":", "x", "y", "z", "xy", "xz", "yz", "...", ":", "type", "axes", ":", "str", ":", "returns", ":", "pyny", ".", "Polyhedron" ]
python
train
37.727273
cloudendpoints/endpoints-python
endpoints/api_config.py
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/api_config.py#L2001-L2026
def __get_merged_api_info(self, services):
    """Builds a description of an API.

    Args:
      services: List of protorpc.remote.Service instances implementing an
        api/version.

    Returns:
      The _ApiInfo object to use for the API that the given services
      implement.

    Raises:
      ApiConfigurationError: If there's something wrong with the API
        configuration, such as a multiclass API decorated with different API
        descriptors (see the docstring for api()).
    """
    merged_api_info = services[0].api_info

    # Verify that, if there are multiple classes here, they're allowed to
    # implement the same API.
    for service in services[1:]:
        if not merged_api_info.is_same_api(service.api_info):
            raise api_exceptions.ApiConfigurationError(
                _MULTICLASS_MISMATCH_ERROR_TEMPLATE % (service.api_info.name,
                                                       service.api_info.api_version))

    return merged_api_info
[ "def", "__get_merged_api_info", "(", "self", ",", "services", ")", ":", "merged_api_info", "=", "services", "[", "0", "]", ".", "api_info", "# Verify that, if there are multiple classes here, they're allowed to", "# implement the same API.", "for", "service", "in", "services", "[", "1", ":", "]", ":", "if", "not", "merged_api_info", ".", "is_same_api", "(", "service", ".", "api_info", ")", ":", "raise", "api_exceptions", ".", "ApiConfigurationError", "(", "_MULTICLASS_MISMATCH_ERROR_TEMPLATE", "%", "(", "service", ".", "api_info", ".", "name", ",", "service", ".", "api_info", ".", "api_version", ")", ")", "return", "merged_api_info" ]
Builds a description of an API. Args: services: List of protorpc.remote.Service instances implementing an api/version. Returns: The _ApiInfo object to use for the API that the given services implement. Raises: ApiConfigurationError: If there's something wrong with the API configuration, such as a multiclass API decorated with different API descriptors (see the docstring for api()).
[ "Builds", "a", "description", "of", "an", "API", "." ]
python
train
36.576923
discogs/python-cas-client
cas_client/cas_client.py
https://github.com/discogs/python-cas-client/blob/f1efa2f49a22d43135014cb1b8d9dd3875304318/cas_client/cas_client.py#L77-L83
def delete_session(self, ticket): ''' Delete a session record associated with a service ticket. ''' assert isinstance(self.session_storage_adapter, CASSessionAdapter) logging.debug('[CAS] Deleting session for ticket {}'.format(ticket)) self.session_storage_adapter.delete(ticket)
[ "def", "delete_session", "(", "self", ",", "ticket", ")", ":", "assert", "isinstance", "(", "self", ".", "session_storage_adapter", ",", "CASSessionAdapter", ")", "logging", ".", "debug", "(", "'[CAS] Deleting session for ticket {}'", ".", "format", "(", "ticket", ")", ")", "self", ".", "session_storage_adapter", ".", "delete", "(", "ticket", ")" ]
Delete a session record associated with a service ticket.
[ "Delete", "a", "session", "record", "associated", "with", "a", "service", "ticket", "." ]
python
train
45.857143
rigetti/quantumflow
quantumflow/forest/__init__.py
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/forest/__init__.py#L188-L213
def pyquil_to_circuit(program: pyquil.Program) -> Circuit: """Convert a protoquil pyQuil program to a QuantumFlow Circuit""" circ = Circuit() for inst in program.instructions: # print(type(inst)) if isinstance(inst, pyquil.Declare): # Ignore continue if isinstance(inst, pyquil.Halt): # Ignore continue if isinstance(inst, pyquil.Pragma): # TODO Barrier? continue elif isinstance(inst, pyquil.Measurement): circ += Measure(inst.qubit.index) # elif isinstance(inst, pyquil.ResetQubit): # TODO # continue elif isinstance(inst, pyquil.Gate): defgate = STDGATES[inst.name] gate = defgate(*inst.params) qubits = [q.index for q in inst.qubits] gate = gate.relabel(qubits) circ += gate else: raise ValueError('PyQuil program is not protoquil') return circ
[ "def", "pyquil_to_circuit", "(", "program", ":", "pyquil", ".", "Program", ")", "->", "Circuit", ":", "circ", "=", "Circuit", "(", ")", "for", "inst", "in", "program", ".", "instructions", ":", "# print(type(inst))", "if", "isinstance", "(", "inst", ",", "pyquil", ".", "Declare", ")", ":", "# Ignore", "continue", "if", "isinstance", "(", "inst", ",", "pyquil", ".", "Halt", ")", ":", "# Ignore", "continue", "if", "isinstance", "(", "inst", ",", "pyquil", ".", "Pragma", ")", ":", "# TODO Barrier?", "continue", "elif", "isinstance", "(", "inst", ",", "pyquil", ".", "Measurement", ")", ":", "circ", "+=", "Measure", "(", "inst", ".", "qubit", ".", "index", ")", "# elif isinstance(inst, pyquil.ResetQubit): # TODO", "# continue", "elif", "isinstance", "(", "inst", ",", "pyquil", ".", "Gate", ")", ":", "defgate", "=", "STDGATES", "[", "inst", ".", "name", "]", "gate", "=", "defgate", "(", "*", "inst", ".", "params", ")", "qubits", "=", "[", "q", ".", "index", "for", "q", "in", "inst", ".", "qubits", "]", "gate", "=", "gate", ".", "relabel", "(", "qubits", ")", "circ", "+=", "gate", "else", ":", "raise", "ValueError", "(", "'PyQuil program is not protoquil'", ")", "return", "circ" ]
Convert a protoquil pyQuil program to a QuantumFlow Circuit
[ "Convert", "a", "protoquil", "pyQuil", "program", "to", "a", "QuantumFlow", "Circuit" ]
python
train
37.615385
not-na/peng3d
peng3d/model.py
https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/model.py#L487-L493
def getVertices(self,data): """ Returns the vertices of this region already transformed and ready-to-use. Internally uses :py:meth:`Bone.transformVertices()`\ . """ return self.bone.transformVertices(data,self.vertices,self.dims)
[ "def", "getVertices", "(", "self", ",", "data", ")", ":", "return", "self", ".", "bone", ".", "transformVertices", "(", "data", ",", "self", ".", "vertices", ",", "self", ".", "dims", ")" ]
Returns the vertices of this region already transformed and ready-to-use. Internally uses :py:meth:`Bone.transformVertices()`\ .
[ "Returns", "the", "vertices", "of", "this", "region", "already", "transformed", "and", "ready", "-", "to", "-", "use", ".", "Internally", "uses", ":", "py", ":", "meth", ":", "Bone", ".", "transformVertices", "()", "\\", "." ]
python
test
38.857143
wummel/dosage
dosagelib/plugins/t.py
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/plugins/t.py#L185-L189
def namer(cls, imageUrl, pageUrl): """Use page URL sequence which is apparently increasing.""" num = pageUrl.split('/')[-1] ext = imageUrl.rsplit('.', 1)[1] return "thethinhline-%s.%s" % (num, ext)
[ "def", "namer", "(", "cls", ",", "imageUrl", ",", "pageUrl", ")", ":", "num", "=", "pageUrl", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "ext", "=", "imageUrl", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "1", "]", "return", "\"thethinhline-%s.%s\"", "%", "(", "num", ",", "ext", ")" ]
Use page URL sequence which is apparently increasing.
[ "Use", "page", "URL", "sequence", "which", "is", "apparently", "increasing", "." ]
python
train
45
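The namer logic is a pure string transformation, so it can be exercised standalone; the URLs below are made up for illustration.

def namer(image_url, page_url):
    # Page URLs end with an increasing number; the extension comes from the image file
    num = page_url.split('/')[-1]
    ext = image_url.rsplit('.', 1)[1]
    return "thethinhline-%s.%s" % (num, ext)

print(namer("http://example.com/comics/strip.png", "http://example.com/archive/482"))
# -> thethinhline-482.png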
icometrix/dicom2nifti
dicom2nifti/common.py
https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L478-L492
def write_bvec_file(bvecs, bvec_file): """ Write an array of bvecs to a bvec file :param bvecs: array with the vectors :param bvec_file: filepath to write to """ if bvec_file is None: return logger.info('Saving BVEC file: %s' % bvec_file) with open(bvec_file, 'w') as text_file: # Map each direction to a string, join them using a space and write to the file text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 0]))) text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 1]))) text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 2])))
[ "def", "write_bvec_file", "(", "bvecs", ",", "bvec_file", ")", ":", "if", "bvec_file", "is", "None", ":", "return", "logger", ".", "info", "(", "'Saving BVEC file: %s'", "%", "bvec_file", ")", "with", "open", "(", "bvec_file", ",", "'w'", ")", "as", "text_file", ":", "# Map a dicection to string join them using a space and write to the file", "text_file", ".", "write", "(", "'%s\\n'", "%", "' '", ".", "join", "(", "map", "(", "str", ",", "bvecs", "[", ":", ",", "0", "]", ")", ")", ")", "text_file", ".", "write", "(", "'%s\\n'", "%", "' '", ".", "join", "(", "map", "(", "str", ",", "bvecs", "[", ":", ",", "1", "]", ")", ")", ")", "text_file", ".", "write", "(", "'%s\\n'", "%", "' '", ".", "join", "(", "map", "(", "str", ",", "bvecs", "[", ":", ",", "2", "]", ")", ")", ")" ]
Write an array of bvecs to a bvec file :param bvecs: array with the vectors :param bvec_file: filepath to write to
[ "Write", "an", "array", "of", "bvecs", "to", "a", "bvec", "file" ]
python
train
39
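A usage sketch, assuming bvecs is an Nx3 numpy array of gradient directions; the file name is illustrative. The function writes one component per line: all x values, then all y, then all z.

import numpy as np

bvecs = np.array([[1.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0],
                  [0.0, 0.0, 1.0]])
with open('example.bvec', 'w') as text_file:
    # Same layout as write_bvec_file: line k holds component k of every vector
    for component in range(3):
        text_file.write('%s\n' % ' '.join(map(str, bvecs[:, component])))
print(open('example.bvec').read())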
neighbordog/deviantart
deviantart/api.py
https://github.com/neighbordog/deviantart/blob/5612f1d5e2139a48c9d793d7fd19cde7e162d7b1/deviantart/api.py#L867-L888
def get_users(self, usernames): """Fetch user info for given usernames :param usernames: The usernames you want metadata for (max. 50) """ if self.standard_grant_type != "authorization_code": raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.") response = self._req('/user/whois', post_data={ "usernames":usernames }) users = [] for item in response['results']: u = User() u.from_dict(item) users.append(u) return users
[ "def", "get_users", "(", "self", ",", "usernames", ")", ":", "if", "self", ".", "standard_grant_type", "is", "not", "\"authorization_code\"", ":", "raise", "DeviantartError", "(", "\"Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.\"", ")", "response", "=", "self", ".", "_req", "(", "'/user/whois'", ",", "post_data", "=", "{", "\"usernames\"", ":", "usernames", "}", ")", "users", "=", "[", "]", "for", "item", "in", "response", "[", "'results'", "]", ":", "u", "=", "User", "(", ")", "u", ".", "from_dict", "(", "item", ")", "users", ".", "append", "(", "u", ")", "return", "users" ]
Fetch user info for given usernames :param usernames: The usernames you want metadata for (max. 50)
[ "Fetch", "user", "info", "for", "given", "usernames" ]
python
train
28.045455
CybOXProject/mixbox
mixbox/datautils.py
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/datautils.py#L83-L104
def needkwargs(*argnames): """Function decorator which checks that the decorated function is called with a set of required kwargs. Args: *argnames: String keyword argument names. Raises: ValueError: If a required kwarg is missing in the decorated function call. """ required = set(argnames) def decorator(func): def inner(*args, **kwargs): missing = required - set(kwargs) if missing: err = "%s kwargs are missing." % list(missing) raise ValueError(err) return func(*args, **kwargs) return inner return decorator
[ "def", "needkwargs", "(", "*", "argnames", ")", ":", "required", "=", "set", "(", "argnames", ")", "def", "decorator", "(", "func", ")", ":", "def", "inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "missing", "=", "required", "-", "set", "(", "kwargs", ")", "if", "missing", ":", "err", "=", "\"%s kwargs are missing.\"", "%", "list", "(", "missing", ")", "raise", "ValueError", "(", "err", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "inner", "return", "decorator" ]
Function decorator which checks that the decorated function is called with a set of required kwargs. Args: *argnames: String keyword argument names. Raises: ValueError: If a required kwarg is missing in the decorated function call.
[ "Function", "decorator", "which", "checks", "that", "the", "decorated", "function", "is", "called", "with", "a", "set", "of", "required", "kwargs", "." ]
python
train
29
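Usage sketch, assuming needkwargs as defined above; the decorated function and its arguments are illustrative.

@needkwargs("host", "port")
def connect(**kwargs):
    return "%s:%s" % (kwargs["host"], kwargs["port"])

print(connect(host="localhost", port=8080))  # -> localhost:8080
connect(host="localhost")  # raises ValueError: ['port'] kwargs are missing.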
blockadeio/analyst_toolbench
blockade/api.py
https://github.com/blockadeio/analyst_toolbench/blob/159b6f8cf8a91c5ff050f1579636ea90ab269863/blockade/api.py#L70-L88
def _endpoint(self, endpoint, action, *url_args): """Return the URL for the action. :param str endpoint: The controller :param str action: The action provided by the controller :param url_args: Additional endpoints(for endpoints that take part of the url as option) :return: Full URL for the requested action """ args = (self.api_base, endpoint, action) if action == '': args = (self.api_base, endpoint) api_url = "/".join(args) if url_args: if len(url_args) == 1: api_url += "/" + url_args[0] else: api_url += "/".join(url_args) return api_url
[ "def", "_endpoint", "(", "self", ",", "endpoint", ",", "action", ",", "*", "url_args", ")", ":", "args", "=", "(", "self", ".", "api_base", ",", "endpoint", ",", "action", ")", "if", "action", "==", "''", ":", "args", "=", "(", "self", ".", "api_base", ",", "endpoint", ")", "api_url", "=", "\"/\"", ".", "join", "(", "args", ")", "if", "url_args", ":", "if", "len", "(", "url_args", ")", "==", "1", ":", "api_url", "+=", "\"/\"", "+", "url_args", "[", "0", "]", "else", ":", "api_url", "+=", "\"/\"", ".", "join", "(", "url_args", ")", "return", "api_url" ]
Return the URL for the action. :param str endpoint: The controller :param str action: The action provided by the controller :param url_args: Additional endpoints(for endpoints that take part of the url as option) :return: Full URL for the requested action
[ "Return", "the", "URL", "for", "the", "action", "." ]
python
train
37.263158
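A standalone sketch of the same URL assembly with hypothetical values; note that for two or more url_args the original appends them without a leading slash, so only the single-argument path is exercised here.

def build_endpoint(api_base, endpoint, action, *url_args):
    # Mirrors _endpoint: base/controller/action, plus an optional trailing segment
    args = (api_base, endpoint, action)
    if action == '':
        args = (api_base, endpoint)
    api_url = "/".join(args)
    if url_args and len(url_args) == 1:
        api_url += "/" + url_args[0]
    return api_url

print(build_endpoint("https://api.example.com", "events", "get", "abc123"))
# -> https://api.example.com/events/get/abc123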
ska-sa/katcp-python
katcp/resource_client.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/resource_client.py#L1413-L1416
def until_any_child_in_state(self, state, timeout=None): """Return a tornado Future; resolves when any client is in specified state""" return until_any(*[r.until_state(state) for r in dict.values(self.children)], timeout=timeout)
[ "def", "until_any_child_in_state", "(", "self", ",", "state", ",", "timeout", "=", "None", ")", ":", "return", "until_any", "(", "*", "[", "r", ".", "until_state", "(", "state", ")", "for", "r", "in", "dict", ".", "values", "(", "self", ".", "children", ")", "]", ",", "timeout", "=", "timeout", ")" ]
Return a tornado Future; resolves when any client is in specified state
[ "Return", "a", "tornado", "Future", ";", "resolves", "when", "any", "client", "is", "in", "specified", "state" ]
python
train
66.75
taborlab/FlowCal
FlowCal/mef.py
https://github.com/taborlab/FlowCal/blob/031a7af82acb1d46879a8e384a1a00f27f0bdc7a/FlowCal/mef.py#L36-L213
def clustering_gmm(data, n_clusters, tol=1e-7, min_covar=None, scale='logicle'): """ Find clusters in an array using a Gaussian Mixture Model. Before clustering, `data` can be automatically rescaled as specified by the `scale` argument. Parameters ---------- data : FCSData or array_like Data to cluster. n_clusters : int Number of clusters to find. tol : float, optional Tolerance for convergence. Directly passed to either ``GaussianMixture`` or ``GMM``, depending on ``scikit-learn``'s version. min_covar : float, optional The minimum trace that the initial covariance matrix will have. If ``scikit-learn``'s version is older than 0.18, `min_covar` is also passed directly to ``GMM``. scale : str, optional Rescaling applied to `data` before performing clustering. Can be either ``linear`` (no rescaling), ``log``, or ``logicle``. Returns ------- labels : array Nx1 array with labels for each element in `data`, assigning ``data[i]`` to cluster ``labels[i]``. Notes ----- A Gaussian Mixture Model finds clusters by fitting a linear combination of `n_clusters` Gaussian probability density functions (pdf) to `data` using Expectation Maximization (EM). This method can be fairly sensitive to the initial parameter choice. To generate a reasonable set of initial conditions, `clustering_gmm` first divides all points in `data` into `n_clusters` groups of the same size based on their Euclidean distance to the minimum value. Then, for each group, the 50% samples farther away from the mean are discarded. The mean and covariance are calculated from the remaining samples of each group, and used as initial conditions for the GMM EM algorithm. `clustering_gmm` internally uses a `GaussianMixture` object from the ``scikit-learn`` library (``GMM`` if ``scikit-learn``'s version is lower than 0.18), with full covariance matrices for each cluster. For more information, consult ``scikit-learn``'s documentation. """ # Initialize min_covar parameter # Parameter is initialized differently depending on scikit's version if min_covar is None: if packaging.version.parse(sklearn.__version__) \ >= packaging.version.parse('0.18'): min_covar = 1e-3 else: min_covar = 5e-5 # Copy events before rescaling data = data.copy() # Apply rescaling if scale=='linear': # No rescaling pass elif scale=='log': # Logarithm of zero and negatives is undefined. Therefore, saturate # any non-positives to a small positive value. # The machine epsilon `eps` is the smallest number such that # `1.0 + eps != eps`. For a 64-bit floating point, `eps ~= 1e-15`. data[data < 1e-15] = 1e-15 # Rescale data = np.log10(data) elif scale=='logicle': # Use the logicle transform class in the plot module, and transform # data one channel at a time. for ch in range(data.shape[1]): # We need a transformation from "data value" to "display scale" # units. To do so, we use an inverse logicle transformation. t = FlowCal.plot._LogicleTransform(data=data, channel=ch).inverted() data[:,ch] = t.transform_non_affine(data[:,ch], mask_out_of_range=False) else: raise ValueError("scale {} not supported".format(scale)) ### # Parameter initialization ### weights = np.tile(1.0 / n_clusters, n_clusters) means = [] covars = [] # Calculate distance to minimum value. Then, sort based on this distance. dist = np.sum((data - np.min(data, axis=0))**2., axis=1) sorted_idx = np.argsort(dist) # Expected number of elements per cluster n_per_cluster = data.shape[0]/float(n_clusters) # Get means and covariances per cluster # We will just use a fraction of ``1 - discard_frac`` of the data. # Data at the edges that actually corresponds to another cluster can # really mess up the final result. discard_frac = 0.5 for i in range(n_clusters): il = int((i + discard_frac/2)*n_per_cluster) ih = int((i + 1 - discard_frac/2)*n_per_cluster) sorted_idx_cluster = sorted_idx[il:ih] data_cluster = data[sorted_idx_cluster] # Calculate means and covariances means.append(np.mean(data_cluster, axis=0)) if data.shape[1] == 1: cov = np.cov(data_cluster.T).reshape(1,1) else: cov = np.cov(data_cluster.T) # Add small number to diagonal to avoid near-singular covariances cov += np.eye(data.shape[1]) * min_covar covars.append(cov) # Means should be an array means = np.array(means) ### # Run Gaussian Mixture Model Clustering ### if packaging.version.parse(sklearn.__version__) \ >= packaging.version.parse('0.18'): # GaussianMixture uses precisions, the inverse of covariances. # To get the inverse, we solve the linear equation C*P = I. We also # use the fact that C is positive definite. precisions = [scipy.linalg.solve(c, np.eye(c.shape[0]), assume_a='pos') for c in covars] precisions = np.array(precisions) # Initialize GaussianMixture object gmm = GaussianMixture(n_components=n_clusters, tol=tol, covariance_type='full', weights_init=weights, means_init=means, precisions_init=precisions, max_iter=500) else: # Initialize GMM object gmm = GMM(n_components=n_clusters, tol=tol, min_covar=min_covar, covariance_type='full', params='mc', init_params='') # Set initial parameters gmm.weight_ = weights gmm.means_ = means gmm.covars_ = covars # Fit gmm.fit(data) # Get labels by sampling from the responsibilities # This avoids the complete elimination of a cluster if two or more # clusters have very similar means. resp = gmm.predict_proba(data) labels = [np.random.choice(range(n_clusters), p=ri) for ri in resp] return labels
[ "def", "clustering_gmm", "(", "data", ",", "n_clusters", ",", "tol", "=", "1e-7", ",", "min_covar", "=", "None", ",", "scale", "=", "'logicle'", ")", ":", "# Initialize min_covar parameter", "# Parameter is initialized differently depending on scikit's version", "if", "min_covar", "is", "None", ":", "if", "packaging", ".", "version", ".", "parse", "(", "sklearn", ".", "__version__", ")", ">=", "packaging", ".", "version", ".", "parse", "(", "'0.18'", ")", ":", "min_covar", "=", "1e-3", "else", ":", "min_covar", "=", "5e-5", "# Copy events before rescaling", "data", "=", "data", ".", "copy", "(", ")", "# Apply rescaling", "if", "scale", "==", "'linear'", ":", "# No rescaling", "pass", "elif", "scale", "==", "'log'", ":", "# Logarithm of zero and negatives is undefined. Therefore, saturate", "# any non-positives to a small positive value.", "# The machine epsilon `eps` is the smallest number such that", "# `1.0 + eps != eps`. For a 64-bit floating point, `eps ~= 1e-15`.", "data", "[", "data", "<", "1e-15", "]", "=", "1e-15", "# Rescale", "data", "=", "np", ".", "log10", "(", "data", ")", "elif", "scale", "==", "'logicle'", ":", "# Use the logicle transform class in the plot module, and transform", "# data one channel at a time.", "for", "ch", "in", "range", "(", "data", ".", "shape", "[", "1", "]", ")", ":", "# We need a transformation from \"data value\" to \"display scale\"", "# units. To do so, we use an inverse logicle transformation.", "t", "=", "FlowCal", ".", "plot", ".", "_LogicleTransform", "(", "data", "=", "data", ",", "channel", "=", "ch", ")", ".", "inverted", "(", ")", "data", "[", ":", ",", "ch", "]", "=", "t", ".", "transform_non_affine", "(", "data", "[", ":", ",", "ch", "]", ",", "mask_out_of_range", "=", "False", ")", "else", ":", "raise", "ValueError", "(", "\"scale {} not supported\"", ".", "format", "(", "scale", ")", ")", "###", "# Parameter initialization", "###", "weights", "=", "np", ".", "tile", "(", "1.0", "/", "n_clusters", ",", "n_clusters", ")", "means", "=", "[", "]", "covars", "=", "[", "]", "# Calculate distance to minimum value. Then, sort based on this distance.", "dist", "=", "np", ".", "sum", "(", "(", "data", "-", "np", ".", "min", "(", "data", ",", "axis", "=", "0", ")", ")", "**", "2.", ",", "axis", "=", "1", ")", "sorted_idx", "=", "np", ".", "argsort", "(", "dist", ")", "# Expected number of elements per cluster", "n_per_cluster", "=", "data", ".", "shape", "[", "0", "]", "/", "float", "(", "n_clusters", ")", "# Get means and covariances per cluster", "# We will just use a fraction of ``1 - discard_frac`` of the data.", "# Data at the edges that actually corresponds to another cluster can", "# really mess up the final result.", "discard_frac", "=", "0.5", "for", "i", "in", "range", "(", "n_clusters", ")", ":", "il", "=", "int", "(", "(", "i", "+", "discard_frac", "/", "2", ")", "*", "n_per_cluster", ")", "ih", "=", "int", "(", "(", "i", "+", "1", "-", "discard_frac", "/", "2", ")", "*", "n_per_cluster", ")", "sorted_idx_cluster", "=", "sorted_idx", "[", "il", ":", "ih", "]", "data_cluster", "=", "data", "[", "sorted_idx_cluster", "]", "# Calculate means and covariances", "means", ".", "append", "(", "np", ".", "mean", "(", "data_cluster", ",", "axis", "=", "0", ")", ")", "if", "data", ".", "shape", "[", "1", "]", "==", "1", ":", "cov", "=", "np", ".", "cov", "(", "data_cluster", ".", "T", ")", ".", "reshape", "(", "1", ",", "1", ")", "else", ":", "cov", "=", "np", ".", "cov", "(", "data_cluster", ".", "T", ")", "# Add small number to diagonal to avoid near-singular covariances", "cov", "+=", "np", ".", "eye", "(", "data", ".", "shape", "[", "1", "]", ")", "*", "min_covar", "covars", ".", "append", "(", "cov", ")", "# Means should be an array", "means", "=", "np", ".", "array", "(", "means", ")", "###", "# Run Gaussian Mixture Model Clustering", "###", "if", "packaging", ".", "version", ".", "parse", "(", "sklearn", ".", "__version__", ")", ">=", "packaging", ".", "version", ".", "parse", "(", "'0.18'", ")", ":", "# GaussianMixture uses precisions, the inverse of covariances.", "# To get the inverse, we solve the linear equation C*P = I. We also", "# use the fact that C is positive definite.", "precisions", "=", "[", "scipy", ".", "linalg", ".", "solve", "(", "c", ",", "np", ".", "eye", "(", "c", ".", "shape", "[", "0", "]", ")", ",", "assume_a", "=", "'pos'", ")", "for", "c", "in", "covars", "]", "precisions", "=", "np", ".", "array", "(", "precisions", ")", "# Initialize GaussianMixture object", "gmm", "=", "GaussianMixture", "(", "n_components", "=", "n_clusters", ",", "tol", "=", "tol", ",", "covariance_type", "=", "'full'", ",", "weights_init", "=", "weights", ",", "means_init", "=", "means", ",", "precisions_init", "=", "precisions", ",", "max_iter", "=", "500", ")", "else", ":", "# Initialize GMM object", "gmm", "=", "GMM", "(", "n_components", "=", "n_clusters", ",", "tol", "=", "tol", ",", "min_covar", "=", "min_covar", ",", "covariance_type", "=", "'full'", ",", "params", "=", "'mc'", ",", "init_params", "=", "''", ")", "# Set initial parameters", "gmm", ".", "weight_", "=", "weights", "gmm", ".", "means_", "=", "means", "gmm", ".", "covars_", "=", "covars", "# Fit ", "gmm", ".", "fit", "(", "data", ")", "# Get labels by sampling from the responsibilities", "# This avoids the complete elimination of a cluster if two or more ", "# clusters have very similar means.", "resp", "=", "gmm", ".", "predict_proba", "(", "data", ")", "labels", "=", "[", "np", ".", "random", ".", "choice", "(", "range", "(", "n_clusters", ")", ",", "p", "=", "ri", ")", "for", "ri", "in", "resp", "]", "return", "labels" ]
Find clusters in an array using a Gaussian Mixture Model. Before clustering, `data` can be automatically rescaled as specified by the `scale` argument. Parameters ---------- data : FCSData or array_like Data to cluster. n_clusters : int Number of clusters to find. tol : float, optional Tolerance for convergence. Directly passed to either ``GaussianMixture`` or ``GMM``, depending on ``scikit-learn``'s version. min_covar : float, optional The minimum trace that the initial covariance matrix will have. If ``scikit-learn``'s version is older than 0.18, `min_covar` is also passed directly to ``GMM``. scale : str, optional Rescaling applied to `data` before performing clustering. Can be either ``linear`` (no rescaling), ``log``, or ``logicle``. Returns ------- labels : array Nx1 array with labels for each element in `data`, assigning ``data[i]`` to cluster ``labels[i]``. Notes ----- A Gaussian Mixture Model finds clusters by fitting a linear combination of `n_clusters` Gaussian probability density functions (pdf) to `data` using Expectation Maximization (EM). This method can be fairly sensitive to the initial parameter choice. To generate a reasonable set of initial conditions, `clustering_gmm` first divides all points in `data` into `n_clusters` groups of the same size based on their Euclidean distance to the minimum value. Then, for each group, the 50% samples farther away from the mean are discarded. The mean and covariance are calculated from the remaining samples of each group, and used as initial conditions for the GMM EM algorithm. `clustering_gmm` internally uses a `GaussianMixture` object from the ``scikit-learn`` library (``GMM`` if ``scikit-learn``'s version is lower than 0.18), with full covariance matrices for each cluster. For more information, consult ``scikit-learn``'s documentation.
[ "Find", "clusters", "in", "an", "array", "using", "a", "Gaussian", "Mixture", "Model", "." ]
python
train
36.764045
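A self-contained sketch of the core fit on synthetic one-dimensional data, assuming scikit-learn >= 0.18; the rescaling and the distance-based initialization described above are omitted for brevity.

import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(0)
data = np.concatenate([rng.normal(2.0, 0.3, 500),
                       rng.normal(5.0, 0.4, 500)]).reshape(-1, 1)

gmm = GaussianMixture(n_components=2, covariance_type='full', tol=1e-7).fit(data)
# Sample labels from the responsibilities, as clustering_gmm does, instead of
# taking the argmax; this keeps near-degenerate clusters populated
resp = gmm.predict_proba(data)
labels = np.array([rng.choice(2, p=ri) for ri in resp])
print(np.bincount(labels))  # roughly 500 / 500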
ucsb-cs-education/hairball
hairball/plugins/checks.py
https://github.com/ucsb-cs-education/hairball/blob/c6da8971f8a34e88ce401d36b51431715e1dff5b/hairball/plugins/checks.py#L120-L127
def get_receive(self, script_list): """Return a dict of received events contained in script_list.""" events = defaultdict(set) for script in script_list: if self.script_start_type(script) == self.HAT_WHEN_I_RECEIVE: event = script.blocks[0].args[0].lower() events[event].add(script) return events
[ "def", "get_receive", "(", "self", ",", "script_list", ")", ":", "events", "=", "defaultdict", "(", "set", ")", "for", "script", "in", "script_list", ":", "if", "self", ".", "script_start_type", "(", "script", ")", "==", "self", ".", "HAT_WHEN_I_RECEIVE", ":", "event", "=", "script", ".", "blocks", "[", "0", "]", ".", "args", "[", "0", "]", ".", "lower", "(", ")", "events", "[", "event", "]", ".", "add", "(", "script", ")", "return", "events" ]
Return a dict of received events contained in script_list.
[ "Return", "a", "list", "of", "received", "events", "contained", "in", "script_list", "." ]
python
train
45.625
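The grouping idiom in isolation, with hypothetical script names and events:

from collections import defaultdict

events = defaultdict(set)
for script, event in [("s1", "Start"), ("s2", "start"), ("s3", "GameOver")]:
    events[event.lower()].add(script)  # case-insensitive, as in get_receive
print(dict(events))  # {'start': {'s1', 's2'}, 'gameover': {'s3'}}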
noobermin/pys
pys/__init__.py
https://github.com/noobermin/pys/blob/e01b74210c65eb96d019bb42e0a3c9e6676da943/pys/__init__.py#L10-L23
def conv(arg,default=None,func=None): ''' essentially, the generalization of arg if arg else default or func(arg) if arg else default ''' if func: return func(arg) if arg else default else: return arg if arg else default
[ "def", "conv", "(", "arg", ",", "default", "=", "None", ",", "func", "=", "None", ")", ":", "if", "func", ":", "return", "func", "(", "arg", ")", "if", "arg", "else", "default", "else", ":", "return", "arg", "if", "arg", "else", "default" ]
essentially, the generalization of arg if arg else default or func(arg) if arg else default
[ "essentially", "the", "generalization", "of", "arg", "if", "arg", "else", "default" ]
python
train
18.857143
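Usage sketch, assuming conv as defined above:

print(conv("5", default=0, func=int))   # -> 5, func applied to a truthy arg
print(conv(None, default=0, func=int))  # -> 0, falsy arg falls back to default
print(conv("text", default="n/a"))      # -> 'text', no func so arg is returned as-is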
T-002/pycast
pycast/common/matrix.py
https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/common/matrix.py#L642-L729
def householder(self): """Return Matrices u,b,v with self = ubv and b is in bidiagonal form The algorithm uses householder transformations. :return tuple (u,b,v): A tuple with the Matrix u, b and v. and self = ubv (except some rounding errors) u is a unitary matrix b is a bidiagonal matrix. v is a unitary matrix. :note: Currently the algorithm only works for squared matrices :todo: Make sure, that the bidiagonal matrix is 0.0 except for the bidiagonal. Due to rounding errors, this is currently not ensured """ # copy instance to transform it to bidiagonal form. bidiagMatrix = Matrix.from_two_dim_array(self.get_width(), self.get_height(), self.matrix) # build identity matrix, which is used to calculate householder transformations identityMatrixRow = Matrix(self.get_height(), self.get_height()) for i in xrange(self.get_height()): identityMatrixRow.set_value(i, i, 1.0) identityMatrixCol = Matrix(self.get_width(), self.get_width()) for i in xrange(self.get_width()): identityMatrixCol.set_value(i, i, 1.0) # zero out the k'th column and row for k in xrange(self.get_width() - 1): # vector with the values of the k'th column (first k-1 rows are 0) x = Vector(self.get_height()) y = Vector(self.get_height()) if k > 0: x.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1)) y.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1)) s = 0.0 for i in xrange(k, self.get_height()): val = bidiagMatrix.get_value(k, i) x.set_value(0, i, val) s += (val ** 2) s = sqrt(s) # y must have same length as x y.set_value(0, k, s) tmp = x - y norm = sqrt(sum(i[0] ** 2 for i in tmp.get_array())) # calculate w = (x-y)/(|x-y|) w = tmp / norm # uk is the k'th householder matrix for the column uk = identityMatrixRow - 2 * (w * w.transform()) bidiagMatrix = uk * bidiagMatrix if k == 0: # set u in first iteration. u = uk else: u = u * uk # zero out the row if k < self.get_width() - 2: x = Vector(self.get_width()) y = Vector(self.get_width()) x.set_value(0, k, bidiagMatrix.get_value(k, k)) y.set_value(0, k, bidiagMatrix.get_value(k, k)) s = 0.0 for i in xrange(k + 1, bidiagMatrix.get_width()): val = bidiagMatrix.get_value(i, k) x.set_value(0, i, val) s += (val ** 2) # length of vector x ignoring the k'th value s = sqrt(s) # y must have same length as x, since k'th value is equal # set k+1 value to s y.set_value(0, k + 1, s) tmp = x - y norm = sqrt(sum(i[0] ** 2 for i in tmp.get_array())) w = tmp / norm # vk is the k'th householder matrix for the row vk = identityMatrixCol - (2 * (w * w.transform())) bidiagMatrix = bidiagMatrix * vk if k == 0: # set v in first iteration v = vk else: v = vk * v return (u, bidiagMatrix, v)
[ "def", "householder", "(", "self", ")", ":", "# copy instance to transform it to bidiagonal form.", "bidiagMatrix", "=", "Matrix", ".", "from_two_dim_array", "(", "self", ".", "get_width", "(", ")", ",", "self", ".", "get_height", "(", ")", ",", "self", ".", "matrix", ")", "# build identity matrix, which is used to calculate householder transformations", "identityMatrixRow", "=", "Matrix", "(", "self", ".", "get_height", "(", ")", ",", "self", ".", "get_height", "(", ")", ")", "for", "i", "in", "xrange", "(", "self", ".", "get_height", "(", ")", ")", ":", "identityMatrixRow", ".", "set_value", "(", "i", ",", "i", ",", "1.0", ")", "identityMatrixCol", "=", "Matrix", "(", "self", ".", "get_width", "(", ")", ",", "self", ".", "get_width", "(", ")", ")", "for", "i", "in", "xrange", "(", "self", ".", "get_width", "(", ")", ")", ":", "identityMatrixCol", ".", "set_value", "(", "i", ",", "i", ",", "1.0", ")", "# zero out the k'th column and row", "for", "k", "in", "xrange", "(", "self", ".", "get_width", "(", ")", "-", "1", ")", ":", "# vector with the values of the k'th column (first k-1 rows are 0)", "x", "=", "Vector", "(", "self", ".", "get_height", "(", ")", ")", "y", "=", "Vector", "(", "self", ".", "get_height", "(", ")", ")", "if", "k", ">", "0", ":", "x", ".", "set_value", "(", "0", ",", "k", "-", "1", ",", "bidiagMatrix", ".", "get_value", "(", "k", ",", "k", "-", "1", ")", ")", "y", ".", "set_value", "(", "0", ",", "k", "-", "1", ",", "bidiagMatrix", ".", "get_value", "(", "k", ",", "k", "-", "1", ")", ")", "s", "=", "0.0", "for", "i", "in", "xrange", "(", "k", ",", "self", ".", "get_height", "(", ")", ")", ":", "val", "=", "bidiagMatrix", ".", "get_value", "(", "k", ",", "i", ")", "x", ".", "set_value", "(", "0", ",", "i", ",", "val", ")", "s", "+=", "(", "val", "**", "2", ")", "s", "=", "sqrt", "(", "s", ")", "# y must have same length as x", "y", ".", "set_value", "(", "0", ",", "k", ",", "s", ")", "tmp", "=", "x", "-", "y", "norm", "=", "sqrt", "(", "sum", "(", "i", "[", "0", "]", "**", "2", "for", "i", "in", "tmp", ".", "get_array", "(", ")", ")", ")", "# calculate w = (x-y)/(|x-y|)", "w", "=", "tmp", "/", "norm", "# uk is the k'th householder matrix for the column", "uk", "=", "identityMatrixRow", "-", "2", "*", "(", "w", "*", "w", ".", "transform", "(", ")", ")", "bidiagMatrix", "=", "uk", "*", "bidiagMatrix", "if", "k", "==", "0", ":", "# set u in first iteration.", "u", "=", "uk", "else", ":", "u", "=", "u", "*", "uk", "# zero out the the row", "if", "k", "<", "self", ".", "get_width", "(", ")", "-", "2", ":", "x", "=", "Vector", "(", "self", ".", "get_width", "(", ")", ")", "y", "=", "Vector", "(", "self", ".", "get_width", "(", ")", ")", "x", ".", "set_value", "(", "0", ",", "k", ",", "bidiagMatrix", ".", "get_value", "(", "k", ",", "k", ")", ")", "y", ".", "set_value", "(", "0", ",", "k", ",", "bidiagMatrix", ".", "get_value", "(", "k", ",", "k", ")", ")", "s", "=", "0.0", "for", "i", "in", "xrange", "(", "k", "+", "1", ",", "bidiagMatrix", ".", "get_width", "(", ")", ")", ":", "val", "=", "bidiagMatrix", ".", "get_value", "(", "i", ",", "k", ")", "x", ".", "set_value", "(", "0", ",", "i", ",", "val", ")", "s", "+=", "(", "val", "**", "2", ")", "# length of vector x ignoring the k'th value", "s", "=", "sqrt", "(", "s", ")", "# y must have same length as x, since k'th value is equal", "# set k+1 value to s", "y", ".", "set_value", "(", "0", ",", "k", "+", "1", ",", "s", ")", "tmp", "=", "x", "-", "y", "norm", "=", "sqrt", "(", "sum", "(", "i", "[", "0", "]", "**", "2", 
"for", "i", "in", "tmp", ".", "get_array", "(", ")", ")", ")", "w", "=", "tmp", "/", "norm", "# vk is the k'th householder matrix for the row", "vk", "=", "identityMatrixCol", "-", "(", "2", "*", "(", "w", "*", "w", ".", "transform", "(", ")", ")", ")", "bidiagMatrix", "=", "bidiagMatrix", "*", "vk", "if", "k", "==", "0", ":", "# set v in first iteration", "v", "=", "vk", "else", ":", "v", "=", "vk", "*", "v", "return", "(", "u", ",", "bidiagMatrix", ",", "v", ")" ]
Return Matrices u,b,v with self = ubv and b is in bidiagonal form The algorithm uses householder transformations. :return tuple (u,b,v): A tuple with the Matrix u, b and v. and self = ubv (except some rounding errors) u is a unitary matrix b is a bidiagonal matrix. v is a unitary matrix. :note: Currently the algorithm only works for squared matrices :todo: Make sure, that the bidiagonal matrix is 0.0 except for the bidiagonal. Due to rounding errors, this is currently not ensured
[ "Return", "Matrices", "u", "b", "v", "with", "self", "=", "ubv", "and", "b", "is", "in", "bidiagonal", "form" ]
python
train
40.602273
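A numpy sketch of the single reflection step the method repeats: build w = (x - y)/|x - y| for a target y of equal length, and the reflector I - 2ww^T sends x onto y, zeroing the entries below the diagonal. The vector values are illustrative.

import numpy as np

x = np.array([3.0, 1.0, 2.0])
y = np.zeros_like(x)
y[0] = np.sqrt(np.sum(x ** 2))  # y must have the same length as x

w = (x - y) / np.linalg.norm(x - y)
H = np.eye(3) - 2.0 * np.outer(w, w)  # Householder reflector
print(H @ x)  # -> [3.7416..., 0, 0] up to rounding: the column is zeroed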
Azure/azure-sdk-for-python
azure-mgmt-resource/azure/mgmt/resource/resources/resource_management_client.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-mgmt-resource/azure/mgmt/resource/resources/resource_management_client.py#L96-L120
def models(cls, api_version=DEFAULT_API_VERSION): """Module depends on the API version: * 2016-02-01: :mod:`v2016_02_01.models<azure.mgmt.resource.resources.v2016_02_01.models>` * 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.resource.resources.v2016_09_01.models>` * 2017-05-10: :mod:`v2017_05_10.models<azure.mgmt.resource.resources.v2017_05_10.models>` * 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.resource.resources.v2018_02_01.models>` * 2018-05-01: :mod:`v2018_05_01.models<azure.mgmt.resource.resources.v2018_05_01.models>` """ if api_version == '2016-02-01': from .v2016_02_01 import models return models elif api_version == '2016-09-01': from .v2016_09_01 import models return models elif api_version == '2017-05-10': from .v2017_05_10 import models return models elif api_version == '2018-02-01': from .v2018_02_01 import models return models elif api_version == '2018-05-01': from .v2018_05_01 import models return models raise NotImplementedError("APIVersion {} is not available".format(api_version))
[ "def", "models", "(", "cls", ",", "api_version", "=", "DEFAULT_API_VERSION", ")", ":", "if", "api_version", "==", "'2016-02-01'", ":", "from", ".", "v2016_02_01", "import", "models", "return", "models", "elif", "api_version", "==", "'2016-09-01'", ":", "from", ".", "v2016_09_01", "import", "models", "return", "models", "elif", "api_version", "==", "'2017-05-10'", ":", "from", ".", "v2017_05_10", "import", "models", "return", "models", "elif", "api_version", "==", "'2018-02-01'", ":", "from", ".", "v2018_02_01", "import", "models", "return", "models", "elif", "api_version", "==", "'2018-05-01'", ":", "from", ".", "v2018_05_01", "import", "models", "return", "models", "raise", "NotImplementedError", "(", "\"APIVersion {} is not available\"", ".", "format", "(", "api_version", ")", ")" ]
Module depends on the API version: * 2016-02-01: :mod:`v2016_02_01.models<azure.mgmt.resource.resources.v2016_02_01.models>` * 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.resource.resources.v2016_09_01.models>` * 2017-05-10: :mod:`v2017_05_10.models<azure.mgmt.resource.resources.v2017_05_10.models>` * 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.resource.resources.v2018_02_01.models>` * 2018-05-01: :mod:`v2018_05_01.models<azure.mgmt.resource.resources.v2018_05_01.models>`
[ "Module", "depends", "on", "the", "API", "version", ":" ]
python
test
49.4
orb-framework/orb
orb/core/schema.py
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/schema.py#L381-L412
def register(self, item): """ Registers a new orb object to this schema. This could be a column, index, or collector -- including a virtual object defined through the orb.virtual decorator. :param item: <variant> :return: """ if callable(item) and hasattr(item, '__orb__'): item = item.__orb__ key = item.name() model = self.__model # create class methods for indexes if isinstance(item, orb.Index): self.__indexes[key] = item item.setSchema(self) if model and not hasattr(model, key): setattr(model, key, classmethod(item)) # create instance methods for collectors elif isinstance(item, orb.Collector): self.__collectors[key] = item item.setSchema(self) # create instance methods for columns elif isinstance(item, orb.Column): self.__columns[key] = item item.setSchema(self)
[ "def", "register", "(", "self", ",", "item", ")", ":", "if", "callable", "(", "item", ")", "and", "hasattr", "(", "item", ",", "'__orb__'", ")", ":", "item", "=", "item", ".", "__orb__", "key", "=", "item", ".", "name", "(", ")", "model", "=", "self", ".", "__model", "# create class methods for indexes", "if", "isinstance", "(", "item", ",", "orb", ".", "Index", ")", ":", "self", ".", "__indexes", "[", "key", "]", "=", "item", "item", ".", "setSchema", "(", "self", ")", "if", "model", "and", "not", "hasattr", "(", "model", ",", "key", ")", ":", "setattr", "(", "model", ",", "key", ",", "classmethod", "(", "item", ")", ")", "# create instance methods for collectors", "elif", "isinstance", "(", "item", ",", "orb", ".", "Collector", ")", ":", "self", ".", "__collectors", "[", "key", "]", "=", "item", "item", ".", "setSchema", "(", "self", ")", "# create instance methods for columns", "elif", "isinstance", "(", "item", ",", "orb", ".", "Column", ")", ":", "self", ".", "__columns", "[", "key", "]", "=", "item", "item", ".", "setSchema", "(", "self", ")" ]
Registers a new orb object to this schema. This could be a column, index, or collector -- including a virtual object defined through the orb.virtual decorator. :param item: <variant> :return:
[ "Registers", "a", "new", "orb", "object", "to", "this", "schema", ".", "This", "could", "be", "a", "column", "index", "or", "collector", "--", "including", "a", "virtual", "object", "defined", "through", "the", "orb", ".", "virtual", "decorator", "." ]
python
train
30.75
TissueMAPS/TmClient
src/python/tmclient/api.py
https://github.com/TissueMAPS/TmClient/blob/6fb40622af19142cb5169a64b8c2965993a25ab1/src/python/tmclient/api.py#L1052-L1083
def get_microscope_files(self, plate_name, acquisition_name): '''Gets status and name of files that have been registered for upload. Parameters ---------- plate_name: str name of the parent plate acquisition_name: str name of the parent acquisition Returns ------- List[Dict[str, str]] names and status of uploaded files See also -------- :func:`tmserver.api.acquisition.get_microscope_image_files_information` :func:`tmserver.api.acquisition.get_microscope_metadata_file_information` :class:`tmlib.models.acquisition.Acquisition` :class:`tmlib.models.file.MicroscopeImageFile` :class:`tmlib.models.file.MicroscopeMetadataFile` ''' logger.info( 'get names of already uploaded files for experiment "%s", ' 'plate "%s" and acquisition "%s"', self.experiment_name, plate_name, acquisition_name ) acquisition_id = self._get_acquisition_id(plate_name, acquisition_name) image_files = self._get_image_files(acquisition_id) metadata_files = self._get_metadata_files(acquisition_id) return image_files + metadata_files
[ "def", "get_microscope_files", "(", "self", ",", "plate_name", ",", "acquisition_name", ")", ":", "logger", ".", "info", "(", "'get names of already uploaded files for experiment \"%s\", '", "'plate \"%s\" and acquisition \"%s\"'", ",", "self", ".", "experiment_name", ",", "plate_name", ",", "acquisition_name", ")", "acquisition_id", "=", "self", ".", "_get_acquisition_id", "(", "plate_name", ",", "acquisition_name", ")", "image_files", "=", "self", ".", "_get_image_files", "(", "acquisition_id", ")", "metadata_files", "=", "self", ".", "_get_metadata_files", "(", "acquisition_id", ")", "return", "image_files", "+", "metadata_files" ]
Gets status and name of files that have been registered for upload. Parameters ---------- plate_name: str name of the parent plate acquisition_name: str name of the parent acquisition Returns ------- List[Dict[str, str]] names and status of uploaded files See also -------- :func:`tmserver.api.acquisition.get_microscope_image_files_information` :func:`tmserver.api.acquisition.get_microscope_metadata_file_information` :class:`tmlib.models.acquisition.Acquisition` :class:`tmlib.models.file.MicroscopeImageFile` :class:`tmlib.models.file.MicroscopeMetadataFile`
[ "Gets", "status", "and", "name", "of", "files", "that", "have", "been", "registered", "for", "upload", "." ]
python
train
38.4375
DarkEnergySurvey/ugali
ugali/utils/skymap.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/skymap.py#L94-L149
def randomPositions(input, nside_pix, n=1): """ Generate n random positions within a full HEALPix mask of booleans, or a set of (lon, lat) coordinates. Parameters: ----------- input : (1) full HEALPix mask of booleans, or (2) a set of (lon, lat) coordinates for catalog objects that define the occupied pixels. nside_pix : nside_pix is meant to be at coarser resolution than the input mask or catalog object positions so that gaps from star holes, bleed trails, cosmic rays, etc. are filled in. Returns: -------- lon,lat,area : Return the longitude and latitude of the random positions (deg) and the total area (deg^2). """ input = np.array(input) if len(input.shape) == 1: if hp.npix2nside(len(input)) < nside_pix: logger.warning('Expected coarser resolution nside_pix in skymap.randomPositions') subpix = np.nonzero(input)[0] # All the valid pixels in the mask at the NSIDE for the input mask lon, lat = pix2ang(hp.npix2nside(len(input)), subpix) elif len(input.shape) == 2: lon, lat = input[0], input[1] # All catalog object positions else: logger.warning('Unexpected input dimensions for skymap.randomPositions') pix = surveyPixel(lon, lat, nside_pix) # Area with which the random points are thrown area = len(pix) * hp.nside2pixarea(nside_pix, degrees=True) # Create mask at the coarser resolution mask = np.tile(False, hp.nside2npix(nside_pix)) mask[pix] = True # Estimate the number of points that need to be thrown based off # coverage fraction of the HEALPix mask coverage_fraction = float(np.sum(mask)) / len(mask) n_throw = int(n / coverage_fraction) lon, lat = [], [] count = 0 while len(lon) < n: lon_throw = np.random.uniform(0., 360., n_throw) lat_throw = np.degrees(np.arcsin(np.random.uniform(-1., 1., n_throw))) pix_throw = ugali.utils.healpix.angToPix(nside_pix, lon_throw, lat_throw) cut = mask[pix_throw].astype(bool) lon = np.append(lon, lon_throw[cut]) lat = np.append(lat, lat_throw[cut]) count += 1 if count > 10: raise RuntimeError('Too many loops...') return lon[0:n], lat[0:n], area
[ "def", "randomPositions", "(", "input", ",", "nside_pix", ",", "n", "=", "1", ")", ":", "input", "=", "np", ".", "array", "(", "input", ")", "if", "len", "(", "input", ".", "shape", ")", "==", "1", ":", "if", "hp", ".", "npix2nside", "(", "len", "(", "input", ")", ")", "<", "nside_pix", ":", "logger", ".", "warning", "(", "'Expected coarser resolution nside_pix in skymap.randomPositions'", ")", "subpix", "=", "np", ".", "nonzero", "(", "input", ")", "[", "0", "]", "# All the valid pixels in the mask at the NSIDE for the input mask", "lon", ",", "lat", "=", "pix2ang", "(", "hp", ".", "npix2nside", "(", "len", "(", "input", ")", ")", ",", "subpix", ")", "elif", "len", "(", "input", ".", "shape", ")", "==", "2", ":", "lon", ",", "lat", "=", "input", "[", "0", "]", ",", "input", "[", "1", "]", "# All catalog object positions", "else", ":", "logger", ".", "warning", "(", "'Unexpected input dimensions for skymap.randomPositions'", ")", "pix", "=", "surveyPixel", "(", "lon", ",", "lat", ",", "nside_pix", ")", "# Area with which the random points are thrown", "area", "=", "len", "(", "pix", ")", "*", "hp", ".", "nside2pixarea", "(", "nside_pix", ",", "degrees", "=", "True", ")", "# Create mask at the coarser resolution", "mask", "=", "np", ".", "tile", "(", "False", ",", "hp", ".", "nside2npix", "(", "nside_pix", ")", ")", "mask", "[", "pix", "]", "=", "True", "# Estimate the number of points that need to be thrown based off", "# coverage fraction of the HEALPix mask", "coverage_fraction", "=", "float", "(", "np", ".", "sum", "(", "mask", ")", ")", "/", "len", "(", "mask", ")", "n_throw", "=", "int", "(", "n", "/", "coverage_fraction", ")", "lon", ",", "lat", "=", "[", "]", ",", "[", "]", "count", "=", "0", "while", "len", "(", "lon", ")", "<", "n", ":", "lon_throw", "=", "np", ".", "random", ".", "uniform", "(", "0.", ",", "360.", ",", "n_throw", ")", "lat_throw", "=", "np", ".", "degrees", "(", "np", ".", "arcsin", "(", "np", ".", "random", ".", "uniform", "(", "-", "1.", ",", "1.", ",", "n_throw", ")", ")", ")", "pix_throw", "=", "ugali", ".", "utils", ".", "healpix", ".", "angToPix", "(", "nside_pix", ",", "lon_throw", ",", "lat_throw", ")", "cut", "=", "mask", "[", "pix_throw", "]", ".", "astype", "(", "bool", ")", "lon", "=", "np", ".", "append", "(", "lon", ",", "lon_throw", "[", "cut", "]", ")", "lat", "=", "np", ".", "append", "(", "lat", ",", "lat_throw", "[", "cut", "]", ")", "count", "+=", "1", "if", "count", ">", "10", ":", "raise", "RuntimeError", "(", "'Too many loops...'", ")", "return", "lon", "[", "0", ":", "n", "]", ",", "lat", "[", "0", ":", "n", "]", ",", "area" ]
Generate n random positions within a full HEALPix mask of booleans, or a set of (lon, lat) coordinates. Parameters: ----------- input : (1) full HEALPix mask of booleans, or (2) a set of (lon, lat) coordinates for catalog objects that define the occupied pixels. nside_pix : nside_pix is meant to be at coarser resolution than the input mask or catalog object positions so that gaps from star holes, bleed trails, cosmic rays, etc. are filled in. Returns: -------- lon,lat,area : Return the longitude and latitude of the random positions (deg) and the total area (deg^2).
[ "Generate", "n", "random", "positions", "within", "a", "full", "HEALPix", "mask", "of", "booleans", "or", "a", "set", "of", "(", "lon", "lat", ")", "coordinates", "." ]
python
train
39.714286
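The area-uniform throw in isolation: uniform longitude combined with an arcsine-distributed latitude samples the sphere uniformly per unit area; a numpy-only sketch.

import numpy as np

n_throw = 100000
lon_throw = np.random.uniform(0., 360., n_throw)
lat_throw = np.degrees(np.arcsin(np.random.uniform(-1., 1., n_throw)))

# For an area-uniform draw, the band |lat| < 30 deg holds half the sphere
print(np.mean(np.abs(lat_throw) < 30.0))  # ~0.5, since sin(30 deg) = 0.5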
JoelBender/bacpypes
py25/bacpypes/apdu.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/apdu.py#L173-L251
def encode(self, pdu): """encode the contents of the APCI into the PDU.""" if _debug: APCI._debug("encode %r", pdu) PCI.update(pdu, self) if (self.apduType == ConfirmedRequestPDU.pduType): # PDU type buff = self.apduType << 4 if self.apduSeg: buff += 0x08 if self.apduMor: buff += 0x04 if self.apduSA: buff += 0x02 pdu.put(buff) pdu.put((self.apduMaxSegs << 4) + self.apduMaxResp) pdu.put(self.apduInvokeID) if self.apduSeg: pdu.put(self.apduSeq) pdu.put(self.apduWin) pdu.put(self.apduService) elif (self.apduType == UnconfirmedRequestPDU.pduType): pdu.put(self.apduType << 4) pdu.put(self.apduService) elif (self.apduType == SimpleAckPDU.pduType): pdu.put(self.apduType << 4) pdu.put(self.apduInvokeID) pdu.put(self.apduService) elif (self.apduType == ComplexAckPDU.pduType): # PDU type buff = self.apduType << 4 if self.apduSeg: buff += 0x08 if self.apduMor: buff += 0x04 pdu.put(buff) pdu.put(self.apduInvokeID) if self.apduSeg: pdu.put(self.apduSeq) pdu.put(self.apduWin) pdu.put(self.apduService) elif (self.apduType == SegmentAckPDU.pduType): # PDU type buff = self.apduType << 4 if self.apduNak: buff += 0x02 if self.apduSrv: buff += 0x01 pdu.put(buff) pdu.put(self.apduInvokeID) pdu.put(self.apduSeq) pdu.put(self.apduWin) elif (self.apduType == ErrorPDU.pduType): pdu.put(self.apduType << 4) pdu.put(self.apduInvokeID) pdu.put(self.apduService) elif (self.apduType == RejectPDU.pduType): pdu.put(self.apduType << 4) pdu.put(self.apduInvokeID) pdu.put(self.apduAbortRejectReason) elif (self.apduType == AbortPDU.pduType): # PDU type buff = self.apduType << 4 if self.apduSrv: buff += 0x01 pdu.put(buff) pdu.put(self.apduInvokeID) pdu.put(self.apduAbortRejectReason) else: raise ValueError("invalid APCI.apduType")
[ "def", "encode", "(", "self", ",", "pdu", ")", ":", "if", "_debug", ":", "APCI", ".", "_debug", "(", "\"encode %r\"", ",", "pdu", ")", "PCI", ".", "update", "(", "pdu", ",", "self", ")", "if", "(", "self", ".", "apduType", "==", "ConfirmedRequestPDU", ".", "pduType", ")", ":", "# PDU type", "buff", "=", "self", ".", "apduType", "<<", "4", "if", "self", ".", "apduSeg", ":", "buff", "+=", "0x08", "if", "self", ".", "apduMor", ":", "buff", "+=", "0x04", "if", "self", ".", "apduSA", ":", "buff", "+=", "0x02", "pdu", ".", "put", "(", "buff", ")", "pdu", ".", "put", "(", "(", "self", ".", "apduMaxSegs", "<<", "4", ")", "+", "self", ".", "apduMaxResp", ")", "pdu", ".", "put", "(", "self", ".", "apduInvokeID", ")", "if", "self", ".", "apduSeg", ":", "pdu", ".", "put", "(", "self", ".", "apduSeq", ")", "pdu", ".", "put", "(", "self", ".", "apduWin", ")", "pdu", ".", "put", "(", "self", ".", "apduService", ")", "elif", "(", "self", ".", "apduType", "==", "UnconfirmedRequestPDU", ".", "pduType", ")", ":", "pdu", ".", "put", "(", "self", ".", "apduType", "<<", "4", ")", "pdu", ".", "put", "(", "self", ".", "apduService", ")", "elif", "(", "self", ".", "apduType", "==", "SimpleAckPDU", ".", "pduType", ")", ":", "pdu", ".", "put", "(", "self", ".", "apduType", "<<", "4", ")", "pdu", ".", "put", "(", "self", ".", "apduInvokeID", ")", "pdu", ".", "put", "(", "self", ".", "apduService", ")", "elif", "(", "self", ".", "apduType", "==", "ComplexAckPDU", ".", "pduType", ")", ":", "# PDU type", "buff", "=", "self", ".", "apduType", "<<", "4", "if", "self", ".", "apduSeg", ":", "buff", "+=", "0x08", "if", "self", ".", "apduMor", ":", "buff", "+=", "0x04", "pdu", ".", "put", "(", "buff", ")", "pdu", ".", "put", "(", "self", ".", "apduInvokeID", ")", "if", "self", ".", "apduSeg", ":", "pdu", ".", "put", "(", "self", ".", "apduSeq", ")", "pdu", ".", "put", "(", "self", ".", "apduWin", ")", "pdu", ".", "put", "(", "self", ".", "apduService", ")", "elif", "(", "self", ".", "apduType", "==", "SegmentAckPDU", ".", "pduType", ")", ":", "# PDU type", "buff", "=", "self", ".", "apduType", "<<", "4", "if", "self", ".", "apduNak", ":", "buff", "+=", "0x02", "if", "self", ".", "apduSrv", ":", "buff", "+=", "0x01", "pdu", ".", "put", "(", "buff", ")", "pdu", ".", "put", "(", "self", ".", "apduInvokeID", ")", "pdu", ".", "put", "(", "self", ".", "apduSeq", ")", "pdu", ".", "put", "(", "self", ".", "apduWin", ")", "elif", "(", "self", ".", "apduType", "==", "ErrorPDU", ".", "pduType", ")", ":", "pdu", ".", "put", "(", "self", ".", "apduType", "<<", "4", ")", "pdu", ".", "put", "(", "self", ".", "apduInvokeID", ")", "pdu", ".", "put", "(", "self", ".", "apduService", ")", "elif", "(", "self", ".", "apduType", "==", "RejectPDU", ".", "pduType", ")", ":", "pdu", ".", "put", "(", "self", ".", "apduType", "<<", "4", ")", "pdu", ".", "put", "(", "self", ".", "apduInvokeID", ")", "pdu", ".", "put", "(", "self", ".", "apduAbortRejectReason", ")", "elif", "(", "self", ".", "apduType", "==", "AbortPDU", ".", "pduType", ")", ":", "# PDU type", "buff", "=", "self", ".", "apduType", "<<", "4", "if", "self", ".", "apduSrv", ":", "buff", "+=", "0x01", "pdu", ".", "put", "(", "buff", ")", "pdu", ".", "put", "(", "self", ".", "apduInvokeID", ")", "pdu", ".", "put", "(", "self", ".", "apduAbortRejectReason", ")", "else", ":", "raise", "ValueError", "(", "\"invalid APCI.apduType\"", ")" ]
encode the contents of the APCI into the PDU.
[ "encode", "the", "contents", "of", "the", "APCI", "into", "the", "PDU", "." ]
python
train
31.265823
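The first-octet packing in isolation, with hypothetical flag values: the PDU type occupies the high nibble and the boolean flags set individual low bits.

apdu_type = 0x00  # confirmed request
seg, mor, sa = True, False, True

buff = apdu_type << 4
if seg:
    buff += 0x08
if mor:
    buff += 0x04
if sa:
    buff += 0x02
print(hex(buff))  # -> 0xa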
peterwittek/ncpol2sdpa
ncpol2sdpa/physics_utils.py
https://github.com/peterwittek/ncpol2sdpa/blob/bce75d524d0b9d0093f32e3a0a5611f8589351a7/ncpol2sdpa/physics_utils.py#L124-L163
def pauli_constraints(X, Y, Z): """Return a set of constraints that define Pauli spin operators. :param X: List of Pauli X operator on sites. :type X: list of :class:`sympy.physics.quantum.operator.HermitianOperator`. :param Y: List of Pauli Y operator on sites. :type Y: list of :class:`sympy.physics.quantum.operator.HermitianOperator`. :param Z: List of Pauli Z operator on sites. :type Z: list of :class:`sympy.physics.quantum.operator.HermitianOperator`. :returns: dict of substitutions. """ substitutions = {} n_vars = len(X) for i in range(n_vars): # They square to the identity substitutions[X[i] * X[i]] = 1 substitutions[Y[i] * Y[i]] = 1 substitutions[Z[i] * Z[i]] = 1 # Anticommutation relations substitutions[Y[i] * X[i]] = - X[i] * Y[i] substitutions[Z[i] * X[i]] = - X[i] * Z[i] substitutions[Z[i] * Y[i]] = - Y[i] * Z[i] # Commutation relations. # equalities.append(X[i]*Y[i] - 1j*Z[i]) # equalities.append(X[i]*Z[i] + 1j*Y[i]) # equalities.append(Y[i]*Z[i] - 1j*X[i]) # They commute between the sites for j in range(i + 1, n_vars): substitutions[X[j] * X[i]] = X[i] * X[j] substitutions[Y[j] * Y[i]] = Y[i] * Y[j] substitutions[Y[j] * X[i]] = X[i] * Y[j] substitutions[Y[i] * X[j]] = X[j] * Y[i] substitutions[Z[j] * Z[i]] = Z[i] * Z[j] substitutions[Z[j] * X[i]] = X[i] * Z[j] substitutions[Z[i] * X[j]] = X[j] * Z[i] substitutions[Z[j] * Y[i]] = Y[i] * Z[j] substitutions[Z[i] * Y[j]] = Y[j] * Z[i] return substitutions
[ "def", "pauli_constraints", "(", "X", ",", "Y", ",", "Z", ")", ":", "substitutions", "=", "{", "}", "n_vars", "=", "len", "(", "X", ")", "for", "i", "in", "range", "(", "n_vars", ")", ":", "# They square to the identity", "substitutions", "[", "X", "[", "i", "]", "*", "X", "[", "i", "]", "]", "=", "1", "substitutions", "[", "Y", "[", "i", "]", "*", "Y", "[", "i", "]", "]", "=", "1", "substitutions", "[", "Z", "[", "i", "]", "*", "Z", "[", "i", "]", "]", "=", "1", "# Anticommutation relations", "substitutions", "[", "Y", "[", "i", "]", "*", "X", "[", "i", "]", "]", "=", "-", "X", "[", "i", "]", "*", "Y", "[", "i", "]", "substitutions", "[", "Z", "[", "i", "]", "*", "X", "[", "i", "]", "]", "=", "-", "X", "[", "i", "]", "*", "Z", "[", "i", "]", "substitutions", "[", "Z", "[", "i", "]", "*", "Y", "[", "i", "]", "]", "=", "-", "Y", "[", "i", "]", "*", "Z", "[", "i", "]", "# Commutation relations.", "# equalities.append(X[i]*Y[i] - 1j*Z[i])", "# equalities.append(X[i]*Z[i] + 1j*Y[i])", "# equalities.append(Y[i]*Z[i] - 1j*X[i])", "# They commute between the sites", "for", "j", "in", "range", "(", "i", "+", "1", ",", "n_vars", ")", ":", "substitutions", "[", "X", "[", "j", "]", "*", "X", "[", "i", "]", "]", "=", "X", "[", "i", "]", "*", "X", "[", "j", "]", "substitutions", "[", "Y", "[", "j", "]", "*", "Y", "[", "i", "]", "]", "=", "Y", "[", "i", "]", "*", "Y", "[", "j", "]", "substitutions", "[", "Y", "[", "j", "]", "*", "X", "[", "i", "]", "]", "=", "X", "[", "i", "]", "*", "Y", "[", "j", "]", "substitutions", "[", "Y", "[", "i", "]", "*", "X", "[", "j", "]", "]", "=", "X", "[", "j", "]", "*", "Y", "[", "i", "]", "substitutions", "[", "Z", "[", "j", "]", "*", "Z", "[", "i", "]", "]", "=", "Z", "[", "i", "]", "*", "Z", "[", "j", "]", "substitutions", "[", "Z", "[", "j", "]", "*", "X", "[", "i", "]", "]", "=", "X", "[", "i", "]", "*", "Z", "[", "j", "]", "substitutions", "[", "Z", "[", "i", "]", "*", "X", "[", "j", "]", "]", "=", "X", "[", "j", "]", "*", "Z", "[", "i", "]", "substitutions", "[", "Z", "[", "j", "]", "*", "Y", "[", "i", "]", "]", "=", "Y", "[", "i", "]", "*", "Z", "[", "j", "]", "substitutions", "[", "Z", "[", "i", "]", "*", "Y", "[", "j", "]", "]", "=", "Y", "[", "j", "]", "*", "Z", "[", "i", "]", "return", "substitutions" ]
Return a set of constraints that define Pauli spin operators.

:param X: List of Pauli X operators on sites.
:type X: list of :class:`sympy.physics.quantum.operator.HermitianOperator`.
:param Y: List of Pauli Y operators on sites.
:type Y: list of :class:`sympy.physics.quantum.operator.HermitianOperator`.
:param Z: List of Pauli Z operators on sites.
:type Z: list of :class:`sympy.physics.quantum.operator.HermitianOperator`.

:returns: dict of monomial substitutions encoding the Pauli algebra.
[ "Return", "a", "set", "of", "constraints", "that", "define", "Pauli", "spin", "operators", "." ]
python
train
42.25
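A minimal usage sketch for the pauli_constraints record above, assuming sympy is installed; the two-site setup, operator names, and assertions are illustrative and not part of the original record:

from sympy.physics.quantum.operator import HermitianOperator

# Two sites of illustrative Pauli operators (the names are arbitrary).
n = 2
X = [HermitianOperator('X%d' % i) for i in range(n)]
Y = [HermitianOperator('Y%d' % i) for i in range(n)]
Z = [HermitianOperator('Z%d' % i) for i in range(n)]

substitutions = pauli_constraints(X, Y, Z)
# Squares on a site reduce to the identity:
assert substitutions[X[0] * X[0]] == 1
# Same-site products pick up a sign when reordered (anticommutation):
assert substitutions[Y[0] * X[0]] == -X[0] * Y[0]
# Cross-site products are reordered without a sign change (commutation):
assert substitutions[X[1] * X[0]] == X[0] * X[1]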
PlaidWeb/Publ
publ/category.py
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/category.py#L154-L158
def sort_name(self): """ Get the sorting name of this category """ if self._record and self._record.sort_name: return self._record.sort_name return self.name
[ "def", "sort_name", "(", "self", ")", ":", "if", "self", ".", "_record", "and", "self", ".", "_record", ".", "sort_name", ":", "return", "self", ".", "_record", ".", "sort_name", "return", "self", ".", "name" ]
Get the sorting name of this category
[ "Get", "the", "sorting", "name", "of", "this", "category" ]
python
train
37.8
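A short behavioural sketch of the fallback in sort_name; FakeCategory mirrors the method above (presumably exposed as a property in the real class, since the extracted code drops decorators) and FakeRecord is a hypothetical stand-in for Publ's database record:

class FakeRecord:
    sort_name = 'aardvark'

class FakeCategory:
    # Stand-in; the real class lives in publ/category.py.
    name = 'Zebras'
    _record = None

    @property
    def sort_name(self):
        """ Get the sorting name of this category """
        if self._record and self._record.sort_name:
            return self._record.sort_name
        return self.name

c = FakeCategory()
assert c.sort_name == 'Zebras'    # no record: fall back to the display name
c._record = FakeRecord()
assert c.sort_name == 'aardvark'  # the record's explicit sort name wins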
minhhh/pelican_git
pelican_git/plugin.py
https://github.com/minhhh/pelican_git/blob/9e4758adb5c70b95979f1953823a2fcf1c76e50c/pelican_git/plugin.py#L147-L153
def register(): """Plugin registration.""" from pelican import signals signals.initialized.connect(setup_git) signals.article_generator_finalized.connect(replace_git_url)
[ "def", "register", "(", ")", ":", "from", "pelican", "import", "signals", "signals", ".", "initialized", ".", "connect", "(", "setup_git", ")", "signals", ".", "article_generator_finalized", ".", "connect", "(", "replace_git_url", ")" ]
Plugin registration.
[ "Plugin", "registration", "." ]
python
train
26
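A hedged sketch of handlers compatible with the two Pelican signals wired up by register(); the bodies are placeholders, since the real setup_git and replace_git_url are defined elsewhere in pelican_git/plugin.py:

def setup_git(pelican_object):
    # `initialized` fires once with the Pelican instance; a plugin
    # typically reads its configuration from pelican_object.settings.
    pass

def replace_git_url(article_generator):
    # `article_generator_finalized` fires with the ArticleGenerator
    # after articles are built; the real handler rewrites this
    # plugin's git markers inside the generated article content.
    for article in article_generator.articles:
        pass  # placeholder for the actual rewrite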
riga/law
law/util.py
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L286-L309
def merge_dicts(*dicts, **kwargs):
    """ merge_dicts(*dicts, cls=None)
    Takes multiple *dicts* and returns a single merged dict. The merging takes place in order of
    the passed dicts and therefore values of later dicts take precedence in case of key
    collisions. The class of the returned merged dict is configurable via *cls*. If it is *None*,
    the class is inferred from the first dict object in *dicts*.
    """
    # get or infer the class
    cls = kwargs.get("cls", None)
    if cls is None:
        for d in dicts:
            if isinstance(d, dict):
                cls = d.__class__
                break
        else:
            raise TypeError("cannot infer cls as none of the passed objects is of type dict")

    # start merging
    merged_dict = cls()
    for d in dicts:
        if isinstance(d, dict):
            merged_dict.update(d)

    return merged_dict
[ "def", "merge_dicts", "(", "*", "dicts", ",", "*", "*", "kwargs", ")", ":", "# get or infer the class", "cls", "=", "kwargs", ".", "get", "(", "\"cls\"", ",", "None", ")", "if", "cls", "is", "None", ":", "for", "d", "in", "dicts", ":", "if", "isinstance", "(", "d", ",", "dict", ")", ":", "cls", "=", "d", ".", "__class__", "break", "else", ":", "raise", "TypeError", "(", "\"cannot infer cls as none of the passed objects is of type dict\"", ")", "# start merging", "merged_dict", "=", "cls", "(", ")", "for", "d", "in", "dicts", ":", "if", "isinstance", "(", "d", ",", "dict", ")", ":", "merged_dict", ".", "update", "(", "d", ")", "return", "merged_dict" ]
merge_dicts(*dicts, cls=None)
Takes multiple *dicts* and returns a single merged dict. The merging takes place in order of
the passed dicts and therefore values of later dicts take precedence in case of key
collisions. The class of the returned merged dict is configurable via *cls*. If it is *None*,
the class is inferred from the first dict object in *dicts*.
[ "merge_dicts", "(", "*", "dicts", "cls", "=", "None", ")", "Takes", "multiple", "*", "dicts", "*", "and", "returns", "a", "single", "merged", "dict", ".", "The", "merging", "takes", "place", "in", "order", "of", "the", "passed", "dicts", "and", "therefore", "values", "of", "rear", "objects", "have", "precedence", "in", "case", "of", "field", "collisions", ".", "The", "class", "of", "the", "returned", "merged", "dict", "is", "configurable", "via", "*", "cls", "*", ".", "If", "it", "is", "*", "None", "*", "the", "class", "is", "inferred", "from", "the", "first", "dict", "object", "in", "*", "dicts", "*", "." ]
python
train
36.291667
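A quick usage sketch for merge_dicts above; the sample dicts are illustrative:

from collections import OrderedDict

a = OrderedDict(x=1, y=2)
b = {'y': 20, 'z': 30}

merged = merge_dicts(a, b)
assert merged == {'x': 1, 'y': 20, 'z': 30}  # later dicts win on key collisions
assert isinstance(merged, OrderedDict)       # class inferred from the first dict

# An explicit cls overrides the inference:
assert type(merge_dicts(a, b, cls=dict)) is dict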