Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
377,000 | def get_income_statement(self, **kwargs):
def fmt(out):
return {symbol: out[symbol]["income"]
for symbol in self.symbols}
def fmt_p(out):
data = {(symbol, sheet["reportDate"]): sheet for symbol in out
for sheet in out[symbol]["income"]}
return pd.DataFrame(data)
return self._get_endpoint("income", fmt_j=fmt, fmt_p=fmt_p,
params=kwargs) | Income Statement
Pulls income statement data. Available quarterly (4 quarters) or
annually (4 years).
Reference: https://iexcloud.io/docs/api/#income-statement
Data Weighting: ``1000`` per symbol per period
.. warning:: This endpoint is only available using IEX Cloud. See
:ref:`Migrating` for more information.
Parameters
----------
period: str, default 'quarterly', optional
Allows you to specify annual or quarterly income statement.
Defaults to quarterly. Values should be annual or quarter
Returns
-------
list or pandas.DataFrame
Stocks Income Statement endpoint data |
377,001 | def add_validation_patch(self, patch):
self._transform_truth(patch)
self._count_truth_pixels()
self._classify(patch)
self._count_classified_pixels()
self.n_validation_sets = self.n_validation_sets + 1 | Extracts ground truth and classification results from the EOPatch and
aggregates the results. |
377,002 | def list_proxy(root_package = ):
proxy_dict = OrderedDict()
pkg = __import__(root_package, fromlist=[])
for imp, module, _ in walk_packages(pkg.__path__, root_package + '.'):
m = __import__(module, fromlist = [])
for _, v in vars(m).items():
if v is not None and isinstance(v, type) and issubclass(v, _ProxyModule) \
and v is not _ProxyModule \
and v.__module__ == module \
and hasattr(v, '_default'):
name = v.__name__.lower()
if name not in proxy_dict:
proxy_dict[name] = {: v._default.__name__.lower(),
: repr(v._default.__module__ + + v._default.__name__)}
return proxy_dict | Walk through all the sub modules, find subclasses of vlcp.server.module._ProxyModule,
list their default values |
377,003 | def _cmp(self, other):
if not isinstance(other, Version):
other = Version(other)
num1 = self.version_nums
num2 = other.version_nums
ver_len = max(len(num1), len(num2))
num1 += tuple([0 for n in range(len(num1), ver_len)])
num2 += tuple([0 for n in range(len(num2), ver_len)])
for (p1, p2) in zip(num1, num2):
if p1 < p2:
return -1
elif p1 > p2:
return 1
if self.version_extra is None:
if other.version_extra is None:
return 0
else:
return -1
elif other.version_extra is None:
return 1
elif self.version_extra == other.version_extra:
return 0
elif self.version_extra < other.version_extra:
return -1
else:
return 1 | Compare two Project Haystack version strings, then return
-1 if self < other,
0 if self == other
or 1 if self > other. |
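A minimal usage sketch for the comparison above, assuming a `Version` constructor that parses strings such as '2.0' into `version_nums` and an optional suffix into `version_extra` (the parsing itself is not shown in the snippet):
v1 = Version("2.0")
v2 = Version("2.0.1")
assert v1._cmp(v2) == -1      # shorter tuples are zero-padded before comparison
assert v1._cmp("2.0") == 0    # plain strings are wrapped in Version first
assert v2._cmp("2.0") == 1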
377,004 | def generate_data(self, data_dir, tmp_dir, task_id=-1):
tf.logging.info("generate_data task_id=%s" % task_id)
encoder = self.get_or_create_vocab(data_dir, tmp_dir)
assert task_id >= 0 and task_id < self.num_generate_tasks
if task_id < self.num_train_shards:
out_file = self.training_filepaths(
data_dir, self.num_train_shards, shuffled=False)[task_id]
else:
out_file = self.dev_filepaths(
data_dir, self.num_dev_shards,
shuffled=False)[task_id - self.num_train_shards]
generator_utils.generate_files(
self.example_generator(encoder, tmp_dir, task_id), [out_file])
generator_utils.shuffle_dataset([out_file]) | Generates training/dev data.
Args:
data_dir: a string
tmp_dir: a string
task_id: an optional integer
Returns:
shard or shards for which data was generated. |
377,005 | def _to_dict(self):
_dict = {}
if hasattr(self, 'gateways') and self.gateways is not None:
_dict['gateways'] = [x._to_dict() for x in self.gateways]
return _dict | Return a json dictionary representing this model. |
377,006 | def build_cpp(build_context, target, compiler_config, workspace_dir):
rmtree(workspace_dir)
binary = join(*split(target.name))
objects = link_cpp_artifacts(build_context, target, workspace_dir, True)
buildenv_workspace = build_context.conf.host_to_buildenv_path(
workspace_dir)
objects.extend(compile_cc(
build_context, compiler_config, target.props.in_buildenv,
get_source_files(target, build_context), workspace_dir,
buildenv_workspace, target.props.cmd_env))
bin_file = join(buildenv_workspace, binary)
link_cmd = (
[compiler_config.linker, '-o', bin_file] +
objects + compiler_config.link_flags)
build_context.run_in_buildenv(
target.props.in_buildenv, link_cmd, target.props.cmd_env)
target.artifacts.add(AT.binary, relpath(join(workspace_dir, binary),
build_context.conf.project_root), binary) | Compile and link a C++ binary for `target`. |
377,007 | def merge_perchrom_vcfs(job, perchrom_vcfs, tool_name, univ_options):
work_dir = os.getcwd()
input_files = {.join([chrom, ]): jsid for chrom, jsid in perchrom_vcfs.items()}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
first = True
with open(.join([work_dir, , ]), ) as outvcf:
for chromvcfname in chrom_sorted([x.rstrip() for x in input_files.keys()]):
with open(input_files[chromvcfname + ], ) as infile:
for line in infile:
line = line.strip()
if line.startswith():
if first:
print(line, file=outvcf)
continue
first = False
print(line, file=outvcf)
output_file = job.fileStore.writeGlobalFile(outvcf.name)
export_results(job, output_file, outvcf.name, univ_options, subfolder= + tool_name)
job.fileStore.logToMaster( % tool_name)
return output_file | Merge per-chromosome vcf files into a single genome level vcf.
:param dict perchrom_vcfs: Dictionary with chromosome name as key and fsID of the corresponding
vcf as value
:param str tool_name: Name of the tool that generated the vcfs
:returns: fsID for the merged vcf
:rtype: toil.fileStore.FileID |
377,008 | def create(host, port, result_converter=None, testcase_converter=None, args=None):
return SampleClient(host, port, result_converter, testcase_converter, args) | Function which is called by Icetea to create an instance of the cloud client. This function
must exist.
This function must not return None. Either return an instance of Client or raise. |
377,009 | def get_dashboards(self):
res = requests.get(self.url + self._dashboards_api_endpoint, headers=self.hdrs, verify=self.ssl_verify)
return self._request_result(res) | **Description**
Return the list of dashboards available under the given user account. This includes the dashboards created by the user and the ones shared with her by other users.
**Success Return Value**
A dictionary containing the list of available dashboards.
**Example**
`examples/list_dashboards.py <https://github.com/draios/python-sdc-client/blob/master/examples/list_dashboards.py>`_ |
377,010 | def extractColumns(TableName,SourceParameterName,ParameterFormats,ParameterNames=None,FixCol=False):
if type(LOCAL_TABLE_CACHE[TableName][][][SourceParameterName]) not in set([str,unicode]):
raise Exception()
i=-1
if ParameterNames and type(ParameterNames) not in set([list,tuple]):
ParameterNames = [ParameterNames]
if ParameterFormats and type(ParameterFormats) not in set([list,tuple]):
ParameterFormats = [ParameterFormats]
if not ParameterNames:
ParameterNames = []
for par_format in ParameterFormats:
while True:
i+=1
par_name = % i
fmt = LOCAL_TABLE_CACHE[TableName][][].get(par_name,None)
if not fmt: break
ParameterNames.append(par_name)
Intersection = set(ParameterNames).intersection(LOCAL_TABLE_CACHE[TableName][][])
if Intersection:
raise Exception( % str(list(Intersection)))
i=0
for par_name in ParameterNames:
par_format = ParameterFormats[i]
LOCAL_TABLE_CACHE[TableName][][][par_name]=par_format
LOCAL_TABLE_CACHE[TableName][][par_name]=[]
i+=1
LOCAL_TABLE_CACHE[TableName][][] += ParameterNames
i=0
format_regex = []
format_types = []
for par_format in ParameterFormats:
par_name = ParameterNames[i]
regex = FORMAT_PYTHON_REGEX
(lng,trail,lngpnt,ty) = re.search(regex,par_format).groups()
ty = ty.lower()
if ty == :
par_type = int
if FixCol:
format_regex_part = REGEX_INTEGER_FIXCOL(lng)
else:
format_regex_part = REGEX_INTEGER
elif ty == :
par_type = str
if FixCol:
format_regex_part = REGEX_STRING_FIXCOL(lng)
else:
format_regex_part = REGEX_STRING
elif ty == :
par_type = float
if FixCol:
format_regex_part = REGEX_FLOAT_F_FIXCOL(lng)
else:
format_regex_part = REGEX_FLOAT_F
elif ty == :
par_type = float
if FixCol:
format_regex_part = REGEX_FLOAT_E_FIXCOL(lng)
else:
format_regex_part = REGEX_FLOAT_E
else:
raise Exception()
format_regex.append(+format_regex_part+)
format_types.append(par_type)
def_val = getDefaultValue(par_type)
LOCAL_TABLE_CACHE[TableName][][][par_name]=def_val
i+=1
format_regex = .join(format_regex)
for SourceParameterString in LOCAL_TABLE_CACHE[TableName][][SourceParameterName]:
try:
ExtractedValues = list(re.search(format_regex,SourceParameterString).groups())
except:
raise Exception( % SourceParameterString)
i=0
for par_name in ParameterNames:
par_value = format_types[i](ExtractedValues[i])
LOCAL_TABLE_CACHE[TableName][][par_name].append(par_value)
i+=1
number_of_rows = LOCAL_TABLE_CACHE[TableName][][]
number_of_rows2 = len(LOCAL_TABLE_CACHE[TableName][][SourceParameterName])
number_of_rows3 = len(LOCAL_TABLE_CACHE[TableName][][ParameterNames[0]])
if not (number_of_rows == number_of_rows2 == number_of_rows3):
raise Exception() | INPUT PARAMETERS:
TableName: name of source table (required)
SourceParameterName: name of source column to process (required)
ParameterFormats: c formats of unpacked parameters (required)
ParameterNames: list of resulting parameter names (optional)
FixCol: column-fixed (True) format of source column (optional)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Note, that this function is aimed to do some extra job on
interpreting string parameters which is normally supposed
to be done by the user.
---
EXAMPLE OF USAGE:
extractColumns('sampletab',SourceParameterName='p5',
ParameterFormats=('%d','%d','%d'),
ParameterNames=('p5_1','p5_2','p5_3'))
This example extracts three integer parameters from
a source column 'p5' and puts results in ('p5_1','p5_2','p5_3').
--- |
377,011 | def get_areas(self, area_id=None, **kwargs):
return self.get_elements(Area, elem_id=area_id, **kwargs) | Alias for get_elements() but filter the result by Area
:param area_id: The Id of the area
:type area_id: Integer
:return: List of elements |
377,012 | async def send_script(self, conn_id, data):
progress_callback = functools.partial(_on_progress, self, , conn_id)
resp = await self._execute(self._adapter.send_script_sync, conn_id, data, progress_callback)
_raise_error(conn_id, , resp) | Send a script to a device.
See :meth:`AbstractDeviceAdapter.send_script`. |
377,013 | def report_hit_filename(zipfilename: str, contentsfilename: str,
show_inner_file: bool) -> None:
if show_inner_file:
print("{} [{}]".format(zipfilename, contentsfilename))
else:
print(zipfilename) | For "hits": prints either the ``.zip`` filename, or the ``.zip`` filename
and the inner filename.
Args:
zipfilename: filename of the ``.zip`` file
contentsfilename: filename of the inner file
show_inner_file: if ``True``, show both; if ``False``, show just the
``.zip`` filename
Returns: |
377,014 | def bam2fastq(job, bamfile, univ_options):
work_dir = os.path.split(bamfile)[0]
base_name = os.path.split(os.path.splitext(bamfile)[0])[1]
parameters = [,
.join([, docker_path(bamfile)]),
.join([, base_name, ]),
.join([, base_name, ]),
.join([, base_name, ])]
docker_call(tool=, tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options[], java_opts=univ_options[])
first_fastq = .join([work_dir, , base_name, ])
assert os.path.exists(first_fastq)
return first_fastq | split an input bam to paired fastqs.
ARGUMENTS
1. bamfile: Path to a bam file
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
|- 'dockerhub': <dockerhub to use>
+- 'java_Xmx': value for max heap passed to java |
377,015 | def combinations(l):
result = []
for x in xrange(len(l) - 1):
ls = l[x + 1:]
for y in ls:
result.append((l[x], y))
return result | Pure-Python implementation of itertools.combinations(l, 2). |
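A quick check of the helper above against the standard library (the body uses `xrange`, so run it under Python 2 or swap in `range`):
import itertools
pairs = combinations([1, 2, 3])                            # [(1, 2), (1, 3), (2, 3)]
assert pairs == list(itertools.combinations([1, 2, 3], 2))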
377,016 | def find_worst(rho, pval, m=1, rlim=.10, plim=.35):
n = len(rho)
r = list(np.abs(rho))
p = list(pval)
i = list(range(n))
if m > n:
warnings.warn(
)
m = n
selected = list()
it = 0
while (len(selected) < m) and (it < n):
temp = p.index(max(p))
worst = i[temp]
if (r[temp] <= rlim) and (p[temp] > plim):
r.pop(temp)
p.pop(temp)
i.pop(temp)
selected.append(worst)
it = it + 1
it = 0
n2 = len(i)
while (len(selected) < m) and (it < n2):
temp = p.index(max(p))
worst = i[temp]
if (r[temp] <= rlim):
r.pop(temp)
p.pop(temp)
i.pop(temp)
selected.append(worst)
it = it + 1
it = 0
n3 = len(i)
while (len(selected) < m) and (it < n3):
temp = r.index(min(r))
worst = i[temp]
r.pop(temp)
p.pop(temp)
i.pop(temp)
selected.append(worst)
it = it + 1
return selected | Find the N "worst", i.e. insignificant/random and low, correlations
Parameters
----------
rho : ndarray, list
1D array with correlation coefficients
pval : ndarray, list
1D array with p-values
m : int
The desired number of indices to return
(How many "worst" correlations to find?)
rlim : float
Desired maximum absolute correlation coefficient
(Default: 0.10)
plim : float
Desired minimum p-value
(Default: 0.35)
Return
------
selected : list
Indices of rho and pval of the "worst" correlations. |
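A small example with made-up numbers: index 2 has both a tiny |rho| and a large p-value, so it is the first candidate returned.
rho = [0.92, 0.45, 0.03, 0.60]
pval = [0.001, 0.04, 0.80, 0.01]
print(find_worst(rho, pval, m=1))   # -> [2]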
377,017 | def error_count(self):
count = 0
for error_list in self.error_dict.values():
count += len(error_list)
return count | Returns the total number of validation errors for this row. |
377,018 | def _setStartSegment(self, segmentIndex, **kwargs):
segments = self.segments
oldStart = segments[-1]
oldLast = segments[0]
if oldLast.type == "curve" or oldLast.type == "qcurve":
startOn = oldStart.onCurve
lastOn = oldLast.onCurve
if startOn.x == lastOn.x and startOn.y == lastOn.y:
self.removeSegment(0)
segmentIndex = segmentIndex - 1
segments = self.segments
if segments[0].type == "move":
segments[0].type = "line"
segments = segments[segmentIndex - 1:] + segments[:segmentIndex - 1]
points = []
for segment in segments:
for point in segment:
points.append(((point.x, point.y), point.type,
point.smooth, point.name, point.identifier))
for point in self.points:
self.removePoint(point)
for point in points:
position, type, smooth, name, identifier = point
self.appendPoint(
position,
type=type,
smooth=smooth,
name=name,
identifier=identifier
) | Subclasses may override this method. |
377,019 | def is_dsub_operation(op):
if not is_pipeline(op):
return False
for name in ['job-id', 'job-name', 'user-id', 'dsub-version']:
if not get_label(op, name):
return False
return True | Determine if a pipelines operation is a dsub request.
We don't have a rigorous way to identify an operation as being submitted
by dsub. Our best option is to check for certain fields that have always
been part of dsub operations.
- labels: job-id, job-name, and user-id have always existed. The dsub-version
label has always existed for the google-v2 provider.
Args:
op: a pipelines operation.
Returns:
Boolean, true if the pipeline run was generated by dsub. |
377,020 | def get_rotations(self):
if self.centrosymmetric:
return np.vstack((self.rotations, -self.rotations))
else:
return self.rotations | Return all rotations, including inversions for
centrosymmetric crystals. |
377,021 | def wrap_url(s, l):
parts = s.split()
if len(parts) == 1:
return parts[0]
else:
i = 0
lines = []
for j in range(i, len(parts) + 1):
tv = .join(parts[i:j])
nv = .join(parts[i:j + 1])
if len(nv) > l or nv == tv:
i = j
lines.append(tv)
return .join(lines) | Wrap a URL string |
377,022 | def report(self, item_id, report_format="json"):
report_format = report_format.lower()
response = self._request("tasks/report/{id}/{format}".format(id=item_id, format=report_format))
if report_format == "json":
try:
return json.loads(response.content.decode())
except ValueError:
pass
return response.content | Retrieves the specified report for the analyzed item, referenced by item_id.
Available formats include: json, html, all, dropped, package_files.
:type item_id: int
:param item_id: Task ID number
:type report_format: str
:param report_format: Return format
:rtype: dict
:return: Dictionary representing the JSON parsed data or raw, for other
formats / JSON parsing failure. |
377,023 | def _group(self, element):
for v in _get_xml_version(element):
if "name" in element.attrib:
g = TemplateGroup(element, self.versions[v].comment)
self.versions[v].entries[g.identifier] = g
self.versions[v].order.append(g.identifier)
else:
msg.warn("no name element in {}. Ignored. (_group)".format(element)) | Parses the XML element as a group of [unknown] number of lines. |
377,024 | def process(self):
if self.num_attachments > 0:
self.status = u
fs_dirty_archive = self._create_backup()
self._process_attachments()
if self.status_int < 500 and not self.send_attachments:
self._create_archive()
if self.status_int >= 500 and self.status_int < 600:
if in self.settings:
shutil.move(
fs_dirty_archive,
% (self.container.fs_archive_dirty, self.drop_id))
self.cleanup()
return self.status | Calls the external cleanser scripts to (optionally) purge the meta data and then
send the contents of the dropbox via email. |
377,025 | def from_folder(cls, path:PathOrStr, train:str='train', valid:str='valid', test:Optional[str]=None,
classes:Collection[Any]=None, tokenizer:Tokenizer=None, vocab:Vocab=None, chunksize:int=10000, max_vocab:int=60000,
min_freq:int=2, mark_fields:bool=False, include_bos:bool=True, include_eos:bool=False, **kwargs):
"Create a `TextDataBunch` from text files in folders."
path = Path(path).absolute()
processor = [OpenFileProcessor()] + _get_processor(tokenizer=tokenizer, vocab=vocab, chunksize=chunksize, max_vocab=max_vocab,
min_freq=min_freq, mark_fields=mark_fields, include_bos=include_bos, include_eos=include_eos)
src = (TextList.from_folder(path, processor=processor)
.split_by_folder(train=train, valid=valid))
src = src.label_for_lm() if cls==TextLMDataBunch else src.label_from_folder(classes=classes)
if test is not None: src.add_test_folder(path/test)
return src.databunch(**kwargs) | Create a `TextDataBunch` from text files in folders. |
377,026 | def _setup_profiles(self, conversion_profiles):
for key, path in conversion_profiles.items():
if isinstance(path, str):
path = (path, )
for left, right in pair_looper(path):
pair = (_format(left), _format(right))
if pair not in self.converters:
msg =
log.warning(msg % (repr(key), repr(pair)))
break
else:
self.conversion_profiles[key] = path | Add given conversion profiles checking for invalid profiles |
377,027 | def set_title(self,s, panel=):
"set plot title"
panel = self.get_panel(panel)
panel.set_title(s) | set plot title |
377,028 | def get_child_bank_ids(self, bank_id):
if self._catalog_session is not None:
return self._catalog_session.get_child_catalog_ids(catalog_id=bank_id)
return self._hierarchy_session.get_children(id_=bank_id) | Gets the child ``Ids`` of the given bank.
arg: bank_id (osid.id.Id): the ``Id`` to query
return: (osid.id.IdList) - the children of the bank
raise: NotFound - ``bank_id`` is not found
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
377,029 | def on_channel_closed(self, channel, reply_code, reply_text):
self._logger.warning(, reply_code, reply_text)
if not self._closing:
self.close_connection() | Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
:param pika.channel.Channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed |
377,030 | def run(self):
retries = 0
try:
while not self._stopping:
try:
data = self.notifications_api.long_poll_notifications()
except mds.rest.ApiException as e:
retries += 1
if retries >= 10:
retries = 0
time.sleep(backoff)
else:
handle_channel_message(
db=self.db,
queues=self.queues,
b64decode=self._b64decode,
notification_object=data
)
if self.subscription_manager:
self.subscription_manager.notify(data.to_dict())
finally:
self._stopped.set() | Thread main loop |
377,031 | def get_random_name(retry=False):
name = "%s_%s" % (left[random.randint(0, len(left) - 1)], right[random.randint(0, len(right) - 1)])
if retry is True:
name = "%s%d" % (name, random.randint(0, 100))
return name | generates a random name from the list of adjectives and birds in this package
formatted as "adjective_surname". For example 'loving_sugarbird'. If retry is non-zero, a random
integer between 0 and 100 will be added to the end of the name, e.g `loving_sugarbird3` |
377,032 | def new(params, event_shape=(), validate_args=False, name=None):
with tf.compat.v1.name_scope(name, 'IndependentLogistic',
[params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype_hint=tf.int32),
tensor_name='event_shape')
output_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
],
axis=0)
loc_params, scale_params = tf.split(params, 2, axis=-1)
return tfd.Independent(
tfd.Logistic(
loc=tf.reshape(loc_params, output_shape),
scale=tf.math.softplus(tf.reshape(scale_params, output_shape)),
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args) | Create the distribution instance from a `params` vector. |
377,033 | def gen_data_files(src_dir):
fpaths = []
base = os.path.dirname(src_dir)
for root, dir, files in os.walk(src_dir):
if len(files) != 0:
for f in files:
fpaths.append(os.path.relpath(os.path.join(root, f), base))
return fpaths | generates a list of files contained in the given directory (and its
subdirectories) in the format required by the ``package_data`` parameter
of the ``setuptools.setup`` function.
Parameters
----------
src_dir : str
(relative) path to the directory structure containing the files to
be included in the package distribution
Returns
-------
fpaths : list(str)
a list of file paths |
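A sketch of how the returned list is typically consumed, assuming an illustrative package layout `mypkg/data/...` (the names below are not from the source):
from setuptools import setup, find_packages

setup(
    name="mypkg",                                           # illustrative project name
    packages=find_packages(),
    package_data={"mypkg": gen_data_files("mypkg/data")},   # paths come back relative to mypkg/
)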
377,034 | def safe_display_name(numobj, lang, script=None, region=None):
if is_mobile_number_portable_region(region_code_for_number(numobj)):
return U_EMPTY_STRING
return name_for_number(numobj, lang, script, region) | Gets the name of the carrier for the given PhoneNumber object only when
it is 'safe' to display to users. A carrier name is considered safe if the
number is valid and for a region that doesn't support mobile number
portability (http://en.wikipedia.org/wiki/Mobile_number_portability).
This function explicitly checks the validity of the number passed in
Arguments:
numobj -- The PhoneNumber object for which we want to get a carrier name.
lang -- A 2-letter lowercase ISO 639-1 language code for the language in
which the description should be returned (e.g. "en")
script -- A 4-letter titlecase (first letter uppercase, rest lowercase)
ISO script code as defined in ISO 15924, separated by an
underscore (e.g. "Hant")
region -- A 2-letter uppercase ISO 3166-1 country code (e.g. "GB")
Returns a carrier name that is safe to display to users, or the empty string. |
377,035 | def reorder_categories(self, new_categories, ordered=None, inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace) | Reorder categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
inplace : bool, default False
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
See Also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories |
377,036 | def sort_return_tuples(response, **options):
if not response or not options.get('groups'):
return response
n = options['groups']
return list(izip(*[response[i::n] for i in range(n)])) | If ``groups`` is specified, return the response as a list of
n-element tuples with n being the value found in options['groups'] |
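A minimal illustration of the grouping, assuming a flat redis-style reply and `groups` giving the tuple size (`izip` is `itertools.izip` on Python 2, plain `zip` on Python 3):
response = ['a', '1', 'b', '2', 'c', '3']
print(sort_return_tuples(response, groups=2))   # [('a', '1'), ('b', '2'), ('c', '3')]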
377,037 | def _key(self, username, frozen=False):
if frozen:
return self.frozen + username
return self.prefix + username | Translate a username into a key for Redis. |
377,038 | def get_app_logger_color(appname, app_log_level=logging.INFO, log_level=logging.WARN, logfile=None):
stderr_handler = logging.StreamHandler()
stderr_handler.setLevel(log_level)
name = "reliure"
name += "_"*(max(0, len(appname)-len(name)))
formatter = ColorFormatter( % name)
stderr_handler.setFormatter(formatter)
logger = logging.getLogger("reliure")
logger.setLevel(logging.DEBUG)
logger.addHandler(stderr_handler)
app_stderr_handler = logging.StreamHandler()
app_stderr_handler.setLevel(app_log_level)
app_formatter = ColorFormatter("$BG-CYAN$WHITE%s$RESET:%%(asctime)s:$COLOR%%(levelname)s$RESET:$BOLD%%(name)s$RESET: %%(message)s" % appname.upper())
app_stderr_handler.setFormatter(app_formatter)
app_logger = logging.getLogger(appname)
app_logger.setLevel(logging.DEBUG)
app_logger.addHandler(app_stderr_handler)
if logfile is not None:
file_format =
from logging.handlers import TimedRotatingFileHandler
file_handler = TimedRotatingFileHandler(logfile, when="D", interval=1, backupCount=7)
file_handler.setFormatter(logging.Formatter(file_format))
logger.addHandler(file_handler)
app_logger.addHandler(file_handler)
return app_logger | Configure the logging for an app using reliure (it log's both the app and reliure lib)
:param appname: the name of the application to log
:param app_log_level: log level for the app
:param log_level: log level for the reliure
:param logfile: file that store the log, time rotating file (by day), no if None |
377,039 | def plot_di_mean_ellipse(dictionary, fignum=1, color=, marker=, markersize=20, label=, legend=):
pars = []
pars.append(dictionary[])
pars.append(dictionary[])
pars.append(dictionary[])
pars.append(dictionary[])
pars.append(dictionary[])
pars.append(dictionary[])
pars.append(dictionary[])
pars.append(dictionary[])
DI_dimap = pmag.dimap(dictionary[], dictionary[])
if dictionary[] < 0:
plt.scatter(DI_dimap[0], DI_dimap[1],
edgecolors=color, facecolors=,
marker=marker, s=markersize, label=label)
if dictionary[] >= 0:
plt.scatter(DI_dimap[0], DI_dimap[1],
edgecolors=color, facecolors=color,
marker=marker, s=markersize, label=label)
pmagplotlib.plot_ell(fignum, pars, color, 0, 1) | Plot a mean direction (declination, inclination) confidence ellipse.
Parameters
-----------
dictionary : a dictionary generated by the pmag.dobingham or pmag.dokent functions |
377,040 | def Xor(bytestr, key):
from builtins import bytes
precondition.AssertType(bytestr, bytes)
bytestr = bytes(bytestr)
return bytes([byte ^ key for byte in bytestr]) | Returns a `bytes` object where each byte has been xored with key. |
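A quick round-trip check of the helper above (it relies on the `future` package for `builtins.bytes` on Python 2):
data = b"secret"
key = 0x2A
assert Xor(Xor(data, key), key) == data   # xoring twice with the same key restores the input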
377,041 | def _run_check(self, check_method, ds, max_level):
val = check_method(ds)
if isinstance(val, list):
check_val = []
for v in val:
res = fix_return_value(v, check_method.__func__.__name__,
check_method, check_method.__self__)
if max_level is None or res.weight > max_level:
check_val.append(res)
return check_val
else:
check_val = fix_return_value(val, check_method.__func__.__name__,
check_method, check_method.__self__)
if max_level is None or check_val.weight > max_level:
return [check_val]
else:
return [] | Runs a check and appends a result to the values list.
@param bound method check_method: a given check method
@param netCDF4 dataset ds
@param int max_level: check level
@return list: list of Result objects |
377,042 | def update_query(self, *args, **kwargs):
s = self._get_str_query(*args, **kwargs)
new_query = MultiDict(parse_qsl(s, keep_blank_values=True))
query = MultiDict(self.query)
query.update(new_query)
return URL(self._val._replace(query=self._get_str_query(query)), encoded=True) | Return a new URL with query part updated. |
377,043 | def determine_master(port=4000):
if os.environ.get('SPARK_LOCAL_IP'):
return os.environ['SPARK_LOCAL_IP'] + ":" + str(port)
else:
return gethostbyname(gethostname()) + ":" + str(port) | Determine address of master so that workers
can connect to it. If the environment variable
SPARK_LOCAL_IP is set, that address will be used.
:param port: port on which the application runs
:return: Master address
Example usage:
SPARK_LOCAL_IP=127.0.0.1 spark-submit --master \
local[8] examples/mllib_mlp.py |
377,044 | def update_course_enrollment(self, email, course_url, purchase_incomplete, mode, unit_cost=None, course_id=None,
currency=None, message_id=None, site_code=None, sku=None):
config = get_sailthru_configuration(site_code)
try:
sailthru_client = get_sailthru_client(site_code)
except SailthruError:
return
new_enroll = False
send_template = None
if not purchase_incomplete:
if mode == :
send_template = config.get()
elif mode == or mode == :
new_enroll = True
send_template = config.get()
else:
new_enroll = True
send_template = config.get()
cost_in_cents = int(unit_cost * 100)
if new_enroll:
if not _update_unenrolled_list(sailthru_client, email, course_url, False):
schedule_retry(self, config)
course_data = _get_course_content(course_id, course_url, sailthru_client, site_code, config)
item = _build_purchase_item(course_id, course_url, cost_in_cents, mode, course_data, sku)
options = {}
if purchase_incomplete and config.get():
options[] = config.get()
options[] = "+{} minutes".format(config.get())
if send_template:
options[] = send_template
if not _record_purchase(sailthru_client, email, item, purchase_incomplete, message_id, options):
schedule_retry(self, config) | Adds/updates Sailthru when a user adds to cart/purchases/upgrades a course
Args:
email(str): The user's email address
course_url(str): Course home page url
purchase_incomplete(boolean): True if adding to cart
mode(string): enroll mode (audit, verification, ...)
unit_cost(decimal): cost if purchase event
course_id(CourseKey): course id
currency(str): currency if purchase event - currently ignored since Sailthru only supports USD
message_id(str): value from Sailthru marketing campaign cookie
site_code(str): site code
Returns:
None |
377,045 | def unalias(self, annotationtype, alias):
if inspect.isclass(annotationtype): annotationtype = annotationtype.ANNOTATIONTYPE
return self.alias_set[annotationtype][alias] | Return the set for an alias (if applicable, raises an exception otherwise) |
377,046 | def import_sql_select(connection_url, select_query, username, password, optimize=True,
use_temp_table=None, temp_table_name=None, fetch_mode=None):
assert_is_type(connection_url, str)
assert_is_type(select_query, str)
assert_is_type(username, str)
assert_is_type(password, str)
assert_is_type(optimize, bool)
assert_is_type(use_temp_table, bool, None)
assert_is_type(temp_table_name, str, None)
assert_is_type(fetch_mode, str, None)
p = {"connection_url": connection_url, "select_query": select_query, "username": username, "password": password,
"use_temp_table": use_temp_table, "temp_table_name": temp_table_name, "fetch_mode": fetch_mode}
j = H2OJob(api("POST /99/ImportSQLTable", data=p), "Import SQL Table").poll()
return get_frame(j.dest_key) | Import the SQL table that is the result of the specified SQL query to H2OFrame in memory.
Creates a temporary SQL table from the specified sql_query.
Runs multiple SELECT SQL queries on the temporary table concurrently for parallel ingestion, then drops the table.
Be sure to start the h2o.jar in the terminal with your downloaded JDBC driver in the classpath::
java -cp <path_to_h2o_jar>:<path_to_jdbc_driver_jar> water.H2OApp
Also see h2o.import_sql_table. Currently supported SQL databases are MySQL, PostgreSQL, MariaDB, Hive, Oracle
and Microsoft SQL Server.
:param connection_url: URL of the SQL database connection as specified by the Java Database Connectivity (JDBC)
Driver. For example, "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"
:param select_query: SQL query starting with `SELECT` that returns rows from one or more database tables.
:param username: username for SQL server
:param password: password for SQL server
:param optimize: DEPRECATED. Ignored - use fetch_mode instead. Optimize import of SQL table for faster imports.
:param use_temp_table: whether a temporary table should be created from select_query
:param temp_table_name: name of temporary table to be created from select_query
:param fetch_mode: Set to DISTRIBUTED to enable distributed import. Set to SINGLE to force a sequential read by a single node
from the database.
:returns: an :class:`H2OFrame` containing data of the specified SQL query.
:examples:
>>> conn_url = "jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false"
>>> select_query = "SELECT bikeid from citibike20k"
>>> username = "root"
>>> password = "abc123"
>>> my_citibike_data = h2o.import_sql_select(conn_url, select_query,
... username, password, fetch_mode) |
377,047 | def makeServiceDocXML(title, collections):
serviceTag = etree.Element("service")
workspaceTag = etree.SubElement(serviceTag, "workspace")
titleTag = etree.SubElement(workspaceTag, ATOM + "title", nsmap=ATOM_NSMAP)
titleTag.text = title
for collection in collections:
collectionTag = etree.SubElement(workspaceTag, "collection")
if 'href' in collection:
collectionTag.set("href", collection['href'])
if 'title' in collection:
colTitleTag = etree.SubElement(
collectionTag, ATOM + "title", nsmap=ATOM_NSMAP
)
colTitleTag.text = collection['title']
if 'accept' in collection:
acceptTag = etree.SubElement(collectionTag, "accept")
acceptTag.text = collection['accept']
return serviceTag | Make an ATOM service doc here. The 'collections' parameter is a list of
dictionaries, with the keys of 'title', 'accept' and 'categories'
being valid |
377,048 | def start_listener_thread(self, timeout_ms: int = 30000, exception_handler: Callable = None):
assert not self.should_listen and self.sync_thread is None,
self.should_listen = True
self.sync_thread = gevent.spawn(self.listen_forever, timeout_ms, exception_handler)
self.sync_thread.name = f | Start a listener greenlet to listen for events in the background.
Args:
timeout_ms: How long to poll the Home Server for before retrying.
exception_handler: Optional exception handler function which can
be used to handle exceptions in the caller thread. |
377,049 | def uncloak(request):
try:
del request.session[SESSION_USER_KEY]
except KeyError:
pass
next = request.POST.get(REDIRECT_FIELD_NAME) or request.session.get(SESSION_REDIRECT_KEY)
if next and is_safe_url(next, request.get_host()):
return HttpResponseRedirect(next)
return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL) | Undo a masquerade session and redirect the user back to where they started
cloaking from (or wherever the "next" POST parameter points) |
377,050 | def attribute(element, attribute, default=None):
attribute_value = element.get(attribute)
return attribute_value if attribute_value is not None else default | Returns the value of an attribute, or a default if it's not defined
:param element: The XML Element object
:type element: etree._Element
:param attribute: The name of the attribute to evaluate
:type attribute: basestring
:param default: The default value to return if the attribute is not defined |
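A short usage sketch, assuming the element comes from `lxml.etree` as the type hint suggests:
from lxml import etree

elem = etree.fromstring('<node enabled="true"/>')
print(attribute(elem, "enabled"))           # 'true'
print(attribute(elem, "missing", "n/a"))    # falls back to the default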
377,051 | def _pwr_optfcn(df, loc):
I = _lambertw_i_from_v(df[], df[],
df[], df[loc], df[], df[])
return I * df[loc] | Function to find power from ``i_from_v``. |
377,052 | def geo_max_distance(left, right):
op = ops.GeoMaxDistance(left, right)
return op.to_expr() | Returns the 2-dimensional maximum distance between two geometries in
projected units. If g1 and g2 is the same geometry the function will
return the distance between the two vertices most far from each other
in that geometry
Parameters
----------
left : geometry
right : geometry
Returns
-------
MaxDistance : double scalar |
377,053 | def _isdictclass(obj):
c = getattr(obj, '__class__', None)
return c and c.__name__ in _dict_classes.get(c.__module__, ()) | Return True for known dict objects. |
377,054 | def mock(config_or_spec=None, spec=None, strict=OMITTED):
if type(config_or_spec) is dict:
config = config_or_spec
else:
config = {}
spec = config_or_spec
if strict is OMITTED:
strict = False if spec is None else True
class Dummy(_Dummy):
if spec:
__class__ = spec
def __getattr__(self, method_name):
if strict:
raise AttributeError(
" has no attribute %r configured" % method_name)
return functools.partial(
remembered_invocation_builder, theMock, method_name)
def __repr__(self):
name =
if spec:
name += spec.__name__
return "<%s id=%s>" % (name, id(self))
obj = Dummy()
theMock = Mock(Dummy, strict=strict, spec=spec)
for n, v in config.items():
if inspect.isfunction(v):
invocation.StubbedInvocation(theMock, n)(Ellipsis).thenAnswer(v)
else:
setattr(obj, n, v)
mock_registry.register(obj, theMock)
return obj | Create 'empty' objects ('Mocks').
Will create an empty unconfigured object, that you can pass
around. All interactions (method calls) will be recorded and can be
verified using :func:`verify` et.al.
A plain `mock()` will be not `strict`, and thus all methods regardless
of the arguments will return ``None``.
.. note:: Technically all attributes will return an internal interface.
Because of that a simple ``if mock().foo:`` will surprisingly pass.
If you set strict to ``True``: ``mock(strict=True)`` all unexpected
interactions will raise an error instead.
You configure a mock using :func:`when`, :func:`when2` or :func:`expect`.
You can also very conveniently just pass in a dict here::
response = mock({'text': 'ok', 'raise_for_status': lambda: None})
You can also create an empty Mock which is specced against a given
`spec`: ``mock(requests.Response)``. These mock are by default strict,
thus they raise if you want to stub a method, the spec does not implement.
Mockito will also match the function signature.
You can pre-configure a specced mock as well::
response = mock({'json': lambda: {'status': 'Ok'}},
spec=requests.Response)
Mocks are by default callable. Configure the callable behavior using
`when`::
dummy = mock()
when(dummy).__call__(1).thenReturn(2)
All other magic methods must be configured this way or they will raise an
AttributeError.
See :func:`verify` to verify your interactions after usage. |
377,055 | def cubic_bezier(document, coords):
"cubic bezier polyline"
element = document.createElement('path')
points = [(coords[i], coords[i+1]) for i in range(0, len(coords), 2)]
path = ["M%s %s" %points[0]]
for n in xrange(1, len(points), 3):
A, B, C = points[n:n+3]
path.append("C%s,%s %s,%s %s,%s" % (A[0], A[1], B[0], B[1], C[0], C[1]))
element.setAttribute('d', ' '.join(path))
return element | cubic bezier polyline |
377,056 | def shot_end_data(shot, role):
if role == QtCore.Qt.DisplayRole:
return str(shot.endframe) | Return the data for endframe
:param shot: the shot that holds the data
:type shot: :class:`jukeboxcore.djadapter.models.Shot`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the end
:rtype: depending on role
:raises: None |
377,057 | def array_map2(*referls,**kwargs):
map_func = kwargs['map_func']
if('map_func_args' in kwargs):
map_func_args = kwargs['map_func_args']
else:
map_func_args = []
length = referls.__len__()
rslt = []
anum = list(referls)[0].__len__()
for j in range(0,anum):
args = []
for i in range(0,length):
refl = referls[i]
args.append(refl[j])
args.extend(map_func_args)
v = map_func(*args)
rslt.append(v)
return(rslt) | obsolete, kept only for compatibility
from elist.elist import *
ol = [1,2,3,4]
refl1 = ['+','+','+','+']
refl2 = [7,7,7,7]
refl3 = ['=','=','=','=']
def map_func(ele,ref_ele1,ref_ele2,ref_ele3,prefix,suffix):
s = prefix+': ' + str(ele) + str(ref_ele1) + str(ref_ele2) + str(ref_ele3) + suffix
return(s)
####
rslt = array_map2(ol,refl1,refl2,refl3,map_func=map_func,map_func_args=['Q','?'])
pobj(rslt) |
377,058 | def do_types_overlap(schema, type_a, type_b):
if type_a is type_b:
return True
if is_abstract_type(type_a):
if is_abstract_type(type_b):
return any(
schema.is_possible_type(type_b, type_)
for type_ in schema.get_possible_types(type_a)
)
return schema.is_possible_type(type_a, type_b)
if is_abstract_type(type_b):
return schema.is_possible_type(type_b, type_a)
return False | Check whether two types overlap in a given schema.
Provided two composite types, determine if they "overlap". Two composite types
overlap when the Sets of possible concrete types for each intersect.
This is often used to determine if a fragment of a given type could possibly be
visited in a context of another type.
This function is commutative. |
377,059 | def getRgbdData(self):
self.lock.acquire()
data = self.data
self.lock.release()
return data | Returns last RgbdData.
@return last JdeRobotTypes Rgbd saved |
377,060 | def from_geometry(cls, molecule, do_orders=False, scaling=1.0):
from molmod.bonds import bonds
unit_cell = molecule.unit_cell
pair_search = PairSearchIntra(
molecule.coordinates,
bonds.max_length*bonds.bond_tolerance*scaling,
unit_cell
)
orders = []
lengths = []
edges = []
for i0, i1, delta, distance in pair_search:
bond_order = bonds.bonded(molecule.numbers[i0], molecule.numbers[i1], distance/scaling)
if bond_order is not None:
if do_orders:
orders.append(bond_order)
lengths.append(distance)
edges.append((i0,i1))
if do_orders:
result = cls(edges, molecule.numbers, orders, symbols=molecule.symbols)
else:
result = cls(edges, molecule.numbers, symbols=molecule.symbols)
slated_for_removal = set([])
threshold = 0.5**0.5
for c, ns in result.neighbors.items():
lengths_ns = []
for n in ns:
delta = molecule.coordinates[n] - molecule.coordinates[c]
if unit_cell is not None:
delta = unit_cell.shortest_vector(delta)
length = np.linalg.norm(delta)
lengths_ns.append([length, delta, n])
lengths_ns.sort(reverse=True, key=(lambda r: r[0]))
for i0, (length0, delta0, n0) in enumerate(lengths_ns):
for i1, (length1, delta1, n1) in enumerate(lengths_ns[:i0]):
if length1 == 0.0:
continue
cosine = np.dot(delta0, delta1)/length0/length1
if cosine > threshold:
slated_for_removal.add((c,n1))
lengths_ns[i1][0] = 0.0
mask = np.ones(len(edges), bool)
for i0, i1 in slated_for_removal:
edge_index = result.edge_index.get(frozenset([i0,i1]))
if edge_index is None:
raise ValueError( % (i0, i1))
mask[edge_index] = False
edges = [edges[i] for i in range(len(edges)) if mask[i]]
if do_orders:
bond_order = [bond_order[i] for i in range(len(bond_order)) if mask[i]]
result = cls(edges, molecule.numbers, orders)
else:
result = cls(edges, molecule.numbers)
lengths = [lengths[i] for i in range(len(lengths)) if mask[i]]
result.bond_lengths = np.array(lengths)
return result | Construct a MolecularGraph object based on interatomic distances
All short distances are computed with the binning module and compared
with a database of bond lengths. Based on this comparison, bonded
atoms are detected.
Before marking a pair of atoms A and B as bonded, it is also checked
that there is no third atom C somewhat between A and B.
When an atom C exists that is closer to B (than A) and the angle
A-B-C is less than 45 degrees, atoms A and B are not bonded.
Similarly if C is closer to A (than B) and the angle B-A-C is less
then 45 degrees, A and B are not connected.
Argument:
| ``molecule`` -- The molecule to derive the graph from
Optional arguments:
| ``do_orders`` -- set to True to estimate the bond order
| ``scaling`` -- scale the threshold for the connectivity. increase
this to 1.5 in case of transition states when a
fully connected topology is required. |
377,061 | def copy(self):
copy = ClusterGraph(self.edges())
if self.factors:
factors_copy = [factor.copy() for factor in self.factors]
copy.add_factors(*factors_copy)
return copy | Returns a copy of ClusterGraph.
Returns
-------
ClusterGraph: copy of ClusterGraph
Examples
-------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b'), ('b', 'c')])
>>> G.add_edge(('a', 'b'), ('b', 'c'))
>>> phi1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
>>> phi2 = DiscreteFactor(['b', 'c'], [2, 2], np.random.rand(4))
>>> G.add_factors(phi1, phi2)
>>> graph_copy = G.copy()
>>> graph_copy.factors
[<DiscreteFactor representing phi(a:2, b:2) at 0xb71b19cc>,
<DiscreteFactor representing phi(b:2, c:2) at 0xb4eaf3ac>]
>>> graph_copy.edges()
[(('a', 'b'), ('b', 'c'))]
>>> graph_copy.nodes()
[('a', 'b'), ('b', 'c')] |
377,062 | def get_flagged_args():
expected = ['os_type', 'os_version']
arguments = {}
try:
opts, adds = \
getopt.getopt(sys.argv, '', map(lambda x: x + "=", expected))
except getopt.GetoptError as Error:
print(str(Error))
print("Defaulting to standard run...")
return arguments
for o, a in opts:
opt = re.sub('-', '', o)
if opt in expected:
arguments[opt] = a
if arguments:
if 'os_type' not in arguments:
print("Unsupported means of operation!")
print("You can either specify both os_type and os_version " +
"or just os_type")
arguments = {}
return arguments | get_flagged_args
Collects from the execution statement the arguments provided to this script.
The items are then interpretted and returned. The object expected are the
KvP's:
--os_type - the operating system type to be built
--os_version - the operating system version to be built
NOTE: by not using these options, both Debian .deb and Redhat .rpm files are
generated for the operating systems and versions natively as set by a global
variable at the top of this script.
FURTHER NOTE: there should be a
dist_dir/Docker/<os_type>/<os_version>/DockerFile*
Present for this script to work.
CONFIGURATION: It is part of the standard for this script to run its own
configuration parameters to generate a:
dist_dir/scripts/config.JSON
This is a part of a separate script and is executed by this one in an effort
to make this code as translatable from project segment to segment. |
377,063 | def find_skew(self):
with _LeptonicaErrorTrap():
angle = ffi.new('float *', 0.0)
confidence = ffi.new('float *', 0.0)
result = lept.pixFindSkew(self._cdata, angle, confidence)
if result == 0:
return (angle[0], confidence[0])
else:
return (None, None) | Returns a tuple (deskew angle in degrees, confidence value).
Returns (None, None) if no angle is available. |
377,064 | def write_json(dictionary, filename):
with open(filename, 'w') as data_file:
json.dump(dictionary, data_file, indent=4, sort_keys=True)
print( + os.path.basename(filename)) | Write dictionary to JSON |
377,065 | def import_pyqt4(version=2):
import sip
if version is not None:
sip.setapi('QString', version)
sip.setapi('QVariant', version)
from PyQt4 import QtGui, QtCore, QtSvg
if not check_version(QtCore.PYQT_VERSION_STR, '4.7'):
raise ImportError("IPython requires PyQt4 >= 4.7, found %s" %
QtCore.PYQT_VERSION_STR)
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
version = sip.getapi('QString')
api = QT_API_PYQTv1 if version == 1 else QT_API_PYQT
return QtCore, QtGui, QtSvg, api | Import PyQt4
Parameters
----------
version : 1, 2, or None
Which QString/QVariant API to use. Set to None to use the system
default
ImportErrors raised within this function are non-recoverable |
377,066 | def role_create(auth=None, **kwargs):
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(keep_name=True, **kwargs)
return cloud.create_role(**kwargs) | Create a role
CLI Example:
.. code-block:: bash
salt '*' keystoneng.role_create name=role1
salt '*' keystoneng.role_create name=role1 domain_id=b62e76fbeeff4e8fb77073f591cf211e |
377,067 | def _deps_only_toggled(self, widget, data=None):
active = widget.get_active()
self.dir_name.set_sensitive(not active)
self.entry_project_name.set_sensitive(not active)
self.dir_name_browse_btn.set_sensitive(not active)
self.run_btn.set_sensitive(active or not self.project_name_shown or self.entry_project_name.get_text() != "") | Deactivates options when deps_only is toggled on and re-enables them otherwise |
377,068 | def _schema_get_docstring(starting_class):
for cls in inspect.getmro(starting_class):
if inspect.getdoc(cls):
return inspect.getdoc(cls) | Given a class, return its docstring.
If no docstring is present for the class, search base classes in MRO for a
docstring. |
377,069 | def _add_timedelta(self, delta):
if isinstance(delta, pendulum.Duration):
return self.add(
years=delta.years,
months=delta.months,
weeks=delta.weeks,
days=delta.remaining_days,
)
return self.add(days=delta.days) | Add timedelta duration to the instance.
:param delta: The timedelta instance
:type delta: pendulum.Duration or datetime.timedelta
:rtype: Date |
377,070 | def update_vip_request(self, vip_request, vip_request_id):
uri = % vip_request_id
data = dict()
data[] = list()
data[].append(vip_request)
return super(ApiVipRequest, self).put(uri, data) | Method to update vip request
param vip_request: vip_request object
param vip_request_id: vip_request id |
377,071 | def _handle_input_request(self, msg):
if self._hidden:
raise RuntimeError()
self.kernel_client.iopub_channel.flush()
def callback(line):
if not (len(self._control.history) > 0
and self._control.history[-1] == line):
cmd = line.split(" ")[0]
if "do_" + cmd not in dir(pdb.Pdb):
self._control.history.append(line)
if line.startswith():
line = line.split()[-1]
code = "__spy_code__ = get_ipython().run_cell()" % line
self.kernel_client.input(code)
else:
self.kernel_client.input(line)
if self._reading:
self._reading = False
self._readline(msg[][], callback=callback,
password=msg[][]) | Save history and add a %plot magic. |
377,072 | def paragraph(
self,
nb_sentences=3,
variable_nb_sentences=True,
ext_word_list=None):
if nb_sentences <= 0:
return ''
if variable_nb_sentences:
nb_sentences = self.randomize_nb_elements(nb_sentences, min=1)
para = self.word_connector.join(self.sentences(
nb_sentences, ext_word_list=ext_word_list,
))
return para | :returns: A single paragraph. For example: 'Sapiente sunt omnis. Ut
pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.'
Keyword arguments:
:param nb_sentences: around how many sentences the paragraph should contain
:param variable_nb_sentences: set to false if you want exactly ``nb``
sentences returned, otherwise the result may include a number of
sentences of ``nb`` +/-40% (with a minimum of 1)
:param ext_word_list: a list of words you would like to have instead of
'Lorem ipsum'.
:rtype: str |
377,073 | def datetime_from_iso_format(string):
match = DATE_ISO_REGEX.match(string)
if match:
date = datetime.datetime(year=int(match.group(DATE_ISO_YEAR_GRP)),
month=int(match.group(DATE_ISO_MONTH_GRP)),
day=int(match.group(DATE_ISO_DAY_GRP)),
hour=int(match.group(DATE_ISO_HOUR_GRP)),
second=int(match.group(DATE_ISO_SEC_GRP)),
minute=int(match.group(DATE_ISO_MIN_GRP)))
return date
else:
return None | Return a datetime object from an iso 8601 representation.
Return None if string is non conforming. |
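A hedged example; the exact layout accepted depends on `DATE_ISO_REGEX`, which is not shown in the snippet:
print(datetime_from_iso_format("2019-07-04T12:30:45"))  # datetime(2019, 7, 4, 12, 30, 45) if the regex matches
print(datetime_from_iso_format("not a date"))           # None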
377,074 | def next_frame_glow_hparams():
hparams = glow.glow_hparams()
hparams.add_hparam("gen_mode", "conditional")
hparams.add_hparam("learn_top_scale", False)
hparams.add_hparam("condition_all_levels", True)
hparams.add_hparam("num_train_frames", -1)
hparams.add_hparam("latent_dist_encoder", "conv_net")
hparams.add_hparam("num_cond_latents", 1)
hparams.add_hparam("latent_architecture", "glow_resnet")
hparams.add_hparam("latent_apply_dilations", False)
hparams.add_hparam("latent_dilation_rates", [1, 3])
hparams.add_hparam("model_input", False)
hparams.add_hparam("cond_first_frame", False)
hparams.add_hparam("latent_skip", True)
hparams.add_hparam("latent_encoder_depth", 2)
hparams.add_hparam("latent_encoder_width", 512)
hparams.add_hparam("latent_dropout", 0.0)
hparams.add_hparam("latent_pre_output_channels", 512)
hparams.add_hparam("latent_activation", "relu")
hparams.add_hparam("latent_noise", 0.0)
hparams.add_hparam("pretrain_steps", -1)
hparams.bottom = {
"inputs": modalities.video_raw_bottom,
"targets": modalities.video_raw_targets_bottom,
}
hparams.loss = {
"targets": modalities.video_l1_raw_loss,
}
hparams.top = {
"targets": modalities.video_raw_top,
}
hparams.init_batch_size = 256
hparams.batch_size = 32
hparams.top_prior = "single_conv"
return hparams | Hparams for next_frame_glow. |
377,075 | def __build_libxml2(target, source, env):
xsl_style = env.subst()
styledoc = libxml2.parseFile(xsl_style)
style = libxslt.parseStylesheetDoc(styledoc)
doc = libxml2.readFile(str(source[0]),None,libxml2.XML_PARSE_NOENT)
parampass = {}
if parampass:
result = style.applyStylesheet(doc, parampass)
else:
result = style.applyStylesheet(doc, None)
style.saveResultToFilename(str(target[0]), result, 0)
style.freeStylesheet()
doc.freeDoc()
result.freeDoc()
return None | General XSLT builder (HTML/FO), using the libxml2 module. |
377,076 | def getComment(self, repo_user, repo_name, comment_id):
return self.api.makeRequest(
[, repo_user, repo_name,
, , str(comment_id)]) | GET /repos/:owner/:repo/pull/comments/:number
:param comment_id: The review comment's ID. |
377,077 | def transform(source):
source = extract_transformers_from_source(source)
not_done = transformers
while True:
failed = {}
for name in not_done:
tr_module = import_transformer(name)
try:
source = tr_module.transform_source(source)
except Exception as e:
failed[name] = tr_module
if not failed:
break
if failed == not_done:
print("Warning: the following transforms could not be done:")
for key in failed:
print(key)
break
not_done = failed
return source | Used to convert the source code, making use of known transformers.
"transformers" are modules which must contain a function
transform_source(source)
which returns a transformed source.
Some transformers (for example, those found in the standard library
module lib2to3) cannot cope with non-standard syntax; as a result, they
may fail during a first attempt. We keep track of all failing
transformers and keep retrying them until either they all succeeded
or a fixed set of them fails twice in a row. |
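A minimal sketch of the transformer contract described above: any importable module that exposes `transform_source(source)` can be registered (the module name and replacement below are illustrative only):
# french_keywords.py -- toy transformer module
def transform_source(source):
    """Rewrite a made-up keyword into real Python before the code is run."""
    return source.replace("afficher(", "print(")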
377,078 | def get(self, name, default=None):
if self.vars is None:
return default
return self.vars.get(name, default) | Returns the value of the given variable, or the given default
value if the variable is not defined.
:type name: string
:param name: The name of the variable.
:type default: object
:param default: The default value.
:rtype: object
:return: The value of the variable. |
377,079 | def _get_url(self, resource, item, sys_id=None):
url_str = % (
{
: self.base_url,
: self.base_path,
: resource,
: item
}
)
if sys_id:
return "%s/%s" % (url_str, sys_id)
return url_str | Takes table and sys_id (if present), and returns a URL
:param resource: API resource
:param item: API resource item
:param sys_id: Record sys_id
:return:
- url string |
377,080 | def get_resource(self, name=None, store=None, workspace=None):
resources = self.get_resources(names=name, stores=store, workspaces=workspace)
return self._return_first_item(resources) | returns a single resource object.
Will return None if no resource is found.
Will raise an error if more than one resource with the same name is found. |
377,081 | def load_csv(ctx, model, path, header=None, header_exclude=None, **fmtparams):
if not os.path.isabs(path):
if ctx.options.odoo_data_path:
path = os.path.join(ctx.options.odoo_data_path, path)
else:
raise AnthemError(
)
with open(path, ) as data:
load_csv_stream(ctx, model, data,
header=header, header_exclude=header_exclude,
**fmtparams) | Load a CSV from a file path.
:param ctx: Anthem context
:param model: Odoo model name or model klass from env
:param path: absolute or relative path to CSV file.
If a relative path is given you must provide a value for
`ODOO_DATA_PATH` in your environment
or set `--odoo-data-path` option.
:param header: whitelist of CSV columns to load
:param header_exclude: blacklist of CSV columns to not load
:param fmtparams: keyword params for `csv_unireader`
Usage example::
from pkg_resources import Requirement, resource_string
req = Requirement.parse('my-project')
load_csv(ctx, ctx.env['res.users'],
resource_string(req, 'data/users.csv'),
delimiter=',') |
377,082 | def Vgg19_simple_api(rgb):
start_time = time.time()
print("build model started")
rgb_scaled = rgb * 255.0
red, green, blue = tf.split(rgb_scaled, 3, 3)
if red.get_shape().as_list()[1:] != [224, 224, 1]:
raise Exception("image size unmatch")
if green.get_shape().as_list()[1:] != [224, 224, 1]:
raise Exception("image size unmatch")
if blue.get_shape().as_list()[1:] != [224, 224, 1]:
raise Exception("image size unmatch")
bgr = tf.concat([
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
], axis=3)
if bgr.get_shape().as_list()[1:] != [224, 224, 3]:
raise Exception("image size unmatch")
# Layer names and padding follow the standard VGG-19 naming convention.
net_in = InputLayer(bgr, name='input')
net = Conv2d(net_in, 64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv1_1')
net = Conv2d(net, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv1_2')
net = MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool1')
net = Conv2d(net, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv2_1')
net = Conv2d(net, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv2_2')
net = MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool2')
net = Conv2d(net, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_1')
net = Conv2d(net, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_2')
net = Conv2d(net, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_3')
net = Conv2d(net, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_4')
net = MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool3')
net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_1')
net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_2')
net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_3')
net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_4')
net = MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool4')
net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_1')
net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_2')
net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_3')
net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_4')
net = MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool5')
net = FlattenLayer(net, name='flatten')
net = DenseLayer(net, n_units=4096, act=tf.nn.relu, name='fc6')
net = DenseLayer(net, n_units=4096, act=tf.nn.relu, name='fc7')
net = DenseLayer(net, n_units=1000, act=None, name='fc8')
print("build model finished: %fs" % (time.time() - start_time))
return net | Build the VGG 19 Model
Parameters
-----------
rgb : rgb image placeholder [batch, height, width, 3] values scaled [0, 1] |
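A minimal usage sketch, assuming the TensorFlow 1.x / TensorLayer 1.x APIs the function is written against (the placeholder name is illustrative):

x = tf.placeholder(tf.float32, [None, 224, 224, 3], name='input_images')  # values scaled to [0, 1]
net = Vgg19_simple_api(x)
probs = tf.nn.softmax(net.outputs)  # softmax over the 1000-unit fc8 output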
377,083 | def add_price_entity(self, price: dal.Price):
from decimal import Decimal
repo = self.get_price_repository()
existing = (
repo.query
.filter(dal.Price.namespace == price.namespace)
.filter(dal.Price.symbol == price.symbol)
.filter(dal.Price.date == price.date)
.filter(dal.Price.time == price.time)
.first()
)
if existing:
new_value = Decimal(price.value) / Decimal(price.denom)
self.logger.info(f"Exists: {price}")
if price.currency != existing.currency:
raise ValueError(
f"The currency is different for price {price}!")
if existing.value != price.value:
existing.value = price.value
self.logger.info(f"Updating to {new_value}.")
if existing.denom != price.denom:
existing.denom = price.denom
else:
self.session.add(price)
self.logger.info(f"Added {price}") | Adds the price |
377,084 | def update_payload(self, fields=None):
payload = super(ProvisioningTemplate, self).update_payload(fields)
if 'template_combinations' in payload:  # key names follow nailgun's ProvisioningTemplate convention
    payload['template_combinations_attributes'] = payload.pop(
        'template_combinations')
return {u'provisioning_template': payload} | Wrap submitted data within an extra dict. |
377,085 | def insert_sections_some(ol,*secs,**kwargs):
if 'mode' in kwargs:
    mode = kwargs["mode"]
else:
    mode = "new"
loc = kwargs['index']
secs = list(secs)
secs = [concat(*secs)]
locs = [loc]
return(insert_sections_many(ol,secs,locs,mode=mode)) | ol = initRange(0,20,1)
ol
loc = 6
rslt = insert_sections_some(ol,['a','a','a'],['c','c','c','c'],index=loc)
rslt
#### |
377,086 | def rmswidth(self, floor=0):
mywaveunits = self.waveunits.name
self.convert('angstroms')  # the calculation is carried out in Angstroms
wave = self.wave
thru = self.throughput
self.convert(mywaveunits)
if floor != 0:
idx = N.where(thru >= floor)
wave = wave[idx]
thru = thru[idx]
integrand = (wave-self.avgwave())**2 * thru
num = self.trapezoidIntegration(wave, integrand)
den = self.trapezoidIntegration(wave, thru)
if 0.0 in (num, den):
return 0.0
else:
ans = math.sqrt(num/den)
return ans | Calculate :ref:`pysynphot-formula-rmswidth`.
Parameters
----------
floor : float
Throughput values equal or below this threshold are not
included in the calculation. By default (0), all points
are included.
Returns
-------
ans : float
RMS band width. |
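Written out, the quantity the method computes is the RMS band width about the average wavelength (a sketch of the referenced formula):

\lambda_{\mathrm{rms}} = \sqrt{ \frac{\int (\lambda - \bar{\lambda})^{2}\, T(\lambda)\, d\lambda}{\int T(\lambda)\, d\lambda} }

where \bar{\lambda} is avgwave() and T is the throughput; both integrals are evaluated with trapezoidIntegration over the (optionally floored) samples.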
377,087 | def collapse( self, direction ):
if ( self.isCollapsed() ):
return False
splitter = self.parent()
if ( not splitter ):
return False
sizes = splitter.sizes()
handles = [splitter.handle(i) for i in range(len(sizes))]
index = handles.index(self)
self.markCollapsed(direction, sizes)
if ( direction == XSplitterHandle.CollapseDirection.Before ):
    sizes = [0 for _ in range(index)] + sizes[index:]
else:
    sizes = sizes[:index] + [0 for _ in range(index, len(sizes))]
splitter.setSizes(sizes)
return True | Collapses this splitter handle before or after other widgets based on \
the inputed CollapseDirection.
:param direction | <XSplitterHandle.CollapseDirection>
:return <bool> | success |
377,088 | def tail(
self, line_prefix=None, callback=None, output_callback=None, stop_callback=lambda x: False,
timeout=None
):
output_callback = output_callback if output_callback else self.output_callback
| This function takes control of an SSH channel and displays line
by line of output as \n is received. This function is specifically
made for tail-like commands.
:param line_prefix: Text to append to the left of each line of output.
This is especially useful if you are using my
MultiSSH class to run tail commands over multiple
servers.
:param callback: You may optionally supply a callback function which
takes two parameters. The first is the line prefix
and the second is the current line of output. The
callback should return the string that is to be
displayed (including the \n character). This allows
users to grep the output or manipulate it as
required.
:param output_callback: A function used to print ssh output. Printed to stdout
by default. A user-defined logger may be passed like
output_callback=lambda m: mylog.debug(m)
:param stop_callback: A function used to stop the tail; when the function returns
True the tail will stop. By default stop_callback=lambda x: False
:param timeout: how much time to wait for data; defaults to None, which
means almost forever. |
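A hypothetical usage sketch; the session object, prefix and marker strings are illustrative:

ssh.tail(
    line_prefix='web01: ',
    callback=lambda prefix, line: prefix + line,            # prepend the host name to every line
    output_callback=lambda m: print(m, end=''),             # or route to a logger: lambda m: mylog.debug(m)
    stop_callback=lambda line: 'server stopped' in line,    # stop tailing once this marker appears
    timeout=300,
)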
377,089 | def setParametersFromFile(dna, filename, parameters=None, bp=None):
gotParameterList = False
param_type = None
if parameters is None:
parameters = checkParametersInputFile(filename)
if parameters is None:
raise AssertionError(" Cannot determine the parameters name from file {0}.".format(filename))
if isinstance(parameters, list) or isinstance(parameters, np.ndarray):
gotParameterList = True
parameter = list(parameters)
param_type = getParameterType(parameter[0])
else:
param_type = getParameterType(parameters)
if bp is None:
if param_type == 'bps':  # base-step parameters
bp = [dna.startBP, dna.num_step]
else:
bp = [dna.startBP, dna.num_bp]
if len(bp) == 1:
bp_range = False
else:
bp_range = True
if not gotParameterList:
tempParamName = parameters
inputParameter = [ parameters ]
else:
tempParamName = parameters[0]
inputParameter = parameter
sys.stdout.write('\nLoading parameters: {0} ...\n'.format(inputParameter))
success = False
if tempParamName in basePairParameters:
dna.set_base_pair_parameters(filename, bp, parameters=inputParameter, bp_range=bp_range)
success = True
if tempParamName in baseStepParameters:
dna.set_base_step_parameters(filename, bp, parameters=inputParameter, step_range=bp_range, helical=False)
success = True
if tempParamName in helicalBaseStepParameters:
dna.set_base_step_parameters(filename, bp, parameters=inputParameter, step_range=bp_range, helical=True)
success = True
if tempParamName in groovesParameters:
dna.set_major_minor_groove(filename, bp, parameters=inputParameter, step_range=bp_range)
success = True
if tempParamName in backboneDihedrals:
dna.set_backbone_dihedrals(filename, bp, parameters=inputParameter, bp_range=bp_range)
success = True
if tempParamName in helicalRadiusParameters:
dna.set_helical_radius(filename, bp, full=True, bp_range=bp_range)
success = True
if tempParamName in helicalAxisParameters:
if len(bp) == 1:
raise AssertionError("Axis cannot be read for a single base-step.\n Use a segment spanned over several basepairs.")
dna.set_helical_axis(filename, step_range=True, step=bp)
success = True
if not success:
raise ValueError('Cannot load parameters: {0}.'.format(inputParameter)) | Read a specific parameter from the do_x3dna output file.
It automatically load the input parameter from a file to dna object or HDF5 file.
It automatically decides from input parameter, what will be format of input file.
Parameters
----------
dna : :class:`DNA`
Input :class:`DNA` instance.
filename : str
Input filename. This file should be output from do_x3dna.
parameter : str, list, None
Name of parameter. For details about accepted keywords, see ``parameter`` in the method :meth:`DNA.get_parameters`.
Note that parameters that are calculated from do_x3dna output cannot be used here.
In case of ``None``, parameter names will be automatically determined from the input file.
bp : list
List containing lower and higher limit of base-pair/step range.
* This list should not contain more than two numbers.
* First number should be less than second number.
Example for base-pairs/steps 4 to 15:
``bp = [4,15] # step_range = True``
If ``None``, all base-pairs/steps will be considered. |
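A usage sketch; the DNA object construction, file name and parameter names are illustrative, not from the source:

dna = DNA(60)  # assumed constructor for a 60 base-pair DNA object
setParametersFromFile(dna, 'L-BPS_g.dat', parameters=['rise', 'twist'], bp=[4, 15])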
377,090 | def load_project_definition(path: str) -> dict:
source_path = get_project_source_path(path)
if not os.path.exists(source_path):
    raise FileNotFoundError('Missing project file: {}'.format(source_path))
with open(source_path, 'r') as f:
    out = json.load(f)
project_folder = os.path.split(os.path.dirname(source_path))[-1]
if 'name' not in out or not out['name']:
    out['name'] = project_folder
return out | Load the cauldron.json project definition file for the given path. The
path can be either a source path to the cauldron.json file or the source
directory where a cauldron.json file resides.
:param path:
The source path or directory where the definition file will be loaded |
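A minimal usage sketch (the directory path is illustrative):

definition = load_project_definition('/home/user/cauldron/my-project')
print(definition['name'])  # falls back to the project folder name when unset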
377,091 | def reply_message(self, message_url, body):
id = re.findall(r, message_url)[0]
api =
url = api % id
data = {
: body
}
response = self.request(url, 'post', data=data)  # HTTP method assumed from the data payload
return response.json()[] == 0 | Reply to a private on-site message.
:param message_url: page URL of the message
:param body: reply content (must not exceed 1024 characters) |
377,092 | def blockvisit(self, nodes, frame):
if frame.buffer is None:
    self.writeline('if 0: yield None')
else:
    self.writeline('pass')
try:
for node in nodes:
self.visit(node, frame)
except CompilerExit:
pass | Visit a list of nodes as block in a frame. If the current frame
is no buffer a dummy ``if 0: yield None`` is written automatically
unless the force_generator parameter is set to False. |
377,093 | def _check_pillar(kwargs, pillar=None):
if kwargs.get('force'):
    return True
pillar_dict = pillar if pillar is not None else __pillar__
if '_errors' in pillar_dict:
return False
return True | Check the pillar for errors, refuse to run the state if there are errors
in the pillar and return the pillar errors |
377,094 | def _generate_create_dict(self,
hostname=None,
domain=None,
flavor=None,
router=None,
datacenter=None,
hourly=True):
package = self._get_package()
item = self._get_item(package, flavor)
# Key names follow the SoftLayer dedicated-host ordering container format.
location = self._get_location(package['regions'], datacenter)
price = self._get_price(item)
routers = self._get_backend_router(
    location['location']['locationPackageDetails'], item)
router = self._get_default_router(routers, router)
hardware = {
    'hostname': hostname,
    'domain': domain,
    'primaryBackendNetworkComponent': {
        'router': {
            'id': router
        }
    }
}
complex_type = "SoftLayer_Container_Product_Order_Virtual_DedicatedHost"
order = {
    "complexType": complex_type,
    "quantity": 1,
    'location': location['keyname'],
    'packageId': package['id'],
    'prices': [{'id': price}],
    'hardware': [hardware],
    'useHourlyPricing': hourly,
}
return order | Translates args into a dictionary for creating a dedicated host. |
377,095 | def get_gender(data):
g = str(dd.get_gender(data))
if g and str(g).lower() in ["male", "m", "1"]:
return "male"
elif g and str(g).lower() in ["female", "f", "2"]:
return "female"
else:
return "unknown" | Retrieve gender from metadata, codified as male/female/unknown. |
377,096 | def write_yum_repo(content, filename='local.repo'):  # default filename is a placeholder
repo_path = os.path.join('/etc/yum.repos.d/', filename)
if not isinstance(content, str):
content = content.decode()
write_file(repo_path, content.encode()) | add yum repo file in /etc/yum.repos.d/ |
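A usage sketch; the repository id and URL are placeholders:

repo = (
    "[example-repo]\n"
    "name=Example Repo\n"
    "baseurl=http://mirror.example.com/el7/\n"
    "enabled=1\n"
    "gpgcheck=0\n"
)
write_yum_repo(repo, filename='example.repo')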
377,097 | def task_add(self, description, tags=None, **kw):
task = self._stub_task(description, tags, **kw)
return task  # returns the stub task created above | Add a new task.
Takes any of the keywords allowed by taskwarrior like proj or prior. |
377,098 | def collect_results():
results = []
for exe, backendname in EXE_BACKEND_MATRIX:
results.extend(benchmark_process_and_backend(exe, backendname))
results.extend(benchmark_go())
results.sort(
key=lambda br: (br.benchmark, float(br.time), br.platform, br.backend))
return results | Runs all platforms/backends/benchmarks and returns as list of
BenchmarkResults, sorted by benchmark and time taken. |
377,099 | def phases_with(self, **kwargs) -> [PhaseOutput]:
return [phase for phase in self.phases if
all([getattr(phase, key) == value for key, value in kwargs.items()])] | Filters phases. If no arguments are passed all phases are returned. Arguments must be key value pairs, with
phase, data or pipeline as the key.
Parameters
----------
kwargs
Filters, e.g. pipeline=pipeline1 |