Unnamed: 0 | code | docstring
---|---|---
18,700 | def is_byte_range_valid(start, stop, length):
if (start is None) != (stop is None):
return False
elif start is None:
return length is None or length >= 0
elif length is None:
return 0 <= start < stop
elif start >= stop:
return False
return 0 <= start < length | Checks if a given byte content range is valid for the given length.
.. versionadded:: 0.7 |
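A brief usage sketch for the row above (the import path is an assumption; the function ships with Werkzeug's HTTP helpers):
from werkzeug.http import is_byte_range_valid  # assumed import location
print(is_byte_range_valid(0, 500, 1000))      # True: start < stop and 0 <= start < length
print(is_byte_range_valid(500, 400, 1000))    # False: start >= stop
print(is_byte_range_valid(None, None, None))  # True: no range and no length is still valid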
18,701 | def _build_specs(self, specs, kwargs, fp_precision):
if specs is None:
overrides = param.ParamOverrides(self, kwargs,
allow_extra_keywords=True)
extra_kwargs = overrides.extra_keywords()
kwargs = dict([(k,v) for (k,v) in kwargs.items()
if k not in extra_kwargs])
rounded_specs = list(self.round_floats([extra_kwargs],
fp_precision))
if extra_kwargs=={}: return [], kwargs, True
else: return rounded_specs, kwargs, False
return list(self.round_floats(specs, fp_precision)), kwargs, True | Returns the specs, the remaining kwargs and whether or not the
constructor was called with kwarg or explicit specs. |
18,702 | def process_tags(self, user, msg, reply, st=[], bst=[], depth=0, ignore_object_errors=True):
stars = ['']
stars.extend(st)
botstars = ['']
botstars.extend(bst)
if len(stars) == 1:
stars.append("undefined")
if len(botstars) == 1:
botstars.append("undefined")
matcher = re.findall(RE.reply_array, reply)
for match in matcher:
name = match
if name in self.master._array:
result = "{random}" + "|".join(self.master._array[name]) + "{/random}"
else:
result = "\x00@" + name + "\x00"
reply = reply.replace("(@"+name+")", result)
reply = re.sub(RE.ph_array, r'(@\1)', reply)
reply = reply.replace('<person>', '{person}<star>{/person}')
reply = reply.replace('<@>', '{@<star>}')
reply = reply.replace('<formal>', '{formal}<star>{/formal}')
reply = reply.replace('<sentence>', '{sentence}<star>{/sentence}')
reply = reply.replace('<uppercase>', '{uppercase}<star>{/uppercase}')
reply = reply.replace('<lowercase>', '{lowercase}<star>{/lowercase}')
reply = re.sub(RE.weight, '', reply)
if len(stars) > 0:
reply = reply.replace('<star>', text_type(stars[1]))
reStars = re.findall(RE.star_tags, reply)
for match in reStars:
if int(match) < len(stars):
reply = reply.replace('<star{match}>'.format(match=match), text_type(stars[int(match)]))
if len(botstars) > 0:
reply = reply.replace('<botstar>', botstars[1])
reStars = re.findall(RE.botstars, reply)
for match in reStars:
if int(match) < len(botstars):
reply = reply.replace('<botstar{match}>'.format(match=match), text_type(botstars[int(match)]))
history = self.master.get_uservar(user, "__history__")
if type(history) is not dict:
history = self.default_history()
reply = reply.replace('<input>', history['input'][0])
reply = reply.replace('<reply>', history['reply'][0])
reInput = re.findall(RE.input_tags, reply)
for match in reInput:
reply = reply.replace('<input{match}>'.format(match=match),
history['input'][int(match) - 1])
reReply = re.findall(RE.reply_tags, reply)
for match in reReply:
reply = reply.replace('<reply{match}>'.format(match=match),
history['reply'][int(match) - 1])
reply = reply.replace('<id>', user)
reply = reply.replace('\\s', ' ')
reply = reply.replace('\\n', "\n")
reply = reply.replace('\\#', '#')
reRandom = re.findall(RE.random_tags, reply)
for match in reRandom:
output = ''
if '|' in match:
output = utils.random_choice(match.split('|'))
else:
output = utils.random_choice(match.split(' '))
reply = reply.replace('{{random}}{match}{{/random}}'.format(match=match), output, 1)
for item in ['person', 'formal', 'sentence', 'uppercase', 'lowercase']:
matcher = re.findall(r'\{' + item + r'\}(.+?)\{/' + item + r'\}', reply)
for match in matcher:
output = None
if item == 'person':
output = self.substitute(match, "person")
else:
output = utils.string_format(match, item)
reply = reply.replace('{{{item}}}{match}{{/{item}}}'.format(item=item, match=match), output)
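# Math tags (<add>, <sub>, <mult>, <div>): apply the arithmetic to a numeric user variable; tag, var, value and curv are set by the surrounding tag-handling code.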
try:
orig = int(curv)
new = 0
if tag == "add":
new = orig + value
elif tag == "sub":
new = orig - value
elif tag == "mult":
new = orig * value
elif tag == "div":
new = orig // value
self.master.set_uservar(user, var, new)
except:
insert = "[ERR: Math couldn{}{}{!s topic to " + match)
self.master.set_uservar(user, "topic", match)
reply = reply.replace(.format(match=match), )
reRedir = re.findall(RE.redir_tag, reply)
for match in reRedir:
self.say("Redirect to " + match)
at = match.strip()
subreply = self._getreply(user, at, step=(depth + 1))
reply = reply.replace('{{@{match}}}'.format(match=match), subreply)
reply = reply.replace("{__call__}", "<call>")
reply = reply.replace("{/__call__}", "</call>")
reCall = re.findall(r'<call>(.+?)</call>', reply)
for match in reCall:
parts = re.split(RE.ws, match)
output = ''
obj = parts[0]
args = []
if len(parts) > 1:
args = parts[1:]
if obj in self.master._objlangs:
lang = self.master._objlangs[obj]
if lang in self.master._handlers:
try:
output = self.master._handlers[lang].call(self.master, obj, user, args)
except python.PythonObjectError as e:
self.warn(str(e))
if not ignore_object_errors:
raise ObjectError(str(e))
output = RS_ERR_OBJECT
else:
if not ignore_object_errors:
raise ObjectError(RS_ERR_OBJECT_HANDLER)
output = RS_ERR_OBJECT_HANDLER
else:
if not ignore_object_errors:
raise ObjectError(RS_ERR_OBJECT_MISSING)
output = RS_ERR_OBJECT_MISSING
reply = reply.replace('<call>{match}</call>'.format(match=match), output)
return reply | Post process tags in a message.
:param str user: The user ID.
:param str msg: The user's formatted message.
:param str reply: The raw RiveScript reply for the message.
:param []str st: The array of ``<star>`` matches from the trigger.
:param []str bst: The array of ``<botstar>`` matches from a
``%Previous`` command.
:param int depth: The recursion depth counter.
:param bool ignore_object_errors: Whether to ignore errors in Python
object macros instead of raising an ``ObjectError`` exception.
:return str: The final reply after tags have been processed. |
18,703 | def run(self, module, post_check):
try:
_cwd = os.getcwd()
_sys_path = list(sys.path)
_sys_argv = list(sys.argv)
sys.path.insert(0, os.path.dirname(self._path))
sys.argv = [os.path.basename(self._path)] + self._argv
exec(self._code, module.__dict__)
post_check()
except Exception as e:
self._failed = True
self._error_detail = traceback.format_exc()
_exc_type, _exc_value, exc_traceback = sys.exc_info()
filename, line_number, func, txt = traceback.extract_tb(exc_traceback)[-1]
self._error = "%s\nFile \"%s\", line %d, in %s:\n%s" % (str(e), os.path.basename(filename), line_number, func, txt)
finally:
os.chdir(_cwd)
sys.path = _sys_path
sys.argv = _sys_argv
self.ran = True | Execute the configured source code in a module and run any post
checks.
Args:
module (Module) : a module to execute the configured code in.
post_check(callable) : a function that can raise an exception
if expected post-conditions are not met after code execution. |
18,704 | def merge_extras(items, config):
final = {}
for extra_name in items[0]["disambiguate"].keys():
in_files = []
for data in items:
in_files.append(data["disambiguate"][extra_name])
out_file = "%s-allmerged%s" % os.path.splitext(in_files[0])
if in_files[0].endswith(".bam"):
merged_file = merge.merge_bam_files(in_files, os.path.dirname(out_file), items[0],
out_file=out_file)
else:
assert extra_name == "summary", extra_name
merged_file = _merge_summary(in_files, out_file, items[0])
final[extra_name] = merged_file
out = []
for data in items:
data["disambiguate"] = final
out.append([data])
return out | Merge extra disambiguated reads into a final BAM file. |
18,705 | def _get_raw_xsrf_token(self) -> Tuple[Optional[int], bytes, float]:
if not hasattr(self, "_raw_xsrf_token"):
cookie = self.get_cookie("_xsrf")
if cookie:
version, token, timestamp = self._decode_xsrf_token(cookie)
else:
version, token, timestamp = None, None, None
if token is None:
version = None
token = os.urandom(16)
timestamp = time.time()
assert token is not None
assert timestamp is not None
self._raw_xsrf_token = (version, token, timestamp)
return self._raw_xsrf_token | Read or generate the xsrf token in its raw form.
The raw_xsrf_token is a tuple containing:
* version: the version of the cookie from which this token was read,
or None if we generated a new token in this request.
* token: the raw token data; random (non-ascii) bytes.
* timestamp: the time this token was generated (will not be accurate
for version 1 cookies) |
18,706 | def group_add(self, name='Ungrouped'):
if not hasattr(self, name):
self.__dict__[name] = Group(self, name)
self.loaded_groups.append(name) | Dynamically add a group instance to the system if not exist.
Parameters
----------
name : str, optional ('Ungrouped' as default)
Name of the group
Returns
-------
None |
18,707 | def to_json(data, filename='data.json', indent=4):  # 'data.json' is an assumed default file name
with open(filename, 'w') as f:
f.write(json.dumps(data, indent=indent)) | Write an object to a json file
:param data: The object
:param filename: The name of the file
:param indent: The indentation of the file
:return: None |
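A minimal usage sketch for to_json above (the file name is just an illustrative choice):
payload = {"name": "example", "values": [1, 2, 3]}
to_json(payload, filename="payload.json", indent=2)  # writes pretty-printed JSON to payload.json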
18,708 | def send_once(remote, codes, count=None, device=None, address=None):
args = ['send_once', remote] + codes
_call(args, count, device, address) | All parameters are passed to irsend. See the man page for irsend
for details about their usage.
Parameters
----------
remote: str
codes: [str]
count: int
device: str
address: str
Notes
-----
No attempt is made to catch or handle errors. See the documentation
for subprocess.check_output to see the types of exceptions it may raise. |
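A hedged usage sketch; the remote and key names below are placeholders and must match the local lircd configuration:
send_once('living_room_tv', ['KEY_POWER'])                    # send one code once
send_once('living_room_tv', ['KEY_VOLUMEUP'], count=3,
          device='/var/run/lirc/lircd')                       # repeat a code on a specific socket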
18,709 | def POST(self):
if self._POST is None:
save_env = dict()
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ:
save_env[key] = self.environ[key]
save_env['QUERY_STRING'] = ''
if TextIOWrapper:
fb = TextIOWrapper(self.body, encoding='ISO-8859-1')
else:
fb = self.body
data = cgi.FieldStorage(fp=fb, environ=save_env)
self._POST = MultiDict()
for item in data.list:
self._POST[item.name] = item if item.filename else item.value
return self._POST | The HTTP POST body parsed into a MultiDict.
This supports urlencoded and multipart POST requests. Multipart
is commonly used for file uploads and may result in some of the
values being cgi.FieldStorage objects instead of strings.
Multiple values per key are possible. See MultiDict for details. |
18,710 | def crossdomain(f):
@wraps(f)
def decorator(self, *args, **kwargs):
if not self.cors_enabled and 'Origin' in request.headers:
return self._make_response(405, "CORS request rejected")
resp = f(self, *args, **kwargs)
h = resp.headers
current_app.logger.debug("Request Headers: {}".format(request.headers))
# config key names below ('allow_methods', 'max_age', 'blacklist', 'whitelist', 'allow_headers') are assumed
allowed_methods = self.cors_config['allow_methods'] + ["OPTIONS"]
h['Access-Control-Allow-Methods'] = ", ".join(allowed_methods)
h['Access-Control-Max-Age'] = self.cors_config.get('max_age', 21600)
hostname = urlparse(request.headers['Origin']).netloc \
if 'Origin' in request.headers else request.headers['Host']
if hostname in self.cors_config.get('blacklist', []):
return self._make_response(405, "CORS request blacklisted")
if self.cors_config.get('whitelist', None) is not None and \
hostname not in self.cors_config.get('whitelist', None):
return self._make_response(405, "CORS request refused")
if 'Origin' in request.headers:
h['Access-Control-Allow-Origin'] = request.headers['Origin']
if 'Access-Control-Request-Headers' in request.headers:
if self.cors_config.get('allow_headers', None) is None:
allowed_headers = \
request.headers.get('Access-Control-Request-Headers', "*")
else:
allowed_headers = []
for k in request.headers.get(
'Access-Control-Request-Headers', []):
if k in self.cors_config.get('allow_headers', []):
allowed_headers.append(k)
allowed_headers = " ,".join(allowed_headers)
h['Access-Control-Allow-Headers'] = allowed_headers
return resp
return decorator | This decorator sets the rules for the crossdomain request per http
method. The settings are taken from the actual resource itself, and
returned as per the CORS spec.
All CORS requests are rejected if the resource's `allow_methods`
doesn't include the 'OPTIONS' method. |
18,711 | def _expand_variable_match(positional_vars, named_vars, match):
positional = match.group("positional")
name = match.group("name")
if name is not None:
try:
return six.text_type(named_vars[name])
except KeyError:
raise ValueError(
"Named variable not specified and needed by template "
"`{}` at position {}".format(name, match.string, match.start())
)
elif positional is not None:
try:
return six.text_type(positional_vars.pop(0))
except IndexError:
raise ValueError(
"Positional variable not specified and needed by template "
"`{}` at position {}".format(match.string, match.start())
)
else:
raise ValueError("Unknown template expression {}".format(match.group(0))) | Expand a matched variable with its value.
Args:
positional_vars (list): A list of positional variables. This list will
be modified.
named_vars (dict): A dictionary of named variables.
match (re.Match): A regular expression match.
Returns:
str: The expanded variable to replace the match.
Raises:
ValueError: If a positional or named variable is required by the
template but not specified or if an unexpected template expression
is encountered. |
18,712 | def hash_str(data, hasher=None):
hasher = hasher or hashlib.sha1()
hasher.update(data)
return hasher | Checksum hash a string. |
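A short usage sketch; the helper returns the hasher object, so call hexdigest() on the result and pass bytes:
import hashlib
sha1_hex = hash_str(b"hello world").hexdigest()
md5_hex = hash_str(b"hello world", hasher=hashlib.md5()).hexdigest()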
18,713 | def assign_objective_requisite(self, objective_id=None, requisite_objective_id=None):
if objective_id is None or requisite_objective_id is None:
raise NullArgument()
ors = ObjectiveRequisiteSession(self._objective_bank_id, runtime=self._runtime)
ids_arg = {: []}
for objective in ors.get_requisite_objectives(objective_id):
if objective.get_id() == requisite_objective_id:
raise AlreadyExists()
ids_arg[].append(str(objective.get_id()))
ids_arg[].append(str(requisite_objective_id))
url_path = construct_url(,
bank_id=self._catalog_idstr,
obj_id=objective_id)
try:
result = self._put_request(url_path, ids_arg)
except Exception:
raise
id_list = list()
for identifier in result[]:
id_list.append(Id(idstr=identifier))
return id_objects.IdList(id_list) | Creates a requirement dependency between two Objectives.
arg: objective_id (osid.id.Id): the Id of the dependent
Objective
arg: requisite_objective_id (osid.id.Id): the Id of the
required Objective
raise: AlreadyExists - objective_id already mapped to
requisite_objective_id
raise: NotFound - objective_id or requisite_objective_id not
found
raise: NullArgument - objective_id or requisite_objective_id is
null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented. |
18,714 | def sunion(self, keys, *args):
func = lambda left, right: left.union(right)
return self._apply_to_sets(func, "SUNION", keys, *args) | Emulate sunion. |
18,715 | def verify_submit(self, job_ids, timeout, delay, **kwargs):
if self.skip:
return False
jobs = self.wait_for_jobs(job_ids, timeout, delay)
self.get_logs(jobs, log_file=kwargs.get("log_file"))
return self._check_outcome(jobs) | Verifies that the results were successfully submitted. |
18,716 | def key_size(self):
if self.key_algorithm in {PubKeyAlgorithm.ECDSA, PubKeyAlgorithm.ECDH}:
return self._key.keymaterial.oid
return next(iter(self._key.keymaterial)).bit_length() | *new in 0.4.1*
The size pertaining to this key. ``int`` for non-EC key algorithms; :py:obj:`constants.EllipticCurveOID` for EC keys. |
18,717 | def run_toy_DistilledSGLD(gpu_id):
X, Y, X_test, Y_test = load_toy()
minibatch_size = 1
teacher_noise_precision = 1.0
teacher_net = get_toy_sym(True, teacher_noise_precision)
student_net = get_toy_sym(False)
data_shape = (minibatch_size,) + X.shape[1::]
teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))}
student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id))}
teacher_initializer = mx.init.Uniform(0.07)
student_initializer = mx.init.Uniform(0.07)
student_grad_f = lambda student_outputs, teacher_pred: \
regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision)
student_exe, student_params, _ = \
DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
teacher_data_inputs=teacher_data_inputs,
student_data_inputs=student_data_inputs,
X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=80000,
teacher_initializer=teacher_initializer,
student_initializer=student_initializer,
teacher_learning_rate=1E-4, student_learning_rate=0.01,
student_lr_scheduler=mx.lr_scheduler.FactorScheduler(8000, 0.8),
student_grad_f=student_grad_f,
teacher_prior_precision=0.1, student_prior_precision=0.001,
perturb_deviation=0.1, minibatch_size=minibatch_size, task='regression',
dev=dev(gpu_id)) | Run DistilledSGLD on toy dataset |
18,718 | def input_fields(self, preamble, *args):
self.new_section()
if preamble is not None:
self.message(preamble)
if any([True for x in args if len(x) > 3]):
self.message()
output_dict = { }
for field in args:
(field_name, prompt, field_type) = field[:3]
default = None
if len(field) > 3:
default = field[3]
if field_type == 'string':
output_dict[field_name] = self.input(prompt, default = default)
elif field_type == 'password':
output_dict[field_name] = self.input(prompt, no_echo=True)
elif field_type == 'boolean':
output_dict[field_name] = self.input_boolean(prompt, default = default)
elif field_type == 'integer':
output_dict[field_name] = self.input_integer(prompt, default = default)
return output_dict | Get a set of fields from the user. Optionally a preamble may be
shown to the user describing the fields to return. The fields are
specified as the remaining arguments, with each field being a
list with the following entries:
- a programmer-visible name for the field
- a string prompt to show to the user
- one of the following values:
- string: return a string from the user
- password: return a string from the user but do not echo the
input to the screen
- boolean: return a boolean value from the user
- integer: return an integer value from the user
- the default value (optional)
Fields are requested from the user in the order specified.
Fields are returned in a dictionary with the field names being the keys
and the values being the items. |
18,719 | def _extract_authors(pub, idx, _root):
logger_ts.info("enter extract_authors")
try:
names = pub['author']  # 'author' key is assumed
except KeyError as e:
names = False
logger_ts.info("extract_authors: KeyError: author data not provided, {}".format(e))
if names:
auth = ''
if isinstance(names, list):
for name in names:
if isinstance(name, str):
auth += name + ';'
elif isinstance(name, dict):
for k, v in name.items():
auth += v + ';'
elif isinstance(names, str):
auth = names
_root['pub' + str(idx + 1) + '_author'] = auth[:-1]
return _root | Create a concatenated string of author names. Separate names with semi-colons.
:param any pub: Publication author structure is ambiguous
:param int idx: Index number of Pub |
18,720 | def create_queue_wrapper(name, queue_size, fed_arrays, data_sources, *args, **kwargs):
qtype = SingleInputMultiQueueWrapper if in kwargs else QueueWrapper
return qtype(name, queue_size, fed_arrays, data_sources, *args, **kwargs) | Arguments
name: string
Name of the queue
queue_size: integer
Size of the queue
fed_arrays: list
array names that will be fed by this queue
data_sources: dict
(lambda/method, dtype) tuples, keyed on array names |
18,721 | def read_playlists(self):
self.playlists = []
self.selected_playlist = -1
files = glob.glob(path.join(self.stations_dir, '*.csv'))
if len(files) == 0:
return 0, -1
else:
for a_file in files:
a_file_name = '.'.join(path.basename(a_file).split('.')[:-1])
a_file_size = self._bytes_to_human(path.getsize(a_file))
a_file_time = ctime(path.getmtime(a_file))
self.playlists.append([a_file_name, a_file_time, a_file_size, a_file])
self.playlists.sort()
for i, a_playlist in enumerate(self.playlists):
if a_playlist[-1] == self.stations_file:
self.selected_playlist = i
break
return len(self.playlists), self.selected_playlist | get already loaded playlist id |
18,722 | def negative_directional_index(close_data, high_data, low_data, period):
catch_errors.check_for_input_len_diff(close_data, high_data, low_data)
ndi = (100 *
smma(negative_directional_movement(high_data, low_data), period) /
atr(close_data, period)
)
return ndi | Negative Directional Index (-DI).
Formula:
-DI = 100 * SMMA(-DM) / ATR |
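A usage sketch assuming close/high/low are equal-length float sequences (14 is just the conventional lookback period):
close = [44.3, 44.1, 44.5, 44.9, 45.2, 45.6, 45.4, 45.8, 46.0, 45.7, 45.9, 46.3, 46.1, 46.5, 46.8]
high = [44.6, 44.4, 44.9, 45.3, 45.5, 45.9, 45.8, 46.1, 46.3, 46.0, 46.2, 46.6, 46.4, 46.8, 47.1]
low = [44.0, 43.9, 44.2, 44.6, 44.9, 45.2, 45.1, 45.5, 45.7, 45.4, 45.6, 46.0, 45.8, 46.2, 46.5]
minus_di = negative_directional_index(close, high, low, 14)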
18,723 | def exec_container_commands(self, action, c_name, **kwargs):
config_cmds = action.config.exec_commands
if not config_cmds:
return None
return self.exec_commands(action, c_name, run_cmds=config_cmds) | Runs all configured commands of a container configuration inside the container instance.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param c_name: Container name.
:type c_name: unicode | str
:return: List of exec command return values (e.g. containing the command id), if applicable, or ``None``
if either no commands have been run or no values have been returned from the API.
:rtype: list[dict] | NoneType |
18,724 | def get_composite_keywords(ckw_db, fulltext, skw_spans):
timer_start = time.clock()
ckw_out = {}
skw_as_components = []
for composite_keyword in ckw_db.values():
ckw_count = 0
matched_spans = []
for regex in composite_keyword.regex:
for match in regex.finditer(fulltext):
span = list(match.span())
span[1] -= 1
span = tuple(span)
if span not in matched_spans:
ckw_count += 1
matched_spans.append(span)
try:
components = composite_keyword.compositeof
except AttributeError:
current_app.logger.error(
"Cached ontology is corrupted. Please "
"remove the cached ontology in your temporary file."
)
raise OntologyError()
spans = []
try:
spans = [skw_spans[component][0] for component in components]
except KeyError:
pass
ckw_spans = []
for index in range(len(spans) - 1):
len_ckw = len(ckw_spans)
if ckw_spans:
previous_spans = ckw_spans
else:
previous_spans = spans[index]
for new_span in [(span0, span1) for span0 in previous_spans
for span1 in spans[index + 1]]:
span = _get_ckw_span(fulltext, new_span)
if span is not None:
ckw_spans.append(span)
if index > 0 and ckw_spans:
_ckw_spans = []
for _span in ckw_spans[len_ckw:]:
for _span2 in ckw_spans[:len_ckw]:
s = _span_overlapping(_span, _span2)
if s:
_ckw_spans.append(s)
ckw_spans = _ckw_spans
for matched_span in [mspan for mspan in ckw_spans
if mspan not in matched_spans]:
ckw_count += 1
matched_spans.append(matched_span)
if ckw_count:
component_counts = []
for component in components:
skw_as_components.append(component)
try:
component_counts.append(len(skw_spans[component][0]))
except KeyError:
component_counts.append(0)
ckw_out[composite_keyword] = [matched_spans, component_counts]
for skw in skw_as_components:
try:
del skw_spans[skw]
except KeyError:
pass
_ckw_base = filter(lambda x: len(x.compositeof) == 2, ckw_out.keys())
_ckw_extended = sorted(
filter(lambda x: len(x.compositeof) > 2, ckw_out.keys()),
key=lambda x: len(x.compositeof))
if _ckw_extended:
candidates = []
for kw1 in _ckw_base:
s1 = set(kw1.compositeof)
for kw2 in _ckw_extended:
s2 = set(kw2.compositeof)
if s1.issubset(s2):
candidates.append((kw1, kw2))
for i in range(len(_ckw_extended)):
kw1 = _ckw_extended[i]
s1 = set(kw1.compositeof)
for ii in range(i + 1, len(_ckw_extended)):
kw2 = _ckw_extended[ii]
s2 = set(kw2.compositeof)
if s1.issubset(s2):
candidates.append((kw1, kw2))
break
if candidates:
for kw1, kw2 in candidates:
try:
match1 = ckw_out[kw1]
match2 = ckw_out[kw2]
except KeyError:
continue
positions1 = match1[0]
for pos1 in positions1:
for pos2 in match2[0]:
if _span_overlapping(pos1, pos2):
del positions1[positions1.index(pos1)]
if len(positions1) == 0:
del ckw_out[kw1]
break
current_app.logger.info(
"Matching composite keywords... %d keywords found "
"in %.1f sec." % (len(ckw_out), time.clock() - timer_start),
)
return ckw_out | Return a list of composite keywords bound with number of occurrences.
:param ckw_db: list of KewordToken objects
(they are supposed to be composite ones)
:param fulltext: string to search in
:param skw_spans: dictionary of already identified single keywords
:return : dictionary of matches in a format {
<keyword object>, [[position, position...], [info_about_matches] ],
..
} |
18,725 | def make_prototype_request(*args, **kwargs):
if args and inspect.isclass(args[0]) and issubclass(args[0], Request):
request_cls, arg_list = args[0], args[1:]
return request_cls(*arg_list, **kwargs)
if args and isinstance(args[0], Request):
if args[1:] or kwargs:
raise_args_err("can't interpret args")
return args[0]
return Request(*args, **kwargs) | Make a prototype Request for a Matcher. |
18,726 | def get_autype_list(self, code_list):
code_list = unique_and_normalize_list(code_list)
for code in code_list:
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of param in code_list is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
ExrightQuery.pack_req, ExrightQuery.unpack_rsp)
kargs = {
"stock_list": code_list,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, exr_record = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
col_list = [
'code', 'ex_div_date', 'split_ratio', 'per_cash_div',
'per_share_div_ratio', 'per_share_trans_ratio', 'allotment_ratio',
'allotment_price', 'stk_spo_ratio', 'stk_spo_price',
'forward_adj_factorA', 'forward_adj_factorB',
'backward_adj_factorA', 'backward_adj_factorB',
]
exr_frame_table = pd.DataFrame(exr_record, columns=col_list)
return RET_OK, exr_frame_table | Get the ex-rights (price adjustment) factors for the given list of stocks.
:param code_list: list of stock codes, e.g. ['HK.00700']
:return: (ret, data)
ret == RET_OK returns a pandas DataFrame with the columns described below
ret != RET_OK returns an error string
===================== =========== =================================================================================
Parameter Type Description
===================== =========== =================================================================================
code str stock code
ex_div_date str ex-dividend / ex-rights date
split_ratio float split/consolidation ratio (ratio field, not displayed as %); e.g. 1/5 for a 5-into-1 consolidation, 5/1 for a 1-into-5 split
per_cash_div float cash dividend per share
per_share_div_ratio float bonus shares per share (ratio field, not displayed as %)
per_share_trans_ratio float capitalization (transfer) shares per share (ratio field, not displayed as %)
allotment_ratio float rights-issue shares per share (ratio field, not displayed as %)
allotment_price float rights-issue price
stk_spo_ratio float additional-issuance (SPO) ratio (ratio field, not displayed as %)
stk_spo_price float additional-issuance (SPO) price
forward_adj_factorA float forward adjustment factor A
forward_adj_factorB float forward adjustment factor B
backward_adj_factorA float backward adjustment factor A
backward_adj_factorB float backward adjustment factor B
===================== =========== ================================================================================= |
18,727 | def registerDirectory(self,name,physicalPath,directoryType,cleanupMode,
maxFileAge,description):
url = self._url + "/directories/register"
params = {
"f" : "json",
"name" : name,
"physicalPath" : physicalPath,
"directoryType" : directoryType,
"cleanupMode" : cleanupMode,
"maxFileAge" : maxFileAge,
"description" : description
}
res = self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return res | Registers a new server directory. While registering the server directory,
you can also specify the directory's cleanup parameters. You can also
register a directory by using its JSON representation as a value of the
directory parameter.
Inputs:
name - The name of the server directory.
physicalPath - The absolute physical path of the server directory.
directoryType - The type of server directory.
cleanupMode - Defines if files in the server directory needs to
be cleaned up. Default: NONE
maxFileAge - Defines how long a file in the directory needs to be
kept before it is deleted (in minutes).
description - An optional description for the server directory. |
18,728 | def is_git_directory_clean(path_to_repo: Path,
search_parent_dirs: bool = True,
check_untracked: bool = False) -> None:
repo = Repo(str(path_to_repo), search_parent_directories=search_parent_dirs)
logger.debug("is_git_directory_clean check for repo in path={} from "\
"cwd={} with search_parent_directories={}".format(
path_to_repo, os.getcwd(), search_parent_dirs))
if repo.is_dirty():
raise DirtyRepoException("Changes to the index or working tree."
"Commit them first .")
if check_untracked:
if repo.untracked_files:
raise DirtyRepoException("Untracked files. Commit them first.") | Check that the git working directory is in a clean state
and raise exceptions if not.
:path_to_repo: The path of the git repo |
18,729 | def _generate_for_subfolder(self, sid):
name = self._sanitise_sheetname(uni(Folders.id_to_name(sid)))
ws = self.workbook.add_worksheet(name)
fmt = self.formats
ws.write("A1", "Dossier report", fmt[])
ws.write("A2", "%s | %s" % (uni(self.folder_name), name))
ws.set_column(, 37)
ws.set_column(, 37)
ws.set_column(, 37)
ws.set_column(, 8)
ws.set_column(, 30)
ws.set_column(, 37)
ws.write("A4", "Id", fmt[])
ws.write("B4", "URL", fmt[])
ws.write("C4", "Subtopic Id", fmt[])
ws.write("D4", "Type", fmt[])
ws.write("E4", "Content", fmt[])
ws.write("F4", "Image URL", fmt[])
row = 4
for i in subtopics(self.store, self.folders, self.folder_id, sid, self.user):
Item.construct(self, i).generate_to(ws, row)
row += 1 | Generate report for a subfolder.
:param sid: The subfolder id; assumed valid |
18,730 | def dispatch(restricted=False):
FlagWatch.reset()
def _test(to_test):
return list(filter(lambda h: h.test(), to_test))
def _invoke(to_invoke):
while to_invoke:
unitdata.kv().set('reactive.dispatch.removed_state', False)
for handler in list(to_invoke):
to_invoke.remove(handler)
hookenv.log('Invoking reactive handler: %s' % handler.id(), level=hookenv.INFO)
handler.invoke()
if unitdata.kv().get('reactive.dispatch.removed_state'):
to_invoke = _test(to_invoke)
break
FlagWatch.commit()
tracer().start_dispatch()
if restricted:
unitdata.kv().set('reactive.dispatch.phase', 'restricted')
hook_handlers = _test(Handler.get_handlers())
tracer().start_dispatch_phase('restricted', hook_handlers)
_invoke(hook_handlers)
return
unitdata.kv().set('reactive.dispatch.phase', 'hooks')
hook_handlers = _test(Handler.get_handlers())
tracer().start_dispatch_phase('hooks', hook_handlers)
_invoke(hook_handlers)
unitdata.kv().set('reactive.dispatch.phase', 'other')
for i in range(100):
FlagWatch.iteration(i)
other_handlers = _test(Handler.get_handlers())
if i == 0:
tracer().start_dispatch_phase('other', other_handlers)
tracer().start_dispatch_iteration(i, other_handlers)
if not other_handlers:
break
_invoke(other_handlers)
FlagWatch.reset() | Dispatch registered handlers.
When dispatching in restricted mode, only matching hook handlers are executed.
Handlers are dispatched according to the following rules:
* Handlers are repeatedly tested and invoked in iterations, until the system
settles into quiescence (that is, until no new handlers match to be invoked).
* In the first iteration, :func:`@hook <charms.reactive.decorators.hook>`
and :func:`@action <charms.reactive.decorators.action>` handlers will
be invoked, if they match.
* In subsequent iterations, other handlers are invoked, if they match.
* Added flags will not trigger new handlers until the next iteration,
to ensure that chained flags are invoked in a predictable order.
* Removed flags will cause the current set of matched handlers to be
re-tested, to ensure that no handler is invoked after its matching
flag has been removed.
* Other than the guarantees mentioned above, the order in which matching
handlers are invoked is undefined.
* Flags are preserved between hook and action invocations, and all matching
handlers are re-invoked for every hook and action. There are
:doc:`decorators <charms.reactive.decorators>` and
:doc:`helpers <charms.reactive.helpers>`
to prevent unnecessary reinvocations, such as
:func:`~charms.reactive.decorators.only_once`. |
18,731 | def random_walk(network):
latest = network.latest_transmission_recipient()
if (not network.transmissions() or latest is None):
sender = random.choice(network.nodes(type=Source))
else:
sender = latest
receiver = random.choice(sender.neighbors(direction="to", type=Agent))
sender.transmit(to_whom=receiver) | Take a random walk from a source.
Start at a node randomly selected from those that receive input from a
source. At each step, transmit to a randomly-selected downstream node. |
18,732 | def num_dml_affected_rows(self):
result = self._job_statistics().get("numDmlAffectedRows")
if result is not None:
result = int(result)
return result | Return the number of DML rows affected by the job.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.numDmlAffectedRows
:rtype: int or None
:returns: number of DML rows affected by the job, or None if job is not
yet complete. |
18,733 | def bbox(width=1.0, height=1.0, depth=1.0):
width, height, depth = width / 2.0, height / 2.0, depth / 2.0
pos = numpy.array([
width, -height, depth,
width, height, depth,
-width, -height, depth,
width, height, depth,
-width, height, depth,
-width, -height, depth,
width, -height, -depth,
width, height, -depth,
width, -height, depth,
width, height, -depth,
width, height, depth,
width, -height, depth,
width, -height, -depth,
width, -height, depth,
-width, -height, depth,
width, -height, -depth,
-width, -height, depth,
-width, -height, -depth,
-width, -height, depth,
-width, height, depth,
-width, height, -depth,
-width, -height, depth,
-width, height, -depth,
-width, -height, -depth,
width, height, -depth,
width, -height, -depth,
-width, -height, -depth,
width, height, -depth,
-width, -height, -depth,
-width, height, -depth,
width, height, -depth,
-width, height, -depth,
width, height, depth,
-width, height, -depth,
-width, height, depth,
width, height, depth,
], dtype=numpy.float32)
vao = VAO("geometry:cube", mode=moderngl.LINE_STRIP)
vao.buffer(pos, , ["in_position"])
return vao | Generates a bounding box with (0.0, 0.0, 0.0) as the center.
This is simply a box with ``LINE_STRIP`` as draw mode.
Keyword Args:
width (float): Width of the box
height (float): Height of the box
depth (float): Depth of the box
Returns:
A :py:class:`demosys.opengl.vao.VAO` instance |
18,734 | def valid_processor_options(processors=None):
if processors is None:
processors = [
dynamic_import(p) for p in
tuple(settings.THUMBNAIL_PROCESSORS) +
tuple(settings.THUMBNAIL_SOURCE_GENERATORS)]
valid_options = set(['size', 'quality', 'subsampling'])
for processor in processors:
args = inspect.getfullargspec(processor)[0] if six.PY3 else inspect.getargspec(processor)[0]
valid_options.update(args[1:])
return list(valid_options) | Return a list of unique valid options for a list of image processors
(and/or source generators) |
18,735 | def training_data(job_id):
offset = request.args.get('offset', 0)
limit = request.args.get('limit', 0)
cur.execute("SELECT vector, label FROM vectors WHERE job_id=%s OFFSET %s LIMIT %s",  # query text is an assumed reconstruction
(job_id, offset, limit))
training_examples = [{'vector': v, 'label': l} for v, l in cur]
data = {'labeled_vectors': training_examples}
if int(request.args.get('full_info', 0)) > 0:
cur.execute("SELECT vector->>'reductions' AS num_reductions FROM vectors WHERE job_id=%s GROUP BY num_reductions",
(job_id,))
unique_num_reductions = cur.fetchall()
if len(unique_num_reductions) > 1:
data['vector_length'] = -1
else:
data['vector_length'] = unique_num_reductions[0][0]
cur.execute("SELECT count(*) FROM vectors WHERE job_id=%s",
(job_id,))
data['num_labeled_vectors'] = cur.fetchone()[0]
cur.execute("SELECT count(*) FROM (SELECT label FROM vectors WHERE job_id=%s GROUP BY label) AS all_vecs_for_job",
(job_id,))
data['num_classes'] = cur.fetchone()[0]
return jsonify(data) | Returns training_examples for a given job_id from offset to limit
If full_info parameter is greater than 0, will return extra architecture
info,
GET /jobs/139/vectors?offset=0&limit=10&full_info=1
{
"labeled_vectors": [{"vector":{"indices": {"0": 1}, "reductions": 3}, "label":0},
{"vector":{"indices": {"1": 1}, "reductions": 3}, "label":1},
...],
"vector_length": 3, # non-negative int or -1 if vector length is inconsistent
"num_labeled_vectors": 1600000, # non-negative int
"num_classes": 2, # pos integer, probably 2 or more
} |
18,736 | def flux_balance(model, reaction, tfba, solver):
fba = _get_fba_problem(model, tfba, solver)
fba.maximize(reaction)
for reaction in model.reactions:
yield reaction, fba.get_flux(reaction) | Run flux balance analysis on the given model.
Yields the reaction id and flux value for each reaction in the model.
This is a convenience function for setting up and running the
FluxBalanceProblem. If the FBA is solved for more than one parameter
it is recommended to setup and reuse the FluxBalanceProblem manually
for a speed up.
This is an implementation of flux balance analysis (FBA) as described in
[Orth10]_ and [Fell86]_.
Args:
model: MetabolicModel to solve.
reaction: Reaction to maximize. If a dict is given, this instead
represents the objective function weights on each reaction.
tfba: If True enable thermodynamic constraints.
solver: LP solver instance to use.
Returns:
Iterator over reaction ID and reaction flux pairs. |
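A hedged usage sketch; 'biomass' and solver are placeholders for an objective reaction id and an LP solver instance the caller already has:
for reaction_id, flux in flux_balance(model, 'biomass', tfba=False, solver=solver):
    print(reaction_id, flux)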
18,737 | def princomp(x):
(M,N) = x.shape
Mean = x.mean(0)
y = x - Mean
cov = numpy.dot(y.transpose(),y) / (M-1)
(V,PC) = numpy.linalg.eig(cov)
order = (-V).argsort()
coeff = PC[:,order]
return coeff | Determine the principal components of a vector of measurements
Determine the principal components of a vector of measurements
x should be a M x N numpy array composed of M observations of n variables
The output is:
coeffs - the NxN correlation matrix that can be used to transform x into its components
The code for this function is based on "A Tutorial on Principal Component
Analysis", Shlens, 2005 http://www.snl.salk.edu/~shlens/pub/notes/pca.pdf
(unpublished) |
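A small usage sketch: compute the loadings for 100 observations of 3 variables and project the centered data onto them.
import numpy
x = numpy.random.randn(100, 3)          # 100 observations, 3 variables
coeff = princomp(x)                     # 3x3 matrix of component loadings
scores = (x - x.mean(0)).dot(coeff)     # observations expressed in component space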
18,738 | def edit_asn(self, auth, asn, attr):
self._logger.debug("edit_asn called; asn: %s attr: %s" %
(unicode(asn), unicode(attr)))
req_attr = [ ]
allowed_attr = [ , ]
self._check_attr(attr, req_attr, allowed_attr)
asns = self.list_asn(auth, asn)
where, params1 = self._expand_asn_spec(asn)
update, params2 = self._sql_expand_update(attr)
params = dict(params2.items() + params1.items())
sql = "UPDATE ip_net_asn SET " + update + " WHERE " + where
sql += " RETURNING *"
self._execute(sql, params)
updated_asns = []
for row in self._curs_pg:
updated_asns.append(dict(row))
for a in asns:
audit_params = {
'username': auth.username,
'authenticated_as': auth.authenticated_as,
'full_name': auth.full_name,
'authoritative_source': auth.authoritative_source
}
audit_params['description'] = 'Edited ASN %s attr: %s' % (unicode(a['asn']), unicode(attr))
sql, params = self._sql_expand_insert(audit_params)
self._execute('INSERT INTO ip_net_log %s' % sql, params)
return updated_asns | Edit AS number
* `auth` [BaseAuth]
AAA options.
* `asn` [integer]
AS number to edit.
* `attr` [asn_attr]
New AS attributes.
This is the documentation of the internal backend function. It's
exposed over XML-RPC, please also see the XML-RPC documentation for
:py:func:`nipap.xmlrpc.NipapXMLRPC.edit_asn` for full
understanding. |
18,739 | def html_print_file(self, catalog, destination):
with open(destination, mode='w', encoding='utf-8') as t_f:
for text in catalog:
pnum = catalog[text]['pnum']
edition = catalog[text]['edition']
metadata = '\n'.join(catalog[text]['metadata'])
transliteration = '\n'.join(catalog[text]['transliteration'])
normalization = '\n'.join(catalog[text]['normalization'])
translation = '\n'.join(catalog[text]['translation'])
self.html_file = .format(
pnum=pnum, edition=edition, metadata=metadata,
trans=transliteration, norm=normalization,
translation=translation)
t_f.write(self.html_file) | Prints text_file in html.
:param catalog: text file you wish to pretty print
:param destination: where you wish to save the HTML data
:return: output in html_file.html. |
18,740 | def startElement (self, name, attrs):
func = getattr(self, 'start_' + name, None)
if func:
func(attrs) | if there's a start method for this element, call it |
18,741 | def delete_view(self, request, object_id, extra_context=None):
parent_folder = None
try:
obj = self.queryset(request).get(pk=unquote(object_id))
parent_folder = obj.parent
except self.model.DoesNotExist:
obj = None
r = super(FolderAdmin, self).delete_view(
request=request, object_id=object_id,
extra_context=extra_context)
url = r.get("Location", None)
if url in ["../../../../", "../../"] or url == self._get_post_url(obj):
if parent_folder:
url = reverse('admin:filer-directory_listing',
kwargs={'folder_id': parent_folder.id})
else:
url = reverse('admin:filer-directory_listing-root')
url = "%s%s%s" % (url, popup_param(request),
selectfolder_param(request, "&"))
return HttpResponseRedirect(url)
return r | Overrides the default to enable redirecting to the directory view after
deletion of a folder.
we need to fetch the object and find out who the parent is
before super, because super will delete the object and make it
impossible to find out the parent folder to redirect to. |
18,742 | def notebook_to_rst(npth, rpth, rdir, cr=None):
ntbk = nbformat.read(npth, nbformat.NO_CONVERT)
notebook_object_to_rst(ntbk, rpth, rdir, cr) | Convert notebook at `npth` to rst document at `rpth`, in directory
`rdir`. Parameter `cr` is a CrossReferenceLookup object. |
18,743 | def requireCompatibleAPI():
if 'PyQt4' in sys.modules:
import sip
for api in ('QVariant', 'QString'):
if sip.getapi(api) != 2:
raise RuntimeError('%s API already set to version %d, but version 2 is required' % (api, sip.getapi(api)))
(i.e. QString and QVariant should not be explicitly exported,
cf. documentation of sip.setapi()), call this function to check that
the PyQt4 was properly imported. (It will always be configured this
way by this module, but it could have been imported before we got a
hand on doing so.) |
18,744 | def get_info(
self,
userSpecifier,
**kwargs
):
request = Request(
'GET',
'/v3/users/{userSpecifier}'
)
request.set_path_param(
'userSpecifier',
userSpecifier
)
response = self.ctx.request(request)
if response.content_type is None:
return response
if not response.content_type.startswith("application/json"):
return response
jbody = json.loads(response.raw_body)
parsed_body = {}
if str(response.status) == "200":
if jbody.get() is not None:
parsed_body[] = \
self.ctx.user.UserInfo.from_dict(
jbody[],
self.ctx
)
elif str(response.status) == "401":
if jbody.get() is not None:
parsed_body[] = \
jbody.get()
if jbody.get() is not None:
parsed_body[] = \
jbody.get()
elif str(response.status) == "403":
if jbody.get() is not None:
parsed_body[] = \
jbody.get()
if jbody.get() is not None:
parsed_body[] = \
jbody.get()
elif str(response.status) == "405":
if jbody.get() is not None:
parsed_body[] = \
jbody.get()
if jbody.get() is not None:
parsed_body[] = \
jbody.get()
else:
parsed_body = jbody
response.body = parsed_body
return response | Fetch the user information for the specified user. This endpoint is
intended to be used by the user themself to obtain their own
information.
Args:
userSpecifier:
The User Specifier
Returns:
v20.response.Response containing the results from submitting the
request |
18,745 | def get_text_for_repeated_menu_item(
self, request=None, current_site=None, original_menu_tag=, **kwargs
):
source_field_name = settings.PAGE_FIELD_FOR_MENU_ITEM_TEXT
return self.repeated_item_text or getattr(
self, source_field_name, self.title
) | Return a string to use as 'text' for this page when it is being
included as a 'repeated' menu item in a menu. You might want to
override this method if you're creating a multilingual site and you
have different translations of 'repeated_item_text' that you wish to
surface. |
18,746 | def disable_on_env(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
function_name = func.__name__
VALIDATORS_DISABLED = os.getenv('VALIDATORS_DISABLED', '')
disabled_functions = [x.strip() for x in VALIDATORS_DISABLED.split(',')]
force_run = kwargs.get('force_run', False)
try:
value = args[0]
except IndexError:
raise ValidatorUsageError()
if function_name in disabled_functions and not force_run:
return value
else:
updated_kwargs = {key : kwargs[key]
for key in kwargs
if key != 'force_run'}
return func(*args, **updated_kwargs)
return func_wrapper | Disable the ``func`` called if its name is present in ``VALIDATORS_DISABLED``.
:param func: The function/validator to be disabled.
:type func: callable
:returns: If disabled, the ``value`` (first positional argument) passed to
``func``. If enabled, the result of ``func``. |
18,747 | def imbox(xy, w, h, angle=0.0, **kwargs):
from matplotlib.patches import Rectangle
return imbound(Rectangle, xy, w, h, angle, **kwargs) | draw boundary box
:param xy: start index xy (ji)
:param w: width
:param h: height
:param angle:
:param kwargs:
:return: |
18,748 | def load_files(files, tag=None, sat_id=None, altitude_bin=None):
output = [None]*len(files)
drop_idx = []
for (i,file) in enumerate(files):
try:
data = netcdf_file(file, mode='r', mmap=False)
new = {}
ncattrsList = data._attributes.keys()
for d in ncattrsList:
new[d] = data._attributes[d]
loadedVars={}
keys = data.variables.keys()
for key in keys:
if data.variables[key][:].dtype.byteorder != '=':
loadedVars[key] = data.variables[key][:].byteswap().newbyteorder()
else:
loadedVars[key] = data.variables[key][:]
new['profiles'] = pysat.DataFrame(loadedVars)
output[i] = new
data.close()
except RuntimeError:
drop_idx.append(i)
drop_idx.reverse()
for i in drop_idx:
del output[i]
if tag == 'ionprf':
if altitude_bin is not None:
for out in output:
out['profiles'].index = (out['profiles']['MSL_alt']/altitude_bin).round().values*altitude_bin
out['profiles'] = out['profiles'].groupby(out['profiles'].index.values).mean()
else:
for out in output:
out['profiles'].index = out['profiles']['MSL_alt']
return output | Loads a list of COSMIC data files, supplied by user.
Returns a list of dicts, a dict for each file. |
18,749 | def write_data(filename, data, data_format=None, compress=False, add=False):
create_parent_folder(filename)
if not isinstance(data_format, MimeType):
data_format = get_data_format(filename)
if data_format.is_tiff_format():
return write_tiff_image(filename, data, compress)
if data_format.is_image_format():
return write_image(filename, data)
if data_format is MimeType.TXT:
return write_text(filename, data, add=add)
try:
return {
MimeType.CSV: write_csv,
MimeType.JSON: write_json,
MimeType.XML: write_xml,
MimeType.GML: write_xml
}[data_format](filename, data)
except KeyError:
raise ValueError('Writing data format .{} is currently not supported'.format(data_format.value))
Function to write image data to specified file. If file format is not provided
explicitly, it is guessed from the filename extension. If format is TIFF, geo
information and compression can be optionally added.
:param filename: name of file to write data to
:type filename: str
:param data: image data to write to file
:type data: numpy array
:param data_format: format of output file. Default is ``None``
:type data_format: MimeType
:param compress: whether to compress data or not. Default is ``False``
:type compress: bool
:param add: whether to append to existing text file or not. Default is ``False``
:type add: bool
:raises: exception if numpy format is not supported or file cannot be written |
18,750 | def flatten(nested_list):
return_list = []
for i in nested_list:
if isinstance(i,list):
return_list += flatten(i)
else:
return_list.append(i)
return return_list | converts a list-of-lists to a single flat list |
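Example for the helper above:
nested = [1, [2, 3, [4]], [5, [6, [7, 8]]]]
print(flatten(nested))  # [1, 2, 3, 4, 5, 6, 7, 8]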
18,751 | def _win32_junction(path, link, verbose=0):
path = os.path.abspath(path)
link = os.path.abspath(link)
from ubelt import util_cmd
if os.path.isdir(path):
if verbose:
print()
command = 'mklink /J "{}" "{}"'.format(link, path)
else:
if verbose:
print()
try:
jwfs.link(path, link)
except Exception:
print('Failed to create hardlink from {} to {}'.format(link, path))
raise
command = None
if command is not None:
info = util_cmd.cmd(command, shell=True)
if info['ret'] != 0:
from ubelt import util_format
print()
print(info['err'])
print(util_format.repr2(info, nl=1))
raise OSError(str(info))
return link | On older (pre 10) versions of windows we need admin privledges to make
symlinks, however junctions seem to work.
For paths we do a junction (softlink) and for files we use a hard link
CommandLine:
python -m ubelt._win32_links _win32_junction
Example:
>>> # xdoc: +REQUIRES(WIN32)
>>> import ubelt as ub
>>> root = ub.ensure_app_cache_dir('ubelt', 'win32_junction')
>>> ub.delete(root)
>>> ub.ensuredir(root)
>>> fpath = join(root, 'fpath.txt')
>>> dpath = join(root, 'dpath')
>>> fjunc = join(root, 'fjunc.txt')
>>> djunc = join(root, 'djunc')
>>> ub.touch(fpath)
>>> ub.ensuredir(dpath)
>>> ub.ensuredir(join(root, 'djunc_fake'))
>>> ub.ensuredir(join(root, 'djunc_fake with space'))
>>> ub.touch(join(root, 'djunc_fake with space file'))
>>> _win32_junction(fpath, fjunc)
>>> _win32_junction(dpath, djunc)
>>> # thank god colons are not allowed
>>> djunc2 = join(root, 'djunc2 [with pathological attrs]')
>>> _win32_junction(dpath, djunc2)
>>> _win32_is_junction(djunc)
>>> ub.writeto(join(djunc, 'afile.txt'), 'foo')
>>> assert ub.readfrom(join(dpath, 'afile.txt')) == 'foo'
>>> ub.writeto(fjunc, 'foo') |
18,752 | def _check_nonlocal_and_global(self, node):
def same_scope(current):
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node) | Check that a name is both nonlocal and global. |
18,753 | def drawPoints(points, bg=' '):
import sys
points = list(points)
try:
points = [(int(x), int(y)) for x, y in points]
except:
raise PyBresenhamException()
minx = min([x for x, y in points])
maxx = max([x for x, y in points])
miny = min([y for x, y in points])
maxy = max([y for x, y in points])
charGrid = [[None] * (maxy - miny + 1) for i in range(maxx - minx + 1)]
for x, y in points:
charGrid[x - minx][y - miny] = 'O'
for y in range(len(charGrid[0])):
for x in range(len(charGrid)):
if charGrid[x][y] in (None, ' '):
charToDraw = bg
else:
charToDraw = charGrid[x][y]
sys.stdout.write(charToDraw)
print() | A small debug function that takes an iterable of (x, y) integer tuples
and draws them to the screen. |
18,754 | def cart_add(self, items, CartId=None, HMAC=None, **kwargs):
if not CartId or not HMAC:
raise CartException()
if isinstance(items, dict):
items = [items]
if len(items) > 10:
raise CartException("You can't add more than 10 items at once")
offer_id_key_template = 'Item.{0}.OfferListingId'
quantity_key_template = 'Item.{0}.Quantity'
for i, item in enumerate(items):
kwargs[offer_id_key_template.format(i)] = item['offer_id']
kwargs[quantity_key_template.format(i)] = item['quantity']
response = self.api.CartAdd(CartId=CartId, HMAC=HMAC, **kwargs)
root = objectify.fromstring(response)
new_cart = AmazonCart(root)
self._check_for_cart_error(new_cart)
return new_cart | CartAdd.
:param items:
A dictionary containing the items to be added to the cart.
Or a list containing these dictionaries.
It is not possible to create an empty cart!
example: [{'offer_id': 'rt2ofih3f389nwiuhf8934z87o3f4h',
'quantity': 1}]
:param CartId: Id of Cart
:param HMAC: HMAC of Cart, see CartCreate for more info
:return:
An :class:`~.AmazonCart`. |
18,755 | def _save_state(self):
ns_prefixes_floating_in = copy.copy(self._ns_prefixes_floating_in)
ns_prefixes_floating_out = copy.copy(self._ns_prefixes_floating_out)
ns_decls_floating_in = copy.copy(self._ns_decls_floating_in)
curr_ns_map = copy.copy(self._curr_ns_map)
ns_map_stack = copy.copy(self._ns_map_stack)
pending_start_element = self._pending_start_element
ns_counter = self._ns_counter
ns_auto_prefixes_floating_in = \
copy.copy(self._ns_auto_prefixes_floating_in)
try:
yield
except:
self._ns_prefixes_floating_in = ns_prefixes_floating_in
self._ns_prefixes_floating_out = ns_prefixes_floating_out
self._ns_decls_floating_in = ns_decls_floating_in
self._pending_start_element = pending_start_element
self._curr_ns_map = curr_ns_map
self._ns_map_stack = ns_map_stack
self._ns_counter = ns_counter
self._ns_auto_prefixes_floating_in = ns_auto_prefixes_floating_in
raise | Helper context manager for :meth:`buffer` which saves the whole state.
This is broken out in a separate method for readability and tested
indirectly by testing :meth:`buffer`. |
18,756 | def _get_node(template, context, name):
for node in template:
if isinstance(node, BlockNode) and node.name == name:
return node.nodelist.render(context)
elif isinstance(node, ExtendsNode):
return _get_node(node.nodelist, context, name)
return "" | taken originally from
http://stackoverflow.com/questions/2687173/django-how-can-i-get-a-block-from-a-template |
18,757 | def jsonify(symbol):
try:
return json.dumps(symbol.toJson(), indent=)
except AttributeError:
pass
return json.dumps(symbol, indent=) | returns json format for symbol |
18,758 | def dnld_assc(assc_name, go2obj=None, prt=sys.stdout):
dirloc, assc_base = os.path.split(assc_name)
if not dirloc:
dirloc = os.getcwd()
assc_locfile = os.path.join(dirloc, assc_base) if not dirloc else assc_name
dnld_annotation(assc_locfile, prt)
assc_orig = read_gaf(assc_locfile, prt)
if go2obj is None:
return assc_orig
assc = {}
goids_dag = set(go2obj.keys())
for gene, goids_cur in assc_orig.items():
assc[gene] = goids_cur.intersection(goids_dag)
return assc | Download association from http://geneontology.org/gene-associations. |
18,759 | def __sort_registry(self, svc_ref):
with self.__svc_lock:
if svc_ref not in self.__svc_registry:
raise BundleException("Unknown service: {0}".format(svc_ref))
for spec in svc_ref.get_property(OBJECTCLASS):
spec_refs = self.__svc_specs[spec]
idx = bisect.bisect_left(spec_refs, svc_ref)
del spec_refs[idx]
svc_ref.update_sort_key()
for spec in svc_ref.get_property(OBJECTCLASS):
spec_refs = self.__svc_specs[spec]
bisect.insort_left(spec_refs, svc_ref) | Sorts the registry, after the update of the sort key of given service
reference
:param svc_ref: A service reference with a modified sort key |
18,760 | def _at(self, idx):
handle = NDArrayHandle()
if idx < 0:
length = self.shape[0]
idx += length
if idx < 0:
raise IndexError(
'index %d is out of bounds for size %d' % (idx-length, length))
check_call(_LIB.MXNDArrayAt(
self.handle, mx_uint(idx), ctypes.byref(handle)))
return NDArray(handle=handle, writable=self.writable) | Returns a view of the array sliced at `idx` in the first dim.
This is called through ``x[idx]``.
Parameters
----------
idx : int
index for slicing the `NDArray` in the first dim.
Returns
-------
NDArray
`NDArray` sharing the memory with the current one sliced at `idx` in the first dim.
Examples
--------
>>> a = mx.nd.array([[1,2], [3, 4]])
>>> a[1].asnumpy()
array([ 3., 4.], dtype=float32)
>>> b = mx.nd.array([1, 2, 3, 4])
>>> b[0].asnumpy()
array([ 1.], dtype=float32) |
18,761 | def image(request, data):
try:
width = int(request.GET.get("w", PYDENTICON_WIDTH))
except ValueError:
raise SuspiciousOperation("Identicon width must be a positive integer.")
try:
height = int(request.GET.get("h", PYDENTICON_HEIGHT))
except ValueError:
raise SuspiciousOperation("Identicon height must be a positive integer.")
output_format = request.GET.get("f", PYDENTICON_FORMAT)
try:
padding = [int(p) for p in request.GET["p"].split(",")]
except KeyError:
padding = PYDENTICON_PADDING
except ValueError:
raise SuspiciousOperation("Identicon padding must consist out of 4 positive integers separated with commas.")
if "i" in request.GET:
inverted = request.GET.get("i")
if inverted.lower() == "true":
inverted = True
elif inverted.lower() == "false":
inverted = False
else:
raise SuspiciousOperation("Inversion parameter must be a boolean (true/false).")
else:
inverted = PYDENTICON_INVERT
if not isinstance(width, int) or width <= 0:
raise SuspiciousOperation("Identicon width must be a positive integer.")
if not isinstance(height, int) or height <= 0:
raise SuspiciousOperation("Identicon height must be a positive integer.")
if not all([isinstance(p, int) and p >= 0 for p in padding]) or len(padding) != 4:
raise SuspiciousOperation("Padding must be a 4-element tuple consisting out of positive integers.")
if output_format == "png":
content_type = "image/png"
elif output_format == "ascii":
content_type = "text/plain"
else:
raise SuspiciousOperation("Unsupported identicon format requested - % output_format")
generator = Generator(PYDENTICON_ROWS, PYDENTICON_COLUMNS,
foreground = PYDENTICON_FOREGROUND, background = PYDENTICON_BACKGROUND,
digest = PYDENTICON_DIGEST)
content = generator.generate(data, width, height, padding=padding, output_format=output_format, inverted=inverted)
response = HttpResponse(content, content_type=content_type)
return response | Generates identicon image based on passed data.
Arguments:
data - Data which should be used for generating an identicon. This data
will be used in order to create a digest which is used for generating the
identicon. If the data passed is a hex digest already, the digest will be
used as-is.
Returns:
Identicon image in raw format. |
18,762 | def extra_reading_spec(self):
field_names = ("frame_number", "action", "reward", "done")
data_fields = {
name: tf.FixedLenFeature([1], tf.int64) for name in field_names
}
decoders = {
name: tf.contrib.slim.tfexample_decoder.Tensor(tensor_key=name)
for name in field_names
}
return (data_fields, decoders) | Additional data fields to store on disk and their decoders. |
18,763 | def split_array_like(df, columns=None):
"""Split cells with array-like values along row axis.
Column names are maintained. The index is dropped.
Parameters
----------
df : ~pandas.DataFrame
Data frame ``df[columns]`` should contain :py:class:`~pytil.numpy.ArrayLike`
values.
columns : ~typing.Collection[str] or str or None
Columns (or column) whose values to split. Defaults to ``df.columns``.
Returns
-------
~pandas.DataFrame
Data frame with array-like values in ``df[columns]`` split across rows,
and corresponding values in other columns repeated.
Examples
--------
>>> df = pd.DataFrame([[1,[1,2],[1]],[1,[1,2],[3,4,5]],[2,[1],[1,2]]], columns=('check', 'a', 'b'))
>>> df
check a b
0 1 [1, 2] [1]
1 1 [1, 2] [3, 4, 5]
2 2 [1] [1, 2]
>>> split_array_like(df, ['a', 'b'])
check a b
0 1 1 1
1 1 2 1
2 1 1 3
3 1 1 4
4 1 1 5
5 1 2 3
6 1 2 4
7 1 2 5
8 2 1 1
9 2 1 2
"""
dtypes = df.dtypes
if columns is None:
columns = df.columns
elif isinstance(columns, str):
columns = [columns]
for column in columns:
expanded = np.repeat(df.values, df[column].apply(len).values, axis=0)
expanded[:, df.columns.get_loc(column)] = np.concatenate(df[column].tolist())
df = pd.DataFrame(expanded, columns=df.columns)
for i, dtype in enumerate(dtypes):
df.iloc[:,i] = df.iloc[:,i].astype(dtype)
return df | Split cells with array-like values along row axis.
Column names are maintained. The index is dropped.
Parameters
----------
df : ~pandas.DataFrame
Data frame ``df[columns]`` should contain :py:class:`~pytil.numpy.ArrayLike`
values.
columns : ~typing.Collection[str] or str or None
Columns (or column) whose values to split. Defaults to ``df.columns``.
Returns
-------
~pandas.DataFrame
Data frame with array-like values in ``df[columns]`` split across rows,
and corresponding values in other columns repeated.
Examples
--------
>>> df = pd.DataFrame([[1,[1,2],[1]],[1,[1,2],[3,4,5]],[2,[1],[1,2]]], columns=('check', 'a', 'b'))
>>> df
check a b
0 1 [1, 2] [1]
1 1 [1, 2] [3, 4, 5]
2 2 [1] [1, 2]
>>> split_array_like(df, ['a', 'b'])
check a b
0 1 1 1
1 1 2 1
2 1 1 3
3 1 1 4
4 1 1 5
5 1 2 3
6 1 2 4
7 1 2 5
8 2 1 1
9 2 1 2 |
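An assumed, self-contained sketch of the repeat-and-concatenate trick the body above relies on:
    import numpy as np
    col = [[1, 2], [3, 4, 5]]               # one array-like column
    repeats = [len(x) for x in col]         # rows each cell expands into: [2, 3]
    other = np.repeat(['x', 'y'], repeats)  # other columns are repeated: ['x' 'x' 'y' 'y' 'y']
    values = np.concatenate(col)            # the split column is flattened: [1 2 3 4 5]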
18,764 | def getrefnames(idf, objname):
iddinfo = idf.idd_info
dtls = idf.model.dtls
index = dtls.index(objname)
fieldidds = iddinfo[index]
for fieldidd in fieldidds:
if 'field' in fieldidd:
if fieldidd['field'][0].endswith('Name'):
if 'reference' in fieldidd:
return fieldidd['reference']
else:
return [] | get the reference names for this object |
18,765 | def mpub(self, topic, messages, binary=True):
if binary:
return self.post('/mpub', data=pack(messages)[4:],
params={'topic': topic, 'binary': True})
elif any('\n' in m for m in messages):
raise ClientException(
)
else:
return self.post(
'/mpub', params={'topic': topic}, data='\n'.join(messages)) | Send multiple messages to a topic. Optionally pack the messages
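A small assumed illustration of the newline framing used by the non-binary branch above, which is why plain-text messages must not contain '\n':
    messages = ['first', 'second', 'third']
    body = '\n'.join(messages)  # 'first\nsecond\nthird' -- one message per line in the POST body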
18,766 | def _write_value(self, field_type, value):
if len(field_type) > 1:
field_type = field_type[0]
if field_type == self.TYPE_BOOLEAN:
self._writeStruct(">B", 1, (1 if value else 0,))
elif field_type == self.TYPE_BYTE:
self._writeStruct(">b", 1, (value,))
elif field_type == self.TYPE_CHAR:
self._writeStruct(">H", 1, (ord(value),))
elif field_type == self.TYPE_SHORT:
self._writeStruct(">h", 1, (value,))
elif field_type == self.TYPE_INTEGER:
self._writeStruct(">i", 1, (value,))
elif field_type == self.TYPE_LONG:
self._writeStruct(">q", 1, (value,))
elif field_type == self.TYPE_FLOAT:
self._writeStruct(">f", 1, (value,))
elif field_type == self.TYPE_DOUBLE:
self._writeStruct(">d", 1, (value,))
elif field_type == self.TYPE_OBJECT or field_type == self.TYPE_ARRAY:
if value is None:
self.write_null()
elif isinstance(value, JavaEnum):
self.write_enum(value)
elif isinstance(value, (JavaArray, JavaByteArray)):
self.write_array(value)
elif isinstance(value, JavaObject):
self.write_object(value)
elif isinstance(value, JavaString):
self.write_string(value)
elif isinstance(value, str):
self.write_blockdata(value)
else:
raise RuntimeError("Unknown typecode: {0}".format(field_type))
else:
raise RuntimeError("Unknown typecode: {0}".format(field_type)) | Writes an item of an array
:param field_type: Value type
:param value: The value itself |
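An assumed standalone sketch of the big-endian packing that the _writeStruct calls above boil down to:
    import struct
    struct.pack(">i", 42)        # TYPE_INTEGER -> b'\x00\x00\x00*'
    struct.pack(">H", ord('A'))  # TYPE_CHAR    -> b'\x00A'
    struct.pack(">d", 1.5)       # TYPE_DOUBLE  -> 8 bytes, IEEE 754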
18,767 | def encrypt_to(self, f, mac_bytes=10):
ctx = EncryptionContext(f, self.p, mac_bytes)
yield ctx
ctx.finish() | Returns a file like object `ef'. Anything written to `ef'
will be encrypted for this pubkey and written to `f'. |
18,768 | def salt_and_pepper_noise(X, v):
X_noise = X.copy()
n_features = X.shape[1]
mn = X.min()
mx = X.max()
for i, sample in enumerate(X):
mask = np.random.randint(0, n_features, v)
for m in mask:
if np.random.random() < 0.5:
X_noise[i][m] = mn
else:
X_noise[i][m] = mx
return X_noise | Apply salt and pepper noise to data in X.
In other words a fraction v of elements of X
(chosen at random) is set to its maximum or minimum value according to a
fair coin flip.
If minimum or maximum are not given, the min (max) value in X is taken.
:param X: array_like, Input data
:param v: int, fraction of elements to distort
:return: transformed data |
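A hedged usage sketch (the array shape and noise level are arbitrary, and the function above is assumed to be in scope):
    import numpy as np
    X = np.random.rand(5, 10)              # 5 samples, 10 features
    X_noisy = salt_and_pepper_noise(X, 3)  # distort 3 features per sample to the min/max of X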
18,769 | def load(self):
self._validate()
self._logger.logging_load()
formatter = MediaWikiTableFormatter(self.source)
formatter.accept(self)
return formatter.to_table_data() | Extract tabular data as |TableData| instances from a MediaWiki text
object.
|load_source_desc_text|
:return:
Loaded table data iterator.
|load_table_name_desc|
=================== ==============================================
Format specifier Value after the replacement
=================== ==============================================
``%(filename)s`` ``""``
``%(key)s`` | This replaced to:
| **(1)** ``caption`` mark of the table
| **(2)** ``%(format_name)s%(format_id)s``
| if ``caption`` mark not included
| in the table.
``%(format_name)s`` ``"mediawiki"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ==============================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the MediaWiki data is invalid or empty. |
18,770 | def import_new_atlas_pointings(
self,
recent=False):
self.log.info()
if recent:
mjd = mjdnow(
log=self.log
).get_mjd()
recent = mjd - 14
recent = " mjd_obs > %(recent)s " % locals()
else:
recent = "1=1"
sqlQuery = u % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.atlas3DbConn,
quiet=False
)
dbSettings = self.settings["database settings"]["atlasMovers"]
entries = list(rows)
if len(rows) > 0:
insert_list_of_dictionaries_into_database_tables(
dbConn=self.atlasMoversDBConn,
log=self.log,
dictList=entries,
dbTableName="atlas_exposures",
uniqueKeyList=["expname"],
dateModified=False,
batchSize=10000,
replace=True,
dbSettings=dbSettings
)
recent = recent.replace("mjd_obs", "mjd")
sqlQuery = u % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.atlas4DbConn,
quiet=False
)
entries = list(rows)
if len(rows) > 0:
insert_list_of_dictionaries_into_database_tables(
dbConn=self.atlasMoversDBConn,
log=self.log,
dictList=entries,
dbTableName="atlas_exposures",
uniqueKeyList=["expname"],
dateModified=False,
batchSize=10000,
replace=True,
dbSettings=dbSettings
)
add_htm_ids_to_mysql_database_table(
raColName="raDeg",
declColName="decDeg",
tableName="atlas_exposures",
dbConn=self.atlasMoversDBConn,
log=self.log,
primaryIdColumnName="primaryId"
)
print "ATLAS pointings synced between ATLAS3/ATLAS4 databases and the ATLAS Movers `atlas_exposures` database table"
self.log.info()
return None | *Import any new ATLAS pointings from the atlas3/atlas4 databases into the ``atlas_exposures`` table of the Atlas Movers database*
**Key Arguments:**
- ``recent`` -- only sync the most recent 2 weeks of data (speeds things up)
**Return:**
- None
**Usage:**
.. code-block:: python
from rockAtlas.bookkeeping import bookkeeper
bk = bookkeeper(
log=log,
settings=settings
)
bk.import_new_atlas_pointings() |
18,771 | def compress_encoder(inputs,
hparams,
strides=(2, 2),
kernel_size=(3, 3),
name=None):
with tf.variable_scope(name, default_name="compress"):
x = inputs
for i in range(hparams.num_compress_steps // 2):
with tf.variable_scope("compress_conv_%d" % i):
y = common_layers.conv_block(
common_layers.layer_norm(
x, hparams.hidden_size, name="lnorm"),
hparams.hidden_size,
dilation_rates_and_kernel_sizes=[((1, 1), kernel_size)],
strides=strides,
padding="SAME",
name="compress_conv_%d" % i)
y = tf.nn.dropout(y, 1.0 - hparams.dropout)
if hparams.do_compress_attend:
y = compress_self_attention_layer(
x, hparams, name="compress_selfatt_%d" % i)
y += x
x = y
x = residual_block_layer(x, hparams)
shape_x = common_layers.shape_list(x)
x = tf.layers.dense(x,
hparams.num_latents * hparams.hidden_size,
name=name + "_dense")
return tf.reshape(x, [shape_x[0],
shape_x[1] * shape_x[2] * hparams.num_latents,
hparams.hidden_size]) | Encoder that compresses 2-D inputs by 2**num_compress_steps.
Args:
inputs: Tensor of shape [batch, height, width, channels].
hparams: HParams.
strides: Tuple, strides for conv block.
kernel_size: Tuple, kernel window size for conv block.
name: string, variable scope.
Returns:
Tensor of shape [batch, latent_length, hparams.hidden_size], where
latent_length is
hparams.num_latents * (height*width) / 2**(hparams.num_compress_steps). |
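A quick worked check (assumed values) of the latent_length bookkeeping stated in the docstring:
    height, width = 16, 16
    num_compress_steps, num_latents = 4, 2
    latent_length = num_latents * (height * width) // 2 ** num_compress_steps
    print(latent_length)  # 32, i.e. 2 * 256 / 16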
18,772 | def getObjectWorkflowStates(self):
workflow = getToolByName(self, 'portal_workflow')
states = {}
for w in workflow.getWorkflowsFor(self):
state = api.get_workflow_status_of(self, w.state_var)
states[w.state_var] = state
return states | This method is used to populate catalog values
Returns a dictionary with the workflow id as key and workflow state as
value.
:return: {'review_state':'active',...} |
18,773 | def _map_term_using_schema(master, path, term, schema_edges):
output = FlatList()
for k, v in term.items():
dimension = schema_edges[k]
if isinstance(dimension, Dimension):
domain = dimension.getDomain()
if dimension.fields:
if is_data(dimension.fields):
for local_field, es_field in dimension.fields.items():
local_value = v[local_field]
if local_value == None:
output.append({"missing": {"field": es_field}})
else:
output.append({"term": {es_field: local_value}})
continue
if len(dimension.fields) == 1 and is_variable_name(dimension.fields[0]):
if domain.getPartByKey(v) is domain.NULL:
output.append({"missing": {"field": dimension.fields[0]}})
else:
output.append({"term": {dimension.fields[0]: v}})
continue
if AND(is_variable_name(f) for f in dimension.fields):
if not isinstance(v, tuple):
Log.error("expecing {{name}}={{value}} to be a tuple", name= k, value= v)
for i, f in enumerate(dimension.fields):
vv = v[i]
if vv == None:
output.append({"missing": {"field": f}})
else:
output.append({"term": {f: vv}})
continue
if len(dimension.fields) == 1 and is_variable_name(dimension.fields[0]):
if domain.getPartByKey(v) is domain.NULL:
output.append({"missing": {"field": dimension.fields[0]}})
else:
output.append({"term": {dimension.fields[0]: v}})
continue
if domain.partitions:
part = domain.getPartByKey(v)
if part is domain.NULL or not part.esfilter:
Log.error("not expected to get NULL")
output.append(part.esfilter)
continue
else:
Log.error("not expected")
elif is_data(v):
sub = _map_term_using_schema(master, path + [k], v, schema_edges[k])
output.append(sub)
continue
output.append({"term": {k: v}})
return {"and": output} | IF THE WHERE CLAUSE REFERS TO FIELDS IN THE SCHEMA, THEN EXPAND THEM |
18,774 | def PrintField(self, field, value):
out = self.out
out.write(' ' * self.indent)
if self.use_field_number:
out.write(str(field.number))
else:
if field.is_extension:
out.write('[')
if (field.containing_type.GetOptions().message_set_wire_format and
field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL):
out.write(field.message_type.full_name)
else:
out.write(field.full_name)
out.write(']')
elif field.type == descriptor.FieldDescriptor.TYPE_GROUP:
out.write(field.message_type.name)
else:
out.write(field.name)
if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
| Print a single field name/value pair. |
18,775 | def create_doc_dict(self, document, doc_key=None, owner_document=None):
if owner_document:
doc_field = owner_document._fields.get(doc_key, None) if doc_key else None
else:
doc_field = document._fields.get(doc_key, None) if doc_key else None
doc_dict = {"_document": document if owner_document is None else owner_document,
"_key": document.__class__.__name__.lower() if doc_key is None else doc_key,
"_document_field": doc_field}
if not isinstance(document, TopLevelDocumentMetaclass) and doc_key:
doc_dict.update({"_field_type": EmbeddedDocumentField})
for key, field in document._fields.items():
doc_dict[key] = field
return doc_dict | Generate a dictionary representation of the document. (no recursion)
DO NOT CALL DIRECTLY |
18,776 | def from_ep_string(cls, ep_string, location):
ep_string = ep_string.strip()
if in ep_string:
ep_lines = ep_string.split()
else:
ep_lines = ep_string.split()
lines = [l.split()[0].strip().replace(, ) for l in ep_lines]
assert len(lines) == 27 or len(lines) == 26, "Number " \
"of lines of text [{}] does not correspond" \
" to an EP Design Day [26 or 27]".format(
len(lines))
lines[-1] = lines[-1].split()[0]
name = lines[1]
day_type = lines[4]
dry_bulb_condition = DryBulbCondition(
float(lines[5]), float(lines[6]), lines[7], lines[8])
h_type = lines[9]
h_val = 0 if lines[10] == else float(lines[10])
if h_type == :
h_val = float(lines[12])
elif h_type == :
h_val = float(lines[13])
humidity_condition = HumidityCondition(
h_type, h_val, float(lines[15]), lines[11])
wind_condition = WindCondition(
float(lines[16]), float(lines[17]), lines[18], lines[19])
sky_model = lines[21]
if sky_model == :
sky_condition = OriginalClearSkyCondition(
int(lines[2]), int(lines[3]), float(lines[26]), lines[20])
elif sky_model == :
sky_condition = RevisedClearSkyCondition(
int(lines[2]), int(lines[3]), float(lines[24]),
float(lines[25]), lines[20])
else:
sky_condition = SkyCondition(
sky_model, int(lines[2]), int(lines[3]), lines[20])
if sky_model == :
sky_condition.beam_shced = lines[22]
sky_condition.diff_shced = lines[23]
return cls(name, day_type, location, dry_bulb_condition,
humidity_condition, wind_condition, sky_condition) | Initialize from an EnergyPlus string of a SizingPeriod:DesignDay.
args:
ep_string: A full string representing a SizingPeriod:DesignDay. |
18,777 | def _op_to_matrix(self,
op: Optional[ops.Operation],
qubits: Tuple[ops.Qid, ...]
) -> Optional[np.ndarray]:
q1, q2 = qubits
matrix = protocols.unitary(op, None)
if matrix is None:
return None
assert op is not None
if op.qubits == qubits:
return matrix
if op.qubits == (q2, q1):
return MergeInteractions._flip_kron_order(matrix)
if op.qubits == (q1,):
return np.kron(matrix, np.eye(2))
if op.qubits == (q2,):
return np.kron(np.eye(2), matrix)
return None | Determines the effect of an operation on the given qubits.
If the operation is a 1-qubit operation on one of the given qubits,
or a 2-qubit operation on both of the given qubits, and also the
operation has a known matrix, then a matrix is returned. Otherwise None
is returned.
Args:
op: The operation to understand.
qubits: The qubits we care about. Order determines matrix tensor
order.
Returns:
None, or else a matrix equivalent to the effect of the operation. |
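An assumed numpy-only sketch of how the single-qubit branches above embed a gate into the two-qubit space:
    import numpy as np
    X = np.array([[0, 1], [1, 0]])
    on_q1 = np.kron(X, np.eye(2))  # gate acting on q1, identity on q2
    on_q2 = np.kron(np.eye(2), X)  # identity on q1, gate acting on q2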
18,778 | def delete_node(self, node_name, graph=None):
if not graph:
graph = self.graph
if node_name not in graph:
raise KeyError( % node_name)
graph.pop(node_name)
for node, edges in six.iteritems(graph):
if node_name in edges:
edges.remove(node_name) | Deletes this node and all edges referencing it. |
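A tiny assumed example of the adjacency-set layout this method expects, with the deletion done inline:
    graph = {'a': {'b'}, 'b': set(), 'c': {'b'}}
    graph.pop('b')
    for node, edges in graph.items():
        edges.discard('b')
    # graph is now {'a': set(), 'c': set()}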
18,779 | def get_line_pattern_rules(declarations, dirs):
property_map = {: , : ,
: , : ,
: , : }
property_names = property_map.keys()
rules = []
for (filter, values) in filtered_property_declarations(declarations, property_names):
line_pattern_file, line_pattern_type, line_pattern_width, line_pattern_height \
= values.has_key() \
and post_process_symbolizer_image_file(str(values[].value), dirs) \
or (None, None, None, None)
line_pattern_width = values.has_key() and values[].value or line_pattern_width
line_pattern_height = values.has_key() and values[].value or line_pattern_height
symbolizer = line_pattern_file and output.LinePatternSymbolizer(line_pattern_file, line_pattern_type, line_pattern_width, line_pattern_height)
if symbolizer:
rules.append(make_rule(filter, symbolizer))
return rules | Given a list of declarations, return a list of output.Rule objects.
Optionally provide an output directory for local copies of image files. |
18,780 | def __check_classes(self):
msg = (
"Sanic JWT was not initialized properly. It did not received "
"an instance of {}"
)
if not issubclass(self.authentication_class, Authentication):
raise exceptions.InitializationFailure(
message=msg.format("Authentication")
)
if not issubclass(self.configuration_class, Configuration):
raise exceptions.InitializationFailure(
message=msg.format("Configuration")
)
if not issubclass(self.responses_class, Responses):
raise exceptions.InitializationFailure(
message=msg.format("Responses")
) | Check if any of the default classes (`Authentication`, `Configuration`
and / or `Responses`) have been overwritten and if they're still valid |
18,781 | def read(self, size=None):
if size is None:
size = self.__size
ret_list = []
while size > 0 and self.__buf:
data = self.__buf.popleft()
size -= len(data)
ret_list.append(data)
if size < 0:
ret_list[-1], remainder = ret_list[-1][:size], ret_list[-1][size:]
self.__buf.appendleft(remainder)
ret = b''.join(ret_list)
self.__size -= len(ret)
return ret | Read at most size bytes from this buffer.
Bytes read from this buffer are consumed and are permanently removed.
Args:
size: If provided, read no more than size bytes from the buffer.
Otherwise, this reads the entire buffer.
Returns:
The bytes read from this buffer. |
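An assumed standalone sketch of the chunked consumption pattern used above (split the last chunk and push the remainder back):
    from collections import deque
    buf = deque([b'hello', b' ', b'world'])
    size, parts = 7, []
    while size > 0 and buf:
        chunk = buf.popleft()
        size -= len(chunk)
        parts.append(chunk)
    if size < 0:
        parts[-1], rest = parts[-1][:size], parts[-1][size:]
        buf.appendleft(rest)
    print(b''.join(parts))  # b'hello w', with b'orld' pushed back onto the buffer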
18,782 | def lowercase(state):
return state.to_child(
student_result={k.lower(): v for k, v in state.student_result.items()},
solution_result={k.lower(): v for k, v in state.solution_result.items()},
) | Convert all column names to their lower case versions to improve robustness
:Example:
Suppose we are testing the following SELECT statements
* solution: ``SELECT artist_id as id FROM artists``
* student : ``SELECT artist_id as ID FROM artists``
We can write the following SCTs: ::
# fails, as id and ID have different case
Ex().check_column('id').has_equal_value()
# passes, as lowercase() is being used
Ex().lowercase().check_column('id').has_equal_value() |
18,783 | def get_contents_debug_adapter_protocol(self, lst, fmt=None):
This method is to be used in the case where the variables are all saved by its id (and as such don't need to have the `resolve` method called later on, so, keys don't need to embed the reference in the key).
l = len(lst)
ret = []
format_str = '%0' + str(int(len(str(l - 1)))) + 'd'
if fmt is not None and fmt.get('hex', False):
format_str = '0x%0' + str(int(len(hex(l).lstrip('0x')))) + 'x'
for i, item in enumerate(lst):
ret.append((format_str % i, item, '[%s]' % i))
if i > MAX_ITEMS_TO_HANDLE:
ret.append((TOO_LARGE_ATTR, TOO_LARGE_MSG, None))
break
ret.append((, len(lst), partial(_apply_evaluate_name, evaluate_name=)))
from_default_resolver = defaultResolver.get_contents_debug_adapter_protocol(lst, fmt=fmt)
if from_default_resolver:
ret = from_default_resolver + ret
return ret | This method is to be used in the case where the variables are all saved by its id (and as
such don't need to have the `resolve` method called later on, so, keys don't need to
embed the reference in the key).
Note that the return should be ordered.
:return list(tuple(name:str, value:object, evaluateName:str)) |
18,784 | def series_resistance(self, channel, resistor_index=None):
if resistor_index is None:
resistor_index = self.series_resistor_index(channel)
value = self._series_resistance(channel)
try:
if channel == 0:
self.calibration.R_hv[resistor_index] = value
else:
self.calibration.R_fb[resistor_index] = value
except:
pass
return value | Parameters
----------
channel : int
Analog channel index.
resistor_index : int, optional
Series resistor channel index.
If :data:`resistor_index` is not specified, the resistor-index from
the current context _(i.e., the result of
:attr:`series_resistor_index`)_ is used.
Otherwise, the series-resistor is temporarily set to the value of
:data:`resistor_index` to set the capacitance before restoring back
to the original value.
See definition of :meth:`safe_series_resistor_index_read`
decorator.
Returns
-------
float
Return the current series resistance value for the specified
channel. |
18,785 | def cartpole():
locals().update(default())
env =
max_length = 500
steps = 2e5
normalize_ranges = False
network = networks.feed_forward_categorical
return locals() | Configuration for the cart pole classic control task. |
18,786 | def _infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel, to_include=None,
exclude=None):
fields = []
unlist = set([_get_string_vid(x) for x in unlist])
input_vids = set([_get_string_vid(v) for v in _handle_special_inputs(inputs, file_vs)])
to_include = set([_get_string_vid(x) for x in to_include]) if to_include else None
to_exclude = tuple(set([_get_string_vid(x) for x in exclude])) if exclude else None
added = set([])
for raw_v in std_vs + [v for v in file_vs if get_base_id(v["id"]) in input_vids]:
cur_record = is_cwl_record(raw_v)
if cur_record:
nested_vs = [{"id": field["name"], "type": field["type"]} for field in cur_record["fields"]]
else:
nested_vs = [raw_v]
for orig_v in nested_vs:
if (get_base_id(orig_v["id"]) not in added
and (not to_include or get_base_id(orig_v["id"]) in to_include)):
if to_exclude is None or not get_base_id(orig_v["id"]).startswith(to_exclude):
cur_v = {}
cur_v["name"] = get_base_id(orig_v["id"])
cur_v["type"] = orig_v["type"]
if cur_v["name"] in unlist:
cur_v = _flatten_nested_input(cur_v)
fields.append(_add_secondary_to_rec_field(orig_v, cur_v))
added.add(get_base_id(orig_v["id"]))
return fields | Infer the outputs of a record from the original inputs |
18,787 | def _ows_check_charm_func(state, message, charm_func_with_configs):
if charm_func_with_configs:
charm_state, charm_message = charm_func_with_configs()
if (charm_state != 'active' and
charm_state != 'unknown' and
charm_state is not None):
state = workload_state_compare(state, charm_state)
if message:
charm_message = charm_message.replace("Incomplete relations: ",
"")
message = "{}, {}".format(message, charm_message)
else:
message = charm_message
return state, message | Run a custom check function for the charm to see if it wants to
change the state. This is only run if not in 'maintenance' and
tests to see if the new state is more important than the previous
one determined by the interfaces/relations check.
@param state: the previously determined state so far.
@param message: the user orientated message so far.
@param charm_func: a callable function that returns state, message
@returns state, message strings. |
18,788 | def S_isothermal_pipe_to_two_planes(D, Z, L=1.):
r
return 2.*pi*L/log(8.*Z/(pi*D)) | r'''Returns the Shape factor `S` of a pipe of constant outer temperature
and of outer diameter `D` which is `Z` distance from two infinite
isothermal planes of equal temperatures, parallel to each other and
enclosing the pipe. Length `L` must be provided, but can be set to
1 to obtain a dimensionless shape factor used in some sources.
.. math::
S = \frac{2\pi L}{\ln\frac{8z}{\pi D}}
Parameters
----------
D : float
Diameter of the pipe, [m]
Z : float
Distance from the middle of the pipe to either of the planes, [m]
L : float, optional
Length of the pipe, [m]
Returns
-------
S : float
Shape factor [m]
Examples
--------
>>> S_isothermal_pipe_to_two_planes(.1, 5, 1)
1.2963749299921428
Notes
-----
L should be much larger than both diameters. L should be larger than W.
.. math::
Q = Sk(T_1 - T_2) \\ R_{\text{shape}}=\frac{1}{Sk}
References
----------
.. [1] Shape Factors for Heat Conduction Through Bodies with Isothermal or
Convective Boundary Conditions, J. E. Sunderland, K. R. Johnson, ASHRAE
Transactions, Vol. 70, 1964.
.. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
Wiley, 2011. |
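A self-contained numeric check (values taken from the doctest above) of the shape-factor formula:
    from math import pi, log
    D, Z, L = 0.1, 5.0, 1.0
    S = 2.0 * pi * L / log(8.0 * Z / (pi * D))
    print(S)  # ~1.29637, matching S_isothermal_pipe_to_two_planes(.1, 5, 1)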
18,789 | def init_layout(self):
for child in self.children():
self.child_added(child)
self.update_shape({}) | Initialize the layout of the toolkit shape.
This method is called during the bottom-up pass. This method
should initialize the layout of the widget. The child widgets
will be fully initialized and layed out when this is called. |
18,790 | def fetch_by_client_id(self, client_id):
if client_id not in self.clients:
raise ClientNotFoundError
return self.clients[client_id] | Retrieve a client by its identifier.
:param client_id: Identifier of a client app.
:return: An instance of :class:`oauth2.Client`.
:raises: ClientNotFoundError |
18,791 | def fillna(self, column_name, value):
if type(column_name) is not str:
raise TypeError("column_name must be a str")
ret = self[self.column_names()]
ret[column_name] = ret[column_name].fillna(value)
return ret | Fill all missing values with a given value in a given column. If the
``value`` is not the same type as the values in ``column_name``, this method
attempts to convert the value to the original column's type. If this
fails, an error is raised.
Parameters
----------
column_name : str
The name of the column to modify.
value : type convertible to SArray's type
The value used to replace all missing values.
Returns
-------
out : SFrame
A new SFrame with the specified value in place of missing values.
See Also
--------
dropna
Examples
--------
>>> sf = turicreate.SFrame({'a':[1, None, None],
... 'b':['13.1', '17.2', None]})
>>> sf = sf.fillna('a', 0)
>>> sf
+---+------+
| a | b |
+---+------+
| 1 | 13.1 |
| 0 | 17.2 |
| 0 | None |
+---+------+
[3 rows x 2 columns] |
18,792 | def audio_send_stream(self, httptype=None,
channel=None, path_file=None, encode=None):
if httptype is None or channel is None:
raise RuntimeError("Requires htttype and channel")
file_audio = {
: open(path_file, ),
}
header = {
: + encode,
:
}
self.command_audio(
.format(
httptype, channel),
file_content=file_audio,
http_header=header
) | Params:
path_file - path to audio file
channel: - integer
httptype - type string (singlepart or multipart)
singlepart: HTTP content is a continuous flow of audio packets
multipart: HTTP content type is multipart/x-mixed-replace, and
each audio packet ends with a boundary string
Supported audio encode types according to the documentation:
PCM
ADPCM
G.711A
G.711.Mu
G.726
G.729
MPEG2
AMR
AAC |
18,793 | def AssignVar(self, value):
self.value = value
[option.OnAssignVar() for option in self.options] | Assign a value to this Value. |
18,794 | def dependents(self, on_predicate=None, from_predicate=None):
core = set(self.targets(on_predicate))
dependees = defaultdict(set)
for target in self.targets(from_predicate):
for dependency in target.dependencies:
if dependency in core:
dependees[target].add(dependency)
return dependees | Returns a map from targets that satisfy the from_predicate to targets they depend on that
satisfy the on_predicate.
:API: public |
18,795 | def __fetch_issue_attachments(self, issue_id):
for attachments_raw in self.client.issue_collection(issue_id, "attachments"):
attachments = json.loads(attachments_raw)
for attachment in attachments['entries']:
yield attachment | Get attachments of an issue |
18,796 | def getEvents(self):
caught_events = self._observer.caught_events
self._observer.caught_events = []
for event in caught_events:
self._observer.activate_event(event["name"])
return caught_events | Returns a list of all events that have occurred.
Empties the internal queue. |
18,797 | def session_end(self):
self.session_depth -= 1
self.session_depth = max(0, self.session_depth)
if self.session_depth == 0:
self._session_end() | End a session. See session_begin for an in-depth description of TREZOR sessions. |
18,798 | def report_errors_to_ga(self, errors):
hits = []
responses = []
for field_name in sorted(errors):
for error_message in errors[field_name]:
event = self.format_ga_hit(field_name, error_message)
if event:
hits.append(event)
if self.ga_batch_hits:
for hit_batch in _batch_hits(hits):
response = requests.post(self.get_ga_batch_endpoint(), data=hit_batch)
responses.append(response)
else:
for hit in hits:
response = requests.post(self.get_ga_single_endpoint(), data=hit)
responses.append(response)
return responses | Report errors to Google Analytics
https://developers.google.com/analytics/devguides/collection/protocol/v1/devguide |
18,799 | def postman(host, port=587, auth=(None, None),
force_tls=False, options=None):
return Postman(
host=host,
port=port,
middlewares=[
middleware.tls(force=force_tls),
middleware.auth(*auth),
],
**(options or {})
) | Creates a Postman object with TLS and Auth
middleware. TLS is placed before authentication
because usually authentication happens and is
accepted only after TLS is enabled.
:param auth: Tuple of (username, password) to
be used to ``login`` to the server.
:param force_tls: Whether TLS should be forced.
:param options: Dictionary of keyword arguments
to be used when the SMTP class is called. |