Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---|
375,100 | def require_template_debug(f):
def _(*args, **kwargs):
        TEMPLATE_DEBUG = getattr(settings, 'TEMPLATE_DEBUG', False)
        return f(*args, **kwargs) if TEMPLATE_DEBUG else ''
return _ | Decorated function is a no-op if TEMPLATE_DEBUG is False |
375,101 | def CopyToDateTimeString(cls, time_elements_tuple, fraction_of_second):
if fraction_of_second < 0.0 or fraction_of_second >= 1.0:
        raise ValueError('Fraction of second value: {0:f} out of bounds.'.format(
            fraction_of_second))
    milliseconds = int(fraction_of_second * definitions.MILLISECONDS_PER_SECOND)
    return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:03d}'.format(
time_elements_tuple[0], time_elements_tuple[1], time_elements_tuple[2],
time_elements_tuple[3], time_elements_tuple[4], time_elements_tuple[5],
milliseconds) | Copies the time elements and fraction of second to a string.
Args:
time_elements_tuple (tuple[int, int, int, int, int, int]):
time elements, contains year, month, day of month, hours, minutes and
seconds.
fraction_of_second (decimal.Decimal): fraction of second, which must be a
value between 0.0 and 1.0.
Returns:
str: date and time value formatted as:
YYYY-MM-DD hh:mm:ss.###
Raises:
ValueError: if the fraction of second is out of bounds. |
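A minimal standalone sketch of the formatting described in the docstring above; it hard-codes 1000 in place of `definitions.MILLISECONDS_PER_SECOND` and is not the library's own code.

```python
def format_time_elements(time_elements_tuple, fraction_of_second):
    # Same validation and "YYYY-MM-DD hh:mm:ss.###" layout as the docstring.
    if not 0.0 <= fraction_of_second < 1.0:
        raise ValueError('fraction of second out of bounds: {0!s}'.format(
            fraction_of_second))
    milliseconds = int(fraction_of_second * 1000)
    return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:03d}'.format(
        time_elements_tuple[0], time_elements_tuple[1], time_elements_tuple[2],
        time_elements_tuple[3], time_elements_tuple[4], time_elements_tuple[5],
        milliseconds)

print(format_time_elements((2023, 7, 14, 9, 30, 5), 0.25))
# -> 2023-07-14 09:30:05.250
```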
375,102 | def do_db(self, arg):
    if arg['get_config']:
        self.db_get_config()
    elif arg['use_local_file']:
        self.db_use_local_file(arg, filename=arg['<filename>'])
    elif arg['use_aws_instance']:
        self.db_use_aws_instance(arg['<instance_id>'], arg)
    elif arg['aws_list_regions']:
        self.db_aws_list_regions()
    elif arg['aws_get_region']:
        self.db_aws_get_region()
    elif arg['aws_set_region']:
        self.db_aws_set_region(arg['<region_name>'])
    elif arg['aws_list_instances']:
        self.db_aws_list_instances()
    elif arg['aws_create_instance']:
        self.db_create_aws_db_instance(arg['<instance_id>'], arg['<size>'],
                                       arg['<username>'],
                                       arg['<password>'], arg['<dbname>'])
    elif arg['aws_delete_instance']:
        self.db_aws_delete_instance(arg['<instance_id>'])
else:
self.help_db() | Usage:
db get_config
db use_local_file [<filename>]
db use_aws_instance [<instance_id>]
db aws_list_regions
db aws_get_region
db aws_set_region [<region_name>]
db aws_list_instances
db aws_create_instance [<instance_id> <size> <username> <password>
<dbname>]
db aws_delete_instance [<instance_id>]
db help |
375,103 | def get_POST_data(self):
self._postprocess()
self._apply_mapping(
self.mapping.get(self._POST["P0502010__b"], self.mapping["else"])
)
self._check_required_fields()
return self._POST | Returns:
dict: POST data, which can be sent to webform using \
:py:mod:`urllib` or similar library |
375,104 | def until(self, regex):
logger.debug(, regex)
r = re.compile(regex, re.M)
    self.tn.expect([r]) | Wait until the regex is encountered |
375,105 | def create_app(app_name, config={}, db=None, celery=None):
track_mode = os.environ[] ==
if track_mode:
print()
active_db = os.environ[] ==
if track_mode:
print()
from webargs.flaskparser import parser
from . import error_handler, hacker, cli
hacker.hack_webargs()
migrations_root = os.path.join(
os.environ.get(,
os.getcwd()),
)
if track_mode:
print()
mod = importlib.import_module(app_name)
app = FantasyFlask(__name__, root_path=os.path.dirname(mod.__file__))
if track_mode:
print()
if config:
app.config.update(config)
config_module = os.environ.get(, None)
if track_mode:
print(" found config module %s,try load it..." % config_module)
if config_module:
app.config.from_object(config_module)
if track_mode:
print()
if celery:
app.celery = celery
pass
if track_mode:
print()
with app.app_context():
if track_mode:
print()
if db is None:
global _db
app.db = _db
else:
app.db = db
if track_mode:
print()
if os.environ[] != :
from flask_caching import Cache
app.cache = Cache(app, config=app.config)
pass
if track_mode:
print()
if os.environ.get() != :
from raven.contrib.flask import Sentry
Sentry(app)
pass
if track_mode:
print()
if hasattr(mod, ):
run_app = getattr(mod, )
try:
run_app(app)
except Exception as e:
if hasattr(app, ):
app.sentry.handle_exception(e)
pass
import sys
import traceback
traceback.print_exc()
sys.exit(-1)
pass
if active_db and app.db:
if track_mode:
print()
smart_database(app)
smart_migrate(app, migrations_root)
smart_account(app)
app.db.init_app(app)
@app.teardown_request
def session_clear(exception=None):
if exception and app.db.session.is_active:
app.db.session.rollback()
app.db.session.remove()
pass
if track_mode:
print()
@parser.error_handler
def h_webargs(error):
return error_handler.webargs_error(error)
@app.errorhandler(422)
def h_422(error):
return error_handler.http422(error)
@app.errorhandler(500)
def h_500(error):
return error_handler.http500(error)
if hasattr(mod, ):
error_handle = getattr(mod, )
error_handle(app)
pass
if track_mode:
print()
if hasattr(mod, ):
import flask_admin
admin = flask_admin.Admin(name=os.environ.get(,
),
template_mode=os.environ.get(
,
))
run_admin = getattr(mod, )
run_admin(admin)
admin.init_app(app)
pass
pass
if track_mode:
print()
app.cli.add_command(cli.ff)
if track_mode:
print()
if hasattr(mod, ):
run_cli = getattr(mod, )
run_cli(app)
pass
    return app | App Factory utility.
The strategy is:
- initialize the app
- load the module specified by app_name
- try to load app.run_app
- if `FANTASY_PRIMARY_NODE` is specified, attempt the migrate operation
- load the error handlers
:return: |
375,106 | def start_with(self, x):
_args = []
for arg in self.all:
if _is_collection(x):
for _x in x:
                if arg.startswith(_x):
_args.append(arg)
break
else:
if arg.startswith(x):
_args.append(arg)
return ArgsList(_args, no_argv=True) | Returns all arguments beginning with given string
(or list thereof). |
375,107 | def read_uint16(self):
if self.pos + 2 > self.remaining_length:
return NC.ERR_PROTOCOL
msb = self.payload[self.pos]
self.pos += 1
lsb = self.payload[self.pos]
self.pos += 1
word = (msb << 8) + lsb
return NC.ERR_SUCCESS, word | Read 2 bytes. |
375,108 | def find_matching_endpoints(self, discovery_ns):
def match_func(operation, ns, rule):
return operation in self.matching_operations
return list(iter_endpoints(self.graph, match_func)) | Compute current matching endpoints.
Evaluated as a property to defer evaluation. |
375,109 | def run_analysis(self, argv):
args = self._parser.parse_args(argv)
if not HAVE_ST:
        raise RuntimeError(
            "Trying to run fermipy analysis, but don't have ST")
gta.fit(covar=True)
gta.print_roi()
gta.print_params()
gta.write_roi(args.roi_baseline, make_plots=args.make_plots) | Run this analysis |
375,110 | def train_cv(self, num_folds, fold, random=None):
if random is None:
return Instances(
javabridge.call(self.jobject, "trainCV", "(II)Lweka/core/Instances;",
num_folds, fold))
else:
return Instances(
javabridge.call(self.jobject, "trainCV", "(IILjava/util/Random;)Lweka/core/Instances;",
num_folds, fold, random.jobject)) | Generates a training fold for cross-validation.
:param num_folds: the number of folds of cross-validation, eg 10
:type num_folds: int
:param fold: the current fold (0-based)
:type fold: int
:param random: the random number generator
:type random: Random
:return: the training fold
:rtype: Instances |
375,111 | def _antenna_uvw(uvw, antenna1, antenna2, chunks, nr_of_antenna):
if antenna1.ndim != 1:
raise ValueError("antenna1 shape should be (row,)")
if antenna2.ndim != 1:
raise ValueError("antenna2 shape should be (row,)")
if uvw.ndim != 2 or uvw.shape[1] != 3:
raise ValueError("uvw shape should be (row, 3)")
if not (uvw.shape[0] == antenna1.shape[0] == antenna2.shape[0]):
raise ValueError("First dimension of uvw, antenna1 "
"and antenna2 do not match")
if chunks.ndim != 1:
raise ValueError("chunks shape should be (utime,)")
if nr_of_antenna < 1:
raise ValueError("nr_of_antenna < 1")
ant_uvw_shape = (chunks.shape[0], nr_of_antenna, 3)
antenna_uvw = np.full(ant_uvw_shape, np.nan, dtype=uvw.dtype)
start = 0
for ci, chunk in enumerate(chunks):
end = start + chunk
_antenna_uvw_loop(uvw, antenna1, antenna2, antenna_uvw, ci, start, end)
start = end
return antenna_uvw | numba implementation of antenna_uvw |
375,112 | def _fileobj_to_fd(fileobj):
if isinstance(fileobj, int):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid file object: {0!r}".format(fileobj))
if fd < 0:
raise ValueError("Invalid file descriptor: {0}".format(fd))
return fd | Return a file descriptor from a file object. If
given an integer will simply return that integer back. |
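A short usage sketch, assuming `_fileobj_to_fd` from the row above is in scope; the socket is only used to obtain a real descriptor.

```python
import socket

sock = socket.socket()
print(_fileobj_to_fd(sock))           # descriptor extracted via fileno()
print(_fileobj_to_fd(sock.fileno()))  # an int is returned unchanged
sock.close()

try:
    _fileobj_to_fd("not a file")
except ValueError as err:
    print(err)  # Invalid file object: 'not a file'
```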
375,113 | def mget(self, ids, index=None, doc_type=None, **query_params):
if not ids:
return []
body = []
for value in ids:
if isinstance(value, tuple):
if len(value) == 3:
a, b, c = value
body.append({"_index": a,
"_type": b,
"_id": c})
elif len(value) == 4:
a, b, c, d = value
body.append({"_index": a,
"_type": b,
"_id": c,
"fields": d})
else:
if index is None:
raise InvalidQuery("index value is required for id")
if doc_type is None:
raise InvalidQuery("doc_type value is required for id")
body.append({"_index": index,
"_type": doc_type,
"_id": value})
        results = self._send_request('GET', "/_mget", body={'docs': body},
                                     params=query_params)
        if 'docs' in results:
            model = self.model
            return [model(self, item) for item in results['docs']]
return [] | Get multi JSON documents.
ids can be:
list of tuple: (index, type, id)
list of ids: index and doc_type are required |
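A standalone sketch of how the `_mget` request body is assembled from the two accepted id forms; the index, type, and id values are made up, and the `docs` wrapper mirrors the Elasticsearch multi-get payload.

```python
ids = [("library", "book", "1"), ("library", "book", "2", ["title"]), "3"]
index, doc_type = "library", "book"

body = []
for value in ids:
    if isinstance(value, tuple):
        if len(value) == 3:                       # (index, type, id)
            idx, typ, _id = value
            body.append({"_index": idx, "_type": typ, "_id": _id})
        else:                                     # (index, type, id, fields)
            idx, typ, _id, fields = value
            body.append({"_index": idx, "_type": typ, "_id": _id,
                         "fields": fields})
    else:                                         # bare id, use the defaults
        body.append({"_index": index, "_type": doc_type, "_id": value})

print({"docs": body})  # payload that would be sent to /_mget
```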
375,114 | def mpl_get_cb_bound_below_plot(ax):
position = ax.get_position()
figW, figH = ax.get_figure().get_size_inches()
fig_aspect = figH / figW
box_aspect = ax.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect).bounds
ax_size = ax.get_position().bounds
sizes = [ax_size[0], ax_size[1] - 0.14, pb1[2], 0.03]
return sizes | Return the coordinates for a colorbar axes below the provided axes object.
Take into account the changes of the axes due to aspect ratio settings.
Parts of this code are taken from the transforms.py file from matplotlib
    Important: Use only AFTER fig.subplots_adjust(...) |
375,115 | def check_labels_file_header(filename):
    with tf.gfile.Open(filename, 'rb') as f:
        magic = read32(f)
        read32(f)  # number of items, unused here
        if magic != 2049:
            raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
                             f.name)) | Validate that filename corresponds to labels for the MNIST dataset. |
375,116 | def _check_embedded_object(embedded_object, type, value, element_kind,
element_name):
    if embedded_object not in ('instance', 'object'):
        raise ValueError(
            _format("{0} {1!A} specifies an invalid value for "
                    "embedded_object: {2!A} (must be 'instance' or 'object')",
                    element_kind, element_name, embedded_object))
    if type != 'string':
        raise ValueError(
            _format("{0} {1!A} specifies embedded_object {2!A} but its CIM "
                    "type is invalid: {3!A} (must be string)",
                    element_kind, element_name, embedded_object, type))
if value is not None:
if isinstance(value, list):
if value:
v0 = value[0]
if v0 is not None and \
not isinstance(v0, (CIMInstance, CIMClass)):
raise ValueError(
_format("Array {0} {1!A} specifies embedded_object "
"{2!A} but the Python type of its first array "
"value is invalid: {3} (must be CIMInstance "
"or CIMClass)",
element_kind, element_name, embedded_object,
builtin_type(v0)))
else:
if not isinstance(value, (CIMInstance, CIMClass)):
raise ValueError(
_format("{0} {1!A} specifies embedded_object {2!A} but "
"the Python type of its value is invalid: {3} "
"(must be CIMInstance or CIMClass)",
element_kind, element_name, embedded_object,
builtin_type(value))) | Check whether embedded-object-related parameters are ok. |
375,117 | def imap(requests, stream=False, size=2, exception_handler=None):
pool = Pool(size)
def send(r):
return r.send(stream=stream)
for request in pool.imap_unordered(send, requests):
if request.response is not None:
yield request.response
elif exception_handler:
ex_result = exception_handler(request, request.exception)
if ex_result is not None:
yield ex_result
pool.join() | Concurrently converts a generator object of Requests to
a generator of Responses.
:param requests: a generator of Request objects.
:param stream: If True, the content will not be downloaded immediately.
:param size: Specifies the number of requests to make at a time. default is 2
:param exception_handler: Callback function, called when exception occured. Params: Request, Exception |
375,118 | def update_lazyevals(self):
if self.lazy_evals is None:
return
elif isinstance(self.lazy_evals, LazyEval):
self.lazy_evals.get_updated()
else:
for lz in self.lazy_evals:
lz.get_updated() | Update all LazyEvals in self
    self.lazy_evals must be set to LazyEval object(s) sufficient to
update all owned LazyEval objects. |
375,119 | def _prune_maps_to_sequences(self):
for c, seq in self.atom_sequences.iteritems():
res_ids = [r[0] for r in seq]
for_removal = []
for k, _, _ in self.atom_to_seqres_sequence_maps[c]:
if k not in res_ids:
for_removal.append(k)
for res_id in for_removal:
self.atom_to_seqres_sequence_maps[c].remove(res_id) | When we merge the SIFTS maps, we can extend the sequence maps such that they have elements in their domain that we removed
from the sequence e.g. 1A2P, residue 'B 3 ' is removed because Rosetta barfs on it. Here, we prune the maps so that their
domains do not have elements that were removed from sequences. |
375,120 | def add(self, child):
if isinstance(child, StateVariable):
self.add_state_variable(child)
elif isinstance(child, DerivedVariable):
self.add_derived_variable(child)
elif isinstance(child, ConditionalDerivedVariable):
self.add_conditional_derived_variable(child)
elif isinstance(child, TimeDerivative):
self.add_time_derivative(child)
elif isinstance(child, EventHandler):
self.add_event_handler(child)
elif isinstance(child, KineticScheme):
self.add_kinetic_scheme(child)
else:
raise ModelError() | Adds a typed child object to the behavioral object.
@param child: Child object to be added. |
375,121 | def copy(self, empty=False):
newobject = self.__new__(self.__class__)
if empty:
return
for prop in ["_properties","_side_properties",
"_derived_properties","_build_properties"
]:
if prop not in dir(self):
continue
try:
newobject.__dict__[prop] = copy.deepcopy(self.__dict__[prop])
except:
newobject.__dict__[prop] = copy.copy(self.__dict__[prop])
newobject._update_()
return newobject | returns an independent copy of the current object. |
375,122 | def scp(args):
if args.scp_args[0] == "--":
del args.scp_args[0]
user_or_hostname_chars = string.ascii_letters + string.digits
for i, arg in enumerate(args.scp_args):
if arg[0] in user_or_hostname_chars and ":" in arg:
hostname, colon, path = arg.partition(":")
username, at, hostname = hostname.rpartition("@")
hostname = resolve_instance_public_dns(hostname)
if not (username or at):
try:
username, at = get_linux_username(), "@"
except Exception:
logger.info("Unable to determine IAM username, using local username")
args.scp_args[i] = username + at + hostname + colon + path
os.execvp("scp", ["scp"] + args.scp_args) | Transfer files to or from EC2 instance.
Use "--" to separate scp args from aegea args:
aegea scp -- -r local_dir instance_name:~/remote_dir |
375,123 | def setCommonInput(configObj, createOutwcs=True):
if not in configObj:
configObj[] = False
if not createOutwcs or not configObj[]:
reportResourceUsage(imageObjectList, outwcs, num_cores)
except ValueError:
imageObjectList = None
return imageObjectList, outwcs | The common interface interpreter for MultiDrizzle tasks which not only runs
'process_input()' but 'createImageObject()' and 'defineOutput()' as well to
fully setup all inputs for use with the rest of the MultiDrizzle steps either
as stand-alone tasks or internally to MultiDrizzle itself.
Parameters
----------
configObj : object
configObj instance or simple dictionary of input parameters
imageObjectList : list of imageObject objects
list of imageObject instances, 1 for each input exposure
outwcs : object
imageObject instance defining the final output frame
Notes
-----
At a minimum, the configObj instance (dictionary) should contain:
configObj = {'input':None,'output':None }
If provided, the configObj should contain the values of all the multidrizzle parameters
as set by the user with TEAL. If no configObj is given, it will retrieve
the default values automatically. In either case, the values from the input_dict
will be merged in with the configObj before being used by the rest of the
code.
Examples
--------
You can set *createOutwcs=False* for the cases where you only want the
    images processed and no output wcs information is necessary; as in:
>>> imageObjectList,outwcs = processInput.processCommonInput(configObj) |
375,124 | def next_frame_savp_gan():
hparams = next_frame_savp()
hparams.use_gan = True
hparams.use_vae = False
hparams.gan_loss_multiplier = 0.001
hparams.optimizer_adam_beta1 = 0.5
hparams.learning_rate_constant = 2e-4
hparams.gan_loss = "cross_entropy"
hparams.learning_rate_decay_steps = 100000
hparams.learning_rate_schedule = "constant*linear_decay"
return hparams | SAVP - GAN only model. |
375,125 | def set_iscsi_info(self, target_name, lun, ip_address,
                       port='3260', auth_method=None, username=None,
                       password=None):
        return self._call_method('set_iscsi_info', target_name, lun,
                                 ip_address, port, auth_method, username,
password) | Set iscsi details of the system in uefi boot mode.
The initiator system is set with the target details like
IQN, LUN, IP, Port etc.
:param target_name: Target Name for iscsi.
:param lun: logical unit number.
:param ip_address: IP address of the target.
:param port: port of the target.
:param auth_method : either None or CHAP.
:param username: CHAP Username for authentication.
:param password: CHAP secret.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the bios boot mode. |
375,126 | def import_parameters_from_file(parameters_file):
params = {}
    with open(parameters_file, 'r') as f:
        # pattern inferred from the documented "param: value" line format
        matches = re.findall(r'(\w+)\s*:\s*(.+)', f.read())
for m in matches:
params[m[0]] = ast.literal_eval(m[1])
return params | Try importing a parameter dictionary from file.
We expect values in parameters_file to be defined as follows:
param1: value1
param2: [value2, value3] |
375,127 | def shapeRef_to_iriref(self, ref: ShExDocParser.ShapeRefContext) -> ShExJ.IRIREF:
if ref.ATPNAME_NS():
return ShExJ.IRIREF(self._lookup_prefix(ref.ATPNAME_NS().getText()[1:]))
elif ref.ATPNAME_LN():
        prefix, local = ref.ATPNAME_LN().getText()[1:].split(':', 1)
        return ShExJ.IRIREF(self._lookup_prefix(prefix + ':') + (local if local else ""))
else:
return self.shapeexprlabel_to_IRI(ref.shapeExprLabel()) | shapeRef: ATPNAME_NS | ATPNAME_LN | '@' shapeExprLabel |
375,128 | def list_sensors(parent_class, sensor_items, filter, strategy, status,
use_python_identifiers, tuple, refresh):
filter_re = re.compile(filter)
found_sensors = []
none_strat = resource.normalize_strategy_parameters()
sensor_dict = dict(sensor_items)
for sensor_identifier in sorted(sensor_dict.keys()):
sensor_obj = sensor_dict[sensor_identifier]
search_name = (sensor_identifier if use_python_identifiers
else sensor_obj.name)
name_match = filter_re.search(search_name)
strat_match = not strategy or sensor_obj.sampling_strategy != none_strat
if name_match and strat_match:
if refresh:
yield sensor_obj.get_value()
prefix = ""
if isinstance(parent_class, KATCPClientResourceContainer):
if sensor_obj.name.startswith("agg_"):
prefix = ""
else:
prefix = sensor_obj.parent_name + "."
if not status or (sensor_obj.reading.status in status):
if tuple:
found_sensors.append((
prefix+sensor_obj.name,
sensor_obj.reading.value,
sensor_obj.reading.timestamp,
sensor_obj.type,
sensor_obj.units,
sensor_obj.reading.received_timestamp,
sensor_obj.reading.status,
sensor_obj.sampling_strategy
))
else:
found_sensors.append(resource.SensorResultTuple(
object=sensor_obj,
name=prefix+sensor_obj.name,
python_identifier=sensor_identifier,
description=sensor_obj.description,
units=sensor_obj.units,
type=sensor_obj.type,
reading=sensor_obj.reading))
raise tornado.gen.Return(found_sensors) | Helper for implementing :meth:`katcp.resource.KATCPResource.list_sensors`
Parameters
----------
sensor_items : tuple of sensor-item tuples
        As would be returned by the items() method of a dict containing KATCPSensor objects
keyed by Python-identifiers.
parent_class: KATCPClientResource or KATCPClientResourceContainer
Is used for prefix calculation
Rest of parameters as for :meth:`katcp.resource.KATCPResource.list_sensors` |
375,129 | def create_process(cmd, root_helper=None, addl_env=None, log_output=True):
if root_helper:
cmd = shlex.split(root_helper) + cmd
cmd = map(str, cmd)
log_output and LOG.info("Running command: %s", cmd)
env = os.environ.copy()
if addl_env:
env.update(addl_env)
obj = subprocess_popen(cmd, shell=False, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
return obj, cmd | Create a process object for the given command.
The return value will be a tuple of the process object and the
list of command arguments used to create it. |
375,130 | def _setup_no_fallback(parser):
    cli_dest = 'tox_pyenv_fallback'
    halp = ('If `pyenv which {basepython}` exits non-zero when looking '
            'up the python executable, allow fallback to tox\'s '
            'built-in default logic.')
    parser.add_argument('-F', '--tox-pyenv-no-fallback', action='store_false',
                        dest=cli_dest, default=True, help=halp) | Add the option, --tox-pyenv-no-fallback.
If this option is set, do not allow fallback to tox's built-in
strategy for looking up python executables if the call to `pyenv which`
by this plugin fails. This will allow the error to raise instead
of falling back to tox's default behavior. |
375,131 | def _mark_lines(lines, sender):
global EXTRACTOR
candidate = get_signature_candidate(lines)
    markers = list('t' * len(lines))
    for i, line in reversed(list(enumerate(candidate))):
        j = len(lines) - len(candidate) + i
        if not line.strip():
            markers[j] = 'e'
        elif is_signature_line(line, sender, EXTRACTOR):
            markers[j] = 's'
return "".join(markers) | Mark message lines with markers to distinguish signature lines.
Markers:
* e - empty line
* s - line identified as signature
* t - other i.e. ordinary text line
>>> mark_message_lines(['Some text', '', 'Bob'], 'Bob')
'tes' |
375,132 | def bam2fastq(self, input_bam, output_fastq,
output_fastq2=None, unpaired_fastq=None):
self._ensure_folders(output_fastq, output_fastq2, unpaired_fastq)
cmd = self.tools.java + " -Xmx" + self.pm.javamem
cmd += " -jar " + self.tools.picard + " SamToFastq"
cmd += " INPUT={0}".format(input_bam)
cmd += " FASTQ={0}".format(output_fastq)
if output_fastq2 is not None and unpaired_fastq is not None:
cmd += " SECOND_END_FASTQ={0}".format(output_fastq2)
cmd += " UNPAIRED_FASTQ={0}".format(unpaired_fastq)
return cmd | Create command to convert BAM(s) to FASTQ(s).
:param str input_bam: Path to sequencing reads file to convert
:param output_fastq: Path to FASTQ to write
:param output_fastq2: Path to (R2) FASTQ to write
:param unpaired_fastq: Path to unpaired FASTQ to write
:return str: Command to convert BAM(s) to FASTQ(s) |
375,133 | def slip_reader(port, trace_function):
partial_packet = None
in_escape = False
while True:
waiting = port.inWaiting()
read_bytes = port.read(1 if waiting == 0 else waiting)
        if read_bytes == b'':
waiting_for = "header" if partial_packet is None else "content"
trace_function("Timed out waiting for packet %s", waiting_for)
raise FatalError("Timed out waiting for packet %s" % waiting_for)
trace_function("Read %d bytes: %s", len(read_bytes), HexFormatter(read_bytes))
for b in read_bytes:
if type(b) is int:
b = bytes([b])
if partial_packet is None:
                if b == b'\xc0':  # start of a new packet
                    partial_packet = b""
                else:
                    trace_function("Read invalid data: %s", HexFormatter(read_bytes))
                    trace_function("Remaining data in serial buffer: %s", HexFormatter(port.read(port.inWaiting())))
                    raise FatalError('Invalid head of packet (0x%s)' % hexify(b))
            elif in_escape:  # part-way through an escape sequence
                in_escape = False
                if b == b'\xdc':
                    partial_packet += b'\xc0'
                elif b == b'\xdd':
                    partial_packet += b'\xdb'
                else:
                    trace_function("Read invalid data: %s", HexFormatter(read_bytes))
                    trace_function("Remaining data in serial buffer: %s", HexFormatter(port.read(port.inWaiting())))
                    raise FatalError('Invalid SLIP escape (0xdb, 0x%s)' % (hexify(b)))
            elif b == b'\xdb':  # start of escape sequence
                in_escape = True
            elif b == b'\xc0':  # end of packet
trace_function("Received full packet: %s", HexFormatter(partial_packet))
yield partial_packet
partial_packet = None
else:
partial_packet += b | Generator to read SLIP packets from a serial port.
Yields one full SLIP packet at a time, raises exception on timeout or invalid data.
Designed to avoid too many calls to serial.read(1), which can bog
down on slow systems. |
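A self-contained sketch of the SLIP framing handled above, run over an in-memory byte string instead of a serial port; it assumes the standard SLIP byte values (0xC0 frame delimiter, 0xDB escape with 0xDC/0xDD substitutions) and, unlike the generator above, silently skips bytes outside a frame instead of raising.

```python
def slip_decode(data):
    packets, partial, in_escape = [], None, False
    for b in data:
        b = bytes([b])
        if partial is None:
            # bytes before a 0xC0 frame start are ignored in this sketch
            partial = b"" if b == b"\xc0" else partial
        elif in_escape:
            in_escape = False
            # 0xDB 0xDC -> 0xC0, 0xDB 0xDD -> 0xDB (invalid escapes unchecked)
            partial += b"\xc0" if b == b"\xdc" else b"\xdb"
        elif b == b"\xdb":
            in_escape = True
        elif b == b"\xc0":
            packets.append(partial)
            partial = None
        else:
            partial += b
    return packets

print(slip_decode(b"\xc0hi\xdb\xdcthere\xc0"))  # [b'hi\xc0there']
```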
375,134 | def generate(self):
key = self._propose_new_key()
while self.key_exists(key):
            _logger.warning(
                'proposed key already exists; generating another')
key = self._propose_new_key()
return key | Generate a new string and return it. |
375,135 | def set_auto_reply(self, message, status=AutoReplyStatus.ALWAYS_ENABLED, start=None, end=None,
external_message=None, audience=AutoReplyAudience.ALL):
start_is_none = start is None
end_is_none = end is None
if (not start_is_none and end_is_none) or (start_is_none and not end_is_none):
            raise ValueError('Both start and end must be provided together.')
start_is_datetime = isinstance(start, datetime)
end_is_datetime = isinstance(end, datetime)
if not start_is_datetime and not start_is_none or not end_is_datetime and not end_is_none:
            raise ValueError('start and end must be datetime objects.')
request_data = dict(Status=status, ExternalAudience=audience)
if external_message is None:
external_message = message
request_data.update(InternalReplyMessage=message, ExternalReplyMessage=external_message)
if not start_is_none and not end_is_none:
request_data.update(ScheduledStartDateTime=dict(DateTime=str(start)))
request_data.update(ScheduledEndDateTime=dict(DateTime=str(end)))
data = {
"@odata.context": "https://outlook.office.com/api/v2.0/$metadata
"AutomaticRepliesSetting": request_data
}
requests.patch(,
headers=self._headers, data=json.dumps(data))
self._auto_reply = message | Set an automatic reply for the account.
Args:
message (str): The message to be sent in replies. If external_message is provided this is the message sent
to internal recipients
status (OutlookAccount.AutoReplyStatus): Whether the auto-reply should be always enabled, scheduled, or
disabled. You can use :class:`AutoReplyStatus <pyOutlook.core.main.OutlookAccount.AutoReplyStatus>` to
provide the value. Defaults to ALWAYS_ENABLED.
start (datetime): If status is set to SCHEDULED, this is when the replies will start being sent.
end (datetime): If status is set to SCHEDULED, this is when the replies will stop being sent.
external_message (str): If provided, this message will be sent to external recipients.
audience (OutlookAccount.AutoReplyAudience): Whether replies should be sent to everyone, contacts only,
or internal recipients only. You can use
:class:`AutoReplyAudience <pyOutlook.core.main.OutlookAccount.AutoReplyAudience>` to provide the value. |
375,136 | def get_player_id(player):
players_df = get_all_player_ids("all_data")
player = players_df[players_df.DISPLAY_LAST_COMMA_FIRST == player]
if len(player) == 0:
er = "Invalid player name passed or there is no player with that name."
raise ValueError(er)
player_id = player.PERSON_ID.values
return player_id | Returns the player ID(s) associated with the player name that is passed in.
There are instances where players have the same name so there are multiple
player IDs associated with it.
Parameters
----------
player : str
The desired player's name in 'Last Name, First Name' format. Passing in
a single name returns a numpy array containing all the player IDs
associated with that name.
Returns
-------
player_id : numpy array
The numpy array that contains the player ID(s). |
375,137 | def _headers(self, **kwargs):
headers = BASE_HEADERS.copy()
if self._token:
headers[] = self._token
headers.update(kwargs)
return headers | Returns dict containing base headers for all requests to the server. |
375,138 | def save(self, filename, content):
with open(filename, "w") as f:
if hasattr(content, ):
f.write(.join([row for row in content]))
else:
print()
f.write(str(content)) | default is to save a file from list of lines |
375,139 | def update(self, data):
        updated = self.set_property('description', data.description)
        updated |= self.set_property('state', data.state)
        tags = {x['Key']: x['Value'] for x in data.tags or {}}
existing_tags = {x.key: x for x in self.tags}
for key, value in list(tags.items()):
updated |= self.set_tag(key, value)
for key in list(existing_tags.keys()):
if key not in tags:
updated |= self.delete_tag(key)
return updated | Updates the object information based on live data, if there were any changes made. Any changes will be
automatically applied to the object, but will not be automatically persisted. You must manually call
`db.session.add(ami)` on the object.
Args:
data (bunch): Data fetched from AWS API
Returns:
True if there were any changes to the object, else false |
375,140 | def ontologyShapeTree(self):
treedict = {}
if self.all_shapes:
treedict[0] = self.toplayer_shapes
for element in self.all_shapes:
if element.children():
treedict[element] = element.children()
return treedict
return treedict | Returns a dict representing the ontology tree
Top level = {0:[top properties]}
Multi inheritance is represented explicitly |
375,141 | def group_experiments(experiments: TomographyExperiment,
                      method: str = 'greedy') -> TomographyExperiment:
    allowed_methods = ['greedy', 'clique-removal']
    assert method in allowed_methods, f"'method' should be one of {allowed_methods}."
    if method == 'greedy':
        return group_experiments_greedy(experiments)
    elif method == 'clique-removal':
return group_experiments_clique_removal(experiments) | Group experiments that are diagonal in a shared tensor product basis (TPB) to minimize number
of QPU runs.
Background
----------
Given some PauliTerm operator, the 'natural' tensor product basis to
diagonalize this term is the one which diagonalizes each Pauli operator in the
product term-by-term.
For example, X(1) * Z(0) would be diagonal in the 'natural' tensor product basis
{(|0> +/- |1>)/Sqrt[2]} * {|0>, |1>}, whereas Z(1) * X(0) would be diagonal
in the 'natural' tpb {|0>, |1>} * {(|0> +/- |1>)/Sqrt[2]}. The two operators
commute but are not diagonal in each others 'natural' tpb (in fact, they are
anti-diagonal in each others 'natural' tpb). This function tests whether two
operators given as PauliTerms are both diagonal in each others 'natural' tpb.
Note that for the given example of X(1) * Z(0) and Z(1) * X(0), we can construct
the following basis which simultaneously diagonalizes both operators:
-- |0>' = |0> (|+>) + |1> (|->)
-- |1>' = |0> (|+>) - |1> (|->)
-- |2>' = |0> (|->) + |1> (|+>)
-- |3>' = |0> (-|->) + |1> (|+>)
In this basis, X Z looks like diag(1, -1, 1, -1), and Z X looks like diag(1, 1, -1, -1).
Notice however that this basis cannot be constructed with single-qubit operations, as each
of the basis vectors are entangled states.
Methods
-------
The "greedy" method will keep a running set of 'buckets' into which grouped ExperimentSettings
will be placed. Each new ExperimentSetting considered is assigned to the first applicable
bucket and a new bucket is created if there are no applicable buckets.
The "clique-removal" method maps the term grouping problem onto Max Clique graph problem.
This method constructs a NetworkX graph where an edge exists between two settings that
share an nTPB and then uses networkx's algorithm for clique removal. This method can give
you marginally better groupings in certain circumstances, but constructing the
graph is pretty slow so "greedy" is the default.
:param experiments: a tomography experiment
:param method: method used for grouping; the allowed methods are one of
['greedy', 'clique-removal']
:return: a tomography experiment with all the same settings, just grouped according to shared
TPBs. |
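A conceptual sketch (plain Python, no pyquil) of the shared tensor product basis test that the grouping relies on: two Pauli strings can be measured together when, qubit by qubit, their letters agree or one of them is the identity.

```python
def shares_tpb(p, q):
    # p and q are Pauli strings over the same qubits, e.g. "XI" and "XZ"
    return all(a == b or "I" in (a, b) for a, b in zip(p, q))

print(shares_tpb("XI", "XZ"))  # True: X on qubit 0 for both, qubit 1 free to measure in Z
print(shares_tpb("XZ", "ZX"))  # False: they commute, but have no shared single-qubit basis
```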
375,142 | def _neg32(ins):
output = _32bit_oper(ins.quad[2])
output.append()
output.append()
output.append()
REQUIRES.add()
return output | Negates top of the stack (32 bits in DEHL) |
375,143 | def _process_model_dict(self, d):
del d[]
del d[]
del d[]
del d[]
del d[]
del d[]
del d[]
del d[]
del d[]
del d[]
del d[]
del d[]
if d[] == self.default_model_expr:
del d[]
d["name"] = yamlio.to_scalar_safe(d["name"])
return d | Remove redundant items from a model's configuration dict.
Parameters
----------
d : dict
Modified in place.
Returns
-------
dict
Modified `d`. |
375,144 | def _is_valid_integer(self, inpt, metadata):
if not isinstance(inpt, int):
return False
        if metadata.get_minimum_integer() and inpt < metadata.get_minimum_integer():
            return False
        if metadata.get_maximum_integer() and inpt > metadata.get_maximum_integer():
return False
if metadata.get_integer_set() and inpt not in metadata.get_integer_set():
return False
else:
return True | Checks if input is a valid integer value |
375,145 | def received_message(self, message):
        if message.data.decode() == 'ready':
self.pipe.send(message)
self.send(, False) | Checks if the client has sent a ready message.
A ready message causes ``send()`` to be called on the
``parent end`` of the pipe.
Clients need to ensure that the pipe assigned to ``self.pipe`` is
the ``parent end`` of a pipe.
This ensures completion of the underlying websocket connection
and can be used to synchronize parallel senders. |
375,146 | def _append_path(new_path):
for path in sys.path:
path = os.path.abspath(path)
if new_path == path:
return
sys.path.append(new_path) | Given a path string, append it to sys.path |
375,147 | def avail_images(call=None):
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option.'
        )
ret = {}
conn = get_conn()
response = conn.getCreateObjectOptions()
for image in response[]:
ret[image[][][]] = {
: image[][][],
: image[][],
}
return ret | Return a dict of all available VM images on the cloud provider. |
375,148 | def unlink(self):
links = self.registry.get(self.source)
if self in links:
links.pop(links.index(self)) | Unregisters the Link |
375,149 | def level(self):
ev = self._query_waiters.request(self.__do_query_level)
ev.wait(1.0)
return self._level | Returns the current output level by querying the remote controller. |
375,150 | def _log_likelihood_per_sample(X, means, covars):
logden = _log_multivariate_density(X, means, covars)
logden_max = logden.max(axis=1)
log_likelihood = np.log(np.sum(np.exp(logden.T - logden_max) + Epsilon, axis=0))
log_likelihood += logden_max
post_proba = np.exp(logden - log_likelihood[:, np.newaxis])
return (log_likelihood, post_proba) | Theta = (theta_1, theta_2, ... theta_M)
Likelihood of mixture parameters given data: L(Theta | X) = product_i P(x_i | Theta)
log likelihood: log L(Theta | X) = sum_i log(P(x_i | Theta))
and note that p(x_i | Theta) = sum_j prior_j * p(x_i | theta_j)
Probability of sample x being generated from component i:
P(w_i | x) = P(x|w_i) * P(w_i) / P(X)
where P(X) = sum_i P(x|w_i) * P(w_i)
Here post_proba = P/(w_i | x)
and log_likelihood = log(P(x|w_i)) |
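A small numpy illustration of the log-sum-exp step in the code above: subtracting the per-sample maximum before exponentiating keeps the sum finite even for very negative log densities (the Epsilon smoothing used in the original is omitted here).

```python
import numpy as np

logden = np.array([[-1200.0, -1201.0],   # per-sample, per-component log densities
                   [-3.0, -1.0]])
logden_max = logden.max(axis=1)
log_likelihood = logden_max + np.log(np.sum(np.exp(logden - logden_max[:, None]), axis=1))
post_proba = np.exp(logden - log_likelihood[:, None])

print(log_likelihood)          # finite even for the huge negative first row
print(post_proba.sum(axis=1))  # responsibilities sum to 1 for each sample
```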
375,151 | def normalize_ident(ident):
if isinstance(ident, tuple) and len(ident) == 2:
return ident[0], ident[1]
else:
return ident, None | Splits a generic identifier.
If ``ident`` is a tuple, then ``(ident[0], ident[1])`` is returned.
    Otherwise, ``(ident, None)`` is returned. |
375,152 | def _getColumnNeighborhood(self, centerColumn):
if self._wrapAround:
return topology.wrappingNeighborhood(centerColumn,
self._inhibitionRadius,
self._columnDimensions)
else:
return topology.neighborhood(centerColumn,
self._inhibitionRadius,
self._columnDimensions) | Gets a neighborhood of columns.
Simply calls topology.neighborhood or topology.wrappingNeighborhood
A subclass can insert different topology behavior by overriding this method.
:param centerColumn (int)
The center of the neighborhood.
@returns (1D numpy array of integers)
The columns in the neighborhood. |
375,153 | def _build_resolver(cls, session: AppSession):
args = session.args
dns_timeout = args.dns_timeout
if args.timeout:
dns_timeout = args.timeout
if args.inet_family == :
family = IPFamilyPreference.ipv4_only
elif args.inet_family == :
family = IPFamilyPreference.ipv6_only
elif args.prefer_family == :
family = IPFamilyPreference.prefer_ipv6
elif args.prefer_family == :
family = IPFamilyPreference.prefer_ipv4
else:
family = IPFamilyPreference.any
return session.factory.new(
,
family=family,
timeout=dns_timeout,
rotate=args.rotate_dns,
cache=session.factory.class_map[].new_cache() if args.dns_cache else None,
) | Build resolver. |
375,154 | def multi_pop(d, *args):
retval = {}
for key in args:
if key in d:
retval[key] = d.pop(key)
return retval | pops multiple keys off a dict like object |
375,155 | def read_unsigned_var_int(file_obj):
result = 0
shift = 0
while True:
byte = struct.unpack(b"<B", file_obj.read(1))[0]
result |= ((byte & 0x7F) << shift)
if (byte & 0x80) == 0:
break
shift += 7
return result | Read a value using the unsigned, variable int encoding. |
375,156 | def isEnabled( self ):
if ( self._disableWithLayer and self._layer ):
lenabled = self._layer.isEnabled()
else:
lenabled = True
return self._enabled and lenabled | Returns whether or not this node is enabled. |
375,157 | def robust_topological_sort(graph: Graph) -> list:
assert check_argument_types()
components = strongly_connected_components(graph)
node_component = {}
for component in components:
for node in component:
node_component[node] = component
component_graph = {}
for component in components:
component_graph[component] = []
for node in graph:
node_c = node_component[node]
for successor in graph[node]:
successor_c = node_component[successor]
if node_c != successor_c:
component_graph[node_c].append(successor_c)
return topological_sort(component_graph) | Identify strongly connected components then perform a topological sort of those components. |
375,158 | def subscribe(self, stream, callback, transform=""):
if self.status == "disconnected" or self.status == "disconnecting" or self.status == "connecting":
self.connect()
if self.status is not "connected":
return False
logging.debug("Subscribing to %s", stream)
self.send({"cmd": "subscribe", "arg": stream, "transform": transform})
with self.subscription_lock:
self.subscriptions[stream + ":" + transform] = callback
return True | Given a stream, a callback and an optional transform, sets up the subscription |
375,159 | def do(self, arg):
".example - This is an example plugin for the command line debugger"
print "This is an example command."
print "%s.do(%r, %r):" % (__name__, self, arg)
print " last event", self.lastEvent
print " prefix", self.cmdprefix
print " arguments", self.split_tokens(arg) | .example - This is an example plugin for the command line debugger |
375,160 | def get_code(self):
if self.code is None:
self.code = urlopen(self.url).read()
return self.code | Opens the link and returns the response's content. |
375,161 | def tparse(instring, lenout=_default_len_out):
errmsg = stypes.stringToCharP(lenout)
lenout = ctypes.c_int(lenout)
instring = stypes.stringToCharP(instring)
sp2000 = ctypes.c_double()
libspice.tparse_c(instring, lenout, ctypes.byref(sp2000), errmsg)
return sp2000.value, stypes.toPythonString(errmsg) | Parse a time string and return seconds past the J2000
epoch on a formal calendar.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tparse_c.html
:param instring: Input time string, UTC.
:type instring: str
:param lenout: Available space in output error message string.
:type lenout: int
:return: Equivalent UTC seconds past J2000, Descriptive error message.
:rtype: tuple |
375,162 | def start(self, max):
try:
self.widget.max = max
display(self.widget)
except:
pass | Displays the progress bar for a given maximum value.
:param float max: Maximum value of the progress bar. |
375,163 | def get_metrics(self, name=None):
        return self._get_elements(self.metrics, 'metrics', Metric, name=name) | Get metrics for this operator.
Args:
name(str, optional): Only return metrics matching `name`, where `name` can be a regular expression. If
`name` is not supplied, then all metrics for this operator are returned.
Returns:
list(Metric): List of matching metrics.
Retrieving a list of metrics whose name contains the string "temperatureSensor" could be performed as followed
Example:
>>> from streamsx import rest
>>> sc = rest.StreamingAnalyticsConnection()
>>> instances = sc.get_instances()
>>> operator = instances[0].get_operators()[0]
>>> metrics = op.get_metrics(name='*temperatureSensor*') |
375,164 | def load_json(filename):
try:
if PY2:
args =
else:
args =
with open(filename, args) as fid:
data = json.load(fid)
return data, None
except Exception as err:
return None, str(err) | Load a json file as a dictionary |
375,165 | def register_user(self, data):
error = False
msg = ""
email_re = re.compile(
r"(^[-!
r
r, re.IGNORECASE)
if re.match(r"^[-_|~0-9A-Z]{4,}$", data["username"], re.IGNORECASE) is None:
error = True
msg = _("Invalid username format.")
elif email_re.match(data["email"]) is None:
error = True
msg = _("Invalid email format.")
elif len(data["passwd"]) < 6:
error = True
msg = _("Password too short.")
elif data["passwd"] != data["passwd2"]:
error = True
msg = _("Passwords don't match !")
if not error:
existing_user = self.database.users.find_one({"$or": [{"username": data["username"]}, {"email": data["email"]}]})
if existing_user is not None:
error = True
if existing_user["username"] == data["username"]:
msg = _("This username is already taken !")
else:
msg = _("This email address is already in use !")
else:
passwd_hash = hashlib.sha512(data["passwd"].encode("utf-8")).hexdigest()
activate_hash = hashlib.sha512(str(random.getrandbits(256)).encode("utf-8")).hexdigest()
self.database.users.insert({"username": data["username"],
"realname": data["realname"],
"email": data["email"],
"password": passwd_hash,
"activate": activate_hash,
"bindings": {},
"language": self.user_manager._session.get("language", "en")})
try:
web.sendmail(web.config.smtp_sendername, data["email"], _("Welcome on INGInious"),
_()
+ web.ctx.home + "/register?activate=" + activate_hash)
msg = _("You are succesfully registered. An email has been sent to you for activation.")
except:
error = True
msg = _("Something went wrong while sending you activation email. Please contact the administrator.")
return msg, error | Parses input and register user |
375,166 | def mutation_jwt_refresh_token_required(fn):
@wraps(fn)
def wrapper(cls, *args, **kwargs):
token = kwargs.pop(current_app.config[])
try:
verify_refresh_jwt_in_argument(token)
except Exception as e:
return cls(AuthInfoField(message=str(e)))
return fn(*args, **kwargs)
return wrapper | A decorator to protect a mutation.
    If you decorate a mutation with this, it will ensure that the requester
has a valid refresh token before allowing the mutation to be called. |
375,167 | def get_one(cls, db, *args, **kwargs):
data = db[cls.collection].find_one(*args, **kwargs)
if data:
return cls.wrap_incoming(data, db)
else:
return None | Returns an object that corresponds to given query or ``None``.
Example::
item = Item.get_one(db, {'title': u'Hello'}) |
375,168 | def get_handler(self, request):
try:
f = self._json_rpc_methods[request.method]
except (AttributeError, KeyError):
raise RPCMethodError("Received invalid method ".format(request.method))
return f | Get callable from JSON RPC request
:param RPCRequest request: JSON RPC request
:return: Method
:rtype: callable |
375,169 | def set_data_matrix_chunk_size(df_shape, max_chunk_kb, elem_per_kb):
row_chunk_size = min(df_shape[0], 1000)
col_chunk_size = min(((max_chunk_kb*elem_per_kb)//row_chunk_size), df_shape[1])
return (row_chunk_size, col_chunk_size) | Sets chunk size to use for writing data matrix.
Note. Calculation used here is for compatibility with cmapM and cmapR.
Input:
- df_shape (tuple): shape of input data_df.
- max_chunk_kb (int, default=1024): The maximum number of KB a given chunk will occupy
- elem_per_kb (int): Number of elements per kb
Returns:
chunk size (tuple) to use for chunking the data matrix |
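A worked example of the chunk-size arithmetic, assuming the function above is in scope; the 1024 KB and 128 elements/KB figures are illustrative, not the library defaults.

```python
df_shape = (10000, 500)
max_chunk_kb, elem_per_kb = 1024, 128

row_chunk = min(df_shape[0], 1000)                                       # 1000
col_chunk = min((max_chunk_kb * elem_per_kb) // row_chunk, df_shape[1])  # min(131, 500) -> 131
print((row_chunk, col_chunk))  # (1000, 131)
print(set_data_matrix_chunk_size(df_shape, max_chunk_kb, elem_per_kb) == (1000, 131))
```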
375,170 | def autobuild_arm_program(elfname, test_dir=os.path.join(, ), patch=True):
try:
family = utilities.get_family()
family.for_all_targets(family.tile.short_name, lambda x: arm.build_program(family.tile, elfname, x, patch=patch))
unit_test.build_units(os.path.join(,), family.targets(family.tile.short_name))
Alias(, os.path.join(, ))
Alias(, os.path.join(, , ))
Default([, ])
autobuild_release(family)
if os.path.exists():
autobuild_documentation(family.tile)
except IOTileException as e:
print(e.format())
sys.exit(1) | Build the an ARM module for all targets and build all unit tests. If pcb files are given, also build those. |
375,171 | def mirrored(setup):
@wraps(setup)
def wrapped_setup(self):
self.mirror, self.mock = mirror()
return setup(self, self.mirror, self.mock)
    return wrapped_setup | Convenience decorator for setUp in testcases::
@mirrored
def setUp(self, mirror, mock):
...
is the same as::
def setUp(self):
self.mirror, self.mock = mirror()
mirror, mock = self.mirror, self.mock
... |
375,172 | def permission_denied(request, template_name=None, extra_context=None):
if template_name is None:
template_name = (, )
    context = {
        'request_path': request.path,
    }
if extra_context:
context.update(extra_context)
return HttpResponseForbidden(loader.render_to_string(
template_name=template_name,
context=context,
request=request,
)) | Default 403 handler.
Templates: `403.html`
Context:
request_path
The path of the requested URL (e.g., '/app/pages/bad_page/') |
375,173 | def _merge_sections(sec_a, sec_b):
sec_b.ids = list(sec_a.ids) + list(sec_b.ids[1:])
sec_b.ntype = sec_a.ntype
sec_b.pid = sec_a.pid
sec_a.ids = []
sec_a.pid = -1
sec_a.ntype = 0 | Merge two sections
Merges sec_a into sec_b and sets sec_a attributes to default |
375,174 | def bulk_create_posts(self, posts, post_categories, post_tags, post_media_attachments):
Post.objects.bulk_create(posts)
for post_wp_id, categories in six.iteritems(post_categories):
Post.objects.get(site_id=self.site_id, wp_id=post_wp_id).categories.add(*categories)
for post_id, tags in six.iteritems(post_tags):
Post.objects.get(site_id=self.site_id, wp_id=post_id).tags.add(*tags)
for post_id, attachments in six.iteritems(post_media_attachments):
Post.objects.get(site_id=self.site_id, wp_id=post_id).attachments.add(*attachments) | Actually do a db bulk creation of posts, and link up the many-to-many fields
:param posts: the list of Post objects to bulk create
:param post_categories: a mapping of Categories to add to newly created Posts
:param post_tags: a mapping of Tags to add to newly created Posts
:param post_media_attachments: a mapping of Medias to add to newly created Posts
:return: None |
375,175 | def _do_exit(self, cmd, args):
if cmd == :
if not args:
return
else:
self.stderr.write(textwrap.dedent()).format(args)
if not args:
return True
if len(args) > 1:
self.stderr.write(textwrap.dedent()).format(args)
exit_directive = args[0]
if exit_directive == :
return
if exit_directive == :
return
self.stderr.write(textwrap.dedent()).format(args) | \
Exit shell.
exit | C-D Exit to the parent shell.
exit root | end Exit to the root shell.
exit all Exit to the command line. |
375,176 | def evolved_transformer_decoder(decoder_input,
encoder_output,
decoder_self_attention_bias,
encoder_decoder_attention_bias,
hparams,
cache=None,
decode_loop_step=None,
name="decoder",
nonpadding=None,
save_weights_to=None,
make_image_summary=True,
losses=None):
del losses
attention_dropout_broadcast_dims = (
common_layers.comma_separated_string_to_integer_list(
getattr(hparams, "attention_dropout_broadcast_dims", "")))
with tf.variable_scope(name):
hidden_state = decoder_input
for layer in range(hparams.num_decoder_layers or hparams.num_hidden_layers):
layer_name = "layer_%d" % layer
layer_cache = cache[layer_name] if cache is not None else None
with tf.variable_scope(layer_name):
with tf.variable_scope(_SIXTEEN_HEAD_ATTENTION_NAME):
residual_state = hidden_state
hidden_state = common_layers.layer_preprocess(hidden_state, hparams)
attention_cache = layer_cache[
_SIXTEEN_HEAD_ATTENTION_NAME] if layer_cache is not None else None
left_state = common_attention.multihead_attention(
hidden_state,
None,
decoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
_capped_double_heads(hparams.num_heads),
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
max_relative_position=hparams.max_relative_position,
heads_share_relative_embedding=(
hparams.heads_share_relative_embedding),
add_relative_to_values=hparams.add_relative_to_values,
save_weights_to=save_weights_to,
cache=attention_cache,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims,
max_length=hparams.get("max_length"),
decode_loop_step=decode_loop_step,
vars_3d=hparams.get("attention_variables_3d"),
activation_dtype=hparams.get("activation_dtype", "float32"),
weight_dtype=hparams.get("weight_dtype", "float32"))
if encoder_output is not None:
with tf.variable_scope(_FIRST_ATTEND_TO_ENCODER_NAME):
attention_cache = (
layer_cache[_FIRST_ATTEND_TO_ENCODER_NAME]
if layer_cache is not None else None)
right_state = common_attention.multihead_attention(
hidden_state,
encoder_output,
encoder_decoder_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
max_relative_position=hparams.max_relative_position,
heads_share_relative_embedding=(
hparams.heads_share_relative_embedding),
add_relative_to_values=hparams.add_relative_to_values,
save_weights_to=save_weights_to,
cache=attention_cache,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims,
max_length=hparams.get("max_length"),
vars_3d=hparams.get("attention_variables_3d"),
activation_dtype=hparams.get("activation_dtype", "float32"),
weight_dtype=hparams.get("weight_dtype", "float32"))
left_state = tf.nn.dropout(left_state,
1 - hparams.layer_prepostprocess_dropout)
right_state = tf.nn.dropout(
right_state, 1 - hparams.layer_prepostprocess_dropout)
hidden_state = residual_state + left_state + right_state
else:
hidden_state = common_layers.layer_postprocess(
residual_state, left_state, hparams)
with tf.variable_scope(_CONV_BRANCHES_NAME):
residual_state = hidden_state
hidden_state = common_layers.layer_preprocess(hidden_state, hparams)
if nonpadding is not None:
mask = tf.tile(
tf.expand_dims(nonpadding, 2), [1, 1, hparams.hidden_size])
hidden_state *= mask
if layer_cache:
if decode_loop_step is None:
hidden_state = layer_cache[
_CONV_BRANCHES_FIRST_LAYER_NAME] = tf.concat(
[
layer_cache[_CONV_BRANCHES_FIRST_LAYER_NAME],
hidden_state
],
axis=1)[:, -1 * _DECODER_LEFT_CONV_PADDING - 1:, :]
left_state = hidden_state
right_state = hidden_state[:, _DECODER_LEFT_CONV_PADDING -
_DECODER_RIGHT_CONV_PADDING:, :]
else:
tmp = tf.transpose(
layer_cache[_CONV_BRANCHES_FIRST_LAYER_NAME], perm=[1, 0, 2])
tmp = tf.expand_dims(tmp, axis=1)
tmp = inplace_ops.alias_inplace_update(
tmp,
decode_loop_step * tf.shape(hidden_state)[1] +
_DECODER_LEFT_CONV_PADDING,
tf.transpose(hidden_state, perm=[1, 0, 2]))
tmp = tf.squeeze(tmp, axis=1)
hidden_state = layer_cache[
_CONV_BRANCHES_FIRST_LAYER_NAME] = tf.transpose(
tmp, perm=[1, 0, 2])
left_state_indexes = [
decode_loop_step + i
for i in range(_DECODER_LEFT_CONV_PADDING + 1)
]
left_state = tf.gather(hidden_state, left_state_indexes, axis=1)
right_state_indexes = [
decode_loop_step + i +
(_DECODER_LEFT_CONV_PADDING - _DECODER_RIGHT_CONV_PADDING)
for i in range(_DECODER_RIGHT_CONV_PADDING + 1)
]
right_state = tf.gather(hidden_state, right_state_indexes, axis=1)
else:
left_state = tf.pad(
hidden_state,
paddings=[[0, 0], [_DECODER_LEFT_CONV_PADDING, 0], [0, 0]])
right_state = tf.pad(
hidden_state,
paddings=[[0, 0], [_DECODER_RIGHT_CONV_PADDING, 0], [0, 0]])
left_output_dim = int(hparams.hidden_size * 2)
separable_conv_11x1 = tf.layers.SeparableConv1D(
left_output_dim,
11,
padding="VALID",
name="separable_conv11x1",
activation=tf.nn.relu)
left_state = separable_conv_11x1.apply(left_state)
left_state = tf.nn.dropout(left_state,
1 - hparams.layer_prepostprocess_dropout)
right_output_dim = int(hparams.hidden_size / 2)
separable_conv_7x1_1 = tf.layers.SeparableConv1D(
right_output_dim, 7, padding="VALID", name="separable_conv_7x1_1")
right_state = separable_conv_7x1_1.apply(right_state)
right_state = tf.nn.dropout(right_state,
1 - hparams.layer_prepostprocess_dropout)
right_state = tf.pad(
right_state,
[[0, 0], [0, 0], [0, left_output_dim - right_output_dim]],
constant_values=0)
hidden_state = left_state + right_state
hidden_state = common_layers.layer_preprocess(hidden_state, hparams)
if nonpadding is not None:
mask = tf.tile(
tf.expand_dims(nonpadding, 2), [1, 1, hparams.hidden_size * 2])
hidden_state *= mask
if layer_cache:
if decode_loop_step is None:
hidden_state = layer_cache[
_CONV_BRANCHES_SECOND_LAYER_NAME] = tf.concat(
[
layer_cache[_CONV_BRANCHES_SECOND_LAYER_NAME],
hidden_state
],
axis=1)[:, -1 * _DECODER_FINAL_CONV_PADDING - 1:, :]
else:
tmp = tf.transpose(
layer_cache[_CONV_BRANCHES_SECOND_LAYER_NAME], perm=[1, 0, 2])
tmp = tf.expand_dims(tmp, axis=1)
tmp = inplace_ops.alias_inplace_update(
tmp, (decode_loop_step + _DECODER_FINAL_CONV_PADDING) *
tf.shape(hidden_state)[1],
tf.transpose(hidden_state, perm=[1, 0, 2]))
tmp = tf.squeeze(tmp, axis=1)
hidden_state = layer_cache[
_CONV_BRANCHES_SECOND_LAYER_NAME] = tf.transpose(
tmp, perm=[1, 0, 2])
hidden_state_indexes = [
decode_loop_step + i
for i in range(_DECODER_FINAL_CONV_PADDING + 1)
]
hidden_state = tf.gather(
hidden_state, hidden_state_indexes, axis=1)
else:
hidden_state = tf.pad(
hidden_state,
paddings=[[0, 0], [_DECODER_FINAL_CONV_PADDING, 0], [0, 0]])
separable_conv_7x1_2 = tf.layers.SeparableConv1D(
hparams.hidden_size,
7,
padding="VALID",
name="separable_conv_7x1_2")
hidden_state = separable_conv_7x1_2.apply(hidden_state)
hidden_state = common_layers.layer_postprocess(
residual_state, hidden_state, hparams)
with tf.variable_scope(_VANILLA_ATTENTION_NAME):
residual_state = hidden_state
hidden_state = common_layers.layer_preprocess(hidden_state, hparams)
attention_cache = layer_cache[
_VANILLA_ATTENTION_NAME] if layer_cache is not None else None
hidden_state = common_attention.multihead_attention(
hidden_state,
None,
decoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
max_relative_position=hparams.max_relative_position,
heads_share_relative_embedding=(
hparams.heads_share_relative_embedding),
add_relative_to_values=hparams.add_relative_to_values,
save_weights_to=save_weights_to,
cache=attention_cache,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims,
max_length=hparams.get("max_length"),
decode_loop_step=decode_loop_step,
vars_3d=hparams.get("attention_variables_3d"),
activation_dtype=hparams.get("activation_dtype", "float32"),
weight_dtype=hparams.get("weight_dtype", "float32"))
hidden_state = common_layers.layer_postprocess(
residual_state, hidden_state, hparams)
if encoder_output is not None:
with tf.variable_scope(_SECOND_ATTEND_TO_ENCODER_NAME):
residual_state = hidden_state
hidden_state = common_layers.layer_preprocess(hidden_state, hparams)
attention_cache = (
layer_cache[_SECOND_ATTEND_TO_ENCODER_NAME]
if layer_cache is not None else None)
hidden_state = common_attention.multihead_attention(
hidden_state,
encoder_output,
encoder_decoder_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
max_relative_position=hparams.max_relative_position,
heads_share_relative_embedding=(
hparams.heads_share_relative_embedding),
add_relative_to_values=hparams.add_relative_to_values,
save_weights_to=save_weights_to,
cache=attention_cache,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims,
max_length=hparams.get("max_length"),
vars_3d=hparams.get("attention_variables_3d"),
activation_dtype=hparams.get("activation_dtype", "float32"),
weight_dtype=hparams.get("weight_dtype", "float32"))
hidden_state = common_layers.layer_postprocess(
residual_state, hidden_state, hparams)
with tf.variable_scope("dense_layers"):
residual_state = hidden_state
hidden_state = common_layers.layer_preprocess(hidden_state, hparams)
hidden_state = tf.layers.dense(
hidden_state,
int(hparams.hidden_size * 4),
activation=tf.nn.swish)
hidden_state = tf.nn.dropout(hidden_state,
1 - hparams.layer_prepostprocess_dropout)
hidden_state = common_layers.layer_preprocess(hidden_state, hparams)
hidden_state = tf.layers.dense(hidden_state, hparams.hidden_size)
hidden_state = common_layers.layer_postprocess(
residual_state, hidden_state, hparams)
return common_layers.layer_preprocess(hidden_state, hparams) | Evolved Transformer decoder. See arxiv.org/abs/1901.11117 for more details.
Args:
decoder_input: a Tensor.
encoder_output: a Tensor.
decoder_self_attention_bias: bias Tensor for self-attention (see
common_attention.attention_bias()).
encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention
(see common_attention.attention_bias()).
hparams: hyperparameters for model.
cache: dict, containing tensors which are the results of previous
layers, used for fast decoding.
decode_loop_step: An integer, step number of the decoding loop. Only used
for inference on TPU.
name: a string.
nonpadding: optional Tensor with shape [batch_size, encoder_length]
indicating what positions are not padding. This is used to mask out
padding in convolutional layers. We generally only need this mask for
"packed" datasets, because for ordinary datasets, no padding is ever
followed by nonpadding.
save_weights_to: an optional dictionary to capture attention weights for
visualization; the weights tensor will be appended there under a string
key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
losses: Not supported.
Returns:
Decoder output tensor. |
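The block ordering in the code above (vanilla self-attention, an optional second attention over the encoder output, then a widened dense layer with swish followed by a projection back to the model width, each wrapped in pre/post-processing residuals) can be illustrated outside of tensor2tensor. The sketch below only mirrors that ordering; it is not the library's implementation, and the Keras layers, dimensions, dropout placement, and the omission of attention bias/caching are simplifying assumptions.

import tensorflow as tf

def decoder_tail_block(x, enc_out, d_model=256, num_heads=4, dropout=0.1):
    # vanilla self-attention wrapped in pre-norm + residual (masking omitted)
    res = x
    h = tf.keras.layers.LayerNormalization()(x)
    h = tf.keras.layers.MultiHeadAttention(num_heads, d_model // num_heads)(h, h)
    x = res + tf.keras.layers.Dropout(dropout)(h)
    # second attention over the encoder output, only when one is given
    if enc_out is not None:
        res = x
        h = tf.keras.layers.LayerNormalization()(x)
        h = tf.keras.layers.MultiHeadAttention(num_heads, d_model // num_heads)(h, enc_out)
        x = res + tf.keras.layers.Dropout(dropout)(h)
    # widened dense layer with swish, then a projection back to d_model
    res = x
    h = tf.keras.layers.LayerNormalization()(x)
    h = tf.keras.layers.Dense(4 * d_model, activation=tf.nn.swish)(h)
    h = tf.keras.layers.Dropout(dropout)(h)
    h = tf.keras.layers.Dense(d_model)(h)
    return res + h

out = decoder_tail_block(tf.random.normal([2, 7, 256]), tf.random.normal([2, 9, 256]))
print(out.shape)  # (2, 7, 256)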
375,177 | def _validate_name(name):
if name is None:
return
if not all([name[0].isalnum(), name[-1].isalnum()]):
raise ValueError("Bucket names must start and end with a number or letter.")
return name | Pre-flight ``Bucket`` name validation.
:type name: str or :data:`NoneType`
:param name: Proposed bucket name.
:rtype: str or :data:`NoneType`
:returns: ``name`` if valid. |
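A quick behavioral check of the rule above: names that start or end with a non-alphanumeric character are rejected, ``None`` passes through, and valid names come back unchanged. This is a standalone copy of the check for illustration, not the library's class method.

def validate_name(name):
    if name is None:
        return None
    if not (name[0].isalnum() and name[-1].isalnum()):
        raise ValueError("Bucket names must start and end with a number or letter.")
    return name

print(validate_name("my-bucket"))   # 'my-bucket'
print(validate_name(None))          # None
try:
    validate_name("-bad-name-")
except ValueError as exc:
    print(exc)                      # rejected: leading/trailing '-'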
375,178 | def compute_panel(cls, data, scales, params):
    # message text restores the string literal lost in extraction
    msg = '{} needs to implement this method'
    raise NotImplementedError(msg.format(cls.__name__))
Notes
-----
Make necessary adjustments to the columns in the dataframe.
Create the position transformation functions and
use self.transform_position() do the rest.
See Also
--------
position_jitter.compute_panel |
375,179 | def _run_evolve(ssm_file, cnv_file, work_dir, data):
exe = os.path.join(os.path.dirname(sys.executable), "evolve.py")
assert os.path.exists(exe), "Could not find evolve script for PhyloWGS runs."
out_dir = os.path.join(work_dir, "evolve")
out_file = os.path.join(out_dir, "top_k_trees")
if not utils.file_uptodate(out_file, cnv_file):
with file_transaction(data, out_dir) as tx_out_dir:
with utils.chdir(tx_out_dir):
cmd = [sys.executable, exe, "-r", "42", ssm_file, cnv_file]
do.run(cmd, "Run PhyloWGS evolution")
return out_file | Run evolve.py to infer subclonal composition. |
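The snippet above relies on the "skip when up-to-date, run inside a transactional directory" pattern. The sketch below shows that idea in isolation; it is not bcbio's file_transaction/do.run machinery, and the file and command names in the usage comment are placeholders.

import os, shutil, subprocess, tempfile

def run_if_stale(out_file, dep_file, cmd):
    # skip the (expensive) run when the output is newer than its input
    if os.path.exists(out_file) and os.path.getmtime(out_file) >= os.path.getmtime(dep_file):
        return out_file
    # run in a scratch directory and only move results into place on success
    tmp_dir = tempfile.mkdtemp()
    try:
        subprocess.check_call(cmd, cwd=tmp_dir)
        os.makedirs(os.path.dirname(out_file) or ".", exist_ok=True)
        shutil.move(os.path.join(tmp_dir, os.path.basename(out_file)), out_file)
    finally:
        shutil.rmtree(tmp_dir, ignore_errors=True)
    return out_file

# e.g. run_if_stale("evolve/top_k_trees", "cnv_data.txt",
#                   ["python", "evolve.py", "-r", "42", "ssm_data.txt", "cnv_data.txt"])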
375,180 | def invoke(self):
self._iter += 1
if self._iter - max(self._trainer.best_iter, self._annealed_iter) >= self._patience:
if self._annealed_times >= self._anneal_times:
logging.info("ending")
self._trainer.exit()
else:
self._trainer.set_params(*self._trainer.best_params)
self._learning_rate.set_value(self._learning_rate.get_value() * 0.5)
self._annealed_times += 1
self._annealed_iter = self._iter
logging.info("annealed learning rate to %f" % self._learning_rate.get_value()) | Run it, return whether to end training. |
375,181 | def process_next_message(self, timeout):
message = self.worker_manager.receive(timeout)
if isinstance(message, Acknowledgement):
self.task_manager.task_start(message.task, message.worker)
elif isinstance(message, Result):
self.task_manager.task_done(message.task, message.result) | Processes the next message coming from the workers. |
375,182 | def get_handlers(self, kind=None):
with self._lock:
if kind is not None:
try:
return self._handlers[kind][:]
except KeyError:
return []
return self.__all_handlers.copy() | Retrieves the handlers of the given kind. If kind is None, all handlers
are returned.
:param kind: The kind of the handlers to return
:return: A list of handlers, or an empty list |
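The key detail above is returning defensive copies (`[:]` and `.copy()`) under a lock, so callers can iterate or mutate the returned list without racing against registration. A minimal standalone sketch of that pattern, with hypothetical class and method names:

import threading

class Registry:
    def __init__(self):
        self._lock = threading.Lock()
        self._handlers = {}          # kind -> list of handlers
        self._all_handlers = []

    def add(self, kind, handler):
        with self._lock:
            self._handlers.setdefault(kind, []).append(handler)
            self._all_handlers.append(handler)

    def get_handlers(self, kind=None):
        # copies keep callers from mutating the internal lists
        with self._lock:
            if kind is not None:
                return self._handlers.get(kind, [])[:]
            return list(self._all_handlers)

reg = Registry()
reg.add("error", print)
print(reg.get_handlers("error"))     # [<built-in function print>]
print(reg.get_handlers("missing"))   # []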
375,183 | def close(self):
    if self.done:
        return self.result
    self.done = True
    if not self._got_data:
        self.logger.debug('no data received!')
    else:
        # the extraction lost the loop over the probers; this reconstructs the
        # per-prober debug logging in simplified form
        for prober in self._charset_probers:
            if prober:
                self.logger.debug('%s %s confidence = %s',
                                  prober.charset_name,
                                  prober.language,
                                  prober.get_confidence())
return self.result | Stop analyzing the current document and come up with a final
prediction.
:returns: The ``result`` attribute, a ``dict`` with the keys
`encoding`, `confidence`, and `language`. |
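This feed/close/result cycle matches chardet's UniversalDetector, which the snippet appears to come from. Assuming that class, typical usage looks like the following (the file name is a placeholder):

from chardet.universaldetector import UniversalDetector

detector = UniversalDetector()
with open("unknown_encoding.txt", "rb") as handle:   # placeholder file name
    for chunk in handle:
        detector.feed(chunk)
        if detector.done:            # a prober reached high confidence early
            break
detector.close()                     # finalizes detector.result as shown above
print(detector.result)               # {'encoding': ..., 'confidence': ..., 'language': ...}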
375,184 | def _value_and_gradients(fn, fn_arg_list, result=None, grads=None, name=None):
with tf.compat.v1.name_scope(name, 'value_and_gradients',
                             [fn_arg_list, result, grads]):
def _convert_to_tensor(x, name):
ctt = lambda x_: x_ if x_ is None else tf.convert_to_tensor(
value=x_, name=name)
return [ctt(x_) for x_ in x] if is_list_like(x) else ctt(x)
fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
else [fn_arg_list])
fn_arg_list = _convert_to_tensor(fn_arg_list, 'fn_arg')
if result is None:
result = fn(*fn_arg_list)
if grads is None and tf.executing_eagerly():
fn_arg_list = [0 + x for x in fn_arg_list]
result = _convert_to_tensor(result, 'fn_result')
if grads is not None:
grads = _convert_to_tensor(grads, 'fn_grad')
return result, grads
if is_list_like(result) and len(result) == len(fn_arg_list):
def fn_slice(i):
return lambda x: fn(*(fn_arg_list[:i] + [x] + fn_arg_list[i+1:]))
grads = [
tfp_math_value_and_gradients(fn_slice(i), fn_arg_list[i])[1]
for i in range(len(result))
]
else:
_, grads = tfp_math_value_and_gradients(fn, fn_arg_list)
return result, grads | Helper to `maybe_call_fn_and_grads`. |
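The same "value plus per-argument gradients" computation can be reproduced directly with tf.GradientTape in eager mode. This sketch is independent of the TFP helper above and of its list-of-results special case:

import tensorflow as tf

def value_and_gradients(fn, args):
    # return fn(*args) and d fn / d arg for each argument (eager-mode sketch)
    args = [tf.convert_to_tensor(a, dtype=tf.float32) for a in args]
    with tf.GradientTape() as tape:
        for a in args:
            tape.watch(a)
        value = fn(*args)
    return value, tape.gradient(value, args)

val, grads = value_and_gradients(lambda x, y: x * x + 3.0 * y, [2.0, 1.0])
print(float(val))                     # 7.0
print([float(g) for g in grads])      # [4.0, 3.0]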
375,185 | def _add_field_column(self, field):
@self.add_column(name=field)
def get_my_label(cluster_id):
return self.cluster_meta.get(field, cluster_id) | Add a column for a given label field. |
375,186 | def getVariable(self, name):
return lock_and_call(
lambda: Variable(self._impl.getVariable(name)),
self._lock
) | Get the variable with the corresponding name.
Args:
name: Name of the variable to be found.
Raises:
TypeError: if the specified variable does not exist. |
375,187 | def get_owned_games(self, steamID, include_appinfo=1,
include_played_free_games=0, appids_filter=None, format=None):
    parameters = {
        'steamid': steamID,
        'include_appinfo': include_appinfo,
        'include_played_free_games': include_played_free_games
    }
    if format is not None:
        parameters['format'] = format
    if appids_filter is not None:
        # key name for the filter is a reconstruction of the stripped literal
        parameters['appids_filter'] = appids_filter
    url = self.create_request_url(self.interface, 'GetOwnedGames', 1,
                                  parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request a list of games owned by a given steam id.
steamID: The user's id
include_appinfo: boolean.
include_played_free_games: boolean.
appids_filter: a json encoded list of app ids.
format: Return format. None defaults to json. (json, xml, vdf) |
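The wrapper above ultimately targets Steam's IPlayerService/GetOwnedGames Web API endpoint. A direct call with requests looks roughly like this; the API key and steam id are placeholders, and the parameter names follow the public Web API documentation rather than this wrapper's internals:

import requests

API_KEY = "YOUR_STEAM_WEB_API_KEY"        # placeholder
STEAM_ID = "76561197960435530"            # placeholder 64-bit steam id

resp = requests.get(
    "https://api.steampowered.com/IPlayerService/GetOwnedGames/v1/",
    params={
        "key": API_KEY,
        "steamid": STEAM_ID,
        "include_appinfo": 1,
        "include_played_free_games": 0,
        "format": "json",
    },
    timeout=10,
)
games = resp.json().get("response", {}).get("games", [])
print(len(games))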
375,188 | def dAbr_dV(dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, Sf, St):
dAf_dPf = spdiag(2 * Sf.real())
dAf_dQf = spdiag(2 * Sf.imag())
dAt_dPt = spdiag(2 * St.real())
dAt_dQt = spdiag(2 * St.imag())
dAf_dVa = dAf_dPf * dSf_dVa.real() + dAf_dQf * dSf_dVa.imag()
dAt_dVa = dAt_dPt * dSt_dVa.real() + dAt_dQt * dSt_dVa.imag()
dAf_dVm = dAf_dPf * dSf_dVm.real() + dAf_dQf * dSf_dVm.imag()
dAt_dVm = dAt_dPt * dSt_dVm.real() + dAt_dQt * dSt_dVm.imag()
return dAf_dVa, dAf_dVm, dAt_dVa, dAt_dVm | Partial derivatives of squared flow magnitudes w.r.t voltage.
Computes partial derivatives of apparent power w.r.t active and
reactive power flows. Partial derivative must equal 1 for lines
with zero flow to avoid division by zero errors (1 comes from
L'Hopital). |
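The diagonal 2·P and 2·Q factors above come from the chain rule: with A = |S|² = P² + Q², dA/dx = 2P·dP/dx + 2Q·dQ/dx for any variable x (here the voltage angles and magnitudes). A scalar sanity check of that identity with numpy, using arbitrary stand-in functions for P and Q:

import numpy as np

# check dA/dx = 2*P*dP/dx + 2*Q*dQ/dx for A = P**2 + Q**2
P = lambda x: np.sin(x)          # stand-ins for flow components vs. voltage
Q = lambda x: np.cos(2 * x)
A = lambda x: P(x) ** 2 + Q(x) ** 2

x0, h = 0.3, 1e-6
dP = (P(x0 + h) - P(x0 - h)) / (2 * h)
dQ = (Q(x0 + h) - Q(x0 - h)) / (2 * h)
analytic = 2 * P(x0) * dP + 2 * Q(x0) * dQ
numeric = (A(x0 + h) - A(x0 - h)) / (2 * h)
print(np.isclose(analytic, numeric))   # True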
375,189 | def build_error_handler(*tasks):
def _handler(error, tasks=[]):
[t(error) for t in tasks]
return error.jsonify(), error.status_code, error.headers
return functools.partial(_handler, tasks=tasks) | Provides a generic error function that packages a flask_buzz exception
so that it can be handled nicely by the flask error handler::
app.register_error_handler(
FlaskBuzz, FlaskBuzz.build_error_handler(),
)
Additionally, extra tasks may be applied to the error prior to
packaging::
app.register_error_handler(
FlaskBuzz,
build_error_handler(print, lambda e: foo(e)),
)
This latter example will print the error to stdout and also call the
foo() function with the error prior to packaging it for flask's handler |
375,190 | def find_multiline_pattern(self, regexp, cursor, findflag):
pattern = to_text_string(regexp.pattern())
text = to_text_string(self.toPlainText())
try:
regobj = re.compile(pattern)
except sre_constants.error:
return
if findflag & QTextDocument.FindBackward:
offset = min([cursor.selectionEnd(), cursor.selectionStart()])
text = text[:offset]
matches = [_m for _m in regobj.finditer(text, 0, offset)]
if matches:
match = matches[-1]
else:
return
else:
offset = max([cursor.selectionEnd(), cursor.selectionStart()])
match = regobj.search(text, offset)
if match:
pos1, pos2 = match.span()
fcursor = self.textCursor()
fcursor.setPosition(pos1)
fcursor.setPosition(pos2, QTextCursor.KeepAnchor)
return fcursor | Reimplement QTextDocument's find method
Add support for *multiline* regular expressions |
375,191 | def version_history(soup, html_flag=True):
"extract the article version history details"
convert = lambda xml_string: xml_to_html(html_flag, xml_string)
version_history = []
related_object_tags = raw_parser.related_object(raw_parser.article_meta(soup))
for tag in related_object_tags:
article_version = OrderedDict()
date_tag = first(raw_parser.date(tag))
if date_tag:
    # attribute and key names below are reconstructions of literals lost in extraction
    copy_attribute(date_tag.attrs, 'date-type', article_version, 'version')
    (day, month, year) = ymd(date_tag)
    article_version['day'] = day
    article_version['month'] = month
    article_version['year'] = year
    article_version['date'] = date_struct_nn(year, month, day)
copy_attribute(tag.attrs, 'xlink:href', article_version, 'xlink_href')
set_if_value(article_version, "comment",
convert(node_contents_str(first(raw_parser.comment(tag)))))
version_history.append(article_version)
return version_history | extract the article version history details |
375,192 | def print_variables_info(self, output_file=sys.stdout):
    # header and row layout are reconstructed placeholders for the string
    # literals lost in extraction
    table = ('name           | type     | size\n' +
             '---------------+----------+------------------\n')
    for name, var_info in list(self.variables.items()):
        table += '{:<14} | {:<8} | {!s:<10}\n'.format(name, var_info[0], var_info[1])
    print(prefix_indent('variables: ', table), file=output_file) | Print variables information in human readable format. |
375,193 | def locate_file(filename, env_var='', directory='.'):  # defaults are placeholders for stripped literals
f = locate_by_env(filename, env_var) or locate_by_dir(filename, directory)
return os.path.abspath(f) if can_locate(f) else None | Locates a file given an environment variable or directory
:param filename: filename to search for
:param env_var: environment variable to look under
:param directory: directory to look in
:return: (string) absolute path to filename or None if not found |
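The lookup order described above (environment variable first, then a fallback directory) can be sketched without the helper functions it depends on. The env var and directory names here are placeholders, not the original defaults:

import os

def locate(filename, env_var="MYAPP_HOME", directory="."):
    # look under $env_var first, then under the fallback directory
    for base in (os.environ.get(env_var), directory):
        if base:
            candidate = os.path.join(base, filename)
            if os.path.isfile(candidate):
                return os.path.abspath(candidate)
    return None

print(locate("setup.py"))   # absolute path if present in '.', else None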
375,194 | def has_perm(self, perm):
    if '.' in perm:
        app_label, codename = perm.split('.')
        permissions = self.permissions.filter(
            content_type__app_label=app_label,
            codename=codename)
        groups = self.groups.filter(
            permissions__content_type__app_label=app_label,
            permissions__codename=codename)
    else:
        permissions = self.permissions.filter(codename=perm)
        groups = self.groups.filter(permissions__codename=perm)
return permissions.exists() or groups.exists() | Checks if key has the given django's auth Permission |
375,195 | def add_filter_rule(
        self, name, condition, filters, actions, active=1, way='in'):
    # the default for `way` and the key names below are reconstructions of
    # string literals lost in extraction
    filters['condition'] = condition
    new_rule = {
        'name': name,
        'active': active,
        'filterTests': filters,
        'filterActions': actions
    }
    new_rules = [zobjects.FilterRule.from_dict(new_rule)]
    prev_rules = self.get_filter_rules(way=way)
    if prev_rules:
        # keep any pre-existing rules alongside the new one
        for rule in prev_rules:
            new_rules.append(rule)
    return new_rules | :param: name filter name
:param: condition allof or anyof
:param: filters dict of filters
:param: actions dict of actions
:param: way string describing if filter is for 'in' or 'out' messages
:returns: list of user's zobjects.FilterRule |
375,196 | def _flatten(self, element):
    result = [(element.text or '')]
    # the 'symbol' attribute name and the replace() arguments are assumptions
    # for the string literals lost in extraction
    if element.attrib.get('symbol'):
        result.append(Symbol(element.attrib.get('symbol')).textbox)
    for sel in element:
        result.append(self._flatten(sel))
        result.append(sel.tail or '')
    return ''.join(result).replace('\n', ' ') | Recursively enter and extract text from all child
elements. |
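The recursion above hinges on collecting both each child's text and its tail (the text that follows the child's closing tag). A standalone illustration with xml.etree.ElementTree:

import xml.etree.ElementTree as ET

def flatten(element):
    # collect element.text, recurse into children, and keep each child's tail
    parts = [element.text or ""]
    for child in element:
        parts.append(flatten(child))
        parts.append(child.tail or "")
    return "".join(parts)

root = ET.fromstring("<p>Hello <b>bold</b> world <i>and</i> more</p>")
print(flatten(root))   # 'Hello bold world and more'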
375,197 | def check_ok_button(self):
login = self.login.text()
password = self.password.text()
url = self.url.text()
if self.layers.count() >= 1 and login and password and url:
self.ok_button.setEnabled(True)
else:
self.ok_button.setEnabled(False) | Helper to enable or disable the OK button. |
375,198 | def limit_spec(self, spec):
    if list(spec.keys()) == ['name']:
        # scope a bare name lookup to the current jurisdiction; the key and
        # filter path are reconstructed from the docstring below
        spec['memberships__organization__jurisdiction_id'] = self.jurisdiction_id
    return spec | Whenever we do a Pseudo ID lookup from the database, we need to limit
based on the memberships -> organization -> jurisdiction, so we scope
the resolution. |
375,199 | def return_self_updater(func):
    @functools.wraps(func)
    def decorator(k, v):
        func(k, v)
return v
return decorator | Run func, but still return v. Useful with knowledge.update for functions that operate in place, like append, extend, etc.
e.g. return_self_updater(lambda k, v: v.append('newobj')) |
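A tiny usage demo of the decorator, reusing the docstring's 'newobj' example; this is a self-contained copy for illustration:

import functools

def return_self_updater(func):
    @functools.wraps(func)
    def decorator(k, v):
        func(k, v)
        return v
    return decorator

append_item = return_self_updater(lambda k, v: v.append("newobj"))
values = ["a"]
print(append_item("key", values))   # ['a', 'newobj'] -- the mutated list itself comes back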