Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---|
12,500 | def get_app_data(app_id):
try:
conn = get_conn()
c = conn.cursor()
c.execute("SELECT id,name,app_data FROM app WHERE id= ".format(app_id))
result = c.fetchone()
conn.close()
if result:
appname = result[1]
app_data = utils.str2dict( base64.b64decode(result[2]) )
return (appname,app_data)
else:
return (None,None)
except Exception as e:
raise RuntimeError('%s' % e) | Get app data (returns name and app_data) |
12,501 | def find_parents(self):
for i in range(len(self.vertices)):
self.vertices[i].parents = []
for i in range(len(self.vertices)):
for child in self.vertices[i].children:
if i not in self.vertices[child].parents:
self.vertices[child].parents.append(i) | Take a tree and set the parents according to the children
Takes a tree structure which lists the children of each vertex
and computes the parents for each vertex, storing them in each vertex's parents list. |
12,502 | def _configuration(self, kwargs, config_item):
if 'notification_configuration' not in config_item:
if 'notification_type' not in kwargs:
return
nc = kwargs['notification_configuration'] = {}
for field in Resource.configuration[kwargs['notification_type']]:
if field not in config_item:
raise exc.TowerCLIError(
'Required config field %s not provided.' % field)  # message elided in source
else:
nc[field] = config_item[field]
else:
kwargs['notification_configuration'] = \
config_item['notification_configuration'] | Combine configuration-related keyword arguments into
notification_configuration. |
12,503 | def read_env(cls, env_file=None, **overrides):
if env_file is None:
frame = sys._getframe()
env_file = os.path.join(os.path.dirname(frame.f_back.f_code.co_filename), '.env')
if not os.path.exists(env_file):
warnings.warn(
"%s doesn't exist - if you're not configuring your "
"environment separately, create one." % env_file)
return
try:
with open(env_file) if isinstance(env_file, basestring) else env_file as f:
content = f.read()
except IOError:
warnings.warn(
"Error reading %s - if you're not configuring your "
"environment separately, check this." % env_file)
return
# parsing block reconstructed from the garbled source and the gist cited in the docstring:
logger.debug('Read environment variables from: {0}'.format(env_file))
for line in content.splitlines():
m1 = re.match(r'\A(?:export )?([A-Za-z_0-9]+)=(.*)\Z', line)
if m1:
key, val = m1.group(1), m1.group(2)
m2 = re.match(r"\A'(.*)'\Z", val)
if m2:
val = m2.group(1)
m3 = re.match(r'\A"(.*)"\Z', val)
if m3:
val = re.sub(r'\\(.)', r'\1', m3.group(1))
cls.ENVIRON.setdefault(key, str(val))
for key, value in overrides.items():
cls.ENVIRON.setdefault(key, value) | Read a .env file into os.environ.
If not given a path to a dotenv path, does filthy magic stack backtracking
to find manage.py and then find the dotenv.
http://www.wellfireinteractive.com/blog/easier-12-factor-django/
https://gist.github.com/bennylope/2999704 |
12,504 | def run(self):
loop = GLib.MainLoop()
context = loop.get_context()
while True:
time.sleep(0.1)
if context.pending():
context.iteration()
self._manager[ATTR_POSITION] = self._position()
try:
method, args = self._task_queue.get(False)
getattr(self, method)(**args)
except queue.Empty:
pass
if self.state != STATE_IDLE:
continue
try:
uri = self._media_queue.get(False)
self.media(uri)
except queue.Empty:
pass | Run the process.
Iterate the GLib main loop and process the task queue. |
12,505 | def init_app(self, app):
self.app = app
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['extension'] = self  # extension key elided in source
self.reload() | Initialize this Flask extension for given app. |
12,506 | def python(source):
obj = type('', (object,), {})()
_exec(source, obj.__dict__, obj.__dict__)
return obj |
>>> python('def add(a, b): return a + b').add(40, 2)
42 |
12,507 | def grains_dict(self):
d = {grain.duration: grain for grain in self.grains()}
d.update({grain.label: grain for grain in self.grains()})
return d | Allow looking up a grain by either label or duration
For backward compatibility |
12,508 | def update_metric_tags(self, metric_type, metric_id, **tags):
self._put(self._get_metrics_tags_url(self._get_metrics_single_url(metric_type, metric_id)), tags, parse_json=False) | Replace the metric_id's tags with given **tags
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
:param tags: Updated key/value tag values of the metric |
12,509 | def _setup_output_file(self, output_filename, args, write_header=True):
try:
output_file = open(output_filename, 'w')
except IOError as e:
sys.exit(e)
if write_header:
output_file.write(' '.join(map(util.escape_string_shell, self._build_cmdline(args)))
+ '\n\n\n' + '-' * 80 + '\n\n\n')
output_file.flush()
return output_file | Open and prepare output file. |
12,510 | def run_config_diagnostics(config_path=CONFIG_PATH):
config = read_config(config_path)
missing_sections = set()
malformed_entries = defaultdict(set)
for section, expected_section_keys in SECTION_KEYS.items():
section_content = config.get(section)
if not section_content:
missing_sections.add(section)
else:
for option in expected_section_keys:
option_value = section_content.get(option)
if not option_value:
malformed_entries[section].add(option)
return config_path, missing_sections, malformed_entries | Run diagnostics on the configuration file.
Args:
config_path (str): Path to the configuration file.
Returns:
str, Set[str], dict(str, Set[str]): The path to the configuration file, a set of missing
sections and a dict that maps each section to the entries that have either missing or empty
options. |
12,511 | def handle_document_error(self, item_session: ItemSession) -> Actions:
self._waiter.increment()
self._statistics.errors[ServerError] += 1
action = self.handle_response(item_session)
if action == Actions.NORMAL:
item_session.set_status(Status.error)
return action | Callback for when the document only describes a server error.
Returns:
A value from :class:`.hook.Actions`. |
12,512 | def handle_result(self, idents, parent, raw_msg, success=True):
engine = idents[0]
client = idents[1]
raw_msg[:2] = [client,engine]
self.client_stream.send_multipart(raw_msg, copy=False)
msg_id = parent['msg_id']
self.pending[engine].pop(msg_id)
if success:
self.completed[engine].add(msg_id)
self.all_completed.add(msg_id)
else:
self.failed[engine].add(msg_id)
self.all_failed.add(msg_id)
self.all_done.add(msg_id)
self.destinations[msg_id] = engine
self.update_graph(msg_id, success) | handle a real task result, either success or failure |
12,513 | def plot_histogram(self, filename=None):
header, data = self.read_next_data_block()
data = data.view()
plt.figure("Histogram")
plt.hist(data.flatten(), 65, facecolor='g')  # color elided in source
if filename:
plt.savefig(filename)
plt.show() | Plot a histogram of data values |
12,514 | def _fill_syns(self, new_syns, rpacketlists_per_worker):
logger.debug("rank:{}/{} {}._fill_syns".format(mpi.myrank, mpi.nprocs, self.__class__.__name__))
for packetlists in rpacketlists_per_worker:
for packetlist in packetlists:
for packet in packetlist:
new_syns.set_value(**packet)
return new_syns | rpacket_per_worker is a list of packetlists as returned by _run_chunk |
12,515 | def inline(text, data=None):
if not data:
data = text.encode()
elif not isinstance(data, (bytes, bytearray, memoryview)):
data = str(data).encode()
if len(data) > 64:
raise ValueError('Too many bytes for the data')
return types.KeyboardButtonCallback(text, data) | Creates a new inline button.
If `data` is omitted, the given `text` will be used as `data`.
In any case `data` should be either ``bytes`` or ``str``.
Note that the given `data` must be less or equal to 64 bytes.
If more than 64 bytes are passed as data, ``ValueError`` is raised. |
12,516 | def error(self, fail=True, action=''):
e = 'unspecified error'  # message elided in source
if action:
e = '%s: %s' % (action, e)
log.error(e)
if fail:
raise IOError(e) | SHOULD BE PRIVATE METHOD |
12,517 | def best_diff(img1, img2, opts):
w1, h1 = img1.size
w2, h2 = img2.size
w, h = min(w1, w2), min(h1, h2)
best = None
best_value = 255 * w * h + 1
xr = abs(w1 - w2) + 1
yr = abs(h1 - h2) + 1
p = Progress(xr * yr, timeout=opts.timeout)
for x in range(xr):
if w1 > w2:
x1, x2 = x, 0
else:
x1, x2 = 0, x
for y in range(yr):
if h1 > h2:
y1, y2 = y, 0
else:
y1, y2 = 0, y
p.next()
this = diff(img1, img2, (x1, y1), (x2, y2))
this_value = diff_badness(this)
if this_value < best_value:
best = this
best_value = this_value
best_pos = (x1, y1), (x2, y2)
return best, best_pos | Find the best alignment of two images that minimizes the differences.
Returns (diff, alignments) where ``diff`` is a difference map, and
``alignments`` is a tuple ((x1, y1), (x2, y2)).
See ``diff()`` for the description of the alignment numbers. |
12,518 | def exciter(self, Xexc, Pexc, Vexc):
exciters = self.exciters
F = zeros(Xexc.shape)
typ1 = [e.generator._i for e in exciters if e.model == CONST_EXCITATION]
typ2 = [e.generator._i for e in exciters if e.model == IEEE_DC1A]
F[typ1, :] = 0.0
Efd = Xexc[typ2, 0]
Uf = Xexc[typ2, 1]
Ur = Xexc[typ2, 2]
Ka = Pexc[typ2, 0]
Ta = Pexc[typ2, 1]
Ke = Pexc[typ2, 2]
Te = Pexc[typ2, 3]
Kf = Pexc[typ2, 4]
Tf = Pexc[typ2, 5]
Aex = Pexc[typ2, 6]
Bex = Pexc[typ2, 7]
Ur_min = Pexc[typ2, 8]
Ur_max = Pexc[typ2, 9]
Uref = Pexc[typ2, 10]
Uref2 = Pexc[typ2, 11]
U = Vexc[typ2, 1]
Ux = Aex * exp(Bex * Efd)
dUr = 1 / Ta * (Ka * (Uref - U + Uref2 - Uf) - Ur)
dUf = 1 / Tf * (Kf / Te * (Ur - Ux - Ke * Efd) - Uf)
if sum(flatnonzero(Ur > Ur_max)) >= 1:
Ur2 = Ur_max
elif sum(flatnonzero(Ur < Ur_min)) >= 1:
Ur2 = Ur_min
else:
Ur2 = Ur
dEfd = 1 / Te * (Ur2 - Ux - Ke * Efd)
F[typ2, :] = c_[dEfd, dUf, dUr]
return F | Exciter model.
Based on Exciter.m from MatDyn by Stijn Cole, developed at Katholieke
Universiteit Leuven. See U{http://www.esat.kuleuven.be/electa/teaching/
matdyn/} for more information. |
12,519 | def build_network(self, network=None, *args, **kwargs):
self.network = network
return network | Core method to construct PyPSA Network object. |
12,520 | def random_box(molecules, total=None, proportions=None, size=[1.,1.,1.], maxtries=100):
if proportions is None:
proportions = np.ones(len(molecules)) / len(molecules)
else:
proportions = np.array(proportions)
size = np.array(size)
tree = CoverTree(metric="periodic", metric_args={'cell_lengths': size})  # key name elided in source
type_array = []
result = []
vdw_radii = []
max_vdw = max(vdw_radius(np.concatenate([m.type_array for m in molecules])))
first = True
for l, n in enumerate((proportions * total).astype(int)):
for i in range(n):
for k in range(maxtries):
template = molecules[l].copy()
reference = np.random.uniform(0, 1, 3) * size
r_array = template.r_array + reference
pts_list, distances_list = tree.query_ball_many(r_array, vdw_radius(template.type_array) + max_vdw)
ok = True
for i, (dist, pts) in enumerate(zip(distances_list, pts_list)):
if len(dist) == 0:
break
found_vdw = np.array([vdw_radii[p] for p in pts])
ok &= all(dist > found_vdw + vdw_radius(template.type_array[i]))
if ok:
tree.insert_many(r_array)
template.r_array = r_array
result.append(template)
vdw_radii.extend(vdw_radius(template.type_array))
break
if not ok:
raise Exception("Trials exceeded")
system = System(result)
system.box_vectors[0, 0] = size[0]
system.box_vectors[1, 1] = size[1]
system.box_vectors[2, 2] = size[2]
return system | Create a System made of a series of random molecules.
Parameters:
total: total number of molecules to place in the box
molecules: list of template molecules to draw from
proportions: relative proportions of each molecule type |
12,521 | def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
prefix = node.get('prefix')  # attribute key elided in source
results = []
match = self.find_obj(env, prefix, target, None, 1)
if match is not None:
(name, obj) = match
results.append((self.name + ':' + self.role_for_objtype(obj[1]),  # role prefix elided in source
make_refnode(builder, fromdocname, obj[0], name, contnode,
name)))
return results | Look for any references, without object type
This always searches in "refspecific" mode |
12,522 | def PwDecrypt(self, password):
buf = password
pw = six.b('')
last = self.authenticator
while buf:
hash = md5_constructor(self.secret + last).digest()
if six.PY3:
for i in range(16):
pw += bytes((hash[i] ^ buf[i],))
else:
for i in range(16):
pw += chr(ord(hash[i]) ^ ord(buf[i]))
(last, buf) = (buf[:16], buf[16:])
while pw.endswith(six.b('\x00')):
pw = pw[:-1]
return pw.decode() | Unobfuscate a RADIUS password. RADIUS hides passwords in packets by
using an algorithm based on the MD5 hash of the packet authenticator
and RADIUS secret. This function reverses the obfuscation process.
:param password: obfuscated form of password
:type password: binary string
:return: plaintext password
:rtype: unicode string |
12,523 | def create(cls, name, ne_ref=None, operator='exclusion',
sub_expression=None, comment=None):
sub_expression = [] if sub_expression is None else [sub_expression]
json = {'name': name,
'operator': operator,
'ne_ref': ne_ref,
'sub_expression': sub_expression,
'comment': comment}
return ElementCreator(cls, json) | Create the expression
:param str name: name of expression
:param list ne_ref: network element references for expression
:param str operator: 'exclusion' (negation), 'union', 'intersection'
(default: exclusion)
:param dict sub_expression: sub expression used
:param str comment: optional comment
:raises CreateElementFailed: element creation failed with reason
:return: instance with meta
:rtype: Expression |
12,524 | def get_corpus(self):
corpus = []
cd = 0
tag = None
for i in range(0, self.init_corpus[0][0]):
init_unit = self.unit_raw[self.init_corpus[0][0] - i]
cdm = CDM(init_unit)
alpha = cdm.get_alpha()
if cd <= self.cd_min and cdm.NC != 0:
tag = True
if cd > self.cd_max or cdm.NC == 0:
tag = False
if cd in range(self.cd_min + 1, self.cd_max) and cdm.NC != 0:
if alpha > 0:
tag = True
else:
tag = False
if cdm.NC == 0:
cd += 1
else:
cd = 0
if tag == True:
corpus.append(init_unit)
elif tag == False:
if alpha < 0 or cd > self.cd_max:
break
else:
continue
corpus = list(reversed(corpus))
try:
self.index = self.init_corpus[0][0] - i + 1
except UnboundLocalError:
log('warn', 'corpus start boundary not found')  # log arguments elided in source
self.index = self.init_corpus[0][0]
cd = 0
tag = None
for i in range(1, len(self.unit_raw) - self.init_corpus[0][0]):
init_unit = self.unit_raw[self.init_corpus[0][0] + i]
cdm = CDM(init_unit)
alpha = cdm.get_alpha()
if cd <= self.cd_min and cdm.NC != 0:
tag = True
if cd > self.cd_max or cdm.NC == 0:
tag = False
if cd in range(self.cd_min + 1, self.cd_max) and cdm.NC != 0:
if alpha > 0:
tag = True
else:
tag = False
if cdm.NC == 0:
cd += 1
else:
cd = 0
if tag == True:
corpus.append(init_unit)
elif tag == False:
if alpha < 0 or cd > self.cd_max:
break
else:
continue
log('info', 'corpus: {}'.format(corpus))  # log arguments elided in source
return ''.join(corpus) | Get the corpus
Return:
corpus -- the corpus, as a str |
12,525 | def get_working_login(self, database, username=None, password=None):
login_user = None
self.get_db(database, username=username, password=password,
never_auth_with_admin=True)
login_user = self.get_login_user(database)
if login_user:
username = login_user["username"]
password = (login_user["password"] if "password" in login_user
else None)
return username, password | authenticate to the specified database starting with specified
username/password (if present), try to return a successful login
within 3 attempts |
12,526 | def reboot_autopilot(self, hold_in_bootloader=False):
if self.mavlink10():
if hold_in_bootloader:
param1 = 3
else:
param1 = 1
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN, 0,
param1, 0, 0, 0, 0, 0, 0)
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN, 0,
1, 0, 0, 0, 0, 0, 0) | reboot the autopilot |
12,527 | def create_session(docker_image=None,
docker_rm=None,
echo=False,
loglevel='WARNING',  # elided defaults below restored with plausible values
nocolor=False,
session_type='bash',
vagrant_session_name=None,
vagrant_image='ubuntu/xenial64',
vagrant_gui=False,
vagrant_memory='1024',
vagrant_num_machines='1',
vagrant_provider='virtualbox',
vagrant_root_folder=None,
vagrant_swapsize='2G',
vagrant_version='1.8.6',
vagrant_virt_method='virtualbox',
vagrant_cpu='1',
video=-1,
walkthrough=False):
assert session_type in ('bash', 'docker', 'vagrant'), shutit_util.print_debug()
shutit_global_object = shutit_global.shutit_global_object
if video != -1 and video > 0:
walkthrough = True
if session_type in ('bash', 'docker'):
return shutit_global_object.create_session(session_type,
docker_image=docker_image,
rm=docker_rm,
echo=echo,
walkthrough=walkthrough,
walkthrough_wait=video,
nocolor=nocolor,
loglevel=loglevel)
elif session_type == 'vagrant':
if vagrant_session_name is None:
vagrant_session_name = 'shutit_vagrant_' + shutit_util.random_id()  # prefix elided in source
if isinstance(vagrant_num_machines, int):
vagrant_num_machines = str(vagrant_num_machines)
assert isinstance(vagrant_num_machines, str)
assert isinstance(int(vagrant_num_machines), int)
if vagrant_root_folder is None:
vagrant_root_folder = shutit_global.shutit_global_object.owd
return create_session_vagrant(vagrant_session_name,
vagrant_num_machines,
vagrant_image,
vagrant_provider,
vagrant_gui,
vagrant_memory,
vagrant_swapsize,
echo,
walkthrough,
nocolor,
video,
vagrant_version,
vagrant_virt_method,
vagrant_root_folder,
vagrant_cpu,
loglevel) | Creates a distinct ShutIt session. Sessions can be of type:
bash - a bash shell is spawned and
vagrant - a Vagrantfile is created and 'vagrant up'ped |
12,528 | def _ip_int_from_string(self, ip_str):
parts = ip_str.split(':')
if len(parts) < 3:
raise AddressValueError(ip_str)
if '.' in parts[-1]:
ipv4_int = IPv4Address(parts.pop())._ip
parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
parts.append('%x' % (ipv4_int & 0xFFFF))
if len(parts) > self._HEXTET_COUNT + 1:
raise AddressValueError(ip_str)
# Find the '::' marking a skipped run of zero hextets; more than one is invalid.
try:
skip_index, = (
[i for i in xrange(1, len(parts) - 1) if not parts[i]] or
[None])
except ValueError:
raise AddressValueError(ip_str)
if skip_index is not None:
parts_hi = skip_index
parts_lo = len(parts) - skip_index - 1
if not parts[0]:
parts_hi -= 1
if parts_hi:
raise AddressValueError(ip_str)
if not parts[-1]:
parts_lo -= 1
if parts_lo:
raise AddressValueError(ip_str)
parts_skipped = self._HEXTET_COUNT - (parts_hi + parts_lo)
if parts_skipped < 1:
raise AddressValueError(ip_str)
else:
if len(parts) != self._HEXTET_COUNT:
raise AddressValueError(ip_str)
parts_hi = len(parts)
parts_lo = 0
parts_skipped = 0
try:
ip_int = 0L
for i in xrange(parts_hi):
ip_int <<= 16
ip_int |= self._parse_hextet(parts[i])
ip_int <<= 16 * parts_skipped
for i in xrange(-parts_lo, 0):
ip_int <<= 16
ip_int |= self._parse_hextet(parts[i])
return ip_int
except ValueError:
raise AddressValueError(ip_str) | Turn an IPv6 ip_str into an integer.
Args:
ip_str: A string, the IPv6 ip_str.
Returns:
A long, the IPv6 ip_str.
Raises:
AddressValueError: if ip_str isn't a valid IPv6 Address. |
12,529 | def write_graph(self, filename):
f = open(filename, 'w')
f.write(self._get_graphviz_data())
f.close() | Write raw graph data which can be post-processed using graphviz. |
12,530 | def to_timedelta(value, strict=True):
if isinstance(value, int):
return timedelta(seconds=value)
if not isinstance(value, str):
raise ValueError('unsupported value {!r} of type {}'.format(
value, type(value).__name__
)
)
# 'HH:MM:SS' parsing elided in source; reconstructed from the docstring:
hours, minutes, seconds = (int(p) for p in value.split(':'))
if strict and (minutes > 59 or seconds > 59):
raise StrictnessError('minutes and seconds must not exceed 59')
return timedelta(hours=hours, minutes=minutes, seconds=seconds) | converts duration string to timedelta
strict=True (by default) raises StrictnessError if either hours,
minutes or seconds in duration string exceed allowed values |
12,531 | def acceptNavigationRequest(self, url, navigation_type, isMainFrame):
if navigation_type == QWebEnginePage.NavigationTypeLinkClicked:
self.linkClicked.emit(url)
return False
return True | Overloaded method to handle links ourselves |
12,532 | def serialize(self, o):
if isinstance(o, (list, tuple)):
return [self.serialize(i) for i in o]
if isinstance(o, dict):
return {k: self.serialize(v) for k, v in o.items()}
if isinstance(o, datetime):
return o.isoformat()
if isinstance(o, Result):
return self.serialize(o.serialize())
return o | Returns a safe serializable object that can be serialized into JSON.
@param o Python object to serialize |
12,533 | def timeout(timeout_time, default):
def timeout_function(f):
def f2(*args):
def timeout_handler(signum, frame):
raise MethodTimer.DecoratorTimeout()
old_handler = signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(timeout_time)
try:
retval = f(*args)
except MethodTimer.DecoratorTimeout:
return default
finally:
signal.signal(signal.SIGALRM, old_handler)
signal.alarm(0)
return retval
return f2
return timeout_function | Decorate a method so it is required to execute in a given time period,
or return a default value. |
12,534 | def load_secret(self, secret):
if isinstance(secret, pyhsm.aead_cmd.YHSM_YubiKeySecret):
secret = secret.pack()
return pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Load(self.stick, secret).execute() | Ask YubiHSM to load a pre-existing YubiKey secret.
The data is stored internally in the YubiHSM in temporary memory -
this operation would typically be followed by one or more L{generate_aead}
commands to actually retrieve the generated secret (in encrypted form).
@param secret: YubiKey secret to load
@type secret: L{pyhsm.aead_cmd.YHSM_YubiKeySecret} or string
@returns: Number of bytes in YubiHSM internal buffer after load
@rtype: integer
@see: L{pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Load} |
12,535 | def output_file_job(job, filename, file_id, output_dir, s3_key_path=None):
job.fileStore.logToMaster('Writing {} to {}'.format(filename, output_dir))  # message elided in source
work_dir = job.fileStore.getLocalTempDir()
filepath = job.fileStore.readGlobalFile(file_id, os.path.join(work_dir, filename))
if urlparse(output_dir).scheme == 's3':
s3am_upload(job=job, fpath=os.path.join(work_dir, filepath),
s3_dir=output_dir,
s3_key_path=s3_key_path)
elif os.path.exists(os.path.join(output_dir, filename)):
job.fileStore.logToMaster("File already exists: {}".format(filename))
else:
mkdir_p(output_dir)
copy_files([filepath], output_dir) | Uploads a file from the FileStore to an output directory on the local filesystem or S3.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str filename: basename for file
:param str file_id: FileStoreID
:param str output_dir: Amazon S3 URL or local path
:param str s3_key_path: (OPTIONAL) Path to 32-byte key to be used for SSE-C encryption
:return: |
12,536 | def get_fit_failed_candidate_model(model_type, formula):
warnings = [
EEMeterWarning(
qualified_name="eemeter.caltrack_daily.{}.model_results".format(model_type),
description=(
"Error encountered in statsmodels.formula.api.ols method. (Empty data?)"
),
data={"traceback": traceback.format_exc()},
)
]
return CalTRACKUsagePerDayCandidateModel(
model_type=model_type, formula=formula, status="ERROR", warnings=warnings
) | Return a Candidate model that indicates the fitting routine failed.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
formula : :any:`str`
The candidate model formula.
Returns
-------
candidate_model : :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
Candidate model instance with status ``'ERROR'``, and warning with
traceback. |
12,537 | def fetch_all_records(self):
api = self.doapi_manager
return map(self._record, api.paginate(self.record_url, 'domain_records')) |
Returns a generator that yields all of the DNS records for the domain
:rtype: generator of `DomainRecord`\ s
:raises DOAPIError: if the API endpoint replies with an error |
12,538 | def _initialize(self, funs_to_tally, length):
chain = self.nchains
self._chains[chain] = self._h5file.create_group(
'/', 'chain%d' % chain, 'chain #%d' % chain)
for name, fun in six.iteritems(funs_to_tally):
arr = np.asarray(fun())
assert arr.dtype != np.dtype('object')
array = self._h5file.createEArray(
self._chains[chain], name,
tables.Atom.from_dtype(arr.dtype), (0,) + arr.shape,
filters=self.filter)
self._arrays[chain, name] = array
self._traces[name] = Trace(name, getfunc=fun, db=self)
self._traces[name]._initialize(self.chains, length)
self.trace_names.append(list(funs_to_tally.keys())) | Create a group named ``chain#`` to store all data for this chain. |
12,539 | def set_type(self, value):
if value not in self.types_available:
log = "Sources field should be in one of %s" % (
self.types_available
)
raise MalFormattedSource(log)
self._type = value | Setter for type attribute |
12,540 | def isdir(self, path):
try:
self.remote_context.check_output(["test", "-d", path])
except subprocess.CalledProcessError as e:
if e.returncode == 1:
return False
else:
raise
return True | Return `True` if directory at `path` exist, False otherwise. |
12,541 | def get_best_local_timezone():
zone_name = tzlocal.get_localzone().zone
if zone_name in pytz.all_timezones:
return zone_name
if time.daylight:
local_offset = time.altzone
localtz = time.tzname[1]
else:
local_offset = time.timezone
localtz = time.tzname[0]
local_offset = datetime.timedelta(seconds=-local_offset)
for zone_name in pytz.all_timezones:
timezone = pytz.timezone(zone_name)
if not hasattr(timezone, '_tzinfos'):
continue
for utcoffset, daylight, tzname in timezone._tzinfos:
if utcoffset == local_offset and tzname == localtz:
return zone_name | Compares local timezone offset to pytz's timezone db, to determine
a matching timezone name to use when TIME_ZONE is not set. |
12,542 | def data_objet_class(data_mode='value', time_mode='framewise'):
# mode literals elided in source; reconstructed from the class names below:
classes_table = {('value', 'global'): GlobalValueObject,
('value', 'event'): EventValueObject,
('value', 'segment'): SegmentValueObject,
('value', 'framewise'): FrameValueObject,
('label', 'global'): GlobalLabelObject,
('label', 'event'): EventLabelObject,
('label', 'segment'): SegmentLabelObject,
('label', 'framewise'): FrameLabelObject}
try:
return classes_table[(data_mode, time_mode)]
except KeyError as e:
raise ValueError('unknown (data_mode, time_mode) combination') | Factory function for Analyzer result |
12,543 | def unitary(self, obj, qubits, label=None):
if isinstance(qubits, QuantumRegister):
qubits = qubits[:]
return self.append(UnitaryGate(obj, label=label), qubits, []) | Apply a unitary gate to the specified qubits. |
12,544 | def _handle_dbproc_call(self, parts, parameters_metadata):
for part in parts:
if part.kind == part_kinds.ROWSAFFECTED:
self.rowcount = part.values[0]
elif part.kind == part_kinds.TRANSACTIONFLAGS:
pass
elif part.kind == part_kinds.STATEMENTCONTEXT:
pass
elif part.kind == part_kinds.OUTPUTPARAMETERS:
self._buffer = part.unpack_rows(parameters_metadata, self.connection)
self._received_last_resultset_part = True
self._executed = True
elif part.kind == part_kinds.RESULTSETMETADATA:
self.description, self._column_types = self._handle_result_metadata(part)
elif part.kind == part_kinds.RESULTSETID:
self._resultset_id = part.value
elif part.kind == part_kinds.RESULTSET:
self._buffer = part.unpack_rows(self._column_types, self.connection)
self._received_last_resultset_part = part.attribute & 1
self._executed = True
else:
raise InterfaceError("Stored procedure call, unexpected part kind %d." % part.kind)
self._executed = True | Handle reply messages from STORED PROCEDURE statements |
12,545 | def get_scales(self, aesthetic):
bool_lst = self.find(aesthetic)
try:
idx = bool_lst.index(True)
return self[idx]
except ValueError:
return None | Return the scale for the aesthetic or None if there
isn't one.
These are the scales specified by the user e.g
`ggplot() + scale_x_continuous()`
or those added by default during the plot building
process |
12,546 | def javadoc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
has_explicit_title, title, target = split_explicit_title(text)
title = utils.unescape(title)
target = utils.unescape(target)
if not has_explicit_title:
target = target.lstrip('~')
if title[0] == '~':
title = title[1:].rpartition('.')[2]
app = inliner.document.settings.env.app
ref = get_javadoc_ref(app, rawtext, target)
if not ref:
raise ValueError("no Javadoc source found for %s in javadoc_url_map" % (target,))
ref.append(nodes.Text(title, title))
return [ref], [] | Role for linking to external Javadoc |
12,547 | def loadRule(rule_json_object):
name = rule_json_object['name']
rule_type = rule_json_object['rule_type']
validation_regex = None
required = False
removehtml = False
include_end_regex = False
strip_end_regex = None
sub_rules = []
begin_stripe_id = None
end_stripe_id = None
begin_shift = 0
end_shift = 0
if 'sub_rules' in rule_json_object:
sub_rules = rule_json_object['sub_rules']
if 'validation_regex' in rule_json_object:
validation_regex = rule_json_object['validation_regex']
if 'required' in rule_json_object:
required = rule_json_object['required']
if 'removehtml' in rule_json_object:
removehtml = rule_json_object['removehtml']
if 'include_end_regex' in rule_json_object:
include_end_regex = rule_json_object['include_end_regex']
if 'strip_end_regex' in rule_json_object:
strip_end_regex = rule_json_object['strip_end_regex']
if 'begin_stripe_id' in rule_json_object:
begin_stripe_id = rule_json_object['begin_stripe_id']
if 'end_stripe_id' in rule_json_object:
end_stripe_id = rule_json_object['end_stripe_id']
if 'begin_shift' in rule_json_object:
begin_shift = rule_json_object['begin_shift']
if 'end_shift' in rule_json_object:
end_shift = rule_json_object['end_shift']
rule = {}
if rule_type == ITEM_RULE or rule_type == 'ItemRule':  # string alias elided in source
begin_regex = rule_json_object['begin_regex']
end_regex = rule_json_object['end_regex']
rule = ItemRule(name, begin_regex, end_regex, include_end_regex, strip_end_regex, validation_regex, required,
removehtml, sub_rules, begin_stripe_id, end_stripe_id, begin_shift, end_shift)
if rule_type == ITERATION_RULE or rule_type == 'IterationRule':  # string alias elided in source
begin_regex = rule_json_object['begin_regex']
end_regex = rule_json_object['end_regex']
iter_begin_regex = rule_json_object['iter_begin_regex']
iter_end_regex = rule_json_object['iter_end_regex']
no_first_begin_iter_rule = False
if 'no_first_begin_iter_rule' in rule_json_object:
no_first_begin_iter_rule = rule_json_object['no_first_begin_iter_rule']
no_last_end_iter_rule = False
if 'no_last_end_iter_rule' in rule_json_object:
no_last_end_iter_rule = rule_json_object['no_last_end_iter_rule']
rule = IterationRule(name, begin_regex, end_regex, iter_begin_regex, iter_end_regex,
include_end_regex, strip_end_regex, no_first_begin_iter_rule,
no_last_end_iter_rule, validation_regex, required, removehtml,
sub_rules, begin_shift=begin_shift, end_shift=end_shift)
if 'id' in rule_json_object:
rule.id = rule_json_object['id']
return rule | Method to load the rules - when adding a new rule it must be added to the if statement within this method. |
12,548 | def best_model(seq2hmm):
for seq in seq2hmm:
best = []
for model in seq2hmm[seq]:
best.append([model, sorted([i[-1] for i in seq2hmm[seq][model]], reverse = True)[0]])
best_model = sorted(best, key = itemgetter(1), reverse = True)[0][0]
seq2hmm[seq] = [best_model] + [seq2hmm[seq][best_model]]
return seq2hmm | determine the best model: archaea, bacteria, eukarya (best score) |
12,549 | def get_used_entities(self,use_specs):
if len(use_specs.strip()) == 0:
return (self.pub_procs, self.pub_absints, self.pub_types, self.pub_vars)
only = bool(self.ONLY_RE.match(use_specs))
use_specs = self.ONLY_RE.sub('', use_specs)
ulist = self.SPLIT_RE.split(use_specs)
ulist[-1] = ulist[-1].strip()
uspecs = {}
for item in ulist:
match = self.RENAME_RE.search(item)
if match:
uspecs[match.group(1).lower()] = match.group(2)
else:
uspecs[item.lower()] = item
ret_procs = {}
ret_absints = {}
ret_types = {}
ret_vars = {}
for name, obj in self.pub_procs.items():
name = name.lower()
if only:
if name in uspecs:
ret_procs[name] = obj
else:
ret_procs[name] = obj
for name, obj in self.pub_absints.items():
name = name.lower()
if only:
if name in uspecs:
ret_absints[name] = obj
else:
ret_absints[name] = obj
for name, obj in self.pub_types.items():
name = name.lower()
if only:
if name in uspecs:
ret_types[name] = obj
else:
ret_types[name] = obj
for name, obj in self.pub_vars.items():
name = name.lower()
if only:
if name in uspecs:
ret_vars[name] = obj
else:
ret_vars[name] = obj
return (ret_procs,ret_absints,ret_types,ret_vars) | Returns the entities which are imported by a use statement. These
are contained in dicts. |
12,550 | def reset_object(self, driver_wrapper=None):
from toolium.pageelements.page_elements import PageElements
if driver_wrapper:
self.driver_wrapper = driver_wrapper
self._web_element = None
for element in self._get_page_elements():
element.reset_object(driver_wrapper)
if isinstance(element, (PageElement, PageElements)):
element.parent = self | Reset each page element object
:param driver_wrapper: driver wrapper instance |
12,551 | def plot_h(data, cols, wspace=.1, plot_kw=None, **kwargs):
import matplotlib.pyplot as plt
if plot_kw is None: plot_kw = [dict()] * len(cols)
_, axes = plt.subplots(nrows=1, ncols=len(cols), **kwargs)
plt.subplots_adjust(wspace=wspace)
for n, col in enumerate(cols):
data.loc[:, col].plot(ax=axes[n], **plot_kw[n])
return axes | Plot horizontally
Args:
data: DataFrame of data
cols: columns to be plotted
wspace: spacing between plots
plot_kw: kwargs for each plot
**kwargs: kwargs for the whole plot
Returns:
axes for plots
Examples:
>>> import pandas as pd
>>> import numpy as np
>>>
>>> idx = range(5)
>>> data = pd.DataFrame(dict(a=np.exp(idx), b=idx), index=idx)
>>> # plot_h(data=data, cols=['a', 'b'], wspace=.2, plot_kw=[dict(style='.-'), dict()]) |
12,552 | def _compute_nfp_uniform(l, u, cum_counts, sizes):
if l > u:
raise ValueError("l must be less or equal to u")
if l == 0:
n = cum_counts[u]
else:
n = cum_counts[u]-cum_counts[l-1]
return n * float(sizes[u] - sizes[l]) / float(2*sizes[u]) | Computes the expected number of false positives caused by using
u to approximate set sizes in the interval [l, u], assuming uniform
distribution of set sizes within the interval.
Args:
l: the lower bound on set sizes.
u: the upper bound on set sizes.
cum_counts: the complete cummulative distribution of set sizes.
sizes: the complete domain of set sizes.
Return (float): the expected number of false positives. |
12,553 | def disallow(nodes):
def disallowed(cls):
cls.unsupported_nodes = ()
for node in nodes:
new_method = _node_not_implemented(node, cls)
name = 'visit_{node}'.format(node=node)
cls.unsupported_nodes += (name,)
setattr(cls, name, new_method)
return cls
return disallowed | Decorator to disallow certain nodes from parsing. Raises a
NotImplementedError instead.
Returns
-------
disallowed : callable |
12,554 | def is_email_simple(value):
if '@' not in value or value.startswith('@') or value.endswith('@'):
return False
try:
p1, p2 = value.split('@')
except ValueError:
return False
if '.' not in p2 or p2.startswith('.'):
return False
return True | Return True if value looks like an email address. |
12,555 | def right_join_where(self, table, one, operator, two):
return self.join_where(table, one, operator, two, "right") | Add a "right join where" clause to the query
:param table: The table to join with, can also be a JoinClause instance
:type table: str or JoinClause
:param one: The first column of the join condition
:type one: str
:param operator: The operator of the join condition
:type operator: str
:param two: The second column of the join condition
:type two: str
:return: The current QueryBuilder instance
:rtype: QueryBuilder |
12,556 | def get_rt_data(self, code):
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of param in code is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
RtDataQuery.pack_req, RtDataQuery.unpack_rsp)
kargs = {
"code": code,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, rt_data_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
for x in rt_data_list:
x['code'] = code
col_list = [
'code', 'time', 'is_blank', 'opened_mins', 'cur_price',
'last_close', 'avg_price', 'volume', 'turnover'
]
rt_data_table = pd.DataFrame(rt_data_list, columns=col_list)
return RET_OK, rt_data_table | Get intraday (per-minute) data for the specified stock
:param code: stock code, e.g. HK.00700, US.APPL
:return: (ret, data)
ret == RET_OK returns a pd.DataFrame with the columns below
ret != RET_OK returns an error string
===================== =========== ==========================================================================
Field Type Description
===================== =========== ==========================================================================
code str stock code
time str time (yyyy-MM-dd HH:mm:ss); US stocks default to US Eastern time, HK and A-share stocks to Beijing time
is_blank bool data status; False for real data, True for synthesized placeholder data
opened_mins int minutes elapsed since midnight
cur_price float current price
last_close float previous close price
avg_price float average price
volume float trade volume
turnover float turnover amount
===================== =========== ========================================================================== |
12,557 | def scale_cb(self, setting, value):
zoomlevel = self.zoom.calc_level(value)
self.t_.set(zoomlevel=zoomlevel)
self.redraw(whence=0) | Handle callback related to image scaling. |
12,558 | def lnprior(x):
per, t0, b = x
if b < -1 or b > 1:
return -np.inf
elif per < 7 or per > 10:
return -np.inf
elif t0 < 1978 or t0 > 1979:
return -np.inf
else:
return 0. | Return the log prior given parameter vector `x`. |
12,559 | def getNodeRefs(self):
retn = []
for name, valu in self.props.items():
pobj = self.form.props.get(name)
if isinstance(pobj.type, s_types.Ndef):
retn.append((name, valu))
continue
if self.snap.model.forms.get(pobj.type.name) is None:
continue
ndef = (pobj.type.name, valu)
if ndef == self.ndef:
continue
retn.append((name, ndef))
return retn | Return a list of (prop, (form, valu)) refs out for the node. |
12,560 | def addSubsumableToGroups(self, proteinIds, groupIds):
for groupId in AUX.toList(groupIds):
self.groups[groupId].addSubsumableProteins(proteinIds)
self._addProteinIdsToGroupMapping(proteinIds, groupId) | Add one or multiple subsumable proteins to one or multiple protein
groups.
:param proteinIds: a proteinId or a list of proteinIds, a proteinId
must be a string.
:param groupIds: a groupId or a list of groupIds, a groupId
must be a string. |
12,561 | def _pop_api_call(self, method, _url, kwargs):
call_queue = self._call_queue.setdefault(self._get_thread_id(), [])
if not call_queue:
self._pre_process_call(name= % (_url, method),
endpoint_params=kwargs)
return call_queue.pop() | This will initialize an api_call or pop one that has already
been initialized with the endpoint parameters
:param method: str of the html method ['GET','POST','PUT','DELETE']
:param _url: str of the sub url of the api call (ex. g/device/list)
:param kwargs: dict of additional arguments
:return: ApiCall |
12,562 | def parent_ids(self):
action = os.path.join(self.record_url, "parent_ids")
res = requests.get(url=action, headers=HEADERS, verify=False)
res.raise_for_status()
return res.json()["biosamples"] | Returns an array of parent Biosample IDs. If the current Biosample has a part_of relationship,
the Biosampled referenced there will be returned. Otherwise, if the current Biosample was
generated from a pool of Biosamples (pooled_from_biosample_ids), then those will be returned.
Otherwise, the result will be an empty array. |
12,563 | def to_bytes(s, encoding="utf-8"):
if isinstance(s, six.binary_type):
return s
else:
return six.text_type(s).encode(encoding) | Converts the string to a bytes type, if not already.
:s: the string to convert to bytes
:returns: `str` on Python2 and `bytes` on Python3. |
12,564 | def make_stats(data, perfile, fsamplehits, fbarhits, fmisses, fdbars):
outhandle = os.path.join(data.dirs.fastqs, 's1_demultiplex_stats.txt')  # filename elided in source
outfile = open(outhandle, 'w')
outfile.write('{:<35} {:>13}{:>13}{:>13}\n'.\
format("raw_file", "total_reads", "cut_found", "bar_matched"))
r1names = sorted(perfile)
for fname in r1names:
dat = perfile[fname]
outfile.write('{:<35} {:>13}{:>13}{:>13}\n'.\
format(fname, dat[0], dat[1], dat[2]))
if 'pair' in data.paramsdict["datatype"]:
fname = fname.replace("_R1_", "_R2_")
outfile.write('{:<35} {:>13}{:>13}{:>13}\n'.\
format(fname, dat[0], dat[1], dat[2]))
outfile.write('\n{:<35} {:>13}\n'.format("sample_name", "total_reads"))  # format strings elided in source
snames = set()
for sname in data.barcodes:
if "-technical-replicate-" in sname:
sname = sname.rsplit("-technical-replicate", 1)[0]
snames.add(sname)
for sname in sorted(list(snames)):
outfile.write("{:<35} {:>13}\n".format(sname, fsamplehits[sname]))
outfile.write('\n{:<35} {:>13} {:>13} {:>13}\n'.\
format("sample_name", "true_bar", "obs_bar", "N_records"))
for sname in sorted(data.barcodes):
if "-technical-replicate-" in sname:
fname = sname.rsplit("-technical-replicate", 1)[0]
else:
fname = sname
hit = data.barcodes[sname]
offhitstring = ""
if fname in fdbars:
offkeys = list(fdbars.get(fname))
for offhit in offkeys[::-1]:
if offhit not in data.barcodes.values():
offhitstring += '{:<35} {:>13} {:>13} {:>13}\n'.\
format(sname, hit, offhit, fbarhits[offhit]/2)
outfile.write('{:<35} {:>13} {:>13} {:>13}\n'.\
format(sname, hit, hit, fbarhits[hit]/2))
outfile.write(offhitstring)
misskeys = list(fmisses.keys())
misskeys.sort(key=fmisses.get)
for key in misskeys[::-1]:
outfile.write('{:<35} {:>13} {:>13} {:>13}\n'.\
format("no_match", "_", key, fmisses[key]))
outfile.close()
for sname in snames:
sample = Sample()
sample.name = sname
barcodes = []
for n in xrange(500):
fname = sname+"-technical-replicate-{}".format(n)
fbar = data.barcodes.get(fname)
if fbar:
barcodes.append(fbar)
if barcodes:
sample.barcode = barcodes
else:
sample.barcode = data.barcodes[sname]
if 'pair' in data.paramsdict["datatype"]:
sample.files.fastqs = [(os.path.join(data.dirs.fastqs,
sname+"_R1_.fastq.gz"),
os.path.join(data.dirs.fastqs,
sname+"_R2_.fastq.gz"))]
else:
sample.files.fastqs = [(os.path.join(data.dirs.fastqs,
sname+"_R1_.fastq.gz"), "")]
sample.stats["reads_raw"] = int(fsamplehits[sname])
sample.stats_dfs.s1["reads_raw"] = int(fsamplehits[sname])
if sample.stats["reads_raw"]:
sample.stats.state = 1
data.samples[sample.name] = sample
else:
print("Excluded sample: no data found for", sname)
data.stats_dfs.s1 = data._build_stat("s1")
data.stats_files.s1 = outhandle | Write stats and stores to Assembly object. |
12,565 | def delay(self, dl=0):
if dl is None:
time.sleep(self.dl)
elif dl < 0:
sys.stderr.write(
"delay cannot less than zero, this takes no effects.\n")
else:
time.sleep(dl) | Delay for ``dl`` seconds. |
12,566 | def set(self, *components):
self.reset()
if len(components) == 1:
self.append(components[0])
else:
for comp in components:
self.append(comp) | Set the possible components of the block
:param components: components to append (Optionables or Composables) |
12,567 | def _read_metrics(repo, metrics, branch):
res = {}
for out, typ, xpath in metrics:
assert out.scheme == "local"
if not typ:
typ = os.path.splitext(out.path.lower())[1].replace(".", "")
if out.use_cache:
open_fun = open
path = repo.cache.local.get(out.checksum)
else:
open_fun = repo.tree.open
path = out.path
try:
with open_fun(path) as fd:
metric = _read_metric(
fd,
typ=typ,
xpath=xpath,
rel_path=out.rel_path,
branch=branch,
)
except IOError as e:
if e.errno == errno.ENOENT:
logger.warning(
NO_METRICS_FILE_AT_REFERENCE_WARNING.format(
out.rel_path, branch
)
)
metric = None
else:
raise
if not metric:
continue
res[out.rel_path] = metric
return res | Read the content of each metric file and format it.
Args:
metrics (list): List of metric tuples
branch (str): Branch to look up for metrics.
Returns:
A dict mapping keys with metrics path name and content.
For example:
{'metric.csv': ("value_mse deviation_mse data_set\n"
"0.421601 0.173461 train\n"
"0.67528 0.289545 testing\n"
"0.671502 0.297848 validation\n")} |
12,568 | def tags_getrelated(tag):
method = 'flickr.tags.getRelated'
data = _doget(method, auth=False, tag=tag)
if isinstance(data.rsp.tags.tag, list):
return [tag.text for tag in data.rsp.tags.tag]
else:
return [data.rsp.tags.tag.text] | Gets the related tags for given tag. |
12,569 | def AddArguments(cls, argument_group):
argument_group.add_argument(
'--...', metavar='...', dest='...', type=str, default='',
action='store', help=(
'...'))  # option names, dests and help strings elided in source
argument_group.add_argument(
'--...', '-...', dest='...', type=int, default=5,
action='store', help=(
'...'))
argument_group.add_argument(
'--...', dest='...', action='store_true', default=False, help=(
'...'))
argument_group.add_argument(
'...', nargs='?', action='store', metavar='...', default=None,
type=str, help=(
'...').format(
cls._DOCUMENTATION_URL)) | Adds command line arguments to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group. |
12,570 | def transform(self, X, y=None, **params):
data = numpy.dstack((numpy.array(X).T[0], range(len(X))))[0]
phase, order = data[data[:,0].argsort()].T
design_matrix = self.design_matrix(phase, self.degree)
return design_matrix[order.argsort()] | Transforms *X* from phase-space to Fourier-space, returning the design
matrix produced by :func:`Fourier.design_matrix` for input to a
regressor.
**Parameters**
X : array-like, shape = [n_samples, 1]
Column vector of phases.
y : None, optional
Unused argument for conformity (default None).
**Returns**
design_matrix : array-like, shape = [n_samples, 2*degree+1]
Fourier design matrix produced by :func:`Fourier.design_matrix`. |
12,571 | def precision_score(df, col_true=None, col_pred='prediction_result', pos_label=1, average=None):
if not col_pred:
col_pred = get_field_name_by_role(df, FieldRole.PREDICTED_CLASS)
mat, label_list = _run_cm_node(df, col_true, col_pred)
class_dict = dict((label, idx) for idx, label in enumerate(label_list))
tps = np.diag(mat)
pred_count = np.sum(mat, axis=0)
if average is None:
return tps * 1.0 / pred_count
elif average == 'binary':
class_idx = class_dict[pos_label]
return tps[class_idx] * 1.0 / pred_count[class_idx]
elif average == 'micro':
return np.sum(tps) / np.sum(pred_count)
elif average == 'macro':
return np.mean(tps * 1.0 / pred_count)
elif average == 'weighted':
support = np.sum(mat, axis=1)
return np.sum(tps * 1.0 / pred_count * support) / np.sum(support) |
Compute precision of a predicted DataFrame. Precision is defined as :math:`\frac{TP}{TP + FP}`
:Parameters:
- **df** - predicted data frame
- **col_true** - column name of true label
- **col_pred** - column name of predicted label, 'prediction_result' by default.
- **pos_label** - denote the desired class label when ``average`` == `binary`
- **average** - denote the method to compute average.
:Returns:
Precision score
:Return type:
float or numpy.array[float]
The parameter ``average`` controls the behavior of the function.
- When ``average`` == None (by default), precision of every class is given as a list.
- When ``average`` == 'binary', precision of class specified in ``pos_label`` is given.
- When ``average`` == 'micro', STP / (STP + SFP) is given, where STP and SFP are summations of TP and FP over all classes.
- When ``average`` == 'macro', average precision of all the class is given.
- When ``average`` == `weighted`, average precision of all the class weighted by support of every true classes is given.
:Example:
Assume we have a table named 'predicted' as follows:
======== ===================
label prediction_result
======== ===================
0 0
1 2
2 1
0 0
1 0
2 1
======== ===================
Different options of ``average`` parameter outputs different values:
.. code-block:: python
>>> precision_score(predicted, 'label', average=None)
array([ 0.66..., 0. , 0. ])
>>> precision_score(predicted, 'label', average='macro')
0.22
>>> precision_score(predicted, 'label', average='micro')
0.33
>>> precision_score(predicted, 'label', average='weighted')
0.22 |
12,572 | async def trigger(self, event, kwargs):
await self._queue.put((event, kwargs))
self._resume_processing.set() | Enqueue an event for processing |
12,573 | def active_joined_organisations(doc):
# field names and state values elided in source; plausible reconstruction:
if doc.get('type') == 'user' and doc.get('state') != 'inactive':
for org_id, state in doc.get('organisations', {}).items():
if state['state'] != 'active':
continue
org = {'organisation_id': org_id}
yield [doc['_id'], None], org
try:
yield [doc['_id'], state['role']], org
except KeyError:
pass | View for getting organisations associated with a user |
12,574 | def get_proteome_counts_impute_missing(prots_filtered_feathers, outpath, length_filter_pid=None,
copynum_scale=False, copynum_df=None,
force_rerun=False):
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outpath):
big_strain_counts_df = pd.DataFrame()
first = True
for feather in prots_filtered_feathers:
loaded = load_feather(protein_feather=feather, length_filter_pid=length_filter_pid,
copynum_scale=copynum_scale,
copynum_df=copynum_df)
if first:
big_strain_counts_df = pd.DataFrame(index=_all_counts, columns=loaded.columns)
first = False
new_columns = list(set(loaded.columns.tolist()).difference(big_strain_counts_df.columns))
if new_columns:
for col in new_columns:
big_strain_counts_df[col] = big_strain_counts_df.mean(axis=1)
not_in_loaded = list(set(big_strain_counts_df.columns).difference(loaded.columns.tolist()))
if not_in_loaded:
for col in not_in_loaded:
big_strain_counts_df[col] = big_strain_counts_df[col] + loaded.mean(axis=1)
big_strain_counts_df = big_strain_counts_df.add(loaded, fill_value=0)
if len(big_strain_counts_df) > 0:
big_strain_counts_df.astype(float).reset_index().to_feather(outpath)
return big_strain_counts_df
else:
return pd.read_feather(outpath).set_index('index')
12,575 | def get_time_remaining_estimate(self):
power_status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(pointer(power_status)):
raise WinError()
if POWER_TYPE_MAP[power_status.ACLineStatus] == common.POWER_TYPE_AC:
return common.TIME_REMAINING_UNLIMITED
elif power_status.BatteryLifeTime == -1:
return common.TIME_REMAINING_UNKNOWN
else:
return float(power_status.BatteryLifeTime) / 60.0 | Returns time remaining estimate according to GetSystemPowerStatus().BatteryLifeTime |
12,576 | def _init_glyph(self, plot, mapping, properties):
box = Span(level=properties.get('level', 'glyph'), **mapping)
plot.renderers.append(box)
return None, box | Returns a Bokeh glyph object. |
12,577 | def convert(self, *args, **kwargs):
self.strings()
self.metadata()
self.result.save(self.output()) | Yes it is, thanks captain. |
12,578 | def _prepare_value(val, maxlen=50, notype=False):
if val is None or val is True or val is False:
return str(val)
sval = repr(val)
sval = sval.replace("\n", " ").replace("\t", " ").replace("`", "'")
if len(sval) > maxlen:
sval = sval[:maxlen - 4] + "..." + sval[-1]
if notype:
return sval
else:
tval = checker_for_type(type(val)).name()
return "%s of type %s" % (sval, tval) | Stringify value `val`, ensuring that it is not too long. |
12,579 | def _create_group_assignment(self, mesh_axes):
partitioning = {}
for logical_pnum in xrange(self.size):
group = mtf.pnum_to_group(self.shape, mesh_axes, logical_pnum)
if group not in partitioning:
partitioning[group] = []
partitioning[group].append(self.l2p(logical_pnum))
group_assignment = []
for group, physical_pnums in partitioning.items():
group_assignment.append(physical_pnums)
return group_assignment | Create group assignment for XLA cross replica ops (physical pnums). |
12,580 | def show(self, baseAppInstance):
self.from_dict_to_fields(self.configDict)
super(ProjectConfigurationDialog, self).show(baseAppInstance) | Allows to show the widget as root window |
12,581 | def share_column_widths(self, tables, shared_limit=None):
for table in tables:
record = (table, shared_limit)
if record not in self.shared_tables and table is not self:
self.shared_tables.append(record) | Synchronize this table's column widths with the given tables.
Note, this will need to be called on the other tables to be fully
synced.
:param tables: list of SeabornTables to share column widths
:param shared_limit: int if diff is greater than this than ignore it.
:return: None |
12,582 | def get_location_observation(lat, lng, token):
req = requests.get(
API_ENDPOINT_GEO % (lat, lng),
params={
'token': token
})
if req.status_code == 200 and req.json()["status"] == "ok":
return parse_observation_response(req.json()["data"])
return {} | Lookup observations by geo coordinates. |
12,583 | def get_absolute_url_with_date(self):
pub_date = self.published_on
if pub_date and settings.USE_TZ:
pub_date = make_naive(pub_date, pytz.utc)
pub_date = pytz.timezone(settings.TIME_ZONE).localize(pub_date)
if pub_date:
args = [
pub_date.strftime("%Y"),
pub_date.strftime("%m"),
pub_date.strftime("%d"),
self.slug
]
else:
args = [self.slug]
# URL pattern name elided in source:
return reverse('...', args=args) | URL based on the entry's date & slug. |
12,584 | def queryResponse(self, queryEngine, query=None, vendorSpecific=None, **kwargs):
return self.GET(
['query', queryEngine, query], headers=vendorSpecific, query=kwargs
) | CNRead.query(session, queryEngine, query) → OctetStream
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNRead.query.
Args:
queryEngine:
query:
vendorSpecific:
**kwargs:
Returns: |
12,585 | def draw_cloud_tree(self,
axes=None,
html=False,
fixed_order=True,
**kwargs):
if not self.treelist:
print("Treelist is empty")
return None, None
if not self.all_tips_shared:
print("All trees in treelist do not share the same tips")
return None, None
try:
changed_autoformat = False
if not html:
toyplot.config.autoformat = "png"
changed_autoformat = True
mstyle = STYLES['m']  # style key elided in source
mstyle.update(
{i: j for (i, j) in kwargs.items() if
(j is not None) & (i != "tip_labels")}
)
for tree in self.treelist:
tree.style.update(mstyle)
draw = CloudTree(self.treelist, **kwargs)
if kwargs.get("debug"):
return draw
canvas, axes = draw.update(axes)
return canvas, axes
finally:
if changed_autoformat:
toyplot.config.autoformat = "html" | Draw a series of trees overlapping each other in coordinate space.
The order of tip_labels is fixed in cloud trees so that trees with
discordant relationships can be seen in conflict. To change the tip
order use the 'fixed_order' argument in toytree.mtree() when creating
the MultiTree object.
Parameters:
axes (toyplot.Cartesian): toyplot Cartesian axes object.
html (bool): whether to return the drawing as html (default=PNG).
edge_styles: (list): option to enter a list of edge dictionaries.
**kwargs (dict): styling options should be input as a dictionary. |
12,586 | def warn_startup_with_shell_off(platform, gdb_args):
darwin_match = re.match(r"darwin-(\d+)\..*", platform)
on_darwin = darwin_match is not None and int(darwin_match.groups()[0]) >= 16
if on_darwin:
shell_is_off = "startup-with-shell off" in gdb_args
return not shell_is_off
return False | return True if user may need to turn shell off
if mac OS version is 16 (sierra) or higher, may need to set shell off due
to os's security requirements
http://stackoverflow.com/questions/39702871/gdb-kind-of-doesnt-work-on-macos-sierra |
12,587 | def lines_table(html_doc, tofloat=True):
soup = BeautifulSoup(html_doc, "html.parser")
linestables = []
elements = soup.p.next_elements
for element in elements:
tabletup = []
if not _has_name(element):
continue
if element.name == 'table':
beforetable = []
prev_elements = element.previous_elements
for prev_element in prev_elements:
if not _has_name(prev_element):
continue
if prev_element.name not in ('br', None):
if prev_element.name in ('table', 'hr'):  # two additional boundary tags elided in source
break
if prev_element.parent.name == "p":
pass
else:
if prev_element.get_text():
beforetable.append(prev_element.get_text())
beforetable.reverse()
tabletup.append(beforetable)
function_selector = {True:table2val_matrix, False:table2matrix}
function = function_selector[tofloat]
tabletup.append(function(element))
if tabletup:
linestables.append(tabletup)
return linestables | return a list of [(lines, table), .....]
lines = all the significant lines before the table.
These are lines between this table and
the previous table or 'hr' tag
table = rows -> [[cell1, cell2, ..], [cell1, cell2, ..], ..]
The lines act as a description for what is in the table |
12,588 | def getInstance(self, aLocation, axisOnly=False, getFactors=False):
self._collectAxisPoints()
factors = self.getFactors(aLocation, axisOnly)
total = None
for f, item, name in factors:
if total is None:
total = f * item
continue
total += f * item
if total is None:
total = 0 * self._neutral
if getFactors:
return total, factors
return total | Calculate the delta at aLocation.
* aLocation: a Location object, expected to be in bent space
* axisOnly:
* True: calculate an instance only with the on-axis masters.
* False: calculate an instance with on-axis and off-axis masters.
* getFactors:
* True: return a list of the calculated factors. |
12,589 | def stop_threadsafe(self):
if self.stopped:
return
try:
self._loop.run_coroutine(self.stop())
except asyncio.TimeoutError:
raise TimeoutExpiredError("Timeout stopping task {} with {} subtasks".format(self.name, len(self.subtasks))) | Stop this task from another thread and wait for it to finish.
This method must not be called from within the BackgroundEventLoop but
will inject self.stop() into the event loop and block until it
returns.
Raises:
TimeoutExpiredError: If the task does not stop in the given
timeout specified in __init__() |
12,590 | def render_customizations(self):
# string literals elided in source; plausible reconstruction:
disable_plugins = self.pt.customize_conf.get('disable_plugins', [])
if not disable_plugins:
logger.debug('no plugins to disable in user customizations')
else:
for plugin in disable_plugins:
try:
self.pt.remove_plugin(plugin['plugin_type'], plugin['plugin_name'],
'disabled at user request')
except KeyError:
logger.info('invalid disable_plugins entry, skipping')
enable_plugins = self.pt.customize_conf.get('enable_plugins', [])
if not enable_plugins:
logger.debug('no plugins to enable in user customizations')
else:
for plugin in enable_plugins:
try:
msg = 'enabled at user request'
self.pt.add_plugin(plugin['plugin_type'], plugin['plugin_name'],
plugin['plugin_args'], msg)
except KeyError:
logger.info('invalid enable_plugins entry, skipping') | Customize template with site-user-specified customizations |
12,591 | def skewvT(self,R,romberg=False,nsigma=None,phi=0.):
surfmass= self.surfacemass(R,romberg=romberg,nsigma=nsigma,
use_physical=False)
vt= self._vmomentsurfacemass(R,0,1,romberg=romberg,nsigma=nsigma)\
/surfmass
vt2= self._vmomentsurfacemass(R,0,2,romberg=romberg,nsigma=nsigma)\
/surfmass
vt3= self._vmomentsurfacemass(R,0,3,romberg=romberg,nsigma=nsigma)\
/surfmass
s2= vt2-vt**2.
return (vt3-3.*vt*vt2+2.*vt**3.)*s2**(-1.5) | NAME:
skewvT
PURPOSE:
calculate skew in vT at R by marginalizing over velocity
INPUT:
R - radius at which to calculate <vR> (can be Quantity)
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
KEYWORDS:
romberg - if True, use a romberg integrator (default: False)
OUTPUT:
skewvT
HISTORY:
2011-12-07 - Written - Bovy (NYU) |
12,592 | def format_list(self, at_char, user, list_name):
# HTML template elided in source; a plausible Twitter list link:
return '<a href="https://twitter.com/%s/%s">%s%s/%s</a>' \
% (user, list_name, at_char, user, list_name) | Return formatted HTML for a list. |
12,593 | def add_logging(parser, log_format=LOG_FORMAT, log_level=LOG_LEVEL, color=True):
parser.set_defaults(log_level=log_level)
parser.add_argument('-v', dest='log_level', action=_LogLevelAddAction, const=1, help='increase the log level')
parser.add_argument('-q', dest='log_level', action=_LogLevelAddAction, const=-1, help='decrease the log level')
root_logger = logging.getLogger()
root_logger.setLevel(log_level)
handler = logging.StreamHandler()
if hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
class ColorAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, True)
handler.setFormatter(_ColorLogFormatter(log_format))
class NoColorAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, False)
handler.setFormatter(_NoColorLogFormatter(log_format))
parser.add_argument('--color', dest='color', action=ColorAction, nargs=0, help='enable color logging')
parser.add_argument('--no-color', dest='color', action=NoColorAction, nargs=0, help='disable color logging')
if color:
formatter_class = _ColorLogFormatter
else:
formatter_class = _NoColorLogFormatter
else:
formatter_class = _NoColorLogFormatter
handler.setFormatter(formatter_class(log_format))
root_logger.addHandler(handler) | Configures the `argparse.ArgumentParser` with arguments to configure
logging.
This adds arguments:
* ``-v`` to increase the log level
* ``-q`` to decrease the log level
* ``--color`` to enable color logging when available
* ``--no-color`` to disable color logging
The root logger is configured with the given format and log level. ANSI
color codes are supported in the logging format string. If color is enabled
and stderr is a tty, the codes will be passed through. Otherwise the
logging formatter will strip them out. The logging format supports these
additional format variables for coloration:
%(levelcolor)s If stderr is a terminal, an ANSI color code
appropriate for the level of the logged record.
%(resetcolor)s If stderr is a terminal, an ANSI color reset code. |
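Sketch of intended usage, assuming the helper formatter and action classes referenced above are defined in the same module:

import argparse
import logging

parser = argparse.ArgumentParser()
add_logging(parser)                     # wires -v/-q (and --color on a tty)
args = parser.parse_args(['-v', '-v'])  # two -v flags step the log level twice
logging.getLogger(__name__).debug('visible once the threshold is low enough')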
12,594 | def validate_dict(in_dict, **kwargs):
if not isinstance(in_dict, dict):
        raise ValueError('in_dict is not a dict')  # message reconstructed
for key, value in kwargs.iteritems():
        if key == 'required_keys':  # kwarg name inferred from the loop below
for required_key in value:
if required_key not in in_dict:
return False
elif key not in in_dict:
continue
elif value == bool:
            in_dict[key] = (True
                            if str(in_dict[key]).lower() == 'true'
                            else False)
else:
if (isinstance(in_dict[key], list) and
len(in_dict[key]) == 1 and
value != list):
in_dict[key] = in_dict[key][0]
try:
if key in in_dict:
in_dict[key] = value(in_dict[key])
except ValueError:
return False
return True | Returns Boolean of whether given dict conforms to type specifications
given in kwargs. |
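Hypothetical usage under the assumed 'required_keys' kwarg name (Python 2, matching the iteritems call above); single-element lists are unwrapped before coercion:

query = {'count': ['3'], 'flag': 'true', 'name': ['demo']}
ok = validate_dict(query, count=int, flag=bool, name=str,
                   required_keys=['count'])
# ok is True; query is coerced in place: count -> 3, flag -> True, name -> 'demo'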
12,595 | def grid_at_redshift_from_image_plane_grid_and_redshift(self, image_plane_grid, redshift):
image_plane_grid_stack = grids.GridStack(regular=image_plane_grid, sub=np.array([[0.0, 0.0]]),
blurring=np.array([[0.0, 0.0]]))
tracer = TracerMultiPlanes(galaxies=self.galaxies, image_plane_grid_stack=image_plane_grid_stack,
border=None, cosmology=self.cosmology)
for plane_index in range(0, len(self.plane_redshifts)):
new_grid_stack = image_plane_grid_stack
if redshift <= tracer.plane_redshifts[plane_index]:
if plane_index > 0:
for previous_plane_index in range(plane_index):
scaling_factor = cosmology_util.scaling_factor_between_redshifts_from_redshifts_and_cosmology(
redshift_0=tracer.plane_redshifts[previous_plane_index], redshift_1=redshift,
redshift_final=tracer.plane_redshifts[-1], cosmology=tracer.cosmology)
scaled_deflection_stack = lens_util.scaled_deflection_stack_from_plane_and_scaling_factor(
plane=tracer.planes[previous_plane_index], scaling_factor=scaling_factor)
new_grid_stack = \
lens_util.grid_stack_from_deflection_stack(grid_stack=new_grid_stack,
deflection_stack=scaled_deflection_stack)
elif plane_index == 0:
return new_grid_stack.regular
return new_grid_stack.regular | For an input grid of (y,x) arc-second image-plane coordinates, ray-trace the coordinates to any redshift in \
the strong lens configuration.
This is performed using multi-plane ray-tracing and the existing redshifts and planes of the tracer. However, \
any redshift can be input even if a plane does not exist there, including redshifts before the first plane \
of the lensing system.
Parameters
----------
image_plane_grid : ndarray or grids.RegularGrid
The image-plane grid which is traced to the redshift.
redshift : float
The redshift the image-plane grid is traced to. |
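Hypothetical call, assuming a configured multi-plane tracer and a (y,x) arc-second grid:

import numpy as np

image_plane_grid = np.array([[1.0, 1.0], [2.0, 0.5]])  # (y, x) in arcsec
traced_grid = tracer.grid_at_redshift_from_image_plane_grid_and_redshift(
    image_plane_grid=image_plane_grid, redshift=0.5)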
12,596 | def get_group(self, group_id):
    response = self._do_request(
        'GET', '/v2/groups/{group_id}'.format(group_id=group_id))
return self._parse_response(response, MarathonGroup) | Get a single group.
:param str group_id: group ID
:returns: group
:rtype: :class:`marathon.models.group.MarathonGroup` |
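Hypothetical usage (the client constructor and URL are assumptions; Marathon group IDs are path-like):

client = MarathonClient('http://marathon.example.com:8080')
group = client.get_group('/my/app-group')
print(group.id)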
12,597 | def _execute_command(self, command, sql):
if not self._sock:
        raise err.InterfaceError("(0, '')")
if self._result is not None:
if self._result.unbuffered_active:
warnings.warn("Previous unbuffered result was left incomplete")
self._result._finish_unbuffered_query()
while self._result.has_next:
self.next_result()
self._result = None
if isinstance(sql, text_type):
sql = sql.encode(self.encoding)
packet_size = min(MAX_PACKET_LEN, len(sql) + 1)
    prelude = struct.pack('<iB', packet_size, command)
packet = prelude + sql[:packet_size-1]
self._write_bytes(packet)
if DEBUG: dump_packet(packet)
self._next_seq_id = 1
if packet_size < MAX_PACKET_LEN:
return
sql = sql[packet_size-1:]
while True:
packet_size = min(MAX_PACKET_LEN, len(sql))
self.write_packet(sql[:packet_size])
sql = sql[packet_size:]
if not sql and packet_size < MAX_PACKET_LEN:
break | :raise InterfaceError: If the connection is closed.
:raise ValueError: If no username was specified. |
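The '<iB' framing above works because packing the size as a little-endian 32-bit int yields the protocol's 3 length bytes followed by a zero sequence-id byte (valid while the size stays below 2**24). A standalone sketch of the framing:

import struct

MAX_PACKET_LEN = 2**24 - 1
COM_QUERY = 0x03  # MySQL command byte for a text query

sql = b"SELECT 1"
packet_size = min(MAX_PACKET_LEN, len(sql) + 1)  # +1 for the command byte
prelude = struct.pack('<iB', packet_size, COM_QUERY)
packet = prelude + sql[:packet_size - 1]
assert packet[:3] == struct.pack('<I', packet_size)[:3]  # 3-byte length
assert packet[3] == 0                                    # sequence id 0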
12,598 | def operator(self):
    # operator keys reconstructed from the docstring; exact casing is assumed
    return {
        'EQ': operator.eq,
        'NE': operator.ne,
        'GT': operator.gt,
        'GE': operator.ge,
        'LT': operator.lt,
        'LE': operator.le,
        'SW': self._starts_with,
        'IN': self._in,
        'NI': self._ni,
    } | Supported Filter Operators
+ EQ - Equal To
+ NE - Not Equal To
+ GT - Greater Than
+ GE - Greater Than or Equal To
+ LT - Less Than
+ LE - Less Than or Equal To
+ SW - Starts With
+ IN - In String or Array
+ NI - Not in String or Array |
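With uppercase keys as assumed above, the map can drive a simple filter:

import operator

ops = {'EQ': operator.eq, 'GT': operator.gt, 'LE': operator.le}
rows = [{'severity': 3}, {'severity': 7}]
matches = [r for r in rows if ops['GT'](r['severity'], 5)]
print(matches)  # [{'severity': 7}]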
12,599 | def median_slitlets_rectified(
input_image,
mode=0,
minimum_slitlet_width_mm=EMIR_MINIMUM_SLITLET_WIDTH_MM,
maximum_slitlet_width_mm=EMIR_MAXIMUM_SLITLET_WIDTH_MM,
debugplot=0
):
image_header = input_image[0].header
image2d = input_image[0].data
naxis2_expected = EMIR_NBARS * EMIR_NPIXPERSLIT_RECTIFIED
naxis2, naxis1 = image2d.shape
if naxis2 != naxis2_expected:
raise ValueError("NAXIS2={0} should be {1}".format(
naxis2, naxis2_expected
))
    instrument = image_header['INSTRUME']
    if instrument != 'EMIR':
        raise ValueError("INSTRUME keyword is not 'EMIR'!")
if mode == 0:
image2d_median = np.zeros((naxis2, naxis1))
else:
image2d_median = np.zeros((EMIR_NBARS, naxis1))
for i in range(EMIR_NBARS):
ns1 = i * EMIR_NPIXPERSLIT_RECTIFIED + 1
ns2 = ns1 + EMIR_NPIXPERSLIT_RECTIFIED - 1
sp_median = np.median(image2d[(ns1-1):ns2, :], axis=0)
if mode == 0:
image2d_median[(ns1-1):ns2, :] = np.tile(
sp_median, (EMIR_NPIXPERSLIT_RECTIFIED, 1)
)
else:
image2d_median[i] = np.copy(sp_median)
if mode == 2:
csu_config = CsuConfiguration.define_from_header(image_header)
        crpix1 = image_header['CRPIX1']
        crval1 = image_header['CRVAL1']
        cdelt1 = image_header['CDELT1']
list_useful_slitlets = csu_config.widths_in_range_mm(
minwidth=minimum_slitlet_width_mm,
maxwidth=maximum_slitlet_width_mm
)
list_not_useful_slitlets = [i for i in list(range(1, EMIR_NBARS + 1))
if i not in list_useful_slitlets]
if abs(debugplot) != 0:
            print('list_useful_slitlets:', list_useful_slitlets)
            print('list_not_useful_slitlets:', list_not_useful_slitlets)
mask2d, borders = define_mask_borders(image2d_median, sought_value=0)
if abs(debugplot) % 10 != 0:
ximshow(mask2d.astype(int), z1z2=(-.2, 1.2), crpix1=crpix1,
crval1=crval1, cdelt1=cdelt1, debugplot=debugplot)
for islitlet in list_not_useful_slitlets:
mask2d[islitlet - 1, :] = np.array([True] * naxis1)
if abs(debugplot) % 10 != 0:
ximshow(mask2d.astype(int), z1z2=(-.2, 1.2), crpix1=crpix1,
crval1=crval1, cdelt1=cdelt1, debugplot=debugplot)
image2d_masked = image2d_median * (1 - mask2d.astype(int))
if abs(debugplot) % 10 != 0:
ximshow(image2d_masked, crpix1=crpix1, crval1=crval1,
cdelt1=cdelt1, debugplot=debugplot)
image2d_masked = np.ma.masked_array(image2d_median, mask=mask2d)
image1d_median = np.ma.median(image2d_masked, axis=0).data
image_median = fits.PrimaryHDU(data=image1d_median,
header=image_header)
else:
image_median = fits.PrimaryHDU(data=image2d_median,
header=image_header)
return fits.HDUList([image_median]) | Compute median spectrum for each slitlet.
Parameters
----------
input_image : HDUList object
Input 2D image.
mode : int
Indicate desired result:
0 : image with the same size as the input image, with the
median spectrum of each slitlet spanning all the spectra
of the corresponding slitlet
1 : image with 55 spectra, containing the median spectra of
each slitlet
2 : single collapsed median spectrum, using exclusively the
useful slitlets from the input image
minimum_slitlet_width_mm : float
Minimum slitlet width (mm) for a valid slitlet.
maximum_slitlet_width_mm : float
Maximum slitlet width (mm) for a valid slitlet.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
image_median : HDUList object
Output image. |
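The per-slitlet collapse above is a tiled median; a minimal numpy sketch with toy dimensions (the real EMIR_NPIXPERSLIT_RECTIFIED constant differs):

import numpy as np

NBARS, NPIXPERSLIT, NAXIS1 = 55, 10, 100  # toy sizes, not the EMIR constants
image2d = np.random.default_rng(0).random((NBARS * NPIXPERSLIT, NAXIS1))

medians = np.empty((NBARS, NAXIS1))
for i in range(NBARS):
    block = image2d[i * NPIXPERSLIT:(i + 1) * NPIXPERSLIT, :]
    medians[i] = np.median(block, axis=0)  # mode=1: one spectrum per slitlet

full = np.repeat(medians, NPIXPERSLIT, axis=0)  # mode=0: tiled to input size
assert full.shape == image2d.shape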