Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
9,100 | def workdir_is_clean(self, quiet=False):
self.run(, **RUN_KWARGS)
unchanged = True
try:
self.run(, report_error=False, **RUN_KWARGS)
except exceptions.Failure:
unchanged = False
if not quiet:
notify.warning()
self.run(, **RUN_KWARGS)
try:
self.run(, report_error=False, **RUN_KWARGS)
except exceptions.Failure:
unchanged = False
if not quiet:
notify.warning()
self.run(, **RUN_KWARGS)
return unchanged | Check for uncommitted changes, return `True` if everything is clean.
Inspired by http://stackoverflow.com/questions/3878624/. |
9,101 | def load_response_microservices(plugin_path, plugins, internal_attributes, base_url):
response_services = _load_microservices(plugin_path, plugins, _response_micro_service_filter, internal_attributes,
base_url)
logger.info("Loaded response micro services: %s" % [type(k).__name__ for k in response_services])
return response_services | Loads response micro services (handling outgoing responses).
:type plugin_path: list[str]
:type plugins: list[str]
:type internal_attributes: dict[string, dict[str, str | list[str]]]
:type base_url: str
:rtype: satosa.micro_service.service_base.ResponseMicroService
:param plugin_path: Path to the plugin directory
:param plugins: A list with the name of the plugin files
:param base_url: base url of the SATOSA server
:return: Response micro service |
9,102 | def _validate(self, key, cls=None):
if key not in self.manifest:
raise ValueError("Manifest %s requires %s."
% (self.manifest_path, key))
if cls:
if not isinstance(self.manifest[key], cls):
raise TypeError("Manifest value %s should be %s, not %s"
% (key, cls, type(self.manifest[key]))) | Verify the manifest schema. |
9,103 | def _rescale_and_convert_field_inplace(self, array, name, scale, zero):
self._rescale_array(array[name], scale, zero)
if array[name].dtype == numpy.bool:
array[name] = self._convert_bool_array(array[name])
return array | Apply FITS scalings. Also convert bool columns to proper
numpy boolean values |
9,104 | def get_plugin_actions(self):
quit_action = create_action(self, _("&Quit"),
icon=ima.icon(),
tip=_("Quit"),
triggered=self.quit)
self.register_shortcut(quit_action, "_", "Quit", "Ctrl+Q")
run_action = create_action(self, _("&Run..."), None,
ima.icon(),
_("Run a Python script"),
triggered=self.run_script)
environ_action = create_action(self,
_("Environment variables..."),
icon=ima.icon(),
tip=_("Show and edit environment variables"
" (for current session)"),
triggered=self.show_env)
syspath_action = create_action(self,
_("Show sys.path contents..."),
icon=ima.icon(),
tip=_("Show (read-only) sys.path"),
triggered=self.show_syspath)
buffer_action = create_action(self,
_("Buffer..."), None,
tip=_("Set maximum line count"),
triggered=self.change_max_line_count)
exteditor_action = create_action(self,
_("External editor path..."), None, None,
_("Set external editor executable path"),
triggered=self.change_exteditor)
wrap_action = create_action(self,
_("Wrap lines"),
toggled=self.toggle_wrap_mode)
wrap_action.setChecked(self.get_option())
codecompletion_action = create_action(self,
_("Automatic code completion"),
toggled=self.toggle_codecompletion)
codecompletion_action.setChecked(self.get_option())
option_menu = QMenu(_(), self)
option_menu.setIcon(ima.icon())
add_actions(option_menu, (buffer_action, wrap_action,
codecompletion_action,
exteditor_action))
plugin_actions = [None, run_action, environ_action, syspath_action,
option_menu, MENU_SEPARATOR, quit_action,
self.undock_action]
return plugin_actions | Return a list of actions related to plugin |
9,105 | def stats_shooting(self, kind=, summary=False):
return self._get_stats_table(, kind=kind, summary=summary) | Returns a DataFrame of shooting stats. |
9,106 | def check_all_servers():
data = datatools.get_data()
for server_id in data["discord"]["servers"]:
is_in_client = False
for client_server in client.servers:
if server_id == client_server.id:
is_in_client = True
break
if not is_in_client:
remove_server_data(server_id) | Checks all servers, removing any that Modis isn't part of any more |
9,107 | def _validate_sort_field(self, sort_by):
if (
sort_by not in self.RESPONSE_FIELD_MAP
or not self.RESPONSE_FIELD_MAP[sort_by].is_sort
):
raise InvalidSortFieldException(
.format(sort_by)
) | :param sort_by: string
:raises: pybomb.exceptions.InvalidSortFieldException |
9,108 | def metafetcher(bamfile, metacontig2contig, metatag):
for metacontig in metacontig2contig:
for contig in metacontig2contig[metacontig]:
for read in bamfile.fetch(contig):
read.set_tag(metatag, metacontig)
yield read | return reads in order of metacontigs |
9,109 | def _get_headers(self):
headers = {
: .format(version=sys.version_info[0]),
:
}
if self.access_token:
headers[] = .format(self.access_token)
return headers | Built headers for request to IPinfo API. |
9,110 | def encode_exception(exception):
import sys
return AsyncException(unicode(exception),
exception.args,
sys.exc_info(),
exception) | Encode exception to a form that can be passed around and serialized.
This will grab the stack, then strip off the last two calls which are
encode_exception and the function that called it. |
9,111 | def from_metadata(self, db_path, db_name=):
self.__engine.fromMetadata(db_path, db_name)
return self | Registers in the current session the views of the MetadataSource so the
data is obtained from the metadata database instead of reading the
repositories with the DefaultSource.
:param db_path: path to the folder that contains the database.
:type db_path: str
:param db_name: name of the database file (engine_metadata.db) by default.
:type db_name: str
:returns: the same instance of the engine
:rtype: Engine |
9,112 | def p2th_address(self) -> Optional[str]:
if self.id:
return Kutil(network=self.network,
privkey=bytearray.fromhex(self.id)).address
else:
return None | P2TH address of this deck |
9,113 | def long_description(*filenames):
res = []
for filename in filenames:
with open(filename) as fp:
for line in fp:
res.append( + line)
res.append()
res.append()
return EMPTYSTRING.join(res) | Provide a long description. |
9,114 | def _shrink_update(self, rmstart: int, rmstop: int) -> None:
for spans in self._type_to_spans.values():
i = len(spans) - 1
while i >= 0:
s, e = span = spans[i]
if rmstop <= s:
rmlength = rmstop - rmstart
span[:] = s - rmlength, e - rmlength
i -= 1
continue
break
else:
continue
while True:
if rmstart <= s:
if rmstop < e:
span[:] = rmstart, e + rmstart - rmstop
i -= 1
if i < 0:
break
s, e = span = spans[i]
continue
spans.pop(i)[:] = -1, -1
i -= 1
if i < 0:
break
s, e = span = spans[i]
continue
break
while i >= 0:
if e <= rmstart:
i -= 1
if i < 0:
break
s, e = span = spans[i]
continue
span[1] -= rmstop - rmstart
i -= 1
if i < 0:
break
s, e = span = spans[i]
continue | Update self._type_to_spans according to the removed span.
Warning: If an operation involves both _shrink_update and
_insert_update, you may want to do the
_insert_update before the _shrink_update as this function
can cause data loss in self._type_to_spans. |
9,115 | def configfield_ref_role(name, rawtext, text, lineno, inliner,
options=None, content=None):
node = pending_configfield_xref(rawsource=text)
return [node], [] | Process a role that references the Task configuration field nodes
created by the ``lsst-config-fields`` and ``lsst-task-config-subtasks``
directives.
Parameters
----------
name
The role name used in the document.
rawtext
The entire markup snippet, with role.
text
The text marked with the role.
lineno
The line number where ``rawtext`` appears in the input.
inliner
The inliner instance that called us.
options
Directive options for customization.
content
The directive content for customization.
Returns
-------
nodes : `list`
List of nodes to insert into the document.
messages : `list`
List of system messages.
See also
--------
`format_configfield_id`
`pending_configfield_xref`
`process_pending_configfield_xref_nodes` |
9,116 | def plot_mean_field_conv(N=1, n=0.5, Uspan=np.arange(0, 3.6, 0.5)):
sl = Spinon(slaves=2*N, orbitals=N, avg_particles=2*n,
hopping=[0.5]*2*N, orbital_e=[0]*2*N)
hlog = solve_loop(sl, Uspan, [0.])[1]
f, (ax1, ax2) = plt.subplots(2, sharex=True)
for field in hlog:
field = np.asarray(field)
ax1.semilogy(abs(field[1:]-field[:-1]))
ax2.plot(field)
plt.title()
ax1.set_ylabel()
ax2.set_ylabel()
plt.xlabel()
return hlog | Generates the plot of the convergence of the mean field in a single
site spin Hamiltonian with N degenerate half-filled orbitals |
9,117 | def sanitize_and_wrap(self, task_id, args, kwargs):
dep_failures = []
new_args = []
for dep in args:
if isinstance(dep, Future):
try:
new_args.extend([dep.result()])
except Exception as e:
if self.tasks[dep.tid][] in FINAL_FAILURE_STATES:
dep_failures.extend([e])
else:
new_args.extend([dep])
for key in kwargs:
dep = kwargs[key]
if isinstance(dep, Future):
try:
kwargs[key] = dep.result()
except Exception as e:
if self.tasks[dep.tid][] in FINAL_FAILURE_STATES:
dep_failures.extend([e])
if in kwargs:
new_inputs = []
for dep in kwargs[]:
if isinstance(dep, Future):
try:
new_inputs.extend([dep.result()])
except Exception as e:
if self.tasks[dep.tid][] in FINAL_FAILURE_STATES:
dep_failures.extend([e])
else:
new_inputs.extend([dep])
kwargs[] = new_inputs
return new_args, kwargs, dep_failures | This function should be called **ONLY** when all the futures we track have been resolved.
If the user hid futures a level below, we will not catch
them, and this will (most likely) result in a type error.
Args:
task_id (uuid str) : Task id
func (Function) : App function
args (List) : Positional args to app function
kwargs (Dict) : Kwargs to app function
Return:
partial function evaluated with all dependencies in args, kwargs and kwargs['inputs'] evaluated. |
9,118 | def import_keypair(kwargs=None, call=None):
if call != :
log.error(
)
return False
if not kwargs:
kwargs = {}
if not in kwargs:
log.error()
return False
if not in kwargs:
log.error()
return False
params = {: ,
: kwargs[]}
public_key_file = kwargs[]
if os.path.exists(public_key_file):
with salt.utils.files.fopen(public_key_file, ) as fh_:
public_key = salt.utils.stringutils.to_unicode(fh_.read())
if public_key is not None:
params[] = base64.b64encode(public_key)
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver=)
return data | Import an SSH public key.
.. versionadded:: 2015.8.3 |
9,119 | def find_sdl_attrs(prefix: str) -> Iterator[Tuple[str, Any]]:
from tcod._libtcod import lib
if prefix.startswith("SDL_"):
name_starts_at = 4
elif prefix.startswith("SDL"):
name_starts_at = 3
else:
name_starts_at = 0
for attr in dir(lib):
if attr.startswith(prefix):
yield attr[name_starts_at:], getattr(lib, attr) | Return names and values from `tcod.lib`.
`prefix` is used to filter out which names to copy. |
9,120 | def read_array(self, key, start=None, stop=None):
import tables
node = getattr(self.group, key)
attrs = node._v_attrs
transposed = getattr(attrs, , False)
if isinstance(node, tables.VLArray):
ret = node[0][start:stop]
else:
dtype = getattr(attrs, , None)
shape = getattr(attrs, , None)
if shape is not None:
ret = np.empty(shape, dtype=dtype)
else:
ret = node[start:stop]
if dtype == :
ret = _set_tz(ret, getattr(attrs, , None), coerce=True)
elif dtype == :
ret = np.asarray(ret, dtype=)
if transposed:
return ret.T
else:
return ret | Read an array for the specified node (off of group). |
9,121 | def get_range(self):
classes = concrete_descendents(self.class_)
d=OrderedDict((name,class_) for name,class_ in classes.items())
if self.allow_None:
d[]=None
return d | Return the possible types for this parameter's value.
(I.e. return {name: <class>} for all classes that are
concrete_descendents() of self.class_.)
Only classes from modules that have been imported are added
(see concrete_descendents()). |
9,122 | def simple_moving_matrix(x, n=10):
if x.ndim > 1 and len(x[0]) > 1:
x = np.average(x, axis=1)
h = n / 2
o = 0 if h * 2 == n else 1
xx = []
for i in range(h, len(x) - h):
xx.append(x[i-h:i+h+o])
return np.array(xx) | Create simple moving matrix.
Parameters
----------
x : ndarray
A numpy array
n : integer
The number of sample points used to make average
Returns
-------
ndarray
A 2-D numpy array of sliding windows (one row of n samples per position),
which is useful for calculating a confidence interval of the simple moving average |
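A brief usage sketch (not part of the row above): the same window matrix built with plain NumPy, then used for a moving average and a naive confidence band. The signal, window size, and the ±1.96·std band are illustrative assumptions.

```python
import numpy as np

# Illustrative signal and window size (assumptions, not from the original code).
x = np.sin(np.linspace(0, 6 * np.pi, 200)) + 0.3 * np.random.randn(200)
n = 10
h = n // 2  # integer half-window, mirroring the function above for even n

# One row per centre point; each row is the window x[i-h:i+h].
windows = np.array([x[i - h:i + h] for i in range(h, len(x) - h)])

moving_avg = windows.mean(axis=1)
moving_std = windows.std(axis=1)
band = (moving_avg - 1.96 * moving_std, moving_avg + 1.96 * moving_std)
print(windows.shape, moving_avg.shape)  # (190, 10) (190,)
```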
9,123 | def get_templates(self):
use = getattr(self, , )
if isinstance(use, list):
return [n.strip() for n in use if n.strip()]
return [n.strip() for n in use.split() if n.strip()] | Get list of templates this object uses
:return: list of templates
:rtype: list |
9,124 | def local_assortativity_wu_sign(W):
n = len(W)
np.fill_diagonal(W, 0)
r_pos = assortativity_wei(W * (W > 0))
r_neg = assortativity_wei(W * (W < 0))
str_pos, str_neg, _, _ = strengths_und_sign(W)
loc_assort_pos = np.zeros((n,))
loc_assort_neg = np.zeros((n,))
for curr_node in range(n):
jp = np.where(W[curr_node, :] > 0)
loc_assort_pos[curr_node] = np.sum(np.abs(str_pos[jp] -
str_pos[curr_node])) / str_pos[curr_node]
jn = np.where(W[curr_node, :] < 0)
loc_assort_neg[curr_node] = np.sum(np.abs(str_neg[jn] -
str_neg[curr_node])) / str_neg[curr_node]
loc_assort_pos = ((r_pos + 1) / n -
loc_assort_pos / np.sum(loc_assort_pos))
loc_assort_neg = ((r_neg + 1) / n -
loc_assort_neg / np.sum(loc_assort_neg))
return loc_assort_pos, loc_assort_neg | Local assortativity measures the extent to which nodes are connected to
nodes of similar strength. Adapted from Thedchanamoorthy et al. 2014
formula to allow weighted/signed networks.
Parameters
----------
W : NxN np.ndarray
undirected connection matrix with positive and negative weights
Returns
-------
loc_assort_pos : Nx1 np.ndarray
local assortativity from positive weights
loc_assort_neg : Nx1 np.ndarray
local assortativity from negative weights |
9,125 | def add_message(self, text, type=None):
key = self._msg_key
self.setdefault(key, [])
self[key].append(message(type, text))
self.save() | Add a message with an optional type. |
9,126 | def decode_transformer(encoder_output,
encoder_decoder_attention_bias,
targets,
hparams,
name,
task=None,
causal=True):
orig_hparams = hparams
with tf.variable_scope(name):
if task is None:
task = hparams.task
if task == "translate":
targets = common_layers.flatten4d3d(targets)
decoder_input, decoder_self_bias = (
transformer.transformer_prepare_decoder(targets, hparams))
decoder_input = tf.nn.dropout(decoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
if not causal:
decoder_self_bias *= 0.
decoder_output = transformer.transformer_decoder(
decoder_input,
encoder_output,
decoder_self_bias,
encoder_decoder_attention_bias,
hparams)
decoder_output = tf.expand_dims(decoder_output, axis=2)
else:
assert task == "image"
inputs = None
targets = tf.reshape(targets, [tf.shape(targets)[0], hparams.img_len,
hparams.img_len,
hparams.num_channels*hparams.hidden_size])
decoder_input, _, _ = cia.prepare_decoder(targets, hparams)
bias = None
if not hparams.drop_inputs:
decoder_input += tf.reshape(
inputs,
[common_layers.shape_list(targets)[0], 1, 1, hparams.hidden_size])
decoder_output = cia.transformer_decoder_layers(
decoder_input,
encoder_output=None,
num_layers=hparams.num_decoder_layers or hparams.num_hidden_layers,
hparams=hparams,
self_attention_bias=bias,
attention_type=hparams.dec_attention_type,
name="decoder")
decoder_output_shape = common_layers.shape_list(decoder_output)
decoder_output = tf.reshape(decoder_output, [decoder_output_shape[0], -1, 1,
hparams.hidden_size])
hparams = orig_hparams
return decoder_output | Original Transformer decoder. |
9,127 | def cover(ctx, html=False):
header()
cmd =
if html:
cmd = .join((cmd, ))
with ctx.cd(ROOT):
ctx.run(cmd, pty=True) | Run tests suite with coverage |
9,128 | def global_maxpooling(attrs, inputs, proto_obj):
new_attrs = translation_utils._add_extra_attributes(attrs, {: True,
: (1, 1),
: })
return , new_attrs, inputs | Performs max pooling on the input. |
9,129 | def buffered_write(self, buf):
if self.closed:
raise ConnectionClosed()
if len(buf) + len(self.write_buffer) > self.max_size:
raise BufferOverflowError()
else:
self.write_buffer.extend(buf) | Appends a bytes-like object to the transport write buffer.
Raises BufferOverflowError if buf would cause the buffer to grow beyond
the specified maximum.
buf -- bytes to send |
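A self-contained re-sketch of the bounded-buffer rule the docstring describes; the class name and the use of the built-in ConnectionError/OverflowError in place of the project's own exception types are assumptions for illustration.

```python
class BoundedWriteBuffer:
    """Minimal stand-in for the transport's write buffer."""

    def __init__(self, max_size):
        self.max_size = max_size
        self.write_buffer = bytearray()
        self.closed = False

    def buffered_write(self, buf):
        if self.closed:
            raise ConnectionError("transport is closed")
        if len(buf) + len(self.write_buffer) > self.max_size:
            raise OverflowError("write would exceed max_size")
        self.write_buffer.extend(buf)

b = BoundedWriteBuffer(max_size=8)
b.buffered_write(b"hello")   # 5 bytes buffered
b.buffered_write(b"abc")     # exactly fills the 8-byte buffer
# b.buffered_write(b"x")     # would raise OverflowError
```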
9,130 | def full_name(self, gender: Optional[Gender] = None,
reverse: bool = False) -> str:
if gender is None:
gender = get_random_item(Gender, rnd=self.random)
if gender and isinstance(gender, Gender):
gender = gender
else:
raise NonEnumerableError(Gender)
fmt = if reverse else
return fmt.format(
self.name(gender),
self.surname(gender),
) | Generate a random full name.
:param reverse: Return reversed full name.
:param gender: Gender's enum object.
:return: Full name.
:Example:
Johann Wolfgang. |
9,131 | def index(credentials=None):
user, oauth_access_token = parsecredentials(credentials)
if not settings.ADMINS or user not in settings.ADMINS:
return flask.make_response(,403)
usersprojects = {}
totalsize = {}
for f in glob.glob(settings.ROOT + "projects/*"):
if os.path.isdir(f):
u = os.path.basename(f)
usersprojects[u], totalsize[u] = getprojects(u)
usersprojects[u].sort()
return withheaders(flask.make_response(flask.render_template(,
version=VERSION,
system_id=settings.SYSTEM_ID,
system_name=settings.SYSTEM_NAME,
system_description=settings.SYSTEM_DESCRIPTION,
system_author=settings.SYSTEM_AUTHOR,
system_version=settings.SYSTEM_VERSION,
system_email=settings.SYSTEM_EMAIL,
user=user,
url=getrooturl(),
usersprojects = sorted(usersprojects.items()),
totalsize=totalsize,
allow_origin=settings.ALLOW_ORIGIN,
oauth_access_token=oauth_encrypt(oauth_access_token)
)), "text/html; charset=UTF-8", {:settings.ALLOW_ORIGIN}) | Get list of projects |
9,132 | def failover(self, sync=None, force=None):
req_body = self._cli.make_body(sync=sync, force=force)
resp = self.action(, **req_body)
resp.raise_if_err()
return resp | Fails over a replication session.
:param sync: True - sync the source and destination resources before
failing over the asynchronous replication session or keep them in
sync after failing over the synchronous replication session.
False - don't sync.
:param force: True - skip pre-checks on file system(s) replication
sessions of a NAS server when a replication failover is issued from
the source NAS server.
False - don't skip pre-checks. |
9,133 | def process_config(self):
if in self.config:
if isinstance(self.config[], basestring):
self.config[] = self.config[].split()
if in self.config:
self.config[] = str_to_bool(self.config[])
if in self.config:
self.config[] = str_to_bool(
self.config[])
if ((self.config.get(, None) and
self.config.get(, None))):
raise DiamondException(
+
% self.configfile)
if self.config.get(, None):
self.config[] = re.compile(
self.config[])
elif self.config.get(, None):
self.config[] = re.compile(
self.config[]) | Intended to put any code that should be run after any config reload
event |
9,134 | def main(
gpus:Param("The GPUs to use for distributed training", str)=,
script:Param("Script to run", str, opt=False)=,
args:Param("Args to pass to script", nargs=, opt=False)=
):
"PyTorch distributed training launch helper that spawns multiple distributed processes"
current_env = os.environ.copy()
gpus = list(range(torch.cuda.device_count())) if gpus== else list(gpus)
current_env["WORLD_SIZE"] = str(len(gpus))
current_env["MASTER_ADDR"] =
current_env["MASTER_PORT"] =
processes = []
for i,gpu in enumerate(gpus):
current_env["RANK"] = str(i)
cmd = [sys.executable, "-u", script, f"--gpu={gpu}"] + args
process = subprocess.Popen(cmd, env=current_env)
processes.append(process)
for process in processes: process.wait() | PyTorch distributed training launch helper that spawns multiple distributed processes |
9,135 | def _pluck_feature_pacts(pr_body: str) -> FrozenSet[str]:
feature_pact_lines = [line for line in pr_body.split()
if line.startswith()]
if not feature_pact_lines:
return frozenset()
if len(feature_pact_lines) > 1:
raise GithubPrError(f)
return frozenset(feature_pact_lines[0].split()[1:]) | # Returns set with one feature pact if specified
>>> body = 'gitlab: mygitlaburl.com\\r\\nfeature-pacts: zh-feature-a'
>>> _pluck_feature_pacts(body) == frozenset({'zh-feature-a'})
True
# Returns set with multiple feature pacts if specified
>>> body = 'gitlab: mygitlaburl.com\\r\\nfeature-pacts: zh-feature-a eh-feature-b\\r\\nurgent!'
>>> _pluck_feature_pacts(body) == frozenset({'zh-feature-a', 'eh-feature-b'})
True
# Returns empty set if no feature-pacts line
>>> body = 'gitlab: mygitlaburl.com\\r\\n\\r\\nJust adding some documentation'
>>> _pluck_feature_pacts(body)
frozenset()
# Returns empty set if feature-pacts line with no tags
>>> body = 'gitlab: mygitlaburl.com\\r\\n\\r\\nfeature-pacts:\\r\\nJust adding some docs'
>>> _pluck_feature_pacts(body)
frozenset()
# Raises a GithubPrError if multiple feature-pact lines found
>>> body = 'feature-pacts: zh-feature-a\\r\\nfeature-pacts: eh-feature-b'
>>> _pluck_feature_pacts(body)
Traceback (most recent call last):
...
faaspact_verifier.delivery.github_prs.GithubPrError: ... |
9,136 | def __process_warc_gz_file(self, path_name):
counter_article_total = 0
counter_article_passed = 0
counter_article_discarded = 0
start_time = time.time()
with open(path_name, ) as stream:
for record in ArchiveIterator(stream):
if record.rec_type == :
counter_article_total += 1
filter_pass, article = self.__filter_record(record)
if filter_pass:
counter_article_passed += 1
if not article:
article = NewsPlease.from_warc(record)
self.__logger.info(, article.source_domain, article.date_publish,
article.title)
self.__callback_on_article_extracted(article)
else:
counter_article_discarded += 1
if article:
self.__logger.info(, article.source_domain,
article.date_publish,
article.title)
else:
self.__logger.info(,
record.rec_headers.get_header())
if counter_article_total % 10 == 0:
elapsed_secs = time.time() - start_time
secs_per_article = elapsed_secs / counter_article_total
self.__logger.info()
self.__logger.info(, counter_article_passed,
counter_article_discarded, counter_article_total)
self.__logger.info(,
human(start_time), secs_per_article)
if self.__delete_warc_after_extraction:
os.remove(path_name)
self.__register_fully_extracted_warc_file(self.__warc_download_url) | Iterates all transactions in one WARC file and for each transaction tries to extract an article object.
Afterwards, each article is checked against the filter criteria and if all are passed, the function
on_valid_article_extracted is invoked with the article object.
:param path_name:
:return: |
9,137 | def _getData(self, data):
if not isinstance(data, dict):
raise ValidationError(
% (str(type(data)),))
return data | Check that data is acceptable and return it.
Default behavior is that the data has to be of type `dict`. In derived
classes this method could for example allow `None` or empty strings and
just return empty dictionary.
:raises: ``ValidationError`` if data is missing or wrong type
:return: the data to be validated |
9,138 | def getAnalogChannelData(self,ChNumber):
if not self.DatFileContent:
print "No data file content. Use the method ReadDataFile first"
return 0
if (ChNumber > self.A):
print "Channel number greater than the total number of channels."
return 0
str_struct = "ii%dh" %(self.A + int(numpy.ceil((float(self.D)/float(16)))))
NB = 4 + 4 + self.A*2 + int(numpy.ceil((float(self.D)/float(16))))*2
N = self.getNumberOfSamples()
values = numpy.empty((N,1))
ch_index = self.An.index(ChNumber)
for i in range(N):
data = struct.unpack(str_struct,self.DatFileContent[i*NB:(i*NB)+NB])
values[i] = data[ChNumber+1]
values = values * self.a[ch_index]
values = values + self.b[ch_index]
return values | Returns an array of numbers containing the data values of the channel
number "ChNumber".
ChNumber is the number of the channel as given in the .cfg file. |
9,139 | def fixLabel(label, maxlen, delim=None, repl=, truncend=True):
if len(label) <= maxlen:
return label
else:
maxlen -= len(repl)
if delim is not None:
if truncend:
end = label.rfind(delim, 0, maxlen)
if end > 0:
return label[:end+1] + repl
else:
start = label.find(delim, len(label) - maxlen)
if start > 0:
return repl + label[start:]
if truncend:
return label[:maxlen] + repl
else:
return repl + label[-maxlen:] | Truncate long graph and field labels.
@param label: Label text.
@param maxlen: Maximum field label length in characters.
No maximum field label length is enforced by default.
@param delim: Delimiter for field labels; labels longer than
maxlen will preferably be truncated at the delimiter.
@param repl: Replacement string for truncated part.
@param truncend: Truncate the end of label name if True. (Default)
The beginning part of label will be truncated if False. |
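Illustrative calls, assuming `fixLabel` is importable from its original module; because the default for `repl` is elided in the code above, it is passed explicitly here as '..' (an assumption), and the label is made up.

```python
label = "eth0.rx_bytes.per_second.average"   # made-up Munin-style label

# Plain truncation keeps the first maxlen - len(repl) characters:
print(fixLabel(label, 20, repl='..'))              # 'eth0.rx_bytes.per_..'

# With a delimiter, the cut prefers a '.' boundary so the label stays readable:
print(fixLabel(label, 20, delim='.', repl='..'))   # 'eth0.rx_bytes...'

# truncend=False keeps the end of the label instead:
print(fixLabel(label, 20, repl='..', truncend=False))  # '..per_second.average'
```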
9,140 | def get_special_scen_code(regions, emissions):
if sorted(set(PART_OF_SCENFILE_WITH_EMISSIONS_CODE_0)) == sorted(set(emissions)):
scenfile_emissions_code = 0
elif sorted(set(PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1)) == sorted(set(emissions)):
scenfile_emissions_code = 1
else:
msg = "Could not determine scen special code for emissions {}".format(emissions)
raise ValueError(msg)
if set(regions) == set(["WORLD"]):
scenfile_region_code = 1
elif set(regions) == set(["WORLD", "OECD90", "REF", "ASIA", "ALM"]):
scenfile_region_code = 2
elif set(regions) == set(["WORLD", "R5OECD", "R5REF", "R5ASIA", "R5MAF", "R5LAM"]):
scenfile_region_code = 3
elif set(regions) == set(
["WORLD", "R5OECD", "R5REF", "R5ASIA", "R5MAF", "R5LAM", "BUNKERS"]
):
scenfile_region_code = 4
try:
return scenfile_region_code * 10 + scenfile_emissions_code
except NameError:
msg = "Could not determine scen special code for regions {}".format(regions)
raise ValueError(msg) | Get special code for MAGICC6 SCEN files.
At the top of every MAGICC6 and MAGICC5 SCEN file there is a two digit
number. The first digit, the 'scenfile_region_code' tells MAGICC how many regions
data is being provided for. The second digit, the 'scenfile_emissions_code', tells
MAGICC which gases are in the SCEN file.
The variables which are part of ``PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1`` are the
emissions species which are expected when scenfile_emissions_code is 1. Similarly,
``PART_OF_SCENFILE_WITH_EMISSIONS_CODE_0`` defines the emissions species which are
expected when scenfile_emissions_code is 0.
Having these definitions allows Pymagicc to check that the right
set of emissions has been provided before writing SCEN files.
Parameters
----------
regions : list_like
Regions to get code for.
emissions : list-like
Emissions to get code for.
Raises
------
ValueError
If the special scen code cannot be determined.
Returns
-------
int
The special scen code for the regions-emissions combination provided. |
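A worked example of just the arithmetic the docstring describes (the actual membership checks against the predefined region and emissions sets are omitted here): a five-region scenario whose emissions exactly match the code-1 species set gets header code 21.

```python
# Region set {"WORLD", "OECD90", "REF", "ASIA", "ALM"}  -> scenfile_region_code = 2
# Emissions exactly matching the code-1 species set     -> scenfile_emissions_code = 1
scenfile_region_code = 2
scenfile_emissions_code = 1
special_scen_code = scenfile_region_code * 10 + scenfile_emissions_code
print(special_scen_code)  # 21  (first digit: regions, second digit: gas set)
```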
9,141 | def update_record(self, name, recordid, content, username, password):
req = requests.put(self.api_server + + name + +
str(recordid), data=json.dumps(content),
auth=(username, password))
return req | Update record |
9,142 | def make_cell(table, span, widths, heights, use_headers):
width = get_span_char_width(span, widths)
height = get_span_char_height(span, heights)
text_row = span[0][0]
text_column = span[0][1]
text = table[text_row][text_column]
lines = text.split("\n")
for i in range(len(lines)):
width_difference = width - len(lines[i])
lines[i] = .join([lines[i], " " * width_difference])
height_difference = height - len(lines)
empty_lines = []
for i in range(0, height_difference):
empty_lines.append(" " * width)
lines.extend(empty_lines)
output = [
.join(["+", (width * "-") + "+"])
]
for i in range(0, height):
output.append("|" + lines[i] + "|")
if use_headers and span[0][0] == 0:
symbol = "="
else:
symbol = "-"
output.append(
.join(["+", width * symbol, "+"])
)
text = "\n".join(output)
row_count = get_span_row_count(span)
column_count = get_span_column_count(span)
cell = Cell(text, text_row, text_column, row_count, column_count)
return cell | Convert the contents of a span of the table to a grid table cell
Parameters
----------
table : list of lists of str
The table of rows containg strings to convert to a grid table
span : list of lists of int
list of [row, column] pairs that make up a span in the table
widths : list of int
list of the column widths of the table
heights : list of int
list of the heights of each row in the table
use_headers : bool
Whether or not to use headers in the table
Returns
-------
cell : dashtable.data2rst.Cell |
9,143 | def contourf_to_geojson_overlap(contourf, geojson_filepath=None, min_angle_deg=None,
ndigits=5, unit=, stroke_width=1, fill_opacity=.9,
geojson_properties=None, strdump=False, serialize=True):
polygon_features = []
contourf_idx = 0
for collection in contourf.collections:
color = collection.get_facecolor()
for path in collection.get_paths():
for coord in path.to_polygons():
if min_angle_deg:
coord = keep_high_angle(coord, min_angle_deg)
coord = np.around(coord, ndigits) if ndigits else coord
polygon = Polygon(coordinates=[coord.tolist()])
fcolor = rgb2hex(color[0])
properties = set_contourf_properties(stroke_width, fcolor, fill_opacity, contourf.levels, contourf_idx, unit)
if geojson_properties:
properties.update(geojson_properties)
feature = Feature(geometry=polygon, properties=properties)
polygon_features.append(feature)
contourf_idx += 1
feature_collection = FeatureCollection(polygon_features)
return _render_feature_collection(feature_collection, geojson_filepath, strdump, serialize) | Transform matplotlib.contourf to geojson with overlapping filled contours. |
9,144 | def printData(self, output = sys.stdout):
self.printDatum("Name : ", self.fileName, output)
self.printDatum("Author : ", self.author, output)
self.printDatum("Repository : ", self.repository, output)
self.printDatum("Category : ", self.category, output)
self.printDatum("Downloads : ", self.downloads, output)
self.printDatum("Date Uploaded : ", self.fileDate, output)
self.printDatum("File Size : ", self.fileSize, output)
self.printDatum("Documentation : ", self.documentation, output)
self.printDatum("Source Code : ", self.sourceCode, output)
self.printDatum("Description : ", self.description, output)
print >> output, "\n\n" | Output all the file data to be written to any writable output |
9,145 | async def update_object(obj, only=None):
warnings.warn("update_object() is deprecated, Manager.update() "
"should be used instead",
DeprecationWarning)
field_dict = dict(obj.__data__)
pk_field = obj._meta.primary_key
if only:
field_dict = obj._prune_fields(field_dict, only)
if not isinstance(pk_field, peewee.CompositeKey):
field_dict.pop(pk_field.name, None)
else:
field_dict = obj._prune_fields(field_dict, obj.dirty_fields)
rows = await update(obj.update(**field_dict).where(obj._pk_expr()))
obj._dirty.clear()
return rows | Update object asynchronously.
:param obj: object to update
:param only: list or tuple of fields to update; if `None`, all fields are
updated
This function does the same as `Model.save()`_ for an already saved object,
but it doesn't invoke the ``save()`` method on the model class. That is
important to know if you have overridden the save method for your model.
.. _Model.save(): http://peewee.readthedocs.io/en/latest/peewee/
api.html#Model.save |
9,146 | def detokenize(self, inputs, delim=):
detok = delim.join([self.idx2tok[idx] for idx in inputs])
detok = detok.replace(self.separator + , )
detok = detok.replace(self.separator, )
detok = detok.replace(config.BOS_TOKEN, )
detok = detok.replace(config.EOS_TOKEN, )
detok = detok.replace(config.PAD_TOKEN, )
detok = detok.strip()
return detok | Detokenizes single sentence and removes token separator characters.
:param inputs: sequence of tokens
:param delim: tokenization delimiter
returns: string representing detokenized sentence |
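An illustration of the same idea with a made-up '@@' subword separator and explicit special tokens, since the actual separator and BOS/EOS/PAD strings are elided in the code above.

```python
idx2tok = {0: '<bos>', 1: 'he@@', 2: 'llo', 3: 'wor@@', 4: 'ld', 5: '<eos>'}
inputs = [0, 1, 2, 3, 4, 5]

detok = ' '.join(idx2tok[idx] for idx in inputs)
detok = detok.replace('@@ ', '')          # rejoin subword pieces
for special in ('<bos>', '<eos>', '<pad>'):
    detok = detok.replace(special, '')    # drop special tokens
print(detok.strip())                      # 'hello world'
```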
9,147 | def perform_request(self, request):
connection = self.get_connection(request)
try:
connection.putrequest(request.method, request.path)
self.send_request_headers(connection, request.headers)
self.send_request_body(connection, request.body)
if DEBUG_REQUESTS and request.body:
print()
try:
print(request.body)
except:
pass
resp = connection.getresponse()
status = int(resp.status)
message = resp.reason
respheaders = resp.getheaders()
for i, value in enumerate(respheaders):
respheaders[i] = (value[0].lower(), value[1])
respbody = None
if resp.length is None:
respbody = resp.read()
elif resp.length > 0:
respbody = resp.read(resp.length)
if DEBUG_RESPONSES and respbody:
print()
try:
print(respbody)
except:
pass
response = HTTPResponse(
status, resp.reason, respheaders, respbody)
if status == 307:
new_url = urlparse(dict(respheaders)[])
request.host = new_url.hostname
request.path = new_url.path
request.path, request.query = self._update_request_uri_query(request)
return self.perform_request(request)
if status >= 300:
raise HTTPError(status, message, respheaders, respbody)
return response
finally:
connection.close() | Sends request to cloud service server and return the response. |
9,148 | def DateStringToDateObject(date_string):
if re.match(, date_string) == None:
return None
try:
return datetime.date(int(date_string[0:4]), int(date_string[4:6]),
int(date_string[6:8]))
except ValueError:
return None | Return a date object for a string "YYYYMMDD". |
9,149 | def _load(self, filename=None):
if not filename:
filename = self.filename
wb_ = open_workbook(filename)
self.rsr = {}
sheet_names = []
for sheet in wb_.sheets():
if sheet.name in [, ]:
continue
ch_name = AHI_BAND_NAMES.get(
sheet.name.strip(), sheet.name.strip())
sheet_names.append(sheet.name.strip())
self.rsr[ch_name] = {: None,
: None}
wvl = np.array(
sheet.col_values(0, start_rowx=5, end_rowx=5453))
resp = np.array(
sheet.col_values(2, start_rowx=5, end_rowx=5453))
self.rsr[ch_name][] = wvl
self.rsr[ch_name][] = resp | Load the Himawari AHI RSR data for the band requested |
9,150 | def get(remote_file, local_file):
board_files = files.Files(_board)
contents = board_files.get(remote_file)
if local_file is None:
print(contents.decode("utf-8"))
else:
local_file.write(contents) | Retrieve a file from the board.
Get will download a file from the board and print its contents or save it
locally. You must pass at least one argument which is the path to the file
to download from the board. If you don't specify a second argument then
the file contents will be printed to standard output. However if you pass
a file name as the second argument then the contents of the downloaded file
will be saved to that file (overwriting anything inside it!).
For example to retrieve the boot.py and print it out run:
ampy --port /board/serial/port get boot.py
Or to get main.py and save it as main.py locally run:
ampy --port /board/serial/port get main.py main.py |
9,151 | def get_relative_abundance(biomfile):
biomf = biom.load_table(biomfile)
norm_biomf = biomf.norm(inplace=False)
rel_abd = {}
for sid in norm_biomf.ids():
rel_abd[sid] = {}
for otuid in norm_biomf.ids("observation"):
otuname = oc.otu_name(norm_biomf.metadata(otuid, axis="observation")["taxonomy"])
otuname = " ".join(otuname.split("_"))
abd = norm_biomf.get_value_by_ids(otuid, sid)
rel_abd[sid][otuname] = abd
ast_rel_abd = bc.arcsine_sqrt_transform(rel_abd)
return ast_rel_abd | Return arcsine transformed relative abundance from a BIOM format file.
:type biomfile: BIOM format file
:param biomfile: BIOM format file used to obtain relative abundances for each OTU in
a SampleID, which are used as node sizes in network plots.
:type return: Dictionary of dictionaries.
:return: Dictionary keyed on SampleID whose value is a dictionary keyed on OTU Name
whose value is the arcsine-transformed relative abundance value for that
SampleID-OTU Name pair. |
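The arcsine square-root transform itself, shown with plain NumPy on made-up relative abundances; this is presumably what `bc.arcsine_sqrt_transform` applies per value (values must lie in [0, 1]).

```python
import numpy as np

rel_abd = np.array([0.0, 0.04, 0.25, 1.0])      # made-up relative abundances
transformed = np.arcsin(np.sqrt(rel_abd))
print(np.round(transformed, 4))                 # [0.     0.2014 0.5236 1.5708]
```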
9,152 | def pool(arr, block_size, func, cval=0, preserve_dtype=True):
from . import dtypes as iadt
iadt.gate_dtypes(arr,
allowed=["bool", "uint8", "uint16", "uint32", "int8", "int16", "int32",
"float16", "float32", "float64", "float128"],
disallowed=["uint64", "uint128", "uint256", "int64", "int128", "int256",
"float256"],
augmenter=None)
do_assert(arr.ndim in [2, 3])
is_valid_int = is_single_integer(block_size) and block_size >= 1
is_valid_tuple = is_iterable(block_size) and len(block_size) in [2, 3] \
and [is_single_integer(val) and val >= 1 for val in block_size]
do_assert(is_valid_int or is_valid_tuple)
if is_single_integer(block_size):
block_size = [block_size, block_size]
if len(block_size) < arr.ndim:
block_size = list(block_size) + [1]
input_dtype = arr.dtype
arr_reduced = skimage.measure.block_reduce(arr, tuple(block_size), func, cval=cval)
if preserve_dtype and arr_reduced.dtype.type != input_dtype:
arr_reduced = arr_reduced.astype(input_dtype)
return arr_reduced | Resize an array by pooling values within blocks.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested (2)
* ``uint64``: no (1)
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested (2)
* ``int64``: no (1)
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: yes; tested (2)
* ``bool``: yes; tested
- (1) results too inaccurate (at least when using np.average as func)
- (2) Note that scikit-image documentation says that the wrapped pooling function converts
inputs to float64. Actual tests showed no indication of that happening (at least when
using preserve_dtype=True).
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pool. Ideally of datatype ``numpy.float64``.
block_size : int or tuple of int
Spatial size of each group of values to pool, aka kernel size.
If a single integer, then a symmetric block of that size along height and width will be used.
If a tuple of two values, it is assumed to be the block size along height and width of the image-like,
with pooling happening per channel.
If a tuple of three values, it is assumed to be the block size along height, width and channels.
func : callable
Function to apply to a given block in order to convert it to a single number,
e.g. :func:`numpy.average`, :func:`numpy.min`, :func:`numpy.max`.
cval : number, optional
Value to use in order to pad the array along its border if the array cannot be divided
by `block_size` without remainder.
preserve_dtype : bool, optional
Whether to convert the array back to the input datatype if it is changed away from
that in the pooling process.
Returns
-------
arr_reduced : (H',W') ndarray or (H',W',C') ndarray
Array after pooling. |
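A minimal sketch of the same pooling done with scikit-image directly, which is what the function wraps; the 4x4 uint8 array and 2x2 average kernel are illustrative.

```python
import numpy as np
import skimage.measure

arr = np.arange(16, dtype=np.uint8).reshape(4, 4)

# 2x2 average pooling; cast back to the input dtype to mimic preserve_dtype=True.
pooled = skimage.measure.block_reduce(arr, (2, 2), np.mean, cval=0)
pooled = pooled.astype(arr.dtype)
print(pooled)
# [[ 2  4]
#  [10 12]]
```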
9,153 | def decode(self, bytes, raw=False):
code = super(EVRType, self).decode(bytes)
result = None
if raw:
result = code
elif code in self.evrs.codes:
result = self.evrs.codes[code]
else:
result = code
log.warn( % code)
return result | decode(bytearray, raw=False) -> value
Decodes the given bytearray according the corresponding
EVR Definition (:class:`EVRDefn`) for the underlying
'MSB_U16' EVR code.
If the optional parameter ``raw`` is ``True``, the EVR code
itself will be returned instead of the EVR Definition
(:class:`EVRDefn`). |
9,154 | def simple_newton(f, x0, lb=None, ub=None, infos=False, verbose=False, maxit=50, tol=1e-8, eps=1e-8, numdiff=True):
precision = x0.dtype
from numpy.linalg import solve
err = 1
it = 0
while err > tol and it <= maxit:
if not numdiff:
[res,dres] = f(x0)
else:
res = f(x0)
dres = numpy.zeros( (res.shape[0], x0.shape[0]), dtype=precision )
for i in range(x0.shape[0]):
xi = x0.copy()
xi[i] += eps
resi = f(xi)
dres[:,i] = (resi - res)/eps
dx = - solve(dres,res)
x = x0 + dx
print(.format(x0))
err = abs(res).max()
print(.format(it, err))
x0 = x
it += 1
if not infos:
return x
else:
return [x, it] | Solves many independent systems f(x)=0 simultaneously using a simple Newton iteration.
:param f: objective function to be solved, with values p x N. The second output argument represents the derivative with
values in (p x p x N)
:param x0: initial value ( p x N )
:return: solution x such that f(x) = 0 |
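A self-contained sketch of the same update (forward-difference Jacobian, then a Newton step via `numpy.linalg.solve`) on a small made-up system; it is not a call into the function above.

```python
import numpy as np

def f(x):
    # Made-up 2x2 system with a root at (1, 2).
    return np.array([x[0] ** 2 + x[1] - 3.0, x[0] + x[1] ** 2 - 5.0])

x = np.array([1.0, 1.0])            # made-up starting guess
eps, tol = 1e-8, 1e-10
for it in range(50):
    res = f(x)
    if np.abs(res).max() < tol:
        break
    # Forward-difference Jacobian, one column per variable (as in the code above).
    jac = np.empty((res.size, x.size))
    for i in range(x.size):
        xi = x.copy()
        xi[i] += eps
        jac[:, i] = (f(xi) - res) / eps
    x = x + np.linalg.solve(jac, -res)
print(it, x)                        # converges to approximately [1. 2.]
```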
9,155 | def construct_user_list(raw_users=None):
users = Users(oktypes=User)
for user_dict in raw_users:
public_keys = None
if user_dict.get():
public_keys = [PublicKey(b64encoded=x, raw=None)
for x in user_dict.get()]
users.append(User(name=user_dict.get(),
passwd=user_dict.get(),
uid=user_dict.get(),
gid=user_dict.get(),
home_dir=user_dict.get(),
gecos=user_dict.get(),
shell=user_dict.get(),
public_keys=public_keys,
sudoers_entry=user_dict.get()))
return users | Construct a list of User objects from a list of dicts. |
9,156 | def bond_canonical_statistics(
microcanonical_statistics,
convolution_factors,
**kwargs
):
spanning_cluster = (
in microcanonical_statistics.dtype.names
)
ret = np.empty(1, dtype=canonical_statistics_dtype(spanning_cluster))
if spanning_cluster:
ret[] = np.sum(
convolution_factors *
microcanonical_statistics[]
)
ret[] = np.sum(
convolution_factors *
microcanonical_statistics[]
)
ret[] = np.sum(
convolution_factors[:, np.newaxis] *
microcanonical_statistics[],
axis=0,
)
return ret | canonical cluster statistics for a single run and a single probability
Parameters
----------
microcanonical_statistics : ndarray
Return value of `bond_microcanonical_statistics`
convolution_factors : 1-D array_like
The coefficients of the convolution for the given probability ``p``
and for each occupation number ``n``.
Returns
-------
ret : ndarray of size ``1``
Structured array with dtype as returned by
`canonical_statistics_dtype`
ret['percolation_probability'] : ndarray of float
The "percolation probability" of this run at the value of ``p``.
Only exists if `microcanonical_statistics` argument has the
``has_spanning_cluster`` field.
ret['max_cluster_size'] : ndarray of int
Weighted size of the largest cluster (absolute number of sites)
ret['moments'] : 1-D :py:class:`numpy.ndarray` of float
Array of size ``5``.
The ``k``-th entry is the weighted ``k``-th raw moment of the
(absolute) cluster size distribution, with ``k`` ranging from ``0`` to
``4``.
See Also
--------
bond_microcanonical_statistics
canonical_statistics_dtype |
9,157 | def _add_to_index(index, obj):
id_set = index.value_map.setdefault(indexed_value(index, obj), set())
if index.unique:
if len(id_set) > 0:
raise UniqueConstraintError()
id_set.add(obj.id) | Adds the given object ``obj`` to the given ``index`` |
9,158 | def highlight_text(text, lexer_name=, **kwargs):
r
lexer_name = {
: ,
: ,
: ,
: ,
}.get(lexer_name.replace(, ), lexer_name)
if lexer_name in [, , , ]:
return color_text(text, lexer_name)
import utool as ut
if ENABLE_COLORS:
try:
import pygments
import pygments.lexers
import pygments.formatters
import pygments.formatters.terminal
formater = pygments.formatters.terminal.TerminalFormatter(bg=)
lexer = pygments.lexers.get_lexer_by_name(lexer_name, **kwargs)
return pygments.highlight(text, lexer, formater)
except Exception:
if ut.SUPER_STRICT:
raise
return text
return text | r"""
SeeAlso:
color_text |
9,159 | def datasets_view(self, owner_slug, dataset_slug, **kwargs):
kwargs[] = True
if kwargs.get():
return self.datasets_view_with_http_info(owner_slug, dataset_slug, **kwargs)
else:
(data) = self.datasets_view_with_http_info(owner_slug, dataset_slug, **kwargs)
return data | Show details about a dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datasets_view(owner_slug, dataset_slug, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str owner_slug: Dataset owner (required)
:param str dataset_slug: Dataset name (required)
:return: Result
If the method is called asynchronously,
returns the request thread. |
9,160 | def p_expr_LE_expr(p):
p[0] = make_binary(p.lineno(2), , p[1], p[3], lambda x, y: x <= y) | expr : expr LE expr |
9,161 | def __convertRlocToRouterId(self, xRloc16):
routerList = []
routerList = self.__sendCommand(WPANCTL_CMD + )
print routerList
print xRloc16
for line in routerList:
if re.match(, line):
continue
if re.match(WPAN_CARRIER_PROMPT, line, re.M|re.I):
break
router = []
router = self.__stripValue(line).split()
for item in router:
if in item:
routerid = item.split()[1]
elif in line:
rloc16 = line.split()[1]
else:
pass
if isinstance(xRloc16, str):
rloc16 = + rloc16
if rloc16 == xRloc16:
return routerid
elif isinstance(xRloc16, int):
if int(rloc16, 16) == xRloc16:
return routerid
else:
pass
return None | mapping Rloc16 to router id
Args:
xRloc16: hex rloc16 short address
Returns:
actual router id allocated by leader |
9,162 | def on_menu_exit(self, event):
if self.close_warning:
TEXT = "Data is not saved to a file yet!\nTo properly save your data:\n1) Analysis --> Save current interpretations to a redo file.\nor\n1) File --> Save MagIC tables.\n\n Press OK to exit without saving."
dlg1 = wx.MessageDialog(
None, caption="Warning:", message=TEXT, style=wx.OK | wx.CANCEL | wx.ICON_EXCLAMATION)
if self.show_dlg(dlg1) == wx.ID_OK:
dlg1.Destroy()
self.GUI_log.close()
self.Destroy()
if self.evt_quit:
event = self.evt_quit(self.GetId())
self.GetEventHandler().ProcessEvent(event)
if self.standalone:
sys.exit()
else:
self.GUI_log.close()
self.Destroy()
if self.evt_quit:
event = self.evt_quit(self.GetId())
self.GetEventHandler().ProcessEvent(event)
if self.standalone:
sys.exit() | Runs whenever Thellier GUI exits |
9,163 | def count_function(func):
@use_defaults
@wraps(func)
def wrapper(row, cohort, filter_fn=None, normalized_per_mb=None, **kwargs):
per_patient_data = func(row=row,
cohort=cohort,
filter_fn=filter_fn,
normalized_per_mb=normalized_per_mb,
**kwargs)
patient_id = row["patient_id"]
if patient_id in per_patient_data:
count = len(per_patient_data[patient_id])
if normalized_per_mb:
count /= float(get_patient_to_mb(cohort)[patient_id])
return count
return np.nan
return wrapper | Decorator for functions that return a collection (technically a dict of collections) that should be
counted up. Also automatically falls back to the Cohort-default filter_fn and normalized_per_mb if
not specified. |
9,164 | def index_based_complete(self, text: str, line: str, begidx: int, endidx: int,
index_dict: Mapping[int, Union[Iterable, Callable]],
all_else: Union[None, Iterable, Callable] = None) -> List[str]:
tokens, _ = self.tokens_for_completion(line, begidx, endidx)
if not tokens:
return []
matches = []
index = len(tokens) - 1
if index in index_dict:
match_against = index_dict[index]
else:
match_against = all_else
if isinstance(match_against, Collection):
matches = self.basic_complete(text, line, begidx, endidx, match_against)
elif callable(match_against):
matches = match_against(text, line, begidx, endidx)
return matches | Tab completes based on a fixed position in the input string
:param text: the string prefix we are attempting to match (all returned matches must begin with it)
:param line: the current input line with leading whitespace removed
:param begidx: the beginning index of the prefix text
:param endidx: the ending index of the prefix text
:param index_dict: dictionary whose structure is the following:
keys - 0-based token indexes into command line that determine which tokens
perform tab completion
values - there are two types of values
1. iterable list of strings to match against (dictionaries, lists, etc.)
2. function that performs tab completion (ex: path_complete)
:param all_else: an optional parameter for tab completing any token that isn't at an index in index_dict
:return: a list of possible tab completions |
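A hedged sketch of how an `index_dict` could be wired up in a `cmd2.Cmd` subclass of the same vintage as the code above (where `index_based_complete` and `path_complete` are instance methods); the command, animal list, and path fallback are invented for illustration.

```python
import cmd2

class FeedApp(cmd2.Cmd):
    ANIMALS = ['cat', 'dog', 'fish']

    def do_feed(self, args):
        """feed <animal> <food_file>"""
        self.poutput(args)

    def complete_feed(self, text, line, begidx, endidx):
        index_dict = {
            1: self.ANIMALS,        # token 1: choose from a fixed list
            2: self.path_complete,  # token 2: complete a filesystem path
        }
        # Any later token also falls back to path completion.
        return self.index_based_complete(text, line, begidx, endidx,
                                         index_dict, all_else=self.path_complete)
```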
9,165 | def from_record(cls, record, crs):
if not in record:
raise TypeError("The data isn't a valid record.")
return cls(to_shape(record), crs) | Load vector from record. |
9,166 | def prepend(cls, d, s, filter=Filter()):
i = 0
for x in s:
if x in filter:
d.insert(i, x)
i += 1 | Prepend schema object's from B{s}ource list to
the B{d}estination list while applying the filter.
@param d: The destination list.
@type d: list
@param s: The source list.
@type s: list
@param filter: A filter that allows items to be prepended.
@type filter: L{Filter} |
9,167 | def send_document(chat_id, document,
reply_to_message_id=None, reply_markup=None,
**kwargs):
files = None
if isinstance(document, InputFile):
files = [document]
document = None
elif not isinstance(document, str):
raise Exception()
params = dict(
chat_id=chat_id,
document=document
)
params.update(
_clean_params(
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup
)
)
return TelegramBotRPCRequest(, params=params, files=files, on_result=Message.from_result, **kwargs) | Use this method to send general files.
:param chat_id: Unique identifier for the message recipient — User or GroupChat id
:param document: File to send. You can either pass a file_id as String to resend a file that is already on
the Telegram servers, or upload a new file using multipart/form-data.
:param reply_to_message_id: If the message is a reply, ID of the original message
:param reply_markup: Additional interface options. A JSON-serialized object for a custom reply keyboard,
instructions to hide keyboard or to force a reply from the user.
:param \*\*kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int
:type document: InputFile or str
:type reply_to_message_id: int
:type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply
:returns: On success, the sent Message is returned.
:rtype: TelegramBotRPCRequest |
9,168 | def get_login_redirect(self, provider, account):
info = self.model._meta.app_label, self.model._meta.model_name
from .admin import PRESERVED_FILTERS_SESSION_KEY
preserved_filters = self.request.session.get(PRESERVED_FILTERS_SESSION_KEY, None)
redirect_url = reverse( % info)
if preserved_filters:
redirect_url = add_preserved_filters(
{: preserved_filters, : self.model._meta}, redirect_url)
return redirect_url | Return url to redirect authenticated users. |
9,169 | def delete(self, name, **kwargs):
self.gitlab.http_delete(self.path, query_data={: name}, **kwargs) | Delete a Label on the server.
Args:
name: The name of the label
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server cannot perform the request |
9,170 | def _normalize_path(self, path):
norm_path = os.path.normpath(path)
return os.path.relpath(norm_path, start=self._get_working_dir()) | Normalizes a file path so that it returns a path relative to the root repo directory. |
9,171 | def plot(self, series, series_diff=None, label=, color=None, style=None):
color = self.get_color(color)
if series_diff is None and self.autodiffs:
series_diff = series.diff()
if self.stacked:
series += self.running_sum
self.ax1.fill_between(series.index, self.running_sum, series,
facecolor=ALPHAS[color])
self.running_sum = series
self.ax1.set_ylim(bottom=0, top=int(series.max() * 1.05))
series.plot(label=label, c=COLORS[color], linewidth=2, style=style,
ax=self.ax1)
if series_diff is not None:
series_diff.plot(label=label, c=COLORS[color], linewidth=2,
style=style, ax=self.ax2) | :param pandas.Series series:
The series to be plotted, all values must be positive if stacked
is True.
:param pandas.Series series_diff:
The series representing the diff that will be plotted in the
bottom part.
:param string label:
The label for the series.
:param integer/string color:
Color for the plot. Can be an index for the color from COLORS
or a key(string) from CNAMES.
:param string style:
Style forwarded to the plt.plot. |
9,172 | def verify(self, tool):
if os.path.isfile(tool[]):
print( + tool[])
return True
else:
print( + tool[])
return False | check that the tool exists |
9,173 | def template(self):
s = Template(self._IPSET_TEMPLATE)
return s.substitute(sets=.join(self.sets),
date=datetime.today()) | Create a rules file in ipset --restore format |
9,174 | def add_training_sample(self, text=u, lang=):
self.trainer.add(text=text, lang=lang) | Initial step for adding new sample to training data.
You need to call `save_training_samples()` afterwards.
:param text: Sample text to be added.
:param lang: Language label for the input text. |
9,175 | async def send_rpc(self, conn_id, address, rpc_id, payload, timeout):
adapter_id = self._get_property(conn_id, )
return await self.adapters[adapter_id].send_rpc(conn_id, address, rpc_id, payload, timeout) | Send an RPC to a device.
See :meth:`AbstractDeviceAdapter.send_rpc`. |
9,176 | def users_get_avatar(self, user_id=None, username=None, **kwargs):
if user_id:
return self.__call_api_get(, userId=user_id, kwargs=kwargs)
elif username:
return self.__call_api_get(, username=username, kwargs=kwargs)
else:
raise RocketMissingParamException() | Gets the URL for a user’s avatar. |
9,177 | def _at_if(self, calculator, rule, scope, block):
if block.directive != :
if not in rule.options:
raise SyntaxError("@else with no @if (%s)" % (rule.file_and_line,))
if rule.options[]:
return
condition = calculator.calculate(block.argument)
if condition:
inner_rule = rule.copy()
inner_rule.unparsed_contents = block.unparsed_contents
if not self.should_scope_loop_in_rule(inner_rule):
inner_rule.namespace = rule.namespace
self.manage_children(inner_rule, scope)
rule.options[] = condition | Implements @if and @else if |
9,178 | def run_in_transaction(self, func, *args, **kw):
if getattr(self._local, "transaction_running", False):
raise RuntimeError("Spanner does not support nested transactions.")
self._local.transaction_running = True
try:
with SessionCheckout(self._pool) as session:
return session.run_in_transaction(func, *args, **kw)
finally:
self._local.transaction_running = False | Perform a unit of work in a transaction, retrying on abort.
:type func: callable
:param func: takes a required positional argument, the transaction,
and additional positional / keyword arguments as supplied
by the caller.
:type args: tuple
:param args: additional positional arguments to be passed to ``func``.
:type kw: dict
:param kw: optional keyword arguments to be passed to ``func``.
If passed, "timeout_secs" will be removed and used to
override the default timeout.
:rtype: :class:`datetime.datetime`
:returns: timestamp of committed transaction |
9,179 | def get_album_songs(self, album_id):
url = .format(album_id)
result = self.get_request(url)
songs = result[][]
songs = [Song(song[], song[]) for song in songs]
return songs | Get all songs of an album.
Warning: uses the old API.
:params album_id: album id.
:return: a list of Song object. |
9,180 | def resolve_symbol(self, symbol, bCaseSensitive = False):
if bCaseSensitive:
for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
if symbol == SymbolName:
return SymbolAddress
else:
symbol = symbol.lower()
for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
if symbol == SymbolName.lower():
return SymbolAddress | Resolves a debugging symbol's address.
@type symbol: str
@param symbol: Name of the symbol to resolve.
@type bCaseSensitive: bool
@param bCaseSensitive: C{True} for case sensitive matches,
C{False} for case insensitive.
@rtype: int or None
@return: Memory address of symbol. C{None} if not found. |
9,181 | def _fix_unsafe(shell_input):
_unsafe = re.compile(r, 256)
try:
if len(_unsafe.findall(shell_input)) == 0:
return shell_input.strip()
else:
clean = "", "\""
return clean
except TypeError:
return None | Find characters used to escape from a string into a shell, and wrap them in
quotes if they exist. Regex pilfered from Python3 :mod:`shlex` module.
:param str shell_input: The input intended for the GnuPG process. |
9,182 | def padded_to_same_length(seq1, seq2, item=0):
len1, len2 = len(seq1), len(seq2)
if len1 == len2:
return (seq1, seq2)
elif len1 < len2:
return (cons.ed(seq1, yield_n(len2-len1, item)), seq2)
else:
return (seq1, cons.ed(seq2, yield_n(len1-len2, item))) | Return a pair of sequences of the same length by padding the shorter
sequence with ``item``.
The padded sequence is a tuple. The unpadded sequence is returned as-is. |
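Because `cons.ed` and `yield_n` come from helpers not shown in this row, here is a plain-Python re-sketch of the documented behaviour (pad the shorter sequence with `item` and return it as a tuple, leaving the other as-is), not the original implementation.

```python
def padded_pair(seq1, seq2, item=0):
    len1, len2 = len(seq1), len(seq2)
    if len1 < len2:
        return tuple(seq1) + (item,) * (len2 - len1), seq2
    if len2 < len1:
        return seq1, tuple(seq2) + (item,) * (len1 - len2)
    return seq1, seq2

print(padded_pair([1, 2, 3], [7, 8]))   # ([1, 2, 3], (7, 8, 0))
```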
9,183 | def validate(self):
results = []
from ..specs import Join
def recursive_find_loop(task, history):
current = history[:]
current.append(task)
if isinstance(task, Join):
if task in history:
msg = "Found loop with : %s then again" % (
task.name, .join([p.name for p in history]),
task.name)
raise Exception(msg)
for predecessor in task.inputs:
recursive_find_loop(predecessor, current)
for parent in task.inputs:
recursive_find_loop(parent, current)
for task_id, task in list(self.task_specs.items()):
try:
recursive_find_loop(task, [])
except Exception as exc:
results.append(exc.__str__())
if not task.inputs and task.name not in [, ]:
if task.outputs:
results.append("Task is disconnected (no inputs)" %
task.name)
else:
LOG.debug("Task is not being used" % task.name)
return results | Checks integrity of workflow and reports any problems with it.
Detects:
- loops (tasks that wait on each other in a loop)
:returns: empty list if valid, a list of errors if not |
9,184 | def install():
ceph_dir = "/etc/ceph"
if not os.path.exists(ceph_dir):
os.mkdir(ceph_dir)
apt_install('ceph-common', fatal=True) | Basic Ceph client installation. |
9,185 | def probe_image(self, labels, instance, column_name=None, num_scaled_images=50,
top_percent=10):
if len(self._image_columns) > 1 and not column_name:
raise ValueError('There is more than one image column in the data; ' +
'"column_name" must be specified.')
elif column_name and column_name not in self._image_columns:
raise ValueError('Specified column_name "%s" is not an image column.' %
column_name)
image_column_name = column_name if column_name else self._image_columns[0]
if isinstance(instance, six.string_types):
instance = next(csv.DictReader([instance], fieldnames=self._headers))
image_path = instance[image_column_name]
with file_io.FileIO(image_path, 'rb') as fi:
im = Image.open(fi)
resized_image = im.resize((299, 299))
step = 1. / num_scaled_images
scales = np.arange(0.0, 1.0, step) + step
csv_lines = []
for s in scales:
pixels = (np.asarray(resized_image) * s).astype('uint8')
scaled_image = Image.fromarray(pixels)
buf = io.BytesIO()
scaled_image.save(buf, "JPEG")
encoded_image = base64.urlsafe_b64encode(buf.getvalue()).decode()
instance_copy = dict(instance)
instance_copy[image_column_name] = encoded_image
buf = six.StringIO()
writer = csv.DictWriter(buf, fieldnames=self._headers, lineterminator='')
writer.writerow(instance_copy)
csv_lines.append(buf.getvalue())
integrated_gradients_images = []
for label in labels:
grads = self._image_gradients(csv_lines, label, image_column_name)
integrated_grads = resized_image * np.average(grads, axis=0)
grayed = np.average(abs(integrated_grads), axis=2)
grayed = np.transpose([grayed, grayed, grayed], axes=[1, 2, 0])
p = np.percentile(grayed, 100 - top_percent)
viz_window = np.where(grayed > p, 1, 0)
vis = resized_image * viz_window
im_vis = Image.fromarray(np.uint8(vis))
integrated_gradients_images.append(im_vis)
return resized_image, integrated_gradients_images | Get pixel importance of the image.
It performs pixel sensitivity analysis by showing only the most important pixels to a
certain label in the image. It uses integrated gradients to measure the
importance of each pixel.
Args:
labels: labels to compute gradients from.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
column_name: the name of the image column to probe. If there is only one image
column it can be None.
num_scaled_images: Number of scaled images to compute gradients from. For example, if 10,
the image will be scaled by 0.1, 0.2, ..., 0.9, 1.0, producing
10 images for the gradient computation.
top_percent: The percentage of pixels to keep. For example, if 10,
only the top 10% most impactful pixels are shown and the rest are blacked out.
Returns:
A tuple. First is the resized original image (299x299x3). Second is a list of
visualizations of the same size that highlight the most important pixels, one
per label. |
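The core of the pixel-attribution step, shown as a standalone numpy sketch. This is a rough approximation of the integrated-gradients averaging and masking; the random gradients stand in for the model call, which is an assumption here.
import numpy as np

rng = np.random.default_rng(0)
image = rng.random((299, 299, 3))              # stands in for the resized input image
grads = rng.random((50, 299, 299, 3))          # one gradient per scaled copy (model output assumed)

integrated = image * np.mean(grads, axis=0)    # average gradients over the scale path
grayed = np.mean(np.abs(integrated), axis=2)   # collapse channels into one importance map
threshold = np.percentile(grayed, 100 - 10)    # keep the top 10% most important pixels
mask = (grayed > threshold)[..., np.newaxis]
highlighted = image * mask                     # everything outside the mask rendered black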
9,186 | def seek_to_end(self, *partitions):
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
if not partitions:
partitions = self._subscription.assigned_partitions()
assert partitions, 'No partitions are currently assigned'
else:
for p in partitions:
assert p in self._subscription.assigned_partitions(), 'Unassigned partition'
for tp in partitions:
log.debug("Seeking to end of partition %s", tp)
self._subscription.need_offset_reset(tp, OffsetResetStrategy.LATEST) | Seek to the most recent available offset for partitions.
Arguments:
*partitions: Optionally provide specific TopicPartitions, otherwise
default to all assigned partitions.
Raises:
AssertionError: If any partition is not currently assigned, or if
no partitions are assigned. |
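Typical kafka-python usage of this method; the broker address and topic name below are placeholder assumptions.
from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers="localhost:9092")  # assumed broker
tp = TopicPartition("my-topic", 0)                            # assumed topic/partition
consumer.assign([tp])
consumer.seek_to_end(tp)          # the next poll() starts at the latest offset
records = consumer.poll(timeout_ms=1000)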
9,187 | def write_header(self, out_strm, delim, f1_num_fields, f2_num_fields,
f1_header=None, f2_header=None, missing_val=None):
mm = f1_header != f2_header
one_none = f1_header is None or f2_header is None
if mm and one_none and missing_val is None:
raise InvalidHeaderError("Cannot generate output header when one " +
"input file is missing a header and no " +
"missing value was provided to replace " +
"unknown entries.")
if f1_header is not None and f2_header is not None:
out_strm.write(delim.join(f1_header) + delim +
delim.join(f2_header) + "\n")
elif f1_header is None and f2_header is not None:
dummy_h = f1_num_fields * [missing_val]
out_strm.write(delim.join(dummy_h) + delim +
delim.join(f2_header) + "\n")
elif f1_header is not None and f2_header is None:
dummy_h = f2_num_fields * [missing_val]
out_strm.write(delim.join(f1_header) + delim +
delim.join(dummy_h) + "\n") | Write the header for a joined file. If headers are provided for one or more
of the input files, then a header is generated for the output file.
Otherwise, this does not output anything.
:param out_strm: write to this stream
:param delim:
:param f1_num_fields: the number of columns in the first file
:param f2_num_fields: the number of columns in the second file
:param f1_header:
:param f2_header:
:param missing_val: |
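A small illustration of the header logic, assuming a hypothetical joiner object exposing the method above: with a tab delimiter, a two-column header for file one, no header for file two, and "NA" as the missing value, the output is the file-one header followed by three NA placeholders.
from io import StringIO

out = StringIO()
# joiner: hypothetical instance of the class that defines write_header
joiner.write_header(out, "\t", f1_num_fields=2, f2_num_fields=3,
                    f1_header=["chrom", "start"], f2_header=None,
                    missing_val="NA")
print(repr(out.getvalue()))   # 'chrom\tstart\tNA\tNA\tNA\n'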
9,188 | def updateFromKwargs(self, kwargs, properties, collector, **kw):
yield self.collectChildProperties(kwargs=kwargs, properties=properties,
collector=collector, **kw)
if self.name:
d = properties.setdefault(self.name, {})
else:
d = properties
d.update(kwargs[self.fullName]) | By default, the child values will be collapsed into a dictionary. If
the parent is anonymous, this dictionary is the top-level properties. |
9,189 | def md2rst(md_lines):
lvl2header_char = {1: '=', 2: '-', 3: '~'}  # conventional RST underline characters
for md_line in md_lines:
if md_line.startswith('#'):
header_indent, header_text = md_line.split(' ', 1)
yield header_text
header_char = lvl2header_char[len(header_indent)]
yield header_char * len(header_text)
else:
yield md_line | Only converts headers |
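Usage sketch, assuming the level-to-character map uses the conventional '=', '-', '~' underline characters filled in above; non-header lines pass through unchanged.
md = ["# Title", "Some text.", "## Section"]
for line in md2rst(md):
    print(line)
# Output:
# Title
# =====
# Some text.
# Section
# -------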
9,190 | def overlap(self, x, ctrs, kdtree=None):
q = len(self.within(x, ctrs, kdtree=kdtree))
return q | Check how many balls `x` falls within. Uses a K-D Tree to
perform the search if provided. |
9,191 | def GET_subdomain_ops(self, path_info, txid):
blockstackd_url = get_blockstackd_url()
subdomain_ops = None
try:
subdomain_ops = blockstackd_client.get_subdomain_ops_at_txid(txid, hostport=blockstackd_url)
except ValueError:
return self._reply_json({'error': 'Invalid txid'}, status_code=400)
if json_is_error(subdomain_ops):
log.error('Failed to get subdomain operations at {}: {}'.format(txid, subdomain_ops['error']))
return self._reply_json({'error': 'Failed to get subdomain operations'}, status_code=subdomain_ops.get('http_status', 500))
return self._reply_json(subdomain_ops) | Get all subdomain operations processed in a given transaction.
Returns the list of subdomains on success (can be empty)
Returns 502 on failure to get subdomains |
9,192 | def create_srv_record(self, name, values, ttl=60):
self._halt_if_already_deleted()
return self._add_record(SRVResourceRecordSet, **values) | Creates a SRV record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
``rrset`` is the newly created SRVResourceRecordSet instance. |
9,193 | def pickleFile(self, name, minPartitions=None):
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.objectFile(name, minPartitions), self) | Load an RDD previously saved using L{RDD.saveAsPickleFile} method.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9] |
9,194 | def sort_key_for_numeric_suffixes(path, sep='.', suffix_index=-2):
chunks = path.split(sep)
if chunks[suffix_index].isdigit():
return sep.join(chunks[:suffix_index] + chunks[suffix_index+1:]), int(chunks[suffix_index])
return path, 0 | Sort files taking into account potentially absent suffixes like
somefile.dcd
somefile.1000.dcd
somefile.2000.dcd
To be used with sorted(..., key=callable). |
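With the '.' separator assumed above, the key function groups a suffix-less file with its numbered continuations and orders them numerically.
paths = ["somefile.2000.dcd", "somefile.dcd", "somefile.1000.dcd"]
print(sorted(paths, key=sort_key_for_numeric_suffixes))
# ['somefile.dcd', 'somefile.1000.dcd', 'somefile.2000.dcd']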
9,195 | def get_sum(path, form='sha256'):
path = os.path.expanduser(path)
if not os.path.isfile(path):
return
return salt.utils.hashutils.get_hash(path, form, 4096) | Return the checksum for the given file. The following checksum algorithms
are supported:
* md5
* sha1
* sha224
* sha256 **(default)**
* sha384
* sha512
path
path to the file or directory
form
desired sum format
CLI Example:
.. code-block:: bash
salt '*' file.get_sum /etc/passwd sha512 |
9,196 | def _check_valid(key, val, valid):
if val not in valid:
raise ValueError('%s must be one of %s, not "%s"'
% (key, valid, val)) | Helper to check valid options |
9,197 | def read_group_info(self):
self.groups = []
for i in range(self.tabs.count()):
one_group = self.tabs.widget(i).get_info()
self.groups.append(one_group) | Get information about groups directly from the widget. |
9,198 | async def validate(self, request: web.Request):
parameters = {}
files = {}
errors = self.errors_factory()
body = None
if request.method in request.POST_METHODS:
try:
body = await self._content_receiver.receive(request)
except ValueError as e:
errors[request.content_type].add(str(e))
except TypeError:
errors[request.content_type].add()
for name, param in self._parameters.items():
where = param['in']
schema = param.get('schema', param)
vtype = schema['type']
is_array = vtype == 'array'
if where == 'query':
source = request.query
elif where == 'header':
source = request.headers
elif where == 'path':
source = request.match_info
elif body is None:
source = ()
elif where == 'formData':
source = body
elif where == 'body':
if isinstance(body, BaseException):
errors[name].add(str(body))
else:
parameters[name] = body
continue
else:
raise ValueError(where)
if is_array and hasattr(source, 'getall'):
collection_format = param.get('collectionFormat')
default = param.get('default', [])
value = get_collection(source, name,
collection_format, default)
if param.get('allowEmptyValue') and not value \
and name not in self._required:
continue
elif isinstance(source, Mapping) and name in source and (
vtype not in ('string', 'file') or source[name] != ''
):
value = source[name]
elif 'default' in param:
parameters[name] = param['default']
continue
elif name in self._required:
errors[name].add()
if isinstance(source, BaseException):
errors[name].add(str(body))
continue
else:
continue
if is_array:
vtype = schema['items']['type']
vformat = schema['items'].get('format')
else:
vformat = schema.get('format')
if source is body and isinstance(body, dict):
pass
elif vtype not in ('string', 'file'):
value = convert(name, value, vtype, vformat, errors)
if vtype == 'file':
files[name] = value
else:
parameters[name] = value
parameters = self._validate(parameters, errors)
parameters.update(files)
return parameters, errors | Returns parameters extracted from the request and a multidict of errors.
:param request: Request
:return: tuple of parameters and errors |
9,199 | def write_training_data(self, features, targets):
assert len(features) == len(targets)
data = dict(zip(features, targets))
with open(os.path.join(self.repopath, ), ) as fp:
pickle.dump(data, fp) | Writes data dictionary to filename |