Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
24,900 |
def get_api_gateway_resource(name):
# string keys below reconstructed from the boto3 API Gateway response shape ('items', 'name', 'id', 'pathPart'); the original literals were lost
client = boto3.client('apigateway', region_name=PRIMARY_REGION)
matches = [x for x in client.get_rest_apis().get('items', list())
if x['name'] == API_GATEWAY]
match = matches.pop()
resources = client.get_resources(restApiId=match.get('id'))
resource_id = None
for item in resources.get('items', list()):
if item.get('pathPart', '') != name:
continue
resource_id = item['id']
return resource_id
|
Get the resource associated with our gateway.
|
24,901 |
def optimize(self, piter=3, pmaxf=300, ppert=0.1):
self._save_npz()
optimized = pPLD(self.ID, piter=piter, pmaxf=pmaxf,
ppert=ppert, debug=True, clobber=True)
optimized.publish()
self.reset()
|
Runs :py:obj:`pPLD` on the target in an attempt to further optimize the
values of the PLD priors. See :py:class:`everest.detrender.pPLD`.
|
24,902 |
def length_of_associated_transcript(effect):
return apply_to_transcript_if_exists(
effect=effect,
fn=lambda t: len(t.sequence),
default=0)
|
Length of spliced mRNA sequence of transcript associated with effect,
if there is one (otherwise return 0).
|
24,903 |
def remove(self, line):
nb = 0
for block in self.blocks:
nb += block.remove(line)
return nb
|
Delete all lines matching the given line.
|
24,904 |
def is_multilingual_project(site_id=None):
from parler import appsettings
if site_id is None:
site_id = getattr(settings, 'SITE_ID', None)
return appsettings.PARLER_SHOW_EXCLUDED_LANGUAGE_TABS or site_id in appsettings.PARLER_LANGUAGES
|
Whether the current Django project is configured for multilingual support.
|
24,905 |
def axis_names_without(self, axis):
if self.axis_names is None:
return None
return itemgetter(*self.other_axes(axis))(self.axis_names)
|
Return axis names without axis, or None if axis_names is None
|
24,906 |
def now_heating(self):
try:
if self.side == :
heat = self.device.device_data[]
elif self.side == :
heat = self.device.device_data[]
return heat
except TypeError:
return None
|
Return current heating state.
|
24,907 |
def get_object(self):
obj = super(PublishView, self).get_object()
if not obj or not hasattr(obj, ):
raise http.Http404
return obj
|
Get the object for publishing
Raises a http404 error if the object is not found.
|
24,908 |
def update_position(self, newpos):
tnow = time.time()
if tnow >= self.last_time + self.timestep:
self.points.append(newpos.latlon)
self.last_time = tnow
while len(self.points) > self.count:
self.points.pop(0)
|
update trail
|
24,909 |
def printComparison(results, class_or_prop):
data = []
Row = namedtuple(,[class_or_prop,])
for k,v in sorted(results.items(), key=lambda x: x[1]):
data += [Row(k, str(v))]
pprinttable(data)
|
Print out the results of the comparison using a nice table.
|
24,910 |
def _make_images(self, images):
process = (,
,
,
,
,
,
,
)
for key in process:
data = images.get(key)
if not data:
continue
parts = key.split()
if len(parts) > 2:
attr, subattr = .join(parts[:-1]), parts[-1]
else:
attr, subattr = .join(parts), None
img = AttrDict(self._normalized(data))
if subattr is None:
setattr(self, attr, img)
else:
setattr(getattr(self, attr), subattr, img)
|
Takes an image dict from the giphy api and converts it to attributes.
Any fields expected to be int (width, height, size, frames) will be attempted
to be converted. Also, the keys of `data` serve as the attribute names, but
with special action taken. Keys are split by the last underscore; anything prior
becomes the attribute name, anything after becomes a sub-attribute. For example:
fixed_width_downsampled will end up at `self.fixed_width.downsampled`
|
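A short standalone sketch of the key-splitting rule described above. The helper split_image_key is hypothetical, written only to illustrate the behaviour, and it assumes keys split on underscores as the docstring states:

def split_image_key(key):
    # Mirrors the logic in _make_images: keys with more than two parts are
    # split at the last underscore into (attribute, sub-attribute).
    parts = key.split('_')
    if len(parts) > 2:
        return '_'.join(parts[:-1]), parts[-1]
    return '_'.join(parts), None

print(split_image_key('fixed_width_downsampled'))  # ('fixed_width', 'downsampled')
print(split_image_key('original'))                 # ('original', None)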
24,911 |
def submit(self, fn, *args, **kwargs):
future = super().submit(fn, *args, **kwargs)
work_queue_size = self._work_queue.qsize()
if work_queue_size > self.max_queue_size_reached:
self.max_queue_size_reached = work_queue_size
return future
|
Submits a callable to be executed with the given arguments.
Count maximum reached work queue size in ThreadPoolExecutor.max_queue_size_reached.
|
24,912 |
def parse(self, fo):
motifs = []
p = re.compile(r)
wm = []
name = ""
for line in fo.readlines():
if line.startswith("Motif") and line.strip().endswith(":"):
if name:
motifs.append(Motif(wm))
motifs[-1].id = name
name = ""
wm = []
name = "%s_%s" % (self.name, line.split(":")[0])
else:
m = p.search(line)
if m:
wm.append([float(m.group(x)) for x in range(1,5)])
motifs.append(Motif(wm))
motifs[-1].id = name
return motifs
|
Convert AMD output to motifs
Parameters
----------
fo : file-like
File object containing AMD output.
Returns
-------
motifs : list
List of Motif instances.
|
24,913 |
def refresh(self, thread=None):
if not thread:
thread = self._dbman._get_notmuch_thread(self._id)
self._total_messages = thread.get_total_messages()
self._notmuch_authors_string = thread.get_authors()
subject_type = settings.get()
if subject_type == :
subject = thread.get_subject()
elif subject_type == :
try:
first_msg = list(thread.get_toplevel_messages())[0]
subject = first_msg.get_header()
except IndexError:
subject =
self._subject = subject
self._authors = None
ts = thread.get_oldest_date()
try:
self._oldest_date = datetime.fromtimestamp(ts)
except ValueError:
self._oldest_date = None
try:
timestamp = thread.get_newest_date()
self._newest_date = datetime.fromtimestamp(timestamp)
except ValueError:
self._newest_date = None
self._tags = {t for t in thread.get_tags()}
self._messages = {}
self._toplevel_messages = []
|
refresh thread metadata from the index
|
24,914 |
def create_partition(self, partition_spec, if_not_exists=False, async_=False, **kw):
async_ = kw.get('async', async_)  # legacy 'async' keyword supported (key name assumed)
return self.partitions.create(partition_spec, if_not_exists=if_not_exists, async_=async_)
|
Create a partition within the table.
:param partition_spec: specification of the partition.
:param if_not_exists: if True, do not raise an error when the partition already exists
:param async_: if True, run the operation asynchronously
:return: partition object
:rtype: odps.models.partition.Partition
|
24,915 |
def parse_form_request(api_secret, request):
if not check_sign(api_secret, request):
raise SignError(u"message sign error")
return Storage(request)
|
>>> parse_form_request("123456",{"nonce": 1451122677, "msg": "helllo", "code": 0, "sign": "DB30F4D1112C20DFA736F65458F89C64"})
<Storage {'nonce': 1451122677, 'msg': 'helllo', 'code': 0, 'sign': 'DB30F4D1112C20DFA736F65458F89C64'}>
|
24,916 |
def process_common_disease_file(self, raw, unpadded_doids, limit=None):
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
assoc_count = 0
replace_id_flag = False
col = self.small_files[]
with open(raw, , encoding="utf8") as tsvfile:
reader = csv.reader(tsvfile, delimiter=, quotechar=)
header = tsvfile.readline()
if header != col:
LOG.error("HEADER: has changed in %s.", raw)
raise ValueError(col - header)
disease_id = None
for row in reader:
row = [str(x).strip() for x in row]
did = row[col.index()]
phenotype_id = row[col.index()]
age_of_onset_id = row[col.index()]
eid = row[col.index()]
frequency = row[col.index()]
negation_id = row[col.index()]
description = row[col.index()]
pub_ids = row[col.index()]
disease_id = re.sub(r, , did)
disease_id = re.sub(r, , disease_id)
if not re.search(r, disease_id):
LOG.warning("Invalid id format: %s", disease_id)
if re.match(r, disease_id):
unpadded_num = re.sub(r, , disease_id)
unpadded_num = unpadded_num.lstrip()
if unpadded_num in unpadded_doids:
fixed_id = + unpadded_num
replace_id_flag = True
disease_id = fixed_id.strip()
if self.test_mode and disease_id not in self.test_ids:
return 0
if negation_id != :
continue
if disease_id != and phenotype_id != :
assoc = D2PAssoc(
graph, self.name, disease_id, phenotype_id.strip())
if age_of_onset_id != :
assoc.onset = age_of_onset_id
if frequency != :
assoc.frequency = frequency
eco_id = self.localtt[eid]
if eco_id is None:
eco_id = self.localtt[]
assoc.add_evidence(eco_id)
if description != :
assoc.set_description(description)
if pub_ids != :
for pub in pub_ids.split():
pub = re.sub(r, , pub)
if pub[:4] != and \
graph.curie_regexp.fullmatch(pub) is None:
LOG.warning(
, did, pub)
continue
if re.search(
r, pub) or re.search(
r, description):
continue
assoc.add_source(pub.strip())
assoc.add_association_to_graph()
assoc_count += 1
if not self.test_mode and limit is not None\
and reader.line_num > limit:
break
if replace_id_flag:
LOG.info("replaced DOID with unpadded version")
self.replaced_id_count += 1
LOG.info(
"Added %d associations for %s.", assoc_count, disease_id)
return assoc_count
|
Make disease-phenotype associations.
Some identifiers need clean up:
* DOIDs are listed as DOID-DOID: --> DOID:
* DOIDs may be unnecessarily zero-padded.
these are remapped to their non-padded equivalent.
:param raw:
:param unpadded_doids:
:param limit:
:return:
|
24,917 |
def _delete_request(self, url, headers, data=None):
return self._session.delete(url, headers=headers, data=data)
|
Issue a DELETE request to the specified endpoint with the data provided.
:param url: str
:param headers: dict
:param data: dict
|
24,918 |
def set_config(self, key, option, value):
if not in self.proxy:
newopt = dict(default_cfg)
newopt[option] = value
self.proxy[] = {key: newopt}
else:
if key in self.proxy[]:
self.proxy[][key][option] = value
else:
newopt = dict(default_cfg)
newopt[option] = value
self.proxy[][key] = newopt
|
Set a configuration option for a key
|
24,919 |
def ask_int(question: str, default: int = None) -> int:
default_q = " [default: {0}]: ".format(
default) if default is not None else ""
answer = input("{0} [{1}]: ".format(question, default_q))
if not answer:
if default is None:
print("No default set, try again.")
return ask_int(question, default)
return default
if any(x not in "1234567890" for x in answer):
print("Please enter only numbers (0-9).")
return ask_int(question, default)
return int(answer)
|
Asks a question expecting an integer answer, re-prompting until a valid number (or the default) is given.
|
24,920 |
def extractLargestSubNetwork(cls,
network_file,
out_subset_network_file,
river_id_field,
next_down_id_field,
river_magnitude_field,
safe_mode=True):
network_shapefile = ogr.Open(network_file)
network_layer = network_shapefile.GetLayer()
number_of_features = network_layer.GetFeatureCount()
riv_magnuitude_list = np.zeros(number_of_features, dtype=np.int32)
for feature_idx, drainage_line_feature in enumerate(network_layer):
riv_magnuitude_list[feature_idx] =\
drainage_line_feature.GetField(river_magnitude_field)
max_magnitude_feature = \
network_layer.GetFeature(np.argmax(riv_magnuitude_list))
cls.extractSubNetwork(network_file,
out_subset_network_file,
[max_magnitude_feature.GetField(river_id_field)],
river_id_field,
next_down_id_field,
river_magnitude_field,
safe_mode)
|
Extracts the largest sub network from the watershed based on the
magnitude parameter.
Parameters
----------
network_file: str
Path to the stream network shapefile.
out_subset_network_file: str
Path to the output subset stream network shapefile.
river_id_field: str
Name of the river ID field in the stream network shapefile.
next_down_id_field: str
Name of the field with the river ID of the next downstream river
segment in the stream network shapefile.
river_magnitude_field: str
Name of the river magnitude field in the stream network shapefile.
safe_mode: bool, optional
If True, it will kill the simulation early before overtaxing
your computer. If you are confident your computer can handle it,
set it to False.
Here is an example of how to use this:
.. code:: python
import os
from RAPIDpy.gis.taudem import TauDEM
output_directory = '/path/to/output/files'
network_shp = os.path.join(output_directory,
"stream_reach_file.shp")
out_shp = os.path.join(output_directory,
"stream_reach_file_subset.shp")
TauDEM.extractLargestSubNetwork(
network_file=network_shp,
out_subset_network_file=out_shp,
river_id_field="LINKNO",
next_down_id_field="DSLINKNO",
river_magnitude_field="Magnitude",
)
|
24,921 |
def recompute(self, quiet=False, **kwargs):
if not self.computed:
if not (hasattr(self, "_x") and hasattr(self, "_yerr2")):
raise RuntimeError("You need to compute the model first")
try:
self.compute(self._x, np.sqrt(self._yerr2), **kwargs)
except (ValueError, LinAlgError):
if quiet:
return False
raise
return True
|
Re-compute a previously computed model. You might want to do this if
the kernel parameters change and the kernel is labeled as ``dirty``.
:param quiet: (optional)
If ``True``, return false when the computation fails. Otherwise,
throw an error if something goes wrong. (default: ``False``)
|
24,922 |
def window_visu(N=51, name='hamming', **kargs):
# default window name and keyword names reconstructed from the docstring parameters; originals were lost
mindB = kargs.pop('mindB', -100)
maxdB = kargs.pop('maxdB', None)
norm = kargs.pop('norm', True)
w = Window(N, name, **kargs)
w.plot_time_freq(mindB=mindB, maxdB=maxdB, norm=norm)
|
A Window visualisation tool
:param N: length of the window
:param name: name of the window
:param NFFT: padding used by the FFT
:param mindB: the minimum frequency power in dB
:param maxdB: the maximum frequency power in dB
:param kargs: optional arguments passed to :func:`create_window`
This function plot the window shape and its equivalent in the Fourier domain.
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'kaiser', beta=8.)
|
24,923 |
def search(self, query):
if self.authSubToken is None:
raise LoginError("You need to login before executing any request")
path = SEARCH_URL + "?c=3&q={}".format(requests.utils.quote(query))
self.toc()
data = self.executeRequestApi2(path)
if utils.hasPrefetch(data):
response = data.preFetch[0].response
else:
response = data
resIterator = response.payload.listResponse.doc
return list(map(utils.parseProtobufObj, resIterator))
|
Search the play store for an app.
nb_result (int): is the maximum number of result to be returned
offset (int): is used to take result starting from an index.
|
24,924 |
def t_FLOAT(tok):
r'\d+\.\d+'
tok.value = (tok.type, float(tok.value))
return tok
|
r'\d+\.\d+'
|
24,925 |
def htmlNodeDumpOutput(self, doc, cur, encoding):
if doc is None: doc__o = None
else: doc__o = doc._o
if cur is None: cur__o = None
else: cur__o = cur._o
libxml2mod.htmlNodeDumpOutput(self._o, doc__o, cur__o, encoding)
|
Dump an HTML node, recursive behaviour, children are printed
too, and formatting returns/spaces are added.
|
24,926 |
def fail(message=None, exit_status=None):
print(, message, file=sys.stderr)
sys.exit(exit_status or 1)
|
Prints the specified message and exits the program with the specified
exit status.
|
24,927 |
def get_oa_policy(doi):
try:
request = requests.get("%s%s" % (DISSEMIN_API, doi))
request.raise_for_status()
result = request.json()
assert result["status"] == "ok"
return ([i
for i in result["paper"]["publications"]
if i["doi"] == doi][0])["policy"]
except (AssertionError, ValueError,
KeyError, RequestException, IndexError):
return None
|
Get OA policy for a given DOI.
.. note::
Uses beta.dissem.in API.
:param doi: A canonical DOI.
:returns: The OpenAccess policy for the associated publications, or \
``None`` if unknown.
>>> tmp = get_oa_policy('10.1209/0295-5075/111/40005'); (tmp["published"], tmp["preprint"], tmp["postprint"], tmp["romeo_id"])
('can', 'can', 'can', '1896')
>>> get_oa_policy('10.1215/9780822387268') is None
True
|
24,928 |
def load(cls, file):
d = json.load(file)
return cls.create(d)
|
Loads a :class:`~pypot.primitive.move.Move` from a json file.
|
24,929 |
def _reset(self, index, total, percentage_step, length):
self._start_time = datetime.datetime.now()
self._start_index = index
self._current_index = index
self._percentage_step = percentage_step
self._total = float(total)
self._total_minus_one = total - 1
self._length = length
self._norm_factor = total * percentage_step / 100.0
self._current_interval = int((index + 1.0) / self._norm_factor)
|
Resets the progressbar to start a new one
|
24,930 |
def loadVars(filename, ask=True, into=None, only=None):
r
filename = os.path.expanduser(filename)
if into is None: into = magicGlobals()
varH = loadDict(filename)
toUnpickle = only or varH.keys()
alreadyDefined = filter(into.has_key, toUnpickle)
if alreadyDefined and ask:
print "The following vars already exist; overwrite (yes/NO)?\n",\
"\n".join(alreadyDefined)
if raw_input() != "yes":
toUnpickle = without(toUnpickle, alreadyDefined)
if not toUnpickle:
print "nothing to unpickle"
return None
print "unpickling:\n",\
"\n".join(sorted(toUnpickle))
for k in varH.keys():
if k not in toUnpickle:
del varH[k]
into.update(varH)
|
r"""Load variables pickled with `saveVars`.
Parameters:
- `ask`: If `True` then don't overwrite existing variables without
asking.
- `only`: A list to limit the variables to or `None`.
- `into`: The dictionary the variables should be loaded into (defaults
to global dictionary).
|
24,931 |
def check_column_id(
problems: List,
table: str,
df: DataFrame,
column: str,
*,
column_required: bool = True,
) -> List:
f = df.copy()
if not column_required:
if column not in f.columns:
f[column] = np.nan
f = f.dropna(subset=[column])
cond = ~f[column].map(valid_str)
problems = check_table(
problems,
table,
f,
cond,
f"Invalid {column}; maybe has extra space characters",
)
cond = f[column].duplicated()
problems = check_table(problems, table, f, cond, f"Repeated {column}")
return problems
|
A specialization of :func:`check_column`.
Parameters
----------
problems : list
A list of four-tuples, each containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
column : string
A column of ``df``
column_required : boolean
``True`` if and only if ``column`` is required
(and not optional) by the GTFS
Returns
-------
list
The ``problems`` list extended as follows.
Record the indices of ``df`` where the given column has
duplicated entry or an invalid strings.
If the list of indices is nonempty, append to the problems the
item ``[type_, problem, table, indices]``; otherwise do not
append anything.
If not ``column_required``, then NaN entries will be ignored
in the checking.
|
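A minimal pandas sketch of the two checks performed above, with an inline stand-in for the valid_str helper and a hypothetical route_id column (neither is part of the original code):

import pandas as pd

def valid_str(x):
    # Stand-in: non-empty string with no leading/trailing whitespace.
    return isinstance(x, str) and x == x.strip() and x != ""

f = pd.DataFrame({"route_id": ["r1", " r2", "r1", None]})
f = f.dropna(subset=["route_id"])        # optional column: ignore NaN entries
invalid = ~f["route_id"].map(valid_str)  # e.g. extra space characters
repeated = f["route_id"].duplicated()
print(list(f.index[invalid]), list(f.index[repeated]))  # [1] [2]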
24,932 |
def get_commit_request(self, id):
schema = RequestSchema()
resp = self.service.get(self.base+str(id)+)
return self.service.decode(schema, resp)
|
Get a commit request for a staged import.
:param id: Staged import ID as an int.
:return: :class:`imports.Request <imports.Request>` object
:rtype: imports.Request
|
24,933 |
def check_dns_name_availability(name, region, **kwargs):
# utility keys reconstructed from the usual salt azurearm module pattern; originals were lost
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
check_dns_name = netconn.check_dns_name_availability(
location=region,
domain_name_label=name
)
result = check_dns_name.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result
|
.. versionadded:: 2019.2.0
Check whether a domain name in the current zone is available for use.
:param name: The DNS name to query.
:param region: The region to query for the DNS name in question.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.check_dns_name_availability testdnsname westus
|
24,934 |
def reset_stats(self):
scores = list(itertools.chain.from_iterable([v.total_scores for v in self._runners]))
for v in self._runners:
v.total_scores.clear()
try:
return np.mean(scores), np.max(scores)
except Exception:
logger.exception("Cannot compute total scores in EnvRunner.")
return None, None
|
Returns:
mean, max: two stats of the runners, to be added to backend
|
24,935 |
def add_extra_chain_cert(self, certobj):
if not isinstance(certobj, X509):
raise TypeError("certobj must be an X509 instance")
copy = _lib.X509_dup(certobj._x509)
add_result = _lib.SSL_CTX_add_extra_chain_cert(self._context, copy)
if not add_result:
_lib.X509_free(copy)
_raise_current_error()
|
Add certificate to chain
:param certobj: The X509 certificate object to add to the chain
:return: None
|
24,936 |
def run_container(name, image, command=None, environment=None,
ro=None, rw=None, links=None, detach=True, volumes_from=None,
port_bindings=None, log_syslog=False):
binds = ro_rw_to_binds(ro, rw)
log_config = LogConfig(type=LogConfig.types.JSON)
if log_syslog:
log_config = LogConfig(
type=LogConfig.types.SYSLOG,
config={: name})
host_config = _get_docker().create_host_config(binds=binds, log_config=log_config, links=links, volumes_from=volumes_from, port_bindings=port_bindings)
c = _get_docker().create_container(
name=name,
image=image,
command=command,
environment=environment,
volumes=binds_to_volumes(binds),
detach=detach,
stdin_open=False,
tty=False,
ports=list(port_bindings) if port_bindings else None,
host_config=host_config)
try:
_get_docker().start(
container=c[],
)
except APIError as e:
if in e.explanation:
try:
_get_docker().remove_container(name, force=True)
except APIError:
pass
raise PortAllocatedError()
raise
return c
|
Wrapper for docker create_container, start calls
:param log_syslog: bool flag to redirect container's logs to host's syslog
:returns: container info dict or None if container couldn't be created
Raises PortAllocatedError if container couldn't start on the
requested port.
|
24,937 |
def geocode(client, address=None, components=None, bounds=None, region=None,
language=None):
params = {}
if address:
params["address"] = address
if components:
params["components"] = convert.components(components)
if bounds:
params["bounds"] = convert.bounds(bounds)
if region:
params["region"] = region
if language:
params["language"] = language
return client._request("/maps/api/geocode/json", params).get("results", [])
|
Geocoding is the process of converting addresses
(like ``"1600 Amphitheatre Parkway, Mountain View, CA"``) into geographic
coordinates (like latitude 37.423021 and longitude -122.083739), which you
can use to place markers or position the map.
:param address: The address to geocode.
:type address: string
:param components: A component filter for which you wish to obtain a
geocode, for example: ``{'administrative_area': 'TX','country': 'US'}``
:type components: dict
:param bounds: The bounding box of the viewport within which to bias geocode
results more prominently.
:type bounds: string or dict with northeast and southwest keys.
:param region: The region code, specified as a ccTLD ("top-level domain")
two-character value.
:type region: string
:param language: The language in which to return results.
:type language: string
:rtype: list of geocoding results.
|
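A hedged usage sketch. The googlemaps-style Client construction is an assumption (only the _request method is visible in the snippet); the response fields follow the Google Geocoding API format:

# import googlemaps                       # hypothetical client library
# client = googlemaps.Client(key="...")   # API key elided
results = geocode(client, address="1600 Amphitheatre Parkway, Mountain View, CA",
                  region="us", language="en")
if results:
    location = results[0]["geometry"]["location"]
    print(location["lat"], location["lng"])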
24,938 |
def _fill_naked_singles(self):
simple_found = False
for i in utils.range_(self.side):
for j in utils.range_(self.side):
if self[i][j] > 0:
continue
p = self._possibles[i][j]
if len(p) == 1:
self.set_cell(i, j, list(p)[0])
self.solution_steps.append(self._format_step("NAKED", (i, j), self[i][j]))
simple_found = True
elif len(p) == 0:
raise SudokuHasNoSolutionError("Error made! No possible value for ({0},{1})!".format(i + 1, j + 1))
return simple_found
|
Look for naked singles, i.e. cells with only one possible value.
:return: If any Naked Single has been found.
:rtype: bool
|
24,939 |
def check_interactive_docker_worker(link):
errors = []
log.info("Checking for {} {} interactive docker-worker".format(link.name, link.task_id))
try:
if link.task['payload']['features'].get('interactive'):
errors.append("{} is interactive: task.payload.features.interactive!".format(link.name))
if link.task['payload']['env'].get('TASKCLUSTER_INTERACTIVE'):
errors.append("{} is interactive: task.payload.env.TASKCLUSTER_INTERACTIVE!".format(link.name))
except KeyError:
errors.append("check_interactive_docker_worker: {} task definition is malformed!".format(link.name))
return errors
|
Given a task, make sure the task was not defined as interactive.
* ``task.payload.features.interactive`` must be absent or False.
* ``task.payload.env.TASKCLUSTER_INTERACTIVE`` must be absent or False.
Args:
link (LinkOfTrust): the task link we're checking.
Returns:
list: the list of error messages. Success is an empty list.
|
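For illustration, a hypothetical task definition that passes this check; both fields must be absent or falsy, per the docstring above:

task = {
    "payload": {
        "features": {"interactive": False},         # absent or False is OK
        "env": {"TASKCLUSTER_INTERACTIVE": False},  # absent or False is OK
    }
}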
24,940 |
def iter_rows(self):
fileobj = self._fileobj
cls_row = self.cls_row
fields = self.fields
for idx in range(self.prolog.records_count):
data = fileobj.read(1)
marker = struct.unpack('c', data)[0]
is_deleted = marker == b'*'  # '*' marks a deleted record in dbf files (assumed)
if is_deleted:
continue
row_values = []
for field in fields:
val = field.cast(fileobj.read(field.len))
row_values.append(val)
yield cls_row(*row_values)
|
Generator reading .dbf row one by one.
Yields named tuple Row object.
:rtype: Row
|
24,941 |
def selects(self, code, start, end=None):
def _selects(code, start, end):
if end is not None:
return self.data.loc[(slice(pd.Timestamp(start), pd.Timestamp(end)), code), :]
else:
return self.data.loc[(slice(pd.Timestamp(start), None), code), :]
try:
return self.new(_selects(code, start, end), self.type, self.if_fq)
except:
raise ValueError(
.format(
code,
start,
end
)
)
|
Select by code, start, end.
If end is not given, the selection runs to the end of the data.
@2018/06/03 a pandas indexing issue
https://github.com/pandas-dev/pandas/issues/21299
required deduplicating the index first via set_index;
this affected selects, select_time, select_month and get_bar.
@2018/06/04
raise ValueError when the selected time range is out of bounds or the stock does not exist.
@2018/06/04 the pandas indexing issue has been resolved,
so everything has been restored.
|
24,942 |
def init(self):
self.custom.add(add_quasi_dipole_coordinates, )
self.custom.add(add_aacgm_coordinates, )
self.custom.add(calculate_ecef_velocity, )
self.custom.add(add_sc_attitude_vectors, )
self.custom.add(add_iri_thermal_plasma, )
self.custom.add(add_hwm_winds_and_ecef_vectors, )
self.custom.add(add_igrf, )
in_meta = {:,
:}
self.custom.add(project_ecef_vector_onto_sc, , , , , ,
, , ,
meta=[in_meta.copy(), in_meta.copy(), in_meta.copy()])
self.custom.add(project_hwm_onto_sc, )
self.custom.add(add_msis, )
|
Adds custom calculations to orbit simulation.
This routine is run once, and only once, upon instantiation.
Adds quasi-dipole coordinates, velocity calculation in ECEF coords,
adds the attitude vectors of spacecraft assuming x is ram pointing and
z is generally nadir, adds ionospheric parameters from the International
Reference Ionosphere (IRI), as well as simulated winds from the
Horizontal Wind Model (HWM).
|
24,943 |
def calc_signal_power(params):
for i, data_type in enumerate([,,, ]):
if i % SIZE == RANK:
if data_type in [,]:
fname=os.path.join(params.savefolder, data_type+)
else:
fname=os.path.join(params.populations_path, ,
str.split(data_type,)[0] + +
str.split(data_type,)[1] + +
str.split(data_type,)[2] + )
f = h5py.File(fname)
data = f[].value
srate = f[].value
tvec = np.arange(data.shape[1]) * 1000. / srate
slica = (tvec >= ana_params.transient)
data = data[:,slica]
dataT = data.T - data.mean(axis=1)
data = dataT.T
f.close()
PSD=[]
for i in np.arange(len(params.electrodeParams[])):
if ana_params.mlab:
Pxx, freqs=plt.mlab.psd(data[i], NFFT=ana_params.NFFT,
Fs=srate, noverlap=ana_params.noverlap,
window=ana_params.window)
else:
[freqs, Pxx] = hlp.powerspec([data[i]], tbin= 1.,
Df=ana_params.Df, pointProcess=False)
mask = np.where(freqs >= 0.)
freqs = freqs[mask]
Pxx = Pxx.flatten()
Pxx = Pxx[mask]
Pxx = Pxx/tvec[tvec >= ana_params.transient].size**2
PSD +=[Pxx.flatten()]
PSD=np.array(PSD)
f = h5py.File(os.path.join(params.savefolder, ana_params.analysis_folder,
data_type + ana_params.fname_psd),)
f[]=freqs
f[]=PSD
f[]=ana_params.transient
f[]=ana_params.mlab
f[]=ana_params.NFFT
f[]=ana_params.noverlap
f[]=str(ana_params.window)
f[]=str(ana_params.Df)
f.close()
return
|
calculates power spectrum of sum signal for all channels
|
24,944 |
def t_UFIXEDMN(t):
r"ufixed(?P<M>256|248|240|232|224|216|208|200|192|184|176|168|160|152|144|136|128|120|112|104|96|88|80|72|64|56|48|40|32|24|16|8)x(?P<N>80|79|78|77|76|75|74|73|72|71|70|69|68|67|66|65|64|63|62|61|60|59|58|57|56|55|54|53|52|51|50|49|48|47|46|45|44|43|42|41|40|39|38|37|36|35|34|33|32|31|30|29|28|27|26|25|24|23|22|21|20|19|18|17|16|15|14|13|12|11|10|9|8|7|6|5|4|3|2|1)"
M = int(t.lexer.lexmatch.group('M'))
N = int(t.lexer.lexmatch.group('N'))
t.value = ("ufixed", M, N)
return t
|
r"ufixed(?P<M>256|248|240|232|224|216|208|200|192|184|176|168|160|152|144|136|128|120|112|104|96|88|80|72|64|56|48|40|32|24|16|8)x(?P<N>80|79|78|77|76|75|74|73|72|71|70|69|68|67|66|65|64|63|62|61|60|59|58|57|56|55|54|53|52|51|50|49|48|47|46|45|44|43|42|41|40|39|38|37|36|35|34|33|32|31|30|29|28|27|26|25|24|23|22|21|20|19|18|17|16|15|14|13|12|11|10|9|8|7|6|5|4|3|2|1)
|
24,945 |
def get_genus_type(self):
if self._my_genus_type_map is None:
url_path = + self._my_map[]
self._my_genus_type_map = self._get_request(url_path)
return Type(self._my_genus_type_map)
|
Gets the genus type of this object.
return: (osid.type.Type) - the genus type of this object
compliance: mandatory - This method must be implemented.
|
24,946 |
def disassemble_current(self, dwThreadId):
aThread = self.get_thread(dwThreadId)
return self.disassemble_instruction(aThread.get_pc())
|
Disassemble the instruction at the program counter of the given thread.
@type dwThreadId: int
@param dwThreadId: Global thread ID.
The program counter for this thread will be used as the disassembly
address.
@rtype: tuple( long, int, str, str )
@return: The tuple represents an assembly instruction
and contains:
- Memory address of instruction.
- Size of instruction in bytes.
- Disassembly line of instruction.
- Hexadecimal dump of instruction.
|
24,947 |
def remove_user_from_group(group, role, email):
uri = "groups/{0}/{1}/{2}".format(group, role, email)
return __delete(uri)
|
Remove a user from a group the caller owns
Args:
group (str): Group name
role (str) : Role of user for group; either 'member' or 'admin'
email (str): Email of user or group to remove
Swagger:
https://api.firecloud.org/#!/Groups/removeUserFromGroup
|
24,948 |
def set_XY(self, X, Y):
self.update_model(False)
self.set_Y(Y)
self.set_X(X)
self.update_model(True)
|
Set the input / output data of the model
This is useful if we wish to change our existing data but maintain the same model
:param X: input observations
:type X: np.ndarray
:param Y: output observations
:type Y: np.ndarray or ObsAr
|
24,949 |
def merge_labeled_intervals(x_intervals, x_labels, y_intervals, y_labels):
align_check = [x_intervals[0, 0] == y_intervals[0, 0],
x_intervals[-1, 1] == y_intervals[-1, 1]]
if False in align_check:
raise ValueError(
"Time intervals do not align; did you mean to call "
"`adjust_intervals()` first?")
time_boundaries = np.unique(
np.concatenate([x_intervals, y_intervals], axis=0))
output_intervals = np.array(
[time_boundaries[:-1], time_boundaries[1:]]).T
x_labels_out, y_labels_out = [], []
x_label_range = np.arange(len(x_labels))
y_label_range = np.arange(len(y_labels))
for t0, _ in output_intervals:
x_idx = x_label_range[(t0 >= x_intervals[:, 0])]
x_labels_out.append(x_labels[x_idx[-1]])
y_idx = y_label_range[(t0 >= y_intervals[:, 0])]
y_labels_out.append(y_labels[y_idx[-1]])
return output_intervals, x_labels_out, y_labels_out
|
r"""Merge the time intervals of two sequences.
Parameters
----------
x_intervals : np.ndarray
Array of interval times (seconds)
x_labels : list or None
List of labels
y_intervals : np.ndarray
Array of interval times (seconds)
y_labels : list or None
List of labels
Returns
-------
new_intervals : np.ndarray
New interval times of the merged sequences.
new_x_labels : list
New labels for the sequence ``x``
new_y_labels : list
New labels for the sequence ``y``
|
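A small worked example of the merge, assuming the function is imported and called with aligned interval arrays (the values below are purely illustrative):

import numpy as np

x_int = np.array([[0.0, 2.0], [2.0, 4.0]])
x_lab = ["A", "B"]
y_int = np.array([[0.0, 1.0], [1.0, 4.0]])
y_lab = ["a", "b"]

new_int, new_x, new_y = merge_labeled_intervals(x_int, x_lab, y_int, y_lab)
# new_int -> [[0., 1.], [1., 2.], [2., 4.]]
# new_x   -> ['A', 'A', 'B'];  new_y -> ['a', 'b', 'b']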
24,950 |
def delete_password(self, service, username):
items = self._find_passwords(service, username, deleting=True)
if not items:
raise PasswordDeleteError("Password not found")
for current in items:
result = GnomeKeyring.item_delete_sync(current.keyring,
current.item_id)
if result == GnomeKeyring.Result.CANCELLED:
raise PasswordDeleteError("Cancelled by user")
elif result != GnomeKeyring.Result.OK:
raise PasswordDeleteError(result.value_name)
|
Delete the password for the username of the service.
|
24,951 |
def tc_at_grid_col(self, idx):
grid_col = 0
for tc in self.tc_lst:
if grid_col == idx:
return tc
grid_col += tc.grid_span
if grid_col > idx:
raise ValueError( % idx)
raise ValueError()
|
The ``<w:tc>`` element appearing at grid column *idx*. Raises
|ValueError| if no ``w:tc`` element begins at that grid column.
|
24,952 |
def sbd_to_steem(self, sbd=0, price=0, account=None):
if not account:
account = self.mainaccount
if self.check_balances(account):
if sbd == 0:
sbd = self.sbdbal
elif sbd > self.sbdbal:
self.msg.error_message("INSUFFICIENT FUNDS. CURRENT SBD BAL: "
+ str(self.sbdbal))
return False
if price == 0:
price = 1 / self.dex_ticker()[]
try:
self.dex.sell(sbd, "SBD", price, account=account)
except Exception as e:
self.msg.error_message("COULD NOT SELL SBD FOR STEEM: " + str(e))
return False
else:
self.msg.message("TRANSFERED "
+ str(sbd)
+ " SBD TO STEEM AT THE PRICE OF: $"
+ str(price))
return True
else:
return False
|
Uses the ticker to get the lowest ask
and moves the sbd at that price.
|
24,953 |
def clear_widget(self):
if not self.gui_up:
return
canvas = self.c_view.get_canvas()
canvas.delete_all_objects()
self.c_view.redraw(whence=0)
|
Clears the thumbnail display widget of all thumbnails, but does
not remove them from the thumb_dict or thumb_list.
|
24,954 |
def success(self):
self._check_valid()
return self._problem._cp.solution.get_status() in (
self._problem._cp.solution.status.optimal,
self._problem._cp.solution.status.optimal_tolerance,
self._problem._cp.solution.status.MIP_optimal)
|
Return boolean indicating whether a solution was found
|
24,955 |
def declare_api_routes(config):
add_route = config.add_route
add_route(, )
add_route(, )
add_route(, )
add_route(, )
add_route(, )
add_route(, )
add_route(, )
add_route(,
)
add_route(,
)
add_route(, )
add_route(, )
add_route(, )
add_route(, )
add_route(, )
add_route(, )
add_route(, )
|
Declaration of routing
|
24,956 |
def select_storage_for(cls, section_name, storage):
section_storage = storage
storage_name = cls.get_storage_name_for(section_name)
if storage_name:
section_storage = storage.get(storage_name, None)
if section_storage is None:
section_storage = storage[storage_name] = dict()
return section_storage
|
Selects the data storage for a config section within the
:param:`storage`. The primary config section is normally merged into
the :param:`storage`.
:param section_name: Config section (name) to process.
:param storage: Data storage to use.
:return: :param:`storage` or a part of it (as section storage).
|
24,957 |
def _parseStats(self, lines, parse_slabs = False):
info_dict = {}
info_dict[] = {}
for line in lines:
mobj = re.match(, line)
if mobj:
info_dict[mobj.group(1)] = util.parse_value(mobj.group(2), True)
continue
elif parse_slabs:
mobj = re.match(, line)
if mobj:
(slab, key, val) = mobj.groups()[-3:]
if not info_dict[].has_key(slab):
info_dict[][slab] = {}
info_dict[][slab][key] = util.parse_value(val, True)
return info_dict
|
Parse stats output from memcached and return dictionary of stats.
@param lines: Array of lines of input text.
@param parse_slabs: Parse slab stats if True.
@return: Stats dictionary.
|
24,958 |
def _quoteattr(self, attr):
attr = xml_safe(attr)
if isinstance(attr, unicode) and not UNICODE_STRINGS:
attr = attr.encode(self.encoding)
return saxutils.quoteattr(attr)
|
Escape an XML attribute. Value can be unicode.
|
24,959 |
def get_action(self, parent, undo_stack: QUndoStack, sel_range, protocol, view: int):
self.command = ZeroHideAction(protocol, self.following_zeros, view, self.zero_hide_offsets)
action = QAction(self.command.text(), parent)
action.triggered.connect(self.action_triggered)
self.undo_stack = undo_stack
return action
|
:type parent: QTableView
:type undo_stack: QUndoStack
|
24,960 |
def append(self, P, closed=False, itemsize=None, **kwargs):
itemsize = itemsize or len(P)
itemcount = len(P) / itemsize
n, p = len(P), P.shape[-1]
Z = np.tile(P, 2).reshape(2 * len(P), p)
V = np.empty(n, dtype=self.vtype)
V[][1:-1] = Z[0::2][:-2]
V[][:-1] = Z[1::2][:-1]
V[][:-1] = Z[1::2][+1:]
V[][:-2] = Z[0::2][+2:]
for name in self.vtype.names:
if name not in [, , , , ]:
V[name] = kwargs.get(name, self._defaults[name])
V = (V.reshape(n / itemsize, itemsize)[:, :-1])
if closed:
V[][:, 0] = V[][:, -1]
V[][:, -1] = V[][:, 0]
else:
V[][:, 0] = V[][:, 0]
V[][:, -1] = V[][:, -1]
V = V.ravel()
V = V.ravel()
n = itemsize
if closed:
I = np.resize(
np.array([0, 1, 2, 1, 2, 3], dtype=np.uint32), n * 2 * 3)
I += np.repeat(4 * np.arange(n, dtype=np.uint32), 6)
I[-6:] = 4 * n - 6, 4 * n - 5, 0, 4 * n - 5, 0, 1
else:
I = np.resize(
np.array([0, 1, 2, 1, 2, 3], dtype=np.uint32), (n - 1) * 2 * 3)
I += np.repeat(4 * np.arange(n - 1, dtype=np.uint32), 6)
I = I.ravel()
if self.utype:
U = np.zeros(itemcount, dtype=self.utype)
for name in self.utype.names:
if name not in ["__unused__"]:
U[name] = kwargs.get(name, self._defaults[name])
else:
U = None
Collection.append(self, vertices=V, uniforms=U,
indices=I, itemsize=itemsize * 4 - 4)
|
Append a new set of vertices to the collection.
For kwargs argument, n is the number of vertices (local) or the number
of item (shared)
Parameters
----------
P : np.array
Vertices positions of the path(s) to be added
closed: bool
Whether path(s) is/are closed
itemsize: int or None
Size of an individual path
caps : list, array or 2-tuple
Path start /end cap
join : list, array or float
path segment join
color : list, array or 4-tuple
Path color
miter_limit : list, array or float
Miter limit for join
linewidth : list, array or float
Path linewidth
antialias : list, array or float
Path antialias area
|
24,961 |
def _setup_rpc(self):
self._state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
report_interval = CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
|
Setup the RPC client for the current agent.
|
24,962 |
def _make_package(args):
from lingpy.sequence.sound_classes import token2class
from lingpy.data import Model
columns = [, , , , , ]
bipa = TranscriptionSystem()
for src, rows in args.repos.iter_sources(type=):
args.log.info(.format(src[]))
uritemplate = URITemplate(src[]) if src[] else None
out = [[, , , ,
, ] + columns]
graphemes = set()
for row in rows:
if row[] in graphemes:
args.log.warn(.format(row[]))
continue
graphemes.add(row[])
if not row[]:
bipa_sound = bipa[row[]]
explicit =
else:
bipa_sound = bipa[row[]]
explicit =
generated = if bipa_sound.generated else
if is_valid_sound(bipa_sound, bipa):
bipa_grapheme = bipa_sound.s
bipa_name = bipa_sound.name
else:
bipa_grapheme, bipa_name = ,
url = uritemplate.expand(**row) if uritemplate else row.get(, )
out.append(
[bipa_grapheme, bipa_name, generated, explicit, row[],
url] + [
row.get(c, ) for c in columns])
found = len([o for o in out if o[0] != ])
args.log.info(.format(
found, len(out), found / len(out) * 100))
with UnicodeWriter(
pkg_path(, .format(src[])), delimiter=
) as writer:
writer.writerows(out)
count = 0
with UnicodeWriter(pkg_path(, ), delimiter=) as writer:
writer.writerow([, ] + SOUNDCLASS_SYSTEMS)
for grapheme, sound in sorted(bipa.sounds.items()):
if not sound.alias:
writer.writerow(
[sound.name, grapheme] + [token2class(
grapheme, Model(cls)) for cls in SOUNDCLASS_SYSTEMS])
count += 1
args.log.info(.format(count))
|
Prepare transcriptiondata from the transcription sources.
|
24,963 |
def get_parameter_names(self, include_frozen=False):
if include_frozen:
return self.parameter_names
return tuple(p
for p, f in zip(self.parameter_names, self.unfrozen_mask)
if f)
|
Get a list of the parameter names
Args:
include_frozen (Optional[bool]): Should the frozen parameters be
included in the returned value? (default: ``False``)
|
24,964 |
def setup(app):
# role names inferred from the handler names; original literals were lost
app.add_role('bokeh-commit', bokeh_commit)
app.add_role('bokeh-issue', bokeh_issue)
app.add_role('bokeh-pull', bokeh_pull)
app.add_role('bokeh-tree', bokeh_tree)
|
Required Sphinx extension setup function.
|
24,965 |
def getActionReflexRules(self, analysis, wf_action):
self.analyses_catalog = getToolByName(self, CATALOG_ANALYSIS_LISTING)
action_sets = self.getReflexRules()
rules_list = []
condition = False
for action_set in action_sets:
if action_set.get(, ) == wf_action:
condition = self._areConditionsMet(action_set, analysis)
if condition:
actions = action_set.get(, [])
for act in actions:
act[] = action_set.get(, )
act[] = self.Title()
rules_list.append(act)
return rules_list
|
This function returns a list of dictionaries with the rules to be done
for the analysis service.
:analysis: the analysis full object which we want to obtain the
rules for.
:wf_action: it is the workflow action that the analysis is doing, we
have to act in consideration of the action_set 'trigger' variable
:returns: [{'action': 'duplicate', ...}, {,}, ...]
|
24,966 |
def lam_at_index(self, lidx):
if self.path_ is None:
return self.lam * self.lam_scale_
return self.lam * self.lam_scale_ * self.path_[lidx]
|
Compute the scaled lambda used at index lidx.
|
24,967 |
def add_warning (self, s, tag=None):
item = (tag, s)
if item not in self.warnings and \
tag not in self.aggregate.config["ignorewarnings"]:
self.warnings.append(item)
|
Add a warning string.
|
24,968 |
def add_sms_spec_to_request(self, req, federation=, loes=None,
context=, url=):
if federation:
if not isinstance(federation, list):
federation = [federation]
if not url:
url = "{}/getsmscol/{}/{}".format(self.mdss_endpoint, context,
quote_plus(self.entity_id))
http_resp = self.httpcli(method=, url=url, verify=self.verify_ssl)
if http_resp.status_code >= 400:
raise ConnectionError(.format(http_resp.text))
msg = JsonWebToken().from_jwt(http_resp.text, keyjar=self.mdss_keys)
if msg[] != self.mdss_owner:
raise KeyError()
if federation:
_sms = dict(
[(fo, _ms) for fo, _ms in msg.items() if fo in federation])
else:
_sms = msg.extra()
try:
del _sms[]
except KeyError:
pass
req.update({: _sms})
return req
|
Add signed metadata statements to the request
:param req: The request so far
:param federation: If only signed metadata statements from a specific
set of federations should be included this is the set.
:param loes: - not used -
:param context: What kind of request/response it is: 'registration',
'discovery' or 'response'. The later being registration response.
:param url: Just for testing !!
:return: A possibly augmented request.
|
24,969 |
def pipe(self, other_task):
other_task._source = self
self._listeners.append(PipeListener(other_task))
return other_task
|
Add a pipe listener to the execution of this task. The
output of this task is required to be an iterable. Each item in
the iterable will be queued as the sole argument to an execution
of the listener task.
Can also be written as::
pipeline = task1 | task2
|
24,970 |
def remove_im_params(model, im):
for param in model.parameters:
try:
im.remove_node(param.name)
except:
pass
|
Remove parameter nodes from the influence map.
Parameters
----------
model : pysb.core.Model
PySB model.
im : networkx.MultiDiGraph
Influence map.
Returns
-------
networkx.MultiDiGraph
Influence map with the parameter nodes removed.
|
24,971 |
def size_in_bytes(self, offset, timestamp, key, value, headers=None):
assert not headers, "Headers not supported in v0/v1"
magic = self._magic
return self.LOG_OVERHEAD + self.record_size(magic, key, value)
|
Actual size of message to add
|
24,972 |
def _check_resources(name, expr, resources):
if expr is None:
return
bound = expr._resources()
if not bound and resources is None:
raise ValueError( % name)
if bound and resources:
raise ValueError(
% name,
)
|
Validate that the expression and resources passed match up.
Parameters
----------
name : str
The name of the argument we are checking.
expr : Expr
The potentially bound expr.
resources
The explicitly passed resources to compute expr.
Raises
------
ValueError
If the resources do not match for an expression.
|
24,973 |
async def setvolume(self, value):
self.logger.debug("volume command")
if self.state != :
return
logger.debug("Volume command received")
if value == '+':
if self.volume < 100:
self.statuslog.debug("Volume up")
self.volume = (10 * (self.volume // 10)) + 10
self.volumelog.info(str(self.volume))
try:
self.streamer.volume = self.volume / 100
except AttributeError:
pass
else:
self.statuslog.warning("Already at maximum volume")
elif value == '-':
if self.volume > 0:
self.statuslog.debug("Volume down")
self.volume = (10 * ((self.volume + 9) // 10)) - 10
self.volumelog.info(str(self.volume))
try:
self.streamer.volume = self.volume / 100
except AttributeError:
pass
else:
self.statuslog.warning("Already at minimum volume")
else:
try:
value = int(value)
except ValueError:
self.statuslog.error("Volume argument must be +, -, or a %")
else:
if 0 <= value <= 200:
self.statuslog.debug("Setting volume")
self.volume = value
self.volumelog.info(str(self.volume))
try:
self.streamer.volume = self.volume / 100
except AttributeError:
pass
else:
self.statuslog.error("Volume must be between 0 and 200")
self.write_volume()
|
The volume command
Args:
value (str): The value to set the volume to
|
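The '+' and '-' branches snap the volume to the next multiple of ten; a quick worked example of that arithmetic (the starting volume 47 is assumed purely for illustration):

volume = 47
up = (10 * (volume // 10)) + 10          # 50: round down to a multiple of 10, then add 10
down = (10 * ((volume + 9) // 10)) - 10  # 40: round up to a multiple of 10, then subtract 10
print(up, down)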
24,974 |
def units(self):
return {'length': hash_to_unit(self.python_unit_l), 'mass': hash_to_unit(self.python_unit_m), 'time': hash_to_unit(self.python_unit_t)}
|
Tuple of the units for length, time and mass. Can be set in any order, and strings are not case-sensitive. See ipython_examples/Units.ipynb for more information. You can check the units' exact values and add Additional units in rebound/rebound/units.py. Units should be set before adding particles to the simulation (will give error otherwise).
Currently supported Units
-------------------------
Times:
Hr : Hours
Yr : Julian years
Jyr : Julian years
Sidereal_yr : Sidereal year
Yr2pi : Year divided by 2pi, with year defined as orbital period of planet at 1AU around 1Msun star
Kyr : Kiloyears (Julian)
Myr : Megayears (Julian)
Gyr : Gigayears (Julian)
Lengths:
M : Meters
Cm : Centimeters
Km : Kilometers
AU : Astronomical Units
Masses:
Kg : Kilograms
Msun : Solar masses
Mmercury : Mercury masses
Mvenus : Venus masses
Mearth : Earth masses
Mmars : Mars masses
Mjupiter : Jupiter masses
Msaturn : Saturn masses
Muranus : Uranus masses
Mpluto : Pluto masses
Examples
--------
>>> sim = rebound.Simulation()
>>> sim.units = ('yr', 'AU', 'Msun')
|
24,975 |
def getChildTimeoutValue(self):
print % self.port
childTimeout = self.__sendCommand(WPANCTL_CMD + )[0]
return int(childTimeout)
|
get child timeout
|
24,976 |
def getDarkCurrentFunction(exposuretimes, imgs, **kwargs):
exposuretimes, imgs = getDarkCurrentAverages(exposuretimes, imgs)
offs, ascent, rmse = getLinearityFunction(exposuretimes, imgs, **kwargs)
return offs, ascent, rmse
|
get dark current function from given images and exposure times
|
24,977 |
def show(self):
args, ret = self.method.get_descriptor()[1:].split(")")
if self.code:
args = args.split(" ")
reg_len = self.code.get_registers_size()
nb_args = len(args)
start_reg = reg_len - nb_args
args = ["{} v{}".format(a, start_reg + i) for i, a in enumerate(args)]
print("METHOD {} {} {} ({}){}".format(
self.method.get_class_name(),
self.method.get_access_flags_string(),
self.method.get_name(),
", ".join(args), ret))
bytecode.PrettyShow(self, self.basic_blocks.gets(), self.method.notes)
|
Prints the content of this method to stdout.
This will print the method signature and the decompiled code.
|
24,978 |
def isubset(self, *keys):
return ww.g((key, self[key]) for key in keys)
|
Return key, self[key] as generator for key in keys.
Raise KeyError if a key does not exist
Args:
keys: Iterable containing keys
Example:
>>> from ww import d
>>> list(d({1: 1, 2: 2, 3: 3}).isubset(1, 3))
[(1, 1), (3, 3)]
|
24,979 |
def add_arguments(self, parser):
parser.add_argument(, default=self.length,
type=int, help=_( % self.length))
parser.add_argument(, default=self.allowed_chars,
type=str, help=_( % self.allowed_chars))
|
Define optional arguments with default values
|
24,980 |
def main(client, adgroup_id):
adgroup_criterion_service = client.GetService(
, version=)
helper = ProductPartitionHelper(adgroup_id)
root = helper.CreateSubdivision()
new_product_canonical_condition = {
: ,
:
}
used_product_canonical_condition = {
: ,
:
}
other_product_canonical_condition = {
: ,
}
helper.CreateUnit(root, new_product_canonical_condition, 200000)
helper.CreateUnit(root, used_product_canonical_condition, 100000)
other_condition = helper.CreateSubdivision(
root, other_product_canonical_condition)
cool_product_brand = {
: ,
:
}
cheap_product_brand = {
: ,
:
}
other_product_brand = {
: ,
}
helper.CreateUnit(other_condition, cool_product_brand, 900000)
helper.CreateUnit(other_condition, cheap_product_brand, 10000)
other_brand = helper.CreateSubdivision(other_condition, other_product_brand)
luggage_category = {
: ,
: ,
:
}
generic_category = {
: ,
: ,
}
helper.CreateUnit(other_brand, luggage_category, 750000)
helper.CreateUnit(other_brand, generic_category, 110000)
result = adgroup_criterion_service.mutate(helper.GetOperations())
children = {}
root_node = None
for adgroup_criterion in result[]:
children[adgroup_criterion[][]] = []
if in adgroup_criterion[]:
children[adgroup_criterion[][]].append(
adgroup_criterion[])
else:
root_node = adgroup_criterion[]
DisplayTree(root_node, children)
|
Runs the example.
|
24,981 |
def clone_name(s, ca=False):
if not ca:
return s[:-1]
if s[0] == :
return s[2:]
return s.rstrip()
|
>>> clone_name("120038881639")
"0038881639"
>>> clone_name("GW11W6RK01DAJDWa")
"GW11W6RK01DAJDW"
|
24,982 |
def get_report(time=, threshold=, online=False):
if threshold not in THRESHOLDS:
raise USGSException(.format(threshold))
if time not in TIMES:
raise USGSException(.format(time))
try:
result = _get_report_string(time, threshold, online)
except HTTPError as e:
raise USGSException("Internet error ({}): {}".format(e.code, e.reason))
if result == "":
formatted_threshold = if threshold not in (, ) else threshold.title()
result = Report._from_json({: {: .format(formatted_threshold, time.title())}})
if _USE_CLASSES:
return result
else:
return result._to_dict()
elif result:
try:
json_result = _from_json(result)
except ValueError:
raise USGSException("The response from the server didn{}{}').".format(time, threshold))
|
Retrieves a new Report about recent earthquakes.
:param str time: A string indicating the time range of earthquakes to report. Must be either "hour" (only earthquakes in the past hour), "day" (only earthquakes that happened today), "week" (only earthquakes that happened in the past 7 days), or "month" (only earthquakes that happened in the past 30 days).
:param str threshold: A string indicating what kind of earthquakes to report. Must be either "significant" (only significant earthquakes), "all" (all earthquakes, regardless of significance), "4.5", "2.5", or "1.0". Note that for the last three, all earthquakes at and above that level will be reported.
:returns: :ref:`Report`
|
24,983 |
def rc(self):
ntx = self.copy()
newstrand = '+'
if ntx.strand == '+': newstrand = '-'
ntx._options = ntx._options._replace(direction=newstrand)
return ntx
|
Flip the direction
|
24,984 |
def train(model, X_train=None, Y_train=None, save=False,
predictions_adv=None, evaluate=None,
args=None, rng=None, var_list=None,
attack=None, attack_args=None):
assert isinstance(model, Model)
args = _ArgsWrapper(args or {})
if ((attack is None) != (attack_args is None)):
raise ValueError("attack and attack_args must be "
"passed together.")
if X_train is None or Y_train is None:
raise ValueError("X_train argument and Y_train argument "
"must be supplied.")
assert args.nb_epochs, "Number of epochs was not given in args dict"
assert args.learning_rate, "Learning rate was not given in args dict"
assert args.batch_size, "Batch size was not given in args dict"
if save:
assert args.train_dir, "Directory for save was not given in args dict"
assert args.filename, "Filename for save was not given in args dict"
if rng is None:
rng = np.random.RandomState()
tfe = tf.contrib.eager
optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
batch_x = tfe.Variable(X_train[0:args.batch_size], dtype=tf.float32)
batch_y = tfe.Variable(Y_train[0:args.batch_size], dtype=tf.float32)
for epoch in xrange(args.nb_epochs):
nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_train)
index_shuf = list(range(len(X_train)))
rng.shuffle(index_shuf)
prev = time.time()
for batch in range(nb_batches):
start, end = batch_indices(
batch, len(X_train), args.batch_size)
tf.assign(batch_x, X_train[index_shuf[start:end]])
tf.assign(batch_y, Y_train[index_shuf[start:end]])
with tf.GradientTape() as tape:
loss_clean_obj = LossCrossEntropy(model, smoothing=0.)
loss_clean = loss_clean_obj.fprop(x=batch_x, y=batch_y)
loss = loss_clean
if attack is not None:
batch_adv_x = attack.generate(batch_x, **attack_args)
loss_adv_obj = LossCrossEntropy(model, smoothing=0.)
loss_adv = loss_adv_obj.fprop(x=batch_adv_x, y=batch_y)
loss = (loss_clean + loss_adv) / 2.0
model_variables = model.get_params()
grads = tape.gradient(loss, model_variables)
optimizer.apply_gradients(zip(grads, model_variables))
assert end >= len(X_train)
cur = time.time()
_logger.info("Epoch " + str(epoch) + " took " +
str(cur - prev) + " seconds")
if evaluate is not None:
evaluate()
if save:
save_path = os.path.join(args.train_dir, args.filename)
saver = tf.train.Saver()
saver.save(save_path, model_variables)
_logger.info("Completed model training and saved at: " +
str(save_path))
else:
_logger.info("Completed model training.")
return True
|
Train a TF Eager model
:param model: cleverhans.model.Model
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate`,
`batch_size`
If save is True, should also contain 'train_dir'
and 'filename'
:param rng: Instance of numpy.random.RandomState
:param var_list: List of variables to train.
:param attack: Instance of the class cleverhans.attacks.attacks_eager
:param attack_args: Parameters required for the attack.
:return: True if model trained
|
24,985 |
def generate_protocol(self):
self.offsetX = int(self.sweepSize/64)
self.protoX,self.protoY=[0,self.sweepX[-1]],[self.holding,self.holding]
self.protoSeqX,self.protoSeqY=[0],[self.holding]
return
proto=self.header[][self.channel]
self.protoX,self.protoY=[] ,[]
self.protoX.append(0)
self.protoY.append(self.holding)
for step in proto:
dX = proto[step][]
Y = proto[step][]+proto[step][]*self.sweep
self.protoX.append(self.protoX[-1])
self.protoY.append(Y)
self.protoX.append(self.protoX[-1]+dX)
self.protoY.append(Y)
if self.header[][0][]:
finalVal=self.protoY[-1]
self.protoX.append(self.protoX[-1])
self.protoY.append(finalVal)
self.protoX.append(self.sweepSize)
self.protoY.append(finalVal)
for i in range(1,len(self.protoX)-1):
self.protoX[i]=self.protoX[i]+self.offsetX
self.protoSeqY=[self.protoY[0]]
self.protoSeqX=[self.protoX[0]]
for i in range(1,len(self.protoY)):
if not self.protoY[i]==self.protoY[i-1]:
self.protoSeqY.append(self.protoY[i])
self.protoSeqX.append(self.protoX[i])
if self.protoY[0]!=self.protoY[1]:
self.protoY.insert(1,self.protoY[0])
self.protoX.insert(1,self.protoX[1])
self.protoY.insert(1,self.protoY[0])
self.protoX.insert(1,self.protoX[0]+self.offsetX/2)
self.protoSeqY.append(finalVal)
self.protoSeqX.append(self.sweepSize)
self.protoX=np.array(self.protoX)/self.pointsPerSec
self.protoY=np.array(self.protoY)
|
Recreate the command stimulus (protocol) for the current sweep.
It's not stored point by point (that's a waste of time and memory!)
Instead it's stored as a few (x,y) points which can be easily graphed.
TODO: THIS
for segment in abf.ABFreader.read_protocol():
for analogsignal in segment.analogsignals:
print(analogsignal)
plt.plot(analogsignal)
plt.show()
plt.close('all')
|
24,986 |
def _get_column(cls, name):
if name == 'pk':
return cls._meta.get_field(cls._meta.pk.name)
return cls._columns[name]
|
Based on cqlengine.models.BaseModel._get_column.
But to work with 'pk'
|
24,987 |
def _rebin(self,xdx,rebin):
x = xdx[0]
dx = xdx[1]
rebin = int(rebin)
if rebin <= 1:
return (x,dx)
lenx = len(x)
x_rebin = []
dx_rebin = []
dx[dx==0] = np.inf
for i in np.arange(0,lenx,rebin):
w = 1./dx[i:i+rebin-1]**2
wsum = np.sum(w)
if wsum == 0:
x_rebin.append(np.mean(x[i:i+rebin-1]))
dx_rebin.append(np.std(x[i:i+rebin-1]))
else:
x_rebin.append(np.sum(x[i:i+rebin-1]*w)/wsum)
dx_rebin.append(1./wsum**0.5)
return np.array([x_rebin,dx_rebin])
|
Rebin array x with weights 1/dx**2 by factor rebin.
Inputs:
xdx = [x,dx]
rebin = int
Returns [x,dx] after rebinning.
|
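For reference, a small standalone sketch of the weighted-mean arithmetic the docstring describes (weights w = 1/dx**2); it illustrates the intended calculation rather than calling the method itself:

import numpy as np

x = np.array([1.0, 3.0])
dx = np.array([0.5, 1.0])
w = 1.0 / dx**2                    # [4., 1.]
x_bin = np.sum(x * w) / np.sum(w)  # (4 + 3) / 5 = 1.4
dx_bin = 1.0 / np.sum(w)**0.5      # 1 / sqrt(5) ~ 0.447
print(x_bin, dx_bin)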
24,988 |
def add_nest(self, name=None, **kw):
def deco(func):
self.add(name or func.__name__, func, **kw)
return func
return deco
|
A simple decorator which wraps :meth:`nestly.core.Nest.add`.
|
24,989 |
def cancel(batch_fn, cancel_fn, ops):
canceled_ops = []
error_messages = []
max_batch = 256
total_ops = len(ops)
for first_op in range(0, total_ops, max_batch):
batch_canceled, batch_messages = _cancel_batch(
batch_fn, cancel_fn, ops[first_op:first_op + max_batch])
canceled_ops.extend(batch_canceled)
error_messages.extend(batch_messages)
return canceled_ops, error_messages
|
Cancel operations.
Args:
batch_fn: API-specific batch function.
cancel_fn: API-specific cancel function.
ops: A list of operations to cancel.
Returns:
A list of operations canceled and a list of error messages.
|
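The loop above simply walks the operation list in chunks of at most 256 before delegating to _cancel_batch; a tiny standalone illustration of that slicing (the 600 hypothetical operations are placeholders):

ops = list(range(600))   # hypothetical list of operations
max_batch = 256
batches = [ops[i:i + max_batch] for i in range(0, len(ops), max_batch)]
print([len(b) for b in batches])  # [256, 256, 88]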
24,990 |
def coverage(self,
container: Container,
*,
instrument: bool = True
) -> TestSuiteCoverage:
uid = container.uid
logger.info("Fetching coverage information for container: %s",
uid)
uri = .format(uid)
r = self.__api.post(uri,
params={: if instrument else })
if r.status_code == 200:
jsn = r.json()
coverage = TestSuiteCoverage.from_dict(jsn)
logger.info("Fetched coverage information for container: %s",
uid)
return coverage
try:
self.__api.handle_erroneous_response(r)
except exceptions.BugZooException as err:
logger.exception("Failed to fetch coverage information for container %s: %s", uid, err.message)
raise
except Exception as err:
logger.exception("Failed to fetch coverage information for container %s due to unexpected failure: %s", uid, err)
raise
|
Computes complete test suite coverage for a given container.
Parameters:
container: the container for which coverage should be computed.
instrument: if set to True, the program will be instrumented before
coverage is computed.
|
24,991 |
def handle_message(received_message, control_plane_sockets, data_plane_sockets):
logger.debug(u"Handling %s from %s",
received_message.message.__class__.__name__,
received_message.source[0])  # log message text reconstructed; original literal was lost
try:
if isinstance(received_message.message, MapRequestMessage):
handle_map_request(received_message, control_plane_sockets, data_plane_sockets)
elif isinstance(received_message.message, MapReplyMessage):
handle_map_reply(received_message, control_plane_sockets, data_plane_sockets)
elif isinstance(received_message.message, MapNotifyMessage):
handle_map_notify(received_message, control_plane_sockets, data_plane_sockets)
elif isinstance(received_message.message, MapRegisterMessage):
handle_map_register(received_message, control_plane_sockets, data_plane_sockets)
elif isinstance(received_message.message, MapReferralMessage):
handle_map_referral(received_message, control_plane_sockets, data_plane_sockets)
elif isinstance(received_message.message, EncapsulatedControlMessage):
if isinstance(received_message.inner_message, MapRequestMessage):
if received_message.message.ddt_originated:
handle_ddt_map_request(received_message, control_plane_sockets, data_plane_sockets)
else:
handle_enc_map_request(received_message, control_plane_sockets, data_plane_sockets)
else:
logger.warning("ECM does not contain a map-request in message %d", received_message.message_nr)
elif isinstance(received_message.message, InfoMessage):
handle_info_message(received_message, control_plane_sockets, data_plane_sockets)
else:
logger.warning("Unknown content in message %d", received_message.message_nr)
except:
logger.exception("Unexpected exception while handling message %d", received_message.message_nr)
|
Handle a LISP message. The default handle method determines the type
of message and delegates it to the more specific method
|
24,992 |
def reset_and_halt(self, reset_type=None):
delegateResult = self.call_delegate('will_reset', core=self, reset_type=reset_type)
if not delegateResult:
self.halt()
demcr = self.read_memory(CortexM.DEMCR)
if not delegateResult:
self.write_memory(CortexM.DEMCR, demcr | CortexM.DEMCR_VC_CORERESET)
self.reset(reset_type)
with timeout.Timeout(2.0) as t_o:
while t_o.check():
if self.get_state() not in (Target.TARGET_RESET, Target.TARGET_RUNNING):
break
sleep(0.01)
xpsr = self.read_core_register('xpsr')
if xpsr & self.XPSR_THUMB == 0:
self.write_core_register('xpsr', xpsr | self.XPSR_THUMB)
self.call_delegate('did_reset', core=self, reset_type=reset_type)
self.write_memory(CortexM.DEMCR, demcr)
|
perform a reset and stop the core on the reset handler
|
24,993 |
def _tofile(self, fh, pam=False):
fh.seek(0)
fh.write(self._header(pam))
data = self.asarray(copy=False)
if self.maxval == 1:
data = numpy.packbits(data, axis=-1)
data.tofile(fh)
|
Write Netpbm file.
|
24,994 |
def __write(path, data, mode="w"):
with open(path, mode) as data_file:
data = json.dumps(data, indent=4)
data_file.write(data)
return data
|
Writes to a File. Returns the data written.
path - (string) path to the file to write to.
data - (json) data from a request.
mode - (string) mode to open the file in. Default to 'w'. Overwrites.
|
24,995 |
def _logmessage_transform(cls, s, by=2):
if len(s) >= by:
return s[by:].strip()
return s.strip()
|
Preprocess/cleanup a bzr log message before parsing
Args:
s (str): log message string
by (int): cutoff threshold for log message length
Returns:
str: preprocessed log message string
|
24,996 |
def get_river_index(self, river_id):
try:
return np.where(self.get_river_id_array() == river_id)[0][0]
except IndexError:
raise IndexError("ERROR: River ID {0} not found in dataset "
"...".format(river_id))
|
This method retrieves the river index in the netCDF
dataset corresponding to the river ID.
Parameters
----------
river_id: int
The ID of the river segment.
Returns
-------
int:
The index of the river ID's in the file.
Example::
from RAPIDpy import RAPIDDataset
path_to_rapid_qout = '/path/to/Qout.nc'
river_id = 53458
with RAPIDDataset(path_to_rapid_qout) as qout_nc:
river_index = qout_nc.get_river_index(river_id)
|
24,997 |
def remove_colormap(self, removal_type):
with _LeptonicaErrorTrap():
return Pix(
lept.pixRemoveColormapGeneral(self._cdata, removal_type, lept.L_COPY)
)
|
Remove a palette (colormap); if no colormap, returns a copy of this
image
removal_type - any of lept.REMOVE_CMAP_*
|
24,998 |
def get_create_option(self, context, q):
create_option = []
display_create_option = False
if self.create_field and q:
page_obj = context.get(, None)
if page_obj is None or page_obj.number == 1:
display_create_option = True
if display_create_option and self.has_add_permission(self.request):
for s in Location.objects.filter(
Q(
Q(name__istartswith=q) & Q(transactionparty__isnull=True)
)
):
create_option += [{
: % s.id,
: _() % {: s.name},
: True,
}]
for s in StaffMember.objects.filter(
Q(
(Q(firstName__istartswith=q) | Q(lastName__istartswith=q)) &
Q(transactionparty__isnull=True)
)
):
create_option += [{
: % s.id,
: _() % {: s.fullName},
: True,
}]
for s in User.objects.filter(
Q(
(Q(first_name__istartswith=q) | Q(last_name__istartswith=q)) &
Q(staffmember__isnull=True) & Q(transactionparty__isnull=True)
)
):
create_option += [{
: % s.id,
: _() % {: s.get_full_name()},
: True,
}]
create_option += [{
: q,
: _() % {: q},
: True,
}]
return create_option
|
Form the correct create_option to append to results.
|
24,999 |
def remove_all_cts_records_by(file_name, crypto_idfp):
db = XonoticDB.load_path(file_name)
db.remove_all_cts_records_by(crypto_idfp)
db.save(file_name)
|
Remove all cts records set by player with CRYPTO_IDFP
|