Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
28,400 |
def tags(self, ticket_id):
return self._query_zendesk(self.endpoint.tags, 'tag', id=ticket_id)
|
Lists the most popular recent tags in decreasing popularity from a specific ticket.
|
28,401 |
def p_property_list(self, p):
if len(p) == 2:
p[0] = [p[1]]
else:
p[1].append(p[3])
p[0] = p[1]
|
property_list : property_assignment
| property_list COMMA property_assignment
|
28,402 |
def exc_message(exc_info):
exc = exc_info[1]
if exc is None:
result = exc_info[0]
else:
try:
result = str(exc)
except UnicodeEncodeError:
try:
result = unicode(exc)
except UnicodeError:
result = exc.args[0]
return result
|
Return the exception's message.
|
28,403 |
def update(self):
self.holder = siget(self.holder.FullName)
for key, value in self.__dict__.iteritems():
key = self.namespace + key
if self._validate_key(key):
if not self.holder.Parameters(key):
self.holder.AddParameter3(key, C.siString)
self.holder.Parameters(key).Value = encode(value)
|
This method should be called when you want to ensure all cached attributes
are in sync with the actual object attributes at runtime.
This happens because attributes could store mutable objects and be
modified outside the scope of this class.
The most common idiom that isn't automagically caught is mutating a list
or dictionary. Let's say a 'user' object has an attribute named 'friends'
containing a list; calling 'user.friends.append(new_friend)' only gets the
attribute, so SIWrapper isn't aware that the returned object was modified
and the cached data is not updated.
|
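The idiom described above is easy to reproduce outside of SIWrapper. Below is a minimal standalone sketch (hypothetical `Cache`/`User` classes, not part of this dataset row) showing why in-place mutation of a cached list goes unnoticed until an explicit update step runs:

```python
class Cache:
    """Snapshot an object's attributes; mutations of the live object are not tracked."""
    def __init__(self, obj):
        self.obj = obj
        self.snapshot = {k: list(v) if isinstance(v, list) else v
                         for k, v in vars(obj).items()}

    def is_stale(self):
        # Only an explicit comparison (the equivalent of update()) notices the change.
        return any(getattr(self.obj, k) != v for k, v in self.snapshot.items())


class User:
    def __init__(self):
        self.friends = []


user = User()
cache = Cache(user)
user.friends.append("new_friend")   # attribute access + in-place mutation, no assignment
print(cache.is_stale())             # True
```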
28,404 |
def load(theTask, canExecute=True, strict=True, defaults=False):
return teal(theTask, parent=None, loadOnly=True, returnAs="dict",
canExecute=canExecute, strict=strict, errorsToTerm=True,
defaults=defaults)
|
Shortcut to load TEAL .cfg files for non-GUI access where
loadOnly=True.
|
28,405 |
def setup_data_split(X, y, tokenizer, proc_data_dir, **kwargs):
X_train, X_val, X_test, y_train, y_val, y_test = split_data(X, y)
tokenizer.build_vocab(X_train)
process_save(X_train, y_train, tokenizer, path.join(
proc_data_dir, ), train=True, **kwargs)
process_save(X_val, y_val, tokenizer, path.join(
proc_data_dir, ), **kwargs)
process_save(X_test, y_test, tokenizer, path.join(
proc_data_dir, ), **kwargs)
|
Setup data while splitting into a training, validation, and test set.
Args:
X: text data,
y: data labels,
tokenizer: A Tokenizer instance
proc_data_dir: Directory for the split and processed data
|
28,406 |
def negated(self):
op = QueryCompound.Op.And if self.__op == QueryCompound.Op.Or else QueryCompound.Op.Or
return QueryCompound(*self.__queries, op=op)
|
Negates this instance and returns it.
:return self
|
28,407 |
def __getBarFCName(pressure):
if pressure is None:
return None
press = __to_float1(pressure)
if press < 974:
return "Thunderstorms"
if press < 990:
return "Stormy"
if press < 1002:
return "Rain"
if press < 1010:
return "Cloudy"
if press < 1022:
return "Unstable"
if press < 1035:
return "Stable"
return "Very dry"
|
Parse the pressure and return FC (String).
|
28,408 |
def flatten(*caches):
return list(OrderedDict.fromkeys(e for c in caches for e in c))
|
Flatten a nested list of cache entries
Parameters
----------
*caches : `list`
One or more lists of file paths
(`str` or :class:`~lal.utils.CacheEntry`).
Returns
-------
flat : `list`
A flat `list` containing the unique set of entries across
each input.
|
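This entry's function is self-contained apart from the `OrderedDict` import, so its behaviour can be checked directly; a small usage sketch (the file names here are made up):

```python
from collections import OrderedDict

def flatten(*caches):
    return list(OrderedDict.fromkeys(e for c in caches for e in c))

# Duplicates across the inputs are dropped; first-seen order is preserved.
print(flatten(["a.gwf", "b.gwf"], ["b.gwf", "c.gwf"]))
# ['a.gwf', 'b.gwf', 'c.gwf']
```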
28,409 |
def _check_args(self, source):
path = [source]
args = self.parsed_yaml.get('args', {})
self._assert_struct_type(args, 'args', (dict, list), path)
path.append('args')
if isinstance(args, dict):
for argn, argattrs in args.items():
self._check_one_arg(path, argn, argattrs)
else:
for argdict in args:
self._assert_command_dict(argdict, , path)
argn, argattrs = list(argdict.items())[0]
self._check_one_arg(path, argn, argattrs)
|
Validate the argument section.
Args may be either a dict or a list (to allow multiple positional args).
|
28,410 |
def max_await_time_ms(self, max_await_time_ms):
if (not isinstance(max_await_time_ms, integer_types)
and max_await_time_ms is not None):
raise TypeError("max_await_time_ms must be an integer or None")
self.__check_okay_to_chain()
if self.__query_flags & CursorType.TAILABLE_AWAIT:
self.__max_await_time_ms = max_await_time_ms
return self
|
Specifies a time limit for a getMore operation on a
:attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` cursor. For all other
types of cursor max_await_time_ms is ignored.
Raises :exc:`TypeError` if `max_await_time_ms` is not an integer or
``None``. Raises :exc:`~pymongo.errors.InvalidOperation` if this
:class:`Cursor` has already been used.
.. note:: `max_await_time_ms` requires server version **>= 3.2**
:Parameters:
- `max_await_time_ms`: the time limit after which the operation is
aborted
.. versionadded:: 3.2
|
28,411 |
def subsample(partitions,dataset,seed):
parts=np.arange(dataset.shape[0]/partitions,dataset.shape[0],dataset.shape[0]/partitions).astype(int)
subOut={}
for i in range(parts.shape[0]):
subOut["{0}cells".format(parts[i])]=np.asarray(shuffle(dataset,random_state=seed))[0:parts[i],:]
return subOut
|
Function to generate randomly sampled datasets with replacement. This is done in the context of cells in the native
dataset, which are the rows of the matrix.
:param partitions: int designating the number of evenly spaced sample sizes to randomly select from the native dataset
:param dataset: DataFrame of the native dataset compatible with the shuffle function
:param seed: pseudorandom seed, compatible with the replicate wrapper since it adds the index to the seed
:return subOut: dictionary of the randomly sampled datasets, keys are the number of cells
|
28,412 |
def add_digital_object(
self,
parent_archival_object,
identifier,
title=None,
uri=None,
location_of_originals=None,
object_type="text",
xlink_show="embed",
xlink_actuate="onLoad",
restricted=False,
use_statement="",
use_conditions=None,
access_conditions=None,
size=None,
format_name=None,
format_version=None,
inherit_dates=False,
inherit_notes=False,
):
parent_record = self.get_record(parent_archival_object)
repository = parent_record["repository"]["ref"]
language = parent_record.get("language", "")
if not title:
filename = os.path.basename(uri) if uri is not None else "Untitled"
title = parent_record.get("display_string", filename)
new_object = {
"title": title,
"digital_object_id": identifier,
"digital_object_type": object_type,
"language": language,
"notes": [],
"restrictions": restricted,
"subjects": parent_record["subjects"],
"linked_agents": parent_record["linked_agents"],
}
if inherit_dates:
new_object["dates"] = parent_record["dates"]
if location_of_originals is not None:
new_object["notes"].append(
{
"jsonmodel_type": "note_digital_object",
"type": "originalsloc",
"content": [location_of_originals],
"publish": False,
}
)
if uri is not None:
new_object["file_versions"] = [
{
"file_uri": uri,
"use_statement": use_statement,
"xlink_show_attribute": xlink_show,
"xlink_actuate_attribute": xlink_actuate,
}
]
note_digital_object_type = [
"summary",
"bioghist",
"accessrestrict",
"userestrict",
"custodhist",
"dimensions",
"edition",
"extent",
"altformavail",
"originalsloc",
"note",
"acqinfo",
"inscription",
"langmaterial",
"legalstatus",
"physdesc",
"prefercite",
"processinfo",
"relatedmaterial",
]
if inherit_notes:
for pnote in parent_record["notes"]:
if pnote["type"] in note_digital_object_type:
dnote = pnote["type"]
else:
dnote = "note"
if "subnotes" in pnote:
content = []
for subnote in pnote["subnotes"]:
if "content" in subnote:
content.append(subnote["content"])
else:
LOGGER.info(
"No content field in %s, skipping adding to child digital object.",
subnote,
)
else:
content = pnote.get("content", "")
new_object["notes"].append(
{
"jsonmodel_type": "note_digital_object",
"type": dnote,
"label": pnote.get("label", ""),
"content": content,
"publish": pnote["publish"],
}
)
if use_conditions:
new_object["notes"].append(
{
"jsonmodel_type": "note_digital_object",
"type": "userestrict",
"content": [use_conditions],
"publish": True,
}
)
if access_conditions:
new_object["notes"].append(
{
"jsonmodel_type": "note_digital_object",
"type": "accessrestrict",
"content": [access_conditions],
"publish": True,
}
)
if restricted:
new_object["file_versions"][0]["publish"] = False
new_object["publish"] = False
if size:
new_object["file_versions"][0]["file_size_bytes"] = size
if format_name:
new_object["file_versions"][0]["file_format_name"] = format_name
if format_version:
new_object["file_versions"][0]["file_format_version"] = format_version
new_object_uri = self._post(
repository + "/digital_objects", data=json.dumps(new_object)
).json()["uri"]
parent_record["instances"].append(
{
"instance_type": "digital_object",
"digital_object": {"ref": new_object_uri},
}
)
self._post(parent_archival_object, data=json.dumps(parent_record))
new_object["id"] = new_object_uri
return new_object
|
Creates a new digital object.
:param string parent_archival_object: The archival object to which the newly-created digital object will be parented.
:param string identifier: A unique identifier for the digital object, in any format.
:param string title: The title of the digital object.
:param string uri: The URI to an instantiation of the digital object.
:param string location_of_originals: If provided, will create an `originalsloc` (location of originals) note in the digital object using this text.
:param string object_type: The type of the digital object.
Defaults to "text".
:param string xlink_show: Controls how the file will be displayed.
For supported values, see: http://www.w3.org/TR/xlink/#link-behaviors
:param string xlink_actuate:
:param string use_statement:
:param string use_conditions: A paragraph of human-readable text to specify conditions of use for the digital object.
If provided, creates a "conditions governing use" note in the digital object.
:param string access_conditions: A paragraph of human-readable text to specify conditions of use for the digital object.
If provided, creates a "conditions governing access" note in the digital object.
:param int size: Size in bytes of the digital object
:param str format_name: Name of the digital object's format
:param str format_version: Name of the digital object's format version
:param bool inherit_dates: Inherit dates
:param bool inherit_notes: Inherit parent notes
|
28,413 |
def find_taskruns(project_id, **kwargs):
try:
kwargs['project_id'] = project_id
res = _pybossa_req('get', 'taskrun', params=kwargs)
if type(res).__name__ == 'list':
return [TaskRun(taskrun) for taskrun in res]
else:
return res
except:
raise
|
Return a list of matched task runs for a given project ID.
:param project_id: PYBOSSA Project ID
:type project_id: integer
:param kwargs: PYBOSSA Task Run members
:rtype: list
:returns: A List of task runs that match the query members
|
28,414 |
def filter_by_col(self, column_names):
if not isinstance(column_names, (list, tuple)):
column_names = [column_names, ]
sheet = self.table
identity = self.db_sheet_cols.id
exists = self.db_sheet_cols.exists
criterion = True
for column_name in column_names:
_criterion = sheet.loc[:, column_name] > 0
_exists = sheet.loc[:, exists] > 0
criterion = criterion & _criterion & _exists
return sheet.loc[criterion, identity].values.astype(int)
|
filters sheet/table by columns (input is column header)
The routine returns the serial numbers with values>1 in the selected
columns.
Args:
column_names (list): the column headers.
Returns:
pandas.DataFrame
|
28,415 |
def handle(self, request_headers={}, signature_header=None):
if self.client.webhook_secret is None:
raise ValueError()
encoded_header = self._get_signature_header(signature_header, request_headers)
decoded_request = self._decode_request(encoded_header)
if 'type' not in decoded_request:
raise ValueError("Error invalid request: no type field found.")
handler = self._getHandlerForEvent(decoded_request['type'])
if handler is None:
return
if (self._get_fct_number_of_arg(handler) == 1):
handler(decoded_request)
return
handler(decoded_request, decoded_request[])
|
Handle request.
|
28,416 |
def hgmd(self):
tstart = datetime.now()
if os.path.isfile(settings.hgmd_file):
hgmd_obj = hgmd.HGMD(self.vcf_file)
hgmd_obj.run()
tend = datetime.now()
execution_time = tend - tstart
|
Hi Index
|
28,417 |
def loads(
s,
record_store=None,
schema=None,
loader=from_json_compatible,
record_class=None
):
if record_class is not None:
warnings.warn(
"The record_class parameter is deprecated in favour of schema",
DeprecationWarning,
stacklevel=2
)
schema = record_class
if not isinstance(s, unicode):
s = s.decode()
if s.startswith(u"{"):
json_dct = json.loads(s)
return load_json_dct(json_dct, record_store, schema, loader)
else:
raise ParseError("Not a json record")
|
Create a Record instance from a json serialized dictionary
:param s:
String with a json-serialized dictionary
:param record_store:
Record store to use for schema lookups (when $schema field is present)
:param loader:
Function called to fetch attributes from json. Typically shouldn't be used by end users
:param schema:
PySchema Record class for the record to load.
This will override any $schema fields specified in `s`
:param record_class:
DEPRECATED option, old name for the `schema` parameter
|
28,418 |
def copy_file_upload(self, targetdir):
assert(self.file_upload)
tempdir = tempfile.mkdtemp()
try:
if zipfile.is_zipfile(self.file_upload.absolute_path()):
f = zipfile.ZipFile(self.file_upload.absolute_path(), 'r')
f.extractall(targetdir)
elif tarfile.is_tarfile(self.file_upload.absolute_path()):
tar = tarfile.open(self.file_upload.absolute_path())
tar.extractall(targetdir)
tar.close()
else:
shutil.copyfile(self.file_upload.absolute_path(),
targetdir + "/" + self.file_upload.basename())
except IOError:
logger.error("I/O exception while accessing %s." %
(self.file_upload.absolute_path()))
pass
except (UnicodeEncodeError, NotImplementedError) as e:
shutil.copyfile(self.file_upload.absolute_path(),
targetdir + "/" + self.file_upload.basename())
pass
|
Copies the currently valid file upload into the given directory.
If possible, the content is un-archived in the target directory.
|
28,419 |
def get_tax_class_by_id(cls, tax_class_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_tax_class_by_id_with_http_info(tax_class_id, **kwargs)
else:
(data) = cls._get_tax_class_by_id_with_http_info(tax_class_id, **kwargs)
return data
|
Find TaxClass
Return single instance of TaxClass by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_tax_class_by_id(tax_class_id, async=True)
>>> result = thread.get()
:param async bool
:param str tax_class_id: ID of taxClass to return (required)
:return: TaxClass
If the method is called asynchronously,
returns the request thread.
|
28,420 |
def get_callee_account(
global_state: GlobalState, callee_address: str, dynamic_loader: DynLoader
):
environment = global_state.environment
accounts = global_state.accounts
try:
return global_state.accounts[callee_address]
except KeyError:
log.debug("Module with address " + callee_address + " not loaded.")
if dynamic_loader is None:
raise ValueError()
log.debug("Attempting to load dependency")
try:
code = dynamic_loader.dynld(callee_address)
except ValueError as error:
log.debug("Unable to execute dynamic loader because: {}".format(str(error)))
raise error
if code is None:
log.debug("No code returned, not a contract account?")
raise ValueError()
log.debug("Dependency loaded: " + callee_address)
callee_account = Account(
callee_address, code, callee_address, dynamic_loader=dynamic_loader
)
accounts[callee_address] = callee_account
return callee_account
|
Gets the callees account from the global_state.
:param global_state: state to look in
:param callee_address: address of the callee
:param dynamic_loader: dynamic loader to use
:return: Account belonging to callee
|
28,421 |
def parse_at_root(
self,
root,
state
):
xml_value = self._processor.parse_at_root(root, state)
return _hooks_apply_after_parse(self._hooks, state, xml_value)
|
Parse the given element as the root of the document.
|
28,422 |
def _add_section_to_report(self, data):
pass_count = error_count = only_warning_count = 0
for sample_data in data.values():
if sample_data[] == :
error_count += 1
elif sample_data[] == :
only_warning_count += 1
else:
pass_count += 1
plot_html = []
note_html = _generate_overview_note(
pass_count=pass_count,
only_warning_count=only_warning_count,
error_count=error_count,
total_count=len(data)
)
plot_html.append(note_html)
if error_count or only_warning_count:
table_html = _generate_detailed_table(data)
plot_html.append(table_html)
self.add_section(
name=,
anchor=,
description=(
),
helptext=,
plot="\n".join(plot_html),
)
|
Adds found data to the report via several HTML generators
|
28,423 |
def contains(self, data):
bfo = BitFieldOperation(self.database, self.key)
for bit_index in self._get_seeds(data):
bfo.get('u1', bit_index)
return all(bfo.execute())
|
Check if an item has been added to the bloomfilter.
:param bytes data: a bytestring representing the item to check.
:returns: a boolean indicating whether or not the item is present in
the bloomfilter. False-positives are possible, but a negative
return value is definitive.
|
28,424 |
def cached(cls, minimum_version=None, maximum_version=None, jdk=False):
try:
return cls.global_instance()._locator().locate(
minimum_version=minimum_version,
maximum_version=maximum_version,
jdk=jdk)
except _Locator.Error as e:
raise cls.Error(.format(e))
|
Finds a java distribution that meets the given constraints and returns it.
:API: public
First looks for a cached version that was previously located, otherwise calls locate().
:param minimum_version: minimum jvm version to look for (eg, 1.7).
The stricter of this and `--jvm-distributions-minimum-version` is used.
:param maximum_version: maximum jvm version to look for (eg, 1.7.9999).
The stricter of this and `--jvm-distributions-maximum-version` is used.
:param bool jdk: whether the found java distribution is required to have a jdk.
:return: the Distribution.
:rtype: :class:`Distribution`
:raises: :class:`Distribution.Error` if no suitable java distribution could be found.
|
28,425 |
def hist(data):
win = CurveDialog(edit=False, toolbar=True, wintitle="Histogram test")
plot = win.get_plot()
plot.add_item(make.histogram(data))
win.show()
win.exec_()
|
Plots histogram
|
28,426 |
def walk_perimeter(self, startx, starty):
startx = max(startx, 0)
startx = min(startx, self.xsize)
starty = max(starty, 0)
starty = min(starty, self.ysize)
points = []
x, y = startx, starty
while True:
self.step(x, y)
if 0 <= x <= self.xsize and 0 <= y <= self.ysize:
points.append((x, y))
if self.next == self.UP:
y -= 1
elif self.next == self.LEFT:
x -= 1
elif self.next == self.DOWN:
y += 1
elif self.next == self.RIGHT:
x += 1
elif self.next == self.NOWHERE:
break
if x == startx and y == starty:
break
return points
|
Starting at a point on the perimeter of a region, 'walk' the perimeter to return
to the starting point. Record the path taken.
Parameters
----------
startx, starty : int
The starting location. Assumed to be on the perimeter of a region.
Returns
-------
perimeter : list
A list of pixel coordinates [ [x1,y1], ...] that constitute the perimeter of the region.
|
28,427 |
def has(self, id, domain):
assert isinstance(id, (str, unicode))
assert isinstance(domain, (str, unicode))
if self.defines(id, domain):
return True
if self.fallback_catalogue is not None:
return self.fallback_catalogue.has(id, domain)
return False
|
Checks if a message has a translation.
@rtype: bool
@return: true if the message has a translation, false otherwise
|
28,428 |
def swarm_denovo_cluster(seq_path,
d=1,
threads=1,
HALT_EXEC=False):
if not exists(seq_path):
raise ValueError("%s does not exist" % seq_path)
swarm = Swarm(HALT_EXEC=HALT_EXEC)
if d > 0:
swarm.Parameters[].on(d)
else:
raise ValueError("Resolution -d must be a positive integer.")
if threads > 0:
swarm.Parameters[].on(threads)
else:
raise ValueError("Number of threads must be a positive integer.")
f, tmp_swarm_otumap = mkstemp(prefix=,
suffix=)
close(f)
swarm.Parameters[].on(tmp_swarm_otumap)
swarm.files_to_remove.append(tmp_swarm_otumap)
clusters = swarm(seq_path)
remove_files(swarm.files_to_remove, error_on_missing=False)
return clusters
|
Function : launch the Swarm de novo OTU picker
Parameters: seq_path, filepath to reads
d, resolution
threads, number of threads to use
Return : clusters, list of lists
|
28,429 |
def is_native_ion_gate(gate: ops.Gate) -> bool:
return isinstance(gate, (ops.XXPowGate,
ops.MeasurementGate,
ops.XPowGate,
ops.YPowGate,
ops.ZPowGate))
|
Check if a gate is a native ion gate.
Args:
gate: Input gate.
Returns:
True if the gate is native to the ion, false otherwise.
|
28,430 |
def mkstemp(self, suffix, prefix, directory=None):
if not directory:
directory = self.artifacts_dir
fd, fname = tempfile.mkstemp(suffix, prefix, directory)
os.close(fd)
os.chmod(fname, 0o644)
return fname
|
Generate temp file name in artifacts base dir
and close temp file handle
|
28,431 |
def act(self, world_state, agent_host, current_r ):
obs_text = world_state.observations[-1].text
obs = json.loads(obs_text)
self.logger.debug(obs)
if not u'XPos' in obs or not u'ZPos' in obs:
self.logger.error("Incomplete observation received: %s" % obs_text)
return 0
current_s = "%d:%d" % (int(obs[u]), int(obs[u]))
self.logger.debug("State: %s (x = %.2f, z = %.2f)" % (current_s, float(obs[u]), float(obs[u])))
if current_s not in self.q_table:
self.q_table[current_s] = ([0] * len(self.actions))
if self.prev_s is not None and self.prev_a is not None:
self.updateQTable( current_r, current_s )
self.drawQ( curr_x = int(obs[u'XPos']), curr_y = int(obs[u'ZPos']) )
rnd = random.random()
if rnd < self.epsilon:
a = random.randint(0, len(self.actions) - 1)
self.logger.info("Random action: %s" % self.actions[a])
else:
m = max(self.q_table[current_s])
self.logger.debug("Current values: %s" % ",".join(str(x) for x in self.q_table[current_s]))
l = list()
for x in range(0, len(self.actions)):
if self.q_table[current_s][x] == m:
l.append(x)
y = random.randint(0, len(l)-1)
a = l[y]
self.logger.info("Taking q action: %s" % self.actions[a])
try:
agent_host.sendCommand(self.actions[a])
self.prev_s = current_s
self.prev_a = a
except RuntimeError as e:
self.logger.error("Failed to send command: %s" % e)
return current_r
|
take 1 action in response to the current world state
|
28,432 |
def status_subversion(path, ignore_set, options):
subrepos = ()
if path in ignore_set:
return None, subrepos
keepers = []
for line in run([, , ], cwd=path):
if not line.strip():
continue
if line.startswith(b) or line[0] in b:
continue
status = line[:8]
ignored_states = options.ignore_svn_states
if ignored_states and status.strip() in ignored_states:
continue
filename = line[8:].split(None, 3)[-1]
ignore_set.add(os.path.join(path, filename))
if status.strip():
keepers.append(b + status + filename)
return keepers, subrepos
|
Run svn status.
Returns a 2-element tuple:
* Text lines describing the status of the repository.
* Empty sequence of subrepos, since svn does not support them.
|
28,433 |
def StartCli(args, adb_commands, extra=None, **device_kwargs):
try:
dev = adb_commands()
dev.ConnectDevice(port_path=args.port_path, serial=args.serial, default_timeout_ms=args.timeout_ms,
**device_kwargs)
except usb_exceptions.DeviceNotFoundError as e:
print(.format(e), file=sys.stderr)
return 1
except usb_exceptions.CommonUsbError as e:
print(.format(e), file=sys.stderr)
return 1
try:
return _RunMethod(dev, args, extra or {})
except Exception as e:
sys.stdout.write(str(e))
return 1
finally:
dev.Close()
|
Starts a common CLI interface for this usb path and protocol.
|
28,434 |
def geometries(self):
fname = .format(self.name, self.scale)
for extension in [, ]:
get_test_data(fname + extension)
path = get_test_data(fname + , as_file_obj=False)
return iter(tuple(shpreader.Reader(path).geometries()))
|
Return an iterator of (shapely) geometries for this feature.
|
28,435 |
def paths_from_version(version):
if platform.system() == 'Windows':
eplus_home = "C:/EnergyPlusV{version}".format(version=version)
eplus_exe = os.path.join(eplus_home, 'energyplus.exe')
elif platform.system() == "Linux":
eplus_home = "/usr/local/EnergyPlus-{version}".format(version=version)
eplus_exe = os.path.join(eplus_home, 'energyplus')
else:
eplus_home = "/Applications/EnergyPlus-{version}".format(version=version)
eplus_exe = os.path.join(eplus_home, 'energyplus')
return eplus_exe, eplus_home
|
Get the EnergyPlus install directory and executable path.
Parameters
----------
version : str, optional
EnergyPlus version in the format "X-X-X", e.g. "8-7-0".
Returns
-------
eplus_exe : str
Full path to the EnergyPlus executable.
eplus_home : str
Full path to the EnergyPlus install directory.
|
28,436 |
def is_published(self):
field773 = record_get_field_instances(self.record, '773')
for f773 in field773:
if 'p' in field_get_subfields(f773):
return True
return False
|
Check fields 980 and 773 to see if the record has already been published.
:return: True if published, else False
|
28,437 |
def dirty(name,
target,
user=None,
username=None,
password=None,
ignore_unversioned=False):
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
return _fail(ret, )
|
Determine if the working directory has been changed.
|
28,438 |
def upload_sticker_file(self, user_id, png_sticker):
from pytgbot.api_types.sendable.files import InputFile
assert_type_or_raise(user_id, int, parameter_name="user_id")
assert_type_or_raise(png_sticker, InputFile, parameter_name="png_sticker")
result = self.do("uploadStickerFile", user_id=user_id, png_sticker=png_sticker)
if self.return_python_objects:
logger.debug("Trying to parse {data}".format(data=repr(result)))
from pytgbot.api_types.receivable.media import File
try:
return File.from_array(result)
except TgApiParseException:
logger.debug("Failed parsing as api_type File", exc_info=True)
raise TgApiParseException("Could not parse result.")
return result
|
Use this method to upload a .png file with a sticker for later use in createNewStickerSet and addStickerToSet methods (can be used multiple times). Returns the uploaded File on success.
https://core.telegram.org/bots/api#uploadstickerfile
Parameters:
:param user_id: User identifier of sticker file owner
:type user_id: int
:param png_sticker: Png image with the sticker, must be up to 512 kilobytes in size, dimensions must not exceed 512px, and either width or height must be exactly 512px. More info on Sending Files »
:type png_sticker: pytgbot.api_types.sendable.files.InputFile
Returns:
:return: Returns the uploaded File on success
:rtype: pytgbot.api_types.receivable.media.File
|
28,439 |
def save_signal(self,filename=None):
if filename is None:
filename = os.path.join(self.folder, 'trsig.pkl')
self.trsig.save(filename)
|
Saves TransitSignal.
Calls :func:`TransitSignal.save`; default filename is
``trsig.pkl`` in ``self.folder``.
|
28,440 |
def process_target(self):
if isinstance(self.target, str):
self.target = self.target.replace("").replace(, "\'")
return "\"{target}\"".format(target=self.target)
return self.target
|
Return target with transformations, if any
|
28,441 |
def usb_control_out(library, session, request_type_bitmap_field, request_id, request_value,
index, data=""):
length = len(data)
return library.viUsbControlOut(session, request_type_bitmap_field, request_id,
request_value, index, length, data)
|
Performs a USB control pipe transfer to the device.
Corresponds to viUsbControlOut function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param request_type_bitmap_field: bmRequestType parameter of the setup stage of a USB control transfer.
:param request_id: bRequest parameter of the setup stage of a USB control transfer.
:param request_value: wValue parameter of the setup stage of a USB control transfer.
:param index: wIndex parameter of the setup stage of a USB control transfer.
This is usually the index of the interface or endpoint.
:param data: The data buffer that sends the data in the optional data stage of the control transfer.
:return: return value of the library call.
:rtype: :class:`pyvisa.constants.StatusCode`
|
28,442 |
def overlay_gateway_monitor_vlan_range(self, **kwargs):
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
monitor = ET.SubElement(overlay_gateway, "monitor")
session_key = ET.SubElement(monitor, "session")
session_key.text = kwargs.pop('session')
vlan_range = ET.SubElement(monitor, "vlan-range")
vlan_range.text = kwargs.pop('vlan_range')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
28,443 |
def normalise_angle(th):
return th - (2.0 * np.pi) * np.floor((th + np.pi) / (2.0 * np.pi))
|
Normalise an angle to be in the range [-pi, pi].
|
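Not part of the dataset row; a quick standalone check of the wrap-around behaviour (the function also works element-wise on numpy arrays):

```python
import numpy as np

def normalise_angle(th):
    return th - (2.0 * np.pi) * np.floor((th + np.pi) / (2.0 * np.pi))

print(normalise_angle(3 * np.pi))             # -3.141592653589793  (wraps to -pi)
print(normalise_angle(np.array([0.0, 7.0])))  # [0.         0.71681469]
```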
28,444 |
def findReference(self, name, cls=QtGui.QWidget):
return self.scene().findReference(name, cls)
|
Looks up a reference from the widget based on its object name.
:param name | <str>
cls | <subclass of QtGui.QObject>
:return <QtGui.QObject> || None
|
28,445 |
def find_records(self, check, keys=None):
if keys:
bad_keys = [ key for key in keys if key not in self._keys ]
if bad_keys:
raise KeyError("Bad record key(s): %s"%bad_keys)
if keys:
if 'msg_id' in keys:
keys.remove('msg_id')
keys.insert(0, 'msg_id')
req = ', '.join(keys)
else:
req = '*'
expr, args = self._render_expression(check)
query = "SELECT %s FROM %s WHERE %s" % (req, self.table, expr)
cursor = self._db.execute(query, args)
matches = cursor.fetchall()
records = []
for line in matches:
rec = self._list_to_dict(line, keys)
records.append(rec)
return records
|
Find records matching a query dict, optionally extracting subset of keys.
Returns list of matching records.
Parameters
----------
check: dict
mongodb-style query argument
keys: list of strs [optional]
if specified, the subset of keys to extract. msg_id will *always* be
included.
|
28,446 |
def get_num_ruptures(self):
return {grp.id: sum(src.num_ruptures for src in grp)
for grp in self.src_groups}
|
:returns: the number of ruptures per source group ID
|
28,447 |
def _format_syslog_config(cmd_ret):
ret_dict = {'success': cmd_ret['retcode'] == 0}
if cmd_ret['retcode'] != 0:
ret_dict['message'] = cmd_ret['stdout']
else:
for line in cmd_ret['stdout'].splitlines():
line = line.strip()
cfgvars = line.split()
key = cfgvars[0].strip()
value = cfgvars[1].strip()
ret_dict[key] = value
return ret_dict
|
Helper function to format the stdout from the get_syslog_config function.
cmd_ret
The return dictionary that comes from a cmd.run_all call.
|
28,448 |
def read(self, pin, is_differential=False):
pin = pin if is_differential else pin + 0x04
return self._read(pin)
|
I2C Interface for ADS1x15-based ADCs reads.
params:
:param pin: individual or differential pin.
:param bool is_differential: single-ended or differential read.
|
28,449 |
def _adj(self, k):
G = np.zeros((self.m, self.m))
for i in range(self.m):
for j in range(self.m):
if i == j+1 or j == i+1:
G[i][j] = 1
return G
|
Description:
Adjacent breaking
Parameters:
k: not used
|
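For reference, here is a standalone sketch (not part of the dataset row) of the same construction for m = 4; it yields the adjacency matrix of a path graph, with ones on the first off-diagonals. `np.eye(m, k=1) + np.eye(m, k=-1)` would build the same matrix without the double loop.

```python
import numpy as np

m = 4
G = np.zeros((m, m))
for i in range(m):
    for j in range(m):
        if i == j + 1 or j == i + 1:
            G[i][j] = 1
print(G)
# [[0. 1. 0. 0.]
#  [1. 0. 1. 0.]
#  [0. 1. 0. 1.]
#  [0. 0. 1. 0.]]
```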
28,450 |
def mother(self):
if self._mother == []:
self._mother = self.sub_tag("FAMC/WIFE")
return self._mother
|
Parent of this individual
|
28,451 |
def get_value(row, field_name):
result = None
dict_row = convert_to_dict(row)
if detect_list(field_name):
temp = row
for field in field_name:
dict_temp = convert_to_dict(temp)
temp = dict_temp.get(field, None)
result = temp
else:
result = dict_row.get(field_name, None)
return result
|
Returns the value found in the field_name attribute of the row dictionary.
|
28,452 |
def get_contract(firma, pravni_forma, sidlo, ic, dic, zastoupen):
contract_fn = _resource_context(
"Licencni_smlouva_o_dodavani_elektronickych_publikaci"
"_a_jejich_uziti.rst"
)
with open(contract_fn) as f:
contract = f.read()
firma = firma.strip()
firma = firma + "\n" + ((len(firma) + 1) * "-")
contract = Template(contract).substitute(
firma=firma,
pravni_forma=pravni_forma.strip(),
sidlo=sidlo.strip(),
ic=ic.strip(),
dic=dic.strip(),
zastoupen=zastoupen.strip(),
resources_path=RES_PATH
)
return gen_pdf(
contract,
open(_resource_context("style.json")).read(),
)
|
Compose contract and create PDF.
Args:
firma (str): firma
pravni_forma (str): pravni_forma
sidlo (str): sidlo
ic (str): ic
dic (str): dic
zastoupen (str): zastoupen
Returns:
obj: StringIO file instance containing PDF file.
|
28,453 |
def chk(self, annotations, fout_err):
for idx, ntd in enumerate(annotations):
self._chk_fld(ntd, "Qualifier")
self._chk_fld(ntd, "DB_Reference", 1)
self._chk_fld(ntd, "With_From")
self._chk_fld(ntd, "DB_Name", 0, 1)
self._chk_fld(ntd, "DB_Synonym")
self._chk_fld(ntd, "Taxon", 1, 2)
flds = list(ntd)
self._chk_qty_eq_1(flds)
if not ntd.Taxon or len(ntd.Taxon) not in {1, 2}:
self.illegal_lines[].append((idx, .format(I=idx, NT=ntd)))
if self.illegal_lines:
self.prt_error_summary(fout_err)
return not self.illegal_lines
|
Check annotations.
|
28,454 |
def service_present(name, service_type, description=None,
profile=None, **connection_args):
ret = {: name,
: {},
: True,
: .format(name)}
role = __salt__[](name=name,
profile=profile,
**connection_args)
if not in role:
return ret
else:
if __opts__.get():
ret[] = None
ret[] = .format(name)
return ret
__salt__[](name, service_type,
description,
profile=profile,
**connection_args)
ret[] = .format(name)
ret[][] =
return ret
|
Ensure service present in Keystone catalog
name
The name of the service
service_type
The type of Openstack Service
description (optional)
Description of the service
|
28,455 |
def calc_system(self, x, Y, Y_agg=None, L=None, population=None):
if Y_agg is None:
try:
Y_agg = Y.sum(level='region',
axis=1).reindex(self.get_regions(),
axis=1)
except (AssertionError, KeyError):
Y_agg = Y.sum(level=0,
axis=1,).reindex(self.get_regions(),
axis=1)
y_vec = Y.sum(axis=0)
if self.F is None:
self.F = calc_F(self.S, x)
logging.debug(
.format(self.name))
if self.S is None:
self.S = calc_S(self.F, x)
logging.debug(.format(self.name))
if (self.FY is None) and (self.SY is not None):
self.FY = calc_FY(self.SY, y_vec)
logging.debug(.format(self.name))
if (self.SY is None) and (self.FY is not None):
self.SY = calc_SY(self.FY, y_vec)
logging.debug(.format(self.name))
if self.M is None:
if L is not None:
self.M = calc_M(self.S, L)
logging.debug(.format(
self.name))
else:
try:
self.M = recalc_M(self.S, self.D_cba,
Y=Y_agg,
nr_sectors=self.get_sectors().size)
logging.debug(
.format(self.name))
except Exception as ex:
logging.debug(
.
format(ex))
FY_agg = 0
if self.FY is not None:
try:
FY_agg = (self.FY.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1))
except (AssertionError, KeyError):
FY_agg = (self.FY.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1))
if ((self.D_cba is None) or
(self.D_pba is None) or
(self.D_imp is None) or
(self.D_exp is None)):
if L is None:
logging.debug(
)
return
else:
self.D_cba, self.D_pba, self.D_imp, self.D_exp = (
calc_accounts(self.S, L, Y_agg, self.get_sectors().size))
logging.debug(
.format(self.name))
if ((self.D_cba_reg is None) or (self.D_pba_reg is None) or
(self.D_imp_reg is None) or (self.D_exp_reg is None)):
try:
self.D_cba_reg = (
self.D_cba.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1) + FY_agg)
except (AssertionError, KeyError):
self.D_cba_reg = (
self.D_cba.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1) + FY_agg)
try:
self.D_pba_reg = (
self.D_pba.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1) + FY_agg)
except (AssertionError, KeyError):
self.D_pba_reg = (
self.D_pba.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1) + FY_agg)
try:
self.D_imp_reg = (
self.D_imp.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1))
except (AssertionError, KeyError):
self.D_imp_reg = (
self.D_imp.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1))
try:
self.D_exp_reg = (
self.D_exp.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1))
except (AssertionError, KeyError):
self.D_exp_reg = (
self.D_exp.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1))
logging.debug(
.format(self.name))
if population is not None:
if type(population) is pd.DataFrame:
if (population.columns.tolist() !=
self.D_cba_reg.columns.tolist()):
logging.warning(
)
population = population.values
if ((self.D_cba_cap is None) or (self.D_pba_cap is None) or
(self.D_imp_cap is None) or (self.D_exp_cap is None)):
self.D_cba_cap = self.D_cba_reg.dot(
np.diagflat(1./population))
self.D_pba_cap = self.D_pba_reg.dot(
np.diagflat(1./population))
self.D_imp_cap = self.D_imp_reg.dot(
np.diagflat(1./population))
self.D_exp_cap = self.D_exp_reg.dot(
np.diagflat(1./population))
self.D_cba_cap.columns = self.D_cba_reg.columns
self.D_pba_cap.columns = self.D_pba_reg.columns
self.D_imp_cap.columns = self.D_imp_reg.columns
self.D_exp_cap.columns = self.D_exp_reg.columns
logging.debug(
.format(self.name))
return self
|
Calculates the missing part of the extension plus accounts
This method allows to specify an aggregated Y_agg for the
account calculation (see Y_agg below). However, the full Y needs
to be specified for the calculation of FY or SY.
Calculates:
- for each sector and country:
S, SY (if FY available), M, D_cba, D_pba_sector, D_imp_sector,
D_exp_sector
- for each region:
D_cba_reg, D_pba_reg, D_imp_reg, D_exp_reg,
- for each region (if population vector is given):
D_cba_cap, D_pba_cap, D_imp_cap, D_exp_cap
Notes
-----
Only attributes which are not None are recalculated (for D_* this is
checked for each group (reg, cap, and w/o appendix)).
Parameters
----------
x : pandas.DataFrame or numpy.array
Industry output column vector
Y : pandas.DataFrame or numpy.arry
Full final demand array
Y_agg : pandas.DataFrame or np.array, optional
The final demand aggregated (one category per country). Can be
used to restrict the calculation of CBA of a specific category
(e.g. households). Default: y is aggregated over all categories
L : pandas.DataFrame or numpy.array, optional
Leontief input output table L. If this is not given,
the method recalculates M based on D_cba (must be present in
the extension).
population : pandas.DataFrame or np.array, optional
Row vector with population per region
|
28,456 |
def docinfo2dict(doctree):
nodes = doctree.traverse(docutils.nodes.docinfo)
md = {}
if not nodes:
return md
for node in nodes[0]:
if isinstance(node, docutils.nodes.TextElement):
md[node.__class__.__name__] = node.astext()
else:
name, body = node
md[name.astext()] = body.astext()
return md
|
Return the docinfo field list from a doctree as a dictionary
Note: there can be multiple instances of a single field in the docinfo.
Since a dictionary is returned, the last instance's value will win.
Example:
pub = rst2pub(rst_string)
print docinfo2dict(pub.document)
|
28,457 |
def assuan_serialize(data):
for c in [b'%', b'\n', b'\r']:
escaped = '%{:02X}'.format(ord(c)).encode()
data = data.replace(c, escaped)
return data
|
Serialize data according to ASSUAN protocol (for GPG daemon communication).
|
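A standalone check of the percent-escaping (not part of the dataset row; the escape set `%`, LF, CR and the `%XX` format are my reconstruction of the stripped literals above):

```python
def assuan_serialize(data):
    # Escape '%' first so the '%' bytes introduced below are not re-escaped.
    for c in [b'%', b'\n', b'\r']:
        escaped = '%{:02X}'.format(ord(c)).encode()
        data = data.replace(c, escaped)
    return data

print(assuan_serialize(b'100% done\r\n'))  # b'100%25 done%0D%0A'
```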
28,458 |
def create_parser(prog):
parser = argparse.ArgumentParser(prog=prog, formatter_class=DsubHelpFormatter)
parser.add_argument(
,
default=,
choices=[, , , ],
help=,
metavar=)
return parser
|
Create an argument parser, adding in the list of providers.
|
28,459 |
def set_source_filter(self, source):
if isinstance(source, str if py3k else basestring) and len(source) >= 2:
self.source_filter = source
else:
raise TwitterSearchException(1009)
|
Only search for tweets entered via given source
:param source: String. Name of the source to search for. An example \
would be ``source=twitterfeed`` for tweets submitted via TwitterFeed
:raises: TwitterSearchException
|
28,460 |
def clusterStatus(self):
servers = yield self.getClusterServers()
d = {
: {},
: {},
: {}
}
now = time.time()
reverse_map = {}
for sname in servers:
last = yield self._get_key( % sname)
status = yield self._get_key( % sname)
uuid = yield self._get_key( % sname)
reverse_map[uuid] = sname
if not last:
last = 0
last = float(last)
if (status == ) and (now - last > 5):
status =
if not sname in d[]:
d[][sname] = []
d[][sname].append({
: last,
: status,
: uuid
})
crons = yield self.keys()
for queue in crons:
if queue not in d[]:
d[][queue] = {: {}}
methods = yield self.keys( % queue)
for method in methods:
last = yield self._get_key( % (queue, method))
if last:
d[][queue][][method] = float(last)
uid = yield self._get_key( % queue)
if uid:
d[][queue][] = % (uid, reverse_map[uid])
queue_keys = yield self.keys()
for qname in queue_keys:
if qname not in d[]:
qlen = yield self.queueSize(qname)
stats = yield self.getQueueMessageStats(qname)
d[][qname] = {
: qlen,
: stats
}
defer.returnValue(d)
|
Returns a dict of cluster nodes and their status information
|
28,461 |
def to_pwm(self, precision=4, extra_str=""):
motif_id = self.id
if extra_str:
motif_id += "_%s" % extra_str
if not self.pwm:
self.pwm = [self.iupac_pwm[char]for char in self.consensus.upper()]
return ">%s\n%s" % (
motif_id,
self._pwm_to_str(precision)
)
|
Return pwm as string.
Parameters
----------
precision : int, optional, default 4
Floating-point precision.
extra_str : str, optional
Extra text to include with motif id line.
Returns
-------
motif_str : str
Motif formatted in PWM format.
|
28,462 |
def simple_moving_average(data, period):
catch_errors.check_for_period_error(data, period)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
sma = [np.mean(data[idx-(period-1):idx+1]) for idx in range(0, len(data))]
sma = fill_for_noncomputable_vals(data, sma)
return sma
|
Simple Moving Average.
Formula:
SMA = SUM(data[idx-(N-1) : idx+1]) / N  (mean of the last N values at each index)
|
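Not part of the dataset row; a standalone sketch of the same windowed mean using plain numpy, without the `catch_errors`/`fill_for_noncomputable_vals` helpers used above (values before a full period are left as NaN instead):

```python
import numpy as np

def simple_moving_average(data, period):
    data = np.asarray(data, dtype=float)
    sma = np.full(len(data), np.nan)
    for idx in range(period - 1, len(data)):
        sma[idx] = data[idx - (period - 1):idx + 1].mean()
    return sma

print(simple_moving_average([1, 2, 3, 4, 5], period=3))
# [nan nan  2.  3.  4.]
```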
28,463 |
def diff_charsToLines(self, diffs, lineArray):
for i in range(len(diffs)):
text = []
for char in diffs[i][1]:
text.append(lineArray[ord(char)])
diffs[i] = (diffs[i][0], "".join(text))
|
Rehydrate the text in a diff from a string of line hashes to real lines
of text.
Args:
diffs: Array of diff tuples.
lineArray: Array of unique strings.
|
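Not part of the dataset row; a minimal usage sketch with the method rewritten as a free function. Index 0 of `lineArray` is left empty here because the companion lines-to-chars encoding step typically reserves it, so real lines start at 1:

```python
def diff_chars_to_lines(diffs, lineArray):
    # Same logic as the method above, without the class wrapper.
    for i in range(len(diffs)):
        text = []
        for char in diffs[i][1]:
            text.append(lineArray[ord(char)])
        diffs[i] = (diffs[i][0], "".join(text))

lineArray = ["", "alpha\n", "beta\n"]   # index 0 reserved, real lines start at 1
diffs = [(0, chr(1)), (1, chr(2))]      # (op, encoded text); 0 = equal, 1 = insert
diff_chars_to_lines(diffs, lineArray)
print(diffs)   # [(0, 'alpha\n'), (1, 'beta\n')]
```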
28,464 |
def connect_delete_namespaced_pod_proxy_with_path(self, name, namespace, path, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_delete_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, **kwargs)
else:
(data) = self.connect_delete_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, **kwargs)
return data
|
connect_delete_namespaced_pod_proxy_with_path # noqa: E501
connect DELETE requests to proxy of Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_delete_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: path to the resource (required)
:param str path2: Path is the URL path to use for the current proxy request to pod.
:return: str
If the method is called asynchronously,
returns the request thread.
|
28,465 |
def create_user(self, username, first_name=None, last_name=None):
url = api_url+
payload = {
:username
}
if first_name:
payload[] = first_name
if last_name:
payload[] = last_name
r = requests.post(url, data=json.dumps(payload), headers=self.headers)
print(request_status(r))
r.raise_for_status()
return User(r.json())
|
Creates a new user object in the database.
Returns the User object. Must be linked to a new trainer soon after.
|
28,466 |
def register_properties_handler(self, handler_function):
handler = filter_properties_signals(
signal_wrapper(handler_function), self.IFACE)
self.bus.add_signal_receiver(handler,
signal_name='PropertiesChanged',
dbus_interface=IPROPERTIES,
bus_name=self.name,
path=self.OBJ_PATH)
|
Register `handler_function` to receive the signal.
Uses the dbus interface IPROPERTIES and object path self.OBJ_PATH
to match the 'PropertiesChanged' signal.
:param function handler_function: The function to be called.
|
28,467 |
def wait(self, readfds, writefds, timeout):
logger.debug("WAIT:")
logger.debug(f"\tProcess {self._current} is going to wait for [ {readfds!r} {writefds!r} {timeout!r} ]")
logger.debug(f"\tProcess: {self.procs!r}")
logger.debug(f"\tRunning: {self.running!r}")
logger.debug(f"\tRWait: {self.rwait!r}")
logger.debug(f"\tTWait: {self.twait!r}")
logger.debug(f"\tTimers: {self.timers!r}")
for fd in readfds:
self.rwait[fd].add(self._current)
for fd in writefds:
self.twait[fd].add(self._current)
if timeout is not None:
self.timers[self._current] = self.clocks + timeout
procid = self._current
next_index = (self.running.index(procid) + 1) % len(self.running)
self._current = self.running[next_index]
logger.debug(f"\tTransfer control from process {procid} to {self._current}")
logger.debug(f"\tREMOVING {procid!r} from {self.running!r}. Current: {self._current!r}")
self.running.remove(procid)
if self._current not in self.running:
logger.debug("\tCurrent not running. Checking for timers...")
self._current = None
self.check_timers()
|
Wait for file descriptors or timeout.
Adds the current process in the correspondent waiting list and
yield the cpu to another running process.
|
28,468 |
def js2str(js, sort_keys=True, indent=4):
return json.dumps(js, sort_keys=sort_keys,
indent=indent, separators=(",", ": "))
|
Encode js to nicely formatted human readable string. (utf-8 encoding)
Usage::
>>> from weatherlab.lib.dataIO.js import js2str
>>> s = js2str({"a": 1, "b": 2})
>>> print(s)
{
"a": 1,
"b": 2
}
**Chinese docs (translated)**
Convert a JSON-serializable Python object into a nicely formatted string.
|
28,469 |
def setAlternatingRowColors( self, state ):
self._alternatingRowColors = state
self.treeWidget().setAlternatingRowColors(state)
|
Sets the alternating row colors state for this widget.
:param state | <bool>
|
28,470 |
def draw(data, size=(600, 400), node_size=2.0, edge_size=0.25,
default_node_color=0x5bc0de, default_edge_color=0xaaaaaa, z=100,
shader='basic', optimize=True, directed=True, display_html=True,
show_save=False):
shader_options = ['toon', 'basic', 'phong', 'lambert']
if shader not in shader_options:
raise Exception( +
.join(shader_options))
if isinstance(default_edge_color, int):
default_edge_color = hex(default_edge_color)
if isinstance(default_node_color, int):
default_node_color = hex(default_node_color)
if isinstance(data, list):
graph = json_formatter.dumps(generate(data, iterations=1))
elif isinstance(data, dict):
for node_key in data['nodes']:
node = data['nodes'][node_key]
if 'color' in node and isinstance(node['color'], int):
node['color'] = hex(node['color'])
for edge in data['edges']:
if 'color' in edge and isinstance(edge['color'], int):
edge['color'] = hex(edge['color'])
graph = json_formatter.dumps(data)
else:
try:
with open(data) as in_file:
graph = in_file.read()
except:
graph = data
div_id = uuid.uuid4()
html = /%(local)s%(remote)sjgraph
remote=remote_path[:-3], w=size[0], h=size[1],
node_size=node_size, edge_size=edge_size,
node_color=default_node_color,
edge_color=default_edge_color, shader=shader,
z=z, graph=graph,
optimize= if optimize else ,
directed= if directed else ,
show_save= if show_save else )
if display_html:
display(HTML(html))
else:
return html
|
Draws an interactive 3D visualization of the inputted graph.
Args:
data: Either an adjacency list of tuples (ie. [(1,2),...]) or object
size: (Optional) Dimensions of visualization, in pixels
node_size: (Optional) Defaults to 2.0
edge_size: (Optional) Defaults to 0.25
default_node_color: (Optional) If loading data without specified
'color' properties, this will be used. Default is 0x5bc0de
default_edge_color: (Optional) If loading data without specified
'color' properties, this will be used. Default is 0xaaaaaa
z: (Optional) Starting z position of the camera. Default is 100.
shader: (Optional) Specifies shading algorithm to use. Can be 'toon',
'basic', 'phong', or 'lambert'. Default is 'basic'.
optimize: (Optional) Runs a force-directed layout algorithm on the
graph. Default True.
directed: (Optional) Includes arrows on edges to indicate direction.
Default True.
display_html: If True (default), embed the html in a IPython display.
If False, return the html as a string.
show_save: If True, displays a save icon for rendering graph as an
image.
Inputting an adjacency list into `data` results in a 'default' graph type.
For more customization, use the more expressive object format.
|
28,471 |
def linkify(self, timeperiods):
new_exclude = []
if hasattr(self, 'exclude') and self.exclude != []:
logger.debug("[timeentry::%s] have excluded %s", self.get_name(), self.exclude)
excluded_tps = self.exclude
for tp_name in excluded_tps:
timepriod = timeperiods.find_by_name(tp_name.strip())
if timepriod is not None:
new_exclude.append(timepriod.uuid)
else:
msg = "[timeentry::%s] unknown %s timeperiod" % (self.get_name(), tp_name)
self.add_error(msg)
self.exclude = new_exclude
|
Replace the timeperiod names in `exclude` with the uuids of the corresponding timeperiods
:param timeperiods: Timeperiods object
:type timeperiods:
:return: None
|
28,472 |
def parse_uri_path(self, path):
options = {}
db, *_ = path[1:].split("/")
if db:
options["db"] = db
return options
|
Given a uri path, return the Redis specific configuration
options in that path string according to iana definition
http://www.iana.org/assignments/uri-schemes/prov/redis
:param path: string containing the path. Example: "/0"
:return: mapping containing the options. Example: {"db": "0"}
|
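The method body has no other instance state, so it can be exercised standalone (sketch, not part of the dataset row):

```python
def parse_uri_path(path):
    options = {}
    db, *_ = path[1:].split("/")
    if db:
        options["db"] = db
    return options

print(parse_uri_path("/0"))   # {'db': '0'}
print(parse_uri_path("/"))    # {}  -- empty path segment, so no db option is set
```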
28,473 |
def forward(self, obj):
assert isinstance(obj, (IncomingMessage, MessageStatus)), .format(obj)
clients = self.choose_clients(obj)
if Parallel:
pll = Parallel(self._forward_object_to_client)
for client in clients:
pll(client, obj)
results, errors = pll.join()
if errors:
raise errors[0]
else:
for client in clients:
self._forward_object_to_client(client, obj)
|
Forward an object to clients.
:param obj: The object to be forwarded
:type obj: smsframework.data.IncomingMessage|smsframework.data.MessageStatus
:raises Exception: if any of the clients failed
|
28,474 |
def upload(self, src_dir, replica, staging_bucket, timeout_seconds=1200):
bundle_uuid = str(uuid.uuid4())
version = datetime.utcnow().strftime("%Y-%m-%dT%H%M%S.%fZ")
files_to_upload, files_uploaded = [], []
for filename in iter_paths(src_dir):
full_file_name = filename.path
files_to_upload.append(open(full_file_name, "rb"))
logger.info("Uploading %i files from %s to %s", len(files_to_upload), src_dir, staging_bucket)
file_uuids, uploaded_keys, abs_file_paths = upload_to_cloud(files_to_upload, staging_bucket=staging_bucket,
replica=replica, from_cloud=False)
for file_handle in files_to_upload:
file_handle.close()
filenames = [object_name_builder(p, src_dir) for p in abs_file_paths]
filename_key_list = list(zip(filenames, file_uuids, uploaded_keys))
for filename, file_uuid, key in filename_key_list:
filename = filename.replace(, )
if filename.startswith('/'):
filename = filename.lstrip('/')
logger.info("File %s: registering...", filename)
creator_uid = self.config.get("creator_uid", 0)
source_url = "s3://{}/{}".format(staging_bucket, key)
logger.info("File %s: registering from %s -> uuid %s", filename, source_url, file_uuid)
response = self.put_file._request(dict(
uuid=file_uuid,
bundle_uuid=bundle_uuid,
version=version,
creator_uid=creator_uid,
source_url=source_url
))
files_uploaded.append(dict(name=filename, version=version, uuid=file_uuid, creator_uid=creator_uid))
if response.status_code in (requests.codes.ok, requests.codes.created):
logger.info("File %s: Sync copy -> %s", filename, version)
else:
assert response.status_code == requests.codes.accepted
logger.info("File %s: Async copy -> %s", filename, version)
timeout = time.time() + timeout_seconds
wait = 1.0
while time.time() < timeout:
try:
self.head_file(uuid=file_uuid, replica="aws", version=version)
break
except SwaggerAPIException as e:
if e.code != requests.codes.not_found:
msg = "File {}: Unexpected server response during registration"
req_id = .format(response.headers.get("X-AWS-REQUEST-ID"))
raise RuntimeError(msg.format(filename), req_id)
time.sleep(wait)
wait = min(60.0, wait * self.UPLOAD_BACKOFF_FACTOR)
else:
req_id = .format(response.headers.get("X-AWS-REQUEST-ID"))
raise RuntimeError("File {}: registration FAILED".format(filename), req_id)
logger.debug("Successfully uploaded file")
file_args = [{: file_["name"].endswith(".json"),
: file_[],
: file_[],
: file_[]} for file_ in files_uploaded]
logger.info("%s", "Bundle {}: Registering...".format(bundle_uuid))
response = self.put_bundle(uuid=bundle_uuid,
version=version,
replica=replica,
creator_uid=creator_uid,
files=file_args)
logger.info("%s", "Bundle {}: Registered successfully".format(bundle_uuid))
return {
"bundle_uuid": bundle_uuid,
"creator_uid": creator_uid,
"replica": replica,
"version": response["version"],
"files": files_uploaded
}
|
Upload a directory of files from the local filesystem and create a bundle containing the uploaded files.
:param str src_dir: file path to a directory of files to upload to the replica.
:param str replica: the replica to upload to. The supported replicas are: `aws` for Amazon Web Services, and
`gcp` for Google Cloud Platform. [aws, gcp]
:param str staging_bucket: a client controlled AWS S3 storage bucket to upload from.
:param int timeout_seconds: the time to wait for a file to upload to replica.
This method requires the use of a client-controlled object storage bucket to stage the data for upload.
|
28,475 |
def day_fraction(time):
hour = int(time.split(":")[0])
minute = int(time.split(":")[1])
return hour/24 + minute/1440
|
Convert a 24-hour time to a fraction of a day.
For example, midnight corresponds to 0.0, and noon to 0.5.
:param time: Time in the form of 'HH:MM' (24-hour time)
:type time: string
:return: A day fraction
:rtype: float
:Examples:
.. code-block:: python
day_fraction("18:30")
|
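The function is pure, so the docstring example can be checked directly (standalone sketch, not part of the dataset row):

```python
def day_fraction(time):
    hour = int(time.split(":")[0])
    minute = int(time.split(":")[1])
    return hour / 24 + minute / 1440

print(day_fraction("00:00"))  # 0.0
print(day_fraction("12:00"))  # 0.5
print(day_fraction("18:30"))  # 0.770833...
```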
28,476 |
def delete_project(project_id):
project = get_data_or_404(, project_id)
if project[] != get_current_user_id():
return jsonify(message=), 403
delete_instance(, project_id)
return jsonify({})
|
Delete Project.
|
28,477 |
def set_offset( self, offset ):
assert offset in range( len( self.buffer ) )
self.pos = offset
self._fill_buffer()
|
Set the current read offset (in bytes) for the instance.
|
28,478 |
def get_import_resource_kwargs(self, request, *args, **kwargs):
return self.get_resource_kwargs(request, *args, **kwargs)
|
Prepares/returns kwargs used when initializing Resource
|
28,479 |
def sequence(self, per_exon=False):
db = self.db
if not per_exon:
start = self.txStart + 1
return _sequence(db, self.chrom, start, self.txEnd)
else:
seqs = []
for start, end in self.exons:
seqs.append(_sequence(db, self.chrom, start + 1, end))
return seqs
|
Return the sequence for this feature.
if per_exon is True, return an array of exon sequences
This sequence is never reverse complemented
|
28,480 |
def remove_sources(self, sources):
if self.unmixing_ is None or self.mixing_ is None:
raise RuntimeError("No sources available (run do_mvarica first)")
self.mixing_ = np.delete(self.mixing_, sources, 0)
self.unmixing_ = np.delete(self.unmixing_, sources, 1)
if self.activations_ is not None:
self.activations_ = np.delete(self.activations_, sources, 1)
self.var_model_ = None
self.var_cov_ = None
self.connectivity_ = None
self.mixmaps_ = []
self.unmixmaps_ = []
return self
|
Remove sources from the decomposition.
This function removes sources from the decomposition. Doing so invalidates currently fitted VAR models and
connectivity estimates.
Parameters
----------
sources : {slice, int, array of ints}
Indices of components to remove.
Returns
-------
self : Workspace
The Workspace object.
Raises
------
RuntimeError
If the :class:`Workspace` instance does not contain a source decomposition.
|
28,481 |
def _null_sia(subsystem, phi=0.0):
return SystemIrreducibilityAnalysis(subsystem=subsystem,
cut_subsystem=subsystem,
phi=phi,
ces=_null_ces(subsystem),
partitioned_ces=_null_ces(subsystem))
|
Return a |SystemIrreducibilityAnalysis| with zero |big_phi| and empty
cause-effect structures.
This is the analysis result for a reducible subsystem.
|
28,482 |
def find_endurance_tier_iops_per_gb(volume):
tier = volume['storageTierLevel']
iops_per_gb = 0.25
if tier == "LOW_INTENSITY_TIER":
iops_per_gb = 0.25
elif tier == "READHEAVY_TIER":
iops_per_gb = 2
elif tier == "WRITEHEAVY_TIER":
iops_per_gb = 4
elif tier == "10_IOPS_PER_GB":
iops_per_gb = 10
else:
raise ValueError("Could not find tier IOPS per GB for this volume")
return iops_per_gb
|
Find the tier for the given endurance volume (IOPS per GB)
:param volume: The volume for which the tier level is desired
:return: Returns a float value indicating the IOPS per GB for the volume
|
28,483 |
def sync(self):
if self.writeback and self.cache:
super(_TimeoutMixin, self).__delitem__(self._INDEX)
super(_TimeoutMixin, self).sync()
self.writeback = False
super(_TimeoutMixin, self).__setitem__(self._INDEX, self._index)
self.writeback = True
if hasattr(self.dict, ):
self.dict.sync()
|
Sync the timeout index entry with the shelf.
|
28,484 |
def wrap_results(self, **kwargs):
if not in kwargs \
or not in kwargs \
or not in kwargs:
logging.error("Missing arguments in wrap_results function")
return {}
external = kwargs[] if in kwargs else None
fd = kwargs[]
url = kwargs[]
length = kwargs[]
results = {}
files = []
wait_time = 15
host = self.divide_url(url)[0]
time.sleep(0.5)
while len(os.listdir(fd)) <= length + self.parsed:
time.sleep(1)
wait_time -= 1
if wait_time == 0:
logging.warning("%s waiting har file result timed out" % url)
results[] = "wrap har file timeout"
if external is not None:
external[url] = results
return results
time.sleep(1)
for fn in os.listdir(fd):
if fn.endswith(".har") and host in fn:
path = os.path.join(fd, fn)
files.append((fn, os.stat(path).st_mtime))
files.sort(key=lambda x: x[1])
if len(files) > 0:
with open(fd + + files[-1][0]) as f:
raw_data = json.load(f)[][]
results = [{} for i in range(0, len(raw_data))]
for i in range(0, len(results)):
results[i][] = {}
results[i][][] = raw_data[i][][]
headers = {}
for header in raw_data[i][][]:
headers[header[]] = header[]
results[i][][] = headers
results[i][] = {}
results[i][][] = raw_data[i][][]
results[i][][] = raw_data[i][][]
headers = {}
for header in raw_data[i][][]:
headers[header[]] = header[]
results[i][][] = headers
results[i][][] = raw_data[i][][]
results[i][][] = raw_data[i][][]
self.parsed += 1
else:
logging.warning("Cannot find har file for %s" % url)
if external is not None:
external[url] = results
else:
return results
|
Wrap the returned HTTP responses into a well-formatted dict.
:param kwargs: this dict param should contain the following keys:
fd: directory to look for the generated .har files in
url: the test url the result belongs to
files_count: the number of files under the har/ directory
:return (dict): the parsed request/response results for all entries
|
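Most of the dictionary keys in the snippet above were lost in extraction; they index into the standard HAR 1.2 layout (`log.entries[*].request` / `.response`). A minimal, self-contained sketch of pulling the same request/response fields out of a HAR file, with the field names taken from the HAR specification rather than recovered from the original code:

.. code-block:: python

    import json

    def parse_har(path):
        """Return a list of {'request': ..., 'response': ...} dicts from a HAR file."""
        with open(path) as f:
            entries = json.load(f)['log']['entries']
        results = []
        for entry in entries:
            req, resp = entry['request'], entry['response']
            results.append({
                'request': {
                    'method': req['method'],
                    'url': req['url'],
                    'headers': {h['name']: h['value'] for h in req['headers']},
                },
                'response': {
                    'status': resp['status'],
                    'headers': {h['name']: h['value'] for h in resp['headers']},
                },
            })
        return results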
28,485 |
def dump_process_memory(self, pid, working_dir="c:\\windows\\carbonblack\\", path_to_procdump=None):
self.go_live()
print("~ dumping memory where pid={} for {}".format(pid, self.sensor.computer_name))
procdump_host_path = None
dir_output = self.lr_session.list_directory(working_dir)
for dir_item in dir_output:
if dir_item[] == :
logging.info("procdump.exe already on host.")
procdump_host_path = working_dir + "procdump.exe"
break
else:
logging.info("Dropping procdump.exe on host.")
if not procdump_host_path:
if not os.path.exists(path_to_procdump):
HOME_DIR = os.path.abspath(os.path.join(os.path.realpath(__file__),,))
path_to_procdump = os.path.join(HOME_DIR, , )
if not os.path.exists(path_to_procdump):
logging.warn("{} not found".format(path_to_procdump))
return False
print("~ dropping procdump.exe on host.")
filedata = None
with open(path_to_procdump, ) as f:
filedata = f.read()
try:
self.lr_session.create_directory(working_dir)
except LiveResponseError:
logging.debug("working directory already exists")
self.lr_session.put_file(filedata, working_dir + "procdump.exe")
procdump_host_path = working_dir + "procdump.exe"
print("~ Executing procdump..")
command_str = procdump_host_path + " -accepteula -ma " + str(pid)
result = self.lr_session.create_process(command_str)
time.sleep(1)
print("+ procdump output:\n-------------------------")
result = result.decode()
print(result + "\n-------------------------")
dumpfile_name = result[result.rfind()+1:result.rfind()+4]
while True:
if not in str(self.lr_session.list_processes()):
break
else:
time.sleep(1)
self.getFile_with_timeout(working_dir + dumpfile_name)
|
Use sysinternals procdump to dump process memory on a specific process. If only the pid is specified, the default
behavior is to use the version of ProcDump supplied with cbinterface's pip3 installer.
:requires: SysInternals ProcDump v9.0 included with cbinterface==1.1.0
:arguments pid: Process id to dump memory for
:arguments working_dir: Specify a directory on the Windows sensor to work out of. Default: C:\\Windows\\CarbonBlack\\
:arguments path_to_procdump: Specify the path to a version of procdump you want to use. Default is included copy
|
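Stripped of the Carbon Black live-response plumbing, the core of the routine is building and running a ProcDump command line (`-accepteula` suppresses the EULA prompt, `-ma` writes a full memory dump) and waiting for it to finish. A local sketch of the same command construction, assuming `procdump.exe` is on PATH; the snippet above instead executes the equivalent command on the remote sensor.

.. code-block:: python

    import subprocess

    def dump_pid(pid, procdump='procdump.exe'):
        # Equivalent command string to the one built above:
        #   procdump.exe -accepteula -ma <pid>
        cmd = [procdump, '-accepteula', '-ma', str(pid)]
        # Runs locally (Python 3.7+); returns ProcDump's console output.
        return subprocess.run(cmd, capture_output=True, text=True).stdout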
28,486 |
def open(self):
self._id = str(uuid.uuid4())
self._client.open_connection(self._id, info=self._connection_args)
|
Opens the connection.
|
28,487 |
def write_file_list(filename, file_list=[], glob=None):
    if glob:
        file_list = iglob(glob)
    with open(filename, 'w') as f:
        for line in file_list:
            f.write(line + '\n')
|
Write a list of files to a file.
:param filename: the name of the file to write the list to
:param file_list: a list of filenames to write to a file
:param glob: if glob is specified, it will ignore file_list and instead
create a list of files based on the pattern provided by glob (ex. *.cub)
|
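A usage sketch, writing one path per line either from an explicit list or from a glob pattern; this assumes `iglob` has been imported from the standard `glob` module at module level. The mutable default `file_list=[]` is shared between calls but harmless here, since the function only iterates over it.

.. code-block:: python

    # Explicit list:
    write_file_list('cubes.lis', file_list=['a.cub', 'b.cub'])

    # Or let the function expand a glob pattern itself:
    write_file_list('cubes.lis', glob='*.cub')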
28,488 |
def split_text(text: str, length: int = MAX_MESSAGE_LENGTH) -> typing.List[str]:
return [text[i:i + length] for i in range(0, len(text), length)]
|
Split long text
:param text:
:param length:
:return: list of parts
:rtype: :obj:`typing.List[str]`
|
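The slicing-by-stride idiom above works for any chunk length; `MAX_MESSAGE_LENGTH` is presumably the Telegram 4096-character limit. A self-contained version with the default written out explicitly:

.. code-block:: python

    def split_text(text, length=4096):
        # Same idiom as above: step through the string in fixed-size strides.
        return [text[i:i + length] for i in range(0, len(text), length)]

    parts = split_text('abcdefgh', length=3)
    assert parts == ['abc', 'def', 'gh']   # the last chunk may be shorter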
28,489 |
def setLevel(self, level):
r
if logging.DEBUG >= level:
formatter = logging.Formatter("%(asctime)s [%(levelname)-8s] %(message)s (in %(module)s.%(funcName)s:%(lineno)s)",
"%d.%m.%Y %H:%M:%S")
self._handler.setFormatter(formatter)
else:
formatter = logging.Formatter("%(asctime)s [%(levelname)-8s] %(message)s",
"%d.%m.%Y %H:%M:%S")
self._handler.setFormatter(formatter)
NativeLogger.setLevel(self, level)
|
r"""Overrides the parent method to adapt the formatting string to the level.
Parameters
----------
level : int
The new log level to set. See the logging levels in the logging module for details.
Examples
--------
>>> import logging
>>> Logger.setLevel(logging.DEBUG)
|
28,490 |
def shift(self, delta):
self.x0 = self.x0 + Quantity(delta, self.xunit)
|
Shift this `Series` forward on the X-axis by ``delta``
This modifies the series in-place.
Parameters
----------
delta : `float`, `~astropy.units.Quantity`, `str`
The amount by which to shift (in x-axis units if `float`); give
a negative value to shift backwards in time.
Examples
--------
>>> from gwpy.types import Series
>>> a = Series([1, 2, 3, 4, 5], x0=0, dx=1, xunit='m')
>>> print(a.x0)
0.0 m
>>> a.shift(5)
>>> print(a.x0)
5.0 m
>>> a.shift('-1 km')
>>> print(a.x0)
-995.0 m
|
28,491 |
def get_person(people_id):
result = _get(people_id, settings.PEOPLE)
return People(result.content)
|
Return a single person
|
28,492 |
def _create_alpha(self, data, fill_value=None):
not_alpha = [b for b in data.coords['bands'].values if b != 'A']
null_mask = data.sel(bands=not_alpha)
if np.issubdtype(data.dtype, np.integer) and fill_value is not None:
null_mask = null_mask != fill_value
else:
null_mask = null_mask.notnull()
return null_mask
|
Create an alpha band DataArray object.
If `fill_value` is provided and input data is an integer type
then it is used to determine invalid "null" pixels instead of
xarray's `isnull` and `notnull` methods.
The returned array is 1 where data is valid, 0 where invalid.
|
28,493 |
def _initialize(self,
provide_data: List[mx.io.DataDesc],
provide_label: List[mx.io.DataDesc],
default_bucket_key: Tuple[int, int]) -> None:
source = mx.sym.Variable(C.SOURCE_NAME)
source_words = source.split(num_outputs=self.config.config_embed_source.num_factors,
axis=2, squeeze_axis=True)[0]
source_length = utils.compute_lengths(source_words)
target = mx.sym.Variable(C.TARGET_NAME)
target_length = utils.compute_lengths(target)
labels = mx.sym.Variable(C.TARGET_LABEL_NAME)
data_names = [C.SOURCE_NAME, C.TARGET_NAME]
label_names = [C.TARGET_LABEL_NAME]
provide_data_names = [d[0] for d in provide_data]
utils.check_condition(provide_data_names == data_names,
"incompatible provide_data: %s, names should be %s" % (provide_data_names, data_names))
provide_label_names = [d[0] for d in provide_label]
utils.check_condition(provide_label_names == label_names,
"incompatible provide_label: %s, names should be %s" % (provide_label_names, label_names))
def sym_gen(seq_lens):
source_seq_len, target_seq_len = seq_lens
(source_embed,
source_embed_length,
source_embed_seq_len) = self.embedding_source.encode(source, source_length, source_seq_len)
(target_embed,
target_embed_length,
target_embed_seq_len) = self.embedding_target.encode(target, target_length, target_seq_len)
(source_encoded,
source_encoded_length,
source_encoded_seq_len) = self.encoder.encode(source_embed,
source_embed_length,
source_embed_seq_len)
target_decoded = self.decoder.decode_sequence(source_encoded, source_encoded_length, source_encoded_seq_len,
target_embed, target_embed_length, target_embed_seq_len)
logits = self.output_layer(mx.sym.reshape(data=target_decoded, shape=(-3, 0)))
logits = mx.sym.reshape(data=logits, shape=(-4, -1, target_embed_seq_len, 0))
if self.softmax_temperature is not None:
logits = logits / self.softmax_temperature
target_dists = mx.sym.softmax(data=logits, axis=2, name=C.SOFTMAX_NAME)
probs = mx.sym.pick(target_dists, labels)
scores = mx.sym.log(probs)
if self.score_type == C.SCORING_TYPE_NEGLOGPROB:
scores = -1 * scores
zeros = mx.sym.zeros_like(scores)
sums = mx.sym.sum(mx.sym.where(labels != 0, scores, zeros), axis=1) / (self.length_penalty(target_length - 1))
if self.constant_length_ratio > 0.0:
length_ratio = self.constant_length_ratio * mx.sym.ones_like(sums)
else:
length_ratio = self.length_ratio(source_encoded, source_encoded_length).reshape((-1,)) \
if self.length_ratio is not None else mx.sym.zeros_like(sums)
sums = sums - self.brevity_penalty(target_length - 1, length_ratio * source_encoded_length)
return mx.sym.Group([sums, target_dists]), data_names, label_names
symbol, _, __ = sym_gen(default_bucket_key)
self.module = mx.mod.Module(symbol=symbol,
data_names=data_names,
label_names=label_names,
logger=logger,
context=self.context)
self.module.bind(data_shapes=provide_data,
label_shapes=provide_label,
for_training=False,
force_rebind=False,
grad_req=)
|
Initializes model components, creates scoring symbol and module, and binds it.
:param provide_data: List of data descriptors.
:param provide_label: List of label descriptors.
:param default_bucket_key: The default maximum (source, target) lengths.
|
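Stripped of the MXNet symbol plumbing, the per-sentence score above is the sum of target-token log-probabilities (positions whose label is 0, i.e. padding, are masked out), optionally negated, and divided by a length penalty. A NumPy sketch of that arithmetic; the GNMT-style `((5 + len) / 6) ** alpha` penalty is an assumption standing in for the model's configurable one, and the brevity-penalty term is omitted.

.. code-block:: python

    import numpy as np

    def sentence_score(token_probs, labels, alpha=1.0, neglogprob=True):
        """token_probs: model probability of each reference token; labels: token ids (0 = pad)."""
        scores = np.log(token_probs)
        if neglogprob:
            scores = -scores
        masked = np.where(labels != 0, scores, 0.0)       # ignore padding positions
        length = np.count_nonzero(labels)
        length_penalty = ((5.0 + length) / 6.0) ** alpha  # assumed penalty form
        return masked.sum() / length_penalty

    probs = np.array([0.5, 0.25, 0.125, 1.0])   # last position is padding
    labels = np.array([7, 3, 9, 0])
    print(sentence_score(probs, labels))        # ~ (0.69 + 1.39 + 2.08) / 1.33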
28,494 |
def show_account():
click.echo("
for (key, env) in REVERSE_MAPPING.items():
value = QUBELL.get(key, None)
if value:
click.echo("export %s=" % (env, value))
if any(map(lambda x: PROVIDER.get(x), REVERSE_PROVIDER_MAPPING.keys())):
click.echo("
for (key, env) in REVERSE_PROVIDER_MAPPING.items():
value = PROVIDER.get(key, None)
if value:
click.echo("export %s=" % (env, value))
|
Exports current account configuration in
shell-friendly form. Takes into account
explicit top-level flags like --organization.
|
28,495 |
def silence_warnings(*warnings):
for warning in warnings:
silence(warning)
try:
yield
finally:
for warning in warnings:
silence(warning, False)
|
Context manager for silencing bokeh validation warnings.
|
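For the generator above to work in a `with` statement it is presumably decorated with `contextlib.contextmanager` (the decorator is not shown in the snippet). A self-contained sketch of the same enable/disable-around-`yield` pattern, using a hypothetical `silence(code, silenced=True)` toggle rather than bokeh's own:

.. code-block:: python

    from contextlib import contextmanager

    _silenced = set()

    def silence(code, silenced=True):
        # Hypothetical stand-in for the real silence() helper.
        if silenced:
            _silenced.add(code)
        else:
            _silenced.discard(code)

    @contextmanager
    def silence_warnings(*codes):
        for code in codes:
            silence(code)
        try:
            yield
        finally:
            for code in codes:
                silence(code, False)

    with silence_warnings(1002, 1004):   # example warning codes
        pass  # warnings with these codes are suppressed inside this block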
28,496 |
def vsreenqueue(item_id, item_s, args, **kwargs):
charset = kwargs.get(, _c.FSQ_CHARSET)
if kwargs.has_key():
del kwargs[]
kwargs[] = item_id
if isinstance(item_s, unicode):
try:
item_s = item_s.encode(charset)
except UnicodeEncodeError:
raise FSQCoerceError(errno.EINVAL, u\
u.format(charset))
return vreenqueue(StringIO(item_s), item_id, args, **kwargs)
|
Enqueue a string, or string-like object, to other queues with arbitrary
arguments; sreenqueue is to reenqueue what sprintf is to printf, and
sreenqueue is to vsreenqueue what sprintf is to vsprintf.
|
28,497 |
def run_process(self, process):
message = u
message += u.format(str(process)[:68]).ljust(69, )
stashed = False
if self.unstaged_changes and not self.include_unstaged_changes:
out, err, code = self.git.stash(keep_index=True, quiet=True)
stashed = code == 0
try:
result = process(files=self.files, cwd=self.cwd, fix=self.fix)
out, err, code = self.git.status(porcelain=True, untracked_files=)
for line in out.splitlines():
file_status = Status(line)
if file_status.path in self.files and file_status.is_modified:
mtime = os.path.getmtime(file_status.path) if os.path.exists(file_status.path) else 0
if mtime > self.file_mtimes.get(file_status.path, 0):
self.file_mtimes[file_status.path] = mtime
result.add_modified_file(file_status.path)
if self.stage_modified_files:
self.git.add(file_status.path)
except:
raise
finally:
if stashed:
self.git.reset(hard=True, quiet=True)
self.git.stash.pop(index=True, quiet=True)
if result.is_success:
message += u
elif result.is_failure:
message += u
elif result.is_skip:
message += u
elif result.is_error:
message += u
return result, message
|
Runs a single action.
|
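The modified-file detection above hinges on parsing `git status --porcelain` output, where each line is a two-character status code, a space, and the path. A small sketch of that parsing with a hypothetical namedtuple standing in for the `Status` class used above (which is not shown here):

.. code-block:: python

    from collections import namedtuple

    GitStatus = namedtuple('GitStatus', 'code path is_modified')

    def parse_porcelain(line):
        # Porcelain v1 format: XY<space>path, e.g. ' M hooks/therapist.py' or '?? new.py'
        code, path = line[:2], line[3:]
        return GitStatus(code=code, path=path, is_modified='M' in code)

    s = parse_porcelain(' M hooks/therapist.py')
    assert s.is_modified and s.path == 'hooks/therapist.py'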
28,498 |
def import_from_dict(session, data, sync=[]):
if isinstance(data, dict):
logging.info(,
len(data.get(DATABASES_KEY, [])),
DATABASES_KEY)
for database in data.get(DATABASES_KEY, []):
Database.import_from_dict(session, database, sync=sync)
logging.info(,
len(data.get(DRUID_CLUSTERS_KEY, [])),
DRUID_CLUSTERS_KEY)
for datasource in data.get(DRUID_CLUSTERS_KEY, []):
DruidCluster.import_from_dict(session, datasource, sync=sync)
session.commit()
else:
logging.info()
|
Imports databases and Druid clusters from a dictionary.
|
28,499 |
def evaluate():
print(eval_model)
eval_model.initialize(mx.init.Xavier(), ctx=context[0])
eval_model.hybridize(static_alloc=True, static_shape=True)
epoch = args.from_epoch if args.from_epoch else 0
while epoch < args.epochs:
checkpoint_name = %(args.save, format(epoch, ))
if not os.path.exists(checkpoint_name):
print()
time.sleep(600)
continue
eval_model.load_parameters(checkpoint_name)
print(%(checkpoint_name))
start_epoch_time = time.time()
final_test_L = test(test_data, test_batch_size, ctx=context[0])
end_epoch_time = time.time()
print(%
(epoch, final_test_L, math.exp(final_test_L)))
print(%(epoch, end_epoch_time - start_epoch_time))
sys.stdout.flush()
epoch += 1
|
Evaluation loop for the trained model.
|