Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
22,200 | def queryTs(ts, expression):
_idx = []
new_ts = ts
if isinstance(expression, str):
expr_lst = translate_expression(expression)
if expr_lst:
new_ts, _idx = get_matches(expr_lst, new_ts)
elif isinstance(expression, list):
for expr in expression:
expr_lst = translate_expression(expr)
if expr_lst:
new_ts, _idx = get_matches(expr_lst, new_ts)
return _idx | Find the indices of the time series entries that match the given expression.
| Example:
| D = lipd.loadLipd()
| ts = lipd.extractTs(D)
| matches = queryTs(ts, "archiveType == marine sediment")
| matches = queryTs(ts, "geo_meanElev <= 2000")
:param str expression: Expression
:param list ts: Time series
:return list _idx: Indices of entries that match the criteria |
22,201 | def now(utc=False, tz=None):
return datetime.datetime.utcnow() if utc else datetime.datetime.now(tz=tz) | Get a current DateTime object. By default it is local.
.. code:: python
reusables.now()
# DateTime(2016, 12, 8, 22, 5, 2, 517000)
reusables.now().format("It's {24-hour}:{min}")
# "It's 22:05"
:param utc: bool, default False, UTC time not local
:param tz: TimeZone as specified by the datetime module
:return: reusables.DateTime |
22,202 | def includes(self):
incs = self.combined_properties()
processed_incs = []
for prop in incs:
if isinstance(prop, str):
processed_incs.append(prop)
else:
processed_incs.append(os.path.join(*prop))
fullpaths = [os.path.normpath(os.path.join(, x)) for x in processed_incs]
fullpaths.append(os.path.normpath(os.path.abspath(self.build_dirs()[])))
return fullpaths | Return all of the include directories for this chip as a list. |
22,203 | def backwards(self, orm):
"Write your backwards methods here."
orm[].objects.all().delete()
orm[].objects.exclude(name=DEFAULT_COHORT_NAME).delete() | Write your backwards methods here. |
22,204 | def files(self, creds, options, dry_run):
if options.mode == blobxfer.models.azure.StorageModes.File:
for file in self._populate_from_list_files(
creds, options, dry_run):
yield file
else:
for blob in self._populate_from_list_blobs(
creds, options, dry_run):
yield blob | Generator of Azure remote files or blobs
:param SourcePath self: this
:param StorageCredentials creds: storage creds
:param blobxfer.models.options.Download options: download options
:param bool dry_run: dry run
:rtype: StorageEntity
:return: Azure storage entity object |
22,205 | def serve(destination, port, config):
if os.path.exists(destination):
pass
elif os.path.exists(config):
settings = read_settings(config)
destination = settings.get('destination')
if not os.path.exists(destination):
sys.stderr.write("The {destination} directory doesn't exist "
"and the config file ({config}) could not be read.\n"
.format(destination=destination, config=config))
sys.exit(2)
print(.format(destination))
os.chdir(destination)
Handler = server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", port), Handler, False)
print(" * Running on http://127.0.0.1:{}/".format(port))
try:
httpd.allow_reuse_address = True
httpd.server_bind()
httpd.server_activate()
httpd.serve_forever()
except KeyboardInterrupt:
print() | Run a simple web server. |
22,206 | def copyfileobj(src, dst, length=None, exception=OSError):
if length == 0:
return
if length is None:
shutil.copyfileobj(src, dst)
return
blocks, remainder = divmod(length, BUFSIZE)
for _ in range(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise exception("unexpected end of data")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise exception("unexpected end of data")
dst.write(buf)
return | Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content. |
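A minimal usage sketch for the copyfileobj helper above (it relies on a module-level BUFSIZE constant that is not shown in this row; 16 KiB, as in the standard library's tarfile, is assumed here):
.. code:: python

    import io

    BUFSIZE = 16 * 1024  # assumed value of the module-level constant

    src = io.BytesIO(b"x" * 100000)
    dst = io.BytesIO()

    copyfileobj(src, dst, length=65536)   # copy exactly 64 KiB, block by block
    assert dst.tell() == 65536

    copyfileobj(src, dst)                 # length=None delegates to shutil.copyfileobj
    assert dst.tell() == 100000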
22,207 | def set_data(self, frames):
data_frames = []
for frame in frames:
frame = frame.swapaxes(0, 1)
if len(frame.shape) < 3:
frame = np.array([frame]).swapaxes(0, 2).swapaxes(0, 1)
data_frames.append(frame)
frames_n = len(data_frames)
data_frames = np.array(data_frames)
data_frames = np.rollaxis(data_frames, 3)
data_frames = data_frames.swapaxes(2, 3)
self.data = data_frames
self.length = frames_n | Prepare the input of model |
22,208 | def _get_upload_session_status(res):
response = json.loads(res.body.decode())
if not in response:
try:
info = (
response[][]
[]
[][]
)
reason = .format(info[], info[])
except KeyError:
reason =
raise exceptions.NetworkError(.format(
reason
))
return response[] | Parse the image upload response to obtain status.
Args:
res: http_utils.FetchResponse instance, the upload response
Returns:
dict, sessionStatus of the response
Raises:
hangups.NetworkError: If the upload request failed. |
22,209 | def _fire_bundle_event(self, kind):
self.__framework._dispatcher.fire_bundle_event(BundleEvent(kind, self)) | Fires a bundle event of the given kind
:param kind: Kind of event |
22,210 | def get_object_cat1(con, token, cat, kwargs):
req_str = "/"+kwargs[]+"?"
req_str += "access_token="+token
del kwargs[]
key = settings.get_object_cat1_param[cat]
req_str += "&"+key+"="
if key in kwargs.keys():
length = len( kwargs[key] )
for i in range(length):
if i == 0:
req_str += kwargs[key][i]
else:
req_str += ","+kwargs[key][i]
else:
return "Parameter Error"
res = wiring.send_request("GET", con, req_str, )
return res | Constructs the "GET" URL. The function is used by the get_object method.
First category of "GET" URL construction. It is called the first category because more
complex functions may be added later. |
22,211 | def organize_objects(self):
def _render_children(obj):
for child in obj.children_strings:
child_object = self.objects.get(child)
if child_object:
obj.item_map[child_object.plural].append(child_object)
obj.children.append(child_object)
for key in obj.item_map:
obj.item_map[key].sort()
def _recurse_ns(obj):
if not obj:
return
namespace = obj.top_namespace
if namespace is not None:
ns_obj = self.top_namespaces.get(namespace)
if ns_obj is None or not isinstance(ns_obj, DotNetNamespace):
for ns_obj in self.create_class(
{"uid": namespace, "type": "namespace"}
):
self.top_namespaces[ns_obj.id] = ns_obj
if obj not in ns_obj.children and namespace != obj.id:
ns_obj.children.append(obj)
for obj in self.objects.values():
_render_children(obj)
_recurse_ns(obj)
for key, ns in self.top_namespaces.copy().items():
if not ns.children:
del self.top_namespaces[key]
for key, ns in self.namespaces.items():
if not ns.children:
del self.namespaces[key] | Organize objects and namespaces |
22,212 | def bounding_box(self):
from . import primitives
transform = np.eye(4)
transform[:3, 3] = self.bounds.mean(axis=0)
aabb = primitives.Box(transform=transform,
extents=self.extents,
mutable=False)
return aabb | An axis aligned bounding box for the current mesh.
Returns
----------
aabb : trimesh.primitives.Box
Box object with transform and extents defined
representing the axis aligned bounding box of the mesh |
22,213 | def priority_enqueue(self,
function,
name=None,
force_start=False,
times=1,
data=None):
self._check_if_ready()
return self.main_loop.priority_enqueue(function,
name,
force_start,
times,
data) | Like :class:`enqueue()`, but adds the given function at the top of the
queue.
If force_start is True, the function is immediately started even when
the maximum number of concurrent threads is already reached.
:type function: callable
:param function: The function that is executed.
:type name: str
:param name: Stored in Job.name.
:type force_start: bool
:param force_start: Whether to start execution immediately.
:type times: int
:param times: The maximum number of attempts.
:type data: object
:param data: Optional data to store in Job.data.
:rtype: int
:return: The id of the new job. |
22,214 | def organisations(self):
class Org:
def __init__(self, sdo_id, org_id, members, obj):
self.sdo_id = sdo_id
self.org_id = org_id
self.members = members
self.obj = obj
with self._mutex:
if not self._orgs:
for org in self._obj.get_owned_organizations():
owner = org.get_owner()
if owner:
sdo_id = owner._narrow(SDOPackage.SDO).get_sdo_id()
else:
sdo_id = ''
org_id = org.get_organization_id()
members = [m.get_sdo_id() for m in org.get_members()]
self._orgs.append(Org(sdo_id, org_id, members, org))
return self._orgs | The organisations of this composition. |
22,215 | def create_filters(model, filter_info, resource):
filters = []
for filter_ in filter_info:
filters.append(Node(model, filter_, resource, resource.schema).resolve())
return filters | Apply filters from filters information to base query
:param DeclarativeMeta model: the model of the node
:param dict filter_info: current node filter information
:param Resource resource: the resource |
22,216 | def Plus(self, other):
return Point(self.x + other.x,
self.y + other.y,
self.z + other.z) | Returns a new point which is the pointwise sum of self and other. |
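For example (assuming Point's constructor takes x, y, z positionally, as the call above implies):
.. code:: python

    p = Point(1, 2, 3).Plus(Point(4, 5, 6))
    # p.x, p.y, p.z == 5, 7, 9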
22,217 | def simulated_binary_crossover(random, mom, dad, args):
crossover_rate = args.setdefault('crossover_rate', 1.0)
if random.random() < crossover_rate:
di = args.setdefault('sbx_distribution_index', 10)
bounder = args['_ec'].bounder  # '_ec' assumed: inspyred's evolutionary-computation bundle in args
bro = copy.copy(dad)
sis = copy.copy(mom)
for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):
try:
if m > d:
m, d = d, m
beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)
alpha = 2.0 - 1.0 / beta**(di + 1.0)
u = random.random()
if u <= (1.0 / alpha):
beta_q = (u * alpha)**(1.0 / float(di + 1.0))
else:
beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))
bro_val = 0.5 * ((m + d) - beta_q * (d - m))
bro_val = max(min(bro_val, ub), lb)
sis_val = 0.5 * ((m + d) + beta_q * (d - m))
sis_val = max(min(sis_val, ub), lb)
if random.random() > 0.5:
bro_val, sis_val = sis_val, bro_val
bro[i] = bro_val
sis[i] = sis_val
except ZeroDivisionError:
pass
return [bro, sis]
else:
return [mom, dad] | Return the offspring of simulated binary crossover on the candidates.
This function performs simulated binary crossover (SBX), following the
implementation in NSGA-II
`(Deb et al., ICANNGA 1999) <http://vision.ucsd.edu/~sagarwal/icannga.pdf>`_.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *sbx_distribution_index* -- the non-negative distribution index
(default 10)
A small value of the `sbx_distribution_index` optional argument allows
solutions far away from parents to be created as child solutions,
while a large value restricts only near-parent solutions to be created as
child solutions. |
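For reference, the spread factor computed in the loop above can be written out as follows (with parent genes m <= d, bounds [lb, ub], distribution index eta = sbx_distribution_index, and u drawn uniformly from [0, 1)):
.. math::

    \beta = 1 + \frac{2\,\min(m - lb,\; ub - d)}{d - m}, \qquad
    \alpha = 2 - \beta^{-(\eta + 1)}, \qquad
    \beta_q =
    \begin{cases}
    (u\alpha)^{1/(\eta+1)}, & u \le 1/\alpha \\
    \left(\dfrac{1}{2 - u\alpha}\right)^{1/(\eta+1)}, & \text{otherwise}
    \end{cases}

The two children are then :math:`\tfrac{1}{2}\left[(m + d) \mp \beta_q (d - m)\right]`, clipped to :math:`[lb, ub]` and swapped with probability 0.5, exactly as in the loop body.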
22,218 | def extract_error_message(cls, e):
message = str(e)
try:
if isinstance(e.args, tuple) and len(e.args) > 1:
message = e.args[1]
except Exception:
pass
return message | Extract error message for queries |
22,219 | def decode_fetch_response(cls, response):
return [
kafka.structs.FetchResponsePayload(
topic, partition, error, highwater_offset, [
offset_and_msg
for offset_and_msg in cls.decode_message_set(messages)])
for topic, partitions in response.topics
for partition, error, highwater_offset, messages in partitions
] | Decode FetchResponse struct to FetchResponsePayloads
Arguments:
response: FetchResponse |
22,220 | def head_coaches_by_game(self, year):
coach_str = self._year_info_pq(year, ).text()
regex = r
coachAndTenure = []
m = True
while m:
m = re.search(regex, coach_str)
if not m:
break
coachID, wins, losses, ties = m.groups()
nextIndex = m.end(4) + 1
coach_str = coach_str[nextIndex:]
tenure = int(wins) + int(losses) + int(ties)
coachAndTenure.append((coachID, tenure))
coachIDs = [
cID for cID, games in coachAndTenure for _ in range(games)
]
return np.array(coachIDs[::-1]) | Returns head coach data by game.
:year: An int representing the season in question.
:returns: An array with an entry per game of the season that the team
played (including playoffs). Each entry is the head coach's ID for that
game in the season. |
22,221 | def _collect_memory_descriptors(program: Program) -> Dict[str, ParameterSpec]:
return {
instr.name: ParameterSpec(type=instr.memory_type, length=instr.memory_size)
for instr in program if isinstance(instr, Declare)
} | Collect Declare instructions that are important for building the patch table.
This is secretly stored on BinaryExecutableResponse. We're careful to make sure
these objects are json serializable.
:return: A dictionary of variable names to specs about the declared region. |
22,222 | def get_authorizations_for_agent_and_function(self, agent_id, function_id):
collection = JSONClientValidated(,
collection=,
runtime=self._runtime)
result = collection.find(
dict({: str(agent_id),
: str(function_id)},
**self._view_filter())).sort(, ASCENDING)
return objects.AuthorizationList(result, runtime=self._runtime) | Gets a list of ``Authorizations`` associated with a given agent.
Authorizations related to the given resource, including those
related through an ``Agent,`` are returned. In plenary mode, the
returned list contains all known authorizations or an error
results. Otherwise, the returned list may contain only those
authorizations that are accessible through this session.
arg: agent_id (osid.id.Id): an agent ``Id``
arg: function_id (osid.id.Id): a function ``Id``
return: (osid.authorization.AuthorizationList) - the returned
``Authorization list``
raise: NullArgument - ``agent_id`` or ``function_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
22,223 | def set_moving_image(self, image):
if not isinstance(image, iio.ANTsImage):
raise ValueError()
if image.dimension != self.dimension:
raise ValueError( % (image.dimension, self.dimension))
self._metric.setMovingImage(image.pointer, False)
self.moving_image = image | Set Moving ANTsImage for metric |
22,224 | def get_z_variable(nc):
z_variables = get_z_variables(nc)
if not z_variables:
return None
for var in z_variables:
ncvar = nc.variables[var]
if getattr(ncvar, , None) in (, , ):
return var
for var in z_variables:
ncvar = nc.variables[var]
units = getattr(ncvar, , None)
if isinstance(units, basestring):
if units_convertible(units, ):
return var
if units_convertible(units, ):
return var
return z_variables[0] | Returns the name of the variable that defines the Z axis or height/depth
:param netCDF4.Dataset nc: netCDF dataset |
22,225 | def call(self, cmd, **kwargs):
if isinstance(cmd, basestring):
cmd = cmd.split()
self.log.info(, cmd)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
out, err = p.communicate()
if out:
self.log.info(out)
if err:
if p.returncode == 0:
self.log.info(err)
else:
self.log.error(err)
if p.returncode != 0:
self.log.error( % p.returncode)
raise Exception
return out, err, p.returncode | A simple subprocess wrapper |
22,226 | def mark_all_as_read(self, recipient=None):
qset = self.unread(True)
if recipient:
qset = qset.filter(recipient=recipient)
return qset.update(unread=False) | Mark as read any unread messages in the current queryset.
Optionally, filter these by recipient first. |
22,227 | def map(self, width, height):
template = ti.load(os.path.join(script_dir, , ))[]
template.set_view(0, 0, width*template.tw, height*template.th)
border_x = template.cells[width]
for y in xrange(0,height+1):
border_x[y].tile = template.cells[0][0].tile
for x in xrange(0,width):
template.cells[x][height].tile = template.cells[0][0].tile
self.recursive_division(template.cells, 3, width, height, 0, 0)
return template | Creates and returns a new randomly generated map |
22,228 | def get_brandings(self):
connection = Connection(self.token)
connection.set_url(self.production, self.BRANDINGS_URL)
return connection.get_request() | Get all account brandings
@return List of brandings |
22,229 | def get_closest(self, sma):
index = (np.abs(self.sma - sma)).argmin()
return self._list[index] | Return the `~photutils.isophote.Isophote` instance that has the
closest semimajor axis length to the input semimajor axis.
Parameters
----------
sma : float
The semimajor axis length.
Returns
-------
isophote : `~photutils.isophote.Isophote` instance
The isophote with the closest semimajor axis value. |
22,230 | def contains(self, k):
if self._changed():
self._read()
return k in self.store.keys() | Return True if key `k` exists |
22,231 | def push(self, x):
if isinstance(x, Frame):
frame = x
else:
frame = Frame(x)
self.stack.append(frame)
log.debug(, Repr(frame), Repr(self.stack))
return frame | Push an I{object} onto the stack.
@param x: An object to push.
@type x: L{Frame}
@return: The pushed frame.
@rtype: L{Frame} |
22,232 | def get_types(self):
types = [str, int, int]
if self.strandPos is not None:
types.append(str)
if self.otherPos:
for o in self.otherPos:
types.append(o[2])
return types | Returns the unordered list of data types
:return: list of data types |
22,233 | def sensor(self, sensor_type):
_LOGGER.debug("Reading %s sensor.", sensor_type)
return self._session.read_sensor(self.device_id, sensor_type) | Update and return sensor value. |
22,234 | def root_rhx_gis(self) -> Optional[str]:
if self.is_logged_in:
return None
if not self._root_rhx_gis:
self._root_rhx_gis = self.get_json(, {})[]
return self._root_rhx_gis | rhx_gis string returned in the / query. |
22,235 | def blkid(device=None, token=None):
cmd = []
if device:
cmd.append(device)
elif token:
cmd.extend([, token])
ret = {}
blkid_result = __salt__[](cmd, python_shell=False)
if blkid_result[] > 0:
return ret
for line in blkid_result[].splitlines():
if not line:
continue
comps = line.split()
device = comps[0][:-1]
info = {}
device_attributes = re.split((), line.partition()[2])
for key, value in zip(*[iter(device_attributes)]*2):
key = key.strip().strip()
info[key] = value.strip()
ret[device] = info
return ret | Return block device attributes: UUID, LABEL, etc. This function only works
on systems where blkid is available.
device
Device name from the system
token
Any valid token used for the search
CLI Example:
.. code-block:: bash
salt '*' disk.blkid
salt '*' disk.blkid /dev/sda
salt '*' disk.blkid token='UUID=6a38ee5-7235-44e7-8b22-816a403bad5d'
salt '*' disk.blkid token='TYPE=ext4' |
22,236 | def _parse_outgoing_mail(sender, to, msgstring):
global OUTBOX
OUTBOX.append(email.message_from_string(msgstring))
return | Parse an outgoing mail and put it into the OUTBOX.
Arguments:
- `sender`: str
- `to`: str
- `msgstring`: str
Return: None
Exceptions: None |
22,237 | def validate_path(xj_path):
if not isinstance(xj_path, str):
raise XJPathError()
for path in split(xj_path, ):
if path == :
continue
if path.startswith():
if path == or path == :
continue
try:
int(path[1:])
except ValueError:
raise XJPathError(
) | Validates XJ path.
:param str xj_path: XJ Path
:raise: XJPathError if validation fails. |
22,238 | def set_sp_template_updated(self, vlan_id, sp_template, device_id):
entry = self.get_sp_template_vlan_entry(vlan_id,
sp_template,
device_id)
if entry:
entry.updated_on_ucs = True
self.session.merge(entry)
return entry
else:
return False | Sets update_on_ucs flag to True. |
22,239 | def on_for_degrees(self, steering, speed, degrees, brake=True, block=True):
(left_speed, right_speed) = self.get_speed_steering(steering, speed)
MoveTank.on_for_degrees(self, SpeedNativeUnits(left_speed), SpeedNativeUnits(right_speed), degrees, brake, block) | Rotate the motors according to the provided ``steering``.
The distance each motor will travel follows the rules of :meth:`MoveTank.on_for_degrees`. |
22,240 | def extract_domain(host):
host = re.sub(, , host)
host = re.match(, host).groups()[0]
domain = .join(host.split()[-2:])
if domain in _domains:
domain = .join(host.split()[-3:])
return domain | Domain name extractor. Turns host names into domain names, ported
from pwdhash javascript code |
22,241 | def insert(self, table, value, ignore=False, commit=True):
value_q, _args = self._value_parser(value, columnname=False)
_sql = .join([, if ignore else , , self._backtick(table),
, self._backtick_columns(value), , value_q, ])
if self.debug:
return self.cur.mogrify(_sql, _args)
self.cur.execute(_sql, _args)
if commit:
self.conn.commit()
return self.cur.lastrowid | Insert a dict into db.
:type table: string
:type value: dict
:type ignore: bool
:type commit: bool
:return: int. The row id of the insert. |
22,242 | def statistical_distances(samples1, samples2, earth_mover_dist=True,
energy_dist=True):
out = []
temp = scipy.stats.ks_2samp(samples1, samples2)
out.append(temp.pvalue)
out.append(temp.statistic)
if earth_mover_dist:
out.append(scipy.stats.wasserstein_distance(samples1, samples2))
if energy_dist:
out.append(scipy.stats.energy_distance(samples1, samples2))
return np.asarray(out) | Compute measures of the statistical distance between samples.
Parameters
----------
samples1: 1d array
samples2: 1d array
earth_mover_dist: bool, optional
Whether or not to compute the Earth mover's distance between the
samples.
energy_dist: bool, optional
Whether or not to compute the energy distance between the samples.
Returns
-------
1d array |
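For example (assuming the module imports scipy.stats and numpy, as the body above implies):
.. code:: python

    import numpy as np

    rng = np.random.default_rng(0)
    a = rng.normal(0.0, 1.0, 1000)
    b = rng.normal(0.5, 1.0, 1000)

    # [KS p-value, KS statistic, earth mover's distance, energy distance]
    print(statistical_distances(a, b))
    # [KS p-value, KS statistic] only
    print(statistical_distances(a, b, earth_mover_dist=False, energy_dist=False))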
22,243 | def get_answers(self, assessment_section_id, item_id):
if self.is_answer_available(assessment_section_id, item_id):
return self.get_assessment_section(assessment_section_id).get_answers(question_id=item_id)
raise errors.IllegalState() | Gets the acceptable answers to the associated item.
arg: assessment_section_id (osid.id.Id): ``Id`` of the
``AssessmentSection``
arg: item_id (osid.id.Id): ``Id`` of the ``Item``
return: (osid.assessment.AnswerList) - the answers
raise: IllegalState - ``is_answer_available()`` is ``false``
raise: NotFound - ``assessment_section_id or item_id is not
found, or item_id not part of assessment_section_id``
raise: NullArgument - ``assessment_section_id or item_id is
null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
22,244 | def load(cls, filename, project=None, delim=):
r
net = {}
filename = cls._parse_filename(filename, ext=)
tree = ET.parse(filename)
piece_node = tree.find().find()
conn_element = piece_node.find().find()
conns = VTK._element_to_array(conn_element, 2)
coord_element = piece_node.find().find()
coords = VTK._element_to_array(coord_element, 3)
for item in piece_node.find().iter():
key = item.get()
array = VTK._element_to_array(item)
net[key] = array
for item in piece_node.find().iter():
key = item.get()
array = VTK._element_to_array(item)
net[key] = array
if project is None:
project = ws.new_project()
project = Dict.from_dict(dct=net, project=project, delim=delim)
return project | r"""
Read in pore and throat data from a saved VTK file.
Parameters
----------
filename : string (optional)
The name of the file containing the data to import. The formatting
of this file is outlined below.
project : OpenPNM Project object
A GenericNetwork is created and added to the specified Project.
If no Project is supplied then one will be created and returned. |
22,245 | def set_params(self, data):
bytes_io = BytesIO(data)
assert struct.unpack(, bytes_io.read(4))[0] == resPQ.constructor
self.nonce = bytes_io.read(16)
self.server_nonce = bytes_io.read(16)
self.pq = deserialize_string(bytes_io)
assert struct.unpack(, bytes_io.read(4))[0] == 0x1cb5c415
count = struct.unpack(, bytes_io.read(4))[0]
for _ in range(count):
self.server_public_key_fingerprints.append(struct.unpack(, bytes_io.read(8))[0]) | resPQ#05162463 nonce:int128 server_nonce:int128 pq:string server_public_key_fingerprints:Vector long = ResPQ |
22,246 | def raw_pressure_encode(self, time_usec, press_abs, press_diff1, press_diff2, temperature):
return MAVLink_raw_pressure_message(time_usec, press_abs, press_diff1, press_diff2, temperature) | The RAW pressure readings for the typical setup of one absolute
pressure and one differential pressure sensor. The
sensor values should be the raw, UNSCALED ADC values.
time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
press_abs : Absolute pressure (raw) (int16_t)
press_diff1 : Differential pressure 1 (raw, 0 if nonexistent) (int16_t)
press_diff2 : Differential pressure 2 (raw, 0 if nonexistent) (int16_t)
temperature : Raw Temperature measurement (raw) (int16_t) |
22,247 | def __convert_json_to_projects_map(self, json):
ds_repo_to_prj = {}
for project in json:
for ds in json[project]:
if ds == "meta":
continue
if ds not in ds_repo_to_prj:
if ds not in ds_repo_to_prj:
ds_repo_to_prj[ds] = {}
for repo in json[project][ds]:
if repo in ds_repo_to_prj[ds]:
if project == ds_repo_to_prj[ds][repo]:
logger.debug("Duplicated repo: %s %s %s", ds, repo, project)
else:
if len(project.split(".")) > len(ds_repo_to_prj[ds][repo].split(".")):
logger.debug("Changed repo project because we found a leaf: %s leaf vs %s (%s, %s)",
project, ds_repo_to_prj[ds][repo], repo, ds)
ds_repo_to_prj[ds][repo] = project
else:
ds_repo_to_prj[ds][repo] = project
return ds_repo_to_prj | Convert JSON format to the projects map format
map[ds][repository] = project
If a repository is in several projects assign to leaf
Check that all JSON data is in the database
:param json: data with the projects to repositories mapping
:returns: the repositories to projects mapping per data source |
22,248 | def initialize_state(self):
if self.__hardware_source:
self.__profile_changed_event_listener = self.__hardware_source.profile_changed_event.listen(self.__update_profile_index)
self.__frame_parameters_changed_event_listener = self.__hardware_source.frame_parameters_changed_event.listen(self.__update_frame_parameters)
self.__data_item_states_changed_event_listener = self.__hardware_source.data_item_states_changed_event.listen(self.__data_item_states_changed)
self.__acquisition_state_changed_event_listener = self.__hardware_source.acquisition_state_changed_event.listen(self.__acquisition_state_changed)
self.__log_messages_event_listener = self.__hardware_source.log_messages_event.listen(self.__log_messages)
if self.on_display_name_changed:
self.on_display_name_changed(self.display_name)
if self.on_binning_values_changed:
self.on_binning_values_changed(self.__hardware_source.binning_values)
if self.on_monitor_button_state_changed:
has_monitor = self.__hardware_source and self.__hardware_source.features.get("has_monitor", False)
self.on_monitor_button_state_changed(has_monitor, has_monitor)
self.__update_buttons()
if self.on_profiles_changed:
profile_items = self.__hardware_source.modes
self.on_profiles_changed(profile_items)
self.__update_profile_index(self.__hardware_source.selected_profile_index)
if self.on_data_item_states_changed:
self.on_data_item_states_changed(list()) | Call this to initialize the state of the UI after everything has been connected. |
22,249 | def parse_v3_signing_block(self):
self._v3_signing_data = []
if not self.is_signed_v3():
return
block_bytes = self._v2_blocks[self._APK_SIG_KEY_V3_SIGNATURE]
block = io.BytesIO(block_bytes)
view = block.getvalue()
size_sequence = self.read_uint32_le(block)
if size_sequence + 4 != len(block_bytes):
raise BrokenAPKError("size of sequence and blocksize does not match")
while block.tell() < len(block_bytes):
off_signer = block.tell()
size_signer = self.read_uint32_le(block)
len_signed_data = self.read_uint32_le(block)
signed_data_bytes = block.read(len_signed_data)
signed_data = io.BytesIO(signed_data_bytes)
len_digests = self.read_uint32_le(signed_data)
raw_digests = signed_data.read(len_digests)
digests = self.parse_signatures_or_digests(raw_digests)
certs = []
len_certs = self.read_uint32_le(signed_data)
start_certs = signed_data.tell()
while signed_data.tell() < start_certs + len_certs:
len_cert = self.read_uint32_le(signed_data)
cert = signed_data.read(len_cert)
certs.append(cert)
signed_data_min_sdk = self.read_uint32_le(signed_data)
signed_data_max_sdk = self.read_uint32_le(signed_data)
len_attr = self.read_uint32_le(signed_data)
attr = signed_data.read(len_attr)
signed_data_object = APKV3SignedData()
signed_data_object._bytes = signed_data_bytes
signed_data_object.digests = digests
signed_data_object.certificates = certs
signed_data_object.additional_attributes = attr
signed_data_object.minSDK = signed_data_min_sdk
signed_data_object.maxSDK = signed_data_max_sdk
signer_min_sdk = self.read_uint32_le(block)
signer_max_sdk = self.read_uint32_le(block)
len_sigs = self.read_uint32_le(block)
raw_sigs = block.read(len_sigs)
sigs = self.parse_signatures_or_digests(raw_sigs)
len_publickey = self.read_uint32_le(block)
publickey = block.read(len_publickey)
signer = APKV3Signer()
signer._bytes = view[off_signer:off_signer+size_signer]
signer.signed_data = signed_data_object
signer.signatures = sigs
signer.public_key = publickey
signer.minSDK = signer_min_sdk
signer.maxSDK = signer_max_sdk
self._v3_signing_data.append(signer) | Parse the V3 signing block and extract all features |
22,250 | def start(self):
if self._timer.isActive():
return
self._starttime = datetime.datetime.now()
self._timer.start() | Starts running the timer. If the timer is currently running, then
this method will do nothing.
:sa stop, reset |
22,251 | def _get_voltage_angle_var(self, refs, buses):
Va = array([b.v_angle * (pi / 180.0) for b in buses])
Vau = Inf * ones(len(buses))
Val = -Vau
Vau[refs] = Va[refs]
Val[refs] = Va[refs]
return Variable("Va", len(buses), Va, Val, Vau) | Returns the voltage angle variable set. |
22,252 | def add_snmp(data, interfaces):
snmp_interface = []
if interfaces:
interfaces = map(str, interfaces)
for interface in data:
interface_id = str(interface.get())
for if_def in interface.get(, []):
_interface_id = None
if in if_def:
_interface_id = .format(
interface_id, if_def[])
else:
_interface_id = interface_id
if _interface_id in interfaces and not in interface:
for node in if_def.get(, []):
snmp_interface.append(
{: node.get(),
: _interface_id})
return snmp_interface | Format data for adding SNMP to an engine.
:param list data: list of interfaces as provided by kw
:param list interfaces: interfaces to enable SNMP by id |
22,253 | def to_netcdf(ds, *args, **kwargs):
to_update = {}
for v, obj in six.iteritems(ds.variables):
units = obj.attrs.get(, obj.encoding.get(, None))
if units == and np.issubdtype(
obj.dtype, np.datetime64):
to_update[v] = xr.Variable(
obj.dims, AbsoluteTimeEncoder(obj), attrs=obj.attrs.copy(),
encoding=obj.encoding)
to_update[v].attrs[] = units
if to_update:
ds = ds.copy()
ds.update(to_update)
return xarray_api.to_netcdf(ds, *args, **kwargs) | Store the given dataset as a netCDF file
This functions works essentially the same as the usual
:meth:`xarray.Dataset.to_netcdf` method but can also encode absolute time
units
Parameters
----------
ds: xarray.Dataset
The dataset to store
%(xarray.Dataset.to_netcdf.parameters)s |
22,254 | def bdh(self, tickers, flds, start_date, end_date, elms=None,
ovrds=None, longdata=False):
ovrds = [] if not ovrds else ovrds
elms = [] if not elms else elms
elms = list(elms)
data = self._bdh_list(tickers, flds, start_date, end_date,
elms, ovrds)
df = pd.DataFrame(data, columns=['date', 'ticker', 'field', 'value'])
df.loc[:, 'date'] = pd.to_datetime(df.loc[:, 'date'])
if not longdata:
cols = ['ticker', 'field']
df = df.set_index(['date'] + cols).unstack(cols)
df.columns = df.columns.droplevel(0)
return df | Get tickers and fields, return pandas DataFrame with columns as
MultiIndex with levels "ticker" and "field" and indexed by "date".
If long data is requested return DataFrame with columns
["date", "ticker", "field", "value"].
Parameters
----------
tickers: {list, string}
String or list of strings corresponding to tickers
flds: {list, string}
String or list of strings corresponding to FLDS
start_date: string
String in format YYYYmmdd
end_date: string
String in format YYYYmmdd
elms: list of tuples
List of tuples where each tuple corresponds to the other elements
to be set, e.g. [("periodicityAdjustment", "ACTUAL")].
Refer to the HistoricalDataRequest section in the
'Services & schemas reference guide' for more info on these values
ovrds: list of tuples
List of tuples where each tuple corresponds to the override
field and value
longdata: boolean
Whether data should be returned in long data format or pivoted |
22,255 | def _get_secrets_to_compare(old_baseline, new_baseline):
def _check_string(a, b):
if a == b:
return 0
if a < b:
return -1
return 1
def _check_secret(a, b):
if a == b:
return 0
if a[] < b[]:
return -1
elif a[] > b[]:
return 1
return _check_string(a[], b[])
secrets_to_compare = []
for old_filename, new_filename in _comparison_generator(
sorted(old_baseline[].keys()),
sorted(new_baseline[].keys()),
compare_fn=_check_string,
):
if not new_filename:
secrets_to_compare += list(
map(
lambda x: (old_filename, x, True,),
old_baseline[][old_filename],
),
)
continue
elif not old_filename:
secrets_to_compare += list(
map(
lambda x: (new_filename, x, False,),
new_baseline[][new_filename],
),
)
continue
for old_secret, new_secret in _comparison_generator(
old_baseline[][old_filename],
new_baseline[][new_filename],
compare_fn=_check_secret,
):
if old_secret == new_secret:
continue
if old_secret:
secrets_to_compare.append(
(old_filename, old_secret, True,),
)
else:
secrets_to_compare.append(
(new_filename, new_secret, False,),
)
return secrets_to_compare | :rtype: list(tuple)
:param: tuple is in the following format:
filename: str; filename where identified secret is found
secret: dict; PotentialSecret json representation
is_secret_removed: bool; has the secret been removed from the
new baseline? |
22,256 | def gather_metadata(fn_glob, parser):
meta = pd.DataFrame(parser.parse_fn(fn) for fn in glob.iglob(fn_glob))
return meta.set_index(parser.index).sort_index() | Given a glob and a parser object, create a metadata dataframe.
Parameters
----------
fn_glob : str
Glob string to find trajectory files.
parser : descendant of _Parser
Object that handles conversion of filenames to metadata rows. |
22,257 | def clear_surroundings(self):
cells_to_clear = self.grd.eight_neighbors(self.current_y, self.current_x)
for cell in cells_to_clear:
self.grd.set_tile(cell[0], cell[1], ) | clears the cells immediately around the agent on the grid
(just to make it easier to see on the screen) |
22,258 | def _is_zero(x):
if x is None:
return True
if isinstance(x, numbers.Number):
return x == 0.0
if isinstance(x, np.ndarray):
return np.all(x == 0)
return False | Returns True if x is numerically 0 or an array with 0's. |
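A few illustrative calls (NumPy assumed imported as np, as in the body):
.. code:: python

    import numpy as np

    _is_zero(None)                 # True
    _is_zero(0.0)                  # True
    _is_zero(np.zeros((2, 2)))     # True
    _is_zero(np.array([0, 1]))     # False
    _is_zero("0")                  # False: neither None, a number, nor an ndarray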
22,259 | def params(self):
params = odict([])
for key,model in self.models.items():
params.update(model.params)
return params | Return a *copy* (we hope) of the parameters.
DANGER: Altering properties directly doesn't call model._cache |
22,260 | def format_row(self, row):
res = []
headers = getattr(self, 'headers', [])
for column in headers:
column_name = column[]
value = row.get(column_name, )
if hasattr(self, "format_%s" % column_name):
value = getattr(self, "format_%s" % column_name)(value)
res.append(value)
return res | The render method expects rows as lists; here we switch our row format
from dict to list, respecting the order of the headers |
22,261 | def _load_next(self):
if self._load_by_date:
next_date = self.date + pds.DateOffset(days=1)
return self._load_data(date=next_date)
else:
return self._load_data(fid=self._fid+1) | Load the next day's data (or file) without incrementing the date.
Repeated calls will not advance the date/file and will produce the same data.
Uses info stored in object to either increment the date,
or the file. Looks for self._load_by_date flag. |
22,262 | def pbkdf1(hash_algorithm, password, salt, iterations, key_length):
if not isinstance(password, byte_cls):
raise TypeError(pretty_message(
,
(type_name(password))
))
if not isinstance(salt, byte_cls):
raise TypeError(pretty_message(
,
(type_name(salt))
))
if not isinstance(iterations, int_types):
raise TypeError(pretty_message(
,
(type_name(iterations))
))
if iterations < 1:
raise ValueError(pretty_message(
,
repr(iterations)
))
if not isinstance(key_length, int_types):
raise TypeError(pretty_message(
,
(type_name(key_length))
))
if key_length < 1:
raise ValueError(pretty_message(
,
repr(key_length)
))
if hash_algorithm not in set(['md2', 'md5', 'sha1']):
raise ValueError(pretty_message(
,
repr(hash_algorithm)
))
if key_length > 16 and hash_algorithm in set(['md2', 'md5']):
raise ValueError(pretty_message(
,
(hash_algorithm, repr(key_length))
))
if key_length > 20 and hash_algorithm == 'sha1':
raise ValueError(pretty_message(
,
repr(key_length)
))
algo = getattr(hashlib, hash_algorithm)
output = algo(password + salt).digest()
for _ in range(2, iterations + 1):
output = algo(output).digest()
return output[:key_length] | An implementation of PBKDF1 - should only be used for interop with legacy
systems, not new architectures
:param hash_algorithm:
The string name of the hash algorithm to use: "md2", "md5", "sha1"
:param password:
A byte string of the password to use an input to the KDF
:param salt:
A cryptographic random byte string
:param iterations:
The numbers of iterations to use when deriving the key
:param key_length:
The length of the desired key in bytes
:return:
The derived key as a byte string |
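A quick sanity check of the iteration loop above, assuming the module-level helpers it references (byte_cls, int_types, pretty_message, type_name) are in place; the hashlib re-derivation simply mirrors the algorithm as written:
.. code:: python

    import hashlib

    derived = pbkdf1('sha1', b'password', b'saltsalt', 1000, 16)

    # same chain by hand: hash(password + salt), then re-hash the digest
    out = hashlib.sha1(b'password' + b'saltsalt').digest()
    for _ in range(2, 1001):
        out = hashlib.sha1(out).digest()
    assert derived == out[:16]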
22,263 | def insert(self, path, simfile):
if self.state is not None:
simfile.set_state(self.state)
mountpoint, chunks = self.get_mountpoint(path)
if mountpoint is None:
self._files[self._join_chunks(chunks)] = simfile
return True
else:
return mountpoint.insert(chunks, simfile) | Insert a file into the filesystem. Returns whether the operation was successful. |
22,264 | def ntp_authentication_key_encryption_type_sha1_type_sha1(self, **kwargs):
config = ET.Element("config")
ntp = ET.SubElement(config, "ntp", xmlns="urn:brocade.com:mgmt:brocade-ntp")
authentication_key = ET.SubElement(ntp, "authentication-key")
keyid_key = ET.SubElement(authentication_key, "keyid")
keyid_key.text = kwargs.pop('keyid')
encryption_type = ET.SubElement(authentication_key, "encryption-type")
sha1_type = ET.SubElement(encryption_type, "sha1-type")
sha1 = ET.SubElement(sha1_type, "sha1")
sha1.text = kwargs.pop('sha1')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
22,265 | def setContentLen(self, content, len):
libxml2mod.xmlNodeSetContentLen(self._o, content, len) | Replace the content of a node. NOTE: @content is supposed
to be a piece of XML CDATA, so it allows entity references,
but XML special chars need to be escaped first by using
xmlEncodeEntitiesReentrant() resp. xmlEncodeSpecialChars(). |
22,266 | def GetCBVs(campaign, model=, clobber=False, **kwargs):
if len(logging.getLogger().handlers) == 0:
InitLog(file_name=None, screen_level=logging.DEBUG)
log.info( % (campaign))
path = os.path.join(EVEREST_DAT, , , % campaign)
if not os.path.exists(path):
os.makedirs(path)
xfile = os.path.join(path, )
if clobber or not os.path.exists(xfile):
log.info()
time = None
for module in range(2, 25):
lcfile = os.path.join(path, % module)
if clobber or not os.path.exists(lcfile):
try:
time, breakpoints, fluxes, errors, kpars = GetStars(
campaign, module, model=model, **kwargs)
except AssertionError:
continue
np.savez(lcfile, time=time, breakpoints=breakpoints,
fluxes=fluxes, errors=errors, kpars=kpars)
lcs = np.load(lcfile)
if time is None:
time = lcs['time']
breakpoints = lcs['breakpoints']
fluxes = lcs['fluxes']
errors = lcs['errors']
kpars = lcs['kpars']
else:
fluxes = np.vstack([fluxes, lcs['fluxes']])
errors = np.vstack([errors, lcs['errors']])
kpars = np.vstack([kpars, lcs['kpars']])
log.info()
X = np.ones((len(time), 1 + kwargs.get(, 5)))
new_fluxes = np.zeros_like(fluxes)
for b in range(len(breakpoints)):
return X | Computes the CBVs for a given campaign.
:param int campaign: The campaign number
:param str model: The name of the :py:obj:`everest` model. Default `nPLD`
:param bool clobber: Overwrite existing files? Default `False` |
22,267 | def search(cls, element, pattern):
if isinstance(element, Layout):
return [el for cell in element for el in cls.search(cell, pattern)]
if isinstance(element, (NdOverlay, Overlay)):
return [el for el in element if el.matches(pattern)]
elif isinstance(element, Element):
return [element] if element.matches(pattern) else [] | Helper method that returns a list of elements that match the
given path pattern of form {type}.{group}.{label}.
The input may be a Layout, an Overlay type or a single
Element. |
22,268 | def to_python(self, value):
if isinstance(value, dict):
return value
if self.blank and not value:
return None
if isinstance(value, string_types):
try:
return json.loads(value)
except Exception as e:
raise ValidationError(str(e))
return value | Convert the input JSON value into python structures, raises
django.core.exceptions.ValidationError if the data can't be converted. |
22,269 | def visit_wavedrom(self, node):
format = determine_format(self.builder.supported_image_types)
if format is None:
raise SphinxError(__("Cannot determine a suitable output format"))
bname = "wavedrom-{}".format(uuid4())
outpath = path.join(self.builder.outdir, self.builder.imagedir)
imgname = render_wavedrom(self, node, outpath, bname, format)
image_node = node[]
image_node[] = os.path.join(self.builder.imgpath, imgname)
node.append(image_node)
raise nodes.SkipDeparture | Visit the wavedrom node |
22,270 | def _gotitem(self,
key: Union[str, List[str]],
ndim: int,
subset: Optional[Union[Series, ABCDataFrame]] = None,
) -> Union[Series, ABCDataFrame]:
if subset is None:
subset = self
elif subset.ndim == 1:
return subset
return subset[key] | Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on |
22,271 | def reduce_by_device(parallelism, data, reduce_fn):
unique_devices = []
device_to_data = {}
for dev, datum in zip(parallelism.devices, data):
if dev not in device_to_data:
unique_devices.append(dev)
device_to_data[dev] = [datum]
else:
device_to_data[dev].append(datum)
device_parallelism = Parallelism(unique_devices)
grouped_data = [device_to_data[dev] for dev in unique_devices]
return device_parallelism, device_parallelism(reduce_fn, grouped_data) | Reduces data per device.
This can be useful, for example, if we want to all-reduce n tensors on k<n
devices (like during eval when we have only one device). We call
reduce_by_device() to first sum the tensors per device, then call our usual
all-reduce operation to create one sum per device, followed by
expand_by_device, to create the appropriate number of pointers to these
results. See all_reduce_ring() below for an example of how this is used.
Args:
parallelism: a expert_utils.Parallelism object
data: a list of Tensors with length parallelism.n
reduce_fn: a function taking a list of Tensors. e.g. tf.add_n
Returns:
device_parallelism: a Parallelism object with each device listed only once.
reduced_data: A list of Tensors, one per device. |
22,272 | def connect(self):
self._logger.info( % self._url)
return adapters.TornadoConnection(pika.URLParameters(self._url),
self.on_connection_open,
custom_ioloop=self._ioloop_instance) | This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.SelectConnection |
22,273 | def cached_property(method):
name = method.__name__
def newmethod(self):
try:
val = self.__dict__[name]
except KeyError:
val = method(self)
self.__dict__[name] = val
return val
newmethod.__name__ = method.__name__
newmethod.__doc__ = method.__doc__
return property(newmethod) | :param method: a method without arguments except self
:returns: a cached property |
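A short usage sketch of the decorator above; the wrapped method runs once per instance and its result is cached in the instance __dict__:
.. code:: python

    class Circle:
        def __init__(self, radius):
            self.radius = radius

        @cached_property
        def area(self):
            print("computing...")
            return 3.14159 * self.radius ** 2

    c = Circle(2.0)
    c.area   # prints "computing..." and stores the result in c.__dict__['area']
    c.area   # the property getter now finds the cached value and skips recomputation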
22,274 | def get_deployment_by_slot(self, service_name, deployment_slot):
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_slot', deployment_slot)
return self._perform_get(
self._get_deployment_path_using_slot(
service_name, deployment_slot),
Deployment) | Returns configuration information, status, and system properties for
a deployment.
service_name:
Name of the hosted service.
deployment_slot:
The environment to which the hosted service is deployed. Valid
values are: staging, production |
22,275 | def get_value(self, field, quick):
if callable(field.default):
default = field.default(self)
else:
default = field.default
if quick and default is not None:
return default
shell.cprint(, field.help)
while True:
try:
answer = click.prompt(field.pretty_prompt, default=default)
return field.type(answer)
except ValueError:
shell.cprint("<31>Unsupported value") | Ask user the question represented by this instance.
Args:
field (Field):
The field we're asking the user to provide the value for.
quick (bool):
Enable quick mode. In quick mode, the form will reduce the
number of question asked by using defaults wherever possible.
This can greatly reduce the number of interactions required on
the user part, but will obviously limit the user choices. This
should probably be enabled only by a specific user action
(like passing a ``--quick`` flag etc.).
Returns:
The user response converted to a python type using the
:py:attr:`cliform.core.Field.type` converter. |
22,276 | def get_fetch_request(self, method, fetch_url, *args, **kwargs):
return requests.request(method, fetch_url, *args, **kwargs) | This is handy if you want to modify the request right before passing it
to requests, or you want to do something extra special customized
:param method: string, the http method (eg, GET, POST)
:param fetch_url: string, the full url with query params
:param *args: any other positional arguments
:param **kwargs: any keyword arguments to pass to requests
:returns: a requests.Response compatible object instance |
22,277 | def component_title(component):
title = u
label_text = u
title_text = u
if component.get():
label_text = component.get()
if component.get():
title_text = component.get()
title = unicode_value(label_text)
if label_text != and title_text != :
title +=
title += unicode_value(title_text)
if component.get() == and title == :
title =
return title | Label, title and caption
Title is the label text plus the title text
Title may contain italic tag, etc. |
22,278 | def background_thread(timeout_fn, timeout_event, handle_exit_code, is_alive,
quit):
if handle_exit_code and not RUNNING_TESTS:
alive = True
while alive:
quit.wait(1)
alive, exit_code = is_alive()
handle_exit_code(exit_code) | handles the timeout logic |
22,279 | def check_write_permissions(file):
try:
open(file, 'a')  # 'a' (append) assumed: probes writability without truncating the file
except IOError:
print("Can't open file {}. "
"Please grant write permissions or change the path in your config".format(file))
sys.exit(1) | Check if we can write to the given file
Otherwise since we might detach the process to run in the background
we might never find out that writing failed and get an ugly
exit message on startup. For example:
ERROR: Child exited immediately with non-zero exit code 127
So we catch this error upfront and print a nicer error message
with a hint on how to fix it. |
22,280 | def workspace_backup_list(ctx):
backup_manager = WorkspaceBackupManager(Workspace(ctx.resolver, directory=ctx.directory, mets_basename=ctx.mets_basename, automatic_backup=ctx.automatic_backup))
for b in backup_manager.list():
print(b) | List backups |
22,281 | def connect(self, pattern, presenter, **kwargs):
if isinstance(presenter, type) and issubclass(presenter, WWebPresenter) is True:
self.presenter_collection().add(presenter)
presenter = presenter.__presenter_name__()
self.__route_map.connect(pattern, presenter, **kwargs) | Shortcut for self.route_map().connect() method. It is possible to pass presenter class instead of
its name - in that case such class will be saved in presenter collection and it will be available in
route matching.
:param pattern: same as pattern in :meth:`.WWebRouteMap.connect` method
:param presenter: presenter name or presenter class
:param kwargs: same as kwargs in :meth:`.WWebRouteMap.connect` method
:return: None |
22,282 | def getquery(query):
try:
conn = connection.cursor()
conn.execute(query)
data = conn.fetchall()
conn.close()
except: data = list()
return data | Performs a query and get the results. |
22,283 | def record(self):
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError()
rec = struct.pack(self.FMT, b'\x00' * 16,
self.vol_desc_seqnum, self.part_flags,
self.part_num, self.part_contents.record(),
self.part_contents_use.record(), self.access_type,
self.part_start_location, self.part_length,
self.impl_ident.record(), self.implementation_use,
b'\x00' * 156)[16:]
return self.desc_tag.record(rec) + rec | A method to generate the string representing this UDF Partition Volume
Descriptor.
Parameters:
None.
Returns:
A string representing this UDF Partition Volume Descriptor. |
22,284 | def getElementsByType(self, type):
foundElements=[]
for element in self.getAllElementsOfHirarchy():
if isinstance(element, type):
foundElements.append(element)
return foundElements | retrieves all Elements that are of type type
@type type: class
@param type: type of the element |
22,285 | def up(self):
self.swap(self.get_ordering_queryset().filter(order__lt=self.order).order_by()) | Move this object up one position. |
22,286 | def read_cyc(this, fn, conv=1.0):
f = paropen(fn, "r")
f.readline()
f.readline()
f.readline()
f.readline()
cell = np.array( [ [ 0.0, 0.0, 0.0 ], [ 0.0, 0.0, 0.0 ], [ 0.0, 0.0, 0.0 ] ] )
l = f.readline()
s = map(float, l.split())
cell[0, 0] = s[0]*conv
cell[1, 0] = s[1]*conv
cell[2, 0] = s[2]*conv
l = f.readline()
s = map(float, l.split())
cell[0, 1] = s[0]*conv
cell[1, 1] = s[1]*conv
cell[2, 1] = s[2]*conv
l = f.readline()
s = map(float, l.split())
cell[0, 2] = s[0]*conv
cell[1, 2] = s[1]*conv
cell[2, 2] = s[2]*conv
this.set_cell(cell)
this.set_pbc(True)
f.close() | Read the lattice information from a cyc.dat file (i.e., tblmd input file) |
22,287 | def send_update(url_id, dataset):
data = _convert_to_seeder_format(dataset)
if not data:
return
try:
_send_request(url_id, json=data, req_type=requests.patch)
except Exception as e:
sys.stderr.write("Seeder PATCH error: ")
sys.stderr.write(str(e.message))
return None | Send request to Seeder's API with data changed by user.
Args:
url_id (str): ID used as identification in Seeder.
dataset (dict): WA-KAT dataset sent from frontend. |
22,288 | def decode_from_sha(sha):
if isinstance(sha, str):
sha = sha.encode()
return codecs.decode(re.sub(rb, b, sha), "hex_codec") | convert coerced sha back into numeric list |
22,289 | def highlightNextMatch(self):
if self.qteText.toPlainText() == :
self.qteText.setText(self.defaultChoice)
return
if self.selMatchIdx < 0:
self.selMatchIdx = 0
return
if self.selMatchIdx >= len(self.matchList):
self.selMatchIdx = 0
return
SCI = self.qteWidget
start, stop = self.matchList[self.selMatchIdx - 1]
line, col = SCI.lineIndexFromPosition(start)
SCI.SendScintilla(SCI.SCI_STARTSTYLING, start, 0xFF)
SCI.SendScintilla(SCI.SCI_SETSTYLING, stop - start, 30)
start, stop = self.matchList[self.selMatchIdx]
SCI.SendScintilla(SCI.SCI_STARTSTYLING, start, 0xFF)
SCI.SendScintilla(SCI.SCI_SETSTYLING, stop - start, 31)
line, col = SCI.lineIndexFromPosition(start)
SCI.setCursorPosition(line, col)
self.selMatchIdx += 1 | Select and highlight the next match in the set of matches. |
22,290 | def verify(self, signed):
buf = create_string_buffer(libcrypto.RSA_size(self._rsa))
signed = salt.utils.stringutils.to_bytes(signed)
size = libcrypto.RSA_public_decrypt(len(signed), signed, buf, self._rsa, RSA_X931_PADDING)
if size < 0:
raise ValueError()
return buf[0:size] | Recover the message (digest) from the signature using the public key
:param str signed: The signature created with the private key
:rtype: str
:return: The message (digest) recovered from the signature, or an empty
string if the decryption failed |
22,291 | def _WriteFileChunk(self, chunk):
if chunk.chunk_index == 0:
st = os.stat_result((0o644, 0, 0, 0, 0, 0, chunk.total_size, 0, 0, 0))
target_path = _ClientPathToString(chunk.client_path, prefix=self.prefix)
yield self.archive_generator.WriteFileHeader(target_path, st=st)
yield self.archive_generator.WriteFileChunk(chunk.data)
if chunk.chunk_index == chunk.total_chunks - 1:
yield self.archive_generator.WriteFileFooter()
self.archived_files.add(chunk.client_path) | Yields binary chunks, respecting archive file headers and footers.
Args:
chunk: the StreamedFileChunk to be written |
22,292 | def __write_srgb(self, outfile):
if self.rendering_intent is not None and self.icc_profile is not None:
raise FormatError("sRGB(via rendering_intent) and iCCP could not"
"be present simultaneously")
if self.rendering_intent is not None:
write_chunk(outfile, 'sRGB',
struct.pack("B", int(self.rendering_intent)))
if (self.white_point is not None and self.rgb_points is None) or\
(self.white_point is None and self.rgb_points is not None):
logging.warn("White and RGB points should be both specified to"
" write cHRM chunk")
self.white_point = None
self.rgb_points = None
if (self.white_point is not None and self.rgb_points is not None):
data = (self.white_point[0], self.white_point[1],
self.rgb_points[0][0], self.rgb_points[0][1],
self.rgb_points[1][0], self.rgb_points[1][1],
self.rgb_points[2][0], self.rgb_points[2][1],
)
write_chunk(outfile, 'cHRM',
struct.pack("!8L",
*[int(round(it * 1e5)) for it in data]))
if self.gamma is not None:
write_chunk(outfile, 'gAMA',
struct.pack("!L", int(round(self.gamma * 1e5))))
if self.icc_profile is not None:
if self.compression is None or self.compression == -1:
comp_level = 6
else:
comp_level = self.compression
write_chunk(outfile, 'iCCP',
self.icc_profile[0] + zerobyte +
zerobyte +
zlib.compress(self.icc_profile[1], comp_level)) | Write colour reference information: gamma, iccp etc.
This method should be called only from ``write_idat`` method
or chunk order will be ruined. |
22,293 | def read(*paths):
filename = os.path.join(*paths)
with codecs.open(filename, mode=, encoding=) as handle:
return handle.read() | Build a file path from *paths* and return the contents. |
22,294 | def get_cognitive_process_id(self):
if not bool(self._my_map[]):
raise errors.IllegalState()
else:
return Id(self._my_map[]) | Gets the grade ``Id`` associated with the cognitive process.
return: (osid.id.Id) - the grade ``Id``
raise: IllegalState - ``has_cognitive_process()`` is ``false``
*compliance: mandatory -- This method must be implemented.* |
22,295 | def get_instance(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "get_instance" not in self._inner_api_calls:
self._inner_api_calls[
"get_instance"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_instance,
default_retry=self._method_configs["GetInstance"].retry,
default_timeout=self._method_configs["GetInstance"].timeout,
client_info=self._client_info,
)
request = cloud_redis_pb2.GetInstanceRequest(name=name)
return self._inner_api_calls["get_instance"](
request, retry=retry, timeout=timeout, metadata=metadata
) | Gets the details of a specific Redis instance.
Example:
>>> from google.cloud import redis_v1beta1
>>>
>>> client = redis_v1beta1.CloudRedisClient()
>>>
>>> name = client.instance_path('[PROJECT]', '[LOCATION]', '[INSTANCE]')
>>>
>>> response = client.get_instance(name)
Args:
name (str): Required. Redis instance resource name using the form:
``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
where ``location_id`` refers to a GCP region
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.redis_v1beta1.types.Instance` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. |
22,296 | def __sweeten(self, dumper: , class_: Type, node: Node) -> None:
for base_class in class_.__bases__:
if base_class in dumper.yaml_representers:
logger.debug(.format(
self.class_.__name__))
self.__sweeten(dumper, base_class, node)
if hasattr(class_, ):
class_.yatiml_sweeten(node) | Applies the user's yatiml_sweeten() function(s), if any.
Sweetening is done for the base classes first, then for the \
derived classes, down the hierarchy to the class we're \
constructing.
Args:
dumper: The dumper that is dumping this object.
class_: The type of the object to be dumped.
represented_object: The object to be dumped. |
22,297 | def copyFuncVersionedLib(dest, source, env):
if os.path.isdir(source):
raise SCons.Errors.UserError("cannot install directory `%s' as a version library" % str(source) )
else:
try:
os.remove(dest)
except:
pass
shutil.copy2(source, dest)
st = os.stat(source)
os.chmod(dest, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
installShlibLinks(dest, source, env)
return 0 | Install a versioned library into a destination by copying,
(including copying permission/mode bits) and then creating
required symlinks. |
22,298 | def wait_for_connection(self, timeout=10):
start_time = datetime.datetime.now()
while True:
if self.connected:
return True
now = datetime.datetime.now()
if (now - start_time).total_seconds() > timeout:
return False
time.sleep(0.5) | Busy loop until connection is established.
Will abort after timeout (seconds). Return value is a boolean, whether
connection could be established. |
22,299 | def plot_predict(self, h=5, past_values=20, intervals=True, **kwargs):
figsize = kwargs.get('figsize', (10,7))
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
import matplotlib.pyplot as plt
import seaborn as sns
mu, Y = self._model(self.latent_variables.get_z_values())
date_index = self.shift_dates(h)
t_z = self.transform_z()
mean_values = self._mean_prediction(mu, Y, h, t_z)
if intervals is True:
sim_values = self._sim_prediction(mu, Y, h, t_z, 15000)
else:
sim_values = self._sim_prediction(mu, Y, h, t_z, 2)
error_bars, forecasted_values, plot_values, plot_index = self._summarize_simulations(mean_values, sim_values, date_index, h, past_values)
plt.figure(figsize=figsize)
if intervals is True:
alpha =[0.15*i/float(100) for i in range(50,12,-2)]
for count, pre in enumerate(error_bars):
plt.fill_between(date_index[-h-1:], forecasted_values-pre, forecasted_values+pre,alpha=alpha[count])
plt.plot(plot_index,plot_values)
plt.title("Forecast for " + self.data_name)
plt.xlabel("Time")
plt.ylabel(self.data_name)
plt.show() | Plots forecasts with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : boolean
Would you like to show prediction intervals for the forecast?
Returns
----------
- Plot of the forecast |