Unnamed: 0 (int64, 0 to 389k) | code (string, 26 to 79.6k chars) | docstring (string, 1 to 46.9k chars)
---|---|---|
24,700 |
async def run_checks(self):
async for check in self.fsm.health_check():
yield check
async for check in self.self_check():
yield check
for check in MiddlewareManager.health_check():
yield check
|
Run checks on itself and on the FSM
|
24,701 |
def accepts(*checkers_args, **checkers_kws):
@decorator
def run_checkers(func, *args, **kwargs):
all_args = inspect.getcallargs(func, *args, **kwargs)
if checkers_args:
for idx, checker_function in enumerate(checkers_args):
if callable(checker_function):
result = checker_function(args[idx])
if checkers_kws:
for key in checkers_kws.keys():
if key not in all_args:
raise ValueError()
else:
df = all_args[key]
result = checkers_kws[key](df)
return func(*args, **kwargs)
return run_checkers
|
Create a decorator for validating function parameters.
Parameters
----------
checkers_args: positional args
Functions to apply to the inputs of the decorated function. The position of the argument
is assumed to match the position of the function in the decorator.
checkers_kws: keyword args
Keyword pairs in the form (arg: function) to apply to the inputs of the decorated function.
Example
-------
@accepts(df=df_checker)
def do_something_with_df(df, *args, **kwargs):
print(df.head())
|
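A minimal usage sketch for the ``accepts`` decorator above; ``df_checker``, the pandas import and ``describe`` are illustrative assumptions, not part of the original module.

import pandas as pd

def df_checker(df):
    # Illustrative checker: raise if the argument is not a DataFrame.
    assert isinstance(df, pd.DataFrame), "expected a pandas DataFrame"

@accepts(df=df_checker)
def describe(df):
    print(df.head())

describe(pd.DataFrame({"a": [1, 2, 3]}))  # checker runs before the function body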
24,702 |
def priority(self):
if self.compiled_type == const.COMPILED_TYPE.MARKER:
return self.compiled.priority
return const.compiled_type_priorities[self.compiled_type]
|
Get priority for this Schema.
Used to sort mapping keys
:rtype: int
|
24,703 |
def write_xyz(self, *args, **kwargs):
message = 'write_xyz is deprecated, please use to_xyz instead.'  # message text assumed; original literal stripped
with warnings.catch_warnings():
warnings.simplefilter("always")
warnings.warn(message, DeprecationWarning)
return self.to_xyz(*args, **kwargs)
|
Deprecated, use :meth:`~chemcoord.Cartesian.to_xyz`
|
24,704 |
def add_root_book(self, book_id):
if self._catalog_session is not None:
return self._catalog_session.add_root_catalog(catalog_id=book_id)
return self._hierarchy_session.add_root(id_=book_id)
|
Adds a root book.
arg: book_id (osid.id.Id): the ``Id`` of a book
raise: AlreadyExists - ``book_id`` is already in hierarchy
raise: NotFound - ``book_id`` is not found
raise: NullArgument - ``book_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
24,705 |
def clacks_overhead(fn):
@wraps(fn)
def _wrapped(*args, **kw):
response = fn(*args, **kw)
response['X-Clacks-Overhead'] = 'GNU Terry Pratchett'  # header name from the docstring; value assumed
return response
return _wrapped
|
A Django view decorator that will add the `X-Clacks-Overhead` header.
Usage:
@clacks_overhead
def my_view(request):
return my_response
|
24,706 |
def FindTypeInfo(self, name):
result = self.type_infos.get(name)
if result is None:
result = type_info.String(name=name, default="")
return result
|
Search for a type_info instance which describes this key.
|
24,707 |
def get_content_type(self):
extension = self.path_params.get('extension')  # param key assumed; original literal stripped
for ext, mime in self.EXTENSION_MAP:
if ext == extension:
return mime
if self.response.vary is None:
self.response.vary = ['Accept']  # 'Accept' assumed from the docstring's Accept-header handling
else:
self.response.vary.append('Accept')
types = [mime for ext, mime in self.EXTENSION_MAP]
ct = self.request.accept.best_match(types)
if not ct:
ct = types[0]
return ct
|
Returns the Content Type to serve from either the extension or the
Accept headers. Uses the :attr:`EXTENSION_MAP` list for all the
configured MIME types.
|
24,708 |
def visitPlusCardinality(self, ctx: ShExDocParser.PlusCardinalityContext):
self.expression.min = 1
self.expression.max = -1
|
'+'
|
24,709 |
def visit_reference(self, node: docutils.nodes.reference) -> None:
path = pathlib.Path(node.attributes['refuri'])  # attribute key assumed; original literal stripped
try:
if path.is_absolute():
return
resolved_path = path.resolve()
except FileNotFoundError:
return
try:
resolved_path.relative_to(HERE)
except ValueError:
return
if not path.is_file():
return
assert node.attributes['refuri'] == node.children[0].astext()  # same assumed key as above
self.references.append(node)
|
Called for "reference" nodes.
|
24,710 |
def blend_mode(self):
blend_mode_ptr = ffi.new('SDL_BlendMode *')  # cdecl assumed; original literal stripped
lib.SDL_GetTextureBlendMode(self._ptr, blend_mode_ptr)
return BlendMode(blend_mode_ptr[0])
|
BlendMode: The blend mode used for drawing operations.
|
24,711 |
def tee(iterable, n=2):
tees = tuple(AsyncTeeIterable(iterable) for _ in range(n))
for tee in tees:
tee._siblings = tees
return tees
|
Return n independent iterators from a single iterable.
Once tee() has made a split, the original iterable should not be used
anywhere else; otherwise, the iterable could get advanced without the tee
objects being informed.
This itertool may require significant auxiliary storage (depending on how
much temporary data needs to be stored). In general, if one iterator uses
most or all of the data before another iterator starts, it is faster to use
list() instead of tee().
|
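A hedged asyncio sketch of the ``tee`` above, assuming the returned ``AsyncTeeIterable`` objects support ``async for``; ``numbers`` is an illustrative generator.

import asyncio

async def numbers():
    for i in range(3):
        yield i

async def main():
    a, b = tee(numbers(), n=2)
    print([x async for x in a])  # [0, 1, 2]
    print([x async for x in b])  # [0, 1, 2], replayed from the tee buffer

asyncio.run(main())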
24,712 |
def process_config(raw_path, cache_dir, cache_file, **kwargs):
config = _create_cache(raw_path, cache_dir, cache_file)
for modifier in _CONFIG_MODIFIERS:
modifier(config, **kwargs)
cache = devpipeline_configure.cache._CachedConfig(
config, os.path.join(cache_dir, cache_file)
)
_handle_value_modifiers(cache)
_add_package_options(cache)
_write_config(cache, cache_dir)
return cache
|
Read a build configuration and create it, storing the result in a build
cache.
Arguments
raw_path -- path to a build configuration
cache_dir -- the directory where cache should be written
cache_file -- The filename to write the cache. This will live inside
cache_dir.
**kwargs -- additional arguments used by some modifiers
|
24,713 |
def talk_back(self, message):
quote = self.get_quote()
if quote:
self.reply("Actually, she said things like this: \n%s" % quote)
|
that's what she said: Tells you some things she actually said. :)
|
24,714 |
def get_average_along_axis(self, ind):
m = self.data["total"]
ng = self.dim
if ind == 0:
total = np.sum(np.sum(m, axis=1), 1)
elif ind == 1:
total = np.sum(np.sum(m, axis=0), 1)
else:
total = np.sum(np.sum(m, axis=0), 0)
return total / ng[(ind + 1) % 3] / ng[(ind + 2) % 3]
|
Get the averaged total of the volumetric data along a certain axis direction.
For example, useful for visualizing Hartree Potentials from a LOCPOT
file.
Args:
ind (int): Index of axis.
Returns:
Average total along axis
|
24,715 |
def converge(self, playbook=None, **kwargs):
if playbook is None:
pb = self._get_ansible_playbook(self.playbooks.converge, **kwargs)
else:
pb = self._get_ansible_playbook(playbook, **kwargs)
return pb.execute()
|
Executes ``ansible-playbook`` against the converge playbook unless
specified otherwise and returns a string.
:param playbook: An optional string containing an absolute path to a
playbook.
:param kwargs: Optional keyword arguments.
:return: str
|
24,716 |
def randpath(self):
return '/'.join(  # separator assumed from the "URI-like path" description; original literal stripped
gen_rand_str(3, 10, use=self.random, keyspace=list(self.keyspace))
for _ in range(self.random.randint(0, 3)))
|
-> a random URI-like #str path
|
24,717 |
def _resolve_hostname(name):
if env.ssh_config is None:
return name
elif not os.path.exists(os.path.join("nodes", name + ".json")):
resolved_name = env.ssh_config.lookup(name)['hostname']  # key assumed (paramiko SSHConfig.lookup convention)
if os.path.exists(os.path.join("nodes", resolved_name + ".json")):
name = resolved_name
return name
|
Returns resolved hostname using the ssh config
|
24,718 |
def _consolidate_binds(local_binds, remote_binds):
count = len(remote_binds) - len(local_binds)
if count < 0:
raise ValueError(
)
local_binds.extend([('', 0) for x in range(count)])  # empty-host default assumed; original literal stripped
return local_binds
|
Fill local_binds with defaults when no values were specified,
leaving paramiko to decide on which local port the tunnel will be opened
|
24,719 |
def fmt_row(self, columns, dimensions, row, **settings):
cells = []
i = 0
for column in columns:
cells.append(self.fmt_cell(
row[i],
dimensions[i],
column,
**settings[self.SETTING_TEXT_FORMATING]
)
)
i += 1
# The positional border-character selectors passed to bchar() were stripped
# in extraction; 'v', 'l', 'm' and 'r' below are placeholders, not the originals.
return self.bchar('v', 'l', settings[self.SETTING_BORDER_STYLE], **settings[self.SETTING_BORDER_FORMATING]) + \
self.bchar('v', 'm', settings[self.SETTING_BORDER_STYLE], **settings[self.SETTING_BORDER_FORMATING]).join(cells) + \
self.bchar('v', 'r', settings[self.SETTING_BORDER_STYLE], **settings[self.SETTING_BORDER_FORMATING])
|
Format single table row.
|
24,720 |
def _setup_directories(self):
dirs = [self._data_dir]
dirs += [os.path.join(self._data_dir, name) for name
in self.DATA_SUBDIRS]
for path in dirs:
if not os.path.isdir(path):
try:
os.makedirs(path)
os.chmod(path, 0o755)
except OSError:
raise errors.DirectorySetupFail()
return True
|
Creates data directory structure.
* Raises a ``DirectorySetupFail`` exception if an error occurs
while creating directories.
|
24,721 |
def module_settings(self):
response = self._get(url.settings_modules)
self._check_response(response, 200)
return self._create_response(response)
|
Get Module settings. Uses GET to /settings/modules interface.
:Returns: (dict) Module settings as shown `here <https://cloud.knuverse.com/docs/api/#api-Module_Settings-Get_the_module_settings>`_.
|
24,722 |
def _to_dict(self, serialize=False):
copy_dict = self.__dict__.copy()
for key, value in vars(self).items():
if serialize and key == '_base':  # placeholder key; original literal stripped
continue
if not serialize and isinstance(value, BaseObject):
copy_dict[key] = copy_dict.pop(key).to_dict()
elif serialize and getattr(value, '_dirty', False):  # attribute name is a placeholder; literal stripped
continue
elif serialize and key in self._always_dirty:
continue
elif key in ('_key1', '_key2', '_key3', '_key4', '_key5'):  # five internal keys; originals stripped, names are placeholders
del copy_dict[key]
elif serialize and key not in self._dirty_attributes:
del copy_dict[key]
elif key.startswith('_'):  # implied by the key[1:] rename below
copy_dict[key[1:]] = copy_dict[key]
del copy_dict[key]
return copy_dict
|
This method works by copying self.__dict__, and removing everything that should not be serialized.
|
24,723 |
def keys(self):
keys = []
for app_name, __ in self.items():
keys.append(app_name)
return keys
|
return a list of all app_names
|
24,724 |
def run(self, depth=None):
if len(self.simgr.right) != 1 or len(self.simgr.left) != 1:
self._report_incongruency("Single path in pg.left and pg.right required.")
return False
if "UNICORN" in self.simgr.one_right.options and depth is not None:
self.simgr.one_right.unicorn.max_steps = depth
if "UNICORN" in self.simgr.one_left.options and depth is not None:
self.simgr.one_left.unicorn.max_steps = depth
l.debug("Performing initial path comparison.")
if not self.compare_paths(self.simgr.left[0], self.simgr.right[0]):
self._report_incongruency("Initial path comparison check failed.")
return False
while len(self.simgr.left) > 0 and len(self.simgr.right) > 0:
if depth is not None:
self._update_progress(100. * float(self.simgr.one_left.history.block_count) / depth)
if len(self.simgr.deadended) != 0:
self._report_incongruency("Unexpected deadended paths before step.")
return False
if len(self.simgr.right) == 0 and len(self.simgr.left) == 0:
l.debug("All done!")
return True
if len(self.simgr.right) != 1 or len(self.simgr.left) != 1:
self._report_incongruency("Different numbers of paths in left and right stash..")
return False
l.debug(
"Stepping right path with weighted length %d/%d",
self.simgr.right[0].history.block_count,
depth
)
self.prev_pg = self.simgr.copy()
self.simgr.step(stash='right')  # stash name implied by the "Stepping right path" debug message above
CongruencyCheck._sync_steps(self.simgr)
if len(self.simgr.errored) != 0:
self._report_incongruency("Unexpected errored paths.")
return False
try:
if not self.compare_path_group(self.simgr) and self._validate_incongruency():
self._report_incongruency("Path group comparison failed.")
return False
except AngrIncongruencyError:
if self._validate_incongruency():
raise
if depth is not None:
# Stash names below were stripped in extraction; 'left'/'right' and the
# 'stashed_*' counterparts are restored from the surrounding attribute uses.
self.simgr.drop(stash='left', filter_func=lambda p: p.history.block_count >= depth)
self.simgr.drop(stash='right', filter_func=lambda p: p.history.block_count >= depth)
self.simgr.right.sort(key=lambda p: p.addr)
self.simgr.left.sort(key=lambda p: p.addr)
self.simgr.stashed_right[:] = self.simgr.stashed_right[::-1]
self.simgr.stashed_left[:] = self.simgr.stashed_left[::-1]
self.simgr.move('stashed_right', 'right')
self.simgr.move('stashed_left', 'left')
if len(self.simgr.left) > 1:
self.simgr.split(from_stash='left', limit=1, to_stash='stashed_left')
self.simgr.split(from_stash='right', limit=1, to_stash='stashed_right')
|
Checks that the paths in the specified path group stay the same over the next
`depth` bytes.
The path group should have a "left" and a "right" stash, each with a single
path.
|
24,725 |
def Terminate(self, status=None):
try:
self.queue_manager.DestroyFlowStates(self.session_id)
except queue_manager.MoreDataException:
pass
if not self.IsRunning():
return
self._SendTerminationMessage(status=status)
self.context.state = rdf_flow_runner.FlowContext.State.TERMINATED
self.flow_obj.Flush()
|
Terminates this flow.
|
24,726 |
def pull_tasks(self, kill_event):
logger.info("[TASK PULL THREAD] starting")
poller = zmq.Poller()
poller.register(self.task_incoming, zmq.POLLIN)
msg = self.create_reg_message()
logger.debug("Sending registration message: {}".format(msg))
self.task_incoming.send(msg)
last_beat = time.time()
last_interchange_contact = time.time()
task_recv_counter = 0
poll_timer = 1
while not kill_event.is_set():
time.sleep(LOOP_SLOWDOWN)
ready_worker_count = self.ready_worker_queue.qsize()
pending_task_count = self.pending_task_queue.qsize()
logger.debug("[TASK_PULL_THREAD] ready workers:{}, pending tasks:{}".format(ready_worker_count,
pending_task_count))
if time.time() > last_beat + self.heartbeat_period:
self.heartbeat()
last_beat = time.time()
if pending_task_count < self.max_queue_size and ready_worker_count > 0:
logger.debug("[TASK_PULL_THREAD] Requesting tasks: {}".format(ready_worker_count))
msg = ((ready_worker_count).to_bytes(4, "little"))
self.task_incoming.send(msg)
socks = dict(poller.poll(timeout=poll_timer))
if self.task_incoming in socks and socks[self.task_incoming] == zmq.POLLIN:
_, pkl_msg = self.task_incoming.recv_multipart()
tasks = pickle.loads(pkl_msg)
last_interchange_contact = time.time()
if tasks == 'STOP':  # stop sentinel assumed; original literal stripped
logger.critical("[TASK_PULL_THREAD] Received stop request")
kill_event.set()
break
elif tasks == HEARTBEAT_CODE:
logger.debug("Got heartbeat from interchange")
else:
poll_timer = 1
task_recv_counter += len(tasks)
logger.debug("[TASK_PULL_THREAD] Got tasks: {} of {}".format([t[] for t in tasks],
task_recv_counter))
for task in tasks:
self.pending_task_queue.put(task)
else:
logger.debug("[TASK_PULL_THREAD] No incoming tasks")
poll_timer = min(self.heartbeat_period * 1000, poll_timer * 2)
if time.time() > last_interchange_contact + self.heartbeat_threshold:
logger.critical("[TASK_PULL_THREAD] Missing contact with interchange beyond heartbeat_threshold")
kill_event.set()
logger.critical("[TASK_PULL_THREAD] Exiting")
break
|
Pulls tasks from the incoming tasks 0mq pipe onto the internal
pending task queue
Parameters:
-----------
kill_event : threading.Event
Event to let the thread know when it is time to die.
|
24,727 |
def attribute_node(self, name, ns_uri=None):
attr_impl_node = self.adapter.get_node_attribute_node(
self.impl_node, name, ns_uri)
return self.adapter.wrap_node(
attr_impl_node, self.adapter.impl_document, self.adapter)
|
:param string name: the name of the attribute to return.
:param ns_uri: a URI defining a namespace constraint on the attribute.
:type ns_uri: string or None
:return: the attribute of this element matching ``name`` (and ``ns_uri``
if given), as an :class:`Attribute` node.
|
24,728 |
def align(s1, s2, gap='-', eq=operator.eq):  # default gap character assumed; original literal stripped
m, n = len(s1), len(s2)
table = []
row = list(range(n+1))
table.append(list(row))
for i in range(m):
p = i
row[0] = i+1
for j in range(n):
t = 0 if eq(s1[i], s2[j]) else 1
p, row[j+1] = row[j+1], min(p+t, row[j]+1, row[j+1]+1)
table.append(list(row))
s1_, s2_ = '', ''
i, j = m, n
while i != 0 and j != 0:
_, i, j, s1_, s2_ = min(
(table[i-1][j-1], i-1, j-1, s1[i-1]+s1_, s2[j-1]+s2_),
(table[i-1][j], i-1, j, s1[i-1]+s1_, gap+s2_),
(table[i][j-1], i, j-1, gap+s1_, s2[j-1]+s2_)
)
if i != 0:
s1_ = s1[:i]+s1_
s2_ = gap*i+s2_
if j != 0:
s1_ = gap*j+s1_
s2_ = s2[:j]+s2_
return s1_, s2_
|
aligns two strings
>>> print(*align('pharmacy', 'farmácia', gap='_'), sep='\\n')
pharmac_y
_farmácia
>>> print(*align('advantage', 'vantagem', gap='_'), sep='\\n')
advantage_
__vantagem
|
24,729 |
def _output(self):
self.session._out()
self.session._out()
self.session._out( % self.width)
self.session._out( % self.height)
if self.colorspace is :
self.session._out( %
(self.pal, self.number + 1))
else:
self.session._out( % self.colorspace)
if self.colorspace is :
self.session._out()
self.session._out( % self.bits_per_component)
if self.filter:
self.session._out( % self.filter)
if self.decode:
self.session._out( % self.decode)
if self.transparent:
self.session._out( % self.transparent_string)
if self.soft_mask:
self.session._out( % (self.number + 1))
self.session._out( % self.size)
self.session._put_stream(self.image_data)
self.session._out()
if self.colorspace is :
self.session._out( % (self.palette_filter, self.palette_length))
self.session._put_stream(self.palette)
self.session._out()
if isinstance(self.soft_mask, PDFImage):
obj = self.session._add_object()
self.soft_mask._set_number(obj.id)
self.soft_mask._output()
|
Prompts the creating of image objects.
|
24,730 |
def get_template_options():
template_root = turrentine_settings.TURRENTINE_TEMPLATE_ROOT
turrentine_dir = turrentine_settings.TURRENTINE_TEMPLATE_SUBDIR
output = []
for root, dirs, files in os.walk(turrentine_dir):
for file_name in files:
full_path = os.path.join(root, file_name)
relative_path = os.path.relpath(full_path, template_root)
output.append(relative_path)
return output
|
Returns a list of all templates that can be used for CMS pages.
The paths that are returned are relative to TURRENTINE_TEMPLATE_ROOT.
|
24,731 |
def id(self) -> typing.Union[str, None]:
return self._project.id if self._project else None
|
Identifier for the project.
|
24,732 |
def get_branch_sha(profile, name):
ref = "heads/" + name
data = refs.get_ref(profile, ref)
head = data.get("head")
sha = head.get("sha")
return sha
|
Get the SHA a branch's HEAD points to.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
name
The name of the branch.
Returns:
The requested SHA.
|
24,733 |
def _handle_xmpp_message(self, xmpp_message: BeautifulSoup):
# Stripped literals reconstructed: 'xmlns' and 'type' follow from the calls
# around them; the 'receipt' check and the handler keys are assumptions.
if 'xmlns' in xmpp_message.attrs:
self._handle_xmlns(xmpp_message['xmlns'], xmpp_message)
elif xmpp_message['type'] == 'receipt':
if xmpp_message.g:
self.callback.on_group_receipts_received(chatting.IncomingGroupReceiptsEvent(xmpp_message))
else:
self.xml_namespace_handlers['kik:message:receipt'].handle(xmpp_message)
else:
self.xml_namespace_handlers['jabber:client'].handle(xmpp_message)
|
An XMPP 'message' in the case of Kik is the actual stanza we receive when someone sends us a message
(whether in a group chat or not), starts typing, stops typing, reads our message, etc.
Examples: http://slixmpp.readthedocs.io/api/stanza/message.html
:param xmpp_message: The XMPP 'message' element we received
|
24,734 |
def hil_actuator_controls_encode(self, time_usec, controls, mode, flags):
return MAVLink_hil_actuator_controls_message(time_usec, controls, mode, flags)
|
Sent from autopilot to simulation. Hardware in the loop control
outputs (replacement for HIL_CONTROLS)
time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
controls : Control outputs -1 .. 1. Channel assignment depends on the simulated hardware. (float)
mode : System mode (MAV_MODE), includes arming state. (uint8_t)
flags : Flags as bitfield, reserved for future use. (uint64_t)
|
24,735 |
def copy(self):
dup = super(Conditional, self).copy()
condition = self._condition.copy()
condition.invalidate(self)
dup._condition = condition
return dup
|
Copy the container, put an invalidated copy of the condition in the new container
|
24,736 |
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
for stddev_type in stddev_types)
C = self.COEFFS[imt]
mag = self._convert_magnitude(rup.mag)
mean = (
# Coefficient names are placeholders; the original table keys were stripped.
C['c1'] + C['c2'] * mag + C['c3'] * (mag - 6) ** 2 +
(C['c4'] + C['c5'] * mag) * np.log(dists.rjb + np.exp(C['c6']))
)
mean = clip_mean(imt, mean)
stddevs = self._compute_stddevs(C, dists.rjb.size, stddev_types)
return mean, stddevs
|
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
|
24,737 |
def show_hydrophobic(self):
grp = self.getPseudoBondGroup("Hydrophobic Interactions-%i" % self.tid, associateWith=[self.model])
grp.lineType = self.chimera.Dash
grp.lineWidth = 3
grp.color = self.colorbyname()
for i in self.plcomplex.hydrophobic_contacts.pairs_ids:
self.bs_res_ids.append(i[0])
|
Visualizes hydrophobic contacts.
|
24,738 |
def getbool(value):
value = str(value).lower()
# Accepted strings partly reconstructed from the doctests in the docstring;
# the exact original lists were stripped.
if value in ['true', 't', 'yes', 'y', '1']:
return True
elif value in ['false', 'f', 'no', 'n', '0']:
return False
return None
|
Returns a boolean from any of a range of values. Returns None for
unrecognized values. Numbers other than 0 and 1 are considered
unrecognized.
>>> getbool(True)
True
>>> getbool(1)
True
>>> getbool('1')
True
>>> getbool('t')
True
>>> getbool(2)
>>> getbool(0)
False
>>> getbool(False)
False
>>> getbool('n')
False
|
24,739 |
def _read_from_folder(self, dirname):
templates = _par_read(dirname=dirname, compressed=False)
t_files = glob.glob(dirname + os.sep + '*.ms')  # glob pattern assumed; original literal stripped
tribe_cat_file = glob.glob(os.path.join(dirname, "tribe_cat.*"))
if len(tribe_cat_file) != 0:
tribe_cat = read_events(tribe_cat_file[0])
else:
tribe_cat = Catalog()
previous_template_names = [t.name for t in self.templates]
for template in templates:
if template.name in previous_template_names:
continue  # skip templates already held (reconstructed; the original lines appear truncated)
t_file = [f for f in t_files if f.split(os.sep)[-1].startswith(template.name)]  # lookup reconstructed (assumption)
if t_file:
template.st = read(t_file[0])
self.templates.extend(templates)
return
|
Internal folder reader.
:type dirname: str
:param dirname: Folder to read from.
|
24,740 |
def get_share_properties(self, share_name, timeout=None):
# NOTE: the request-building body was garbled in extraction (only the
# fragment "share_name GET restype share timeout" survived); the lines
# below are a reconstruction and the helper names are assumptions.
_validate_not_none('share_name', share_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = _get_path(share_name)
request.query = [
('restype', 'share'),
('timeout', _int_to_str(timeout)),
]
response = self._perform_request(request)
return _parse_share(share_name, response)
|
Returns all user-defined metadata and system properties for the
specified share. The data returned does not include the share's
list of files or directories.
:param str share_name:
Name of existing share.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: A Share that exposes properties and metadata.
:rtype: :class:`.Share`
|
24,741 |
def __replace_names(sentence, counts):
if sentence is not None:
while sentence.find('#NAME') != -1:
sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
if sentence.find('#NAME') == -1:
return sentence
return sentence
else:
return sentence
|
Find and replace all instances of #NAME.
:param sentence: The sentence to process.
:param counts: Name counts passed through to __get_name.
|
24,742 |
def encode(password, algorithm, salt, iterations):
hash = hashlib.pbkdf2_hmac(digest().name, password.encode(), salt.encode(), iterations)
encoded = base64.b64encode(hash).decode().strip()
return "%s$%d$%s$%s" % (algorithm, iterations, salt, encoded)
|
Encode a Password
:param password: Password
:param algorithm: Algorithm label included in the encoded output
:param salt: Salt
:param iterations: iterations
:return: PBKDF2 hashed Password
|
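Since ``digest`` is defined elsewhere in the module, here is a self-contained sketch of the same ``algorithm$iterations$salt$hash`` layout using ``hashlib`` directly; the SHA-256 digest and the sample inputs are assumptions.

import base64
import hashlib

def pbkdf2_encode(password, salt, iterations, algorithm="pbkdf2_sha256"):
    # Derive the key with an assumed SHA-256 digest and base64-encode it,
    # mirroring the "%s$%d$%s$%s" layout of encode() above.
    raw = hashlib.pbkdf2_hmac("sha256", password.encode(), salt.encode(), iterations)
    encoded = base64.b64encode(raw).decode().strip()
    return "%s$%d$%s$%s" % (algorithm, iterations, salt, encoded)

print(pbkdf2_encode("secret", "somesalt", 100000))
# -> pbkdf2_sha256$100000$somesalt$<base64 hash>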
24,743 |
def drop(connection, skip):
for idx, name, manager in _iterate_managers(connection, skip):
click.secho(f'dropping {name}', fg='cyan', bold=True)  # message text and colour assumed; original literals stripped
manager.drop_all()
|
Drop all.
|
24,744 |
def ReadMostRecentClientGraphSeries(self, client_label,
report_type
):
series_with_timestamps = self.ReadAllClientGraphSeries(
client_label, report_type)
if not series_with_timestamps:
return None
_, latest_series = list(sorted(iteritems(series_with_timestamps)))[-1]
return latest_series
|
See db.Database.
|
24,745 |
def absorptionCoefficient_Doppler(Components=None,SourceTables=None,partitionFunction=PYTIPS,
Environment=None,OmegaRange=None,OmegaStep=None,OmegaWing=None,
IntensityThreshold=DefaultIntensityThreshold,
OmegaWingHW=DefaultOmegaWingHW,
ParameterBindings=DefaultParameterBindings,
EnvironmentDependencyBindings=DefaultEnvironmentDependencyBindings,
GammaL='gamma_air', HITRAN_units=True, LineShift=True,  # GammaL default assumed from the docstring
File=None, Format=None, OmegaGrid=None,
WavenumberRange=None,WavenumberStep=None,WavenumberWing=None,
WavenumberWingHW=None,WavenumberGrid=None):
if WavenumberRange: OmegaRange=WavenumberRange
if WavenumberStep: OmegaStep=WavenumberStep
if WavenumberWing: OmegaWing=WavenumberWing
if WavenumberWingHW: OmegaWingHW=WavenumberWingHW
if WavenumberGrid: OmegaGrid=WavenumberGrid
Components = listOfTuples(Components)
SourceTables = listOfTuples(SourceTables)
Components,SourceTables,Environment,OmegaRange,OmegaStep,OmegaWing,\
IntensityThreshold,Format = \
getDefaultValuesForXsect(Components,SourceTables,Environment,OmegaRange,
OmegaStep,OmegaWing,IntensityThreshold,Format)
if not OmegaStep: OmegaStep = 0.001
if OmegaStep>0.005: warn()
if OmegaGrid is not None:
Omegas = npsort(OmegaGrid)
else:
Omegas = arange_(OmegaRange[0],OmegaRange[1],OmegaStep)
number_of_points = len(Omegas)
Xsect = zeros(number_of_points)
Tref = __FloatType__(296.)
pref = __FloatType__(1.)
T = Environment['T']  # keys per the docstring's Environment description
p = Environment['p']
ABUNDANCES = {}
NATURAL_ABUNDANCES = {}
for Component in Components:
M = Component[0]
I = Component[1]
if len(Component) >= 3:
ni = Component[2]
else:
try:
ni = ISO[(M,I)][ISO_INDEX['abundance']]  # index key assumed (HAPI convention)
except KeyError:
raise Exception('no isotopologue M,I = %d,%d found' % (M,I))  # message assumed
ABUNDANCES[(M,I)] = ni
NATURAL_ABUNDANCES[(M,I)] = ISO[(M,I)][ISO_INDEX['abundance']]
if HITRAN_units:
factor = __FloatType__(1.0)
else:
factor = volumeConcentration(p,T)
for TableName in SourceTables:
# Table keys below ('header'/'data' and the HITRAN parameter names) were
# stripped in extraction and are filled in from the usual HAPI conventions.
nline = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
for RowID in range(nline):
LineCenterDB = LOCAL_TABLE_CACHE[TableName]['data']['nu'][RowID]
LineIntensityDB = LOCAL_TABLE_CACHE[TableName]['data']['sw'][RowID]
LowerStateEnergyDB = LOCAL_TABLE_CACHE[TableName]['data']['elower'][RowID]
MoleculeNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['molec_id'][RowID]
IsoNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['local_iso_id'][RowID]
if LineShift:
Shift0DB = LOCAL_TABLE_CACHE[TableName]['data']['delta_air'][RowID]
else:
Shift0DB = 0
if (MoleculeNumberDB,IsoNumberDB) not in ABUNDANCES: continue
SigmaT = partitionFunction(MoleculeNumberDB,IsoNumberDB,T)
SigmaTref = partitionFunction(MoleculeNumberDB,IsoNumberDB,Tref)
LineIntensity = EnvironmentDependency_Intensity(LineIntensityDB,T,Tref,SigmaT,SigmaTref,
LowerStateEnergyDB,LineCenterDB)
if LineIntensity < IntensityThreshold: continue
cMassMol = 1.66053873e-27
fSqrtMass = sqrt(molecularMass(MoleculeNumberDB,IsoNumberDB))
cc_ = 2.99792458e8
cBolts_ = 1.3806503e-23
GammaD = (cSqrt2Ln2/cc_)*sqrt(cBolts_/cMassMol)*sqrt(T) * LineCenterDB/fSqrtMass
OmegaWingF = max(OmegaWing,OmegaWingHW*GammaD)
Shift0 = Shift0DB*p/pref
BoundIndexLower = bisect(Omegas,LineCenterDB-OmegaWingF)
BoundIndexUpper = bisect(Omegas,LineCenterDB+OmegaWingF)
lineshape_vals = PROFILE_DOPPLER(LineCenterDB+Shift0,GammaD,Omegas[BoundIndexLower:BoundIndexUpper])
Xsect[BoundIndexLower:BoundIndexUpper] += factor / NATURAL_ABUNDANCES[(MoleculeNumberDB,IsoNumberDB)] * \
ABUNDANCES[(MoleculeNumberDB,IsoNumberDB)] * \
LineIntensity * lineshape_vals
if File: save_to_file(File,Format,Omegas,Xsect)
return Omegas,Xsect
|
INPUT PARAMETERS:
Components: list of tuples [(M,I,D)], where
M - HITRAN molecule number,
I - HITRAN isotopologue number,
D - abundance (optional)
SourceTables: list of tables from which to calculate cross-section (optional)
partitionFunction: pointer to partition function (default is PYTIPS) (optional)
Environment: dictionary containing thermodynamic parameters.
'p' - pressure in atmospheres,
'T' - temperature in Kelvin
Default={'p':1.,'T':296.}
WavenumberRange: wavenumber range to consider.
WavenumberStep: wavenumber step to consider.
WavenumberWing: absolute wing for calculating a lineshape (in cm-1)
WavenumberWingHW: relative wing for calculating a lineshape (in halfwidths)
IntensityThreshold: threshold for intensities
GammaL: specifies broadening parameter ('gamma_air' or 'gamma_self')
HITRAN_units: use cm2/molecule (True) or cm-1 (False) for absorption coefficient
File: write output to file (if specified)
Format: c-format of file output (accounts for significant digits in WavenumberStep)
OUTPUT PARAMETERS:
Wavenum: wavenumber grid with respect to parameters OmegaRange and OmegaStep
Xsect: absorption coefficient calculated on the grid
---
DESCRIPTION:
Calculate absorption coefficient using Doppler (Gauss) profile.
Absorption coefficient is calculated at arbitrary temperature and pressure.
The user can vary a wide range of parameters to control the calculation.
The choice of these parameters depends on the properties of a particular linelist.
Default values are a rough guess that gives decent precision (on average)
for a reasonable amount of CPU time. To increase calculation accuracy,
the user should resort to trial and error.
---
EXAMPLE OF USAGE:
nu,coef = absorptionCoefficient_Doppler(((2,1),),'co2',WavenumberStep=0.01,
HITRAN_units=False,GammaL='gamma_self')
---
|
24,746 |
def join(L, keycols=None, nullvals=None, renamer=None,
returnrenaming=False, Names=None):
if isinstance(L, dict):
Names = L.keys()
LL = L.values()
else:
if Names == None:
Names = range(len(L))
else:
assert len(Names) == len(L)
LL = L
if not keycols:
keycols = utils.listintersection([a.dtype.names for a in LL])
if len(keycols) == 0:
raise ValueError()
keycols = [l for l in keycols if all([a.dtype[l] == LL[0].dtype[l]
for a in LL])]
if len(keycols) == 0:
raise ValueError(
)
keycols = [l for l in keycols if all([isunique(a[keycols])
for a in LL])]
if len(keycols) == 0:
raise ValueError(
)
else:
print('Inferred key columns:', keycols)  # message text assumed; original literal stripped
elif isinstance(keycols,str):
keycols = [l.strip() for l in keycols.split()]
commons = set(Commons([l.dtype.names for l in LL])).difference(keycols)
renaming = {}
if len(commons) > 0:
print
if renamer == None:
print()
renamer = DEFAULT_RENAMER
renaming = renamer(L, Names=Names)
if not RenamingIsInCorrectFormat(renaming, L, Names=Names):
print(
)
renaming = DEFAULT_RENAMER(L, Names = Names)
NewNames = [[l if l not in renaming[k].keys() else renaming[k][l]
for l in ll.dtype.names] for (k, ll) in zip(Names, LL)]
if set(Commons(NewNames)).difference(keycols):
raise ValueError(
)
Result = strictjoin(L, keycols, nullvals, renaming, Names=Names)
if returnrenaming:
return [Result, renaming]
else:
if renaming:
print(
)
return Result
|
Combine two or more numpy ndarray with structured dtype on common key
column(s).
Merge a list (or dictionary) of numpy ndarray with structured dtype, given
by `L`, on key columns listed in `keycols`.
This function is actually a wrapper for
:func:`tabular.spreadsheet.strictjoin`.
The ``strictjoin`` function has a few restrictions, and this ``join``
function will try to ensure that they are satisfied:
* each element of `keycol` must be a valid column name in `X`
and each array in `L`, and all of the same data-type.
* for each column `col` in `keycols`, and each array `A` in `L`, the
values in `A[col]` must be unique, -- and same for `X[col]`.
(Actually this uniqueness doesn't have to hold for the first tabarray
in L, that is, L[0], but must for all the subsequent ones.)
* the *non*-key-column column names in each of the arrays must be
disjoint from each other -- or disjoint after a renaming (see below).
An error will be thrown if these conditions are not met.
If you don't provide a value of `keycols`, the algorithm will attempt to
infer which columns should be used by trying to find the largest set of
common column names that contain unique values in each array and have the
same data type. An error will be thrown if no such inference can be made.
*Renaming of overlapping columns*
If the non-keycol column names of the arrays overlap, ``join`` will
by default attempt to rename the columns by using a simple
convention:
* If `L` is a list, it will append the number in the list to the
key associated with the array.
* If `L` is a dictionary, the algorithm will append the string
representation of the key associated with an array to the
overlapping columns from that array.
You can override the default renaming scheme using the `renamer`
parameter.
*Nullvalues for keycolumn differences*
If there are regions of the keycolumns that are not overlapping
between merged arrays, `join` will fill in the relevant entries
with null values chosen by default:
* '0' for integer columns
* '0.0' for float columns
* the empty character ('') for string columns.
**Parameters**
**L** : list or dictionary
Numpy recarrays to merge. If `L` is a dictionary, the keys
name each numpy recarray, and the corresponding values are
the actual numpy recarrays.
**keycols** : list of strings
List of the names of the key columns along which to do the
merging.
**nullvals** : function, optional
A function that returns a null value for a numpy format
descriptor string, e.g. ``'<i4'`` or ``'|S5'``.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_NULLVALUEFORMAT`
**renamer** : function, optional
A function for renaming overlapping non-key column names
among the numpy recarrays to merge.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_RENAMER`
**returnrenaming** : Boolean, optional
Whether to return the result of the `renamer` function.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_RENAMER`
**Names**: list of strings:
If `L` is a list, then names for elements of `L` can be
specified with `Names` (without losing the ordering as you
would if you did it with a dictionary).
`len(L)` must equal `len(Names)`
**Returns**
**result** : numpy ndarray with structured dtype
Result of the join, e.g. the result of merging the input
numpy arrays defined in `L` on the key columns listed in
`keycols`.
**renaming** : dictionary of dictionaries, optional
The result returned by the `renamer` function. Returned
only if `returnrenaming == True`.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_RENAMER`
**See Also:**
:func:`tabular.spreadsheet.strictjoin`
|
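A hedged usage sketch of the ``join`` described above, built on plain numpy structured arrays; the column names and values are illustrative.

import numpy as np

ages = np.array([(1, 34), (2, 27)], dtype=[("id", "<i4"), ("age", "<i4")])
pets = np.array([(1, "cat"), (2, "dog")], dtype=[("id", "<i4"), ("pet", "|S3")])

# Merge on the common, unique "id" column; the non-key column names are
# already disjoint, so no renaming is needed.
merged = join([ages, pets], keycols=["id"])
# merged now has columns id, age and pet, one row per matching id.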
24,747 |
def cleanup(self):
all([delete_file_or_tree(f) for f in self.to_delete])
self.to_delete = []
|
Clean up my temporary files.
|
24,748 |
def success_count(self):
return len([i for i, result in enumerate(self.data) if result.success])
|
Number of passed test cases in this list.
:return: integer
|
24,749 |
def _addDPFilesToOldEntry(self, *files):
self.view_entry_dialog.addDataProducts(self.purrer.makeDataProducts(
[(file, True) for file in files], unbanish=True, unignore=True))
|
callback to add DPs corresponding to files.
|
24,750 |
def spike_times(signal, threshold, fs, absval=True):
times = []
if absval:
signal = np.abs(signal)
over, = np.where(signal>threshold)
segments, = np.where(np.diff(over) > 1)
if len(over) > 1:
if len(segments) == 0:
segments = [0, len(over)-1]
else:
if segments[0] != 0:
segments = np.insert(segments, [0], [0])
else:
times.append(float(over[0])/fs)
if 1 not in segments:
segments[0] = 1
if segments[-1] != len(over)-1:
segments = np.insert(segments, [len(segments)], [len(over)-1])
else:
times.append(float(over[-1])/fs)
for iseg in range(1,len(segments)):
if segments[iseg] - segments[iseg-1] == 1:
idx = over[segments[iseg]]
else:
segments[0] = segments[0]-1
idx = over[segments[iseg-1]+1] + np.argmax(signal[over[segments[iseg-1]+1]:over[segments[iseg]]])
times.append(float(idx)/fs)
elif len(over) == 1:
times.append(float(over[0])/fs)
if len(times)>0:
return refractory(times)
else:
return times
|
Detect spikes from a given signal
:param signal: Spike trace recording (vector)
:type signal: numpy array
:param threshold: Threshold value to determine spikes
:type threshold: float
:param absval: Whether to apply absolute value to signal before thresholding
:type absval: bool
:returns: list(float) of spike times in seconds
For every continuous set of points over given threshold,
returns the time of the maximum
|
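A small synthetic check of ``spike_times`` (illustrative values; it assumes the module's ``refractory`` helper is available and leaves short spike lists unchanged).

import numpy as np

fs = 1000.0                                   # 1 kHz sampling rate
signal = np.zeros(1000)
signal[100:105] = [0.2, 0.9, 1.5, 0.8, 0.3]   # spike peaking at sample 102
signal[400:404] = [0.6, 1.4, 0.7, 0.3]        # spike peaking at sample 401

print(spike_times(signal, threshold=0.5, fs=fs))
# roughly [0.102, 0.401] -- one time per supra-threshold segment maximum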
24,751 |
def _onError(self, error):
self.stop()
self._logModule.err(
error,
"Unhandled error logging exception to %s" % (self.airbrakeURL,))
self.start()
|
Stop observer, raise exception, then restart. This prevents an infinite ping pong game of exceptions.
|
24,752 |
def from_inline(cls: Type[IdentityType], version: int, currency: str, inline: str) -> IdentityType:
selfcert_data = Identity.re_inline.match(inline)
if selfcert_data is None:
raise MalformedDocumentError("Inline self certification")
pubkey = selfcert_data.group(1)
signature = selfcert_data.group(2)
ts = BlockUID.from_str(selfcert_data.group(3))
uid = selfcert_data.group(4)
return cls(version, currency, pubkey, uid, ts, signature)
|
Return Identity instance from inline Identity string
:param version: Document version number
:param currency: Name of the currency
:param inline: Inline string of the Identity
:return:
|
24,753 |
def name_backbone(name, rank=None, kingdom=None, phylum=None, clazz=None,
order=None, family=None, genus=None, strict=False, verbose=False,
offset=None, limit=100, **kwargs):
# NOTE: the original body was garbled in extraction (docstring fragments and
# a smashed 'species/match ... limit' line); the URL and argument dict below
# are reconstructed, and 'gbif_baseurl' is an assumed module-level constant.
url = gbif_baseurl + 'species/match'
args = {'name': name, 'rank': rank, 'kingdom': kingdom, 'phylum': phylum,
'class': clazz, 'order': order, 'family': family, 'genus': genus,
'strict': strict, 'verbose': verbose, 'offset': offset, 'limit': limit}
tt = gbif_GET(url, args, **kwargs)
return tt
|
Lookup names in the GBIF backbone taxonomy.
:param name: [str] Full scientific name potentially with authorship (required)
:param rank: [str] The rank given as our rank enum. (optional)
:param kingdom: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param phylum: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param class: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param order: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param family: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param genus: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param strict: [bool] If True it (fuzzy) matches only the given name, but never a
taxon in the upper classification (optional)
:param verbose: [bool] If True show alternative matches considered which had been rejected.
:param offset: [int] Record to start at. Default: ``0``
:param limit: [int] Number of results to return. Default: ``100``
A list for a single taxon with many slots (with ``verbose=False`` - default), or a
list of length two, first element for the suggested taxon match, and a data.frame
with alternative name suggestions resulting from fuzzy matching (with ``verbose=True``).
If you don't get a match GBIF gives back a list of length 3 with slots synonym,
confidence, and ``matchType='NONE'``.
reference: http://www.gbif.org/developer/species#searching
Usage::
from pygbif import species
species.name_backbone(name='Helianthus annuus', kingdom='plants')
species.name_backbone(name='Helianthus', rank='genus', kingdom='plants')
species.name_backbone(name='Poa', rank='genus', family='Poaceae')
# Verbose - gives back alternatives
species.name_backbone(name='Helianthus annuus', kingdom='plants', verbose=True)
# Strictness
species.name_backbone(name='Poa', kingdom='plants', verbose=True, strict=False)
species.name_backbone(name='Helianthus annuus', kingdom='plants', verbose=True, strict=True)
# Non-existent name
species.name_backbone(name='Aso')
# Multiple equal matches
species.name_backbone(name='Oenante')
|
24,754 |
def configure(root_directory, build_path, cmake_command, only_show):
default_build_path = os.path.join(root_directory, 'build')  # directory name assumed; original literal stripped
check_cmake_exists()
if build_path is None:
build_path = default_build_path
if not only_show:
setup_build_path(build_path)
cmake_command += ' ' + build_path  # separator assumed; original literal stripped
print('{0}'.format(cmake_command))  # format string assumed
if only_show:
sys.exit(0)
run_cmake(cmake_command, build_path, default_build_path)
|
Main configure function.
|
24,755 |
def get_proficiency_admin_session(self, proxy):
if not self.supports_proficiency_admin():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.ProficiencyAdminSession(proxy=proxy, runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session
|
Gets the ``OsidSession`` associated with the proficiency administration service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencyAdminSession``
:rtype: ``osid.learning.ProficiencyAdminSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_admin()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_proficiency_admin()`` is ``true``.*
|
24,756 |
def score_frequency_grid(self, f0, df, N):
return self._score_frequency_grid(f0, df, N)
|
Compute the score on a frequency grid.
Some models can compute results faster if the inputs are passed in this
manner.
Parameters
----------
f0, df, N : (float, float, int)
parameters describing the frequency grid freq = f0 + df * arange(N)
Note that these are frequencies, not angular frequencies.
Returns
-------
score : ndarray
the length-N array giving the score at each frequency
|
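For reference, a tiny sketch of the grid the docstring describes; ``model`` and the numeric values are placeholders.

import numpy as np

f0, df, N = 0.1, 0.01, 500
freq = f0 + df * np.arange(N)   # the (non-angular) frequency grid being scored
# score = model.score_frequency_grid(f0, df, N)   # length-N array, one score per freq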
24,757 |
def orify(event, changed_callback):
event.changed = changed_callback
if not hasattr(event, '_set'):  # implied by the _set/_clear assignments below
event._set = event.set
event._clear = event.clear
event.set = lambda: or_set(event)
event.clear = lambda: or_clear(event)
|
Override ``set`` and ``clear`` methods on event to call specified callback
function after performing default behaviour.
Parameters
----------
event : threading.Event
The event whose ``set``/``clear`` methods are overridden.
changed_callback : callable
Called after the default behaviour runs.
|
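A hedged sketch of the usual "OR of two events" pattern this helper supports, assuming the module-level ``or_set``/``or_clear`` invoke the stored callback after the default behaviour; ``or_event`` and ``changed`` are illustrative names.

import threading

e1, e2 = threading.Event(), threading.Event()
or_event = threading.Event()

def changed():
    # Mirror the OR of both source events onto or_event.
    if e1.is_set() or e2.is_set():
        or_event.set()
    else:
        or_event.clear()

orify(e1, changed)
orify(e2, changed)

e1.set()                   # patched set() runs, then changed() fires
print(or_event.is_set())   # True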
24,758 |
def _read_dictionary_page(file_obj, schema_helper, page_header, column_metadata):
raw_bytes = _read_page(file_obj, page_header, column_metadata)
io_obj = io.BytesIO(raw_bytes)
values = encoding.read_plain(
io_obj,
column_metadata.type,
page_header.dictionary_page_header.num_values
)
schema_element = schema_helper.schema_element(column_metadata.path_in_schema[-1])
return convert_column(values, schema_element) if schema_element.converted_type is not None else values
|
Read a page containing dictionary data.
Consumes data using the plain encoding and returns an array of values.
|
24,759 |
def group_add(self, name, restrict, repos, lces=[], assets=[], queries=[],
policies=[], dashboards=[], credentials=[], description=''):
# NOTE: the raw_query module/action names and the payload keys were stripped
# in extraction; the key names below are assumptions derived from the
# parameter names, and the last two entries are placeholders.
return self.raw_query('group', 'add', data={
'lces': [{'id': i} for i in lces],
'assets': [{'id': i} for i in assets],
'queries': [{'id': i} for i in queries],
'policies': [{'id': i} for i in policies],
'dashboards': [{'id': i} for i in dashboards],
'credentials': [{'id': i} for i in credentials],
'repositories': [{'id': i} for i in repos],
'restrict': [{'id': i} for i in restrict],
'name': name,
'description': description,
'users': [],
'context': ''
})
|
group_add name, restrict, repos
|
24,760 |
def init_word_db(cls, name, text):
text = text.replace('\n', ' ').replace('\r', ' ')  # replacement pairs assumed; original literals stripped
words = [w.strip() for w in text.split() if w.strip()]
assert len(words) > 2, 'need at least three words of source text'  # assert message assumed
freqs = {}
for i in range(len(words) - 2):
w1 = words[i]
w2 = words[i + 1]
w3 = words[i + 2]
key = (w1, w2)
if key in freqs:
freqs[key].append(w3)
else:
freqs[key] = [w3]
cls._dbs[name] = {
'freqs': freqs,  # key names are placeholders; original literals stripped
'words': words,
'word_count': len(words) - 2
}
|
Initialize a database of words for the maker with the given name
|
24,761 |
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
coeffs = self.COEFFS_BEDROCK[imt].copy()
a_1, a_2, sigma_site = self._get_site_coeffs(sites, imt)
coeffs.update({'a1': a_1, 'a2': a_2, 'sigma_site': sigma_site})  # key names assumed from the docstring's a1/a2/site terms
ln_mean = (self._compute_magnitude_terms(rup, coeffs) +
self._compute_distance_terms(dists, coeffs))
ln_mean += self._compute_site_amplification(ln_mean, coeffs)
ln_stddevs = self._get_stddevs(coeffs, stddev_types)
return ln_mean, [ln_stddevs]
|
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for specification of input and result values.
Implements the following equations:
Equation (8) on p. 203 for the bedrock ground motion:
``ln(y_br) = c1 + c2*(M - 6) + c3*(M - 6)**2 - lnR - c4*R + ln(ε_br)``
Equation (9) on p. 207 gives the site amplification factor:
``ln(F_s) = a1*y_br + a2 + ln(δ_site)``
Equation (10) on p. 207 for the ground motion at a given site:
``y_site = y_br*F_s``
Equation (11) on p. 207 for total standard error at a given site:
``σ{ln(ε_site)} = sqrt(σ{ln(ε_br)}**2 + σ{ln(δ_site)}**2)``
|
24,762 |
def uploads(self, option):
params = join_params(self.parameters, {"uploads": option})
return self.__class__(**params)
|
Set whether to filter by a user's uploads list. Options available are
user.ONLY, user.NOT, and None; default is None.
|
24,763 |
def rgb_color_list_to_hex(color_list):
color_list_rgb = [[int(x*255) for x in c[0:3]] for c in color_list]
color_list_hex = ['#{:02x}{:02x}{:02x}'.format(rgb[0], rgb[1], rgb[2]) for rgb in color_list_rgb]  # format string assumed; original literal stripped
return color_list_hex
|
Convert a list of RGBa colors to a list of hexadecimal color codes.
Parameters
----------
color_list : list
the list of RGBa colors
Returns
-------
color_list_hex : list
the list of colors as hexadecimal strings
|
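Quick check of the conversion above (assuming the stripped format string was the usual ``'#{:02x}{:02x}{:02x}'``); alpha channels are ignored.

print(rgb_color_list_to_hex([(1.0, 0.0, 0.0, 1.0), (0.0, 0.5, 1.0, 0.2)]))
# ['#ff0000', '#007fff']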
24,764 |
def process_scheduled_consumption(self, token):
scheduled_retry = self._tokens_to_scheduled_consumption.pop(token)
self._total_wait = max(
self._total_wait - scheduled_retry['time_to_consume'], 0)  # key assumed; original literal stripped
|
Processes a scheduled consumption request that has completed
:type token: RequestToken
:param token: The token associated to the consumption
request that is used to identify the request.
|
24,765 |
def make_get_request(url, params, headers, connection):
timeout = getattr(connection, 'timeout')
response = connection.get(url, params=params, headers=headers, timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status()
|
Helper function that makes an HTTP GET request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
The return value is a Python dict deserialized by the JSON decoder. However,
if the status code is not 2xx or 403, a requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
response = make_get_request('http://firebase.localhost/users', {'print': 'silent'},
{'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => {'1': 'John Doe', '2': 'Jane Doe'}
|
24,766 |
def create_empty_resource(self, name):
assert "/" not in name
if self.provider.readonly:
raise DAVError(HTTP_FORBIDDEN)
path = util.join_uri(self.path, name)
fp = self.provider._loc_to_file_path(path, self.environ)
f = open(fp, "wb")
f.close()
return self.provider.get_resource_inst(path, self.environ)
|
Create an empty (length-0) resource.
See DAVResource.create_empty_resource()
|
24,767 |
def update(self, uid):
postinfo = MPost.get_by_uid(uid)
if postinfo.kind == self.kind:
pass
else:
return False
post_data, ext_dic = self.fetch_post_data()
if 'cnt_md' in post_data:  # key implied by postinfo.cnt_md below
pass
else:
return False
if 'valid' in post_data:
post_data['valid'] = int(post_data['valid'])
else:
post_data['valid'] = postinfo.valid
ext_dic['def_uid'] = str(uid)  # placeholder key; original literal stripped
cnt_old = tornado.escape.xhtml_unescape(postinfo.cnt_md).strip()
cnt_new = post_data['cnt_md'].strip()
if cnt_old == cnt_new:
pass
else:
MPostHist.create_post_history(postinfo)
MPost.modify_meta(uid, post_data, extinfo=ext_dic)
self._add_download_entity(ext_dic)
update_category(uid, post_data)
update_label(uid, post_data)
logger.info('Updated post of kind ' + self.kind)  # message text assumed; original literal stripped
tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
self.redirect('/{0}/{1}'.format(router_post[postinfo.kind], uid))  # URL pattern assumed; original literal stripped
|
Update the post information.
|
24,768 |
def add_castle(self, position):
if self.has_moved or self.in_check(position):
return
if self.color == color.white:
rook_rank = 0
else:
rook_rank = 7
castle_type = {
notation_const.KING_SIDE_CASTLE: {
"rook_file": 7,
"direction": lambda king_square, times: king_square.shift_right(times)
},
notation_const.QUEEN_SIDE_CASTLE: {
"rook_file": 0,
"direction": lambda king_square, times: king_square.shift_left(times)
}
}
for castle_key in castle_type:
castle_dict = castle_type[castle_key]
castle_rook = position.piece_at_square(Location(rook_rank, castle_dict["rook_file"]))
if self._rook_legal_for_castle(castle_rook) and \
self._empty_not_in_check(position, castle_dict["direction"]):
yield self.create_move(castle_dict["direction"](self.location, 2), castle_key)
|
Adds kingside and queenside castling moves if legal
:type: position: Board
|
24,769 |
def read_stdout(self):
output = ""
if self._stdout_file:
try:
with open(self._stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
log.warning("Could not read {}: {}".format(self._stdout_file, e))
return output
|
Reads the standard output of the QEMU process.
Only use when the process has been stopped or has crashed.
|
24,770 |
def load_libs(self, scripts_paths):
for path in scripts_paths:
self.run_script(_read_file(path), identifier=path)
|
Load script files into the context.\
This can be thought as the HTML script tag.\
The files content must be utf-8 encoded.
This is a shortcut for reading the files\
and pass the content to :py:func:`run_script`
:param list scripts_paths: Script file paths.
:raises OSError: If there was an error\
manipulating the files. This should not\
normally be caught
:raises V8Error: if there was\
an error running the JS script
|
24,771 |
def tagscleanupdicts(configuration=None, url=None, keycolumn=5, failchained=True):
if not Tags._tags_dict:
if configuration is None:
configuration = Configuration.read()
with Download(full_agent=configuration.get_user_agent()) as downloader:
if url is None:
url = configuration['tags_cleanup_url']  # configuration key assumed; original literal stripped
Tags._tags_dict = downloader.download_tabular_rows_as_dicts(url, keycolumn=keycolumn)
keys = Tags._tags_dict.keys()
chainerror = False
for i, tag in enumerate(keys):
whattodo = Tags._tags_dict[tag]
action = whattodo[u'action']  # column names and separator assumed; original literals stripped
final_tags = whattodo[u'final tags']
for final_tag in final_tags.split(';'):
if final_tag in keys:
index = list(keys).index(final_tag)
if index != i:
whattodo2 = Tags._tags_dict[final_tag]
action2 = whattodo2[u'action']
if action2 != u'ok' and action2 != u'other':  # compared actions are placeholders; originals stripped
final_tags2 = whattodo2[u'final tags']
if final_tag not in final_tags2.split(';'):
chainerror = True
if failchained:
logger.error('Chained rule: %s of %s to %s, but %s of %s gives %s!' %  # message format assumed
(action, tag, final_tags, action2, final_tag, final_tags2))
if failchained and chainerror:
raise ChainRuleError()
Tags._wildcard_tags = list()
for tag in Tags._tags_dict:
if '*' in tag:  # wildcard marker assumed; original literal stripped
Tags._wildcard_tags.append(tag)
return Tags._tags_dict, Tags._wildcard_tags
|
Get tags cleanup dictionaries
Args:
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
url (Optional[str]): Url of tags cleanup spreadsheet. Defaults to None (internal configuration parameter).
keycolumn (int): Column number of tag column in spreadsheet. Defaults to 5.
failchained (bool): Fail if chained rules found. Defaults to True.
Returns:
Tuple[Dict,List]: Returns (Tags dictionary, Wildcard tags list)
|
24,772 |
def get_rmse(self, data_x=None, data_y=None):
if data_x is None:
data_x = np.array(self.args["x"])
if data_y is None:
data_y = np.array(self.args["y"])
if len(data_x) != len(data_y):
raise ValueError("Lengths of data_x and data_y are different")
rmse_y = self.bestfit_func(data_x)
return np.sqrt(np.mean((rmse_y - data_y) ** 2))
|
Get Root Mean Square Error using
self.bestfit_func
args:
data_x: array, default=self.args["x"]
x values at which the best-fit function is evaluated
data_y: array, default=self.args["y"]
observed y values compared against the fit
|
24,773 |
def asyncPipeUnion(context=None, _INPUT=None, conf=None, **kwargs):
_input = yield _INPUT
_OUTPUT = get_output(_input, **kwargs)
returnValue(_OUTPUT)
|
An operator that asynchronously merges multiple source together.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : unused
Keyword arguments
-----------------
_OTHER1 : asyncPipe like object
_OTHER2 : etc.
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
|
24,774 |
def send(self, command, message=None):
if message:
joined = command + constants.NL + util.pack(message)
else:
joined = command + constants.NL
if self._blocking:
for sock in self.socket():
sock.sendall(joined)
else:
self._pending.append(joined)
|
Send a command over the socket with the length encoded
|
24,775 |
def _validate_arguments(self):
if self._email is None:
self.set_error_message("E-mail for the account not provided")
return False
if self._api_token is None:
self.set_error_message("API Token for the account not provided")
return False
return True
|
Validates the command line arguments passed to the CLI
Derived classes that override need to call this method before
validating their arguments
|
24,776 |
def dates(self, start, end):
td = date.today()
end = safetodate(end) or td
end = end if end <= td else td
start = safetodate(start)
if not start or start > end:
start = end - timedelta(days=int(round(30.4*
settings.months_history)))
return start,end
|
Internal function which perform pre-conditioning on dates:
:keyword start: start date.
:keyword end: end date.
This function makes sure the *start* and *end* date are consistent.
It *never fails* and always return a two-element tuple
containing *start*, *end* with *start* less or equal *end*
and *end* never after today.
There should be no reason to override this function.
|
24,777 |
def round_controlled(cycled_iterable, rounds=1):
round_start = None
rounds_completed = 0
for item in cycled_iterable:
if round_start is None:
round_start = item
elif item == round_start:
rounds_completed += 1
if rounds_completed == rounds:
return
yield item
|
Return after <rounds> passes through a cycled iterable.
|
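A minimal example of ``round_controlled`` limiting an endless cycle to two full passes.

from itertools import cycle

print(list(round_controlled(cycle("ab"), rounds=2)))
# ['a', 'b', 'a', 'b']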
24,778 |
def get_factory_kwargs(self):
kwargs = {}
# Factory keyword names below are assumed to mirror the attribute names
# (the original literals were stripped).
kwargs.update({
'can_delete': self.can_delete,
'extra': self.extra,
'exclude': self.exclude,
'fields': self.fields,
'formfield_callback': self.formfield_callback,
'fk_name': self.fk_name,
})
if self.formset_class:
kwargs['formset'] = self.formset_class
if self.child_form:
kwargs['form'] = self.child_form
return kwargs
|
Returns the keyword arguments for calling the formset factory
|
24,779 |
def user_exists(name, host='localhost', **kwargs):
# NOTE: the default host, the fabric hide() channels and the SQL text are
# reconstructed assumptions; the original string literals were stripped.
with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
res = query("SELECT COUNT(*) FROM mysql.user WHERE User = '%(name)s' AND Host = '%(host)s';" % {
'name': name,
'host': host,
}, **kwargs)
return res.succeeded and (int(res) == 1)
|
Check if a MySQL user exists.
|
24,780 |
def offer_trades(self, offer_id, cursor=None, order='asc', limit=10):  # default order assumed; original literal stripped
endpoint = '/offers/{offer_id}/trades'.format(offer_id=offer_id)  # path taken from the docstring
params = self.__query_params(cursor=cursor, order=order, limit=limit)
return self.query(endpoint, params)
|
This endpoint represents all trades for a given offer.
`GET /offers/{offer_id}/trades{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/trades-for-offer.html>`_
:param int offer_id: The offer ID to get trades on.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:return: A list of effects on the given operation.
:rtype: dict
|
24,781 |
def is_fully_verified(self):
unverified_errors = TextLogError.objects.filter(
_metadata__best_is_verified=False,
step__job=self).count()
if unverified_errors:
logger.error("Job %r has unverified TextLogErrors", self)
return False
logger.info("Job %r is fully verified", self)
return True
|
Determine if this Job is fully verified based on the state of its Errors.
An Error (TextLogError or FailureLine) is considered Verified once its
related TextLogErrorMetadata has best_is_verified set to True. A Job
is then considered Verified once all its Errors' TextLogErrorMetadata
instances are set to True.
|
24,782 |
def size(self):
N = self.nmax + 1;
NC = N + self.mmax * (2 * N - self.mmax - 1);
assert NC == len(self._vec)
return NC
|
Total number of coefficients in the ScalarCoefs structure.
Example::
>>> sz = c.size
>>> N = c.nmax + 1
>>> L = N+ c.mmax * (2 * N - c.mmax - 1);
>>> assert sz == L
|
24,783 |
def getObjectId(self):
if self._object_id is not None:
return defer.succeed(self._object_id)
d = self.db.state.getObjectId(self.name,
"buildbot.master.BuildMaster")
@d.addCallback
def keep(id):
self._object_id = id
return id
return d
|
Return the object id for this master, for associating state with the
master.
@returns: ID, via Deferred
|
24,784 |
def get_dependencies_from_wheel_cache(ireq):
if ireq.editable or not is_pinned_requirement(ireq):
return
matches = WHEEL_CACHE.get(ireq.link, name_from_req(ireq.req))
if matches:
matches = set(matches)
if not DEPENDENCY_CACHE.get(ireq):
DEPENDENCY_CACHE[ireq] = [format_requirement(m) for m in matches]
return matches
return
|
Retrieves dependencies for the given install requirement from the wheel cache.
:param ireq: A single InstallRequirement
:type ireq: :class:`~pip._internal.req.req_install.InstallRequirement`
:return: A set of dependency lines for generating new InstallRequirements.
:rtype: set(str) or None
|
24,785 |
def current_index(self):
i = self._parent.proxy_model.mapToSource(self._parent.currentIndex())
return i
|
Get the currently selected index in the parent table view.
|
24,786 |
def loadFile(self, fileName):
if not QtCore.QFile(fileName).exists():
        msg = "File <b>{}</b> does not exist".format(fileName)
self.qteLogger.info(msg)
self.fileName = None
return
self.fileName = fileName
doc = popplerqt4.Poppler.Document.load(fileName)
doc.setRenderHint(popplerqt4.Poppler.Document.Antialiasing)
doc.setRenderHint(popplerqt4.Poppler.Document.TextAntialiasing)
hbox = QtGui.QVBoxLayout()
for ii in range(doc.numPages()):
pdf_img = doc.page(ii).renderToImage()
pdf_label = self.qteAddWidget(QtGui.QLabel())
pdf_label.setPixmap(QtGui.QPixmap.fromImage(pdf_img))
hbox.addWidget(pdf_label)
tmp = self.qteAddWidget(QtGui.QWidget(self))
tmp.setLayout(hbox)
self.qteScroll.setWidget(tmp)
|
Load and display the PDF file specified by ``fileName``.
|
24,787 |
def _is_master_running(self):
    # Lightweight check: look for the master's IPC socket file (name and directory assumed).
    ipc_file = 'workers.ipc'
    return os.path.exists(os.path.join(self.opts['sock_dir'], ipc_file))
|
Perform a lightweight check to see if the master daemon is running
Note, this may return a false positive if the master crashed or was
not shut down cleanly.
|
24,788 |
def robot_files(self):
result = []
for name in os.listdir(self.path):
fullpath = os.path.join(self.path, name)
if os.path.isdir(fullpath):
result.append(RobotFactory(fullpath, parent=self))
else:
if ((name.endswith(".txt") or name.endswith(".robot")) and
(name not in ("__init__.txt", "__init__.robot"))):
result.append(RobotFactory(fullpath, parent=self))
return result
|
Return a list of all folders, and test suite files (.txt, .robot)
|
24,789 |
def bbox(self):
return BoundingBox(self.slices[1].start, self.slices[1].stop,
self.slices[0].start, self.slices[0].stop)
|
The minimal `~photutils.aperture.BoundingBox` for the cutout
region with respect to the original (large) image.
|
24,790 |
def maverage(size):
size_inv = 1. / size
@tostream
def maverage_filter(sig, zero=0.):
data = deque((zero * size_inv for _ in xrange(size)), maxlen=size)
mean_value = zero
for el in sig:
mean_value -= data.popleft()
new_value = el * size_inv
data.append(new_value)
mean_value += new_value
yield mean_value
return maverage_filter
|
Moving average
This is the only strategy that uses a ``collections.deque`` object
instead of a ZFilter instance. Fast, but without extra capabilities such
as a frequency response plotting method.
Parameters
----------
size :
Data block window size. Should be an integer.
Returns
-------
A callable that accepts two parameters: a signal ``sig`` and the starting
memory element ``zero`` that behaves like the ``LinearFilter.__call__``
arguments. The output from that callable is a Stream instance, and has
no decimation applied.
See Also
--------
envelope :
Signal envelope (time domain) strategies.
|
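A usage sketch (assumes audiolazy's tostream decorator and its Python 2/3 compat helpers are available, so the returned filter yields a Stream that can be consumed like any iterable):

ma = maverage(4)                                # 4-sample moving average
sig = [1., 1., 1., 1., 5., 5., 5., 5.]
print(list(ma(sig)))
# [0.25, 0.5, 0.75, 1.0, 2.0, 3.0, 4.0, 5.0] -- ramps in from the zero memory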
24,791 |
def null_advance_strain(self, blocksize):
sample_step = int(blocksize * self.sample_rate)
csize = sample_step + self.corruption * 2
self.strain.roll(-sample_step)
self.strain[len(self.strain) - csize + self.corruption:] = 0
self.strain.start_time += blocksize
self.taper_immediate_strain = True
|
Advance and insert zeros
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
|
24,792 |
def __populate_symbols(self):
if not self._ptr:
raise BfdException("BFD not initialized")
try:
symbols = _bfd.get_symbols(self._ptr)
sections = {}
for section in self.sections:
sections[self.sections[section].index] = self.sections[section]
for symbol in symbols:
symbol_section_index = symbol[0]
symbol_name = symbol[1]
symbol_value = symbol[2]
symbol_flags = symbol[3]
symbol_flags = tuple(
[f for f in SYMBOL_FLAGS_LIST if symbol_flags & f == f] )
new_symbol = Symbol(
sections.get(symbol_section_index, None),
symbol_name,
symbol_value,
symbol_flags)
if new_symbol.section is None:
continue
symbol_address = new_symbol.section.vma + new_symbol.value
self._symbols[symbol_address] = new_symbol
del sections
    except BfdSectionException as err:
        raise BfdException("Exception on symbolic information parsing.")
|
Get a list of the symbols present in the bfd to populate our
internal list.
|
24,793 |
def calc_regenerated(self, lastvotetime):
    delta = datetime.utcnow() - datetime.strptime(lastvotetime, '%Y-%m-%dT%H:%M:%S')  # API timestamp format assumed
td = delta.days
ts = delta.seconds
tt = (td * 86400) + ts
return tt * 10000 / 86400 / 5
|
Uses a math formula to calculate the amount
of steem power that would have been regenerated
given the last vote time as a datetime-formatted string
|
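A usage sketch with a made-up timestamp (assumes `account` is an instance of the class above; full regeneration takes 5 days, so one day gives roughly 2000 of 10000):

from datetime import datetime, timedelta

last_vote = (datetime.utcnow() - timedelta(days=1)).strftime('%Y-%m-%dT%H:%M:%S')
print(account.calc_regenerated(last_vote))   # ~2000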
24,794 |
def generate_maximum_validator(maximum, exclusiveMaximum=False, **kwargs):
return functools.partial(validate_maximum, maximum=maximum, is_exclusive=exclusiveMaximum)
|
Generator function returning a callable for maximum value validation.
|
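A usage sketch (assumes a validate_maximum(value, maximum, is_exclusive) callable from the same validation module that raises on failure):

check_max = generate_maximum_validator(maximum=100, exclusiveMaximum=True)
check_max(99)     # passes silently
check_max(100)    # raises, because the bound is exclusive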
24,795 |
def parse_compound_list(path, compounds):
    context = FilePathContext(path)
    for compound_def in compounds:
        if 'include' in compound_def:
            file_format = compound_def.get('format')
            include_context = context.resolve(compound_def['include'])
            for compound in parse_compound_file(include_context, file_format):
                yield compound
        else:
            yield parse_compound(compound_def, context)
|
Parse a structured list of compounds as obtained from a YAML file
Yields CompoundEntries. Path can be given as a string or a context.
|
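A sketch of the structured input the parser above expects (the 'include'/'format' keys route an entry to parse_compound_file; the inline keys are illustrative):

compounds = [
    {'id': 'glc', 'name': 'Glucose', 'formula': 'C6H12O6'},   # parsed inline
    {'include': 'extra_compounds.tsv', 'format': 'tsv'},      # resolved relative to the model path
]
for entry in parse_compound_list('model.yaml', compounds):
    print(entry)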
24,796 |
def geo2apex(self, glat, glon, height):
    glat = helpers.checklat(glat, name='glat')
    alat, alon = self._geo2apex(glat, glon, height)
    if np.any(np.float64(alat) == -9999):
        warnings.warn('Apex latitude set to -9999 where undefined '
                      '(apex height may be < reference height)')
    return np.float64(alat), np.float64(alon)
|
Converts geodetic to modified apex coordinates.
Parameters
==========
glat : array_like
Geodetic latitude
glon : array_like
Geodetic longitude
height : array_like
Altitude in km
Returns
=======
alat : ndarray or float
Modified apex latitude
alon : ndarray or float
Modified apex longitude
|
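A usage sketch (assumes `apex_out` is an apexpy-style object exposing the method above; the coordinates are arbitrary and arrays broadcast as described):

import numpy as np

alat, alon = apex_out.geo2apex(glat=np.array([60.0, 65.0]),
                               glon=np.array([15.0, 20.0]),
                               height=110.0)
print(alat, alon)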
24,797 |
def create_item(self, item):
    item_dict = {
        'project_id': item.project_id,
        'from_user_id': item.from_user_id,
        'to_user_id': item.to_user_id,
        'auth_role': item.auth_role,
        'user_message': item.user_message
    }
    if item.share_user_ids:
        item_dict['share_user_ids'] = item.share_user_ids
data = json.dumps(item_dict)
resp = requests.post(self.make_url(item.destination), headers=self.json_headers, data=data)
self.check_response(resp)
return resp
|
Create a new item in D4S2 service for item at the specified destination.
:param item: D4S2Item data to use for creating a D4S2 item
:return: requests.Response containing the successful result
|
24,798 |
def update_expression_list(self):
self.expression_list = []
self.expression_keys = []
self.expression_order = []
self.expression_order, self.expression_list, self.expression_keys = zip(*sorted(zip(self.expression_order, self.expression_list, self.expression_keys)))
|
Extract a list of expressions from the dictionary of expressions.
|
24,799 |
def readDOE(serialize_output=True):
refDOE = [[[None]*16 for k_ in range(3)] for j_ in range(16)]
Schedule = [[[None]*16 for k_ in range(3)] for j_ in range(16)]
refBEM = [[[None]*16 for k_ in range(3)] for j_ in range(16)]
for i in range(16):
file_doe_name_bld = os.path.join("{}".format(DIR_DOE_PATH), "BLD{}".format(i+1),"BLD{}_BuildingSummary.csv".format(i+1))
list_doe1 = read_csv(file_doe_name_bld)
nFloor = str2fl(list_doe1[3][3:6])
glazing = str2fl(list_doe1[4][3:6])
hCeiling = str2fl(list_doe1[5][3:6])
ver2hor = str2fl(list_doe1[7][3:6])
AreaRoof = str2fl(list_doe1[8][3:6])
file_doe_name_zone = os.path.join("{}".format(DIR_DOE_PATH), "BLD{}".format(i+1),"BLD{}_ZoneSummary.csv".format(i+1))
list_doe2 = read_csv(file_doe_name_zone)
AreaFloor = str2fl([list_doe2[2][5],list_doe2[3][5],list_doe2[4][5]])
Volume = str2fl([list_doe2[2][6],list_doe2[3][6],list_doe2[4][6]])
AreaWall = str2fl([list_doe2[2][8],list_doe2[3][8],list_doe2[4][8]])
AreaWindow = str2fl([list_doe2[2][9],list_doe2[3][9],list_doe2[4][9]])
Occupant = str2fl([list_doe2[2][11],list_doe2[3][11],list_doe2[4][11]])
Light = str2fl([list_doe2[2][12],list_doe2[3][12],list_doe2[4][12]])
Elec = str2fl([list_doe2[2][13],list_doe2[3][13],list_doe2[4][13]])
Gas = str2fl([list_doe2[2][14],list_doe2[3][14],list_doe2[4][14]])
SHW = str2fl([list_doe2[2][15],list_doe2[3][15],list_doe2[4][15]])
Vent = str2fl([list_doe2[2][17],list_doe2[3][17],list_doe2[4][17]])
Infil = str2fl([list_doe2[2][20],list_doe2[3][20],list_doe2[4][20]])
file_doe_name_location = os.path.join("{}".format(DIR_DOE_PATH), "BLD{}".format(i+1),"BLD{}_LocationSummary.csv".format(i+1))
list_doe3 = read_csv(file_doe_name_location)
TypeWall = [list_doe3[3][4:20],list_doe3[14][4:20],list_doe3[25][4:20]]
RvalWall = str2fl([list_doe3[4][4:20],list_doe3[15][4:20],list_doe3[26][4:20]])
TypeRoof = [list_doe3[5][4:20],list_doe3[16][4:20],list_doe3[27][4:20]]
RvalRoof = str2fl([list_doe3[6][4:20],list_doe3[17][4:20],list_doe3[28][4:20]])
Uwindow = str2fl([list_doe3[7][4:20],list_doe3[18][4:20],list_doe3[29][4:20]])
SHGC = str2fl([list_doe3[8][4:20],list_doe3[19][4:20],list_doe3[30][4:20]])
HVAC = str2fl([list_doe3[9][4:20],list_doe3[20][4:20],list_doe3[31][4:20]])
HEAT = str2fl([list_doe3[10][4:20],list_doe3[21][4:20],list_doe3[32][4:20]])
COP = str2fl([list_doe3[11][4:20],list_doe3[22][4:20],list_doe3[33][4:20]])
EffHeat = str2fl([list_doe3[12][4:20],list_doe3[23][4:20],list_doe3[34][4:20]])
FanFlow = str2fl([list_doe3[13][4:20],list_doe3[24][4:20],list_doe3[35][4:20]])
file_doe_name_schedules = os.path.join("{}".format(DIR_DOE_PATH), "BLD{}".format(i+1),"BLD{}_Schedules.csv".format(i+1))
list_doe4 = read_csv(file_doe_name_schedules)
SchEquip = str2fl([list_doe4[1][6:30],list_doe4[2][6:30],list_doe4[3][6:30]])
SchLight = str2fl([list_doe4[4][6:30],list_doe4[5][6:30],list_doe4[6][6:30]])
SchOcc = str2fl([list_doe4[7][6:30],list_doe4[8][6:30],list_doe4[9][6:30]])
SetCool = str2fl([list_doe4[10][6:30],list_doe4[11][6:30],list_doe4[12][6:30]])
SetHeat = str2fl([list_doe4[13][6:30],list_doe4[14][6:30],list_doe4[15][6:30]])
SchGas = str2fl([list_doe4[16][6:30],list_doe4[17][6:30],list_doe4[18][6:30]])
SchSWH = str2fl([list_doe4[19][6:30],list_doe4[20][6:30],list_doe4[21][6:30]])
for j in range(3):
for k in range(16):
B = Building(
hCeiling[j],
1,
1,
0.1,
0.1,
Infil[j],
Vent[j]/1000.,
glazing[j],
Uwindow[j][k],
SHGC[j][k],
                    'AIR',        # cooling condensation system type (assumed default)
COP[j][k],
297,
297,
293,
293,
(HVAC[j][k]*1000.0)/AreaFloor[j],
EffHeat[j][k],
293)
B.heatCap = (HEAT[j][k]*1000.0)/AreaFloor[j]
B.Type = BLDTYPE[i]
B.Era = BUILTERA[j]
B.Zone = ZONETYPE[k]
refDOE[i][j][k] = B
Concrete = Material (1.311, 836.8 * 2240,"Concrete")
Insulation = Material (0.049, 836.8 * 265.0, "Insulation")
Gypsum = Material (0.16, 830.0 * 784.9, "Gypsum")
Wood = Material (0.11, 1210.0 * 544.62, "Wood")
Stucco = Material(0.6918, 837.0 * 1858.0, "Stucco")
if TypeWall[j][k] == "MassWall":
Rbase = 0.271087
Rins = RvalWall[j][k] - Rbase
D_ins = Rins * Insulation.thermalCond
if D_ins > 0.01:
thickness = [0.0254,0.0508,0.0508,0.0508,0.0508,D_ins,0.0127]
layers = [Stucco,Concrete,Concrete,Concrete,Concrete,Insulation,Gypsum]
else:
thickness = [0.0254,0.0508,0.0508,0.0508,0.0508,0.0127]
layers = [Stucco,Concrete,Concrete,Concrete,Concrete,Gypsum]
wall = Element(0.08,0.92,thickness,layers,0.,293.,0.,"MassWall")
alb = 0.2
emis = 0.9
thickness = [0.054,0.054]
concrete = Material (1.31, 2240.0*836.8)
mass = Element(alb,emis,thickness,[concrete,concrete],0,293,1,"MassFloor")
elif TypeWall[j][k] == "WoodFrame":
Rbase = 0.170284091
Rins = RvalWall[j][k] - Rbase
D_ins = Rins * Insulation.thermalCond
if D_ins > 0.01:
thickness = [0.01,D_ins,0.0127]
layers = [Wood,Insulation,Gypsum]
else:
thickness = [0.01,0.0127]
layers = [Wood,Gypsum]
wall = Element(0.22,0.92,thickness,layers,0.,293.,0.,"WoodFrameWall")
alb = 0.2
emis = 0.9
thickness = [0.05,0.05]
wood = Material(1.31, 2240.0*836.8)
mass = Element(alb,emis,thickness,[wood,wood],0.,293.,1.,"WoodFloor")
elif TypeWall[j][k] == "SteelFrame":
Rbase = 0.271087
Rins = RvalWall[j][k] - Rbase
D_ins = Rins * Insulation.thermalCond
if D_ins > 0.01:
thickness = [0.0254,0.0508,0.0508,0.0508,0.0508,D_ins,0.0127]
layers = [Stucco,Concrete,Concrete,Concrete,Concrete,Insulation,Gypsum]
else:
thickness = [0.0254,0.0508,0.0508,0.0508,0.0508,0.0127]
layers = [Stucco,Concrete,Concrete,Concrete,Concrete,Gypsum]
wall = Element(0.15,0.92,thickness,layers,0.,293.,0.,"SteelFrame")
alb = 0.2
emis = 0.93
thickness = [0.05,0.05]
mass = Element(alb,emis,thickness,[Concrete,Concrete],0.,293.,1.,"MassFloor")
elif TypeWall[j][k] == "MetalWall":
alb = 0.2
emis = 0.9
D_ins = max((RvalWall[j][k] * Insulation.thermalCond)/2, 0.01)
thickness = [D_ins,D_ins,0.0127]
materials = [Insulation,Insulation,Gypsum]
wall = Element(alb,emis,thickness,materials,0,293,0,"MetalWall")
alb = 0.2
emis = 0.9
thickness = [0.05, 0.05]
concrete = Material(1.31, 2240.0*836.8)
mass = Element(alb,emis,thickness,[concrete,concrete],0.,293.,1.,"MassFloor")
if TypeRoof[j][k] == "IEAD":
alb = 0.2
emis = 0.93
D_ins = max(RvalRoof[j][k] * Insulation.thermalCond/2.,0.01);
roof = Element(alb,emis,[D_ins,D_ins],[Insulation,Insulation],0.,293.,0.,"IEAD")
elif TypeRoof[j][k] == "Attic":
alb = 0.2
emis = 0.9
D_ins = max(RvalRoof[j][k] * Insulation.thermalCond/2.,0.01)
roof = Element(alb,emis,[D_ins,D_ins],[Insulation,Insulation],0.,293.,0.,"Attic")
elif TypeRoof[j][k] == "MetalRoof":
alb = 0.2
emis = 0.9
D_ins = max(RvalRoof[j][k] * Insulation.thermalCond/2.,0.01)
roof = Element(alb,emis,[D_ins,D_ins],[Insulation,Insulation],0.,293.,0.,"MetalRoof")
refBEM[i][j][k] = BEMDef(B, mass, wall, roof, 0.0)
refBEM[i][j][k].building.FanMax = FanFlow[j][k]
Schedule[i][j][k] = SchDef()
Schedule[i][j][k].Elec = SchEquip
Schedule[i][j][k].Light = SchLight
Schedule[i][j][k].Gas = SchGas
Schedule[i][j][k].Occ = SchOcc
Schedule[i][j][k].Cool = SetCool
Schedule[i][j][k].Heat = SetHeat
Schedule[i][j][k].SWH = SchSWH
Schedule[i][j][k].Qelec = Elec[j]
Schedule[i][j][k].Qlight = Light[j]
Schedule[i][j][k].Nocc = Occupant[j]/AreaFloor[j]
Schedule[i][j][k].Qgas = Gas[j]
Schedule[i][j][k].Vent = Vent[j]/1000.0
Schedule[i][j][k].Vswh = SHW[j]/AreaFloor[j]
if serialize_output:
        pkl_file_path = os.path.join(DIR_CURR, 'refdata', 'readDOE.pkl')
        pickle_readDOE = open(pkl_file_path, 'wb')
pickle.dump(refDOE, pickle_readDOE,1)
pickle.dump(refBEM, pickle_readDOE,1)
pickle.dump(Schedule, pickle_readDOE,1)
pickle_readDOE.close()
return refDOE, refBEM, Schedule
|
Read csv files of DOE buildings
Sheet 1 = BuildingSummary
Sheet 2 = ZoneSummary
Sheet 3 = LocationSummary
Sheet 4 = Schedules
Note BLD8 & 10 = school
Then make a matrix of ref data as doubly nested lists [16, 3, 16]:
matrix refDOE = Building objs
matrix Schedule = SchDef objs
matrix refBEM (16,3,16) = BEMDef
where:
[16,3,16] is Type = 1-16, Era = 1-3, climate zone = 1-16
i.e.
Type: FullServiceRestaurant, Era: Pre80, Zone: 6A Minneapolis
Nested tree:
[TYPE_1:
ERA_1:
CLIMATE_ZONE_1
...
CLIMATE_ZONE_16
ERA_2:
CLIMATE_ZONE_1
...
CLIMATE_ZONE_16
...
ERA_3:
CLIMATE_ZONE_1
...
CLIMATE_ZONE_16]
|