Unnamed: 0 | code | docstring |
---|---|---|
8,600 | def on_button_release(self, event):
affected_models = {}
for inmotion in self._movable_items:
inmotion.move((event.x, event.y))
rel_pos = gap_helper.calc_rel_pos_to_parent(self.view.canvas, inmotion.item,
inmotion.item.handles()[NW])
if isinstance(inmotion.item, StateView):
state_v = inmotion.item
state_m = state_v.model
self.view.canvas.request_update(state_v)
if state_m.get_meta_data_editor()['rel_pos'] != rel_pos:
state_m.set_meta_data_editor('rel_pos', rel_pos)
affected_models[state_m] = ("position", True, state_v)
elif isinstance(inmotion.item, NameView):
state_v = inmotion.item
state_m = self.view.canvas.get_parent(state_v).model
self.view.canvas.request_update(state_v)
if state_m.get_meta_data_editor()['name']['rel_pos'] != rel_pos:
state_m.set_meta_data_editor('name.rel_pos', rel_pos)
affected_models[state_m] = ("name_position", False, state_v)
elif isinstance(inmotion.item, TransitionView):
transition_v = inmotion.item
transition_m = transition_v.model
self.view.canvas.request_update(transition_v)
current_waypoints = gap_helper.get_relative_positions_of_waypoints(transition_v)
old_waypoints = transition_m.get_meta_data_editor()['waypoints']
if current_waypoints != old_waypoints:
transition_m.set_meta_data_editor('waypoints', current_waypoints)
affected_models[transition_m] = ("waypoints", False, transition_v)
if len(affected_models) == 1:
model = next(iter(affected_models))
change, affects_children, view = affected_models[model]
self.view.graphical_editor.emit('meta_data_changed', model, change, affects_children)
elif len(affected_models) > 1:
common_parents = None
for change, affects_children, view in affected_models.values():
parents_of_view = set(self.view.canvas.get_ancestors(view))
if common_parents is None:
common_parents = parents_of_view
else:
common_parents = common_parents.intersection(parents_of_view)
assert len(common_parents) > 0, "The selected elements do not have common parent element"
for state_v in common_parents:
children_of_state_v = self.view.canvas.get_all_children(state_v)
if any(common_parent in children_of_state_v for common_parent in common_parents):
continue
self.view.graphical_editor.emit('meta_data_changed', state_v.model, "positions", True)
break
if not affected_models and self._old_selection is not None:
self.view.unselect_all()
self.view.select_item(self._old_selection)
self.view.handle_new_selection(self._item)
self._move_name_v = False
self._old_selection = None
return super(MoveItemTool, self).on_button_release(event) | Write back changes
If one or more items have been moved, the new positions are stored in the corresponding meta data and a signal
notifying the change is emitted.
:param event: The button event |
8,601 | def attribute_exists(self, attribute, section):
if foundations.namespace.remove_namespace(attribute, root_only=True) in self.get_attributes(section,
strip_namespaces=True):
LOGGER.debug("> attribute exists in section.".format(attribute, section))
return True
else:
LOGGER.debug("> attribute doesn{1}' section.".format(attribute, section))
return False | Checks if given attribute exists.
Usage::
>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
>>> sections_file_parser = SectionsFileParser()
>>> sections_file_parser.content = content
>>> sections_file_parser.parse()
<foundations.parsers.SectionsFileParser object at 0x234564563>
>>> sections_file_parser.attribute_exists("Attribute 1", "Section A")
True
>>> sections_file_parser.attribute_exists("Attribute 2", "Section A")
False
:param attribute: Attribute to check existence.
:type attribute: unicode
:param section: Section to search attribute into.
:type section: unicode
:return: Attribute existence.
:rtype: bool |
8,602 | def preShear(self, h, v):
a, b = self.a, self.b
self.a += v * self.c
self.b += v * self.d
self.c += h * a
self.d += h * b
return self | Calculate pre shearing and replace current matrix. |
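The pre-shear above updates (a, b, c, d) in place; it is equivalent to left-multiplying the current 2x2 linear part by a shear matrix. A minimal NumPy sketch (a hypothetical standalone helper, not the library's class) showing the same arithmetic:

import numpy as np

def pre_shear(linear_part, h, v):
    # linear_part holds [[a, b], [c, d]]; composing the shear [[1, v], [h, 1]]
    # on the left reproduces the in-place updates a += v*c, b += v*d,
    # c += h*a_old, d += h*b_old from the method above.
    shear = np.array([[1.0, v], [h, 1.0]])
    return shear @ linear_part

m = np.array([[2.0, 0.0], [0.0, 3.0]])   # a=2, b=0, c=0, d=3
print(pre_shear(m, h=0.5, v=0.25))        # [[2.   0.75] [1.   3.  ]]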
8,603 | def add_target(self, name=None):
def deco(func):
def nestfunc(control):
destdir = os.path.join(self.dest_dir, control['OUTPUT_DIR'])
return [func(destdir, control)]
key = name or func.__name__
self.nest.add(key, nestfunc, create_dir=False)
self._register_alias(key)
return func
return deco | Add an SCons target to this nest.
The function decorated will be immediately called with each of the
output directories and current control dictionaries. Each result will
be added to the respective control dictionary for later nests to
access.
:param name: Name for the target in the nest (default: function name). |
8,604 | def logstop(self):
if self.logfile is not None:
self.logfile.close()
self.logfile = None
else:
print "Logging hadn't been started."
self.log_active = False | Fully stop logging and close log file.
In order to start logging again, a new logstart() call needs to be
made, possibly (though not necessarily) with a new filename, mode and
other options. |
8,605 | def update_ip(self, ip, record_type='A', domains=None, subdomains=None):
if domains is None:
domains = self.get_domains()
elif sys.version_info < (3, 0):
if isinstance(domains, (str, unicode)):
domains = [domains]
elif sys.version_info >= (3, 0):
if isinstance(domains, str):
domains = [domains]
else:
domains = list(domains)
for domain in domains:
a_records = self.get_records(domain, record_type=record_type)
for record in a_records:
r_name = str(record['name'])
r_ip = str(record['data'])
if not r_ip == ip:
if (subdomains is None or
(isinstance(subdomains, (unicode, str)) and r_name == subdomains) or
r_name in subdomains):
record.update(data=str(ip))
self.update_record(domain, record)
return True | Update the IP address in all records, specified by type, to the value of ip. Returns True if no
exceptions occurred during the update. If no domains are provided, all domains returned from
self.get_domains() will be updated. By default, only A records are updated.
:param record_type: The type of records to update (eg. 'A')
:param ip: The new IP address (eg. '123.1.2.255')
:param domains: A list of the domains you want to update (eg. ['123.com','abc.net'])
:param subdomains: A list of the subdomains you want to update (eg. ['www','dev'])
:type record_type: str or unicode
:type ip: str or unicode
:type domains: str, list of str
:type subdomains: str, list of str
:return: True if no exceptions occurred |
8,606 | def initialize(self, runtime=None):
if self._runtime is not None:
raise IllegalState()
self._runtime = runtime
config = runtime.get_configuration()
parameter_id = Id()
host = config.get_value_by_parameter(parameter_id).get_string_value()
if host is not None:
self._host = host
parameter_id = Id()
app_key = config.get_value_by_parameter(parameter_id).get_string_value()
if app_key is not None:
self._app_key = app_key | Initializes this manager.
A manager is initialized once at the time of creation.
arg: runtime (osid.OsidRuntimeManager): the runtime
environment
raise: CONFIGURATION_ERROR - an error with implementation
configuration
raise: ILLEGAL_STATE - this manager has already been
initialized by the OsidRuntime
raise: NullArgument - runtime is null
raise: OperationFailed - unable to complete request
compliance: mandatory - This method must be implemented.
implementation notes: In addition to loading its runtime
configuration an implementation may create shared resources such
as connection pools to be shared among all sessions of this
service and released when this manager is closed. Providers must
thread-protect any data stored in the manager. To maximize
interoperability, providers should not honor a second call to
initialize() and must set an ILLEGAL_STATE error. |
8,607 | def _encrypt(self, archive):
arc_name = archive.replace("sosreport-", "secured-sosreport-")
arc_name += ".gpg"
enc_cmd = "gpg --batch -o %s " % arc_name
env = None
if self.enc_opts["key"]:
enc_cmd += "--trust-model always -e -r %s " % self.enc_opts["key"]
enc_cmd += archive
if self.enc_opts["password"]:
passwd = "%s" % self.enc_opts["password"].replace("')
env = {"sos_gpg": passwd}
enc_cmd += "-c --passphrase-fd 0 "
enc_cmd = "/bin/bash -c \"echo $sos_gpg | %s\"" % enc_cmd
enc_cmd += archive
r = sos_get_command_output(enc_cmd, timeout=0, env=env)
if r["status"] == 0:
return arc_name
elif r["status"] == 2:
if self.enc_opts["key"]:
msg = "Specified key not in keyring"
else:
msg = "Could not read passphrase"
else:
msg = "gpg exited with code %s" % r["status"]
raise Exception(msg) | Encrypts the compressed archive using GPG.
If encryption fails for any reason, it should be logged by sos but not
cause execution to stop. The assumption is that the unencrypted archive
would still be of use to the user, and/or that the end user has another
means of securing the archive.
Returns the name of the encrypted archive, or raises an exception to
signal that encryption failed and the unencrypted archive name should
be used. |
8,608 | def request(
self,
url: str,
method: str,
raise_for_status: bool = True,
path_to_errors: tuple = None,
*args,
**kwargs
) -> tuple:
session = kwargs.get("session", requests.Session())
log.debug(
"sending a %s request to %s with args: %s kwargs: %s",
method.upper(),
url,
args,
kwargs,
)
rsp = session.request(method, url, *args, **kwargs)
log.debug("response: %s", rsp.text)
errors = None
if raise_for_status:
try:
rsp.raise_for_status()
except requests.RequestException as e:
if e.response is not None:
rsp = e.response
if path_to_errors:
try:
errors = rsp.json()
for arg in path_to_errors:
if errors.get(arg):
errors = errors[arg]
except json.decoder.JSONDecodeError:
errors = [rsp.text]
else:
errors = [rsp.text]
if not isinstance(errors, list):
errors = [errors]
else:
rsp = None
errors = [str(e)]
log.debug("errors when trying to access %s: %s", url, errors)
log.debug("returning response %s, errors %s", rsp, errors)
return rsp, errors | A wrapper method for :meth:`~requests.Session.request`, which adds some defaults and logging
:param url: The URL to send the reply to
:param method: The method to use
:param raise_for_status: Should an exception be raised for a failed response. Default is **True**
:param args: Additional args to be sent to the request
:param kwargs: Additional args to be sent to the request
:return: Dict of response body or original :class:`requests.Response` |
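The interesting part of the wrapper is how it digs error details out of a failed response. A standalone sketch of that extraction step (names here are illustrative, not part of the library's API):

def extract_errors(body, path_to_errors):
    # Walk the JSON error body along the given key path; fall back to the
    # untouched body and normalize to a list, mirroring the logic above.
    errors = body
    for key in path_to_errors:
        if isinstance(errors, dict) and errors.get(key):
            errors = errors[key]
    return errors if isinstance(errors, list) else [errors]

print(extract_errors({"error": {"errors": ["bad token"]}}, ("error", "errors")))
# ['bad token']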
8,609 | def p_bound_terminal(self, p):
if p[1][0].literal in ['*', '**']:
p[0] = [_Segment(_BINDING, '$%d' % self.binding_var_count),
p[1][0],
_Segment(_END_BINDING, '')]
self.binding_var_count += 1
else:
p[0] = p[1] | bound_terminal : unbound_terminal |
8,610 | def prepare_video_params(self, title=None, tags='', description='',
copyright_type='original', public_type='all',
category=None, watch_password=None,
latitude=None, longitude=None, shoot_time=None
):
params = {}
if title is None:
title = self.file_name
elif len(title) > 80:
title = title[:80]
if len(description) > 2000:
description = description[0:2000]
params['title'] = title
params['tags'] = tags
params['description'] = description
params['copyright_type'] = copyright_type
params['public_type'] = public_type
if category:
params['category'] = category
if watch_password:
params['watch_password'] = watch_password
if latitude:
params['latitude'] = latitude
if longitude:
params['longitude'] = longitude
if shoot_time:
params['shoot_time'] = shoot_time
return params | Utility method for creating the video params needed for upload.
Only need to provide a minimum of two essential parameters:
title and tags, other video params are optional. All params spec
see: http://cloud.youku.com/docs?id=110#create .
Args:
title: string, 2-50 characters.
tags: string, 1-10 tags joined with commas.
description: string, less than 2000 characters.
copyright_type: string, 'original' or 'reproduced'
public_type: string, 'all' or 'friend' or 'password'
watch_password: string, if public_type is password.
latitude: double.
longitude: double.
shoot_time: datetime.
Returns:
dict params that upload/create method need. |
8,611 | def detect_fts(conn, table):
"Detect if table has a corresponding FTS virtual table and return it"
rows = conn.execute(detect_fts_sql(table)).fetchall()
if len(rows) == 0:
return None
else:
return rows[0][0] | Detect if table has a corresponding FTS virtual table and return it |
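`detect_fts_sql` is not shown in this row; an illustrative (assumed, possibly different from the library's actual query) version would look up FTS virtual tables in `sqlite_master` that reference the given table as their content table:

def detect_fts_sql(table):
    # Illustrative sketch only: FTS virtual tables have rootpage = 0 and a
    # CREATE VIRTUAL TABLE statement that names the content table.
    return """
        select name from sqlite_master
        where rootpage = 0
        and sql like '%VIRTUAL TABLE%USING FTS%content=%{table}%'
    """.format(table=table)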
8,612 | def generate(self):
sampled_arr = np.zeros((self.__batch_size, self.__channel, self.__seq_len, self.__dim))
for batch in range(self.__batch_size):
for i in range(len(self.__program_list)):
program_key = self.__program_list[i]
key = np.random.randint(low=0, high=len(self.__midi_df_list))
midi_df = self.__midi_df_list[key]
midi_df = midi_df[midi_df.program == program_key]
if midi_df.shape[0] < self.__seq_len:
continue
row = np.random.uniform(
low=midi_df.start.min(),
high=midi_df.end.max() - (self.__seq_len * self.__time_fraction)
)
for seq in range(self.__seq_len):
start = row + (seq * self.__time_fraction)
end = row + ((seq+1) * self.__time_fraction)
df = midi_df[(start <= midi_df.start) & (midi_df.start <= end)]
sampled_arr[batch, i, seq] = self.__convert_into_feature(df)
return sampled_arr | Generate noise samples.
Returns:
`np.ndarray` of samples. |
8,613 | def fields(cls):
if not isclass(cls):
raise TypeError("Passed object must be a class.")
attrs = getattr(cls, "__attrs_attrs__", None)
if attrs is None:
raise NotAnAttrsClassError(
"{cls!r} is not an attrs-decorated class.".format(cls=cls)
)
return attrs | Return the tuple of ``attrs`` attributes for a class.
The tuple also allows accessing the fields by their names (see below for
examples).
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
:rtype: tuple (with name accessors) of :class:`attr.Attribute`
.. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
by name. |
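A quick usage sketch of `fields()` with the name-based access the docstring mentions (standard `attrs` behavior):

import attr

@attr.s
class Point(object):
    x = attr.ib(default=0)
    y = attr.ib(default=0)

flds = attr.fields(Point)
print([f.name for f in flds])   # ['x', 'y']
print(flds.x.default)           # 0 -- the returned tuple also supports access by field name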
8,614 | def fix_music(file_name):
setup()
if not Py3:
file_name = file_name.encode()
tags = File(file_name)
log.log(file_name)
log.log()
try:
artist, album, song_name, lyrics, match_bool, score = get_details_spotify(
file_name)
except Exception:
artist, album, song_name, lyrics, match_bool, score = get_details_letssingit(
file_name)
try:
log.log_indented()
albumart = albumsearch.img_search_google(artist + ' ' + album)
except Exception:
log.log_indented()
albumart = albumsearch.img_search_bing(artist + ' ' + album)
if match_bool:
add_albumart(albumart, file_name)
add_details(file_name, song_name, artist, album, lyrics)
try:
rename(file_name, artist + ' - ' + song_name + '.mp3')
except Exception:
log.log_error("Couldnt find appropriate details of your song", indented=True)
log.log("Match score: %s/10.0" % round(score * 10, 1))
log.log(LOG_LINE_SEPERATOR)
log.log_success() | Searches for '.mp3' files in directory (optionally recursive)
and checks whether they already contain album art and album name tags or not. |
8,615 | def wait_until_finished(
self, refresh_period=DEFAULT_TASK_INSTANCE_WAIT_REFRESH_PERIOD
):
return self.manager.wait_until_finished(
uuid=self.uuid, refresh_period=refresh_period
) | Wait until a task instance with the given UUID is finished.
Args:
refresh_period (int, optional): How many seconds to wait
before checking the task's status. Defaults to 5
seconds.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
This task instance model after it finished. |
8,616 | def refresh(self):
logger.debug("refresh user interface")
try:
with self.refresh_lock:
self.draw_screen()
except AssertionError:
logger.warning("application is not running")
pass | explicitly refresh the user interface; useful when changing widgets dynamically |
8,617 | def from_path(cls, path, suffix=''):
def _get_filepath(filename, warning, path=path, suffix=suffix):
paths = glob.glob(os.path.join(path, filename + suffix + '*'))
if not paths:
warnings.warn(warning)
return None
if len(paths) > 1:
paths.sort(reverse=True)
warnings.warn('Multiple files detected, using {}'.format(os.path.basename(path)))
path = paths[0]
return path
chgcar_path = _get_filepath('CHGCAR', 'Could not find CHGCAR!')
chgcar = Chgcar.from_file(chgcar_path)
aeccar0_path = _get_filepath('AECCAR0', 'Could not find AECCAR0, interpret Bader results with caution.')
aeccar0 = Chgcar.from_file(aeccar0_path) if aeccar0_path else None
aeccar2_path = _get_filepath('AECCAR2', 'Could not find AECCAR2, interpret Bader results with caution.')
aeccar2 = Chgcar.from_file(aeccar2_path) if aeccar2_path else None
chgcar_ref = aeccar0.linear_add(aeccar2) if (aeccar0 and aeccar2) else None
return cls(chgcar.structure, chgcar, chgcar_ref) | Convenience method to run critic2 analysis on a folder containing
typical VASP output files.
This method will:
1. Look for files CHGCAR, AECCAR0, AECCAR2, POTCAR or their gzipped
counterparts.
2. If AECCAR* files are present, constructs a temporary reference
file as AECCAR0 + AECCAR2
3. Runs critic2 analysis twice: once for charge, and a second time
for the charge difference (magnetization density).
:param path: path to folder to search in
:param suffix: specific suffix to look for (e.g. '.relax1' for
'CHGCAR.relax1.gz')
:return: |
8,618 | def close_event(self, id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.close_event_with_http_info(id, **kwargs)
else:
(data) = self.close_event_with_http_info(id, **kwargs)
return data | Close a specific event # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.close_event(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerEvent
If the method is called asynchronously,
returns the request thread. |
8,619 | def get_binary_property(value, is_bytes=False):
obj = unidata.ascii_binary if is_bytes else unidata.unicode_binary
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['binary'].get(negated, negated)
else:
value = unidata.unicode_alias['binary'].get(value, value)
return obj[value] | Get `BINARY` property. |
8,620 | def outgoing_args(self, nodeid):
_vars = self._vars
_hcons = self._hcons
args = self.args(nodeid)
for arg, val in list(args.items()):
if not (val in _hcons or IVARG_ROLE in refs or in refs):
del args[arg]
return args | Return the arguments going from *nodeid* to other predications.
Valid arguments include regular variable arguments and scopal
(label-selecting or HCONS) arguments. MOD/EQ
links, intrinsic arguments, and constant arguments are not
included.
Args:
nodeid: the nodeid of the EP that is the arguments' source
Returns:
dict: `{role: tgt}` |
8,621 | def console_get_char(con: tcod.console.Console, x: int, y: int) -> int:
return lib.TCOD_console_get_char(_console(con), x, y) | Return the character at the x,y of this console.
.. deprecated:: 8.4
Array access performs significantly faster than using this function.
See :any:`Console.ch`. |
8,622 | def gtd7(Input, flags, output):
mn3 = 5
zn3 = [32.5,20.0,15.0,10.0,0.0]
mn2 = 4
zn2 = [72.5,55.0,45.0,32.5]
zmix = 62.5
soutput = nrlmsise_output()
tselec(flags);
xlat=Input.g_lat;
if (flags.sw[2]==0):
xlat=45.0;
glatf(xlat, gsurf, re);
xmm = pdm[2][4];
if (Input.alt>zn2[0]):
altt=Input.alt;
else:
altt=zn2[0];
tmp=Input.alt;
Input.alt=altt;
gts7(Input, flags, soutput);
altt=Input.alt;
Input.alt=tmp;
if (flags.sw[0]):
dm28m= dm28*1.0E6;
else:
dm28m = dm28;
output.t[0]=soutput.t[0];
output.t[1]=soutput.t[1];
if (Input.alt>=zn2[0]):
for i in range(9):
output.d[i]=soutput.d[i];
return
meso_tgn2[0]=meso_tgn1[1];
meso_tn2[0]=meso_tn1[4];
meso_tn2[1]=pma[0][0]*pavgm[0]/(1.0-flags.sw[20]*glob7s(pma[0], Input, flags));
meso_tn2[2]=pma[1][0]*pavgm[1]/(1.0-flags.sw[20]*glob7s(pma[1], Input, flags));
meso_tn2[3]=pma[2][0]*pavgm[2]/(1.0-flags.sw[20]*flags.sw[22]*glob7s(pma[2], Input, flags));
meso_tgn2[1]=pavgm[8]*pma[9][0]*(1.0+flags.sw[20]*flags.sw[22]*glob7s(pma[9], Input, flags))*meso_tn2[3]*meso_tn2[3]/(pow((pma[2][0]*pavgm[2]),2.0));
meso_tn3[0]=meso_tn2[3];
if (Input.alt<zn3[0]):
meso_tgn3[0]=meso_tgn2[1];
meso_tn3[1]=pma[3][0]*pavgm[3]/(1.0-flags.sw[22]*glob7s(pma[3], Input, flags));
meso_tn3[2]=pma[4][0]*pavgm[4]/(1.0-flags.sw[22]*glob7s(pma[4], Input, flags));
meso_tn3[3]=pma[5][0]*pavgm[5]/(1.0-flags.sw[22]*glob7s(pma[5], Input, flags));
meso_tn3[4]=pma[6][0]*pavgm[6]/(1.0-flags.sw[22]*glob7s(pma[6], Input, flags));
meso_tgn3[1]=pma[7][0]*pavgm[7]*(1.0+flags.sw[22]*glob7s(pma[7], Input, flags)) *meso_tn3[4]*meso_tn3[4]/(pow((pma[6][0]*pavgm[6]),2.0));
dmc=0;
if (Input.alt>zmix):
dmc = 1.0 - (zn2[0]-Input.alt)/(zn2[0] - zmix);
dz28=soutput.d[2];
dmr=soutput.d[2] / dm28m - 1.0;
tz = [0.0]
output.d[2]=densm(Input.alt,dm28m,xmm, tz, mn3, zn3, meso_tn3, meso_tgn3, mn2, zn2, meso_tn2, meso_tgn2);
output.d[2]=output.d[2] * (1.0 + dmr*dmc);
dmr = soutput.d[0] / (dz28 * pdm[0][1]) - 1.0;
output.d[0] = output.d[2] * pdm[0][1] * (1.0 + dmr*dmc);
output.d[1] = 0;
output.d[8] = 0;
dmr = soutput.d[3] / (dz28 * pdm[3][1]) - 1.0;
output.d[3] = output.d[2] * pdm[3][1] * (1.0 + dmr*dmc);
dmr = soutput.d[4] / (dz28 * pdm[4][1]) - 1.0;
output.d[4] = output.d[2] * pdm[4][1] * (1.0 + dmr*dmc);
output.d[6] = 0;
output.d[7] = 0;
output.d[5] = 1.66E-24 * (4.0 * output.d[0] + 16.0 * output.d[1] + 28.0 * output.d[2] + 32.0 * output.d[3] + 40.0 * output.d[4] + output.d[6] + 14.0 * output.d[7]);
if (flags.sw[0]):
output.d[5]=output.d[5]/1000;
global dd
dd = densm(Input.alt, 1.0, 0, tz, mn3, zn3, meso_tn3, meso_tgn3, mn2, zn2, meso_tn2, meso_tgn2);
output.t[1]=tz[0];
return | The standard model subroutine (GTD7) always computes the
‘‘thermospheric’’ mass density by explicitly summing the masses of
the species in equilibrium at the thermospheric temperature T(z). |
8,623 | def calculate_sunrise_sunset(self, month, day, depression=0.833,
is_solar_time=False):
datetime = DateTime(month, day, hour=12, leap_year=self.is_leap_year)
return self.calculate_sunrise_sunset_from_datetime(datetime,
depression,
is_solar_time) | Calculate sunrise, noon and sunset.
Return:
A dictionary. Keys are ("sunrise", "noon", "sunset") |
8,624 | def parse_pattern(pattern):
if isinstance(pattern, NumberPattern):
return pattern
def _match_number(pattern):
rv = number_re.search(pattern)
if rv is None:
raise ValueError('Invalid number pattern %r' % pattern)
return rv.groups()
pos_pattern = pattern
if ';' in pattern:
pos_pattern, neg_pattern = pattern.split(';', 1)
pos_prefix, number, pos_suffix = _match_number(pos_pattern)
neg_prefix, _, neg_suffix = _match_number(neg_pattern)
else:
pos_prefix, number, pos_suffix = _match_number(pos_pattern)
neg_prefix = '-' + pos_prefix
neg_suffix = pos_suffix
if 'E' in number:
number, exp = number.split('E', 1)
else:
exp = None
if '@' in number:
if '.' in number and '0' in number:
raise ValueError('Significant digit patterns can not contain '
'"@" or "0"')
if '.' in number:
integer, fraction = number.rsplit('.', 1)
else:
integer = number
fraction = ''
def parse_precision(p):
min = max = 0
for c in p:
if c in '@0':
min += 1
max += 1
elif c == '#':
max += 1
elif c == ',':
continue
else:
break
return min, max
int_prec = parse_precision(integer)
frac_prec = parse_precision(fraction)
if exp:
exp_plus = exp.startswith('+')
exp = exp.lstrip('+')
exp_prec = parse_precision(exp)
else:
exp_plus = None
exp_prec = None
grouping = babel.numbers.parse_grouping(integer)
return NumberPattern(pattern, (pos_prefix, neg_prefix),
(pos_suffix, neg_suffix), grouping,
int_prec, frac_prec,
exp_prec, exp_plus) | Parse number format patterns |
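A small usage sketch, assuming the standard public Babel API (`babel.numbers.parse_pattern`) that this function implements:

from babel.numbers import parse_pattern

p = parse_pattern('#,##0.00;-#,##0.00')
print(p.int_prec)                       # (1, 3): min/max integer digits from '#,##0'
print(p.frac_prec)                      # (2, 2): exactly two fraction digits from '00'
print(p.apply(1234.5, locale='en_US'))  # '1,234.50'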
8,625 | def enviar_dados_venda(self, dados_venda):
resp = self._http_post('enviardadosvenda',
dados_venda=dados_venda.documento())
conteudo = resp.json()
return RespostaEnviarDadosVenda.analisar(conteudo.get('retorno')) | Overrides :meth:`~satcfe.base.FuncoesSAT.enviar_dados_venda`.
:return: A SAT response specialized for ``EnviarDadosVenda``.
:rtype: satcfe.resposta.enviardadosvenda.RespostaEnviarDadosVenda |
8,626 | def customCompute(self, recordNum, patternNZ, classification):
if not hasattr(self, "_computeFlag"):
self._computeFlag = False
if self._computeFlag:
warnings.simplefilter(, DeprecationWarning)
warnings.warn("The customCompute() method should not be "
"called at the same time as the compute() "
"method. The compute() method is called "
"whenever network.run() is called.",
DeprecationWarning)
return self._sdrClassifier.compute(recordNum,
patternNZ,
classification,
self.learningMode,
self.inferenceMode) | Just return the inference value from one input sample. The actual
learning happens in compute() -- if, and only if learning is enabled --
which is called when you run the network.
.. warning:: This method is deprecated and exists only to maintain backward
compatibility. This method is deprecated, and will be removed. Use
:meth:`nupic.engine.Network.run` instead, which will call
:meth:`~nupic.regions.sdr_classifier_region.compute`.
:param recordNum: (int) Record number of the input sample.
:param patternNZ: (list) of the active indices from the output below
:param classification: (dict) of the classification information:
* ``bucketIdx``: index of the encoder bucket
* ``actValue``: actual value going into the encoder
:returns: (dict) containing inference results, one entry for each step in
``self.steps``. The key is the number of steps, the value is an
array containing the relative likelihood for each ``bucketIdx``
starting from 0.
For example:
::
{'actualValues': [0.0, 1.0, 2.0, 3.0]
1 : [0.1, 0.3, 0.2, 0.7]
4 : [0.2, 0.4, 0.3, 0.5]} |
8,627 | def clear(self):
self.reg = np.zeros((self.m,), dtype=np.int8) | Reset the current HyperLogLog to empty. |
8,628 | def enable_cloud_integration(self, id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.enable_cloud_integration_with_http_info(id, **kwargs)
else:
(data) = self.enable_cloud_integration_with_http_info(id, **kwargs)
return data | Enable a specific cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.enable_cloud_integration(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread. |
8,629 | def get_paths(folder):
folder = pathlib.Path(folder).resolve()
files = folder.rglob("*_phase.txt")
return sorted(files) | Return *_phase.txt files in `folder` |
8,630 | def get_cube(self, name):
return Cube(self.get_engine(), name, self.get_cube_model(name)) | Given a cube name, construct that cube and return it. Do not
overwrite this method unless you need to. |
8,631 | def load_if(s):
is_data_file = s.endswith('.yml') or s.endswith('.json')
return load(s) if is_data_file else loads(s) | Load either a filename, or a string representation of yml/json. |
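A short usage sketch (assuming `load` and `loads` are the module's file and string loaders, e.g. thin wrappers over a YAML/JSON parser):

config = load_if('settings.yml')     # ends with a data-file suffix -> load()
inline = load_if('{"retries": 3}')   # anything else is parsed as a string -> loads()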
8,632 | def __download_from_s3(self, key, dest_dir):
log = logging.getLogger(self.cls_logger + '.__download_from_s3')
filename = key.split('/')[-1]
if filename is None:
log.error('Could not determine a filename from key: %s', key)
return None
destination = dest_dir + '/' + filename
log.info('Attempting to download key %s from bucket %s to: %s',
key, self.bucket_name, destination)
max_tries = 10
count = 1
while count <= max_tries:
log.info('Attempting to download key %s: try %s of %s', key,
count, max_tries)
try:
self.s3client.download_file(
Bucket=self.bucket_name, Key=key, Filename=destination)
except ClientError:
if count >= max_tries:
_, ex, trace = sys.exc_info()
msg = 'Unable to download key {k} from bucket {b}: {e}'.format(
k=key, b=self.bucket_name, e=str(ex))
log.error(msg)
raise S3UtilError(msg)
else:
log.warn('Download failed, re-trying...')
count += 1
time.sleep(5)
continue
else:
log.info(,
key,
self.bucket_name,
destination)
return destination | Private method for downloading from S3
This private helper method takes a key and the full path to
the destination directory, assumes that the args have been
validated by the public caller methods, and attempts to
download the specified key to the dest_dir.
:param key: (str) S3 key for the file to be downloaded
:param dest_dir: (str) Full path destination directory
:return: (str) Downloaded file destination if the file was
downloaded successfully, None otherwise. |
8,633 | def plot_projected_dos(self, pdos_indices=None, legend=None):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.xaxis.set_ticks_position()
ax.yaxis.set_ticks_position()
ax.xaxis.set_tick_params(which=, direction=)
ax.yaxis.set_tick_params(which=, direction=)
self._pdos.plot(ax,
indices=pdos_indices,
legend=legend,
draw_grid=False)
ax.set_ylim((0, None))
return plt | Plot projected DOS
Parameters
----------
pdos_indices : list of list, optional
Sets of indices of atoms whose projected DOS are summed over.
The indices start with 0. An example is as follows:
pdos_indices=[[0, 1], [2, 3, 4, 5]]
Default is None, which means
pdos_indices=[[i] for i in range(natom)]
legend : list of instances such as str or int, optional
The str(instance) are shown in legend.
It has to be len(pdos_indices)==len(legend). Default is None.
When None, legend is not shown. |
8,634 | def delete_terms_indexes(es, index_name: str = "terms_*"):
try:
es.indices.delete(index=index_name)
except Exception as e:
log.error(f"Could not delete all terms indices: {e}") | Delete all terms indexes |
8,635 | def header_little_endian(self):
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.HEADER_LITTLE_ENDIAN) | Return the header_little_endian attribute of the BFD file being
processed. |
8,636 | def drop(self):
if self.is_root:
raise ValueError("Cannot drop root node!")
parent = self.parent
for child in self.children:
child.parent = parent
parent.children.add(child)
self.children = set()
parent.sequence_ids.update(self.sequence_ids)
self.sequence_ids = set()
parent.remove_child(self) | Remove this node from the taxonomy, maintaining child subtrees by
adding them to the node's parent, and moving sequences at this node
to the parent.
Not valid for root node. |
8,637 | def returner(load):
serial = salt.payload.Serial(__opts__)
if load['jid'] == 'req':
load['jid'] = prep_jid(nocache=load.get('nocache', False))
jid_dir = salt.utils.jid.jid_dir(load['jid'], _job_dir(), __opts__['hash_type'])
if os.path.exists(os.path.join(jid_dir, 'nocache')):
return
hn_dir = os.path.join(jid_dir, load['id'])
try:
os.makedirs(hn_dir)
except OSError as err:
if err.errno == errno.EEXIST:
log.error(
'An extra return was detected from minion %s, please verify '
'the minion, this could be a replay attack', load['id']
)
return False
elif err.errno == errno.ENOENT:
log.error(
'An inotify event was triggered for the job dir of minion %s, '
'but the directory no longer exists', load['id']
)
return False
raise
serial.dump(
dict((key, load[key]) for key in ['return', 'retcode', 'success'] if key in load),
salt.utils.atomicfile.atomic_open(
os.path.join(hn_dir, OUT_P), 'w+b'
)
) | Return data to the local job cache |
8,638 | def merge_dicts(dict_a, dict_b):
obj = {}
for key, value in iteritems(dict_a):
if key in dict_b:
if isinstance(dict_b[key], dict):
obj[key] = merge_dicts(value, dict_b.pop(key))
else:
obj[key] = value
for key, value in iteritems(dict_b):
obj[key] = value
return obj | Deep merge of two dicts |
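Usage sketch for the deep merge above: values from `dict_b` win on conflicts, and nested dicts are merged recursively rather than replaced wholesale.

a = {"db": {"host": "localhost", "port": 5432}, "debug": False}
b = {"db": {"port": 6432}, "debug": True}
print(merge_dicts(a, b))
# {'db': {'host': 'localhost', 'port': 6432}, 'debug': True}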
8,639 | def calcSMAfromT(self, epsilon=0.7):
return eq.MeanPlanetTemp(self.albedo(), self.star.T, self.star.R, epsilon, self.T).a | Calculates the semi-major axis based on planet temperature |
8,640 | def click_text(self, text, exact_match=False):
self._element_find_by_text(text,exact_match).click() | Click text identified by ``text``.
By default, tries to click the first element whose text contains the given ``text``; if you would
like to click an exactly matching text, then set ``exact_match`` to `True`.
If there are multiple occurrences of ``text`` and you do not want the first one,
use `locator` with `Get Web Elements` instead. |
8,641 | def xor_key(first, second, trafaret):
trafaret = t.Trafaret._trafaret(trafaret)
def check_(value):
if (first in value) ^ (second in value):
key = first if first in value else second
yield first, t.catch_error(trafaret, value[key]), (key,)
elif first in value and second in value:
yield first, t.DataError(error=.format(second)), (first,)
yield second, t.DataError(error=.format(first)), (second,)
else:
yield first, t.DataError(error=.format()), (first,)
yield second, t.DataError(error=.format()), (second,)
return check_ | xor_key - takes `first` and `second` key names and `trafaret`.
Checks if we have only `first` or only `second` in data, not both,
and at least one.
Then checks key value against trafaret. |
8,642 | def create_inputs(inspecs):
ret = []
for i in inspecs:
v = nn.Variable(i.shape, need_grad=i.need_grad)
v.d = i.init(v.shape)
ret.append(v)
return ret | Create input :obj:`nnabla.Variable` from :obj:`Inspec`.
Args:
inspecs (:obj:`list` of :obj:`Inspec`): A list of ``Inspec``.
Returns:
:obj:`list` of :obj:`nnabla.Variable`: Input variables. |
8,643 | def and_join(strings):
last = len(strings) - 1
if last == 0:
return strings[0]
elif last < 0:
return ''
iterator = enumerate(strings)
return ', '.join('and ' + s if i == last else s for i, s in iterator) | Join the given ``strings`` by commas with last `' and '` conjunction.
>>> and_join(['Korea', 'Japan', 'China', 'Taiwan'])
'Korea, Japan, China, and Taiwan'
:param strings: a list of words to join
:type string: :class:`collections.abc.Sequence`
:returns: a joined string
:rtype: :class:`str`, :class:`basestring` |
8,644 | def add_missing_price_information_message(request, item):
messages.warning(
request,
_(
).format(
item=item,
em_start=,
em_end=,
link_start=.format(
support_link=get_configuration_value(, settings.ENTERPRISE_SUPPORT_URL),
),
platform_name=get_configuration_value(, settings.PLATFORM_NAME),
link_end=,
span_start=,
span_end=,
strong_start=,
strong_end=,
)
) | Add a message to the Django messages store indicating that we failed to retrieve price information about an item.
:param request: The current request.
:param item: The item for which price information is missing. Example: a program title, or a course. |
8,645 | def new_address(self, sender=None, nonce=None):
if sender is not None and nonce is None:
nonce = self.get_nonce(sender)
new_address = self.calculate_new_address(sender, nonce)
if sender is None and new_address in self:
return self.new_address(sender, nonce)
return new_address | Create a fresh 160bit address |
8,646 | def animate_correlation_matrix(sync_output_dynamic, animation_velocity = 75, colormap = , save_movie = None):
figure = plt.figure()
correlation_matrix = sync_output_dynamic.allocate_correlation_matrix(0)
artist = plt.imshow(correlation_matrix, cmap = plt.get_cmap(colormap), interpolation=, vmin = 0.0, vmax = 1.0)
def init_frame():
return [ artist ]
def frame_generation(index_dynamic):
correlation_matrix = sync_output_dynamic.allocate_correlation_matrix(index_dynamic)
artist.set_data(correlation_matrix)
return [ artist ]
correlation_animation = animation.FuncAnimation(figure, frame_generation, len(sync_output_dynamic), init_func = init_frame, interval = animation_velocity , repeat_delay = 1000, blit = True)
if (save_movie is not None):
correlation_animation.save(save_movie, writer = , fps = 15, bitrate = 1500)
else:
plt.show() | !
@brief Shows animation of correlation matrix between oscillators during simulation.
@param[in] sync_output_dynamic (sync_dynamic): Output dynamic of the Sync network.
@param[in] animation_velocity (uint): Interval between frames in milliseconds.
@param[in] colormap (string): Name of colormap that is used by matplotlib ('gray', 'pink', 'cool', 'spring', etc.).
@param[in] save_movie (string): If it is specified then animation will be stored to file that is specified in this parameter. |
8,647 | async def wasSet(self, node, oldv):
for func in self.onsets:
try:
await s_coro.ornot(func, node, oldv)
except asyncio.CancelledError:
raise
except Exception:
logger.exception('onset() error for %s' % (self.full,))
Args:
node (synapse.lib.node.Node): The node whose property was set.
oldv (obj): The previous value of the property. |
8,648 | def _make_cache_key(key_prefix):
if callable(key_prefix):
cache_key = key_prefix()
elif '%s' in key_prefix:
cache_key = key_prefix % request.path
else:
cache_key = key_prefix
cache_key = cache_key.encode('utf-8')
return cache_key | Make cache key from prefix
Borrowed from Flask-Cache extension |
8,649 | def fig_kernel_lfp_EITN_II(savefolders, params, transient=200, T=[800., 1000.], X=,
lags=[20, 20], channels=[0,3,7,11,13]):
zvec = np.r_[params.electrodeParams[]]
alphabet =
ana_params.set_PLOS_2column_fig_style(ratio=0.5)
fig = plt.figure()
fig.subplots_adjust(left=0.06, right=0.95, bottom=0.08, top=0.9, hspace=0.23, wspace=0.55)
gs = gridspec.GridSpec(len(channels), 7)
savefolder =
params.savefolder = os.path.join(os.path.split(params.savefolder)[0],
savefolder)
params.figures_path = os.path.join(params.savefolder, )
params.spike_output_path = os.path.join(params.savefolder,
)
params.networkSimParams[] = params.spike_output_path
f = h5py.File(os.path.join(, ))
srate = f[].value
tvec = np.arange(f[].shape[1]) * 1000. / srate
inds = (tvec < params.tstop) & (tvec >= transient)
data_sg_raw = f[].value.astype(float)
data_sg = data_sg_raw[:, inds]
f.close()
kwidth = 20
activationtimes = np.array([x*100 for x in range(3,11)] + [200])
networkSimSpikegen = CachedNetwork(**params.networkSimParams)
x, y = networkSimSpikegen.get_xy([transient, params.tstop])
for i, (savefolder, lag) in enumerate(zip(savefolders, lags)):
params.savefolder = os.path.join(os.path.split(params.savefolder)[0],
savefolder)
params.figures_path = os.path.join(params.savefolder, )
params.spike_output_path = os.path.join(params.savefolder,
)
params.networkSimParams[] = params.spike_output_path
networkSim = CachedNetwork(**params.networkSimParams)
f = h5py.File(os.path.join(params.savefolder, ))
data_raw = f[].value
srate = f[].value
tvec = np.arange(data_raw.shape[1]) * 1000. / srate
inds = (tvec < params.tstop) & (tvec >= transient)
data = data_raw[:, inds]
dataT = data.T - data.mean(axis=1)
data = dataT.T
f.close()
f = h5py.File(os.path.join(, ))
data_sg_raw = f[].value
f.close()
x, y = networkSim.get_xy([0,params.tstop])
bins = np.arange(0, params.tstop+2) + 0.5
x0_raw = np.histogram(x[X], bins=bins)[0]
x0 = x0_raw[inds].astype(float)
gsb = gridspec.GridSpec(len(channels), 8)
ax = fig.add_subplot(gsb[:, (i*4):(i*4+2)])
phlp.annotate_subplot(ax, ncols=8/2., nrows=4, letter=alphabet[i*3+2],
linear_offset=0.02)
kernels = np.zeros((len(params.N_X), 16, kwidth*2))
for j in range(len(params.X)):
kernels[j, :, kwidth:] = data_sg_raw[:, (j+2)*100:kwidth+(j+2)*100]/params.N_X[j]
LFP_reconst_raw = np.zeros(data_raw.shape)
for j, pop in enumerate(params.X):
x0_raw = np.histogram(x[pop], bins=bins)[0].astype(float)
for ch in range(kernels.shape[1]):
LFP_reconst_raw[ch] += np.convolve(x0_raw, kernels[j, ch],
)
LFP_reconst = LFP_reconst_raw[:, inds]
LFP_reconstT = LFP_reconst.T - LFP_reconst.mean(axis=1)
LFP_reconst = LFP_reconstT.T
vlimround = plot_signal_sum(ax, params,
fname=os.path.join(params.savefolder,
),
unit=, scalebar=True,
T=T, ylim=[-1550, 50],
color=, label=,
rasterized=False)
plot_signal_sum(ax, params, fname=LFP_reconst_raw,
unit=, scaling_factor= 1., scalebar=False,
vlimround=vlimround,
T=T, ylim=[-1550, 50],
color=, label=,
rasterized=False)
ax.set_title()
if i > 0:
ax.set_yticklabels([])
ax = fig.add_subplot(gsb[:, i*4+2:i*4+3])
phlp.remove_axis_junk(ax)
phlp.annotate_subplot(ax, ncols=8./1, nrows=4, letter=alphabet[i*3+3],
linear_offset=0.02)
cc = np.zeros(len(zvec))
for ch in np.arange(len(zvec)):
cc[ch] = np.corrcoef(data[ch], LFP_reconst[ch])[1, 0]
ax.barh(zvec, cc, height=90, align=, color=, linewidth=0.5)
ax.set_ylim([-1550, 50])
ax.set_yticklabels([])
ax.set_yticks(zvec)
ax.set_xlim([0.0, 1.])
ax.set_xticks([0.0, 0.5, 1])
ax.yaxis.tick_left()
ax.set_xlabel(, labelpad=0.1)
ax.set_title()
print()
print(cc)
freqs, PSD_data = calc_signal_power(params, fname=data,
transient=transient, Df=None, mlab=True,
NFFT=256, noverlap=128,
window=plt.mlab.window_hanning)
freqs, PSD_LFP_reconst = calc_signal_power(params, fname=LFP_reconst,
transient=transient, Df=None, mlab=True,
NFFT=256, noverlap=128,
window=plt.mlab.window_hanning)
zv = np.r_[params.electrodeParams[]]
zv = np.r_[zv, zv[-1] + np.diff(zv)[-1]]
inds = freqs >= 1
for j, ch in enumerate(channels):
ax = fig.add_subplot(gsb[j, (i*4+3):(i*4+4)])
if j == 0:
phlp.annotate_subplot(ax, ncols=8./1, nrows=4.5*len(channels),
letter=alphabet[i*3+4], linear_offset=0.02)
ax.set_title()
phlp.remove_axis_junk(ax)
ax.loglog(freqs[inds], PSD_data[ch, inds], , label=, clip_on=True)
ax.loglog(freqs[inds], PSD_LFP_reconst[ch, inds], , label=, clip_on=True)
ax.set_xlim([4E0,4E2])
ax.set_ylim([1E-8, 1E-4])
ax.tick_params(axis=, which=, pad=0)
ax.set_yticks([1E-8,1E-6,1E-4])
ax.yaxis.set_minor_locator(plt.NullLocator())
ax.text(0.8, 0.9, % (ch+1),
horizontalalignment=,
verticalalignment=,
fontsize=6,
transform=ax.transAxes)
if j == 0:
ax.set_ylabel(, labelpad=0.)
if j > 0:
ax.set_yticklabels([])
if j == len(channels)-1:
ax.set_xlabel(r, labelpad=0.)
else:
ax.set_xticklabels([])
return fig, PSD_LFP_reconst, PSD_data | This function calculates the STA of LFP, extracts kernels and recontructs the LFP from kernels.
Arguments
::
transient : the time in milliseconds, after which the analysis should begin
so as to avoid any starting transients
X : id of presynaptic trigger population |
8,650 | def log_to_ganttplot(execution_history_items):
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import numpy as np
d = log_to_DataFrame(execution_history_items)
unique_states, idx = np.unique(d.path_by_name, return_index=True)
ordered_unique_states = np.array(d.path_by_name)[np.sort(idx)]
name2idx = {k: i for i, k in enumerate(ordered_unique_states)}
calldate = dates.date2num(d.timestamp_call.dt.to_pydatetime())
returndate = dates.date2num(d.timestamp_return.dt.to_pydatetime())
state2color = {: ,
: ,
: ,
: }
fig, ax = plt.subplots(1, 1)
ax.barh(bottom=[name2idx[k] for k in d.path_by_name], width=returndate-calldate,
left=calldate, align=, color=[state2color[s] for s in d.state_type], lw=0.0)
plt.yticks(list(range(len(ordered_unique_states))), ordered_unique_states) | Example how to use the DataFrame representation |
8,651 | def AddArg(self, arg):
self.args.append(arg)
if len(self.args) > self.number_of_args:
raise ParseError("Too many args for this expression.")
elif len(self.args) == self.number_of_args:
return True
return False | Adds a new arg to this expression.
Args:
arg: The argument to add (string).
Returns:
True if this arg is the last arg, False otherwise.
Raises:
ParseError: If there are too many args. |
8,652 | def get_area(self):
mesh = self.mesh
_, _, _, area = mesh.get_cell_dimensions()
return numpy.sum(area) | Compute area as the sum of the mesh cells area values. |
8,653 | def find_prefix(self, iri: Union[URIRef, Literal, str]) -> Union[None, str]:
iri = str(iri)
max_iri_len = 0
max_prefix = None
for prefix, uri in common_namespaces.items():
if uri in iri and max_iri_len < len(uri):
max_prefix = prefix
max_iri_len = len(uri)
return max_prefix | Finds if uri is in common_namespaces
Auto adds prefix if incoming iri has a uri in common_namespaces. If it's not in the local
library, then it will just be spit out as the iri and not saved/condensed into qualified
names.
The reason for the maxes is to find the longest string match. This is to avoid accidentally
matching iris against short uris when a longer, more complete uri is the real match.
Args: iri: iri to be searched to find a known uri in it.
Example:
In [1]: print(find_prefix("http://www.w3.org/2000/01/rdf-schema#label"))
Out [1]: "http://www.w3.org/2000/01/rdf-schema#"
In [2]: print(find_prefix("http://made_up_uri/label"))
Out [2]: None |
8,654 | def replace(self, name, newname):
if not re.match("[a-zA-Z]\w*", name):
return None
if not re.match("[a-zA-Z]\w*", newname):
return None
def _replace(match):
return match.group(0).replace(match.group('name'), newname)
pattern = re.compile("(\W|^)(?P<name>" + name + ")(\W|$)")
cut = re.sub(pattern, _replace, str(self))
return Cut(cut) | Replace all occurrences of name with newname |
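A standalone sketch of the whole-word replacement idea used above: only occurrences of `name` bounded by non-word characters (or the string edges) are rewritten, so replacing `pt` does not touch `n_pt` (the helper name is illustrative):

import re

def replace_identifier(expr, name, newname):
    # The (\W|^) ... (\W|$) guards keep the match anchored to identifier
    # boundaries, mirroring the pattern built in the method above.
    pattern = re.compile(r"(\W|^)(?P<name>" + re.escape(name) + r")(\W|$)")
    return re.sub(pattern, lambda m: m.group(0).replace(m.group('name'), newname), expr)

print(replace_identifier("pt > 20 && n_pt < 3", "pt", "leading_pt"))
# 'leading_pt > 20 && n_pt < 3'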
8,655 | def columns_used(self):
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.fit_filters),
util.columns_in_filters(self.predict_filters),
util.columns_in_formula(self.model_expression)))) | Returns all the columns used in this model for filtering
and in the model expression. |
8,656 | def register_on_serial_port_changed(self, callback):
event_type = library.VBoxEventType.on_serial_port_changed
return self.event_source.register_callback(callback, event_type) | Set the callback function to consume on serial port changed events.
Callback receives a ISerialPortChangedEvent object.
Returns the callback_id |
8,657 | def decode(s, cls=PENMANCodec, **kwargs):
codec = cls(**kwargs)
return codec.decode(s) | Deserialize PENMAN-serialized *s* into its Graph object
Args:
s: a string containing a single PENMAN-serialized graph
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
the Graph object described by *s*
Example:
>>> decode('(b / bark :ARG1 (d / dog))')
<Graph object (top=b) at ...> |
8,658 | def all(
self,
count=500,
offset=0,
type=None,
inactive=None,
emailFilter=None,
tag=None,
messageID=None,
fromdate=None,
todate=None,
):
responses = self.call_many(
"GET",
"/bounces/",
count=count,
offset=offset,
type=type,
inactive=inactive,
emailFilter=emailFilter,
tag=tag,
messageID=messageID,
fromdate=fromdate,
todate=todate,
)
return self.expand_responses(responses, "Bounces") | Returns many bounces.
:param int count: Number of bounces to return per request.
:param int offset: Number of bounces to skip.
:param str type: Filter by type of bounce.
:param bool inactive: Filter by emails that were deactivated by Postmark due to the bounce.
:param str emailFilter: Filter by email address.
:param str tag: Filter by tag.
:param str messageID: Filter by messageID.
:param date fromdate: Filter messages starting from the date specified (inclusive).
:param date todate: Filter messages up to the date specified (inclusive).
:return: A list of :py:class:`Bounce` instances.
:rtype: `list` |
8,659 | def _get_menu_width(self, max_width, complete_state):
return min(max_width, max(self.MIN_WIDTH, max(get_cwidth(c.display)
for c in complete_state.current_completions) + 2)) | Return the width of the main column. |
8,660 | def info_factory(name, libnames, headers, frameworks=None,
section=None, classname=None):
if not classname:
classname = '%s_info' % name
if not section:
section = name
if not frameworks:
frameworks = []
class _ret(system_info):
def __init__(self):
system_info.__init__(self)
def library_extensions(self):
return system_info.library_extensions(self)
def calc_info(self):
if libnames:
libs = self.get_libs(, )
if not libs:
libs = libnames
lib_dirs = self.get_lib_dirs()
tmp = None
for d in lib_dirs:
tmp = self.check_libs(d, libs)
if tmp is not None:
info = tmp
break
if tmp is None:
return
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
p = self.combine_paths(d, headers)
if p:
inc_dir = os.path.dirname(p[0])
dict_append(info, include_dirs=[d])
break
if inc_dir is None:
log.info( % name)
return
self.set_info(**info)
else:
if frameworks:
fargs = []
for f in frameworks:
p = "/System/Library/Frameworks/%s.framework" % f
if os.path.exists(p):
fargs.append("-framework")
fargs.append(f)
if fargs:
self.set_info(extra_link_args=fargs)
return
_ret.__name__ = classname
_ret.section = section
return _ret | Create a system_info class.
Parameters
----------
name : str
name of the library
libnames : seq
list of libraries to look for
headers : seq
list of headers to look for
classname : str
name of the returned class
section : str
section name in the site.cfg
Returns
-------
a system_info-derived class with the given meta-parameters |
8,661 | def add_hookcall_monitoring(self, before, after):
return _tracing._TracedHookExecution(self, before, after).undo | add before/after tracing functions for all hooks
and return an undo function which, when called,
will remove the added tracers.
``before(hook_name, hook_impls, kwargs)`` will be called ahead
of all hook calls and receive a hookcaller instance, a list
of HookImpl instances and the keyword arguments for the hook call.
``after(outcome, hook_name, hook_impls, kwargs)`` receives the
same arguments as ``before`` but also a :py:class:`_Result` object
which represents the result of the overall hook call. |
8,662 | def create_geoms(self, gdefs, plot):
new_gdefs = []
for gdef in gdefs:
gdef = gdef.create_geoms(plot)
if gdef:
new_gdefs.append(gdef)
return new_gdefs | Add geoms to the guide definitions |
8,663 | def unmarshal(self, values, bind_client=None):
if values is not None:
return [super(EntityCollection, self).unmarshal(v, bind_client=bind_client) for v in values] | Cast the list. |
8,664 | def openidf(fname, idd=None, epw=None):
import eppy.easyopen as easyopen
return easyopen.easyopen(fname, idd=idd, epw=epw) | automatically set idd and open idf file. Uses version from idf to set correct idd
It will work under the following circumstances:
- the IDF file should have the VERSION object.
- Needs the version of EnergyPlus installed that matches the IDF version.
- Energyplus should be installed in the default location.
Parameters
----------
fname : str, StringIO or IOBase
Filepath IDF file,
File handle of IDF file open to read
StringIO with IDF contents within
idd : str, StringIO or IOBase
This is an optional argument. easyopen will find the IDD without this arg
Filepath IDD file,
File handle of IDD file open to read
StringIO with IDD contents within
epw : str
path name to the weather file. This arg is needed to run EneryPlus from eppy. |
8,665 | def get_subparser(self, name):
if self._subparsers_action is None:
raise ValueError("%s has no subparsers defined!" % self)
return self._subparsers_action.choices[name] | Convenience method to get a certain subparser
Parameters
----------
name: str
The name of the subparser
Returns
-------
FuncArgParser
The subparsers corresponding to `name` |
8,666 | def map_value(self, value, gid):
base_gid = self.base_gid_pattern.search(gid).group(1)
if self.anonymyze:
try:
if value in self._maps[base_gid]:
return self._maps[base_gid][value]
else:
k = (len(self._maps[base_gid]) + 1) % self.mapmax
new_item = u.format(base_gid.upper(), k, self.mapexp)
self._maps[base_gid][value] = new_item
return new_item
except KeyError:
return value
elif base_gid in [, , , , ] and self.ip_lookup:
ip_match = self.ip_pattern.search(value)
if ip_match is None:
return value
host = self.gethost(ip_match.group(1))
if host == ip_match.group(1) or value.startswith(host):
return value
return u.join([
value[:ip_match.start(1)],
self.gethost(ip_match.group(1)),
value[ip_match.end(1):]])
elif (base_gid == or base_gid == ) and self.uid_lookup:
return self.getuname(value)
else:
return value | Return the value for a group id, applying requested mapping.
Map only groups related to a filter, ie when the basename of
the group is identical to the name of a filter. |
8,667 | def request(self, hash_, quickkey, doc_type, page=None,
output=None, size_id=None, metadata=None,
request_conversion_only=None):
if len(hash_) > 4:
hash_ = hash_[:4]
query = QueryParams({
'quickkey': quickkey,
'doc_type': doc_type,
'page': page,
'output': output,
'size_id': size_id,
'metadata': metadata,
'request_conversion_only': request_conversion_only
})
url = API_ENDPOINT + + hash_ + + urlencode(query)
response = self.http.get(url, stream=True)
if response.status_code == 204:
raise ConversionServerError("Unable to fulfill request. "
"The document will not be converted.",
response.status_code)
response.raise_for_status()
if response.headers['content-type'] == 'application/json':
return response.json()
return response | Query conversion server
hash_: 4 characters of file hash
quickkey: File quickkey
doc_type: "i" for image, "d" for documents
page: The page to convert. If page is set to 'initial', the first
10 pages of the document will be provided. (document)
output: "pdf", "img", or "swf" (document)
size_id: 0,1,2 (document)
0-9, a-f, z (image)
metadata: Set to 1 to get metadata dict
request_conversion_only: Request conversion w/o content |
8,668 | def queryset(self, request, queryset):
if self.value() is None:
return queryset.all()
else:
return queryset.filter(subscriptions__status=self.value()).distinct() | Return the filtered queryset based on the value provided in the query string.
source: https://docs.djangoproject.com/en/1.10/ref/contrib/admin/#django.contrib.admin.ModelAdmin.list_filter |
8,669 | def _map_update(
self,
prior_mean,
prior_cov,
global_cov_scaled,
new_observation):
common = np.linalg.inv(prior_cov + global_cov_scaled)
observation_mean = np.mean(new_observation, axis=1)
posterior_mean = prior_cov.dot(common.dot(observation_mean)) +\
global_cov_scaled.dot(common.dot(prior_mean))
posterior_cov =\
prior_cov.dot(common.dot(global_cov_scaled))
return posterior_mean, posterior_cov | Maximum A Posterior (MAP) update of a parameter
Parameters
----------
prior_mean : float or 1D array
Prior mean of parameters.
prior_cov : float or 1D array
Prior variance of scalar parameter, or
prior covariance of multivariate parameter
global_cov_scaled : float or 1D array
Global prior variance of scalar parameter, or
global prior covariance of multivariate parameter
new_observation : 1D or 2D array, with shape [n_dim, n_subj]
New observations on parameters.
Returns
-------
posterior_mean : float or 1D array
Posterior mean of parameters.
posterior_cov : float or 1D array
Posterior variance of scalar parameter, or
posterior covariance of multivariate parameter |
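Read directly from the code above (a restatement of what it computes, not an independent derivation), with prior mean/covariance $\mu_0, \Sigma_0$, scaled global covariance $\Sigma_g$, and observation mean $\bar{x}$ taken across subjects:

$\mu_{\mathrm{post}} = \Sigma_0 (\Sigma_0 + \Sigma_g)^{-1} \bar{x} + \Sigma_g (\Sigma_0 + \Sigma_g)^{-1} \mu_0$

$\Sigma_{\mathrm{post}} = \Sigma_0 (\Sigma_0 + \Sigma_g)^{-1} \Sigma_g$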
8,670 | def coord2healpix(coords, frame, nside, nest=True):
if coords.frame.name != frame:
c = coords.transform_to(frame)
else:
c = coords
if hasattr(c, 'ra'):
phi = c.ra.rad
theta = 0.5*np.pi - c.dec.rad
return hp.pixelfunc.ang2pix(nside, theta, phi, nest=nest)
elif hasattr(c, 'l'):
phi = c.l.rad
theta = 0.5*np.pi - c.b.rad
return hp.pixelfunc.ang2pix(nside, theta, phi, nest=nest)
elif hasattr(c, 'x'):
return hp.pixelfunc.vec2pix(nside, c.x.kpc, c.y.kpc, c.z.kpc, nest=nest)
elif hasattr(c, 'w'):
return hp.pixelfunc.vec2pix(nside, c.w.kpc, c.u.kpc, c.v.kpc, nest=nest)
else:
raise dustexceptions.CoordFrameError(
'Unsupported coordinate frame: "{}"'.format(
frame))
system is defined on the coordinate frame ``frame``.
Args:
coords (:obj:`astropy.coordinates.SkyCoord`): The input coordinates.
frame (:obj:`str`): The frame in which the HEALPix system is defined.
nside (:obj:`int`): The HEALPix nside parameter to use. Must be a power of 2.
nest (Optional[:obj:`bool`]): ``True`` (the default) if nested HEALPix ordering
is desired. ``False`` for ring ordering.
Returns:
An array of pixel indices (integers), with the same shape as the input
SkyCoord coordinates (:obj:`coords.shape`).
Raises:
:obj:`dustexceptions.CoordFrameError`: If the specified frame is not supported. |
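A brief usage sketch (requires `healpy` and `astropy` at runtime; the galactic l/b branch above is the one exercised here):

import astropy.units as u
from astropy.coordinates import SkyCoord

coords = SkyCoord(l=[0.0, 90.0] * u.deg, b=[0.0, 30.0] * u.deg, frame='galactic')
pix = coord2healpix(coords, frame='galactic', nside=64, nest=True)
print(pix.shape)   # (2,) -- one HEALPix pixel index per input coordinate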
8,671 | def lookup(self, host_value):
try:
host_object = self._host_factory(host_value)
except InvalidHostError:
return None
result = self._get_match_and_classification(
host_object
)
host_item, classification = result
if host_item is not None:
return AddressListItem(
host_item.to_unicode(),
self,
classification
)
return None | Get a host value matching the given value.
:param host_value: a value of the host of a type that can be
listed by the service
:returns: an instance of AddressListItem representing
a matched value
:raises InvalidHostError: if the argument is not a valid
host string |
8,672 | def _hide_column(self, column):
column = _ensure_string_from_expression(column)
new_name = self._find_valid_name('__' + column)
self._rename(column, new_name) | Hides a column by prefixing the name with '__'. |
8,673 | def create_question_dialog(self, text, second_text):
dialog = self.create_message_dialog(
text, buttons=Gtk.ButtonsType.YES_NO, icon=Gtk.MessageType.QUESTION
)
dialog.format_secondary_text(second_text)
response = dialog.run()
dialog.destroy()
return response | Function creates a question dialog with title text
and second_text |
8,674 | def recv_sub(self, id_, name, params):
self.api.sub(id_, name, *params) | DDP sub handler. |
8,675 | def vpc_peering_connection_present(name, requester_vpc_id=None, requester_vpc_name=None,
peer_vpc_id=None, peer_vpc_name=None, conn_name=None,
peer_owner_id=None, peer_region=None, region=None,
key=None, keyid=None, profile=None):
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
if __salt__[](conn_name=conn_name, region=region,
key=key, keyid=keyid, profile=profile):
if __salt__[](conn_name=conn_name,
vpc_id=requester_vpc_id,
vpc_name=requester_vpc_name,
region=region, key=key,
keyid=keyid, profile=profile):
ret[] = (
.format(conn_name, peer_owner_id
or peer_vpc_name or peer_vpc_id))
log.info(ret[])
return ret
return accept_vpc_peering_connection(name=name, conn_name=conn_name,
region=region, key=key, keyid=keyid,
profile=profile)
return request_vpc_peering_connection(name=name, requester_vpc_id=requester_vpc_id,
requester_vpc_name=requester_vpc_name,
peer_vpc_id=peer_vpc_id, peer_vpc_name=peer_vpc_name,
conn_name=conn_name, peer_owner_id=peer_owner_id,
peer_region=peer_region, region=region, key=key,
keyid=keyid, profile=profile) | name
Name of the state
requester_vpc_id
ID of the requesting VPC. Exclusive with requester_vpc_name.
requester_vpc_name
Name tag of the requesting VPC. Exclusive with requester_vpc_id.
peer_vpc_id
ID of the VPC to create the VPC peering connection with. This can be a VPC in
another account. Exclusive with peer_vpc_name.
peer_vpc_name
Name tag of the VPC to create the VPC peering connection with. This can only
be a VPC in the same account, else resolving it into a vpc ID will fail.
Exclusive with peer_vpc_id.
conn_name
The name to use for this VPC peering connection.
peer_owner_id
ID of the owner of the peer VPC. Defaults to your account ID, so a value
is required if peering with a VPC in a different account.
peer_region
Region of peer VPC. For inter-region vpc peering connections. Not required
for intra-region peering connections.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
.. versionadded:: 2016.11.0
Example:
.. code-block:: yaml
ensure peering twixt local vpc and the other guys:
boto_vpc.vpc_peering_connection_present:
- requester_vpc_name: my_local_vpc
- peer_vpc_name: some_other_guys_vpc
- conn_name: peering_from_here_to_there
- peer_owner_id: 012345654321 |
8,676 | def list_required(self, type=None, service=None):
from burlap.common import (
required_system_packages,
required_python_packages,
required_ruby_packages,
)
service = (service or '').strip().upper()
type = (type or '').lower().strip()
assert not type or type in PACKAGE_TYPES, 'Unknown package type: %s' % (type,)
packages_set = set()
packages = []
version = self.os_version
for _service, satchel in self.all_other_enabled_satchels.items():
_service = _service.strip().upper()
if service and service != _service:
continue
_new = []
if not type or type == SYSTEM:
_new.extend(required_system_packages.get(
_service, {}).get((version.distro, version.release), []))
try:
_pkgs = satchel.packager_system_packages
if self.verbose:
print()
pprint(_pkgs, indent=4)
for _key in [(version.distro, version.release), version.distro]:
if self.verbose:
print(, _key)
if _key in _pkgs:
if self.verbose:
print( % satchel, _pkgs[_key])
_new.extend(_pkgs[_key])
break
except AttributeError:
pass
if not type or type == PYTHON:
_new.extend(required_python_packages.get(
_service, {}).get((version.distro, version.release), []))
try:
_pkgs = satchel.packager_python_packages
for _key in [(version.distro, version.release), version.distro]:
if _key in _pkgs:
_new.extend(_pkgs[_key])
except AttributeError:
pass
print(, _new)
if not type or type == RUBY:
_new.extend(required_ruby_packages.get(
_service, {}).get((version.distro, version.release), []))
for _ in _new:
if _ in packages_set:
continue
packages_set.add(_)
packages.append(_)
if self.verbose:
for package in sorted(packages):
print(, package)
return packages | Displays all packages required by the current role
based on the documented services provided. |
8,677 | async def on_raw_kick(self, message):
kicker, kickermeta = self._parse_user(message.source)
self._sync_user(kicker, kickermeta)
if len(message.params) > 2:
channels, targets, reason = message.params
else:
channels, targets = message.params
reason = None
channels = channels.split()
targets = targets.split()
for channel, target in itertools.product(channels, targets):
target, targetmeta = self._parse_user(target)
self._sync_user(target, targetmeta)
if self.is_same_nick(target, self.nickname):
self._destroy_channel(channel)
else:
if self.in_channel(channel):
self._destroy_user(target, channel)
await self.on_kick(channel, target, kicker, reason) | KICK command. |
8,678 | def _derive_temporalnetwork(self, f, i, tag, params, confounds_exist, confound_files):
data = load_tabular_file(f, index_col=True, header=True)
fs, _ = drop_bids_suffix(f)
save_name, save_dir, _ = self._save_namepaths_bids_derivatives(
fs, tag, , )
if in params.keys():
if params[] == :
fc_files = self.get_selected_files(
quiet=1, pipeline=, forfile=f)
if len(fc_files) == 1:
params[] = load_tabular_file(
fc_files[0]).values
else:
raise ValueError()
if in params.keys():
if params[] == :
fc_files = self.get_selected_files(
quiet=1, pipeline=, forfile=f)
if len(fc_files) == 1:
params[] = load_tabular_file(
fc_files[0]).values
else:
raise ValueError()
params[] =
params[] = save_dir +
params[] = save_name +
if not os.path.exists(params[]):
os.makedirs(params[])
if not in params:
params[] =
dfc = teneto.timeseries.derive_temporalnetwork(data.values, params)
dfc_net = TemporalNetwork(from_array=dfc, nettype=)
dfc_net.network.to_csv(save_dir + save_name + , sep=)
sidecar = get_sidecar(f)
sidecar[] = params
if in sidecar[]:
sidecar[][] = True
sidecar[][] = fc_files
if in sidecar[]:
sidecar[][] = True
sidecar[][] = fc_files
sidecar[][] = f
sidecar[][] =
with open(save_dir + save_name + , ) as fs:
json.dump(sidecar, fs)
if confounds_exist:
analysis_step =
df = pd.read_csv(confound_files[i], sep=)
df = df.fillna(df.median())
ind = np.triu_indices(dfc.shape[0], k=1)
dfc_df = pd.DataFrame(dfc[ind[0], ind[1], :].transpose())
if len(df) != len(dfc_df):
df = df.iloc[int(np.round((params[]-1)/2)): int(np.round((params[]-1)/2)+len(dfc_df))]
df.reset_index(inplace=True, drop=True)
dfc_df_z = (dfc_df - dfc_df.mean())
df_z = (df - df.mean())
R_df = dfc_df_z.T.dot(df_z).div(len(dfc_df)).div(
df_z.std(ddof=0)).div(dfc_df_z.std(ddof=0), axis=0)
R_df_describe = R_df.describe()
desc_index = R_df_describe.index
confound_report_dir = params[] + \
+ save_name +
confound_report_figdir = confound_report_dir +
if not os.path.exists(confound_report_figdir):
os.makedirs(confound_report_figdir)
report =
report += + analysis_step +
for c in R_df.columns:
fig, ax = plt.subplots(1)
ax = sns.distplot(
R_df[c], hist=False, color=, ax=ax, kde_kws={"shade": True})
fig.savefig(confound_report_figdir + c + )
plt.close(fig)
report += + c +
for ind_name, r in enumerate(R_df_describe[c]):
report += str(desc_index[ind_name]) +
report += str(r) +
report +=
report += + \
os.path.abspath(confound_report_figdir) + \
+ c +
report +=
with open(confound_report_dir + save_name + , ) as file:
file.write(report) | Funciton called by TenetoBIDS.derive_temporalnetwork for concurrent processing. |
8,679 | def from_settings(cls, settings):
if 'collection' not in settings or 'mongodb' not in settings or \
settings['collection'] == '' or settings['mongodb'] == '':
raise Exception(
"Erroneous mongodb settings, "
"needs a collection and mongodb setting",
settings)
cx_uri = urlparse.urlsplit(settings["mongodb"])
db_name = cx_uri.path
if '?' in db_name:
db_name, query = db_name.split('?', 1)
db_name = db_name[1:]
if db_name == "":
raise Exception(
"Erroneous mongodb settings, "
"missing db_name", settings)
cx_uri = urlparse.urlunsplit(
(cx_uri.scheme, cx_uri.netloc, "/", cx_uri.query, cx_uri.fragment))
options = copy.deepcopy(settings)
del options['collection']
del options['mongodb']
return Mongodb(
cls.connection_for_uri(cx_uri),
db_name, settings['collection'], options) | Read Mongodb Source configuration from the provided settings |
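A minimal sketch of the URI handling in from_settings above, using Python 3's urllib.parse instead of the Python 2 `urlparse` module the record assumes. The helper name and example URI are illustrative only.

from urllib.parse import urlsplit, urlunsplit

def split_mongodb_uri(uri):
    parts = urlsplit(uri)                      # e.g. mongodb://host:27017/mydb?w=1
    db_name = parts.path
    if '?' in db_name:                         # defensive: strip any query glued to the path
        db_name, _query = db_name.split('?', 1)
    db_name = db_name.lstrip('/')
    if not db_name:
        raise ValueError("missing db_name in %r" % uri)
    # Rebuild the connection URI without the database path component.
    cx_uri = urlunsplit((parts.scheme, parts.netloc, "/", parts.query, parts.fragment))
    return cx_uri, db_name

# split_mongodb_uri("mongodb://localhost:27017/crawl?w=1")
# -> ("mongodb://localhost:27017/?w=1", "crawl")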
8,680 | def assign(var, new_val, assign_fn=assign_slice):
if isinstance(var, Tensor):
var = var.operation
if not isinstance(var, Variable):
raise ValueError("var must be a mtf.Variable or its output Tensor.")
return Assign([var], [new_val], assign_fn=assign_fn) | Assign a new value to a variable.
Args:
var: either a Variable operation or its output Tensor.
new_val: a Tensor
assign_fn: a function from
(mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation
Returns:
an Operation
Raises:
ValueError: if var is not a Variable and var.operation is not a Variable |
8,681 | def date_between(self, start_date='-30y', end_date='today'):
start_date = self._parse_date(start_date)
end_date = self._parse_date(end_date)
return self.date_between_dates(date_start=start_date, date_end=end_date) | Get a Date object based on a random date between two given dates.
Accepts date strings that can be recognized by strtotime().
:param start_date Defaults to 30 years ago
:param end_date Defaults to "today"
:example Date('1999-02-02')
:return Date |
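A hedged usage sketch, assuming this record is the Faker-style date provider it resembles; the Faker package and its date_between method are assumptions here.

from faker import Faker

fake = Faker()

# A random date between 30 years ago and today (the documented defaults).
print(fake.date_between())

# A random date inside an explicit window; relative offsets like '-1y' are accepted.
print(fake.date_between(start_date='-1y', end_date='today'))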
8,682 | def heating_degree_days(T, T_base=F2K(65), truncate=True):
dd = T - T_base
if truncate and dd < 0.0:
dd = 0.0
return dd | r'''Calculates the heating degree days for a period of time.
.. math::
\text{heating degree days} = max(T - T_{base}, 0)
Parameters
----------
T : float
Measured temperature; sometimes an average over a length of time is used,
other times the average of the lowest and highest temperature in a
period are used, [K]
T_base : float, optional
Reference temperature for the degree day calculation, defaults
to 65 °F (18.33 °C, 291.483 K), the value most used in the US, [K]
truncate : bool
If truncate is True, no negative values will be returned; if negative,
the value is truncated to 0, [-]
Returns
-------
heating_degree_days : float
Degree above the base temperature multiplied by the length of time of
the measurement, normally days [day*K]
Notes
-----
Some common base temperatures are 18 °C (Canada), 15.5 °C (EU),
17 °C (Denmark, Finland), 12 °C Switzerland. The base temperature
should always be presented with the results.
The time unit does not have to be days; it can be any time unit, and the
calculation behaves the same.
Examples
--------
>>> heating_degree_days(303.8)
12.31666666666672
>>> heating_degree_days(273)
0.0
>>> heating_degree_days(322, T_base=300)
22
References
----------
.. [1] "Heating Degree Day." Wikipedia, January 24, 2018.
https://en.wikipedia.org/w/index.php?title=Heating_degree_day&oldid=822187764. |
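A self-contained sketch of the documented formula, heating degree days = max(T - T_base, 0), with the 65 °F default base converted to kelvin; it reproduces the docstring's own examples. The f_to_k helper stands in for the F2K conversion used in the record.

def f_to_k(t_f):
    # Fahrenheit to kelvin
    return (t_f - 32.0) * 5.0 / 9.0 + 273.15

def heating_degree_days(T, T_base=f_to_k(65.0), truncate=True):
    dd = T - T_base
    if truncate and dd < 0.0:
        dd = 0.0
    return dd

print(heating_degree_days(303.8))            # ~12.3167, as in the docstring example
print(heating_degree_days(273))              # 0.0
print(heating_degree_days(322, T_base=300))  # 22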
8,683 | def setHeight(self, vehID, height):
self._connection._sendDoubleCmd(
tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_HEIGHT, vehID, height) | setHeight(string, double) -> None
Sets the height in m for this vehicle. |
8,684 | def get_apex(self, lat, height=None):
lat = helpers.checklat(lat, name='lat')
if height is None:
height = self.refh
cos_lat_squared = np.cos(np.radians(lat))**2
apex_height = (self.RE + height) / cos_lat_squared - self.RE
return apex_height | Calculate apex height
Parameters
-----------
lat : (float)
Latitude in degrees
height : (float or NoneType)
Height above the surface of the earth in km or NoneType to use
reference height (default=None)
Returns
----------
apex_height : (float)
Height of the field line apex in km |
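A standalone sketch of the apex-height relation used above, h_apex = (R_E + h) / cos^2(lat) - R_E. The Earth radius below is an assumed mean value; the class in the record uses its own self.RE constant and reference height.

import numpy as np

RE = 6371.009  # km, assumed mean Earth radius (not necessarily the record's self.RE)

def apex_height(lat_deg, height_km=0.0):
    cos_lat_sq = np.cos(np.radians(lat_deg)) ** 2
    return (RE + height_km) / cos_lat_sq - RE

print(apex_height(0.0))    # 0.0: a field line touching the equator has its apex there
print(apex_height(60.0))   # roughly 3 * RE above the surface, since cos^2(60 deg) = 1/4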
8,685 | def navigate(self):
tic = datetime.now()
lons40km = self._data["pos"][:, :, 1] * 1e-4
lats40km = self._data["pos"][:, :, 0] * 1e-4
try:
from geotiepoints import SatelliteInterpolator
except ImportError:
logger.warning("Could not interpolate lon/lats, "
"python-geotiepoints missing.")
self.lons, self.lats = lons40km, lats40km
else:
cols40km = np.arange(24, 2048, 40)
cols1km = np.arange(2048)
lines = lons40km.shape[0]
rows40km = np.arange(lines)
rows1km = np.arange(lines)
along_track_order = 1
cross_track_order = 3
satint = SatelliteInterpolator(
(lons40km, lats40km), (rows40km, cols40km), (rows1km, cols1km),
along_track_order, cross_track_order)
self.lons, self.lats = satint.interpolate()
logger.debug("Navigation time %s", str(datetime.now() - tic)) | Return the longitudes and latitudes of the scene. |
8,686 | def app_token(vault_client, app_id, user_id):
resp = vault_client.auth_app_id(app_id, user_id)
if 'auth' in resp and 'client_token' in resp['auth']:
return resp['auth']['client_token']
else:
raise aomi.exceptions.AomiCredentials() | Returns a vault token based on the app and user id. |
8,687 | def delete(self, hdfs_path, recursive=False):
return self.client.delete(hdfs_path, recursive=recursive) | Delete a file located at `hdfs_path`. |
8,688 | def linearBlend(img1, img2, overlap, backgroundColor=None):
(sizex, sizey) = img1.shape[:2]
overlapping = True
if overlap < 0:
overlapping = False
overlap = -overlap
alpha = np.tile(np.expand_dims(np.linspace(1, 0, overlap), 1), sizey)
if len(img2.shape) == 3:
alpha = np.dstack(([alpha for _ in range(img2.shape[2])]))
if overlapping:
img1_cut = img1[sizex - overlap:sizex, :]
img2_cut = img2[0:overlap, :]
else:
img1_cut = np.tile(img1[-min(sizex, 5):, :].mean(
axis=0), (overlap, 1)).reshape(alpha.shape)
img2_cut = np.tile(img2[:min(img2.shape[0], 5), :].mean(
axis=0), (overlap, 1)).reshape(alpha.shape)
inter = (img1_cut * alpha + img2_cut * (1 - alpha)).astype(img1.dtype)
if backgroundColor is not None:
mask = np.logical_and(img1_cut == backgroundColor,
img2_cut != backgroundColor)
inter[mask] = img2_cut[mask]
mask = np.logical_and(img2_cut == backgroundColor,
img1_cut != backgroundColor)
inter[mask] = img1_cut[mask]
if not overlapping:
overlap = 0
return np.vstack((img1[0:sizex - overlap, :],
inter,
img2[overlap:, :])) | Stitch 2 images vertically together.
Smooth the overlap area of both images with a linear fade from img1 to img2
@param img1: numpy.2dArray
@param img2: numpy.2dArray of the same shape[1,2] as img1
@param overlap: number of pixels both images overlap
@returns: stitched-image |
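A hedged sketch of the core idea in linearBlend: a linear alpha ramp over the overlapping rows of two images. It uses small synthetic arrays; the record's background-colour handling and "negative overlap" gap mode are omitted, and blend_vertical is an illustrative name.

import numpy as np

def blend_vertical(img1, img2, overlap):
    alpha = np.linspace(1.0, 0.0, overlap)[:, None]           # fade img1 -> img2
    top = img1[:-overlap]
    seam = img1[-overlap:] * alpha + img2[:overlap] * (1.0 - alpha)
    bottom = img2[overlap:]
    return np.vstack([top, seam.astype(img1.dtype), bottom])

a = np.full((6, 4), 100, dtype=np.uint8)
b = np.full((6, 4), 200, dtype=np.uint8)
stitched = blend_vertical(a, b, overlap=3)
print(stitched.shape)   # (9, 4): 6 + 6 rows minus the 3 overlapping rows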
8,689 | def set_level(logger=None, log_level=None):
log_level = logging.getLevelName(os.getenv(, ))
logging.getLogger(logger).setLevel(log_level) | Set logging levels using logger names.
:param logger: Name of the logger
:type logger: String
:param log_level: A string or integer corresponding to a Python logging level
:type log_level: String
:rtype: None |
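A hedged usage sketch for a helper like set_level above. The environment variable name in the record was lost in extraction, so LOG_LEVEL below is a hypothetical stand-in, not necessarily what the original code reads.

import logging
import os

def set_level(logger=None, default="INFO"):
    # LOG_LEVEL is a hypothetical variable name; the original was stripped.
    level = os.getenv("LOG_LEVEL", default)
    logging.getLogger(logger).setLevel(level)

logging.basicConfig()
set_level("myapp.db")                        # only the 'myapp.db' logger is affected
print(logging.getLogger("myapp.db").level)   # 20 (INFO) unless LOG_LEVEL overrides it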
8,690 | def get_dated_items(self):
self.date_list, self.object_list, extra_context = super(
EntryWeek, self).get_dated_items()
self.date_list = self.get_date_list(self.object_list, 'day')
extra_context['week_end_day'] = extra_context[
'week'] + datetime.timedelta(days=6)
return self.date_list, self.object_list, extra_context | Override get_dated_items to add a useful 'week_end_day'
variable in the extra context of the view. |
8,691 | def check_text(self, text):
if to_text_string(text) == u'':
self.button_ok.setEnabled(False)
else:
self.button_ok.setEnabled(True) | Disable empty layout name possibility |
8,692 | def find_fields(self, classname=".*", fieldname=".*", fieldtype=".*", accessflags=".*"):
for cname, c in self.classes.items():
if re.match(classname, cname):
for f in c.get_fields():
z = f.get_field()
if re.match(fieldname, z.get_name()) and \
re.match(fieldtype, z.get_descriptor()) and \
re.match(accessflags, z.get_access_flags_string()):
yield f | find fields by regex
:param classname: regular expression of the classname
:param fieldname: regular expression of the fieldname
:param fieldtype: regular expression of the fieldtype
:param accessflags: regular expression of the access flags
:rtype: generator of `FieldClassAnalysis` |
8,693 | def distribution_to_markdown(distribution):
text_template =
if "field" in distribution:
fields = "- " + \
"\n- ".join(map(field_to_markdown, distribution["field"]))
else:
fields = ""
text = text_template.format(
title=distribution["title"],
description=distribution.get("description", ""),
fields=fields
)
return text | Genera texto en markdown a partir de los metadatos de una
`distribution`.
Args:
distribution (dict): Diccionario con metadatos de una
`distribution`.
Returns:
str: Texto que describe una `distribution`. |
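A hedged sketch of what distribution_to_markdown above does. The original text_template literal was lost in extraction, so the template and the field_to_markdown helper below are plausible stand-ins, not the real ones.

# Hypothetical template; the original string was stripped from the record.
TEXT_TEMPLATE = "### {title}\n\n{description}\n\n{fields}\n"

def field_to_markdown(field):
    # Stand-in for the helper referenced in the record.
    return "**{}**: {}".format(field.get("title", ""), field.get("description", ""))

def distribution_to_markdown(distribution):
    if "field" in distribution:
        fields = "- " + "\n- ".join(map(field_to_markdown, distribution["field"]))
    else:
        fields = ""
    return TEXT_TEMPLATE.format(
        title=distribution["title"],
        description=distribution.get("description", ""),
        fields=fields,
    )

print(distribution_to_markdown({
    "title": "Series A",
    "description": "Monthly values",
    "field": [{"title": "fecha", "description": "observation date"}],
}))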
8,694 | def from_rational(
cls,
value,
to_base,
precision=None,
method=RoundingMethods.ROUND_DOWN
):
if to_base < 2:
raise BasesValueError(to_base, "to_base", "must be at least 2")
if precision is not None and precision < 0:
raise BasesValueError(precision, "precision", "must be at least 0")
if value == 0:
non_repeating_part = [] if precision is None else precision * [0]
return (Radix(0, [], non_repeating_part, [], to_base), 0)
if value < 0:
sign = -1
else:
sign = 1
div_method = method
if sign == -1:
value = abs(value)
div_method = cls._reverse_rounding_method(method)
numerator = Nats.convert_from_int(value.numerator, to_base)
denominator = Nats.convert_from_int(value.denominator, to_base)
(integer_part, non_repeating_part, repeating_part, relation) = \
NatDivision.division(
denominator,
numerator,
to_base,
precision,
div_method
)
relation = relation * sign
result = Radix(
sign,
integer_part,
non_repeating_part,
repeating_part,
to_base
)
if precision is not None:
(result, rel) = result.rounded(precision, method)
relation = relation if rel == 0 else rel
return (result, relation) | Convert rational value to a base.
:param Rational value: the value to convert
:param int to_base: base of result, must be at least 2
:param precision: number of digits in total or None
:type precision: int or NoneType
:param method: rounding method
:type method: element of RoundingMethods.METHODS()
:returns: the conversion result and its relation to actual result
:rtype: Radix * int
:raises BasesValueError: if to_base is less than 2
Complexity: Uncalculated. |
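A conceptual sketch of what from_rational above computes: the digits of a rational number in an arbitrary base, to a fixed precision, rounding toward zero. This is a simplified stand-in, not the Radix/NatDivision machinery in the record, and it ignores repeating-part detection and the relation flag.

from fractions import Fraction

def to_base(value, base, precision):
    sign = -1 if value < 0 else 1
    value = abs(value)
    int_part, frac = divmod(value, 1)
    int_digits = []
    n = int(int_part)
    while n:
        n, d = divmod(n, base)
        int_digits.append(d)
    int_digits.reverse()
    frac_digits = []
    for _ in range(precision):
        frac *= base
        d, frac = divmod(frac, 1)
        frac_digits.append(int(d))
    return sign, int_digits or [0], frac_digits

print(to_base(Fraction(1, 3), 10, 5))   # (1, [0], [3, 3, 3, 3, 3])
print(to_base(Fraction(-5, 2), 2, 3))   # (-1, [1, 0], [1, 0, 0])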
8,695 | def url(**attributes):
def check_url(value):
validate(text, value)
parsed = urlparse(value)
if not parsed.netloc:
raise ValueError(" is not a valid URL".format(value))
for name, schema in attributes.items():
if not _hasattr(parsed, name):
raise ValueError("Invalid URL attribute ".format(name))
try:
validate(schema, _getattr(parsed, name))
except ValueError as err:
raise ValueError(
"Unable to validate URL attribute : {1}".format(
name, err
)
)
return True
if attributes.get("scheme") == "http":
attributes["scheme"] = any("http", "https")
return check_url | Parses an URL and validates its attributes. |
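A minimal standalone sketch of the attribute-checking idea in url() above: parse the URL, then validate selected attributes against simple schemas (here just callables and exact strings), rather than the full validate()/any() schema system the record belongs to. The check_url name and examples are illustrative.

from urllib.parse import urlparse

def check_url(value, **attributes):
    parsed = urlparse(value)
    if not parsed.netloc:
        raise ValueError("{0} is not a valid URL".format(value))
    for name, expected in attributes.items():
        actual = getattr(parsed, name)
        ok = expected(actual) if callable(expected) else actual == expected
        if not ok:
            raise ValueError("Unable to validate URL attribute {0}".format(name))
    return True

print(check_url("https://example.com/live", scheme=lambda s: s in ("http", "https")))
print(check_url("https://example.com/live", path="/live"))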
8,696 | def deleteQueue(destinationRoot, queueArk, debug=False):
url = urlparse.urljoin(destinationRoot, "APP/queue/" + queueArk + "/")
response, content = doWaitWebRequest(url, "DELETE")
if response.getcode() != 200:
raise Exception(
"Error updating queue %s to url %s. Response code is %s\n%s" %
(queueArk, url, response.getcode(), content)
) | Delete an entry from the queue |
8,697 | def get_variants(self, arch=None, types=None, recursive=False):
types = types or []
result = []
if "self" in types:
result.append(self)
for variant in six.itervalues(self.variants):
if types and variant.type not in types:
continue
if arch and arch not in variant.arches.union(["src"]):
continue
result.append(variant)
if recursive:
result.extend(variant.get_variants(types=[i for i in types if i != "self"], recursive=True))
result.sort(key=lambda x: x.uid)
return result | Return all variants of given arch and types.
Supported variant types:
self - include the top-level ("self") variant as well
addon
variant
optional |
8,698 | def times_update(self, factor):
if factor < 0:
raise ValueError("The factor must not be negative.")
elif factor == 0:
self.clear()
else:
_elements = self._elements
for element in _elements:
_elements[element] *= factor
self._total *= factor | Update each this multiset by multiplying each element's multiplicity with the given scalar factor.
>>> ms = Multiset('aab')
>>> ms.times_update(2)
>>> sorted(ms)
['a', 'a', 'a', 'a', 'b', 'b']
You can also use the ``*=`` operator for the same effect:
>>> ms = Multiset('ac')
>>> ms *= 3
>>> sorted(ms)
['a', 'a', 'a', 'c', 'c', 'c']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`times`.
Args:
factor: The factor to multiply each multiplicity with. |
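For comparison, a hedged sketch of the same multiply-update on a plain collections.Counter; the record's Multiset also maintains a cached running total, which a Counter does not.

from collections import Counter

def times_update(counter, factor):
    if factor < 0:
        raise ValueError("The factor must not be negative.")
    if factor == 0:
        counter.clear()
        return
    for element in counter:
        counter[element] *= factor   # keys are unchanged, only multiplicities scale

ms = Counter("aab")
times_update(ms, 2)
print(sorted(ms.elements()))   # ['a', 'a', 'a', 'a', 'b', 'b']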
8,699 | def get_session_id(self):
max_session =
try:
with open(self.log_folder + os.sep + , ) as f:
for _ in f:
txt = f.readline()
if txt.strip() != :
max_session = txt
except Exception:
max_session =
this_session = str(int(max_session) + random.randint(9,100)).zfill(9)
with open(self.log_folder + os.sep + , ) as f2:
f2.write(this_session + )
return this_session | get a unique id (shortish string) to allow simple aggregation
of log records from multiple sources. This id is used for the
life of the running program to allow extraction from all logs.
WARING - this can give duplicate sessions when 2 apps hit it
at the same time. |