Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
376,500 | def validate_edge_direction(edge_direction):
if not isinstance(edge_direction, six.string_types):
raise TypeError(u'Expected string edge_direction, but got {}: {}'.format(
type(edge_direction), edge_direction))
if edge_direction not in ALLOWED_EDGE_DIRECTIONS:
raise ValueError(u'Received invalid edge direction: {}'.format(edge_direction)) | Ensure the provided edge direction is either "in" or "out". |
376,501 | def django_include(context, template_name, **kwargs):
try:
djengine = engines['django']
except KeyError as e:
raise TemplateDoesNotExist("Django template engine not configured in settings, so template cannot be found: {}".format(template_name)) from e
djtemplate = djengine.get_template(template_name)
djcontext = {}
djcontext.update(context)
djcontext.update(kwargs)
return djtemplate.render(djcontext, context['request']) | Mako tag to include a Django template within the current DMP (Mako) template.
Since this is a Django template, it is searched for using the Django search
algorithm (instead of the DMP app-based concept).
See https://docs.djangoproject.com/en/2.1/topics/templates/.
The current context is sent to the included template, which makes all context
variables available to the Django template. Any additional kwargs are added
to the context. |
376,502 | def sync_main(async_main, config_path=None, default_config=None,
should_validate_task=True, loop_function=asyncio.get_event_loop):
context = _init_context(config_path, default_config)
_init_logging(context)
if should_validate_task:
validate_task_schema(context)
loop = loop_function()
loop.run_until_complete(_handle_asyncio_loop(async_main, context)) | Entry point for scripts using scriptworker.
This function sets up the basic needs for a script to run. More specifically:
* it creates the scriptworker context and initializes it with the provided config
* the path to the config file is either taken from `config_path` or from `sys.argv[1]`.
* it verifies `sys.argv` doesn't have more arguments than the config path.
* it creates the asyncio event loop so that `async_main` can run
Args:
async_main (function): The function to call once everything is set up
config_path (str, optional): The path to the file to load the config from.
Loads from ``sys.argv[1]`` if ``None``. Defaults to None.
default_config (dict, optional): the default config to use for ``_init_context``.
defaults to None.
should_validate_task (bool, optional): whether we should validate the task
schema. Defaults to True.
loop_function (function, optional): the function to call to get the
event loop; here for testing purposes. Defaults to
``asyncio.get_event_loop``. |
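A minimal usage sketch for `sync_main` above: a hedged example assuming the function is importable as `scriptworker.client.sync_main` and that the config file path arrives via `sys.argv[1]`; the `async_main` body and the `work_dir` key are illustrative only.

```python
from scriptworker.client import sync_main  # assumed import path

async def async_main(context):
    # context.config is built from DEFAULT_CONFIG plus the YAML file at sys.argv[1]
    print("work_dir:", context.config.get("work_dir"))  # illustrative key

if __name__ == "__main__":
    # Creates the context, sets up logging, and runs async_main on a fresh event loop.
    sync_main(async_main, should_validate_task=False)
```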
376,503 | def submit_url(self, url, params={}, _extra_params={}):
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params) | Submit a website for analysis. |
376,504 | def proxy_global(name, no_expand_macro=False, fname=, args=()):
if no_expand_macro:
@property
def gSomething_no_func(self):
glob = self(getattr(ROOT, name))
def func():
return glob
glob.func = func
return glob
return gSomething_no_func
@property
def gSomething(self):
obj_func = getattr(getattr(ROOT, name), fname)
try:
obj = obj_func(*args)
except ReferenceError:
return None
return self(obj)
return gSomething | Used to automatically asrootpy ROOT's thread local variables |
376,505 | def get_threads_where_participant_is_active(self, participant_id):
participations = Participation.objects.\
filter(participant__id=participant_id).\
exclude(date_left__lte=now()).\
distinct().\
select_related()
return Thread.objects.\
filter(id__in=[p.thread.id for p in participations]).\
distinct() | Gets all the threads in which the current participant is involved. The method excludes threads where the participant has left. |
376,506 | def decode_intervals(self, encoded, duration=None, multi=True, sparse=False,
transition=None, p_state=None, p_init=None):
if np.isrealobj(encoded):
if multi:
if transition is None:
encoded = encoded >= 0.5
else:
encoded = viterbi_binary(encoded.T, transition,
p_init=p_init, p_state=p_state).T
elif sparse and encoded.shape[1] > 1:
if transition is None:
encoded = np.argmax(encoded, axis=1)[:, np.newaxis]
else:
encoded = viterbi_discriminative(encoded.T, transition,
p_init=p_init,
p_state=p_state)[:, np.newaxis]
elif not sparse:
if transition is None:
encoded = (encoded == np.max(encoded, axis=1, keepdims=True))
else:
encoded_ = viterbi_discriminative(encoded.T, transition,
p_init=p_init,
p_state=p_state)
encoded = np.zeros(encoded.shape, dtype=bool)
encoded[np.arange(len(encoded_)), encoded_] = True
if duration is None:
duration = 1 + encoded.shape[0]
else:
duration = 1 + time_to_frames(duration,
sr=self.sr,
hop_length=self.hop_length)
times = times_like(duration + 1,
sr=self.sr, hop_length=self.hop_length)
if sparse:
idx = np.where(encoded[1:] != encoded[:-1])[0]
else:
idx = np.where(np.max(encoded[1:] != encoded[:-1], axis=-1))[0]
idx = np.unique(np.append(idx, encoded.shape[0]))
delta = np.diff(np.append(-1, idx))
position = np.cumsum(np.append(0, delta))
return [(times[p], times[p + d], encoded[p])
for (p, d) in zip(position, delta)] | Decode labeled intervals into (start, end, value) triples
Parameters
----------
encoded : np.ndarray, shape=(n_frames, m)
Frame-level annotation encodings as produced by
``encode_intervals``
duration : None or float > 0
The max duration of the annotation (in seconds)
Must be greater than the length of encoded array.
multi : bool
If true, allow multiple labels per input frame.
If false, take the most likely label per input frame.
sparse : bool
If true, values are returned as indices, not one-hot.
If false, values are returned as one-hot encodings.
Only applies when `multi=False`.
transition : None or np.ndarray [shape=(m, m) or (2, 2) or (m, 2, 2)]
Optional transition matrix for each interval, used for Viterbi
decoding. If `multi=True`, then transition should be `(2, 2)` or
`(m, 2, 2)`-shaped. If `multi=False`, then transition should be
`(m, m)`-shaped.
p_state : None or np.ndarray [shape=(m,)]
Optional marginal probability for each label.
p_init : None or np.ndarray [shape=(m,)]
Optional marginal probability for each label.
Returns
-------
[(start, end, value)] : iterable of tuples
where `start` and `end` are the interval boundaries (in seconds)
and `value` is an np.ndarray, shape=(m,) of the encoded value
for this interval. |
376,507 | def pre_execute(self, execution, context):
path = self._fspath
if path:
path = path.format(
benchmark=context.benchmark,
api=execution[],
**execution.get(, {})
)
if self.clean_path:
shutil.rmtree(path, ignore_errors=True)
if execution[][] == :
path = osp.dirname(path)
if not osp.exists(path):
os.makedirs(path) | Make sure the named directory is created if possible |
376,508 | def standardize(self, x):
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= np.std(x, keepdims=True) + 1e-7
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies `featurewise_center`, '
'but it hasn\'t been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies `zca_whitening`, '
'but it hasn\'t been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x | Apply the normalization configuration to a batch of inputs.
# Arguments
x: batch of inputs to be normalized.
# Returns
The inputs, normalized. |
376,509 | def series_index(self, series):
for idx, s in enumerate(self):
if series is s:
return idx
raise ValueError() | Return the integer index of *series* in this sequence. |
376,510 | def min(self):
results = [x.ufuncs.min() for x in self.elem]
return np.min(results) | Return the minimum of ``self``.
See Also
--------
numpy.amin
max |
376,511 | def loads(astring):
try:
return marshal.loads(zlib.decompress(astring))
except zlib.error as e:
raise SerializerError(
'Cannot decompress data: "{}"'.format(str(e))
)
except Exception as e:
raise SerializerError(
'Cannot deserialize data: "{}"'.format(str(e))
) | Decompress and deserialize string into Python object via marshal. |
376,512 | def _update_tree_store(self):
self.list_store.clear()
if self.view_dict[] and isinstance(self.model, ContainerStateModel) and \
len(self.model.state.transitions) > 0:
for transition_id in self.combo[].keys():
t = self.model.state.transitions[transition_id]
if t.from_state is not None:
from_state = self.model.state.states[t.from_state]
from_state_label = from_state.name
from_outcome_label = from_state.outcomes[t.from_outcome].name
else:
from_state_label = "self (" + self.model.state.name + ")"
from_outcome_label = ""
if t.to_state is None:
to_state_label = "self (" + self.model.state.name + ")"
to_outcome = None if t.to_outcome is None else self.model.state.outcomes[t.to_outcome]
to_outcome_label = "None" if to_outcome is None else to_outcome.name
else:
if t.to_state == self.model.state.state_id:
to_state_label = "self (" + self.model.state.name + ")"
to_outcome_label = self.model.state.outcomes[t.to_outcome].name
else:
to_state_label = self.model.state.states[t.to_state].name
to_outcome_label = None
self.list_store.append([transition_id,
from_state_label,
from_outcome_label,
to_state_label,
to_outcome_label,
False,
t,
self.model.state,
True,
self.model.get_transition_m(transition_id)])
if self.view_dict[] and self.model.parent and \
len(self.model.parent.state.transitions) > 0:
for transition_id in self.combo[].keys():
try:
t = self.model.parent.state.transitions[transition_id]
from_state = None
if t.from_state is not None:
from_state = self.model.parent.states[t.from_state].state
if from_state is None:
from_state_label = "parent (" + self.model.state.parent.name + ")"
from_outcome_label = ""
elif from_state.state_id == self.model.state.state_id:
from_state_label = "self (" + from_state.name + ")"
from_outcome_label = from_state.outcomes[t.from_outcome].name
else:
from_state_label = from_state.name
from_outcome_label = from_state.outcomes[t.from_outcome].name
if t.to_state == self.model.parent.state.state_id:
to_state_label = "parent (" + self.model.parent.state.name + ")"
to_outcome_label = self.model.parent.state.outcomes[t.to_outcome].name
else:
if t.to_state == self.model.state.state_id:
to_state_label = "self (" + self.model.state.name + ")"
else:
to_state_label = self.model.parent.state.states[t.to_state].name
to_outcome_label = None
self.list_store.append([transition_id,
from_state_label,
from_outcome_label,
to_state_label,
to_outcome_label,
True,
t,
self.model.state,
True,
self.model.parent.get_transition_m(transition_id)])
except Exception as e:
logger.warning("There was a problem while updating the data-flow widget TreeStore. {0}".format(e)) | Updates TreeStore of the Gtk.ListView according internal combo knowledge gained by
_update_internal_data_base function call. |
376,513 | def show_progress(self):
from pyemma import config
if not hasattr(self, "_show_progress"):
val = config.show_progress_bars
self._show_progress = val
elif not config.show_progress_bars:
return False
return self._show_progress | whether to show the progress of heavy calculations on this object. |
376,514 | def _Rforce(self,R,phi=0.,t=0.):
return self._A*math.exp(-(t-self._to)**2./2./self._sigma2)\
/R*math.sin(self._alpha*math.log(R)
-self._m*(phi-self._omegas*t-self._gamma)) | NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-11-24 - Written - Bovy (NYU) |
376,515 | def complete_server(self, text, line, begidx, endidx):
return [i for i in PsiturkShell.server_commands if i.startswith(text)] | Tab-complete server command |
376,516 | def query_pager_by_slug(slug, current_page_num=1, tag=, order=False):
cat_rec = MCategory.get_by_slug(slug)
if cat_rec:
cat_id = cat_rec.uid
else:
return None
if cat_id.endswith():
cat_con = TabPost2Tag.par_id == cat_id
else:
cat_con = TabPost2Tag.tag_id == cat_id
if tag:
condition = {
: [tag]
}
recs = TabPost.select().join(
TabPost2Tag,
on=((TabPost.uid == TabPost2Tag.post_id) & (TabPost.valid == 1))
).where(
cat_con & TabPost.extinfo.contains(condition)
).order_by(
TabPost.time_update.desc()
).paginate(current_page_num, CMS_CFG[])
elif order:
recs = TabPost.select().join(
TabPost2Tag,
on=((TabPost.uid == TabPost2Tag.post_id) & (TabPost.valid == 1))
).where(
cat_con
).order_by(
TabPost.order.asc()
)
else:
recs = TabPost.select().join(
TabPost2Tag,
on=((TabPost.uid == TabPost2Tag.post_id) & (TabPost.valid == 1))
).where(
cat_con
).order_by(
TabPost.time_update.desc()
).paginate(current_page_num, CMS_CFG[])
return recs | Query pager via category slug. |
def Beach(fm, linewidth=2, facecolor='b', bgcolor='w', edgecolor='k',
alpha=1.0, xy=(0, 0), width=200, size=100, nofill=False,
zorder=100, axes=None):
try:
assert(len(width) == 2)
except TypeError:
width = (width, width)
mt = None
np1 = None
if isinstance(fm, MomentTensor):
mt = fm
np1 = MT2Plane(mt)
elif isinstance(fm, NodalPlane):
np1 = fm
elif len(fm) == 6:
mt = MomentTensor(fm[0], fm[1], fm[2], fm[3], fm[4], fm[5], 0)
np1 = MT2Plane(mt)
elif len(fm) == 3:
np1 = NodalPlane(fm[0], fm[1], fm[2])
else:
raise TypeError("Wrong input value for .")
if size < 100:
size = 100
if mt:
(T, N, P) = MT2Axes(mt)
if np.fabs(N.val) < EPSILON and np.fabs(T.val + P.val) < EPSILON:
colors, p = plotDC(np1, size, xy=xy, width=width)
else:
colors, p = plotMT(T, N, P, size,
plot_zerotrace=True, xy=xy, width=width)
else:
colors, p = plotDC(np1, size=size, xy=xy, width=width)
if nofill:
col = collections.PatchCollection([p[1]], match_original=False)
col.set_facecolor('none')
else:
col = collections.PatchCollection(p, match_original=False)
fc = [facecolor if c == 'b' else bgcolor for c in colors]
col.set_facecolors(fc)
if axes is not None:
col.set_transform(transforms.IdentityTransform())
for p in col._paths:
p.vertices -= xy
col.set_offsets(xy)
col._transOffset = axes.transData
col.set_edgecolor(edgecolor)
col.set_alpha(alpha)
col.set_linewidth(linewidth)
col.set_zorder(zorder)
return col | Return a beach ball as a collection which can be connected to a
current matplotlib axes instance (ax.add_collection).
S1, D1, and R1, the strike, dip and rake of one of the focal planes, can
be vectors of multiple focal mechanisms.
:param fm: Focal mechanism that is either number of mechanisms (NM) by 3
(strike, dip, and rake) or NM x 6 (M11, M22, M33, M12, M13, M23 - the
six independent components of the moment tensor, where the coordinate
system is 1,2,3 = Up,South,East which equals r,theta,phi). The strike
is of the first plane, clockwise relative to north.
The dip is of the first plane, defined clockwise and perpendicular to
strike, relative to horizontal such that 0 is horizontal and 90 is
vertical. The rake is of the first focal plane solution. 90 moves the
hanging wall up-dip (thrust), 0 moves it in the strike direction
(left-lateral), -90 moves it down-dip (normal), and 180 moves it
opposite to strike (right-lateral).
:param facecolor: Color to use for quadrants of tension; can be a string,
e.g. ``'r'``, ``'b'`` or three component color vector, [R G B].
Defaults to ``'b'`` (blue).
:param bgcolor: The background color. Defaults to ``'w'`` (white).
:param edgecolor: Color of the edges. Defaults to ``'k'`` (black).
:param alpha: The alpha level of the beach ball. Defaults to ``1.0``
(opaque).
:param xy: Origin position of the beach ball as tuple. Defaults to
``(0, 0)``.
:type width: int or tuple
:param width: Symbol size of beach ball, or tuple for elliptically
shaped patches. Defaults to size ``200``.
:param size: Controls the number of interpolation points for the
curves. Minimum is automatically set to ``100``.
:param nofill: Do not fill the beach ball, but only plot the planes.
:param zorder: Set zorder. Artists with lower zorder values are drawn
first.
:type axes: :class:`matplotlib.axes.Axes`
:param axes: Used to make beach balls circular on non-scaled axes. Also
maintains the aspect ratio when resizing the figure. Will not add
the returned collection to the axes instance. |
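A small, hedged usage sketch for the `Beach` helper above (assuming its module-level helpers such as `plotDC` and `MT2Plane` are available alongside it); the strike/dip/rake values are illustrative.

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
# Strike=45, dip=60, rake=90: an illustrative thrust mechanism.
collection = Beach([45, 60, 90], xy=(0.5, 0.5), width=0.3, axes=ax)
ax.add_collection(collection)  # added manually, as the docstring notes
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_aspect("equal")
plt.show()
```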
376,518 | def setColor( self, color ):
self.setBorderColor(color)
clr = QColor(color)
clr.setAlpha(150)
self.setHighlightColor(clr)
clr = QColor(color)
clr.setAlpha(80)
self.setFillColor(clr) | Convenience method to set the border, fill and highlight colors based
on the inputed color.
:param color | <QColor> |
376,519 | def _get_previous_open_tag(self, obj):
prev_instance = self.get_previous_instance(obj)
if prev_instance and prev_instance.plugin_type == self.__class__.__name__:
return prev_instance.glossary.get() | Return the open tag of the previous sibling |
376,520 | def derive_single_object_url_pattern(slug_url_kwarg, path, action):
if slug_url_kwarg:
return r % (path, action, slug_url_kwarg)
else:
return r % (path, action) | Utility function called by class methods for single object views |
376,521 | def to_dict(self):
return {
'sid': self.asset,
'amount': self.amount,
'cost_basis': self.cost_basis,
'last_sale_price': self.last_sale_price
} | Creates a dictionary representing the state of this position.
Returns a dict object of the form: |
376,522 | def _sort_lows_and_highs(func):
"Decorator for extract_cycles"
@functools.wraps(func)
def wrapper(*args, **kwargs):
for low, high, mult in func(*args, **kwargs):
if low < high:
yield low, high, mult
else:
yield high, low, mult
return wrapper | Decorator for extract_cycles |
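A quick illustration of the decorator's effect, using a stand-in generator instead of the real `extract_cycles`:

```python
@_sort_lows_and_highs
def fake_cycles():
    # Deliberately yields one pair in the "wrong" order.
    yield 3.0, 1.0, 0.5
    yield 2.0, 4.0, 1.0

print(list(fake_cycles()))  # [(1.0, 3.0, 0.5), (2.0, 4.0, 1.0)]
```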
376,523 | def _set_static_ag_ipv6_config(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=static_ag_ipv6_config.static_ag_ipv6_config, is_container=, presence=False, yang_name="static-ag-ipv6-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: None, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__static_ag_ipv6_config = t
if hasattr(self, ):
self._set() | Setter method for static_ag_ipv6_config, mapped from YANG variable /rbridge_id/ipv6/static_ag_ipv6_config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_static_ag_ipv6_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_static_ag_ipv6_config() directly. |
376,524 | def configurar_interface_de_rede(self, configuracao):
resp = self._http_post(,
configuracao=configuracao.documento())
conteudo = resp.json()
return RespostaSAT.configurar_interface_de_rede(conteudo.get()) | Overrides :meth:`~satcfe.base.FuncoesSAT.configurar_interface_de_rede`.
:return: A standard SAT response.
:rtype: satcfe.resposta.padrao.RespostaSAT |
376,525 | def set_prefs(prefs):
prefs[] = [, , ,
, , , , ]
prefs[] = True
prefs[] = False
prefs[] = True
prefs[] = 0
prefs[] = True
prefs[] = True
prefs[] = 32
prefs[] = True
prefs[] = False
| This function is called before opening the project |
376,526 | def age(self):
if not self.birthdate():
return -1
adjuster = 0
today = date.today()
birthday = self.birthdate()
if today.month == birthday.month:
if today.day < birthday.day:
adjuster -= 1
elif today.month < birthday.month:
adjuster -= 1
return (today.year - birthday.year) + adjuster | Returns the user's age, determined by their birthdate() |
376,527 | def tag_values(request):
data = defaultdict(lambda: {"values": {}})
for tag in Tag.objects.filter(lang=get_language(request)):
data[tag.type]["name"] = tag.type_name
data[tag.type]["values"][tag.value] = tag.value_name
return render_json(request, data, template=, help_text=tag_values.__doc__) | Get tags types and values with localized names
language:
language of tags |
376,528 | def send_keys(self, keys, wait=True):
self._process.stdin.write(bytearray(keys, self._encoding))
self._process.stdin.flush()
if wait:
self.wait() | Send a raw key sequence to *Vim*.
.. note:: *Vim* style key sequence notation (like ``<Esc>``)
is not recognized.
Use escaped characters (like ``'\033'``) instead.
Example:
>>> import headlessvim
>>> with headlessvim.open() as vim:
... vim.send_keys('ispam\033')
... str(vim.display_lines()[0].strip())
...
'spam'
:param string keys: key sequence to send
:param boolean wait: whether to wait for a response |
376,529 | def _split_path(path, seps=PATH_SEPS):
if not path:
return []
for sep in seps:
if sep in path:
if path == sep:
return []
return [x for x in path.split(sep) if x]
return [path] | Parse path expression and return list of path items.
:param path: Path expression may contain separator chars.
:param seps: Separator char candidates.
:return: A list of keys to fetch object[s] later.
>>> assert _split_path('') == []
>>> assert _split_path('/') == [''] # JSON Pointer spec expects this.
>>> for p in ('/a', '.a', 'a', 'a.'):
... assert _split_path(p) == ['a'], p
>>> assert _split_path('/a/b/c') == _split_path('a.b.c') == ['a', 'b', 'c']
>>> assert _split_path('abc') == ['abc'] |
376,530 | def loadUnStructuredGrid(filename):
reader = vtk.vtkUnstructuredGridReader()
reader.SetFileName(filename)
reader.Update()
gf = vtk.vtkUnstructuredGridGeometryFilter()
gf.SetInputConnection(reader.GetOutputPort())
gf.Update()
return Actor(gf.GetOutput()) | Load a ``vtkunStructuredGrid`` object from file and return a ``Actor(vtkActor)`` object. |
376,531 | def arrays2wcxf(C):
d = {}
for k, v in C.items():
if np.shape(v) == () or np.shape(v) == (1,):
d[k] = v
else:
ind = np.indices(v.shape).reshape(v.ndim, v.size).T
for i in ind:
name = k + '_' + ''.join([str(int(j) + 1) for j in i])
d[name] = v[tuple(i)]
return d | Convert a dictionary with Wilson coefficient names as keys and
numbers or numpy arrays as values to a dictionary with a Wilson coefficient
name followed by underscore and numeric indices as keys and numbers as
values. This is needed for the output in WCxf format. |
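A hedged sketch of the key naming this produces; the separator reconstructed above (an underscore followed by one-based indices run together) is an assumption, and the coefficient names here are made up.

```python
import numpy as np

C = {"C9": 1.2, "lq1": np.array([[0.1, 0.2], [0.3, 0.4]])}
print(arrays2wcxf(C))
# {'C9': 1.2, 'lq1_11': 0.1, 'lq1_12': 0.2, 'lq1_21': 0.3, 'lq1_22': 0.4}
```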
376,532 | def iflat_nodes(self, status=None, op="==", nids=None):
nids = as_set(nids)
if status is None:
if not (nids and self.node_id not in nids):
yield self
for work in self:
if nids and work.node_id not in nids: continue
yield work
for task in work:
if nids and task.node_id not in nids: continue
yield task
else:
op = operator_from_str(op)
status = Status.as_status(status)
if not (nids and self.node_id not in nids):
if op(self.status, status): yield self
for wi, work in enumerate(self):
if nids and work.node_id not in nids: continue
if op(work.status, status): yield work
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if op(task.status, status): yield task | Generators that produces a flat sequence of nodes.
if status is not None, only the tasks with the specified status are selected.
nids is an optional list of node identifiers used to filter the nodes. |
376,533 | def find_item_project(self, eitem):
ds_name = self.cfg_section_name if self.cfg_section_name else self.get_connector_name()
try:
if self.projects_json_repo:
project = self.prjs_map[ds_name][self.projects_json_repo]
project = self.prjs_map[ds_name][ds_repo]
break
if project == UNKNOWN_PROJECT:
project = None
return project | Find the project for a enriched item
:param eitem: enriched item for which to find the project
:return: the project entry (a dictionary) |
376,534 | def minion_pub(self, load):
if not self.__verify_minion_publish(load):
return {}
pub_load = {
: load[],
: salt.utils.args.parse_input(
load[],
no_parse=load.get(, [])),
: load.get(, ),
: load[],
: load[],
: load[],
}
if in load:
if load[].startswith():
if load[] in self.opts[]:
pub_load[] = self.opts[][load[]]
pub_load[] =
else:
return {}
else:
pub_load[] = load[]
ret = {}
ret[] = self.local.cmd_async(**pub_load)
_res = self.ckminions.check_minions(
load[],
pub_load[])
ret[] = _res[]
auth_cache = os.path.join(
self.opts[],
)
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, six.text_type(ret[]))
with salt.utils.files.fopen(jid_fn, ) as fp_:
fp_.write(salt.utils.stringutils.to_str(load[]))
return ret | Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
foo.example.com:
- test.*
This configuration will only allow the minion foo.example.com to
execute commands from the test module |
376,535 | def median1d(self, name, return_errors=False):
if return_errors:
mid = self.data[name][]
low, high = self.data[name][]
return (mid, low, high)
else:
return self.data[name][] | Return median 1d marginalized parameters
Parameters
----------
name: str
The name of the parameter requested
return_errors: Optional, {bool, False}
If true, return a second and third parameter that represents the
lower and upper 90% error on the parameter.
Returns
-------
param: float or tuple
The requested parameter |
376,536 | def html_single_plot(self,abfID,launch=False,overwrite=False):
if type(abfID) is str:
abfID=[abfID]
for thisABFid in cm.abfSort(abfID):
parentID=cm.parent(self.groups,thisABFid)
saveAs=os.path.abspath("%s/%s_plot.html"%(self.folder2,parentID))
if overwrite is False and os.path.basename(saveAs) in self.files2:
continue
filesByType=cm.filesByType(self.groupFiles[parentID])
html=""
html+=
html+=%parentID
html+=%os.path.abspath(self.folder1+"/"+parentID+".abf")
html+=
for fname in filesByType[]:
html+=self.htmlFor(fname)
print("creating",saveAs,)
style.save(html,saveAs,launch=launch) | create ID_plot.html of just intrinsic properties. |
376,537 | def blockstack_tx_filter( tx ):
if 'nulldata' not in tx:
return False
if tx['nulldata'] is None:
return False
payload = binascii.unhexlify( tx['nulldata'] )
if payload.startswith(blockstack_magic_bytes()):
return True
else:
return False | Virtualchain tx filter function:
* only take txs whose OP_RETURN payload starts with 'id' |
376,538 | def check_version():
if sys.version_info[0:3] == PYTHON_VERSION_INFO[0:3]:
return
sys.exit(
ansi.error() +
+ os.linesep + os.linesep + BIN_PYTHON +
os.linesep
) | Sanity check version information for corrupt virtualenv symlinks |
376,539 | def _print_speed(self):
if self._bandwidth_meter.num_samples:
speed = self._bandwidth_meter.speed()
if self._human_format:
file_size_str = wpull.string.format_size(speed)
else:
file_size_str = .format(speed * 8)
speed_str = _().format(
preformatted_file_size=file_size_str
)
else:
speed_str = _()
self._print(speed_str) | Print the current speed. |
376,540 | def form_field(self):
"Returns appropriate form field."
label = unicode(self)
defaults = dict(required=False, label=label, widget=self.widget)
defaults.update(self.extra)
return self.field_class(**defaults) | Returns appropriate form field. |
376,541 | def create_namespace(self, namespace):
std_namespace = _ensure_unicode(namespace.strip())
ws_profiles = self.get_selected_profiles(, )
if ws_profiles:
ws_profiles_sorted = sorted(
ws_profiles, key=lambda prof: prof[])
ws_profile_inst = ws_profiles_sorted[-1]
ws_insts = self.get_central_instances(ws_profile_inst.path)
if len(ws_insts) != 1:
raise CIMError(
CIM_ERR_FAILED,
_format("Unexpected number of central instances of WBEM "
"Server profile: {0!A}",
[i.path for i in ws_insts]))
ws_inst = ws_insts[0]
ns_inst = CIMInstance()
ns_inst[] = std_namespace
try:
(ret_val, out_params) = self._conn.InvokeMethod(
MethodName="CreateWBEMServerNamespace",
ObjectName=ws_inst.path,
Params=[(, ns_inst)])
except CIMError as exc:
if exc.status_code in (CIM_ERR_METHOD_NOT_FOUND,
CIM_ERR_METHOD_NOT_AVAILABLE,
CIM_ERR_NOT_SUPPORTED):
pass
else:
raise
else:
if ret_val != 0:
raise CIMError(
CIM_ERR_FAILED,
_format("The CreateWBEMServerNamespace() method is "
"implemented but failed: {0}",
out_params[]))
else:
if self.brand == "OpenPegasus":
ns_classname =
else:
ns_classname =
ns_inst = CIMInstance(ns_classname)
if self.brand == "OpenPegasus":
ns_inst[] = True
ns_inst[] = std_namespace
ns_inst[] = ns_classname
ns_inst[] = self.cimom_inst[]
ns_inst[] = \
self.cimom_inst[]
ns_inst[] = self.cimom_inst[]
ns_inst[] = \
self.cimom_inst[]
self.conn.CreateInstance(ns_inst, namespace=self.interop_ns)
self._determine_namespaces()
return std_namespace | Create the specified CIM namespace in the WBEM server and
update this WBEMServer object to reflect the new namespace
there.
This method attempts the following approaches for creating the
namespace, in order, until an approach succeeds:
1. Namespace creation as described in the WBEM Server profile
(:term:`DSP1092`) via CIM method
`CIM_WBEMServer.CreateWBEMServerNamespace()`.
This is a new standard approach that is not likely to be
widely implemented yet.
2. Issuing the `CreateInstance` operation using the CIM class
representing namespaces ('PG_Namespace' for OpenPegasus,
and 'CIM_Namespace' otherwise), against the Interop namespace.
This approach is typically supported in WBEM servers that
support the creation of CIM namespaces. This approach is
similar to the approach described in :term:`DSP0200`.
Creating namespaces using the `__Namespace` pseudo-class has been
deprecated already in DSP0200 1.1.0 (released in 01/2003), and pywbem
does not implement that approach.
Parameters:
namespace (:term:`string`): CIM namespace name. Must not be `None`.
The namespace may contain leading and a trailing slash, both of
which will be ignored.
Returns:
:term:`unicode string`: The specified CIM namespace name in its
standard format (i.e. without leading or trailing slash characters).
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
ModelError: An issue with the model implemented by the WBEM server.
CIMError: CIM_ERR_ALREADY_EXISTS, Specified namespace already
exists in the WBEM server.
CIMError: CIM_ERR_NOT_FOUND, Interop namespace could not be
determined.
CIMError: CIM_ERR_NOT_FOUND, Unexpected number of
`CIM_ObjectManager` instances.
CIMError: CIM_ERR_FAILED, Unexpected number of
central instances of WBEM Server profile. |
376,542 | def hicpro_mapping_chart (self):
keys = OrderedDict()
keys[] = { : , : }
keys[] = { : , : }
keys[] = { : , : }
data = [{},{}]
for s_name in self.hicpro_data:
for r in [1,2]:
data[r-1][.format(s_name, r)] = {
: self.hicpro_data[s_name][.format(r)],
: self.hicpro_data[s_name][.format(r)],
: int(self.hicpro_data[s_name][.format(r)]) - int(self.hicpro_data[s_name][.format(r)])
}
config = {
: ,
: ,
: ,
: ,
: [
{: , : },
{: , : }
]
}
return bargraph.plot(data, [keys, keys], config) | Generate the HiC-Pro Aligned reads plot |
376,543 | def match_file(filename):
base_name = os.path.basename(filename)
if base_name.startswith('.'):
return False
if not os.path.isdir(filename) and not filename.lower().endswith('.py'):
return False
return True | Return True if file is okay for modifying/recursing. |
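A quick check of the filter, assuming the reconstructed `'.'` and `'.py'` literals above; the paths are illustrative.

```python
print(match_file("pkg/module.py"))  # True: a .py file, not a dotfile
print(match_file(".hidden.py"))     # False: dotfiles are skipped
print(match_file("notes.txt"))      # False: not a directory and not a .py file
```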
376,544 | def show_instances(server, cim_class):
if cim_class == 'CIM_RegisteredProfile':
for inst in server.profiles:
print(inst.tomof())
return
for ns in server.namespaces:
try:
insts = server.conn.EnumerateInstances(cim_class, namespace=ns)
if len(insts):
print('Instances of %s in namespace %s:' % (cim_class, ns))
for inst in insts:
print(inst.tomof())
except pywbem.Error as er:
if er.status_code != pywbem.CIM_ERR_INVALID_CLASS:
print('Enumerate of %s in namespace %s of %s failed: %s'
% (cim_class, ns, server, er)) | Display the instances of the CIM_Class defined by cim_class. If the
namespace is None, use the interop namespace. Search all namespaces for
instances except for CIM_RegisteredProfile |
376,545 | def _FindLargestIdPostfixNumber(self, schedule):
postfix_number_re = re.compile(r'(\d+)$')
def ExtractPostfixNumber(entity_id):
if entity_id is None:
return 0
match = postfix_number_re.search(entity_id)
if match is not None:
return int(match.group(1))
else:
return 0
id_data_sets = {'agency_id': schedule.GetAgencyList(),
'stop_id': schedule.GetStopList(),
'route_id': schedule.GetRouteList(),
'trip_id': schedule.GetTripList(),
'service_id': schedule.GetServicePeriodList(),
'fare_id': schedule.GetFareAttributeList(),
'shape_id': schedule.GetShapeList()}
max_postfix_number = 0
for id_name, entity_list in id_data_sets.items():
for entity in entity_list:
entity_id = getattr(entity, id_name)
postfix_number = ExtractPostfixNumber(entity_id)
max_postfix_number = max(max_postfix_number, postfix_number)
return max_postfix_number | Finds the largest integer used as the ending of an id in the schedule.
Args:
schedule: The schedule to check.
Returns:
The maximum integer used as an ending for an id. |
376,546 | def image_uuid(pil_img):
print()
img_bytes_ = pil_img.tobytes()
uuid_ = hashable_to_uuid(img_bytes_)
return uuid_ | UNSAFE: DEPRECATE: JPEG IS NOT GUARANTEED TO PRODUCE CONSISTENT VALUES ON
MULTIPLE MACHINES. Image global unique id.
References:
http://stackoverflow.com/questions/23565889/jpeg-images-have-different-pixel-values-across-multiple-devices |
376,547 | def create_config(config_path="scriptworker.yaml"):
if not os.path.exists(config_path):
print("{} doesn't exist! Exiting...".format(config_path), file=sys.stderr)
sys.exit(1)
with open(config_path, "r", encoding="utf-8") as fh:
secrets = safe_load(fh)
config = dict(deepcopy(DEFAULT_CONFIG), **secrets)
credentials = config.pop('credentials', None)
config = get_frozen_copy(config)
return config, credentials | Create a config from DEFAULT_CONFIG, arguments, and config file.
Then validate it and freeze it.
Args:
config_path (str, optional): the path to the config file. Defaults to
"scriptworker.yaml"
Returns:
tuple: (config frozendict, credentials dict)
Raises:
SystemExit: on failure |
376,548 | def add_pagination_meta(self, params, meta):
meta['page'] = params['page']
meta['page_size'] = params['page_size']
meta['prev'] = "page={0}&page_size={1}".format(
params['page'] - 1, params['page_size']
) if meta['page'] > 0 else None
meta['next'] = "page={0}&page_size={1}".format(
params['page'] + 1, params['page_size']
) if meta.get('has_more', True) else None | Extend default meta dictionary value with pagination hints.
Note:
This method handler attaches values to ``meta`` dictionary without
changing it's reference. This means that you should never replace
``meta`` dictionary with any other dict instance but simply modify
its content.
Args:
params (dict): dictionary of decoded parameter values
meta (dict): dictionary of meta values attached to response |
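A small sketch of the handler's effect; the key names ('page', 'page_size', 'prev', 'next', 'has_more') follow the reconstruction above and should be treated as assumptions. Since `self` is unused in the body, it is passed as `None` here.

```python
meta = {"has_more": True}
add_pagination_meta(None, {"page": 2, "page_size": 10}, meta)
print(meta["prev"])  # 'page=1&page_size=10'
print(meta["next"])  # 'page=3&page_size=10'
```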
376,549 | def upload(self, fileobj, tileset, name=None, patch=False, callback=None, bypass=False):
tileset = self._validate_tileset(tileset)
url = self.stage(fileobj, callback=callback)
return self.create(url, tileset, name=name, patch=patch, bypass=bypass) | Upload data and create a Mapbox tileset
Effectively replicates the Studio upload feature. Returns a
Response object, the json() of which returns a dict with upload
metadata.
Parameters
----------
fileobj: file object or str
A filename or a Python file object opened in binary mode.
tileset: str
A tileset identifier such as '{owner}.my-tileset'.
name: str
A short name for the tileset that will appear in Mapbox
studio.
patch: bool
Optional patch mode which requires a flag on the owner's
account.
bypass: bool
Optional bypass validation mode for MBTiles which requires
a flag on the owner's account.
callback: func
A function that takes a number of bytes processed as its
sole argument. May be used with a progress bar.
Returns
-------
requests.Response |
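An illustrative call, assuming this is the `upload` method of the Mapbox Python SDK's `Uploader` service; the filename and tileset id are placeholders.

```python
from mapbox import Uploader  # assumed home of this method

service = Uploader()  # reads MAPBOX_ACCESS_TOKEN from the environment
with open("tiles.mbtiles", "rb") as src:
    response = service.upload(src, "myuser.my-tileset", name="My tileset")
print(response.status_code)
```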
376,550 | def _get_ordering_field_lookup(self, field_name):
field = field_name
get_field = getattr(self, "get_%s_ordering_field" % field_name, None)
if get_field:
field = get_field()
return field | get real model field to order by |
376,551 | def _model_foreign(ins):
fks = []
for t in ins.tables:
fks.extend([
SaForeignkeyDoc(
key=fk.column.key,
target=fk.target_fullname,
onupdate=fk.onupdate,
ondelete=fk.ondelete
)
for fk in t.foreign_keys])
return fks | Get foreign keys info
:type ins: sqlalchemy.orm.mapper.Mapper
:rtype: list[SaForeignkeyDoc] |
376,552 | def _wrap_class(request_handler, validator):
METHODS = ['get', 'post', 'put', 'patch', 'delete', 'head', 'options']
for name in METHODS:
method = getattr(request_handler, name)
setattr(request_handler, name, _auth_required(method, validator))
return request_handler | Decorate each HTTP verb method to check if the request is authenticated
:param request_handler: a tornado.web.RequestHandler instance |
376,553 | def clear(self, decorated_function=None):
if decorated_function is not None and decorated_function in self._storage:
self._storage.pop(decorated_function)
else:
self._storage.clear()
if self.__statistic is True:
self.__cache_missed = 0
self.__cache_hit = 0 | :meth:`WCacheStorage.clear` method implementation (Clears statistics also) |
376,554 | def _gti_dirint_lt_90(poa_global, aoi, aoi_lt_90, solar_zenith, solar_azimuth,
times, surface_tilt, surface_azimuth, pressure=101325.,
use_delta_kt_prime=True, temp_dew=None, albedo=.25,
model='perez', model_perez='allsitescomposite1990',
max_iterations=30):
I0 = get_extra_radiation(times, 1370, 'spencer')
cos_zenith = tools.cosd(solar_zenith)
I0h = I0 * np.maximum(0.065, cos_zenith)
airmass = atmosphere.get_relative_airmass(solar_zenith, model='kasten1966')
airmass = atmosphere.get_absolute_airmass(airmass, pressure)
coeffs = np.empty(max(30, max_iterations))
coeffs[0:3] = 1
coeffs[3:10] = 0.5
coeffs[10:20] = 0.25
coeffs[20:] = 0.125
coeffs = coeffs[:max_iterations]
diff = pd.Series(9999, index=times)
best_diff = diff
poa_global_i = poa_global
for iteration, coeff in enumerate(coeffs):
best_diff_lte_1 = best_diff <= 1
best_diff_lte_1_lt_90 = best_diff_lte_1[aoi_lt_90]
if best_diff_lte_1_lt_90.all():
break
kt = clearness_index(poa_global_i, aoi, I0)
disc_dni = np.maximum(_disc_kn(kt, airmass)[0] * I0, 0)
kt_prime = clearness_index_zenith_independent(kt, airmass)
dni = _dirint_from_dni_ktprime(disc_dni, kt_prime, solar_zenith,
use_delta_kt_prime, temp_dew)
ghi = kt * I0h
dhi = ghi - dni * cos_zenith
dni = np.maximum(dni, 0)
ghi = np.maximum(ghi, 0)
dhi = np.maximum(dhi, 0)
all_irrad = get_total_irradiance(
surface_tilt, surface_azimuth, solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=I0, airmass=airmass,
albedo=albedo, model=model, model_perez=model_perez)
gti_model = all_irrad['poa_global']
diff = gti_model - poa_global
diff_abs = diff.abs()
smallest_diff = diff_abs < best_diff
best_diff = diff_abs.where(smallest_diff, best_diff)
if iteration == 0:
best_ghi = ghi
best_dni = dni
best_dhi = dhi
best_kt_prime = kt_prime
else:
best_ghi = ghi.where(smallest_diff, best_ghi)
best_dni = dni.where(smallest_diff, best_dni)
best_dhi = dhi.where(smallest_diff, best_dhi)
best_kt_prime = kt_prime.where(smallest_diff, best_kt_prime)
poa_global_i = np.maximum(1.0, poa_global_i - coeff * diff)
else:
import warnings
failed_points = best_diff[aoi_lt_90][~best_diff_lte_1_lt_90]
warnings.warn(
('%d points failed to converge after %d iterations: %s'
% (len(failed_points), max_iterations, failed_points)),
RuntimeWarning)
return best_ghi, best_dni, best_dhi, best_kt_prime | GTI-DIRINT model for AOI < 90 degrees. See Marion 2015 Section 2.1.
See gti_dirint signature for parameter details. |
376,555 | def is_course_run_enrollable(course_run):
now = datetime.datetime.now(pytz.UTC)
end = parse_datetime_handle_invalid(course_run.get('end'))
enrollment_start = parse_datetime_handle_invalid(course_run.get('enrollment_start'))
enrollment_end = parse_datetime_handle_invalid(course_run.get('enrollment_end'))
return (not end or end > now) and \
(not enrollment_start or enrollment_start < now) and \
(not enrollment_end or enrollment_end > now) | Return true if the course run is enrollable, false otherwise.
We look for the following criteria:
- end is greater than now OR null
- enrollment_start is less than now OR null
- enrollment_end is greater than now OR null |
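A tiny check of the rule with a hand-built course-run dict; the field names follow the reconstructed `.get()` keys above, and `parse_datetime_handle_invalid` is assumed to accept ISO-8601 strings and `None`.

```python
run = {
    "end": "2030-01-01T00:00:00Z",               # in the future
    "enrollment_start": "2020-01-01T00:00:00Z",  # already open
    "enrollment_end": None,                      # open-ended enrollment window
}
print(is_course_run_enrollable(run))  # True for the dates above
```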
376,556 | def findall(text):
results = TIMESTRING_RE.findall(text)
dates = []
for date in results:
if re.compile(, re.I).match(date[0]):
dates.append((date[0].strip(), Range(date[0])))
else:
dates.append((date[0].strip(), Date(date[0])))
return dates | Find all the timestrings within a block of text.
>>> timestring.findall("once upon a time, about 3 weeks ago, there was a boy whom was born on august 15th at 7:20 am. epic.")
[
('3 weeks ago,', <timestring.Date 2014-02-09 00:00:00 4483019280>),
('august 15th at 7:20 am', <timestring.Date 2014-08-15 07:20:00 4483019344>)
] |
376,557 | def save(self):
username, email, password = (self.cleaned_data[],
self.cleaned_data[],
self.cleaned_data[])
user = get_user_model().objects.create_user(username, email, password,
not defaults.ACCOUNTS_ACTIVATION_REQUIRED,
defaults.ACCOUNTS_ACTIVATION_REQUIRED)
return user | Creates a new user and account. Returns the newly created user. |
376,558 | def unstack_annotations(annotations_sframe, num_rows=None):
_raise_error_if_not_sframe(annotations_sframe, variable_name="annotations_sframe")
cols = ['label', 'type', 'coordinates']
has_confidence = 'confidence' in annotations_sframe.column_names()
if has_confidence:
cols.append('confidence')
if num_rows is None:
if len(annotations_sframe) == 0:
num_rows = 0
else:
num_rows = annotations_sframe['row_id'].max() + 1
sf = annotations_sframe
sf['type'] = 'rectangle'
sf = sf.pack_columns(['x', 'y', 'width', 'height'], dtype=dict,
new_column_name='coordinates')
sf = sf.pack_columns(cols, dtype=dict, new_column_name='ann')
sf = sf.unstack('ann', new_column_name='annotations')
sf_all_ids = _tc.SFrame({'row_id': range(num_rows)})
sf = sf.join(sf_all_ids, on='row_id', how='right')
sf = sf.fillna('annotations', [])
sf = sf.sort('row_id')
annotations_sarray = sf['annotations']
if has_confidence:
annotations_sarray = annotations_sarray.apply(
lambda x: sorted(x, key=lambda ann: ann['confidence'], reverse=True),
dtype=list)
return annotations_sarray | Converts object detection annotations (ground truth or predictions) to
unstacked format (an `SArray` where each element is a list of object
instances).
Parameters
----------
annotations_sframe: SFrame
An `SFrame` with stacked predictions, produced by the
`stack_annotations` function.
num_rows: int
Optionally specify the number of rows in your original dataset, so that
all get represented in the unstacked format, regardless of whether or
not they had instances or not.
Returns
-------
annotations_sarray: An `SArray` with unstacked annotations.
See also
--------
stack_annotations
Examples
--------
If you have annotations in stacked format:
>>> stacked_predictions
Data:
+--------+------------+-------+-------+-------+-------+--------+
| row_id | confidence | label | x | y | width | height |
+--------+------------+-------+-------+-------+-------+--------+
| 0 | 0.98 | dog | 123.0 | 128.0 | 80.0 | 182.0 |
| 0 | 0.67 | cat | 150.0 | 183.0 | 129.0 | 101.0 |
| 1 | 0.8 | dog | 50.0 | 432.0 | 65.0 | 98.0 |
+--------+------------+-------+-------+-------+-------+--------+
[3 rows x 7 columns]
They can be converted to unstacked format using this function:
>>> turicreate.object_detector.util.unstack_annotations(stacked_predictions)[0]
[{'confidence': 0.98,
'coordinates': {'height': 182.0, 'width': 80.0, 'x': 123.0, 'y': 128.0},
'label': 'dog',
'type': 'rectangle'},
{'confidence': 0.67,
'coordinates': {'height': 101.0, 'width': 129.0, 'x': 150.0, 'y': 183.0},
'label': 'cat',
'type': 'rectangle'}] |
376,559 | def events(cls, filters):
current = filters.pop(, False)
current_params = []
if current:
current_params = [(, )]
filter_url = uparse.urlencode(sorted(list(filters.items())) + current_params)
events = cls.json_get( % (cls.api_url, filter_url),
empty_key=True, send_key=False)
return events | Retrieve events details from status.gandi.net. |
376,560 | async def async_poller(client, initial_response, deserialization_callback, polling_method):
try:
client = client if isinstance(client, ServiceClientAsync) else client._client
except AttributeError:
raise ValueError("Poller client parameter must be a low-level msrest Service Client or a SDK client.")
response = initial_response.response if isinstance(initial_response, ClientRawResponse) else initial_response
if isinstance(deserialization_callback, type) and issubclass(deserialization_callback, Model):
deserialization_callback = deserialization_callback.deserialize
polling_method.initialize(client, response, deserialization_callback)
await polling_method.run()
return polling_method.resource() | Async Poller for long running operations.
:param client: A msrest service client. Can be a SDK client and it will be casted to a ServiceClient.
:type client: msrest.service_client.ServiceClient
:param initial_response: The initial call response
:type initial_response: msrest.universal_http.ClientResponse or msrest.pipeline.ClientRawResponse
:param deserialization_callback: A callback that takes a Response and return a deserialized object. If a subclass of Model is given, this passes "deserialize" as callback.
:type deserialization_callback: callable or msrest.serialization.Model
:param polling_method: The polling strategy to adopt
:type polling_method: msrest.polling.PollingMethod |
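A hedged usage sketch: `async_poller` is awaited from another coroutine, with the polling strategy left as a parameter (any `msrest.polling.PollingMethod` implementation).

```python
async def wait_for_operation(client, initial_response, polling_method):
    # Identity deserialization callback, for illustration only.
    result = await async_poller(client, initial_response,
                                lambda response: response,
                                polling_method)
    return result
```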
376,561 | def note_list(self, body_matches=None, post_id=None, post_tags_match=None,
creator_name=None, creator_id=None, is_active=None):
params = {
: body_matches,
: post_id,
: post_tags_match,
: creator_name,
: creator_id,
: is_active
}
return self._get(, params) | Return list of notes.
Parameters:
body_matches (str): The note's body matches the given terms.
post_id (int): A specific post.
post_tags_match (str): The note's post's tags match the given terms.
creator_name (str): The creator's name. Exact match.
creator_id (int): The creator's user id.
is_active (bool): Can be: True, False. |
376,562 | def model_to_select_list(model_class, filter_dict=None, q_filter=None):
if filter_dict is None:
filter_dict = {}
if q_filter is not None:
filter_list = [q_filter]
else:
filter_list = []
objects = model_class.objects.filter(
*filter_list, **filter_dict).values('id', 'name')
return list(objects) | Select only id and name, used for building a selection list.
:param model_class:
:param filter_dict:
:param q_filter:
:return: |
376,563 | def determinize(m):
if not m.is_finite():
raise TypeError("machine must be a finite automaton")
transitions = collections.defaultdict(lambda: collections.defaultdict(set))
alphabet = set()
for transition in m.get_transitions():
[[lstate], read] = transition.lhs
[[rstate]] = transition.rhs
if len(read) > 1:
raise NotSupportedException("multiple input symbols on transition not supported")
if len(read) == 1:
alphabet.add(read[0])
transitions[lstate][tuple(read)].add(rstate)
class Set(frozenset):
def __str__(self):
return "{{{}}}".format(",".join(map(str, sorted(self))))
def _repr_html_(self):
return "{{{}}}".format(",".join(x._repr_html_() for x in sorted(self)))
def eclosure(states):
states = set(states)
frontier = set(states)
while len(frontier) > 0:
lstate = frontier.pop()
for rstate in transitions[lstate][()]:
if rstate not in states:
states.add(rstate)
frontier.add(rstate)
return states
dm = FiniteAutomaton()
start_state = Set(eclosure([m.get_start_state()]))
dm.set_start_state(start_state)
frontier = {start_state}
visited = set()
while len(frontier) > 0:
lstates = frontier.pop()
if lstates in visited:
continue
visited.add(lstates)
dtransitions = collections.defaultdict(set)
for lstate in lstates:
for read in alphabet:
dtransitions[read] |= transitions[lstate][(read,)]
for read in alphabet:
rstates = Set(eclosure(dtransitions[read]))
dm.add_transition([[lstates], read], [[rstates]])
frontier.add(rstates)
accept_states = set(m.get_accept_states())
for states in visited:
if len(states & accept_states) > 0:
dm.add_accept_state(states)
return dm | Determinizes a finite automaton. |
def hpforest(self, data: ['SASdata', str] = None,
freq: str = None,
id: str = None,
input: [str, list, dict] = None,
save: str = None,
score: [str, bool, 'SASdata'] = True,
target: [str, list, dict] = None,
procopts: str = None,
stmtpassthrough: str = None,
**kwargs: dict) -> 'SASresults':
| Python method to call the HPFOREST procedure
Documentation link:
https://support.sas.com/documentation/solutions/miner/emhp/14.1/emhpprcref.pdf
:param data: SASdata object or string. This parameter is required.
:parm freq: The freq variable can only be a string type.
:parm id: The id variable can only be a string type.
:parm input: The input variable can be a string, list or dict type. It refers to the dependent, y, or label variable. This parameter is required
:parm save: The save variable can only be a string type.
:parm score: The score variable can only be a string type.
:parm target: The target variable can be a string, list or dict type. It refers to the dependent, y, or label variable. This parameter is required
:parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object |
376,565 | def _repr_html_row_(self, keys):
tr, th, c = , ,
r =
h =
for k in keys:
v = self.__dict__.get(k)
if k == :
k =
c = utils.text_colour_for_hex(v)
style = .format(c, v)
else:
style =
if k == :
try:
v = v._repr_html_()
except AttributeError:
v = v.__repr__()
tr += r.format(v=v, stl=style)
th += h.format(k=k)
return th, tr | Jupyter Notebook magic repr function as a row – used by
``Legend._repr_html_()``. |
376,566 | def check_denovo_input(inputfile, params):
background = params["background"]
input_type = determine_file_type(inputfile)
if input_type == "fasta":
valid_bg = FA_VALID_BGS
elif input_type in ["bed", "narrowpeak"]:
genome = params["genome"]
valid_bg = BED_VALID_BGS
if "genomic" in background or "gc" in background:
Genome(genome)
check_bed_file(inputfile)
else:
sys.stderr.write("Format of inputfile {} not recognized.\n".format(inputfile))
sys.stderr.write("Input should be FASTA, BED or narrowPeak.\n")
sys.stderr.write("See https://genome.ucsc.edu/FAQ/FAQformat.html for specifications.\n")
sys.exit(1)
for bg in background:
if not bg in valid_bg:
logger.info("Input type is %s, ignoring background type ",
input_type, bg)
background = [bg for bg in background if bg in valid_bg]
if len(background) == 0:
logger.error("No valid backgrounds specified!")
sys.exit(1)
return input_type, background | Check if an input file is valid, which means BED, narrowPeak or FASTA |
376,567 | def re_evaluate(local_dict=None):
try:
compiled_ex = _numexpr_last['ex']
except KeyError:
raise RuntimeError("not a previous evaluate() execution found")
argnames = _numexpr_last['argnames']
args = getArguments(argnames, local_dict)
kwargs = _numexpr_last['kwargs']
with evaluate_lock:
return compiled_ex(*args, **kwargs) | Re-evaluate the previous executed array expression without any check.
This is meant for accelerating loops that are re-evaluating the same
expression repeatedly without changing anything else than the operands.
If unsure, use evaluate() which is safer.
Parameters
----------
local_dict : dictionary, optional
A dictionary that replaces the local operands in current frame. |
376,568 | def write_by_templ(templ, target, sub_value, safe=False):
templ_txt = read_file(templ)
txt = None
if safe:
txt = Template(templ_txt).safe_substitute(sub_value)
else:
txt = Template(templ_txt).substitute(sub_value)
write_file(target, txt) | 根据模版写入文件。
:param str templ: 模版文件所在路径。
:param str target: 要写入的文件所在路径。
:param dict sub_value: 被替换的内容。 |
376,569 | def _imply_options(self):
self.no_upload = self.no_upload or self.to_stdout or self.offline
self.auto_update = self.auto_update and not self.offline
if (self.analyze_container or
self.analyze_file or
self.analyze_mountpoint or
self.analyze_image_id):
self.analyze_container = True
self.to_json = self.to_json or self.analyze_container
self.register = (self.register or self.reregister) and not self.offline
self.keep_archive = self.keep_archive or self.no_upload
if self.payload:
self.legacy_upload = False | Some options enable others automatically |
376,570 | def process_flat_files(id_mappings_file, complexes_file=None, ptm_file=None,
ppi_file=None, seq_file=None, motif_window=7):
id_df = pd.read_csv(id_mappings_file, delimiter='\t', names=_hprd_id_cols,
dtype='str')
id_df = id_df.set_index('HPRD_ID')
if complexes_file is None and ptm_file is None and ppi_file is None:
raise ValueError('At least one of complexes_file, ptm_file, or '
'ppi_file must be given.')
if ptm_file and not seq_file:
raise ValueError('If ptm_file is given, seq_file must also be given.')
cplx_df = None
if complexes_file:
cplx_df = pd.read_csv(complexes_file, delimiter='\t', names=_cplx_cols,
dtype='str', na_values=['-', 'None'])
ptm_df = None
seq_dict = None
if ptm_file:
ptm_df = pd.read_csv(ptm_file, delimiter='\t', names=_ptm_cols,
dtype='str', na_values='-')
seq_dict = load_fasta_sequences(seq_file, id_index=2)
ppi_df = None
if ppi_file:
ppi_df = pd.read_csv(ppi_file, delimiter='\t', names=_ppi_cols,
dtype='str')
return HprdProcessor(id_df, cplx_df, ptm_df, ppi_df, seq_dict, motif_window) | Get INDRA Statements from HPRD data.
Of the arguments, `id_mappings_file` is required, and at least one of
`complexes_file`, `ptm_file`, and `ppi_file` must also be given. If
`ptm_file` is given, `seq_file` must also be given.
Note that many proteins (> 1,600) in the HPRD content are associated with
outdated RefSeq IDs that cannot be mapped to Uniprot IDs. For these, the
Uniprot ID obtained from the HGNC ID (itself obtained from the Entrez ID)
is used. Because the sequence referenced by the Uniprot ID obtained this
way may be different from the (outdated) RefSeq sequence included with the
HPRD content, it is possible that this will lead to invalid site positions
with respect to the Uniprot IDs.
To allow these site positions to be mapped during assembly, the
Modification statements produced by the HprdProcessor include an additional
key in the `annotations` field of their Evidence object. The annotations
field is called 'site_motif' and it maps to a dictionary with three
elements: 'motif', 'respos', and 'off_by_one'. 'motif' gives the peptide
sequence obtained from the RefSeq sequence included with HPRD. 'respos'
indicates the position in the peptide sequence containing the residue.
Note that these positions are ONE-INDEXED (not zero-indexed). Finally, the
'off-by-one' field contains a boolean value indicating whether the correct
position was inferred as being an off-by-one (methionine cleavage) error.
If True, it means that the given residue could not be found in the HPRD
RefSeq sequence at the given position, but a matching residue was found at
position+1, suggesting a sequence numbering based on the methionine-cleaved
sequence. The peptide included in the 'site_motif' dictionary is based on
this updated position.
Parameters
----------
id_mappings_file : str
Path to HPRD_ID_MAPPINGS.txt file.
complexes_file : Optional[str]
Path to PROTEIN_COMPLEXES.txt file.
ptm_file : Optional[str]
Path to POST_TRANSLATIONAL_MODIFICATIONS.txt file.
ppi_file : Optional[str]
Path to BINARY_PROTEIN_PROTEIN_INTERACTIONS.txt file.
seq_file : Optional[str]
Path to PROTEIN_SEQUENCES.txt file.
motif_window : int
Number of flanking amino acids to include on each side of the
PTM target residue in the 'site_motif' annotations field of the
Evidence for Modification Statements. Default is 7.
Returns
-------
HprdProcessor
An HprdProcessor object which contains a list of extracted INDRA
Statements in its statements attribute. |
376,571 | def get_n_excluded_patches(self):
base = self.get_patches_base()
if not base:
return 0
p = base.rfind('+')
if p == -1:
return 0
try:
n = int(base[p+1:])
return n
except TypeError:
return 0 | Gets number of excluded patches from patches_base:
#patches_base=1.0.0+THIS_NUMBER |
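A toy illustration of the parsing rule the docstring describes (`patches_base=1.0.0+N`), using a stand-in class that supplies `get_patches_base`:

```python
class FakeSpec:
    get_n_excluded_patches = get_n_excluded_patches  # reuse the method above

    def get_patches_base(self):
        return "1.0.0+2"  # hypothetical patches_base value

print(FakeSpec().get_n_excluded_patches())  # 2
```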
376,572 | def remove_info_file():
try:
os.unlink(_get_info_file_path())
except OSError as e:
if e.errno == errno.ENOENT:
pass
else:
raise | Remove the current process's TensorBoardInfo file, if it exists.
If the file does not exist, no action is taken and no error is raised. |
376,573 | def removeFile(file):
if "y" in speech.question("Are you sure you want to remove " + file + "? (Y/N): "):
speech.speak("Removing " + file + " with the command.")
subprocess.call(["rm", "-r", file])
else:
speech.speak("Okay, I won't remove " + file + ".") | remove a file |
376,574 | def topfnfile(self, fileobj):
for entry in self:
print >>fileobj, entry.path
fileobj.close() | write a cache object to filename as a plain text pfn file |
376,575 | def run_file(path_or_file, context=None):
if context is None:
context = EvalJs()
if not isinstance(context, EvalJs):
raise TypeError()
eval_value = context.eval(get_file_contents(path_or_file))
return eval_value, context | Context must be EvalJS object. Runs given path as a JS program. Returns (eval_value, context). |
376,576 | def get_books_for_schedule(self, schedule):
slns = self._get_slns(schedule)
books = {}
for sln in slns:
try:
section_books = self.get_books_by_quarter_sln(
schedule.term.quarter, sln
)
books[sln] = section_books
except DataFailureException:
pass
return books | Returns a dictionary of data. SLNs are the keys, an array of Book
objects are the values. |
376,577 | def handle_errors(
cls, message, *format_args,
re_raise=True, exception_class=Exception,
do_finally=None, do_except=None, do_else=None,
**format_kwds
):
try:
yield
except exception_class as err:
try:
final_message = cls.reformat_exception(
message, err, *format_args, **format_kwds
)
except Exception as msg_err:
raise cls(
"Failed while formatting message: {}".format(repr(msg_err))
)
trace = cls.get_traceback()
if do_except is not None:
do_except(err, final_message, trace)
if re_raise:
raise cls(final_message).with_traceback(trace)
else:
if do_else is not None:
do_else()
finally:
if do_finally is not None:
do_finally() | provides a context manager that will intercept exceptions and repackage
them as Buzz instances with a message attached:
.. code-block:: python
with Buzz.handle_errors("It didn't work"):
some_code_that_might_raise_an_exception()
:param: message: The message to attach to the raised Buzz
:param: format_args: Format arguments. Follows str.format conv.
:param: format_kwds: Format keyword args. Follows str.format conv.
:param: re_raise: If true, the re-packaged exception will be
raised
:param: exception_class: Limits the class of exceptions that will be
re-packaged as a Buzz exception.
Any other exception types will not be caught
and re-packaged.
Defaults to Exception (will handle all
exceptions)
:param: do_finally: A function that should always be called at the
end of the block. Should take no parameters
:param: do_except: A function that should be called only if there
was an exception. Should take the raised
exception as its first parameter, the final
message for the exception that will be raised
as its second, and the traceback as its third
:param: do_else: A function that should be called only if there
were no exceptions encountered |
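A minimal usage sketch, assuming handle_errors is exposed as a classmethod context manager on a Buzz subclass as the docstring describes; the subclass and callback names here are illustrative:

.. code-block:: python

    class UploadError(Buzz):
        pass

    def report(err, final_message, trace):
        # Called only when an exception was intercepted.
        print("failed:", final_message)

    with UploadError.handle_errors(
        "could not upload {}", "report.csv",
        do_except=report,
        do_finally=lambda: print("cleanup done"),
    ):
        raise ValueError("disk full")  # re-raised as UploadError with the formatted message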
376,578 | def recordAndPropagate(self, request: Request, clientName):
self.requests.add(request)
self.propagate(request, clientName)
self.tryForwarding(request) | Record the request in the list of requests and propagate.
:param request:
:param clientName: |
376,579 | def is_cf_trajectory(nc, variable):
dims = nc.variables[variable].dimensions
cmatrix = coordinate_dimension_matrix(nc)
for req in ('x', 'y', 't'):
if req not in cmatrix:
return False
if len(cmatrix['x']) != 2:
return False
if cmatrix['x'] != cmatrix['y']:
return False
if cmatrix['x'] != cmatrix['t']:
return False
if 'z' in cmatrix and cmatrix['x'] != cmatrix['z']:
return False
if dims == cmatrix['x']:
return True
return False | Returns true if the variable is a CF trajectory feature type
:param netCDF4.Dataset nc: An open netCDF dataset
:param str variable: name of the variable to check |
376,580 | def parse_pkcs12(data, password=None):
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
,
type_name(data)
))
if password is not None:
if not isinstance(password, byte_cls):
raise TypeError(pretty_message(
,
type_name(password)
))
else:
password = b''
certs = {}
private_keys = {}
pfx = pkcs12.Pfx.load(data)
auth_safe = pfx['auth_safe']
if auth_safe['content_type'].native != 'data':
raise ValueError(pretty_message(
))
authenticated_safe = pfx.authenticated_safe
mac_data = pfx['mac_data']
if mac_data:
mac_algo = mac_data['mac']['digest_algorithm']['algorithm'].native
key_length = {
'sha1': 20,
'sha224': 28,
'sha256': 32,
'sha384': 48,
'sha512': 64,
'sha512_224': 28,
'sha512_256': 32,
}[mac_algo]
mac_key = pkcs12_kdf(
mac_algo,
password,
mac_data['mac_salt'].native,
mac_data['iterations'].native,
key_length,
3
)
hash_mod = getattr(hashlib, mac_algo)
computed_hmac = hmac.new(mac_key, auth_safe['content'].contents, hash_mod).digest()
stored_hmac = mac_data['mac']['digest'].native
if not constant_compare(computed_hmac, stored_hmac):
raise ValueError()
for content_info in authenticated_safe:
content = content_info['content']
if isinstance(content, core.OctetString):
_parse_safe_contents(content.native, certs, private_keys, password)
elif isinstance(content, cms.EncryptedData):
encrypted_content_info = content['encrypted_content_info']
encryption_algorithm_info = encrypted_content_info['content_encryption_algorithm']
encrypted_content = encrypted_content_info['encrypted_content'].native
decrypted_content = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password)
_parse_safe_contents(decrypted_content, certs, private_keys, password)
else:
raise ValueError(pretty_message(
))
key_fingerprints = set(private_keys.keys())
cert_fingerprints = set(certs.keys())
common_fingerprints = sorted(list(key_fingerprints & cert_fingerprints))
key = None
cert = None
other_certs = []
if len(common_fingerprints) >= 1:
fingerprint = common_fingerprints[0]
key = private_keys[fingerprint]
cert = certs[fingerprint]
other_certs = [certs[f] for f in certs if f != fingerprint]
return (key, cert, other_certs)
if len(private_keys) > 0:
first_key = sorted(list(private_keys.keys()))[0]
key = private_keys[first_key]
if len(certs) > 0:
first_key = sorted(list(certs.keys()))[0]
cert = certs[first_key]
del certs[first_key]
if len(certs) > 0:
other_certs = sorted(list(certs.values()))
return (key, cert, other_certs) | Parses a PKCS#12 ANS.1 DER-encoded structure and extracts certs and keys
:param data:
A byte string of a DER-encoded PKCS#12 file
:param password:
A byte string of the password to any encrypted data
:raises:
ValueError - when any of the parameters are of the wrong type or value
OSError - when an error is returned by one of the OS decryption functions
:return:
A three-element tuple of:
1. An asn1crypto.keys.PrivateKeyInfo object
2. An asn1crypto.x509.Certificate object
3. A list of zero or more asn1crypto.x509.Certificate objects that are
"extra" certificates, possibly intermediates from the cert chain |
376,581 | def percent(self, value):
raise_not_number(value)
self.gap = '{}%'.format(value)
return self | Set the margin as a percentage. |
376,582 | def synchronize(self, graph_data=None):
profile = graph_data or self.graph.get()
self.facebook_username = profile.get('username')
self.first_name = profile.get('first_name')
self.middle_name = profile.get('middle_name')
self.last_name = profile.get('last_name')
self.birthday = datetime.strptime(profile['birthday'], '%m/%d/%Y') if profile.has_key('birthday') else None
self.email = profile.get('email')
self.locale = profile.get('locale')
self.gender = profile.get('gender')
self.extra_data = profile
self.save() | Synchronize ``facebook_username``, ``first_name``, ``middle_name``,
``last_name`` and ``birthday`` with Facebook.
:param graph_data: Optional pre-fetched graph data |
376,583 | def dtype_contract(input_dtype=None, output_dtype=None):
def wrap(function):
@wraps(function)
def wrapped_function(*args, **kwargs):
if input_dtype is not None:
check_dtype(args[0], input_dtype)
array = function(*args, **kwargs)
if output_dtype is not None:
check_dtype(array, output_dtype)
return array
return wrapped_function
return wrap | Function decorator for specifying input and/or output array dtypes. |
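A short usage sketch; it assumes check_dtype raises when the array's dtype does not match, which is what the decorator relies on:

.. code-block:: python

    import numpy as np

    @dtype_contract(input_dtype=np.float64, output_dtype=np.float64)
    def normalize(x):
        return x / np.linalg.norm(x)

    normalize(np.array([3.0, 4.0]))   # both dtype checks pass
    # normalize(np.array([3, 4]))     # int64 input would trip the input_dtype check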
376,584 | def predecessors(self, node, exclude_compressed=True):
preds = super(Graph, self).predecessors(node)
if exclude_compressed:
return [n for n in preds if not self.node[n].get('compressed', False)]
else:
return preds | Returns the list of predecessors of a given node
Parameters
----------
node : str
The target node
exclude_compressed : boolean
If true, compressed nodes are excluded from the predecessors list
Returns
-------
list
List of predecessors nodes |
376,585 | def enumeration(*values, **kwargs):
if not (values and all(isinstance(value, string_types) and value for value in values)):
raise ValueError("expected a non-empty sequence of strings, got %s" % values)
if len(values) != len(set(values)):
raise ValueError("enumeration items must be unique, got %s" % values)
attrs = {value: value for value in values}
attrs.update({
"_values": list(values),
"_default": values[0],
"_case_sensitive": kwargs.get("case_sensitive", True),
"_quote": kwargs.get("quote", False),
})
return type(str("Enumeration"), (Enumeration,), attrs)() | Create an |Enumeration| object from a sequence of values.
Call ``enumeration`` with a sequence of (unique) strings to create an
Enumeration object:
.. code-block:: python
#: Specify the horizontal alignment for rendering text
TextAlign = enumeration("left", "right", "center")
Args:
values (str) : string enumeration values, passed as positional arguments
The order of arguments is the order of the enumeration, and the
first element will be considered the default value when used
to create |Enum| properties.
Keyword Args:
case_sensitive (bool, optional) :
Whether validation should consider case or not (default: True)
quote (bool, optional):
Whether values should be quoted in the string representations
(default: False)
Raises:
ValueError if values empty, if any value is not a string or not unique
Returns:
Enumeration |
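A quick usage sketch mirroring the docstring's own example, assuming the surrounding module's Enumeration base class is importable:

.. code-block:: python

    TextAlign = enumeration("left", "right", "center")

    TextAlign.left        # "left" -- each value is exposed as an attribute
    TextAlign._default    # "left", the first value passed in
    TextAlign._values     # ["left", "right", "center"]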
376,586 | def print_all():
_, conf = read_latoolscfg()
default = conf[][]
pstr =
for s in conf.sections():
if s == default:
pstr += s +
elif s == :
pstr += s +
else:
pstr += s +
for k, v in conf[s].items():
if k != :
if v[:9] == :
v = pkgrs.resource_filename(, v)
pstr += + k + + v +
pstr +=
print(pstr)
return | Prints all currently defined configurations. |
376,587 | def get_pr(pr_num, config=None, repo=DEFAULT_REPO, raw=False):
response = requests.get(PR_ENDPOINT.format(repo, pr_num), auth=get_auth_info(config))
if raw:
return response
else:
response.raise_for_status()
return response.json() | Get the payload for the given PR number. Let exceptions bubble up. |
376,588 | def nu_max(self, *args):
return 3120.* (self.mass(*args) /
(self.radius(*args)**2 * np.sqrt(self.Teff(*args)/5777.))) | Returns asteroseismic nu_max in uHz
reference: https://arxiv.org/pdf/1312.3853v1.pdf, Eq (3) |
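Written out, the scaling relation implemented above is the following, assuming mass and radius are returned in solar units, as the solar-calibrated prefactor (3120 uHz) and solar effective temperature (5777 K) imply:

.. math::

    \nu_{\max} \approx 3120\,\mu\mathrm{Hz}\;\frac{M/M_\odot}{\left(R/R_\odot\right)^{2}\,\sqrt{T_\mathrm{eff}/5777\,\mathrm{K}}}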
376,589 | def _download_astorb(
self):
self.log.info()
url = self.settings["astorb"]["url"]
print "Downloading orbital elements from " % locals()
response = urllib2.urlopen(url)
data = response.read()
astorbgz = "/tmp/astorb.dat.gz"
file_ = open(astorbgz, 'wb')
file_.write(data)
file_.close()
print "Finished downloading orbital elements" % locals()
self.log.info()
return astorbgz | *download the astorb database file*
**Key Arguments:**
- ``astorbgz`` -- path to the downloaded astorb database file |
376,590 | def strain(self, ifo, duration=32, sample_rate=4096):
from astropy.utils.data import download_file
from pycbc.frame import read_frame
length = "{}sec".format(duration)
if sample_rate == 4096:
sampling = "4KHz"
elif sample_rate == 16384:
sampling = "16KHz"
channel = "{}:GWOSC-{}_R1_STRAIN".format(ifo, sampling.upper())
url = self.data[][ifo][length][sampling][]
filename = download_file(url, cache=True)
return read_frame(str(filename), str(channel)) | Return strain around the event
Currently this will return the strain around the event in the smallest
format available. Selection of other data is not yet available.
Parameters
----------
ifo: str
The name of the observatory you want strain for. Ex. H1, L1, V1
Returns
-------
strain: pycbc.types.TimeSeries
Strain around the event. |
376,591 | def _parse_materials(header, views):
try:
import PIL.Image
except ImportError:
log.warning("unable to load textures without pillow!")
return None
images = None
if "images" in header:
images = [None] * len(header["images"])
for i, img in enumerate(header["images"]):
blob = views[img["bufferView"]]
try:
images[i] = PIL.Image.open(util.wrap_as_stream(blob))
except BaseException:
log.error("failed to load image!", exc_info=True)
materials = []
if "materials" in header:
for mat in header["materials"]:
loopable = mat.copy()
if "pbrMetallicRoughness" in loopable:
loopable.update(loopable.pop("pbrMetallicRoughness"))
pbr = {}
for k, v in loopable.items():
if not isinstance(v, dict):
pbr[k] = v
elif "index" in v:
idx = header["textures"][v["index"]]["source"]
pbr[k] = images[idx]
materials.append(visual.texture.PBRMaterial(**pbr))
return materials | Convert materials and images stored in a GLTF header
and buffer views to PBRMaterial objects.
Parameters
------------
header : dict
Contains layout of file
views : (n,) bytes
Raw data
Returns
------------
materials : list
List of trimesh.visual.texture.Material objects |
376,592 | def delete_all(self, filter, force=False, timeout=-1):
uri = "{}?filter={}&force={}".format(self._base_uri, quote(filter), force)
logger.debug("Delete all resources (uri = %s)" % uri)
return self.delete(uri) | Deletes all resources from the appliance that match the provided filter.
Args:
filter:
A general filter/query string to narrow the list of items deleted.
force:
If set to true, the operation completes despite any problems with network connectivity or errors
on the resource itself. The default is false.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
bool: Indicates if the resources were successfully deleted. |
376,593 | def basic_params1():
return hparam.HParams(
batch_size=4096,
batch_shuffle_size=512,
use_fixed_batch_size=False,
num_hidden_layers=4,
kernel_height=3,
kernel_width=1,
hidden_size=64,
compress_steps=0,
dropout=0.2,
clip_grad_norm=2.0,
grad_noise_scale=0.0,
summarize_grads=False,
mlperf_mode=False,
summarize_vars=False,
initializer="orthogonal",
initializer_gain=1.5,
label_smoothing=0.1,
optimizer="adam",
optimizer_adam_epsilon=1e-6,
optimizer_adam_beta1=0.85,
optimizer_adam_beta2=0.997,
optimizer_momentum_momentum=0.9,
optimizer_momentum_nesterov=False,
optimizer_adafactor_beta1=0.0,
optimizer_adafactor_beta2=0.999,
optimizer_adafactor_factored=True,
optimizer_adafactor_decay_type="pow",
optimizer_adafactor_memory_exponent=0.8,
optimizer_adafactor_clipping_threshold=1.0,
optimizer_adafactor_multiply_by_parameter_scale=True,
optimizer_multistep_accumulate_steps=0,
mixed_precision_optimizer_loss_scaler="exponential",
mixed_precision_optimizer_init_loss_scale=2**15,
optimizer_zero_grads=False,
weight_decay=1e-6,
weight_noise=0.0,
learning_rate_schedule="legacy",
learning_rate_constant=1.0,
learning_rate_decay_scheme="none",
learning_rate_decay_steps=5000,
learning_rate_decay_staircase=False,
learning_rate_minimum=None,
learning_rate_decay_rate=1.0,
learning_rate_warmup_steps=100,
learning_rate_cosine_cycle_steps=250000,
learning_rate=0.1,
sampling_method="argmax",
sampling_temp=1.0,
sampling_keep_top_k=-1,
factored_logits=False,
multiply_embedding_mode="sqrt_depth",
moe_hidden_sizes="2048",
moe_num_experts=64,
moe_k=2,
moe_loss_coef=1e-2,
layer_preprocess_sequence="none",
layer_postprocess_sequence="dan",
layer_prepostprocess_dropout=0.1,
layer_prepostprocess_dropout_broadcast_dims="",
symbol_dropout=0.0,
norm_type="layer",
norm_epsilon=1e-6,
vocab_divisor=1,
min_length=0,
max_length=0,
pack_dataset=False,
use_custom_ops=True,
split_targets_chunk_length=0,
split_targets_max_chunks=100,
split_targets_strided_training=False,
min_length_bucket=8,
length_bucket_step=1.1,
eval_drop_long_sequences=False,
eval_run_autoregressive=False,
shared_embedding_and_softmax_weights=False,
shared_embedding=False,
symbol_modality_num_shards=1,
scheduled_sampling_prob=0.0,
scheduled_sampling_warmup_steps=50000,
scheduled_sampling_gold_mixin_prob=0.5,
daisy_chain_variables=True,
force_full_predict=False,
no_data_parallelism=False,
activation_dtype="float32",
weight_dtype="float32",
pretrained_model_dir="",
multiproblem_schedule_threshold=0.5,
multiproblem_per_task_threshold="",
multiproblem_schedule_max_examples=1e7,
multiproblem_mixing_schedule="constant",
multiproblem_reweight_label_loss=False,
multiproblem_label_weight=0.5,
max_relative_position=0,
heads_share_relative_embedding=False,
add_relative_to_values=False,
tpu_enable_host_call=False,
pad_batch=False,
multiproblem_target_eval_only=False,
multiproblem_vocab_size=-1,
multiproblem_max_input_length=-1,
multiproblem_max_target_length=-1,
multiproblem_fixed_train_length=-1,
warm_start_from_second="",
area_value_mode="none",
area_key_mode="none",
num_area_layers=0,
max_area_width=1,
max_area_height=1,
memory_height=1
) | A set of basic hyperparameters. |
376,594 | def _connect(self):
if self.connected:
return
self._connect_broker()
stack = self._build_stack()
self._connect_stack(stack) | Establish a connection to the master process's UNIX listener socket,
constructing a mitogen.master.Router to communicate with the master,
and a mitogen.parent.Context to represent it.
Depending on the original transport we should emulate, trigger one of
the _connect_*() service calls defined above to cause the master
process to establish the real connection on our behalf, or return a
reference to the existing one. |
376,595 | def get_geocode(city, state, street_address="", zipcode=""):
try:
key = settings.GMAP_KEY
except AttributeError:
return "You need to put GMAP_KEY in settings"
location = ""
if street_address:
location += .format(street_address.replace(" ", "+"))
location += .format(city.replace(" ", "+"), state)
if zipcode:
location += "+{}".format(zipcode)
url = "http://maps.google.com/maps/geo?q={}&output=xml&key={}".format(location, key)
file = urllib.urlopen(url).read()
try:
xml = xmltramp.parse(file)
except Exception as error:
print("Failed to parse xml file {}: {}".format(file, error))
return None
status = str(xml.Response.Status.code)
if status == "200":
geocode = str(xml.Response.Placemark.Point.coordinates).split(',')
return (geocode[1], geocode[0])
else:
print(status)
return None | For a given location or object, takes address data and returns
latitude and longitude coordinates from Google geocoding service
get_geocode(self, street_address="1709 Grand Ave.", state="MO", zip="64112")
Returns a tuple of (lat, long)
Most times you'll want to join the return. |
376,596 | def _file_where(user_id, api_path):
directory, name = split_api_filepath(api_path)
return and_(
files.c.name == name,
files.c.user_id == user_id,
files.c.parent_name == directory,
) | Return a WHERE clause matching the given API path and user_id. |
376,597 | def console_hline(
con: tcod.console.Console,
x: int,
y: int,
l: int,
flag: int = BKGND_DEFAULT,
) -> None:
lib.TCOD_console_hline(_console(con), x, y, l, flag) | Draw a horizontal line on the console.
This always uses the character 196, the horizontal line character.
.. deprecated:: 8.5
Use :any:`Console.hline` instead. |
376,598 | def random_forest_error(forest, X_train, X_test, inbag=None,
calibrate=True, memory_constrained=False,
memory_limit=None):
if inbag is None:
inbag = calc_inbag(X_train.shape[0], forest)
pred = np.array([tree.predict(X_test) for tree in forest]).T
pred_mean = np.mean(pred, 0)
pred_centered = pred - pred_mean
n_trees = forest.n_estimators
V_IJ = _core_computation(X_train, X_test, inbag, pred_centered, n_trees,
memory_constrained, memory_limit)
V_IJ_unbiased = _bias_correction(V_IJ, inbag, pred_centered, n_trees)
if np.max(inbag) == 1:
variance_inflation = 1 / (1 - np.mean(inbag)) ** 2
V_IJ_unbiased *= variance_inflation
if not calibrate:
return V_IJ_unbiased
if V_IJ_unbiased.shape[0] <= 20:
print("No calibration with n_samples <= 20")
return V_IJ_unbiased
if calibrate:
calibration_ratio = 2
n_sample = np.ceil(n_trees / calibration_ratio)
new_forest = copy.deepcopy(forest)
new_forest.estimators_ =\
np.random.permutation(new_forest.estimators_)[:int(n_sample)]
new_forest.n_estimators = int(n_sample)
results_ss = random_forest_error(new_forest, X_train, X_test,
calibrate=False,
memory_constrained=memory_constrained,
memory_limit=memory_limit)
sigma2_ss = np.mean((results_ss - V_IJ_unbiased)**2)
delta = n_sample / n_trees
sigma2 = (delta**2 + (1 - delta)**2) / (2 * (1 - delta)**2) * sigma2_ss
V_IJ_calibrated = calibrateEB(V_IJ_unbiased, sigma2)
return V_IJ_calibrated | Calculate error bars from scikit-learn RandomForest estimators.
RandomForest is a regressor or classifier object
this variance can be used to plot error bars for RandomForest objects
Parameters
----------
forest : RandomForest
Regressor or Classifier object.
X_train : ndarray
An array with shape (n_train_sample, n_features). The design matrix for
training data.
X_test : ndarray
An array with shape (n_test_sample, n_features). The design matrix
for testing data
inbag : ndarray, optional
The inbag matrix that fit the data. If set to `None` (default) it
will be inferred from the forest. However, this only works for trees
for which bootstrapping was set to `True`. That is, if sampling was
done with replacement. Otherwise, users need to provide their own
inbag matrix.
calibrate: boolean, optional
Whether to apply calibration to mitigate Monte Carlo noise.
Some variance estimates may be negative due to Monte Carlo effects if
the number of trees in the forest is too small; calibration mitigates this.
Default: True
memory_constrained: boolean, optional
Whether or not there is a restriction on memory. If False, it is
assumed that an ndarray of shape (n_train_sample, n_test_sample) fits
in main memory. Setting to True can actually provide a speed up if
memory_limit is tuned to the optimal range.
memory_limit: int, optional.
An upper bound for how much memory the intermediate matrices will take
up in Megabytes. This must be provided if memory_constrained=True.
Returns
-------
An array with the unbiased sampling variance (V_IJ_unbiased)
for a RandomForest object.
See Also
----------
:func:`calc_inbag`
Notes
-----
The calculation of error is based on the infinitesimal jackknife variance,
as described in [Wager2014]_ and is a Python implementation of the R code
provided at: https://github.com/swager/randomForestCI
.. [Wager2014] S. Wager, T. Hastie, B. Efron. "Confidence Intervals for
Random Forests: The Jackknife and the Infinitesimal Jackknife", Journal
of Machine Learning Research vol. 15, pp. 1625-1651, 2014. |
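A minimal usage sketch with scikit-learn; the synthetic data is purely illustrative:

.. code-block:: python

    import numpy as np
    from sklearn.ensemble import RandomForestRegressor

    rng = np.random.RandomState(0)
    X_train = rng.rand(100, 3)
    y_train = X_train @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.randn(100)
    X_test = rng.rand(30, 3)

    # bootstrap=True is required so the inbag matrix can be inferred.
    forest = RandomForestRegressor(n_estimators=200, bootstrap=True, random_state=0)
    forest.fit(X_train, y_train)

    # One sampling variance per test point; take sqrt to plot error bars
    # around forest.predict(X_test).
    variances = random_forest_error(forest, X_train, X_test)
    errors = np.sqrt(variances)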
376,599 | def _idx_table_by_num(tables):
logger_jsons.info("enter idx_table_by_num")
_tables = []
for name, table in tables.items():
try:
tmp = _idx_col_by_num(table)
_tables.append(tmp)
except Exception as e:
logger_jsons.error("idx_table_by_num: {}".format(e))
logger_jsons.info("exit idx_table_by_num")
return _tables | Switch tables to index-by-number
:param dict tables: Metadata
:return list _tables: Metadata |