Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
14,600 | def get(self, request, enterprise_uuid, course_id):
enrollment_course_mode = request.GET.get('course_mode')
enterprise_catalog_uuid = request.GET.get('catalog')
if not enrollment_course_mode:
return redirect(LMS_DASHBOARD_URL)
enrollment_api_client = EnrollmentApiClient()
course_modes = enrollment_api_client.get_course_modes(course_id)
enterprise_customer = get_enterprise_customer_or_404(enterprise_uuid)
enterprise_customer_user = get_enterprise_customer_user(request.user.id, enterprise_customer.uuid)
if not course_modes:
context_data = get_global_context(request, enterprise_customer)
error_code = 'ENTGDS000'  # error-code literal elided in source; placeholder
log_message = (
'No course modes for course {course_id}, user {userid}, '
'catalog {enterprise_catalog_uuid}. Code: {error_code}'.format(
userid=request.user.id,
enterprise_catalog_uuid=enterprise_catalog_uuid,
course_id=course_id,
error_code=error_code
)
)
return render_page_with_error_code_message(request, context_data, error_code, log_message)
selected_course_mode = None
for course_mode in course_modes:
if course_mode['slug'] == enrollment_course_mode:
selected_course_mode = course_mode
break
if not selected_course_mode:
return redirect(LMS_DASHBOARD_URL)
__, created = EnterpriseCourseEnrollment.objects.get_or_create(
enterprise_customer_user=enterprise_customer_user,
course_id=course_id,
)
if created:
track_enrollment('course-landing-page-enrollment', request.user.id, course_id, request.get_full_path())
DataSharingConsent.objects.update_or_create(
username=enterprise_customer_user.username,
course_id=course_id,
enterprise_customer=enterprise_customer_user.enterprise_customer,
defaults={
'granted': True
},
)
audit_modes = getattr(settings, 'ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES', ['audit', 'honor'])
if selected_course_mode['slug'] in audit_modes:
enrollment_api_client.enroll_user_in_course(
request.user.username, course_id, selected_course_mode['slug']
)
return redirect(LMS_COURSEWARE_URL.format(course_id=course_id))
premium_flow = LMS_START_PREMIUM_COURSE_FLOW_URL.format(course_id=course_id)
if enterprise_catalog_uuid:
premium_flow += '&catalog={catalog_uuid}'.format(
catalog_uuid=enterprise_catalog_uuid
)
return redirect(premium_flow) | Handle the enrollment of enterprise learner in the provided course.
Based on `enterprise_uuid` in URL, the view will decide which
enterprise customer's course enrollment record should be created.
Depending on the value of query parameter `course_mode` then learner
will be either redirected to LMS dashboard for audit modes or
redirected to ecommerce basket flow for payment of premium modes. |
14,601 | def get_file_sha1(filename_or_io):
file_data = get_file_io(filename_or_io)
cache_key = file_data.cache_key
if cache_key and cache_key in FILE_SHAS:
return FILE_SHAS[cache_key]
with file_data as file_io:
hasher = sha1()
buff = file_io.read(BLOCKSIZE)
while len(buff) > 0:
if isinstance(buff, six.text_type):
buff = buff.encode()
hasher.update(buff)
buff = file_io.read(BLOCKSIZE)
digest = hasher.hexdigest()
if cache_key:
FILE_SHAS[cache_key] = digest
return digest | Calculates the SHA1 of a file or file object using a buffer to handle larger files. |
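The buffered-digest pattern in 14,601 generalizes to any `hashlib` algorithm. A minimal self-contained sketch of the same idea (the `BLOCKSIZE` value and helper name here are illustrative, not from the source):

```python
from hashlib import sha1

BLOCKSIZE = 65536  # hash in 64 KiB chunks so large files never load whole

def sha1_of_path(path):
    hasher = sha1()
    with open(path, "rb") as f:      # binary mode: hash raw bytes, no decode step
        chunk = f.read(BLOCKSIZE)
        while chunk:
            hasher.update(chunk)     # update() may be called repeatedly
            chunk = f.read(BLOCKSIZE)
    return hasher.hexdigest()
```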
14,602 | def author(self, value):
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"author", value)
self.__author = value | Setter for **self.__author** attribute.
:param value: Attribute value.
:type value: unicode |
14,603 | def get_as_nullable_integer(self, key):
value = self.get(key)
return IntegerConverter.to_nullable_integer(value) | Converts map element into an integer or returns None if conversion is not possible.
:param key: an index of element to get.
:return: integer value of the element or None if conversion is not supported. |
14,604 | def from_hoy(cls, hoy, leap_year=False):
return cls.from_moy(round(hoy * 60), leap_year) | Create Ladybug Datetime from an hour of the year.
Args:
hoy: A float value, 0 <= hoy < 8760 |
14,605 | async def kick_chat_member(self, chat_id: typing.Union[base.Integer, base.String], user_id: base.Integer,
until_date: typing.Union[base.Integer, None] = None) -> base.Boolean:
until_date = prepare_arg(until_date)
payload = generate_payload(**locals())
result = await self.request(api.Methods.KICK_CHAT_MEMBER, payload)
return result | Use this method to kick a user from a group, a supergroup or a channel.
In the case of supergroups and channels, the user will not be able to return to the group
on their own using invite links, etc., unless unbanned first.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’ setting
is off in the target group.
Otherwise members may only be removed by the group's creator or by the member that added them.
Source: https://core.telegram.org/bots/api#kickchatmember
:param chat_id: Unique identifier for the target group or username of the target supergroup or channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param user_id: Unique identifier of the target user
:type user_id: :obj:`base.Integer`
:param until_date: Date when the user will be unbanned, unix time
:type until_date: :obj:`typing.Union[base.Integer, None]`
:return: Returns True on success
:rtype: :obj:`base.Boolean` |
14,606 | def requirements(work_dir, hive_root, with_requirements,
with_dockerfile, active_module, active_module_file):
import sys
sys.path.insert(0, hive_root)
hive_root = os.path.abspath(os.path.expanduser(hive_root))
work_dir = work_dir or os.path.join(
os.environ.get('HIVE_WORK_DIR',  # env-var name elided in source; assumed
os.getcwd()))
work_dir = os.path.expanduser(work_dir)
requirements_root = os.path.join(work_dir, 'requirements')
migrate_root = os.path.join(work_dir, 'migrate')
active_module_paths = []
active_module_list = []
if active_module_file:
with open(active_module_file, 'r') as fp:
for l in fp:
pkg = l.split()[0].strip()
if pkg:
active_module_list.append(l.strip("\n"))
pass
active_module_list += active_module
for m in active_module_list:
try:
mod = importlib.import_module(m)
active_module_paths.append(os.path.dirname(mod.__file__))
except ImportError:
click.echo('module %s not found' % m, color="yellow")
pass
pass
def build_requirements():
if not os.path.exists(requirements_root):
os.makedirs(requirements_root)
pass
click.echo(click.style("Generate hive requirements...", fg="yellow"))
shutil.copy(
os.path.join(hive_root, 'requirements.txt'),
os.path.join(requirements_root, 'hive.txt')  # target filename elided in source; assumed
)
click.echo(click.style("Generate hive-module requirements...",
fg="yellow"))
requirements_files = []
for m in active_module_paths:
t = os.path.join(m, 'requirements.txt')
if os.path.exists(t):
requirements_files.append(t)
pass
module_packages = set()
with fileinput.input(requirements_files) as fp:
for line in fp:
pkg = line.split()[0].strip()
if pkg:
module_packages.add(pkg)
pass
with click.open_file(
os.path.join(requirements_root, 'modules.txt'),  # filename elided in source; assumed
'w') as fp:
for p in module_packages:
fp.write("%s\n" % p)
pass
pass
def build_dockerfile():
modules_in_hive = map(
lambda x: x.replace(hive_root, '').lstrip('/'),
filter(lambda x: x.startswith(hive_root),
active_module_paths))
modules_path = ' '.join(modules_in_hive)
docker_file = os.path.join(
os.path.dirname(requirements_root),
'Dockerfile')
if os.path.exists(docker_file):
click.echo(click.style("Found Dockerfile,try update...",
fg="yellow"))
with open(docker_file, 'r') as fp:
buffer = fp.read()
pass
import re
replaced = re.sub(r'\{\{\s*modules\s*\}\}',  # pattern and template elided in source; placeholders
'%s' % modules_path,
buffer)
with open(docker_file, 'w') as fp:
fp.write(replaced)
pass
pass
pass
def build_migrations():
models_pairs = filter(
lambda pair: os.path.exists(pair[0]),
map(lambda x: (os.path.join(x[0], 'models.txt'), x[1]),
[(v, active_module_list[i]) for i, v in
enumerate(active_module_paths)]))
try:
_, models = zip(*models_pairs)
except ValueError:
click.echo(click.style("No models found,"
"is it include in "
"your PYTHONPATH?\n"
"Modules: %s" %
', '.join(active_module_list),
fg="yellow"))
return
click.echo(click.style("Found models.txt,try update...",
fg="yellow"))
with open(os.path.join(migrate_root, 'models.txt'), 'w') as fp:
for p in models:
fp.write("%s\n" % p)
pass
pass
def build_tasks():
tasks_pairs = filter(
lambda pair: os.path.exists(pair[0]),
map(lambda x: (os.path.join(x[0], 'tasks.txt'), x[1]),
[(v, active_module_list[i]) for i, v in
enumerate(active_module_paths)]))
try:
_, tasks = zip(*tasks_pairs)
except ValueError:
click.echo(click.style("No tasks found,"
"is it include in "
"your PYTHONPATH?\n"
"Modules: %s" %
', '.join(active_module_list),
fg="yellow"))
return
click.echo(click.style("Found tasks.txt,try update...",
fg="yellow"))
with open(os.path.join(migrate_root, 'tasks.txt'), 'w') as fp:
for p in tasks:
fp.write("%s\n" % p)
pass
if with_requirements:
build_requirements()
if with_dockerfile:
build_dockerfile()
if os.path.exists(migrate_root):
build_migrations()
if os.path.exists(migrate_root):
build_tasks()
click.echo(click.style("Generate done...", fg="yellow"))
pass | Build fresh dependency files. |
14,607 | def delete_edges(self, edges: Iterable[Tuple[str, str]]):
for edge in edges:
if self.has_edge(*edge):
self.remove_edge(*edge) | Iterate over a set of edges and remove the ones that are present in
the graph. |
14,608 | def lock(self, key, lease_time=-1):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(multi_map_lock_codec, key_data, key=key_data,
thread_id=thread_id(), ttl=to_millis(lease_time),
reference_id=self.reference_id_generator.get_and_increment()) | Acquires the lock for the specified key infinitely or for the specified lease time if provided.
If the lock is not available, the current thread becomes disabled for thread scheduling purposes and lies
dormant until the lock has been acquired.
Scope of the lock is this map only. Acquired lock is only for the key in this map.
Locks are re-entrant; so, if the key is locked N times, it should be unlocked N times before another thread can
acquire it.
**Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
:param key: (object), the key to lock.
:param lease_time: (int), time in seconds to wait before releasing the lock (optional). |
14,609 | async def _send_to_messenger_profile(self, page, content):
log_name = ', '.join(repr(x) for x in content.keys())
page_id = page['page_id']
current = await self._get_messenger_profile(page, content.keys())
if dict_is_subset(content, current):
logger.info('Page %s: %s is already set', page_id, log_name)  # message elided in source; assumed
return
params = {
'access_token': page['page_token'],  # key names elided in source; assumed
}
headers = {
'content-type': 'application/json',
}
post = self.session.post(
PROFILE_ENDPOINT,
params=params,
headers=headers,
data=ujson.dumps(content)
)
try:
async with post as r:
await self._handle_fb_response(r)
except Exception:
logger.exception('Page %s: could not set %s', page_id, log_name)
reporter.report()
else:
logger.info('Page %s: %s set successfully', page_id, log_name) | The messenger profile API handles all meta-information about the bot,
like the menu. This allows to submit data to this API endpoint.
:param page: page dict from the configuration
:param content: content to be sent to Facebook (as dict) |
14,610 | def send_trending_data(events):
bodies = {}
top_hits = sorted(
[(key, count) for key, count in events.items()],
key=lambda x: x[1],
reverse=True
)[:100]
for (site, content_id), count in top_hits:
if not len(site) or not re.match(CONTENT_ID_REGEX, content_id):
continue
bodies.setdefault(site, [])
bodies[site].append([content_id, count])
for site, points in bodies.items():
name = "{}_trending".format(site)
try:
data = [{
"name": name,
"columns": ["content_id", "value"],
"points": points,
}]
INFLUXDB_CLIENT.write_points(data)
except Exception as e:
LOGGER.exception(e) | creates data point payloads for trending data to influxdb |
14,611 | def lcp(s1, s2):
i = 0
for i, (c1, c2) in enumerate(zip(s1, s2)):
if c1 != c2:
return i
return min(len(s1), len(s2)) | longest common prefix
>>> lcp('abcdx', 'abcdy'), lcp('', 'a'), lcp('x', 'yz')
(4, 0, 0) |
14,612 | def log(msg, delay=0.5, chevrons=True, verbose=True):
if verbose:
if chevrons:
click.echo("\n❯❯ " + msg)
else:
click.echo(msg)
time.sleep(delay) | Log a message to stdout. |
14,613 | def channels_remove_moderator(self, room_id, user_id, **kwargs):
return self.__call_api_post('channels.removeModerator', roomId=room_id, userId=user_id, kwargs=kwargs) | Removes the role of moderator from a user in the current channel. |
14,614 | def get_queue_bindings(self, vhost, qname):
vhost = quote(vhost, '')
qname = quote(qname, '')
path = Client.urls['bindings_by_queue'] % (vhost, qname)
bindings = self._call(path, 'GET')
return bindings | Return a list of dicts, one dict per binding. The dict format coming
from RabbitMQ for queue named 'testq' is:
{"source":"sourceExch","vhost":"/","destination":"testq",
"destination_type":"queue","routing_key":"*.*","arguments":{},
"properties_key":"%2A.%2A"} |
14,615 | def get(self, sid):
return MemberContext(
self._version,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
sid=sid,
) | Constructs a MemberContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.chat.v2.service.channel.member.MemberContext
:rtype: twilio.rest.chat.v2.service.channel.member.MemberContext |
14,616 | def bear_push(title, content, send_key=None):
if not send_key:
raise ValueError("请配置通道send_key,如果还没有,"
"可以到这里创建通道获取:https://pushbear.ftqq.com/admin/
api = "https://pushbear.ftqq.com/sub"
requests.post(api, data={: title, : content, "sendkey": send_key}) | 使用PushBear推送消息给所有订阅者微信,关于PushBear,
请参考:https://pushbear.ftqq.com/admin/#/
:param title: str
消息标题
:param content: str
消息内容,最长64Kb,可空,支持MarkDown
:param send_key: str
从[PushBear](https://pushbear.ftqq.com/admin/#/)获取的通道send_key
:return: None |
14,617 | async def await_event(self, event=None, timeout=30):
return await self._protocol.await_event(event, timeout=timeout) | Wait for an event from QTM.
:param event: A :class:`qtm.QRTEvent`
to wait for a specific event. Otherwise wait for any event.
:param timeout: Max time to wait for event.
:rtype: A :class:`qtm.QRTEvent` |
14,618 | def generate_moffat_profile(seeing_fwhm, alpha):
scale = 2 * math.sqrt(2**(1.0 / alpha) - 1)
gamma = seeing_fwhm / scale
amplitude = 1.0 / math.pi * (alpha - 1) / gamma**2
seeing_model = Moffat2D(amplitude=amplitude,
x_mean=0.0,
y_mean=0.0,
gamma=gamma,
alpha=alpha)
return seeing_model | Generate a normalized Moffat profile from its FWHM and alpha |
14,619 | def add(self, origin, rel, target, attrs=None):
if not origin:
raise ValueError('origin must be provided')
if not rel:
raise ValueError('rel must be provided')
attrs = attrs or {}
origin_item = self._db_coll.find_one({'origin': origin})
rel = self._abbreviate(rel)
target = self._abbreviate(target)
rel_info = {'rel': rel, 'targets': [[target, attrs]]}  # key names elided in source; assumed
if origin_item is None:
self._db_coll.insert_one(
{
'origin': origin,
'rels': [rel_info],
}
)
else:
origin_item['rels'].append(rel_info)
self._db_coll.replace_one(
{'origin': origin}, origin_item
)
)
return | Add one relationship to the model
origin - origin of the relationship (similar to an RDF subject)
rel - type IRI of the relationship (similar to an RDF predicate)
target - target of the relationship (similar to an RDF object), a boolean, floating point or unicode object
attrs - optional attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2} |
14,620 | def _minimally_quoted_parameter_value(value):
if re.match("^[{charset}]*$".format(charset=MediaType.RFC7320_TOKEN_CHARSET), value):
return value
else:
return MediaType._quote(value) | Per RFC 7321 (https://tools.ietf.org/html/rfc7231#section-3.1.1.1):
Parameters values don't need to be quoted if they are a "token".
Token characters are defined by RFC 7230 (https://tools.ietf.org/html/rfc7230#section-3.2.6).
Otherwise, parameters values can be a "quoted-string".
So we will quote values that contain characters other than the standard token characters. |
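The token-versus-quoted-string decision in 14,620 can be reproduced standalone. A sketch, assuming the RFC 7230 token charset below as a stand-in for `MediaType.RFC7320_TOKEN_CHARSET` and a hypothetical `minimally_quoted` helper:

```python
import re

# RFC 7230 "token" characters (assumed stand-in for the class constant above)
TOKEN_CHARSET = r"!#$%&'*+\-.^_`|~0-9A-Za-z"

def minimally_quoted(value):
    if re.match("^[{charset}]+$".format(charset=TOKEN_CHARSET), value):
        return value                        # already a valid token: leave bare
    escaped = value.replace("\\", "\\\\").replace('"', '\\"')
    return '"{0}"'.format(escaped)          # otherwise emit a quoted-string

print(minimally_quoted("utf-8"))   # utf-8   (token characters only)
print(minimally_quoted("a b"))     # "a b"   (the space forces quoting)
```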
14,621 | def get_welcome_response():
session_attributes = {}
card_title = "Welcome"
speech_output = "Welcome to the Alexa Skills Kit sample. " \
"Please tell me your favorite color by saying, " \
"my favorite color is red"
reprompt_text = "Please tell me your favorite color by saying, " \
"my favorite color is red."
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session)) | If we wanted to initialize the session to have some attributes we could
add those here |
14,622 | def get_resource_subscription(self, device_id, resource_path, fix_path=True):
fixed_path = resource_path
if fix_path and resource_path.startswith("/"):
fixed_path = resource_path[1:]
api = self._get_api(mds.SubscriptionsApi)
try:
api.check_resource_subscription(device_id, fixed_path)
except Exception as e:
if e.status == 404:
return False
raise
return True | Read subscription status.
:param device_id: Name of device to set the subscription on (Required)
:param resource_path: The resource path on device to observe (Required)
:param fix_path: Removes leading / on resource_path if found
:returns: status of subscription |
14,623 | def get_update_service(self):
update_service_url = utils.get_subresource_path_by(self,
'UpdateService')  # subresource path elided in source; assumed
return (update_service.
HPEUpdateService(self._conn, update_service_url,
redfish_version=self.redfish_version)) | Return a HPEUpdateService object
:returns: The UpdateService object |
14,624 | def title(self, value=None):
if not (value is None):
if (self.metadatatype == "native"):
self.metadata['title'] = value
else:
self._title = value
if (self.metadatatype == "native"):
if 'title' in self.metadata:
return self.metadata['title']
else:
return None
else:
return self._title | Get or set the document's title from/in the metadata
No arguments: Get the document's title from metadata
Argument: Set the document's title in metadata |
14,625 | def set(self, section, option, value=None):
if isinstance(section, bytes):
section = section.decode()
if isinstance(option, bytes):
option = option.decode()
if isinstance(value, bytes):
value = value.decode()
return super(VSGConfigParser, self).set(section, option, value) | Extends :meth:`~configparser.ConfigParser.set` by auto formatting byte strings into unicode strings. |
14,626 | def down_by_name(*filters, remote_dir=DEFAULT_REMOTE_DIR, local_dir=".", count=1):
files = command.list_files(*filters, remote_dir=remote_dir)
greatest = sorted(files, key=lambda f: f.filename)
to_sync = greatest[-count:]
_notify_sync(Direction.down, to_sync)
down_by_files(to_sync[::-1], local_dir=local_dir) | Sync files whose filename attribute is highest in alphanumeric order |
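The selection in 14,626 is a plain sort by the `filename` attribute plus a tail slice. A toy illustration with a hypothetical `File` record standing in for the real file objects:

```python
from collections import namedtuple

File = namedtuple("File", "filename")  # stand-in for the remote file records

files = [File("IMG_0101.jpg"), File("IMG_0203.jpg"), File("IMG_0150.jpg")]
count = 2
greatest = sorted(files, key=lambda f: f.filename)
to_sync = greatest[-count:]                   # the `count` highest names
print([f.filename for f in to_sync])          # ['IMG_0150.jpg', 'IMG_0203.jpg']
print([f.filename for f in to_sync[::-1]])    # reversed: highest name first
```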
14,627 | def iplot(self, places=-1, c_poly='default', c_holes='default',
c_sop='default', s_sop=25, extra_height=0, ret=False, ax=None):
if places == -1:
places = range(len(self.places))
elif type(places) == int:
places = [places]
places = np.array(places)
places[places<0] = len(self.places) + places[places<0]
places = np.unique(places)
aux_space = Space([self[i] for i in places])
for place in aux_space:
ax = place.iplot(c_poly, c_holes, c_sop, s_sop, extra_height,
ret=True, ax=ax)
aux_space.center_plot(ax)
if ret: return ax | Improved plot that allows to visualize the Places in the Space
selectively. It also allows to plot polygons and holes in
different colors and to change the size and the color of the
set of points.
The points can be plotted accordingly to a ndarray colormap.
:param places: Indexes of the Places to visualize.
:type places: int, list or ndarray
:param c_poly: Polygons color.
:type c_poly: matplotlib color, 'default' or 't' (transparent)
:param c_holes: Holes color.
:type c_holes: matplotlib color, 'default' or 't' (transparent)
:param c_sop: Set of points color.
:type c_sop: matplotlib color or colormap
:param s_sop: Set of points size.
:type s_sop: float or ndarray
:param ret: If True, returns the figure. It can be used to add
more elements to the plot or to modify it.
:type ret: bool
:param ax: If a matplotlib axes given, this method will
represent the plot on top of this axes. This is used to
represent multiple plots from multiple geometries,
overlapping them recursively.
:type ax: mplot3d.Axes3D, None
:returns: None, axes
:rtype: None, mplot3d.Axes3D |
14,628 | def binomial(n):
if n == 1:
return [1, 1]
elif n == 2:
return [1, 2, 1]
elif n == 3:
return [1, 3, 3, 1]
elif n == 4:
return [1, 4, 6, 4, 1]
elif n == 5:
return [1, 5, 10, 10, 5, 1]
else:
from scipy.special import binom
return binom(n, np.arange(n + 1)) | Return all binomial coefficients for a given order.
For n > 5, scipy.special.binom is used, below we hardcode
to avoid the scipy.special dependency.
Parameters
--------------
n : int
Order
Returns
---------------
binom : (n + 1,) int
Binomial coefficients of a given order |
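For orders above the hardcoded rows in 14,628, the function falls through to SciPy; a quick check of that branch:

```python
import numpy as np
from scipy.special import binom

n = 6                                  # above the hardcoded n <= 5 fast path
coeffs = binom(n, np.arange(n + 1))    # one vectorized call yields the row
print(coeffs)                          # [ 1.  6. 15. 20. 15.  6.  1.]
```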
14,629 | def ascii(graph):
from .._ascii import DAG
from .._echo import echo_via_pager
echo_via_pager(str(DAG(graph))) | Format graph as an ASCII art. |
14,630 | def _replication_request(command, host=None, core_name=None, params=None):
params = [] if params is None else params
extra = ["command={0}".format(command)] + params
url = _format_url('replication', host=host, core_name=core_name,
extra=extra)
return _http_request(url) | PRIVATE METHOD
Performs the requested replication command and returns a dictionary with
success, errors and data as keys. The data object will contain the JSON
response.
command : str
The replication command to execute.
host : str (None)
The solr host to query. __opts__['host'] is default
core_name: str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
params : list<str> ([])
Any additional parameters you want to send. Should be a list of
strings in name=value format. e.g. ['name=value']
Return: dict<str, obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list} |
14,631 | def get_issuer_keys(self, issuer):
res = []
for kbl in self.issuer_keys[issuer]:
res.extend(kbl.keys())
return res | Get all the keys that belong to an entity.
:param issuer: The entity ID
:return: A possibly empty list of keys |
14,632 | def sum_tbl(tbl, kfield, vfields):
pairs = [(n, tbl.dtype[n]) for n in [kfield] + vfields]
dt = numpy.dtype(pairs + [('counts', int)])  # field name elided in source; assumed
def sum_all(group):
vals = numpy.zeros(1, dt)[0]
for rec in group:
for vfield in vfields:
vals[vfield] += rec[vfield]
vals['counts'] += 1
vals[kfield] = rec[kfield]
return vals
rows = groupby(tbl, operator.itemgetter(kfield), sum_all).values()
array = numpy.zeros(len(rows), dt)
for i, row in enumerate(rows):
for j, name in enumerate(dt.names):
array[i][name] = row[j]
return array | Aggregate a composite array and compute the totals on a given key.
>>> dt = numpy.dtype([('name', (bytes, 10)), ('value', int)])
>>> tbl = numpy.array([('a', 1), ('a', 2), ('b', 3)], dt)
>>> sum_tbl(tbl, 'name', ['value'])['value']
array([3, 3]) |
14,633 | def cohort_queryplan(plan):
# string literals below were elided in the source; plan keys follow the
# docstring, property/alias names are assumed
cohort = plan['cohort']
action = plan['action']
source = plan['source']
cohort_start = datetime_to_kronos_time(_date_to_datetime(cohort['start']))
cohort_span = timedelta(**{cohort['unit']: cohort['cohorts']})
cohort_end = cohort['start'] + cohort_span
action_span = timedelta(**{action['unit']: action['repetitions']})
action_end = cohort_end + action_span
cohort_end = datetime_to_kronos_time(_date_to_datetime(cohort_end)) + 1
action_end = datetime_to_kronos_time(_date_to_datetime(action_end)) + 1
left = _cohort_stream_transform(source,
cohort['stream'], cohort_start, cohort_end,
cohort.get('transform'),
cohort['grouping_key'], cohort['unit'])
right = _cohort_stream_transform(source,
action['stream'], cohort_start, action_end,
action.get('transform'),
action['grouping_key'], action['unit'])
additional_action_time = (DateUnit.unit_to_kronos_time(action['unit']) *
action['repetitions'])
left.alias = 'cohort'
right.alias = 'action'
joined = Join(left,
right,
(Condition(Condition.Op.EQ,
Property('cohort.%s' % cohort['grouping_key']),
Property('action.%s' % action['grouping_key'])) &
Condition(Condition.Op.GTE,
Property('action.%s' % TIMESTAMP_FIELD),
Property('cohort.%s' % TIMESTAMP_FIELD)) &
Condition(Condition.Op.LT,
Property('action.%s' % TIMESTAMP_FIELD),
Add([Property('cohort.%s' % TIMESTAMP_FIELD),
Constant(additional_action_time)]))))
user_aggregated = Aggregate(
joined,
GroupBy([Property('cohort.%s' % TIMESTAMP_FIELD, alias=TIMESTAMP_FIELD),
Property('cohort.%s' % cohort['grouping_key'], alias='group'),
Floor([Subtract([Property('action.%s' % TIMESTAMP_FIELD),
Property('cohort.%s' % TIMESTAMP_FIELD)]),
Constant(DateUnit.unit_to_kronos_time(action['unit']))],
alias='step')]),
[Count([], alias='count')]
)
aggregated = Aggregate(
user_aggregated,
GroupBy([Property(TIMESTAMP_FIELD, alias=TIMESTAMP_FIELD),
Property('step', alias='step')]),
[Count([], alias='cohort_size')])
return aggregated.to_dict() | Input:
{
'source': 'kronos', # Name of data source from settings
'cohort':
{'stream': CohortTest.EMAIL_STREAM, # Kronos stream to define cohort from.
'transform': lambda x: x, # Transformations on the kstream.
'start': date.now(), # The day of the first cohort.
'unit': DateUnit.XX, # Users are in the same cohort
# if they are in the same day/week.
'cohorts': 5 # How many cohorts (days/weeks/months)
# to track.
'grouping_key': 'user'}, # What key in an event should we tie
# to a key in the action stream?
'action':
{'stream': CohortTest.FRONTPAGE_STREAM, # Stream users take actions on.
'transform': lambda x: x # Transformations on the stream.
'unit': DateUnit.XX, # Track events in day/week/months.
'repetitions': 14 # How many days/weeks/months to track.
'grouping_key': 'user_id'} # What key in an event should we tie
# to a key in the action stream?
}
Output:
A metis-compatible query plan to return a cohort analysis. |
14,634 | def from_string(cls, s):
for num, text in cls._STATUS_TABLE.items():
if text == s: return cls(num)
else:
logger.warning("Got unknown status: %s" % s)
return cls.from_string("UNKNOWN") | Return a :class:`JobStatus` instance from its string representation. |
14,635 | def choose_key(gpg_private_keys):
uid_strings_fp = []
uid_string_fp2key = {}
current_key_index = None
for i, key in enumerate(gpg_private_keys):
fingerprint = key['fingerprint']
if fingerprint == config["gpg_key_fingerprint"]:
current_key_index = i
for uid_string in key['uids']:
uid_string_fp = uid_string + ' (' + fingerprint + ')'  # separator literals elided in source; assumed
uid_strings_fp.append(uid_string_fp)
uid_string_fp2key[uid_string_fp] = key
msg = _("Choose a GPG key")  # prompt text elided in source; placeholder
dlg = wx.SingleChoiceDialog(None, msg, _("Key choice"), uid_strings_fp,
wx.CHOICEDLG_STYLE)
childlist = list(dlg.GetChildren())
childlist[-3].SetLabel(_("Use chosen key"))
childlist[-2].SetLabel(_("Create new key"))
if current_key_index is not None:
dlg.SetSelection(current_key_index)
if dlg.ShowModal() == wx.ID_OK:
uid_string_fp = dlg.GetStringSelection()
key = uid_string_fp2key[uid_string_fp]
else:
key = None
dlg.Destroy()
return key | Displays gpg key choice and returns key |
14,636 | def hidden_item_tags(self):
hidden_item_tags = self.cp.get('general', 'hidden_item_tags')  # section/option names elided in source; assumed
return [] if hidden_item_tags == '' else [tag.strip() for tag in
hidden_item_tags.split(',')] | Returns a list of tags which hide an item from the 'ls' output. |
14,637 | def numpyStr(array, format='%f', includeIndices=False, includeZeros=True):
shape = array.shape
assert (len(shape) <= 2)
items = []
if len(shape) == 1:
if includeIndices:
format = '[%d]: ' + format
if includeZeros:
rowItems = [format % (c,x) for (c,x) in enumerate(array)]
else:
rowItems = [format % (c,x) for (c,x) in enumerate(array) if x != 0]
else:
rowItems = [format % (x) for x in array]
items.extend(rowItems)
else:
(rows, cols) = shape
if includeIndices:
format = '[%d,%d]: ' + format
for r in xrange(rows):
if includeIndices:
rowItems = [format % (r,c,x) for c,x in enumerate(array[r])]
else:
rowItems = [format % (x) for x in array[r]]
if r > 0:  # bracket/separator literals in this block were elided in source; assumed
items.append('\n')
items.append('[')
items.extend(rowItems)
if r < rows-1:
items.append(']')
else:
items.append(']')
items.append('\n')
return ' '.join(items) | Pretty print a numpy matrix using the given format string for each
value. Return the string representation
Parameters:
------------------------------------------------------------
array: The numpy array to print. This can be either a 1D vector or 2D matrix
format: The format string to use for each value
includeIndices: If true, include [row,col] label for each value
includeZeros: Can only be set to False if includeIndices is on.
If True, include 0 values in the print-out
If False, exclude 0 values from the print-out. |
14,638 | def download_align(from_idx, to_idx, _params):
succ = set()
fail = set()
for idx in range(from_idx, to_idx):
name = 's' + str(idx)
if idx == 0:
continue
script = "http://spandh.dcs.shef.ac.uk/gridcorpus/{nm}/align/{nm}.tar".format(nm=name)
down_sc = 'cd {align_path} && wget {script} && tar -xf {nm}.tar'.format(script=script,  # command template elided in source; assumed
nm=name,
align_path=_params['align_path'])
try:
print(down_sc)
os.system(down_sc)
succ.add(idx)
except OSError as error:
print(error)
fail.add(idx)
return (succ, fail) | download aligns |
14,639 | def predict(self, X):
check_is_fitted(self, ['inverse_influence_matrix'])
X = check_array(X)
return self.__find_leverages(X, self.inverse_influence_matrix) <= self.threshold_value | Predict inside or outside AD for X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
ad : array of shape = [n_samples]
Array contains True (reaction in AD) and False (reaction residing outside AD). |
14,640 | def geopotential_to_height(geopot):
height = (((1 / mpconsts.Re) - (geopot / (mpconsts.G * mpconsts.me))) ** -1) - mpconsts.Re
return height | r"""Compute height from a given geopotential.
Parameters
----------
geopotential : `pint.Quantity`
Geopotential (array_like)
Returns
-------
`pint.Quantity`
The corresponding height value(s)
Examples
--------
>>> from metpy.constants import g, G, me, Re
>>> import metpy.calc
>>> from metpy.units import units
>>> height = np.linspace(0,10000, num = 11) * units.m
>>> geopot = metpy.calc.height_to_geopotential(height)
>>> geopot
<Quantity([ 0. 9817.46806283 19631.85526579 29443.16305888
39251.39289118 49056.54621087 58858.62446525 68657.62910064
78453.56156253 88246.42329545 98036.21574306], 'meter ** 2 / second ** 2')>
>>> height = metpy.calc.geopotential_to_height(geopot)
>>> height
<Quantity([ 0. 1000. 2000. 3000. 4000. 5000. 6000. 7000. 8000.
9000. 10000.], 'meter')>
Notes
-----
Derived from definition of geopotential in [Hobbs2006]_ pg.14 Eq.1.8. |
14,641 | def get_focus(self, filt=False, samples=None, subset=None, nominal=False):
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
focus = {'uTime': []}
focus.update({a: [] for a in self.analytes})
for sa in samples:
s = self.data[sa]
focus['uTime'].append(s.uTime)
ind = s.filt.grab_filt(filt)
for a in self.analytes:
tmp = s.focus[a].copy()
tmp[~ind] = np.nan
focus[a].append(tmp)
if nominal:
self.focus.update({k: nominal_values(np.concatenate(v)) for k, v, in focus.items()})
else:
self.focus.update({k: np.concatenate(v) for k, v, in focus.items()})
return | Collect all data from all samples into a single array.
Data from standards is not collected.
Parameters
----------
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
samples : str or list
which samples to get
subset : str or int
which subset to get
Returns
-------
None |
14,642 | def reboot(self, target_mode=None, timeout_ms=None):
return self._simple_command('reboot', arg=target_mode,
timeout_ms=timeout_ms) | Reboots the device.
Args:
target_mode: Normal reboot when unspecified (or None). Can specify
other target modes, such as 'recovery' or 'bootloader'.
timeout_ms: Optional timeout in milliseconds to wait for a response.
Returns:
Usually the empty string. Depends on the bootloader and the target_mode. |
14,643 | def make_argument_subquery(arg):
return Subquery.create(arg) if isinstance(arg, (GroupBy, Projection)) or arg.restriction else arg | Decide when a Join argument needs to be wrapped in a subquery |
14,644 | def setAccessRules(self, pid, public=False):
url = "{url_base}/resource/accessRules/{pid}/".format(url_base=self.url_base,
pid=pid)
params = {'public': public}
r = self._request('PUT', url, data=params)
if r.status_code != 200:
if r.status_code == 403:
raise HydroShareNotAuthorized(('PUT', url))
elif r.status_code == 404:
raise HydroShareNotFound((pid,))
else:
raise HydroShareHTTPException((url, 'PUT', r.status_code, params))
resource = r.json()
assert(resource['resource_id'] == pid)  # key names elided in source; assumed
return resource['public'] | Set access rules for a resource. Currently only allows setting the public or private setting.
:param pid: The HydroShare ID of the resource
:param public: True if the resource should be made public. |
14,645 | def img_search_bing(album):
setup()
album = album + " Album Art"
api_key = "Key"
endpoint = "https://api.cognitive.microsoft.com/bing/v5.0/images/search"
links_dict = {}
headers = {'Ocp-Apim-Subscription-Key': str(BING_KEY)}
param = {'q': album, 'count': '1'}  # parameter names elided in source; assumed
response = requests.get(endpoint, headers=headers, params=param)
response = response.json()
key = 0
try:
for i in response['value']:
links_dict[str(key)] = str((i['contentUrl']))
key = key + 1
return links_dict["0"]
except KeyError:
return None | Bing image search |
14,646 | def current_values(self):
current_dict = {
'date': self.current_session_date,  # key names elided in source; assumed from attribute names
'score': self.current_sleep_score,
'stage': self.current_sleep_stage,
'breakdown': self.current_sleep_breakdown,
'tnt': self.current_tnt,
'bed_temp': self.current_bed_temp,
'room_temp': self.current_room_temp,
'resp_rate': self.current_resp_rate,
'heart_rate': self.current_heart_rate,
'processing': self.current_session_processing,
return current_dict | Return a dict of all the 'current' parameters. |
14,647 | def _move_cursor_to_column(self, column):
last_col = len(self._cursor.block().text())
self._cursor.movePosition(self._cursor.EndOfBlock)
to_insert = ''
for i in range(column - last_col):
to_insert += ' '
if to_insert:
self._cursor.insertText(to_insert)
self._cursor.movePosition(self._cursor.StartOfBlock)
self._cursor.movePosition(self._cursor.Right, self._cursor.MoveAnchor, column)
self._last_cursor_pos = self._cursor.position() | Moves the cursor to the specified column, if possible. |
14,648 | def _discover_mac(self):
mac = None
mac_reversed = None
cmd = MAGIC + DISCOVERY
resp = self._udp_transact(cmd, self._discovery_resp,
broadcast=True,
timeout=DISCOVERY_TIMEOUT)
if resp:
(mac, mac_reversed) = resp
if mac is None:
raise S20Exception("Couldn't discover {}".format(self.host))
return (mac, mac_reversed) | Discovers MAC address of device.
Discovery is done by sending a UDP broadcast.
All configured devices reply. The response contains
the MAC address in both needed formats.
Discovery of multiple switches must be done synchronously.
:returns: Tuple of MAC address and reversed MAC address. |
14,649 | def keep_impute(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
X_train, X_test = to_array(X_train, X_test)
assert X_train.shape[1] == X_test.shape[1]
C = np.cov(X_train.T)
C += np.eye(C.shape[0]) * 1e-6
X_test_tmp = X_test.copy()
yp_masked_test = np.zeros(y_test.shape)
tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
mean_vals = X_train.mean(0)
for i in range(len(y_test)):
if nkeep[i] < X_test.shape[1]:
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
observe_inds = ordering[:nkeep[i]]
impute_inds = ordering[nkeep[i]:]
Coo_inv = np.linalg.inv(C[observe_inds,:][:,observe_inds])
Cio = C[impute_inds,:][:,observe_inds]
impute = mean_vals[impute_inds] + Cio @ Coo_inv @ (X_test[i, observe_inds] - mean_vals[observe_inds])
X_test_tmp[i, impute_inds] = impute
yp_masked_test = trained_model.predict(X_test_tmp)
return metric(y_test, yp_masked_test) | The model is revaluated for each test sample with the non-important features set to an imputed value.
Note that the imputation is done using a multivariate normality assumption on the dataset. This depends on
being able to estimate the full data covariance matrix (and inverse) accuractly. So X_train.shape[0] should
be significantly bigger than X_train.shape[1]. |
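The imputation step in 14,649 is the conditional mean of a multivariate normal, `mu_i + C_io @ C_oo^{-1} @ (x_o - mu_o)`. A toy NumPy sketch of that formula on synthetic data (all names here are illustrative):

```python
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 4))
X[:, 1] += 0.8 * X[:, 0]                  # correlate features so imputation matters

C = np.cov(X.T) + np.eye(4) * 1e-6        # regularized covariance, as above
mean = X.mean(0)

observe = np.array([0, 2])                # features we keep
impute = np.array([1, 3])                 # features to fill in
x = X[0]

Coo_inv = np.linalg.inv(C[np.ix_(observe, observe)])
Cio = C[np.ix_(impute, observe)]
imputed = mean[impute] + Cio @ Coo_inv @ (x[observe] - mean[observe])
print(imputed)                            # conditional-mean estimates for 1 and 3
```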
14,650 | def _get_ignore_from_manifest(filename):
class MyTextFile(TextFile):
def error(self, msg, line=None):
raise Failure(self.gen_error(msg, line))
def warn(self, msg, line=None):
warning(self.gen_error(msg, line))
template = MyTextFile(filename,
strip_comments=True,
skip_blanks=True,
join_lines=True,
lstrip_ws=True,
rstrip_ws=True,
collapse_join=True)
try:
lines = template.readlines()
finally:
template.close()
return _get_ignore_from_manifest_lines(lines) | Gather the various ignore patterns from a MANIFEST.in.
Returns a list of standard ignore patterns and a list of regular
expressions to ignore. |
14,651 | def get_public_rooms(self, **kwargs):
return GetPublicRooms(settings=self.settings, **kwargs).call(**kwargs) | Get a listing of all public rooms with their names and IDs |
14,652 | def selected(script, face=True, vert=True):
if face and vert:
filter_xml =
elif face and not vert:
filter_xml =
elif not face and vert:
filter_xml =
util.write_filter(script, filter_xml)
return None | Delete selected vertices and/or faces
Note: if the mesh has no faces (e.g. a point cloud) you must
set face=False, or the vertices will not be deleted
Args:
script: the FilterScript object or script filename to write
the filter to.
face (bool): if True the selected faces will be deleted. If vert
is also True, then all the vertices surrounded by those faces will
also be deleted. Note that if no faces are selected (only vertices)
then this filter will not do anything. For example, if you want to
delete a point cloud selection, you must set this to False.
vert (bool): if True the selected vertices will be deleted.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA |
14,653 | async def on_reaction_add(reaction, user):
server = reaction.message.server
emoji = reaction.emoji
data = datatools.get_data()
if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
return
if user != reaction.message.channel.server.me:
if server.id not in _data.cache or _data.cache[server.id].state == 'destroyed':  # state literal elided in source; assumed
return
try:
valid_reaction = reaction.message.id == _data.cache[server.id].embed.sent_embed.id
except AttributeError:
pass
else:
if valid_reaction:
try:
await client.remove_reaction(reaction.message, emoji, user)
except discord.errors.NotFound:
pass
except discord.errors.Forbidden:
pass
if emoji == "⏯":
await _data.cache[server.id].toggle()
if emoji == "⏹":
await _data.cache[server.id].stop()
if emoji == "⏭":
await _data.cache[server.id].skip("1")
if emoji == "⏮":
await _data.cache[server.id].rewind("1")
if emoji == "🔀":
await _data.cache[server.id].shuffle()
if emoji == "🔉":
await _data.cache[server.id].setvolume()
if emoji == "🔊":
await _data.cache[server.id].setvolume() | The on_message event handler for this module
Args:
reaction (discord.Reaction): Input reaction
user (discord.User): The user that added the reaction |
14,654 | def require(self, fieldname, allow_blank=False):
if self.request.form and fieldname not in self.request.form.keys():
raise Exception("Required field not found in request: %s" % fieldname)
if self.request.form and not self.request.form[fieldname] and not allow_blank:
raise Exception("Required field %s may not have blank value" % fieldname)
14,655 | def by_date(self, chamber, date):
"Return votes cast in a chamber on a single day"
date = parse_date(date)
return self.by_range(chamber, date, date) | Return votes cast in a chamber on a single day |
14,656 | def plugins(self):
from fluent_contents import extensions
if self._plugins is None:
return extensions.plugin_pool.get_plugins()
else:
try:
return extensions.plugin_pool.get_plugins_by_name(*self._plugins)
except extensions.PluginNotFound as e:
raise extensions.PluginNotFound(str(e) + " Update the plugin list of the '{0}.{1}' field or the FLUENT_CONTENTS_PLACEHOLDER_CONFIG['{2}'] setting.".format(self.model._meta.object_name, self.name, self.slot)) | Get the set of plugins that this field may display. |
14,657 | def uniform(self, a: float, b: float, precision: int = 15) -> float:
return round(a + (b - a) * self.random(), precision) | Get a random number in the range [a, b) or [a, b] depending on rounding.
:param a: Minimum value.
:param b: Maximum value.
:param precision: Round a number to a given
precision in decimal digits, default is 15. |
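A standalone equivalent of 14,657's `uniform`, with the stdlib `random` module in place of the provider's internal `self.random`:

```python
import random

def uniform(a, b, precision=15):
    return round(a + (b - a) * random.random(), precision)

random.seed(42)
print(uniform(0.0, 10.0))     # 6.394267984578837
print(uniform(1.5, 2.5, 3))   # 1.525 (rounded to 3 decimal digits)
```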
14,658 | def refresh(self) -> None:
if not self:
self.values[:] = 0.
elif len(self) == 1:
values = list(self._toy2values.values())[0]
self.values[:] = self.apply_timefactor(values)
else:
for idx, date in enumerate(
timetools.TOY.centred_timegrid(self.simulationstep)):
values = self.interp(date)
self.values[idx] = self.apply_timefactor(values) | Update the actual simulation values based on the toy-value pairs.
Usually, one does not need to call refresh explicitly. The
"magic" methods __call__, __setattr__, and __delattr__ invoke
it automatically, when required.
Instantiate a 1-dimensional |SeasonalParameter| object:
>>> from hydpy.core.parametertools import SeasonalParameter
>>> class Par(SeasonalParameter):
... NDIM = 1
... TYPE = float
... TIME = None
>>> par = Par(None)
>>> par.simulationstep = '1d'
>>> par.shape = (None,)
When a |SeasonalParameter| object does not contain any toy-value
pairs yet, the method |SeasonalParameter.refresh| sets all actual
simulation values to zero:
>>> par.values = 1.
>>> par.refresh()
>>> par.values[0]
0.0
When there is only one toy-value pair, its values are relevant
for all actual simulation values:
>>> par.toy_1 = 2. # calls refresh automatically
>>> par.values[0]
2.0
Method |SeasonalParameter.refresh| performs a linear interpolation
for the central time points of each simulation time step. Hence,
in the following example, the original values of the toy-value
pairs do not show up:
>>> par.toy_12_31 = 4.
>>> from hydpy import round_
>>> round_(par.values[0])
2.00274
>>> round_(par.values[-2])
3.99726
>>> par.values[-1]
3.0
If one wants to preserve the original values in this example, one
would have to set the corresponding toy instances in the middle of
some simulation step intervals:
>>> del par.toy_1
>>> del par.toy_12_31
>>> par.toy_1_1_12 = 2
>>> par.toy_12_31_12 = 4.
>>> par.values[0]
2.0
>>> round_(par.values[1])
2.005479
>>> round_(par.values[-2])
3.994521
>>> par.values[-1]
4.0 |
14,659 | def _extract_dot15d4address(pkt, source=True):
underlayer = pkt.underlayer
while underlayer is not None and not isinstance(underlayer, Dot15d4Data):
underlayer = underlayer.underlayer
if type(underlayer) == Dot15d4Data:
addr = underlayer.src_addr if source else underlayer.dest_addr
if underlayer.underlayer.fcf_destaddrmode == 3:
tmp_ip = LINK_LOCAL_PREFIX[0:8] + struct.pack(">Q", addr)
tmp_ip = tmp_ip[0:8] + struct.pack("B", (orb(tmp_ip[8]) ^ 0x2)) + tmp_ip[9:16]
elif underlayer.underlayer.fcf_destaddrmode == 2:
tmp_ip = LINK_LOCAL_PREFIX[0:8] + \
b"\x00\x00\x00\xff\xfe\x00" + \
struct.pack(">Q", addr)[6:]
return tmp_ip
else:
raise Exception('Unimplemented: no Dot15d4Data underlayer found')  # else body elided in source; placeholder
| This function extracts the source/destination address of a 6LoWPAN
from its upper Dot15d4Data (802.15.4 data) layer.
params:
- source: if True, the address is the source one. Otherwise, it is the
destination.
returns: the packed & processed address |
14,660 | def to_statement(self, parameter_values):
missing = self.missing_parameter_values(parameter_values)
if len(missing) > 0:
raise InsufficientParameterValues("Following required parameters of template '{}' don't have values: {}"
.format(self.name, [str(m) for m in missing]))
necessary_parameter_values = {name: value for name, value in parameter_values.items()
if name in self.parameters}
supported_intrinsics = {
RefAction.intrinsic_name: RefAction()
}
resolver = IntrinsicsResolver(necessary_parameter_values, supported_intrinsics)
definition_copy = copy.deepcopy(self.definition)
return resolver.resolve_parameter_refs(definition_copy) | With the given values for each parameter, this method will return a policy statement that can be used
directly with IAM.
:param dict parameter_values: Dict containing values for each parameter defined in the template
:return dict: Dictionary containing policy statement
:raises InvalidParameterValues: If parameter values is not a valid dictionary or does not contain values
for all parameters
:raises InsufficientParameterValues: If the parameter values don't have values for all required parameters |
14,661 | def kde(data, grid, package, **kwargs):
if package == 'statsmodels':
package = 'statsmodels-m'  # default variant elided in source; assumed
func = KDE_FUNCS[package]
return func(data, grid, **kwargs) | Kernel Density Estimation
Parameters
----------
package : str
Package whose kernel density estimation to use.
Should be one of
`['statsmodels-u', 'statsmodels-m', 'scipy', 'sklearn']`.
data : numpy.array
Data points used to compute a density estimator. It
has `n x p` dimensions, representing n points and p
variables.
grid : numpy.array
Data points at which the desity will be estimated. It
has `m x p` dimensions, representing m points and p
variables.
Returns
-------
out : numpy.array
Density estimate. Has `m x 1` dimensions |
14,662 | def check_for_cores(self):
if not len(self.cores):
if self.session.options.get(, False):
logging.error("No cores were discovered!")
else:
raise exceptions.DebugError("No cores were discovered!") | ! @brief Init task: verify that at least one core was discovered. |
14,663 | def _indent(text, prefix, predicate=None):
if predicate is None:
predicate = lambda line: line.strip()
def prefixed_lines():
for line in text.splitlines(True):
yield prefix + line if predicate(line) else line
return "".join(prefixed_lines()) | Adds 'prefix' to the beginning of selected lines in 'text'.
If 'predicate' is provided, 'prefix' will only be added to the lines
where 'predicate(line)' is True. If 'predicate' is not provided,
it will default to adding 'prefix' to all non-empty lines that do not
consist solely of whitespace characters. |
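The `_indent` helper in 14,663 mirrors the stdlib's `textwrap.indent`, which has the same prefix/predicate semantics; a quick demonstration:

```python
import textwrap

doc = "first\n\nsecond\n"
print(textwrap.indent(doc, "    "))                   # blank line left untouched
print(textwrap.indent(doc, "> ", lambda line: True))  # prefix every line, even blanks
```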
14,664 | def get_sites_in_sphere(self, pt, r, include_index=False, include_image=False):
site_fcoords = np.mod(self.frac_coords, 1)
neighbors = []
for fcoord, dist, i, img in self._lattice.get_points_in_sphere(
site_fcoords, pt, r):
nnsite = PeriodicSite(self[i].species,
fcoord, self._lattice,
properties=self[i].properties)
nn_data = (nnsite, dist) if not include_index else (nnsite, dist, i)
if include_image:
nn_data += (img,)
neighbors.append(nn_data)
return neighbors | Find all sites within a sphere from the point. This includes sites
in other periodic images.
Algorithm:
1. place sphere of radius r in crystal and determine minimum supercell
(parallelpiped) which would contain a sphere of radius r. for this
we need the projection of a_1 on a unit vector perpendicular
to a_2 & a_3 (i.e. the unit vector in the direction b_1) to
determine how many a_1"s it will take to contain the sphere.
Nxmax = r * length_of_b_1 / (2 Pi)
2. keep points falling within r.
Args:
pt (3x1 array): cartesian coordinates of center of sphere.
r (float): Radius of sphere.
include_index (bool): Whether the non-supercell site index
is included in the returned data
include_image (bool): Whether to include the supercell image
is included in the returned data
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance. |
14,665 | def get_board_mapping_parent_items(self, team_context, child_backlog_context_category_ref_name, workitem_ids):
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
query_parameters = {}
if child_backlog_context_category_ref_name is not None:
query_parameters['childBacklogContextCategoryRefName'] = self._serialize.query('child_backlog_context_category_ref_name', child_backlog_context_category_ref_name, 'str')
if workitem_ids is not None:
workitem_ids = ",".join(map(str, workitem_ids))
query_parameters['workitemIds'] = self._serialize.query('workitem_ids', workitem_ids, 'str')
response = self._send(http_method='GET',
location_id='00000000-0000-0000-0000-000000000000',  # location GUID elided in source; placeholder
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[ParentChildWIMap]', self._unwrap_collection(response)) | GetBoardMappingParentItems.
[Preview API] Returns the list of parent field filter model for the given list of workitem ids
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str child_backlog_context_category_ref_name:
:param [int] workitem_ids:
:rtype: [ParentChildWIMap] |
14,666 | def from_labels_and_predictions(labels, predictions, num_classes):
assert len(labels) == len(predictions)
cm = np.zeros((num_classes, num_classes), dtype=np.int32)
for i in range(len(labels)):
cm[labels[i], predictions[i]] += 1
return cm | Compute a confusion matrix from labels and predictions.
A drop-in replacement for tf.confusion_matrix that works on CPU data
and not tensors.
Params
------
labels : array-like
1-D array of real labels for classification
predicitions: array-like
1-D array of predicted label classes
num_classes: scalar
Total number of classes
Returns
-------
matrix : NxN array
Array of shape [num_classes, num_classes] containing the confusion values. |
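A small end-to-end check of 14,666's confusion matrix, with the expected counts worked out by hand:

```python
import numpy as np

def from_labels_and_predictions(labels, predictions, num_classes):
    assert len(labels) == len(predictions)
    cm = np.zeros((num_classes, num_classes), dtype=np.int32)
    for i in range(len(labels)):
        cm[labels[i], predictions[i]] += 1  # row = true label, column = prediction
    return cm

labels = [0, 1, 2, 1, 0]
predictions = [0, 2, 2, 1, 0]
print(from_labels_and_predictions(labels, predictions, 3))
# [[2 0 0]
#  [0 1 1]
#  [0 0 1]]
```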
14,667 | def create_with(
cls, event: str = None, observable: T.Union[str, Observable] = None
) -> T.Callable[..., "ObservableProperty"]:
return functools.partial(cls, event=event, observable=observable) | Creates a partial application of ObservableProperty with
event and observable preset. |
14,668 | def iter_relation(self):
for point in iter_points(self.inputs):
yield (point, self.restrict(point)) | Iterate through all (point, element) pairs in the relation. |
14,669 | def hash_args(*args, **kwargs):
arg_string = '_'.join([str(arg) for arg in args])  # separator literals elided in source; assumed
kwarg_string = '_'.join([str(key) + '=' + str(value)
for key, value in iteritems(kwargs)])
combined = '_'.join([arg_string, kwarg_string])
hasher = md5()
hasher.update(b(combined))
return hasher.hexdigest() | Define a unique string for any set of representable args. |
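A self-contained Python 3 variant of 14,669's `hash_args`. The join separators in the original were elided, so the `'_'` and `'='` below are assumptions; kwargs are additionally sorted here so the digest is deterministic:

```python
from hashlib import md5

def hash_args(*args, **kwargs):
    arg_string = '_'.join(str(arg) for arg in args)
    kwarg_string = '_'.join(str(k) + '=' + str(v)
                            for k, v in sorted(kwargs.items()))
    combined = '_'.join([arg_string, kwarg_string])
    return md5(combined.encode('utf-8')).hexdigest()

print(hash_args('AAPL', 10, window='30d'))  # stable hex digest for this call
print(hash_args('AAPL', 10, window='30d') ==
      hash_args('AAPL', 10, window='30d'))  # True: same inputs, same key
```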
14,670 | def alphafilter(request, queryset, template):
qs_filter = {}
for key in list(request.GET.keys()):
if '__istartswith' in key:
qs_filter[str(key)] = request.GET[key]
break
return render_to_response(
template,
{'objects': queryset.filter(**qs_filter),  # context key names elided in source; assumed
'unfiltered': queryset},
context_instance=RequestContext(request)
) | Render the template with the filtered queryset |
14,671 | def update_domain_base_path_mapping(self, domain_name, lambda_name, stage, base_path):
api_id = self.get_api_id(lambda_name)
if not api_id:
print("Warning! CanitemsrestApiIdstagebasePathbasePath' if base_path is None else base_path,
restApiId=api_id,
stage=stage
) | Update domain base path mapping on API Gateway if it was changed |
14,672 | def get_metric(run_id, metric_id):
data = current_app.config["data"]
dao = data.get_metrics_dao()
metric = dao.get(run_id, metric_id)
return Response(render_template(
"api/metric.js",
run_id=metric["run_id"],
metric_id=metric["metric_id"],
name=metric["name"],
steps=metric["steps"],
timestamps=metric["timestamps"],
values=metric["values"]),
mimetype="application/json") | Get a specific Sacred metric from the database.
Returns a JSON response or HTTP 404 if not found.
Issue: https://github.com/chovanecm/sacredboard/issues/58 |
14,673 | def tradingStatusSSE(symbols=None, on_data=None, token='', version=''):
# SSE channel name elided in source; 'trading-status' is assumed
return _runSSE('trading-status', symbols, on_data, token, version) | The Trading status message is used to indicate the current trading status of a security.
For IEX-listed securities, IEX acts as the primary market and has the authority to institute a trading halt or trading pause in a security due to news dissemination or regulatory reasons.
For non-IEX-listed securities, IEX abides by any regulatory trading halts and trading pauses instituted by the primary or listing market, as applicable.
IEX disseminates a full pre-market spin of Trading status messages indicating the trading status of all securities.
In the spin, IEX will send out a Trading status message with “T” (Trading) for all securities that are eligible for trading at the start of the Pre-Market Session.
If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System.
After the pre-market spin, IEX will use the Trading status message to relay changes in trading status for an individual security. Messages will be sent when a security is:
Halted
Paused*
Released into an Order Acceptance Period*
Released for trading
*The paused and released into an Order Acceptance Period status will be disseminated for IEX-listed securities only. Trading pauses on non-IEX-listed securities will be treated simply as a halt.
https://iexcloud.io/docs/api/#deep-trading-status
Args:
symbols (string); Tickers to request
on_data (function): Callback on data
token (string); Access token
version (string); API version |
14,674 | def binary_operator(op):
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
@with_doc("Binary Operator: " % op)
@with_name(method_name_for_op(op))
@coerce_numbers_to_my_dtype
def binary_operator(self, other):
return_type = binop_return_type(op)
if isinstance(self, NumExprFactor):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other,
)
return return_type(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, NumExprFactor):
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if self is other:
return return_type(
"x_0 {op} x_0".format(op=op),
(self,),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
return return_type(
"x_0 {op} x_1".format(op=op),
(self, other),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, Number):
return return_type(
"x_0 {op} ({constant})".format(op=op, constant=other),
binds=(self,),
dtype=binop_return_dtype(op, self.dtype, other.dtype)
)
raise BadBinaryOperator(op, self, other)
return binary_operator | Factory function for making binary operator methods on a Factor subclass.
Returns a function, "binary_operator" suitable for implementing functions
like __add__. |
14,675 | def parse_simpleexprsp(self, tup_tree):
raise CIMXMLParseError(
_format("Internal Error: Parsing support for element {0!A} is not "
"implemented", name(tup_tree)),
conn_id=self.conn_id) | This Function not implemented. This response is for export senders
(indication senders) so it is not implemented in the pywbem
client. |
14,676 | def env(key, default=None, required=False):
try:
value = os.environ[key]
return ast.literal_eval(value)
except (SyntaxError, ValueError):
return value
except KeyError:
if default or not required:
return default
raise ImproperlyConfigured(
"Missing required environment variable " % key) | Retrieves environment variables and returns Python natives. The (optional)
default will be returned if the environment variable does not exist. |
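A runnable sketch of 14,676's `env` helper, substituting `RuntimeError` for Django's `ImproperlyConfigured` so the example carries no dependency:

```python
import ast
import os

def env(key, default=None, required=False):
    try:
        value = os.environ[key]
        return ast.literal_eval(value)   # "True" -> True, "4" -> 4, "[1,2]" -> [1, 2]
    except (SyntaxError, ValueError):
        return value                     # not a Python literal: keep the raw string
    except KeyError:
        if default or not required:
            return default
        raise RuntimeError("Missing required environment variable '%s'" % key)

os.environ['DEBUG'] = 'True'
os.environ['WORKERS'] = '4'
os.environ['NAME'] = 'api-server'        # not a literal, stays a string
print(env('DEBUG'), env('WORKERS'), env('NAME'), env('PORT', default=8000))
# True 4 api-server 8000
```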
14,677 | def cmd_long(self, args):
if len(args) < 1:
print("Usage: long <command> [arg1] [arg2]...")
return
command = None
if args[0].isdigit():
command = int(args[0])
else:
try:
command = eval("mavutil.mavlink." + args[0])
except AttributeError as e:
try:
command = eval("mavutil.mavlink.MAV_CMD_" + args[0])
except AttributeError as e:
pass
if command is None:
print("Unknown command long ({0})".format(args[0]))
return
floating_args = [ float(x) for x in args[1:] ]
while len(floating_args) < 7:
floating_args.append(float(0))
self.master.mav.command_long_send(self.settings.target_system,
self.settings.target_component,
command,
0,
*floating_args) | execute supplied command long |
14,678 | def set_unavailable(self):
show = PresenceShow.NONE
self.set_presence(PresenceState(available=False, show=show)) | Sets the agent availability to False. |
14,679 | def get_group_value(self, token, match):
try:
value = match.group('{0}_{1}'.format(token.name, self.group))  # group-name template elided in source; assumed
except IndexError:
value = ''
return self.func(value) if callable(self.func) else value | Return value of regex match for the specified group |
14,680 | def send_rsp_recv_cmd(self, target, data, timeout):
return super(Device, self).send_rsp_recv_cmd(target, data, timeout) | While operating as *target* send response *data* to the remote
device and return new command data if received within
*timeout* seconds. |
14,681 | def get_thumbprint(self):
extensions = self.extensions.split(' ')
name_str = ' -or '.join('-name "%s"' % ext for ext in extensions)  # join/format literals elided in source; assumed
cmd = 'find ' + self.base_dir + r' -type f \( ' + name_str + r' \) -exec md5sum {} \;'
return getoutput(cmd) | Calculates the current thumbprint of the item being tracked. |
14,682 | def apply_lens(df, lens=, dist=, n_dim=2, **kwargs):
if n_dim != 2:
raise
if dist not in [, ]:
raise
if lens == and dist != :
raise
if lens == :
df_lens = pd.DataFrame(decomposition.PCA(n_components=n_dim, **kwargs).fit_transform(df), df.index)
elif lens == :
D = metrics.pairwise.pairwise_distances(df, metric=dist)
df_lens = pd.DataFrame(manifold.MDS(n_components=n_dim, **kwargs).fit_transform(D), df.index)
elif lens == :
D = metrics.pairwise.pairwise_distances(df, metric=dist)
df_lens = pd.DataFrame(manifold.SpectralEmbedding(n_components=n_dim, **kwargs).fit_transform(D), df.index)
else:
raise
return df_lens | input: N x F dataframe of observations
output: N x n_dim image of input data under lens function |
14,683 | def p_route_version(self, p):
if len(p) > 2:
if p[2] <= 0:
msg = "Version number should be a positive integer."
self.errors.append((msg, p.lineno(2), self.path))
p[0] = p[2]
else:
p[0] = 1 | route_version : COLON INTEGER
| empty |
14,684 | def energy(self, spins, break_aux_symmetry=True):
subtheta = self.theta.copy()
subtheta.fix_variables(spins)
av = next(self._auxvar_counter)
    # the aux-variable name template was elided in the source; assumed here
    auxvars = {v: Symbol('aux{}_{}'.format(av, v), BOOL) for v in subtheta.linear}
if break_aux_symmetry and av == 0:
self.assertions.update(set(auxvars.values()))
trees = self._trees
if not trees:
assert not subtheta.linear and not subtheta.quadratic
return subtheta.offset
energy = Plus(self.message(trees, {}, subtheta, auxvars), subtheta.offset)
return energy | A formula for the exact energy of Theta with spins fixed.
Args:
spins (dict): Spin values for a subset of the variables in Theta.
break_aux_symmetry (bool, optional): Default True. If True, break
the aux variable symmetry by setting all aux variables to 1
for one of the feasible configurations. If the energy ranges
are not symmetric then this can make finding models impossible.
Returns:
Formula for the exact energy of Theta with spins fixed. |
14,685 | def density(pressure, temperature, mixing, molecular_weight_ratio=mpconsts.epsilon):
virttemp = virtual_temperature(temperature, mixing, molecular_weight_ratio)
return (pressure / (mpconsts.Rd * virttemp)).to(units.kilogram / units.meter ** 3) | r"""Calculate density.
This calculation must be given an air parcel's pressure, temperature, and mixing ratio.
The implementation uses the formula outlined in [Hobbs2006]_ pg.67.
Parameters
----------
temperature: `pint.Quantity`
The temperature
pressure: `pint.Quantity`
Total atmospheric pressure
mixing : `pint.Quantity`
dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The corresponding density of the parcel
Notes
-----
.. math:: \rho = \frac{p}{R_dT_v} |
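A worked check of the formula with plain floats rather than pint quantities (Rd ~ 287.05 J/(kg K) and epsilon ~ 0.622 are the standard constants; the virtual-temperature form matches [Hobbs2006]_):
Rd = 287.05      # J / (kg K), dry-air gas constant
epsilon = 0.622  # molecular weight ratio, water vapor / dry air

p = 100000.0     # Pa
T = 300.0        # K
r = 0.012        # kg/kg mass mixing ratio

Tv = T * (1 + r / epsilon) / (1 + r)  # virtual temperature, ~302.2 K
rho = p / (Rd * Tv)                   # ~1.153 kg/m^3
print(round(Tv, 2), round(rho, 3))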
14,686 | def file_to_str(fname):
data = None
    with open(fname, 'r') as fd:
data = fd.read()
return data | Read a file into a string
PRE: fname is a small file (to avoid hogging memory and its discontents) |
14,687 | def prioritize():
while True:
        # the database alias passed to .using() was elided in the source;
        # 'default' is an assumption
        hp_qs = Message.objects.high_priority().using('default')
        mp_qs = Message.objects.medium_priority().using('default')
        lp_qs = Message.objects.low_priority().using('default')
while hp_qs.count() or mp_qs.count():
while hp_qs.count():
for message in hp_qs.order_by("when_added"):
yield message
while hp_qs.count() == 0 and mp_qs.count():
yield mp_qs.order_by("when_added")[0]
while hp_qs.count() == 0 and mp_qs.count() == 0 and lp_qs.count():
yield lp_qs.order_by("when_added")[0]
        if Message.objects.non_deferred().using('default').count() == 0:
break | Yield the messages in the queue in the order they should be sent. |
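A consumption sketch (send_message is hypothetical; a real sender would mark each yielded message sent, or defer it on failure):
def send_message(message):
    ...  # hypothetical delivery logic

for message in prioritize():
    send_message(message)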
14,688 | def get(self, timeout=None):
valid = False
result = None
for tube in self._output_tubes:
if timeout:
valid, result = tube.get(timeout)
if valid:
result = result[0]
else:
result = tube.get()[0]
if timeout:
return valid, result
return result | Retrieve results from all the output tubes. |
14,689 | def get(method, hmc, uri, uri_parms, logon_required):
cpc_oid = uri_parms[0]
query_str = uri_parms[1]
try:
cpc = hmc.cpcs.lookup_by_oid(cpc_oid)
except KeyError:
raise InvalidResourceError(method, uri)
assert not cpc.dpm_enabled
result_profiles = []
filter_args = parse_query_parms(method, uri, query_str)
for profile in cpc.load_activation_profiles.list(filter_args):
result_profile = {}
        for prop in profile.properties:
            # the property names and the result key were elided in the source;
            # 'name'/'element-uri' and 'load-activation-profiles' are assumptions
            if prop in ('name', 'element-uri'):
                result_profile[prop] = profile.properties[prop]
        result_profiles.append(result_profile)
    return {'load-activation-profiles': result_profiles} | Operation: List Load Activation Profiles (requires classic mode). |
14,690 | def is_lop(ch,block_op_pairs_dict=get_block_op_pairs()):
    for i in range(1, len(block_op_pairs_dict) + 1):
        if ch == block_op_pairs_dict[i][0]:
            return True
    return False | # is_lop('{',block_op_pairs_dict)
# is_lop('[',block_op_pairs_dict)
# is_lop('}',block_op_pairs_dict)
# is_lop(']',block_op_pairs_dict)
# is_lop('a',block_op_pairs_dict) |
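A runnable version of the docstring examples (the 1-based pair mapping assumed here mirrors what get_block_op_pairs() presumably returns):
pairs = {1: ('{', '}'), 2: ('[', ']'), 3: ('(', ')')}  # assumed shape
print(is_lop('{', pairs))  # True
print(is_lop('[', pairs))  # True
print(is_lop('}', pairs))  # False
print(is_lop(']', pairs))  # False
print(is_lop('a', pairs))  # False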
14,691 | def query(*args, **kwargs):
query = _("query")
try:
        # key names were elided in the source; 'scope' and 'cachedir' are assumptions
        return query.Query(kwargs.get('scope'), cachedir=__opts__['cachedir'])(*args, **kwargs)
except InspectorQueryException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex) | Query the node for specific information.
Parameters:
* **scope**: Specify scope of the query.
* **System**: Return system data.
* **Software**: Return software information.
* **Services**: Return known services.
* **Identity**: Return user accounts information for this system.
accounts
Can be either 'local', 'remote' or 'all' (equal to "local,remote").
        Remote accounts cannot be resolved on all systems, but only on
        those which support 'passwd -S -a'.
disabled
True (or False, default) to return only disabled accounts.
* **payload**: Payload scope parameters:
filter
Include only results which path starts from the filter string.
time
Display time in Unix ticks or format according to the configured TZ (default)
Values: ticks, tz (default)
size
Format size. Values: B, KB, MB, GB
type
Include payload type.
Values (comma-separated): directory (or dir), link, file (default)
Example (returns everything): type=directory,link,file
owners
Resolve UID/GID to an actual names or leave them numeric (default).
Values: name (default), id
brief
Return just a list of payload elements, if True. Default: False.
* **all**: Return all information (default).
CLI Example:
.. code-block:: bash
salt '*' inspector.query scope=system
salt '*' inspector.query scope=payload type=file,link filter=/etc size=Kb brief=False |
14,692 | def get_lecture_filename(combined_section_lectures_nums,
section_dir,
secnum,
lecnum,
lecname,
title,
fmt):
fmt = fmt[:FORMAT_MAX_LENGTH]
title = title[:TITLE_MAX_LENGTH]
if combined_section_lectures_nums:
lecture_filename = os.path.join(
section_dir,
format_combine_number_resource(
secnum + 1, lecnum + 1, lecname, title, fmt))
else:
lecture_filename = os.path.join(
section_dir, format_resource(lecnum + 1, lecname, title, fmt))
return lecture_filename | Prepare a destination lecture filename.
@param combined_section_lectures_nums: Flag that indicates whether
section lectures should have combined numbering.
@type combined_section_lectures_nums: bool
@param section_dir: Path to current section directory.
@type section_dir: str
@param secnum: Section number.
@type secnum: int
@param lecnum: Lecture number.
@type lecnum: int
@param lecname: Lecture name.
@type lecname: str
@param title: Resource title.
@type title: str
@param fmt: Format of the resource (pdf, csv, etc)
@type fmt: str
@return: Lecture file name.
@rtype: str |
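A usage sketch (format_resource and format_combine_number_resource are helpers from the same module whose exact templates are not shown here, so the printed name is only indicative):
fname = get_lecture_filename(
    combined_section_lectures_nums=False,
    section_dir='course/01_intro',
    secnum=0, lecnum=0,
    lecname='welcome', title='Welcome', fmt='mp4')
print(fname)  # e.g. 'course/01_intro/01_welcome_Welcome.mp4' (template assumed)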
14,693 | def _get_block_publisher(self, state_hash):
state_view = self._state_view_factory.create_view(state_hash)
try:
class BatchPublisher:
def send(self, transactions):
                # the error message was elided in the source; assumed here
                raise InvalidGenesisConsensusError(
                    'Consensus cannot send transactions during genesis.')
consensus = ConsensusFactory.get_configured_consensus_module(
NULL_BLOCK_IDENTIFIER,
state_view)
return consensus.BlockPublisher(
BlockCache(self._block_store),
state_view_factory=self._state_view_factory,
batch_publisher=BatchPublisher(),
data_dir=self._data_dir,
config_dir=self._config_dir,
validator_id=self._identity_signer.get_public_key().as_hex())
except UnknownConsensusModuleError as e:
raise InvalidGenesisStateError(e) | Returns the block publisher based on the consensus module set by the
"sawtooth_settings" transaction family.
Args:
state_hash (str): The current state root hash for reading settings.
Raises:
InvalidGenesisStateError: if any errors occur getting the
BlockPublisher. |
14,694 | def output_args(f):
    args = [
        # flag names, dests, and help strings were partially elided in the
        # source; the values below follow IPython.parallel's %px magics and
        # are an assumption
        magic_arguments.argument('-r', action="store_const", dest='groupby',
            const='order',
            help="collate outputs in order (same as group-outputs=order)"
        ),
        magic_arguments.argument('-e', action="store_const", dest='groupby',
            const='engine',
            help="group outputs by engine (same as group-outputs=engine)"
        ),
        magic_arguments.argument('--group-outputs', dest='groupby', type=str,
            choices=['engine', 'order', 'type'], default='type',
            help="choose how to group outputs"
        ),
        magic_arguments.argument('-o', '--out', dest='save_name', type=str,
            help="store the AsyncResult object under this name"
        ),
    ]
for a in args:
f = a(f)
return f | decorator for output-formatting args
applied to %pxresult and %%px |
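A usage sketch of the decorator (the magic-function body is illustrative):
from IPython.core import magic_arguments

@magic_arguments.magic_arguments()
@output_args
def pxresult(self, line=''):
    args = magic_arguments.parse_argstring(pxresult, line)
    print(args.groupby, args.save_name)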
14,695 | def _Connect(self):
    elastic_host = {'host': self._host, 'port': self._port}
    if self._url_prefix:
        elastic_host['url_prefix'] = self._url_prefix
elastic_http_auth = None
if self._username is not None:
elastic_http_auth = (self._username, self._password)
self._client = elasticsearch.Elasticsearch(
[elastic_host],
http_auth=elastic_http_auth,
use_ssl=self._use_ssl,
ca_certs=self._ca_certs
)
    # the log-message template was elided in the source; assumed here
    logger.debug(
        'Connected to Elasticsearch server: {0:s} port: {1:d} URL prefix: '
        '{2!s}'.format(self._host, self._port, self._url_prefix)) | Connects to an Elasticsearch server. |
14,696 | def draw_special_char_key(self, surface, key):
    # the two glyphs were elided in the source; the arrows are placeholders
    key.value = u'\u2193'
    if key.is_activated():
        key.value = u'\u2191'
self.draw_character_key(surface, key, True) | Default drawing method for special char key. Drawn as character key.
:param surface: Surface background should be drawn in.
:param key: Target key to be drawn. |
14,697 | def rows(self):
for s_name, s in self.sections.items():
if s.name != :
yield []
yield [, s.value] + s.property_names
for row in s.rows:
term, value = row
term = term.replace(, ).title()
try:
yield [term] + value
except:
yield [term] + [value] | Iterate over all of the rows |
14,698 | def results(self, Pc):
    # the occupancy keys come from the docstring below; the
    # '*.invasion_pressure' keys were elided in the source and are assumed
    Psatn = self['pore.invasion_pressure'] <= Pc
    Tsatn = self['throat.invasion_pressure'] <= Pc
    inv_phase = {}
    inv_phase['pore.occupancy'] = sp.array(Psatn, dtype=float)
    inv_phase['throat.occupancy'] = sp.array(Tsatn, dtype=float)
return inv_phase | r"""
This method determines which pores and throats are filled with invading
phase at the specified capillary pressure, and creates several arrays
indicating the occupancy status of each pore and throat for the given
pressure.
Parameters
----------
Pc : scalar
The capillary pressure for which an invading phase configuration
is desired.
Returns
-------
A dictionary containing an assortment of data about distribution
of the invading phase at the specified capillary pressure. The data
include:
**'pore.occupancy'** : A value between 0 and 1 indicating the
fractional volume of each pore that is invaded. If no late pore
filling model was applied, then this will only be integer values
(either filled or not).
**'throat.occupancy'** : The same as 'pore.occupancy' but for throats.
This dictionary can be passed directly to the ``update`` method of
the *Phase* object. These values can then be accessed by models
or algorithms. |
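A usage sketch following the docstring's note about update() (alg and water are hypothetical OpenPNM algorithm and phase objects):
occupancy = alg.results(Pc=10000.0)  # capillary pressure in Pa (hypothetical)
water.update(occupancy)              # phase now carries the *.occupancy arrays
print(occupancy['pore.occupancy'][:5])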
14,699 | def convert_directory_2_to_3(meas_fname="magic_measurements.txt", input_dir=".",
output_dir=".", meas_only=False, data_model=None):
    # dict keys were elided in the source; MagIC 3.0 table names are assumed
    convert = {'specimens': map_magic.spec_magic2_2_magic3_map,
               'samples': map_magic.samp_magic2_2_magic3_map,
               'sites': map_magic.site_magic2_2_magic3_map,
               'locations': map_magic.loc_magic2_2_magic3_map,
               'ages': map_magic.age_magic2_2_magic3_map}
full_name = os.path.join(input_dir, meas_fname)
if not os.path.exists(full_name):
print("-W- {} is not a file".format(full_name))
return False, False, False
data2, filetype = magic_read(full_name)
NewMeas = convert_items(data2, map_magic.meas_magic2_2_magic3_map)
    ofile = os.path.join(output_dir, 'measurements.txt')
    magic_write(ofile, NewMeas, 'measurements')
upgraded = []
if os.path.exists(ofile):
print("-I- 3.0 format measurements file was successfully created: {}".format(ofile))
upgraded.append("measurements.txt")
else:
print("-W- 3.0 format measurements file could not be created")
no_upgrade = []
if not meas_only:
        for dtype in ['specimens', 'samples', 'sites', 'locations', 'ages']:
mapping = convert[dtype]
res = convert_and_combine_2_to_3(
dtype, mapping, input_dir, output_dir, data_model)
if res:
upgraded.append(res)
        if os.path.exists(os.path.join(input_dir, 'pmag_criteria.txt')):
crit_file = convert_criteria_file_2_to_3(input_dir=input_dir,
output_dir=output_dir,
data_model=data_model)[0]
if crit_file:
upgraded.append(crit_file)
else:
no_upgrade.append("pmag_criteria.txt")
for fname in os.listdir(input_dir):
            # the filename lists and the substring test were elided in the
            # source; typical MagIC 2.5 names and 'rmag' are assumptions
            if fname in ['magic_measurements.txt', 'pmag_specimens.txt',
                         'pmag_samples.txt', 'pmag_sites.txt', 'pmag_criteria.txt']:
                continue
            elif 'rmag' in fname:
                no_upgrade.append(fname)
            elif fname in ['pmag_results.txt', 'er_synthetics.txt', 'er_images.txt',
                           'er_plots.txt']:
no_upgrade.append(fname)
return NewMeas, upgraded, no_upgrade | Convert 2.0 measurements file into 3.0 measurements file.
Merge and convert specimen, sample, site, and location data.
Also translates criteria data.
Parameters
----------
    meas_fname : name of measurement file (do not include full path,
default is "magic_measurements.txt")
input_dir : name of input directory (default is ".")
output_dir : name of output directory (default is ".")
meas_only : boolean, convert only measurement data (default is False)
data_model : data_model3.DataModel object (default is None)
Returns
---------
NewMeas : 3.0 measurements data (output of pmag.convert_items)
upgraded : list of files successfully upgraded to 3.0
no_upgrade: list of 2.5 files not upgraded to 3.0 |
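A usage sketch based on the parameter list above (the directory names are illustrative):
NewMeas, upgraded, no_upgrade = convert_directory_2_to_3(
    meas_fname='magic_measurements.txt',
    input_dir='my_2.5_project',
    output_dir='my_3.0_project')
print('upgraded:', upgraded)
print('not upgraded:', no_upgrade)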