Unnamed: 0 (int64, 0–389k) | code (string, length 26–79.6k) | docstring (string, length 1–46.9k)
---|---|---|
23,400 | def populate(self, priority, address, rtr, data):
assert isinstance(data, bytes)
self.needs_low_priority(priority)
self.needs_no_rtr(rtr)
self.needs_data(data, 3)
self.transmit_error_counter = data[0]
self.receive_error_counter = data[1]
self.bus_off_counter = data[2] | :return: None |
23,401 | def toimage(self, width=None, height=None):
from .postprocessing import NoEffect
effect = NoEffect(self)
self.post_processing.append(effect)
oldwidth, oldheight = self.width(), self.height()
if None not in (width, height):
self.resize(width, height)
self.resizeGL(width, height)
else:
width = self.width()
height = self.height()
self.paintGL()
self.post_processing.remove(effect)
coltex = effect.texture
coltex.bind()
glActiveTexture(GL_TEXTURE0)
data = glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE)
image = pil_Image.frombuffer('RGBA', (width, height), data, 'raw', 'RGBA', 0, -1)
return image | Return the current scene as a PIL Image.
**Example**
You can build your molecular viewer as usual and dump an image
at any resolution supported by the video card (up to the
memory limits)::
v = QtViewer()
# Add the renderers
v.add_renderer(...)
# Add post processing effects
v.add_post_processing(...)
# Move the camera
v.widget.camera.autozoom(...)
v.widget.camera.orbit_x(...)
v.widget.camera.orbit_y(...)
# Save the image
image = v.widget.toimage(1024, 768)
image.save("mol.png")
.. seealso::
https://pillow.readthedocs.org/en/latest/PIL.html#module-PIL.Image |
23,402 | def get_new_document(
include_wdom_js: bool = True,
include_skeleton: bool = False,
include_normalizecss: bool = False,
autoreload: bool = None,
reload_wait: float = None,
log_level: Union[int, str] = None,
log_prefix: str = None,
log_console: bool = False,
ws_url: str = None,
message_wait: float = None,
document_factory: Callable[..., Document] = WdomDocument,
**kwargs: Any) -> Document:
document = document_factory(
autoreload=autoreload,
reload_wait=reload_wait,
**kwargs
)
if log_level is None:
log_level = config.logging
if message_wait is None:
message_wait = config.message_wait
log_script = []
log_script.append(.format(message_wait))
if isinstance(log_level, str):
log_script.append({}\.format(log_level))
elif isinstance(log_level, int):
log_script.append(.format(log_level))
if log_prefix:
log_script.append({}\.format(log_prefix))
if log_console:
log_script.append()
if log_script:
_s = Script(parent=document.head)
_s.textContent = .format(.join(log_script))
if ws_url:
_s = Script(parent=document.head)
_s.textContent = {}\.format(ws_url)
if include_wdom_js:
document.add_jsfile_head()
return document | Create new :class:`Document` object with options.
:arg bool include_wdom_js: Include wdom.js file. Usually should be True.
:arg bool include_skeleton: Include skeleton.css.
:arg bool include_normalizecss: Include normalize.css.
:arg bool autoreload: Enable autoreload flag. This flag overwrites
``--debug`` flag, which automatically enables autoreload.
:arg float reload_wait: Seconds to wait until reload when autoreload is
enabled.
:arg str log_level: Log level string, chosen from DEBUG, INFO, WARN, ERROR.
Integer values are also acceptable, like ``logging.INFO``. By default
``wdom.config.options.log_level`` is used, whose default is ``INFO``.
:arg str log_prefix: Prefix of log outputs.
:arg bool log_console: Flag to show wdom log on browser console.
:arg str ws_url: URL string to the ws url.
Default: ``ws://localhost:8888/wdom_ws``.
:arg float message_wait: Duration (seconds) to send WS messages.
:arg Callable document_factory: Factory function/class to create Document
object.
:rtype: Document |
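A minimal usage sketch of get_new_document(), based only on the signature and docstring above (the argument values are illustrative):
    doc = get_new_document(
        include_wdom_js=True,                  # usually True
        autoreload=True,                       # overrides the --debug driven default
        log_level='DEBUG',                     # string or int, e.g. logging.DEBUG
        ws_url='ws://localhost:8888/wdom_ws',  # the documented default ws url
    )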
23,403 | def _group_filter_values(seg, filter_indices, ms_per_input):
ret = []
for filter_value, (_segment, timestamp) in zip(filter_indices, seg.generate_frames_as_segments(ms_per_input)):
if filter_value == 1:
if len(ret) > 0 and ret[-1][0] == 'n':
ret.append(['y', timestamp])
elif len(ret) > 0 and ret[-1][0] == 'y':
ret[-1][1] = timestamp
else:
ret.append(['y', timestamp])
else:
if len(ret) > 0 and ret[-1][0] == 'n':
ret[-1][1] = timestamp
elif len(ret) > 0 and ret[-1][0] == 'y':
ret.append(['n', timestamp])
else:
ret.append(['n', timestamp])
return ret | Takes a list of 1s and 0s and returns a list of tuples of the form:
['y/n', timestamp]. |
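A self-contained re-implementation of the grouping rule described above, to make the 'y'/'n' run-length behaviour concrete (it works on a plain list instead of the segment object, so the names are illustrative):
    def group_filter_values(filter_values, ms_per_input):
        # Collapse consecutive 1s/0s into ['y'/'n', timestamp-of-last-frame] runs.
        ret = []
        for i, value in enumerate(filter_values):
            timestamp = (i + 1) * ms_per_input
            label = 'y' if value == 1 else 'n'
            if ret and ret[-1][0] == label:
                ret[-1][1] = timestamp          # extend the current run
            else:
                ret.append([label, timestamp])  # start a new run
        return ret

    group_filter_values([1, 1, 0, 0, 0, 1], ms_per_input=10)
    # [['y', 20], ['n', 50], ['y', 60]]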
23,404 | def _remap_key(key):
if key in KNOWN_PARAMS:
return key
if key.lower() in known_params:
return KNOWN_PARAMS[known_params.index(key.lower())]
return key | Change key into correct casing if we know the parameter |
23,405 | def _instruction_list(self, filters):
return .join([
self.INSTRUCTIONS.strip(),
,
.format(self.user),
.format(self.user),
] + [filter.description() for filter in filters]) | Generates the instructions for a bot and its filters.
Note:
The guidance for each filter is generated by combining the
docstrings of the predicate filter and resulting dispatch
function with a single space between. The class's
:py:attr:`INSTRUCTIONS` and the default help command are
added.
Arguments:
filters (:py:class:`list`): The filters to apply to incoming
messages.
Returns:
:py:class:`str`: The bot's instructions. |
23,406 | def get_ancestors_through_subont(self, go_term, relations):
all_ancestors = self.ontology.ancestors(go_term, reflexive=True)
subont = self.ontology.subontology(all_ancestors)
return subont.ancestors(go_term, relations) | Returns the ancestors from the relation filtered GO subontology of go_term's ancestors.
subontology() is used here primarily for speed when specifying relations to traverse. The point is to first get
a smaller graph (all ancestors of go_term regardless of relation) and then filter relations on that instead of
on the whole GO. |
23,407 | def stddev(values, meanval=None):
if meanval is None: meanval = mean(values)
return math.sqrt( sum([(x - meanval)**2 for x in values]) / (len(values)-1) ) | The standard deviation of a set of values.
Pass in the mean if you already know it. |
23,408 | def extend_service_volume(self, stack, service, volume, args):
url = .format(self.host, stack, service, volume)
return self.__post(url, args) | 扩容存储卷
为指定名称的服务增加存储卷资源,并挂载到部署的容器中。
Args:
- stack: 服务所属的服务组名称
- service: 服务名
- volume: 存储卷名
- args: 请求参数(json),参考 http://kirk-docs.qiniu.com/apidocs/
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回空dict{},失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息 |
23,409 | async def delete(query):
assert isinstance(query, peewee.Delete),\
("Error, trying to run delete coroutine"
"with wrong query class %s" % str(query))
cursor = await _execute_query_async(query)
rowcount = cursor.rowcount
await cursor.release()
return rowcount | Perform DELETE query asynchronously. Returns number of rows deleted. |
23,410 | def register(self, name, *slots):
if name not in self:
self[name] = Signal()
for slot in slots:
self[name].connect(slot) | Registers a given signal
:param name: the signal to register |
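A usage sketch of the registry above; `signals` stands for an instance of the dict-like class this method belongs to, and the Signal class with connect() is assumed from the surrounding module:
    def on_saved(path):
        print('saved to', path)

    signals.register('file-saved', on_saved)        # creates the Signal and connects the slot
    signals.register('file-saved', lambda p: None)  # later calls connect further slots to it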
23,411 | def thread_details(io_handler, thread_id, max_depth=0):
try:
max_depth = int(max_depth)
if max_depth < 1:
max_depth = None
except (ValueError, TypeError):
max_depth = None
try:
thread_id = int(thread_id)
stack = sys._current_frames()[thread_id]
except KeyError:
io_handler.write_line("Unknown thread ID: {0}", thread_id)
except ValueError:
io_handler.write_line("Invalid thread ID: {0}", thread_id)
except AttributeError:
io_handler.write_line("sys._current_frames() is not available.")
else:
try:
name = threading._active[thread_id].name
except KeyError:
name = "<unknown>"
lines = [
"Thread ID: {0} - Name: {1}".format(thread_id, name),
"Stack trace:",
]
trace_lines = []
depth = 0
frame = stack
while frame is not None and (
max_depth is None or depth < max_depth
):
trace_lines.append(format_frame_info(frame))
frame = frame.f_back
depth += 1
trace_lines.reverse()
lines.extend(trace_lines)
lines.append("")
io_handler.write("\n".join(lines)) | Prints details about the thread with the given ID (not its name) |
23,412 | def street(random=random, *args, **kwargs):
return random.choice([
"{noun} {street_type}",
"{adjective}{verb} {street_type}",
"{direction} {adjective}{verb} {street_type}",
"{direction} {noun} {street_type}",
"{direction} {lastname} {street_type}",
]).format(noun=noun(random=random),
lastname=lastname(random=random),
direction=direction(random=random),
adjective=adjective(random=random),
verb=verb(random=random),
street_type=random.choice(streets)) | Produce something that sounds like a street name
>>> mock_random.seed(0)
>>> street(random=mock_random)
'chimp place'
>>> street(random=mock_random, capitalize=True)
'Boatbench Block'
>>> mock_random.seed(3)
>>> street(random=mock_random, slugify=True)
'central-britches-boulevard' |
23,413 | def check_running(self):
if self.check_pidfile():
pid = self.get_pidfile()
if not salt.utils.platform.is_windows():
if self.check_pidfile() and self.is_daemonized(pid) and os.getppid() != pid:
return True
else:
if self.check_pidfile() and self.is_daemonized(pid) and salt.utils.win_functions.get_parent_pid() != pid:
return True
return False | Check if a pid file exists and if it is associated with
a running process. |
23,414 | def context(self):
if self._context is not None:
return self._context
else:
logger.warning("Using shared context without a lock")
return self._executor._shared_context | Convenient access to shared context |
23,415 | def _init_data_line(self, fnc, lnum, line):
fld = re.split(self.sep, line)
if self.usr_max_idx < len(fld):
self.convert_ints_floats(fld)
fnc(fld)
else:
for fld in enumerate(zip(self.hdr2idx.keys(), fld)):
print(fld)
for hdr in self.hdrs_usr:
print(hdr)
print(, len(fld))
print(, self.usr_max_idx)
raise Exception("ERROR ON LINE {} IN {}".format(lnum+1, self.fin)) | Process Data line. |
23,416 | def get_stats_display(self, args=None, max_width=None):
display_curse = False
if hasattr(self, 'display_curse'):
display_curse = self.display_curse
if hasattr(self, '_align'):
align_curse = self._align
if max_width is not None:
ret = {'display': display_curse,
'msgdict': self.msg_curse(args, max_width=max_width),
'align': align_curse}
else:
ret = {'display': display_curse,
'msgdict': self.msg_curse(args),
'align': align_curse}
return ret | Return a dict with all the information needed to display the stat.
key | description
----------------------------
display | Display the stat (True or False)
msgdict | Message to display (list of dict [{ 'msg': msg, 'decoration': decoration } ... ])
align | Message position |
23,417 | def process_LANGUAGE_CODE(self, language_code, data):
language_code = if language_code == else language_code
language = translation.get_language_info( if language_code == else language_code)
if not settings.JS_CONTEXT or in settings.JS_CONTEXT \
or (settings.JS_CONTEXT_EXCLUDE and in settings.JS_CONTEXT_EXCLUDE):
data[] = language[]
if not settings.JS_CONTEXT or in settings.JS_CONTEXT \
or (settings.JS_CONTEXT_EXCLUDE and in settings.JS_CONTEXT_EXCLUDE):
data[] = language[]
return language_code | Fix language code when set to non included default `en`
and add the extra variables ``LANGUAGE_NAME`` and ``LANGUAGE_NAME_LOCAL``. |
23,418 | def maximum_likelihood_estimator(self, data, states):
x_df = pd.DataFrame(data, columns=states)
x_len = len(self.evidence)
sym_coefs = []
for i in range(0, x_len):
sym_coefs.append( + str(i + 1) + )
sum_x = x_df.sum()
x = [sum_x[]]
coef_matrix = pd.DataFrame(columns=sym_coefs)
for i in range(0, x_len):
x.append(self.sum_of_product(x_df[], x_df[self.evidence[i]]))
for j in range(0, x_len):
coef_matrix.loc[i, sym_coefs[j]] = self.sum_of_product(
x_df[self.evidence[i]], x_df[self.evidence[j]])
coef_matrix.insert(0, , sum_x[self.evidence].values)
row_1 = np.append([len(x_df)], sum_x[self.evidence].values)
coef_matrix.loc[-1] = row_1
coef_matrix.index = coef_matrix.index + 1
coef_matrix.sort_index(inplace=True)
beta_coef_matrix = np.matrix(coef_matrix.values, dtype=)
coef_inv = np.linalg.inv(beta_coef_matrix)
beta_est = np.array(np.matmul(coef_inv, np.transpose(x)))
self.beta = beta_est[0]
sigma_est = 0
x_len_df = len(x_df)
for i in range(0, x_len):
for j in range(0, x_len):
sigma_est += self.beta[i + 1] * self.beta[j + 1] * (self.sum_of_product(
x_df[self.evidence[i]], x_df[self.evidence[j]]) / x_len_df - np.mean(x_df[self.evidence[i]]) * np.mean(x_df[self.evidence[j]]))
sigma_est = np.sqrt(
self.sum_of_product(
x_df[],
x_df[]) /
x_len_df -
np.mean(
x_df[]) *
np.mean(
x_df[]) -
sigma_est)
self.sigma_yx = sigma_est
return self.beta, self.sigma_yx | Fit using MLE method.
Parameters
----------
data: pandas.DataFrame or 2D array
Dataframe of values containing samples from the conditional distribution, (Y|X)
and corresponding X values.
states: All the input states that are jointly gaussian.
Returns
-------
beta, variance (tuple): Returns estimated betas and the variance. |
23,419 | def from_series(series):
result = PercentRankTransform()
result.cdf = series.values
result.bin_edges = series.index.values[1:-1]
return result | Deseralize a PercentRankTransform the given pandas.Series, as returned
by `to_series()`.
Parameters
----------
series : pandas.Series
Returns
-------
PercentRankTransform |
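The intended round trip, assuming the `to_series()` counterpart mentioned in the docstring (which stores the CDF values indexed by the bin edges), called here as the plain function defined above:
    series = transform.to_series()   # transform is an existing PercentRankTransform
    restored = from_series(series)
    # restored.cdf holds series.values; restored.bin_edges comes from series.index[1:-1]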
23,420 | def _parse_known_pattern(self, pattern: str) -> List[str]:
if pattern.endswith(os.path.sep):
patterns = [
filename
for filename in os.listdir(pattern)
if os.path.isdir(os.path.join(pattern, filename))
]
else:
patterns = [pattern]
return patterns | Expand pattern if identified as a directory and return found sub packages |
23,421 | def create_container_student(self, parent_container_id, environment, network_grading, mem_limit, student_path,
socket_path, systemfiles_path, course_common_student_path):
student_path = os.path.abspath(student_path)
socket_path = os.path.abspath(socket_path)
systemfiles_path = os.path.abspath(systemfiles_path)
course_common_student_path = os.path.abspath(course_common_student_path)
response = self._docker.containers.create(
environment,
stdin_open=True,
command="_run_student_intern",
mem_limit=str(mem_limit) + "M",
memswap_limit=str(mem_limit) + "M",
mem_swappiness=0,
oom_kill_disable=True,
network_mode=('none' if not network_grading else ('container:' + parent_container_id)),
volumes={
student_path: {: },
socket_path: {: },
systemfiles_path: {: , : },
course_common_student_path: {: , : }
}
)
return response.id | Creates a student container
:param parent_container_id: id of the "parent" container
:param environment: env to start (name/id of a docker image)
:param network_grading: boolean to indicate if the network should be enabled in the container or not (share the parent stack)
:param mem_limit: in Mo
:param student_path: path to the task directory that will be mounted in the container
:param socket_path: path to the socket that will be mounted in the container
:param systemfiles_path: path to the systemfiles folder containing files that can override partially some defined system files
:return: the container id |
23,422 | def UpdateFlows(self,
client_id_flow_id_pairs,
pending_termination=db.Database.unchanged):
for client_id, flow_id in client_id_flow_id_pairs:
try:
self.UpdateFlow(
client_id, flow_id, pending_termination=pending_termination)
except db.UnknownFlowError:
pass | Updates flow objects in the database. |
23,423 | def bundle_lambda(zipfile):
if not zipfile:
return 1
with open(, ) as zfile:
zfile.write(zipfile)
log.info()
return 0 | Write zipfile contents to file.
:param zipfile:
:return: exit_code |
23,424 | def parseLine(line, lineNumber=None):
match = line_re.match(line)
if match is None:
raise ParseError("Failed to parse line: {0!s}".format(line), lineNumber)
return (match.group().replace(, ),
parseParams(match.group()),
match.group(), match.group()) | Parse line |
23,425 | def to_json_data(self, model_name=None):
return collections.OrderedDict([(k, self.get_serialized_value(k, model_name=model_name )) for k in self._data]) | Parameters
----------
model_name: str, default None
if given, will be used as external file directory base name
Returns
-------
A dictionary of serialized data. |
23,426 | def to_record(self):
tf_list = [getattr(self, k, None) for k in
[_.value for _ in TLSFileType]]
return self.record | Create a CertStore record from this TLSFileBundle |
23,427 | def _8bit_oper(op1, op2=None, reversed_=False):
output = []
if op2 is not None and reversed_:
tmp = op1
op1 = op2
op2 = tmp
op = op1
indirect = (op[0] == )
if indirect:
op = op[1:]
immediate = (op[0] == )
if immediate:
op = op[1:]
if is_int(op):
op = int(op)
if indirect:
output.append( % op)
else:
if op == 0:
output.append()
else:
output.append( % int8(op))
else:
if immediate:
if indirect:
output.append( % op)
else:
output.append( % op)
elif op[0] == :
if indirect:
idx = if reversed_ else
output.append( % (idx, op))
if not reversed_:
output.extend(tmp)
return output | Returns pop sequence for 8 bits operands
1st operand in H, 2nd operand in A (accumulator)
For some operations (like comparisons), you can swap
operands extraction by setting reversed = True |
23,428 | def addSpecfile(self, specfiles, path):
for specfile in aux.toList(specfiles):
if specfile not in self.info:
self._addSpecfile(specfile, path)
else:
warntext = \
\
% (specfile, )
warnings.warn(warntext) | Prepares the container for loading ``mrc`` files by adding specfile
entries to ``self.info``. Use :func:`MsrunContainer.load()` afterwards
to actually import the files
:param specfiles: the name of an ms-run file or a list of names
:type specfiles: str or [str, str, ...]
:param path: file directory used for loading and saving ``mrc`` files |
23,429 | def set_window_title(self):
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
if self.window_title is not None:
title += u + to_text_string(self.window_title)
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u)
title = u.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title) | Set window title. |
23,430 | def detect_number_of_threads():
log.warning()
try:
nthreads = int(os.environ.get('NUMEXPR_NUM_THREADS', ''))
except ValueError:
try:
nthreads = int(os.environ.get('OMP_NUM_THREADS', ''))
except ValueError:
nthreads = detect_number_of_cores()
if nthreads > MAX_THREADS:
nthreads = MAX_THREADS
return nthreads | DEPRECATED: use `_init_num_threads` instead.
If this is modified, please update the note in: https://github.com/pydata/numexpr/wiki/Numexpr-Users-Guide |
23,431 | def validate_token(self, token):
from expfactory.database.models import Participant
p = Participant.query.filter(Participant.token == token).first()
if p is not None:
if p.token.endswith((,)):
p = None
else:
p = p.id
return p | retrieve a subject based on a token. Valid means we return a participant
invalid means we return None |
23,432 | def import_list(
self,
listName,
pathToTaskpaperDoc
):
self.log.info()
newTasks = self._get_tasks_from_reminder_list(listName)
self._add_tasks_to_taskpaper(
pathToTaskpaperDoc=pathToTaskpaperDoc,
taskString=newTasks
)
self._delete_reminders_from_list(
listName=listName
)
self.log.info()
return newTasks | *import tasks from a reminder.app list into a given taskpaper document*
**Key Arguments:**
- ``listName`` -- the name of the reminders list
- ``pathToTaskpaperDoc`` -- the path to the taskpaper document to import the tasks into
**Usage:**
The following will import tasks from a Reminders.app list into a taskpaper document. Tasks are added to any existing content in the taskpaper document, or if the document doesn't yet exist it will be created for you. Tasks are deleted from the Reminders list once the import is complete.
.. code-block:: python
r.import_list(
listName="listname",
pathToTaskpaperDoc="/path/to/my/doc.taskpaper"
) |
23,433 | def percentile(self, percentile, axis, inclusive=True):
axis = self.get_axis_number(axis)
s = self.histogram.shape
s_collapsed = list(s)
s_collapsed[axis] = 1
s_removed = np.concatenate([s[:axis], s[axis + 1:]]).astype(np.int)
ecdf = self.cumulative_density(axis).histogram
if not inclusive:
density = self.normalize(axis).histogram
ecdf = ecdf - density
ecdf = np.nan_to_num(ecdf)
x = ecdf - 2 * (ecdf >= percentile / 100)
sz = np.ones(len(s), dtype=np.int)
sz[axis] = -1
x += np.linspace(0, 1e-9, s[axis]).reshape(sz)
result = self.all_axis_bin_centers(axis)[
x == np.min(x, axis=axis).reshape(s_collapsed)
]
result = result.reshape(s_removed)
if self.dimensions == 2:
new_hist = Hist1d
else:
new_hist = Histdd
return new_hist.from_histogram(histogram=result,
bin_edges=itemgetter(*self.other_axes(axis))(self.bin_edges),
axis_names=self.axis_names_without(axis)) | Returns d-1 dimensional histogram containing percentile of values along axis
If inclusive=True, reports the bin center of the first bin for which percentile% of the data lies in or below the bin;
if inclusive=False, the bin for which that much data lies strictly below the bin.
The 10th percentile is thus the value that at least 10% of the data is LOWER than. |
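The "value that at least p% of the data lies below" rule can be checked independently with plain NumPy on a 1-D histogram; this sketch only illustrates the idea, not the Histdd API:
    import numpy as np

    counts, edges = np.histogram([1, 2, 2, 3, 3, 3, 9], bins=[0, 2, 4, 10])
    ecdf = np.cumsum(counts) / counts.sum()      # inclusive ECDF per bin
    centers = 0.5 * (edges[:-1] + edges[1:])
    p = 50
    value = centers[np.argmax(ecdf >= p / 100)]  # first bin whose ECDF reaches p%
    print(value)                                 # 3.0 -> the [2, 4) bin holds the median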
23,434 | def handle_keywords(self, func, node, offset=0):
func_argument_names = {}
for i, arg in enumerate(func.args.args[offset:]):
assert isinstance(arg, ast.Name)
func_argument_names[arg.id] = i
nargs = len(func.args.args) - offset
defaults = func.args.defaults
keywords = {func_argument_names[kw.arg]: kw.value
for kw in node.keywords}
node.args.extend([None] * (1 + max(keywords.keys()) - len(node.args)))
replacements = {}
for index, arg in enumerate(node.args):
if arg is None:
if index in keywords:
replacements[index] = deepcopy(keywords[index])
else:
replacements[index] = deepcopy(defaults[index - nargs])
return replacements | Gather keywords to positional argument information
Assumes the named parameters exist; raises a KeyError otherwise |
23,435 | def _load_entries(self, func, count, page=1, entries=None, **kwargs):
if entries is None:
entries = []
res = \
func(offset=(page - 1) * self.max_entries_per_load,
limit=self.max_entries_per_load,
**kwargs)
loaded_entries = [
entry for entry in res[][:count]
]
total_count = self.count
if count > total_count:
count = total_count
if count <= self.max_entries_per_load:
return entries + loaded_entries
else:
cur_count = count - self.max_entries_per_load
return self._load_entries(
func=func, count=cur_count, page=page + 1,
entries=entries + loaded_entries, **kwargs) | Load entries
:param function func: function (:meth:`.API._req_files` or
:meth:`.API._req_search`) that returns entries
:param int count: number of entries to load. This value should never
be greater than self.count
:param int page: page number (starting from 1) |
23,436 | def valuemap(f):
@wraps(f)
def wrapper(*args, **kwargs):
if 'value' in kwargs:
val = kwargs['value']
del kwargs['value']
_f = f(*args, **kwargs)
def valued_f(*args, **kwargs):
result = _f(*args, **kwargs)
s, obj, span = result
if callable(val):
return PegreResult(s, val(obj), span)
else:
return PegreResult(s, val, span)
return valued_f
else:
return f(*args, **kwargs)
return wrapper | Decorator to help PEG functions handle value conversions. |
23,437 | def gone_assignments(self):
qs = Assignment.objects.filter(hard_deadline__lt=timezone.now())
qs = qs.filter(course__in=self.user_courses())
return qs.order_by() | Returns the list of past assignments the user did not submit for
before the hard deadline. |
23,438 | def handle(self, *args, **options):
self.stdout.write()
for model in get_registered_model():
if options.get(, None) and not (model.__name__ in
options[]):
continue
get_adapter(model).set_settings()
self.stdout.write(.format(model.__name__)) | Run the management command. |
23,439 | def init_config(app):
for k in dir(config):
if k.startswith():
app.config.setdefault(k, getattr(config, k)) | Initialize configuration. |
23,440 | def WriteSessionCompletion(self, aborted=False):
self._RaiseIfNotWritable()
if self._storage_type != definitions.STORAGE_TYPE_SESSION:
raise IOError()
self._session.aborted = aborted
session_completion = self._session.CreateSessionCompletion()
self._storage_file.WriteSessionCompletion(session_completion) | Writes session completion information.
Args:
aborted (Optional[bool]): True if the session was aborted.
Raises:
IOError: if the storage type is not supported or
when the storage writer is closed.
OSError: if the storage type is not supported or
when the storage writer is closed. |
23,441 | def break_on_error(self, pid, errorCode):
aProcess = self.system.get_process(pid)
address = aProcess.get_break_on_error_ptr()
if not address:
raise NotImplementedError(
"The functionality is not supported in this system.")
aProcess.write_dword(address, errorCode) | Sets or clears the system breakpoint for a given Win32 error code.
Use L{Process.is_system_defined_breakpoint} to tell if a breakpoint
exception was caused by a system breakpoint or by the application
itself (for example because of a failed assertion in the code).
@note: This functionality is only available since Windows Server 2003.
In 2003 it only breaks on error values set externally to the
kernel32.dll library, but this was fixed in Windows Vista.
@warn: This method will fail if the debug symbols for ntdll (kernel32
in Windows 2003) are not present. For more information see:
L{System.fix_symbol_store_path}.
@see: U{http://www.nynaeve.net/?p=147}
@type pid: int
@param pid: Process ID.
@type errorCode: int
@param errorCode: Win32 error code to stop on. Set to C{0} or
C{ERROR_SUCCESS} to clear the breakpoint instead.
@raise NotImplementedError:
The functionality is not supported in this system.
@raise WindowsError:
An error occurred while processing this request. |
23,442 | def _friends_leaveoneout_radius(points, ftype):
kdtree = spatial.KDTree(points)
if ftype == 'balls':
dists, ids = kdtree.query(points, k=2, eps=0, p=2)
elif ftype == 'cubes':
dists, ids = kdtree.query(points, k=2, eps=0, p=np.inf)
dist = dists[:, 1]
return dist | Internal method used to compute the radius (half-side-length) for each
ball (cube) used in :class:`RadFriends` (:class:`SupFriends`) using
leave-one-out (LOO) cross-validation. |
23,443 | def insert_many(conn, tablename, column_names, records, chunksize=2500):
groups = chunks(records, chunksize)
column_str = ', '.join(column_names)
insert_template = 'INSERT INTO {table} ({columns}) VALUES {values}'.format(
table=tablename, columns=column_str, values='{}')
with conn:
with conn.cursor() as cursor:
for recs in groups:
record_group = list(recs)
records_template_str = ','.join(['%s'] * len(record_group))
insert_query = insert_template.format(records_template_str)
cursor.execute(insert_query, record_group) | Insert many records by chunking data into insert statements.
Notes
-----
records should be an iterable collection of namedtuples or tuples. |
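A usage sketch with psycopg2-style %s placeholders, matching the chunked pattern above (the DSN and table name are placeholders):
    import psycopg2
    from collections import namedtuple

    Row = namedtuple('Row', ['id', 'name'])
    rows = [Row(1, 'a'), Row(2, 'b'), Row(3, 'c')]

    conn = psycopg2.connect('dbname=example')   # placeholder DSN
    insert_many(conn, 'items', ['id', 'name'], rows, chunksize=2)
    # -> two INSERT statements: one carrying 2 value tuples, one carrying 1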
23,444 | def fetch_and_filter_tags(self):
self.all_tags = self.fetcher.get_all_tags()
self.filtered_tags = self.get_filtered_tags(self.all_tags)
self.fetch_tags_dates() | Fetch and filter tags, fetch dates and sort them in time order. |
23,445 | def compute_within_collection_vowel_duration(self, prefix, no_singletons=False):
if no_singletons:
min_size = 2
else:
prefix += "no_singletons_"
min_size = 1
durations = []
for cluster in self.collection_list:
if len(cluster) >= min_size:
for word in cluster:
word = self.full_timed_response[word.index_in_timed_response]
for phone in word.phones:
if phone.string in self.vowels:
durations.append(phone.end - phone.start)
self.measures[prefix + ] = get_mean(durations) \
if len(durations) > 0 else
if not self.quiet:
if no_singletons:
print "Mean within-" + self.current_similarity_measure + "-" + self.current_collection_type + \
" vowel duration, excluding singletons:", \
self.measures[prefix + ]
else:
print "Mean within-" + self.current_similarity_measure + "-" + self.current_collection_type + \
" vowel duration, including singletons:", \
self.measures[prefix + ] | Computes the mean duration of vowels from Units within clusters.
:param str prefix: Prefix for the key entry in self.measures
:param bool no_singletons: If False, excludes collections of length 1 from calculations
and adds "no_singletons" to the prefix
Adds the following measures to the self.measures dictionary:
- TIMING_(similarity_measure)_(collection_type)_within_collection_vowel_duration_mean |
23,446 | def get_default_config(self):
config = super(DarnerCollector, self).get_default_config()
config.update({
: ,
: True,
: []
})
return config | Returns the default collector settings |
23,447 | def scatter(slope, zero, x1, x2, x1err=[], x2err=[]):
n = len(x1)
x2pred = zero + slope * x1
s = sum((x2 - x2pred) ** 2) / (n - 1)
if len(x2err) == n:
s_obs = sum((x2err / x2) ** 2) / n
s0 = s - s_obs
print numpy.sqrt(s), numpy.sqrt(s_obs), numpy.sqrt(s0)
return numpy.sqrt(s0) | Used mainly to measure scatter for the BCES best-fit |
23,448 | def findAllSubstrings(string, substring):
start = 0
positions = []
while True:
start = string.find(substring, start)
if start == -1:
break
positions.append(start)
start += 1
return positions | Returns a list of all substring starting positions in string or an empty
list if substring is not present in string.
:param string: a template string
:param substring: a string, which is looked for in the ``string`` parameter.
:returns: a list of substring starting positions in the template string |
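For example:
    >>> findAllSubstrings('abcabcab', 'ab')
    [0, 3, 6]
    >>> findAllSubstrings('abcabcab', 'xyz')
    []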
23,449 | def is_link(self, path, use_sudo=False):
func = use_sudo and _sudo or _run
with self.settings(hide('running', 'warnings'), warn_only=True):
return func('test -L "%(path)s"' % locals()).succeeded | Check if a path exists, and is a symbolic link. |
23,450 | def convolve(input, weights, mask=None, slow=False):
assert (len(input.shape) == 2)
assert (len(weights.shape) == 2)
assert (weights.shape[0] < input.shape[0] + 1)
assert (weights.shape[1] < input.shape[1] + 1)
if mask is not None:
assert (not slow)
assert (input.shape == mask.shape)
tiled_mask = tile_and_reflect(mask)
output = np.copy(input)
tiled_input = tile_and_reflect(input)
rows = input.shape[0]
cols = input.shape[1]
hw_row = np.int(weights.shape[0] / 2)
hw_col = np.int(weights.shape[1] / 2)
fw_row = weights.shape[0]
fw_col = weights.shape[0]
for i, io in zip(list(range(rows, rows * 2)), list(range(rows))):
for j, jo in zip(list(range(cols, cols * 2)), list(range(cols))):
if mask is not None and tiled_mask[i, j]:
continue
average = 0.0
if slow:
for k in range(weights.shape[0]):
for l in range(weights.shape[1]):
m = i + k - hw_row
n = j + l - hw_col
average += tiled_input[m, n] * weights[k, l]
else:
overlapping = tiled_input[
i - hw_row:i - hw_row + fw_row,
j - hw_col:j - hw_col + fw_col]
assert (overlapping.shape == weights.shape)
if mask is not None:
overlapping_mask = tiled_mask[
i - hw_row:i - hw_row + fw_row,
j - hw_col:j - hw_col + fw_row]
assert (overlapping_mask.shape == weights.shape)
clobber_total = np.sum(weights[overlapping_mask])
remaining_num = np.sum(np.logical_not(overlapping_mask))
assert (remaining_num > 0)
correction = clobber_total / remaining_num
if correction == 0:
assert (not overlapping_mask.any())
tmp_weights = np.copy(weights)
tmp_weights[overlapping_mask] = 0.0
tmp_weights[np.where(tmp_weights != 0)] += correction
assert (abs(np.sum(tmp_weights) - 1) < 1e-15)
else:
tmp_weights = weights
merged = tmp_weights[:] * overlapping
average = np.sum(merged)
output[io, jo] = average
return output | 2 dimensional convolution.
This is a Python implementation of what will be written in Fortran.
Borders are handled with reflection.
Masking is supported in the following way:
* Masked points are skipped.
* Parts of the input which are masked have weight 0 in the kernel.
* Since the kernel as a whole needs to have value 1, the weights of the
masked parts of the kernel are evenly distributed over the non-masked
parts.
Adapted from https://github.com/nicjhan/gaussian-filter |
23,451 | def collect(cls, result_key, func):
def scanner(self, obj):
if not getattr(self, result_key, None):
setattr(self, result_key, [])
rv = func(obj)
if rv:
getattr(self, result_key).append(rv)
cls._scan(result_key, scanner) | Sets the `result_key` to an iterable of objects for which `func(obj)`
returns True |
23,452 | def gallery_images(self):
api_version = self._get_api_version()
if api_version == '2018-06-01':
from .v2018_06_01.operations import GalleryImagesOperations as OperationClass
elif api_version == '2019-03-01':
from .v2019_03_01.operations import GalleryImagesOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | Instance depends on the API version:
* 2018-06-01: :class:`GalleryImagesOperations<azure.mgmt.compute.v2018_06_01.operations.GalleryImagesOperations>`
* 2019-03-01: :class:`GalleryImagesOperations<azure.mgmt.compute.v2019_03_01.operations.GalleryImagesOperations>` |
23,453 | def load_service(config):
if isinstance(config, collections.abc.Mapping):
service_config = config
elif isinstance(config, str):
service_config = load_config(pathlib.Path(config))
elif isinstance(config, pathlib.Path):
service_config = load_config(config)
else:
raise TypeError(.format(type(config)))
apis = []
for api, defn in service_config[].items():
api_def= create_api_definition(api, defn, service_config[])
apis.append(api_def)
service_module = create_service_module(service_config[], apis)
return service_module | Load a restful service specified by some YAML file at config_path.
:param config_path: A pathlib Path object that points to the yaml
config
:returns: A python module containing a Client class, call factory,
and the definition of each of the APIs defined by the config. |
23,454 | def stop(name):
if _service_is_upstart(name):
cmd = 'stop {0}'.format(name)
else:
cmd = 'service {0} stop'.format(name)
return not __salt__['cmd.retcode'](cmd, python_shell=False) | Stop the specified service
CLI Example:
.. code-block:: bash
salt '*' service.stop <service name> |
23,455 | def delete_object(self, id):
params = urllib.parse.urlencode({"method": "delete", : str(id)})
u = requests.get("https://graph.facebook.com/" + str(id) + "?" + params)
groups = u.json()
return groups | Deletes the object with the given ID from the graph. |
23,456 | def register(**criteria):
def inner(cls):
class_list.add(cls)
for (category, value) in criteria.items():
index[category][value].add(cls)
_entry = dict((k, set([v])) for (k, v) in criteria.items())
if cls not in class_criteria:
class_criteria[cls] = _entry
else:
for key in _entry.keys():
class_criteria[cls][key] = class_criteria[cls].get(key, set()) | _entry[key]
return cls
return inner | class decorator to add :class:`Part <cqparts.Part>` or
:class:`Assembly <cqparts.Assembly>` to the ``cqparts`` search index:
.. testcode::
import cqparts
from cqparts.params import *
# Created Part or Assembly
@cqparts.search.register(
type='motor',
current_class='dc',
part_number='ABC123X',
)
class SomeMotor(cqparts.Assembly):
shaft_diam = PositiveFloat(5)
def make_components(self):
return {} # build assembly content
motor_class = cqparts.search.find(part_number='ABC123X')
motor = motor_class(shaft_diam=6.0)
Then use :meth:`find` &/or :meth:`search` to instantiate it.
.. warning::
Multiple classes *can* be registered with identical criteria, but
should be avoided.
If multiple classes share the same criteria, :meth:`find` will never
yield the part you want.
Try adding unique criteria, such as *make*, *model*, *part number*,
*library name*, &/or *author*.
To avoid this, learn more in :ref:`tutorial_component-index`. |
23,457 | def create_resource(self, path, transaction):
t = self._parent.root.with_prefix(path)
max_len = 0
imax = None
for i in t:
if i == path:
return self.edit_resource(transaction, path)
elif len(i) > max_len:
imax = i
max_len = len(i)
lp = path
parent_resource = self._parent.root[imax]
if parent_resource.allow_children:
return self.add_resource(transaction, parent_resource, lp)
else:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction | Render a POST request.
:param path: the path of the request
:param transaction: the transaction
:return: the response |
23,458 | def system_exit(object):
@functools.wraps(object)
def system_exit_wrapper(*args, **kwargs):
try:
if object(*args, **kwargs):
foundations.core.exit(0)
except Exception as error:
sys.stderr.write("\n".join(foundations.exceptions.format_exception(*sys.exc_info())))
foundations.core.exit(1)
return system_exit_wrapper | Handles proper system exit in case of critical exception.
:param object: Object to decorate.
:type object: object
:return: Object.
:rtype: object |
23,459 | def guess_encoding(blob):
if hasattr(magic, 'open'):
m = magic.open(magic.MAGIC_MIME_ENCODING)
m.load()
return m.buffer(blob)
elif hasattr(magic, 'from_buffer'):
m = magic.Magic(mime_encoding=True)
return m.from_buffer(blob)
else:
raise Exception() | uses file magic to determine the encoding of the given data blob.
:param blob: file content as read by file.read()
:type blob: data
:returns: encoding
:rtype: str |
23,460 | def add(self, agent_id, media_type, media_file):
params = {
: agent_id,
: media_type,
}
return self._post(
url=,
params=params,
files={
: media_file
}
Add permanent media of other types.
For details see
https://qydev.weixin.qq.com/wiki/index.php?title=%E4%B8%8A%E4%BC%A0%E6%B0%B8%E4%B9%85%E7%B4%A0%E6%9D%90
:param agent_id: id of the enterprise application
:param media_type: media file type; one of image, voice, video or ordinary file (file)
:param media_file: the file to upload, a File-object
:return: the returned JSON data packet |
23,461 | def parse_declaration(self, i):
j = None
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
k = len(self.rawdata)
data = self.rawdata[i+9:k]
j = k+3
self._toStringSubclass(data, CData)
else:
try:
j = SGMLParser.parse_declaration(self, i)
except SGMLParseError:
toHandle = self.rawdata[i:]
self.handle_data(toHandle)
j = i + len(toHandle)
return j | Treat a bogus SGML declaration as raw data. Treat a CDATA
declaration as a CData object. |
23,462 | def _run(self):
self.set_state(self.STATE_INITIALIZING)
self.ioloop = ioloop.IOLoop.current()
self.consumer_lock = locks.Lock()
self.sentry_client = self.setup_sentry(
self._kwargs[], self.consumer_name)
try:
self.setup()
except (AttributeError, ImportError):
return self.on_startup_error(
.format(
self.consumer_name))
if not self.is_stopped:
try:
self.ioloop.start()
except KeyboardInterrupt:
LOGGER.warning() | Run method that can be profiled |
23,463 | def print_options(self):
options = []
print("The script is running with the following options:")
options.append(("dry_run", self.options.dry_run))
options.append(("worker_config", self.__class__))
database_config = self.database_config or \
self.options.queue_model.database.connection_settings
options.append(("database", % (database_config[],
database_config[],
database_config[])))
if self.options.worker_class is not None:
options.append(("worker-class", self.options.worker_class))
for name, value in options:
print(" - %s = %s" % (name.replace(, ), value))
print("The worker will run with the following options:")
for name in self.options.worker_class.parameters:
option = getattr(self.worker, name)
if name == and \
self.options.worker_class.execute == Worker.execute:
option =
elif isinstance(option, (list, tuple, set)):
option = .join(option)
print(" - %s = %s" % (name.replace(, ), option)) | Print all options as parsed by the script |
23,464 | def scalar_inc_dec(word, valence, is_cap_diff):
scalar = 0.0
word_lower = word.lower()
if word_lower in BOOSTER_DICT:
scalar = BOOSTER_DICT[word_lower]
if valence < 0:
scalar *= -1
if word.isupper() and is_cap_diff:
if valence > 0:
scalar += C_INCR
else:
scalar -= C_INCR
return scalar | Check if the preceding words increase, decrease, or negate/nullify the
valence |
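A worked example of the booster logic, assuming VADER's standard lexicon values (BOOSTER_DICT['extremely'] = 0.293 and C_INCR = 0.733):
    scalar_inc_dec('extremely', valence=1.9, is_cap_diff=False)   # ->  0.293
    scalar_inc_dec('EXTREMELY', valence=1.9, is_cap_diff=True)    # ->  0.293 + 0.733
    scalar_inc_dec('extremely', valence=-1.9, is_cap_diff=False)  # -> -0.293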
23,465 | def EncodeMessages(self,
message_list,
result,
destination=None,
timestamp=None,
api_version=3):
if api_version not in [3]:
raise RuntimeError(
"Unsupported api version: %s, expected 3." % api_version)
if destination is None:
destination = self.server_name
cipher = self._GetServerCipher()
else:
remote_public_key = self._GetRemotePublicKey(destination)
cipher = Cipher(self.common_name, self.private_key, remote_public_key)
if timestamp is None:
self.timestamp = timestamp = int(time.time() * 1000000)
packed_message_list = rdf_flows.PackedMessageList(timestamp=timestamp)
self.EncodeMessageList(message_list, packed_message_list)
result.encrypted_cipher_metadata = cipher.encrypted_cipher_metadata
result.encrypted_cipher = cipher.encrypted_cipher
serialized_message_list = packed_message_list.SerializeToString()
result.packet_iv, result.encrypted = cipher.Encrypt(serialized_message_list)
result.hmac = cipher.HMAC(result.encrypted)
result.full_hmac = cipher.HMAC(result.encrypted, result.encrypted_cipher,
result.encrypted_cipher_metadata,
result.packet_iv.SerializeToString(),
struct.pack("<I", api_version))
result.api_version = api_version
if isinstance(result, rdfvalue.RDFValue):
result.num_messages = len(message_list)
return timestamp | Accepts a list of messages and encodes for transmission.
This function signs and then encrypts the payload.
Args:
message_list: A MessageList rdfvalue containing a list of GrrMessages.
result: A ClientCommunication rdfvalue which will be filled in.
destination: The CN of the remote system this should go to.
timestamp: A timestamp to use for the signed messages. If None - use the
current time.
api_version: The api version which this should be encoded in.
Returns:
A nonce (based on time) which is inserted to the encrypted payload. The
client can verify that the server is able to decrypt the message and
return the nonce.
Raises:
RuntimeError: If we do not support this api version. |
23,466 | def getRegexpsByName(regexpNames = []):
allRegexpList = getAllRegexp()
if 'all' in regexpNames:
return allRegexpList
regexpList = []
for name in regexpNames:
for r in allRegexpList:
if name == r.name:
regexpList.append(r)
return regexpList | Method that recovers the names of the <RegexpObject> in a given list.
:param regexpNames: list of strings containing the possible regexp.
:return: Array of <RegexpObject> classes. |
23,467 | def import_class(import_path, setting_name=None):
mod_name, class_name = import_path.rsplit('.', 1)
mod = import_module_or_none(mod_name)
if mod is not None:
try:
return getattr(mod, class_name)
except AttributeError:
pass
if setting_name:
raise ImproperlyConfigured("{0} does not point to an existing class: {1}".format(setting_name, import_path))
else:
raise ImproperlyConfigured("Class not found: {0}".format(import_path)) | Import a class by name. |
23,468 | def get_mining_equipment():
url = build_url()
data = load_data(url)
coin_data = data[]
mining_data = data[]
return coin_data, mining_data | Get all the mining equipment information available.
Returns:
This function returns two major dictionaries. The first one contains information about the coins for which mining equipment data is available.
coin_data:
{symbol1: {'BlockNumber': ...,
'BlockReward': ...,
'BlockRewardReduction': ...,
'BlockTime': ...,
'DifficultyAdjustment': ...,
'NetHashesPerSecond': ...,
'PreviousTotalCoinsMined': ...,
'PriceUSD': ...,
'Symbol': ...,
'TotalCoinsMined': ...},
symbol2: {...},
...}
The other one contains all the available mining equipment.
mining_data:
{id1: {'AffiliateURL': ...,
'Algorithm': ...,
'Company': ...,
'Cost': ...,
'CurrenciesAvailable': ...,
'CurrenciesAvailableLogo': ...,
'CurrenciesAvailableName': ...,
'Currency': ...,
'EquipmentType': ...,
'HashesPerSecond': ...,
'Id': ...,
'LogoUrl': ...,
'Name': ...,
'ParentId': ...,
'PowerConsumption': ...,
'Recommended': ...,
'Sponsored': ...,
'Url': ...},
id2: {...}, |
23,469 | def paths_for_download(self):
if self._paths_for_download is None:
queries = list()
try:
for sra in self.gsm.relations[]:
query = sra.split("=")[-1]
if not in query:
raise ValueError(
"Sample looks like it is not an SRA: %s" % query)
logger.info("Query: %s" % query)
queries.append(query)
except KeyError:
raise NoSRARelationException(
% self.gsm.get_accession())
df = DataFrame(columns=[])
for query in queries:
searchdata = Entrez.esearch(db='sra', term=query, usehistory='y',
retmode='json')
answer = json.loads(searchdata.read())
ids = answer["esearchresult"]["idlist"]
if len(ids) != 1:
raise ValueError(
"There should be one and only one ID per SRX")
number_of_trials = 10
wait_time = 30
for trial in range(number_of_trials):
try:
results = Entrez.efetch(db="sra", id=ids[0],
rettype="runinfo",
retmode="text").read()
break
except HTTPError as httperr:
if "502" in str(httperr):
logger.warn(("%s, trial %i out of %i, waiting "
"for %i seconds.") % (
str(httperr),
trial,
number_of_trials,
wait_time))
time.sleep(wait_time)
elif httperr.code == 429:
try:
header_wait_time = int(
httperr.headers["Retry-After"])
except:
header_wait_time = wait_time
logger.warn(("%s, trial %i out of %i, waiting "
"for %i seconds.") % (
str(httperr),
trial,
number_of_trials,
header_wait_time))
time.sleep(header_wait_time)
else:
raise httperr
try:
df_tmp = DataFrame([i.split() for i in results.split() if i != ][1:],
columns=[i.split() for i in results.split() if i != ][0])
except IndexError:
logger.error(("SRA is empty (ID: %s, query: %s). "
"Check if it is publicly available.") %
(ids[0], query))
continue
try:
df_tmp[]
except KeyError as e:
logger.error( + str(e) + )
logger.error(str(results) + )
df = concat([df, df_tmp], sort=True)
self._paths_for_download = [path for path in df[]]
return self._paths_for_download | List of URLs available for downloading. |
23,470 | def checksum(file_path, hash_type=, block_size=65536):
if hash_type == 'md5':
hash_ = hashlib.md5()
elif hash_type == 'sha256':
hash_ = hashlib.sha256()
else:
raise ValueError(
"{} is an invalid hash_type. Expected 'md5' or 'sha256'."
.format(hash_type)
)
with open(file_path, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
hash_.update(block)
return hash_.hexdigest() | Returns either the md5 or sha256 hash of a file at `file_path`.
md5 is the default hash_type as it is faster than sha256
The default block size is 64 kb, which appears to be one of a few command
choices according to https://stackoverflow.com/a/44873382/2680. The code
below is an extension of the example presented in that post. |
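Usage is straightforward; a short sketch:
    digest = checksum('/etc/hostname')                   # md5 by default
    sha = checksum('/etc/hostname', hash_type='sha256')
    print(len(digest), len(sha))                         # 32 and 64 hex characters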
23,471 | def is_quoted(value):
ret = ''
if isinstance(value, six.string_types) \
and value[0] == value[-1] \
and value.startswith(('"', "'")):
ret = value[0]
return ret | Return a single or double quote, if a string is wrapped in extra quotes.
Otherwise return an empty string. |
23,472 | def get_status_job(self, id_job, hub=None, group=None, project=None,
access_token=None, user_id=None):
if access_token:
self.req.credential.set_token(access_token)
if user_id:
self.req.credential.set_user_id(user_id)
if not self.check_credentials():
respond = {}
respond["status"] =
respond["error"] = "Not credentials valid"
return respond
if not id_job:
respond = {}
respond["status"] =
respond["error"] = "Job ID not specified"
return respond
url = get_job_url(self.config, hub, group, project)
url += + id_job +
status = self.req.get(url)
return status | Get the status about a job, by its id |
23,473 | def normalize_string(mac_type, resource, content_hash):
normalized = [
'hawk.' + str(HAWK_VER) + '.' + mac_type,
normalize_header_attr(resource.timestamp),
normalize_header_attr(resource.nonce),
normalize_header_attr(resource.method or ''),
normalize_header_attr(resource.name or ''),
normalize_header_attr(resource.host),
normalize_header_attr(resource.port),
normalize_header_attr(content_hash or '')
]
normalized.append(normalize_header_attr(resource.ext or ''))
if resource.app:
normalized.append(normalize_header_attr(resource.app))
normalized.append(normalize_header_attr(resource.dlg or ''))
normalized.append('')
normalized = '\n'.join(normalized)
return normalized | Serializes mac_type and resource into a HAWK string. |
23,474 | def verify_verify(self, id, token):
return Verify().load(self.request('verify/' + str(id), params={'token': token})) | Verify the token of a specific verification. |
23,475 | def qtt_fft1(self,tol,inverse=False, bitReverse=True):
d = self.d
r = self.r.copy()
y = self.to_list(self)
if inverse:
twiddle =-1+1.22e-16j
else:
twiddle =-1-1.22e-16j
for i in range(d-1, 0, -1):
r1= y[i].shape[0]
r2= y[i].shape[2]
crd2 = _np.zeros((r1, 2, r2), order='F', dtype=complex)
crd2[:,0,:]= (y[i][:,0,:] + y[i][:,1,:])/_np.sqrt(2)
crd2[:,1,:]= (y[i][:,0,:] - y[i][:,1,:])/_np.sqrt(2)
y[i]= _np.zeros((r1*2, 2, r2),order='F',dtype=complex)
y[i][0:r1, 0, 0:r2]= crd2[:,0,:]
y[i][r1:r1*2, 1, 0:r2]= crd2[:,1,:]
rv=1;
for j in range(0, i):
cr=y[j]
r1= cr.shape[0]
r2= cr.shape[2]
if j==0:
r[j]=r1
r[j+1] = r2*2
y[j] = _np.zeros((r[j], 2, r[j+1]),order='F',dtype=complex)
y[j][0:r1, :, 0:r2] = cr
y[j][0:r1, 0, r2 :r[j+1]] = cr[:,0,:]
y[j][0:r1, 1, r2 :r[j+1]] = twiddle**(1.0/(2**(i-j)))*cr[:,1,:]
else:
r[j]=r1*2
r[j+1] = r2*2
y[j] = _np.zeros((r[j], 2, r[j+1]),order='F',dtype=complex)
y[j][0:r1, :, 0:r2] = cr
y[j][r1:r[j], 0, r2 :r[j+1]] = cr[:,0,:]
y[j][r1:r[j], 1, r2 :r[j+1]] = twiddle**(1.0/(2**(i-j)))*cr[:,1,:]
y[j] = _np.reshape(y[j],( r[j], 2*r[j+1]),order='F')
y[j] = _np.dot(rv,y[j])
r[j] = y[j].shape[0]
y[j] = _np.reshape(y[j],( 2*r[j], r[j+1]),order='F')
y[j], rv = _np.linalg.qr(y[j])
y[j] = _np.reshape(y[j], (r[j], 2, rv.shape[0]),order='F')
y[i] = _np.reshape(y[i], (r[i], 2*r[i+1]),order='F')
y[i] = _np.dot(rv,y[i])
r[i] = rv.shape[0]
for j in range(i, 0,-1):
u,s,v = _np.linalg.svd(y[j], full_matrices=False)
rnew = my_chop2(s, _np.linalg.norm(s)*tol/_np.sqrt(i))
u=_np.dot(u[:, 0:rnew], _np.diag(s[0:rnew]))
v= v[0:rnew, :]
y[j] = _np.reshape(v, (rnew, 2, r[j+1]),order='F')
y[j-1] = _np.reshape(y[j-1], (r[j-1]*2,r[j] ),order='F')
y[j-1] = _np.dot(y[j-1], u)
r[j] = rnew
y[j-1] = _np.reshape(y[j-1], (r[j-1],r[j]*2 ),order='F')
y[0] = _np.reshape(y[0], (r[0],2, r[1]), order='F')
y[0]=_np.transpose(y[0],(1,0,2))
y[0]=_np.reshape(y[0],(2, r[0]*r[1]),order='F')
y[0]= _np.dot( _np.array([[1,1],[1,-1]]), y[0])/_np.sqrt(2)
y[0]=_np.reshape(y[0],(2, r[0], r[1]),order='F')
y[0]=_np.transpose(y[0],(1,0,2))
if bitReverse:
y2=[None]*d
for i in range(d):
y2[d-i-1]= _np.transpose(y[i],(2,1,0))
y=self.from_list(y2)
else:
y=self.from_list(y)
return y | Compute 1D (inverse) discrete Fourier Transform in the QTT format.
:param tol: error tolerance.
:type tol: float
:param inverse: whether do an inverse FFT or not.
:type inverse: Boolean
:param bitReverse: whether do the bit reversion or not. If this function is used as a subroutine for multi-dimensional qtt-fft, this option
need to be set False.
:type bitReverse: Boolean.
:returns: QTT-vector of FFT coefficients.
This is a python translation of the Matlab function "qtt_fft1" in Ivan Oseledets' project TT-Toolbox(https://github.com/oseledets/TT-Toolbox)
See S. Dolgov, B. Khoromskij, D. Savostyanov,
Superfast Fourier transform using QTT approximation,
J. Fourier Anal. Appl., 18(5), 2012. |
23,476 | def digest(self,data=None):
if data is not None:
self.update(data)
b=create_string_buffer(256)
size=c_size_t(256)
if libcrypto.EVP_DigestSignFinal(self.ctx,b,pointer(size))<=0:
raise DigestError()
self.digest_finalized=True
return b.raw[:size.value] | Method digest is redefined to return keyed MAC value instead of
just digest. |
23,477 | def equivalent_to(self, token):
return self.filter(character_id=token.character_id).require_scopes_exact(token.scopes.all()).filter(
models.Q(user=token.user) | models.Q(user__isnull=True)).exclude(pk=token.pk) | Gets all tokens which match the character and scopes of a reference token
:param token: :class:`esi.models.Token`
:return: :class:`esi.managers.TokenQueryset` |
23,478 | def _adjust_n_months(other_day, n, reference_day):
if n > 0 and other_day < reference_day:
n = n - 1
elif n <= 0 and other_day > reference_day:
n = n + 1
return n | Adjust the number of times a monthly offset is applied based
on the day of a given date, and the reference day provided. |
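A few worked cases of the adjustment (rolling forward "uses up" one application when the starting day has not yet reached the reference day, and symmetrically when rolling backwards):
    _adjust_n_months(other_day=10, n=2, reference_day=15)   # -> 1
    _adjust_n_months(other_day=20, n=2, reference_day=15)   # -> 2 (unchanged)
    _adjust_n_months(other_day=20, n=-1, reference_day=15)  # -> 0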
23,479 | def parse(self, stream, full_statusline=None):
if full_statusline is None:
full_statusline = stream.readline()
full_statusline = self.decode_header(full_statusline)
statusline, total_read = _strip_count(full_statusline, 0)
headers = []
if total_read == 0:
raise EOFError()
elif not statusline:
return StatusAndHeaders(statusline=statusline,
headers=headers,
protocol='',
total_len=total_read)
if self.verify:
protocol_status = self.split_prefix(statusline, self.statuslist)
if not protocol_status:
msg = 'Expected Status Line starting with {0} - Found: {1}'
msg = msg.format(self.statuslist, statusline)
raise StatusAndHeadersParserException(msg, full_statusline)
else:
protocol_status = statusline.split(' ', 1)
line, total_read = _strip_count(self.decode_header(stream.readline()), total_read)
while line:
result = line.split(':', 1)
if len(result) == 2:
name = result[0].rstrip()
value = result[1].lstrip()
else:
name = result[0]
value = None
next_line, total_read = _strip_count(self.decode_header(stream.readline()),
total_read)
while next_line and next_line.startswith((' ', '\t')):
if value is not None:
value += next_line
next_line, total_read = _strip_count(self.decode_header(stream.readline()),
total_read)
if value is not None:
header = (name, value)
headers.append(header)
line = next_line
if len(protocol_status) > 1:
statusline = protocol_status[1].strip()
else:
statusline = ''
return StatusAndHeaders(statusline=statusline,
headers=headers,
protocol=protocol_status[0],
total_len=total_read) | parse stream for status line and headers
return a StatusAndHeaders object
support continuation headers starting with space or tab |
23,480 | def indent_list(inlist, level):
indent = ' ' * level
joinstr = '\n' + indent
retval = joinstr.join(inlist)
return indent + retval | Join a list of strings, one per line with 'level' spaces before each one |
23,481 | def _get_time_at_horizon(self, utc_time, obslon, obslat, **kwargs):
warnings.warn("_get_time_at_horizon is replaced with get_next_passes",
DeprecationWarning)
if "precision" in kwargs:
precision = kwargs['precision']
else:
precision = timedelta(seconds=0.001)
if "max_iterations" in kwargs:
nmax_iter = kwargs["max_iterations"]
else:
nmax_iter = 100
sec_step = 0.5
t_step = timedelta(seconds=sec_step / 2.0)
def fprime(timex):
el0 = self.get_observer_look(timex - t_step,
obslon, obslat, 0.0)[1]
el1 = self.get_observer_look(timex + t_step,
obslon, obslat, 0.0)[1]
return el0, (abs(el1) - abs(el0)) / sec_step
tx0 = utc_time - timedelta(seconds=1.0)
tx1 = utc_time
idx = 0
eps = 100.
while abs(tx1 - tx0) > precision and idx < nmax_iter:
tx0 = tx1
fpr = fprime(tx0)
var_scale = np.abs(fpr[0])
tx1 = tx0 - timedelta(seconds=(eps * var_scale * fpr[1]))
idx = idx + 1
if abs(tx1 - utc_time) < precision and idx < 2:
tx1 = tx1 + timedelta(seconds=1.0)
if abs(tx1 - tx0) <= precision and idx < nmax_iter:
return tx1
else:
return None | Get the time closest in time to *utc_time* when the
satellite is at the horizon relative to the position of an observer on
ground (altitude = 0)
Note: This is considered deprecated and its functionality is currently
replaced by 'get_next_passes'. |
23,482 | def cleanDir(self):
if os.path.isdir(self.outdir):
baddies = [,,]
for file in baddies:
filepath = os.path.join(self.outdir,file)
if os.path.isfile(filepath):
os.remove(filepath) | Remove existing json datafiles in the target directory. |
23,483 | def get_image_hashes(image_path, version=None, levels=None):
if levels is None:
levels = get_levels(version=version)
hashes = dict()
for level_name,level_filter in levels.items():
hashes[level_name] = get_image_hash(image_path,
level_filter=level_filter)
return hashes | get_image_hashes returns the hash for an image across all levels. This is the quickest,
easiest way to define a container's reproducibility on each level. |
23,484 | def get_line_relative_to_node(self, target_node: ast.AST, offset: int) -> str:
return self.lines[target_node.lineno - self.node.lineno + offset] | Raises:
IndexError: when ``offset`` takes the request out of bounds of this
Function's lines. |
23,485 | def save(self, file_path):
try:
file_path = os.path.abspath(file_path)
with open(file_path, 'wb') as df:
pickle.dump((self.__data, self.__classes, self.__labels,
self.__dtype, self.__description, self.__num_features,
self.__feature_names),
df)
return
except IOError as ioe:
raise IOError('Unable to save the dataset to disk: {}'.format(ioe))
except:
raise | Method to save the dataset to disk.
Parameters
----------
file_path : str
File path to save the current dataset to
Raises
------
IOError
If saving to disk is not successful. |
23,486 | def is_timed_out(self):
if (self.is_expired):
return True
try:
if (not self.last_access_time):
msg = ("session.last_access_time for session with id [" +
str(self.session_id) + "] is null. This value must be"
"set at least once, preferably at least upon "
"instantiation. Please check the " +
self.__class__.__name__ +
" implementation and ensure self value will be set "
"(perhaps in the constructor?)")
raise ValueError(msg)
if self.is_absolute_timed_out:
return True
if self.is_idle_timed_out:
return True
except AttributeError:
msg2 = ("Timeouts not set for session with id [" +
str(self.session_id) + "]. Session is not considered "
"expired.")
logger.debug(msg2)
return False | determines whether a Session has been inactive/idle for too long a time
OR exceeds the absolute time that a Session may exist |
23,487 | def cas(self, key, value, cas, time=0, compress_level=-1):
server = self._get_server(key)
return server.cas(key, value, cas, time, compress_level) | Set a value for a key on server if its CAS value matches cas.
:param key: Key's name
:type key: six.string_types
:param value: A value to be stored on server.
:type value: object
:param cas: The CAS value previously obtained from a call to get*.
:type cas: int
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True in case of success and False in case of failure
:rtype: bool |
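A hedged check-and-set sketch built on the method above; it assumes a companion gets()-style call returning (value, cas_token), which is the usual way CAS tokens are obtained from this kind of client.
value, cas_token = client.gets('counter')          # assumed companion call
ok = client.cas('counter', (value or 0) + 1, cas_token, time=60)
if not ok:
    # another writer updated the key first; re-read and retry
    pass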
23,488 | def get_tile_image(self, x, y, l):
try:
return self._animated_tile[(x, y, l)]
except KeyError:
return self._get_tile_image(x, y, l) | Get a tile image, respecting current animations
:param x: x coordinate
:param y: y coordinate
:param l: layer
:type x: int
:type y: int
:type l: int
:rtype: pygame.Surface |
23,489 | def on_click(self, button, **kwargs):
if button in (4, 5):
return super().on_click(button, **kwargs)
else:
activemodule = self.get_active_module()
if not activemodule:
return
return activemodule.on_click(button, **kwargs) | Capture scrollup and scrolldown to move in groups.
Pass everything else to the module itself |
23,490 | def _HuntFlowCondition(self, condition):
if condition == db.HuntFlowsCondition.UNSET:
return "", []
elif condition == db.HuntFlowsCondition.FAILED_FLOWS_ONLY:
return ("AND flow_state = %s ",
[int(rdf_flow_objects.Flow.FlowState.ERROR)])
elif condition == db.HuntFlowsCondition.SUCCEEDED_FLOWS_ONLY:
return ("AND flow_state = %s ",
[int(rdf_flow_objects.Flow.FlowState.FINISHED)])
elif condition == db.HuntFlowsCondition.COMPLETED_FLOWS_ONLY:
return ("AND (flow_state = %s OR flow_state = %s) ", [
int(rdf_flow_objects.Flow.FlowState.FINISHED),
int(rdf_flow_objects.Flow.FlowState.ERROR)
])
elif condition == db.HuntFlowsCondition.FLOWS_IN_PROGRESS_ONLY:
return ("AND flow_state = %s ",
[int(rdf_flow_objects.Flow.FlowState.RUNNING)])
elif condition == db.HuntFlowsCondition.CRASHED_FLOWS_ONLY:
return ("AND flow_state = %s ",
[int(rdf_flow_objects.Flow.FlowState.CRASHED)])
else:
raise ValueError("Invalid condition value: %r" % condition) | Builds an SQL condition matching db.HuntFlowsCondition. |
23,491 | def get_pending_servermanager():
vname = 'CurrentRebootAttempts'
key = r'SOFTWARE\Microsoft\ServerManager'
reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
if reg_ret['success']:
log.debug('Found key: %s', key)
try:
if int(reg_ret['vdata']) > 0:
return True
except ValueError:
pass
else:
log.debug('Unable to access key: %s', key)
return False | Determine whether there are pending Server Manager tasks that require a
reboot.
.. versionadded:: 2016.11.0
Returns:
bool: ``True`` if there are pending Server Manager tasks, otherwise
``False``
CLI Example:
.. code-block:: bash
salt '*' system.get_pending_servermanager |
23,492 | def start_instance(self, build):
if self.domain is not None:
log.msg("Cannot start_instance as already active" %
self.workername)
return False
yield self._prepare_base_image()
try:
if self.xml:
self.domain = yield self.connection.create(self.xml)
else:
self.domain = yield self.connection.lookupByName(self.workername)
yield self.domain.create()
except Exception:
log.err(failure.Failure(),
"Cannot start a VM (%s), failing gracefully and triggering"
"a new build check" % self.workername)
self.domain = None
return False
return True | I start a new instance of a VM.
If a base_image is specified, I will make a clone of it; otherwise I will
use the image directly.
If I'm not given a libvirt domain definition XML, I will look for my name
in the list of defined virtual machines and start that. |
23,493 | def required_permission(f, level):
@functools.wraps(f)
def wrapper(request, pid, *args, **kwargs):
d1_gmn.app.auth.assert_allowed(request, level, pid)
return f(request, pid, *args, **kwargs)
return wrapper | Assert that subject has access at given level or higher for object. |
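Given the signature above (view function first, permission level second), a hedged sketch of how a view would be wrapped; the 'read' level value is an illustrative assumption.
def resolve(request, pid):
    ...  # normal view logic once access has been asserted

resolve = required_permission(resolve, 'read')  # hypothetical level constant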
23,494 | def asm_binary(exprs, dst_reg, sym_to_reg, triple_or_target=None):
if not llvmlite_available:
raise RuntimeError("llvmlite module unavailable! can__arybo' function. As the function is naked and
M = llvm.parse_assembly(str(M))
M.verify()
target_machine = target.create_target_machine()
obj_bin = target_machine.emit_object(M)
obj = llvm.ObjectFileRef.from_data(obj_bin)
for s in obj.sections():
if s.is_text():
return s.data()
raise RuntimeError("unable to get the assembled binary!") | Compile and assemble an expression for a given architecture.
Arguments:
* *exprs*: list of expressions to convert. This can represent a graph of
expressions.
* *dst_reg*: final register on which to store the result of the last
expression. This is represented by a tuple ("reg_name", reg_size_bits).
Example: ("rax", 64)
* *sym_to_reg*: a dictionary that maps an Arybo variable name to a register
(described as tuple, see *dst_reg*). Example: {"x": ("rdi",64), "y": ("rsi", 64)}
* *triple_or_target*: LLVM architecture triple to use. Defaults to the
host architecture. Example: "x86_64-unknown-unknown"
Output:
* binary stream of the assembled expression for the given target
Here is an example that will compile and assemble "x+y" for x86_64::
from arybo.lib import MBA
from arybo.lib import mba_exprs
from arybo.lib.exprs_asm import asm_binary
mba = MBA(64)
x = mba.var("x")
y = mba.var("y")
e = mba_exprs.ExprBV(x) + mba_exprs.ExprBV(y)
code = asm_binary([e], ("rax", 64), {"x": ("rdi", 64), "y": ("rsi", 64)}, "x86_64-unknown-unknown")
print(code.hex())
which outputs ``488d0437`` (which is equivalent to ``lea rax,[rdi+rsi*1]``). |
23,495 | async def _get_subscriptions(self) -> Tuple[Set[Text], Text]:
url, params = self._get_subscriptions_endpoint()
get = self.session.get(url, params=params)
async with get as r:
await self._handle_fb_response(r)
data = await r.json()
# key names below follow the Facebook Graph API /subscriptions payload
for scope in data['data']:
if scope['object'] == 'page':
return (
set(x['name'] for x in scope['fields']),
scope['callback_url'],
)
return set(), '' | List the subscriptions currently active |
23,496 | def get_source_value(self, obj, source, **kwargs):
result = []
for sub_source in self.expand_source(source):
sub_result = super(CompoundColumn, self).get_source_value(obj, sub_source, **kwargs)
result.extend(sub_result)
return result | Treat ``field`` as a nested sub-Column instance, which explicitly stands in as the object
to which term coercions and the query type lookup are delegated. |
23,497 | def has_snap(self):
return len(list(filter(lambda s: s.state != SnapStateEnum.DESTROYING,
self.snapshots))) > 0 | This method won't count the snaps in "destroying" state!
:return: false if no snaps or all snaps are destroying. |
23,498 | def get_sequence_rules_by_ids(self, sequence_rule_ids):
collection = JSONClientValidated('assessment_authoring',
collection='SequenceRule',
runtime=self._runtime)
object_id_list = []
for i in sequence_rule_ids:
object_id_list.append(ObjectId(self._get_id(i, 'assessment_authoring').get_identifier()))
result = collection.find(
dict({'_id': {'$in': object_id_list}},
**self._view_filter()))
result = list(result)
sorted_result = []
for object_id in object_id_list:
for object_map in result:
if object_map['_id'] == object_id:
sorted_result.append(object_map)
break
return objects.SequenceRuleList(sorted_result, runtime=self._runtime, proxy=self._proxy) | Gets a ``SequenceRuleList`` corresponding to the given ``IdList``.
arg: sequence_rule_ids (osid.id.IdList): the list of ``Ids``
to retrieve
return: (osid.assessment.authoring.SequenceRuleList) - the
returned ``SequenceRule`` list
raise: NotFound - an ``Id`` was not found
raise: NullArgument - ``sequence_rule_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
23,499 | def inject(self,
require: Optional[List[Text]] = None,
fail: Text = 'missing_context',
var_name: Text = 'context'):
def decorator(func):
async def health_check(cls) -> Iterator[HealthCheckFail]:
if not callable(getattr(cls, fail, None)):
yield HealthCheckFail(
'00005',  # placeholder check code; the original identifier was stripped
f'State "{cls.__name__}" has no method "{fail}". It is required '
f'because this state relies on a context store with required keys; '
f'add the method or change the "fail" argument of inject().'
)
if require:
func.health_check = health_check
@wraps(func)
async def wrapper(state: Union[BaseState, BaseTrigger], **kwargs):
conv_id = state.request.conversation.id
key = f'context::{conv_id}'  # assumed key format; the original f-string was stripped
x = self.open(key)
async with x as context:
for item in (require or []):
if item not in context:
return await getattr(state, fail)(state, **kwargs)
kwargs[var_name] = context
return await func(state, **kwargs)
return wrapper
return decorator | This is a decorator intended to be used on states (and it actually only
works on state handlers).
The `require` argument is a list of keys to be checked in the context.
If at least one of them is missing, then instead of calling the handler
another method will be called. By default the method is
`missing_context` but it can be configured using the `fail` argument.
The context will be injected into the handler as a keyword arg. By
default, the arg is expected to be named `context` but you can change
it to anything you'd like using `var_name`.
See `create_context_store()` for a full example. |
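A hedged sketch of the decorator in use on a state handler, following the docstring; the store factory call, state class name and required key are illustrative.
cs = create_context_store()  # factory referenced in the docstring

class ShowProfile(BaseState):
    @cs.inject(require=['user_id'])
    async def handle(self, context) -> None:
        user_id = context['user_id']  # guaranteed to be present here
        ...

    async def missing_context(self, *args, **kwargs) -> None:
        ...  # fallback invoked when 'user_id' is absent from the context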