Unnamed: 0 (int64, 0 to 389k) | code (string, lengths 26 to 79.6k) | docstring (string, lengths 1 to 46.9k)
---|---|---|
24,800 |
def split_kv_pairs(lines, comment_char="#", filter_string=None, split_on="=", use_partition=False, ordered=False):
_lines = lines if comment_char is None else get_active_lines(lines, comment_char=comment_char)
_lines = _lines if filter_string is None else [l for l in _lines if filter_string in l]
kv_pairs = OrderedDict() if ordered else {}
for line in _lines:
if not use_partition:
if split_on in line:
k, v = line.split(split_on, 1)
kv_pairs[k.strip()] = v.strip()
else:
k, _, v = line.partition(split_on)
kv_pairs[k.strip()] = v.strip()
return kv_pairs
|
Split lines of a list into key/value pairs
Use this function to filter and split all lines of a list of strings into
a dictionary. Named arguments may be used to control how the line is split,
how lines are filtered and the type of output returned. See parameters for
more information. When splitting key/value, the first occurrence of the
split character is used; other occurrences of the split char in the line
will be ignored. :func:`get_active_lines` is called to strip comments and
blank lines from the data.
Parameters:
lines (list of str): List of the strings to be split.
comment_char (str): Char that when present in the line indicates all
following chars are part of a comment. If this is present, all
comments and all blank lines are removed from list before further
processing. The default comment char is the `#` character.
filter_string (str): If the filter string is present, then only lines
containing the filter will be processed, other lines will be ignored.
split_on (str): Character to use when splitting a line. Only the first
occurrence of the char is used when splitting, so only one split is
performed at the first occurrence of `split_on`. The default string is `=`.
use_partition (bool): If this parameter is `True` then the python `partition`
function will be used to split the line. If `False` then the python `split`
function will be used. The difference is that when `False`, if the split
character is not present in the line then the line is ignored and when
`True` the line will be parsed regardless. Set `use_partition` to `True`
if you have valid lines that do not contain the `split_on` character.
Set `use_partition` to `False` if you want to ignore lines that do not
contain the `split_on` character. The default value is `False`.
ordered (bool): If this parameter is `True` then the resulting dictionary
will be in the same order as in the original file, a python
`OrderedDict` type is used. If this parameter is `False` then the resulting
dictionary is in no particular order, a base python `dict` type is used.
The default is `False`.
Returns:
dict: Return value is a dictionary of the key/value pairs. If parameter
`ordered` is `True` then an OrderedDict is returned, otherwise a dict
is returned.
Examples:
>>> from .. import split_kv_pairs
>>> for line in lines:
... print line
# Comment line
# Blank lines will also be removed
keyword1 = value1 # Inline comments
keyword2 = value2a=True, value2b=100M
keyword3 # Key with no separator
>>> split_kv_pairs(lines)
{'keyword2': 'value2a=True, value2b=100M', 'keyword1': 'value1'}
>>> split_kv_pairs(lines, comment_char='#')
{'keyword2': 'value2a=True, value2b=100M', 'keyword1': 'value1'}
>>> split_kv_pairs(lines, filter_string='keyword2')
{'keyword2': 'value2a=True, value2b=100M'}
>>> split_kv_pairs(lines, use_partition=True)
{'keyword3': '', 'keyword2': 'value2a=True, value2b=100M', 'keyword1': 'value1'}
>>> split_kv_pairs(lines, use_partition=True, ordered=True)
OrderedDict([('keyword1', 'value1'), ('keyword2', 'value2a=True, value2b=100M'), ('keyword3', '')])
|
24,801 |
def find_source_files(input_path, excludes):
java_files = []
input_path = os.path.normpath(os.path.abspath(input_path))
for dirpath, dirnames, filenames in os.walk(input_path):
if is_excluded(dirpath, excludes):
del dirnames[:]
continue
for filename in filenames:
if filename.endswith(".java"):
java_files.append(os.path.join(dirpath, filename))
return java_files
|
Get a list of filenames for all Java source files within the given
directory.
|
24,802 |
def _get_stddevs(self, coeffs, stddev_types):
for stddev_type in stddev_types:
assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
return np.sqrt(coeffs[]**2 + coeffs[]**2)
|
Equation (11) on p. 207 for total standard error at a given site:
``σ{ln(ε_site)} = sqrt(σ{ln(ε_br)}**2 + σ{ln(δ_site)}**2)``
|
24,803 |
def generateImplicitParameters(cls, obj):
for comp in obj.components():
if comp.behavior is not None:
comp.behavior.generateImplicitParameters(comp)
if not hasattr(obj, ):
obj.add(ContentLine(, [], PRODID))
if not hasattr(obj, ):
obj.add(ContentLine(, [], cls.versionString))
tzidsUsed = {}
def findTzids(obj, table):
if isinstance(obj, ContentLine) and (obj.behavior is None or
not obj.behavior.forceUTC):
if getattr(obj, , None):
table[obj.tzid_param] = 1
else:
if type(obj.value) == list:
for item in obj.value:
tzinfo = getattr(obj.value, , None)
tzid = TimezoneComponent.registerTzinfo(tzinfo)
if tzid:
table[tzid] = 1
else:
tzinfo = getattr(obj.value, , None)
tzid = TimezoneComponent.registerTzinfo(tzinfo)
if tzid:
table[tzid] = 1
for child in obj.getChildren():
if obj.name != :
findTzids(child, table)
findTzids(obj, tzidsUsed)
oldtzids = [toUnicode(x.tzid.value) for x in getattr(obj, , [])]
for tzid in tzidsUsed.keys():
tzid = toUnicode(tzid)
if tzid != u and tzid not in oldtzids:
obj.add(TimezoneComponent(tzinfo=getTzid(tzid)))
|
Create PRODID, VERSION, and VTIMEZONEs if needed.
VTIMEZONEs will need to exist whenever TZID parameters exist or when
datetimes with tzinfo exist.
|
24,804 |
def list_voices(
self,
language_code=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "list_voices" not in self._inner_api_calls:
self._inner_api_calls[
"list_voices"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_voices,
default_retry=self._method_configs["ListVoices"].retry,
default_timeout=self._method_configs["ListVoices"].timeout,
client_info=self._client_info,
)
request = cloud_tts_pb2.ListVoicesRequest(language_code=language_code)
return self._inner_api_calls["list_voices"](
request, retry=retry, timeout=timeout, metadata=metadata
)
|
Returns a list of ``Voice`` supported for synthesis.
Example:
>>> from google.cloud import texttospeech_v1beta1
>>>
>>> client = texttospeech_v1beta1.TextToSpeechClient()
>>>
>>> response = client.list_voices()
Args:
language_code (str): Optional (but recommended)
`BCP-47 <https://www.rfc-editor.org/rfc/bcp/bcp47.txt>`__ language tag.
If specified, the ListVoices call will only return voices that can be
used to synthesize this language\_code. E.g. when specifying "en-NZ",
you will get supported "en-*" voices; when specifying "no", you will get
supported "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices;
specifying "zh" will also get supported "cmn-*" voices; specifying
"zh-hk" will also get supported "yue-\*" voices.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.texttospeech_v1beta1.types.ListVoicesResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
|
24,805 |
def power(maf=0.5,beta=0.1, N=100, cutoff=5e-8):
assert maf>=0.0 and maf<=0.5, "maf needs to be between 0.0 and 0.5, got %f" % maf
if beta<0.0:
beta=-beta
std_beta = 1.0/np.sqrt(N*(2.0 * maf*(1.0-maf)))
non_centrality = beta
beta_samples = np.random.normal(loc=non_centrality, scale=std_beta)
n_grid = 100000
beta_in = np.arange(0.5/(n_grid+1.0),(n_grid-0.5)/(n_grid+1.0),1.0/(n_grid+1.0))
beta_theoretical = ((st.norm.isf(beta_in)* std_beta) + non_centrality)
pvals = st.chi2.sf( (beta_theoretical/std_beta)*(beta_theoretical/std_beta) ,1.0)
power = (pvals<cutoff).mean()
return power, pvals
|
Estimate power for a given allele frequency, effect size beta and sample size N.
Assumption:
the z-score beta_ML is distributed as p(0) = N(0, 1.0/(maf*(1-maf)*N)) under the null hypothesis
the actual beta_ML is distributed as p(alt) = N(beta, 1.0/(maf*(1-maf)*N))
Arguments:
maf: minor allele frequency of the SNP
beta: effect size of the SNP
N: sample size (number of individuals)
cutoff: significance threshold on the p-value (default 5e-8)
Returns:
power: probability to detect a SNP in that study with the given parameters
pvals: p-values for the grid of theoretical effect sizes used in the estimate
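Example (a minimal usage sketch with hypothetical values, assuming numpy as np and scipy.stats as st are imported as in the code above):
>>> pwr, pvals = power(maf=0.3, beta=0.2, N=5000, cutoff=5e-8)
>>> 0.0 <= pwr <= 1.0
True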
|
24,806 |
def message(self, to, subject, text):
return self.compose(to, subject, text)
|
Alias for :meth:`compose`.
|
24,807 |
def as_pseudo(cls, obj):
return obj if isinstance(obj, cls) else cls.from_file(obj)
|
Convert obj into a pseudo. Accepts:
* Pseudo object.
* string defining a valid path.
|
24,808 |
def change_site(self, old_site_name, new_site_name, new_location_name=None,
new_er_data=None, new_pmag_data=None, replace_data=False):
site = self.find_by_name(old_site_name, self.sites)
if not site:
print(.format(old_site_name))
return False
if new_location_name:
if site.location:
old_location = self.find_by_name(site.location.name, self.locations)
if old_location:
old_location.sites.remove(site)
new_location = self.find_by_name(new_location_name, self.locations)
if not new_location:
print(.format(new_location_name, new_location_name))
new_location = self.add_location(new_location_name)
new_location.sites.append(site)
else:
new_location = None
site.change_site(new_site_name, new_location, new_er_data, new_pmag_data, replace_data)
return site
|
Find actual data objects for site and location.
Then call the Site class change method to update site name and data.
|
24,809 |
def confirm_answer(self, answer, message=None):
if message is None:
message = "\nYou entered {0}. Is this correct?".format(answer)
return self.prompt_for_yes_or_no(message)
|
Prompts the user to confirm a question with a yes/no prompt.
If no message is specified, the default message is: "You entered {}. Is this correct?"
:param answer: the answer to confirm.
:param message: a message to display rather than the default message.
:return: True if the user confirmed Yes, or False if user specified No.
|
24,810 |
def get_tectonic_regionalisation(self, regionalisation, region_type=None):
if region_type:
self.trt = region_type
if not self.trt in regionalisation.key_list:
raise ValueError(
)
for iloc, key_val in enumerate(regionalisation.key_list):
if self.trt in key_val:
self.regionalisation = regionalisation.regionalisation[iloc]
if not self.shear_modulus:
self.shear_modulus = self.regionalisation.shear_modulus
if not self.msr:
self.msr = self.regionalisation.scaling_rel
if not self.disp_length_ratio:
self.disp_length_ratio = \
self.regionalisation.disp_length_ratio
break
return
|
Defines the tectonic region and updates the shear modulus,
magnitude scaling relation and displacement to length ratio using
the regional values, if not previously defined for the fault
:param regionalisation:
Instance of the :class:
openquake.hmtk.faults.tectonic_regionalisaion.TectonicRegionalisation
:param str region_type:
Name of the region type - if not in regionalisation an error will
be raised
|
24,811 |
def download_to_bytes(url, chunk_size=1024 * 1024 * 10, loadbar_length=10):
stream = False if chunk_size is None else True
print("Downloading {0:s}: ".format(url), end="")
response = requests.get(url, stream=stream)
response.raise_for_status()
encoding = response.encoding
total_length = response.headers.get()
if total_length is not None:
total_length = float(total_length)
if stream:
print("{0:.2f}Mb/{1:} ".format(total_length / (1024 * 1024), loadbar_length), end="")
else:
print("{0:.2f}Mb ".format(total_length / (1024 * 1024)), end="")
if stream:
print("[", end="")
chunks = []
loaded = 0
loaded_size = 0
for chunk in response.iter_content(chunk_size=chunk_size):
if chunk:
if total_length is not None:
while loaded < loadbar_length * loaded_size / total_length:
print("=", end=)
loaded += 1
loaded_size += chunk_size
chunks.append(chunk)
if total_length is None:
print("=" * loadbar_length, end=)
else:
while loaded < loadbar_length:
print("=", end=)
loaded += 1
content = b"".join(chunks)
print("] ", end="")
else:
content = response.content
print("Finished")
response.close()
return content, encoding
|
Download a url to bytes.
If chunk_size is not None, prints a simple loading bar [=*loadbar_length] to show progress (in console and notebook).
:param url: str or url
:param chunk_size: None or int in bytes
:param loadbar_length: int length of load bar
:return: (bytes, encoding)
|
24,812 |
def split_traversal(traversal,
edges,
edges_hash=None):
traversal = np.asanyarray(traversal,
dtype=np.int64)
if edges_hash is None:
edges_hash = grouping.hashable_rows(
np.sort(edges, axis=1))
trav_edge = np.column_stack((traversal[:-1],
traversal[1:]))
trav_hash = grouping.hashable_rows(
np.sort(trav_edge, axis=1))
contained = np.in1d(trav_hash, edges_hash)
if contained.all():
split = [traversal]
else:
blocks = grouping.blocks(contained,
min_len=1,
only_nonzero=True)
split = [np.append(trav_edge[b][:, 0],
trav_edge[b[-1]][1])
for b in blocks]
for i, t in enumerate(split):
split[i] = np.asanyarray(split[i], dtype=np.int64)
edge = np.sort([t[0], t[-1]])
if edge.ptp() == 0:
continue
close = grouping.hashable_rows(edge.reshape((1, 2)))[0]
if close in edges_hash:
split[i] = np.append(t, t[0]).astype(np.int64)
result = np.array(split)
return result
|
Given a traversal as a list of nodes, split the traversal
if a sequential index pair is not in the given edges.
Parameters
--------------
edges : (n, 2) int
Graph edge indexes
traversal : (m,) int
Traversal through edges
edges_hash : (n,)
Edges sorted on axis=1 and
passed to grouping.hashable_rows
Returns
---------------
split : sequence of (p,) int
|
24,813 |
def _print_task_data(self, task):
print(.format(task[], task[]))
paths = task.get(, [])
if not paths:
return
for path in paths:
if path.endswith():
continue
if path.endswith(.format(task.get())):
continue
if path.startswith():
continue
print( + path)
|
Pretty-prints task data.
Args:
task: Task dict generated by Turbinia.
|
24,814 |
def make_stream_features(self, stream, features):
self.stream = stream
if stream.peer_authenticated and not stream.peer.resource:
ElementTree.SubElement(features, FEATURE_BIND)
|
Add resource binding feature to the <features/> element of the
stream.
[receiving entity only]
:returns: updated <features/> element.
|
24,815 |
def _process_data(self, sd, ase, offsets, data):
self._put_data(sd, ase, offsets, data)
with self._transfer_lock:
self._synccopy_bytes_sofar += offsets.num_bytes
sd.complete_offset_upload(offsets.chunk_num)
|
Process downloaded data for upload
:param SyncCopy self: this
:param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor
:param blobxfer.models.azure.StorageEntity ase: storage entity
:param blobxfer.models.synccopy.Offsets offsets: offsets
:param bytes data: data to process
|
24,816 |
def truncate(self, table):
truncate_sql, serial_key_sql = super(PostgresDbWriter, self).truncate(table)
self.execute(truncate_sql)
if serial_key_sql:
self.execute(serial_key_sql)
|
Send DDL to truncate the specified `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
|
24,817 |
def _PSat_h(h):
hmin_Ps3 = _Region1(623.15, Ps_623)["h"]
hmax_Ps3 = _Region2(623.15, Ps_623)["h"]
if h < hmin_Ps3 or h > hmax_Ps3:
raise NotImplementedError("Incoming out of bound")
nu = h/2600
I = [0, 1, 1, 1, 1, 5, 7, 8, 14, 20, 22, 24, 28, 36]
J = [0, 1, 3, 4, 36, 3, 0, 24, 16, 16, 3, 18, 8, 24]
n = [0.600073641753024, -0.936203654849857e1, 0.246590798594147e2,
-0.107014222858224e3, -0.915821315805768e14, -0.862332011700662e4,
-0.235837344740032e2, 0.252304969384128e18, -0.389718771997719e19,
-0.333775713645296e23, 0.356499469636328e11, -0.148547544720641e27,
0.330611514838798e19, 0.813641294467829e38]
suma = 0
for i, j, ni in zip(I, J, n):
suma += ni * (nu-1.02)**i * (nu-0.608)**j
return 22*suma
|
Define the saturated line, P=f(h) for region 3
Parameters
----------
h : float
Specific enthalpy, [kJ/kg]
Returns
-------
P : float
Pressure, [MPa]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* h'(623.15K) ≤ h ≤ h''(623.15K)
References
----------
IAPWS, Revised Supplementary Release on Backward Equations for the
Functions T(p,h), v(p,h) and T(p,s), v(p,s) for Region 3 of the IAPWS
Industrial Formulation 1997 for the Thermodynamic Properties of Water and
Steam, http://www.iapws.org/relguide/Supp-Tv%28ph,ps%293-2014.pdf, Eq 10
Examples
--------
>>> _PSat_h(1700)
17.24175718
>>> _PSat_h(2400)
20.18090839
|
24,818 |
def points_from_sql(self, db_name):
points = self._read_from_sql("SELECT * FROM history;", db_name)
return list(points.columns.values)[1:]
|
Retrieve point list from SQL database
|
24,819 |
def set_pixel_size(self, deltaPix):
self._pixel_size = deltaPix
if self.psf_type == :
try:
del self._kernel_point_source
except:
pass
|
update pixel size
:param deltaPix:
:return:
|
24,820 |
def emit(self, record):
msg = record.getMessage()
list_store = self.list_view.get_model()
Gdk.threads_enter()
if msg:
msg = replace_markup_chars(record.getMessage())
record.msg = URL_FINDER.sub(r, msg)
self.parent.debug_logs[].append(record)
event_type = getattr(record, , )
if event_type:
if event_type == :
switch_cursor(Gdk.CursorType.WATCH, self.parent.run_window)
list_store.append([format_entry(record)])
if event_type == :
switch_cursor(Gdk.CursorType.ARROW, self.parent.run_window)
if not self.parent.debugging:
if int(record.levelno) > 10:
if event_type == "dep_check" or event_type == "dep_found":
list_store.append([format_entry(record)])
elif not event_type.startswith("dep_"):
list_store.append([format_entry(record, colorize=True)])
if self.parent.debugging:
if event_type != "cmd_retcode":
list_store.append([format_entry(record, show_level=True, colorize=True)])
Gdk.threads_leave()
|
Function inserts log messages to list_view
|
24,821 |
def data_to_binary(self):
if self.channel == 0x01:
tmp = 0x03
else:
tmp = 0x0C
return bytes([
COMMAND_CODE,
tmp
]) + struct.pack(, self.delay_time)[-3:]
|
:return: bytes
|
24,822 |
def update_scope(self, patch_document, scope_id):
route_values = {}
if scope_id is not None:
route_values[] = self._serialize.url(, scope_id, )
content = self._serialize.body(patch_document, )
self._send(http_method=,
location_id=,
version=,
route_values=route_values,
content=content,
media_type=)
|
UpdateScope.
[Preview API]
:param :class:`<[JsonPatchOperation]> <azure.devops.v5_0.identity.models.[JsonPatchOperation]>` patch_document:
:param str scope_id:
|
24,823 |
def is_legal_sequence(self, packet: DataPacket) -> bool:
try:
diff = packet.sequence - self.lastSequence[packet.universe]
if 0 >= diff > -20:
return False
except:
pass
self.lastSequence[packet.universe] = packet.sequence
return True
|
Check if the Sequence number of the DataPacket is legal.
For more information see page 17 of http://tsp.esta.org/tsp/documents/docs/E1-31-2016.pdf.
:param packet: the packet to check
:return: true if the sequence is legal. False if the sequence number is bad
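A worked example of the rule `0 >= diff > -20` (hypothetical receiver state, assuming lastSequence[universe] == 100):
    packet.sequence == 90   ->  diff == -10, rejected as out of order (returns False)
    packet.sequence == 101  ->  diff == 1, accepted (returns True)
    packet.sequence == 50   ->  diff == -50, accepted; a backwards jump of 20 or more is treated as a sequence reset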
|
24,824 |
def firmware_checksum(self, firmware_checksum):
if firmware_checksum is not None and len(firmware_checksum) > 64:
raise ValueError("Invalid value for `firmware_checksum`, length must be less than or equal to `64`")
self._firmware_checksum = firmware_checksum
|
Sets the firmware_checksum of this DeviceDataPostRequest.
The SHA256 checksum of the current firmware image.
:param firmware_checksum: The firmware_checksum of this DeviceDataPostRequest.
:type: str
|
24,825 |
def handle_event(self, event):
if event.get():
if event[] == :
cmd =
else:
cmd =
task = self.send_command_ack(event[], cmd)
self.loop.create_task(task)
|
Handle incoming packet from rflink gateway.
|
24,826 |
def label_from_func(self, func:Callable, label_cls:Callable=None, **kwargs)->:
"Apply `func` to every input to get its label."
return self._label_from_list([func(o) for o in self.items], label_cls=label_cls, **kwargs)
|
Apply `func` to every input to get its label.
|
24,827 |
def get_select_sql(self, columns, order=None, limit=0, skip=0):
sql =
sql += .format(columns, self.tables)
if len(self.where_clauses) > 0:
sql +=
sql += .join(self.where_clauses)
if order is not None:
sql += .format(order)
if limit > 0:
sql += .format(limit)
if skip > 0:
sql += .format(skip)
return sql
|
Build a SELECT query based on the current state of the builder.
:param columns:
SQL fragment describing which columns to select i.e. 'e.obstoryID, s.statusID'
:param order:
Optional ordering constraint, i.e. 'e.eventTime DESC'
:param limit:
Optional, used to build the 'LIMIT n' clause. If not specified no limit is imposed.
:param skip:
Optional, used to build the 'OFFSET n' clause. If not specified results are returned from the first item
available. Note that this parameter must be combined with 'order', otherwise there's no ordering imposed
on the results and subsequent queries may return overlapping data randomly. It's unlikely that this will
actually happen as almost all databases do in fact create an internal ordering, but there's no guarantee
of this (and some operations such as indexing will definitely break this property unless explicitly set).
:returns:
A SQL SELECT query, which will make use of self.sql_args when executed.
|
24,828 |
def is_valid(self):
assert self._bundle_context
assert self._container_props is not None
assert self._get_distribution_provider()
assert self.get_config_name()
assert self.get_namespace()
return True
|
Checks if the component is valid
:return: Always True if it doesn't raise an exception
:raises AssertionError: Invalid properties
|
24,829 |
def get_argument_topology(self):
try:
topology = self.get_argument(constants.PARAM_TOPOLOGY)
return topology
except tornado.web.MissingArgumentError as e:
raise Exception(e.log_message)
|
Helper function to get topology argument.
Raises exception if argument is missing.
Returns the topology argument.
|
24,830 |
def num_batches(n, batch_size):
b = n // batch_size
if n % batch_size > 0:
b += 1
return b
|
Compute the number of mini-batches required to cover a data set of
size `n` using batches of size `batch_size`.
Parameters
----------
n: int
the number of samples in the data set
batch_size: int
the mini-batch size
Returns
-------
int: the number of batches required
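For example, covering 1050 samples with a batch size of 100 requires 10 full batches plus one partial batch of 50:
>>> num_batches(1050, 100)
11
>>> num_batches(1000, 100)
10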
|
24,831 |
def check_need_install():
need_install_flag = False
for root, _, basename_list in os.walk(SRC):
if os.path.basename(root) != "__pycache__":
for basename in basename_list:
src = os.path.join(root, basename)
dst = os.path.join(root.replace(SRC, DST), basename)
if os.path.exists(dst):
if md5_of_file(src) != md5_of_file(dst):
return True
else:
return True
return need_install_flag
|
Check whether the installed package is exactly the same as this one,
by comparing the md5 value of all files.
|
24,832 |
def typechecked_module(md, force_recursive = False):
if not pytypes.checking_enabled:
return md
if isinstance(md, str):
if md in sys.modules:
md = sys.modules[md]
if md is None:
return md
elif md in _pending_modules:
_pending_modules[md].append(lambda t: typechecked_module(t, True))
return md
assert(ismodule(md))
if md.__name__ in _pending_modules:
_pending_modules[md.__name__].append(lambda t: typechecked_module(t, True))
if md.__name__ in _fully_typechecked_modules and \
_fully_typechecked_modules[md.__name__] == len(md.__dict__):
return md
    for key, memb in md.__dict__.copy().items():
        # typecheck functions and classes defined in this module that carry type hints
        if _check_as_func(memb) and memb.__module__ == md.__name__ and \
                has_type_hints(memb):
            setattr(md, key, typechecked_func(memb, force_recursive))
        elif isclass(memb) and memb.__module__ == md.__name__:
            typechecked_class(memb, force_recursive, force_recursive)
if not md.__name__ in _pending_modules:
_fully_typechecked_modules[md.__name__] = len(md.__dict__)
return md
|
Works like typechecked, but is only applicable to modules (by explicit call).
md must be a module or a module name contained in sys.modules.
|
24,833 |
def safety_set_allowed_area_encode(self, target_system, target_component, frame, p1x, p1y, p1z, p2x, p2y, p2z):
return MAVLink_safety_set_allowed_area_message(target_system, target_component, frame, p1x, p1y, p1z, p2x, p2y, p2z)
|
Set a safety zone (volume), which is defined by two corners of a cube.
This message can be used to tell the MAV which
setpoints/MISSIONs to accept and which to reject.
Safety areas are often enforced by national or
competition regulations.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
frame : Coordinate frame, as defined by MAV_FRAME enum in mavlink_types.h. Can be either global, GPS, right-handed with Z axis up or local, right handed, Z axis down. (uint8_t)
p1x : x position 1 / Latitude 1 (float)
p1y : y position 1 / Longitude 1 (float)
p1z : z position 1 / Altitude 1 (float)
p2x : x position 2 / Latitude 2 (float)
p2y : y position 2 / Longitude 2 (float)
p2z : z position 2 / Altitude 2 (float)
|
24,834 |
def isNull(self):
check = self.raw_values.copy()
scope = check.pop(, {})
return len(check) == 0 and len(scope) == 0
|
Returns whether or not this option set has been modified.
:return <bool>
|
24,835 |
def patch(self, patch: int) -> None:
self.filter_negatives(patch)
self._patch = patch
|
:param patch: Patch version number property. Must be a non-negative integer.
|
24,836 |
def _CreateMethod(self, method_name):
soap_service_method = getattr(self.suds_client.service, method_name)
def MakeSoapRequest(*args):
AddToUtilityRegistry()
self.SetHeaders(
self._header_handler.GetSOAPHeaders(self.CreateSoapElementForType),
self._header_handler.GetHTTPHeaders())
try:
return soap_service_method(
*[_PackForSuds(arg, self.suds_client.factory,
self._packer) for arg in args])
except suds.WebFault as e:
if _logger.isEnabledFor(logging.WARNING):
_logger.warning(,
_ExtractResponseSummaryFields(e.document))
_logger.debug(, e.document.str())
if not hasattr(e.fault, ):
exc = (googleads.errors.
GoogleAdsServerFault(e.document, message=e.fault.faultstring))
raise exc
fault = e.fault.detail.ApiExceptionFault
if not hasattr(fault, ) or fault.errors is None:
exc = (googleads.errors.
GoogleAdsServerFault(e.document, message=e.fault.faultstring))
raise exc
obj = fault.errors
if not isinstance(obj, list):
fault.errors = [obj]
exc = googleads.errors.GoogleAdsServerFault(e.document, fault.errors,
message=e.fault.faultstring)
raise exc
return MakeSoapRequest
|
Create a method wrapping an invocation to the SOAP service.
Args:
method_name: A string identifying the name of the SOAP method to call.
Returns:
A callable that can be used to make the desired SOAP request.
|
24,837 |
def is_fundamental(type_):
return does_match_definition(
type_,
cpptypes.fundamental_t,
(cpptypes.const_t, cpptypes.volatile_t)) \
or does_match_definition(
type_,
cpptypes.fundamental_t,
(cpptypes.volatile_t, cpptypes.const_t))
|
returns True, if type represents C++ fundamental type
|
24,838 |
def get_densities(self, spin=None):
if self.densities is None:
result = None
elif spin is None:
if Spin.down in self.densities:
result = self.densities[Spin.up] + self.densities[Spin.down]
else:
result = self.densities[Spin.up]
else:
result = self.densities[spin]
return result
|
Returns the density of states for a particular spin.
Args:
spin: Spin
Returns:
Returns the density of states for a particular spin. If Spin is
None, the sum of all spins is returned.
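A brief usage sketch (assuming `dos` is an object exposing this method, e.g. a pymatgen Dos, and Spin is the spin enum used above):
    total = dos.get_densities()           # Spin.up + Spin.down when both channels are present
    spin_up = dos.get_densities(Spin.up)  # only the spin-up channel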
|
24,839 |
def render_field(self, field, render_kw):
field_kw = getattr(field, , None)
if field_kw is not None:
render_kw = dict(field_kw, **render_kw)
render_kw = get_html5_kwargs(field, render_kw)
return field.widget(field, **render_kw)
|
Returns the rendered field after adding auto-attributes.
Calls the field's widget with the following kwargs:
1. the *render_kw* set on the field are used as base
2. and are updated with the *render_kw* arguments from the render call
3. this is used as an argument for a call to `get_html5_kwargs`
4. the return value of the call is used as final *render_kw*
|
24,840 |
def closeEvent(self, event):
self.save_config(self.gui_settings[])
self.script_thread.quit()
self.read_probes.quit()
event.accept()
print()
print()
print()
|
Things to be done when the GUI closes, such as saving the settings.
|
24,841 |
def register_json_encoder(self, encoder_type: type, encoder: JSONEncoder):
self._json_encoders[encoder_type] = encoder
return self
|
Register the given JSON encoder for use with the given object type.
:param encoder_type: the type of object to encode
:param encoder: the JSON encoder
:return: this builder
|
24,842 |
def session(self):
if self._session is None:
from .tcex_session import TcExSession
self._session = TcExSession(self)
return self._session
|
Return an instance of Requests Session configured for the ThreatConnect API.
|
24,843 |
def get_similar_users(self, users=None, k=10):
if users is None:
get_all_users = True
users = _SArray()
else:
get_all_users = False
if isinstance(users, list):
users = _SArray(users)
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types) )
+ "; Type not recognized.")
check_type(users, "users", _SArray, ["SArray", "list"])
check_type(k, "k", int, ["int"])
opt = {: self.__proxy__,
: users,
: get_all_users,
: k}
response = self.__proxy__.get_similar_users(users, k, get_all_users)
return response
|
Get the k most similar users for each entry in `users`.
Each type of recommender has its own model for the similarity
between users. For example, the factorization_recommender will
return the nearest users based on the cosine similarity
between latent user factors. (This method is not currently
available for item_similarity models.)
Parameters
----------
users : SArray or list; optional
An :class:`~turicreate.SArray` or list of user ids for which to get
similar users. If 'None', then return the `k` most similar users for
all users in the training set.
k : int, optional
The number of neighbors to return for each user.
Returns
-------
out : SFrame
An SFrame with the top ranked similar users for each user. It contains
the columns `user`, 'similar', 'score' and 'rank', where
`user` matches the user column name specified at training time.
The 'rank' is between 1 and `k` and 'score' gives the similarity
score of that user. The value of the score depends on the method
used for computing user similarities.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.factorization_recommender.create(sf)
>>> nn = m.get_similar_users()
|
24,844 |
async def self_check(self):
platforms = set()
for platform in get_platform_settings():
try:
name = platform[]
cls: Type[Platform] = import_class(name)
except KeyError:
yield HealthCheckFail(
,
)
except (AttributeError, ImportError, ValueError):
yield HealthCheckFail(
,
f
)
else:
if cls in platforms:
yield HealthCheckFail(
,
f
)
platforms.add(cls)
async for check in cls.self_check():
yield check
|
Checks that the platforms configuration is all right.
|
24,845 |
def visit_FunctionDef(self, node, **kwargs):
if self.options.debug:
stderr.write("
containingNodes = kwargs.get() or []
containingNodes.append((node.name, ))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = .join(pathTuple[0] for pathTuple in fullPathNamespace)
modifiedContextTag = self._processMembers(node, contextTag)
tail = .format(modifiedContextTag)
else:
tail = self._processMembers(node, )
if get_docstring(node):
self._processDocstring(node, tail,
containingNodes=containingNodes)
self.generic_visit(node, containingNodes=containingNodes)
containingNodes.pop()
|
Handles function definitions within code.
Process a function's docstring, keeping well aware of the function's
context and whether or not it's part of an interface definition.
|
24,846 |
def calc_integral_merger_rate(self):
|
calculates the integral int_0^t (k(t')-1)/2Tc(t') dt' and stores it as
self.integral_merger_rate. The differences of this quantity evaluated at
different time points are the cost of a branch.
|
24,847 |
def truncate(self, index, chain=-1):
chain = range(self.chains)[chain]
for name in self.trace_names[chain]:
self._traces[name].truncate(index, chain)
|
Tell the traces to truncate themselves at the given index.
|
24,848 |
def add_log_hooks_to_pytorch_module(self, module, name=None, prefix=, log_parameters=True, log_gradients=True, log_freq=0):
if name is not None:
prefix = prefix + name
if log_parameters:
def parameter_log_hook(module, input_, output, log_track):
if not log_track_update(log_track):
return
for name, parameter in module.named_parameters():
if isinstance(parameter, torch.autograd.Variable):
data = parameter.data
else:
data = parameter
self.log_tensor_stats(
data.cpu(), + prefix + name)
log_track_params = log_track_init(log_freq)
module.register_forward_hook(
lambda mod, inp, outp: parameter_log_hook(mod, inp, outp, log_track_params))
if log_gradients:
for name, parameter in module.named_parameters():
if parameter.requires_grad:
log_track_grad = log_track_init(log_freq)
self._hook_variable_gradient_stats(
parameter, + prefix + name, log_track_grad)
|
This instruments hooks into the pytorch module.
log_parameters - log parameters after a forward pass
log_gradients - log gradients after a backward pass
log_freq - log gradients/parameters every N batches
|
24,849 |
def forward_char_extend_selection(self, e):
self.l_buffer.forward_char_extend_selection(self.argument_reset)
self.finalize()
|
Move forward a character.
|
24,850 |
def autoregister(self, cls):
params = self.get_meta_attributes(cls)
return self.register(cls, params)
|
Autoregister a class that is encountered for the first time.
:param cls: The class that should be registered.
|
24,851 |
def parse_ethnicity(parts):
eastern_european = [, , , , , , ,
, , , , , , , , , ,
, , , , , ]
western_european = [, , , , , , , , ,
, , , , , , , , , ,
, , , , ]
caribbean = [, , , , , , , , ,
, , , , , , , , , ,
, , , , , , , , ]
south_central_american = [, , , , , , ,
, , , , , , , , , ,
, , ]
mexican = []
spanish = []
east_asian = [, , , , , , , ,
, , , , ]
korean = [, , ]
south_asian = [, , , ]
hawaiian_pacific_islanders = [, , , , , , ,
, , , , , , ]
middle_eastern = [, , , , , , , ,
, , , , , , , , ,
, , ]
north_african = [, , , , , , , , ]
white_non_hispanic = eastern_european + western_european
hispanic_latino = caribbean + south_central_american + mexican + spanish
american_indian = [, , , , , , , ,
, , , , , , , , , ]
asian = east_asian + south_asian + hawaiian_pacific_islanders
midEast_nAfrica = middle_eastern + north_african
african_american = [, ]
ss_african = [, , , , , , , , , ,
, , , , , , , , , ,
, , , , , , , , , ,
, , , , , , , , ]
white_non_hispanic.append()
white_non_hispanic.append()
hispanic_latino.extend([, ])
asian.extend([, ])
midEast_nAfrica.extend([, ])
from_names = []
ethnicities = white_non_hispanic + hispanic_latino + american_indian + asian + midEast_nAfrica + african_american + ss_african
num = 0
found = []
clean_parts = []
for p in parts:
part = parser_helpers.clean_part_ethn(p)
clean_parts.append(part)
for name in from_names:
if re.compile(r + re.escape(name)).search(part):
found.append(name)
if any(eth in part for eth in ethnicities):
for ethn in ethnicities:
if ethn in part:
index=part.index(ethn)
if ( in part and part.index()+4==index) or ( in part and part.index()==0 and part.index() + 3==index) or ( in part and part.index() + 4==index):
pass
else:
if ethn in [, ]:
ethn = "african_american"
if ethn == :
ethn =
if ethn not in found:
found.append(ethn)
if ethn in eastern_european:
found.append("eastern_european")
if ethn in western_european:
found.append("western_european")
if ethn in caribbean:
found.append("caribbean")
if ethn in south_central_american:
found.append("south_central_american")
if ethn in east_asian:
found.append("east_asian")
if ethn in south_asian:
found.append("south_asian_indian")
if ethn in hawaiian_pacific_islanders:
found.append("hawaiian_pacific_islanders")
if ethn in middle_eastern:
found.append("middle_eastern")
if ethn in north_african:
found.append("north_african")
if ethn in white_non_hispanic and "white_non_hispanic" not in found:
found.append("white_non_hispanic")
num += 1
if ethn in hispanic_latino and "hispanic_latino" not in found:
found.append("hispanic_latino")
num += 1
if ethn in american_indian and "american_indian" not in found:
found.append("american_indian")
num += 1
if ethn in asian and "asian" not in found:
if ethn != "asian":
found.append("asian")
num += 1
if ethn in midEast_nAfrica and "midEast_nAfrican" not in found:
found.append("midEast_nAfrican")
num += 1
if ethn in ss_african and "subsaharan_african" not in found:
found.append("subsaharan_african")
num += 1
if ethn == "african_american":
num += 1
output_parts = []
for p in clean_parts:
part = p
if any(eth in part for eth in found):
for eth in found:
if eth in part:
part = re.sub(eth, "", part)
if len(part) > 2:
output_parts.append(part)
if num > 1:
found.append("multiracial")
found = list(set(found))
return (found, output_parts)
|
Parse the ethnicity from the Backpage ad. Returns the higher level ethnicities associated with an ethnicity.
For example, if "russian" is found in the ad, this function will return ["russian", "eastern_european", "white_non_hispanic"].
This allows for us to look at ethnicities numerically and uniformally.
Note: The code for this function is pretty old and messy, but still works well enough for our purposes.
parts -> list of text segments from the ad to search for ethnicity terms.
|
24,852 |
def startup(api=None):
def startup_wrapper(startup_function):
apply_to_api = hug.API(api) if api else hug.api.from_object(startup_function)
apply_to_api.add_startup_handler(startup_function)
return startup_function
return startup_wrapper
|
Runs the provided function on startup, passing in an instance of the api
|
24,853 |
def apply_default_prefetch(input_source_or_dataflow, trainer):
if not isinstance(input_source_or_dataflow, InputSource):
if type(trainer) == SimpleTrainer:
input = FeedInput(input_source_or_dataflow)
else:
logger.info("Automatically applying QueueInput on the DataFlow.")
input = QueueInput(input_source_or_dataflow)
else:
input = input_source_or_dataflow
if hasattr(trainer, ):
towers = trainer.devices
if len(towers) > 1:
assert not isinstance(trainer, SimpleTrainer)
if isinstance(input, FeedfreeInput) and \
not isinstance(input, (StagingInput, DummyConstantInput)):
logger.info("Automatically applying StagingInput on the DataFlow.")
input = StagingInput(input)
return input
|
Apply a set of default rules to make a fast :class:`InputSource`.
Args:
input_source_or_dataflow(InputSource | DataFlow):
trainer (Trainer):
Returns:
InputSource
|
24,854 |
def format_vertices_section(self):
buf = io.StringIO()
buf.write()
buf.write()
for v in self.valid_vertices:
buf.write( + v.format() + )
buf.write()
return buf.getvalue()
|
Format the vertices section.
assign_vertexid() should be called before this method, so that
self.valid_vertices is available and each of its members has a valid index.
|
24,855 |
def shutdown(self):
log.info("shutting down")
for peer in self._dispatcher.peers.values():
peer.go_down(reconnect=False)
if self._listener_coro:
backend.schedule_exception(
errors._BailOutOfListener(), self._listener_coro)
if self._udp_listener_coro:
backend.schedule_exception(
errors._BailOutOfListener(), self._udp_listener_coro)
|
Close all peer connections and stop listening for new ones
|
24,856 |
def _outlier_rejection(self, params, model, signal, ii):
z_score = (params - np.mean(params, 0))/np.std(params, 0)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
outlier_idx = np.where(np.abs(z_score)>3.0)[0]
nan_idx = np.where(np.isnan(params))[0]
outlier_idx = np.unique(np.hstack([nan_idx, outlier_idx]))
ii[outlier_idx] = 0
model[outlier_idx] = np.nan
signal[outlier_idx] = np.nan
params[outlier_idx] = np.nan
return model, signal, params, ii
|
Helper function to reject outliers
DRY!
|
24,857 |
def attr(self, kw=None, _attributes=None, **attrs):
if kw is not None and kw.lower() not in (, , ):
raise ValueError(
% kw)
if attrs or _attributes:
if kw is None:
a_list = self._a_list(None, attrs, _attributes)
line = self._attr_plain % a_list
else:
attr_list = self._attr_list(None, attrs, _attributes)
line = self._attr % (kw, attr_list)
self.body.append(line)
|
Add a general or graph/node/edge attribute statement.
Args:
kw: Attributes target (``None`` or ``'graph'``, ``'node'``, ``'edge'``).
attrs: Attributes to be set (must be strings, may be empty).
See the :ref:`usage examples in the User Guide <attributes>`.
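A short usage sketch with the graphviz Python package, whose Digraph/Graph objects expose this method:
    import graphviz
    g = graphviz.Digraph()
    g.attr(rankdir='LR')          # graph-level attribute statement (kw is None)
    g.attr('node', shape='box')   # default attributes for all nodes declared afterwards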
|
24,858 |
def enable_pointer_type(self):
self.enable_pointer_type = lambda: True
import pkgutil
headers = pkgutil.get_data(, ).decode()
import ctypes
from clang.cindex import TypeKind
word_size = self.parser.get_ctypes_size(TypeKind.POINTER) // 8
word_type = self.parser.get_ctypes_name(TypeKind.ULONG)
word_char = getattr(ctypes, word_type)._type_
headers = headers.replace(, str(word_size))
headers = headers.replace(, word_type)
headers = headers.replace(, word_char)
print(headers, file=self.imports)
return
|
If a type is a pointer, a platform-independent POINTER_T type needs
to be in the generated code.
|
24,859 |
def _build_ocsp_response(self, ocsp_request: OCSPRequest) -> OCSPResponse:
tbs_request = ocsp_request[]
request_list = tbs_request[]
if len(request_list) != 1:
logger.warning()
raise NotImplementedError()
single_request = request_list[0]
req_cert = single_request[]
serial = req_cert[].native
try:
certificate_status, revocation_date = self._validate(serial)
except Exception as e:
logger.exception(, e)
return self._fail(ResponseStatus.internal_error)
try:
subject_cert_contents = self._cert_retrieve(serial)
except Exception as e:
logger.exception(, serial, e)
return self._fail(ResponseStatus.internal_error)
try:
subject_cert = asymmetric.load_certificate(subject_cert_contents.encode())
except Exception as e:
logger.exception(, serial, e)
return self._fail(ResponseStatus.internal_error)
builder = OCSPResponseBuilder(**{
: ResponseStatus.successful.value,
: subject_cert,
: certificate_status.value,
: revocation_date,
})
for extension in tbs_request[]:
extn_id = extension[].native
critical = extension[].native
value = extension[].parsed
unknown = False
if extn_id == :
builder.nonce = value.native
elif unknown is True:
logger.info(, dict(extension.native))
builder.certificate_issuer = self._issuer_cert
builder.next_update = datetime.now(timezone.utc) + timedelta(days=self._next_update_days)
return builder.build(self._responder_key, self._responder_cert)
|
Create and return an OCSP response from an OCSP request.
|
24,860 |
def runcode(self, code):
try:
Exec(code, self.frame.f_globals, self.frame.f_locals)
pydevd_save_locals.save_locals(self.frame)
except SystemExit:
raise
except:
sys.excepthook = sys.__excepthook__
try:
self.showtraceback()
finally:
sys.__excepthook__ = sys.excepthook
|
Execute a code object.
When an exception occurs, self.showtraceback() is called to
display a traceback. All exceptions are caught except
SystemExit, which is reraised.
A note about KeyboardInterrupt: this exception may occur
elsewhere in this code, and may not always be caught. The
caller should be prepared to deal with it.
|
24,861 |
def get_folder_children(self, folder_id, name_contains=None):
return self._create_array_response(
self.data_service.get_folder_children(
folder_id, name_contains
),
DDSConnection._folder_or_file_constructor
)
|
Get direct files and folders of a folder.
:param folder_id: str: uuid of the folder
:param name_contains: str: filter children based on a pattern
:return: File|Folder
|
24,862 |
def write_single_response(self, response_obj):
if not isinstance(response_obj, JsonRpcResponse):
raise ValueError(
"Expected JsonRpcResponse, but got {} instead".format(type(response_obj).__name__))
if not self.response_is_sent:
self.set_status(200)
self.set_header("Content-Type", "application/json")
self.finish(response_obj.to_string())
self.response_is_sent = True
|
Writes a json rpc response ``{"result": result, "error": error, "id": id}``.
If the ``id`` is ``None``, the response will not contain an ``id`` field.
The response is sent to the client as an ``application/json`` response. Only one call per
response is allowed
:param response_obj: A Json rpc response object
:return:
|
24,863 |
def dedupFasta(reads):
seen = set()
add = seen.add
for read in reads:
hash_ = md5(read.sequence.encode()).digest()
if hash_ not in seen:
add(hash_)
yield read
|
Remove sequence duplicates (based on sequence) from FASTA.
@param reads: a C{dark.reads.Reads} instance.
@return: a generator of C{dark.reads.Read} instances with no duplicates.
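A minimal sketch using stand-in read objects that only need a `.sequence` attribute (the real input is a C{dark.reads.Reads} instance):
    from collections import namedtuple
    Read = namedtuple('Read', 'id sequence')
    reads = [Read('id1', 'ACGT'), Read('id2', 'ACGT'), Read('id3', 'GGCC')]
    unique = list(dedupFasta(reads))   # keeps id1 and id3; id2 is dropped because its sequence duplicates id1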
|
24,864 |
def do_list_queue(self, line):
def f(p, args):
o = p.get()
if o.resources.queue:
for q in o.resources.queue:
print( % (q.resource_id, q.port))
self._request(line, f)
|
list_queue <peer>
|
24,865 |
def main():
arguments = docopt(__doc__, version=__version__)
if arguments[]:
print
print
print
print
on()
elif arguments[]:
off()
print
elif arguments[]:
last(arguments[], arguments[] or arguments[])
else:
print __doc__
|
i am winston wolfe, i solve problems
|
24,866 |
def get_requirements(filename=):
with open(filename) as f:
return [
line.rstrip().split()[0]
for line in f.readlines()
if not line.startswith()
]
|
Get the contents of a file listing the requirements.
:param filename: path to a requirements file
:type filename: str
:returns: the list of requirements
:return type: list
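A usage sketch: for a file containing the lines "requests>=2.0" and "numpy  # numerical backend", the function keeps
the first whitespace-separated token of each kept line (the elided startswith() argument presumably filters comment lines, e.g. those beginning with '#'):
    >>> get_requirements('requirements.txt')
    ['requests>=2.0', 'numpy']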
|
24,867 |
def restore_from_disk(self, clean_old_snapshot=False):
base_filename = "%s/%s_%s_*.dat" % (self.snapshot_path, self.name, self.expiration)
availables_snapshots = glob.glob(base_filename)
last_period = self.current_period - dt.timedelta(days=self.expiration-1)
for filename in availables_snapshots:
snapshot_period = dt.datetime.strptime(filename.split()[-1].strip(), "%Y-%m-%d")
if snapshot_period < last_period and not clean_old_snapshot:
continue
else:
self._union_bf_from_file(filename)
if snapshot_period == self.current_period:
self._union_bf_from_file(filename, current=True)
if snapshot_period < last_period and clean_old_snapshot:
os.remove(filename)
self.ready = True
|
Restore the state of the BF using previous snapshots.
:param clean_old_snapshot: Delete the old snapshot on the disk (period < current - expiration)
|
24,868 |
def get_project(self, project_short_name):
project = pbclient.find_project(short_name=project_short_name,
all=self.all)
if (len(project) == 1):
return project[0]
else:
raise ProjectNotFound(project_short_name)
|
Return project object.
|
24,869 |
def write_byte(self, addr, val):
assert self._device is not None,
self._select_device(addr)
data = bytearray(1)
data[0] = val & 0xFF
self._device.write(data)
|
Write a single byte to the specified device.
|
24,870 |
def pull(self):
for item in self.input_stream:
print( % item[], end=)
if item[]:
print(item[][], end=)
packet_type = item[][]
print(packet_type, end=)
packet = item[]
if packet_type in [, ]:
if in packet:
print( % (net_utils.inet_to_str(packet[]), packet[],
net_utils.inet_to_str(packet[]), packet[]), end=)
else:
print( % (net_utils.inet_to_str(packet[]), net_utils.inet_to_str(packet[])), end=)
else:
print(str(packet))
if item[]:
print( % item[][], end=)
print(str(item[]), end=)
print()
|
Print out summary information about each packet from the input_stream
|
24,871 |
def list_all(prefix=None, app=None, owner=None, description_contains=None,
name_not_contains=None, profile="splunk"):
client = _get_splunk(profile)
description = v
if description_contains and description_contains not in description:
continue
results["manage splunk search " + name] = {"splunk_search.present": d}
return salt.utils.yaml.safe_dump(results, default_flow_style=False, width=120)
|
Get all splunk search details. Produces results that can be used to create
an sls file.
if app or owner are specified, results will be limited to matching saved
searches.
If description_contains is specified, results will be limited to those
where "description_contains in description" is true. If name_not_contains is
specified, results will be limited to those where "name_not_contains not in
name" is true.
If the prefix parameter is given, search names in the output will be prepended
with the prefix; searches that already have the prefix will be skipped. This can be
used to convert existing searches to be managed by salt, as follows:
CLI example:
1. Make a "backup" of all existing searches
$ salt-call splunk_search.list_all --out=txt | sed "s/local: //" > legacy_searches.sls
2. Get all searches with new prefixed names
$ salt-call splunk_search.list_all "prefix=**MANAGED BY SALT** " --out=txt | sed "s/local: //" > managed_searches.sls
3. Insert the managed searches into splunk
$ salt-call state.sls managed_searches.sls
4. Manually verify that the new searches look right
5. Delete the original searches
$ sed s/present/absent/ legacy_searches.sls > remove_legacy_searches.sls
$ salt-call state.sls remove_legacy_searches.sls
6. Get all searches again, verify no changes
$ salt-call splunk_search.list_all --out=txt | sed "s/local: //" > final_searches.sls
$ diff final_searches.sls managed_searches.sls
|
24,872 |
def perfect_platonic_per_pixel(N, R, scale=11, pos=None, zscale=1.0, returnpix=None):
if scale % 2 != 1:
scale += 1
if pos is None:
pos = np.array([(N-1)/2.0]*3)
s = 1.0/scale
f = zscale**2
i = pos.astype()
p = i + s*((pos - i)/s).astype()
pos = p + 1e-10
image = np.zeros((N,)*3)
x,y,z = np.meshgrid(*(xrange(N),)*3, indexing=)
for x0,y0,z0 in zip(x.flatten(),y.flatten(),z.flatten()):
ddd = np.sqrt(f*(x0-pos[0])**2 + (y0-pos[1])**2 + (z0-pos[2])**2)
if ddd > R + 4:
image[x0,y0,z0] = 0.0
continue
xp,yp,zp = np.meshgrid(
*(np.linspace(i-0.5+s/2, i+0.5-s/2, scale, endpoint=True) for i in (x0,y0,z0)),
indexing=
)
ddd = np.sqrt(f*(xp-pos[0])**2 + (yp-pos[1])**2 + (zp-pos[2])**2)
if returnpix is not None and returnpix == [x0,y0,z0]:
outpix = 1.0 * (ddd < R)
vol = (1.0*(ddd < R) + 0.0*(ddd == R)).sum()
image[x0,y0,z0] = vol / float(scale**3)
if returnpix:
return image, pos, outpix
return image, pos
|
Create a perfect platonic sphere of a given radius R by supersampling by a
factor scale on a grid of size N. Scale must be odd.
We are able to perfectly position these particles up to 1/scale. Therefore,
let's only allow those types of shifts for now, but return the actual position
used for the placement.
|
24,873 |
def _set_drop_precedence_force(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={: []}, int_size=32), restriction_dict={: [u]}), is_leaf=True, yang_name="drop-precedence-force", rest_name="drop-precedence-force", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None, u: None}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "ip-access-list:drop-prec-uint",
: ,
})
self.__drop_precedence_force = t
if hasattr(self, ):
self._set()
|
Setter method for drop_precedence_force, mapped from YANG variable /ipv6_acl/ipv6/access_list/extended/seq/drop_precedence_force (ip-access-list:drop-prec-uint)
If this variable is read-only (config: false) in the
source YANG file, then _set_drop_precedence_force is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_drop_precedence_force() directly.
|
24,874 |
def get_3_tuple(self,obj,default=None):
if not (default is not None \
and type(default) is tuple \
and len(default)==3):
raise ValueError(%(default))
if is_sequence(obj):
n = len(obj)
if n>3:
log.warning(%(n,type(obj)))
if n>=3:
return tuple(obj)
log.warning(%(default[0]))
if default is not None:
if n==0:
return default
elif n==1:
return (obj[0],default[1],default[2])
elif n==2:
return (obj[0],obj[1],default[2])
elif is_number(obj) and default is not None:
log.warning(%(default[0]))
return (obj,default[1],default[2])
elif obj is None and default is not None:
log.warning(%(default[0]))
return default
raise ValueError(%(n,type(obj)))
|
Return 3-tuple from
number -> (obj,default[1],default[2])
0-sequence|None -> default
1-sequence -> (obj[0],default[1],default[2])
2-sequence -> (obj[0],obj[1],default[2])
(3 or more)-sequence -> (obj[0],obj[1],obj[2])
|
24,875 |
def motto(self):
if self.url is None:
return
else:
if self.soup is not None:
bar = self.soup.find(
, class_=)
if len(bar.contents) < 4:
return
else:
return bar.contents[3].text
else:
assert self.card is not None
motto = self.card.find(, class_=)
return motto.text if motto is not None else
|
Get the user's self-introduction. For historical reasons, this property is still called motto.
:return: the user's self-introduction
:rtype: str
|
24,876 |
def length_between(min_len,
max_len,
open_left=False,
open_right=False
):
if open_left and open_right:
def length_between_(x):
if (min_len < len(x)) and (len(x) < max_len):
return True
else:
raise LengthNotInRange(wrong_value=x, min_length=min_len, left_strict=True, max_length=max_len,
right_strict=True)
elif open_left:
def length_between_(x):
if (min_len < len(x)) and (len(x) <= max_len):
return True
else:
raise LengthNotInRange(wrong_value=x, min_length=min_len, left_strict=True, max_length=max_len,
right_strict=False)
elif open_right:
def length_between_(x):
if (min_len <= len(x)) and (len(x) < max_len):
return True
else:
raise LengthNotInRange(wrong_value=x, min_length=min_len, left_strict=False, max_length=max_len,
right_strict=True)
else:
def length_between_(x):
if (min_len <= len(x)) and (len(x) <= max_len):
return True
else:
raise LengthNotInRange(wrong_value=x, min_length=min_len, left_strict=False, max_length=max_len,
right_strict=False)
length_between_.__name__ = .format(min_len, max_len)
return length_between_
|
'Is length between' validation_function generator.
Returns a validation_function to check that `min_len <= len(x) <= max_len (default)`. `open_right` and `open_left`
flags allow to transform each side into strict mode. For example setting `open_left=True` will enforce
`min_len < len(x) <= max_len`.
:param min_len: minimum length for x
:param max_len: maximum length for x
:param open_left: Boolean flag to turn the left inequality to strict mode
:param open_right: Boolean flag to turn the right inequality to strict mode
:return:
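A short usage sketch (LengthNotInRange comes from the same validation library as the generator above):
    check_3_to_5 = length_between(3, 5)
    check_3_to_5("abcd")   # returns True, since 3 <= 4 <= 5
    check_3_to_5("ab")     # raises LengthNotInRange, since len("ab") < 3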
|
24,877 |
def get_single_series(self, id):
url = "%s/%s" % (Series.resource_url(), id)
response = json.loads(self._call(url).text)
return SeriesDataWrapper(self, response)
|
Fetches a single comic series by id.
get /v1/public/series/{seriesId}
:param id: ID of Series
:type id: int
:returns: SeriesDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_single_series(12429)
>>> print response.data.result.title
5 Ronin (2010)
|
24,878 |
def transform(x):
try:
x = np.array([_x.total_seconds()*10**6 for _x in x])
except TypeError:
x = x.total_seconds()*10**6
return x
|
Transform from Timedelta to numerical format (microseconds).
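A small usage sketch with datetime.timedelta, which also exposes total_seconds():
    from datetime import timedelta
    transform(timedelta(seconds=2))                               # 2000000.0 (single-value fallback path)
    transform([timedelta(seconds=1), timedelta(milliseconds=5)])  # array([1000000., 5000.])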
|
24,879 |
def terminate(self, devices):
for device in devices:
self.logger.info(, device.id)
try:
device.delete()
except packet.baseapi.Error:
raise PacketManagerException(.format(device.id))
|
Terminate one or more running or stopped instances.
|
24,880 |
def find_point_in_section_list(point, section_list):
if point < section_list[0] or point > section_list[-1]:
return None
if point in section_list:
if point == section_list[-1]:
return section_list[-2]
ind = section_list.bisect(point)-1
if ind == 0:
return section_list[0]
return section_list[ind]
try:
ind = section_list.bisect(point)
return section_list[ind-1]
except IndexError:
return None
|
Returns the start of the section the given point belongs to.
The given list is assumed to contain start points of consecutive
sections, except for the final point, assumed to be the end point of the
last section. For example, the list [5, 8, 30, 31] is interpreted as the
following list of sections: [5-8), [8-30), [30-31], so the points -32, 4.5,
32 and 100 all match no section, while 5 and 7.5 match [5-8) and so for
them the function returns 5, and 30, 30.7 and 31 all match [30-31].
Parameters
---------
point : float
The point for which to match a section.
section_list : sortedcontainers.SortedList
A list of start points of consecutive sections.
Returns
-------
float
The start of the section the given point belongs to. None if no match
was found.
Example
-------
>>> from sortedcontainers import SortedList
>>> seclist = SortedList([5, 8, 30, 31])
>>> find_point_in_section_list(4, seclist)
>>> find_point_in_section_list(5, seclist)
5
>>> find_point_in_section_list(27, seclist)
8
>>> find_point_in_section_list(31, seclist)
30
|
24,881 |
def handler(self):
if hasNTLM:
if self._handler is None:
passman = request.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, self._parsed_org_url, self._login_username, self._password)
self._handler = HTTPNtlmAuthHandler.HTTPNtlmAuthHandler(passman)
return self._handler
else:
raise Exception("Missing Ntlm python package.")
|
gets the security handler for the class
|
24,882 |
def addMethod(self, m):
if m.nargs == -1:
m.nargs = len([a for a in marshal.genCompleteTypes(m.sigIn)])
m.nret = len([a for a in marshal.genCompleteTypes(m.sigOut)])
self.methods[m.name] = m
self._xml = None
|
Adds a L{Method} to the interface
|
24,883 |
def txtopn(fname):
fnameP = stypes.stringToCharP(fname)
unit_out = ctypes.c_int()
fname_len = ctypes.c_int(len(fname))
libspice.txtopn_(fnameP, ctypes.byref(unit_out), fname_len)
return unit_out.value
|
Internal undocumented command for opening a new text file for
subsequent write access.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ftncls_c.html#Files
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ftncls_c.html#Examples
:param fname: name of the new text file to be opened.
:type fname: str
:return: FORTRAN logical unit of opened file
:rtype: int
|
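A hedged usage sketch, assuming the surrounding SpiceyPy-style module (with the CSPICE shared library loaded) exposes this wrapper:

# Open a new text file and report the FORTRAN logical unit assigned to it.
unit = txtopn('scratch_report.txt')
print('opened on logical unit', unit)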
24,884 |
def gha(julian_day, f):
rad = old_div(np.pi, 180.)
d = julian_day - 2451545.0 + f
L = 280.460 + 0.9856474 * d
g = 357.528 + 0.9856003 * d
L = L % 360.
g = g % 360.
lamb = L + 1.915 * np.sin(g * rad) + .02 * np.sin(2 * g * rad)
epsilon = 23.439 - 0.0000004 * d
t = (np.tan(old_div((epsilon * rad), 2)))**2
r = old_div(1, rad)
rl = lamb * rad
alpha = lamb - r * t * np.sin(2 * rl) + \
(old_div(r, 2)) * t * t * np.sin(4 * rl)
delta = np.sin(epsilon * rad) * np.sin(lamb * rad)
delta = old_div(np.arcsin(delta), rad)
eqt = (L - alpha)
utm = f * 24 * 60
H = old_div(utm, 4) + eqt + 180
H = H % 360.0
return H, delta
|
Returns the Greenwich hour angle and the solar declination, both in degrees.
|
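A short sketch of calling the function above, assuming it is in scope; `julian_day` is the integer Julian day and `f` is the fraction of the day elapsed:

# Greenwich hour angle and solar declination (degrees) at J2000.0, half a day in.
H, delta = gha(2451545, 0.5)
print(round(H, 2), round(delta, 2))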
24,885 |
def radius_server_host_timeout(self, **kwargs):
config = ET.Element("config")
radius_server = ET.SubElement(config, "radius-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(radius_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')  # key names inferred from the XML element names; literals lost in extraction
timeout = ET.SubElement(host, "timeout")
timeout.text = kwargs.pop('timeout')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
24,886 |
def transmit_content_metadata(self, user):
exporter = self.get_content_metadata_exporter(user)
transmitter = self.get_content_metadata_transmitter()
transmitter.transmit(exporter.export())
|
Transmit content metadata to integrated channel.
|
24,887 |
def create_process(daemon, name, callback, *callbackParams):
bp = Process(daemon=daemon, name=name, target=callback, args=callbackParams)
return bp
|
Create a process.
:param daemon: True to have the child terminate when the main process exits; False to make the main process wait for the child to finish
:param name: process name
:param callback: callback function
:param callbackParams: arguments passed to the callback function
:return: a Process object
|
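A minimal sketch of using the helper above; the worker function and its arguments are hypothetical:

def worker(msg, count):
    for _ in range(count):
        print(msg)

if __name__ == '__main__':
    p = create_process(False, 'printer', worker, 'hello', 3)  # non-daemon process named 'printer'
    p.start()
    p.join()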
24,888 |
def requestAvatarId(self, credentials):
username, domain = credentials.username.split("@")
key = self.users.key(domain, username)
if key is None:
return defer.fail(UnauthorizedLogin())
def _cbPasswordChecked(passwordIsCorrect):
if passwordIsCorrect:
return username + '@' + domain
else:
raise UnauthorizedLogin()
return defer.maybeDeferred(credentials.checkPassword,
key).addCallback(_cbPasswordChecked)
|
Return the ID associated with these credentials.
@param credentials: something which implements one of the interfaces in
self.credentialInterfaces.
@return: a Deferred which will fire a string which identifies an
avatar, an empty tuple to specify an authenticated anonymous user
(provided as checkers.ANONYMOUS) or fire a Failure(UnauthorizedLogin).
@see: L{twisted.cred.credentials}
|
24,889 |
def normalize(self, expr, operation):
assert operation in (self.AND, self.OR,)
expr = expr.literalize()
expr = expr.simplify()
operation_example = operation(self.TRUE, self.FALSE)
expr = self._rdistributive(expr, operation_example)
expr = expr.simplify()
return expr
|
Return a normalized expression transformed to its normal form in the
given AND or OR operation.
The new expression arguments will satisfy these conditions:
- operation(*args) == expr (here mathematical equality is meant)
- the operation does not occur in any of its args.
- NOT is only appearing in literals (aka. Negation normal form).
The operation must be an AND or OR operation or a subclass.
|
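A hedged sketch assuming this method belongs to a boolean.py-style `BooleanAlgebra`; the parse/AND/OR attributes used here are that library's public API, not confirmed by the snippet itself:

import boolean

algebra = boolean.BooleanAlgebra()
expr = algebra.parse('a & (b | (c & d))')
cnf = algebra.normalize(expr, algebra.AND)   # conjunctive normal form
dnf = algebra.normalize(expr, algebra.OR)    # disjunctive normal form
print(cnf)
print(dnf)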
24,890 |
def restart():
if not conf.interactive or not os.path.isfile(sys.argv[0]):
raise OSError("Scapy was not started from console")
if WINDOWS:
try:
res_code = subprocess.call([sys.executable] + sys.argv)
except KeyboardInterrupt:
res_code = 1
finally:
os._exit(res_code)
os.execv(sys.executable, [sys.executable] + sys.argv)
|
Restarts scapy
|
24,891 |
def apply_mesh_programs(self, mesh_programs=None):
if not mesh_programs:
mesh_programs = [ColorProgram(), TextureProgram(), FallbackProgram()]
for mesh in self.meshes:
for mp in mesh_programs:
instance = mp.apply(mesh)
if instance is not None:
if isinstance(instance, MeshProgram):
mesh.mesh_program = mp
break
else:
raise ValueError("apply() must return a MeshProgram instance, not {}".format(type(instance)))
if not mesh.mesh_program:
print("WARING: No mesh program applied to ".format(mesh.name))
|
Applies mesh programs to meshes
|
24,892 |
def request_halt(self, req, msg):
f = Future()
@gen.coroutine
def _halt():
req.reply("ok")
yield gen.moment
self.stop(timeout=None)
raise AsyncReply
self.ioloop.add_callback(lambda: chain_future(_halt(), f))
return f
|
Halt the device server.
Returns
-------
success : {'ok', 'fail'}
Whether scheduling the halt succeeded.
Examples
--------
::
?halt
!halt ok
|
24,893 |
def has_key(tup, key):
if isinstance(tup, framework.TupleLike):
return tup.is_bound(key)
if isinstance(tup, dict):
return key in tup
if isinstance(tup, list):
if not isinstance(key, int):
raise ValueError()
return key < len(tup)
raise ValueError('Not a tuple, dict or list: %r' % tup)  # message reconstructed; literal lost in extraction
|
has(tuple, string) -> bool
Return whether a given tuple has a key and the key is bound.
|
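Quick examples of the dict and list branches above (the framework.TupleLike case is omitted), assuming the function is in scope:

print(has_key({'a': 1}, 'a'))    # True: key present in dict
print(has_key({'a': 1}, 'b'))    # False
print(has_key([10, 20, 30], 2))  # True: index 2 is within the list bounds
print(has_key([10, 20, 30], 3))  # False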
24,894 |
def main():
pack = struct.pack('16sI', OPTIONS['<interface>'].encode('ascii'), 0)  # format string and option key assumed; literals lost in extraction
sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
info = struct.unpack('16sI', fcntl.ioctl(sk.fileno(), 0x8933, pack))  # 0x8933 == SIOCGIFINDEX; '16sI' layout assumed
except OSError:
return error('Wireless interface {0} does not exist.'.format(OPTIONS['<interface>']))  # message and option key assumed
finally:
sk.close()
if_index = int(info[1])
sk = nl_socket_alloc()
ok(0, genl_connect, sk)
_LOGGER.debug('Resolving the nl80211 driver ID.')  # debug message reconstructed
driver_id = ok(0, genl_ctrl_resolve, sk, b'nl80211')
_LOGGER.debug('Resolving the nl80211 scan multicast group ID.')  # debug message reconstructed
mcid = ok(0, genl_ctrl_resolve_grp, sk, b'nl80211', b'scan')
if not OPTIONS['--no-scan']:  # option key assumed; original docopt key lost in extraction
print('Triggering a new scan for access points.')  # message reconstructed
else:
print("Attempting to read results of previous scan.")
results = dict()
for i in range(2, -1, -1):
if not OPTIONS['--no-scan']:  # same assumed option key as above
ret = ok(i, do_scan_trigger, sk, if_index, driver_id, mcid)
if ret < 0:
_LOGGER.warning('do_scan_trigger returned %d, retrying in 5 seconds.', ret)  # message reconstructed
time.sleep(5)
continue
ret = ok(i, do_scan_results, sk, if_index, driver_id, results)
if ret < 0:
_LOGGER.warning('do_scan_results returned %d, retrying in 5 seconds.', ret)  # message reconstructed
time.sleep(5)
continue
break
if not results:
print('No access points or other scan results found.')  # message reconstructed
return
print('Found {0} access points:'.format(len(results)))  # message reconstructed
print_table(results.values())
|
Main function called upon script execution.
|
24,895 |
def set_rotation(self, r=0, redraw=True):
if r in self._pix_map.keys():
if redraw:
pixel_list = self.get_pixels()
self._rotation = r
if redraw:
self.set_pixels(pixel_list)
else:
raise ValueError()
|
Sets the LED matrix rotation for viewing, adjust if the Pi is upside
down or sideways. 0 is with the Pi HDMI port facing downwards
|
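A hedged usage sketch against the Sense HAT API this method appears to come from:

from sense_hat import SenseHat

sense = SenseHat()
sense.set_rotation(180)               # flip the display for an upside-down Pi
sense.set_rotation(90, redraw=False)  # rotate without redrawing the current pixels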
24,896 |
def _process_ping(self):
yield self.send_command(PONG_PROTO)
if self._flush_queue.empty():
yield self._flush_pending()
|
The server will periodically send a PING, and if the client
fails to reply with a PONG a number of times, it will close the connection
sending an `-ERR 'Stale Connection'` error.
|
24,897 |
async def check_ping_timeout(self):
if self.closed:
raise exceptions.SocketIsClosedError()
if time.time() - self.last_ping > self.server.ping_interval + 5:
self.server.logger.info('%s: Client is gone, closing socket',  # message assumed; literal lost in extraction
self.sid)
await self.close(wait=False, abort=False)
return False
return True
|
Make sure the client is still sending pings.
This helps detect disconnections for long-polling clients.
|
24,898 |
def _get_symbolic_function_initial_state(self, function_addr, fastpath_mode_state=None):
if function_addr is None:
return None
if function_addr in self._symbolic_function_initial_state:
return self._symbolic_function_initial_state[function_addr]
if fastpath_mode_state is not None:
fastpath_state = fastpath_mode_state
else:
if function_addr in self._function_input_states:
fastpath_state = self._function_input_states[function_addr]
else:
raise AngrCFGError()
symbolic_initial_state = self.project.factory.entry_state(mode='symbolic')  # mode literal assumed; lost in extraction
if fastpath_state is not None:
symbolic_initial_state = self.project.simos.prepare_call_state(fastpath_state,
initial_state=symbolic_initial_state)
func = self.project.kb.functions.get(function_addr)
start_block = func._get_block(function_addr)
num_instr = start_block.instructions - 1
symbolic_initial_state.ip = function_addr
path = self.project.factory.path(symbolic_initial_state)
try:
sim_successors = self.project.factory.successors(path.state, num_inst=num_instr)
except (SimError, AngrError):
return None
exits = sim_successors.flat_successors + sim_successors.unsat_successors
if exits:
final_st = None
for ex in exits:
if ex.satisfiable():
final_st = ex
break
else:
final_st = None
self._symbolic_function_initial_state[function_addr] = final_st
return final_st
|
Symbolically execute the first basic block of the specified function,
then return the resulting state. We prepare the state using the already existing
state in fastpath mode (if available).
:param function_addr: The function address
:return: A symbolic state if succeeded, None otherwise
|
24,899 |
def GetNextWrittenEventSource(self):
if not self._is_open:
raise IOError()
if self._written_event_source_index >= len(self._event_sources):
return None
event_source = self._event_sources[self._written_event_source_index]
self._written_event_source_index += 1
return event_source
|
Retrieves the next event source that was written after open.
Returns:
EventSource: event source or None if there are no newly written ones.
Raises:
IOError: when the storage writer is closed.
OSError: when the storage writer is closed.
|