Unnamed: 0 (int64, 0–389k) | code (string, length 26–79.6k) | docstring (string, length 1–46.9k)
---|---|---|
28,300 |
def create_logstore(self, project_name, logstore_name,
ttl=30,
shard_count=2,
enable_tracking=False,
append_meta=False,
auto_split=True,
max_split_shard=64,
preserve_storage=False
):
if auto_split and (max_split_shard <= 0 or max_split_shard >= 64):
max_split_shard = 64
if preserve_storage:
ttl = 3650
params = {}
resource = "/logstores"
headers = {"x-log-bodyrawsize": , "Content-Type": "application/json"}
body = {"logstoreName": logstore_name, "ttl": int(ttl), "shardCount": int(shard_count),
"enable_tracking": enable_tracking,
"autoSplit": auto_split,
"maxSplitShard": max_split_shard,
"appendMeta": append_meta
}
body_str = six.b(json.dumps(body))
try:
(resp, header) = self._send("POST", project_name, body_str, resource, params, headers)
except LogException as ex:
if ex.get_error_code() == "LogStoreInfoInvalid" and ex.get_error_message() == "redundant key exist in json":
logger.warning("LogStoreInfoInvalid, will retry with basic parameters. detail: {0}".format(ex))
body = {"logstoreName": logstore_name, "ttl": int(ttl), "shardCount": int(shard_count),
"enable_tracking": enable_tracking }
body_str = six.b(json.dumps(body))
(resp, header) = self._send("POST", project_name, body_str, resource, params, headers)
else:
raise
return CreateLogStoreResponse(header, resp)
|
create log store
Unsuccessful operation will cause a LogException.
:type project_name: string
:param project_name: the Project name
:type logstore_name: string
:param logstore_name: the logstore name
:type ttl: int
:param ttl: the life cycle of log in the logstore in days, default 30, up to 3650
:type shard_count: int
:param shard_count: the shard count of the logstore to create, default 2
:type enable_tracking: bool
:param enable_tracking: enable web tracking, default is False
:type append_meta: bool
:param append_meta: whether to append meta info (server receive time and client IP) to each received log
:type auto_split: bool
:param auto_split: automatically split shards; default is True (max_split_shard defaults to 64)
:type max_split_shard: int
:param max_split_shard: max shard to split, up to 64
:type preserve_storage: bool
:param preserve_storage: if True, data is persisted permanently and ttl is ignored.
:return: CreateLogStoreResponse
:raise: LogException
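A minimal usage sketch, assuming a LogClient class that exposes this method (the constructor arguments shown are hypothetical):

client = LogClient(endpoint, access_key_id, access_key_secret)
resp = client.create_logstore("my-project", "my-logstore", ttl=90, shard_count=4)
# resp is a CreateLogStoreResponse; a LogException is raised on failure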
|
28,301 |
def normalize_underscore_case(name):
    # Lower-case everything, turn '_x' into ' X', then capitalize the first letter.
    normalized = name.lower()
    normalized = re.sub(r'_(\w)',
                        lambda match: ' ' + match.group(1).upper(),
                        normalized)
    return normalized[0].upper() + normalized[1:]
|
Normalize an underscore-separated descriptor to something more readable.
e.g. 'NAGIOS_SERVER' becomes 'Nagios Server', and 'host_components' becomes
'Host Components'
|
28,302 |
def socks_username(self, value):
self._verify_proxy_type_compatibility(ProxyType.MANUAL)
self.proxyType = ProxyType.MANUAL
self.socksUsername = value
|
Sets socks proxy username setting.
:Args:
- value: The socks proxy username value.
|
28,303 |
def _add_thousand_g(self, variant_obj, info_dict):
thousand_g = info_dict.get()
if thousand_g:
logger.debug("Updating thousand_g to: {0}".format(
thousand_g))
variant_obj.thousand_g = float(thousand_g)
variant_obj.add_frequency(, variant_obj.get())
|
Add the thousand genomes frequency
Args:
variant_obj (puzzle.models.Variant)
info_dict (dict): An info dictionary
|
28,304 |
def to_snake(camel):
if not camel:
return camel
return ''.join('_' + x if 'A' <= x <= 'Z' else x for x in camel).lower()[camel[0].isupper():]
|
TimeSkill -> time_skill
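A short usage sketch, given the reconstructed string literals above:

to_snake("TimeSkill")    # 'time_skill'
to_snake("HTTPServer")   # 'h_t_t_p_server' (every uppercase letter gets its own underscore)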
|
28,305 |
def create_type_variant(cls, type_name, type_converter):
assert isinstance(type_name, six.string_types)
if not CardinalityField.matches_type(type_name):
message = "type_name= has no CardinalityField" % type_name
raise ValueError(message)
primary_name, cardinality = CardinalityField.split_type(type_name)
if isinstance(type_converter, dict):
type_dict = type_converter
type_converter = type_dict.get(primary_name, None)
if not type_converter:
raise MissingTypeError(primary_name)
assert callable(type_converter)
type_variant = TypeBuilder.with_cardinality(cardinality,
type_converter, listsep=cls.listsep)
type_variant.name = type_name
return type_variant
|
Create type variants for types with a cardinality field.
The new type converters are based on the type converter with
cardinality=1.
.. code-block:: python
import parse
@parse.with_pattern(r'\d+')
def parse_number(text):
return int(text)
new_type = CardinalityFieldTypeBuilder.create_type_variant(
"Number+", parse_number)
new_type = CardinalityFieldTypeBuilder.create_type_variant(
"Number+", dict(Number=parse_number))
:param type_name: Type name with cardinality field suffix.
:param type_converter: Type converter or type dictionary.
:return: Type converter variant (function).
:raises: ValueError, if type_name does not end with CardinalityField
:raises: MissingTypeError, if type_converter is missing in type_dict
|
28,306 |
def array_to_csv(array_like):
stream = StringIO()
np.savetxt(stream, array_like, delimiter=',', fmt='%s')
return stream.getvalue()
|
Convert an array like object to CSV.
To understand better what an array like object is see:
https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays
Args:
array_like (np.array or Iterable or int or float): array like object to be converted to CSV.
Returns:
(str): object serialized to CSV
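A small usage sketch; the delimiter and format string are the values reconstructed above:

import numpy as np
array_to_csv(np.array([[1, 2], [3, 4]]))   # '1,2\n3,4\n'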
|
28,307 |
def get_view(self, columns: Sequence[str], query: str=None) -> PopulationView:
if 'tracked' not in columns:
query_with_track = query + ' and tracked == True' if query else 'tracked == True'
return PopulationView(self, columns, query_with_track)
return PopulationView(self, columns, query)
|
Return a configured PopulationView
Notes
-----
Client code should only need this (and only through the version exposed as
``population_view`` on the builder during setup) if it uses dynamically
generated column names that aren't known at definition time. Otherwise
components should use ``uses_columns``.
|
28,308 |
def yzy_to_zyz(xi, theta1, theta2, eps=1e-9):
quaternion_yzy = quaternion_from_euler([theta1, xi, theta2], 'yzy')
euler = quaternion_yzy.to_zyz()
quaternion_zyz = quaternion_from_euler(euler, 'zyz')
out_angles = (euler[1], euler[0], euler[2])
abs_inner = abs(quaternion_zyz.data.dot(quaternion_yzy.data))
if not np.allclose(abs_inner, 1, eps):
raise TranspilerError()
out_angles = tuple(0 if np.abs(angle) < _CHOP_THRESHOLD else angle
for angle in out_angles)
return out_angles
|
Express a Y.Z.Y single qubit gate as a Z.Y.Z gate.
Solve the equation
.. math::
Ry(theta1).Rz(xi).Ry(theta2) = Rz(phi).Ry(theta).Rz(lambda)
for theta, phi, and lambda.
Return a solution theta, phi, and lambda.
|
28,309 |
def host(environ):
    url = environ['wsgi.url_scheme'] + '://'
    if environ.get('HTTP_HOST'):
        url += environ['HTTP_HOST']
    else:
        url += environ['SERVER_NAME']
        if environ['wsgi.url_scheme'] == 'https':
            if environ['SERVER_PORT'] != '443':
                url += ':' + environ['SERVER_PORT']
        else:
            if environ['SERVER_PORT'] != '80':
                url += ':' + environ['SERVER_PORT']
    return url + quote(environ.get('SCRIPT_NAME', ''))
|
Reconstruct host from environment. A modified version
of http://www.python.org/dev/peps/pep-0333/#url-reconstruction
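A quick sketch with a minimal WSGI environ, assuming the environ keys reconstructed above:

from urllib.parse import quote
environ = {'wsgi.url_scheme': 'http', 'HTTP_HOST': 'example.com',
           'SERVER_NAME': 'example.com', 'SERVER_PORT': '80',
           'SCRIPT_NAME': '/app'}
host(environ)   # 'http://example.com/app'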
|
28,310 |
def zip_built(outdir):
print("Zipping the built files!")
config_file_dir = os.path.join(cwd, "config.py")
if not os.path.exists(config_file_dir):
sys.exit(
"There doesn't seem to be a config.py file here. Have you run 'blended init' and 'blended build' yet?")
|
Packages the build folder into a zip
|
28,311 |
def count_empty(self, field):
try:
df2 = self.df[[field]]
vals = where(df2.applymap(lambda x: x == ""))
num = len(vals[0])
except Exception as e:
self.err(e, "Can not count empty values")
return
self.ok("Found", num, "empty rows in column " + field)
|
Count the empty values in a column
|
28,312 |
def tryMatchedAnchor(self, block, autoIndent):
oposite = { ')': '(',
'}': '{',
']': '[' }
char = self._firstNonSpaceChar(block)
if not char in oposite.keys():
return None
try:
foundBlock, foundColumn = self.findBracketBackward(block, 0, oposite[char])
except ValueError:
return None
if autoIndent:
indentation = self._blockIndent(foundBlock)
else:
indentation = self._makeIndentAsColumn(foundBlock, foundColumn)
dbg("tryMatchedAnchor: success in line %d" % foundBlock.blockNumber())
return indentation
indentation = self._blockIndent(foundBlock)
self._qpart.replaceText((block.blockNumber(), 0), len(self._blockIndent(block)), "\n")
self._qpart.cursorPosition = (block.blockNumber(), len(indentation))
self._setBlockIndent(block.next(), indentation)
dbg("tryMatchedAnchor: success in line %d" % foundBlock.blockNumber())
return self._increaseIndent(indentation)
|
find out whether we pressed return in something like {} or () or [] and indent properly:
{}
becomes:
{
|
}
|
28,313 |
def _transform_variable_to_expression(expression, node, context):
variable_name = expression.variable_name
if not variable_name.startswith(u'$'):
raise AssertionError(u'Unexpectedly received variable name not prefixed '
u'with "$": {}'.format(variable_name))
return bindparam(variable_name[1:])
|
Transform a Variable compiler expression into its SQLAlchemy expression representation.
Args:
expression: expression, Variable compiler expression.
node: SqlNode, the SqlNode the expression applies to.
context: CompilationContext, global compilation state and metadata.
Returns:
Expression, SQLAlchemy expression.
|
28,314 |
def _get_template_texts(source_list=None,
template='jinja',
defaults=None,
context=None,
**kwargs):
chunks
ret = {'name': '_get_template_texts',
'changes': {},
'result': True,
'comment': '',
'data': []}
if source_list is None:
return _error(ret,
)
txtl = []
for (source, source_hash) in source_list:
context_dict = defaults if defaults else {}
if context:
context_dict = salt.utils.dictupdate.merge(context_dict, context)
rndrd_templ_fn = __salt__['cp.get_template'](
source,
,
template=template,
saltenv=__env__,
context=context_dict,
**kwargs
)
log.debug(,
rndrd_templ_fn, source)
if rndrd_templ_fn:
tmplines = None
with salt.utils.files.fopen(rndrd_templ_fn, 'rb') as fp_:
tmplines = fp_.read()
tmplines = salt.utils.stringutils.to_unicode(tmplines)
tmplines = tmplines.splitlines(True)
if not tmplines:
msg = .format(
rndrd_templ_fn, source
)
log.debug(msg)
ret['name'] = source
return _error(ret, msg)
txtl.append(''.join(tmplines))
else:
msg = .format(source)
log.debug(msg)
ret['name'] = source
return _error(ret, msg)
ret['data'] = txtl
return ret
|
Iterate a list of sources and process them as templates.
Returns a list of 'chunks' containing the rendered templates.
|
28,315 |
def bottom(self):
if self.vMerge is not None:
tc_below = self._tc_below
if tc_below is not None and tc_below.vMerge == ST_Merge.CONTINUE:
return tc_below.bottom
return self._tr_idx + 1
|
The row index that marks the bottom extent of the vertical span of
this cell. This is one greater than the index of the bottom-most row
of the span, similar to how a slice of the cell's rows would be
specified.
|
28,316 |
def frequency(data, output='spectraldensity', scaling='power', sides='one',
taper=None, halfbandwidth=3, NW=None, duration=None,
overlap=0.5, step=None, detrend='linear', n_fft=None,
log_trans=False, centend='mean'):
if output not in (, , ):
raise TypeError(f
)
if not in data.list_of_axes:
raise TypeError(time\ +
str(data.list_of_axes))
if len(data.list_of_axes) != data.index_of() + 1:
raise TypeError(time\)
if duration is not None and output == :
raise ValueError()
if output == and data.number_of() != 2:
raise ValueError()
if duration is not None:
nperseg = int(duration * data.s_freq)
if step is not None:
nstep = int(step * data.s_freq)
else:
nstep = nperseg - int(overlap * nperseg)
freq = ChanFreq()
freq.attr = deepcopy(data.attr)
freq.s_freq = data.s_freq
freq.start_time = data.start_time
freq.axis[] = copy(data.axis[])
freq.axis[] = empty(data.number_of(), dtype=)
if output == :
freq.axis[] = empty(data.number_of(), dtype=)
freq.data = empty(data.number_of(), dtype=)
for i in range(data.number_of()):
x = data(trial=i)
if duration is not None:
x = _create_subepochs(x, nperseg, nstep)
f, Sxx = _fft(x,
s_freq=data.s_freq,
detrend=detrend,
taper=taper,
output=output,
sides=sides,
scaling=scaling,
halfbandwidth=halfbandwidth,
NW=NW,
n_fft=n_fft)
if log_trans:
Sxx = log(Sxx)
if duration is not None:
if centend == :
Sxx = Sxx.mean(axis=-2)
elif centend == :
Sxx = median(Sxx, axis=-2)
else:
raise ValueError(
)
freq.axis[][i] = f
if output == :
freq.axis[][i] = arange(Sxx.shape[-1])
if output == :
newchan = .join(freq.axis[][i])
freq.axis[][i] = asarray([newchan], dtype=)
freq.data[i] = Sxx
return freq
|
Compute the
power spectral density (PSD, output='spectraldensity', scaling='power'), or
energy spectral density (ESD, output='spectraldensity', scaling='energy') or
the complex fourier transform (output='complex', sides='two')
Parameters
----------
data : instance of ChanTime
one of the datatypes
detrend : str
None (no detrending), 'constant' (remove mean), 'linear' (remove linear
trend)
output : str
'spectraldensity' or 'csd' or 'complex'
'spectraldensity' meaning the autospectrum or auto-spectral density,
a special case of 'csd' (cross-spectral density), where the signal is
cross-correlated with itself
if 'csd', both channels in data are used as input
sides : str
'one' or 'two', where 'two' implies negative frequencies
scaling : str
'power' (units: V ** 2 / Hz), 'energy' (units: V ** 2), 'fieldtrip',
'chronux'
taper : str
Taper to use, commonly used tapers are 'boxcar', 'hann', 'dpss'
halfbandwidth : int
(only if taper='dpss') Half bandwidth (in Hz), frequency smoothing will
be from +halfbandwidth to -halfbandwidth
NW : int
(only if taper='dpss') Normalized half bandwidth
(NW = halfbandwidth * dur). Number of DPSS tapers is 2 * NW - 1.
If specified, NW takes precedence over halfbandwidth
duration : float, in s
If not None, it divides the signal in epochs of this length (in seconds)
and then average over the PSD / ESD (not the complex result)
overlap : float, between 0 and 1
The amount of overlap between epochs (0.5 = 50%, 0.95 = almost complete
overlap).
step : float, in s
step in seconds between epochs (alternative to overlap)
n_fft: int
Length of FFT, in samples. If less than input axis, input is cropped.
If longer than input axis, input is padded with zeros. If None, FFT
length set to axis length.
log_trans : bool
If True, spectral values will be natural log-transformed. The
transformation is applied before averaging (or taking the median).
centend : str
(only if duration is not None). Central tendency measure to use, either
mean (arithmetic) or median.
Returns
-------
instance of ChanFreq
If output='complex', there is an additional dimension ('taper') which
is useful for 'dpss' but it's also present for all the other tapers.
Raises
------
TypeError
If the data does not have a 'time' axis. It might work in the
future on other axes, but I cannot imagine how.
ValueError
If you use duration (to create multiple epochs) and output='complex',
because it does not average the complex output of multiple epochs.
Notes
-----
See extensive notes at wonambi.trans.frequency._fft
It uses sampling frequency as specified in s_freq, it does not
recompute the sampling frequency based on the time axis.
Use of log or median for Welch's method is included based on
recommendations from Izhikevich et al., bioRxiv, 2018.
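A minimal usage sketch; `data` is assumed to be a wonambi ChanTime instance with a 'time' axis, using only parameters documented above:

psd = frequency(data, output='spectraldensity', scaling='power',
taper='hann', duration=2, overlap=0.5)
# psd is a ChanFreq; psd.data[0] holds the averaged spectrum for the first trial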
|
28,317 |
def choose(self):
if not self.choosed:
self.choosed = True
self.pos = self.pos + Sep(5, 0)
|
Marks the item as the one the user is in.
|
28,318 |
def pkcs7_unpad(buf):
unpadder = cryptography.hazmat.primitives.padding.PKCS7(
cryptography.hazmat.primitives.ciphers.
algorithms.AES.block_size).unpadder()
return unpadder.update(buf) + unpadder.finalize()
|
Removes PKCS7 padding from a decrypted object
:param bytes buf: buffer to remove padding
:rtype: bytes
:return: buffer without PKCS7_PADDING
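A round-trip sketch using the cryptography package's padder (128-bit AES block size):

from cryptography.hazmat.primitives import padding
padder = padding.PKCS7(128).padder()
padded = padder.update(b"attack at dawn") + padder.finalize()
pkcs7_unpad(padded)   # b'attack at dawn'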
|
28,319 |
def _validate_gain_A_value(self, gain_A):
if gain_A not in self._valid_gains_for_channel_A:
raise ParameterValidationError("{gain_A} is not a valid gain".format(gain_A=gain_A))
|
Validate a given value for gain_A.
:type gain_A: int
:raises: ParameterValidationError
|
28,320 |
def server_prepare_root_bin_dir():
/root/bin/
commands = []
for command in commands:
install_file_legacy(flo(), sudo=True)
sudo(flo())
if command == :
sudo()
|
Install custom commands for user root at '/root/bin/'.
|
28,321 |
def opt(self, x_init, f_fp=None, f=None, fp=None):
tnc_rcstrings = [, , , ,
, ]
assert f_fp != None, "TNC requires f_fp"
opt_dict = {}
if self.xtol is not None:
opt_dict[] = self.xtol
if self.ftol is not None:
opt_dict[] = self.ftol
if self.gtol is not None:
opt_dict[] = self.gtol
opt_result = optimize.fmin_tnc(f_fp, x_init, messages=self.messages,
maxfun=self.max_f_eval, **opt_dict)
self.x_opt = opt_result[0]
self.f_opt = f_fp(self.x_opt)[0]
self.funct_eval = opt_result[1]
self.status = tnc_rcstrings[opt_result[2]]
|
Run the TNC optimizer
|
28,322 |
def indent(text, num=4):
str_indent = ' ' * num
return str_indent + ('\n' + str_indent).join(text.splitlines())
|
Indent the given string.
|
28,323 |
def bezier_point(p, t):
try:
p.large_arc
return p.point(t)
except:
pass
deg = len(p) - 1
if deg == 3:
return p[0] + t*(
3*(p[1] - p[0]) + t*(
3*(p[0] + p[2]) - 6*p[1] + t*(
-p[0] + 3*(p[1] - p[2]) + p[3])))
elif deg == 2:
return p[0] + t*(
2*(p[1] - p[0]) + t*(
p[0] - 2*p[1] + p[2]))
elif deg == 1:
return p[0] + t*(p[1] - p[0])
elif deg == 0:
return p[0]
else:
bern = bernstein(deg, t)
return sum(bern[k]*p[k] for k in range(deg+1))
|
Evaluates the Bezier curve given by its control points, p, at t.
Note: Uses Horner's rule for cubic and lower order Bezier curves.
Warning: Be concerned about numerical stability when using this function
with high order curves.
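A small sketch with control points given as complex numbers (x + y*1j), as is common in SVG path code:

p = [0 + 0j, 1 + 2j, 3 + 2j, 4 + 0j]   # cubic Bezier control points
bezier_point(p, 0.0)   # (0+0j), the first control point
bezier_point(p, 1.0)   # (4+0j), the last control point
bezier_point(p, 0.5)   # (2+1.5j)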
|
28,324 |
def set_socket_address(self):
Global.LOGGER.debug()
random.seed()
default_port = random.randrange(5001, 5999)
internal_0mq_address = "tcp://127.0.0.1"
internal_0mq_port_subscriber = str(default_port)
internal_0mq_port_publisher = str(default_port)
Global.LOGGER.info(str.format(
f"zmq subsystem subscriber on {internal_0mq_port_subscriber} port"))
Global.LOGGER.info(str.format(
f"zmq subsystem publisher on {internal_0mq_port_publisher} port"))
self.subscriber_socket_address = f"{internal_0mq_address}:{internal_0mq_port_subscriber}"
self.publisher_socket_address = f"{internal_0mq_address}:{internal_0mq_port_publisher}"
|
Set a random port to be used by zmq
|
28,325 |
def filter_since_tag(self, all_tags):
tag = self.detect_since_tag()
if not tag or tag == REPO_CREATED_TAG_NAME:
return copy.deepcopy(all_tags)
filtered_tags = []
tag_names = [t["name"] for t in all_tags]
try:
idx = tag_names.index(tag)
except ValueError:
self.warn_if_tag_not_found(tag, "since-tag")
return copy.deepcopy(all_tags)
since_tag = all_tags[idx]
since_date = self.get_time_of_tag(since_tag)
for t in all_tags:
tag_date = self.get_time_of_tag(t)
if since_date <= tag_date:
filtered_tags.append(t)
return filtered_tags
|
Filter tags according to the since_tag option.
:param list(dict) all_tags: All tags.
:rtype: list(dict)
:return: Filtered tags.
|
28,326 |
def parse_function(fn):
try:
return parse_string(inspect.getsource(fn))
except (IOError, OSError) as e:
raise ValueError(
% e)
|
Get the source of a function and return its AST.
|
28,327 |
def extractall(self, directory, auto_create_dir=False, patool_path=None):
log.debug(, self.filename, directory, self.backend)
is_zipfile = zipfile.is_zipfile(self.filename)
directory = _fullpath(directory)
if not os.path.exists(self.filename):
raise ValueError(
+ str(self.filename))
if not os.path.exists(directory):
if auto_create_dir:
os.makedirs(directory)
else:
raise ValueError( + str(directory))
if self.backend == 'auto':
if is_zipfile:
self.extractall_zipfile(directory)
else:
self.extractall_patool(directory, patool_path)
if self.backend == 'zipfile':
if not is_zipfile:
raise ValueError('file is not a zip file: ' + str(self.filename))
self.extractall_zipfile(directory)
if self.backend == 'patool':
self.extractall_patool(directory, patool_path)
|
:param directory: directory to extract to
:param auto_create_dir: auto create directory
:param patool_path: the path to the patool backend
|
28,328 |
def get_object(self, obj_class, data=None, subset=None):
if subset:
if not isinstance(subset, list):
if isinstance(subset, basestring):
subset = subset.split("&")
else:
raise TypeError
if data is None:
return self.get_list(obj_class, data, subset)
elif isinstance(data, (basestring, int)):
return self.get_individual_object(obj_class, data, subset)
elif isinstance(data, ElementTree.Element):
return self.get_new_object(obj_class, data)
else:
raise ValueError
|
Return a subclassed JSSObject instance by querying for
existing objects or posting a new object.
Args:
obj_class: The JSSObject subclass type to search for or
create.
data: The data parameter performs different operations
depending on the type passed.
None: Perform a list operation, or for non-container
objects, return all data.
int: Retrieve an object with ID of <data>.
str: Retrieve an object with name of <str>. For some
objects, this may be overridden to include searching
by other criteria. See those objects for more info.
xml.etree.ElementTree.Element: Create a new object from
xml.
subset:
A list of XML subelement tags to request (e.g.
['general', 'purchasing']), OR an '&' delimited string
(e.g. 'general&purchasing'). This is not supported for
all JSSObjects.
Returns:
JSSObjectList: for empty or None arguments to data.
JSSObject: Returns an object of type obj_class for searches
and new objects.
(FUTURE) Will return None if nothing is found that match
the search criteria.
Raises:
TypeError: if subset not formatted properly.
JSSMethodNotAllowedError: if you try to perform an operation
not supported by that object type.
JSSGetError: If object searched for is not found.
JSSPostError: If attempted object creation fails.
|
28,329 |
def getHeight(self):
if self.useUiAutomator:
return self.map['bounds'][1][1] - self.map['bounds'][0][1]
else:
try:
return int(self.map[self.heightProperty])
except:
return 0
|
Gets the height.
|
28,330 |
def _head(self, client_kwargs):
return _handle_http_errors(
self.client.request(
'HEAD', timeout=self._TIMEOUT, **client_kwargs)).headers
|
Returns object HTTP header.
Args:
client_kwargs (dict): Client arguments.
Returns:
dict: HTTP header.
|
28,331 |
def reduced_chi2(self, model, error_map=0):
chi2 = self.reduced_residuals(model, error_map)
return np.sum(chi2**2) / self.num_data_evaluate()
|
returns reduced chi2
:param model:
:param error_map:
:return:
|
28,332 |
def set_value(self, index, value):
self._data[ self.keys[index.row()] ] = value
self.showndata[ self.keys[index.row()] ] = value
self.sizes[index.row()] = get_size(value)
self.types[index.row()] = get_human_readable_type(value)
self.sig_setting_data.emit()
|
Set value
|
28,333 |
def destroy_dns(app=, env=, **_):
client = boto3.Session(profile_name=env).client('route53')
generated = get_details(app=app, env=env)
record = generated.dns_elb()
zone_ids = get_dns_zone_ids(env=env, facing=)
for zone_id in zone_ids:
record_sets = client.list_resource_record_sets(
HostedZoneId=zone_id, StartRecordName=record, StartRecordType=, MaxItems=)
for found_record in record_sets['ResourceRecordSets']:
assert destroy_record(client=client, found_record=found_record, record=record, zone_id=zone_id)
return True
|
Destroy DNS records.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment.
regions (str): AWS region.
Returns:
bool: True upon successful completion.
|
28,334 |
def enable(self, ospf_profile=None, router_id=None):
ospf_profile = element_resolver(ospf_profile) if ospf_profile \
else OSPFProfile().href
self.data.update(
enabled=True,
ospfv2_profile_ref=ospf_profile,
router_id=router_id)
|
Enable OSPF on this engine. For master engines, enable
OSPF on the virtual firewall.
Once enabled on the engine, add an OSPF area to an interface::
engine.dynamic_routing.ospf.enable()
interface = engine.routing.get(0)
interface.add_ospf_area(OSPFArea('myarea'))
:param str,OSPFProfile ospf_profile: OSPFProfile element or str
href; if None, use default profile
:param str router_id: single IP address router ID
:raises ElementNotFound: OSPF profile not found
:return: None
|
28,335 |
def compute_deflections_at_next_plane(plane_index, total_planes):
if plane_index < total_planes - 1:
return True
elif plane_index == total_planes - 1:
return False
else:
raise exc.RayTracingException()
|
This function determines whether the tracer should compute the deflections at the next plane.
This is True if there is another plane after this plane, otherwise it is False.
Parameters
-----------
plane_index : int
The index of the plane we are deciding if we should compute its deflections.
total_planes : int
The total number of planes.
|
28,336 |
def append(self, row):
row = tuple(row)
if len(row) != self.table_width:
raise ValueError()
self.results.append(row)
|
Append a result row and check its length.
>>> x = Results(['title', 'type'])
>>> x.append(('Konosuba', 'TV'))
>>> x
Results(['title', 'type'], [('Konosuba', 'TV')])
>>> x.append(('Konosuba',))
Traceback (most recent call last):
...
ValueError: Wrong result row length
|
28,337 |
def takeChild(self, index):
item = super(XGanttWidgetItem, self).takeChild(index)
if item:
item.removeFromScene()
return item
|
Removes the child at the given index from this item.
:param index | <int>
|
28,338 |
def _new_pivot_query(self):
query = super(MorphToMany, self)._new_pivot_query()
return query.where(self._morph_type, self._morph_class)
|
Create a new query builder for the pivot table.
:rtype: eloquent.orm.Builder
|
28,339 |
def style_defs(cls):
formatter = HtmlFormatter()
formatter.style.highlight_color = cls.VIOLATION_COLOR
return formatter.get_style_defs()
|
Return the CSS style definitions required
by the formatted snippet.
|
28,340 |
def pub_view(request, docid, configuration):
if in settings.CONFIGURATIONS[configuration]:
for annotationtype, set in settings.CONFIGURATIONS[][]:
try:
r = flat.comm.query(request, "USE pub/" + docid + " DECLARE " + annotationtype + " OF " + set)
except Exception as e:
return fatalerror(request,e)
return initdoc(request, ,docid, , , configuration=configuration)
|
The initial view, does not provide the document content yet
|
28,341 |
def set_debug(self, debuglevel):
if isinstance(debuglevel, int):
self._debuglevel = debuglevel
if self._debuglevel == 1:
logging.basicConfig(level=logging.INFO,
format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s")
api_logger.setLevel(logging.INFO)
elif self._debuglevel == 2:
logging.basicConfig(level=logging.DEBUG,
format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s")
requests.cookies.cookielib.debug = True
api_logger.setLevel(logging.DEBUG)
elif self._debuglevel >= 3:
logging.basicConfig(level=logging.DEBUG,
format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s")
requests.cookies.cookielib.debug = True
api_logger.setLevel(logging.DEBUG)
urllib3_logger = logging.getLogger("requests.packages.urllib3")
urllib3_logger.setLevel(logging.DEBUG)
urllib3_logger.propagate = True
else:
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
requests.cookies.cookielib.debug = False
api_logger.setLevel(logging.WARNING)
return
|
Change the debug level of the API
**Returns:** No item returned.
|
28,342 |
def _lsm_load_pages(self):
pages = self.pages
pages.cache = True
pages.useframes = True
pages.keyframe = 1
pages.keyframe = 0
pages._load(keyframe=None)
self._lsm_fix_strip_offsets()
self._lsm_fix_strip_bytecounts()
keyframe = pages[0]
for page in pages[::2]:
page.keyframe = keyframe
keyframe = pages[1]
for page in pages[1::2]:
page.keyframe = keyframe
|
Load and fix all pages from LSM file.
|
28,343 |
def get_end_date(self, obj):
obj_date = getattr(obj, self.get_end_date_field())
try:
obj_date = obj_date.date()
except AttributeError:
pass
return obj_date
|
Returns the end date for a model instance
|
28,344 |
def generate_api_doc(self, uri):
sphinx.builder
functions, classes = self._parse_module_with_import(uri)
if not len(functions) and not len(classes) and DEBUG:
print(, uri)
uri_short = re.sub(r % self.package_name,,uri)
head =
body =
if in uri_short:
title = + uri_short +
head += title + + self.rst_section_levels[2] * len(title)
else:
title = + uri_short +
head += title + + self.rst_section_levels[1] * len(title)
head += + uri +
head += + uri +
body += + uri +
for c in classes:
body += + c + \
+ self.rst_section_levels[3] * \
(len(c)+9) +
body += + c +
body += \
\
\
\
head +=
for f in classes + functions:
head += + f +
head +=
for f in functions:
body += f +
body += self.rst_section_levels[3] * len(f) +
body += + f +
return head, body
|
Make autodoc documentation template string for a module
Parameters
----------
uri : string
python location of module - e.g 'sphinx.builder'
Returns
-------
head : string
Module name, table of contents.
body : string
Function and class docstrings.
|
28,345 |
def _double_fork(self):
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
return None
except OSError as err:
LOG.exception(
"Fork #1 failed: ({0}) {1}".format(
err.errno,
err.strerror,
),
)
sys.exit(exit.DAEMONIZE_FAILED)
return None
os.chdir("/")
os.setsid()
os.umask(0)
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError as err:
LOG.exception(
"Fork #2 failed: ({0}) {1}".format(
err.errno,
err.strerror,
),
)
sys.exit(exit.DAEMONIZE_FAILED)
return None
|
Do the UNIX double-fork magic.
See Stevens' "Advanced Programming in the UNIX Environment" for details
(ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
|
28,346 |
def ip_shell_after_exception(frame):
frame_info_list = []
frame_list = []
original_frame = frame = frame or inspect.currentframe()
local_ns = frame.f_locals
dummy_module = DummyMod()
dummy_module.__dict__ = frame.f_globals
while frame is not None:
frame_list.append(frame)
info = inspect.getframeinfo(frame)
frame_info_list.append(info)
frame = frame.f_back
frame_info_list.reverse()
frame_info_str_list = [format_frameinfo(fi) for fi in frame_info_list]
custom_header1 = "----- frame list -----\n\n"
frame_info_str = "\n--\n".join(frame_info_str_list[:-1])
custom_header2 = "\n----- ERROR -----\n"
custom_header = "{0}{1}{2}".format(custom_header1, frame_info_str, custom_header2)
if len(frame_info_list) >= 2:
test_str = str(frame_info_list[0]) + str(frame_info_list[1])
if in test_str and in test_str:
print("\n- Not entering IPython embedded shell -\n")
return
config = load_default_config()
config.InteractiveShellEmbed = config.TerminalInteractiveShell
InteractiveShellEmbedWithoutBanner.clear_instance()
InteractiveShellEmbedWithoutBanner._instance = None
shell = InteractiveShellEmbedWithoutBanner.instance()
shell(header=custom_header, stack_depth=2, local_ns=local_ns, module=dummy_module)
diff_index = local_ns.get("__mu")
if not isinstance(diff_index, int):
diff_index = None
return diff_index
|
Launches an IPython embedded shell in the namespace where an exception occurred.
:param frame:
:return:
|
28,347 |
def business_hours_schedule_delete(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/schedules
api_path = "/api/v2/business_hours/schedules/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, method="DELETE", **kwargs)
|
https://developer.zendesk.com/rest_api/docs/core/schedules#delete-a-schedule
|
28,348 |
def _convertEntities(self, match):
x = match.group(1)
if self.convertHTMLEntities and x in name2codepoint:
return unichr(name2codepoint[x])
elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
if self.convertXMLEntities:
return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
else:
return u'&%s;' % x
elif len(x) > 0 and x[0] == '#':
if len(x) > 1 and x[1] == 'x':
return unichr(int(x[2:], 16))
else:
return unichr(int(x[1:]))
elif self.escapeUnrecognizedEntities:
return u'&amp;%s;' % x
else:
return u'&%s;' % x
|
Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped.
|
28,349 |
def train(X_train, y_train, **kwargs):
annot.opcorpora.xml
crf = Trainer()
crf.set_params({
'c1': 1.0,
'c2': 0.001,
'max_iterations': 200,
'feature.possible_transitions': True,
})
for xseq, yseq in zip(X_train, y_train):
crf.append(xseq, yseq)
crf.train(TOKENIZATION_MODEL_PATH)
return crf
|
>>> corpus = CorpusReader('annot.opcorpora.xml')
>>> X_train, x_test, y_train, y_test = get_train_data(corpus, test_size=0.33, random_state=42)
>>> crf = train(X_train, y_train)
|
28,350 |
def getDatastreamProfile(self, dsid, date=None):
if self._create:
return None
r = self.api.getDatastream(self.pid, dsid, asOfDateTime=date)
return parse_xml_object(DatastreamProfile, r.content, r.url)
|
Get information about a particular datastream belonging to this object.
:param dsid: datastream id
:rtype: :class:`DatastreamProfile`
|
28,351 |
def _explore(self, node, visited, skip_father=None):
if node in visited:
return
visited = visited + [node]
fathers_context = {:set(), :set(), :set(), :{}}
for father in node.fathers:
if self.KEY in father.context:
fathers_context[] |= set([s for s in father.context[self.KEY][] if s!=skip_father])
fathers_context[] |= set([c for c in father.context[self.KEY][] if c!=skip_father])
fathers_context[] |= set(father.context[self.KEY][])
fathers_context[] = union_dict(fathers_context[], father.context[self.KEY][])
if node in self.visited_all_paths:
if all(call in self.visited_all_paths[node][] for call in fathers_context[]):
if all(send in self.visited_all_paths[node][] for send in fathers_context[]):
if all(read in self.visited_all_paths[node][] for read in fathers_context[]):
if dict_are_equal(self.visited_all_paths[node][], fathers_context[]):
return
else:
self.visited_all_paths[node] = {:set(), :set(), :set(), :{}}
self.visited_all_paths[node][] = set(self.visited_all_paths[node][] | fathers_context[])
self.visited_all_paths[node][] = set(self.visited_all_paths[node][] | fathers_context[])
self.visited_all_paths[node][] = set(self.visited_all_paths[node][] | fathers_context[])
self.visited_all_paths[node][] = union_dict(self.visited_all_paths[node][], fathers_context[])
node.context[self.KEY] = fathers_context
state_vars_read = set(node.state_variables_read)
state_vars_written = set(node.state_variables_written)
slithir_operations = []
for internal_call in node.internal_calls:
if isinstance(internal_call, Function):
state_vars_written |= set(internal_call.all_state_variables_written())
state_vars_read |= set(internal_call.all_state_variables_read())
slithir_operations += internal_call.all_slithir_operations()
contains_call = False
node.context[self.KEY][] = set(state_vars_written)
if self._can_callback(node.irs + slithir_operations):
node.context[self.KEY][] = set(node.context[self.KEY][] | {node})
node.context[self.KEY][][node] = set(node.context[self.KEY][].get(node, set()) | node.context[self.KEY][] |state_vars_read)
contains_call = True
if self._can_send_eth(node.irs + slithir_operations):
node.context[self.KEY][] = set(node.context[self.KEY][] | {node})
node.context[self.KEY][] = set(node.context[self.KEY][] | state_vars_read)
sons = node.sons
if contains_call and node.type in [NodeType.IF, NodeType.IFLOOP]:
if self._filter_if(node):
son = sons[0]
self._explore(son, visited, node)
sons = sons[1:]
else:
son = sons[1]
self._explore(son, visited, node)
sons = [sons[0]]
for son in sons:
self._explore(son, visited)
|
Explore the CFG and look for re-entrancy
Heuristic: There is a re-entrancy if a state variable is written
after an external call
node.context will contain the external calls executed
It contains the calls executed in father nodes
if node.context is not empty, and variables are written, a re-entrancy is possible
|
28,352 |
def remove_item(self, **kwargs):
path = self._get_id_path()
kwargs.update({'session_id': self.session_id})
payload = {
'media_id': kwargs.pop('media_id', None),
}
response = self._POST(path, kwargs, payload)
self._set_attrs_to_values(response)
return response
|
Delete movies from a list that the user created.
A valid session id is required.
Args:
media_id: A movie id.
Returns:
A dict respresentation of the JSON returned from the API.
|
28,353 |
def _proxy(self):
if self._context is None:
self._context = RecordingSettingsContext(self._version, )
return self._context
|
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: RecordingSettingsContext for this RecordingSettingsInstance
:rtype: twilio.rest.video.v1.recording_settings.RecordingSettingsContext
|
28,354 |
def _check_is_chained_assignment_possible(self):
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t=,
force=True)
return True
elif self._is_copy:
self._check_setitem_copy(stacklevel=4, t=)
return False
|
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
Should be called just near setting a value
Will return a boolean if we are a view and are cached, but a
single-dtype meaning that the cacher should be updated following
setting.
|
28,355 |
def add_pos_indicator(self):
body = new_body(name="pos_indicator")
body.append(
new_geom(
"sphere",
[0.03],
rgba=[1, 0, 0, 0.5],
group=1,
contype="0",
conaffinity="0",
)
)
body.append(new_joint(type="free", name="pos_indicator"))
self.worldbody.append(body)
|
Adds a new position indicator.
|
28,356 |
def set(self, **kargs):
kwords = set([
, , , , ,
, ,
])
kargs = dict(kargs)
oldkargs = {}
fargs = {}
for k in list(kargs.keys()):
if k in kwords:
oldkargs[k] = getattr(self, k)
setattr(self, k, kargs[k])
kwords.remove(k)
else:
fargs[k] = kargs[k]
del kargs[k]
for k in kwords:
kargs[k] = getattr(self, k)
if in kwords:
return kargs, oldkargs
|
Reset default keyword parameters.
Assigns new default values from dictionary ``kargs`` to the fitter's
keyword parameters. Keywords for the underlying :mod:`lsqfit` fitters
can also be included (or grouped together in dictionary
``fitterargs``).
Returns tuple ``(kargs, oldkargs)`` where ``kargs`` is a dictionary
containing all :class:`lsqfit.MultiFitter` keywords after they have
been updated, and ``oldkargs`` contains the original values for these
keywords. Use ``fitter.set(**oldkargs)`` to restore the original
values.
|
28,357 |
def _make_parser_func(sep):
def parser_func(
filepath_or_buffer,
sep=sep,
delimiter=None,
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
iterator=False,
chunksize=None,
compression="infer",
thousands=None,
decimal=b".",
lineterminator=None,
quotechar='"',
quoting=0,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
tupleize_cols=None,
error_bad_lines=True,
warn_bad_lines=True,
skipfooter=0,
doublequote=True,
delim_whitespace=False,
low_memory=True,
memory_map=False,
float_precision=None,
):
_, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
if not kwargs.get("sep", sep):
kwargs["sep"] = "\t"
return _read(**kwargs)
return parser_func
|
Creates a parser function from the given sep.
Args:
sep: The separator default to use for the parser.
Returns:
A function object.
|
28,358 |
def beautify(string, *args, **kwargs):
parser = Parser(args, kwargs)
return parser.beautify(string)
|
Convenient interface to the ecstasy package.
Arguments:
string (str): The string to beautify with ecstasy.
args (list): The positional arguments.
kwargs (dict): The keyword ('always') arguments.
|
28,359 |
def get_environ(self, key, default=None, cast=None):
key = key.upper()
data = self.environ.get(key, default)
if data:
if cast in converters:
data = converters.get(cast)(data)
if cast is True:
data = parse_conf_data(data, tomlfy=True)
return data
|
Get value from environment variable using os.environ.get
:param key: The name of the setting value, will always be upper case
:param default: In case of not found it will be returned
:param cast: Should cast in to @int, @float, @bool or @json ?
or cast must be true to use cast inference
:return: The value if found, default or None
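A hedged usage sketch, assuming `settings` is an object exposing this method and that the converters registry uses the '@'-prefixed names mentioned above:

import os
os.environ['MAX_RETRIES'] = '3'
settings.get_environ('max_retries', default=1, cast='@int')   # 3
settings.get_environ('missing_key', default='fallback')       # 'fallback'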
|
28,360 |
def randomLocation(cls, radius, width, height, origin=None):
return cls(width,
height,
Point.randomLocation(radius, origin))
|
:param: radius - float
:param: width - float
:param: height - float
:param: origin - optional Point subclass
:return: Rectangle
|
28,361 |
def _set_medians_and_extremes(self):
rtts = sorted([p.rtt for p in self.packets if p.rtt is not None])
if rtts:
self.rtt_min = rtts[0]
self.rtt_max = rtts[-1]
self.rtt_median = self.calculate_median(rtts)
offsets = sorted(
[p.offset for p in self.packets if p.offset is not None]
)
if offsets:
self.offset_min = offsets[0]
self.offset_max = offsets[-1]
self.offset_median = self.calculate_median(offsets)
|
Sets median values for rtt and the offset of result packets.
|
28,362 |
def backtick (cmd, encoding=):
data = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
return data.decode(encoding)
|
Return decoded output from command.
|
28,363 |
def batch_qs(qs, batch_size=1000):
total = qs.count()
for start in range(0, total, batch_size):
end = min(start + batch_size, total)
yield (start, end, total, qs[start:end])
|
Returns a (start, end, total, queryset) tuple for each batch in the given
queryset.
Usage:
# Make sure to order your queryset
article_qs = Article.objects.order_by('id')
for start, end, total, qs in batch_qs(article_qs):
print "Now processing %s - %s of %s" % (start + 1, end, total)
for article in qs:
print article.body
|
28,364 |
def receiveds_parsing(receiveds):
parsed = []
receiveds = [re.sub(JUNK_PATTERN, " ", i).strip() for i in receiveds]
n = len(receiveds)
log.debug("Nr. of receiveds. {}".format(n))
for idx, received in enumerate(receiveds):
log.debug("Parsing received {}/{}".format(idx + 1, n))
log.debug("Try to parse {!r}".format(received))
try:
values_by_clause = parse_received(received)
except MailParserReceivedParsingError:
parsed.append({: received})
else:
parsed.append(values_by_clause)
log.debug("len(receiveds) %s, len(parsed) %s" % (
len(receiveds), len(parsed)))
if len(receiveds) != len(parsed):
log.error("len(receiveds): %s, len(parsed): %s, receiveds: %s, \
parsed: %s" % (len(receiveds), len(parsed), receiveds, parsed))
return receiveds_not_parsed(receiveds)
else:
return receiveds_format(parsed)
|
This function parses the receiveds headers.
Args:
receiveds (list): list of raw receiveds headers
Returns:
a list of parsed receiveds headers with first hop in first position
|
28,365 |
def fetch(context, data):
url = data.get()
attempt = data.pop(, 1)
try:
result = context.http.get(url, lazy=True)
rules = context.get(, {: {}})
if not Rule.get_rule(rules).apply(result):
context.log.info(, result.url)
return
if not result.ok:
err = (result.url, result.status_code)
context.emit_warning("Fetch fail [%s]: HTTP %s" % err)
if not context.params.get(, False):
return
else:
context.log.info("Fetched [%s]: %r",
result.status_code,
result.url)
data.update(result.serialize())
if url != result.url:
tag = make_key(context.run_id, url)
context.set_tag(tag, None)
context.emit(data=data)
except RequestException as ce:
retries = int(context.get(, 3))
if retries >= attempt:
context.log.warn("Retry: %s (error: %s)", url, ce)
data[] = attempt + 1
context.recurse(data=data, delay=2 ** attempt)
else:
context.emit_warning("Fetch fail [%s]: %s" % (url, ce))
|
Do an HTTP GET on the ``url`` specified in the inbound data.
|
28,366 |
def decode_jwt_token(token, secret):
try:
decoded_token = jwt.decode(
token,
key=secret.encode(),
verify=True,
algorithms=[__algorithm__],
leeway=__bound__
)
if 'iss' not in decoded_token:
raise TokenIssuerError
if 'iat' not in decoded_token:
raise TokenIssuedAtError
now = epoch_seconds()
iat = int(decoded_token['iat'])
if now > (iat + __bound__):
raise TokenExpiredError("Token has expired", decoded_token)
if iat > (now + __bound__):
raise TokenExpiredError("Token can not be in the future", decoded_token)
return True
except jwt.InvalidIssuedAtError:
raise TokenExpiredError("Token has invalid iat field", decode_token(token))
except jwt.DecodeError:
raise TokenDecodeError
|
Validates and decodes the JWT token
Token checked for
- signature of JWT token
- token issued date is valid
:param token: jwt token
:param secret: client specific secret
:return boolean: True if valid token, False otherwise
:raises TokenIssuerError: if iss field not present
:raises TokenIssuedAtError: if iat field not present
:raises jwt.DecodeError: If signature validation fails
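A usage sketch with PyJWT; the signing algorithm is assumed to be HS256 (whatever __algorithm__ is configured to):

import time, jwt
secret = "client-secret"
token = jwt.encode({"iss": "my-service", "iat": int(time.time())}, secret, algorithm="HS256")
decode_jwt_token(token, secret)   # True, or raises TokenExpiredError / TokenIssuerError / TokenIssuedAtError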
|
28,367 |
def _get_version_from_git_tag(path):
m = GIT_DESCRIBE_REGEX.match(_git_describe_tags(path) or '')
if m is None:
return None
version, post_commit, hash = m.groups()
return version if post_commit == '0' else "{0}.post{1}+{2}".format(version, post_commit, hash)
|
Return a PEP440-compliant version derived from the git status.
If that fails for any reason, return the changeset hash.
|
28,368 |
def assign_params(sess, params, network):
ops = []
for idx, param in enumerate(params):
ops.append(network.all_params[idx].assign(param))
if sess is not None:
sess.run(ops)
return ops
|
Assign the given parameters to the TensorLayer network.
Parameters
----------
sess : Session
TensorFlow Session.
params : list of array
A list of parameters (array) in order.
network : :class:`Layer`
The network to be assigned.
Returns
--------
list of operations
A list of tf ops in order that assign params. Support sess.run(ops) manually.
Examples
--------
- See ``tl.files.save_npz``
References
----------
- `Assign value to a TensorFlow variable <http://stackoverflow.com/questions/34220532/how-to-assign-value-to-a-tensorflow-variable>`__
|
28,369 |
def send_messages(self, email_messages):
from .mail import create
from .utils import create_attachments
if not email_messages:
return
for email_message in email_messages:
subject = email_message.subject
from_email = email_message.from_email
message = email_message.body
headers = email_message.extra_headers
alternatives = getattr(email_message, 'alternatives', ())
for alternative in alternatives:
if alternative[1].startswith('text/html'):
html_message = alternative[0]
break
else:
html_message = ''
attachment_files = {}
for attachment in email_message.attachments:
if isinstance(attachment, MIMEBase):
attachment_files[attachment.get_filename()] = {
: ContentFile(attachment.get_payload()),
: attachment.get_content_type(),
: OrderedDict(attachment.items()),
}
else:
attachment_files[attachment[0]] = ContentFile(attachment[1])
email = create(sender=from_email,
recipients=email_message.to, cc=email_message.cc,
bcc=email_message.bcc, subject=subject,
message=message, html_message=html_message,
headers=headers)
if attachment_files:
attachments = create_attachments(attachment_files)
email.attachments.add(*attachments)
if get_default_priority() == 'now':
email.dispatch()
|
Queue one or more EmailMessage objects and return the number of
email messages sent.
|
28,370 |
def check_for_launchpad(old_vendor, name, urls):
if old_vendor != "pypi":
|
Check if the project is hosted on launchpad.
:param name: str, name of the project
:param urls: set, urls to check.
:return: the name of the project on launchpad, or an empty string.
|
28,371 |
def _get_asconv_headers(mosaic):
asconv_headers = re.findall(r,
mosaic[Tag(0x0029, 0x1020)].value.decode(encoding=),
re.DOTALL)[0]
return asconv_headers
|
Getter for the asconv headers (ASCII header info stored in the DICOM)
|
28,372 |
def _activate_organization(organization):
[_activate_organization_course_relationship(record) for record
in internal.OrganizationCourse.objects.filter(organization_id=organization.id, active=False)]
[_activate_record(record) for record
in internal.Organization.objects.filter(id=organization.id, active=False)]
|
Activates an inactivated (soft-deleted) organization as well as any inactive relationships
|
28,373 |
def from_compact(cls: Type[TransactionType], currency: str, compact: str) -> TransactionType:
lines = compact.splitlines(True)
n = 0
header_data = Transaction.re_header.match(lines[n])
if header_data is None:
raise MalformedDocumentError("Compact TX header")
version = int(header_data.group(1))
issuers_num = int(header_data.group(2))
inputs_num = int(header_data.group(3))
unlocks_num = int(header_data.group(4))
outputs_num = int(header_data.group(5))
has_comment = int(header_data.group(6))
locktime = int(header_data.group(7))
n += 1
blockstamp = None
if version >= 3:
blockstamp = BlockUID.from_str(Transaction.parse_field("CompactBlockstamp", lines[n]))
n += 1
issuers = []
inputs = []
unlocks = []
outputs = []
signatures = []
for i in range(0, issuers_num):
issuer = Transaction.parse_field("Pubkey", lines[n])
issuers.append(issuer)
n += 1
for i in range(0, inputs_num):
input_source = InputSource.from_inline(version, lines[n])
inputs.append(input_source)
n += 1
for i in range(0, unlocks_num):
unlock = Unlock.from_inline(lines[n])
unlocks.append(unlock)
n += 1
for i in range(0, outputs_num):
output_source = OutputSource.from_inline(lines[n])
outputs.append(output_source)
n += 1
comment = ""
if has_comment == 1:
data = Transaction.re_compact_comment.match(lines[n])
if data:
comment = data.group(1)
n += 1
else:
raise MalformedDocumentError("Compact TX Comment")
while n < len(lines):
data = Transaction.re_signature.match(lines[n])
if data:
signatures.append(data.group(1))
n += 1
else:
raise MalformedDocumentError("Compact TX Signatures")
return cls(version, currency, blockstamp, locktime, issuers, inputs, unlocks, outputs, comment, signatures)
|
Return Transaction instance from compact string format
:param currency: Name of the currency
:param compact: Compact format string
:return:
|
28,374 |
def _all_indexes_same(indexes):
first = indexes[0]
for index in indexes[1:]:
if not first.equals(index):
return False
return True
|
Determine if all indexes contain the same elements.
Parameters
----------
indexes : list of Index objects
Returns
-------
bool
True if all indexes contain the same elements, False otherwise.
|
28,375 |
def listen_tta(self, target, timeout):
if not target.brty == :
info = "unsupported target bitrate: %r" % target.brty
raise nfc.clf.UnsupportedTargetError(info)
if target.rid_res:
info = "listening for type 1 tag activation is not supported"
raise nfc.clf.UnsupportedTargetError(info)
if target.sens_res is None:
raise ValueError("sens_res is required")
if target.sdd_res is None:
raise ValueError("sdd_res is required")
if target.sel_res is None:
raise ValueError("sel_res is required")
if len(target.sens_res) != 2:
raise ValueError("sens_res must be 2 byte")
if len(target.sdd_res) != 4:
raise ValueError("sdd_res must be 4 byte")
if len(target.sel_res) != 1:
raise ValueError("sel_res must be 1 byte")
if target.sdd_res[0] != 0x08:
raise ValueError("sdd_res[0] must be 08h")
nfca_params = target.sens_res + target.sdd_res[1:4] + target.sel_res
log.debug("nfca_params %s", hexlify(nfca_params))
self.chipset.tg_set_rf("106A")
self.chipset.tg_set_protocol(self.chipset.tg_set_protocol_defaults)
self.chipset.tg_set_protocol(rf_off_error=False)
time_to_return = time.time() + timeout
tg_comm_rf_args = {: True, : nfca_params}
tg_comm_rf_args[] = min(int(1000 * timeout), 0xFFFF)
def listen_tta_tt2():
recv_timeout = tg_comm_rf_args[]
while recv_timeout > 0:
log.debug("wait %d ms for Type 2 Tag activation", recv_timeout)
try:
data = self.chipset.tg_comm_rf(**tg_comm_rf_args)
except CommunicationError as error:
log.debug(error)
else:
brty = (, , )[data[0]-11]
log.debug("%s rcvd %s", brty, hexlify(buffer(data, 7)))
if brty == "106A" and data[2] & 0x03 == 3:
self.chipset.tg_set_protocol(rf_off_error=True)
return nfc.clf.LocalTarget(
"106A", sens_res=nfca_params[0:2],
sdd_res=b+nfca_params[2:5],
sel_res=nfca_params[5:6], tt2_cmd=data[7:])
else:
log.debug("not a 106A Type 2 Tag command")
finally:
recv_timeout = int(1000 * (time_to_return - time.time()))
tg_comm_rf_args[] = recv_timeout
def listen_tta_tt4():
rats_cmd = rats_res = None
recv_timeout = tg_comm_rf_args[]
while recv_timeout > 0:
log.debug("wait %d ms for 106A TT4 command", recv_timeout)
try:
data = self.chipset.tg_comm_rf(**tg_comm_rf_args)
tg_comm_rf_args[] = None
except CommunicationError as error:
tg_comm_rf_args[] = None
rats_cmd = rats_res = None
log.debug(error)
else:
brty = (, , )[data[0]-11]
log.debug("%s rcvd %s", brty, hexlify(buffer(data, 7)))
if brty == "106A" and data[2] == 3 and data[7] == 0xE0:
(rats_cmd, rats_res) = (data[7:], target.rats_res)
log.debug("rcvd RATS_CMD %s", hexlify(rats_cmd))
if rats_res is None:
rats_res = bytearray.fromhex("05 78 80 70 02")
log.debug("send RATS_RES %s", hexlify(rats_res))
tg_comm_rf_args[] = rats_res
elif brty == "106A" and data[7] != 0xF0 and rats_cmd:
did = rats_cmd[1] & 0x0F
cmd = data[7:]
ta_tb_tc = rats_res[2:]
ta = ta_tb_tc.pop(0) if rats_res[1] & 0x10 else None
tb = ta_tb_tc.pop(0) if rats_res[1] & 0x20 else None
tc = ta_tb_tc.pop(0) if rats_res[1] & 0x40 else None
if ta is not None:
log.debug("TA(1) = {:08b}".format(ta))
if tb is not None:
log.debug("TB(1) = {:08b}".format(tb))
if tc is not None:
log.debug("TC(1) = {:08b}".format(tc))
if ta_tb_tc:
log.debug("T({}) = {}".format(
len(ta_tb_tc), hexlify(ta_tb_tc)))
did_supported = tc is None or bool(tc & 0x02)
cmd_with_did = bool(cmd[0] & 0x08)
if (((cmd_with_did and did_supported and cmd[1] == did)
or (did == 0 and not cmd_with_did))):
if cmd[0] in (0xC2, 0xCA):
log.debug("rcvd S(DESELECT) %s", hexlify(cmd))
tg_comm_rf_args[] = cmd
log.debug("send S(DESELECT) %s", hexlify(cmd))
rats_cmd = rats_res = None
else:
log.debug("rcvd TT4_CMD %s", hexlify(cmd))
self.chipset.tg_set_protocol(rf_off_error=True)
return nfc.clf.LocalTarget(
"106A", sens_res=nfca_params[0:2],
sdd_res=b+nfca_params[2:5],
sel_res=nfca_params[5:6], tt4_cmd=cmd,
rats_cmd=rats_cmd, rats_res=rats_res)
else:
log.debug("skip TT4_CMD %s (DID)", hexlify(cmd))
else:
log.debug("not a 106A TT4 command")
finally:
recv_timeout = int(1000 * (time_to_return - time.time()))
tg_comm_rf_args[] = recv_timeout
if target.sel_res[0] & 0x60 == 0x00:
return listen_tta_tt2()
if target.sel_res[0] & 0x20 == 0x20:
return listen_tta_tt4()
reason = "sel_res does not indicate any tag target support"
raise nfc.clf.UnsupportedTargetError(reason)
|
Listen as Type A Target in 106 kbps.
Restrictions:
* It is not possible to send short frames that are required
for ACK and NAK responses. This means that a Type 2 Tag
emulation can only implement a single sector memory model.
* It can not be avoided that the chipset responds to SENSF_REQ
commands. The driver configures the SENSF_RES response to
all zero and ignores all Type F communication but eventually
it depends on the remote device whether Type A Target
activation will still be attempted.
|
28,376 |
def list_absent(name, acl_type, acl_names=None, recurse=False):
usergroup
if acl_names is None:
acl_names = []
ret = {: name,
: True,
: {},
: }
if not os.path.exists(name):
ret[] = .format(name)
ret[] = False
return ret
__current_perms = __salt__[](name)
if acl_type.startswith((, )):
_acl_type = .join(acl_type.split()[1:])
_current_perms = __current_perms[name].get(, {})
_default = True
else:
_acl_type = acl_type
_current_perms = __current_perms[name]
_default = False
if not acl_names:
_search_names = set(__current_perms[name].get().get(_acl_type, ))
else:
_search_names = set(acl_names)
if _current_perms.get(_acl_type, None) or _default:
try:
users = {}
for i in _current_perms[_acl_type]:
if i and next(six.iterkeys(i)) in _search_names:
users.update(i)
except (AttributeError, KeyError):
users = None
if users:
ret[] =
if __opts__[]:
ret[] = None
return ret
for acl_name in acl_names:
__salt__[](acl_type, acl_name, name, recursive=recurse)
else:
ret[] =
else:
ret[] =
ret[] = False
return ret
|
Ensure a Linux ACL list does not exist
Takes a list of acl names and removes them from the given path
name
The acl path
acl_type
The type of the acl is used for, it can be 'user' or 'group'
acl_names
The list of users or groups
perms
Remove the permissions, e.g. rwx
recurse
Set the permissions recursively on the path
|
28,377 |
def set_version(request, response):
settings = request.registry.settings
resolver = DottedNameResolver()
version_header = settings.get(
,
,
)
version_header_value = settings.get()
if callable(version_header_value):
version_header_value = version_header_value()
elif version_header_value:
version_header_value = resolver.resolve(version_header_value)
revision_header = settings.get(
,
,
)
revision_header_value = settings.get()
if callable(revision_header_value):
revision_header_value = revision_header_value()
elif revision_header_value:
revision_header_value = resolver.resolve(revision_header_value)
if version_header and version_header_value:
response.headers[str(version_header)] = str(version_header_value)
if revision_header and revision_header_value:
response.headers[str(revision_header)] = str(revision_header_value)
|
Set version and revision headers on the response
|
28,378 |
def p_expr_function(p):
p[0] = ast.Closure(p[4], p[6], p[8], p[2], lineno=p.lineno(1))
|
expr : FUNCTION is_reference LPAREN parameter_list RPAREN lexical_vars LBRACE inner_statement_list RBRACE
|
28,379 |
def _check_operators(self, operators):
    if not isinstance(operators, (list, tuple, np.ndarray)):
        raise TypeError('Invalid input type, operators must be a list, '
                        'tuple or numpy array.')
    operators = np.array(operators)
    if not operators.size:
        raise ValueError('Operator list is empty.')
    for operator in operators:
        if not hasattr(operator, 'op'):
            raise ValueError('Operators must contain "op" method.')
        if not hasattr(operator, 'cost'):
            raise ValueError('Operators must contain "cost" method.')
        operator.op = check_callable(operator.op)
        operator.cost = check_callable(operator.cost)
    return operators
|
Check Inputs
This method cheks that the input operators and weights are correctly
formatted
Parameters
----------
operators : list, tuple or np.ndarray
List of linear operator class instances
Returns
-------
np.array operators
Raises
------
TypeError
For invalid input type
|
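A minimal sketch of an operator that would pass these checks; the class name and return values are invented for illustration, and check_callable is assumed to simply wrap the attribute.
import numpy as np

class IdentityOperator(object):
    """Toy operator exposing the 'op' and 'cost' interface the check expects."""

    def op(self, data):
        return data          # forward operator: identity

    def cost(self, *args, **kwargs):
        return 0.0           # contribution to the objective function

operators = [IdentityOperator(), IdentityOperator()]
# _check_operators(operators) would return np.array(operators) with op/cost
# wrapped; an empty list or an object lacking 'op'/'cost' raises ValueError.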
28,380 |
def land_cover_analysis_summary_report(feature, parent):
_ = feature, parent
analysis_dir = get_analysis_dir(exposure_land_cover['key'])
if analysis_dir:
return get_impact_report_as_string(analysis_dir)
return None
|
Retrieve an HTML land cover analysis table report from a multi exposure
analysis.
|
28,381 |
def to_workspace_value(self, result, assets):
if self.dtype == int64_dtype:
return super(Classifier, self).to_workspace_value(result, assets)
assert isinstance(result.values, pd.Categorical), (
    'Expected a Categorical, got %r.' % type(result.values)
)
with_missing = pd.Series(
data=pd.Categorical(
result.values,
result.values.categories.union([self.missing_value]),
),
index=result.index,
)
return LabelArray(
super(Classifier, self).to_workspace_value(
with_missing,
assets,
),
self.missing_value,
)
|
Called with the result of a pipeline. This needs to return an object
which can be put into the workspace to continue doing computations.
This is the inverse of :func:`~zipline.pipeline.term.Term.postprocess`.
|
28,382 |
def name(self, node, children):
return self.env.get(node.text.strip(), -1)
|
name = ~"[a-z]+" _
|
28,383 |
def upsert(self, body, raise_exc=True, headers=False, files=None):
return self._request('PUT', body, raise_exc, headers, files)
|
Performs an HTTP PUT to the server. This is an idempotent
call that will create the resource this navigator is pointing
to, or will update it if it already exists.
`body` may either be a string or a dictionary representing json
`headers` are additional headers to send in the request
|
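A hedged usage sketch; the API root URL and the link relation name are placeholders and not taken from the library's documentation.
from restnavigator import Navigator

api = Navigator.hal("http://api.example.com/")   # placeholder HAL API root
profile = api["app:profile"]                     # follow a link relation to a resource
profile.upsert({"display_name": "Ada"})          # idempotent PUT of the new resource state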
28,384 |
def save_current(self):
if self.current_widget() is not None:
editor = self.current_widget()
self._save(editor)
|
Save current editor. If the editor.file.path is None, a save as dialog
will be shown.
|
28,385 |
def link_source_files(generator):
posts = [
getattr(generator, attr, None) for attr in PROCESS
if getattr(generator, attr, None) is not None]
for post in posts[0]:
if not in generator.settings and \
not in generator.settings:
return
if in generator.settings or \
in post.metadata:
show_source_filename = generator.settings.get(
, .format(post.slug)
)
try:
source_out = os.path.join(
post.settings[], post.save_as
)
source_out_path = os.path.split(source_out)[0]
copy_to = os.path.join(
source_out_path, show_source_filename
)
source_url = urljoin(
post.save_as, show_source_filename
)
except Exception:
return
out = dict()
out[] = post.source_path
out[] = copy_to
logger.debug(, post.source_path, copy_to)
source_files.append(out)
post.show_source_url = source_url
|
Processes each article/page object and formulates copy from and copy
to destinations, as well as adding a source file URL as an attribute.
|
28,386 |
def get_end_offset( self, value, parent=None, index=None ):
return self.get_start_offset( value, parent, index ) + self.get_size( value, parent, index )
|
Return the end offset of the Field's data. Useful for chainloading.
value
Input Python object to process.
parent
Parent block object where this Field is defined. Used for e.g.
evaluating Refs.
index
Index of the Python object to measure from. Used if the Field
takes a list of objects.
|
28,387 |
def _split(self, iterator, tmp_dir):
fnames = []
for i, lines in enumerate(iterator):
lines = list(lines)
out_fname = os.path.join(tmp_dir, self.TMP_FNAME.format(i + 1))
self._write(lines, out_fname)
fnames.append(out_fname)
if len(lines) < self.max_lines:
break
return fnames
|
Splits the file into several chunks.
If the original file is too big to fit in the allocated space, the sorting will be split into several chunks,
then merged.
:param tmp_dir: Where to put the intermediate sorted results.
:param orig_lines: The lines read before running out of space.
:return: The names of the intermediate files.
|
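An illustrative, self-contained sketch of the same splitting idea (not the library's actual driver code): read a large file lazily, take fixed-size groups of lines, sort each group, and write it to its own chunk file. The file names and chunk size are placeholders.
import os
import tempfile
from itertools import islice

def iter_chunks(fp, max_lines):
    # Yield successive groups of at most max_lines lines from the open file.
    while True:
        lines = list(islice(fp, max_lines))
        if not lines:
            break
        yield lines

tmp_dir = tempfile.mkdtemp()
chunk_files = []
with open("big_input.txt") as fp:                      # placeholder input file
    for i, lines in enumerate(iter_chunks(fp, 100000)):
        lines.sort()
        chunk_path = os.path.join(tmp_dir, "chunk_{0}.txt".format(i + 1))
        with open(chunk_path, "w") as out:
            out.writelines(lines)
        chunk_files.append(chunk_path)
# The sorted chunks can then be k-way merged (e.g. with heapq.merge over the
# reopened chunk files) to produce the fully sorted output.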
28,388 |
def get(self, sid):
return UserContext(self._version, service_sid=self._solution['service_sid'], sid=sid, )
|
Constructs a UserContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.chat.v2.service.user.UserContext
:rtype: twilio.rest.chat.v2.service.user.UserContext
|
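A hedged usage sketch of reaching this UserContext through the Twilio Python client; the account credentials and SIDs are placeholders.
from twilio.rest import Client

client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
service = client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")

# .users.get(sid) only builds the UserContext; .fetch() performs the HTTP
# request and returns a UserInstance.
user_context = service.users.get("USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
user = user_context.fetch()
print(user.identity)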
28,389 |
def unwrap(self):
red = [self.red[i]/65535.0 for i in range(self.size)]
green = [self.green[i]/65535.0 for i in range(self.size)]
blue = [self.blue[i]/65535.0 for i in range(self.size)]
return red, green, blue
|
Returns a nested python sequence.
|
28,390 |
def get_flash_crypt_config(self):
word0 = self.read_efuse(0)
rd_disable = (word0 >> 19) & 0x1
if rd_disable == 0:
word5 = self.read_efuse(5)
word5 = (word5 >> 28) & 0xF
return word5
else:
return 0xF
|
bit 3 in efuse_rd_disable[3:0] is mapped to flash_crypt_config
this bit is at position 19 in EFUSE_BLK0_RDATA0_REG
|
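A small worked example of the bit arithmetic with an invented register value, to make the mapping explicit.
word5 = 0xA5000000                        # invented sample of EFUSE_BLK0_RDATA5_REG
flash_crypt_config = (word5 >> 28) & 0xF  # keep only bits [31:28]
print(hex(flash_crypt_config))            # -> 0xa
# If bit 19 of word 0 (rd_disable bit 3) is set, the register can no longer be
# read back, so the method conservatively reports 0xF instead.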
28,391 |
def set_prewarp(self, prewarp):
prewarp = _convert_to_charp(prewarp)
self._set_prewarp_func(self.alpr_pointer, prewarp)
|
Updates the prewarp configuration used to skew images in OpenALPR before
processing.
:param prewarp: A unicode/ascii string (Python 2/3) or bytes array (Python 3)
:return: None
|
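A hedged usage sketch; the country code, file paths, image, and the prewarp calibration string are placeholders, not recommended values.
from openalpr import Alpr

alpr = Alpr("us", "/etc/openalpr/openalpr.conf", "/usr/share/openalpr/runtime_data")
if alpr.is_loaded():
    # Placeholder calibration string in OpenALPR's "planar,..." prewarp format.
    alpr.set_prewarp("planar,1280.0,720.0,0.0,0.0,0.0,1.0,1.0,1.0,0.0,0.0")
    results = alpr.recognize_file("/tmp/plate.jpg")
alpr.unload()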
28,392 |
def open(self, key):
try:
return self.cache.open(key)
except KeyError:
fp = self._dstore.open(key)
self.cache.put_file(key, fp)
return self.cache.open(key)
except IOError:
return self._dstore.open(key)
|
Implementation of :meth:`~simplekv.KeyValueStore.open`.
If a cache miss occurs, the value is retrieved, stored in the cache,
and then another open is issued on the cache.
If the cache raises an :exc:`~exceptions.IOError`, the cache is
ignored, and the backing store is consulted directly.
It is possible for a caching error to occur while attempting to store
the value in the cache; such an error is not handled either.
|
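A hedged sketch of how such a cache-decorated store is typically assembled from simplekv building blocks; the import paths, constructor keywords, and directory are assumptions based on simplekv's usual layout.
from simplekv.cache import CacheDecorator
from simplekv.fs import FilesystemStore
from simplekv.memory import DictStore

backing = FilesystemStore("./data")                   # slower, authoritative store
store = CacheDecorator(cache=DictStore(), store=backing)

store.put("greeting", b"hello")
fp = store.open("greeting")   # first open fills the cache, later opens hit it
print(fp.read())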
28,393 |
def _acceptance_prob(self, position, position_bar, momentum, momentum_bar):
_, logp = self.grad_log_pdf(position, self.model).get_gradient_log_pdf()
_, logp_bar = self.grad_log_pdf(position_bar, self.model).get_gradient_log_pdf()
potential_change = logp_bar - logp
kinetic_change = 0.5 * np.float(np.dot(momentum_bar.T, momentum_bar) - np.dot(momentum.T, momentum))
return np.exp(potential_change - kinetic_change)
|
Returns the acceptance probability for the proposed position (position_bar) and momentum (momentum_bar)
|
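A toy numeric check of the same Metropolis ratio with invented values, showing how the potential and kinetic terms combine.
import numpy as np

logp, logp_bar = -12.0, -11.2                  # log densities at the old / proposed positions
momentum = np.array([[0.5], [-0.3]])
momentum_bar = np.array([[0.2], [0.1]])

potential_change = logp_bar - logp             # 0.8
kinetic_change = 0.5 * (momentum_bar.T @ momentum_bar - momentum.T @ momentum).item()
alpha = np.exp(potential_change - kinetic_change)
print(alpha)   # values above 1 mean the proposal is always accepted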
28,394 |
def select_specimen(self, specimen):
try:
fit_index = self.pmag_results_data['specimens'][self.s].index(
    self.current_fit)
except KeyError:
fit_index = None
except ValueError:
fit_index = None
self.initialize_CART_rot(specimen)
self.list_bound_loc = 0
if fit_index != None and self.s in self.pmag_results_data['specimens']:
try:
self.current_fit = self.pmag_results_data['specimens'][self.s][fit_index]
except IndexError:
self.current_fit = None
else:
self.current_fit = None
if self.s != self.specimens_box.GetValue():
self.specimens_box.SetValue(self.s)
|
Goes through the calculations necessary to plot measurement data for
specimen and sets specimen as current GUI specimen, also attempts to
handle changing current fit.
|
28,395 |
def _get_inline_translations(self, request, language_code, obj=None):
inline_instances = self.get_inline_instances(request, obj=obj)
for inline in inline_instances:
if issubclass(inline.model, TranslatableModelMixin):
filters = {
    'language_code': language_code,
    rel_name: obj
}
for translations_model in inline.model._parler_meta.get_all_models():
qs = translations_model.objects.filter(**filters)
if obj is not None:
qs = qs.using(obj._state.db)
yield inline, qs
|
Fetch the inline translations
|
28,396 |
def insert_text(self, s, from_undo=False):
return super().insert_text(
''.join(c for c in s if c in '0123456789'),
from_undo
)
|
Natural numbers only.
|
28,397 |
def _inject_closure_values_fix_code(c, injected, **kwargs):
c.freevars += injected
for i, (opcode, value) in enumerate(c.code):
if opcode == byteplay.LOAD_GLOBAL and value in kwargs:
c.code[i] = byteplay.LOAD_DEREF, value
_inject_closure_values_fix_closures(c, injected, **kwargs)
return c
|
Fix code objects, recursively fixing any closures
|
28,398 |
def _open_list(self, list_type):
if list_type in LIST_TYPES.keys():
tag = LIST_TYPES[list_type]
else:
raise Exception()
html = .format(
t=tag,
c=list_type
)
self.cleaned_html += html
self.current_parent_element[] = LIST_TYPES[list_type]
self.current_parent_element[] = {: list_type}
|
Add an open list tag corresponding to the specification in the
parser's LIST_TYPES.
|
28,399 |
def _drag_col(self, event):
x = self._dx + event.x
self._visual_drag.place_configure(x=x)
if (self._dragged_col_neighbor_widths[0] is not None and
x < self._dragged_col_x - self._dragged_col_neighbor_widths[0] / 2):
self._swap_columns()
elif (self._dragged_col_neighbor_widths[1] is not None and
x > self._dragged_col_x + self._dragged_col_neighbor_widths[1] / 2):
self._swap_columns()
if x < 0 and self.xview()[0] > 0:
self.xview_scroll(-10, 'units')
self._dragged_col_x += 10
elif x + self._dragged_col_width / 2 > self.winfo_width() and self.xview()[1] < 1:
self.xview_scroll(10, 'units')
self._dragged_col_x -= 10
|
Continue dragging a column
|