Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---|
7,100 | def safe_evaluate(command, glob, local):
while True:
try:
return eval(command, glob, local)
except NameError as e:
match = re.match("name '(.*)' is not defined", e.message)
if not match:
raise e
try:
exec ('import %s' % (match.group(1), )) in glob
except ImportError:
raise e | Continue to attempt to execute the given command, importing objects which
cause a NameError in the command
:param command: command for eval
:param glob: globals dict for eval
:param local: locals dict for eval
:return: command result |
7,101 | def _remove_summary(self):
if self.size > 0:
print("\nRemoved summary")
print("=" * 79)
print("{0}Size of removed packages {1} {2}.{3}".format(
self.meta.color["GREY"], round(self.size, 2), self.unit,
self.meta.color["ENDC"])) | Removed package size summary |
7,102 | def show_instance(name, call=None):
local = salt.client.LocalClient()
ret = local.cmd(name, 'grains.items')
ret.update(_build_required_items(ret))
return ret | List a single node, return dict of grains. |
7,103 | def set_alarm_mode(self, mode):
values = {"desired_state": {"alarm_mode": mode}}
response = self.api_interface.set_device_state(self, values)
self._update_state_from_response(response) | :param mode: one of [None, "activity", "tamper", "forced_entry"]
:return: nothing |
7,104 | def add_service(self, service_type, service_endpoint=None, values=None):
if isinstance(service_type, Service):
service = service_type
else:
service = Service(service_endpoint, service_type, values, did=self._did)
logger.debug(f)
self._services.append(service) | Add a service to the list of services on the DDO.
:param service_type: Service
:param service_endpoint: Service endpoint, str
:param values: Python dict with serviceDefinitionId, templateId, serviceAgreementContract,
list of conditions and consume endpoint. |
7,105 | def arity_evaluation_checker(function):
is_class = inspect.isclass(function)
if is_class:
function = function.__init__
function_info = inspect.getargspec(function)
function_args = function_info.args
if is_class:
function_args = function_args[1:]
def evaluation_checker(*args, **kwargs):
kwarg_keys = set(kwargs.keys())
if function_info.keywords is None:
acceptable_kwargs = function_args[len(args):]
if not kwarg_keys.issubset(acceptable_kwargs):
TypeError("Unrecognized Arguments: {0}".format(
[key for key in kwarg_keys
if key not in acceptable_kwargs]
))
needed_args = function_args[len(args):]
if function_info.defaults:
needed_args = needed_args[:-len(function_info.defaults)]
return not needed_args or kwarg_keys.issuperset(needed_args)
return evaluation_checker | Build an evaluation checker that will return True when it is
guaranteed that all positional arguments have been accounted for. |
7,106 | def cached_property(func):
name = func.__name__
doc = func.__doc__
def getter(self, name=name):
try:
return self.__dict__[name]
except KeyError:
self.__dict__[name] = value = func(self)
return value
getter.func_name = name
return property(getter, doc=doc) | Special property decorator that caches the computed
property value in the object's instance dict the first
time it is accessed. |
7,107 | def _parse_name(self, team_data):
name = team_data()
name = re.sub(r, , str(name))
name = re.sub(r, , name)
setattr(self, '_name', name) | Parses the team's name.
On the pages being parsed, the team's name doesn't follow the standard
parsing algorithm that we use for the fields, and requires a special
one-off algorithm. The name is attached in the 'title' attribute from
within 'team_ID'. A few simple regex subs captures the team name. The
'_name' attribute is applied with the captured team name from this
function.
Parameters
----------
team_data : string
A string containing all of the rows of stats for a given team. If
multiple tables are being referenced, this will be comprised of
multiple rows in a single string. |
7,108 | def deploy_s3app(self):
utils.banner("Deploying S3 App")
primary_region = self.configs[][]
s3obj = s3.S3Deployment(
app=self.app,
env=self.env,
region=self.region,
prop_path=self.json_path,
artifact_path=self.artifact_path,
artifact_version=self.artifact_version,
primary_region=primary_region)
s3obj.upload_artifacts() | Deploys artifacts contents to S3 bucket |
7,109 | def render_js_template(self, template_path, element_id, context=None):
context = context or {}
return u"<script type= id=>\n{}\n</script>".format(
element_id,
self.render_template(template_path, context)
) | Render a js template. |
7,110 | def color_pack2rgb(packed):
r = packed & 255
g = (packed & (255 << 8)) >> 8
b = (packed & (255 << 16)) >> 16
return r, g, b | Returns r, g, b tuple from packed wx.ColourGetRGB value |
7,111 | def calculate_ecef_velocity(inst):
x = inst['position_ecef_x']
vel_x = (x.values[2:] - x.values[0:-2])/2.
y = inst['position_ecef_y']
vel_y = (y.values[2:] - y.values[0:-2])/2.
z = inst['position_ecef_z']
vel_z = (z.values[2:] - z.values[0:-2])/2.
inst[1:-1, 'velocity_ecef_x'] = vel_x
inst[1:-1, 'velocity_ecef_y'] = vel_y
inst[1:-1, 'velocity_ecef_z'] = vel_z
inst.meta[] = {:,
:}
inst.meta[] = {:,
:}
inst.meta[] = {:,
:}
return | Calculates spacecraft velocity in ECEF frame.
Presumes that the spacecraft position in ECEF is in
the input instrument object as position_ecef_*. Uses a symmetric
difference to calculate the velocity thus endpoints will be
set to NaN. Routine should be run using pysat data padding feature
to create valid end points.
Parameters
----------
inst : pysat.Instrument
Instrument object
Returns
-------
None
Modifies pysat.Instrument object in place to include ECEF velocity
using naming scheme velocity_ecef_* (*=x,y,z) |
7,112 | def nfa_json_importer(input_file: str) -> dict:
file = open(input_file)
json_file = json.load(file)
transitions = {}
for p in json_file['transitions']:
transitions.setdefault((p[0], p[1]), set()).add(p[2])
nfa = {
'alphabet': set(json_file['alphabet']),
'states': set(json_file['states']),
'initial_states': set(json_file['initial_states']),
'accepting_states': set(json_file['accepting_states']),
'transitions': transitions
}
return nfa | Imports a NFA from a JSON file.
:param str input_file: path+filename to JSON file;
:return: *(dict)* representing a NFA. |
7,113 | def get_processor_status(self, p, x, y):
address = (self.read_struct_field("sv", "vcpu_base", x, y) +
self.structs[b"vcpu"].size * p)
data = self.read(address, self.structs[b"vcpu"].size, x, y)
state = {
name.decode(): struct.unpack(
f.pack_chars,
data[f.offset:f.offset+struct.calcsize(f.pack_chars)]
)[0] for (name, f) in iteritems(self.structs[b"vcpu"].fields)
}
state["registers"] = [state.pop("r{}".format(i)) for i in range(8)]
state["user_vars"] = [state.pop("user{}".format(i)) for i in range(4)]
state["app_name"] = state["app_name"].strip(b).decode()
state["cpu_state"] = consts.AppState(state["cpu_state"])
state["rt_code"] = consts.RuntimeException(state["rt_code"])
sw_ver = state.pop("sw_ver")
state["version"] = ((sw_ver >> 16) & 0xFF,
(sw_ver >> 8) & 0xFF,
(sw_ver >> 0) & 0xFF)
for newname, oldname in [("iobuf_address", "iobuf"),
("program_state_register", "psr"),
("stack_pointer", "sp"),
("link_register", "lr"), ]:
state[newname] = state.pop(oldname)
state.pop("__PAD")
return ProcessorStatus(**state) | Get the status of a given core and the application executing on it.
Returns
-------
:py:class:`.ProcessorStatus`
Representation of the current state of the processor. |
7,114 | def get_out_streamids(self):
if self.outputs is None:
return set()
if not isinstance(self.outputs, (list, tuple)):
raise TypeError("Argument to outputs must be either list or tuple, given: %s"
% str(type(self.outputs)))
ret_lst = []
for output in self.outputs:
if not isinstance(output, (str, Stream)):
raise TypeError("Outputs must be a list of strings or Streams, given: %s" % str(output))
ret_lst.append(Stream.DEFAULT_STREAM_ID if isinstance(output, str) else output.stream_id)
return set(ret_lst) | Returns a set of output stream ids registered for this component |
7,115 | def parse_time_trigger_string(trigger_frequency):
trigger_frequency = ''.join(trigger_frequency.split())
if trigger_frequency.startswith(TRIGGER_PREAMBLE_AT):
trigger_frequency = trigger_frequency[len(TRIGGER_PREAMBLE_AT):]
parsed_trigger_frequency = trigger_frequency.replace(, ).replace(, ).split()
timer_klass = EventClock
elif trigger_frequency.startswith(TRIGGER_PREAMBLE_EVERY):
trigger_frequency = trigger_frequency[len(TRIGGER_PREAMBLE_EVERY):]
parsed_trigger_frequency = int(trigger_frequency)
timer_klass = RepeatTimer
else:
raise ValueError('Unsupported trigger frequency format: {0}'.format(trigger_frequency))  # error text reconstructed
return parsed_trigger_frequency, timer_klass | :param trigger_frequency: human-readable and editable string in one of two formats:
- 'at Day_of_Week-HH:MM, ..., Day_of_Week-HH:MM'
- 'every NNN'
:return: return tuple (parsed_trigger_frequency, timer_klass) |
7,116 | def altitude(SCALED_PRESSURE, ground_pressure=None, ground_temp=None):
from . import mavutil
self = mavutil.mavfile_global
if ground_pressure is None:
if self.param('GND_ABS_PRESS', None) is None:
return 0
ground_pressure = self.param('GND_ABS_PRESS', 1)
if ground_temp is None:
ground_temp = self.param('GND_TEMP', 0)
scaling = ground_pressure / (SCALED_PRESSURE.press_abs*100.0)
temp = ground_temp + 273.15
return log(scaling) * temp * 29271.267 * 0.001 | calculate barometric altitude |
7,117 | def get_nvr(self, epoch=None):
name = self.get_tag('Name', expand_macros=True)
vr = self.get_vr(epoch=epoch)
return '%s-%s' % (name, vr) | get NVR string from .spec Name, Version, Release and Epoch |
7,118 | def rowlengths(table):
counter = Counter()
for row in data(table):
counter[len(row)] += 1
output = [('length', 'count')]
output.extend(counter.most_common())
return wrap(output) | Report on row lengths found in the table. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar', 'baz'],
... ['A', 1, 2],
... ['B', '2', '3.4'],
... [u'B', u'3', u'7.8', True],
... ['D', 'xyz', 9.0],
... ['E', None],
... ['F', 9]]
>>> etl.rowlengths(table)
+--------+-------+
| length | count |
+========+=======+
| 3 | 3 |
+--------+-------+
| 2 | 2 |
+--------+-------+
| 4 | 1 |
+--------+-------+
Useful for finding potential problems in data files. |
7,119 | def cmd_antenna(self, args):
if len(args) != 2:
if self.gcs_location is None:
print("GCS location not set")
else:
print("GCS location %s" % str(self.gcs_location))
return
self.gcs_location = (float(args[0]), float(args[1])) | set gcs location |
7,120 | def bandpass_filter(data, k, w1, w2):
data = np.asarray(data)
low_w = np.pi * 2 / w2
high_w = np.pi * 2 / w1
bweights = np.zeros(2 * k + 1)
bweights[k] = (high_w - low_w) / np.pi
j = np.arange(1, int(k) + 1)
weights = 1 / (np.pi * j) * (sin(high_w * j) - sin(low_w * j))
bweights[k + j] = weights
bweights[:k] = weights[::-1]
bweights -= bweights.mean()
return fftconvolve(bweights, data, mode='valid') | This function will apply a bandpass filter to data. It will be kth
order and will select the band between w1 and w2.
Parameters
----------
data: array, dtype=float
The data you wish to filter
k: number, int
The order of approximation for the filter. A max value for
this is data.size/2
w1: number, float
This is the lower bound for which frequencies will pass
through.
w2: number, float
This is the upper bound for which frequencies will pass
through.
Returns
-------
y: array, dtype=float
The filtered data. |
7,121 | def list_modules(desc=False):
cmd = 'Get-InstalledModule'
modules = _pshell(cmd)
if isinstance(modules, dict):
ret = []
if desc:
modules_ret = {}
modules_ret[modules['Name']] = copy.deepcopy(modules)
modules = modules_ret
return modules
ret.append(modules['Name'])
return ret
names = []
if desc:
names = {}
for module in modules:
if desc:
names[module['Name']] = module
continue
names.append(module['Name'])
return names | List currently installed PSGet Modules on the system.
:param desc: If ``True``, the verbose description will be returned.
:type desc: ``bool``
CLI Example:
.. code-block:: bash
salt 'win01' psget.list_modules
salt 'win01' psget.list_modules desc=True |
7,122 | def unsetenv(key):
key = path2fsn(key)
if is_win:
try:
del_windows_env_var(key)
except WindowsError:
pass
else:
os.unsetenv(key) | Like `os.unsetenv` but takes unicode under Windows + Python 2
Args:
key (pathlike): The env var to unset |
7,123 | def n_at_a_time(
items: List[int], n: int, fillvalue: str
) -> Iterator[Tuple[Union[int, str]]]:
it = iter(items)
return itertools.zip_longest(*[it] * n, fillvalue=fillvalue) | Returns an iterator which groups n items at a time.
Any final partial tuple will be padded with the fillvalue
>>> list(n_at_a_time([1, 2, 3, 4, 5], 2, 'X'))
[(1, 2), (3, 4), (5, 'X')] |
7,124 | def simplex_select_entering_arc(self, t, pivot):
if pivot=='dantzig':
candidate = {}
for e in self.edge_attr:
if e in t.edge_attr:
continue
flow_ij = self.edge_attr[e]['flow']
potential_i = self.get_node(e[0]).get_attr('potential')
potential_j = self.get_node(e[1]).get_attr('potential')
capacity_ij = self.edge_attr[e]['capacity']
c_ij = self.edge_attr[e]['cost']
cpi_ij = c_ij - potential_i + potential_j
if flow_ij==0:
if cpi_ij < 0:
candidate[e] = cpi_ij
elif flow_ij==capacity_ij:
if cpi_ij > 0:
candidate[e] = cpi_ij
for e in candidate:
max_c = e
max_v = abs(candidate[e])
break
for e in candidate:
if max_v < abs(candidate[e]):
max_c = e
max_v = abs(candidate[e])
elif pivot=='first_eligible':
for e in self.edge_attr:
if e in t.edge_attr:
continue
flow_ij = self.edge_attr[e]['flow']
potential_i = self.get_node(e[0]).get_attr('potential')
potential_j = self.get_node(e[1]).get_attr('potential')
capacity_ij = self.edge_attr[e]['capacity']
c_ij = self.edge_attr[e]['cost']
cpi_ij = c_ij - potential_i + potential_j
if flow_ij==0:
if cpi_ij < 0:
max_c = e
max_v = abs(cpi_ij)
elif flow_ij==capacity_ij:
if cpi_ij > 0:
max_c = e
max_v = cpi_ij
else:
raise Exception("Unknown pivot rule.")
return max_c | API:
simplex_select_entering_arc(self, t, pivot)
Description:
Decides and returns entering arc using pivot rule.
Input:
t: current spanning tree solution
pivot: May be one of the following; 'first_eligible' or 'dantzig'.
'dantzig' is the default value.
Return:
Returns entering arc tuple (k,l) |
7,125 | def add_index(self, mode, blob_id, path):
self.command_exec(['update-index', '--add', '--cacheinfo', mode, blob_id, path]) | Add new entry to the current index
:param tree:
:return: |
7,126 | def split_qs(string, delimiter=):
open_list =
close_list =
quote_chars = '
level = index = last_index = 0
quoted = False
result = []
for index, letter in enumerate(string):
if letter in quote_chars:
if not quoted:
quoted = True
level += 1
else:
quoted = False
level -= 1
elif letter in open_list:
level += 1
elif letter in close_list:
level -= 1
elif letter == delimiter and level == 0:
element = string[last_index: index]
if element:
result.append(element)
last_index = index + 1
if index:
element = string[last_index: index + 1]
if element:
result.append(element)
return result | Split a string by the specified unquoted, not enclosed delimiter |
7,127 | def members(self):
if self.serialized:
return set(map(
self._loads, self._client.smembers(self.key_prefix)))
else:
return set(map(
self._decode, self._client.smembers(self.key_prefix))) | -> #set of all members in the set |
7,128 | def _comm_tensor_data(device_name,
node_name,
maybe_base_expanded_node_name,
output_slot,
debug_op,
tensor_value,
wall_time):
output_slot = int(output_slot)
logger.info(
, node_name, output_slot, debug_op)
tensor_values = None
if isinstance(tensor_value, debug_data.InconvertibleTensorProto):
if not tensor_value.initialized:
tensor_dtype = UNINITIALIZED_TAG
tensor_shape = UNINITIALIZED_TAG
else:
tensor_dtype = UNSUPPORTED_TAG
tensor_shape = UNSUPPORTED_TAG
tensor_values = NA_TAG
else:
tensor_dtype = tensor_helper.translate_dtype(tensor_value.dtype)
tensor_shape = tensor_value.shape
if tensor_helper.numel(tensor_shape) < 5:
_, _, tensor_values = tensor_helper.array_view(tensor_value)
if tensor_dtype == 'string' and tensor_value is not None:
tensor_values = tensor_helper.process_buffers_for_display(
tensor_values, limit=STRING_ELEMENT_MAX_LEN)
return {
'type': 'tensor',
'timestamp': wall_time,
'data': {
'device_name': device_name,
'node_name': node_name,
'maybe_base_expanded_node_name': maybe_base_expanded_node_name,
'output_slot': output_slot,
'debug_op': debug_op,
'dtype': tensor_dtype,
'shape': tensor_shape,
'values': tensor_values,
},
} | Create a dict() as the outgoing data in the tensor data comm route.
Note: The tensor data in the comm route does not include the value of the
tensor in its entirety in general. Only if a tensor satisfies the following
conditions will its entire value be included in the return value of this
method:
1. Has a numeric data type (e.g., float32, int32) and has fewer than 5
elements.
2. Is a string tensor and has fewer than 5 elements. Each string element is
up to 40 bytes.
Args:
device_name: Name of the device that the tensor is on.
node_name: (Original) name of the node that produces the tensor.
maybe_base_expanded_node_name: Possibly base-expanded node name.
output_slot: Output slot number.
debug_op: Name of the debug op.
tensor_value: Value of the tensor, as a numpy.ndarray.
wall_time: Wall timestamp for the tensor.
Returns:
A dict representing the tensor data. |
7,129 | def property_data_zpool():
property_data = _property_parse_cmd(_zpool_cmd(), {
: ,
: ,
: ,
: ,
: ,
})
zpool_size_extra = [
, ,
, ,
, ,
, ,
]
zpool_numeric_extra = [
, ,
]
for prop in zpool_size_extra:
property_data[prop] = {
: False,
: ,
: ,
}
for prop in zpool_numeric_extra:
property_data[prop] = {
: False,
: ,
: ,
}
return property_data | Return a dict of zpool properties
.. note::
Each property will have an entry with the following info:
- edit : boolean - is this property editable after pool creation
- type : str - either bool, bool_alt, size, numeric, or string
- values : str - list of possible values
.. warning::
This data is probed from the output of 'zpool get' with some supplemental
data that is hardcoded. There is no better way to get this information aside
from reading the code. |
7,130 | def get_language_description(grammar_file):
LOGGER.debug("> Processing grammar file.".format(grammar_file))
sections_file_parser = foundations.parsers.SectionsFileParser(grammar_file)
sections_file_parser.parse(strip_quotation_markers=False)
name = sections_file_parser.get_value("Name", "Language")
if not name:
raise LanguageGrammarError("{0} | '{1}' attribute not found in '{2}' file!".format(__name__,
"Language|Name",
grammar_file))
extensions = sections_file_parser.get_value("Extensions", "Language")
if not extensions:
raise LanguageGrammarError("{0} | '{1}' attribute not found in '{2}' file!".format(__name__,
"Language|Extensions",
grammar_file))
highlighter = get_object_from_language_accelerators(sections_file_parser.get_value("Highlighter", "Accelerators"))
completer = get_object_from_language_accelerators(sections_file_parser.get_value("Completer", "Accelerators"))
pre_input_accelerators = sections_file_parser.get_value("PreInputAccelerators", "Accelerators")
pre_input_accelerators = pre_input_accelerators and [get_object_from_language_accelerators(accelerator)
for accelerator in pre_input_accelerators.split("|")] or ()
post_input_accelerators = sections_file_parser.get_value("PostInputAccelerators", "Accelerators")
post_input_accelerators = post_input_accelerators and [get_object_from_language_accelerators(accelerator)
for accelerator in post_input_accelerators.split("|")] or ()
visual_accelerators = sections_file_parser.get_value("VisualAccelerators", "Accelerators")
visual_accelerators = visual_accelerators and [get_object_from_language_accelerators(accelerator)
for accelerator in visual_accelerators.split("|")] or ()
indent_marker = sections_file_parser.section_exists("Syntax") and sections_file_parser.get_value("IndentMarker",
"Syntax") or \
DEFAULT_INDENT_MARKER
comment_marker = sections_file_parser.section_exists("Syntax") and \
sections_file_parser.get_value("CommentMarker", "Syntax") or ""
comment_block_marker_start = sections_file_parser.section_exists("Syntax") and \
sections_file_parser.get_value("CommentBlockMarkerStart", "Syntax") or ""
comment_block_marker_end = sections_file_parser.section_exists("Syntax") and \
sections_file_parser.get_value("CommentBlockMarkerEnd", "Syntax") or ""
symbols_pairs = sections_file_parser.section_exists("Syntax") and \
sections_file_parser.get_value("SymbolsPairs", "Syntax") or {}
if symbols_pairs:
associated_pairs = foundations.data_structures.Lookup()
for pair in symbols_pairs.split("|"):
associated_pairs[pair[0]] = pair[1]
symbols_pairs = associated_pairs
indentation_symbols = sections_file_parser.section_exists("Syntax") and \
sections_file_parser.get_value("IndentationSymbols", "Syntax")
indentation_symbols = indentation_symbols and indentation_symbols.split("|") or ()
rules = []
attributes = sections_file_parser.sections.get("Rules")
if attributes:
for attribute in sections_file_parser.sections["Rules"]:
pattern = sections_file_parser.get_value(attribute, "Rules")
rules.append(umbra.ui.highlighters.Rule(name=foundations.namespace.remove_namespace(attribute),
pattern=QRegExp(pattern)))
tokens = []
dictionary = sections_file_parser.get_value("Dictionary", "Accelerators")
if dictionary:
dictionary_file = os.path.join(os.path.dirname(grammar_file), dictionary)
if foundations.common.path_exists(dictionary_file):
with open(dictionary_file, "r") as file:
for line in iter(file):
line = line.strip()
line and tokens.append(line)
else:
LOGGER.warning(
"!> {0} | language dictionary file doesn{0}{0}{0}{1}'.".format(attribute, value))
return Language(**attributes) | Gets the language description from given language grammar file.
:param grammar_file: Language grammar.
:type grammar_file: unicode
:return: Language description.
:rtype: Language |
7,131 | async def get_updates(self, offset: typing.Union[base.Integer, None] = None,
limit: typing.Union[base.Integer, None] = None,
timeout: typing.Union[base.Integer, None] = None,
allowed_updates:
typing.Union[typing.List[base.String], None] = None) -> typing.List[types.Update]:
allowed_updates = prepare_arg(allowed_updates)
payload = generate_payload(**locals())
result = await self.request(api.Methods.GET_UPDATES, payload)
return [types.Update(**update) for update in result] | Use this method to receive incoming updates using long polling (wiki).
Notes
1. This method will not work if an outgoing webhook is set up.
2. In order to avoid getting duplicate updates, recalculate offset after each server response.
Source: https://core.telegram.org/bots/api#getupdates
:param offset: Identifier of the first update to be returned
:type offset: :obj:`typing.Union[base.Integer, None]`
:param limit: Limits the number of updates to be retrieved
:type limit: :obj:`typing.Union[base.Integer, None]`
:param timeout: Timeout in seconds for long polling
:type timeout: :obj:`typing.Union[base.Integer, None]`
:param allowed_updates: List the types of updates you want your bot to receive
:type allowed_updates: :obj:`typing.Union[typing.List[base.String], None]`
:return: An Array of Update objects is returned
:rtype: :obj:`typing.List[types.Update]` |
7,132 | def get_position_i(self):
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(POSITION_KI_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
return (ord(rxdata[10])*256)+(ord(rxdata[9])&0xff)
except HerkulexError:
raise HerkulexError("Could not read from motors") | Get the I value of the current PID for position |
7,133 | def create(self, email, tos=1, options=None):
data = {'email': email,
'terms_of_service': str(tos)}
if options:
data.update(options)
return self.post(self.base_url, body=json.dumps(data)) | Creates an account with Zencoder, no API Key necessary.
https://app.zencoder.com/docs/api/accounts/create |
7,134 | def save(self, name, content):
if name is None:
name = content.name
name = self.get_available_name(name)
name = self._save(name, content)
return name.replace("\\", "/") | Saves new content to the file specified by name. The content should be a
proper File object, ready to be read from the beginning. |
7,135 | def updatej9DB(dbname = abrevDBname, saveRawHTML = False):
if saveRawHTML:
rawDir = .format(os.path.dirname(__file__))
if not os.path.isdir(rawDir):
os.mkdir(rawDir)
_j9SaveCurrent(sDir = rawDir)
dbLoc = os.path.join(os.path.normpath(os.path.dirname(__file__)), dbname)
try:
with dbm.dumb.open(dbLoc, flag = ) as db:
try:
j9Dict = _getCurrentj9Dict()
except urllib.error.URLError:
raise urllib.error.URLError("Unable to access server, check your connection")
for k, v in j9Dict.items():
if k in db:
for jName in v:
if jName not in j9Dict[k]:
j9Dict[k] += + jName
else:
db[k] = .join(v)
except dbm.dumb.error as e:
raise JournalDataBaseError("Something happened with the database of WOS journal names. To fix this you should delete the 1 to 3 files whose names start with {}. If this doesn{}{}'".format(dbLoc, os.path.dirname(__file__), e)) | Updates the database of Journal Title Abbreviations. Requires an internet connection. The data base is saved relative to the source file not the working directory.
# Parameters
_dbname_ : `optional [str]`
> The name of the database file, default is "j9Abbreviations.db"
_saveRawHTML_ : `optional [bool]`
> Determines if the original HTML of the pages is stored, default `False`. If `True` they are saved in a directory inside j9Raws beginning with today's date. |
7,136 | def execute(self):
if not in self.params:
raise MMException("Please include the type of log, or ")
if not in self.params:
raise MMException("Please include debug categories in dictionary format: e.g.: {:, :}")
request = {}
if self.params[] == :
request[] = None
request[] = self.params.get(, config.sfdc_client.user_id)
elif self.params[] == :
request[] = config.sfdc_client.user_id
request[] = self.params[]
for c in self.params[]:
if in c:
request[c[]] = c[]
else:
request[c] = self.params[][c]
request[] = util.get_iso_8601_timestamp(int(float(self.params.get(, 30))))
config.logger.debug(self.params[])
config.logger.debug("Log creation reuqest--->")
config.logger.debug(request)
create_result = config.sfdc_client.create_trace_flag(request)
config.logger.debug("Log creation response--->")
config.logger.debug(create_result)
if type(create_result) is list:
create_result = create_result[0]
if type(create_result) is not str and type(create_result) is not unicode:
return json.dumps(create_result)
else:
return create_result | params = {
"ApexCode" : "None",
"ApexProfiling" : "01pd0000001yXtYAAU",
"Callout" : True,
"Database" : 1,
"ExpirationDate" : 3,
"ScopeId" : "",
"System" : "",
"TracedEntityId" : "",
"Validation" : "",
"Visualforce" : "",
"Workflow" : ""
} |
7,137 | def tie_properties(self, class_list):
log.setLevel(self.log_level)
start = datetime.datetime.now()
log.info(" Tieing properties to the class")
for cls_name in class_list:
cls_obj = getattr(MODULE.rdfclass, cls_name)
prop_dict = dict(cls_obj.properties)
for prop_name, prop_obj in cls_obj.properties.items():
setattr(cls_obj, prop_name, link_property(prop_obj, cls_obj))
log.info(" Finished tieing properties in: %s",
(datetime.datetime.now() - start)) | Runs through the classes and ties the properties to the class
args:
class_list: a list of class names to run |
7,138 | def change_email(self, email):
self.email_unconfirmed = email
salt, hash = generate_sha1(self.username)
self.email_confirmation_key = hash
self.email_confirmation_key_created = get_datetime_now()
self.save()
self.send_confirmation_email()
return self | Changes the email address for a user.
A user needs to verify this new email address before it becomes
active. By storing the new email address in a temporary field
-- ``temporary_email`` -- we are able to set this email address
after the user has verified it by clicking on the verification URI
in the email. This email gets sent out by ``send_verification_email``.
:param email:
The new email address that the user wants to use. |
7,139 | def readBIM(fileName):
snps = set()
with open(fileName, "r") as inputFile:
for line in inputFile:
row = line.rstrip("\r\n").split("\t")
snpName = row[1]
snps.add(snpName)
return snps | Reads a BIM file.
:param fileName: the name of the BIM file to read.
:type fileName: str
:returns: the set of markers in the BIM file.
Reads a Plink BIM file and extract the name of the markers. There is one
marker per line, and the name of the marker is in the second column. There
is no header in the BIM file. |
7,140 | def get_dataset(self, key, info):
if self.mdrs is None:
self._read_all(self.filename)
if key.name in ['longitude', 'latitude']:
lons, lats = self.get_full_lonlats()
if key.name == 'longitude':
dataset = create_xarray(lons)
else:
dataset = create_xarray(lats)
elif key.name in ['solar_zenith_angle', 'solar_azimuth_angle',
'satellite_zenith_angle', 'satellite_azimuth_angle']:
sun_azi, sun_zen, sat_azi, sat_zen = self.get_full_angles()
if key.name == 'solar_zenith_angle':
dataset = create_xarray(sun_zen)
elif key.name == 'solar_azimuth_angle':
dataset = create_xarray(sun_azi)
if key.name == 'satellite_zenith_angle':
dataset = create_xarray(sat_zen)
elif key.name == 'satellite_azimuth_angle':
dataset = create_xarray(sat_azi)
else:
mask = None
if key.calibration == :
raise ValueError( +
)
elif key.calibration not in [, , ]:
raise ValueError( + str(key.calibration) +
)
if key.name in ['3a', '3A'] and self.three_a_mask is None:
self.three_a_mask = ((self["FRAME_INDICATOR"] & 2 ** 16) != 2 ** 16)
if key.name in ['3b', '3B'] and self.three_b_mask is None:
self.three_b_mask = ((self["FRAME_INDICATOR"] & 2 ** 16) != 0)
if key.name not in ["1", "2", "3a", "3A", "3b", "3B", "4", "5"]:
LOG.info("Canreflectancereflectancereflectancebrightness_temperaturebrightness_temperaturebrightness_temperatureplatform_namesensor'] = self.sensor_name
dataset.attrs.update(info)
dataset.attrs.update(key.to_dict())
return dataset | Get calibrated channel data. |
7,141 | def put(self, namespacePrefix):
self.reqparse.add_argument('name', type=str, required=True)
self.reqparse.add_argument('sortOrder', type=int, required=True)
args = self.reqparse.parse_args()
ns = db.ConfigNamespace.find_one(ConfigNamespace.namespace_prefix == namespacePrefix)
if not ns:
return self.make_response(.format(namespacePrefix), HTTP.NOT_FOUND)
ns.name = args['name']
ns.sort_order = args['sortOrder']
db.session.add(ns)
db.session.commit()
self.dbconfig.reload_data()
auditlog(event=, actor=session[].username, data=args)
return self.make_response() | Update a specific configuration namespace |
7,142 | def AddExtraShapes(extra_shapes_txt, graph):
print("Adding extra shapes from %s" % extra_shapes_txt)
try:
tmpdir = tempfile.mkdtemp()
shutil.copy(extra_shapes_txt, os.path.join(tmpdir, 'shapes.txt'))
loader = transitfeed.ShapeLoader(tmpdir)
schedule = loader.Load()
for shape in schedule.GetShapeList():
print("Adding extra shape: %s" % shape.shape_id)
graph.AddPoly(ShapeToPoly(shape))
finally:
if tmpdir:
shutil.rmtree(tmpdir) | Add extra shapes into our input set by parsing them out of a GTFS-formatted
shapes.txt file. Useful for manually adding lines to a shape file, since it's
a pain to edit .shp files. |
7,143 | def multicat(data, samples, ipyclient):
start = time.time()
printstr = " indexing clusters | {} | s6 |"
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(20, 0, printstr.format(elapsed))
lbview = ipyclient.load_balanced_view()
last_sample = 0
cleanups = {}
cleanups[last_sample] = lbview.apply(time.sleep, 0.0)
snames = [i.name for i in samples]
snames.sort()
uhandle = os.path.join(data.dirs.across, data.name+".utemp.sort")
bseeds = os.path.join(data.dirs.across, data.name+".tmparrs.h5")
async1 = lbview.apply(get_seeds_and_hits, *(uhandle, bseeds, snames))
async2 = lbview.apply(fill_dups_arr, data)
while not (async1.ready() and async2.ready()):
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(20, 0, printstr.format(elapsed))
time.sleep(0.1)
if not async1.successful():
raise IPyradWarningExit("error in get_seeds: %s", async1.exception())
if not async2.successful():
raise IPyradWarningExit("error in fill_dups: %s", async2.exception())
smallview = ipyclient.load_balanced_view(targets=ipyclient.ids[::2])
smpios = [os.path.join(data.dirs.across, sample.name+) \
for sample in samples]
for smpio in smpios:
if os.path.exists(smpio):
os.remove(smpio)
jobs = {}
for sample in samples:
sidx = snames.index(sample.name)
jobs[sample.name] = smallview.apply(singlecat, *(data, sample, bseeds, sidx))
alljobs = len(jobs)
while 1:
curkeys = jobs.keys()
for key in curkeys:
async = jobs[key]
if async.ready():
if async.successful():
args = (data, data.samples[key], snames.index(key))
with lbview.temp_flags(after=cleanups[last_sample]):
cleanups[key] = lbview.apply(write_to_fullarr, *args)
last_sample = key
del jobs[key]
else:
err = jobs[key].exception()
errmsg = "singlecat error: {} {}".format(key, err)
raise IPyradWarningExit(errmsg)
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(alljobs, alljobs-len(jobs), printstr.format(elapsed))
time.sleep(0.1)
if not jobs:
break
if 'ref' in data.paramsdict["assembly_method"]:
with lbview.temp_flags(after=cleanups.values()):
cleanups['ref'] = lbview.apply(dask_chroms, *(data, samples))
print("")
start = time.time()
printstr = " building database | {} | s6 |"
while 1:
finished = [i for i in cleanups.values() if i.ready()]
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(len(cleanups), len(finished), printstr.format(elapsed))
time.sleep(0.1)
if not all([i.successful() for i in finished]):
break
if len(cleanups) == len(finished):
break
for job in cleanups:
if cleanups[job].ready():
if not cleanups[job].successful():
err = " error in write_to_fullarr ({}) {}"\
.format(job, cleanups[job].result())
LOGGER.error(err)
raise IPyradWarningExit(err)
ifile = os.path.join(data.dirs.across, data.name+".tmp.indels.hdf5")
if os.path.exists(ifile):
os.remove(ifile)
if os.path.exists(bseeds):
os.remove(bseeds)
for sh5 in [os.path.join(data.dirs.across, i.name+".tmp.h5") for i in samples]:
os.remove(sh5)
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(10, 10, printstr.format(elapsed))
print("") | Runs singlecat and cleanup jobs for each sample.
For each sample this fills its own hdf5 array with catg data & indels.
This is messy, could use simplifying. |
7,144 | def error_messages(self, driver_id=None):
if driver_id is not None:
assert isinstance(driver_id, ray.DriverID)
return self._error_messages(driver_id)
error_table_keys = self.redis_client.keys(
ray.gcs_utils.TablePrefix_ERROR_INFO_string + "*")
driver_ids = [
key[len(ray.gcs_utils.TablePrefix_ERROR_INFO_string):]
for key in error_table_keys
]
return {
binary_to_hex(driver_id): self._error_messages(
ray.DriverID(driver_id))
for driver_id in driver_ids
} | Get the error messages for all drivers or a specific driver.
Args:
driver_id: The specific driver to get the errors for. If this is
None, then this method retrieves the errors for all drivers.
Returns:
A dictionary mapping driver ID to a list of the error messages for
that driver. |
7,145 | def check_pdb_status(pdbid):
url = % pdbid
xmlf = urlopen(url)
xml = et.parse(xmlf)
xmlf.close()
status = None
current_pdbid = pdbid
for df in xml.xpath():
status = df.attrib[]
if status == :
current_pdbid = df.attrib[]
return [status, current_pdbid.lower()] | Returns the status and up-to-date entry in the PDB for a given PDB ID |
7,146 | def run(self, node, client):
perms = os.stat(self.source).st_mode
client.put(path=self.target, chmod=perms,
contents=open(self.source, 'rb').read())
return node | Upload the file, retaining permissions
See also L{Deployment.run} |
7,147 | def strptime(cls, value, format):
if cls.python_supports_z or '%z' not in format:
return datetime.strptime(value, format)
else:
assert format[-2:] == '%z', 'only a trailing %z is supported'  # assertion message reconstructed
dt = datetime.strptime(value[:-5], format[:-2])
tz = FixedOffset(value[-5:])
return dt.replace(tzinfo=tz) | Parse a datetime string using the provided format.
This also emulates `%z` support on Python 2.
:param value: Datetime string
:type value: str
:param format: Format to use for parsing
:type format: str
:rtype: datetime
:raises ValueError: Invalid format
:raises TypeError: Invalid input type |
7,148 | def _opbend_transform_mean(rs, fn_low, deriv=0):
v = 0.0
d = np.zeros((4,3), float)
dd = np.zeros((4,3,4,3), float)
for p in np.array([[0,1,2], [2,0,1], [1,2,0]]):
opbend = _opbend_transform([rs[p[0]], rs[p[1]], rs[p[2]], rs[3]], fn_low, deriv)
v += opbend[0]/3
index0 = np.where(p==0)[0][0]
index1 = np.where(p==1)[0][0]
index2 = np.where(p==2)[0][0]
index3 = 3
if deriv>0:
d[0] += opbend[1][index0]/3
d[1] += opbend[1][index1]/3
d[2] += opbend[1][index2]/3
d[3] += opbend[1][index3]/3
if deriv>1:
dd[0, :, 0, :] += opbend[2][index0, :, index0, :]/3
dd[0, :, 1, :] += opbend[2][index0, :, index1, :]/3
dd[0, :, 2, :] += opbend[2][index0, :, index2, :]/3
dd[0, :, 3, :] += opbend[2][index0, :, index3, :]/3
dd[1, :, 0, :] += opbend[2][index1, :, index0, :]/3
dd[1, :, 1, :] += opbend[2][index1, :, index1, :]/3
dd[1, :, 2, :] += opbend[2][index1, :, index2, :]/3
dd[1, :, 3, :] += opbend[2][index1, :, index3, :]/3
dd[2, :, 0, :] += opbend[2][index2, :, index0, :]/3
dd[2, :, 1, :] += opbend[2][index2, :, index1, :]/3
dd[2, :, 2, :] += opbend[2][index2, :, index2, :]/3
dd[2, :, 3, :] += opbend[2][index2, :, index3, :]/3
dd[3, :, 0, :] += opbend[2][index3, :, index0, :]/3
dd[3, :, 1, :] += opbend[2][index3, :, index1, :]/3
dd[3, :, 2, :] += opbend[2][index3, :, index2, :]/3
dd[3, :, 3, :] += opbend[2][index3, :, index3, :]/3
if deriv==0:
return v,
elif deriv==1:
return v, d
elif deriv==2:
return v, d, dd
else:
raise ValueError("deriv must be 0, 1 or 2.") | Compute the mean of the 3 opbends |
7,149 | def build_next_url(self, url):
if not url:
if self.split_urls:
self.total_count_flag = False
return self.split_urls.pop(0)
else:
return None
parsed_url = urlparse(url)
return "{0}?{1}".format(parsed_url.path, parsed_url.query) | Builds next url in a format compatible with cousteau. Path + query |
7,150 | def variant(case_id, variant_id):
case_obj = app.db.case(case_id)
variant = app.db.variant(case_id, variant_id)
if variant is None:
return abort(404, "variant not found")
comments = app.db.comments(variant_id=variant.md5)
template = 'sv_variant.html' if app.db.variant_type == 'sv' else 'variant.html'
return render_template(template, variant=variant, case_id=case_id,
comments=comments, case=case_obj) | Show a single variant. |
7,151 | def is_valid_path(path):
if not path.startswith('/'):
msg =
raise ValueError(msg % path[:40])
for c in :
if c in path:
msg = (
)
raise ValueError(msg % c)
return True | :return: True if the path is valid, else raise a ValueError with the
specific error |
7,152 | def docker(ctx, docker_run_args, docker_image, nvidia, digest, jupyter, dir, no_dir, shell, port, cmd, no_tty):
if not find_executable('docker'):
raise ClickException(
"Docker not installed, install it from https://docker.com" )
args = list(docker_run_args)
image = docker_image or ""
if len(args) > 0 and args[0] == "run":
args.pop(0)
if image == "" and len(args) > 0:
image = args.pop(0)
if not util.docker_image_regex(image.split("@")[0]):
if image:
args = args + [image]
image = wandb.docker.default_image(gpu=nvidia)
subprocess.call(["docker", "pull", image])
_, repo_name, tag = wandb.docker.parse(image)
resolved_image = wandb.docker.image_id(image)
if resolved_image is None:
raise ClickException(
"Couldntypeconfirmnameattachmessageattachdockerattachdockerrun-eLANG=C.UTF-8-eWANDB_DOCKER=%s--ipc=host-v:/wandb-entrypoint.sh--entrypoint/wandb-entrypoint.sh--runtimenvidia-v-w-eWANDB_API_KEY=%st find WANDB_API_KEY, run `wandb login` to enable streaming metrics")
if jupyter:
command.extend([, , , port+])
no_tty = True
cmd = "jupyter lab --no-browser --ip=0.0.0.0 --allow-root --NotebookApp.token= --notebook-dir %s" % dir
command.extend(args)
if no_tty:
command.extend([image, shell, "-c", cmd])
else:
if cmd:
command.extend([, % cmd])
command.extend([, image, shell])
wandb.termlog("Launching docker container \U0001F6A2")
subprocess.call(command) | W&B docker lets you run your code in a docker image ensuring wandb is configured. It adds the WANDB_DOCKER and WANDB_API_KEY
environment variables to your container and mounts the current directory in /app by default. You can pass additional
args which will be added to `docker run` before the image name is declared, we'll choose a default image for you if
one isn't passed:
wandb docker -v /mnt/dataset:/app/data
wandb docker gcr.io/kubeflow-images-public/tensorflow-1.12.0-notebook-cpu:v0.4.0 --jupyter
wandb docker wandb/deepo:keras-gpu --no-tty --cmd "python train.py --epochs=5"
By default we override the entrypoint to check for the existence of wandb and install it if not present. If you pass the --jupyter
flag we will ensure jupyter is installed and start jupyter lab on port 8888. If we detect nvidia-docker on your system we will use
the nvidia runtime. If you just want wandb to set environment variable to an existing docker run command, see the wandb docker-run
command. |
7,153 | def _LogProgressUpdateIfReasonable(self):
next_log_time = (
self._time_of_last_status_log +
self.SECONDS_BETWEEN_STATUS_LOG_MESSAGES)
current_time = time.time()
if current_time < next_log_time:
return
completion_time = time.ctime(current_time + self.EstimateTimeRemaining())
log_message = (
.format(
self.NAME, self.hash_queue.qsize(), completion_time))
logger.info(log_message)
self._time_of_last_status_log = current_time | Prints a progress update if enough time has passed. |
7,154 | def dependencies(request, ident, stateless=False, **kwargs):
_, app = DashApp.locate_item(ident, stateless)
with app.app_context():
view_func = app.locate_endpoint_function()
resp = view_func()
return HttpResponse(resp.data,
content_type=resp.mimetype) | Return the dependencies |
7,155 | def save_imglist(self, fname=None, root=None, shuffle=False):
def progress_bar(count, total, suffix=''):
import sys
bar_len = 24
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', suffix))
sys.stdout.flush()
str_list = []
for index in range(self.num_images):
progress_bar(index, self.num_images)
label = self.label_from_index(index)
if label.size < 1:
continue
path = self.image_path_from_index(index)
if root:
path = osp.relpath(path, root)
str_list.append('\t'.join([str(index), str(2), str(label.shape[1])] \
+ ["{0:.4f}".format(x) for x in label.ravel()] + [path,]) + '\n')
if str_list:
if shuffle:
import random
random.shuffle(str_list)
if not fname:
fname = self.name + '.lst'
with open(fname, 'w') as f:
for line in str_list:
f.write(line)
else:
raise RuntimeError("No image in imdb") | save imglist to disk
Parameters:
----------
fname : str
saved filename |
7,156 | def name(self):
res = type(self).__name__
if self._id:
res += ".{}".format(self._id)
return res | Get the module name
:return: Module name
:rtype: str | unicode |
7,157 | def _to_dict(self):
_dict = {}
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status
if hasattr(self, 'last_updated') and self.last_updated is not None:
_dict['last_updated'] = datetime_to_string(self.last_updated)
return _dict | Return a json dictionary representing this model. |
7,158 | def del_option(self, section, option):
if self.config.has_section(section):
if self.config.has_option(section, option):
self.config.remove_option(section, option)
return (True, self.config.options(section))
return (False, 'Option ' + option + ' does not exist')  # messages reconstructed
return (False, 'Section ' + section + ' does not exist') | Deletes an option if the section and option exist |
7,159 | def load_method(path,method,class_name = None,instance_creator = None):
module = load_module(path)
if class_name :
class_type = getattr(module, class_name)
if instance_creator:
ic_rest = instance_creator
nxt = module
while ('.' in ic_rest) :
nxt = getattr(nxt , instance_creator.split('.')[0])
ic_rest = '.'.join(ic_rest.split('.')[1:])
instance = getattr(module, instance_creator)()
else :
instance = class_type()
return getattr(instance , method)
else :
return getattr(module , method) | Returns an instance of the method specified.
Args :
path : The path to the module containing the method or function.
method : The name of the function.
class_name : The name of the class if the function is a method.
instance_creator: The name of the method to return the class instance. |
7,160 | def rollback(self):
if self.save_dir is None:
logger.error(
"CanRolling back uninstall of %sReplacing %s', path)
renames(tmp_path, path)
for pth in self.pth.values():
pth.rollback() | Rollback the changes previously made by remove(). |
7,161 | def filter_by_moys(self, moys):
_filt_values, _filt_datetimes = self._filter_by_moys_slow(moys)
collection = HourlyDiscontinuousCollection(
self.header.duplicate(), _filt_values, _filt_datetimes)
collection._validated_a_period = self._validated_a_period
return collection | Filter the Data Collection based on a list of minutes of the year.
Args:
moys: A List of minutes of the year [0..8759 * 60]
Return:
A new Data Collection with filtered data |
7,162 | def _build_vocab(filename, vocab_dir, vocab_name):
vocab_path = os.path.join(vocab_dir, vocab_name)
if not tf.gfile.Exists(vocab_path):
with tf.gfile.GFile(filename, "r") as f:
data = f.read().split()
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
encoder = text_encoder.TokenTextEncoder(None, vocab_list=words)
encoder.store_to_file(vocab_path)
else:
encoder = text_encoder.TokenTextEncoder(vocab_path)
return encoder | Reads a file to build a vocabulary.
Args:
filename: file to read list of words from.
vocab_dir: directory where to save the vocabulary.
vocab_name: vocab file name.
Returns:
text encoder. |
7,163 | def load(self, *objs, consistent=False):
get_table_name = self._compute_table_name
objs = set(objs)
validate_not_abstract(*objs)
table_index, object_index, request = {}, {}, {}
for obj in objs:
table_name = get_table_name(obj.__class__)
key = dump_key(self, obj)
index = index_for(key)
if table_name not in object_index:
table_index[table_name] = list(sorted(key.keys()))
object_index[table_name] = {}
request[table_name] = {"Keys": [], "ConsistentRead": consistent}
if index not in object_index[table_name]:
request[table_name]["Keys"].append(key)
object_index[table_name][index] = set()
object_index[table_name][index].add(obj)
response = self.session.load_items(request)
for table_name, list_of_attrs in response.items():
for attrs in list_of_attrs:
key_shape = table_index[table_name]
key = extract_key(key_shape, attrs)
index = index_for(key)
for obj in object_index[table_name].pop(index):
unpack_from_dynamodb(
attrs=attrs, expected=obj.Meta.columns, engine=self, obj=obj)
object_loaded.send(self, engine=self, obj=obj)
if not object_index[table_name]:
object_index.pop(table_name)
if object_index:
not_loaded = set()
for index in object_index.values():
for index_set in index.values():
not_loaded.update(index_set)
logger.info("loaded {} of {} objects".format(len(objs) - len(not_loaded), len(objs)))
raise MissingObjects("Failed to load some objects.", objects=not_loaded)
logger.info("successfully loaded {} objects".format(len(objs))) | Populate objects from DynamoDB.
:param objs: objects to load.
:param bool consistent: Use `strongly consistent reads`__ if True. Default is False.
:raises bloop.exceptions.MissingKey: if any object doesn't provide a value for a key column.
:raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded.
__ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html |
7,164 | def on_copy_local(self, pair):
status = pair.remote_classification
self._log_action("copy", status, ">", pair.local) | Called when the local resource should be copied to remote. |
7,165 | def _TypecheckDecorator(subject=None, **kwargs):
if subject is None:
return _TypecheckDecoratorFactory(kwargs)
elif inspect.isfunction(subject) or inspect.ismethod(subject):
return _TypecheckFunction(subject, {}, 2, None)
else:
raise TypeError() | Dispatches type checks based on what the subject is.
Functions or methods are annotated directly. If this method is called
with keyword arguments only, return a decorator. |
7,166 | def polygonVertices(x, y, radius, sides, rotationDegrees=0, stretchHorizontal=1.0, stretchVertical=1.0):
if sides % 2 == 1:
angleOfStartPointDegrees = 90 + rotationDegrees
else:
angleOfStartPointDegrees = 90 + rotationDegrees - (180 / sides)
for sideNum in range(sides):
angleOfPointRadians = math.radians(angleOfStartPointDegrees + (360 / sides * sideNum))
yield ( int(math.cos(angleOfPointRadians) * radius * stretchHorizontal) + x,
-(int(math.sin(angleOfPointRadians) * radius) * stretchVertical) + y) | Returns a generator that produces the (x, y) points of the vertices of a regular polygon.
`x` and `y` mark the center of the polygon, `radius` indicates the size,
`sides` specifies what kind of polygon it is.
Odd-sided polygons have a pointed corner at the top and flat horizontal
side at the bottom. The `rotationDegrees` argument will rotate the polygon
counterclockwise.
The polygon can be stretched by passing `stretchHorizontal` or `stretchVertical`
arguments. Passing `2.0` for `stretchHorizontal`, for example, will double with
width of the polygon.
If `filled` is set to `True`, the generator will also produce the interior
(x, y) points.
(Note: The `thickness` parameter is not yet implemented.)
>>> list(polygonVertices(10, 10, 8, 5))
[(10, 2.0), (3, 8.0), (6, 16.0), (14, 16.0), (17, 8.0)]
>>> drawPoints(polygonVertices(10, 10, 8, 5))
,,,,,,,O,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
O,,,,,,,,,,,,,O
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,O,,,,,,,O,,,
>>> drawPoints(polygonVertices(10, 10, 8, 5, rotationDegrees=20))
,,,,,O,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,O
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
O,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,O
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,O,,,,,,,, |
7,167 | def transitivity_wu(W):
K = np.sum(np.logical_not(W == 0), axis=1)
ws = cuberoot(W)
cyc3 = np.diag(np.dot(ws, np.dot(ws, ws)))
return np.sum(cyc3, axis=0) / np.sum(K * (K - 1), axis=0) | Transitivity is the ratio of 'triangles to triplets' in the network.
(A classical version of the clustering coefficient).
Parameters
----------
W : NxN np.ndarray
weighted undirected connection matrix
Returns
-------
T : int
transitivity scalar |
7,168 | def require_remote_ref_path(func):
def wrapper(self, *args):
if not self.is_remote():
raise ValueError("ref path does not point to a remote reference: %s" % self.path)
return func(self, *args)
wrapper.__name__ = func.__name__
return wrapper | A decorator raising a TypeError if we are not a valid remote, based on the path |
7,169 | def build_parser():
parser = argparse.ArgumentParser(
description=
)
parser.add_argument(
, ,
help=,
dest=,
default=None
)
parser.add_argument(
, ,
help=,
dest=,
default=os.getcwd()
)
parser.add_argument(
, ,
help=,
dest=,
default=None
)
parser.add_argument(
, ,
help=,
dest=,
default=None
)
parser.add_argument(
,
help=,
default=False,
action=
)
parser.add_argument(
, ,
help=,
default=[],
nargs=
)
opts = parser.parse_args()
return vars(opts) | _build_parser_
Set up CLI parser options, parse the
CLI options an return the parsed results |
7,170 | def local_replace(self, dt, use_dst=True, _recurse=False, **kwds):
local_time = dt + self.standard_offset
if use_dst:
dst_offset = self.dst(local_time)
if dst_offset:
local_time += dst_offset
adjusted_time = local_time.replace(**kwds)
if adjusted_time > local_time and not _recurse:
return self.local_replace(
dt - DAY, use_dst=use_dst, _recurse=True, **kwds)
adjusted_time -= dst_offset
if self.dst(adjusted_time):
return adjusted_time - self.standard_offset
adjusted_time = local_time.replace(**kwds)
if use_dst:
dst_offset = self.dst(adjusted_time)
adjusted_time -= dst_offset
if adjusted_time > local_time and not _recurse:
return self.local_replace(
dt - DAY, use_dst=use_dst, _recurse=True, **kwds)
return adjusted_time - self.standard_offset | Return pywws timestamp (utc, no tzinfo) for the most recent
local time before the pywws timestamp dt, with datetime replace
applied. |
7,171 | def read(file, system):
try:
fid = open(file, 'r')
raw_file = fid.readlines()
except IOError:
print()
return
ret_dict = dict()
ret_dict[] = file.split()[0].lower() +
key, val = None, None
for idx, line in enumerate(raw_file):
line = line.strip()
if not line:
continue
if line.startswith():
continue
elif in line:
line = line.split()[0]
if in line:
key, val = line.split()
key, val = key.strip(), val.strip()
val = [] if val == else val
ret_dict.update({key: val})
if val:
val = val.split()
else:
val.extend(line.split())
if val:
val = de_blank(val)
ret_dict[key] = val
ret_dict_ord = dict(ret_dict)
for key, val in ret_dict.items():
if not val:
continue
if type(val) == list:
if in val[0]:
new_val = {}
new_val_ord = [
]
for item in val:
try:
m, n = item.split()
except ValueError:
print(.format(item))
return
m, n = m.strip(), n.strip()
if in n:
n = n.split()
n = de_blank(n)
n = [to_number(i) for i in n]
else:
n = to_number(n)
new_val.update({m.strip(): n})
new_val_ord.append([m.strip(), n])
ret_dict[key] = new_val
ret_dict_ord[key] = new_val_ord
ret_dict[] = ret_dict[][0]
ret_dict[] = ret_dict[][0]
ret_dict[] = ret_dict[][0]
ret_dict[] = list(ret_dict[].keys())
ret_dict[] = list(ret_dict[].keys()) + list(
ret_dict[].keys())
ret_dict[] = ret_dict_ord[]
ret_dict[] = ret_dict_ord[]
ret_dict[] = ret_dict_ord[]
copy_algebs = []
copy_states = []
for item in ret_dict[]:
key, val = item
if val[3] == :
copy_algebs.append(key)
elif val[3] == :
copy_states.append(key)
elif val[3] == :
ret_dict[].append(key)
ret_dict[] = copy_algebs
ret_dict[] = copy_states
return run(system, **ret_dict) | Parse an ANDES card file into internal variables |
7,172 | def call(command, silent=False):
try:
if silent:
with open(os.devnull, 'w') as FNULL:
return subprocess.check_call(command_to_array(command), stdout=FNULL)
else:
return check_call(command_to_array(command))
except CalledProcessError as e:
e.message = "%s failed with error code %s" % (e.cmd[0], e.returncode)
e.cmd = e.cmd[0] + " [arguments stripped for security]"
raise e | Runs a bash command safely, with shell=false, catches any non-zero
return codes. Raises slightly modified CalledProcessError exceptions
on failures.
Note: command is a string and cannot include pipes. |
7,173 | def find_mecab_dictionary(names):
suggested_pkg = names[0]
paths = [
os.path.expanduser(),
,
,
,
,
,
]
full_paths = [os.path.join(path, name) for path in paths for name in names]
checked_paths = [path for path in full_paths if len(path) <= MAX_PATH_LENGTH]
for path in checked_paths:
if os.path.exists(path):
return path
error_lines = [
"Couldns package manager to install",
"the %r package." % suggested_pkg,
"",
"We looked in the following locations:"
] + ["\t%s" % path for path in checked_paths]
skipped_paths = [path for path in full_paths if len(path) > MAX_PATH_LENGTH]
if skipped_paths:
error_lines += [
"We had to skip these paths that are too long for MeCab to find:",
] + ["\t%s" % path for path in skipped_paths]
raise OSError('\n'.join(error_lines)) | Find a MeCab dictionary with a given name. The dictionary has to be
installed separately -- see wordfreq's README for instructions. |
7,174 | def plot_shade_mask(ax, ind, mask, facecolor=, alpha=0.5):
ymin, ymax = ax.get_ylim()
ax.fill_between(ind, ymin, ymax, where=mask,
facecolor=facecolor, alpha=alpha)
return ax | Shade across x values where boolean mask is `True`
Args
----
ax: pyplot.ax
Axes object to plot with a shaded region
ind: ndarray
The indices to use for the x-axis values of the data
mask: ndarray
Boolean mask array to determine which regions should be shaded
facecolor: matplotlib color
Color of the shaded area
Returns
-------
ax: pyplot.ax
Axes object with the shaded region added |
7,175 | def _is_accepted(self, element_tag):
element_tag = element_tag.lower()
if self._ignored_tags is not None \
and element_tag in self._ignored_tags:
return False
if self._followed_tags is not None:
return element_tag in self._followed_tags
else:
return True | Return if the link is accepted by the filters. |
def in_builddir(sub='.'):
from functools import wraps
def wrap_in_builddir(func):
@wraps(func)
def wrap_in_builddir_func(self, *args, **kwargs):
p = local.path(self.builddir) / sub
if not p.exists():
LOG.error("%s does not exist.", p)
if p == local.cwd:
LOG.debug("CWD already is %s", p)
return func(self, *args, **kwargs)
with local.cwd(p):
return func(self, *args, **kwargs)
return wrap_in_builddir_func
return wrap_in_builddir | Decorate a project phase with a local working directory change.
Args:
sub: An optional subdirectory to change into. |
def system(self) -> 'EFBChat':
self.chat_name = "System"
self.chat_alias = None
self.chat_uid = EFBChat.SYSTEM_ID
self.chat_type = ChatType.System
return self | Set the chat as a system chat.
Only set for channel-level and group-level system chats.
Returns:
EFBChat: This object. |
7,178 | def _fake_openreferenceinstances(self, namespace, **params):
self._validate_namespace(namespace)
self._validate_open_params(**params)
params['ObjectName'] = params['InstanceName']
del params['InstanceName']
result = self._fake_references(namespace, **params)
objects = [] if result is None else [x[2] for x in result[0][2]]
return self._open_response(objects, namespace,
, **params) | Implements WBEM server responder for
:meth:`~pywbem.WBEMConnection.OpenReferenceInstances`
with data from the instance repository. |
7,179 | def pre_check(self, data):
sentences = len(re.findall(, data)) or 1
chars = len(data) - len(re.findall(, data))
num_words = len(re.findall(, data))
data = re.split(, data)
return data, sentences, chars, num_words | Count chars, words and sentences in the text. |
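The regex literals in the code above were lost during extraction; the self-contained sketch below reproduces the counting idea, with the patterns explicitly marked as assumptions.
import re

def pre_check_sketch(data):
    sentences = len(re.findall(r"[.!?]+", data)) or 1   # assumed sentence-delimiter pattern
    chars = len(data) - len(re.findall(r"\s", data))    # assumed: whitespace excluded from the char count
    num_words = len(re.findall(r"\w+", data))           # assumed word pattern
    parts = re.split(r"\s+", data)                      # assumed split on whitespace
    return parts, sentences, chars, num_words

print(pre_check_sketch("Readability counts. Simple is better than complex."))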
7,180 | def get(self, cycle_list, dataitem=None, isotope=None, sparse=1):
t1=time.time()
isotopes_of_interest = []
nested_list = False
if isinstance(cycle_list, basestring):
cycle_list = [cycle_list]
else:
try:
if len(cycle_list) == 1:
nested_list = True
except TypeError:
pass
if isinstance(dataitem, basestring):
dataitem = [dataitem]
if isinstance(isotope, basestring):
isotope = [isotope]
if dataitem==None and isotope==None:
option_ind = 1
dataitem = cycle_list
if not any([item in self.hattrs for item in dataitem]):
cycle_list = self.cycles
else:
first_file = mrT.File(self.h5s[0].filename,)
dat = []
for item in dataitem:
tmp = first_file.attrs.get(item, None)
try:
if len(tmp) == 1:
tmp = tmp[0]
except TypeError:
pass
dat.append(tmp)
if (len(dat) == 1) and (not nested_list):
dat = dat[0]
first_file.close()
return dat
if any([item.split()[0] in self.isos for item in dataitem]):
return self.get(cycle_list,dataitem,sparse=sparse)
elif isotope==None:
option_ind = 2
cycle_list = cycle_list
dataitem = dataitem
if isinstance(dataitem, basestring):
dataitem = [dataitem]
new_dataitem = []
new_isotopes = []
for item in dataitem:
if item.split()[0] in self.isos:
new_isotopes.append(item)
else:
new_dataitem.append(item)
if len(new_isotopes) != 0:
tmp = []
try:
tmp = self.get(cycle_list,new_dataitem + [],new_isotopes,sparse=sparse)
except:
tmp = self.get(cycle_list,new_dataitem + [],new_isotopes,sparse=sparse)
dat = []
if isinstance(cycle_list, basestring):
tmp = [tmp]
else:
try:
if len(cycle_list) == 1:
tmp = [tmp]
except TypeError:
tmp = [tmp]
for cyc in tmp:
temp_dataitem = []
for item in dataitem:
if item in new_dataitem:
temp_dataitem.append(cyc[new_dataitem.index(item)])
else:
if len(new_dataitem) == 0:
temp_dataitem = cyc
else:
if len(new_isotopes) == 1:
temp_dataitem.append(cyc[-1])
else:
temp_dataitem.append(cyc[-1][new_isotopes.index(item)])
dat.append(temp_dataitem)
if (len(dat) == 1) and (not nested_list):
dat = dat[0]
return dat
else:
option_ind = 3
cycle_list = cycle_list
dataitem = dataitem
isotopes_of_interest = isotope
try:
cycle_list[0]
except (TypeError,IndexError):
cycle_list = [cycle_list]
shellnb=self.get(cycle_list[0],)
if sparse <1:
sparse=1
try:
for x in range(len(cycle_list)):
cycle_list[x] = str(cycle_list[x])
except TypeError:
cycle_list = [str(cycle_list)]
if option_ind != 1:
try:
if cycle_list.isdigit():
cycle_list = [cycle_list]
for cycle in cycle_list:
if len(cycle) != len(self.cycles[0]):
diff = len(self.cycles[0])-len(cycle)
OO =
while diff >=1:
OO+=
diff-=1
cycle = OO+cycle
except AttributeError:
if cycle_list[0].isdigit():
for x in range(len(cycle_list)):
if len(str(cycle_list[x])) != len(str(self.cycles[0])):
diff = len(str(self.cycles[0]))-len(str(cycle_list[x]))
OO =
while diff >=1:
OO+=
diff-=1
try:
cycle_list[x] = OO+cycle_list[x]
except TypeError:
cycle_list[0] = OO+cycle_list[0]
dat = []
cycle_list.sort()
cyclelist=np.array(list(map(int, cycle_list)))
cycles_requested = []
file_min=[]
file_max=[]
try:
for h5 in self.h5s:
file_min.append(int(h5.cycle[0]))
file_max.append(int(h5.cycle[-1]))
except IndexError:
print(+h5.filename+)
print()
print()
raise IOError()
file_min.sort()
file_max.sort()
for h5 in self.h5s:
min_file = int(h5.cycle[0])
max_file = int(h5.cycle[-1])
min_list = int(cyclelist[0])
max_list = int(cyclelist[-1])
index_min = None
index_max = None
if len(file_min) == 1:
min_file = min_list - 1
max_file = max_list + 1
else:
file_index = file_min.index(min_file)
if file_index == 0:
if min_list - 1 < min_file:
min_file = min_list - 1
max_file = (file_min[file_index + 1] + max_file)//2
elif file_index == len(file_min) - 1:
min_file = (file_max[file_index - 1] + min_file)//2 + 1
if max_list + 1 > max_file:
max_file = max_list + 1
else:
min_file = (file_max[file_index - 1] + min_file)//2 + 1
max_file = (file_min[file_index + 1] + max_file)//2
if (max_list < min_file) or (max_file < min_list):
continue
elif (min_list <= min_file) and (max_file <= max_list):
index_min = bisect.bisect_left(cyclelist, min_file)
index_max = bisect.bisect_right(cyclelist, max_file)
elif (min_file <= min_list) and (max_list <= max_file):
index_min = None
index_max = None
else:
if min_list > min_file:
index_min = None
index_max = bisect.bisect_right(cyclelist, max_file)
else:
index_min = bisect.bisect_left(cyclelist, min_file)
index_max = None
imin = index_min
if index_min == None:
imin = 0
imax = index_max
if index_max == None:
imax = len(cyclelist)
request_min = bisect.bisect_left(cycles_requested, imin)
request_max = bisect.bisect_right(cycles_requested, imax)
del cycles_requested[request_min:request_max]
if ((request_max-request_min) % 2) ==1:
if request_min % 2 == 0:
cycles_requested.insert(request_min, imin)
else:
cycles_requested.insert(request_min, imax)
else:
if request_min % 2 == 0:
cycles_requested.insert(request_min, imin)
cycles_requested.insert(request_min + 1, imax)
else:
pass
if not self.h5sStarted[self.h5s.index(h5)]:
h5.start()
h5.join()
temp = h5.fetch_data_sam(dataitem,cycle_list[index_min:index_max],len(cycle_list),len(dat))
self.h5sStarted[self.h5s.index(h5)]=True
else:
temp = h5.fetch_data_sam(dataitem,cycle_list[index_min:index_max],len(cycle_list),len(dat))
temp_dat = []
for temp_num, temp_cycle in enumerate(temp):
temp_dataforcycle = []
for dataitem_num, temp_dataitem in enumerate(temp_cycle):
temp_dataitem=self.red_dim(temp_dataitem)
if (dataitem[dataitem_num] == or dataitem[dataitem_num] == ) and isotopes_of_interest != []:
index = []
iso_tmp = []
if in dataitem[dataitem_num]:
iso_tmp = self.isotopes
else:
iso_tmp = self.elements
for iso in isotopes_of_interest:
x = iso_tmp.index(iso)
index.append(x)
if index == []:
index = [0]
islist=True
if len(cycle_list)==1:
islist=False
temp_multicyc = []
for i in index:
if shellnb == 1:
temp_multicyc.append(temp_dataitem[i])
else:
temp_multicyc.append(temp_dataitem[:,i])
if len(temp_multicyc) == 1:
temp_multicyc = temp_multicyc[0]
temp_dataitem = temp_multicyc
temp_dataforcycle.append(temp_dataitem)
if len(temp_dataforcycle) == 1:
temp_dataforcycle = temp_dataforcycle[0]
temp_dat.append(temp_dataforcycle)
insert_pnt = 0
if index_min is not None:
for i in range(len(cycles_requested)):
if i % 2 == 1:
if cycles_requested[i] < index_min:
insert_pnt += cycles_requested[i] - cycles_requested[i-1]
elif cycles_requested[i - 1] < index_min:
insert_pnt += index_min - cycles_requested[i - 1]
dat[insert_pnt:insert_pnt] = temp_dat
if len(dat) < 2 and option_ind != 3 and (not nested_list):
try:
dat = dat[0]
except IndexError:
None
except TypeError:
None
try:
if len(dat) < 2 and isotopes_of_interest != []:
dat = dat[0]
except TypeError:
None
except IndexError:
None
t2=time.time()
return dat | Get Data from HDF5 files.
There are three ways to call this function
1. get(dataitem)
Fetches the dataitem for all cycles. If dataitem is a header
attribute or list of attributes then the data is returned.
If dataitem is an individual or list of column attributes,
data columns or isotopes/elements the data is returned for
all cycles.
2. get(cycle_list, dataitem)
Fetches the dataitem or list of dataitems for the cycle
or list of cycles. The variable dataitems can contain column
attributes, data columns, and isotopes/elements.
3. get(cycle_list, dataitem, isotope)
Fetches the dataitems like the second method except that
one of the dataitems must be either "iso_massf" or "yps",
and in the data returned "iso_massf" and "yps" are replaced
with the data from the isotopes. The isotopes must be in
the form given by se.isotopes or se.elements.
Parameters
----------
cycle_list : list, integer or string
If cycle_list is a list or string and all of the entries
are header attributes then the attributes are returned.
If cycle_list is a list or string of dataitems then the
dataitems are fetched for all cycles.
If cycle_list is a list, integer or string of cycle numbers
then data is returned for those cycles.
dataitem: list or string, optional
If dataitem is not None then the data for each item is
returned for the cycle or list of cycles. dataitem may be an
individual or a mixed list of column attributes, column
data or isotopes/elements. If dataitem is None then
cycle_list must be a string. The default is None.
isotope: list or string, optional
If one of the dataitems is "iso_massf" or "yps" then it is
replaced with the data from the individual isotopes/elements
listed in isotope. The default is None.
sparse : int
Implements a sparsity factor on the fetched data i.e. only
the i th cycle in cycle_list data is returned,
where i = sparse. |
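A hedged usage sketch of the three calling conventions described above; 'sefiles' stands for an already-opened instance of this class, and the cycle numbers, column names and isotope names are illustrative.
# 1. get(dataitem): a header attribute or column for all cycles
masses = sefiles.get("mass")

# 2. get(cycle_list, dataitem): several data columns for selected cycles
temp_and_rho = sefiles.get([1000, 2000], ["temperature", "rho"])

# 3. get(cycle_list, dataitem, isotope): 'iso_massf' replaced by the chosen isotopes
c12_h1 = sefiles.get(1000, "iso_massf", ["C-12", "H-1"])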
7,181 | def __recognize_scalar(self, node: yaml.Node,
expected_type: Type) -> RecResult:
logger.debug()
if (isinstance(node, yaml.ScalarNode)
and node.tag == scalar_type_to_tag[expected_type]):
return [expected_type],
message = .format(
type_to_desc(expected_type), node.start_mark)
return [], message | Recognize a node that we expect to be a scalar.
Args:
node: The node to recognize.
expected_type: The type it is expected to be.
Returns:
A list of recognized types and an error message |
7,182 | def add_arguments(cls, parser, sys_arg_list=None):
parser.add_argument(,
dest=,
required=False, default=2, type=float,
help="TCP health-test interval in seconds, "
"default 2 "
"(only for health monitor plugin)")
parser.add_argument(,
dest=,
required=False, default=22, type=int,
help="Port for TCP health-test, default 22 "
"(only for health monitor plugin)")
return ["tcp_check_interval", "tcp_check_port"] | Arguments for the TCP health monitor plugin. |
7,183 | def generate(self, information, timeout=-1):
return self._client.create(information, timeout=timeout) | Generates a self signed certificate or an internal CA signed certificate for RabbitMQ clients.
Args:
information (dict): Information to generate the certificate for RabbitMQ clients.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: RabbitMQ certificate generated |
7,184 | def pull_guest_properties(self):
(names, values, timestamps, flags) = self._call("pullGuestProperties")
return (names, values, timestamps, flags) | Get the list of the guest properties matching a set of patterns along
with their values, timestamps and flags and give responsibility for
managing properties to the console.
out names of type str
The names of the properties returned.
out values of type str
The values of the properties returned. The array entries match the
corresponding entries in the @a name array.
out timestamps of type int
The timestamps of the properties returned. The array entries match
the corresponding entries in the @a name array.
out flags of type str
The flags of the properties returned. The array entries match the
corresponding entries in the @a name array. |
7,185 | def compile_mof_string(self, mof_str, namespace=None, search_paths=None,
verbose=None):
namespace = namespace or self.default_namespace
self._validate_namespace(namespace)
mofcomp = MOFCompiler(_MockMOFWBEMConnection(self),
search_paths=search_paths,
verbose=verbose)
mofcomp.compile_string(mof_str, namespace) | Compile the MOF definitions in the specified string and add the
resulting CIM objects to the specified CIM namespace of the mock
repository.
If the namespace does not exist, :exc:`~pywbem.CIMError` with status
CIM_ERR_INVALID_NAMESPACE is raised.
This method supports all MOF pragmas, and specifically the include
pragma.
If a CIM class or CIM qualifier type to be added already exists in the
target namespace with the same name (comparing case insensitively),
this method raises :exc:`~pywbem.CIMError`.
If a CIM instance to be added already exists in the target namespace
with the same keybinding values, this method raises
:exc:`~pywbem.CIMError`.
In all cases where this method raises an exception, the mock repository
remains unchanged.
Parameters:
mof_str (:term:`string`):
A string with the MOF definitions to be compiled.
namespace (:term:`string`):
The name of the target CIM namespace in the mock repository. This
namespace is also used for lookup of any existing or dependent
CIM objects. If `None`, the default namespace of the connection is
used.
search_paths (:term:`py:iterable` of :term:`string`):
An iterable of directory path names where MOF dependent files will
be looked up.
See the description of the `search_path` init parameter of the
:class:`~pywbem.MOFCompiler` class for more information on MOF
dependent files.
verbose (:class:`py:bool`):
Controls whether to issue more detailed compiler messages.
Raises:
IOError: MOF file not found.
:exc:`~pywbem.MOFParseError`: Compile error in the MOF.
:exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
not exist.
:exc:`~pywbem.CIMError`: Failure related to the CIM objects in the
mock repository. |
7,186 | def setup_database(config_data):
with chdir(config_data.project_directory):
env = deepcopy(dict(os.environ))
env[str()] = str(.format(config_data.project_name))
env[str()] = str(os.pathsep.join(map(shlex_quote, sys.path)))
commands = []
commands.append(
[sys.executable, , , , ],
)
if config_data.verbose:
sys.stdout.write(
.format(
.join([.join(cmd) for cmd in commands])
)
)
for command in commands:
try:
output = subprocess.check_output(
command, env=env, stderr=subprocess.STDOUT
)
sys.stdout.write(output.decode())
except subprocess.CalledProcessError as e:
if config_data.verbose:
sys.stdout.write(e.output.decode())
raise
if not config_data.no_user:
sys.stdout.write()
if config_data.noinput:
create_user(config_data)
else:
subprocess.check_call(.join(
[sys.executable, , , , ]
), shell=True, stderr=subprocess.STDOUT) | Run the migrate command to create the database schema
:param config_data: configuration data |
7,187 | def filter_update(self, id, phrase = None, context = None, irreversible = None, whole_word = None, expires_in = None):
id = self.__unpack_id(id)
params = self.__generate_params(locals(), [])
url = .format(str(id))
return self.__api_request(, url, params) | Updates the filter with the given `id`. Parameters are the same
as in `filter_create()`.
Returns the `filter dict`_ of the updated filter. |
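A hedged usage sketch with Mastodon.py; the instance URL, access token and filter id are placeholders.
from mastodon import Mastodon

mastodon = Mastodon(access_token="usercred.secret", api_base_url="https://example.social")
updated = mastodon.filter_update(
    12345,                        # hypothetical filter id
    phrase="spoilers",
    context=["home", "public"],
    irreversible=False,
    whole_word=True,
    expires_in=3600,
)
print(updated["phrase"])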
7,188 | def requires_user(fn):
@functools.wraps(fn)
def wrap(*args, **kwargs):
subject = Yosai.get_current_subject()
if subject.identifiers is None:
msg = ("Attempting to perform a user-only operation. The "
"current Subject is NOT a user (they haven't been "
"authenticated or remembered from a previous login). "
"ACCESS DENIED.")
raise UnauthenticatedException(msg)
return fn(*args, **kwargs)
return wrap | Requires that the calling Subject be *either* authenticated *or* remembered
via RememberMe services before allowing access.
This method essentially ensures that subject.identifiers IS NOT None
:raises UnauthenticatedException: indicating that the decorated method is
not allowed to be executed because the
Subject attempted to perform a user-only
operation |
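A hedged sketch of guarding an application function with the decorator above; the function body is illustrative, and the decorator depends on Yosai from its module.
@requires_user
def remove_saved_card(card_id):
    # Only reached when the current Subject is authenticated or remembered.
    print("removing saved card", card_id)

remove_saved_card(42)   # raises UnauthenticatedException for an anonymous Subject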
7,189 | def find_dups(file_dict):
found_hashes = {}
for f in file_dict:
if file_dict[f][] not in found_hashes:
found_hashes[file_dict[f][]] = []
found_hashes[file_dict[f][]].append(f)
final_hashes = dict(found_hashes)
for h in found_hashes:
if len(found_hashes[h])<2:
del(final_hashes[h])
return final_hashes.values() | takes output from :meth:`scan_dir` and returns list of duplicate files |
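A hedged sketch of the input shape find_dups expects: a mapping of path to per-file metadata; the stripped dictionary key in the code above is assumed here to be 'hash'.
file_dict = {
    "/data/a.txt": {"hash": "abc123"},
    "/data/b.txt": {"hash": "abc123"},   # duplicate of a.txt
    "/data/c.txt": {"hash": "ffee00"},
}
for group in find_dups(file_dict):
    print("duplicates:", group)          # e.g. ['/data/a.txt', '/data/b.txt']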
7,190 | def refresh(self):
if self.exists:
self.delete()
self.populate()
self.open() | Refresh the cache by deleting the old one and creating a new one. |
7,191 | def prepare_dispatch(self):
if self.new_to_dispatch:
raise DispatcherError("A configuration is already prepared!")
self.new_to_dispatch = True
self.first_dispatch_done = False
for daemon_link in self.all_daemons_links:
daemon_link.cfg.update({: self.alignak_conf.alignak_name})
logger.info("Preparing realms dispatch:")
master_arbiter_cfg = arbiters_cfg = {}
for arbiter_link in self.get_satellites_list():
if not arbiter_link.active:
continue
arbiter_cfg = arbiter_link.cfg
arbiter_cfg.update({
: [h.get_name() for h in self.alignak_conf.hosts],
: serialize(arbiter_link.modules, True),
: self.alignak_conf.instance_id,
:
})
cfg_string = json.dumps(arbiter_cfg, sort_keys=True).encode()
arbiter_cfg[] = hashlib.sha1(cfg_string).hexdigest()
arbiters_cfg[arbiter_link.uuid] = arbiter_cfg[]
if arbiter_link != self.arbiter_link:
arbiter_cfg.update({
: master_arbiter_cfg,
: self.alignak_conf.spare_arbiter_conf,
})
try:
s_conf_part = json.dumps(arbiter_cfg, sort_keys=True).encode()
except UnicodeDecodeError:
pass
arbiter_cfg[] = hashlib.sha1(s_conf_part).hexdigest()
pickled_conf = pickle.dumps(arbiter_cfg)
logger.info(, sys.getsizeof(pickled_conf))
arbiter_link.cfg = arbiter_cfg
arbiter_link.cfg_to_manage = self.alignak_conf
arbiter_link.push_flavor = arbiter_cfg[]
arbiter_link.hash = arbiter_cfg[]
arbiter_link.need_conf = False
arbiter_link.configuration_sent = False
if arbiter_link == self.arbiter_link:
master_arbiter_cfg = {self.arbiter_link.uuid: arbiter_cfg[]}
logger.info(, arbiter_link.name)
for realm in self.alignak_conf.realms:
logger.info("- realm %s: %d configuration part(s)", realm.name, len(realm.parts))
parts_to_dispatch = [cfg for cfg in list(realm.parts.values()) if not cfg.is_assigned]
if not parts_to_dispatch:
logger.info()
continue
logger.info(" preparing the dispatch for schedulers:")
schedulers = realm.get_potential_satellites_by_type(
self.get_satellites_list(), )
if not schedulers:
logger.error(, realm)
continue
logger.info(" realm schedulers: %s",
.join([s.get_name() for s in schedulers]))
for cfg_part in parts_to_dispatch:
logger.info(" .assigning configuration part %s (%s), name:%s",
cfg_part.instance_id, cfg_part.uuid, cfg_part.config_name)
while True:
try:
scheduler_link = schedulers.pop()
except IndexError:
logger.error("No more scheduler link: %s", realm)
for sat_type in (, , , ):
realm.to_satellites[sat_type][cfg_part.instance_id] = None
realm.to_satellites_need_dispatch[sat_type][cfg_part.instance_id] = \
False
realm.to_satellites_managed_by[sat_type][cfg_part.instance_id] = []
break
if not scheduler_link.need_conf:
logger.info(,
realm.name, scheduler_link.name)
continue
logger.debug(" preparing configuration part for the scheduler ",
cfg_part.instance_id, scheduler_link.name)
logger.debug(" - %d hosts, %d services",
len(cfg_part.hosts), len(cfg_part.services))
s_conf_part = serialize(realm.parts[cfg_part.instance_id])
try:
s_conf_part = s_conf_part.encode()
except UnicodeDecodeError:
pass
cfg_part.push_flavor = hashlib.sha1(s_conf_part).hexdigest()
sat_scheduler_cfg = scheduler_link.give_satellite_cfg()
sat_scheduler_cfg.update({
: [h.get_name() for h in cfg_part.hosts],
: cfg_part.instance_id,
: cfg_part.push_flavor
})
cfg_string = json.dumps(sat_scheduler_cfg, sort_keys=True).encode()
sat_scheduler_cfg[] = hashlib.sha1(cfg_string).hexdigest()
logger.debug(, sat_scheduler_cfg)
for sat_type in (, , , ):
realm.to_satellites[sat_type][cfg_part.instance_id] = sat_scheduler_cfg
realm.to_satellites_need_dispatch[sat_type][cfg_part.instance_id] = True
realm.to_satellites_managed_by[sat_type][cfg_part.instance_id] = []
scheduler_link.cfg.update({
: scheduler_link.instance_id,
: scheduler_link.name,
: {scheduler_link.uuid: sat_scheduler_cfg},
: arbiters_cfg if scheduler_link.manage_arbiters else {},
: realm.get_links_for_a_scheduler(self.pollers,
self.reactionners,
self.brokers),
: serialize(scheduler_link.modules, True),
: serialize(realm.parts[cfg_part.instance_id]),
: cfg_part.instance_id,
: cfg_part.push_flavor,
: scheduler_link.get_override_configuration()
})
cfg_string = json.dumps(scheduler_link.cfg, sort_keys=True).encode()
scheduler_link.cfg[] = hashlib.sha1(cfg_string).hexdigest()
pickled_conf = pickle.dumps(scheduler_link.cfg)
logger.info(" scheduler configuration size: %d bytes",
sys.getsizeof(pickled_conf))
logger.info(" scheduler satellites:")
satellites = realm.get_links_for_a_scheduler(self.pollers,
self.reactionners,
self.brokers)
for sat_type in satellites:
logger.info(" - %s", sat_type)
for sat_link_uuid in satellites[sat_type]:
satellite = satellites[sat_type][sat_link_uuid]
logger.info(" %s", satellite[])
cfg_part.is_assigned = True
cfg_part.scheduler_link = scheduler_link
scheduler_link.cfg_to_manage = cfg_part
scheduler_link.push_flavor = cfg_part.push_flavor
scheduler_link.hash = scheduler_link.cfg[]
scheduler_link.need_conf = False
scheduler_link.configuration_sent = False
logger.info(,
cfg_part.instance_id, cfg_part.push_flavor, scheduler_link.name)
break
logger.info(" preparing the dispatch for satellites:")
for cfg_part in list(realm.parts.values()):
logger.info(" .configuration part %s (%s), name:%s",
cfg_part.instance_id, cfg_part.uuid, cfg_part.config_name)
for sat_type in (, , , ):
if cfg_part.instance_id not in realm.to_satellites_need_dispatch[sat_type]:
logger.warning(" nothing to dispatch for %ss", sat_type)
return
if not realm.to_satellites_need_dispatch[sat_type][cfg_part.instance_id]:
logger.warning(" no need to dispatch to %ss", sat_type)
return
satellites = realm.get_potential_satellites_by_type(self.satellites, sat_type)
if satellites:
logger.info(" realm %ss: %s",
sat_type, .join([s.get_name() for s in satellites]))
else:
logger.info(" no %s satellites", sat_type)
nb_cfg_prepared = 0
for sat_link in satellites:
if not sat_link.active:
continue
if nb_cfg_prepared > realm.get_nb_of_must_have_satellites(sat_type):
logger.warning("Too much configuration parts prepared "
"for the expected satellites count. "
"Realm: %s, satellite: %s - prepared: %d out of %d",
realm.name, sat_link.name, nb_cfg_prepared,
realm.get_nb_of_must_have_satellites(sat_type))
logger.info(" preparing configuration part for the %s ",
cfg_part.instance_id, sat_type, sat_link.name)
sat_link.cfg.update({
: arbiters_cfg if sat_link.manage_arbiters else {},
: serialize(sat_link.modules, True),
: ,
: self.global_conf
})
sat_link.cfg[].update({
cfg_part.uuid: realm.to_satellites[sat_type][cfg_part.instance_id]})
if sat_type == "broker":
sat_link.cfg.update({: realm.get_links_for_a_broker(
self.pollers, self.reactionners, self.receivers,
self.alignak_conf.realms, sat_link.manage_sub_realms)})
cfg_string = json.dumps(sat_link.cfg, sort_keys=True).encode()
sat_link.cfg[] = hashlib.sha1(cfg_string).hexdigest()
pickled_conf = pickle.dumps(sat_link.cfg)
logger.info(,
sat_type, sys.getsizeof(pickled_conf))
sat_link.cfg_to_manage = cfg_part
sat_link.push_flavor = cfg_part.push_flavor
sat_link.hash = sat_link.cfg[]
sat_link.need_conf = False
sat_link.configuration_sent = False
logger.info(,
cfg_part.instance_id, cfg_part.push_flavor, sat_link.name)
nb_cfg_prepared += 1
realm.to_satellites_managed_by[sat_type][
cfg_part.instance_id].append(sat_link)
scheduler_link.need_conf = False | Prepare dispatch, so prepare for each daemon (schedulers, brokers, receivers, reactionners,
pollers)
This function will only prepare something if self.new_to_dispatch is False
It will reset the first_dispatch_done flag
A DispatcherError exception is raised if a configuration is already prepared! Unset the
new_to_dispatch flag before calling!
:return: None |
7,192 | def _commit_handler(self, cmd):
current_prompt = self.device.find_prompt().strip()
terminating_char = current_prompt[-1]
pattern1 = r"[>
pattern2 = r".*all username.*confirm"
patterns = r"(?:{}|{})".format(pattern1, pattern2)
output = self.device.send_command_expect(cmd, expect_string=patterns)
loop_count = 50
new_output = output
for i in range(loop_count):
if re.search(pattern2, new_output):
new_output = self.device.send_command_timing(
"\n", strip_prompt=False, strip_command=False
)
output += new_output
else:
break
self.device.set_base_prompt()
return output | Special handler for hostname change on commit operation. Also handles username removal
which prompts for confirmation (username removal prompts for each user...) |
7,193 | def _fix_permissions(self):
state = yield from self._get_container_state()
if state == "stopped" or state == "exited":
yield from self.manager.query("POST", "containers/{}/start".format(self._cid))
for volume in self._volumes:
log.debug("Docker container [{image}] fix ownership on {path}".format(
name=self._name, image=self._image, path=volume))
process = yield from asyncio.subprocess.create_subprocess_exec(
"docker",
"exec",
self._cid,
"/gns3/bin/busybox",
"sh",
"-c",
"("
"/gns3/bin/busybox find \"{path}\" -depth -print0"
" | /gns3/bin/busybox xargs -0 /gns3/bin/busybox stat -c > \"{path}/.gns3_perms\""
")"
" && /gns3/bin/busybox chmod -R u+rX \"{path}\""
" && /gns3/bin/busybox chown {uid}:{gid} -R \"{path}\""
.format(uid=os.getuid(), gid=os.getgid(), path=volume),
)
yield from process.wait() | Because docker run as root we need to fix permission and ownership to allow user to interact
with it from their filesystem and do operation like file delete |
7,194 | def pad_to(unpadded, target_len):
under = target_len - len(unpadded)
if under <= 0:
return unpadded
return unpadded + ( * under) | Pad a string to the target length in characters, or return the original
string if it's longer than the target length. |
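The pad character literal in the code above was stripped; the self-contained sketch below assumes a space.
def pad_to_sketch(unpadded, target_len):
    under = target_len - len(unpadded)
    if under <= 0:
        return unpadded
    return unpadded + (" " * under)       # assumed pad character: space

print(repr(pad_to_sketch("abc", 6)))      # 'abc   '
print(repr(pad_to_sketch("abcdefgh", 6))) # 'abcdefgh' -- longer input is returned unchanged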
7,195 | def single_discriminator(x, filters=128, kernel_size=8,
strides=4, pure_mean=False):
with tf.variable_scope("discriminator"):
net = layers().Conv2D(
filters, kernel_size, strides=strides, padding="SAME", name="conv1")(x)
if pure_mean:
net = tf.reduce_mean(net, [1, 2])
else:
net = mean_with_attention(net, "mean_with_attention")
return net | A simple single-layer convolutional discriminator. |
7,196 | def _histogram_data(iterator):
histogram_started = False
header_passed = False
for l in iterator:
if in l:
histogram_started = True
elif histogram_started:
if header_passed:
values = l.rstrip().split("\t")
problem_type, name = values[0].split()
yield problem_type, name, int(values[1])
elif l.startswith():
header_passed = True | Yields only the row contents that contain the histogram entries |
7,197 | def _compute_nfps_uniform(cum_counts, sizes):
nfps = np.zeros((len(sizes), len(sizes)))
for l in range(len(sizes)):
for u in range(l, len(sizes)):
nfps[l, u] = _compute_nfp_uniform(l, u, cum_counts, sizes)
return nfps | Computes the matrix of expected false positives for all possible
sub-intervals of the complete domain of set sizes, assuming uniform
distribution of set sizes within each sub-interval.
Args:
cum_counts: the complete cumulative distribution of set sizes.
sizes: the complete domain of set sizes.
Return (np.array): the 2-D array of expected number of false positives
for every pair of [l, u] interval, where l is axis-0 and u is
axis-1. |
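A hedged, self-contained sketch of how the upper-triangular matrix is assembled; the per-interval helper below is only a stand-in because _compute_nfp_uniform is not shown here, so it is not the library's actual false-positive formula.
import numpy as np

def nfp_uniform_stub(l, u, cum_counts, sizes):
    # Stand-in: count of sets whose size falls inside [sizes[l], sizes[u]].
    below = cum_counts[l - 1] if l > 0 else 0
    return float(cum_counts[u] - below)

def compute_nfps_uniform_sketch(cum_counts, sizes):
    nfps = np.zeros((len(sizes), len(sizes)))
    for l in range(len(sizes)):
        for u in range(l, len(sizes)):        # only the upper triangle is filled
            nfps[l, u] = nfp_uniform_stub(l, u, cum_counts, sizes)
    return nfps

sizes = np.array([1, 2, 3, 5, 8])
cum_counts = np.array([4, 9, 15, 18, 20])     # cumulative counts of sets up to each size
print(compute_nfps_uniform_sketch(cum_counts, sizes))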
7,198 | def discovery_print(pkt):
if pkt.src in mac_id_list:
return
mac_id_list.append(pkt.src)
text = pkt_text(pkt)
click.secho(text, fg=) if in text else click.echo(text) | Scandevice callback. Register src mac to avoid src repetition.
Print device on screen.
:param scapy.packet.Packet pkt: Scapy Packet
:return: None |
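A hedged sketch of wiring the callback into a scapy sniff; the interface name is a placeholder, and mac_id_list plus pkt_text come from the callback's own module.
from scapy.all import sniff

# Passive discovery: call discovery_print for every captured frame.
sniff(prn=discovery_print, store=False, timeout=30, iface="eth0")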
7,199 | def deallocate_ip(self, hostipaddress):
delete_host_from_segment(hostipaddress, self.netaddr, self.auth, self.url) | Object method that takes a host IP address as input and removes it from the parent IP scope.
:param hostipaddress: str of the host IP address of the target host IP record
:return: |