Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
380,300 | def __view_add_actions(self):
self.Components_Manager_Ui_treeView.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.ComponentsManagerUi|Activate Component(s)",
slot=self.__view_activate_components_action__triggered))
self.Components_Manager_Ui_treeView.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.ComponentsManagerUi|Deactivate Component(s)",
slot=self.__view_deactivate_components_action__triggered))
separator_action = QAction(self.Components_Manager_Ui_treeView)
separator_action.setSeparator(True)
self.Components_Manager_Ui_treeView.addAction(separator_action)
self.Components_Manager_Ui_treeView.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.ComponentsManagerUi|Reload Component(s)",
slot=self.__view_reload_components_action__triggered))
separator_action = QAction(self.Components_Manager_Ui_treeView)
separator_action.setSeparator(True)
self.Components_Manager_Ui_treeView.addAction(separator_action) | Sets the **Components_Manager_Ui_treeView** actions. |
380,301 | def name(self):
if self.chosen_name:
return self.chosen_name
else:
name = self.process.get_name()
if name:
return os.path.basename(name)
return | The name for the window as displayed in the title bar and status bar. |
380,302 | def support_scripting(self):
if not hasattr(self, '_support_scripting'):
try:
self._support_scripting = self.redis_version >= (2, 5) \
and hasattr(self.connection, 'register_script')  # attribute name assumed; original literal lost in extraction
except:
self._support_scripting = False
return self._support_scripting | Returns True if scripting is available. Checks are done in the client
library (redis-py) AND the redis server. Result is cached, so done only
one time. |
380,303 | def add_node(self, id, label=None, type='CLASS', meta=None):  # default value assumed; original literal lost in extraction
g = self.get_graph()
if meta is None:
meta={}
g.add_node(id, label=label, type=type, meta=meta) | Add a new node to the ontology |
380,304 | def moment1(self):
delays, response = self.delay_response_series
return statstools.calc_mean_time(delays, response) | The first time delay weighted statistical moment of the
instantaneous unit hydrograph. |
380,305 | def nl_msg_in_handler_debug(msg, arg):
ofd = arg or _LOGGER.debug
ofd('-- debugging received message:')  # message text assumed; original literal lost in extraction
nl_msg_dump(msg, ofd)
return NL_OK | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/handlers.c#L114. |
380,306 | def assemble_concatenated_meta(concated_meta_dfs, remove_all_metadata_fields):
if remove_all_metadata_fields:
for df in concated_meta_dfs:
df.drop(df.columns, axis=1, inplace=True)
all_concated_meta_df = pd.concat(concated_meta_dfs, axis=0)
n_rows = all_concated_meta_df.shape[0]
logger.debug("all_concated_meta_df.shape[0]: {}".format(n_rows))
n_rows_cumulative = sum([df.shape[0] for df in concated_meta_dfs])
assert n_rows == n_rows_cumulative
all_concated_meta_df_sorted = all_concated_meta_df.sort_index(axis=0).sort_index(axis=1)
return all_concated_meta_df_sorted | Assemble the concatenated metadata dfs together. For example,
if horizontally concatenating, the concatenated metadata dfs are the
column metadata dfs. Both indices are sorted.
Args:
concated_meta_dfs (list of pandas dfs)
Returns:
all_concated_meta_df_sorted (pandas df) |
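A minimal usage sketch of assemble_concatenated_meta with two tiny, hypothetical metadata frames (assumes pandas is imported as pd and the module-level logger exists, as in the original module):
import pandas as pd
df1 = pd.DataFrame({"pert_type": ["trt", "ctl"]}, index=["sample_1", "sample_2"])
df2 = pd.DataFrame({"pert_type": ["trt"]}, index=["sample_3"])
combined = assemble_concatenated_meta([df1, df2], remove_all_metadata_fields=False)
print(combined.shape)  # (3, 1); rows and columns are sorted by index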
380,307 | def check_length_of_initial_values(self, init_values):
num_nests = self.rows_to_nests.shape[1]
num_index_coefs = self.design.shape[1]
assumed_param_dimensions = num_index_coefs + num_nests
if init_values.shape[0] != assumed_param_dimensions:
msg = "The initial values are of the wrong dimension"
msg_1 = "It should be of dimension {}"
msg_2 = "But instead it has dimension {}"
raise ValueError(msg +
msg_1.format(assumed_param_dimensions) +
msg_2.format(init_values.shape[0]))
return None | Ensures that the initial values are of the correct length. |
380,308 | def template_class_from_name(name):
term = TerminalView()
template_name = name +
try:
__import__( + template_name)
template_mod = sys.modules[ + template_name]
except ImportError:
term.print_error_and_exit("Unable to find {}".format(name))
try:
template_class = getattr(template_mod, template_name)
except AttributeError:
term.print_error_and_exit("Unable to create a template {}".format(name))
return template_class() | Return the template class object from a given name. |
380,309 | def train(*tf_records: "Records to train on"):
tf.logging.set_verbosity(tf.logging.INFO)
estimator = dual_net.get_estimator()
effective_batch_size = FLAGS.train_batch_size
if FLAGS.use_tpu:
effective_batch_size *= FLAGS.num_tpu_cores
if FLAGS.use_tpu:
if FLAGS.use_bt:
def _input_fn(params):
games = bigtable_input.GameQueue(
FLAGS.cbt_project, FLAGS.cbt_instance, FLAGS.cbt_table)
games_nr = bigtable_input.GameQueue(
FLAGS.cbt_project, FLAGS.cbt_instance, FLAGS.cbt_table + '-nr')  # suffix assumed (no-resign games table); original literal lost in extraction
return preprocessing.get_tpu_bt_input_tensors(
games,
games_nr,
params['batch_size'],
number_of_games=FLAGS.window_size,
random_rotation=True)
else:
def _input_fn(params):
return preprocessing.get_tpu_input_tensors(
params['batch_size'],
tf_records,
random_rotation=True)
hooks = []
else:
def _input_fn():
return preprocessing.get_input_tensors(
FLAGS.train_batch_size,
tf_records,
filter_amount=FLAGS.filter_amount,
shuffle_buffer_size=FLAGS.shuffle_buffer_size,
random_rotation=True)
hooks = [UpdateRatioSessionHook(FLAGS.work_dir),
EchoStepCounterHook(output_dir=FLAGS.work_dir)]
steps = FLAGS.steps_to_train
logging.info("Training, steps = %s, batch = %s -> %s examples",
steps or '?', effective_batch_size,
(steps * effective_batch_size) if steps else '?')  # placeholder text assumed; original literals lost in extraction
if FLAGS.use_bt:
games = bigtable_input.GameQueue(
FLAGS.cbt_project, FLAGS.cbt_instance, FLAGS.cbt_table)
if not games.read_wait_cell():
games.require_fresh_games(20000)
latest_game = games.latest_game_number
index_from = max(latest_game, games.read_wait_cell())
print("== Last game before training:", latest_game, flush=True)
print("== Wait cell:", games.read_wait_cell(), flush=True)
try:
estimator.train(_input_fn, steps=steps, hooks=hooks)
if FLAGS.use_bt:
bigtable_input.set_fresh_watermark(games, index_from,
FLAGS.window_size)
except:
if FLAGS.use_bt:
games.require_fresh_games(0)
raise | Train on examples. |
380,310 | def _set_value(self, new_value):
if self.min_value is not None and new_value < self.min_value:
raise SettingOutOfBounds(
"Trying to set parameter {0} = {1}, which is less than the minimum allowed {2}".format(
self.name, new_value, self.min_value))
if self.max_value is not None and new_value > self.max_value:
raise SettingOutOfBounds(
"Trying to set parameter {0} = {1}, which is more than the maximum allowed {2}".format(
self.name, new_value, self.max_value))
if self.has_auxiliary_variable():
with warnings.catch_warnings():
warnings.simplefilter("always", RuntimeWarning)
warnings.warn("You are trying to assign to a parameter which is either linked or "
"has auxiliary variables. The assignment has no effect.", RuntimeWarning)
if self._transformation is None:
new_internal_value = new_value
else:
new_internal_value = self._transformation.forward(new_value)
if new_internal_value != self._internal_value:
self._internal_value = new_internal_value
for callback in self._callbacks:
try:
callback(self)
except:
raise NotCallableOrErrorInCall("Could not call callback for parameter %s" % self.name) | Sets the current value of the parameter, ensuring that it is within the allowed range. |
380,311 | def predict(data, training_dir=None, model_name=None, model_version=None, cloud=False):
if cloud:
if not model_version or not model_name:
raise ValueError()
if training_dir:
raise ValueError()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return cloud_predict(model_name, model_version, data)
else:
if not training_dir:
raise ValueError()
if model_version or model_name:
raise ValueError(
)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return local_predict(training_dir, data) | Runs prediction locally or on the cloud.
Args:
data: List of csv strings or a Pandas DataFrame that match the model schema.
training_dir: local path to the trained output folder.
model_name: deployed model name
model_version: deployed model version
cloud: bool. If False, does local prediction and data and training_dir
must be set. If True, does cloud prediction and data, model_name,
and model_version must be set.
For cloud prediction, the model must be created. This can be done by running
two gcloud commands::
1) gcloud beta ml models create NAME
2) gcloud beta ml versions create VERSION --model NAME --origin gs://BUCKET/training_dir/model
or these datalab commands:
1) import google.datalab as datalab
model = datalab.ml.ModelVersions(MODEL_NAME)
model.deploy(version_name=VERSION, path='gs://BUCKET/training_dir/model')
Note that the model must be on GCS.
Returns:
Pandas DataFrame. |
380,312 | def _get_styles(self, style_urls, asset_url_path):
styles = []
for style_url in style_urls:
urls_inline = STYLE_ASSET_URLS_INLINE_FORMAT.format(
asset_url_path.rstrip())
asset_content = self._download(style_url)
content = re.sub(urls_inline, self._match_asset, asset_content)
styles.append(content)
return styles | Gets the content of the given list of style URLs and
inlines assets. |
380,313 | def pv_absent(name):
# NOTE: dictionary keys, execution-module names and messages below are assumed
# to match SaltStack's lvm state/execution modules; the original string
# literals were lost in extraction.
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
if not __salt__['lvm.pvdisplay'](name, quiet=True):
ret['comment'] = 'Physical Volume {0} does not exist'.format(name)
elif __opts__['test']:
ret['comment'] = 'Physical Volume {0} is set to be removed'.format(name)
ret['result'] = None
return ret
else:
changes = __salt__['lvm.pvremove'](name)
if __salt__['lvm.pvdisplay'](name, quiet=True):
ret['comment'] = 'Failed to remove Physical Volume {0}'.format(name)
ret['result'] = False
else:
ret['comment'] = 'Removed Physical Volume {0}'.format(name)
ret['changes']['removed'] = changes
return ret | Ensure that a Physical Device is not being used by lvm
name
The device name to initialize. |
380,314 | def make_venv(self, dj_version):
venv_path = self._get_venv_path(dj_version)
self.logger.info('Creating virtual environment for Django %s ...' % dj_version)  # message text assumed; original literal lost in extraction
try:
create_venv(venv_path, **VENV_CREATE_KWARGS)
except ValueError:
self.logger.warning('Virtual environment already exists; reusing it')  # message text assumed; original literal lost in extraction
self.venv_install('django==%s' % dj_version, venv_path)  # package spec assumed; original literal lost in extraction
return venv_path | Creates a virtual environment for a given Django version.
:param str dj_version:
:rtype: str
:return: path to created virtual env |
380,315 | def header(self, name, default=None):
wsgi_header = "HTTP_{0}".format(name.upper())
try:
return self.env_raw[wsgi_header]
except KeyError:
return default | Returns the value of the HTTP header identified by `name`. |
380,316 | def execute(self, request):
url = request.uri
if request.parameters:
url += + urlencode(request.parameters)
if request.headers:
headers = dict(self._headers, **request.headers)
else:
headers = self._headers
retry = 0
server = getattr(self._local, "server", None)
while True:
if not server:
self._local.server = server = self._get_server()
try:
parse_result = urlparse(server)
conn = get_pool().connection_from_host(parse_result.hostname,
parse_result.port,
parse_result.scheme)
kwargs = dict(
method=Method._VALUES_TO_NAMES[request.method],
url=parse_result.path + url,
body=request.body,
headers=headers,
timeout=self._timeout,
)
response = conn.urlopen(**kwargs)
return RestResponse(status=response.status,
body=response.data,
headers=response.headers)
except (IOError, urllib3.exceptions.HTTPError) as ex:
self._drop_server(server)
self._local.server = server = None
if retry >= self._max_retries:
logger.error("Client error: bailing out after %d failed retries",
self._max_retries, exc_info=1)
raise NoServerAvailable(ex)
logger.exception("Client error: %d retries left", self._max_retries - retry)
retry += 1 | Execute a request and return a response |
380,317 | def convex_hull_image(image):
labels = image.astype(int)
points, counts = convex_hull(labels, np.array([1]))
output = np.zeros(image.shape, int)
for i in range(counts[0]):
inext = (i+1) % counts[0]
draw_line(output, points[i,1:], points[inext,1:],1)
output = fill_labeled_holes(output)
return output == 1 | Given a binary image, return an image of the convex hull |
380,318 | def set_speed(self, value):
self._combined_speed = float(value)
speed_per_min = int(self._combined_speed * SEC_PER_MIN)
command = GCODES['SET_SPEED'] + str(speed_per_min)  # dictionary key assumed; original literal lost in extraction
log.debug("set_speed: {}".format(command))
self._send_command(command) | set total axes movement speed in mm/second |
380,319 | def dict_to_etree(d, root):
def _to_etree(d, node):
if d is None or len(d) == 0:
return
elif isinstance(d, basestring):
node.text = d
elif isinstance(d, dict):
for k, v in d.items():
assert isinstance(k, basestring)
if k.startswith('#'):
assert k == '#text'
assert isinstance(v, basestring)
node.text = v
elif k.startswith('@'):
assert isinstance(v, basestring)
node.set(k[1:], v)
elif isinstance(v, list):
sub_elem = etree.SubElement(node, k)
for child_num, e in enumerate(v):
if e is None:
if child_num == 0:
continue
_to_etree(node, k)
else:
_to_etree(d, root)
return root | u"""Converts a dict to lxml.etree object.
>>> dict_to_etree({'root': {'#text': 'node_text', '@attr': 'val'}}, etree.Element('root')) # doctest: +ELLIPSIS
<Element root at 0x...>
:param dict d: dict representing the XML tree
:param etree.Element root: XML node which will be assigned the resulting tree
:returns: Textual representation of the XML tree
:rtype: str |
380,320 | def main(args=sys.argv[1:]):
opt = docopt(main.__doc__.strip(), args, options_first=True)
config_logging(opt[])
if opt[]:
check_backends(opt[])
elif opt[]:
handler = fulltext.get
if opt[]:
handler = _handle_open
for path in opt[]:
print(handler(path))
else:
raise ValueError("don't know how to handle cmd") | Extract text from a file.
Commands:
extract - extract text from path
check - make sure all deps are installed
Usage:
fulltext extract [-v] [-f] <path>...
fulltext check [-t]
Options:
-f, --file Open file first.
-t, --title Check deps for title.
-v, --verbose More verbose output. |
380,321 | def multi_curve_fit(xs, ys, verbose):
functions = {
exponential: p0_exponential,
reciprocal: p0_reciprocal,
simple_reciprocal: p0_simple_reciprocal,
simple_2reciprocal: p0_simple_2reciprocal,
simple_4reciprocal: p0_simple_4reciprocal,
simple_5reciprocal: p0_simple_5reciprocal
}
from scipy.optimize import curve_fit
fit_results = {}
best = [, np.inf]
for function in functions:
try:
weights = get_weights(xs, ys)
popt, pcov = curve_fit(function, xs, ys, functions[function](xs, ys), maxfev=8000, sigma=weights)
pcov = []
m = measure(function, xs, ys, popt, weights)
fit_results.update({function: {: m, : popt, : pcov}})
for f in fit_results:
if fit_results[f][] <= best[1]:
best = f, fit_results[f][]
if verbose:
print(str(function), m)
except RuntimeError:
print(, function)
return fit_results[best[0]][], fit_results[best[0]][], best | fit multiple functions to the x, y data, return the best fit |
380,322 | def get_channel_id(turn_context: TurnContext) -> str:
if turn_context.activity.channel_id is None:
return ""
else:
return turn_context.activity.channel_id | Get the Channel Id from the current Activity on the Turn Context.
Args:
turn_context (TurnContext): The Turn Context to retrieve the Activity's Channel Id from.
Returns:
str: The Channel Id from the Turn Context's Activity. |
380,323 | def _get_desired_pkg(name, desired):
# NOTE: string literals below are assumed to match SaltStack's pkg state
# module; the originals were lost in extraction.
if not desired[name] or desired[name].startswith(('<', '>', '=')):
oper = ''
else:
oper = '='
return '{0}{1}{2}'.format(name, oper,
'' if not desired[name] else desired[name]) | Helper function that retrieves and nicely formats the desired pkg (and
version if specified) so that helpful information can be printed in the
comment for the state. |
380,324 | def enrich_relations(rdf, enrich_mappings, use_narrower, use_transitive):
if enrich_mappings:
infer.skos_symmetric_mappings(rdf)
infer.skos_hierarchical_mappings(rdf, use_narrower)
infer.skos_related(rdf)
for s, o in rdf.subject_objects(SKOSEXT.broaderGeneric):
rdf.add((s, SKOS.broader, o))
for s, o in rdf.subject_objects(SKOSEXT.broaderPartitive):
rdf.add((s, SKOS.broader, o))
infer.skos_hierarchical(rdf, use_narrower)
if use_transitive:
infer.skos_transitive(rdf, use_narrower)
else:
for s, o in rdf.subject_objects(SKOS.broaderTransitive):
rdf.remove((s, SKOS.broaderTransitive, o))
for s, o in rdf.subject_objects(SKOS.narrowerTransitive):
rdf.remove((s, SKOS.narrowerTransitive, o))
infer.skos_topConcept(rdf) | Enrich the SKOS relations according to SKOS semantics, including
subproperties of broader and symmetric related properties. If use_narrower
is True, include inverse narrower relations for all broader relations. If
use_narrower is False, instead remove all narrower relations, replacing
them with inverse broader relations. If use_transitive is True, calculate
transitive hierarchical relationships.
(broaderTransitive, and also narrowerTransitive if use_narrower is
True) and include them in the model. |
380,325 | def show_taghistory():
if not nav:
sys.exit(1)
ecode = 0
try:
result = nav.get_taghistory()
if result:
anchore_utils.print_result(config, result)
except:
anchore_print_err("operation failed")
ecode = 1
contexts[].clear()
sys.exit(ecode) | Show history of all known repo/tags for image |
380,326 | def toFilter(self, property):
if self.leftedge == self.rightedge and self.leftop is ge and self.rightop is le:
return Filter(style.SelectorAttributeTest(property, '=', self.leftedge))  # operator string assumed; original literal lost in extraction
try:
return Filter(style.SelectorAttributeTest(property, opstr[self.leftop], self.leftedge),
style.SelectorAttributeTest(property, opstr[self.rightop], self.rightedge))
except KeyError:
try:
return Filter(style.SelectorAttributeTest(property, opstr[self.rightop], self.rightedge))
except KeyError:
try:
return Filter(style.SelectorAttributeTest(property, opstr[self.leftop], self.leftedge))
except KeyError:
return Filter() | Convert this range to a Filter with a tests having a given property. |
380,327 | def _set_get_vnetwork_hosts(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_vnetwork_hosts.get_vnetwork_hosts, is_leaf=True, yang_name="get-vnetwork-hosts", rest_name="get-vnetwork-hosts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u: {u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "rpc",
: ,
})
self.__get_vnetwork_hosts = t
if hasattr(self, ):
self._set() | Setter method for get_vnetwork_hosts, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_hosts (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_vnetwork_hosts is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_vnetwork_hosts() directly.
YANG Description: Shows discovered hosts |
380,328 | def get_instruction(self, idx, off=None):
if self.code is not None:
return self.code.get_bc().get_instruction(idx, off)
return None | Get a particular instruction, using by default its index, or its address if specified
:param idx: index of the instruction (the position in the list of the instruction)
:type idx: int
:param off: address of the instruction
:type off: int
:rtype: an :class:`Instruction` object |
380,329 | def get_params_type(descriptor):
params = descriptor.split(')')[0][1:].split(' ')
if params:
return [param for param in params]
return [] | Return the parameters type of a descriptor (e.g (IC)V) |
380,330 | def _repr_html_(self):
if self._info_repr():
buf = StringIO("")
self.info(buf=buf)
val = buf.getvalue().replace('<', r'&lt;', 1)
val = val.replace('>', r'&gt;', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions, notebook=True)
else:
return None | Return a html representation for a particular DataFrame.
Mainly for IPython notebook. |
380,331 | def _init_edges_relationships(rel2src2dsts, rel2dst2srcs):
edge_rel2fromto = {}
relationships = set(rel2src2dsts).union(rel2dst2srcs)
for reltype in relationships:
edge_from_to = []
if reltype in rel2src2dsts:
for parent, children in rel2src2dsts[reltype].items():
for child in children:
edge_from_to.append((child, parent))
if reltype in rel2dst2srcs:
for parent, children in rel2dst2srcs[reltype].items():
for child in children:
edge_from_to.append((child, parent))
edge_rel2fromto[reltype] = edge_from_to
return edge_rel2fromto | Get the directed edges from GO term to GO term using relationships. |
380,332 | def log_calls(function):
def wrapper(self,*args,**kwargs):
self.log.log(group=function.__name__, message='call started')  # message text assumed; original literal lost in extraction
function(self, *args, **kwargs)
self.log.log(group=function.__name__, message='call finished')  # message text assumed; original literal lost in extraction
return wrapper | Decorator that logs function calls in their self.log |
380,333 | def line_break(s, length=76):
x = '\n'.join(s[pos:pos + length] for pos in range(0, len(s), length))
return x | Split a string into lines of at most ``length`` characters.
:param s: the input string
:param length: maximum number of characters per line
:return: the string with newline breaks inserted |
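A quick usage sketch of line_break on a hypothetical 160-character string:
wrapped = line_break("A" * 160, length=76)
print(len(wrapped.splitlines()))  # 3 lines: 76 + 76 + 8 characters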
380,334 | def run_sex_check(in_prefix, in_type, out_prefix, base_dir, options):
os.mkdir(out_prefix)
required_type = "bfile"
check_input_files(in_prefix, in_type, required_type)
script_prefix = os.path.join(out_prefix, "sexcheck")
options += ["--{}".format(required_type), in_prefix,
"--out", script_prefix]
try:
sex_check.main(options)
except sex_check.ProgramError as e:
msg = "sex_check {}".format(e)
raise ProgramError(msg)
hetero = {}
if os.path.isfile(script_prefix + ".chr23_recodeA.raw.hetero"):
with open(script_prefix + ".chr23_recodeA.raw.hetero", "r") as i_file:
header = {
name: i for i, name in
enumerate(createRowFromPlinkSpacedOutput(i_file.readline()))
}
for required_col in ("PED", "ID", "HETERO"):
if required_col not in header:
msg = "{}: no column named {}".format(
script_prefix + ".chr23_recodeA.raw.hetero",
required_col,
)
raise ProgramError(msg)
for line in i_file:
row = line.rstrip("\r\n").split("\t")
famid = row[header["PED"]]
indid = row[header["ID"]]
het = None
try:
het = "{:.4f}".format(float(row[header["HETERO"]]))
except:
het = "N/A"
hetero[(famid, indid)] = het
nb_no_call = {}
if os.path.isfile(script_prefix + ".chr24_recodeA.raw.noCall"):
with open(script_prefix + ".chr24_recodeA.raw.noCall", "r") as i_file:
header = {
name: i for i, name in
enumerate(createRowFromPlinkSpacedOutput(i_file.readline()))
}
for required_col in ("PED", "ID", "nbGeno", "nbNoCall"):
if required_col not in header:
msg = "{}: no column named {}".format(
script_prefix + ".chr24_recodeA.raw.noCall",
required_col,
)
raise ProgramError(msg)
for line in i_file:
row = line.rstrip("\r\n").split("\t")
famid = row[header["PED"]]
indid = row[header["ID"]]
nb_geno = row[header["nbGeno"]]
nb_nocall = row[header["nbNoCall"]]
percent = None
try:
percent = "{:.4f}".format(
float(nb_nocall) / float(nb_geno),
)
except:
percent = "N/A"
nb_no_call[(famid, indid)] = percent
nb_problems = 0
table = []
nb_no_genetic = 0
nb_discordant = 0
with open(script_prefix + ".list_problem_sex", "r") as i_file:
header = i_file.readline().rstrip("\r\n").split("\t")
table.append(header)
header = {name: i for i, name in enumerate(header)}
for required_col in ("FID", "IID", "SNPSEX"):
if required_col not in header:
msg = "{}: no column named {}".format(
script_prefix + ".list_problem_sex",
required_col,
)
raise ProgramError(msg)
table[-1].append("HET")
table[-1].append(r"\%NOCALL")
for line in i_file:
nb_problems += 1
row = line.rstrip("\r\n").split("\t")
if row[header["SNPSEX"]] == "0":
nb_no_genetic += 1
else:
nb_discordant += 1
table.append([
latex_template.sanitize_tex(row[header[name]])
for name in ("FID", "IID", "PEDSEX", "SNPSEX", "STATUS", "F")
])
table[-1].append(
hetero.get((row[header["FID"]], row[header["IID"]]), "N/A"),
)
table[-1].append(
nb_no_call.get((row[header["FID"]], row[header["IID"]]), "N/A")
)
male_f = sex_check.parser.get_default("maleF")
if "--maleF" in options:
male_f = options[options.index("--maleF") + 1]
female_f = sex_check.parser.get_default("femaleF")
if "--femaleF" in options:
female_f = options[options.index("--femaleF") + 1]
latex_file = os.path.join(script_prefix + ".summary.tex")
graphics_paths = set()
try:
with open(latex_file, "w") as o_file:
print >>o_file, latex_template.subsection(sex_check.pretty_name)
text = (
"Using $F$ thresholds of {male_f} and {female_f} for males "
"and females respectively, {nb_problems:,d} sample{plural} "
"had gender problem according to Plink.".format(
male_f=male_f,
female_f=female_f,
nb_problems=nb_problems,
plural="s" if nb_problems > 1 else "",
)
)
print >>o_file, latex_template.wrap_lines(text)
float_template = latex_template.jinja2_env.get_template(
"float_template.tex",
)
if nb_problems > 0:
table_label = re.sub(
r"[/\\]",
"_",
script_prefix,
) + "_problems"
text = (
r"Table~\ref{" + table_label + "} summarizes the gender "
"problems encountered during the analysis."
)
print >>o_file, latex_template.wrap_lines(text)
longtable_template = latex_template.jinja2_env.get_template(
"longtable_template.tex",
)
print >>o_file, longtable_template.render(
table_caption="Summarization of the gender problems "
"encountered during Plinksexcheck.LRR_BAF' directory, then there are LRR
if os.path.isdir(script_prefix + ".LRR_BAF"):
figures = glob(
os.path.join(script_prefix + ".LRR_BAF", "*.png"),
)
if len(figures) > 0:
sample_ids = [
re.search(
"^baf_lrr_(\S+)_lrr_baf.png$",
os.path.basename(figure),
) for figure in figures
]
sample_ids = [
"unknown sample" if not sample else sample.group(1)
for sample in sample_ids
]
sorted_indexes = sorted(range(len(figures)),
key=figures.__getitem__)
figures = [figures[i] for i in sorted_indexes]
sample_ids = [sample_ids[i] for i in sorted_indexes]
labels = [
re.sub(
r"[/\\]",
"_",
script_prefix + "_baf_lrr_" +
os.path.splitext(sample)[0],
) for sample in sample_ids
]
fig_1 = labels[0]
fig_2 = ""
if len(figures) > 1:
fig_2 = labels[-1]
text = (
"Figure" + ("s" if len(figures) > 1 else "") +
r"~\ref{" + fig_1 + "} " +
(r"to \ref{" + fig_2 + "} " if fig_2 else "") +
"show" + (" " if len(figures) > 1 else "s ") + "the "
"log R ratio and the B allele frequency versus the "
"position on chromosome X and Y for the problematic "
"sample{}.".format("s" if len(figures) > 1 else "")
)
print >>o_file, latex_template.wrap_lines(text)
zipped = zip(figures, sample_ids, labels)
for figure, sample_id, label in zipped:
sample_id = latex_template.sanitize_tex(sample_id)
graphics_path, path = os.path.split(figure)
graphics_path = os.path.relpath(graphics_path,
base_dir)
caption = (
"Plots showing the log R ratio and the B allele "
"frequency for chromosome X and Y (on the left "
"and right, respectively) for sample "
"{}.".format(sample_id)
)
print >>o_file, float_template.render(
float_type="figure",
float_placement="H",
float_caption=caption,
float_label=label,
float_content=graphic_template.render(
width=r"\textwidth",
path=latex_template.sanitize_fig_name(path),
),
)
graphics_paths.add(graphics_path)
except IOError:
msg = "{}: cannot write LaTeX summary".format(latex_file)
raise ProgramError(msg)
with open(os.path.join(base_dir, "results_summary.txt"), "a") as o_file:
print >>o_file, "
print >>o_file, "Number of samples with gender problem"
print >>o_file, " - no genetic gender\t{:,d}".format(nb_no_genetic)
print >>o_file, " - discordant gender\t{:,d}".format(nb_discordant)
print >>o_file, "---"
return _StepResult(
next_file=in_prefix,
next_file_type=required_type,
latex_summary=latex_file,
description=sex_check.desc,
long_description=sex_check.long_desc,
graph_path=graphics_paths,
) | Runs step6 (sexcheck).
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``bfile``).
This function calls the :py:mod:`pyGenClean.SexCheck.sex_check` module. The
required file type for this module is ``bfile``, hence the need to use the
:py:func:`check_input_files` to check if the file input file type is the
good one, or to create it if needed.
.. note::
The :py:mod:`pyGenClean.SexCheck.sex_check` module doesn't return
usable output files. Hence, this function returns the input file prefix
and its type. |
380,335 | def restore(name=None, **kwargs):
if not status(name):
raise CommandExecutionError()
frozen_pkgs = {}
frozen_repos = {}
for name, content in zip(_paths(name), (frozen_pkgs, frozen_repos)):
with fopen(name) as fp:
content.update(json.load(fp))
safe_kwargs = clean_kwargs(**kwargs)
res = {
: {: [], : []},
: {: [], : []},
: [],
}
repos = __salt__[](**safe_kwargs)
missing_repos = set(frozen_repos) - set(repos)
for repo in missing_repos:
try:
_tmp_kwargs = frozen_repos[repo].copy()
_tmp_kwargs.update(safe_kwargs)
__salt__[](repo, **_tmp_kwargs)
res[][].append(repo)
log.info(, repo)
except Exception as e:
msg =
log.error(msg, repo, e)
res[].append(msg % (repo, e))
pkgs = __salt__[](**safe_kwargs)
missing_pkgs = set(frozen_pkgs) - set(pkgs)
for pkg in missing_pkgs:
try:
__salt__[](name=pkg, **safe_kwargs)
res[][].append(pkg)
log.info(, pkg)
except Exception as e:
msg =
log.error(msg, pkg, e)
res[].append(msg % (pkg, e))
pkgs = __salt__[](**safe_kwargs)
extra_pkgs = set(pkgs) - set(frozen_pkgs)
for pkg in extra_pkgs:
try:
__salt__[](name=pkg, **safe_kwargs)
res[][].append(pkg)
log.info(, pkg)
except Exception as e:
msg =
log.error(msg, pkg, e)
res[].append(msg % (pkg, e))
repos = __salt__[](**safe_kwargs)
extra_repos = set(repos) - set(frozen_repos)
for repo in extra_repos:
try:
__salt__[](repo, **safe_kwargs)
res[][].append(repo)
log.info(, repo)
except Exception as e:
msg =
log.error(msg, repo, e)
res[].append(msg % (repo, e))
return res | Make sure that the system contains the packages and repos from a
frozen state.
Read the list of packages and repositories from the freeze file,
and compare it with the current list of packages and repos. If
there is any difference, all the missing packages and repos will
be installed, and all the extra packages and repos will be
removed.
As this module is built on top of the pkg module, the user can
send extra attributes to the underlying pkg module via kwargs.
This function will call ``pkg.list_repos``, ``pkg.mod_repo``,
``pkg.list_pkgs``, ``pkg.install``, ``pkg.remove`` and
``pkg.del_repo``, and any additional arguments will be passed
through to those functions.
name
Name of the frozen state. Optional.
CLI Example:
.. code-block:: bash
salt '*' freezer.restore
salt '*' freezer.restore root=/chroot |
380,336 | def onReactionRemoved(
self,
mid=None,
author_id=None,
thread_id=None,
thread_type=None,
ts=None,
msg=None,
):
log.info(
"{} removed reaction from {} message in {} ({})".format(
author_id, mid, thread_id, thread_type
)
) | Called when the client is listening, and somebody removes reaction from a message
:param mid: Message ID, that user reacted to
:param author_id: The ID of the person who removed reaction
:param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
:param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
:param ts: A timestamp of the action
:param msg: A full set of the data recieved
:type thread_type: models.ThreadType |
380,337 | def mimeData( self, items ):
func = self.dataCollector()
if ( func ):
return func(self, items)
return super(XTableWidget, self).mimeData(items) | Returns the mime data for dragging for this instance.
:param items | [<QTableWidgetItem>, ..] |
380,338 | def messages(self):
if self._messages is None:
self._messages = MessageList(self._version, session_sid=self._solution['sid'], )
return self._messages | Access the messages
:returns: twilio.rest.messaging.v1.session.message.MessageList
:rtype: twilio.rest.messaging.v1.session.message.MessageList |
380,339 | def copy(self):
stat_result = copy(self)
stat_result.use_float = self.use_float
return stat_result | Return a copy where the float usage is hard-coded to mimic the
behavior of the real os.stat_result. |
380,340 | def delete_all_objects(self, nms, async_=False):
if nms is None:
nms = self.api.list_object_names(self.name, full_listing=True)
return self.api.bulk_delete(self.name, nms, async_=async_) | Deletes all objects from this container.
By default the call will block until all objects have been deleted. By
passing True for the 'async_' parameter, this method will not block, and
instead return an object that can be used to follow the progress of the
deletion. When deletion is complete the bulk deletion object's
'results' attribute will be populated with the information returned
from the API call. In synchronous mode this is the value that is
returned when the call completes. It is a dictionary with the following
keys:
deleted - the number of objects deleted
not_found - the number of objects not found
status - the HTTP return status code. '200 OK' indicates success
errors - a list of any errors returned by the bulk delete call |
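A minimal sketch of the asynchronous mode described in the docstring (the container object and the waiting step are hypothetical):
bulk = container.delete_all_objects(None, async_=True)  # returns immediately
# ... once the bulk deletion has finished, inspect the API response
print(bulk.results.get("deleted"), bulk.results.get("errors"))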
380,341 | def remove(self, key, value):
check_not_none(key, "key cant be none")
return self._encode_invoke(transactional_multi_map_remove_entry_codec, key=self._to_data(key),
value=self._to_data(value)) | Transactional implementation of :func:`MultiMap.remove(key, value)
<hazelcast.proxy.multi_map.MultiMap.remove>`
:param key: (object), the key of the entry to remove.
:param value: (object), the value of the entry to remove.
:return: |
380,342 | def create_build_paths(context: Context):
paths = [context.app.asset_build_path, context.app.screenshots_build_path, context.app.collected_assets_path]
for path in filter(None, paths):
os.makedirs(path, exist_ok=True) | Creates directories needed for build outputs |
380,343 | def lines2mecab(lines, **kwargs):
sents = []
for line in lines:
sent = txt2mecab(line, **kwargs)
sents.append(sent)
return sents | Use mecab to parse many lines |
380,344 | def aggregate(self, block_size):
raster2 = block_reduce(self.raster, block_size, func=np.ma.sum)
geot = self.geot
geot = (geot[0], block_size[0] * geot[1], geot[2], geot[3], geot[4],
block_size[1] * geot[-1])
return GeoRaster(raster2, geot, nodata_value=self.nodata_value,\
projection=self.projection, datatype=self.datatype) | geo.aggregate(block_size)
Returns copy of raster aggregated to smaller resolution, by adding cells. |
380,345 | def list_port_fwd(zone, permanent=True):
ret = []
cmd = .format(zone)
if permanent:
cmd +=
for i in __firewall_cmd(cmd).splitlines():
(src, proto, dest, addr) = i.split()
ret.append(
{: src.split()[1],
: proto.split()[1],
: dest.split()[1],
: addr.split()[1]}
)
return ret | List port forwarding
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' firewalld.list_port_fwd public |
380,346 | def _pull_out_unaffected_blocks_rhs(rest, rhs, out_port, in_port):
_, block_index = rhs.index_in_block(in_port)
rest = tuple(rest)
bs = rhs.block_structure
(nbefore, nblock, nafter) = (sum(bs[:block_index]),
bs[block_index],
sum(bs[block_index + 1:]))
before, block, after = rhs.get_blocks((nbefore, nblock, nafter))
if before != cid(nbefore) or after != cid(nafter):
outer_rhs = before + cid(nblock - 1) + after
inner_rhs = cid(nbefore) + block + cid(nafter)
return Feedback.create(SeriesProduct.create(*(rest + (inner_rhs,))),
out_port=out_port, in_port=in_port) << outer_rhs
elif block == cid(nblock):
outer_rhs = before + cid(nblock - 1) + after
return Feedback.create(SeriesProduct.create(*rest),
out_port=out_port, in_port=in_port) << outer_rhs
raise CannotSimplify() | Similar to :func:`_pull_out_unaffected_blocks_lhs` but on the RHS of a
series product self-feedback. |
380,347 | def configure_logging(level):
global logging_level
logging_level = logging.ERROR
if "info" == level.lower():
logging_level = logging.INFO
elif "warn" == level.lower():
logging_level = logging.WARNING
elif "debug" == level.lower():
logging_level = logging.DEBUG | Configure global log level to given one
:param level: Level (INFO | DEBUG | WARN | ERROR)
:return: |
380,348 | def _get_grammar_errors(self,pos,text,tokens):
word_counts = [max(len(t),1) for t in tokens]
good_pos_tags = []
min_pos_seq=2
max_pos_seq=4
bad_pos_positions=[]
for i in xrange(0, len(text)):
pos_seq = [tag[1] for tag in pos[i]]
pos_ngrams = util_functions.ngrams(pos_seq, min_pos_seq, max_pos_seq)
long_pos_ngrams=[z for z in pos_ngrams if z.count()==(max_pos_seq-1)]
bad_pos_tuples=[[z,z+max_pos_seq] for z in xrange(0,len(long_pos_ngrams)) if long_pos_ngrams[z] not in self._good_pos_ngrams]
bad_pos_tuples.sort(key=operator.itemgetter(1))
to_delete=[]
for m in reversed(xrange(len(bad_pos_tuples)-1)):
start, end = bad_pos_tuples[m]
for j in xrange(m+1, len(bad_pos_tuples)):
lstart, lend = bad_pos_tuples[j]
if lstart >= start and lstart <= end:
bad_pos_tuples[m][1]=bad_pos_tuples[j][1]
to_delete.append(j)
fixed_bad_pos_tuples=[bad_pos_tuples[z] for z in xrange(0,len(bad_pos_tuples)) if z not in to_delete]
bad_pos_positions.append(fixed_bad_pos_tuples)
overlap_ngrams = [z for z in pos_ngrams if z in self._good_pos_ngrams]
if (len(pos_ngrams)-len(overlap_ngrams))>0:
divisor=len(pos_ngrams)/len(pos_seq)
else:
divisor=1
if divisor == 0:
divisor=1
good_grammar_ratio = (len(pos_ngrams)-len(overlap_ngrams))/divisor
good_pos_tags.append(good_grammar_ratio)
return good_pos_tags,bad_pos_positions | Internal function to get the number of grammar errors in given text
pos - part of speech tagged text (list)
text - normal text (list)
tokens - list of lists of tokenized text |
380,349 | def render_xml_to_string(template, input, params=None):
xsl_path = find_template_path(template)
result = transform(input, str(xsl_path), params)
return result | Transforms ``input`` using ``template``, which should be an xslt.
:param template: an xslt template name.
:param input: an string that contains xml
:param params: A dictionary containing xslt parameters. Use :func:`~easymode.xslt.prepare_string_param`\
on strings you want to pass in.
:rtype: :class:`unicode` |
380,350 | def load_locale(locale, icu=False):
if locale not in locales:
raise NotImplementedError("The locale %s is not supported" % locale)
if locale not in __locale_caches:
mod = __import__(__name__, fromlist=[locale], level=0)
__locale_caches[locale] = getattr(mod, locale)
return __locale_caches[locale] | Return data of locale
:param locale:
:return: |
380,351 | def _compute_f3(self, C, mag):
if mag <= 5.8:
return C[]
elif 5.8 < mag < C[]:
return (
C[] +
(C[] - C[]) * (mag - 5.8) / (C[] - 5.8)
)
else:
return C[] | Compute f3 term (eq.6, page 106)
NOTE: In the original manuscript, for the case 5.8 < mag < c1,
the term in the numerator '(mag - 5.8)' is missing, while is
present in the software used for creating the verification tables |
380,352 | def size(self):
return (0 if self.shape == () else
int(np.prod(self.shape, dtype=))) | Total number of grid points. |
380,353 | def domains(request):
url =
query =
if settings.SEARCH_TYPE == :
url = % (settings.SEARCH_URL, query)
if settings.SEARCH_TYPE == :
url = % (settings.SEARCH_URL, query)
LOGGER.debug(url)
response = urllib2.urlopen(url)
data = response.read().replace(, )
layers_count = Layer.objects.all().count()
services_count = Service.objects.all().count()
template = loader.get_template()
context = RequestContext(request, {
: data,
: layers_count,
: services_count,
})
return HttpResponse(template.render(context)) | A page with number of services and layers faceted on domains. |
380,354 | def data_vector_from_blurred_mapping_matrix_and_data(blurred_mapping_matrix, image_1d, noise_map_1d):
mapping_shape = blurred_mapping_matrix.shape
data_vector = np.zeros(mapping_shape[1])
for image_index in range(mapping_shape[0]):
for pix_index in range(mapping_shape[1]):
data_vector[pix_index] += image_1d[image_index] * \
blurred_mapping_matrix[image_index, pix_index] / (noise_map_1d[image_index] ** 2.0)
return data_vector | Compute the hyper vector *D* from a blurred mapping matrix *f* and the 1D image *d* and 1D noise-map *\sigma* \
(see Warren & Dye 2003).
Parameters
-----------
blurred_mapping_matrix : ndarray
The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels.
image_1d : ndarray
Flattened 1D array of the observed image the inversion is fitting.
noise_map_1d : ndarray
Flattened 1D array of the noise-map used by the inversion during the fit. |
380,355 | def dump(self):
data = dict(
sessions_active=self.sess_active,
connections_active=self.conn_active,
connections_ps=self.conn_ps.last_average,
packets_sent_ps=self.pack_sent_ps.last_average,
packets_recv_ps=self.pack_recv_ps.last_average
)
for k, v in self.sess_transports.items():
data[ + k] = v
return data | Return dictionary with current statistical information |
380,356 | def DeserializeTX(buffer):
mstream = MemoryStream(buffer)
reader = BinaryReader(mstream)
tx = Transaction.DeserializeFrom(reader)
return tx | Deserialize the stream into a Transaction object.
Args:
buffer (BytesIO): stream to deserialize the Transaction from.
Returns:
neo.Core.TX.Transaction: |
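A short usage sketch, assuming a hex-encoded raw NEO transaction string is available:
import binascii
raw_tx = binascii.unhexlify(tx_hex)  # tx_hex is a hypothetical hex string
tx = DeserializeTX(raw_tx)
print(tx.Hash)  # attribute name assumed from the neo Transaction class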
380,357 | def reserve(self, location=None, force=False, wait_for_up=True, timeout=80):
if not location or is_local_host(location):
return
hostname, card, port = location.split('/')
chassis = self.root.hw.get_chassis(hostname)
if force:
chassis.get_card(int(card)).get_port(int(port)).release()
try:
phy_port = chassis.get_card(int(card)).get_port(int(port))
except KeyError as _:
raise TgnError(.format(location))
self.set_attributes(commit=True, connectedTo=phy_port.ref)
while self.get_attribute() == :
time.sleep(1)
if wait_for_up:
self.wait_for_up(timeout) | Reserve port and optionally wait for port to come up.
:param location: port location as 'ip/module/port'. If None, the location will be taken from the configuration.
:param force: whether to revoke existing reservation (True) or not (False).
:param wait_for_up: True - wait for port to come up, False - return immediately.
:param timeout: how long (seconds) to wait for port to come up. |
380,358 | def enable_contactgroup_svc_notifications(self, contactgroup):
for contact_id in contactgroup.get_contacts():
self.enable_contact_svc_notifications(self.daemon.contacts[contact_id]) | Enable service notifications for a contactgroup
Format of the line that triggers function call::
ENABLE_CONTACTGROUP_SVC_NOTIFICATIONS;<contactgroup_name>
:param contactgroup: contactgroup to enable
:type contactgroup: alignak.objects.contactgroup.Contactgroup
:return: None |
380,359 | def read_dependencies(filename):
dependencies = []
filepath = os.path.join(, filename)
with open(filepath, ) as stream:
for line in stream:
package = line.strip().split()[0].strip()
if package and package.split()[0] != :
dependencies.append(package)
return dependencies | Read in the dependencies from the virtualenv requirements file. |
380,360 | def run(self):
plays = []
matched_tags_all = set()
unmatched_tags_all = set()
self.callbacks.on_start()
for (play_ds, play_basedir) in zip(self.playbook, self.play_basedirs):
play = Play(self, play_ds, play_basedir)
matched_tags, unmatched_tags = play.compare_tags(self.only_tags)
matched_tags_all = matched_tags_all | matched_tags
unmatched_tags_all = unmatched_tags_all | unmatched_tags
if (len(matched_tags) > 0 or len(play.tasks()) == 0):
plays.append(play)
# NOTE: the lines computing `msg`, `unknown` and `unmatched` were lost in extraction;
# they checked the requested --tags against the matched/unmatched tag sets.
unknown_tags = set(self.only_tags) - (matched_tags_all | unmatched_tags_all)
if len(unknown_tags) > 0:
unmatched = ", ".join(sorted(unmatched_tags_all))
unknown = ", ".join(sorted(unknown_tags))
msg = 'tag(s) not found in playbook: %s.  possible values: %s'
raise errors.AnsibleError(msg % (unknown, unmatched))
for play in plays:
if not self._run_play(play):
break
results = {}
for host in self.stats.processed.keys():
results[host] = self.stats.summarize(host)
return results | run all patterns in the playbook |
380,361 | def write(self, handle):
handle.write(u"\t".join(self.columns))
handle.write(u"\n")
for row in self.rows:
row.write(handle) | Write metadata to handle. |
380,362 | def read(self):
if self._current >= len(self._data):
return None
self._current += 1
return self._data[self._current - 1] | Read one character from buffer.
:Returns:
Current character or None if end of buffer is reached |
380,363 | def pool_function(args):
is_valid = True
try:
checker = emailahoy.VerifyEmail()
status, message = checker.verify_email_smtp(args, from_host=, from_email=)
if status == 250:
print("\t[*] Verification of status: {}. Details:\n\t\t{}".format(general.success(args), general.success("SUCCESS ({})".format(str(status))), message.replace(, )))
is_valid = True
else:
print("\t[*] Verification of status: {}. Details:\n\t\t{}".format(general.error(args), general.error("FAILED ({})".format(str(status))), message.replace(, )))
is_valid = False
except Exception, e:
print(general.warning("WARNING. An error was found when performing the search. You can omit this message.\n" + str(e)))
is_valid = False
aux = {}
aux["type"] = "i3visio.profile"
aux["value"] = "Email - " + args
aux["attributes"] = general.expandEntitiesFromEmail(args)
platform = aux["attributes"][2]["value"].title()
aux["attributes"].append({
"type": "i3visio.platform",
"value": platform,
"attributes": []
}
)
if is_valid:
return {"platform": platform, "status": "DONE", "data": aux}
else:
return {"platform": platform, "status": "DONE", "data": {}} | A wrapper for being able to launch all the threads.
We will use python-emailahoy library for the verification.
Args:
-----
args: reception of the parameters for getPageWrapper as a tuple.
Returns:
--------
A dictionary representing whether the verification was ended
successfully. The format is as follows:
```
{"platform": "str(domain["value"])", "status": "DONE", "data": aux}
``` |
380,364 | def pixel_to_q(self, row: float, column: float):
qrow = 4 * np.pi * np.sin(
0.5 * np.arctan(
(row - float(self.header.beamcentery)) *
float(self.header.pixelsizey) /
float(self.header.distance))) / float(self.header.wavelength)
qcol = 4 * np.pi * np.sin(0.5 * np.arctan(
(column - float(self.header.beamcenterx)) *
float(self.header.pixelsizex) /
float(self.header.distance))) / float(self.header.wavelength)
return qrow, qcol | Return the q coordinates of a given pixel.
Inputs:
row: float
the row (vertical) coordinate of the pixel
column: float
the column (horizontal) coordinate of the pixel
Coordinates are 0-based and calculated from the top left corner. |
380,365 | def collapse(self, msgpos):
MT = self._tree[msgpos]
MT.collapse(MT.root)
self.focus_selected_message() | collapse message at given position |
380,366 | def _cas_2(self):
lonc_left = self._format_lon(self.lonm)
lonc_right = self._format_lon(self.lonM)
latc = self._format_lat(self.latm)
print(lonc_left, lonc_right, self.lonm, self.lonM)
img_name_left = self._format_name_map(lonc_left, latc)
print(img_name_left)
img_left = BinaryTable(img_name_left, self.path_pdsfiles)
X_left, Y_left, Z_left = img_left.extract_grid(self.lonm,
float(
img_left.EASTERNMOST_LONGITUDE),
self.latm,
self.latM)
img_name_right = self._format_name_map(lonc_right, latc)
img_right = BinaryTable(img_name_right, self.path_pdsfiles)
X_right, Y_right, Z_right = img_right.extract_grid(float(img_right.WESTERNMOST_LONGITUDE),
self.lonM,
self.latm,
self.latM)
X_new = np.hstack((X_left, X_right))
Y_new = np.hstack((Y_left, Y_right))
Z_new = np.hstack((Z_left, Z_right))
return X_new, Y_new, Z_new | Longitude overlap (2 images). |
380,367 | def _make_return_edges(self):
for func_addr, func in self.functions.items():
if func.returning is False:
continue
if func.startpoint is None:
l.warning('Function %#x does not have a startpoint (yet)', func_addr)  # message assumed; original literal lost in extraction
continue
startpoint = self.model.get_any_node(func.startpoint.addr)
if startpoint is None:
l.warning('No CFGNode is found for function %#x', func_addr)  # message assumed; original literal lost in extraction
continue
endpoints = self._get_return_sources(func)
callers = self.model.get_predecessors(startpoint, jumpkind='Ijk_Call')  # jumpkind assumed; original literal lost in extraction
return_targets = itertools.chain.from_iterable(
self.model.get_successors(caller, excluding_fakeret=False, jumpkind='Ijk_FakeRet') for caller in callers  # jumpkind assumed; original literal lost in extraction
)
return_targets = set(return_targets)
for ep in endpoints:
src = self.model.get_any_node(ep.addr)
for rt in return_targets:
if not src.instruction_addrs:
ins_addr = None
else:
if self.project.arch.branch_delay_slot:
if len(src.instruction_addrs) > 1:
ins_addr = src.instruction_addrs[-2]
else:
l.error('Expected more than one instruction in block %s; cannot determine the call-site instruction address', src)  # message assumed; original literal lost in extraction
ins_addr = None
else:
ins_addr = src.instruction_addrs[-1]
self._graph_add_edge(rt, src, 'Ijk_Ret', ins_addr, DEFAULT_STATEMENT) | For each returning function, create return edges in self.graph.
:return: None |
380,368 | def create_win_salt_restart_task():
*
cmd =
args = \
return __salt__[](name=,
user_name=,
force=True,
action_type=,
cmd=cmd,
arguments=args,
trigger_type=,
start_date=,
start_time=) | Create a task in Windows task scheduler to enable restarting the salt-minion
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' service.create_win_salt_restart_task() |
380,369 | def dict_table(cls,
d,
order=None,
header=None,
sort_keys=True,
show_none="",
max_width=40):
def _keys():
all_keys = []
for e in d:
keys = d[e].keys()
all_keys.extend(keys)
return list(set(all_keys))
def _get(item, key):
try:
tmp = str(d[item][key])
if tmp == "None":
tmp = show_none
except:
tmp =
return tmp
if d is None or d == {}:
return None
if order is None:
order = _keys()
if header is None and order is not None:
header = order
elif header is None:
header = _keys()
x = PrettyTable(header)
x.max_width = max_width
if sort_keys:
if type(sort_keys) is str:
sorted_list = sorted(d, key=lambda x: d[x][sort_keys])
elif type(sort_keys) == tuple:
sorted_list = sorted(d, key=lambda x: tuple(
[d[x][sort_key] for sort_key in sort_keys]))
else:
sorted_list = d
else:
sorted_list = d
for element in sorted_list:
values = []
for key in order:
values.append(_get(element, key))
x.add_row(values)
x.align = "l"
return x | prints a pretty table from an dict of dicts
:param d: A a dict with dicts of the same type.
Each key will be a column
:param order: The order in which the columns are printed.
The order is specified by the key names of the dict.
:param header: The Header of each of the columns
:type header: A list of string
:param sort_keys: Key(s) of the dict to be used for sorting.
This specify the column(s) in the table for sorting.
:type sort_keys: string or a tuple of string (for sorting with multiple columns)
:param show_none: prints None if True for None values otherwise ""
:type show_none: bool
:param max_width: maximum width for a cell
:type max_width: int |
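A small usage sketch of dict_table with hypothetical data (the enclosing class is assumed to expose it as a classmethod, here called Printer):
data = {
    "vm1": {"status": "running", "image": "ubuntu"},
    "vm2": {"status": "stopped", "image": "centos"},
}
print(Printer.dict_table(data, order=["status", "image"], sort_keys="status"))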
380,370 | def edge_betweenness_bin(G):
n = len(G)
BC = np.zeros((n,))
EBC = np.zeros((n, n))
for u in range(n):
D = np.zeros((n,))
D[u] = 1
NP = np.zeros((n,))
NP[u] = 1
P = np.zeros((n, n))
Q = np.zeros((n,), dtype=int)
q = n - 1
Gu = G.copy()
V = np.array([u])
while V.size:
Gu[:, V] = 0
for v in V:
Q[q] = v
q -= 1
W, = np.where(Gu[v, :])
for w in W:
if D[w]:
NP[w] += NP[v]
P[w, v] = 1
else:
D[w] = 1
NP[w] = NP[v]
P[w, v] = 1
V, = np.where(np.any(Gu[V, :], axis=0))
if np.any(np.logical_not(D)):
Q[:q], = np.where(np.logical_not(D))
DP = np.zeros((n,))
for w in Q[:n - 1]:
BC[w] += DP[w]
for v in np.where(P[w, :])[0]:
DPvw = (1 + DP[w]) * NP[v] / NP[w]
DP[v] += DPvw
EBC[v, w] += DPvw
return EBC, BC | Edge betweenness centrality is the fraction of all shortest paths in
the network that contain a given edge. Edges with high values of
betweenness centrality participate in a large number of shortest paths.
Parameters
----------
A : NxN np.ndarray
binary directed/undirected connection matrix
Returns
-------
EBC : NxN np.ndarray
edge betweenness centrality matrix
BC : Nx1 np.ndarray
node betweenness centrality vector
Notes
-----
Betweenness centrality may be normalised to the range [0,1] as
BC/[(N-1)(N-2)], where N is the number of nodes in the network. |
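A tiny worked example on a 3-node path graph (0-1-2), where every shortest path between the end nodes passes through the middle node:
import numpy as np
G = np.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]])
EBC, BC = edge_betweenness_bin(G)
# BC[1] is the largest entry: node 1 lies on the only shortest path
# between nodes 0 and 2.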
380,371 | def check(self, src_tgt, actual_deps):
if self._check_missing_direct_deps or self._check_unnecessary_deps:
missing_file_deps, missing_direct_tgt_deps = \
self._compute_missing_deps(src_tgt, actual_deps)
buildroot = get_buildroot()
def shorten(path):
if path.startswith(buildroot):
return os.path.relpath(path, buildroot)
return path
def filter_whitelisted(missing_deps):
return [(tgt_pair, evidence) for (tgt_pair, evidence) in missing_deps
if tgt_pair[0].address not in self._target_whitelist]
missing_direct_tgt_deps = filter_whitelisted(missing_direct_tgt_deps)
if self._check_missing_direct_deps and missing_direct_tgt_deps:
log_fn = (self.context.log.error if self._check_missing_direct_deps ==
else self.context.log.warn)
for (tgt_pair, evidence) in missing_direct_tgt_deps:
evidence_str = .join([.format(shorten(e[0]), shorten(e[1]))
for e in evidence])
log_fn(
.format(tgt_pair[0].address.spec, tgt_pair[1].address.spec, evidence_str))
if self._check_missing_direct_deps == :
raise TaskError()
if self._check_unnecessary_deps:
log_fn = (self.context.log.error if self._check_unnecessary_deps ==
else self.context.log.warn)
had_unused = self._do_check_unnecessary_deps(src_tgt, actual_deps, log_fn)
if had_unused and self._check_unnecessary_deps == :
raise TaskError() | Check for missing deps.
See docstring for _compute_missing_deps for details. |
380,372 | def returner(ret):
_options = _get_options(ret)
channel = _options.get()
username = _options.get()
as_user = _options.get()
api_key = _options.get()
changes = _options.get()
only_show_failed = _options.get()
yaml_format = _options.get()
if not channel:
log.error()
return
if not username:
log.error()
return
if not as_user:
log.error()
return
if not api_key:
log.error()
return
if only_show_failed and changes:
log.error()
return
returns = ret.get()
if changes is True:
returns = {(key, value) for key, value in returns.items() if value[] is not True or value[]}
if only_show_failed is True:
returns = {(key, value) for key, value in returns.items() if value[] is not True}
if yaml_format is True:
returns = salt.utils.yaml.safe_dump(returns)
else:
returns = pprint.pformat(returns)
message = (
).format(
ret.get(),
ret.get(),
ret.get(),
ret.get(),
returns)
slack = _post_message(channel,
message,
username,
as_user,
api_key)
return slack | Send an slack message with the data |
380,373 | async def rows(self, offs, size=None, iden=None):
if iden is not None:
self.setOffset(iden, offs)
for i, (indx, byts) in enumerate(self._items.rows(offs)):
if size is not None and i >= size:
return
yield indx, byts | Yield a number of raw items from the CryoTank starting at a given offset.
Args:
offs (int): The index of the desired datum (starts at 0)
size (int): The max number of items to yield.
Yields:
((indx, bytes)): Index and msgpacked bytes. |
380,374 | def sponsor_image_url(sponsor, name):
if sponsor.files.filter(name=name).exists():
return sponsor.files.filter(name=name).first().item.url
return | Returns the corresponding url from the sponsors images |
380,375 | def get_auth(self):
url = self.h_url + self.server + ":" + self.port
auth = requests.auth.HTTPDigestAuth(self.username,self.password)
auth_url = "/imcrs"
f_url = url + auth_url
try:
r = requests.get(f_url, auth=auth, headers=headers, verify=False)
return r.status_code
except requests.exceptions.RequestException as e:
return ("Error:\n" + str(e) + )
set_imc_creds()
if r.status_code != 200:
return ("Error:\n" + str(e) +"Error: \n You're credentials are invalid. Please try again\n\n")
set_imc_creds() | This method requests an authentication object from the HPE IMC NMS and returns an HTTPDigest Auth Object
:return: |
380,376 | def run_job(self, section_id, session=None):
if not self.parser.has_section(section_id):
raise KeyError(.format(section_id))
session = session or Session()
for name, looter_cls in six.iteritems(self._CLS_MAP):
targets = self.get_targets(self._get(section_id, name))
quiet = self._getboolean(
section_id, "quiet", self.args.get("--quiet", False))
if targets:
logger.info("Launching {} job for section {}".format(name, section_id))
for target, directory in six.iteritems(targets):
try:
logger.info("Downloading {} to {}".format(target, directory))
looter = looter_cls(
target,
add_metadata=self._getboolean(section_id, , False),
get_videos=self._getboolean(section_id, , False),
videos_only=self._getboolean(section_id, , False),
jobs=self._getint(section_id, , 16),
template=self._get(section_id, , ),
dump_json=self._getboolean(section_id, , False),
dump_only=self._getboolean(section_id, , False),
extended_dump=self._getboolean(section_id, , False),
session=session)
if self.parser.has_option(section_id, ):
looter.logout()
username = self._get(section_id, )
password = self._get(section_id, ) or \
getpass.getpass(.format(username))
looter.login(username, password)
n = looter.download(
directory,
media_count=self._getint(section_id, ),
new_only=self._getboolean(section_id, , False),
pgpbar_cls=None if quiet else TqdmProgressBar,
dlpbar_cls=None if quiet else TqdmProgressBar)
logger.success("Downloaded %i medias !", n)
except Exception as exception:
logger.error(six.text_type(exception)) | Run a job as described in the section named ``section_id``.
Raises:
KeyError: when the section could not be found. |
380,377 | def parse_events(content, start=None, end=None, default_span=timedelta(days=7)):
if not start:
start = now()
if not end:
end = start + default_span
if not content:
raise ValueError()
calendar = Calendar.from_ical(content)
# NOTE: the loop that determined cal_tz was lost in extraction; it presumably
# searched the calendar for a VTIMEZONE component before falling back to UTC.
for component in calendar.walk('VTIMEZONE'):
cal_tz = component.to_tz()
break
else:
cal_tz = UTC
start = normalize(start, cal_tz)
end = normalize(end, cal_tz)
found = []
for component in calendar.walk():
if component.name == "VEVENT":
e = create_event(component)
if e.recurring:
rule = parse_rrule(component, cal_tz)
dur = e.end - e.start
found.extend(e.copy_to(dt) for dt in rule.between(start - dur, end, inc=True))
elif e.end >= start and e.start <= end:
found.append(e)
return found | Query the events occurring in a given time range.
:param content: iCal URL/file content as String
:param start: start date for search, default today
:param end: end date for search
:param default_span: default query length (one week)
:return: events as list |
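A short usage sketch for parse_events; the .ics file name and the one-day window are placeholders, not values from the source.

from datetime import datetime, timedelta

with open('calendar.ics') as fh:   # hypothetical input file
    content = fh.read()

window_start = datetime(2021, 6, 1)
events = parse_events(content, start=window_start,
                      end=window_start + timedelta(days=1))
for event in sorted(events, key=lambda e: e.start):
    print(event.start, event.end)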
380,378 | def send(self, *args, **kwargs):
conn = SMTP(*args, **kwargs)
send_result = conn.send(self)
return conn, send_result | Sends the envelope using a freshly created SMTP connection. *args*
and *kwargs* are passed directly to :py:class:`envelopes.conn.SMTP`
constructor.
Returns a tuple of SMTP object and whatever its send method returns. |
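A hedged usage sketch; the addresses, SMTP host, and credentials below are placeholders, and the Envelope constructor arguments follow the envelopes library's usual API rather than anything shown in this row.

from envelopes import Envelope

envelope = Envelope(
    from_addr=('sender@example.com', 'Sender'),
    to_addr=('recipient@example.com', 'Recipient'),
    subject='Hello',
    text_body='A short test message.',
)
# send() opens a fresh SMTP connection and returns it alongside the send result.
conn, result = envelope.send('smtp.example.com', login='sender@example.com',
                             password='secret', tls=True)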
380,379 | def displayText(self, value, blank=, joiner=):
if value is None:
return
labels = []
for key, my_value in sorted(self.items(), key=lambda x: x[1]):
if value & my_value:
labels.append(self._labels.get(my_value, text.pretty(key)))
return joiner.join(labels) or blank | Returns the display text for the value associated with
the inputted value. This will result in a comma-separated
list of labels for the value, or the blank text provided if
no text is found.
:param value | <variant>
blank | <str>
joiner | <str>
:return <str> |
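To show the flag-to-label behavior on its own, a self-contained sketch of the same idea; the flag names and bit values are invented for the example.

# Stand-in flag set: name -> bit value, mirroring the items() the method iterates over.
flags = {'Read': 1, 'Write': 2, 'Execute': 4}

def display_text(value, blank='', joiner=', '):
    if value is None:
        return blank
    labels = [name for name, bit in sorted(flags.items(), key=lambda x: x[1])
              if value & bit]
    return joiner.join(labels) or blank

print(display_text(3))                  # 'Read, Write'
print(display_text(0, blank='none'))    # 'none'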
380,380 | def _kmp_construct_next(self, pattern):
next = [[0 for state in pattern] for input_token in self.ALPHABETA_KMP]
next[pattern[0]][0] = 1
restart_state = 0
for state in range(1, len(pattern)):
for input_token in self.ALPHABETA_KMP:
next[input_token][state] = next[input_token][restart_state]
next[pattern[state]][state] = state + 1
restart_state = next[pattern[state]][restart_state]
return next | Helper for KMP string searching: constructs the DFA for the given pattern. ``pattern`` should be an integer array; returns a 2D array, indexed by input token and state, representing the DFA transitions used to advance the pattern. |
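The table above drives a linear-time search: feed tokens through the DFA and report a match when the accepting state len(pattern) is reached. A self-contained sketch over a small integer alphabet (the range below stands in for ALPHABETA_KMP).

ALPHABET = range(4)   # stand-in for self.ALPHABETA_KMP

def build_next(pattern):
    nxt = [[0] * len(pattern) for _ in ALPHABET]
    nxt[pattern[0]][0] = 1
    restart = 0
    for state in range(1, len(pattern)):
        for token in ALPHABET:
            nxt[token][state] = nxt[token][restart]
        nxt[pattern[state]][state] = state + 1
        restart = nxt[pattern[state]][restart]
    return nxt

def kmp_search(text, pattern):
    nxt, state = build_next(pattern), 0
    for i, token in enumerate(text):
        state = nxt[token][state]
        if state == len(pattern):
            return i - len(pattern) + 1   # start index of the match
    return -1

print(kmp_search([0, 1, 0, 2, 0, 1, 0, 1, 3], [0, 1, 0, 1]))   # 4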
380,381 | def open_addnew_win(self, *args, **kwargs):
if self.reftrackadderwin:
self.reftrackadderwin.close()
self.reftrackadderwin = ReftrackAdderWin(self.refobjinter, self.root, parent=self)
self.reftrackadderwin.destroyed.connect(self.addnewwin_destroyed)
self.reftrackadderwin.show() | Open a new window so the user can choose to add new reftracks
:returns: None
:rtype: None
:raises: NotImplementedError |
380,382 | def delete_nsg(access_token, subscription_id, resource_group, nsg_name):
endpoint = .join([get_rm_endpoint(),
, subscription_id,
, resource_group,
, nsg_name,
, NETWORK_API])
return do_delete(endpoint, access_token) | Delete network security group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
nsg_name (str): Name of the NSG.
Returns:
HTTP response. |
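A hedged call sketch; the token, subscription id, resource group, and NSG name are placeholders, and the 200/202/204 codes are the ones Azure typically returns for deletes rather than values taken from this source.

response = delete_nsg(access_token='<bearer-token>',
                      subscription_id='00000000-0000-0000-0000-000000000000',
                      resource_group='demo-rg',
                      nsg_name='demo-nsg')
if response.status_code in (200, 202, 204):
    print('NSG delete accepted')
else:
    print('Delete failed:', response.status_code, response.text)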
380,383 | def load_datafile(self, name, search_path=None, **kwargs):
if not search_path:
search_path = self.define_dir
self.debug_msg( % (name, str(search_path)))
return codec.load_datafile(name, search_path, **kwargs) | Find the datafile on the search path and load it via the codec. |
380,384 | def stopContext(self, context):
if ((self.clear_context[] and inspect.ismodule(context)) or
(self.clear_context[] and inspect.isclass(context))):
self.connection.drop_database(self.database_name) | Clear the database if so configured for this context. |
380,385 | def copy_ssh_keys_to_host(self, host, password=None, no_add_host=False, known_hosts=DEFAULT_KNOWN_HOSTS):
client = None
try:
client = paramiko.SSHClient()
if not no_add_host:
client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
if os.path.isfile(known_hosts):
client.load_host_keys(filename=known_hosts)
client.connect(host.hostname, port=host.port, username=host.user, password=password,
key_filename=self.priv_key)
cmd = (r{0}
.format(self.pub_key_content))
logger.debug(, host.hostname, cmd)
client.exec_command(cmd.encode())
finally:
if client:
client.close() | Copy the SSH keys to the given host.
:param host: the `Host` object to copy the SSH keys to.
:param password: the SSH password for the given host.
:param no_add_host: if the host is not in the known_hosts file, write an error instead of adding it to the
known_hosts.
:param known_hosts: the `known_hosts` file to store the SSH public keys.
:raise paramiko.ssh_exception.AuthenticationException: if SSH authentication error.
:raise paramiko.ssh_exception.SSHException: generic SSH error.
:raise socket.error: if error at the socket level. |
380,386 | def is_admin(self):
return self.role == self.roles.administrator.value and self.state == State.approved | Is the user a system administrator |
380,387 | def add_sma(self,periods=20,column=None,name=,
str=None,**kwargs):
if not column:
column=self._d[]
study={:,
:name,
:{:periods,:column,
:str},
:utils.merge_dict({:False},kwargs)}
self._add_study(study) | Add Simple Moving Average (SMA) study to QuantFigure.studies
Parameters:
periods : int or list(int)
Number of periods
column :string
Defines the data column name that contains the
data over which the study will be applied.
Default: 'close'
name : string
Name given to the study
str : string
Label factory for studies
The following wildcards can be used:
{name} : Name of the column
{study} : Name of the study
{period} : Period used
Examples:
'study: {study} - period: {period}'
kwargs:
legendgroup : bool
If true, all legend items are grouped into a
single one
All formatting values available on iplot() |
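A usage sketch for the study above; qf is assumed to be an already-built QuantFigure over an OHLC dataframe, and only the add_sma call itself reflects the code shown.

# qf: an existing QuantFigure built from an OHLC dataframe (construction not shown).
qf.add_sma(periods=[20, 50],              # one SMA trace per period
           column='close',
           name='sma',
           str='{study} ({period})')      # legend label per trace
qf.iplot()                                # assumes the usual cufflinks plotting call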
380,388 | def dmrs_tikz_dependency(xs, **kwargs):
def link_label(link):
return .format(link.rargname or , link.post)
def label_edge(link):
if link.post == H_POST and link.rargname == RSTR_ROLE:
return
elif link.post == EQ_POST:
return
else:
return
if isinstance(xs, Xmrs):
xs = [xs]
lines = .split("\n")
for ix, x in enumerate(xs):
lines.append("%%%\n%%% {}\n%%%".format(ix+1))
lines.append("\\begin{dependency}[dmrs]")
ns = nodes(x)
lines.append(" \\begin{deptext}[column sep=10pt]")
for i, n in enumerate(ns):
sep = "\\&" if (i < len(ns) - 1) else "\\\\"
pred = _latex_escape(n.pred.short_form())
pred = "\\named{}" if pred == else pred
if n.carg is not None:
print(n.carg.strip())
pred += "\\smaller ({})".format(n.carg.strip())
lines.append(" \\spred{{{}}} {} % node {}".format(
pred, sep, i+1))
lines.append(" \\end{deptext}")
nodeidx = {n.nodeid: i+1 for i, n in enumerate(ns)}
for link in links(x):
if link.start == 0:
lines.append(
.format(
nodeidx[link.end],
)
)
else:
lines.append(.format(
label_edge(link),
nodeidx[link.start],
nodeidx[link.end],
_latex_escape(link_label(link))
))
lines.append()
lines.append()
lines.append()
return .join(lines) | Return a LaTeX document with each Xmrs in *xs* rendered as DMRSs.
DMRSs use the `tikz-dependency` package for visualization. |
380,389 | def update_reflexrules_workflow_state(portal):
wf_tool = getToolByName(portal, )
logger.info("Updating Reflex Rulesinactive_stateportal_catalogReflexRule s were updated.") | Updates Reflex Rules' inactive_state, otherwise they don't have it by
default.
:param portal: Portal object
:return: None |
380,390 | def callback(self, request, **kwargs):
access_token = Pocket.get_access_token(consumer_key=self.consumer_key, code=request.session[])
kwargs = {: access_token}
return super(ServicePocket, self).callback(request, **kwargs) | Called from the Service when the user agrees to activate it
:param request: request object
:return: callback url
:rtype: string , path to the template |
380,391 | def name(self):
if self.cadence == :
return self.__class__.__name__
else:
return % self.__class__.__name__ | Returns the name of the current :py:class:`Detrender` subclass. |
380,392 | def getStates(self):
cust_attr = (self.raw_data.get("rtc_cm:state")
.get("@rdf:resource")
.split("/")[-2])
return self.rtc_obj._get_paged_resources("State",
projectarea_id=self.contextId,
customized_attr=cust_attr,
page_size="50") | Get all :class:`rtcclient.models.State` objects of this workitem
:return: a :class:`list` contains all the
:class:`rtcclient.models.State` objects
:rtype: list |
380,393 | def parents(self, resources):
if self.docname == :
return []
parents = []
parent = resources.get(self.parent)
while parent is not None:
parents.append(parent)
parent = resources.get(parent.parent)
return parents | Walk the ``parent`` chain and return this resource's parents (the root document has none). |
380,394 | def send_workflow(self):
task_invitation = TaskInvitation.objects.get(self.task_invitation_key)
wfi = task_invitation.instance
select_role = self.input[][]
if wfi.current_actor == self.current.role:
task_invitation.role = RoleModel.objects.get(select_role)
wfi.current_actor = RoleModel.objects.get(select_role)
wfi.save()
task_invitation.save()
[inv.delete() for inv in TaskInvitation.objects.filter(instance=wfi) if
not inv == task_invitation]
title = _(u"Successful")
msg = _(u"The workflow was assigned to someone else with success.")
else:
title = _(u"Unsuccessful")
msg = _(u"This workflow does not belong to you, you cannot assign it to someone else.")
self.current.msg_box(title=title, msg=msg) | Assigns the workflow instance and its task invitation to the selected role. |
380,395 | def uriref_matches_iriref(v1: URIRef, v2: Union[str, ShExJ.IRIREF]) -> bool:
return str(v1) == str(v2) | Compare :py:class:`rdflib.URIRef` value with :py:class:`ShExJ.IRIREF` value |
380,396 | async def load_cache(self, archive: bool = False) -> int:
LOGGER.debug(, archive)
rv = int(time())
box_ids = json.loads(await self.get_box_ids_json())
for s_id in box_ids[]:
with SCHEMA_CACHE.lock:
await self.get_schema(s_id)
for cd_id in box_ids[]:
with CRED_DEF_CACHE.lock:
await self.get_cred_def(cd_id)
for rr_id in box_ids[]:
await self._get_rev_reg_def(rr_id)
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
if revo_cache_entry:
try:
await revo_cache_entry.get_delta_json(self._build_rr_delta_json, rv, rv)
except ClosedPool:
LOGGER.warning(
,
self.wallet.name,
self.pool.name,
rr_id,
rv)
if archive:
Caches.archive(self.dir_cache)
LOGGER.debug(, rv)
return rv | Load caches and archive enough to go offline and be able to generate proof
on all credentials in wallet.
Return timestamp (epoch seconds) of cache load event, also used as subdirectory
for cache archives.
:return: cache load event timestamp (epoch seconds) |
380,397 | def _check_fields(self, x, y):
if x is None:
if self.x is None:
self.err(
self._check_fields,
"X field is not set: please specify a parameter")
return
x = self.x
if y is None:
if self.y is None:
self.err(
self._check_fields,
"Y field is not set: please specify a parameter")
return
y = self.y
return x, y | Check the x and y field parameters and initialize them. |
380,398 | def validate(self, value, validator):
try:
validator.validate(value)
except Exception as e:
logging.debug(e, exc_info=e)
if isinstance(e, DoctorError):
raise
else:
validation_errors = sorted(
validator.iter_errors(value), key=lambda e: e.path)
errors = {}
for error in validation_errors:
try:
key = error.path[0]
except IndexError:
key =
errors[key] = error.args[0]
raise SchemaValidationError(e.args[0], errors=errors)
return value | Validates and returns the value.
If the value does not validate against the schema, SchemaValidationError
will be raised.
:param value: A value to validate (usually a dict).
:param validator: An instance of a jsonschema validator class, as
created by Schema.get_validator().
:returns: the passed value.
:raises SchemaValidationError:
:raises Exception: |
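A minimal sketch of the same error-collection pattern using plain jsonschema; the schema and value are invented, and the raised classes here are jsonschema's rather than doctor's own wrappers.

import jsonschema

schema = {
    'type': 'object',
    'properties': {'age': {'type': 'integer'}, 'name': {'type': 'string'}},
    'required': ['name'],
}
validator = jsonschema.Draft4Validator(schema)
value = {'age': 'forty'}

try:
    validator.validate(value)
except jsonschema.ValidationError:
    errors = {}
    for error in sorted(validator.iter_errors(value), key=lambda e: list(e.path)):
        key = error.path[0] if error.path else ''
        errors[key] = error.message
    print(errors)   # e.g. {'': "'name' is a required property", 'age': "'forty' is not of type 'integer'"}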
380,399 | def _get_goroot(self, goids_all, namespace):
root_goid = self.consts.NAMESPACE2GO[namespace]
if root_goid in goids_all:
return root_goid
root_goids = set()
for goid in goids_all:
goterm = self.gosubdag.go2obj[goid]
if goterm.depth == 0:
root_goids.add(goterm.id)
if len(root_goids) == 1:
return next(iter(root_goids))
raise RuntimeError("UNEXPECTED NUMBER OF ROOTS: {R}".format(R=root_goids)) | Get the top GO for the set of goids_all. |