Unnamed: 0 (int64, 0-389k) | code (string, 26-79.6k chars) | docstring (string, 1-46.9k chars)
---|---|---|
21,500 | def norm_proj_path(path, build_module):
if path == :
return
if path.startswith():
norm = normpath(path[2:])
if norm[0] in (, , ):
raise ValueError("Invalid path: `{}/ - use to start from "
"project root".format(path))
if build_module == :
build_module =
norm = normpath(join(build_module, path))
if norm.startswith():
raise ValueError(
"Invalid path `{}.') | Return a normalized path for the `path` observed in `build_module`.
The normalized path is "normalized" (in the `os.path.normpath` sense),
relative from the project root directory, and OS-native.
Supports making references from project root directory by prefixing the
path with "//".
:raises ValueError: If path references outside the project sandbox. |
21,501 | def _sb_r2(self, term, r1_prefixes=None):
r1_start = self._sb_r1(term, r1_prefixes)
return r1_start + self._sb_r1(term[r1_start:]) | Return the R2 region, as defined in the Porter2 specification.
Parameters
----------
term : str
The term to examine
r1_prefixes : set
Prefixes to consider
Returns
-------
int
Length of the R2 region |
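For context on the regions referenced above: in the Porter2 specification, R1 is the part of the word after the first non-vowel that follows a vowel, and R2 is the result of applying the same rule again inside R1. A minimal standalone sketch of that definition (independent of the class above; the special word-prefix exceptions of Porter2 are omitted):

```python
VOWELS = set("aeiouy")

def region_start(term: str) -> int:
    # Index just after the first non-vowel that follows a vowel; len(term) if none exists.
    for i in range(1, len(term)):
        if term[i] not in VOWELS and term[i - 1] in VOWELS:
            return i + 1
    return len(term)

def r2_start(term: str) -> int:
    r1 = region_start(term)
    return r1 + region_start(term[r1:])

# "beautiful": R1 is "iful" (starts at index 5), R2 is "ul" (starts at index 7).
assert region_start("beautiful") == 5
assert r2_start("beautiful") == 7
```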
21,502 | def syslog(logger_to_update=logger, facility=SysLogHandler.LOG_USER, disableStderrLogger=True):
__remove_internal_loggers(logger_to_update, disableStderrLogger)
syslog_handler = SysLogHandler(facility=facility)
setattr(syslog_handler, LOGZERO_INTERNAL_LOGGER_ATTR, True)
logger_to_update.addHandler(syslog_handler)
return syslog_handler | Setup logging to syslog and disable other internal loggers
:param logger_to_update: the logger to enable syslog logging for
:param facility: syslog facility to log to
:param disableStderrLogger: should the default stderr logger be disabled? defaults to True
:return the new SysLogHandler, which can be modified externally (e.g. for custom log level) |
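A minimal standalone illustration of the same idea using only the standard library (this is not logzero's internal code; the `/dev/log` address is an assumption for a typical Linux host):

```python
import logging
from logging.handlers import SysLogHandler

logger = logging.getLogger("myapp")
logger.setLevel(logging.INFO)

# /dev/log is the usual local syslog socket on Linux; adjust for other platforms.
handler = SysLogHandler(address="/dev/log", facility=SysLogHandler.LOG_USER)
handler.setFormatter(logging.Formatter("%(name)s: %(levelname)s %(message)s"))
logger.addHandler(handler)

logger.info("hello from syslog")
```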
21,503 | def writeXMLFile(filename, content):
xmlfile = open(filename, )
content = etree.tostring(content, pretty_print=True)
xmlfile.write(content)
xmlfile.close() | Used only for debugging to write out intermediate files |
21,504 | def _instruction_to_superop(cls, instruction):
if isinstance(instruction, QuantumCircuit):
instruction = instruction.to_instruction()
op = SuperOp(np.eye(4 ** instruction.num_qubits))
op._append_instruction(instruction)
return op | Convert a QuantumCircuit or Instruction to a SuperOp. |
21,505 | def on_message(self, message_protocol_entity):
logger.info("Message id %s received" % message_protocol_entity.getId())
self.toLower(message_protocol_entity.ack()) | Callback function when receiving message from whatsapp server |
21,506 | def render_to_mail(subject, template, context, recipient, fail_silently = False, headers = None):
if in settings.INSTALLED_APPS:
render_to_mail_task.delay(
subject,
template,
context,
recipient,
fail_silently,
headers
)
else:
render_to_mail_task(
subject,
template,
context,
recipient,
fail_silently,
headers
) | :param subject: The subject line of the email
:param template: The name of the template to render the HTML email with
:param context: The context data to pass to the template
:param recipient: The email address or ``django.contrib.auth.User`` object to send the email to
:param fail_silently: Set to ``True`` to avoid errors being raised by the sender
:param headers: An optional dictionary of email headers
This function acts as an alias to one of two functions, depending on your setup. If you use
`Celery <http://www.celeryproject.org/>`_,
this function will perform the compositing and sending of the email asynchronously. Otherwise
the process will take place on the same thread. |
21,507 | def convert_linear_problem_to_dual(model, sloppy=False, infinity=None, maintain_standard_form=True, prefix="dual_", dual_model=None):
if dual_model is None:
dual_model = model.interface.Model()
maximization = model.objective.direction == "max"
if infinity is not None:
neg_infinity = -infinity
else:
neg_infinity = None
if maximization:
sign = 1
else:
sign = -1
coefficients = {}
dual_objective = {}
for constraint in model.constraints:
if constraint.expression == 0:
continue
if not (sloppy or constraint.is_Linear):
raise ValueError("Non-linear problems are not supported: " + str(constraint))
if constraint.lb is None and constraint.ub is None:
continue
if not maintain_standard_form and constraint.lb == constraint.ub:
const_var = model.interface.Variable(prefix + constraint.name + "_constraint", lb=neg_infinity, ub=infinity)
dual_model.add(const_var)
if constraint.lb != 0:
dual_objective[const_var] = sign * constraint.lb
for variable, coef in constraint.expression.as_coefficients_dict().items():
if variable == 1:
continue
coefficients.setdefault(variable.name, {})[const_var] = sign * coef
else:
if constraint.lb is not None:
lb_var = model.interface.Variable(prefix + constraint.name + "_constraint_lb", lb=0, ub=infinity)
dual_model.add(lb_var)
if constraint.lb != 0:
dual_objective[lb_var] = -sign * constraint.lb
if constraint.ub is not None:
ub_var = model.interface.Variable(prefix + constraint.name + "_constraint_ub", lb=0, ub=infinity)
dual_model.add(ub_var)
if constraint.ub != 0:
dual_objective[ub_var] = sign * constraint.ub
assert constraint.expression.is_Add or constraint.expression.is_Mul, \
"Invalid expression type: " + str(type(constraint.expression))
if constraint.expression.is_Add:
coefficients_dict = constraint.expression.as_coefficients_dict()
else:
coefficients_dict = {constraint.expression.args[1]: constraint.expression.args[0]}
for variable, coef in coefficients_dict.items():
if variable == 1:
continue
if constraint.lb is not None:
coefficients.setdefault(variable.name, {})[lb_var] = -sign * coef
if constraint.ub is not None:
coefficients.setdefault(variable.name, {})[ub_var] = sign * coef
for variable in model.variables:
if not (sloppy or variable.type == "continuous"):
raise ValueError("Integer variables are not supported: " + str(variable))
if not sloppy and (variable.lb is None or variable.lb < 0):
raise ValueError("Problem is not in standard form (" + variable.name + " can be negative)")
if variable.lb > 0:
bound_var = model.interface.Variable(prefix + variable.name + "_lb", lb=0, ub=infinity)
dual_model.add(bound_var)
coefficients.setdefault(variable.name, {})[bound_var] = -sign * 1
dual_objective[bound_var] = -sign * variable.lb
if variable.ub is not None:
bound_var = model.interface.Variable(prefix + variable.name + "_ub", lb=0, ub=infinity)
dual_model.add(bound_var)
coefficients.setdefault(variable.name, {})[bound_var] = sign * 1
if variable.ub != 0:
dual_objective[bound_var] = sign * variable.ub
primal_objective_dict = model.objective.expression.as_coefficients_dict()
for variable in model.variables:
expr = optlang.symbolics.add([(coef * dual_var) for dual_var, coef in coefficients[variable.name].items()])
obj_coef = primal_objective_dict[variable]
if maximization:
const = model.interface.Constraint(expr, lb=obj_coef, name=prefix + variable.name)
else:
const = model.interface.Constraint(expr, ub=obj_coef, name=prefix + variable.name)
dual_model.add(const)
expr = optlang.symbolics.add([(coef * dual_var) for dual_var, coef in dual_objective.items() if coef != 0])
if maximization:
objective = model.interface.Objective(expr, direction="min")
else:
objective = model.interface.Objective(expr, direction="max")
dual_model.objective = objective
return dual_model | A mathematical optimization problem can be viewed as a primal and a dual problem. If the primal problem is
a minimization problem the dual is a maximization problem, and the optimal value of the dual is a lower bound of
the optimal value of the primal.
For linear problems, strong duality holds, which means that the optimal values of the primal and dual are equal
(duality gap = 0).
This function takes an optlang Model representing a primal linear problem and returns a new Model representing
the dual optimization problem. The provided model must have a linear objective, linear constraints and only
continuous variables. Furthermore, the problem must be in standard form, i.e. all variables should be non-negative.
Both minimization and maximization problems are allowed. The objective direction of the dual will always be
opposite of the primal.
Attributes:
----------
model: optlang.interface.Model
The primal problem to be dualized
sloppy: Boolean (default False)
If True, linearity, variable types and standard form will not be checked. Only use if you know the primal is
valid
infinity: Numeric or None
If not None this value will be used as bounds instead of unbounded variables.
maintain_standard_form: Boolean (default True)
If False the returned dual problem will not be in standard form, but will have fewer variables and/or constraints
prefix: str
The string that will be prepended to all variable and constraint names in the returned dual problem.
dual_model: optlang.interface.Model or None (default)
If not None, the dual variables and constraints will be added to this model. Note the objective will also be
set to the dual objective. If None a new model will be created.
Returns:
----------
dual_problem: optlang.interface.Model (same solver as the primal) |
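As a quick numerical illustration of the strong duality described above (a sketch using scipy rather than the optlang API): the primal min x1 + 2*x2 subject to x1 + x2 >= 3, x >= 0 and its dual max 3*y subject to y <= 1, y <= 2, y >= 0 both attain the optimal value 3.

```python
from scipy.optimize import linprog

# Primal: min x1 + 2*x2   s.t.  x1 + x2 >= 3,  x >= 0   (>= rewritten as <= for linprog)
primal = linprog(c=[1, 2], A_ub=[[-1, -1]], b_ub=[-3], bounds=[(0, None)] * 2)

# Dual:   max 3*y          s.t.  y <= 1, y <= 2, y >= 0  (solved as min -3*y)
dual = linprog(c=[-3], A_ub=[[1], [1]], b_ub=[1, 2], bounds=[(0, None)])

assert abs(primal.fun - (-dual.fun)) < 1e-6  # duality gap is zero
```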
21,508 | def accept_connection(self):
assert self.pending, "Connection is not pending."
self.server_protocol = self.server.server_factory.buildProtocol(None)
self._accept_d.callback(
FakeServerProtocolWrapper(self, self.server_protocol))
return self.await_connected() | Accept a pending connection. |
21,509 | def get_default_queryset(self):
return self.__class__.objects.exclude(pk=self.pk) \
.filter(default=True) | Looks for default groups, excluding the current one.
Overridable by openwisp-radius and other 3rd-party apps. |
21,510 | def _expander(namepath):
if "~" in namepath:
namepath = os.path.expanduser(namepath)
else:
namepath = os.path.abspath(namepath)
return namepath | expand ./ ~ and ../ designators in location names |
21,511 | def get_timestamp(timezone_name, year, month, day, hour=0, minute=0):
tz = pytz.timezone(timezone_name)
tz_datetime = tz.localize(datetime(year, month, day, hour, minute))
timestamp = calendar.timegm(tz_datetime.utctimetuple())
return timestamp | Epoch timestamp from timezone, year, month, day, hour and minute. |
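A quick sanity check of the approach used above (pytz localization followed by `calendar.timegm`), using UTC so the expected epoch value is unambiguous:

```python
import calendar
from datetime import datetime

import pytz

tz = pytz.timezone("UTC")
dt = tz.localize(datetime(2000, 1, 1, 0, 0))
# 2000-01-01T00:00:00Z is 946684800 seconds after the Unix epoch.
assert calendar.timegm(dt.utctimetuple()) == 946684800
```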
21,512 | def addClassKey(self, klass, key, obj):
d = self._getClass(klass)
d[key] = obj | Adds an object to the collection, based on klass and key.
@param klass: The class of the object.
@param key: The datastore key of the object.
@param obj: The loaded instance from the datastore. |
21,513 | def path_safe_spec(self):
return (
.format(safe_spec_path=self._spec_path.replace(os.sep, ),
target_name=self._target_name.replace(os.sep, ))) | :API: public |
21,514 | def http_method_formatter(view, context, model, name):
method_map = {
: ,
: ,
: ,
: ,
}
return Markup(
.format(
method_map.get(model[name], ), model[name]
)
) | Wrap HTTP method value in a bs3 label. |
21,515 | def defunct_hash_message(
primitive=None,
*,
hexstr=None,
text=None,
signature_version=b'E',
version_specific_data=None):
message_bytes = to_bytes(primitive, hexstr=hexstr, text=text)
recovery_hasher = compose(
HexBytes,
keccak,
signature_wrapper(
signature_version=signature_version,
version_specific_data=version_specific_data,
)
)
return recovery_hasher(message_bytes) | Convert the provided message into a message hash, to be signed.
This provides the same prefix and hashing approach as
:meth:`w3.eth.sign() <web3.eth.Eth.sign>`.
Currently you can only specify the ``signature_version`` as following.
* **Version** ``0x45`` (version ``E``): ``b'\\x19Ethereum Signed Message:\\n'``
concatenated with the number of bytes in the message.
.. note:: This is the default version used if the signature_version is not specified.
* **Version** ``0x00`` (version ``0``): Sign data with intended validator (EIP 191).
Here the version_specific_data would be a hexstr which is the 20 bytes account address
of the intended validator.
For version ``0x45`` (version ``E``), Awkwardly, the number of bytes in the message is
encoded in decimal ascii. So if the message is 'abcde', then the length is encoded as the ascii
character '5'. This is one of the reasons that this message format is not preferred.
There is ambiguity when the message '00' is encoded, for example.
Only use this method with version ``E`` if you must have compatibility with
:meth:`w3.eth.sign() <web3.eth.Eth.sign>`.
Supply exactly one of the three arguments:
bytes, a hex string, or a unicode string.
:param primitive: the binary message to be signed
:type primitive: bytes or int
:param str hexstr: the message encoded as hex
:param str text: the message as a series of unicode characters (a normal Py3 str)
:param bytes signature_version: a byte indicating which kind of prefix is to be added (EIP 191)
:param version_specific_data: the data which is related to the prefix (EIP 191)
:returns: The hash of the message, after adding the prefix
:rtype: ~hexbytes.main.HexBytes
.. code-block:: python
>>> from eth_account.messages import defunct_hash_message
>>> msg = "I♥SF"
>>> defunct_hash_message(text=msg)
HexBytes('0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750')
# these four also produce the same hash:
>>> defunct_hash_message(w3.toBytes(text=msg))
HexBytes('0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750')
>>> defunct_hash_message(bytes(msg, encoding='utf-8'))
HexBytes('0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750')
>>> Web3.toHex(text=msg)
'0x49e299a55346'
>>> defunct_hash_message(hexstr='0x49e299a55346')
HexBytes('0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750')
>>> defunct_hash_message(0x49e299a55346)
HexBytes('0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750') |
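The version ``E`` wrapper described above can be reproduced in a few lines of plain Python (a sketch of the prefixing step only; the actual message hash additionally requires keccak-256, which is omitted here to avoid extra dependencies):

```python
def wrap_defunct(message: bytes) -> bytes:
    # b'\x19Ethereum Signed Message:\n' + decimal ASCII length + message (EIP-191 version E)
    return b"\x19Ethereum Signed Message:\n" + str(len(message)).encode("ascii") + message

wrapped = wrap_defunct(b"abcde")
assert wrapped == b"\x19Ethereum Signed Message:\n5abcde"
```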
21,516 | def remove_part_files(num_parts=None):
filenames = get_part_filenames(num_parts)
for filename in filenames:
remove_part_images(filename)
remove_file(filename) | Remove PART(#).html files and image directories from disk. |
21,517 | def get_stream_when_active(stream_name, region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
stream_status = None
attempt = 0
max_retry_delay = 10
while stream_status != "ACTIVE":
time.sleep(_jittered_backoff(attempt, max_retry_delay))
attempt += 1
stream_response = _get_basic_stream(stream_name, conn)
if in stream_response:
return stream_response
stream_status = stream_response[]["StreamDescription"]["StreamStatus"]
stream_response = _get_full_stream(stream_name, region, key, keyid, profile)
return stream_response | Get complete stream info from AWS, returning only when the stream is in the ACTIVE state.
Continues to retry when stream is updating or creating.
If the stream is deleted during retries, the loop will catch the error and break.
CLI example::
salt myminion boto_kinesis.get_stream_when_active my_stream region=us-east-1 |
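The `_jittered_backoff` helper used in the retry loop above is not shown; a typical "full jitter" implementation looks roughly like the following sketch (the real salt helper may cap or randomize differently):

```python
import random

def jittered_backoff(attempt: int, max_delay: float) -> float:
    # Sleep a random duration between 0 and min(2**attempt, max_delay) seconds.
    return random.uniform(0, min(2 ** attempt, max_delay))

# attempt 0 -> up to 1s, attempt 1 -> up to 2s, ... capped at max_delay
delays = [jittered_backoff(a, 10) for a in range(6)]
assert all(0 <= d <= 10 for d in delays)
```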
21,518 | def chimera_blocks(M=16, N=16, L=4):
for x in xrange(M):
for y in xrange(N):
for u in (0, 1):
yield tuple((x, y, u, k) for k in xrange(L)) | Generator for blocks for a chimera block quotient |
21,519 | def get_fields(self):
prefix = getattr(self.Meta, , )
fields = super(ModelSubSerializer, self).get_fields()
fields_without_prefix = OrderedDict()
for field_name, field in fields.items():
if field_name.startswith(prefix):
if not field.source:
field.source = field_name
field_name = field_name[len(prefix):]
fields_without_prefix[field_name] = field
return fields_without_prefix | Convert default field names for this sub-serializer into versions where
the field name has the prefix removed, but each field object knows the
real model field name by setting the field's `source` attribute. |
21,520 | def setExpressCheckout(self, params):
if self._is_recurring(params):
params = self._recurring_setExpressCheckout_adapter(params)
defaults = {"method": "SetExpressCheckout", "noshipping": 1}
required = ["returnurl", "cancelurl", "paymentrequest_0_amt"]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj | Initiates an Express Checkout transaction.
Optionally, the SetExpressCheckout API operation can set up billing agreements for
reference transactions and recurring payments.
Returns a NVP instance - check for token and payerid to continue! |
21,521 | def sum(self):
if self._can_use_new_school():
self._prep_spark_sql_groupby()
import pyspark.sql.functions as func
return self._use_aggregation(func.sum)
self._prep_pandas_groupby()
myargs = self._myargs
mykwargs = self._mykwargs
def create_combiner(x):
return x.groupby(*myargs, **mykwargs).sum()
def merge_value(x, y):
return pd.concat([x, create_combiner(y)])
def merge_combiner(x, y):
return x + y
rddOfSum = self._sortIfNeeded(self._distributedRDD.combineByKey(
create_combiner,
merge_value,
merge_combiner)).values()
return DataFrame.fromDataFrameRDD(rddOfSum, self.sql_ctx) | Compute the sum for each group. |
21,522 | def open_filechooser(title, parent=None, patterns=None,
folder=None, filter=None, multiple=False,
_before_run=None, action=None):
assert not (patterns and filter)
if multiple:
if action is not None and action != gtk.FILE_CHOOSER_ACTION_OPEN:
raise ValueError(
)
action = gtk.FILE_CHOOSER_ACTION_OPEN
else:
assert action is not None
filechooser = gtk.FileChooserDialog(title,
parent,
action,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
if multiple:
filechooser.set_select_multiple(True)
if patterns or filter:
if not filter:
filter = gtk.FileFilter()
for pattern in patterns:
filter.add_pattern(pattern)
filechooser.set_filter(filter)
filechooser.set_default_response(gtk.RESPONSE_OK)
if folder:
filechooser.set_current_folder(folder)
try:
if _before_run is not None:
_before_run(filechooser)
response = filechooser.run()
if response not in (gtk.RESPONSE_OK, gtk.RESPONSE_NONE):
return
if multiple:
return filechooser.get_filenames()
else:
return filechooser.get_filename()
finally:
_destroy(filechooser) | An open dialog.
:param parent: window or None
:param patterns: file match patterns
:param folder: initial folder
:param filter: file filter
Use of filter and patterns at the same time is invalid. |
21,523 | def update_priorities(self, idxes, priorities):
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self._storage)
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority) | Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`. |
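For context, the priorities stored above feed a proportional sampling scheme, P(i) = p_i^alpha / sum_k p_k^alpha; the segment trees (`_it_sum`, `_it_min`) simply maintain these sums incrementally. A direct, non-incremental sketch of the same distribution (alpha=0.6 is an assumed, commonly used value):

```python
import numpy as np

def sampling_probabilities(priorities, alpha=0.6):
    # P(i) = p_i**alpha / sum_k p_k**alpha
    scaled = np.asarray(priorities, dtype=float) ** alpha
    return scaled / scaled.sum()

probs = sampling_probabilities([1.0, 2.0, 4.0])
assert abs(probs.sum() - 1.0) < 1e-12
assert probs[2] > probs[1] > probs[0]  # higher priority -> sampled more often
```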
21,524 | def brain_post(connection, requirements=None):
assert isinstance(connection, DefaultConnection)
remote_dbs = validate_get_dbs(connection)
assert validate_brain_requirements(connection, remote_dbs, requirements)
assert validate_write_access(connection)
return connection | Power On Self Test for the brain.
Checks that the brain is appropriately seeded and ready for use.
Raises AssertionError's if the brain is not ready.
:param connection: <rethinkdb.net.DefaultConnection>
:param requirements:<dict> keys=Required Databases, key-values=Required Tables in each database
:return: <rethinkdb.net.DefaultConnection> if verified |
21,525 | def _float(self, string):
string = self._denoise(string)
exp_match = re.match(r, string)
if exp_match:
exp = int(exp_match.groups()[0])
fac = 10 ** -exp
string = string.replace(.format(exp), )
else:
fac = 1
return fac * float(string) | Convert string to float
Take care of numbers in exponential format |
21,526 | def PSUBB(cpu, dest, src):
result = []
value_a = dest.read()
value_b = src.read()
for i in reversed(range(0, dest.size, 8)):
a = Operators.EXTRACT(value_a, i, 8)
b = Operators.EXTRACT(value_b, i, 8)
result.append((a - b) & 0xff)
dest.write(Operators.CONCAT(8 * len(result), *result)) | Packed subtract.
Performs a SIMD subtract of the packed integers of the source operand (second operand) from the packed
integers of the destination operand (first operand), and stores the packed integer results in the
destination operand. The source operand can be an MMX(TM) technology register or a 64-bit memory location,
or it can be an XMM register or a 128-bit memory location. The destination operand can be an MMX or an XMM
register.
The PSUBB instruction subtracts packed byte integers. When an individual result is too large or too small
to be represented in a byte, the result is wrapped around and the low 8 bits are written to the
destination element.
:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand. |
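The byte-lane wraparound described above is easy to reproduce with unsigned 8-bit arithmetic, for example with numpy (an illustration of the semantics, not of the emulator API):

```python
import numpy as np

a = np.array([0x10, 0x00, 0xFF, 0x7F], dtype=np.uint8)
b = np.array([0x01, 0x01, 0x01, 0x80], dtype=np.uint8)

# Each byte lane is subtracted independently; results wrap modulo 256.
diff = a - b
assert diff.tolist() == [0x0F, 0xFF, 0xFE, 0xFF]
```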
21,527 | def needle_statistics(infile):
alignments = list(AlignIO.parse(infile, "emboss"))
alignment_properties = defaultdict(dict)
with open(infile) as f:
line = f.readline()
for i in range(len(alignments)):
while line.rstrip() != "
line = f.readline()
if not line:
raise StopIteration
while line[0] == "
parts = line[1:].split(":", 1)
key = parts[0].lower().strip()
if key == :
a_id = parts[1].strip()
if key == :
b_id = parts[1].strip()
if key == :
ident_parse = parts[1].strip().replace(,).replace(,).replace(,).split()
ident_num = int(ident_parse[0].split()[0])
ident_percent = float(ident_parse[1])
alignment_properties[a_id + + b_id][] = ident_num
alignment_properties[a_id + + b_id][] = ident_percent
if key == :
sim_parse = parts[1].strip().replace(,).replace(,).replace(,).split()
sim_num = int(sim_parse[0].split()[0])
sim_percent = float(sim_parse[1])
alignment_properties[a_id + + b_id][] = sim_num
alignment_properties[a_id + + b_id][] = sim_percent
if key == :
gap_parse = parts[1].strip().replace(,).replace(,).replace(,).split()
gap_num = int(gap_parse[0].split()[0])
gap_percent = float(gap_parse[1])
alignment_properties[a_id + + b_id][] = gap_num
alignment_properties[a_id + + b_id][] = gap_percent
if key == :
score = float(parts[1].strip())
alignment_properties[a_id + + b_id][] = score
line = f.readline()
return alignment_properties | Reads in a needle alignment file and spits out statistics of the alignment.
Args:
infile (str): Alignment file name
Returns:
dict: alignment_properties - a dictionary telling you the number of gaps, identity, etc. |
21,528 | def take_break(minutes: hug.types.number=5):
print("")
print("
try:
for remaining in range(60, -1, -1):
sys.stdout.write("\r")
sys.stdout.write("{:2d} seconds to change your mind. Won't you prefer programming? Or a book?".format(remaining))
sys.stdout.flush()
time.sleep(1)
except KeyboardInterrupt:
print("")
print("")
print(":D :D :D\nGood on you! <3")
return
lose()
print("")
print("
try:
for remaining in range(minutes * 60, -1, -1):
sys.stdout.write("\r")
sys.stdout.write("{:2d} seconds remaining without concentration.".format(remaining))
sys.stdout.flush()
time.sleep(1)
except KeyboardInterrupt:
pass
finally:
sys.stdout.write("\rEnough distraction! \n")
print("
print("")
improve() | Enables temporarily breaking concentration |
21,529 | def upvote(self):
data = self.get_selected_item()
if not in data:
self.term.flash()
elif getattr(data[], ):
self.term.show_notification("Voting disabled for archived post", style=)
elif data[]:
with self.term.loader():
data[].clear_vote()
if not self.term.loader.exception:
data[] = None
else:
with self.term.loader():
data[].upvote()
if not self.term.loader.exception:
data[] = True | Upvote the currently selected item. |
21,530 | def load_elements(self, filename):
input_data = load_b26_file(filename)
if isinstance(input_data, dict) and self.elements_type in input_data:
return input_data[self.elements_type]
else:
return {} | loads the elements from file filename |
21,531 | def disconnect(self):
if self.r_session:
self.session_logout()
self.r_session = None
self.clear() | Ends a client authentication session, performs a logout and a clean up. |
21,532 | def my_request_classifier(environ):
request_method = REQUEST_METHOD(environ)
if request_method in _DAV_METHODS:
return "dav"
useragent = USER_AGENT(environ)
if useragent:
for agent in _DAV_USERAGENTS:
if useragent.find(agent) != -1:
return "dav"
if request_method == "POST":
if CONTENT_TYPE(environ) == "text/xml":
return "xmlpost"
elif CONTENT_TYPE(environ) == "application/soap+xml":
return "soap"
return "browser" | Returns one of the classifiers 'dav', 'xmlpost', or 'browser',
depending on the imperative logic below |
21,533 | def _get_sockets(bots):
sockets = {}
for bot in bots:
bot.connect()
sockets[bot.client.Connection._sock] = bot
return sockets | Connects and gathers sockets for all chatrooms |
21,534 | def convert_and_combine_2_to_3(dtype, map_dict, input_dir=".", output_dir=".", data_model=None):
er_file = os.path.join(input_dir, .format(dtype))
er_data, er_dtype = magic_read(er_file)
if len(er_data):
er_df = pd.DataFrame(er_data)
if dtype == :
pass
else:
er_df.index = er_df[.format(dtype[:-1])]
else:
er_df = pd.DataFrame()
if dtype == :
full_df = er_df
else:
pmag_file = os.path.join(input_dir, .format(dtype))
pmag_data, pmag_dtype = magic_read(pmag_file)
if len(pmag_data):
pmag_df = pd.DataFrame(pmag_data)
pmag_df.index = pmag_df[.format(dtype[:-1])]
else:
pmag_df = pd.DataFrame()
full_df = pd.concat([er_df, pmag_df], sort=True)
full_df.sort_index(inplace=True)
full_df.rename(columns=map_dict, inplace=True)
new_df = cb.MagicDataFrame(dtype=dtype, df=full_df, dmodel=data_model)
if len(new_df.df):
new_df.write_magic_file(dir_path=output_dir)
return dtype + ".txt"
else:
print("-I- No {} data found.".format(dtype))
return None | Read in er_*.txt file and pmag_*.txt file in working directory.
Combine the data, then translate headers from 2.5 --> 3.0.
Last, write out the data in 3.0.
Parameters
----------
dtype : string for input type (specimens, samples, sites, etc.)
map_dict : dictionary with format {header2_format: header3_format, ...} (from mapping.map_magic module)
input_dir : input directory, default "."
output_dir : output directory, default "."
data_model : data_model3.DataModel object, default None
Returns
---------
output_file_name with 3.0 format data (or None if translation failed) |
21,535 | def _set_other(self):
if self.dst.style[] == :
if self.docs[][] is not None:
self.docs[][] = self.dst.numpydoc.get_raw_not_managed(self.docs[][])
elif not in self.docs[] or self.docs[][] is None:
self.docs[][] = | Sets other specific sections |
21,536 | def create_all_parent_directories(ase, dirs_created, timeout=None):
dirs = pathlib.Path(ase.name).parts
if len(dirs) <= 1:
return
dirs = dirs[:-1]
dk = ase.client.account_name + + ase.container
for i in range(0, len(dirs)):
dir = str(pathlib.Path(*(dirs[0:i + 1])))
if dk not in dirs_created or dir not in dirs_created[dk]:
ase.client.create_directory(
share_name=ase.container,
directory_name=dir,
fail_on_exist=False,
timeout=timeout)
if dk not in dirs_created:
dirs_created[dk] = set()
dirs_created[dk].add(dir) | Create all parent directories for a file
:param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity
:param dict dirs_created: directories already created map
:param int timeout: timeout |
21,537 | async def load_all_aldb(self, clear=True):
for addr in self.plm.devices:
await self.load_device_aldb(addr, clear) | Read all devices ALDB. |
21,538 | def add_factors(self, *factors):
for factor in factors:
if set(factor.variables) - set(factor.variables).intersection(
set(self.nodes())):
raise ValueError("Factors defined on variable not in the model",
factor)
self.factors.append(factor) | Associate a factor to the graph.
See factors class for the order of potential values
Parameters
----------
*factor: pgmpy.factors.factors object
A factor object on any subset of the variables of the model which
is to be associated with the model.
Returns
-------
None
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> student = MarkovModel([('Alice', 'Bob'), ('Bob', 'Charles'),
... ('Charles', 'Debbie'), ('Debbie', 'Alice')])
>>> factor = DiscreteFactor(['Alice', 'Bob'], cardinality=[3, 2],
... values=np.random.rand(6))
>>> student.add_factors(factor) |
21,539 | def population_variant_regions(items, merged=False):
def _get_variant_regions(data):
out = dd.get_variant_regions(data) or dd.get_sample_callable(data)
if merged and dd.get_variant_regions(data):
merged_out = dd.get_variant_regions_merged(data)
if merged_out:
out = merged_out
else:
out = merge_overlaps(out, data)
return out
import pybedtools
if len(items) == 1:
return _get_variant_regions(items[0])
else:
paired = vcfutils.get_paired(items)
if paired:
return _get_variant_regions(paired.tumor_data)
else:
vrs = []
for data in items:
vr_bed = _get_variant_regions(data)
if vr_bed:
vrs.append((pybedtools.BedTool(vr_bed).total_coverage(), vr_bed))
vrs.sort(reverse=True)
if vrs:
return vrs[0][1] | Retrieve the variant region BED file from a population of items.
If tumor/normal, return the tumor BED file. If a population, return
the BED file covering the most bases. |
21,540 | def _insertOrGetUniqueJobNoRetries(
self, conn, client, cmdLine, jobHash, clientInfo, clientKey, params,
minimumWorkers, maximumWorkers, jobType, priority, alreadyRunning):
assert len(client) <= self.CLIENT_MAX_LEN, "client too long:" + repr(client)
assert cmdLine, "Unexpected empty or None command-line: " + repr(cmdLine)
assert len(jobHash) == self.HASH_MAX_LEN, "wrong hash len=%d" % len(jobHash)
if alreadyRunning:
% (self.jobsTableName,)
conn.cursor.execute(query, (self._connectionID, jobID))
return jobID | Attempt to insert a row with the given parameters into the jobs table.
Return jobID of the inserted row, or of an existing row with matching
client/jobHash key.
The combination of client and jobHash are expected to be unique (enforced
by a unique index on the two columns).
NOTE: It's possible that this or another process (on this or another machine)
already inserted a row with matching client/jobHash key (e.g.,
StreamMgr). This may also happen undetected by this function due to a
partially-successful insert operation (e.g., row inserted, but then
connection was lost while reading response) followed by retries either of
this function or in SteadyDB module.
Parameters:
----------------------------------------------------------------
conn: Owned connection acquired from ConnectionFactory.get()
client: Name of the client submitting the job
cmdLine: Command line to use to launch each worker process; must be
a non-empty string
jobHash: unique hash of this job. The caller must ensure that this,
together with client, uniquely identifies this job request
for the purposes of detecting duplicates.
clientInfo: JSON encoded dict of client specific information.
clientKey: Foreign key.
params: JSON encoded dict of the parameters for the job. This
can be fetched out of the database by the worker processes
based on the jobID.
minimumWorkers: minimum number of workers desired at a time.
maximumWorkers: maximum number of workers desired at a time.
priority: Job scheduling priority; 0 is the default priority (
ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are
higher priority (up to ClientJobsDAO.MAX_JOB_PRIORITY),
and negative values are lower priority (down to
ClientJobsDAO.MIN_JOB_PRIORITY). Higher-priority jobs will
be scheduled to run at the expense of the lower-priority
jobs, and higher-priority job tasks will preempt those
with lower priority if there is inadequate supply of
scheduling slots. Excess lower priority job tasks will
starve as long as slot demand exceeds supply. Most jobs
should be scheduled with DEFAULT_JOB_PRIORITY. System jobs
that must run at all cost, such as Multi-Model-Master,
should be scheduled with MAX_JOB_PRIORITY.
alreadyRunning: Used for unit test purposes only. This inserts the job
in the running state. It is used when running a worker
in standalone mode without hadoop- it gives it a job
record to work with.
retval: jobID of the inserted jobs row, or of an existing jobs row
with matching client/jobHash key |
21,541 | def create(alphabet_size: int):
def instantiate(**_):
return OneHotEncodingInput(alphabet_size)
return ModelFactory.generic(instantiate) | Vel factory function |
def ported_string(raw_data, encoding='utf-8', errors='ignore'):
if not raw_data:
return six.text_type()
if isinstance(raw_data, six.text_type):
return raw_data.strip()
if six.PY2:
try:
return six.text_type(raw_data, encoding, errors).strip()
except LookupError:
return six.text_type(raw_data, "utf-8", errors).strip()
if six.PY3:
try:
return six.text_type(raw_data, encoding).strip()
except (LookupError, UnicodeDecodeError):
return six.text_type(raw_data, "utf-8", errors).strip() | Give as input raw data and output a str in Python 3
and unicode in Python 2.
Args:
raw_data: Python 2 str, Python 3 bytes or str to porting
encoding: string giving the name of an encoding
errors: this specifies the treatment of characters
which are invalid in the input encoding
Returns:
str (Python 3) or unicode (Python 2) |
21,543 | def fetch_object(self, container, obj, include_meta=False,
chunk_size=None, size=None, extra_info=None):
return self._manager.fetch_object(container, obj,
include_meta=include_meta, chunk_size=chunk_size, size=size) | Fetches the object from storage.
If 'include_meta' is False, only the bytes representing the
stored object are returned.
Note: if 'chunk_size' is defined, you must fully read the object's
contents before making another request.
If 'size' is specified, only the first 'size' bytes of the object will
be returned. If the object is smaller than 'size', the entire object is
returned.
When 'include_meta' is True, what is returned from this method is a
2-tuple:
Element 0: a dictionary containing metadata about the file.
Element 1: a stream of bytes representing the object's contents.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more. |
21,544 | def on_click(self, event):
button = event["button"]
if button in [1, 4, 5]:
self.scrolling = True
self._switch_selection()
elif button == 3:
self._apply() | Click events
- left click & scroll up/down: switch between rotations
- right click: apply selected rotation |
21,545 | def find_all_sift(im_source, im_search, min_match_count=4, maxcnt=0):
sift = _sift_instance()
flann = cv2.FlannBasedMatcher({'algorithm': FLANN_INDEX_KDTREE, 'trees': 5}, dict(checks=50))
kp_sch, des_sch = sift.detectAndCompute(im_search, None)
if len(kp_sch) < min_match_count:
return None
kp_src, des_src = sift.detectAndCompute(im_source, None)
if len(kp_src) < min_match_count:
return None
h, w = im_search.shape[1:]
result = []
while True:
matches = flann.knnMatch(des_sch, des_src, k=2)
good = []
for m, n in matches:
if m.distance < 0.9 * n.distance:
good.append(m)
if len(good) < min_match_count:
break
sch_pts = np.float32([kp_sch[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
img_pts = np.float32([kp_src[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
M, mask = cv2.findHomography(sch_pts, img_pts, cv2.RANSAC, 5.0)
matches_mask = mask.ravel().tolist()
h, w = im_search.shape[:2]
pts = np.float32([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]).reshape(-1, 1, 2)
dst = cv2.perspectiveTransform(pts, M)
pypts = []
for npt in dst.astype(int).tolist():
pypts.append(tuple(npt[0]))
lt, br = pypts[0], pypts[2]
middle_point = (lt[0] + br[0]) / 2, (lt[1] + br[1]) / 2
result.append(dict(
result=middle_point,
rectangle=pypts,
confidence=(matches_mask.count(1), len(good))
))
if maxcnt and len(result) >= maxcnt:
break
qindexes, tindexes = [], []
for m in good:
qindexes.append(m.queryIdx)
tindexes.append(m.trainIdx)
def filter_index(indexes, arr):
r = np.ndarray(0, np.float32)
for i, item in enumerate(arr):
if i not in qindexes:
r = np.append(r, item)
return r
kp_src = filter_index(tindexes, kp_src)
des_src = filter_index(tindexes, des_src)
return result | Find multiple matches of the same element using the SIFT algorithm.
Args:
im_source(string): the source image to search in
im_search(string): the template image to look for
threshold: matches whose similarity is below this threshold are ignored
maxcnt: maximum number of matches to return
Returns:
A tuple of found [(point, rectangle), ...]
A tuple of found [{"point": point, "rectangle": rectangle, "confidence": 0.76}, ...]
rectangle is a 4 points list |
21,546 | def _adapt_response(self, response):
if 'application/json' in response.headers['Content-Type']:
body = response.json()
status = response.status_code
if body.get():
return self._simple_response_to_error_adapter(status, body)
raise UnknownHttpError(response) | Convert error responses to standardized ErrorDetails. |
21,547 | def enqueue(self, klass, *args):
queue = getattr(klass, 'queue', None)
if queue:
class_name = '%s.%s' % (klass.__module__, klass.__name__)
self.enqueue_from_string(class_name, queue, *args)
else:
logger.warning("unable to enqueue job with class %s" % str(klass)) | Enqueue a job into a specific queue. Make sure the class you are
passing has **queue** attribute and a **perform** method on it. |
21,548 | def facts(self, name=None, value=None, **kwargs):
if name is not None and value is not None:
path = '{0}/{1}'.format(name, value)
elif name is not None and value is None:
path = name
else:
path = None
facts = self._query('facts', path=path, **kwargs)
for fact in facts:
yield Fact(
node=fact['certname'],
name=fact['name'],
value=fact['value'],
environment=fact['environment']
) | Query for facts limited by either name, value and/or query.
:param name: (Optional) Only return facts that match this name.
:type name: :obj:`string`
:param value: (Optional) Only return facts of `name` that\
match this value. Use of this parameter requires the `name`\
parameter be set.
:type value: :obj:`string`
:param \*\*kwargs: The rest of the keyword arguments are passed
to the _query function
:returns: A generator yielding Facts.
:rtype: :class:`pypuppetdb.types.Fact` |
21,549 | def _get_plotL(self, Lplot=, proj=, ind=None, multi=False):
ind = self._check_indch(ind)
if ind.size>0:
Ds, us = self.D[:,ind], self.u[:,ind]
if ind.size==1:
Ds, us = Ds.reshape((3,1)), us.reshape((3,1))
kPIn, kPOut = self.kIn[ind], self.kOut[ind]
if self.config.Id.Type==:
kRMin = self._dgeom[][ind]
else:
kRMin = None
pts = _comp.LOS_CrossProj(self.config.Id.Type, Ds, us,
kPIn, kPOut, kRMin, proj=proj,
Lplot=Lplot, multi=multi)
else:
pts = None
return pts | Get the (R,Z) coordinates of the cross-section projections |
21,550 | def get_prediction_results(model_dir_or_id, data, headers, img_cols=None,
cloud=False, with_source=True, show_image=True):
if img_cols is None:
img_cols = []
if isinstance(data, pd.DataFrame):
data = list(data.T.to_dict().values())
elif isinstance(data[0], six.string_types):
data = list(csv.DictReader(data, fieldnames=headers))
images = _download_images(data, img_cols)
predict_data = _get_predicton_csv_lines(data, headers, images)
if cloud:
parts = model_dir_or_id.split('.')
if len(parts) != 2:
raise ValueError()
predict_results = ml.ModelVersions(parts[0]).predict(parts[1], predict_data)
else:
tf_logging_level = logging.getLogger("tensorflow").level
logging.getLogger("tensorflow").setLevel(logging.WARNING)
try:
predict_results = _tf_predict(model_dir_or_id, predict_data)
finally:
logging.getLogger("tensorflow").setLevel(tf_logging_level)
df_r = pd.DataFrame(predict_results)
if not with_source:
return df_r
display_data = data
if show_image:
display_data = _get_display_data_with_images(data, images)
df_s = pd.DataFrame(display_data)
df = pd.concat([df_r, df_s], axis=1)
df = df.loc[:, ~df.columns.duplicated()]
return df | Predict with a specified model.
It predicts with the model, join source data with prediction results, and formats
the results so they can be displayed nicely in Datalab.
Args:
model_dir_or_id: The model directory if cloud is False, or model.version if cloud is True.
data: Can be a list of dictionaries, a list of csv lines, or a Pandas DataFrame. If it is not
a list of csv lines, data will be converted to csv lines first, using the orders specified
by headers and then send to model. For images, it can be image gs urls or in-memory PIL
images. Images will be converted to base64 encoded strings before prediction.
headers: the column names of data. It specifies the order of the columns when
serializing to csv lines for prediction.
img_cols: The image url columns. If specified, the img_urls will be converted to
base64 encoded image bytes.
with_source: Whether return a joined prediction source and prediction results, or prediction
results only.
show_image: When displaying prediction source, whether to add a column of image bytes for
each image url column.
Returns:
A dataframe of joined prediction source and prediction results, or prediction results only. |
21,551 | def ensure_dtype(core, dtype, dtype_):
core = core.copy()
if dtype is None:
dtype = dtype_
if dtype_ == dtype:
return core, dtype
for key, val in {
int: chaospy.poly.typing.asint,
float: chaospy.poly.typing.asfloat,
np.float32: chaospy.poly.typing.asfloat,
np.float64: chaospy.poly.typing.asfloat,
}.items():
if dtype == key:
converter = val
break
else:
raise ValueError("dtype not recognised (%s)" % str(dtype))
for key, val in core.items():
core[key] = converter(val)
return core, dtype | Ensure dtype is correct. |
21,552 | def apply(self, doc, clear, **kwargs):
doc = self.session.merge(doc)
for i, mention_class in enumerate(self.mention_classes):
tc_to_insert = defaultdict(list)
self.child_context_set.clear()
for tc in self.matchers[i].apply(self.mention_spaces[i].apply(doc)):
rec = tc._load_id_or_insert(self.session)
if rec:
tc_to_insert[tc._get_table()].append(rec)
self.child_context_set.add(tc)
for table, records in tc_to_insert.items():
stmt = insert(table.__table__).values(records)
self.session.execute(stmt)
mention_args = {"document_id": doc.id}
for child_context in self.child_context_set:
for arg_name in mention_class.__argnames__:
mention_args[arg_name + "_id"] = child_context.id
if not clear:
q = select([mention_class.id])
for key, value in list(mention_args.items()):
q = q.where(getattr(mention_class, key) == value)
mention_id = self.session.execute(q).first()
if mention_id is not None:
continue
yield mention_class(**mention_args) | Extract mentions from the given Document.
:param doc: A document to process.
:param clear: Whether or not to clear the existing database entries. |
21,553 | def paint(self, p, *args):
if self.picturenotgened:
self.generatePicture(self.getBoundingParents()[0].rect())
self.picturenotgened = False
pg.ImageItem.paint(self, p, *args)
self.generatePicture(self.getBoundingParents()[0].rect()) | I have no idea why, but we need to generate the picture after painting otherwise
it draws incorrectly. |
21,554 | def _EccZmaxRperiRap(self,*args,**kwargs):
if len(args) == 5:
R,vR,vT, z, vz= args
elif len(args) == 6:
R,vR,vT, z, vz, phi= args
else:
self._parse_eval_args(*args)
R= self._eval_R
vR= self._eval_vR
vT= self._eval_vT
z= self._eval_z
vz= self._eval_vz
Lz= R*vT
Phi= _evaluatePotentials(self._pot,R,z)
E= Phi+vR**2./2.+vT**2./2.+vz**2./2.
thisERL= -numpy.exp(self._ERLInterp(Lz))+self._ERLmax
thisERa= -numpy.exp(self._ERaInterp(Lz))+self._ERamax
if isinstance(R,numpy.ndarray):
indx= ((E-thisERa)/(thisERL-thisERa) > 1.)\
*(((E-thisERa)/(thisERL-thisERa)-1.) < 10.**-2.)
E[indx]= thisERL[indx]
indx= ((E-thisERa)/(thisERL-thisERa) < 0.)\
*((E-thisERa)/(thisERL-thisERa) > -10.**-2.)
E[indx]= thisERa[indx]
indx= (Lz < self._Lzmin)
indx+= (Lz > self._Lzmax)
indx+= ((E-thisERa)/(thisERL-thisERa) > 1.)
indx+= ((E-thisERa)/(thisERL-thisERa) < 0.)
indxc= True^indx
ecc= numpy.empty(R.shape)
zmax= numpy.empty(R.shape)
rperi= numpy.empty(R.shape)
rap= numpy.empty(R.shape)
if numpy.sum(indxc) > 0:
u0= numpy.exp(self._logu0Interp.ev(Lz[indxc],
(_Efunc(E[indxc],thisERL[indxc])-_Efunc(thisERa[indxc],thisERL[indxc]))/(_Efunc(thisERL[indxc],thisERL[indxc])-_Efunc(thisERa[indxc],thisERL[indxc]))))
sinh2u0= numpy.sinh(u0)**2.
thisEr= self.Er(R[indxc],z[indxc],vR[indxc],vz[indxc],
E[indxc],Lz[indxc],sinh2u0,u0)
thisEz= self.Ez(R[indxc],z[indxc],vR[indxc],vz[indxc],
E[indxc],Lz[indxc],sinh2u0,u0)
thisv2= self.vatu0(E[indxc],Lz[indxc],u0,self._delta*numpy.sinh(u0),retv2=True)
cos2psi= 2.*thisEr/thisv2/(1.+sinh2u0)
cos2psi[(cos2psi > 1.)*(cos2psi < 1.+10.**-5.)]= 1.
indxCos2psi= (cos2psi > 1.)
indxCos2psi+= (cos2psi < 0.)
indxc[indxc]= True^indxCos2psi
indx= True^indxc
psi= numpy.arccos(numpy.sqrt(cos2psi[True^indxCos2psi]))
coords= numpy.empty((3,numpy.sum(indxc)))
coords[0,:]= (Lz[indxc]-self._Lzmin)/(self._Lzmax-self._Lzmin)*(self._nLz-1.)
y= (_Efunc(E[indxc],thisERL[indxc])-_Efunc(thisERa[indxc],thisERL[indxc]))/(_Efunc(thisERL[indxc],thisERL[indxc])-_Efunc(thisERa[indxc],thisERL[indxc]))
coords[1,:]= y*(self._nE-1.)
coords[2,:]= psi/numpy.pi*2.*(self._npsi-1.)
ecc[indxc]= (numpy.exp(ndimage.interpolation.map_coordinates(self._eccFiltered,
coords,
order=3,
prefilter=False))-10.**-10.)
rperi[indxc]= (numpy.exp(ndimage.interpolation.map_coordinates(self._rperiFiltered,
coords,
order=3,
prefilter=False))-10.**-10.)*(numpy.exp(self._rperiLzInterp(Lz[indxc]))-10.**-5.)
sin2psi= 2.*thisEz[True^indxCos2psi]/thisv2[True^indxCos2psi]/(1.+sinh2u0[True^indxCos2psi])
sin2psi[(sin2psi > 1.)*(sin2psi < 1.+10.**-5.)]= 1.
indxSin2psi= (sin2psi > 1.)
indxSin2psi+= (sin2psi < 0.)
indxc[indxc]= True^indxSin2psi
indx= True^indxc
psiz= numpy.arcsin(numpy.sqrt(sin2psi[True^indxSin2psi]))
newcoords= numpy.empty((3,numpy.sum(indxc)))
newcoords[0:2,:]= coords[0:2,True^indxSin2psi]
newcoords[2,:]= psiz/numpy.pi*2.*(self._npsi-1.)
zmax[indxc]= (numpy.exp(ndimage.interpolation.map_coordinates(self._zmaxFiltered,
newcoords,
order=3,
prefilter=False))-10.**-10.)*(numpy.exp(self._zmaxLzInterp(Lz[indxc]))-10.**-5.)
rap[indxc]= (numpy.exp(ndimage.interpolation.map_coordinates(self._rapFiltered,
newcoords,
order=3,
prefilter=False))-10.**-10.)*(numpy.exp(self._rapLzInterp(Lz[indxc]))-10.**-5.)
if numpy.sum(indx) > 0:
eccindiv, zmaxindiv, rperiindiv, rapindiv=\
self._aA.EccZmaxRperiRap(R[indx],
vR[indx],
vT[indx],
z[indx],
vz[indx],
**kwargs)
ecc[indx]= eccindiv
zmax[indx]= zmaxindiv
rperi[indx]= rperiindiv
rap[indx]= rapindiv
else:
ecc,zmax,rperi,rap= self.EccZmaxRperiRap(numpy.array([R]),
numpy.array([vR]),
numpy.array([vT]),
numpy.array([z]),
numpy.array([vz]),
**kwargs)
return (ecc[0],zmax[0],rperi[0],rap[0])
ecc[ecc < 0.]= 0.
zmax[zmax < 0.]= 0.
rperi[rperi < 0.]= 0.
rap[rap < 0.]= 0.
return (ecc,zmax,rperi,rap) | NAME:
EccZmaxRperiRap (_EccZmaxRperiRap)
PURPOSE:
evaluate the eccentricity, maximum height above the plane, peri- and apocenter in the Staeckel approximation
INPUT:
Either:
a) R,vR,vT,z,vz[,phi]:
1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
OUTPUT:
(e,zmax,rperi,rap)
HISTORY:
2017-12-15 - Written - Bovy (UofT) |
21,555 | def collect_blame_info(cls, matches):
old_area = None
for filename, ranges in matches:
area, name = os.path.split(filename)
if not area:
area =
if area != old_area:
print("\n\n%s/\n" % area)
old_area = area
print("%s " % name, end="")
filter = cls.build_line_range_filter(ranges)
command = [, , ] + filter + [name]
os.chdir(area)
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if err:
print(" <<<<<<<<<< Unable to collect info:", err)
else:
yield out | Runs git blame on files, for the specified sets of line ranges.
If no line range tuples are provided, it will do all lines. |
21,556 | def do_bp(self, arg):
pid = self.get_process_id_from_prefix()
if not self.debug.is_debugee(pid):
raise CmdError("target process is not being debugged")
process = self.get_process(pid)
token_list = self.split_tokens(arg, 1, 1)
try:
address = self.input_address(token_list[0], pid)
deferred = False
except Exception:
address = token_list[0]
deferred = True
if not address:
address = token_list[0]
deferred = True
self.debug.break_at(pid, address)
if deferred:
print("Deferred breakpoint set at %s" % address)
else:
print("Breakpoint set at %s" % address) | [~process] bp <address> - set a code breakpoint |
21,557 | def connectionMade(self):
self.transport.uid = str(uuid.uuid1())
self.guid = self.dispatcher.add(self.transport)
self.dispatcher.send(self.guid, {'setup_connection': self.guid}) | establish the address of this new connection and add it to the list of
sockets managed by the dispatcher
reply to the transport with a "setup_connection" notice
containing the recipient's address for use by the client as a return
address for future communications |
21,558 | def reference_axis_from_chains(chains):
if not len(set([len(x) for x in chains])) == 1:
raise ValueError("All chains must be of the same length")
coords = [numpy.array(chains[0].primitive.coordinates)]
orient_vector = polypeptide_vector(chains[0])
for i, c in enumerate(chains[1:]):
if is_acute(polypeptide_vector(c), orient_vector):
coords.append(numpy.array(c.primitive.coordinates))
else:
coords.append(numpy.flipud(numpy.array(c.primitive.coordinates)))
reference_axis = numpy.mean(numpy.array(coords), axis=0)
return Primitive.from_coordinates(reference_axis) | Average coordinates from a set of primitives calculated from Chains.
Parameters
----------
chains : list(Chain)
Returns
-------
reference_axis : numpy.array
The averaged (x, y, z) coordinates of the primitives for
the list of Chains. In the case of a coiled coil barrel,
this would give the central axis for calculating e.g. Crick
angles.
Raises
------
ValueError :
If the Chains are not all of the same length. |
21,559 | def _process_args(self, largs, rargs, values):
def _through_option(func, *args, **kwds):
try:
func(*args, **kwds)
except optparse.BadOptionError as err:
largs.append(err.opt_str)
while rargs:
arg = rargs[0]
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
_through_option(self._process_long_opt, rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
_through_option(self._process_short_opts, rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return | _process_args(largs : [string],
rargs : [string],
values : Values)
Process command-line arguments and populate 'values', consuming
options and arguments from 'rargs'. If 'allow_interspersed_args' is
false, stop at the first non-option argument. If true, accumulate any
interspersed non-option arguments in 'largs'. |
21,560 | def get_local_file(file):
try:
with open(file.path):
yield file.path
except NotImplementedError:
_, ext = os.path.splitext(file.name)
with NamedTemporaryFile(prefix=, suffix=ext) as tmp:
try:
file.open()
for chunk in file.chunks():
tmp.write(chunk)
finally:
file.close()
tmp.flush()
yield tmp.name | Get a local version of the file, downloading it from the remote storage if
required. The returned value should be used as a context manager to
ensure any temporary files are cleaned up afterwards. |
21,561 | def _encode_resp(self, value):
if isinstance(value, bytes):
return b''.join(
[b'$',
ascii(len(value)).encode(), CRLF, value, CRLF])
elif isinstance(value, str):
return self._encode_resp(value.encode())
elif isinstance(value, int):
return self._encode_resp(ascii(value).encode())
elif isinstance(value, float):
return self._encode_resp(ascii(value).encode())
elif isinstance(value, list):
output = [b'*', ascii(len(value)).encode(), CRLF]
for item in value:
output.append(self._encode_resp(item))
return b''.join(output)
else:
raise ValueError('Unsupported type: {}'.format(type(value)))
:param mixed value: The list of command parts to encode
:rtype: bytes |
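For reference, the RESP wire format targeted above encodes a bulk string as `$<length>\r\n<payload>\r\n` and an array as `*<count>\r\n` followed by its encoded elements, so a command like `['GET', 'mykey']` serializes as shown in this standalone sketch of the protocol (not the class above):

```python
CRLF = b"\r\n"

def encode_bulk(value: bytes) -> bytes:
    return b"$" + str(len(value)).encode() + CRLF + value + CRLF

def encode_array(items) -> bytes:
    return b"*" + str(len(items)).encode() + CRLF + b"".join(encode_bulk(i) for i in items)

assert encode_array([b"GET", b"mykey"]) == b"*2\r\n$3\r\nGET\r\n$5\r\nmykey\r\n"
```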
21,562 | def makefile(self, tarinfo, targetpath):
source = self.fileobj
source.seek(tarinfo.offset_data)
target = bltn_open(targetpath, "wb")
if tarinfo.sparse is not None:
for offset, size in tarinfo.sparse:
target.seek(offset)
copyfileobj(source, target, size)
else:
copyfileobj(source, target, tarinfo.size)
target.seek(tarinfo.size)
target.truncate()
target.close() | Make a file called targetpath. |
21,563 | def read_model_from_config(cp, ifo, section="calibration"):
model = cp.get_opt_tag(section, "{}_model".format(ifo.lower()), None)
recalibrator = models[model].from_config(cp, ifo.lower(), section)
return recalibrator | Returns an instance of the calibration model specified in the
given configuration file.
Parameters
----------
cp : WorflowConfigParser
An open config file to read.
ifo : string
The detector (H1, L1) whose model will be loaded.
section : {"calibration", string}
Section name from which to retrieve the model.
Returns
-------
instance
An instance of the calibration model class. |
21,564 | def _output_pins(self, pins):
[self._validate_channel(pin) for pin in pins.keys()]
for pin, value in iter(pins.items()):
if value:
self.gpio[int(pin/8)] |= 1 << (int(pin%8))
else:
self.gpio[int(pin/8)] &= ~(1 << (int(pin%8)))
self._write_gpio() | Set multiple pins high or low at once. Pins should be a dict of pin
name to pin value (HIGH/True for 1, LOW/False for 0). All provided pins
will be set to the given values. |
21,565 | def on_window_losefocus(self, window, event):
if not HidePrevention(self.window).may_hide():
return
value = self.settings.general.get_boolean()
visible = window.get_property()
self.losefocus_time = get_server_time(self.window)
if visible and value:
log.info("Hiding on focus lose")
self.hide() | Hides terminal main window when it loses the focus and if
the window_losefocus gconf variable is True. |
21,566 | def calc_time(self) -> None:
def get_number_of_frames(feat_fns):
total = 0
for feat_fn in feat_fns:
num_frames = len(np.load(feat_fn))
total += num_frames
return total
def numframes_to_minutes(num_frames):
minutes = ((num_frames*10)/1000)/60
return minutes
total_frames = 0
train_fns = [train_fn[0] for train_fn in self.train_fns]
num_train_frames = get_number_of_frames(train_fns)
total_frames += num_train_frames
num_valid_frames = get_number_of_frames(self.corpus.get_valid_fns()[0])
total_frames += num_valid_frames
num_test_frames = get_number_of_frames(self.corpus.get_test_fns()[0])
total_frames += num_test_frames
print("Train duration: %0.3f" % numframes_to_minutes(num_train_frames))
print("Validation duration: %0.3f" % numframes_to_minutes(num_valid_frames))
print("Test duration: %0.3f" % numframes_to_minutes(num_test_frames))
print("Total duration: %0.3f" % numframes_to_minutes(total_frames)) | Prints statistics about the the total duration of recordings in the
corpus. |
21,567 | def get_all_if_set(self):
with self._lock:
results = {}
for add, fut in self._state.items():
if self._contains_and_set(add):
results[add] = fut.result()
return results | Return all the addresses and opaque values set in the context.
Useful in the squash method.
Returns:
(dict of str to bytes): The addresses and bytes that have
been set in the context. |
21,568 | def _must_not_custom_query(issn):
custom_queries = set([utils.cleanup_string(i) for i in journal_titles.load(issn).get(, [])])
for item in custom_queries:
query = {
"match": {
"reference_source_cleaned": item
}
}
yield query | This method builds the list of journal-title filters that will be
applied to the boolean query as a "must_not" restriction.
The filter list is collected from the journal's custom search
template, whenever such a template exists. |
21,569 | def _bulk_op(self, record_id_iterator, op_type, index=None, doc_type=None):
with self.create_producer() as producer:
for rec in record_id_iterator:
producer.publish(dict(
id=str(rec),
op=op_type,
index=index,
doc_type=doc_type
)) | Index record in Elasticsearch asynchronously.
:param record_id_iterator: Iterator that yields record UUIDs.
:param op_type: Indexing operation (one of ``index``, ``create``,
``delete`` or ``update``).
:param index: The Elasticsearch index. (Default: ``None``)
:param doc_type: The Elasticsearch doc_type. (Default: ``None``) |
21,570 | def print_graphic_information(self, num_curve, information):
label_information = information[0]
data_information = information[1:]
count_nb_label = 0
nb_label = len(label_information)
while count_nb_label <= nb_label:
self.ui.column1_label.setText(label_information[0].strip())
self.ui.column2_label.setText(label_information[1].strip())
self.ui.column3_label.setText(label_information[2].strip())
self.ui.column4_label.setText(label_information[3].strip())
self.ui.column5_label.setText(label_information[4].strip())
self.ui.column6_label.setText(label_information[5].strip())
self.ui.column7_label.setText(label_information[6].strip())
self.ui.column8_label.setText(label_information[7].strip())
count_nb_label += 1
line_of_data = 0
while line_of_data < len(data_information):
if line_of_data == num_curve:
self.ui.column1_result.setText(data_information[line_of_data][0])
self.ui.column2_result.setText(data_information[line_of_data][1])
self.ui.column3_result.setText(data_information[line_of_data][2])
self.ui.column4_result.setText(data_information[line_of_data][3])
self.ui.column5_result.setText(data_information[line_of_data][4])
self.ui.column6_result.setText(data_information[line_of_data][5])
self.ui.column7_result.setText(data_information[line_of_data][6])
self.ui.column8_result.setText(data_information[line_of_data][7])
line_of_data += 1 | This function displays information about curves.
Inputs: num_curve: the index of the curve's line to display.
information: the array containing the information for all curves to display. |
21,571 | def __parse_scanned_version_info(self):
string = self.sys_version_info_formatted
try:
major, minor, micro, release_level, serial = string.split(",")
if (release_level in ("alfa", "beta", "candidate", "final") and
(release_level != "final" or serial == "0") and
major.isdigit() and
minor.isdigit() and
micro.isdigit() and
serial.isdigit()):
self.sys_version_info = (int(major), int(minor), int(micro),
release_level, int(serial))
self.__construct_python_version()
return
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
pass
raise BadEnvironment("Unsupported Python version (%s)" % (string,)) | Parses the environment's formatted version info string. |
21,572 | def refresh( self ):
schemas = self.schemas()
self.blockSignals(True)
self.clear()
self.addItems([schema.name() for schema in schemas])
self.blockSignals(False) | Refreshes the current user interface to match the latest settings. |
21,573 | def _find_block(starts, ends, cur_block, rec_num):
total = len(starts)
if (cur_block == -1):
cur_block = 0
for x in range(cur_block, total):
if (starts[x] <= rec_num and ends[x] >= rec_num):
return x, x
if (starts[x] > rec_num):
break
return -1, x-1 | Finds the block that rec_num is in, if it is found. Otherwise it returns -1.
It also returns the block that has the physical data either at or
preceding the rec_num.
It could be -1 if the preceding block does not exist. |
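Calling the helper directly illustrates both return modes:

starts, ends = [0, 20], [9, 29]
_find_block(starts, ends, cur_block=-1, rec_num=5)   # -> (0, 0): record 5 lies in block 0
_find_block(starts, ends, cur_block=-1, rec_num=15)  # -> (-1, 0): not found; block 0 precedes it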
21,574 | def generate_parsers(config, paths):
output = ""
output += inspect.getsource(conf_reader._get_source) + "\n\n"
output += inspect.getsource(utils._get_encoding) + "\n\n"
output += inspect.getsource(utils.handle_encodnig) + "\n\n"
output += inspect.getsource(utils.is_equal_tag) + "\n\n"
output += inspect.getsource(utils.has_neigh) + "\n\n"
output += "
for name, path in paths.items():
path = path[0]
required = config[0]["vars"][name].get("required", False)
notfoundmsg = config[0]["vars"][name].get("notfoundmsg", "")
output += _generate_parser(name, path, required, notfoundmsg)
output += "
output += _unittest_template(config)
output += "
output += "if __name__ == :\n"
output += IND + "test_parsers()"
return output | Generate parser for all `paths`.
Args:
config (dict): Original configuration dictionary used to get matches
for unittests. See
:mod:`~harvester.autoparser.conf_reader` for details.
paths (dict): Output from :func:`.select_best_paths`.
Returns:
str: Python code containing all parsers for `paths`. |
21,575 | def _delete(self, vid=None):
assert vid is not None
query = text('DELETE FROM datasets WHERE vid = :vid;')  # statement reconstructed from the docstring; the table name is an assumption
self.execute(query, vid=vid) | Deletes given dataset from index.
Args:
vid (str): dataset vid. |
21,576 | def delete(stack_ref: List[str],
region: str, dry_run: bool, force: bool, remote: str):
lizzy = setup_lizzy_client(remote)
stack_refs = get_stack_refs(stack_ref)
all_with_version = all(stack.version is not None
for stack in stack_refs)
# NOTE: the enclosing loop and progress-reporting context manager were lost in
# this dump; the original wrapped the call below in something like
#     with Action('Deleting Stack {stack_id}..', stack_id=stack_id):
output = lizzy.delete(stack_id, region=region, dry_run=dry_run)
print(output) | Delete Cloud Formation stacks |
21,577 | def evaluate_expression(dbg, frame, expression, is_exec):
if frame is None:
return
updated_globals = {}
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals)
try:
expression = str(expression.replace('@LINE@', '\n'))
if is_exec:
    try:
        # Reconstructed branch: compile as an eval so the result can be printed,
        # and fall back to exec when the expression is a statement.
        compiled = compile(expression, '<string>', 'eval')
    except Exception:
        exec(expression, updated_globals, frame.f_locals)
    else:
        result = eval(compiled, updated_globals, frame.f_locals)
        if result is not None:
            sys.stdout.write('%s\n' % (result,))
    return
else:
return eval_in_context(expression, updated_globals, frame.f_locals)
finally:
del updated_globals
del frame | returns the result of the evaluated expression
@param is_exec: determines if we should do an exec or an eval |
21,578 | def __crawler_stop(self):
if self.__stopping:
return
self.__stopping = True
self.__wait_for_current_threads()
self.queue.move_bulk([
QueueItem.STATUS_QUEUED,
QueueItem.STATUS_IN_PROGRESS
], QueueItem.STATUS_CANCELLED)
self.__crawler_finish()
self.__stopped = True | Mark the crawler as stopped.
Note:
If :attr:`__stopped` is True, the main thread will be stopped. Every piece of code that gets
executed after :attr:`__stopped` is True could cause Thread exceptions and or race conditions. |
21,579 | def set(self, e, k, v, real_k=None, check_kw_name=False):
if self.escape:
v = v.strip().replace("\\" + self.quote, self.quote)
return super(kv_transformer, self).set(e, k, v, real_k=real_k, check_kw_name=check_kw_name) | override base to handle escape case: replace \" to " |
21,580 | def _list_resource(self, resource):
self.project_service.set_auth(self._token_project)
return super(BossRemote, self).list_project(resource=resource) | List all instances of the given resource type.
Use the specific list_<resource>() methods instead:
list_collections()
list_experiments()
list_channels()
list_coordinate_frames()
Args:
resource (intern.resource.boss.BossResource): resource.name may be
an empty string.
Returns:
(list)
Raises:
requests.HTTPError on failure. |
21,581 | def tags(self):
tags = self.fields['tags']
return [(t, v) for t in tags for v in tags[t]] | Returns a list of tuples with key-value pairs representing tags in
this todo item. |
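For example, assuming fields['tags'] maps tag names to lists of values, the comprehension flattens it into pairs (dict insertion order is preserved on Python 3.7+):

tags = {'due': ['2019-12-01'], 'id': ['1', '2']}
pairs = [(t, v) for t in tags for v in tags[t]]
assert pairs == [('due', '2019-12-01'), ('id', '1'), ('id', '2')]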
21,582 | def delete_event_source_mapping(UUID=None, EventSourceArn=None, FunctionName=None,
region=None, key=None, keyid=None, profile=None):
ids = _get_ids(UUID, EventSourceArn=EventSourceArn,
FunctionName=FunctionName)
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
for id in ids:
conn.delete_event_source_mapping(UUID=id)
return {'deleted': True}
except ClientError as e:
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)} | Given an event source mapping ID or an event source ARN and FunctionName,
delete the event source mapping
Returns {deleted: true} if the mapping was deleted and returns
{deleted: false} if the mapping was not deleted.
CLI Example:
.. code-block:: bash
salt myminion boto_lambda.delete_event_source_mapping 260c423d-e8b5-4443-8d6a-5e91b9ecd0fa |
21,583 | def search_metadata_sql_builder(search):
b = SQLBuilder(tables=, where_clauses=["m.observatory IS NOT NULL"])
b.add_set_membership(search.obstory_ids, )
b.add_sql(search.field_name, )
b.add_sql(search.time_min, )
b.add_sql(search.time_max, )
b.add_sql(search.lat_min, )
b.add_sql(search.lat_max, )
b.add_sql(search.long_min, )
b.add_sql(search.long_max, )
b.add_sql(search.item_id, )
if search.exclude_imported:
b.where_clauses.append()
if search.exclude_export_to is not None:
b.where_clauses.append()
b.sql_args.append(SQLBuilder.map_value(search.exclude_export_to))
return b | Create and populate an instance of :class:`meteorpi_db.SQLBuilder` for a given
:class:`meteorpi_model.ObservatoryMetadataSearch`. This can then be used to retrieve the results of the search,
materialise them into :class:`meteorpi_model.ObservatoryMetadata` instances etc.
:param ObservatoryMetadataSearch search:
The search to realise
:return:
A :class:`meteorpi_db.SQLBuilder` configured from the supplied search |
21,584 | def rename_tab_uuid(self, term_uuid, new_text, user_set=True):
term_uuid = uuid.UUID(term_uuid)
page_index, = (
index for index, t in enumerate(self.get_notebook().iter_terminals())
if t.get_uuid() == term_uuid
)
self.get_notebook().rename_page(page_index, new_text, user_set) | Rename an already added tab by its UUID |
21,585 | def _get_reference_band(self, data):
blue = data[..., self.blue_idx].astype("float32")
nir = data[..., self.nir_idx].astype("float32")
swir1 = data[..., self.swir1_idx].astype("float32")
return np.nanmax(np.array([nir, swir1]), axis=0) / blue | Extract the max-ratio band from time-series
The max-ratio is defined as max(NIR,SWIR1)/BLUE
:param data: 4D array from which to compute the max-ratio reference band
:type data: numpy array
:return: 3D array containing the max-ratio reference band |
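A tiny numeric check of the max-ratio formula, with hypothetical band reflectance values:

import numpy as np

blue, nir, swir1 = 0.2, 0.4, 0.3
ref = np.nanmax(np.array([nir, swir1]), axis=0) / blue
assert np.isclose(ref, 2.0)   # max(0.4, 0.3) / 0.2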
21,586 | def fit(self, X, lengths=None):
X = check_array(X)
self._init(X, lengths=lengths)
self._check()
self.monitor_._reset()
for iter in range(self.n_iter):
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for i, j in iter_from_X_lengths(X, lengths):
framelogprob = self._compute_log_likelihood(X[i:j])
logprob, fwdlattice = self._do_forward_pass(framelogprob)
curr_logprob += logprob
bwdlattice = self._do_backward_pass(framelogprob)
posteriors = self._compute_posteriors(fwdlattice, bwdlattice)
self._accumulate_sufficient_statistics(
stats, X[i:j], framelogprob, posteriors, fwdlattice,
bwdlattice)
self._do_mstep(stats)
self.monitor_.report(curr_logprob)
if self.monitor_.converged:
break
return self | Estimate model parameters.
An initialization step is performed before entering the
EM algorithm. If you want to avoid this step for a subset of
the parameters, pass proper ``init_params`` keyword argument
to estimator's constructor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, )
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
self : object
Returns self. |
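Typical usage goes through hmmlearn's concrete models rather than calling fit on the base class; a minimal sketch, assuming the hmmlearn package and random illustrative data:

import numpy as np
from hmmlearn import hmm

X = np.random.randn(300, 2)        # 300 observations with 2 features
lengths = [100, 100, 100]          # three concatenated sequences
model = hmm.GaussianHMM(n_components=3, n_iter=50)
model.fit(X, lengths)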
21,587 | def run_cell(self, code, cell_name, filename, run_cell_copy):
def norm(text):
return remove_backslashes(to_text_string(text))
self.run_cell_filename = filename
client = self.get_client_for_file(filename)
if client is None:
client = self.get_current_client()
is_internal_kernel = False
if client is not None:
if client.get_kernel() is not None and not run_cell_copy:
line = (to_text_string("{}(,)")
.format(to_text_string(),
(to_text_string(cell_name).replace("\\","\\\\")
.replace("")),
norm(filename).replace("")))
is_internal_kernel = True
else:
line = code.strip()
try:
if client.shellwidget._executing:
pass
elif client.shellwidget._reading:
client.shellwidget._append_html(
_("<br><b>Exit the debugger before trying to "
"run a cell in this console.</b>\n<hr><br>"),
before_prompt=True)
return
else:
if is_internal_kernel:
client.shellwidget.silent_execute(
to_text_string(
)
.format(to_text_string(code)
.replace(, r)
.replace(, r)))
self.execute_code(line)
except AttributeError:
pass
self.visibility_changed(True)
self.raise_()
else:
QMessageBox.warning(self, _('Warning'),
_("No IPython console is currently available "
"to run <b>{}</b>.<br><br>Please open a new "
"one and try again."
).format(osp.basename(filename)),
QMessageBox.Ok) | Run cell in current or dedicated client. |
21,588 | def address_from_digest(digest):
address_trits = [0] * (Address.LEN * TRITS_PER_TRYTE)
sponge = Kerl()
sponge.absorb(digest.as_trits())
sponge.squeeze(address_trits)
return Address.from_trits(
trits=address_trits,
key_index=digest.key_index,
security_level=digest.security_level,
) | Generates an address from a private key digest. |
21,589 | def normalize_profile(in_profile, log=False, return_offset = True):
if log:
tmp_prefactor = in_profile.max(axis=1)
tmp_prof = np.exp(in_profile.T - tmp_prefactor).T
else:
tmp_prefactor = 0.0
tmp_prof = in_profile
norm_vector = tmp_prof.sum(axis=1)
return (np.copy(np.einsum('ai,a->ai', tmp_prof, 1.0/norm_vector)),
(np.log(norm_vector) + tmp_prefactor) if return_offset else None) | return a normalized version of a profile matrix
Parameters
----------
in_profile : np.array
shape Lxq, will be normalized to one across each row
log : bool, optional
treat the input as log probabilities
return_offset : bool, optional
return the log of the scale factor for each row
Returns
-------
tuple
normalized profile (fresh np object) and offset (if return_offset==True) |
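A minimal check of the non-log path, assuming the 'ai,a->ai' einsum reconstruction above: each row of the returned profile sums to one, and the offset is the log of each row sum.

import numpy as np

profile = np.random.rand(5, 4)               # L x q matrix of positive entries
norm, offset = normalize_profile(profile)
assert np.allclose(norm.sum(axis=1), 1.0)
assert np.allclose(offset, np.log(profile.sum(axis=1)))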
21,590 | def gen_random_string(str_len):
return ''.join(
random.choice(string.ascii_letters + string.digits) for _ in range(str_len)) | generate random string with specified length |
21,591 | def combinations_with_replacement(iterable, r):
stk = [[i,] for i in iterable]
pop = stk.pop
while len(stk) > 0:
top = pop()
if len(top) == r:
yield tuple(top)
else:
stk.extend(top + [i] for i in iterable) | This function acts as a replacement for the
itertools.combinations_with_replacement function. The original does not
replace items that come earlier in the provided iterator. |
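Unlike itertools.combinations_with_replacement, items that appear earlier in the iterable are reused after later ones, so every ordered pair is produced:

assert sorted(combinations_with_replacement('AB', 2)) == [
    ('A', 'A'), ('A', 'B'), ('B', 'A'), ('B', 'B')]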
21,592 | def make_color_legend_rects(colors, labels=None):
from matplotlib.pyplot import Rectangle
if labels:
d = dict(zip(labels, colors))
se = pd.Series(d)
else:
se = colors
rects = []
for i in se.index:
r = Rectangle((0, 0), 0, 0, fc=se[i])
rects.append(r)
out = pd.Series(rects, index=se.index)
return out | Make list of rectangles and labels for making legends.
Parameters
----------
colors : pandas.Series or list
Pandas series whose values are colors and index is labels.
Alternatively, you can provide a list with colors and provide the labels
as a list.
labels : list
If colors is a list, this should be the list of corresponding labels.
Returns
-------
out : pd.Series
Pandas series whose values are matplotlib rectangles and whose index are
the legend labels for those rectangles. You can add each of these
rectangles to your axis using ax.add_patch(r) for r in out then create a
legend whose handles are out.values and whose labels are
legend_rects.index:
for r in legend_rects:
ax.add_patch(r)
lgd = ax.legend(legend_rects.values, labels=legend_rects.index) |
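A short usage sketch following the docstring's own recipe; the colors and labels are illustrative:

import matplotlib.pyplot as plt

legend_rects = make_color_legend_rects(['red', 'blue'], labels=['treated', 'control'])
fig, ax = plt.subplots()
for r in legend_rects:
    ax.add_patch(r)
lgd = ax.legend(legend_rects.values, legend_rects.index)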
21,593 | def _get_ip_public(self, queue_target, url, json=False, key=None):
try:
response = urlopen(url, timeout=self.timeout).read().decode()
except Exception as e:
logger.debug("IP plugin - Cannot open URL {} ({})".format(url, e))
queue_target.put(None)
else:
try:
if not json:
queue_target.put(response)
else:
queue_target.put(loads(response)[key])
except ValueError:
queue_target.put(None) | Request the url service and put the result in the queue_target. |
21,594 | def recv(self, size):
return self._safe_call(
True,
super(SSLFileobjectMixin, self).recv,
size,
) | Receive message of a size from the socket. |
21,595 | async def edit(self, **fields):
try:
content = fields['content']
except KeyError:
pass
else:
if content is not None:
fields['content'] = str(content)
try:
embed = fields['embed']
except KeyError:
pass
else:
if embed is not None:
fields['embed'] = embed.to_dict()
data = await self._state.http.edit_message(self.channel.id, self.id, **fields)
self._update(channel=self.channel, data=data)
try:
delete_after = fields['delete_after']
except KeyError:
pass
else:
if delete_after is not None:
await self.delete(delay=delete_after) | |coro|
Edits the message.
The content must be able to be transformed into a string via ``str(content)``.
Parameters
-----------
content: Optional[:class:`str`]
The new content to replace the message with.
Could be ``None`` to remove the content.
embed: Optional[:class:`Embed`]
The new embed to replace the original with.
Could be ``None`` to remove the embed.
delete_after: Optional[:class:`float`]
If provided, the number of seconds to wait in the background
before deleting the message we just edited. If the deletion fails,
then it is silently ignored.
Raises
-------
HTTPException
Editing the message failed. |
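A hedged usage sketch inside a coroutine, assuming message is an already-fetched discord.py Message; the bot setup around it is not shown:

async def tidy_up(message):
    # Update the text and schedule deletion in 10 seconds.
    await message.edit(content='Updated!', delete_after=10.0)
    # Remove the embed while leaving the content untouched.
    await message.edit(embed=None)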
21,596 | def max_sharpe(self, risk_free_rate=0.02):
if not isinstance(risk_free_rate, (int, float)):
raise ValueError("risk_free_rate should be numeric")
args = (self.expected_returns, self.cov_matrix, self.gamma, risk_free_rate)
result = sco.minimize(
objective_functions.negative_sharpe,
x0=self.initial_guess,
args=args,
method="SLSQP",
bounds=self.bounds,
constraints=self.constraints,
)
self.weights = result["x"]
return dict(zip(self.tickers, self.weights)) | Maximise the Sharpe Ratio. The result is also referred to as the tangency portfolio,
as it is the tangent to the efficient frontier curve that intercepts the risk-free
rate.
:param risk_free_rate: risk-free rate of borrowing/lending, defaults to 0.02
:type risk_free_rate: float, optional
:raises ValueError: if ``risk_free_rate`` is non-numeric
:return: asset weights for the Sharpe-maximising portfolio
:rtype: dict |
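A usage sketch with PyPortfolioOpt-style inputs; the price file and the expected-return/covariance helpers are assumptions about the surrounding workflow, not part of the entry above:

import pandas as pd
from pypfopt import expected_returns, risk_models
from pypfopt.efficient_frontier import EfficientFrontier

prices = pd.read_csv("prices.csv", index_col=0, parse_dates=True)  # illustrative price history
mu = expected_returns.mean_historical_return(prices)
S = risk_models.sample_cov(prices)
ef = EfficientFrontier(mu, S)
weights = ef.max_sharpe(risk_free_rate=0.02)   # dict of ticker -> weight for the tangency portfolio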
21,597 | def get_solr_search_url(self, use_amigo=False):
url = self.endpoint_url(self.solr_search)
if use_amigo:
url = self.endpoint_url(self.amigo_solr_search)
return url | Return solr URL to be used for lexical entity searches
A solr search URL is used to search entities/concepts based on a limited set of parameters.
Arguments
---------
use_amigo : bool
If true, get the URL for the GO/AmiGO instance of GOlr. This is typically used for category='function' queries |
21,598 | def load_json_model(filename):
if isinstance(filename, string_types):
with open(filename, "r") as file_handle:
return model_from_dict(json.load(file_handle))
else:
return model_from_dict(json.load(filename)) | Load a cobra model from a file in JSON format.
Parameters
----------
filename : str or file-like
File path or descriptor that contains the JSON document describing the
cobra model.
Returns
-------
cobra.Model
The cobra model as represented in the JSON document.
See Also
--------
from_json : Load from a string. |
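Typical use through cobra.io; the file path is illustrative:

from cobra.io import load_json_model

model = load_json_model("e_coli_core.json")
print(len(model.reactions), len(model.metabolites))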
21,599 | def conditionally_attach_managed_policies(role_name, sr_entry):
service_type = sr_entry['type']  # key name inferred; the registry schema is not shown here
if not (service_type in SERVICE_TYPE_ROLE and "aws_managed_policies" in sr_entry):
print_if_verbose("not eligible for policies; service_type: {} is not valid for policies "
"or no key in service registry for this role".format(service_type))
return
for policy_name in sr_entry['aws_managed_policies']:
print_if_verbose("loading policy: {} for role: {}".format(policy_name, role_name))
if CONTEXT.commit:
try:
CLIENTS["iam"].attach_role_policy(RoleName=role_name, PolicyArn= + policy_name)
except:
fail("Exception putting policy: {} onto role: {}".format(policy_name, role_name), sys.exc_info()) | If 'aws_managed_policies' key lists the names of AWS managed policies to bind to the role,
attach them to the role
Args:
role_name: name of the role to attach the policies to
sr_entry: service registry entry |