Unnamed: 0
int64 0
389k
| code
stringlengths 26
79.6k
| docstring
stringlengths 1
46.9k
|
---|---|---|
def load_tmp_dh(self, dhfile):
    """Load parameters for Ephemeral Diffie-Hellman.

    :param dhfile: The file to load EDH parameters from (``bytes`` or
        ``unicode``).

    :return: None
    """
    dhfile = _path_string(dhfile)
    file_bio = _lib.BIO_new_file(dhfile, b"r")
    if file_bio == _ffi.NULL:
        _raise_current_error()
    file_bio = _ffi.gc(file_bio, _lib.BIO_free)
    # Parse the PEM-encoded DH parameters and install them on the context.
    dh_params = _lib.PEM_read_bio_DHparams(file_bio, _ffi.NULL, _ffi.NULL, _ffi.NULL)
    dh_params = _ffi.gc(dh_params, _lib.DH_free)
    _lib.SSL_CTX_set_tmp_dh(self._context, dh_params)
def get_impact_report_as_string(analysis_dir):
    """Retrieve an html string of table report (impact-report-output.html).

    :param analysis_dir: Directory of where the report located.
    :type analysis_dir: str

    :return: HTML string of the report, or None when no report file exists.
    :rtype: str
    """
    # NOTE(review): the string literals below were lost in extraction --
    # the candidate report file names are missing; restore before use.
    html_report_products = [
        ,
    ]
    # NOTE(review): output sub-directory name literal missing -- restore.
    output_dir_path = join(analysis_dir, )
    # Pick the first candidate report that actually exists on disk.
    for html_report_product in html_report_products:
        table_report_path = join(output_dir_path, html_report_product)
        if exists(table_report_path):
            break
        table_report_path = None
    if not table_report_path:
        return None
    # NOTE(review): open mode and encoding literals missing (presumably
    # 'r' and 'utf-8') -- confirm against the upstream source.
    with open(table_report_path, , encoding=) as table_report_file:
        report = table_report_file.read()
    return report
def result(self):
    """Build the SQL expression for this field, optionally aliased.

    (Original docstring: "Construye la expresion".)
    """
    # NOTE(review): the replacement-string argument of re.sub was lost in
    # extraction (presumably '') -- restore before use.
    field = re.sub(REGEX_CLEANER, , self.field_name)
    if self.alias:
        alias = re.sub(REGEX_CLEANER, , self.alias)
        return "%s AS %s" % (field, alias)
    else:
        return field
def get_issue_labels(self, issue_key):
    """Get issue labels.

    :param issue_key: key of the issue to look up
    :return: the labels of the issue
    """
    # NOTE(review): the URL template and both .get() key literals were
    # lost in extraction (presumably the issue REST endpoint and the
    # 'fields'/'labels' keys) -- restore before use.
    url = .format(issue_key=issue_key)
    return (self.get(url) or {}).get().get()
def convert(self, mode):
    """Convert the current image to the given *mode*. See :class:`Image`
    for a list of available modes.
    """
    if mode == self.mode:
        return
    if mode not in ["L", "LA", "RGB", "RGBA",
                    "YCbCr", "YCbCrA", "P", "PA"]:
        raise ValueError("Mode %s not recognized." % (mode))
    if self.is_empty():
        # Nothing to convert -- just record the new mode.
        self.mode = mode
        return
    if mode == self.mode + "A":
        # Same base mode plus alpha: append a fully opaque alpha channel.
        self.channels.append(np.ma.ones(self.channels[0].shape))
        if self.fill_value is not None:
            self.fill_value += [1]
        self.mode = mode
    elif mode + "A" == self.mode:
        # Same base mode minus alpha: drop the last (alpha) channel.
        self.channels = self.channels[:-1]
        if self.fill_value is not None:
            self.fill_value = self.fill_value[:-1]
        self.mode = mode
    elif mode.endswith("A") and not self.mode.endswith("A"):
        # Different base mode, target has alpha: add alpha, then recurse.
        self.convert(self.mode + "A")
        self.convert(mode)
    elif self.mode.endswith("A") and not mode.endswith("A"):
        # Different base mode, source has alpha: drop alpha, then recurse.
        self.convert(self.mode[:-1])
        self.convert(mode)
    else:
        # Dispatch table of direct conversions between (alpha-matched) modes.
        cases = {
            "RGB": {"YCbCr": self._rgb2ycbcr,
                    "L": self._rgb2l,
                    "P": self._to_p},
            "RGBA": {"YCbCrA": self._rgb2ycbcr,
                     "LA": self._rgb2l,
                     "PA": self._to_p},
            "YCbCr": {"RGB": self._ycbcr2rgb,
                      "L": self._ycbcr2l,
                      "P": self._to_p},
            "YCbCrA": {"RGBA": self._ycbcr2rgb,
                       "LA": self._ycbcr2l,
                       "PA": self._to_p},
            "L": {"RGB": self._l2rgb,
                  "YCbCr": self._l2ycbcr,
                  "P": self._to_p},
            "LA": {"RGBA": self._l2rgb,
                   "YCbCrA": self._l2ycbcr,
                   "PA": self._to_p},
            "P": {"RGB": self._from_p,
                  "YCbCr": self._from_p,
                  "L": self._from_p},
            "PA": {"RGBA": self._from_p,
                   "YCbCrA": self._from_p,
                   "LA": self._from_p}}
        try:
            cases[self.mode][mode](mode)
        except KeyError:
            raise ValueError("Conversion from %s to %s not implemented !"
                             % (self.mode, mode))
def strip_figures(figure):
    """Strip a figure into multiple figures with a trace on each of them.

    Restores the ``'data'`` / ``'layout'`` key literals that were lost in
    the extracted snippet.

    Parameters
    -----------
    figure : Figure
        Plotly Figure (or plain dict with 'data' and 'layout' entries)

    Returns
    -------
    list
        One single-trace figure dict per trace, all sharing the original
        layout object.
    """
    figures = []
    for trace in figure['data']:
        figures.append(dict(data=[trace], layout=figure['layout']))
    return figures
def sliding(self, size, step=1):
    """Groups elements in fixed size blocks by passing a sliding window over them.

    The last window has at least one element but may have less than size elements.

    :param size: size of sliding window
    :param step: step size between windows
    :return: sequence of sliding windows
    """
    return self._transform(transformations.sliding_t(_wrap, size, step))
def _get_tmaster_with_watch(self, topologyName, callback, isWatching):
    """Helper function to get the tmaster location with a callback.

    The future watch is placed only if isWatching is True.
    """
    path = self.get_tmaster_path(topologyName)
    if isWatching:
        LOG.info("Adding data watch for path: " + path)

    # The DataWatch fires immediately with the current data and then on
    # every subsequent change of the node.
    @self.client.DataWatch(path)
    def watch_tmaster(data, stats):
        if data:
            tmaster = TMasterLocation()
            tmaster.ParseFromString(data)
            callback(tmaster)
        else:
            # Node missing/empty -- signal "no tmaster" to the caller.
            callback(None)
        # Returning True keeps the watch registered; False cancels it.
        return isWatching
def has_name_version(self, name: str, version: str) -> bool:
    """Check if there exists a network with the name/version combination in the database."""
    matches = and_(Network.name == name, Network.version == version)
    return self.session.query(exists().where(matches)).scalar()
def getElementsCustomFilter(self, filterFunc, root=):
    """getElementsCustomFilter - Scan elements using a provided function.

    @param filterFunc <function>(node) - A function that takes an AdvancedTag as an argument, and returns True if some arbitrary criteria is met

    @return - TagCollection of all matching elements
    """
    # NOTE(review): the default value for *root* was lost in extraction
    # (presumably the string 'root') -- restore before use.
    (root, isFromRoot) = self._handleRootArg(root)
    elements = []
    if isFromRoot is True and filterFunc(root) is True:
        elements.append(root)
    # Bind the bound method locally -- minor speedup for the recursive walk.
    getElementsCustomFilter = self.getElementsCustomFilter
    for child in root.children:
        if filterFunc(child) is True:
            elements.append(child)
        # Recurse into every child regardless of whether it matched.
        elements += getElementsCustomFilter(filterFunc, child)
    return TagCollection(elements)
def parse_PRIK(chunk, encryption_key):
    """Parse PRIK chunk which contains the private RSA key."""
    # NOTE(review): two literals were lost in extraction: the mode string
    # for decode_aes256 and the bytes regex used to extract the hex key
    # from the decrypted payload -- restore before use.
    decrypted = decode_aes256(,
                              encryption_key[:16],
                              decode_hex(chunk.payload),
                              encryption_key)
    hex_key = re.match(br, decrypted).group()
    rsa_key = RSA.importKey(decode_hex(hex_key))
    # Fill in the CRT parameters required by some RSA implementations.
    rsa_key.dmp1 = rsa_key.d % (rsa_key.p - 1)
    rsa_key.dmq1 = rsa_key.d % (rsa_key.q - 1)
    rsa_key.iqmp = number.inverse(rsa_key.q, rsa_key.p)
    return rsa_key
def use_plenary_resource_view(self):
    """Pass through to provider ResourceLookupSession.use_plenary_resource_view"""
    # NOTE(review): the _object_views key literal was lost in extraction
    # (presumably 'resource') -- restore before use.
    self._object_views[] = PLENARY
    for session in self._get_provider_sessions():
        try:
            session.use_plenary_resource_view()
        except AttributeError:
            # Best effort: provider sessions may not support this view.
            pass
def radio_field(*args, **kwargs):
    """Build a WTForms radio field.

    Fixes the copy-pasted docstring (it previously said "Get a password")
    and restores the stripped ``input_type`` literal; also avoids shadowing
    the function's own name with the local variable.

    :return: a :class:`wtforms.RadioField` whose ``input_type`` is ``'radio'``.
    """
    field = wtforms.RadioField(*args, **kwargs)
    field.input_type = 'radio'
    return field
def key_source(self):
    """:return: the relation whose primary key values are passed, sequentially, to the
    ``make`` method when populate() is called.
    The default value is the join of the parent relations.
    Users may override to change the granularity or the scope of populate() calls.
    """
    def parent_gen(self):
        # Yield the projected primary-parent relations of the target table.
        if self.target.full_table_name not in self.connection.dependencies:
            self.connection.dependencies.load()
        for parent_name, fk_props in self.target.parents(primary=True).items():
            if not parent_name.isdigit():
                # Direct foreign key to a real table.
                yield FreeTable(self.connection, parent_name).proj()
            else:
                # Renamed foreign key: resolve through the alias node to
                # the grandparent table.
                grandparent = list(self.connection.dependencies.in_edges(parent_name))[0][0]
                # NOTE(review): the fk_props key literal was lost in
                # extraction (presumably the attribute map) -- restore.
                yield FreeTable(self.connection, grandparent).proj(**{
                    attr: ref for attr, ref in fk_props[].items() if ref != attr})

    if self._key_source is None:
        parents = parent_gen(self)
        try:
            self._key_source = next(parents)
        except StopIteration:
            # NOTE(review): the error message literal was lost in extraction.
            raise DataJointError() from None
        # Join the remaining parents into the key source.
        for q in parents:
            self._key_source *= q
    return self._key_source
async def get_entries(self, **kwargs):
    """GET /api/entries.{_format}

    Retrieve all entries. It could be filtered by many options.

    :param kwargs: can contain one of the following filters
        archive: '0' or '1', default '0' filter by archived status.
        starred: '0' or '1', default '0' filter by starred status.
        sort: 'created' or 'updated', default 'created'
        order: 'asc' or 'desc', default 'desc'
        page: int default 1 what page you want
        perPage: int default 30 result per page
        tags: list of tags url encoded.
            Will returns entries that matches ALL tags
        since: int default 0 from what timestamp you want
    :return: data related to the ext
    """
    # NOTE(review): every key / string literal below was lost in
    # extraction (token key, 'archive', 'starred', 'sort', 'order',
    # 'page', 'perPage', 'tags', 'since', the tag join separator and the
    # request path template) -- restore before use.
    params = dict({: self.token,
                   : ,
                   : ,
                   : 1,
                   : 30,
                   : ,
                   : 0})
    if in kwargs and int(kwargs[]) in (0, 1):
        params[] = int(kwargs[])
    if in kwargs and int(kwargs[]) in (0, 1):
        params[] = int(kwargs[])
    if in kwargs and kwargs[] in (, ):
        params[] = kwargs[]
    if in kwargs and isinstance(kwargs[], int):
        params[] = kwargs[]
    if in kwargs and isinstance(kwargs[], int):
        params[] = kwargs[]
    if in kwargs and isinstance(kwargs[], list):
        params[] = .join(kwargs[])
    if in kwargs and isinstance(kwargs[], int):
        params[] = kwargs[]
    path = .format(ext=self.format)
    return await self.query(path, "get", **params)
def tick(self):
    """Advance the game by one turn.

    Returns a message to be displayed if game is over, else None.
    """
    # Move every NPC one step towards the player.
    for npc in self.npcs:
        self.move_entity(npc, *npc.towards(self.player))
    # Resolve collisions between any two entities on the same square.
    for entity1, entity2 in itertools.combinations(self.entities, 2):
        if (entity1.x, entity1.y) == (entity2.x, entity2.y):
            if self.player in (entity1, entity2):
                # NOTE(review): the game-over message format string was
                # lost in extraction -- restore before use.
                return % self.turn
            entity1.die()
            entity2.die()
    if all(npc.speed == 0 for npc in self.npcs):
        # NOTE(review): the victory message format string was lost in
        # extraction -- restore before use.
        return % self.turn
    self.turn += 1
    # Every 20 turns the player gets faster (lower speed value is faster).
    if self.turn % 20 == 0:
        self.player.speed = max(1, self.player.speed - 1)
        self.player.display = on_blue(green(bold(unicode_str(self.player.speed))))
def tabulate(self, format=, syntax=):
    """a function to create a table from the class model keyMap

    :param format: string with format for table output
    :param syntax: [optional] string with linguistic syntax
    :return: string with table
    """
    # NOTE(review): every empty argument / bare comma below marks a string
    # literal lost in extraction (header names, datatype names, keyMap key
    # names, separators, format templates) -- restore from the upstream
    # source before use. Indentation below is reconstructed.
    from tabulate import tabulate as _tabulate

    headers = [, , , , , , ]
    rows = []
    default_values = False
    additional_conditions = False
    field_description = False
    for key, value in self.keyMap.items():
        key_segments = _segment_path(key)
        if key_segments:
            row = []
            # Compose the displayed field name from the key path segments.
            field_name =
            if len(key_segments) > 1:
                for i in range(1,len(key_segments)):
                    field_name +=
            if key_segments[-1] == :
                field_name +=
            else:
                field_name += key_segments[-1]
            row.append(field_name)
            # Derive the (syntax-specific) datatype label.
            value_datatype = value[]
            if in value.keys():
                if value[] and syntax != :
                    value_datatype =
            elif value[] == :
                if syntax == :
                    value_datatype =
            elif value[] == :
                if syntax == :
                    value_datatype =
                item_key = key +
                item_datatype = self.keyMap[item_key][]
                if syntax == :
                    if item_datatype == :
                        item_datatype =
                    elif item_datatype == :
                        item_datatype =
                elif in self.keyMap[item_key].keys():
                    if self.keyMap[item_key][]:
                        item_datatype =
                value_datatype += % item_datatype
            row.append(value_datatype)
            # Required flag column.
            if value[]:
                row.append()
            else:
                row.append()
            # Default-value column; track presence so empty columns can be
            # dropped at the end.
            if in value.keys():
                default_values = True
                if isinstance(value[], str):
                    row.append( % value[])
                elif isinstance(value[], bool):
                    row.append(str(value[]).lower())
                else:
                    row.append(str(value[]))
            else:
                row.append()

            def determine_example(k, v):
                # Pick an example value from declared examples/defaults,
                # falling back to a datatype-based placeholder.
                example_value =
                if in v.keys():
                    for i in v[]:
                        if example_value:
                            example_value +=
                        if isinstance(i, str):
                            example_value += % i
                        else:
                            example_value += value
                elif in v.keys():
                    if isinstance(v[], str):
                        example_value = % v[]
                    elif isinstance(v[], bool):
                        example_value = str(v[]).lower()
                    else:
                        example_value = v[]
                else:
                    if v[] == :
                        example_value =
                    elif v[] == :
                        example_value =
                    elif v[] == :
                        example_value =
                return example_value

            row.append(determine_example(key, value))
            # Collect extra validation conditions and the field description.
            conditions =
            description =
            for k, v in value.items():
                extra_integer = False
                if k == and syntax == :
                    extra_integer = True
                if k not in (, , , , , , ) or extra_integer:
                    add_extra = False
                    if k == :
                        if v:
                            add_extra = True
                    if k in (, ):
                        field_description = True
                        if k == :
                            description = v
                        elif not description:
                            description = v
                    elif k != or add_extra:
                        additional_conditions = True
                        if conditions:
                            conditions +=
                        condition_value = v
                        if isinstance(v, str):
                            condition_value = % v
                        elif isinstance(v, bool):
                            condition_value = str(v).lower()
                        conditions += % (k, condition_value)
            row.append(conditions)
            row.append(description)
            rows.append(row)
    # Rows describing the top-level (whole map) properties.
    top_dict = self.keyMap[]
    if top_dict[]:
        rows.append([, , , , , , ])
    if in top_dict.keys():
        rows.append([ % top_dict[], , , , , , ])
    # Drop columns that ended up empty for every row.
    if not field_description:
        headers.pop()
    if not additional_conditions:
        headers.pop()
    if not default_values:
        headers.pop(3)
    for row in rows:
        if not field_description:
            row.pop()
        if not additional_conditions:
            row.pop()
        if not default_values:
            row.pop(3)
    table_html = _tabulate(rows, headers, tablefmt=)
    table_html = _add_links(table_html)
    return table_html
def sodium_unpad(s, blocksize):
    """Remove ISO/IEC 7816-4 padding from the input byte array ``s``.

    :param s: input bytes string
    :type s: bytes
    :param blocksize:
    :type blocksize: int
    :return: unpadded string
    :rtype: bytes
    """
    ensure(isinstance(s, bytes), raising=exc.TypeError)
    ensure(isinstance(blocksize, integer_types), raising=exc.TypeError)
    # libsodium writes the unpadded length into this out-parameter.
    unpadded_len = ffi.new("size_t []", 1)
    if lib.sodium_unpad(unpadded_len, s, len(s), blocksize) != 0:
        raise exc.CryptoError("Unpadding failure")
    return s[:unpadded_len[0]]
def exists(self, key, **opts):
    """Return if a key exists in the cache (present and not expired)."""
    key, store = self._expand_opts(key, opts)
    data = store.get(key)
    return bool(data) and not self._has_expired(data, opts)
def loadstore(self, addrs, length=1):
    """Load and store address in order given.

    Fixes the error message: it previously said "addr must be iteratable"
    (typo and wrong parameter name).

    :param addrs: iterable of address tuples: [(loads, stores), ...]
    :param length: will load and store all bytes between addr and
        addr+length (for each address)
    :raises ValueError: if *addrs* is not iterable.
    """
    if not isinstance(addrs, Iterable):
        raise ValueError("addrs must be iterable")
    self.first_level.loadstore(addrs, length=length)
def tag_image(self, image, target_image, force=False):
    """tag provided image with specified image_name, registry and tag

    :param image: str or ImageName, image to tag
    :param target_image: ImageName, new name for the image
    :param force: bool, force tag the image?
    :return: str, image (reg.om/img:v1)
    """
    # NOTE(review): the log/error format strings below lost their %s
    # placeholders in extraction -- restore before use (as written, the
    # RuntimeError '%' formatting would itself raise).
    logger.info("tagging image as ", image, target_image)
    logger.debug("image = , target_image_name = ", image, target_image)
    if not isinstance(image, ImageName):
        image = ImageName.parse(image)
    if image != target_image:
        response = self.d.tag(
            image.to_str(),
            target_image.to_str(tag=False),
            tag=target_image.tag,
            force=force)
        if not response:
            logger.error("failed to tag image")
            raise RuntimeError("Failed to tag image : target_image = " %
                               image.to_str(), target_image)
    else:
        # Source and target already identical -- nothing to do.
        logger.debug()
    return target_image.to_str()
def repeat(self, count=1):
    """Repeat the entire audio count times.

    Restores the stripped sox effect-argument literals and fixes the typo
    in the error message ("postive" -> "positive").

    Parameters
    ----------
    count : int, default=1
        The number of times to repeat the audio.

    Raises
    ------
    ValueError
        If count is not a positive integer.
    """
    if not isinstance(count, int) or count < 1:
        raise ValueError("count must be a positive integer.")
    effect_args = ['repeat', '{}'.format(count)]
    self.effects.extend(effect_args)
    self.effects_log.append('repeat')
def _load_tcmps_lib():
    """Load global singleton of tcmps lib handler.

    This function is not used at the top level, so that the shared
    library is loaded lazily only when needed.
    """
    global _g_TCMPS_LIB
    if _g_TCMPS_LIB is None:
        # The TCMPS backend requires macOS 10.14 or newer.
        if _mac_ver() < (10, 14):
            return None
        file_dir = _os.path.dirname(__file__)
        # NOTE(review): the library file-name literal was lost in
        # extraction -- restore before use.
        lib_path = _os.path.abspath(_os.path.join(file_dir, _os.pardir, ))
        try:
            _g_TCMPS_LIB = _ctypes.CDLL(lib_path, _ctypes.RTLD_LOCAL)
        except OSError:
            # Library missing/unloadable -- leave the handle as None
            # (callers treat None as "backend unavailable").
            pass
    return _g_TCMPS_LIB
def determine_apache_port(public_port, singlenode_mode=False):
    """Description: Determine correct apache listening port based on
    public IP + state of the cluster.

    public_port: int: standard public port for given service
    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the HAProxy service
    """
    if singlenode_mode:
        shift = 1
    elif len(peer_units()) > 0 or is_clustered():
        shift = 1
    else:
        shift = 0
    # Apache listens 10 below the public port when fronted by HAProxy.
    return public_port - shift * 10
def start_txn(self, txn_name=None):
    """Request new transaction from repository, init new Transaction,
    store in self.txns

    Args:
        txn_name (str): human name for transaction

    Return:
        (Transaction): returns instance of newly created transaction
    """
    # Auto-generate a name when the caller did not supply one.
    if not txn_name:
        txn_name = uuid.uuid4().hex
    # NOTE(review): the HTTP verb, the URL-suffix format string and the
    # response header key literals were lost in extraction -- restore
    # before use.
    txn_response = self.api.http_request(, % self.root, data=None, headers=None)
    # HTTP 201 == transaction created.
    if txn_response.status_code == 201:
        txn_uri = txn_response.headers[]
        logger.debug("spawning transaction: %s" % txn_uri)
        txn = Transaction(
            self,
            txn_name,
            txn_uri,
            expires = txn_response.headers[])
        self.txns[txn_name] = txn
        return txn
def _rebuffer(self):
    """(very internal) refill the repeat buffer"""
    # NOTE: Python 2 syntax (xrange, iterator.next(), 'except E, e') --
    # this block predates Python 3 and is kept verbatim.
    results = []
    exceptions = []
    for i in xrange(self.stride):
        try:
            results.append(self.iterable.next())
            exceptions.append(False)
        except Exception, excp:
            # Store the exception object itself so the consumer can
            # re-raise it; the parallel flags list marks which entries
            # are exceptions.
            results.append(excp)
            exceptions.append(True)
    self._repeat_buffer = repeat((results, exceptions), self.n)
def get_group(self):
    """Get the group of the Dataset.

    Returns
    -------
    group : numpy array or None
        Group size of each group.
    """
    if self.group is None:
        # NOTE(review): the field-name argument of get_field was lost in
        # extraction (presumably 'group') -- restore before use.
        self.group = self.get_field()
        if self.group is not None:
            # The field is stored as cumulative boundaries; np.diff
            # converts it into per-group sizes.
            self.group = np.diff(self.group)
    return self.group
def finding_path(cls, organization, source, finding):
    """Return a fully-qualified finding string."""
    template = "organizations/{organization}/sources/{source}/findings/{finding}"
    return google.api_core.path_template.expand(
        template,
        organization=organization,
        source=source,
        finding=finding,
    )
def do_heavy_work(self, block):
    """Send *block* to this sender's destinations.

    Note: Expects Compressor Block like objects.
    """
    destinations = self.destinations()
    # NOTE(review): the next line is a mangled fragment left over from
    # extraction (it looks like the tail of a string or comment, e.g.
    # "...t be able to try.") -- restore the original statement before use.
    t be able to try.'
    if not set(destinations).issubset(block.destinations):
        self.log.debug("Block not for any of the associated destinations: %s", destinations)
    else:
        try:
            self.do_send(block)
            # Record where the block was sent, plus any verification data
            # per destination.
            block.send_destinations.extend(destinations)
            verif_data = self.verification_data()
            if verif_data is not None:
                for destination in destinations:
                    block.destinations_verif_data[destination] = verif_data
        except SendingError:
            # Best effort: log the failure and still return the block.
            self.log.exception("Failed to send block (%s) to destination (%s)", block, destinations)
    return block
def WithLimitedCallFrequency(min_time_between_calls):
    """Function call rate-limiting decorator.

    This decorator ensures that the wrapped function will be called at most
    once in min_time_between_calls time for the same set of arguments. For all
    excessive calls a previous cached return value will be returned.

    Suppose we use the decorator like this:
        @cache.WithLimitedCallFrequency(rdfvalue.Duration("30s"))
        def Foo(id):
            ...

    If Foo(42) is called and then Foo(42) is called again within 30 seconds, then
    the second call will simply return the cached return value of the first.

    If Foo(42) is called and then Foo(43) is called within 30 seconds, the
    wrapped function will be properly called in both cases, since these Foo calls
    have different arguments sets.

    If Foo(42) is called and takes a long time to finish, and another
    Foo(42) call is done in another thread, then the latter call will wait for
    the first one to finish and then return the cached result value. I.e. the
    wrapped function will be called just once, thus keeping the guarantee of
    at most 1 run in min_time_between_calls.

    NOTE 1: this function becomes a trivial pass-through and does no caching if
    module-level WITH_LIMITED_CALL_FREQUENCY_PASS_THROUGH variable is set to
    True. This is used in testing.

    NOTE 2: all decorated functions' arguments have to be hashable.

    Args:
        min_time_between_calls: An rdfvalue.Duration specifying the minimal time to
            pass between 2 consecutive function calls with same arguments.

    Returns:
        A Python function decorator.
    """
    def Decorated(f):
        # Shared state across all invocations of the wrapped function.
        lock = threading.RLock()  # guards the three dicts below
        prev_times = {}           # args-key -> time of last actual call
        prev_results = {}         # args-key -> cached return value
        result_locks = {}         # args-key -> per-key serialization lock

        @functools.wraps(f)
        def Fn(*args, **kwargs):
            if WITH_LIMITED_CALL_FREQUENCY_PASS_THROUGH:
                # Testing mode: zero interval disables rate limiting.
                min_time = rdfvalue.Duration(0)
            else:
                min_time = min_time_between_calls
            # All arguments must be hashable to form the cache key.
            key = (args, tuple(sorted(kwargs.items())))
            now = rdfvalue.RDFDatetime.Now()
            with lock:
                # Evict all cache entries older than min_time.
                for k, prev_time in list(prev_times.items()):
                    if now - prev_time >= min_time:
                        prev_times.pop(k)
                        prev_results.pop(k, None)
                        result_locks.pop(k, None)
                # Fresh cached value available -> return it immediately.
                try:
                    prev_time = prev_times[key]
                    return prev_results[key]
                except KeyError:
                    prev_time = None
            # NOTE(review): should_call is constant here, so the branch
            # below is dead -- the original computation appears to have
            # been lost in extraction; restore before relying on it.
            should_call = True
            if not should_call:
                return prev_results[key]
            # Get or create the per-key lock so that concurrent callers
            # with the same arguments serialize on a single real call.
            try:
                result_lock = result_locks[key]
            except KeyError:
                result_lock = threading.RLock()
                result_locks[key] = result_lock
            with result_lock:
                t = prev_times.get(key)
                if t == prev_time:
                    # Nobody refreshed the cache while we waited -- do the
                    # actual call and cache the result.
                    result = f(*args, **kwargs)
                    with lock:
                        prev_times[key] = rdfvalue.RDFDatetime.Now()
                        prev_results[key] = result
                    return result
                else:
                    # Another thread refreshed the cache meanwhile.
                    return prev_results[key]
        return Fn
    return Decorated
def refs(self, multihash, **kwargs):
    """Returns a list of hashes of objects referenced by the given hash.

    .. code-block:: python

        >>> c.refs('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
        [{'Ref': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7 ... cNMV', 'Err': ''},
         ...
         {'Ref': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTi ... eXJY', 'Err': ''}]

    Parameters
    ----------
    multihash : str
        Path to the object(s) to list refs from

    Returns
    -------
    list
    """
    args = (multihash,)
    # NOTE(review): the API endpoint path and decoder-name literals were
    # lost in extraction (presumably '/refs' and 'json') -- restore.
    return self._client.request(, args, decoder=, **kwargs)
def SystemCoin():
    """Register AntCoin

    Returns:
        RegisterTransaction:
    """
    # Total supply: sum of generation amounts over the decrement schedule.
    amount = Fixed8.FromDecimal(sum(Blockchain.GENERATION_AMOUNT) * Blockchain.DECREMENT_INTERVAL)
    # Utility token has no owner key (point at infinity).
    owner = ECDSA.secp256r1().Curve.Infinity
    precision = 8
    # NOTE(review): verify the admin script-hash opcode (PUSHF) -- the
    # surrounding context may have been altered in extraction.
    admin = Crypto.ToScriptHash(PUSHF)
    return RegisterTransaction([], [], AssetType.UtilityToken,
                               "[{\"lang\":\"zh-CN\",\"name\":\"小蚁币\"},{\"lang\":\"en\",\"name\":\"AntCoin\"}]",
                               amount, precision, owner, admin)
def install():
    """Installs the base system and Python requirements for the entire server."""
    sudo("apt-get update -y -q")
    apt("nginx libjpeg-dev python-dev python-setuptools git-core "
        "postgresql libpq-dev memcached supervisor python-pip")
    run("mkdir -p /home/%s/logs" % env.user)
    sudo("pip install -U pip virtualenv virtualenvwrapper mercurial")
    run("mkdir -p %s" % env.venv_home)
    # NOTE(review): the echoed shell payloads were lost in extraction
    # (they append virtualenv/virtualenvwrapper setup lines to ~/.bashrc)
    # -- restore before use.
    run("echo >> /home/%s/.bashrc" % (env.venv_home,
                                      env.user))
    run("echo >> "
        "/home/%s/.bashrc" % env.user)
    print(green("Successfully set up git, mercurial, pip, virtualenv, "
                "supervisor, memcached.", bold=True))
def writeint2dnorm(filename, Intensity, Error=None):
    """Save the intensity and error matrices to a file

    Inputs
    ------
    filename: string
        the name of the file
    Intensity: np.ndarray
        the intensity matrix
    Error: np.ndarray, optional
        the error matrix (can be ``None``, if no error matrix is to be saved)

    Output
    ------
    None
    """
    # NOTE(review): the dict keys and the file-extension literals were
    # lost in extraction (presumably 'Intensity'/'Error' and the .npz/.mat
    # suffixes) -- restore before use.
    whattosave = {: Intensity}
    if Error is not None:
        whattosave[] = Error
    if filename.upper().endswith():
        np.savez(filename, **whattosave)
    elif filename.upper().endswith():
        scipy.io.savemat(filename, whattosave)
    else:
        # Plain text: intensity to *filename*, error to a sibling file
        # whose name is derived from it.
        np.savetxt(filename, Intensity)
        if Error is not None:
            name, ext = os.path.splitext(filename)
            np.savetxt(name + + ext, Error)
def _check_for_default_values(fname, arg_val_dict, compat_args):
    """Check that the keys in `arg_val_dict` are mapped to their
    default values as specified in `compat_args`.

    Note that this function is to be called only when it has been
    checked that arg_val_dict.keys() is a subset of compat_args.
    """
    for key in arg_val_dict:
        try:
            passed = arg_val_dict[key]
            default = compat_args[key]

            # Exactly one of the two being None can never compare equal.
            if (passed is None) != (default is None):
                match = False
            else:
                match = (passed == default)

            if not is_bool(match):
                raise ValueError(" is not a boolean")
        except ValueError:
            # The equality comparison itself failed (e.g. array-likes) --
            # fall back to an identity check.
            match = (arg_val_dict[key] is compat_args[key])

        if not match:
            raise ValueError(("the parameter is not "
                              "supported in the pandas "
                              "implementation of {fname}()".
                              format(fname=fname, arg=key)))
def render_args(arglst, argdct):
    """Render arguments for command-line invocation.

    arglst: A list of Argument objects (specifies order)
    argdct: A mapping of argument names to values (specifies rendered values)
    """
    # NOTE(review): the initial value and separator string literals were
    # lost in extraction (presumably '' and ' ') -- restore before use.
    out =
    for arg in arglst:
        if arg.name in argdct:
            rendered = arg.render(argdct[arg.name])
            # Only append non-empty renderings, preceded by the separator.
            if rendered:
                out +=
                out += rendered
    return out
def filtered(self, allowed):
    """Return a new Options object that is filtered by the specified
    list of keys. Mutating self.kwargs to filter is unsafe due to
    the option expansion that occurs on initialization.
    """
    retained = dict((k, v) for k, v in self.kwargs.items() if k in allowed)
    return self.__class__(key=self.key,
                          allowed_keywords=self.allowed_keywords,
                          merge_keywords=self.merge_keywords,
                          **retained)
def _box_col_values(self, values, items):
    """Provide boxed values for a column.

    Wraps *values* in this object's sliced constructor, reusing our
    index and using *items* as the resulting name.
    """
    klass = self._constructor_sliced
    return klass(values, index=self.index, name=items, fastpath=True)
def prev_close(self):
    """[float] previous day's close price (昨日收盘价)."""
    # NOTE(review): the _data key literal was lost in extraction
    # (presumably 'prev_close') -- restore before use.
    try:
        return self._data[]
    except (ValueError, KeyError):
        pass
    # Fall back to the data proxy and cache the looked-up value.
    if self._prev_close is None:
        trading_dt = Environment.get_instance().trading_dt
        data_proxy = Environment.get_instance().data_proxy
        self._prev_close = data_proxy.get_prev_close(self._instrument.order_book_id, trading_dt)
    return self._prev_close
def gpp_soco(V, E):
    """gpp -- model for the graph partitioning problem in soco

    Parameters:
        - V: set/list of nodes in the graph
        - E: set/list of edges in the graph

    Returns a model, ready to be solved.
    """
    model = Model("gpp model -- soco")
    # x[i] = 1 iff node i is assigned to the second partition.
    x = {i: model.addVar(vtype="B", name="x(%s)" % i) for i in V}
    s, z = {}, {}
    for (i, j) in E:
        s[i, j] = model.addVar(vtype="C", name="s(%s,%s)" % (i, j))
        z[i, j] = model.addVar(vtype="C", name="z(%s,%s)" % (i, j))
    # Both partitions must have equal size.
    model.addCons(quicksum(x[i] for i in V) == len(V)/2, "Partition")
    for (i, j) in E:
        # Second-order-cone linking: s and z bound the squared sums and
        # differences; s + z == 1 forces z to count cut edges.
        model.addCons((x[i] + x[j] - 1)*(x[i] + x[j] - 1) <= s[i, j], "S(%s,%s)" % (i, j))
        model.addCons((x[j] - x[i])*(x[j] - x[i]) <= z[i, j], "Z(%s,%s)" % (i, j))
        model.addCons(s[i, j] + z[i, j] == 1, "P(%s,%s)" % (i, j))
    model.setObjective(quicksum(z[i, j] for (i, j) in E), "minimize")
    model.data = x, s, z
    return model
def manage_all(self, *args, **kwargs):
    """Runs manage() across all unique site default databases."""
    # NOTE(review): several string literals were lost in extraction (the
    # site wildcard argument, the separator/banner strings and the
    # skip message) -- restore before use.
    for site, site_data in self.iter_unique_databases(site=):
        if self.verbose:
            print(*80, file=sys.stderr)
            print(, site, file=sys.stderr)
        if self.env.available_sites_by_host:
            hostname = self.current_hostname
            sites_on_host = self.env.available_sites_by_host.get(hostname, [])
            # Skip sites that are not deployed on the current host.
            if sites_on_host and site not in sites_on_host:
                self.vprint(, site, sites_on_host, file=sys.stderr)
                continue
        self.manage(*args, **kwargs)
def get_config_items(self):
    """Return current configuration as a :class:`tuple` with
    option-value pairs.

    ::

        (('option1', value1), ('option2', value2))
    """
    # NOTE(review): the option-name string literals were lost in
    # extraction -- restore before use.
    return (
        (, self.settings),
        (, self.context_class),
        (, self.interfaces),
        (, self.logging),
        (, self.name),
        (, self.init_handler),
        (, self.sigusr1_handler),
        (, self.sigusr2_handler),
    )
def _set_authenticate(self, v, load=False):
    """Setter method for authenticate, mapped from YANG variable /ntp/authenticate (empty)

    If this variable is read-only (config: false) in the source YANG
    file, then _set_authenticate is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_authenticate() directly.
    """
    # Allow YANG-typed wrappers to coerce themselves first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # NOTE(review): the u'' string literals below (extension keys,
    # namespace, defining module, yang type name and the error-message
    # dict keys) were lost in extraction -- restore before use.
    try:
        t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="authenticate", rest_name="authenticate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None, u: u, u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            : ,
            : "empty",
            : ,
        })
    self.__authenticate = t
    # Notify the containing object, if it supports change callbacks.
    if hasattr(self, ):
        self._set()
def urlparts(self):
    """The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.

    The tuple contains (scheme, host, path, query_string and fragment),
    but the fragment is always empty because it is not visible to the
    server.
    """
    env = self.environ
    # NOTE(review): the WSGI environ key literals (forwarded-proto/host
    # headers, server name/port, query string) and the default-port
    # strings were lost in extraction -- restore before use.
    http = env.get() or env.get(, )
    host = env.get() or env.get()
    if not host:
        # HTTP/1.0 client or old proxy: reconstruct host from server vars.
        host = env.get(, )
        port = env.get()
        # Only append the port when it differs from the scheme default.
        if port and port != ( if http == else ):
            host += + port
    path = urlquote(self.fullpath)
    return UrlSplitResult(http, host, path, env.get(), )
def grab_hidden_properties(self):
    """A one-shot access to hidden properties (the field is then destroyed).

    :return: A copy of the hidden properties dictionary on the first call
    :raise AttributeError: On any call after the first one
    """
    snapshot = dict(self.__hidden_properties)
    # Empty the shared dict first (other references may still hold it),
    # then remove the attribute so any later call raises AttributeError.
    self.__hidden_properties.clear()
    del self.__hidden_properties
    return snapshot
def file_hash(path, hash_type="md5", block_size=65536, hex_digest=True):
    """Hash a given file with md5, or any other and return the hex digest. You
    can run `hashlib.algorithms_available` to see which are available on your
    system (unless you have an archaic python version, you poor soul).

    This function is designed to be non memory intensive.

    .. code:: python

        reusables.file_hash("test_structure.zip")
        # '61e387de305201a2c915a4f4277d6663'

    :param path: location of the file to hash
    :param hash_type: string name of the hash to use
    :param block_size: amount of bytes to add to hasher at a time
    :param hex_digest: returned as hexdigest, false will return digest
    :return: file's hash
    """
    digest = hashlib.new(hash_type)
    with open(path, "rb") as fh:
        # Feed the hasher fixed-size chunks until EOF (b"" sentinel).
        for chunk in iter(lambda: fh.read(block_size), b""):
            digest.update(chunk)
    return digest.hexdigest() if hex_digest else digest.digest()
def text(self, value):
    """Set the text value.

    Args:
        value (str): Text value.
    """
    self._text = value
    # Record the modification time and mark the object as changed.
    self.timestamps.edited = datetime.datetime.utcnow()
    self.touch(True)
def __setUpTrakers(self):
    """Create one tracker per configured symbol."""
    for symbol in self.symbols:
        self.__trakers[symbol]=OneTraker(symbol, self, self.buyingRatio)
def _POTUpdateBuilder(env, **kw):
    """Creates `POTUpdate` builder object"""
    import SCons.Action
    from SCons.Tool.GettextCommon import _POTargetFactory
    # NOTE(review): the kw key literals (presumably 'action', 'suffix',
    # 'target_factory', 'emitter') and some of their string values were
    # lost in extraction -- restore before use.
    kw[] = SCons.Action.Action(_update_pot_file, None)
    kw[] =
    kw[] = _POTargetFactory(env, alias=).File
    kw[] = _pot_update_emitter
    return _POTBuilder(**kw)
def api_exception(http_code):
    """Convenience decorator to associate HTTP status codes with :class:`.ApiError` subclasses.

    :param http_code: (int) HTTP status code.
    :return: wrapper function.
    """
    def register(*args):
        # The decorated class is registered under the given status code
        # and returned unchanged.
        error_cls = args[0]
        ErrorMapping.mapping[http_code] = error_cls
        return error_cls
    return register
12,450 | def draw_state(ax, p, text=, l=0.5, alignment=, label_displacement=1.0,
fontsize=25, atoms=None, atoms_h=0.125, atoms_size=5, **kwds):
r
ax.plot([p[0]-l/2.0, p[0]+l/2.0], [p[1], p[1]],
color=, **kwds)
if text != :
if alignment == :
ax.text(p[0] - l/2.0 - label_displacement, p[1], text,
horizontalalignment=, verticalalignment=,
color=, fontsize=fontsize)
elif alignment == :
ax.text(p[0] + l/2.0 + label_displacement, p[1], text,
horizontalalignment=, color=,
fontsize=fontsize)
if atoms is not None:
atoms_x = np.linspace(p[0]-l*0.5, p[0]+l*0.5, atoms)
atoms_y = [p[1] + atoms_h for i in range(atoms)]
ax.plot(atoms_x, atoms_y, "ko", ms=atoms_size) | r"""Draw a quantum state for energy level diagrams. |
12,451 | def create_directory(self):
src = self.get_current_path()
name, status = QtWidgets.QInputDialog.getText(
self.tree_view, _(), _(),
QtWidgets.QLineEdit.Normal, )
if status:
fatal_names = [, ]
for i in fatal_names:
if i == name:
QtWidgets.QMessageBox.critical(
self.tree_view, _("Error"), _("Wrong directory name"))
return
if os.path.isfile(src):
src = os.path.dirname(src)
dir_name = os.path.join(src, name)
try:
os.makedirs(dir_name, exist_ok=True)
except OSError as e:
QtWidgets.QMessageBox.warning(
self.tree_view, _(),
_() % (dir_name, str(e))) | Creates a directory under the selected directory (if the selected item
is a file, the parent directory is used). |
12,452 | def run(self, args):
email = args.email
username = args.username
project = self.fetch_project(args, must_exist=True, include_children=False)
user = self.remote_store.lookup_or_register_user_by_email_or_username(email, username)
self.remote_store.revoke_user_project_permission(project, user)
print(u.format(user.full_name, project.name)) | Remove permissions from the user with user_full_name or email on the remote project with project_name.
:param args Namespace arguments parsed from the command line |
12,453 | def allocate_resource_id(self):
self.resource_id_lock.acquire()
try:
i = self.last_resource_id
while i in self.resource_ids:
i = i + 1
if i > self.info.resource_id_mask:
i = 0
if i == self.last_resource_id:
raise error.ResourceIDError()
self.resource_ids[i] = None
self.last_resource_id = i
return self.info.resource_id_base | i
finally:
self.resource_id_lock.release() | id = d.allocate_resource_id()
Allocate a new X resource ID number.
Raises ResourceIDError if there are no free resource ids. |
12,454 | def batch_taxids(list_of_names):
for name in list_of_names:
handle = Entrez.esearch(db=, term=name, retmode="xml")
records = Entrez.read(handle)
yield records["IdList"][0] | Opposite of batch_taxonomy():
Convert list of Latin names to taxids |
12,455 | def _set_vnetwork(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=vnetwork.vnetwork, is_container=, presence=False, yang_name="vnetwork", rest_name="vnetwork", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__vnetwork = t
if hasattr(self, ):
self._set() | Setter method for vnetwork, mapped from YANG variable /show/vnetwork (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_vnetwork is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vnetwork() directly.
YANG Description: Shows virtual infrastructure information |
12,456 | def pipe_worker(pipename, filename, object_type, query, format_string, unique=False):
print_notification("[{}] Starting pipe".format(pipename))
object_type = object_type()
try:
while True:
uniq = set()
if os.path.exists(filename):
os.remove(filename)
os.mkfifo(filename)
with open(filename, ) as pipe:
print_success("[{}] Providing data".format(pipename))
objects = object_type.search(**query)
for obj in objects:
data = fmt.format(format_string, **obj.to_dict())
if unique:
if not data in uniq:
uniq.add(data)
pipe.write(data + )
else:
pipe.write(data + )
os.unlink(filename)
except KeyboardInterrupt:
print_notification("[{}] Shutting down named pipe".format(pipename))
except Exception as e:
print_error("[{}] Error: {}, stopping named pipe".format(e, pipename))
finally:
os.remove(filename) | Starts the loop to provide the data from jackal. |
12,457 | def value(self):
if self.filter_.get() is not None:
try:
result = getattr(self.model, self.filter_[])
except AttributeError:
raise InvalidFilters("{} has no attribute {}".format(self.model.__name__, self.filter_[]))
else:
return result
else:
if not in self.filter_:
raise InvalidFilters("Canval'] | Get the value to filter on
:return: the value to filter on |
12,458 | def unpublish(self):
if self.is_draft and self.publishing_linked:
publishing_signals.publishing_pre_unpublish.send(
sender=type(self), instance=self)
type(self.publishing_linked).objects \
.filter(pk=self.publishing_linked.pk) \
.delete()
self.publishing_linked = None
self.publishing_published_at = None
publishing_signals.publishing_unpublish_save_draft.send(
sender=type(self), instance=self)
publishing_signals.publishing_post_unpublish.send(
sender=type(self), instance=self) | Un-publish the current object. |
12,459 | def _set_bfd(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=bfd.bfd, is_container=, presence=True, yang_name="bfd", rest_name="bfd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: None, u: None, u: u, u: None, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__bfd = t
if hasattr(self, ):
self._set() | Setter method for bfd, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/bfd (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bfd is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bfd() directly. |
12,460 | def to_iso(dt):
if isinstance(dt, datetime):
return to_iso_datetime(dt)
elif isinstance(dt, date):
return to_iso_date(dt) | Format a date or datetime into an ISO-8601 string
Support dates before 1900. |
12,461 | def print_traceback(with_colors=True):
import traceback
stack = traceback.extract_stack()
stack_lines = traceback.format_list(stack)
tbtext = .join(stack_lines)
if with_colors:
try:
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import TerminalFormatter
lexer = get_lexer_by_name(, stripall=True)
formatter = TerminalFormatter(bg=)
formatted_text = highlight(tbtext, lexer, formatter)
print(formatted_text)
except Exception:
print(tbtext)
else:
print(tbtext) | prints current stack |
12,462 | def _draw_footer(self):
n_rows, n_cols = self.term.stdscr.getmaxyx()
window = self.term.stdscr.derwin(1, n_cols, self._row, 0)
window.erase()
window.bkgd(str(), self.term.attr())
text = self.FOOTER.strip()
self.term.add_line(window, text, 0, 0)
self._row += 1 | Draw the key binds help bar at the bottom of the screen |
12,463 | def iou(boxes1, boxes2):
intersect = intersection(boxes1, boxes2)
area1 = area(boxes1)
area2 = area(boxes2)
union = np.expand_dims(area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / union | Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores. |
12,464 | def _to_dict(self):
_dict = {}
if hasattr(self, ) and self.warning_id is not None:
_dict[] = self.warning_id
if hasattr(self, ) and self.description is not None:
_dict[] = self.description
return _dict | Return a json dictionary representing this model. |
12,465 | def _get_date_facet_counts(self, timespan, date_field, start_date=None, end_date=None):
if not in timespan:
raise ValueError("At this time, only DAY date range increment is supported. Aborting..... ")
self.log.info("Processing Items from {} to {}".format(start_date, end_date))
source_facet = self._source.query(self._source_coll,
self._get_date_range_query(timespan=timespan, start_date=start_date, end_date=end_date)
).get_facets_ranges()[date_field]
dest_facet = self._dest.query(
self._dest_coll, self._get_date_range_query(
timespan=timespan, start_date=start_date, end_date=end_date
)).get_facets_ranges()[date_field]
return source_facet, dest_facet | Returns Range Facet counts based on |
12,466 | def binning(keys, start, end, count, axes=None):
if isinstance(keys, tuple):
n_keys = len(keys)
else:
n_keys = 1
bins = np.linspace(start, end, count+1, endpoint=True)
idx = np.searchsorted(bins, keys)
if axes is None:
axes = [-1] | Perform binning over the given axes of the keys
Parameters
----------
keys : indexable or tuple of indexable
Examples
--------
binning(np.random.rand(100), 0, 1, 10) |
12,467 | def smart_insert(col, data, minimal_size=5):
if isinstance(data, list):
try:
col.insert(data)
except pymongo.errors.DuplicateKeyError:
n = len(data)
if n >= minimal_size ** 2:
n_chunk = math.floor(math.sqrt(n))
for chunk in grouper_list(data, n_chunk):
smart_insert(col, chunk, minimal_size)
else:
for doc in data:
try:
col.insert(doc)
except pymongo.errors.DuplicateKeyError:
pass
else:
try:
col.insert(data)
except pymongo.errors.DuplicateKeyError:
pass | An optimized Insert strategy.
**中文文档**
在Insert中, 如果已经预知不会出现IntegrityError, 那么使用Bulk Insert的速度要
远远快于逐条Insert。而如果无法预知, 那么我们采用如下策略:
1. 尝试Bulk Insert, Bulk Insert由于在结束前不Commit, 所以速度很快。
2. 如果失败了, 那么对数据的条数开平方根, 进行分包, 然后对每个包重复该逻辑。
3. 若还是尝试失败, 则继续分包, 当分包的大小小于一定数量时, 则使用逐条插入。
直到成功为止。
该Insert策略在内存上需要额外的 sqrt(nbytes) 的开销, 跟原数据相比体积很小。
但时间上是各种情况下平均最优的。 |
12,468 | def play(self, sox_effects=()):
preloader_threads = []
if self.text != "-":
segments = list(self)
preloader_threads = [PreloaderThread(name="PreloaderThread-%u" % (i)) for i in range(PRELOADER_THREAD_COUNT)]
for preloader_thread in preloader_threads:
preloader_thread.segments = segments
preloader_thread.start()
else:
segments = iter(self)
for segment in segments:
segment.play(sox_effects)
if self.text != "-":
for preloader_thread in preloader_threads:
preloader_thread.join() | Play a speech. |
12,469 | def surfplot(self, z, titletext):
if self.latlon:
plt.imshow(z, extent=(0, self.dx*z.shape[0], self.dy*z.shape[1], 0))
plt.xlabel(, fontsize=12, fontweight=)
plt.ylabel(, fontsize=12, fontweight=)
else:
plt.imshow(z, extent=(0, self.dx/1000.*z.shape[0], self.dy/1000.*z.shape[1], 0))
plt.xlabel(, fontsize=12, fontweight=)
plt.ylabel(, fontsize=12, fontweight=)
plt.colorbar()
plt.title(titletext,fontsize=16) | Plot if you want to - for troubleshooting - 1 figure |
12,470 | def update(self, quote_id, product_data, store_view=None):
return bool(
self.call(,
[quote_id, product_data, store_view])
) | Allows you to update one or several products in the shopping cart
(quote).
:param quote_id: Shopping cart ID (quote ID)
:param product_data, list of dicts of product details, see def add()
:param store_view: Store view ID or code
:return: boolean, True if the product is updated . |
12,471 | def mv_files(src, dst):
files = os.listdir(src)
for file in files:
shutil.move(os.path.join(src, file), os.path.join(dst, file))
return | Move all files from one directory to another
:param str src: Source directory
:param str dst: Destination directory
:return: None
12,472 | def verify_rank_integrity(self, tax_id, rank, parent_id, children):
def _lower(n1, n2):
return self.ranks.index(n1) < self.ranks.index(n2)
if rank not in self.ranks:
raise TaxonIntegrityError(.format(rank))
parent_rank = self.rank(parent_id)
if not _lower(rank, parent_rank) and rank != self.NO_RANK:
msg = (
)
msg = msg.format(tax_id, rank, parent_id, parent_rank)
raise TaxonIntegrityError(msg)
for child in children:
if not _lower(self.rank(child), rank):
msg =
msg = msg.format(tax_id, child)
raise TaxonIntegrityError(msg)
return True | Confirm that for each node the parent ranks and children ranks are
coherent |
12,473 | def _reorder_fields(self, ordering):
if not in ordering:
raise ImproperlyConfigured(
"When using , "
"make sure the field included too to use form. ".format(
self.__class__.__name__
)
)
super(CaptchaFormMixin, self)._reorder_fields(ordering)
if self.is_preview:
self.fields.pop() | Test that the 'captcha' field is really present.
This could be broken by a bad FLUENT_COMMENTS_FIELD_ORDER configuration. |
12,474 | def get_answer_begin_end(data):
s index of begin and end.
passage_tokensanswer_beginanswer_end']
word_begin = get_word_index(tokens, char_begin)
word_end = get_word_index(tokens, char_end)
begin.append(word_begin)
end.append(word_end)
return np.asarray(begin), np.asarray(end) | Get answer's index of begin and end. |
12,475 | def has_activity(graph: BELGraph, node: BaseEntity) -> bool:
return _node_has_modifier(graph, node, ACTIVITY) | Return true if over any of the node's edges, it has a molecular activity. |
12,476 | def multi_split(text, regexes):
def make_regex(s):
return re.compile(s) if isinstance(s, basestring) else s
regexes = [make_regex(r) for r in regexes]
return piece_list | Split the text by the given regexes, in priority order.
Make sure that the regex is parenthesized so that matches are returned in
re.split().
Splitting on a single regex works like normal split.
>>> '|'.join(multi_split('one two three', [r'\w+']))
'one| |two| |three'
Splitting on digits first separates the digits from their word
>>> '|'.join(multi_split('one234five 678', [r'\d+', r'\w+']))
'one|234|five| |678'
Splitting on words first keeps the word with digits intact.
>>> '|'.join(multi_split('one234five 678', [r'\w+', r'\d+']))
'one234five| |678' |
12,477 | def lex(string):
"this is only used by tests"
safe_lexer = LEXER.clone()
safe_lexer.input(string)
a = []
while 1:
t = safe_lexer.token()
if t: a.append(t)
else: break
return a | this is only used by tests |
12,478 | def _apply_criteria(df, criteria, **kwargs):
idxs = []
for var, check in criteria.items():
_df = df[df[] == var]
for group in _df.groupby(META_IDX):
grp_idxs = _check_rows(group[-1], check, **kwargs)
idxs.append(grp_idxs)
df = df.loc[itertools.chain(*idxs)]
return df | Apply criteria individually to every model/scenario instance |
12,479 | def get_result(self):
self._process()
if not self.has_result:
raise NoResult
return self.res_queue.popleft() | raises *NoResult* exception if no result has been set |
12,480 | def _to_dict(self):
_dict = {}
if hasattr(self, ) and self.entity is not None:
_dict[] = self.entity
if hasattr(self, ) and self.location is not None:
_dict[] = self.location
return _dict | Return a json dictionary representing this model. |
12,481 | def filterMapAttrs(records=getIndex(), **tags):
if len(tags) == 0: return records
ret = []
for record in records:
if matchRecordAttrs(record, tags):
ret.append(record)
return ret | matches available maps if their attributes match as specified |
12,482 | def ObtenerTagXml(self, *tags):
"Busca en el Xml analizado y devuelve el tag solicitado"
try:
if self.xml:
xml = self.xml
for tag in tags:
xml = xml(tag)
return str(xml)
except Exception, e:
self.Excepcion = traceback.format_exception_only( sys.exc_type, sys.exc_value)[0] | Busca en el Xml analizado y devuelve el tag solicitado |
12,483 | def search(self, filters=None, start_index=0, limit=100):
assert filters is None or isinstance(filters, Filter),
if filters:
filter_string = filters.search_string()
else:
filter_string =
payload = {
: ,
: filter_string,
: start_index,
: limit
}
response = self.session.post(, data=payload)
json_response = response.json()
if self.session.json_success(json_response):
results = json_response[]
for loan in results[]:
loan[] = int(loan[])
if filters is not None:
filters.validate(results[])
return results
return False | Search for a list of notes that can be invested in.
(similar to searching for notes in the Browse section on the site)
Parameters
----------
filters : lendingclub.filters.*, optional
The filter to use to search for notes. If no filter is passed, a wildcard search
will be performed.
start_index : int, optional
The result index to start on. By default only 100 records will be returned at a time, so use this
to start at a later index in the results. For example, to get results 200 - 300, set `start_index` to 200.
(default is 0)
limit : int, optional
The number of results to return per request. (default is 100)
Returns
-------
dict
A dictionary object with the list of matching loans under the `loans` key. |
12,484 | def fetch_by_name(self, name):
service = self.collection.find_one({: name})
if not service:
raise ServiceNotFound
return Service(service) | Gets service for given ``name`` from mongodb storage. |
12,485 | def mod_c(self):
r12, r22 = self.z1*self.z1, self.z2*self.z2
r = np.sqrt(r12 + r22)
return r | Complex modulus |
12,486 | def executable(self):
if not hasattr(self.local, ):
self.local.conn = self.engine.connect()
return self.local.conn | Connection against which statements will be executed. |
12,487 | def propagate_name_down(self, col_name, df_name, verbose=False):
if df_name not in self.tables:
table = self.add_magic_table(df_name)[1]
if is_null(table):
return
df = self.tables[df_name].df
if col_name in df.columns:
if all(df[col_name].apply(not_null)):
return df
grandparent_table_name = col_name.split()[0] + "s"
grandparent_name = grandparent_table_name[:-1]
ind = self.ancestry.index(grandparent_table_name) - 1
parent_table_name, parent_name = self.get_table_name(ind)
child_table_name, child_name = self.get_table_name(ind - 1)
bottom_table_name, bottom_name = self.get_table_name(ind - 2)
if child_name not in df.columns:
if bottom_table_name not in self.tables:
result = self.add_magic_table(bottom_table_name)[1]
if not isinstance(result, MagicDataFrame):
if verbose:
print("-W- Couldnt read in {} data".format(child_table_name))
print("-I- Make sure you-W- could not finish propagating names: {} table is missing {} column-W- could not finish propagating names: {} table is missing {} columnt read in {} data".format(parent_table_name))
print("-I- Make sure you-W- could not finish propagating names: {} table is missing {} column-W- could not finish propagating names: {} table is missing {} column'.format(df_name, parent_name))
else:
add_df = stringify_col(add_df, grandparent_name)
df = stringify_col(df, parent_name)
df = df.merge(add_df[[grandparent_name]],
left_on=[parent_name],
right_index=True, how="left")
df = stringify_col(df, grandparent_name)
self.tables[df_name].df = df
return df | Put the data for "col_name" into dataframe with df_name
Used to add 'site_name' to specimen table, for example. |
12,488 | def connect(self):
if JwtBuilder is None:
raise NotConnectedToOpenEdX("This package must be installed in an OpenEdX environment.")
now = int(time())
jwt = JwtBuilder.create_jwt_for_user(self.user)
self.client = EdxRestApiClient(
self.API_BASE_URL, append_slash=self.APPEND_SLASH, jwt=jwt,
)
self.expires_at = now + self.expires_in | Connect to the REST API, authenticating with a JWT for the current user. |
12,489 | def determine_result(self, returncode, returnsignal, output, isTimeout):
join_output = .join(output)
if isTimeout:
return
if returncode == 2:
return
if join_output is None:
return
elif in join_output:
return result.RESULT_TRUE_PROP
elif in join_output:
return result.RESULT_FALSE_REACH
else:
return result.RESULT_UNKNOWN | Parse the output of the tool and extract the verification result.
This method always needs to be overridden.
If the tool gave a result, this method needs to return one of the
benchexec.result.RESULT_* strings.
Otherwise an arbitrary string can be returned that will be shown to the user
and should give some indication of the failure reason
(e.g., "CRASH", "OUT_OF_MEMORY", etc.). |
12,490 | def canonical_headers(self, headers_to_sign):
l = [%(n.lower().strip(),
headers_to_sign[n].strip()) for n in headers_to_sign]
l.sort()
return .join(l) | Return the headers that need to be included in the StringToSign
in their canonical form by converting all header keys to lower
case, sorting them in alphabetical order and then joining
them into a string, separated by newlines. |
12,491 | def _interchange_level_from_filename(fullname):
(name, extension, version) = _split_iso9660_filename(fullname)
interchange_level = 1
if version != b and (int(version) < 1 or int(version) > 32767):
interchange_level = 3
if b in name or b in extension:
interchange_level = 3
if len(name) > 8 or len(extension) > 3:
interchange_level = 3
try:
_check_d1_characters(name)
_check_d1_characters(extension)
except pycdlibexception.PyCdlibInvalidInput:
interchange_level = 3
return interchange_level | A function to determine the ISO interchange level from the filename.
In theory, there are 3 levels, but in practice we only deal with level 1
and level 3.
Parameters:
fullname - The filename to use to determine the interchange level.
Returns:
The interchange level determined from this filename. |
12,492 | def select_whole_line(self, line=None, apply_selection=True):
if line is None:
line = self.current_line_nbr()
return self.select_lines(line, line, apply_selection=apply_selection) | Selects an entire line.
:param line: Line to select. If None, the current line will be selected
:param apply_selection: True to apply selection on the text editor
widget, False to just return the text cursor without setting it
on the editor.
:return: QTextCursor |
12,493 | def get_full_name(src):
if hasattr(src, "_full_name_"):
return src._full_name_
if hasattr(src, "is_decorator"):
if hasattr(src, "decorator"):
_full_name_ = str(src.decorator)
else:
_full_name_ = str(src)
try:
src._full_name_ = _full_name_
except AttributeError:
pass
except TypeError:
pass
elif hasattr(src, "im_class"):
cls = src.im_class
_full_name_ = get_full_name(cls) + "." + src.__name__
elif hasattr(src, "__module__") and hasattr(src, "__name__"):
_full_name_ = (
("<unknown module>" if src.__module__ is None else src.__module__)
+ "."
+ src.__name__
)
try:
src._full_name_ = _full_name_
except AttributeError:
pass
except TypeError:
pass
else:
_full_name_ = str(get_original_fn(src))
return _full_name_ | Gets full class or function name. |
12,494 | def _init_plot_handles(self):
plots = [self.plot]
if self.plot.subplots:
plots += list(self.plot.subplots.values())
handles = {}
for plot in plots:
for k, v in plot.handles.items():
handles[k] = v
self.plot_handles = handles
requested = {}
for h in self.models+self.extra_models:
if h in self.plot_handles:
requested[h] = handles[h]
elif h in self.extra_models:
print("Warning %s could not find the %s model. "
"The corresponding stream may not work."
% (type(self).__name__, h))
self.handle_ids.update(self._get_stream_handle_ids(requested))
return requested | Find all requested plotting handles and cache them along
with the IDs of the models the callbacks will be attached to. |
12,495 | def get_server_premaster_secret(self, password_verifier, server_private, client_public, common_secret):
return pow((client_public * pow(password_verifier, common_secret, self._prime)), server_private, self._prime) | S = (A * v^u) ^ b % N
:param int password_verifier:
:param int server_private:
:param int client_public:
:param int common_secret:
:rtype: int |
12,496 | def symlink_bundles(self, app, bundle_dir):
for bundle_counter, bundle in enumerate(app.bundles):
count = 0
for path, relpath in bundle.filemap.items():
bundle_path = os.path.join(bundle_dir, relpath)
count += 1
if os.path.exists(bundle_path):
continue
if os.path.isfile(path):
safe_mkdir(os.path.dirname(bundle_path))
os.symlink(path, bundle_path)
elif os.path.isdir(path):
safe_mkdir(bundle_path)
if count == 0:
raise TargetDefinitionException(app.target,
.format(bundle_counter)) | For each bundle in the given app, symlinks relevant matched paths.
Validates that at least one path was matched by a bundle. |
12,497 | def validate_ltsv_label(label):
validate_null_string(label, error_msg="label is empty")
match_list = __RE_INVALID_LTSV_LABEL.findall(preprocess(label))
if match_list:
raise InvalidCharError(
"invalid character found for a LTSV format label: {}".format(match_list)
) | Verify whether ``label`` is a valid
`Labeled Tab-separated Values (LTSV) <http://ltsv.org/>`__ label or not.
:param str label: Label to validate.
:raises pathvalidate.NullNameError: If the ``label`` is empty.
:raises pathvalidate.InvalidCharError:
If invalid character(s) found in the ``label`` for a LTSV format label. |
12,498 | def set_popup_menu(self, menu):
self.popup_menu = menu
self.in_queue.put(MPImagePopupMenu(menu)) | set a popup menu on the frame |
12,499 | def do_scan_range(self, line):
self.application.master.ScanRange(opendnp3.GroupVariationID(1, 2), 0, 3, opendnp3.TaskConfig().Default()) | Do an ad-hoc scan of a range of points (group 1, variation 2, indexes 0-3). Command syntax is: scan_range |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.