Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
382,700 | def run(self, **kwargs):
logger.info()
try:
with open(app_settings.IP_ASSEMBLER_IP_CHANGED_FILE, 'r') as f:
content_list = f.readlines()
if len(content_list) == 0:
ip_count_old = -1
else:
ip_count_old = int(content_list[0])
except IOError:
ip_count_old = -1
logger.info( % {: ip_count_old})
ip_count_now = IP.objects.count()
if ip_count_now == -1 or ip_count_now > ip_count_old + app_settings.IP_ASSEMBLER_IP_CHANGED_THRESHOLD:
logger.info( % {
: ip_count_old,
: ip_count_now
})
UpdateHtaccessLocationsTask().delay()
try:
open(app_settings.IP_ASSEMBLER_IP_CHANGED_FILE, 'w').close()
with open(app_settings.IP_ASSEMBLER_IP_CHANGED_FILE, 'w') as f:
f.write(str(ip_count_now))
except IOError:
logger.exception( % {: app_settings.IP_ASSEMBLER_IP_CHANGED_FILE})
else:
logger.info() | Does the magic! |
382,701 | def info_post_request(self, node, info):
for agent in node.neighbors():
node.transmit(what=info, to_whom=agent) | Run when a request to create an info is complete. |
382,702 | def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=_CliFormatter)
parser.add_argument(, , action=,
help=)
fb_group = parser.add_argument_group()
fb_group.add_argument(
, , help=(
))
fb_group.add_argument(
, help=)
fb_group.add_argument(
, help=)
fb_group.add_argument(
, help=)
error_group = parser.add_argument_group()
error_group.add_argument(, ,
help=)
error_group.add_argument(,
help=)
error_group.add_argument(,
help=(
))
parser.set_defaults(**_defaults())
return parser.parse_args() | Parse and return command line arguments. |
382,703 | def collect_segment_partitions(self):
from collections import defaultdict
partitions = defaultdict(set)
for p in self.dataset.partitions:
if p.type == p.TYPE.SEGMENT:
name = p.identity.name
name.segment = None
partitions[name].add(p)
return partitions | Return a dict of segment partitions, keyed on the name of the parent partition |
382,704 | def submit(self, spec):
spec = ApplicationSpec._from_any(spec)
resp = self._call(, spec.to_protobuf())
return resp.id | Submit a new skein application.
Parameters
----------
spec : ApplicationSpec, str, or dict
A description of the application to run. Can be an
``ApplicationSpec`` object, a path to a yaml/json file, or a
dictionary description of an application specification.
Returns
-------
app_id : str
The id of the submitted application. |
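A minimal usage sketch for the row above, assuming the skein package and a running skein driver; per the docstring, `submit()` also accepts a path to a yaml/json spec file (the filename below is a placeholder):

```python
# Hedged sketch: requires a YARN cluster with skein installed and a valid
# application spec; 'my_app_spec.yaml' is an illustrative path only.
import skein

with skein.Client() as client:
    app_id = client.submit('my_app_spec.yaml')  # ApplicationSpec, dict, or file path
    print(app_id)                               # e.g. 'application_1526134340424_0012'
```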
382,705 | def find_lexer_class_for_filename(_fn, code=None):
matches = []
fn = basename(_fn)
for modname, name, _, filenames, _ in itervalues(LEXERS):
for filename in filenames:
if _fn_matches(fn, filename):
if name not in _lexer_cache:
_load_lexers(modname)
matches.append((_lexer_cache[name], filename))
for cls in find_plugin_lexers():
for filename in cls.filenames:
if _fn_matches(fn, filename):
matches.append((cls, filename))
if sys.version_info > (3,) and isinstance(code, bytes):
code = guess_decode(code)
def get_rating(info):
cls, filename = info
bonus = '*' not in filename and 0.5 or 0
if code:
return cls.analyse_text(code) + bonus, cls.__name__
return cls.priority + bonus, cls.__name__
if matches:
matches.sort(key=get_rating)
return matches[-1][0] | Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Returns None if not found. |
382,706 | def create_object_if_not_exists(self, alias, name=None, *args, **kwargs):
if name is None:
raise ValueError("Method requires an object `name`.")
obj_creator = functools.partial(self.create_object,
alias,
name=name,
*args,
**kwargs)
return self._storage.add_if_not_exists(name, obj_creator) | Constructs the type with the given alias using the given args and kwargs.
NB: aliases may be the alias' object type itself if that type is known.
:API: public
:param alias: Either the type alias or the type itself.
:type alias: string|type
:param *args: These pass through to the underlying callable object.
:param **kwargs: These pass through to the underlying callable object.
:returns: The created object, or an existing object with the same `name`. |
382,707 | def _send_msg(self, header, payload):
if self.verbose:
print(, repr(header))
print(, repr(payload))
assert header.payload == len(payload)
try:
sent = self.socket.send(header + payload)
except IOError as err:
raise ConnError(*err.args)
if sent < len(header + payload):
raise ShortWrite(sent, len(header + payload))
assert sent == len(header + payload), sent | send message to server |
382,708 | def get_dev_details(ip_address, auth, url):
get_dev_details_url = "/imcrs/plat/res/device?resPrivilegeFilter=false&ip=" + \
str(ip_address) + "&start=0&size=1000&orderBy=id&desc=false&total=false"
f_url = url + get_dev_details_url
r = requests.get(f_url, auth=auth, headers=HEADERS)
try:
if r.status_code == 200:
dev_details = (json.loads(r.text))
if len(dev_details) == 0:
print("Device not found")
return "Device not found"
elif type(dev_details[]) == list:
for i in dev_details[]:
if i[] == ip_address:
dev_details = i
return dev_details
elif type(dev_details[]) == dict:
return dev_details[]
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " get_dev_details: An Error has occured" | Takes string input of IP address to issue RESTUL call to HP IMC\n
:param ip_address: string object of dotted decimal notation of IPv4 address
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: dictionary of device details
:rtype: dict
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.device import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> dev_1 = get_dev_details('10.101.0.221', auth.creds, auth.url)
>>> assert type(dev_1) is dict
>>> assert 'sysName' in dev_1
>>> dev_2 = get_dev_details('8.8.8.8', auth.creds, auth.url)
Device not found
>>> assert type(dev_2) is str |
382,709 | def delete_user(self, username, params=None):
if username in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument .")
return self.transport.perform_request(
"DELETE", _make_path("_security", "user", username), params=params
) | `<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-user.html>`_
:arg username: username
:arg refresh: If `true` (the default) then refresh the affected shards
to make this operation visible to search, if `wait_for` then wait
for a refresh to make this operation visible to search, if `false`
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for' |
382,710 | def parse(self, paramfile):
with open(paramfile, 'r') as f:
for line in f.readlines():
line_clean = line.rstrip().split('#')[0]
if line_clean and '=' in line:
attribute, value = line_clean.split('=')
try:
value_eval = eval(value.strip())
except NameError:
value_eval = value.strip()
finally:
setattr(self, attribute.strip(), value_eval) | Read parameter file and set parameter values.
File should have python-like syntax. Full file name needed. |
382,711 | def write(url, content, **args):
relay = urlparse.urlparse(args.pop(, ))
try:
smtplib_SMTPS = functools.partial(smtplib.SMTP_SSL,
keyfile=args.pop(, None),
certfile=args.pop(, None))
except AttributeError:
def smtplib_SMTPS():
raise ValueError(relay.geturl())
filename = args.pop(, )
content_type, encoding = mimetypes.guess_type(filename)
content_type = args.pop(, content_type)
encoding = args.pop(, encoding)
maintype, subtype = content_type.split()
content = content_types.get(content_types).format(content, **args)
content = content_encodings.get(encoding).encode(content)
message = {
: application.MIMEApplication,
: text.MIMEText}[maintype](content, subtype)
if filename:
message.set_param(, (, , filename.decode()))
if encoding:
message[] = encoding
message[] = urllib.unquote(url.path)
for name, value in urlparse.parse_qsl(url.query):
message[name.replace(, )] = value
if message[] is None:
username = os.environ.get()
username = os.environ.get(, username)
username = os.environ.get(, username)
message[] = .format(username, socket.getfqdn())
client.quit() | Put an object into a ftp URL. |
382,712 | def tap_and_hold(self, xcoord, ycoord):
self._actions.append(lambda: self._driver.execute(
Command.TOUCH_DOWN, {
'x': int(xcoord),
'y': int(ycoord)}))
return self | Touch down at given coordinates.
:Args:
- xcoord: X Coordinate to touch down.
- ycoord: Y Coordinate to touch down. |
382,713 | def reddening(self,extval):
T = 10.0**(-0.4*extval*self.obscuration)
ans = ExtinctionSpectralElement(wave=self.wave,
waveunits=self.waveunits,
throughput=T,
name=%(self.name, extval))
ans.citation = self.litref
return ans | Compute the reddening for the given extinction.
.. math::
A(V) = R(V) \\; \\times \\; E(B-V)
\\textnormal{THRU} = 10^{-0.4 \\; A(V)}
.. note::
``self.litref`` is passed into ``ans.citation``.
Parameters
----------
extval : float
Value of :math:`E(B-V)` in magnitudes.
Returns
-------
ans : `~pysynphot.spectrum.ArraySpectralElement`
Extinction curve to apply to a source spectrum. |
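The throughput formula in the docstring can be checked with plain arithmetic; the R(V) and E(B-V) values below are illustrative only:

```python
# Worked example of THRU = 10 ** (-0.4 * A_V) with A_V = R_V * E(B-V).
R_V = 3.1            # assumed Milky Way-like value, for illustration
E_BV = 0.2           # E(B-V) in magnitudes
A_V = R_V * E_BV     # 0.62 mag of extinction
throughput = 10.0 ** (-0.4 * A_V)
print(A_V, round(throughput, 3))   # 0.62 0.565
```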
382,714 | def _generate_author_query(self, author_name):
name_variations = [name_variation.lower()
for name_variation
in generate_minimal_name_variations(author_name)]
if author_name_contains_fullnames(author_name):
specialized_author_filter = [
{
: {
: [
{
: {ElasticSearchVisitor.AUTHORS_NAME_VARIATIONS_FIELD: names_variation[0]}
},
generate_match_query(
ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME[],
names_variation[1],
with_operator_and=True
)
]
}
} for names_variation
in product(name_variations, name_variations)
]
else:
specialized_author_filter = [
{: {ElasticSearchVisitor.AUTHORS_NAME_VARIATIONS_FIELD: name_variation}}
for name_variation in name_variations
]
query = {
: {
: {
: {
: specialized_author_filter
}
},
: {
: {
ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME[]: author_name
}
}
}
}
return generate_nested_query(ElasticSearchVisitor.AUTHORS_NESTED_QUERY_PATH, query) | Generates a query handling specifically authors.
Notes:
The match query is generic enough to return many results. Then, using the filter clause we truncate these
so that we imitate legacy's behaviour on returning more "exact" results. E.g. Searching for `Smith, John`
shouldn't return papers of 'Smith, Bob'.
Additionally, doing a ``match`` with ``"operator": "and"`` in order to be even more exact in our search, by
requiring that ``full_name`` field contains both |
382,715 | def bitop_or(self, dest, key, *keys):
return self.execute(b'BITOP', b'OR', dest, key, *keys) | Perform bitwise OR operations between strings. |
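The method above ultimately issues the Redis `BITOP OR dest key [key ...]` command; a hedged illustration with the synchronous redis-py client (the class in the row belongs to a different, async client):

```python
import redis

r = redis.Redis()
r.set('k1', b'\x0f')
r.set('k2', b'\xf0')
r.bitop('OR', 'dest', 'k1', 'k2')   # same BITOP OR semantics as the method above
print(r.get('dest'))                # b'\xff'
```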
382,716 | def remove_sample(self, md5):
record = self.database[self.sample_collection].find_one({: md5})
if not record:
return
print % (record[], record[]/1024.0/1024.0)
self.database[self.sample_collection].remove({: record[]})
self.gridfs_handle.delete(record[])
print % (self.sample_storage_size(), self.samples_cap) | Delete a specific sample |
382,717 | def deleteMapTable(self, name, session):
duplicate_map_tables = session.query(MapTable).filter(MapTable.mapTableFile == self).filter(MapTable.name == name).all()
for duplicate_map_table in duplicate_map_tables:
if duplicate_map_table.indexMap:
session.delete(duplicate_map_table.indexMap)
session.delete(duplicate_map_table)
session.commit() | Remove duplicate map table if it exists |
382,718 | def expect_keyword(parser, value):
token = parser.token
if token.kind == TokenKind.NAME and token.value == value:
advance(parser)
return token
raise GraphQLSyntaxError(
parser.source,
token.start,
u'Expected "{}", found {}'.format(value, get_token_desc(token)),
) | If the next token is a keyword with the given value, return that
token after advancing the parser. Otherwise, do not change the parser
state and return False. |
382,719 | def update_service_definitions(self, service_definitions):
content = self._serialize.body(service_definitions, )
self._send(http_method=,
location_id=,
version=,
content=content) | UpdateServiceDefinitions.
[Preview API]
:param :class:`<VssJsonCollectionWrapper> <azure.devops.v5_0.location.models.VssJsonCollectionWrapper>` service_definitions: |
382,720 | def has_nvme_ssd(system_obj):
storage_value = False
storage_resource = _get_attribute_value_of(system_obj, )
if storage_resource is not None:
storage_value = _get_attribute_value_of(
storage_resource, , default=False)
return storage_value | Gets if the system has any drive as NVMe SSD drive
:param system_obj: The HPESystem object.
:returns True if system has SSD drives and protocol is NVMe. |
382,721 | def _helper(result,
graph,
number_edges_remaining: int,
node_blacklist: Set[BaseEntity],
invert_degrees: Optional[bool] = None,
):
original_node_count = graph.number_of_nodes()
log.debug(, number_edges_remaining)
for _ in range(number_edges_remaining):
source, possible_step_nodes, c = None, set(), 0
while not source or not possible_step_nodes:
source = get_random_node(result, node_blacklist, invert_degrees=invert_degrees)
c += 1
if c >= original_node_count:
log.warning()
log.warning(, source)
log.warning(, node_blacklist)
return
if source is None:
continue
step_node = random.choice(list(possible_step_nodes))
key, attr_dict = random.choice(list(graph[source][step_node].items()))
result.add_edge(source, step_node, key=key, **attr_dict) | Help build a random graph.
:type result: networkx.Graph
:type graph: networkx.Graph |
382,722 | def search(self, args):
kwargs = {}
for a in args:
k, v = a.split('=')
kwargs[k] = v
return self._paged_api_call(self.flickr.photos_search, kwargs) | Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)... |
382,723 | def process_csxml_file(self, filename, interval=None, lazy=False):
if interval is None:
interval = (None, None)
tmp_fname = tempfile.mktemp(os.path.basename(filename))
fix_character_encoding(filename, tmp_fname)
self.__f = open(tmp_fname, )
self._gen = self._iter_through_csxml_file_from_handle(*interval)
if not lazy:
for stmt in self._gen:
self.statements.append(stmt)
return | Processes a filehandle to MedScan csxml input into INDRA
statements.
The CSXML format consists of a top-level `<batch>` root element
containing a series of `<doc>` (document) elements, in turn containing
`<sec>` (section) elements, and in turn containing `<sent>` (sentence)
elements.
Within the `<sent>` element, a series of additional elements appear in
the following order:
* `<toks>`, which contains a tokenized form of the sentence in its text
attribute
* `<textmods>`, which describes any preprocessing/normalization done to
the underlying text
* `<match>` elements, each of which contains one of more `<entity>`
elements, describing entities in the text with their identifiers.
The local IDs of each entities are given in the `msid` attribute of
this element; these IDs are then referenced in any subsequent SVO
elements.
* `<svo>` elements, representing subject-verb-object triples. SVO
elements with a `type` attribute of `CONTROL` represent normalized
regulation relationships; they often represent the normalized
extraction of the immediately preceding (but unnormalized SVO
element). However, in some cases there can be a "CONTROL" SVO
element without its parent immediately preceding it.
Parameters
----------
filename : string
The path to a Medscan csxml file.
interval : (start, end) or None
Select the interval of documents to read, starting with the
`start`th document and ending before the `end`th document. If
either is None, the value is considered undefined. If the value
exceeds the bounds of available documents, it will simply be
ignored.
lazy : bool
If True, only create a generator which can be used by the
`get_statements` method. If False, populate the statements list now. |
382,724 | def report_error(self, line_number, offset, text, check):
if options.quiet == 1 and not self.file_errors:
message(self.filename)
self.file_errors += 1
code = text[:4]
options.counters[code] = options.counters.get(code, 0) + 1
options.messages[code] = text[5:]
if options.quiet:
return
if options.testsuite:
base = os.path.basename(self.filename)[:4]
if base == code:
return
if base[0] == 'E' and code[0] == 'W':
return
if ignore_code(code):
return
if options.counters[code] == 1 or options.repeat:
message("%s:%s:%d: %s" %
(self.filename, line_number, offset + 1, text))
if options.show_source:
line = self.lines[line_number - 1]
message(line.rstrip())
message(' ' * offset + '^')
if options.show_pep8:
message(check.__doc__.lstrip().rstrip()) | Report an error, according to options. |
382,725 | def _prep_ssh(
self,
tgt,
fun,
arg=(),
timeout=None,
tgt_type=,
kwarg=None,
**kwargs):
opts = copy.deepcopy(self.opts)
opts.update(kwargs)
if timeout:
opts[] = timeout
arg = salt.utils.args.condition_input(arg, kwarg)
opts[] = [fun] + arg
opts[] = tgt_type
opts[] = tgt
opts[] = arg
return salt.client.ssh.SSH(opts) | Prepare the arguments |
382,726 | def gradient(self):
functional = self
class KLGradient(Operator):
def __init__(self):
super(KLGradient, self).__init__(
functional.domain, functional.domain, linear=False)
def _call(self, x):
if functional.prior is None:
return (-1.0) / x + 1
else:
return (-functional.prior) / x + 1
return KLGradient() | r"""Gradient of the KL functional.
The gradient of `KullbackLeibler` with ``prior`` :math:`g` is given
as
.. math::
\nabla F(x) = 1 - \frac{g}{x}.
The gradient is not defined in points where one or more components
are non-positive. |
382,727 | def __software_to_pkg_id(self, publisher, name, is_component, is_32bit):
if publisher:
pub_lc = publisher.replace(, ).lower()
else:
pub_lc =
if name:
name_lc = name.replace(, ).lower()
else:
name_lc =
if is_component:
soft_type =
else:
soft_type =
if is_32bit:
soft_type +=
default_pkg_id = pub_lc++name_lc++soft_type
if self.__pkg_obj and hasattr(self.__pkg_obj, ):
pkg_id = self.__pkg_obj.to_pkg_id(publisher, name, is_component, is_32bit)
if pkg_id:
return pkg_id
return default_pkg_id | Determine the Package ID of a software/component using the
software/component ``publisher``, ``name``, whether it is a software or a
component, and whether it is 32bit or 64bit architecture.
Args:
publisher (str): Publisher of the software/component.
name (str): Name of the software.
is_component (bool): True if package is a component.
is_32bit (bool): True if the software/component is 32bit architecture.
Returns:
str: Package Id |
382,728 | def remove_range(self, start, end):
return self._sl.remove_range(
start, end, callback=lambda sc, value: self._dict.pop(value)) | Remove a range by score. |
382,729 | def disassemble(qobj):
run_config = qobj.config.to_dict()
user_qobj_header = qobj.header.to_dict()
circuits = _experiments_to_circuits(qobj)
return circuits, run_config, user_qobj_header | Disassemble a qobj and return the circuits, run_config, and user header
Args:
qobj (Qobj): The input qobj object to disassemble
Returns:
circuits (list): A list of quantum circuits
run_config (dict): The dict of the run config
user_qobj_header (dict): The dict of any user headers in the qobj |
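A hedged round-trip sketch, assuming a Qiskit Terra version that still ships `assemble()` and `qiskit.assembler.disassemble()`:

```python
from qiskit import QuantumCircuit, assemble
from qiskit.assembler import disassemble

qc = QuantumCircuit(1, 1)
qc.h(0)
qc.measure(0, 0)

qobj = assemble(qc, shots=1024)                        # circuit -> Qobj
circuits, run_config, user_header = disassemble(qobj)  # Qobj -> circuits + config
print(len(circuits), run_config)
```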
382,730 | def t_INDENTIFIER(t):
if t.value in reserved:
t.type = t.value.upper()
if t.value in reservedMap:
t.value = reservedMap[t.value]
elif t.value in strStatment:
t.type =
return t | r'(\$?[_a-zA-Z][_a-zA-Z0-9]*)|(__[A-Z_]+__) |
382,731 | def updateAnomalyLikelihoods(anomalyScores,
params,
verbosity=0):
if verbosity > 3:
print("In updateAnomalyLikelihoods.")
print("Number of anomaly scores:", len(anomalyScores))
print("First 20:", anomalyScores[0:min(20, len(anomalyScores))])
print("Params:", params)
if len(anomalyScores) == 0:
raise ValueError("Must have at least one anomalyScore")
if not isValidEstimatorParams(params):
raise ValueError(" is not a valid params structure")
if "historicalLikelihoods" not in params:
params["historicalLikelihoods"] = [1.0]
historicalValues = params["movingAverage"]["historicalValues"]
total = params["movingAverage"]["total"]
windowSize = params["movingAverage"]["windowSize"]
aggRecordList = numpy.zeros(len(anomalyScores), dtype=float)
likelihoods = numpy.zeros(len(anomalyScores), dtype=float)
for i, v in enumerate(anomalyScores):
newAverage, historicalValues, total = (
MovingAverage.compute(historicalValues, total, v[2], windowSize)
)
aggRecordList[i] = newAverage
likelihoods[i] = tailProbability(newAverage, params["distribution"])
likelihoods2 = params["historicalLikelihoods"] + list(likelihoods)
filteredLikelihoods = _filterLikelihoods(likelihoods2)
likelihoods[:] = filteredLikelihoods[-len(likelihoods):]
historicalLikelihoods = likelihoods2[-min(windowSize, len(likelihoods2)):]
newParams = {
"distribution": params["distribution"],
"movingAverage": {
"historicalValues": historicalValues,
"total": total,
"windowSize": windowSize,
},
"historicalLikelihoods": historicalLikelihoods,
}
assert len(newParams["historicalLikelihoods"]) <= windowSize
if verbosity > 3:
print("Number of likelihoods:", len(likelihoods))
print("First 20 likelihoods:", likelihoods[0:min(20, len(likelihoods))])
print("Leaving updateAnomalyLikelihoods.")
return (likelihoods, aggRecordList, newParams) | Compute updated probabilities for anomalyScores using the given params.
:param anomalyScores: a list of records. Each record is a list with the
following three elements: [timestamp, value, score]
Example::
[datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]
:param params: the JSON dict returned by estimateAnomalyLikelihoods
:param verbosity: integer controlling extent of printouts for debugging
:type verbosity: int
:returns: 3-tuple consisting of:
- likelihoods
numpy array of likelihoods, one for each aggregated point
- avgRecordList
list of averaged input records
- params
an updated JSON object containing the state of this metric. |
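A hedged usage sketch against NuPIC's `nupic.algorithms.anomaly_likelihood` module (Python 2 only), which exposes this function together with `estimateAnomalyLikelihoods()` for bootstrapping the params structure:

```python
# Sketch only: record layout is [timestamp, value, anomalyScore] as documented above.
import datetime
import random
from nupic.algorithms import anomaly_likelihood as al

base = datetime.datetime(2013, 8, 10, 23, 0)
history = [[base + datetime.timedelta(minutes=i), random.random() * 10, random.random()]
           for i in range(400)]
_, _, params = al.estimateAnomalyLikelihoods(history)

new_records = [[base + datetime.timedelta(minutes=401), 7.5, 1.0]]
likelihoods, avg_records, params = al.updateAnomalyLikelihoods(new_records, params)
print(likelihoods)
```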
382,732 | def select_many(self, *args):
s = apply_query_operators(self.storage, args)
if isinstance(s, QuerySet):
return s
else:
return QuerySet(s) | Select several instances from the instance pool. Query operators such as
where_eq(), order_by() or filter functions may be passed as optional
arguments. |
382,733 | def generate_kmers(seq, k=4):
if isinstance(seq, basestring):
for i in range(len(seq) - k + 1):
yield seq[i:i + k]
elif isinstance(seq, (int, float, Decimal)):
for s in generate_kmers(str(seq)):
yield s
else:
for s in seq:
yield generate_kmers(s, k) | Return a generator of all the unique substrings (k-mer or q-gram strings) within a sequence/string
Not efficient for large k and long strings.
Doesn't form substrings that are shorter than k, only exactly k-mers
Used for algorithms like UniqTag for genome unique identifier locality sensitive hashing.
jellyfish is a C implementation of k-mer counting
If seq is a string generate a sequence of k-mer string
If seq is a sequence of strings then generate a sequence of generators or sequences of k-mer strings
If seq is a sequence of sequences of strings generate a sequence of sequence of generators ...
Default k = 4 because that's the length of a gene base-pair?
>>> ' '.join(generate_kmers('AGATAGATAGACACAGAAATGGGACCACAC'))
'AGAT GATA ATAG TAGA AGAT GATA ATAG TAGA AGAC GACA ACAC CACA ACAG ... CCAC CACA ACAC' |
382,734 | def get_locations(self, url):
if not is_valid_url(url):
raise InvalidURLError(.format(url))
try:
response = self.session.head(url)
except (ConnectionError, InvalidSchema, Timeout):
raise StopIteration
try:
generator = self.session.resolve_redirects(
response,
response.request
)
for response in generator:
yield response.url
except InvalidURL:
pass
except (ConnectionError, InvalidSchema, Timeout) as error:
last_url = response.headers['location']
if isinstance(error, Timeout) or is_valid_url(last_url):
yield last_url | Get valid location header values from responses.
:param url: a URL address. If a HEAD request sent to it
fails because the address has invalid schema, times out
or there is a connection error, the generator yields nothing.
:returns: valid redirection addresses. If a request for
a redirection address fails, and the address is still a valid
URL string, it's included as the last yielded value. If it's
not, the previous value is the last one.
:raises ValueError: if the argument is not a valid URL |
382,735 | def std(self):
std_expr = grizzly_impl.groupby_std(
[self.column],
[self.column_type],
self.grouping_columns,
self.grouping_column_types
)
unzipped_columns = grizzly_impl.unzip_columns(
std_expr,
self.grouping_column_types + [WeldDouble()],
)
index_expr = LazyOpResult(
grizzly_impl.get_field(unzipped_columns, 0),
self.grouping_column_types[0],
1
)
column_expr = LazyOpResult(
grizzly_impl.get_field(unzipped_columns, 1),
self.grouping_column_types[0],
1
)
group_expr = utils.group([index_expr, column_expr])
return SeriesWeld(
group_expr.expr,
WeldDouble(),
index_type=self.grouping_column_types[0],
index_name=self.grouping_column_names[0]
) | Standard deviation
Note that it is by default normalized by n - 1
# TODO, what does pandas do for multiple grouping columns?
# Currently we are just going to use one grouping column |
382,736 | def evaluate_and_log_bleu(estimator, bleu_writer, bleu_source, bleu_ref):
subtokenizer = tokenizer.Subtokenizer(
os.path.join(FLAGS.data_dir, FLAGS.vocab_file))
uncased_score, cased_score = translate_and_compute_bleu(
estimator, subtokenizer, bleu_source, bleu_ref)
print("Bleu score (uncased):", uncased_score)
print("Bleu score (cased):", cased_score)
summary = tf.Summary(value=[
tf.Summary.Value(tag="bleu/uncased", simple_value=uncased_score),
tf.Summary.Value(tag="bleu/cased", simple_value=cased_score),
])
bleu_writer.add_summary(summary, get_global_step(estimator))
bleu_writer.flush()
return uncased_score, cased_score | Calculate and record the BLEU score. |
382,737 | def add_project_name_or_id_arg(arg_parser, required=True, help_text_suffix="manage"):
project_name_or_id = arg_parser.add_mutually_exclusive_group(required=required)
name_help_text = "Name of the project to {}.".format(help_text_suffix)
add_project_name_arg(project_name_or_id, required=False, help_text=name_help_text)
id_help_text = "ID of the project to {}.".format(help_text_suffix)
add_project_id_arg(project_name_or_id, required=False, help_text=id_help_text) | Adds project name or project id argument. These two are mutually exclusive.
:param arg_parser:
:param required:
:param help_text_suffix:
:return: |
382,738 | def _parse_jetconfig(self):
conf = env(, None)
if not conf:
return
import urlparse
auth = None
port = None
conf = conf.split().pop()
entry = urlparse.urlparse(conf)
scheme = entry.scheme
host = entry.netloc or entry.path | Undocumented cross-compatibility functionality with jetconfig
(https://github.com/shakefu/jetconfig) that is very sloppy. |
382,739 | def clear_samples(self):
self._lastclear = self.niterations
self._itercounter = 0
self._sampler.reset() | Clears the chain and blobs from memory. |
382,740 | def jitter_run(res, rstate=None, approx=False):
if rstate is None:
rstate = np.random
nsamps, samples_n = _get_nsamps_samples_n(res)
logl = res.logl
nunif = len(nlive_start)
for i in range(nunif):
nstart = nlive_start[i]
bound = bounds[i]
sn = samples_n[bound[0]:bound[1]]
y_arr = rstate.exponential(scale=1.0, size=nstart+1)
ycsum = y_arr.cumsum()
ycsum /= ycsum[-1]
uorder = ycsum[np.append(nstart, sn-1)]
rorder = uorder[1:] / uorder[:-1]
t_arr[bound[0]:bound[1]] = rorder
logvol = np.log(t_arr).cumsum()
h = 0.
logz = -1.e300
loglstar = -1.e300
logzvar = 0.
logvols_pad = np.concatenate(([0.], logvol))
logdvols = misc.logsumexp(a=np.c_[logvols_pad[:-1], logvols_pad[1:]],
axis=1, b=np.c_[np.ones(nsamps),
-np.ones(nsamps)])
logdvols += math.log(0.5)
dlvs = -np.diff(np.append(0., res.logvol))
saved_logwt, saved_logz, saved_logzvar, saved_h = [], [], [], []
for i in range(nsamps):
loglstar_new = logl[i]
logdvol, dlv = logdvols[i], dlvs[i]
logwt = np.logaddexp(loglstar_new, loglstar) + logdvol
logz_new = np.logaddexp(logz, logwt)
lzterm = (math.exp(loglstar - logz_new) * loglstar +
math.exp(loglstar_new - logz_new) * loglstar_new)
h_new = (math.exp(logdvol) * lzterm +
math.exp(logz - logz_new) * (h + logz) -
logz_new)
dh = h_new - h
h = h_new
logz = logz_new
logzvar += dh * dlv
loglstar = loglstar_new
saved_logwt.append(logwt)
saved_logz.append(logz)
saved_logzvar.append(logzvar)
saved_h.append(h)
new_res = Results([item for item in res.items()])
new_res.logvol = np.array(logvol)
new_res.logwt = np.array(saved_logwt)
new_res.logz = np.array(saved_logz)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
new_res.logzerr = np.sqrt(np.array(saved_logzvar))
new_res.h = np.array(saved_h)
return new_res | Probes **statistical uncertainties** on a nested sampling run by
explicitly generating a *realization* of the prior volume associated
with each sample (dead point). Companion function to :meth:`resample_run`
and :meth:`simulate_run`.
Parameters
----------
res : :class:`~dynesty.results.Results` instance
The :class:`~dynesty.results.Results` instance taken from a previous
nested sampling run.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance.
approx : bool, optional
Whether to approximate all sets of uniform order statistics by their
associated marginals (from the Beta distribution). Default is `False`.
Returns
-------
new_res : :class:`~dynesty.results.Results` instance
A new :class:`~dynesty.results.Results` instance with corresponding
weights based on our "jittered" prior volume realizations. |
382,741 | def create_vpc(self):
self.create_stack(
self.vpc_name,
,
parameters=define_parameters(
VpcBlock="10.42.0.0/16",
Subnet01Block="10.42.1.0/24",
Subnet02Block="10.42.2.0/24",
Subnet03Block="10.42.3.0/24"
)
) | Create a virtual private cloud on Amazon's Web services configured
for deploying JupyterHubs. |
382,742 | def resizeEvent(self, event):
self.resized.emit()
return super(MetadataConverterDialog, self).resizeEvent(event) | Emit custom signal when the window is re-sized.
:param event: The re-sized event.
:type event: QResizeEvent |
382,743 | def _tp__get_typed_properties(self):
try:
return tuple(getattr(self, p) for p in self._tp__typed_properties)
except AttributeError:
raise NotImplementedError | Return a tuple of typed attrs that can be used for comparisons.
Raises:
NotImplementedError: Raised if this class was mixed into a class
that was not created by _AnnotatedObjectMeta. |
382,744 | def save(evt, designer):
"Basic save functionality: just replaces the gui code"
ok = gui.confirm("Save the changes?", "GUI2PY Designer",
cancel=True, default=True)
if ok:
wx_obj = evt.GetEventObject()
w = wx_obj.obj
try:
if DEBUG: print "saving..."
fin = open(designer.filename, "r")
fout = open(designer.filename + ".bak", "w")
fout.write(fin.read())
fout.close()
fin.close()
if designer.filename.endswith(".rsrc.py"):
gui.save(designer.filename, [gui.dump(w)])
else:
fin = open(designer.filename + ".bak", "r")
fout = open(designer.filename, "w")
copy = True
newlines = fin.newlines or "\n"
def dump(obj, indent=1):
"recursive convert object to string"
for ctl in obj[:]:
write(ctl, indent)
def write(ctl, indent):
if ctl[:]:
fout.write(" " * indent * 4)
fout.write("with %s:" % ctl.__repr__(parent=None, indent=indent, context=True))
fout.write(newlines)
dump(ctl, indent + 1)
else:
fout.write(" " * indent * 4)
fout.write(ctl.__repr__(parent=None, indent=indent))
fout.write(newlines)
dumped = False
for line in fin:
if line.startswith("
fout.write(line)
fout.write(newlines)
write(w, indent=0)
fout.write(newlines)
dumped = True
copy = False
if line.startswith("
copy = True
if copy:
fout.write(line)
if not dumped:
gui.alert("No valid
"Unable to write down design code!",
"Design not updated:")
fout.close()
fin.close()
except Exception, e:
import traceback
print(traceback.print_exc())
ok = gui.confirm("Close anyway?\n%s" % str(e), ,
ok=True, cancel=True)
if ok is not None:
wx.CallAfter(exit)
return ok | Basic save functionality: just replaces the gui code |
382,745 | def get_items(self, container_id, scope=None, item_path=None, metadata=None, format=None, download_file_name=None, include_download_tickets=None, is_shallow=None):
route_values = {}
if container_id is not None:
route_values[] = self._serialize.url(, container_id, )
query_parameters = {}
if scope is not None:
query_parameters[] = self._serialize.query(, scope, )
if item_path is not None:
query_parameters[] = self._serialize.query(, item_path, )
if metadata is not None:
query_parameters[] = self._serialize.query(, metadata, )
if format is not None:
query_parameters[] = self._serialize.query(, format, )
if download_file_name is not None:
query_parameters[] = self._serialize.query(, download_file_name, )
if include_download_tickets is not None:
query_parameters[] = self._serialize.query(, include_download_tickets, )
if is_shallow is not None:
query_parameters[] = self._serialize.query(, is_shallow, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize(, self._unwrap_collection(response)) | GetItems.
[Preview API]
:param long container_id:
:param str scope:
:param str item_path:
:param bool metadata:
:param str format:
:param str download_file_name:
:param bool include_download_tickets:
:param bool is_shallow:
:rtype: [FileContainerItem] |
382,746 | def pip_command_output(pip_args):
import sys
import pip
from io import StringIO
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
pip.main(pip_args)
output = mystdout.getvalue()
mystdout.truncate(0)
sys.stdout = old_stdout
return output | Get output (as a string) from pip command
:param pip_args: list of pip switches to pass
:return: string with results |
382,747 | def strip_praw_submission(cls, sub):
reddit_link = re.compile(
r)
author = getattr(sub, , )
name = getattr(author, , )
flair = getattr(sub, , )
data = {}
data[] = sub
data[] =
data[] = sub.title
data[] = sub.selftext
data[] = sub.selftext_html or
data[] = cls.humanize_timestamp(sub.created_utc)
data[] = cls.humanize_timestamp(sub.created_utc, True)
data[] = .format(sub.num_comments)
data[] = .format( if sub.hide_score else sub.score)
data[] = name
data[] = sub.permalink
data[] = six.text_type(sub.subreddit)
data[] = .format(flair.strip()) if flair else
data[] = sub.url
data[] = sub.likes
data[] = sub.gilded
data[] = sub.over_18
data[] = sub.stickied
data[] = sub.hidden
data[] = None
data[] = None
data[] = sub.saved
if sub.edited:
data[] = .format(
cls.humanize_timestamp(sub.edited))
data[] = .format(
cls.humanize_timestamp(sub.edited, True))
else:
data[] =
data[] =
if sub.url.split()[-1] == sub.permalink.split()[-1]:
data[] = .format(data[])
data[] =
elif reddit_link.match(sub.url):
url_parts = sub.url.split()
data[] = url_parts[4]
data[] = .format(url_parts[4])
if in url_parts:
data[] =
else:
data[] =
else:
data[] = sub.url
data[] =
return data | Parse through a submission and return a dict with data ready to be
displayed through the terminal.
Definitions:
permalink - URL to the reddit page with submission comments.
url_full - URL that the submission points to.
url - URL that will be displayed on the subreddit page, may be
"selfpost", "x-post submission", "x-post subreddit", or an
external link. |
382,748 | def replace(self, year=None, week=None):
return self.__class__(self.year if year is None else year,
self.week if week is None else week) | Return a Week with either the year or week attribute value replaced |
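A hedged usage sketch, assuming the class in the row behaves like the `Week` type from the isoweek package (which has a replace() method of this shape):

```python
from isoweek import Week

w = Week(2023, 15)
print(w.replace(week=16) == Week(2023, 16))    # True
print(w.replace(year=2024) == Week(2024, 15))  # True
```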
382,749 | def shadow_calc(data):
up_shadow = abs(data.high - (max(data.open, data.close)))
down_shadow = abs(data.low - (min(data.open, data.close)))
entity = abs(data.open - data.close)
towards = True if data.open < data.close else False
print( * 15)
print(.format(up_shadow))
print(.format(down_shadow))
print(.format(entity))
print(.format(towards))
return up_shadow, down_shadow, entity, data.date, data.code | Calculate the upper and lower shadows (candlestick wicks)
Arguments:
data {DataStruct.slice} -- the input is a single market-data slice
Returns:
up_shadow {float} -- upper shadow
down_shadow {float} -- lower shadow
entity {float} -- candle body
date {str} -- date
code {str} -- security code |
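The shadow arithmetic can be checked with plain numbers; the OHLC values below are made up and stand in for the DataStruct slice the function expects:

```python
# Worked example of the upper/lower shadow, body, and direction calculations.
open_, high, low, close = 10.0, 10.8, 9.7, 10.5
up_shadow = abs(high - max(open_, close))    # 10.8 - 10.5 = 0.3
down_shadow = abs(low - min(open_, close))   # |9.7 - 10.0| = 0.3
entity = abs(open_ - close)                  # 0.5 (candle body)
towards = open_ < close                      # True: a bullish (up) bar
print(up_shadow, down_shadow, entity, towards)
```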
382,750 | def not_(self, value, name=):
if isinstance(value.type, types.VectorType):
rhs = values.Constant(value.type, (-1,) * value.type.count)
else:
rhs = values.Constant(value.type, -1)
return self.xor(value, rhs, name=name) | Bitwise integer complement:
name = ~value |
382,751 | def _load(self, url, verbose):
msg = u"_load url: %s" % url
self._last_query_str = url
log.debug(msg)
if verbose:
print msg
response = self.__api__.request(url)
return response | Execute a request against the Salesking API to fetch the items
:param url: url to fetch
:return response
:raises SaleskingException with the corresponding http errors |
382,752 | def value_ranges(self, value_ranges):
self._value_ranges = value_ranges
self._logger.log(, .format(
value_ranges
)) | Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val" |
382,753 | def stretch_cv(x,sr,sc,interpolation=cv2.INTER_AREA):
if sr==0 and sc==0: return x
r,c,*_ = x.shape
x = cv2.resize(x, None, fx=sr+1, fy=sc+1, interpolation=interpolation)
nr,nc,*_ = x.shape
cr = (nr-r)//2; cc = (nc-c)//2
return x[cr:r+cr, cc:c+cc] | Stretches image x horizontally by sr+1, and vertically by sc+1 while retaining the original image size and proportion. |
382,754 | def setConnStringForWindows():
global _dbConnectString
from peek_platform.file_config.PeekFileConfigABC import PeekFileConfigABC
from peek_platform.file_config.PeekFileConfigSqlAlchemyMixin import \
PeekFileConfigSqlAlchemyMixin
from peek_platform import PeekPlatformConfig
class _WorkerTaskConfigMixin(PeekFileConfigABC,
PeekFileConfigSqlAlchemyMixin):
pass
PeekPlatformConfig.componentName = peekWorkerName
_dbConnectString = _WorkerTaskConfigMixin().dbConnectString | Set Conn String for Windows
Windows has a different way of forking processes, which causes the
@worker_process_init.connect signal not to work in "CeleryDbConnInit" |
382,755 | def list_jobs(self, argument_filters=None):
title = % self.__class__.__name__
if argument_filters:
self.fields.validate(argument_filters, )
url = % self.url
job_list = self._get_request(url)
def query_function(**kwargs):
job_details = {}
for key, value in kwargs.items():
if key in self.job_model.schema.keys():
job_details[key] = value
for query_criteria in argument_filters:
if self.job_model.query(query_criteria, job_details):
return True
return False
results_list = []
for job in job_list:
job_details = self._construct_details(job)
if argument_filters:
if query_function(**job_details):
results_list.append(job_details)
else:
results_list.append(job_details)
return results_list | a method to list jobs in the scheduler
:param argument_filters: list of query criteria dictionaries for class argument keys
:return: list of jobs (which satisfy the filters)
NOTE: query criteria architecture
each item in the argument filters list must be a dictionary
which is composed of one or more key names which represent the
dotpath to a key in the job record to be queried with a value
that is a dictionary of conditional operators used to test the
value in the corresponding key in each record in the list of jobs.
eg. argument_filters = [ { '.function': { 'must_contain': [ 'debug' ] } } ]
this example filter looks in the function key of each job for a
value which contains the characters 'debug'.
NOTE: the filter method uses a query filters list structure to represent
the disjunctive normal form of a logical expression. a record is
added to the results list if any query criteria dictionary in the
list evaluates to true. within each query criteria dictionary, all
declared conditional operators must evaluate to true.
in this way, the argument_filters represents a boolean OR operator and
each criteria dictionary inside the list represents a boolean AND
operator between all keys in the dictionary.
NOTE: each query_criteria uses the architecture of query declaration in
the jsonModel.query method
the list of keys in each query_criteria is the same as the arguments for
adding a job to the scheduler
query_criteria = {
'.id': {},
'.function': {},
'.name': {},
'.dt': {},
'.interval': {},
'.month': {},
'.day': {},
'.weekday': {},
'.hour': {},
'.minute': {},
'.second': {},
'.start_date': {},
'.end_date': {}
}
conditional operators for '.id', '.function', '.name':
"byte_data": false,
"discrete_values": [ "" ],
"excluded_values": [ "" ],
"greater_than": "",
"less_than": "",
"max_length": 0,
"max_value": "",
"min_length": 0,
"min_value": "",
"must_contain": [ "" ],
"must_not_contain": [ "" ],
"contains_either": [ "" ]
conditional operators for '.dt', 'start', 'end':
"discrete_values": [ 0.0 ],
"excluded_values": [ 0.0 ],
"greater_than": 0.0,
"less_than": 0.0,
"max_value": 0.0,
"min_value": 0.0
operators for '.interval', '.month', '.day', '.weekday', '.hour', '.minute', '.second':
"discrete_values": [ 0 ],
"excluded_values": [ 0 ],
"greater_than": 0,
"less_than": 0,
"max_value": 0,
"min_value": 0 |
382,756 | def recover_devices(cls):
if "_devices" in globals():
return
global _devices
confs_dir = os.path.abspath(os.path.normpath(cfg.CONF.dhcp_confs))
for netid in os.listdir(confs_dir):
conf_dir = os.path.join(confs_dir, netid)
intf_filename = os.path.join(conf_dir, )
try:
with open(intf_filename, 'r') as f:
ifname = f.read()
_devices[netid] = ifname
except IOError:
LOG.error(,
intf_filename)
LOG.debug("Recovered device %s for network %s'",
ifname, netid) | Track devices.
Creates global dict to track device names across driver invocations
and populates based on current devices configured on the system. |
382,757 | def get_profile(A):
"Fail-soft profile getter; if no profile is present assume none and quietly ignore."
try:
with open(os.path.expanduser(A.profile)) as I:
profile = json.load(I)
return profile
except:
return {} | Fail-soft profile getter; if no profile is present assume none and quietly ignore. |
382,758 | def badge_label(self, badge):
kind = badge.kind if isinstance(badge, Badge) else badge
return self.__badges__[kind] | Display the badge label for a given kind |
382,759 | def _upload(auth_http, project_id, bucket_name, file_path, object_name, acl):
with open(file_path, 'rb') as f:
data = f.read()
content_type, content_encoding = mimetypes.guess_type(file_path)
headers = {
: project_id,
: API_VERSION,
: acl,
: % len(data)
}
if content_type: headers['Content-Type'] = content_type
if content_encoding: headers['Content-Encoding'] = content_encoding
try:
response, content = auth_http.request(
% (bucket_name, object_name),
method=,
headers=headers,
body=data)
except httplib2.ServerNotFoundError, se:
raise Error(404, )
if response.status >= 300:
raise Error(response.status, response.reason)
return content | Uploads a file to Google Cloud Storage.
Args:
auth_http: An authorized httplib2.Http instance.
project_id: The project to upload to.
bucket_name: The bucket to upload to.
file_path: Path to the file to upload.
object_name: The name within the bucket to upload to.
acl: The ACL to assign to the uploaded file. |
382,760 | def expand_tpm(tpm):
unconstrained = np.ones([2] * (tpm.ndim - 1) + [tpm.shape[-1]])
return tpm * unconstrained | Broadcast a state-by-node TPM so that singleton dimensions are expanded
over the full network. |
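A minimal NumPy sketch of the broadcasting trick used above, with a hypothetical 2-node state-by-node TPM whose first node dimension is a singleton:

```python
import numpy as np

tpm = np.array([[[0.1, 0.9],
                 [0.4, 0.6]]])                     # shape (1, 2, 2)
unconstrained = np.ones([2] * (tpm.ndim - 1) + [tpm.shape[-1]])
expanded = tpm * unconstrained                     # singleton dim broadcast to size 2
print(expanded.shape)                              # (2, 2, 2)
```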
382,761 | def _finalize_step(self):
t = time.time()
if self._callback is not None:
self._callback(self.age)
t2 = time.time()
self._step_processing_time += t2 - t
self._log(logging.INFO, "Step {} run in: {:.3f}s ({:.3f}s of "
"actual processing time used)"
.format(self.age, self._step_processing_time,
t2 - self._step_start_time))
self._processing_time += self._step_processing_time | Finalize simulation step after all agents have acted for the current
step. |
382,762 | def apply_gemm(scope, input_name, output_name, container, operator_name=None, alpha=1.0, beta=1.0,
transA=0, transB=0):
name = _create_name_or_use_existing_one(scope, , operator_name)
attrs = {: alpha, : beta, : transA, : transB}
if container.target_opset < 5:
attrs[] = 1
attrs[] = 1
elif container.target_opset < 7:
attrs[] = 6
attrs[] = 1
else:
attrs[] = 7
container.add_node(, input_name, output_name, name=name, **attrs) | Applies operator `gemm <https://github.com/onnx/onnx/blob/master/docs/Operators.md#gemm>`. |
382,763 | def terminate_process(self, idf):
try:
p = self.q.pop(idf)
p.terminate()
return p
except:
return None | Terminate a process by id |
382,764 | def qsnorm(p):
d = p
if d < 0. or d > 1.:
print()
sys.exit()
x = 0.
if (d - 0.5) > 0:
d = 1. - d
if (d - 0.5) < 0:
t2 = -2. * np.log(d)
t = np.sqrt(t2)
x = t - old_div((2.515517 + .802853 * t + .010328 * t2),
(1. + 1.432788 * t + .189269 * t2 + .001308 * t * t2))
if p < 0.5:
x = -x
return x | rational approximation for x where q(x)=d, q being the cumulative
normal distribution function. taken from Abramowitz & Stegun p. 933
|error(x)| < 4.5*10**-4 |
382,765 | def CountFlowOutputPluginLogEntries(self,
client_id,
flow_id,
output_plugin_id,
with_type=None):
return len(
self.ReadFlowOutputPluginLogEntries(
client_id,
flow_id,
output_plugin_id,
0,
sys.maxsize,
with_type=with_type)) | Returns number of flow output plugin log entries of a given flow. |
382,766 | def _is_valid_duration(self, inpt, metadata):
from dlkit.abstract_osid.calendaring.primitives import Duration as abc_duration
if isinstance(inpt, abc_duration):
return True
else:
return False | Checks if input is a valid Duration |
382,767 | def md5(self, raw_output=False):
res = hashlib.md5(str(self.generator.random.random()).encode())
if raw_output:
return res.digest()
return res.hexdigest() | Calculates the md5 hash of a given string
:example 'cfcd208495d565ef66e7dff9f98764da' |
382,768 | def get_mcu_definition(self, project_file):
project_file = join(getcwd(), project_file)
uvproj_dic = xmltodict.parse(file(project_file), dict_constructor=dict)
mcu = MCU_TEMPLATE
try:
mcu[] = {
: {
: {
: [uvproj_dic[][][][][][]],
: [None if not uvproj_dic[][][][][][] else
int(uvproj_dic[][][][][][])],
: [uvproj_dic[][][][][][]],
: [uvproj_dic[][][][][][]],
: [uvproj_dic[][][][][][]],
: [uvproj_dic[][][][][][]],
: [uvproj_dic[][][][][][]],
}
}
}
except KeyError:
logging.debug("The project_file %s seems to be not valid .uvproj file.")
return mcu
return mcu | Parse project file to get mcu definition |
382,769 | def get_all_chats(self):
chats = self.wapi_functions.getAllChats()
if chats:
return [factory_chat(chat, self) for chat in chats]
else:
return [] | Fetches all chats
:return: List of chats
:rtype: list[Chat] |
382,770 | def notices(self):
return [self._db.notices.pop()[8:].strip() for x in range(len(self._db.notices))] | pops and returns all notices
http://initd.org/psycopg/docs/connection.html#connection.notices |
382,771 | def get_config_directory():
from .commands.stacker import Stacker
command = Stacker()
namespace = command.parse_args()
return os.path.dirname(namespace.config.name) | Return the directory the config file is located in.
This enables us to use relative paths in config values. |
382,772 | def simple_generate_batch(cls, create, size, **kwargs):
strategy = enums.CREATE_STRATEGY if create else enums.BUILD_STRATEGY
return cls.generate_batch(strategy, size, **kwargs) | Generate a batch of instances.
These instances will be either 'built' or 'created'.
Args:
size (int): the number of instances to generate
create (bool): whether to 'build' or 'create' the instances.
Returns:
object list: the generated instances |
382,773 | def Nu_Mokry(Re, Pr, rho_w=None, rho_b=None):
Nu = 0.0061*Re**0.904*Pr**0.684
if rho_w and rho_b:
Nu *= (rho_w/rho_b)**0.564
return Nu | r'''Calculates internal convection Nusselt number for turbulent vertical
upward flow in a pipe under supercritical conditions according to [1]_,
and reviewed in [2]_.
.. math::
Nu_b = 0.0061 Re_b^{0.904} \bar{Pr}_b^{0.684}
\left(\frac{\rho_w}{\rho_b}\right)^{0.564}
\bar{Cp} = \frac{H_w-H_b}{T_w-T_b}
Parameters
----------
Re : float
Reynolds number with bulk fluid properties, [-]
Pr : float
Prandtl number with bulk fluid properties and an average heat capacity
between the wall and bulk temperatures [-]
rho_w : float, optional
Density at the wall temperature, [kg/m^3]
rho_b : float, optional
Density at the bulk temperature, [kg/m^3]
Returns
-------
Nu : float
Nusselt number with bulk fluid properties, [-]
Notes
-----
For the data used to develop the correlation, P was set at 20 MPa, and D
was 10 mm. G varied from 200-1500 kg/m^2/s and q varied from 0 to 1250
kW/m^2.
Cp used in the calculation of Prandtl number should be the average value
of those at the wall and the bulk temperatures.
For deteriorated heat transfer, this was the four most accurate correlation
in [2]_ with a MAD of 24.0%. It was also the 7th most accurate against
enhanced heat transfer, with a MAD of 14.7%, and the most accurate for the
normal heat transfer database as well as the top correlation in all
categories combined.
If the extra density information is not provided, it will not be used.
Examples
--------
>>> Nu_Mokry(1E5, 1.2, 330, 290.)
246.1156319156992
References
----------
.. [1] Mokry, Sarah, Igor Pioro, Amjad Farah, Krysten King, Sahil Gupta,
Wargha Peiman, and Pavel Kirillov. "Development of Supercritical Water
Heat-Transfer Correlation for Vertical Bare Tubes." Nuclear Engineering
and Design, International Conference on Nuclear Energy for New Europe
2009, 241, no. 4 (April 2011): 1126-36.
doi:10.1016/j.nucengdes.2010.06.012.
.. [2] Chen, Weiwei, Xiande Fang, Yu Xu, and Xianghui Su. "An Assessment of
Correlations of Forced Convection Heat Transfer to Water at
Supercritical Pressure." Annals of Nuclear Energy 76 (February 2015):
451-60. doi:10.1016/j.anucene.2014.10.027. |
382,774 | def sample(self, nsims=1000):
if self.latent_variables.estimation_method not in [, ]:
raise Exception("No latent variables estimated!")
else:
lv_draws = self.draw_latent_variables(nsims=nsims)
sigmas = [self._model(lv_draws[:,i])[0] for i in range(nsims)]
data_draws = np.array([ss.t.rvs(loc=self.latent_variables.z_list[-1].prior.transform(lv_draws[-1,i]),
df=self.latent_variables.z_list[-2].prior.transform(lv_draws[-2,i]), scale=np.exp(sigmas[i]/2.0)) for i in range(nsims)])
return data_draws | Samples from the posterior predictive distribution
Parameters
----------
nsims : int (default : 1000)
How many draws from the posterior predictive distribution
Returns
----------
- np.ndarray of draws from the data |
382,775 | def ASRS(self, params):
try:
Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_COMMA_SEPARATED, params)
except iarm.exceptions.ParsingError:
Rb, Rc = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
Ra = Rb
if self.is_register(Rc):
self.check_arguments(low_registers=(Ra, Rc))
self.match_first_two_parameters(Ra, Rb)
def ASRS_func():
if (self.register[Rc] > 0) and (self.register[Rb] & (1 << (self.register[Rc] - 1))):
self.set_APSR_flag_to_value(, 1)
else:
self.set_APSR_flag_to_value(, 0)
if self.register[Ra] & (1 << (self._bit_width - 1)):
self.register[Ra] = (self.register[Ra] >> self.register[Rc]) | (
int('1' * self.register[Rc], 2) << (self._bit_width - self.register[Rc]))
else:
self.register[Ra] = self.register[Ra] >> self.register[Rc]
self.set_NZ_flags(self.register[Ra])
else:
self.check_arguments(low_registers=(Ra, Rb), imm5_counting=(Rc,))
shift_amount = self.check_immediate(Rc)
def ASRS_func():
if self.register[Rb] & (1 << (shift_amount - 1)):
self.set_APSR_flag_to_value(, 1)
else:
self.set_APSR_flag_to_value(, 0)
if self.register[Ra] & (1 << (self._bit_width - 1)):
self.register[Ra] = (self.register[Ra] >> shift_amount) | (
int('1' * shift_amount, 2) << (self._bit_width - shift_amount))
else:
self.register[Ra] = self.register[Rb] >> shift_amount
self.set_NZ_flags(self.register[Ra])
return ASRS_func | ASRS [Ra,] Ra, Rc
ASRS [Ra,] Rb, #imm5_counting
Arithmetic shift right Rb by Rc or imm5_counting and store the result in Ra
imm5 counting is [1, 32]
In the register shift, the first two operands must be the same register
Ra, Rb, and Rc must be low registers
If Ra is omitted, then it is assumed to be Rb |
382,776 | def compile(self):
from ..engines import get_default_engine
engine = get_default_engine(self)
return engine.compile(self) | Compile this expression into an ODPS SQL
:return: compiled DAG
:rtype: str |
382,777 | def how_long(length=4, choices=len(words), speed=1000 * 1000 * 1000 * 1000,
optimism=2):
return ((choices ** length) / (speed * optimism)) | How long might it take to guess a password?
@param length: the number of words that we're going to choose.
@type length: L{int}
@param choices: the number of words we might choose between.
@type choices: L{int}
@param speed: the speed of our hypothetical password guesser, in guesses
per second.
@type speed: L{int}
@param optimism: When we start guessing all the options, we probably won't
have to guess I{all} of them to get a hit. This assumes that the
guesser will have to guess only C{1/optimism} of the total number of
possible options before it finds a hit. |
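A worked instance of the estimate, using an assumed 7776-word list (diceware-sized), a guesser at 10**12 guesses per second, and the default optimism of 2:

```python
choices = 7776            # assumed word-list size
length = 4
speed = 10 ** 12          # guesses per second (hypothetical attacker)
optimism = 2
seconds = (choices ** length) / (speed * optimism)
print(seconds)            # ~1828 seconds, i.e. roughly half an hour for 4 words
```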
382,778 | def get_as_string(self, key):
value = self.get(key)
return StringConverter.to_string(value) | Converts map element into a string or returns "" if conversion is not possible.
:param key: an index of element to get.
:return: string value of the element or "" if conversion is not supported. |
382,779 | def missing_particle(separation=0.0, radius=RADIUS, SNR=20):
s = init.create_two_particle_state(imsize=6*radius+4, axis=, sigma=1.0/SNR,
delta=separation, radius=radius, stateargs={: True}, psfargs={: 1e-6})
s.obj.typ[1] = 0.
s.reset()
return s, s.obj.pos.copy() | create a two particle state and compare it to featuring using a single particle guess |
382,780 | def get_group_member_profile(self, group_id, user_id, timeout=None):
response = self._get(
.format(group_id=group_id, user_id=user_id),
timeout=timeout
)
return Profile.new_from_json_dict(response.json) | Call get group member profile API.
https://devdocs.line.me/en/#get-group-room-member-profile
Gets the user profile of a member of a group that
the bot is in. This can be the user ID of a user who has
not added the bot as a friend or has blocked the bot.
:param str group_id: Group ID
:param str user_id: User ID
:param timeout: (optional) How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is self.http_client.timeout
:type timeout: float | tuple(float, float)
:rtype: :py:class:`linebot.models.responses.Profile`
:return: Profile instance |
382,781 | def locate(self, pattern):
top_matches = self.top.locate(pattern)
bottom_matches = self.bottom.locate(pattern)
return [top_matches, bottom_matches] | Find sequences matching a pattern. For a circular sequence, the
search extends over the origin.
:param pattern: str or NucleicAcidSequence for which to find matches.
:type pattern: str or coral.DNA
:returns: A list of top and bottom strand indices of matches.
:rtype: list of lists of indices (ints)
:raises: ValueError if the pattern is longer than either the input
sequence (for linear DNA) or twice as long as the input
sequence (for circular DNA). |
382,782 | def _hmmalign(self, input_path, directions, pipeline,
forward_reads_output_path, reverse_reads_output_path):
if pipeline == PIPELINE_AA:
reverse_direction_reads_present=False
else:
reverse_direction_reads_present=False in directions.values()
with tempfile.NamedTemporaryFile(prefix=, suffix=) as for_file_fh:
for_file = for_file_fh.name
with tempfile.NamedTemporaryFile(prefix=, suffix=) as rev_file_fh:
rev_file = rev_file_fh.name
if reverse_direction_reads_present:
reverse = []
forward = []
records = list(SeqIO.parse(open(input_path), 'fasta'))
for record in records:
read_id = record.id
if directions[read_id] == True:
forward.append(record)
elif directions[read_id] == False:
reverse.append(record)
else:
raise Exception(logging.error())
exit(1)
logging.debug("Found %i forward direction reads" % len(forward))
logging.debug("Found %i reverse direction reads" % len(reverse))
with open(for_file, 'w') as for_aln:
logging.debug("Writing forward direction reads to %s" % for_file)
for record in forward:
for_aln.write('>' + record.id + '\n')
for_aln.write(str(record.seq) + '\n')
if any(forward):
self.hmmalign_sequences(self.aln_hmm, for_file, forward_reads_output_path)
else:
cmd = % (forward_reads_output_path)
extern.run(cmd)
with open(rev_file, 'w') as rev_aln:
logging.debug("Writing reverse direction reads to %s" % rev_file)
for record in reverse:
if record.id and record.seq:
rev_aln.write('>' + record.id + '\n')
rev_aln.write(str(record.seq.reverse_complement()) + '\n')
self.hmmalign_sequences(self.aln_hmm, rev_file, reverse_reads_output_path)
conv_files = [forward_reads_output_path, reverse_reads_output_path]
return conv_files
else:
self.hmmalign_sequences(self.aln_hmm, input_path, forward_reads_output_path)
conv_files = [forward_reads_output_path]
return conv_files | Align reads to the aln_hmm. Receives unaligned sequences and
aligns them.
Parameters
----------
input_path : str
Filename of unaligned hits to be aligned
directions : dict
dictionary containing read names as keys, and complement
as the entry (True=Forward, False=Reverse)
pipeline: str
either PIPELINE_AA = "P" or PIPELINE_NT = "D"
forward_reads_output_path: str
Where to write aligned forward reads
reverse_reads_output_path: str
Where to write aligned reverse reads
Returns
-------
conv_files : list
List of paths to the aligned output file(s). |
382,783 | def get_stp_mst_detail_output_msti_instance_id(self, **kwargs):
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
msti = ET.SubElement(output, "msti")
instance_id = ET.SubElement(msti, "instance-id")
instance_id.text = kwargs.pop('instance_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
382,784 | def get_bounds(self, bin_num):
min_value = pow(2.0, float(bin_num) / 2.0) * self.min_value
max_value = pow(2.0, float(bin_num + 1.0) / 2.0) * self.min_value
return self.Bounds(min_value, max_value) | Get the bounds of a bin, given its index `bin_num`.
:returns: a `Bounds` namedtuple with properties min and max
respectively. |
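To make the bin arithmetic above concrete, here is a standalone sketch of the same bounds computation for a histogram whose smallest bin starts at min_value = 1.0; the function and namedtuple names are stand-ins, not the class's real API:

    from collections import namedtuple

    Bounds = namedtuple('Bounds', ['min', 'max'])

    def bin_bounds(bin_num, min_value=1.0):
        # each bin spans a factor of sqrt(2): [2**(n/2), 2**((n+1)/2)) * min_value
        return Bounds(2.0 ** (bin_num / 2.0) * min_value,
                      2.0 ** ((bin_num + 1) / 2.0) * min_value)

    print(bin_bounds(0))  # Bounds(min=1.0, max~1.414)
    print(bin_bounds(4))  # Bounds(min=4.0, max~5.657)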
382,785 | def sphgen(self, force_rerun=False):
log.debug('{}: running sphgen'.format(self.id))
if not self.dms_path:
raise ValueError('Surface representation (DMS file) must be created before running sphgen')
sph = op.join(self.dock_dir, '{}_receptor.sph'.format(self.id))
insph = op.join(self.dock_dir, 'INSPH')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=sph):
with open(insph, "w") as f:
f.write("{}\n".format(self.dms_path))
f.write("R\n")
f.write("X\n")
f.write("0.0\n")
f.write("4.0\n")
f.write("1.4\n")
f.write("{}\n".format(sph))
os.chdir(self.dock_dir)
cmd = "sphgen_cpp"
os.system(cmd)
os.remove(insph)
if ssbio.utils.is_non_zero_file(sph):
self.sphgen_path = sph
log.debug('{}: successfully created sphere representation'.format(self.sphgen_path))
else:
log.critical('Error running sphgen_cpp on DMS file {}'.format(self.dms_path)) | Create sphere representation (sph file) of receptor from the surface representation
Args:
force_rerun (bool): If method should be rerun even if output file exists |
382,786 | def generate_config_parser(config, include_all=False):
config_parser = SafeConfigParser(allow_no_value=True)
for section_name, option_name in _get_included_schema_sections_options(config, include_all):
if not config_parser.has_section(section_name):
config_parser.add_section(section_name)
option = config[section_name][option_name]
if option.get():
config_parser.set(section_name, )
config_parser.set(section_name, + option.get(, ))
if option.get():
config_parser.set(section_name, )
option_value = _get_value(option)
config_parser.set(section_name, option_name, option_value)
config_parser.set(section_name, )
return config_parser | Generates a config parser from a configuration dictionary.
The dictionary contains the merged informations of the schema and,
optionally, of a source configuration file. Values of the source
configuration file will be stored in the *value* field of an option. |
382,787 | def filter_tess_lcdict(lcdict,
filterqualityflags=True,
nanfilter=,
timestoignore=None,
quiet=False):
# [function body garbled in extraction: the original applies the quality-flag
#  filter, drops NaNs from the selected 'sap'/'pdc' flux and error columns,
#  masks any timestoignore windows, and logs ndet before/after each step]
return lcdict | This filters the provided TESS `lcdict`, removing nans and bad
observations.
By default, this function removes points in the TESS LC that have ANY
quality flags set.
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `consolidate_tess_fitslc` or
`read_tess_fitslc`.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {'sap','pdc','sap,pdc'}
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing). The `lcdict` is filtered IN PLACE! |
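A hedged usage sketch for the docstring above; the FITS filename is a placeholder and read_tess_fitslc is assumed to come from the same astrobase module:

    from astrobase.astrotess import read_tess_fitslc, filter_tess_lcdict

    lcd = read_tess_fitslc('tess_lightcurve.fits')  # placeholder path
    lcd = filter_tess_lcdict(lcd,
                             filterqualityflags=True,
                             nanfilter='sap,pdc',
                             timestoignore=[(1325.0, 1326.5)])  # example mask window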
382,788 | def status_pipeline(conf, args):
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
status_result = api.pipeline_status(url, args.pipeline_id, auth, verify_ssl)
return status_result['status'] | Get the status of a pipeline. |
382,789 | async def set_lock(self, resource, lock_identifier):
start_time = time.time()
lock_timeout = self.lock_timeout
successes = await asyncio.gather(*[
i.set_lock(resource, lock_identifier, lock_timeout) for
i in self.instances
], return_exceptions=True)
successful_sets = sum(s is None for s in successes)
elapsed_time = time.time() - start_time
locked = True if successful_sets >= int(len(self.instances) / 2) + 1 else False
self.log.debug('Lock on resource %s set in %d of %d instances in %.3f seconds',
resource, successful_sets, len(self.instances), elapsed_time)
if not locked:
raise LockError('Can not acquire the lock "%s" in the majority of instances' % resource)
return elapsed_time | Tries to set the lock to all the redis instances
:param resource: The resource string name to lock
:param lock_identifier: The id of the lock. A unique string
:return float: The elapsed time that took to lock the instances
in seconds
:raises: LockError if the lock has not been set to at least (N/2 + 1)
instances |
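The quorum test in the entry above reduces to a simple majority count; a standalone sketch of that check, independent of any Redis client:

    def have_quorum(successful_sets, total_instances):
        # the lock is only considered held if it was set on at least N/2 + 1 instances
        return successful_sets >= total_instances // 2 + 1

    print(have_quorum(2, 3))  # True  -> 2 of 3 is a majority
    print(have_quorum(2, 5))  # False -> need at least 3 of 5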
382,790 | def requires_authentication(func):
def _auth(self, *args, **kwargs):
if not self._authenticated:
raise NotAuthenticatedException(
'Cannot call {} before authenticating.'.format(func.__name__)
+ ' Please authenticate first.')
else:
return func(self, *args, **kwargs)
return _auth | Function decorator that throws an exception if the user
is not authenticated, and executes the function normally
if the user is authenticated. |
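An illustrative use of the decorator above with a hypothetical client class; only the _authenticated attribute is taken from the snippet, everything else is made up:

    class ExampleClient:
        def __init__(self):
            self._authenticated = False

        def authenticate(self):
            self._authenticated = True

        @requires_authentication
        def fetch_data(self):
            return 'payload'

    client = ExampleClient()
    client.authenticate()       # without this call, fetch_data() raises NotAuthenticatedException
    print(client.fetch_data())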
382,791 | def linear_connection(plist, lane):
logger.debug(
"Establishing linear connection with processes: {}".format(plist))
res = []
previous = None
for p in plist:
if not previous:
previous = p
continue
res.append({
"input": {
"process": previous,
"lane": lane
},
"output": {
"process": p,
"lane": lane
}
})
previous = p
return res | Connects a linear list of processes into a list of dictionaries
Parameters
----------
plist : list
List with process names. This list should contain at least two entries.
lane : int
Corresponding lane of the processes
Returns
-------
res : list
List of dictionaries with the links between processes |
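For the docstring above, a three-process list on lane 1 yields two link dictionaries; a quick sketch of the expected output (the process names are invented):

    links = linear_connection(['fastqc', 'trimmomatic', 'spades'], lane=1)
    # links ==
    # [{'input':  {'process': 'fastqc',      'lane': 1},
    #   'output': {'process': 'trimmomatic', 'lane': 1}},
    #  {'input':  {'process': 'trimmomatic', 'lane': 1},
    #   'output': {'process': 'spades',      'lane': 1}}]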
382,792 | def open_url(url, httpuser=None, httppassword=None, method=None):
if os.getenv() == :
log.debug()
try:
sslctx = ssl.create_default_context()
except Exception as e:
log.error( % e)
raise Stop(
)
sslctx.check_hostname = False
sslctx.verify_mode = ssl.CERT_NONE
opener = urllib2.build_opener(urllib2.HTTPSHandler(context=sslctx))
else:
opener = urllib2.build_opener()
if 'USER_AGENT' in os.environ:
opener.addheaders = [('User-agent', os.environ.get('USER_AGENT'))]
log.debug('Setting user-agent: %s', os.environ.get('USER_AGENT'))
if httpuser and httppassword:
mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
mgr.add_password(None, url, httpuser, httppassword)
log.debug()
opener.add_handler(urllib2.HTTPBasicAuthHandler(mgr))
opener.add_handler(urllib2.HTTPDigestAuthHandler(mgr))
elif httpuser or httppassword:
raise FileException(
'httpuser and httppassword must both be specified', url)
req = urllib2.Request(url)
if method:
req.get_method = lambda: method
return opener.open(req) | Open a URL using an opener that will simulate a browser user-agent
url: The URL
httpuser, httppassword: HTTP authentication credentials (either both or
neither must be provided)
method: The HTTP method
Caller is responsible for calling close() on the returned object |
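A hedged usage sketch for the helper above; the URL and credentials are placeholders, and the response is closed as the docstring requires:

    resp = open_url('https://example.org/data.zip',
                    httpuser='alice', httppassword='s3cret',  # hypothetical credentials
                    method='GET')
    try:
        payload = resp.read()
    finally:
        resp.close()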
382,793 | def update_field(self, elements):
changed = False
if isinstance(elements, list):
if self.is_any or self.is_none:
self.add_many(elements)
changed = True
else:
_elements = element_resolver(elements, do_raise=False)
if set(self.all_as_href()) ^ set(_elements):
self.data[self.typeof] = _elements
changed = True
if changed and self.rule and (isinstance(self, (Source, Destination)) and \
self.rule.typeof in ('fw_ipv4_nat_rule', 'fw_ipv6_nat_rule')):
self.rule._update_nat_field(self)
return changed | Update the field with a list of provided values but only if the values
are different. Return a boolean indicating whether a change was made
indicating whether `save` should be called. If the field is currently
set to any or none, then no comparison is made and field is updated.
:param list elements: list of elements in href or Element format
to compare to existing field
:rtype: bool |
382,794 | def dispatch(self, frame):
if frame.type() == HeartbeatFrame.type():
self.send_heartbeat()
elif frame.type() == MethodFrame.type():
if frame.class_id == 10:
cb = self._method_map.get(frame.method_id)
if cb:
method = self.clear_synchronous_cb(cb)
method(frame)
else:
raise Channel.InvalidMethod(
"unsupported method %d on channel %d",
frame.method_id, self.channel_id)
else:
raise Channel.InvalidClass(
"class %d is not supported on channel %d",
frame.class_id, self.channel_id)
else:
raise Frame.InvalidFrameType(
"frame type %d is not supported on channel %d",
frame.type(), self.channel_id) | Override the default dispatch since we don't need the rest of
the stack. |
382,795 | def infer_batch(self, dataloader):
sum_losses = 0
len_losses = 0
for input_data, input_label in dataloader:
data = gluon.utils.split_and_load(input_data, self.ctx, even_split=False)
label = gluon.utils.split_and_load(input_label, self.ctx, even_split=False)
batch_sum_losses, batch_len_losses = self.infer(data, label)
sum_losses += batch_sum_losses
len_losses += batch_len_losses
return sum_losses, len_losses | Description : inference for LipNet |
382,796 | def get_js(self):
js_file = os.path.join(self.theme_dir, 'js', 'slides.js')
if not os.path.exists(js_file):
js_file = os.path.join(THEMES_DIR, 'default', 'js', 'slides.js')
if not os.path.exists(js_file):
raise IOError(u"Cannot find slides.js in default theme")
with codecs.open(js_file, encoding=self.encoding) as js_file_obj:
return {
'path_url': utils.get_path_url(js_file, self.relative),
'contents': js_file_obj.read(),
} | Fetches and returns javascript file path or contents, depending if
we want a standalone presentation or not. |
382,797 | def getnamedargs(*args, **kwargs):
adict = {}
for arg in args:
if isinstance(arg, dict):
adict.update(arg)
adict.update(kwargs)
return adict | allows you to pass a dict and named args
so you can pass ({'a':5, 'b':3}, c=8) and get
dict(a=5, b=3, c=8) |
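The docstring above already gives the intended behaviour; a runnable version of that exact example:

    merged = getnamedargs({'a': 5, 'b': 3}, c=8)
    print(merged)  # {'a': 5, 'b': 3, 'c': 8}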
382,798 | def EndEdit(self, row, col, grid, oldVal=None):
self._tc.Unbind(wx.EVT_KEY_UP)
self.ApplyEdit(row, col, grid)
del self._col
del self._row
del self._grid | End editing the cell. This function must check if the current
value of the editing control is valid and different from the
original value (available as oldval in its string form.) If
it has not changed then simply return None, otherwise return
the value in its string form.
*Must Override* |
382,799 | def copy_(name,
source,
force=False,
makedirs=False,
preserve=False,
user=None,
group=None,
mode=None,
subdir=False,
**kwargs):
name = os.path.expanduser(name)
source = os.path.expanduser(source)
ret = {
'name': name,
'changes': {},
'comment': 'Copied "{0}" to "{1}"'.format(source, name),
'result': True}
if not name:
return _error(ret, 'Must provide name to file.copy')
changed = True
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name))
if not os.path.exists(source):
return _error(ret, 'Source file "{0}" is not present'.format(source))
if preserve:
user = __salt__['file.get_user'](source)
group = __salt__['file.get_group'](source)
mode = __salt__['file.get_mode'](source)
else:
user = _test_owner(kwargs, user=user)
if user is None:
user = __opts__['user']
if salt.utils.platform.is_windows():
if group is not None:
log.warning(
'The group argument for %s has been ignored as this is a Windows system.', name
)
group = user
if group is None:
group = __salt__['file.gid_to_group'](
__salt__['user.info'](user).get('gid', 0)
)
u_check = _check_user(user, group)
if u_check:
return _error(ret, u_check)
if mode is None:
mode = __salt__['file.get_mode'](source)
if os.path.isdir(name) and subdir:
name = os.path.join(name, os.path.basename(source))
if os.path.lexists(source) and os.path.lexists(name):
if force and os.path.isfile(name):
hash1 = salt.utils.hashutils.get_hash(name)
hash2 = salt.utils.hashutils.get_hash(source)
if hash1 == hash2:
changed = True
ret['comment'] = ' '.join([ret['comment'], '- files are identical but force flag is set'])
if not force:
changed = False
elif not __opts__['test'] and changed:
try:
__salt__['file.remove'](name, force=force)
except (IOError, OSError):
return _error(
ret,
'Failed to delete "{0}" in preparation for forced copy'.format(name)
)
if __opts__['test']:
if changed:
ret['comment'] = 'File "{0}" is set to be copied to "{1}"'.format(
source,
name
)
ret['result'] = None
else:
ret['comment'] = (
'The target file "{0}" exists and will not be overwritten'.format(name))
ret['result'] = True
return ret
if not changed:
ret['comment'] = (
'The target file "{0}" exists and will not be overwritten'.format(name))
ret['result'] = True
return ret
dname = os.path.dirname(name)
if not os.path.isdir(dname):
if makedirs:
try:
_makedirs(name=name, user=user, group=group, dir_mode=mode)
except CommandExecutionError as exc:
return _error(ret, 'Drive {0} is not mapped'.format(exc.message))
else:
return _error(
ret,
'The target directory {0} is not present'.format(dname))
try:
if os.path.isdir(source):
shutil.copytree(source, name, symlinks=True)
for root, dirs, files in salt.utils.path.os_walk(name):
for dir_ in dirs:
__salt__['file.lchown'](os.path.join(root, dir_), user, group)
for file_ in files:
__salt__['file.lchown'](os.path.join(root, file_), user, group)
else:
shutil.copy(source, name)
ret['changes'] = {name: source}
return ret | If the file defined by the ``source`` option exists on the minion, copy it
to the named path. The file will not be overwritten if it already exists,
unless the ``force`` option is set to ``True``.
.. note::
This state only copies files from one location on a minion to another
location on the same minion. For copying files from the master, use a
:py:func:`file.managed <salt.states.file.managed>` state.
name
The location of the file to copy to
source
The location of the file to copy to the location specified with name
force
If the target location is present then the file will not be moved,
specify "force: True" to overwrite the target file
makedirs
If the target subdirectories don't exist create them
preserve
.. versionadded:: 2015.5.0
Set ``preserve: True`` to preserve user/group ownership and mode
after copying. Default is ``False``. If ``preserve`` is set to ``True``,
then user/group/mode attributes will be ignored.
user
.. versionadded:: 2015.5.0
The user to own the copied file, this defaults to the user salt is
running as on the minion. If ``preserve`` is set to ``True``, then
this will be ignored
group
.. versionadded:: 2015.5.0
The group to own the copied file, this defaults to the group salt is
running as on the minion. If ``preserve`` is set to ``True`` or on
Windows this will be ignored
mode
.. versionadded:: 2015.5.0
The permissions to set on the copied file, aka 644, '0775', '4664'.
If ``preserve`` is set to ``True``, then this will be ignored.
Not supported on Windows.
The default mode for new files and directories corresponds umask of salt
process. For existing files and directories it's not enforced.
subdir
.. versionadded:: 2015.5.0
If the name is a directory then place the file inside the named
directory
.. note::
The copy function accepts paths that are local to the Salt minion.
This function does not support salt://, http://, or the other
additional file paths that are supported by :mod:`states.file.managed
<salt.states.file.managed>` and :mod:`states.file.recurse
<salt.states.file.recurse>`. |