code | docstring
---|---|
def set_comment(self, comment = None):
if comment is None or type(comment) is not str:
raise KPError("Need a comment string")
else:
self.comment = comment
self.last_mod = datetime.now().replace(microsecond=0)
return True
|
This method is used to set the comment.
comment must be a string.
|
def GetClassesByArtifact(cls, artifact_name):
return [
cls.classes[c]
for c in cls.classes
if artifact_name in cls.classes[c].supported_artifacts
]
|
Get the classes that support parsing a given artifact.
|
def _wrap_jinja_filter(self, function):
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except Exception:
return NestedUndefined()
for attribute in dir(function):
if attribute.endswith('filter'):
setattr(wrapper, attribute, getattr(function, attribute))
return wrapper
|
Wrap a Jinja filter so that exceptions are propagated as undefined values.
|
def subsample(time_series, downsample_factor):
Ns = int(np.floor(np.size(time_series)/downsample_factor))
g = gaussian_kernel(0.5*downsample_factor)
ts_blur = np.convolve(time_series,g,'same')
ts_out = np.zeros((Ns,1), dtype='float64')
for k in range(0,Ns):
cpos = (k+.5)*downsample_factor-.5
cfrac = cpos-np.floor(cpos)
cind = int(np.floor(cpos))
if cfrac>0:
ts_out[k]=ts_blur[cind]*(1-cfrac)+ts_blur[cind+1]*cfrac
else:
ts_out[k]=ts_blur[cind]
return ts_out
|
Subsample with Gaussian prefilter
The prefilter will have the filter size $\sigma_g = 0.5 \cdot \mathrm{downsample\_factor}$
Parameters
--------------
time_series : ndarray
Input signal
downsample_factor : float
Downsampling factor
Returns
--------------
ts_out : ndarray
The downsampled signal
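A minimal usage sketch, assuming the ``subsample`` definition above, ``numpy`` imported as ``np``, and this hypothetical stand-in for the module's ``gaussian_kernel`` helper:
import numpy as np

def gaussian_kernel(sigma):
    # Hypothetical helper: a normalized Gaussian sampled on +/- 3 sigma.
    radius = max(1, int(np.ceil(3 * sigma)))
    t = np.arange(-radius, radius + 1)
    g = np.exp(-0.5 * (t / sigma) ** 2)
    return g / g.sum()

ts = np.sin(np.linspace(0, 4 * np.pi, 1000))
ts_small = subsample(ts, downsample_factor=4.0)
print(ts_small.shape)  # (250, 1)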
|
def insert(self, node, before=None):
node._list = self
if self._first is None:
self._first = self._last = node
self._size += 1
return node
if before is None:
self._last._next = node
node._prev = self._last
self._last = node
else:
node._next = before
node._prev = before._prev
if node._prev:
node._prev._next = node
else:
self._first = node
node._next._prev = node
self._size += 1
return node
|
Insert a new node in the list.
If *before* is specified, the new node is inserted before this node.
Otherwise, the node is inserted at the end of the list.
|
def _get_document_path(client, path):
parts = (client._database_string, "documents") + path
return _helpers.DOCUMENT_PATH_DELIMITER.join(parts)
|
Convert a path tuple into a full path string.
Of the form:
``projects/{project_id}/databases/{database_id}/...
documents/{document_path}``
Args:
client (~.firestore_v1beta1.client.Client): The client that holds
configuration details and a GAPIC client object.
path (Tuple[str, ...]): The components in a document path.
Returns:
str: The fully-qualified document path.
|
def getIPString():
if not(NetInfo.systemip):
NetInfo.systemip = ",".join(NetInfo.getSystemIps())
return NetInfo.systemip
|
Return a comma-delimited string of all the system IPs.
|
def _query(self, url=None, params=""):
if url is None:
raise NoUrlError("No URL was provided.")
headers = {'location': None, 'title': None}
headerdata = urllib.urlencode(params)
try:
request = urllib2.Request(url, headerdata)
response = urllib2.urlopen(request)
if 'jsonp' in params:
status = response.read()
else:
status = response.getcode()
info = response.info()
try:
headers['location'] = info['Content-Location']
except KeyError:
pass
try:
headers['title'] = info['X-Instapaper-Title']
except KeyError:
pass
return (status, headers)
except urllib2.HTTPError as exception:
if 'jsonp' in params:
return ('%s({"status":%d})' % (params['jsonp'], exception.code), headers)
else:
return (exception.code, headers)
except IOError as exception:
return (exception.code, headers)
|
method to query a URL with the given parameters
Parameters:
url -> URL to query
params -> dictionary with parameter values
Returns: HTTP response code, headers
If an exception occurred, headers fields are None
|
def meminfo_send(self, brkval, freemem, force_mavlink1=False):
return self.send(self.meminfo_encode(brkval, freemem), force_mavlink1=force_mavlink1)
|
state of APM memory
brkval : heap top (uint16_t)
freemem : free memory (uint16_t)
|
def get_context(pid_file, daemon=False):
port_file = get_context_file_name(pid_file)
if not os.path.exists(port_file):
return None
with open(port_file, "rt") as f:
json_data = f.read()
try:
data = json.loads(json_data)
except ValueError as e:
logger.error("Damaged context json data %s", json_data)
return None
if not daemon:
pid = data.get("pid")
if pid and not check_pid(int(pid)):
return None
return data
|
Get context of running notebook.
A context file is created when notebook starts.
:param daemon: Are we trying to fetch the context inside the daemon. Otherwise do the death check.
:return: dict or None if the process is dead/not launched
|
def get_events_with_n_cluster(event_number, condition='n_cluster==1'):
logging.debug("Calculate events with clusters where " + condition)
n_cluster_in_events = analysis_utils.get_n_cluster_in_events(event_number)
n_cluster = n_cluster_in_events[:, 1]
return n_cluster_in_events[ne.evaluate(condition), 0]
|
Selects the events with a certain number of clusters.
Parameters
----------
event_number : numpy.array
Returns
-------
numpy.array
|
def trace_next_query(self, ):
self._seqid += 1
d = self._reqs[self._seqid] = defer.Deferred()
self.send_trace_next_query()
return d
|
Enables tracing for the next query in this connection and returns the UUID for that trace session
The next query will be traced independently of the trace probability and the returned UUID can be used to query the trace keyspace
|
def _lookup_global(self, symbol):
assert symbol.parts
namespace = self.namespaces
if len(symbol.parts) == 1:
namespace = self.namespaces[None]
try:
return self._lookup_namespace(symbol, namespace)
except Error as orig_exc:
try:
namespace = self.namespaces[None]
return self._lookup_namespace(symbol, namespace)
except Error:
raise orig_exc
|
Helper for lookup_symbol that only looks up global variables.
Args:
symbol: Symbol
|
def latex(self):
if not self:
return ""
s = str(self)
s = s.replace("==", " = ")
s = s.replace("<=", " \leq ")
s = s.replace(">=", " \geq ")
s = s.replace("&&", r" \text{ and } ")
s = s.replace("||", r" \text{ or } ")
return s
|
Returns a string representation for use in LaTeX
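For illustration, the same substitutions applied to a hypothetical condition string (raw strings keep the backslashes intact):
s = "x>=1 && y==2"
s = s.replace("==", " = ")
s = s.replace("<=", r" \leq ")
s = s.replace(">=", r" \geq ")
s = s.replace("&&", r" \text{ and } ")
s = s.replace("||", r" \text{ or } ")
print(s)  # x \geq 1  \text{ and }  y = 2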
|
def _iso_week_of_month(date_value):
"0-starting index which ISO-week in the month this date is"
weekday_of_first = date_value.replace(day=1).weekday()
return (date_value.day + weekday_of_first - 1) // 7
|
0-starting index of which ISO week in the month this date falls in
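A small self-contained check of the formula (the dates are illustrative; January 2024 starts on a Monday):
import datetime

def iso_week_of_month(date_value):
    # Same formula as above, reproduced here so the snippet runs standalone.
    weekday_of_first = date_value.replace(day=1).weekday()
    return (date_value.day + weekday_of_first - 1) // 7

assert iso_week_of_month(datetime.date(2024, 1, 1)) == 0   # Mon, week 0
assert iso_week_of_month(datetime.date(2024, 1, 7)) == 0   # Sun, still week 0
assert iso_week_of_month(datetime.date(2024, 1, 8)) == 1   # Mon, week 1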
|
def pearsonr(self, target, correlation_length, mask=NotSpecified):
from .statistical import RollingPearson
return RollingPearson(
base_factor=self,
target=target,
correlation_length=correlation_length,
mask=mask,
)
|
Construct a new Factor that computes rolling pearson correlation
coefficients between `target` and the columns of `self`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.factors.RollingPearson
A new Factor that will compute correlations between `target` and
the columns of `self`.
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.pearsonr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingPearsonOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.pearsonr`
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
:meth:`Factor.spearmanr`
|
def binary_hash(self, project, patch_file):
global il
exception_file = None
try:
project_exceptions = il.get('project_exceptions')
except KeyError:
logger.info('project_exceptions missing in %s for %s', ignore_list, project)
for project_files in project_exceptions:
if project in project_files:
exception_file = project_files.get(project)
with open(exception_file, 'r') as f:
bl = yaml.safe_load(f)
for key, value in bl.items():
if key == 'binaries':
if patch_file in value:
hashvalue = value[patch_file]
return hashvalue
else:
for key, value in il.items():
if key == 'binaries':
if patch_file in value:
hashvalue = value[patch_file]
return hashvalue
else:
hashvalue = ""
return hashvalue
else:
logger.info('%s not found in %s', project, ignore_list)
logger.info('No project specific exceptions will be applied')
hashvalue = ""
return hashvalue
|
Gathers sha256 hashes from binary lists
|
def stage_import_from_url(self, url, token=None, username=None, password=None, insecure=False):
schema = ImportSchema()
resp = self.service.post(self.base,
params={'url': url, 'token': token, 'username': username, 'password': password, 'insecure': insecure})
return self.service.decode(schema, resp)
|
Stage an import from a URL to another CDRouter system.
:param url: URL to import as string.
:param token: (optional) API token to use as string (may be required if importing from a CDRouter 10+ system).
:param username: (optional) API username to use as string (may be required if importing from a CDRouter 10+ system).
:param password: (optional) API password to use as string (may be required if importing from a CDRouter 10+ system).
:param insecure: (optional) Allow insecure HTTPS connections if bool `True`.
:return: :class:`imports.Import <imports.Import>` object
|
def tabulate(collection, headers, datetime_fmt='%Y-%m-%d %H:%M:%S', **kwargs):
if isinstance(headers, dict):
attrs = headers.keys()
names = [
key if value is None else value for key, value in headers.items()
]
else:
attrs = names = headers
table = [(
format_cell(cell, datetime_fmt=datetime_fmt)
for cell in attrgetter(*attrs)(c)
) for c in collection]
return tblte(table, headers=[h.upper() for h in names], **kwargs)
|
Pretty-print a collection.
|
def get_settings(self, using=None, **kwargs):
return self._get_connection(using).indices.get_settings(index=self._name, **kwargs)
|
Retrieve settings for the index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.get_settings`` unchanged.
|
def read_stack_data(self, size = 128, offset = 0):
aProcess = self.get_process()
return aProcess.read(self.get_sp() + offset, size)
|
Reads the contents of the top of the stack.
@type size: int
@param size: Number of bytes to read.
@type offset: int
@param offset: Offset from the stack pointer to begin reading.
@rtype: str
@return: Stack data.
@raise WindowsError: Could not read the requested data.
|
def start(self):
if self.num_workers:
self.queue = Queue(maxsize=self.num_workers)
self.workers = [_Worker(self) for _ in range(self.num_workers)]
for worker in self.workers:
worker.start()
self.running = True
|
Start the workers.
|
def find_version(file_path):
with open(file_path, 'r') as f:
file_contents = f.read()
version_match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]",
file_contents, re.M)
if version_match:
return version_match.group(1)
else:
raise RuntimeError("unable to find version string")
|
Scrape version information from specified file path.
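A quick self-contained check of the same regex against a hypothetical module file:
import os
import re
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
    f.write('__version__ = "0.4.1"\n')
    path = f.name

with open(path, 'r') as f:
    match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]", f.read(), re.M)
print(match.group(1))  # 0.4.1
os.remove(path)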
|
def clean_for_doc(nb):
new_cells = []
for cell in nb.worksheets[0].cells:
if "input" in cell and cell["input"].strip() == "%pylab inline":
continue
if "outputs" in cell:
outputs = [_i for _i in cell["outputs"] if "text" not in _i or
not _i["text"].startswith("<obspy.core")]
cell["outputs"] = outputs
new_cells.append(cell)
nb.worksheets[0].cells = new_cells
return nb
|
Cleans the notebook to be suitable for inclusion in the docs.
|
def from_callback(cls, cb, transf_cbs, nx, nparams=0, pre_adj=None,
**kwargs):
be = Backend(kwargs.pop('backend', None))
x = be.real_symarray('x', nx)
p = be.real_symarray('p', nparams)
try:
transf = [(transf_cbs[idx][0](xi),
transf_cbs[idx][1](xi))
for idx, xi in enumerate(x)]
except TypeError:
transf = zip(_map2(transf_cbs[0], x), _map2(transf_cbs[1], x))
try:
exprs = cb(x, p, be)
except TypeError:
exprs = _ensure_3args(cb)(x, p, be)
return cls(x, _map2l(pre_adj, exprs), transf, p, backend=be, **kwargs)
|
Generate a TransformedSys instance from a callback
Parameters
----------
cb : callable
Should have the signature ``cb(x, p, backend) -> list of exprs``.
The callback ``cb`` should return *untransformed* expressions.
transf_cbs : pair or iterable of pairs of callables
Callables for forward- and backward-transformations. Each
callable should take a single parameter (expression) and
return a single expression.
nx : int
Number of unknowns.
nparams : int
Number of parameters.
pre_adj : callable, optional
To tweak an expression prior to transformation. Takes a
single argument (expression) and returns a single
rewritten expression.
\\*\\*kwargs :
Keyword arguments passed on to :class:`TransformedSys`. See also
:class:`SymbolicSys` and :class:`pyneqsys.NeqSys`.
Examples
--------
>>> import sympy as sp
>>> transformed = TransformedSys.from_callback(lambda x, p, be: [
... x[0]*x[1] - p[0],
... be.exp(-x[0]) + be.exp(-x[1]) - p[0]**-2
... ], (sp.log, sp.exp), 2, 1)
...
|
def remote_call(request, cls, method, args, kw):
actor = request.actor
name = 'remote_%s' % cls.__name__
if not hasattr(actor, name):
object = cls(actor)
setattr(actor, name, object)
else:
object = getattr(actor, name)
method_name = '%s%s' % (PREFIX, method)
return getattr(object, method_name)(request, *args, **kw)
|
Command for executing remote calls on a remote object
|
def _set_transmaps(self):
if self._std == 'ascii':
self._lower_chars = string.ascii_lowercase
self._upper_chars = string.ascii_uppercase
elif self._std == 'rfc1459':
self._lower_chars = (string.ascii_lowercase +
''.join(chr(i) for i in range(123, 127)))
self._upper_chars = (string.ascii_uppercase +
''.join(chr(i) for i in range(91, 95)))
elif self._std == 'rfc1459-strict':
self._lower_chars = (string.ascii_lowercase +
''.join(chr(i) for i in range(123, 126)))
self._upper_chars = (string.ascii_uppercase +
''.join(chr(i) for i in range(91, 94)))
|
Set translation maps for our standard.
|
def ask_float(msg="Enter a float", dft=None, vld=None, hlp=None):
vld = vld or [float]
return ask(msg, dft=dft, vld=vld, fmt=partial(cast, typ=float), hlp=hlp)
|
Prompts the user for a float.
|
def get_roles(self, groups):
roles = set([])
parentroles = set([])
notroles = set([])
tmp = set([])
usedgroups = {}
unusedgroups = {}
ret = {}
for role in self.roles:
if self._check_member(
role, groups, notroles,
tmp, parentroles, usedgroups):
roles.add(role)
for b in groups:
for g in groups[b]:
if b not in usedgroups or g not in usedgroups[b]:
if b not in unusedgroups:
unusedgroups[b] = set([])
unusedgroups[b].add(g)
ret['roles'] = roles
ret['unusedgroups'] = unusedgroups
return ret
|
get list of roles and list of standalone groups
|
def has_field(cls, field_name):
if super(ModelWithDynamicFieldMixin, cls).has_field(field_name):
return True
try:
cls._get_dynamic_field_for(field_name)
except ValueError:
return False
else:
return True
|
Check if the current class has a field with the name "field_name"
Also handles dynamic fields, returning True if the name matches an
existing dynamic field even if no concrete copy exists for this name.
|
def update_probs(self):
syst_error = 0.05
prior_probs = {'syst': {}, 'rand': {}}
for source, (p, n) in self.prior_counts.items():
if n + p == 0:
continue
prior_probs['syst'][source] = syst_error
prior_probs['rand'][source] = \
1 - min((float(p) / (n + p), 1-syst_error)) - syst_error
subtype_probs = {}
for source, entry in self.subtype_counts.items():
for rule, (p, n) in entry.items():
if n + p == 0:
continue
if source not in subtype_probs:
subtype_probs[source] = {}
subtype_probs[source][rule] = \
1 - min((float(p) / (n + p), 1-syst_error)) - syst_error
super(BayesianScorer, self).update_probs(prior_probs, subtype_probs)
|
Update the internal probability values given the counts.
|
def calculate_up_moves(high_data):
up_moves = [high_data[idx] - high_data[idx-1] for idx in range(1, len(high_data))]
return [np.nan] + up_moves
|
Up Move.
Formula:
UPMOVE = H_t - H_{t-1}
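Usage sketch, assuming the definition above is in scope and ``numpy`` is imported as ``np`` (values are illustrative):
import numpy as np

highs = [10.0, 10.5, 10.25, 11.25]
print(calculate_up_moves(highs))  # [nan, 0.5, -0.25, 1.0]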
|
def remove_root_gradebook(self, gradebook_id):
if self._catalog_session is not None:
return self._catalog_session.remove_root_catalog(catalog_id=gradebook_id)
return self._hierarchy_session.remove_root(id_=gradebook_id)
|
Removes a root gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of a gradebook
raise: NotFound - ``gradebook_id`` is not a root
raise: NullArgument - ``gradebook_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
def _clear_strobes(self):
self['SEQ']['GLOBAL_SHIFT_EN'].setall(False)
self['SEQ']['GLOBAL_CTR_LD'].setall(False)
self['SEQ']['GLOBAL_DAC_LD'].setall(False)
self['SEQ']['PIXEL_SHIFT_EN'].setall(False)
self['SEQ']['INJECTION'].setall(False)
|
Resets the "enable" and "load" output streams to all 0.
|
def undeployed(name,
url='http://localhost:8080/manager',
timeout=180):
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
if not __salt__['tomcat.status'](url, timeout):
ret['comment'] = 'Tomcat Manager does not respond'
ret['result'] = False
return ret
try:
version = __salt__['tomcat.ls'](url, timeout)[name]['version']
ret['changes'] = {'undeploy': version}
except KeyError:
return ret
if __opts__['test']:
ret['result'] = None
return ret
undeploy = __salt__['tomcat.undeploy'](name, url, timeout=timeout)
if undeploy.startswith('FAIL'):
ret['result'] = False
ret['comment'] = undeploy
return ret
return ret
|
Enforce that the WAR will be undeployed from the server
name
The context path to undeploy.
url : http://localhost:8080/manager
The URL of the server with the Tomcat Manager webapp.
timeout : 180
Timeout for HTTP request to the Tomcat Manager.
Example:
.. code-block:: yaml
jenkins:
tomcat.undeployed:
- name: /ran
- require:
- service: application-service
|
def _build_indices(self):
result = {key: OrderedDict() for key in LINES_WITH_ID}
for line in self.lines:
if line.key in LINES_WITH_ID:
result.setdefault(line.key, OrderedDict())
if line.mapping["ID"] in result[line.key]:
warnings.warn(
("Seen {} header more than once: {}, using first" "occurence").format(
line.key, line.mapping["ID"]
),
DuplicateHeaderLineWarning,
)
else:
result[line.key][line.mapping["ID"]] = line
else:
result.setdefault(line.key, [])
result[line.key].append(line)
return result
|
Build indices for the different field types
|
def _prep_binary_content(self):
if not self.data and not self.location and 'Content-Location' not in self.resource.headers.keys():
raise Exception('creating/updating NonRDFSource requires content from self.binary.data, self.binary.location, or the Content-Location header')
elif 'Content-Location' in self.resource.headers.keys():
logger.debug('Content-Location header found, using')
self.delivery = 'header'
elif 'Content-Location' not in self.resource.headers.keys():
if self.location:
self.resource.headers['Content-Location'] = self.location
self.delivery = 'header'
elif self.data:
if isinstance(self.data, io.BufferedIOBase):
logger.debug('detected file-like object')
self.delivery = 'payload'
else:
logger.debug('detected bytes')
self.delivery = 'payload'
|
Sets delivery method of either payload or header
Favors Content-Location header if set
Args:
None
Returns:
None: sets attributes in self.binary and headers
|
def _clear_ignore(endpoint_props):
return dict(
(prop_name, prop_val)
for prop_name, prop_val in six.iteritems(endpoint_props)
if prop_name not in _DO_NOT_COMPARE_FIELDS and prop_val is not None
)
|
Both _clear_dict and _ignore_keys in a single iteration.
|
def uuid(self):
self.open()
uuid = lvm_vg_get_uuid(self.handle)
self.close()
return uuid
|
Returns the volume group uuid.
|
def run_muse(job, tumor_bam, normal_bam, univ_options, muse_options):
if muse_options['chromosomes']:
chromosomes = muse_options['chromosomes']
else:
chromosomes = sample_chromosomes(job, muse_options['genome_fai'])
perchrom_muse = defaultdict()
for chrom in chromosomes:
call = job.addChildJobFn(run_muse_perchrom, tumor_bam, normal_bam, univ_options,
muse_options, chrom, disk=PromisedRequirement(
muse_disk,
tumor_bam['tumor_dna_fix_pg_sorted.bam'],
normal_bam['normal_dna_fix_pg_sorted.bam'],
muse_options['genome_fasta']),
memory='6G')
sump = call.addChildJobFn(run_muse_sump_perchrom, call.rv(), univ_options, muse_options,
chrom,
disk=PromisedRequirement(muse_sump_disk,
muse_options['dbsnp_vcf']),
memory='6G')
perchrom_muse[chrom] = sump.rv()
return perchrom_muse
|
Spawn a MuSE job for each chromosome on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict muse_options: Options specific to MuSE
:return: Dict of results from running MuSE on every chromosome
perchrom_muse:
|- 'chr1': fsID
|- 'chr2' fsID
|
|-...
|
+- 'chrM': fsID
:rtype: dict
|
def eventFilter(self, obj, event):
if obj == self.dataTable and event.type() == QEvent.Resize:
self._resizeVisibleColumnsToContents()
return False
|
Override eventFilter to catch resize event.
|
def get_candidate_votes(self, candidate):
candidate_election = CandidateElection.objects.get(
candidate=candidate, election=self
)
return candidate_election.votes.all()
|
Get all votes attached to a CandidateElection for a Candidate in
this election.
|
def optlist_to_dict(optlist, opt_sep=',', kv_sep='=', strip_quotes=False):
def make_kv(opt):
if kv_sep is not None and kv_sep in opt:
k, v = opt.split(kv_sep, 1)
k = k.strip()
if strip_quotes and v[0] in ('"', "'") and v[-1] == v[0]:
return k, v[1:-1]
else:
return k, v
else:
return opt, True
return dict(make_kv(opt) for opt in optlist.split(opt_sep))
|
Parse an option list into a dictionary.
Takes a list of options separated by ``opt_sep`` and places them into
a dictionary with the default value of ``True``. If ``kv_sep`` option
is specified then key/value options ``key=value`` are parsed. Useful
for parsing options such as mount options in the format
``rw,ro,rsize=32168,xyz``.
Parameters:
optlist (str): String of options to parse.
opt_sep (str): Separater used to split options.
kv_sep (str): If not `None` then `optlist` includes key=value pairs
to be split, and this str is used to split them.
strip_quotes (bool): If set, will remove matching '"' and '"'
characters from start and end of line. No quotes are removed
from inside the string and mismatched quotes are not removed.
Returns:
dict: Returns a dictionary of names present in the list. If `kv_sep`
is not `None` then the values will be the str on the right-hand side
of `kv_sep`. If `kv_sep` is `None` then each key will have a default
value of `True`.
Examples:
>>> optlist = 'rw,ro,rsize=32168,xyz'
>>> optlist_to_dict(optlist)
{'rw': True, 'ro': True, 'rsize': '32168', 'xyz': True}
|
def set(self, uuid, content, encoding="utf-8"):
dest = self.abs_path(uuid)
if not dest.parent.exists():
dest.parent.mkdir(0o775, parents=True)
if hasattr(content, "read"):
content = content.read()
mode = "tw"
if not isinstance(content, str):
mode = "bw"
encoding = None
with dest.open(mode, encoding=encoding) as f:
f.write(content)
|
Store binary content with uuid as key.
:param:uuid: :class:`UUID` instance
:param:content: string, bytes, or any object with a `read()` method
:param:encoding: encoding to use when content is Unicode
|
def get_url(field):
hydrated_path = _get_hydrated_path(field)
base_url = getattr(settings, 'RESOLWE_HOST_URL', 'localhost')
return "{}/data/{}/{}".format(base_url, hydrated_path.data_id, hydrated_path.file_name)
|
Return file's url based on base url set in settings.
|
def available(self):
if not self._schema:
return set()
avail = set(self._schema.__vers_downgraders__.keys())
avail.add(self._schema.__version__)
return avail
|
Returns a set of the available versions.
:returns: A set of integers giving the available versions.
|
def load_subclasses(klass, modules=None):
if modules:
if isinstance(modules, six.string_types):
modules = [modules]
loader = Loader()
loader.load(*modules)
return klass.__subclasses__()
|
Load recursively all subclasses from a module.
Args:
klass (str or list of str): Class whose subclasses we want to load.
modules: List of additional modules or module names that should be
recursively imported in order to find all the subclasses of the
desired class. Default: None
FIXME: This function is kept only for backward compatibility reasons; it
should not be used. A deprecation warning should be raised and it should
be replaced by the ``Loader`` class.
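When no extra modules need importing, the call reduces to ``__subclasses__()``; a tiny self-contained illustration with made-up classes:
class Plugin:
    pass

class CsvPlugin(Plugin):
    pass

class JsonPlugin(Plugin):
    pass

# Direct subclasses of Plugin, as returned by __subclasses__().
print([c.__name__ for c in Plugin.__subclasses__()])  # ['CsvPlugin', 'JsonPlugin']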
|
def _handle_job_without_successors(self, job, irsb, insn_addrs):
ins_addr = job.addr
for stmt_idx, stmt in enumerate(irsb.statements):
if type(stmt) is pyvex.IRStmt.IMark:
ins_addr = stmt.addr + stmt.delta
elif type(stmt) is pyvex.IRStmt.Exit:
successor_jumpkind = stmt.jk
self._update_function_transition_graph(
job.block_id, None,
jumpkind = successor_jumpkind,
ins_addr=ins_addr,
stmt_idx=stmt_idx,
)
successor_jumpkind = irsb.jumpkind
successor_last_ins_addr = insn_addrs[-1]
self._update_function_transition_graph(job.block_id, None,
jumpkind=successor_jumpkind,
ins_addr=successor_last_ins_addr,
stmt_idx=DEFAULT_STATEMENT,
)
|
A block without successors should still be handled so it can be added to the function graph correctly.
:param CFGJob job: The current job that does not have any successors.
:param IRSB irsb: The related IRSB.
:param insn_addrs: A list of instruction addresses of this IRSB.
:return: None
|
def equals(self, data):
if isinstance(data, six.string_types):
return self._add_condition('=', data, types=[int, str])
elif isinstance(data, list):
return self._add_condition('IN', ",".join(map(str, data)), types=[str])
raise QueryTypeError('Expected value of type `str` or `list`, not %s' % type(data))
|
Adds a new `IN` or `=` condition depending on whether a list or a string was provided
:param data: string or list of values
:raise:
- QueryTypeError: if `data` is of an unexpected type
|
def column_list(tables, columns):
columns = set(columns)
foundcols = tz.reduce(lambda x, y: x.union(y), (set(t.columns) for t in tables))
return list(columns.intersection(foundcols))
|
Take a list of tables and a list of column names and return the columns
that are present in the tables.
Parameters
----------
tables : sequence of _DataFrameWrapper or _TableFuncWrapper
Could also be sequence of modified pandas.DataFrames, the important
thing is that they have ``.name`` and ``.columns`` attributes.
columns : sequence of str
The column names of interest.
Returns
-------
cols : list
Lists of column names available in the tables.
|
def _get_host_libc_from_host_compiler(self):
compiler_exe = self.get_options().host_compiler
library_dirs = self._parse_search_dirs.get_compiler_library_dirs(compiler_exe)
libc_crti_object_file = None
for libc_dir_candidate in library_dirs:
maybe_libc_crti = os.path.join(libc_dir_candidate, self._LIBC_INIT_OBJECT_FILE)
if os.path.isfile(maybe_libc_crti):
libc_crti_object_file = maybe_libc_crti
break
if not libc_crti_object_file:
raise self.HostLibcDevResolutionError(
"Could not locate {fname} in library search dirs {dirs} from compiler: {compiler!r}. "
"You may need to install a libc dev package for the current system. "
"For many operating systems, this package is named 'libc-dev' or 'libc6-dev'."
.format(fname=self._LIBC_INIT_OBJECT_FILE, dirs=library_dirs, compiler=compiler_exe))
return HostLibcDev(crti_object=libc_crti_object_file,
fingerprint=hash_file(libc_crti_object_file))
|
Locate the host's libc-dev installation using a specified host compiler's search dirs.
|
def _parse_blkio_metrics(self, stats):
metrics = {
'io_read': 0,
'io_write': 0,
}
for line in stats:
if 'Read' in line:
metrics['io_read'] += int(line.split()[2])
if 'Write' in line:
metrics['io_write'] += int(line.split()[2])
return metrics
|
Parse the blkio metrics.
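A standalone sketch of the same parsing on hypothetical cgroup blkio lines (``major:minor operation bytes``):
def parse_blkio_metrics(stats):
    # Standalone copy of the parser above, for illustration only.
    metrics = {'io_read': 0, 'io_write': 0}
    for line in stats:
        if 'Read' in line:
            metrics['io_read'] += int(line.split()[2])
        if 'Write' in line:
            metrics['io_write'] += int(line.split()[2])
    return metrics

sample = ["8:0 Read 4096", "8:0 Write 8192", "8:16 Read 1024", "Total 13312"]
print(parse_blkio_metrics(sample))  # {'io_read': 5120, 'io_write': 8192}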
|
def _input_as_paths(self, data):
return self._command_delimiter.join(
map(str, map(self._input_as_path, data)))
|
Return data as a space delimited string with each path quoted
data: paths or filenames, most likely as a list of
strings
|
def ang_veltoaxisangledot(angle, axis, Omega):
angle_dot = axis.dot(Omega)
axis_dot = 1/2*(hat_map(axis) - 1/np.tan(angle/2) * hat_map(axis).dot(hat_map(axis))).dot(Omega)
return angle_dot, axis_dot
|
Compute kinematics for axis angle representation
|
def finish(self, chunk=None):
self._log_disconnect()
super(BaseHandler, self).finish(chunk)
|
Tornado `finish` handler
|
def child_cardinality(self, child):
for prop, klassdef in self.c_children.values():
if child == prop:
if isinstance(klassdef, list):
try:
_min = self.c_cardinality["min"]
except KeyError:
_min = 1
try:
_max = self.c_cardinality["max"]
except KeyError:
_max = "unbounded"
return _min, _max
else:
return 1, 1
return None
|
Return the cardinality of a child element
:param child: The name of the child element
:return: The cardinality as a 2-tuple (min, max).
The max value is either a number or the string "unbounded".
The min value is always a number.
|
def dev():
env.roledefs = {
'web': ['192.168.1.2'],
'lb': ['192.168.1.2'],
}
env.user = 'vagrant'
env.backends = env.roledefs['web']
env.server_name = 'django_search_model-dev.net'
env.short_server_name = 'django_search_model-dev'
env.static_folder = '/site_media/'
env.server_ip = '192.168.1.2'
env.no_shared_sessions = False
env.server_ssl_on = False
env.goal = 'dev'
env.socket_port = '8001'
env.map_settings = {}
execute(build_env)
|
Define dev stage
|
def merged_with(self, provider, requirement, parent):
infos = list(self.information)
infos.append(RequirementInformation(requirement, parent))
candidates = [
c for c in self.candidates
if provider.is_satisfied_by(requirement, c)
]
if not candidates:
raise RequirementsConflicted(self)
return type(self)(candidates, infos)
|
Build a new instance from this and a new requirement.
|
def _evaluate(dataset, predictions):
f1_result = exact_match = total = 0
count = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa_pair in paragraph['qas']:
total += 1
if qa_pair['id'] not in predictions:
count += 1
continue
ground_truths = list(map(lambda x: x['text'], qa_pair['answers']))
prediction = predictions[qa_pair['id']]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1_result += metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
print('total', total, 'exact_match', exact_match, 'unanswer_question ', count)
exact_match = 100.0 * exact_match / total
f1_result = 100.0 * f1_result / total
return {'exact_match': exact_match, 'f1': f1_result}
|
Evaluate function.
|
def submitter(self, f):
f = self._wrap_coro_function_with_sem(f)
@wraps(f)
def wrapped(*args, **kwargs):
return self.submit(f(*args, **kwargs))
return wrapped
|
Decorator to submit a coro-function as NewTask to self.loop with sem control.
Use default_callback frequency of loop.
|
def qpop_back(self, name, size=1):
size = get_positive_integer("size", size)
return self.execute_command('qpop_back', name, size)
|
Remove and return the last ``size`` items of the list ``name``
Like **Redis.RPOP**
:param string name: the queue name
:param int size: the length of result
:return: the list of pop elements
:rtype: list
|
def _extract_query(self, redirect_url):
qs = urlparse(redirect_url)
qs = qs.fragment if isinstance(self, ImplicitGrant) else qs.query
query_params = parse_qs(qs)
query_params = {qp: query_params[qp][0] for qp in query_params}
return query_params
|
Extract query parameters from a url.
Parameters
redirect_url (str)
The full URL that the Uber server redirected to after
the user authorized your app.
Returns
(dict)
A dictionary of query parameters.
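The same extraction done by hand on a hypothetical redirect URL, using only the standard library:
from urllib.parse import parse_qs, urlparse

redirect_url = "https://example.com/callback?code=abc123&state=xyz"
qs = urlparse(redirect_url).query
query_params = {k: v[0] for k, v in parse_qs(qs).items()}
print(query_params)  # {'code': 'abc123', 'state': 'xyz'}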
|
def prune(self):
LOG.info('Pruning extra files from scenario ephemeral directory')
safe_files = [
self.config.provisioner.config_file,
self.config.provisioner.inventory_file,
self.config.state.state_file,
] + self.config.driver.safe_files
files = util.os_walk(self.ephemeral_directory, '*')
for f in files:
if not any(sf for sf in safe_files if fnmatch.fnmatch(f, sf)):
os.remove(f)
for dirpath, dirs, files in os.walk(
self.ephemeral_directory, topdown=False):
if not dirs and not files:
os.removedirs(dirpath)
|
Prune the scenario ephemeral directory files and return None.
"safe files" will not be pruned, including the ansible configuration
and inventory used by this scenario, the scenario state file, and
files declared as "safe_files" in the ``driver`` configuration
declared in ``molecule.yml``.
:return: None
|
def mock_lockfile_update(path):
updated_lockfile_contents = {
'package1': '1.2.0'
}
with open(path, 'w+') as f:
f.write(json.dumps(updated_lockfile_contents, indent=4))
return updated_lockfile_contents
|
This is a mock update. In place of this, you might simply shell out
to a command like `yarn upgrade`.
|
def list_folders(kwargs=None, call=None):
if call != 'function':
raise SaltCloudSystemExit(
'The list_folders function must be called with '
'-f or --function.'
)
return {'Folders': salt.utils.vmware.list_folders(_get_si())}
|
List all the folders for this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -f list_folders my-vmware-config
|
def requires_app_credentials(func):
@wraps(func)
def auth_wrapper(self, *args, **kwargs):
client_id, client_secret = self._session.retrieve_client_credentials()
if client_id and client_secret:
return func(self, *args, **kwargs)
else:
from .models import GitHubError
r = generate_fake_error_response(
'{"message": "Requires username/password authentication"}'
)
raise GitHubError(r)
return auth_wrapper
|
Require client_id and client_secret to be associated.
This is used to note and enforce which methods require a client_id and
client_secret to be used.
|
def _do_download(version, download_base, to_dir, download_delay):
py_desig = 'py{sys.version_info[0]}.{sys.version_info[1]}'.format(sys=sys)
tp = 'setuptools-{version}-{py_desig}.egg'
egg = os.path.join(to_dir, tp.format(**locals()))
if not os.path.exists(egg):
archive = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, archive, to_dir)
sys.path.insert(0, egg)
if 'pkg_resources' in sys.modules:
_unload_pkg_resources()
import setuptools
setuptools.bootstrap_install_from = egg
|
Download Setuptools.
|
def stop(self):
log.debug("Stopping periodic task")
stopframe = build_bcm_tx_delete_header(self.can_id_with_flags, self.flags)
send_bcm(self.bcm_socket, stopframe)
|
Send a TX_DELETE message to cancel this task.
This will delete the entry for the transmission of the CAN-message
with the specified can_id CAN identifier. The message length for the command
TX_DELETE is {[bcm_msg_head]} (only the header).
|
def layer(command=None, *args):
'hints the start of a new layer'
if not command:
return eval([['hint', 'layer']])
else:
lst = [['layer']]
for arg in args:
lst.append([command, arg])
lst.append(['layer'])
return eval(lst)
|
hints the start of a new layer
|
def __build_cmd_maps(cls):
cmd_map_all = {}
cmd_map_visible = {}
cmd_map_internal = {}
for name in dir(cls):
obj = getattr(cls, name)
if iscommand(obj):
for cmd in getcommands(obj):
if cmd in cmd_map_all.keys():
raise PyShellError("The command '{}' already has cmd"
" method '{}', cannot register a"
" second method '{}'.".format( \
cmd, cmd_map_all[cmd], obj.__name__))
cmd_map_all[cmd] = obj.__name__
if isvisiblecommand(obj):
cmd_map_visible[cmd] = obj.__name__
if isinternalcommand(obj):
cmd_map_internal[cmd] = obj.__name__
return cmd_map_all, cmd_map_visible, cmd_map_internal
|
Build the mapping from command names to method names.
One command name maps to at most one method.
Multiple command names can map to the same method.
Only used by __init__() to initialize self._cmd_map. MUST NOT be used
elsewhere.
Returns:
A tuple (cmd_map, hidden_cmd_map, internal_cmd_map).
|
def serve(content):
temp_folder = tempfile.gettempdir()
temp_file_name = tempfile.gettempprefix() + str(uuid.uuid4()) + ".html"
temp_file_path = os.path.join(temp_folder, temp_file_name)
save(temp_file_path, content)
webbrowser.open("file://{}".format(temp_file_path))
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
os.remove(temp_file_path)
|
Write content to a temp file and serve it in browser
|
def convert_timespan(timespan):
start, end = timespan.split('-->')
start = convert_timestamp(start)
end = convert_timestamp(end)
return start, end
|
Convert an srt timespan into a start and end timestamp.
|
def getPixels(self):
array = self.toArray()
(width, height, depth) = array.size
for x in range(width):
for y in range(height):
yield Pixel(array, x, y)
|
Return a stream of pixels from current Canvas.
|
def validate(self, ymldata=None, messages=None):
schema_val = self.schema_val(messages)
if len(messages) == 0:
content_val = self.content_val(ymldata, messages)
return schema_val and content_val
|
Validates the Telemetry Dictionary definitions
|
def facts_refresh():
conn = __proxy__['junos.conn']()
ret = {}
ret['out'] = True
try:
conn.facts_refresh()
except Exception as exception:
ret['message'] = 'Execution failed due to "{0}"'.format(exception)
ret['out'] = False
return ret
ret['facts'] = __proxy__['junos.get_serialized_facts']()
try:
__salt__['saltutil.sync_grains']()
except Exception as exception:
log.error('Grains could not be updated due to "%s"', exception)
return ret
|
Reload the facts dictionary from the device. Usually only needed if,
the device configuration is changed by some other actor.
This function will also refresh the facts stored in the salt grains.
CLI Example:
.. code-block:: bash
salt 'device_name' junos.facts_refresh
|
def _periodic_callback(self):
try:
self.notify(self._state)
except Exception:
self._error_callback(*sys.exc_info())
if self._subscriptions:
self._call_later_handle = \
self._loop.call_later(self._interval, self._periodic_callback)
else:
self._state = NONE
self._call_later_handle = None
|
Will be started on first emit
|
def send_and_wait(self, path, message, timeout=0, responder=None):
message.on("response", lambda x,event_origin,source:None, once=True)
if timeout > 0:
ts = time.time()
else:
ts = 0
sent = False
while not message.response_received:
if not sent:
self.send(path, message)
sent = True
if ts:
if time.time() - ts > timeout:
raise exceptions.TimeoutError("send_and_wait(%s)"%path, timeout)
return message.response_message
|
Send a message and block until a response is received. Return response message
|
def as_sparse_array(self, kind=None, fill_value=None, copy=False):
if fill_value is None:
fill_value = self.fill_value
if kind is None:
kind = self.kind
return SparseArray(self.values, sparse_index=self.sp_index,
fill_value=fill_value, kind=kind, copy=copy)
|
Return self as a SparseArray; do not copy by default.
|
def alignment_a(self):
from molmod.transformations import Rotation
new_x = self.matrix[:, 0].copy()
new_x /= np.linalg.norm(new_x)
new_z = np.cross(new_x, self.matrix[:, 1])
new_z /= np.linalg.norm(new_z)
new_y = np.cross(new_z, new_x)
new_y /= np.linalg.norm(new_y)
return Rotation(np.array([new_x, new_y, new_z]))
|
Computes the rotation matrix that aligns the unit cell with the
Cartesian axes, starting with cell vector a.
* a parallel to x
* b in xy-plane with b_y positive
* c with c_z positive
|
def on_controller_change(self, name, value):
if self.__controller != name:
return
self.__controller_on = value
if value:
self._register_service()
else:
self._unregister_service()
|
Called by the instance manager when a controller value has been
modified
:param name: The name of the controller
:param value: The new value of the controller
|
def activate(specifier):
try:
for distro in require(specifier):
distro.activate()
except (VersionConflict, DistributionNotFound):
raise RuntimeError('The installed version of pip is too old; peep '
'requires ' + specifier)
|
Make a compatible version of pip importable. Raise a RuntimeError if we
couldn't.
|
def _sum(ctx, *number):
if len(number) == 0:
raise ValueError("Wrong number of arguments")
result = Decimal(0)
for arg in number:
result += conversions.to_decimal(arg, ctx)
return result
|
Returns the sum of all arguments
|
def _diff_and_summarize(from_csv, to_csv, index_columns, stream=sys.stdout,
sep=',', ignored_columns=None, significance=None):
from_records = list(records.load(from_csv, sep=sep))
to_records = records.load(to_csv, sep=sep)
diff = patch.create(from_records, to_records, index_columns, ignored_columns)
if significance is not None:
diff = patch.filter_significance(diff, significance)
_summarize_diff(diff, len(from_records), stream=stream)
exit_code = (EXIT_SAME
if patch.is_empty(diff)
else EXIT_DIFFERENT)
sys.exit(exit_code)
|
Print a summary of the difference between the two files.
|
def sum(vari, axis=None):
if isinstance(vari, Poly):
core = vari.A.copy()
for key in vari.keys:
core[key] = sum(core[key], axis)
return Poly(core, vari.dim, None, vari.dtype)
return np.sum(vari, axis)
|
Sum the components of a shapeable quantity along a given axis.
Args:
vari (chaospy.poly.base.Poly, numpy.ndarray):
Input data.
axis (int):
Axis over which the sum is taken. By default ``axis`` is None, and
all elements are summed.
Returns:
(chaospy.poly.base.Poly, numpy.ndarray):
Polynomial array with same shape as ``vari``, with the specified
axis removed. If ``vari`` is an 0-d array, or ``axis`` is None,
a (non-iterable) component is returned.
Examples:
>>> vari = cp.prange(3)
>>> print(vari)
[1, q0, q0^2]
>>> print(cp.sum(vari))
q0^2+q0+1
|
def _build_request(request):
msg = bytes([request['cmd']])
if 'dest' in request:
msg += bytes([request['dest']])
else:
msg += b'\0'
if 'sha' in request:
msg += request['sha']
else:
for dummy in range(64):
msg += b'0'
logging.debug("Request (%d): %s", len(msg), msg)
return msg
|
Build message to transfer over the socket from a request.
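A hypothetical request dict of the shape this builder expects (assumes the definition above and the ``logging`` import it relies on):
# Hypothetical payload: one command byte, one destination byte, a 64-byte sha.
request = {'cmd': 2, 'dest': 7, 'sha': b'a' * 64}
msg = _build_request(request)
print(len(msg))  # 66: 1 cmd byte + 1 dest byte + 64 sha bytes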
|
def maybe_parse_user_type(t):
is_type = isinstance(t, type)
is_preserved = isinstance(t, type) and issubclass(t, _preserved_iterable_types)
is_string = isinstance(t, string_types)
is_iterable = isinstance(t, Iterable)
if is_preserved:
return [t]
elif is_string:
return [t]
elif is_type and not is_iterable:
return [t]
elif is_iterable:
ts = t
return tuple(e for t in ts for e in maybe_parse_user_type(t))
else:
raise TypeError(
'Type specifications must be types or strings. Input: {}'.format(t)
)
|
Try to coerce a user-supplied type directive into a list of types.
This function should be used in all places where a user specifies a type,
for consistency.
The policy for what defines valid user input should be clear from the implementation.
|
def linear_trend_timewise(x, param):
ix = x.index
times_seconds = (ix - ix[0]).total_seconds()
times_hours = np.asarray(times_seconds / float(3600))
linReg = linregress(times_hours, x.values)
return [("attr_\"{}\"".format(config["attr"]), getattr(linReg, config["attr"]))
for config in param]
|
Calculate a linear least-squares regression for the values of the time series versus the sequence from 0 to
length of the time series minus one.
This feature uses the index of the time series to fit the model, which must be of a datetime
dtype.
The parameters control which of the characteristics are returned.
Possible extracted attributes are "pvalue", "rvalue", "intercept", "slope", "stderr", see the documentation of
linregress for more information.
:param x: the time series to calculate the feature of. The index must be datetime.
:type x: pandas.Series
:param param: contains dictionaries {"attr": x} with x an string, the attribute name of the regression model
:type param: list
:return: the different feature values
:return type: list
|
def parameterSpace( self ):
ps = self.parameters()
if len(ps) == 0:
return []
else:
return self._crossProduct(ps)
|
Return the parameter space of the experiment as a list of dicts,
with each dict mapping each parameter name to a value.
:returns: the parameter space as a list of dicts
|
def confirm_lock(lockfile):
pidfile = open(lockfile, "r")
pidfile_pid = pidfile.readline().strip()
pidfile.close()
if int(pidfile_pid) != os.getpid():
raise RuntimeError("pidfile %s contains pid %s; expected pid %s!" %
(lockfile, pidfile_pid, os.getpid()))
return True
|
Confirm that the given lockfile contains our pid.
Should be entirely unnecessary, but paranoia always served me well.
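A hypothetical round trip, assuming the corrected definition above and ``os`` imported:
import os
import tempfile

# Write our own pid into a throwaway lockfile, then confirm it.
lockfile = os.path.join(tempfile.gettempdir(), "example.lock")
with open(lockfile, "w") as f:
    f.write("%d\n" % os.getpid())

assert confirm_lock(lockfile) is True
os.remove(lockfile)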
|
def _filters_pb(self):
num_filters = len(self._field_filters)
if num_filters == 0:
return None
elif num_filters == 1:
return _filter_pb(self._field_filters[0])
else:
composite_filter = query_pb2.StructuredQuery.CompositeFilter(
op=enums.StructuredQuery.CompositeFilter.Operator.AND,
filters=[_filter_pb(filter_) for filter_ in self._field_filters],
)
return query_pb2.StructuredQuery.Filter(composite_filter=composite_filter)
|
Convert all the filters into a single generic Filter protobuf.
This may be a lone field filter or unary filter, may be a composite
filter or may be :data:`None`.
Returns:
google.cloud.firestore_v1beta1.types.\
StructuredQuery.Filter: A "generic" filter representing the
current query's filters.
|
def _check_valid_version():
bower_version = _LooseVersion(
__salt__['cmd.run']('bower --version'))
valid_version = _LooseVersion('1.3')
if bower_version < valid_version:
raise CommandExecutionError(
'\'bower\' is not recent enough({0} < {1}). '
'Please Upgrade.'.format(
bower_version, valid_version
)
)
|
Check the version of Bower to ensure this module will work. Currently
bower must be at least version 1.3.
|
def _fulfills_version_spec(version, version_spec):
for oper, spec in version_spec:
if oper is None:
continue
if not salt.utils.versions.compare(ver1=version, oper=oper, ver2=spec, cmp_func=_pep440_version_cmp):
return False
return True
|
Check version number against version specification info and return a
boolean value based on whether or not the version number meets the
specified version.
|
def list_documents(self, page_size=None):
parent, _ = self._parent_info()
iterator = self._client._firestore_api.list_documents(
parent,
self.id,
page_size=page_size,
show_missing=True,
metadata=self._client._rpc_metadata,
)
iterator.collection = self
iterator.item_to_value = _item_to_document_ref
return iterator
|
List all subdocuments of the current collection.
Args:
page_size (Optional[int]]): The maximum number of documents
in each page of results from this request. Non-positive values
are ignored. Defaults to a sensible value set by the API.
Returns:
Sequence[~.firestore_v1beta1.collection.DocumentReference]:
iterator of subdocuments of the current collection. If the
collection does not exist at the time of `snapshot`, the
iterator will be empty
|
def varchar(self, field=None):
assert field is not None, "The field parameter must be passed to the 'varchar' method."
max_length = field.max_length
def source():
length = random.choice(range(1, max_length + 1))
return "".join(random.choice(general_chars) for i in xrange(length))
return self.get_allowed_value(source, field)
|
Returns a chunk of text, of maximum length 'max_length'
|
def getMargin(self, name):
for margin in self._margins:
if margin.getName() == name:
return margin
return None
|
Provides the requested margin.
Returns a reference to the margin if found and None otherwise
|
def print_global_config(global_config):
if global_config.has_section('shell'):
print("\nShell configurations:")
for shell_type, set_value in global_config.items('shell'):
print("{0}: {1}".format(shell_type, set_value))
if global_config.has_option('global', 'env_source_rc'):
print("\nHave sprinter env source rc: {0}".format(
global_config.get('global', 'env_source_rc')))
|
print the global configuration
|
def load(cli, yaml_filename):
with open(yaml_filename, 'rb') as filehandle:
for waybill in yaml.load(filehandle.read()):
cli.create(waybill.name,
waybill.docker_id)
|
Creates waybill shims from a given YAML file's definitions
|
def _writeFile(cls, filePath, content, encoding = None):
filePath = os.path.realpath(filePath)
log.debug(_("Real file path to write: %s" % filePath))
if encoding is None:
encoding = File.DEFAULT_ENCODING
try:
encodedContent = ''.join(content).encode(encoding)
except LookupError as msg:
raise SubFileError(_("Unknown encoding name: '%s'.") % encoding)
except UnicodeEncodeError:
raise SubFileError(
_("There are some characters in '%(file)s' that cannot be encoded to '%(enc)s'.")
% {"file": filePath, "enc": encoding})
tmpFilePath = "%s.tmp" % filePath
bakFilePath = "%s.bak" % filePath
with open(tmpFilePath, 'wb') as f:
f.write(encodedContent)
f.flush()
try:
os.rename(filePath, bakFilePath)
except FileNotFoundError:
pass
os.rename(tmpFilePath, filePath)
try:
os.unlink(bakFilePath)
except FileNotFoundError:
pass
|
Safe file writing. Most common mistakes are checked against and reported before write
operation. After that, if anything unexpected happens, user won't be left without data or
with corrupted one as this method writes to a temporary file and then simply renames it
(which should be atomic operation according to POSIX but who knows how Ext4 really works.
@see: http://lwn.net/Articles/322823/).
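A minimal standalone sketch of the same temp-file-then-rename pattern (``os.replace`` is atomic on POSIX and Windows); the file name is illustrative:
import os

def atomic_write(path, text, encoding="utf-8"):
    # Write to a temporary sibling file, then rename over the target.
    tmp_path = path + ".tmp"
    with open(tmp_path, "wb") as f:
        f.write(text.encode(encoding))
        f.flush()
        os.fsync(f.fileno())
    os.replace(tmp_path, path)

atomic_write("subtitles.txt", "1\n00:00:01,000 --> 00:00:02,000\nHello\n")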
|
def sa_indices(num_states, num_actions):
L = num_states * num_actions
dtype = np.int_
s_indices = np.empty(L, dtype=dtype)
a_indices = np.empty(L, dtype=dtype)
i = 0
for s in range(num_states):
for a in range(num_actions):
s_indices[i] = s
a_indices[i] = a
i += 1
return s_indices, a_indices
|
Generate `s_indices` and `a_indices` for `DiscreteDP`, for the case
where all the actions are feasible at every state.
Parameters
----------
num_states : scalar(int)
Number of states.
num_actions : scalar(int)
Number of actions.
Returns
-------
s_indices : ndarray(int, ndim=1)
Array containing the state indices.
a_indices : ndarray(int, ndim=1)
Array containing the action indices.
Examples
--------
>>> s_indices, a_indices = qe.markov.sa_indices(4, 3)
>>> s_indices
array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3])
>>> a_indices
array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2])
|
def raster_binarization(given_value, rasterfilename):
origin_raster = RasterUtilClass.read_raster(rasterfilename)
binary_raster = numpy.where(origin_raster.data == given_value, 1, 0)
return binary_raster
|
Binarize the raster.
The morphological opening and closing operate on binary images, therefore
the raster needs to be binarized first.
Args:
given_value: The given value's pixels will be value in 1,
other pixels will be value in 0.
rasterfilename: The initial raster filename.
Returns:
binary_raster: Raster after binarization.
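The core of the binarization is a single ``numpy.where`` call; a toy illustration on a made-up grid:
import numpy

data = numpy.array([[1, 2, 2],
                    [2, 3, 1]])
binary = numpy.where(data == 2, 1, 0)
print(binary)
# [[0 1 1]
#  [1 0 0]]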
|