Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
14,300 | def read_mnist_images(filename, dtype=None):
with gzip.open(filename, 'rb') as f:
magic, number, rows, cols = struct.unpack('>iiii', f.read(16))
if magic != MNIST_IMAGE_MAGIC:
raise ValueError("Wrong magic number reading MNIST image file")
array = numpy.frombuffer(f.read(), dtype='uint8')
array = array.reshape((number, 1, rows, cols))
if dtype:
dtype = numpy.dtype(dtype)
if dtype.kind == 'b':
array = array >= 128
elif dtype.kind == 'f':
array = array.astype(dtype)
array /= 255.
else:
raise ValueError("Unknown dtype to convert MNIST to")
return array | Read MNIST images from the original ubyte file format.
Parameters
----------
filename : str
Filename/path from which to read images.
dtype : 'float32', 'float64', or 'bool'
If unspecified, images will be returned in their original
unsigned byte format.
Returns
-------
images : :class:`~numpy.ndarray`, shape (n_images, 1, n_rows, n_cols)
An image array, with individual examples indexed along the
first axis and the image dimensions along the second and
third axis.
Notes
-----
If the dtype provided was Boolean, the resulting array will
be Boolean with `True` if the corresponding pixel had a value
greater than or equal to 128, `False` otherwise.
If the dtype provided was a float dtype, the values will be mapped to
the unit interval [0, 1], with pixel values that were 255 in the
original unsigned byte representation equal to 1.0. |
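A minimal usage sketch for the reader above; the gzipped idx file name is an assumption (any MNIST ubyte archive in the original format applies):

```python
# Hypothetical usage of read_mnist_images (file name is a placeholder).
raw = read_mnist_images("train-images-idx3-ubyte.gz")               # uint8, shape (N, 1, 28, 28)
floats = read_mnist_images("train-images-idx3-ubyte.gz", dtype="float32")
assert floats.min() >= 0.0 and floats.max() <= 1.0                  # mapped to the unit interval
```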
14,301 | def get_user_id(self):
if self._user_id is None:
self._user_id = self.get_user_data()['id']
return self._user_id | Returns "id" of a OneDrive user. |
14,302 | def register(self, model, handler=None):
from permission.handlers import PermissionHandler
if model._meta.abstract:
raise ImproperlyConfigured(
'The model %s is abstract, so it cannot be registered with permission.' % model)
if model in self._registry:
raise KeyError("A permission handler class is already "
"registered for " % model)
if handler is None:
handler = settings.PERMISSION_DEFAULT_PERMISSION_HANDLER
if isstr(handler):
handler = import_string(handler)
if not inspect.isclass(handler):
raise AttributeError(
"`handler` attribute must be a class. "
"An instance was specified.")
if not issubclass(handler, PermissionHandler):
raise AttributeError(
"`handler` attribute must be a subclass of "
"`permission.handlers.PermissionHandler`")
instance = handler(model)
self._registry[model] = instance | Register a permission handler to the model
Parameters
----------
model : django model class
A django model class
handler : permission handler class, string, or None
A permission handler class or a dotted path
Raises
------
ImproperlyConfigured
Raised when the model is an abstract model
KeyError
Raised when the model is already registered in the registry
The model cannot have more than one handler. |
14,303 | def action_is_satisfied(action):
num_consumed_args = _num_consumed_args.get(action, 0)
if action.nargs in [OPTIONAL, ZERO_OR_MORE, REMAINDER]:
return True
if action.nargs == ONE_OR_MORE:
return num_consumed_args >= 1
if action.nargs == PARSER:
return False
if action.nargs is None:
return num_consumed_args == 1
assert isinstance(action.nargs, int), 'unexpected nargs value: %r' % action.nargs
return num_consumed_args == action.nargs | Returns False if the parse would raise an error if no more arguments are given to this action, True otherwise. |
14,304 | def _update_solution_data(self, s):
x = s["x"]
Va = x[self._Va.i1:self._Va.iN + 1]
Vm = x[self._Vm.i1:self._Vm.iN + 1]
Pg = x[self._Pg.i1:self._Pg.iN + 1]
Qg = x[self._Qg.i1:self._Qg.iN + 1]
return Va, Vm, Pg, Qg | Returns the voltage angle and generator set-point vectors. |
14,305 | def split(self, verbose=None, end_in_new_line=None):
elapsed_time = self.get_elapsed_time()
self.split_elapsed_time.append(elapsed_time)
self._cumulative_elapsed_time += elapsed_time
self._elapsed_time = datetime.timedelta()
if verbose is None:
verbose = self.verbose_end
if verbose:
if end_in_new_line is None:
end_in_new_line = self.end_in_new_line
if end_in_new_line:
self.log("{} done in {}".format(self.description, elapsed_time))
else:
self.log(" done in {}".format(elapsed_time))
self._start_time = datetime.datetime.now() | Save the elapsed time of the current split and restart the stopwatch.
The current elapsed time will be appended to :attr:`split_elapsed_time`.
If the stopwatch is paused, then it will remain paused.
Otherwise, it will continue running.
Parameters
----------
verbose : Optional[bool]
Whether to log. If `None`, use `verbose_end` set during initialization.
end_in_new_line : Optional[bool]
Whether to log the `description`. If `None`, use `end_in_new_line` set during
initialization. |
14,306 | def update_timestamps(self, chan_id, ts):
try:
self.last_update[chan_id] = ts
except KeyError:
self.log.warning("Attempted ts update of channel %s, but channel "
"not present anymore.",
self.channel_directory[chan_id]) | Updates the timestamp for the given channel id.
:param chan_id:
:param ts:
:return: |
14,307 | def _create_tokens_for_next_line_dent(self, newline_token):
indent_delta = self._get_next_line_indent_delta(newline_token)
if indent_delta is None or indent_delta == 0:
return None
dent_type = 'INDENT' if indent_delta > 0 else 'DEDENT'
dent_token = _create_token(
dent_type, , newline_token.lineno + 1,
newline_token.lexpos + len(newline_token.value))
tokens = [dent_token] * abs(indent_delta)
self.cur_indent += indent_delta
return MultiToken(tokens) | Starting from a newline token that isn't followed by another newline
token, returns any indent or dedent tokens that immediately follow.
If indentation doesn't change, returns None. |
14,308 | def best_match(self, seqs, scan_rc=True):
self.set_threshold(threshold=0.0)
for matches in self.scan(seqs, 1, scan_rc):
yield [m[0] for m in matches] | give the best match of each motif in each sequence
returns an iterator of nested lists containing tuples:
(score, position, strand) |
14,309 | def zip(*args, **kwargs):
args = [list(iterable) for iterable in args]
n = max(map(len, args))
v = kwargs.get("default", None)
return _zip(*[i + [v] * (n - len(i)) for i in args]) | Returns a list of tuples, where the i-th tuple contains the i-th element
from each of the argument sequences or iterables (or default if too short). |
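For comparison, the same padding behaviour is available in the standard library; this snippet is only an illustration and does not depend on the helper above:

```python
from itertools import zip_longest

# Pads the shorter iterable with None, matching the docstring above.
print(list(zip_longest([1, 2, 3], ["a", "b"], fillvalue=None)))
# [(1, 'a'), (2, 'b'), (3, None)]
```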
14,310 | def check_stops(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
table = "stops"
problems = []
if feed.stops is None:
problems.append(["error", "Missing table", table, []])
else:
f = feed.stops.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
problems = check_column_id(problems, table, f, "stop_id")
for column in ["stop_code", "stop_desc", "zone_id", "parent_station"]:
problems = check_column(
problems, table, f, column, valid_str, column_required=False
)
problems = check_column(problems, table, f, "stop_name", valid_str)
if "location_type" in f.columns:
requires_location = f["location_type"].isin([0, 1, 2])
else:
requires_location = True
for column, bound in [("stop_lon", 180), ("stop_lat", 90)]:
v = lambda x: pd.notnull(x) and -bound <= x <= bound
cond = requires_location & ~f[column].map(v)
problems = check_table(
problems,
table,
f,
cond,
f"{column} out of bounds {[-bound, bound]}",
)
problems = check_column(
problems, table, f, "stop_url", valid_url, column_required=False
)
v = lambda x: x in range(5)
problems = check_column(
problems, table, f, "location_type", v, column_required=False
)
problems = check_column(
problems,
table,
f,
"stop_timezone",
valid_timezone,
column_required=False,
)
v = lambda x: x in range(3)
problems = check_column(
problems, table, f, "wheelchair_boarding", v, column_required=False
)
if "parent_station" in f.columns:
if "location_type" not in f.columns:
problems.append(
[
"error",
"parent_station column present but location_type column missing",
table,
[],
]
)
else:
station_ids = f.loc[
f["parent_station"].notnull(), "parent_station"
]
cond = f["stop_id"].isin(station_ids) & (f["location_type"] != 1)
problems = check_table(
problems, table, f, cond, "A station must have location_type 1"
)
cond = (f["location_type"] == 1) & f["parent_station"].notnull()
problems = check_table(
problems,
table,
f,
cond,
"A station must not lie in another station",
)
cond = (
f["location_type"].isin([2, 3, 4])
& f["parent_station"].isnull()
)
problems = check_table(
problems,
table,
f,
cond,
"Entrances, nodes, and boarding areas must be part of a parent station",
)
if include_warnings:
s = feed.stop_times["stop_id"]
cond = ~feed.stops["stop_id"].isin(s)
problems = check_table(
problems, table, f, cond, "Stop has no stop times", "warning"
)
return format_problems(problems, as_df=as_df) | Analog of :func:`check_agency` for ``feed.stops``. |
14,311 | def init_app(self, app):
if app.config.MONGO_URIS and isinstance(app.config.MONGO_URIS, dict):
self.MONGO_URIS = app.config.MONGO_URIS
self.app = app
else:
raise ValueError(
"nonstandard sanic config MONGO_URIS,MONGO_URIS must be a Dict[dbname,dburl]")
@app.listener("before_server_start")
async def init_mongo_connection(app, loop):
for dbname, dburl in app.config.MONGO_URIS.items():
if isinstance(dburl,str):
db = MongoConnection(dburl,ioloop=loop).db
else:
db = MongoConnection(ioloop=loop,**dburl).db
self.mongodbs[dbname] = db
@app.listener("before_server_stop")
async def sub_close(app, loop):
log.info("mongo connection {numbr}".format(numbr=len(self.mongodbs)))
for dbname,db in self.mongodbs.items():
db.client.close()
log.info("{dbname} connection closed".format(dbname=dbname))
if "extensions" not in app.__dir__():
app.extensions = {}
app.extensions[] = self
app.mongo = self.mongodbs
return self | Bind the app. |
14,312 | def paginate_queryset(self, queryset, request, view=None):
result = super(MultipleModelLimitOffsetPagination, self).paginate_queryset(queryset, request, view)
try:
if self.max_count < self.count:
self.max_count = self.count
except AttributeError:
self.max_count = self.count
try:
self.total += self.count
except AttributeError:
self.total = self.count
return result | adds `max_count` as a running tally of the largest table size. Used for calculating
next/previous links later |
14,313 | def is_interface_up(interface):
if sys.platform.startswith("linux"):
if interface not in psutil.net_if_addrs():
return False
import fcntl
SIOCGIFFLAGS = 0x8913
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
result = fcntl.ioctl(s.fileno(), SIOCGIFFLAGS, interface + '\0' * 256)
flags, = struct.unpack('H', result[16:18])
if flags & 1:
return True
return False
except OSError as e:
raise aiohttp.web.HTTPInternalServerError(text="Exception when checking if {} is up: {}".format(interface, e))
else:
return True | Checks if an interface is up.
:param interface: interface name
:returns: boolean |
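A related cross-check that avoids the raw ioctl, shown only as a sketch (it assumes psutil >= 3.0, which exposes an `isup` flag per interface via `net_if_stats()`):

```python
import psutil

# Map each interface name to whether the OS reports it as up.
up_map = {name: stats.isup for name, stats in psutil.net_if_stats().items()}
print(up_map)
```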
14,314 | def uploadfile(baseurl, filename, format_, token, nonce, cert, method=requests.post):
filehash = sha1sum(filename)
files = {: open(filename, )}
payload = {
: filehash,
: os.path.basename(filename),
: token,
: nonce,
}
return method("%s/sign/%s" % (baseurl, format_), files=files, data=payload, verify=cert) | Uploads file (given by `filename`) to server at `baseurl`.
`session_key` and `nonce` are string values that get passed as POST
parameters. |
14,315 | def _get_magnitude_term(self, C, mag):
if mag >= self.CONSTS["Mh"]:
return C["e1"] + C["b3"] * (mag - self.CONSTS["Mh"])
else:
return C["e1"] + (C["b1"] * (mag - self.CONSTS["Mh"])) +\
(C["b2"] * (mag - self.CONSTS["Mh"]) ** 2.) | Returns the magnitude scaling term - equation 3 |
14,316 | def post(self, object_type, object_id):
if object_id == 0:
return Response(status=404)
tagged_objects = []
for name in request.get_json(force=True):
if ':' in name:
type_name = name.split(':', 1)[0]
type_ = TagTypes[type_name]
else:
type_ = TagTypes.custom
tag = db.session.query(Tag).filter_by(name=name, type=type_).first()
if not tag:
tag = Tag(name=name, type=type_)
tagged_objects.append(
TaggedObject(
object_id=object_id,
object_type=object_type,
tag=tag,
),
)
db.session.add_all(tagged_objects)
db.session.commit()
return Response(status=201) | Add new tags to an object. |
14,317 | def express_route_ports_locations(self):
api_version = self._get_api_version()
if api_version == '2018-08-01':
from .v2018_08_01.operations import ExpressRoutePortsLocationsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | Instance depends on the API version:
* 2018-08-01: :class:`ExpressRoutePortsLocationsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRoutePortsLocationsOperations>` |
14,318 | def render(self, doc, context=None, math_option=False, img_path='',
css_path=CSS_PATH):
if self.wait():
self.doc = doc
self.context = context
self.math_option = math_option
self.img_path = img_path
self.css_path = css_path
self.start() | Start thread to render a given documentation |
14,319 | def get_z(self, var, coords=None):
coords = coords or self.ds.coords
coord = self.get_variable_by_axis(var, 'Z', coords)
if coord is not None:
return coord
zname = self.get_zname(var)
if zname is not None:
return coords.get(zname)
return None | Get the vertical (z-) coordinate of a variable
This method searches for the z-coordinate in the :attr:`ds`. It first
checks whether there is one dimension that holds an ``'axis'``
attribute with 'Z', otherwise it looks whether there is an intersection
between the :attr:`z` attribute and the variables dimensions, otherwise
it returns the coordinate corresponding to the third last dimension of
`var` (or the second last or last if var is two or one-dimensional)
Possible types
--------------
var: xarray.Variable
The variable to get the z-coordinate for
coords: dict
Coordinates to use. If None, the coordinates of the dataset in the
:attr:`ds` attribute are used.
Returns
-------
xarray.Coordinate or None
The z-coordinate or None if no z coordinate could be found |
14,320 | def add_label(self, query_params=None):
list_json = self.fetch_json(
uri_path=self.base_uri + '/labels',
http_method='POST',
query_params=query_params or {}
)
return self.create_label(list_json) | Create a label for a board. Returns a new Label object. |
14,321 | def time_series(
self,
start_date='-30d',
end_date='now',
precision=None,
distrib=None,
tzinfo=None):
start_date = self._parse_date_time(start_date, tzinfo=tzinfo)
end_date = self._parse_date_time(end_date, tzinfo=tzinfo)
if end_date < start_date:
raise ValueError("`end_date` must be greater than `start_date`.")
if precision is None:
precision = (end_date - start_date) / 30
precision = self._parse_timedelta(precision)
if distrib is None:
def distrib(dt): return self.generator.random.uniform(0, precision)
if not callable(distrib):
raise ValueError(
"`distrib` must be a callable. Got {} instead.".format(distrib))
datapoint = start_date
while datapoint < end_date:
dt = timestamp_to_datetime(datapoint, tzinfo)
datapoint += precision
yield (dt, distrib(dt)) | Returns a generator yielding tuples of ``(<datetime>, <value>)``.
The data points will start at ``start_date``, and be at every time interval specified by
``precision``.
``distrib`` is a callable that accepts ``<datetime>`` and returns ``<value>`` |
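A hedged usage sketch of the generator above, assuming it is exposed through a standard `Faker()` instance as part of the date/time provider:

```python
from faker import Faker

fake = Faker()
# Yields (datetime, value) pairs; the default interval is (end - start) / 30.
for dt, value in fake.time_series(start_date="-7d", end_date="now"):
    print(dt.isoformat(), value)
```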
14,322 | def from_sites(cls, sites, charge=None, validate_proximity=False,
to_unit_cell=False):
if len(sites) < 1:
raise ValueError("You need at least one site to construct a %s" %
cls)
prop_keys = []
props = {}
lattice = None
for i, site in enumerate(sites):
if not lattice:
lattice = site.lattice
elif site.lattice != lattice:
raise ValueError("Sites must belong to the same lattice")
for k, v in site.properties.items():
if k not in prop_keys:
prop_keys.append(k)
props[k] = [None] * len(sites)
props[k][i] = v
for k, v in props.items():
if any((vv is None for vv in v)):
warnings.warn("Not all sites have property %s. Missing values "
"are set to None." % k)
return cls(lattice, [site.species for site in sites],
[site.frac_coords for site in sites],
charge=charge,
site_properties=props,
validate_proximity=validate_proximity,
to_unit_cell=to_unit_cell) | Convenience constructor to make a Structure from a list of sites.
Args:
sites: Sequence of PeriodicSites. Sites must have the same
lattice.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
to_unit_cell (bool): Whether to translate sites into the unit
cell.
Returns:
(Structure) Note that missing properties are set as None. |
14,323 | def _terminalSymbolsGenerator(self):
py2 = sys.version[0] < '3'
UPPAs = list(list(range(0xE000,0xF8FF+1)) + list(range(0xF0000,0xFFFFD+1)) + list(range(0x100000, 0x10FFFD+1)))
for i in UPPAs:
if py2:
yield(unichr(i))
else:
yield(chr(i))
raise ValueError("To many input strings.") | Generator of unique terminal symbols used for building the Generalized Suffix Tree.
Unicode Private Use Area U+E000..U+F8FF is used to ensure that terminal symbols
are not part of the input string. |
14,324 | def stream_url(self, item, *, device_id=None, quality='hi', session_token=None):
if device_id is None:
device_id = self.device_id
if in item:
response = self._call(
mc_calls.PodcastEpisodeStreamURL,
item[],
quality=quality,
device_id=device_id
)
elif in item:
response = self._call(
mc_calls.RadioStationTrackStreamURL,
item[],
item[],
session_token,
quality=quality,
device_id=device_id
)
elif in item:
response = self._call(
mc_calls.TrackStreamURL,
item[],
quality=quality,
device_id=device_id
)
elif in item and self.is_subscribed:
response = self._call(
mc_calls.TrackStreamURL,
item[],
quality=quality,
device_id=device_id
)
elif in item:
response = self._call(
mc_calls.TrackStreamURL,
item[],
quality=quality,
device_id=device_id
)
else:
if 'storeId' in item and not self.is_subscribed:
msg = "Can't stream a store song without a subscription."
else:
msg = "Item does not contain a supported ID field."
raise ValueError(msg)
stream_url = response.headers['Location']
return stream_url | Get a URL to stream a podcast episode, library song, station_song, or store song.
Note:
Streaming requires a ``device_id`` from a valid, linked mobile device.
Parameters:
item (str): A podcast episode, library song, station_song, or store song.
A Google Music subscription is required to stream store songs.
device_id (str, Optional): A mobile device ID.
Default: Use ``device_id`` of the :class:`MobileClient` instance.
quality (str, Optional): Stream quality is one of ``'hi'`` (320Kbps), ``'med'`` (160Kbps), or ``'low'`` (128Kbps).
Default: ``'hi'``.
session_token (str): Session token from a station dict required for unsubscribed users to stream a station song.
station['sessionToken'] as returned by :meth:`station` only exists for free accounts.
Returns:
str: A URL to an MP3 file. |
14,325 | def get_filename(self, task, default_ext):
url_path = urlparse(task['file_url'])[2]
extension = url_path.split('.')[-1] if '.' in url_path else default_ext
file_idx = self.fetched_num + self.file_idx_offset
return '{:06d}.{}'.format(file_idx, extension) | Set the path where the image will be saved.
The default strategy is to use an increasing 6-digit number as
the filename. You can override this method if you want to set custom
naming rules. The file extension is kept if it can be obtained from
the url, otherwise ``default_ext`` is used as extension.
Args:
task (dict): The task dict got from ``task_queue``.
Output:
Filename with extension. |
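A sketch of the override described in the docstring, assuming icrawler's `ImageDownloader` as the base class that defines `get_filename`:

```python
from icrawler import ImageDownloader

class PrefixedDownloader(ImageDownloader):
    def get_filename(self, task, default_ext):
        # Keep the default numbering/extension logic, just add a prefix.
        return "img_" + super().get_filename(task, default_ext)
```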
14,326 | def get_consistent_resource(self):
http_client = HttpClient()
response, _ = http_client.get(routes.url(routes.PAYMENT_RESOURCE, resource_id=self.id))
return Payment(**response) | :return a payment that you can trust.
:rtype Payment |
14,327 | def write_grid_tpl(name, tpl_file, suffix, zn_array=None, shape=None,
spatial_reference=None,longnames=False):
if shape is None and zn_array is None:
raise Exception("must pass either zn_array or shape")
elif shape is None:
shape = zn_array.shape
parnme, x, y = [], [], []
with open(tpl_file, 'w') as f:
f.write("ptf ~\n")
for i in range(shape[0]):
for j in range(shape[1]):
if zn_array is not None and zn_array[i, j] < 1:
pname = " 1.0  "
else:
if longnames:
pname = "{0}_i:{0}_j:{1}_{2}".format(name,i,j,suffix)
if spatial_reference is not None:
pname += "_x:{0:10.2E}_y:{1:10.2E}".format(sr.xcentergrid[i,j],
sr.ycentergrid[i,j])
else:
pname = "{0}{1:03d}{2:03d}".format(name, i, j)
if len(pname) > 12:
raise Exception("grid pname too long:{0}".format(pname))
parnme.append(pname)
pname = " ~   {0}   ~ ".format(pname)
if spatial_reference is not None:
x.append(spatial_reference.xcentergrid[i, j])
y.append(spatial_reference.ycentergrid[i, j])
f.write(pname)
f.write("\n")
df = pd.DataFrame({"parnme": parnme}, index=parnme)
if spatial_reference is not None:
df.loc[:, 'x'] = x
df.loc[:, 'y'] = y
df.loc[:, "pargp"] = "{0}{1}".format(suffix.replace(, ), name)
df.loc[:, "tpl"] = tpl_file
return df | write a grid-based template file
Parameters
----------
name : str
the base parameter name
tpl_file : str
the template file to write - include path
zn_array : numpy.ndarray
an array used to skip inactive cells
Returns
-------
df : pandas.DataFrame
a dataframe with parameter information |
14,328 | def abs_timedelta(delta):
if delta.days < 0:
now = _now()
return now - (now + delta)
return delta | Returns an "absolute" value for a timedelta, always representing a
time distance. |
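Quick check of the behaviour described above (a sketch that assumes `abs_timedelta` is importable from the module it lives in):

```python
import datetime

delta = datetime.timedelta(hours=-3)
assert abs_timedelta(delta) == datetime.timedelta(hours=3)                   # sign is dropped
assert abs_timedelta(datetime.timedelta(hours=3)) == datetime.timedelta(hours=3)
```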
14,329 | def attributes_js(cls, attributes):
assign_template =
conditional_template =
code =
for key, attr_path in sorted(attributes.items()):
data_assign = .format(key=key)
attrs = attr_path.split()
obj_name = attrs[0]
attr_getters = .join([.format(attr=attr)
for attr in attrs[1:]])
if obj_name not in [, ]:
assign_str = assign_template.format(
assign=data_assign, obj_name=obj_name, attr_getters=attr_getters
)
code += conditional_template.format(
obj_name=obj_name, assign=assign_str
)
else:
assign_str = .join([data_assign, obj_name, attr_getters, ])
code += assign_str
return code | Generates JS code to look up attributes on JS objects from
an attributes specification dictionary. If the specification
references a plotting particular plotting handle it will also
generate JS code to get the ID of the object.
Simple example (when referencing cb_data or cb_obj):
Input : {'x': 'cb_data.geometry.x'}
Output : data['x'] = cb_data['geometry']['x']
Example referencing plot handle:
Input : {'x0': 'x_range.attributes.start'}
Output : if ((x_range !== undefined)) {
data['x0'] = {id: x_range['id'], value: x_range['attributes']['start']}
} |
14,330 | def linspace(start, stop, num, decimals=18):
start = float(start)
stop = float(stop)
if abs(start - stop) <= 10e-8:
return [start]
num = int(num)
if num > 1:
div = num - 1
delta = stop - start
return [float(("{:." + str(decimals) + "f}").format((start + (float(x) * float(delta) / float(div)))))
for x in range(num)]
return [float(("{:." + str(decimals) + "f}").format(start))] | Returns a list of evenly spaced numbers over a specified interval.
Inspired from Numpy's linspace function: https://github.com/numpy/numpy/blob/master/numpy/core/function_base.py
:param start: starting value
:type start: float
:param stop: end value
:type stop: float
:param num: number of samples to generate
:type num: int
:param decimals: number of significands
:type decimals: int
:return: a list of equally spaced numbers
:rtype: list |
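Example of the evenly spaced output described in the docstring (uses only the pure-Python helper above, no NumPy needed at call time):

```python
print(linspace(0.0, 1.0, 5))
# [0.0, 0.25, 0.5, 0.75, 1.0]
print(linspace(2.0, 2.0, 10))   # start == stop collapses to a single value
# [2.0]
```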
14,331 | def setup_signals(self, ):
self.duplicate_tb.clicked.connect(self.duplicate)
self.delete_tb.clicked.connect(self.delete)
self.load_tb.clicked.connect(self.load)
self.unload_tb.clicked.connect(self.unload)
self.reference_tb.clicked.connect(self.reference)
self.importtf_tb.clicked.connect(self.import_file)
self.importref_tb.clicked.connect(self.import_reference)
self.replace_tb.clicked.connect(self.replace)
self.imported_tb.clicked.connect(partial(self.toggle_tbstyle, button=self.imported_tb))
self.alien_tb.clicked.connect(partial(self.toggle_tbstyle, button=self.alien_tb)) | Connect the signals with the slots to make the ui functional
:returns: None
:rtype: None
:raises: None |
14,332 | def _get_stddevs(self, C, stddev_types, num_sites, mag):
stddevs = []
sigma = (C['c14'] + C['c15'] * mag) if mag < 7.2 else C['c16']
vals = sigma * np.ones((num_sites))
for _ in stddev_types:
stddevs.append(vals)
return stddevs | Returns standard deviation as defined in equation 23, page 2291
(Tavakoli and Pezeshk, 2005) |
14,333 | def sendpfast(x, pps=None, mbps=None, realtime=None, loop=0, file_cache=False, iface=None, verbose=True):
if iface is None:
iface = conf.iface
argv = [conf.prog.tcpreplay, "--intf1=%s" % iface ]
if pps is not None:
argv.append("--pps=%i" % pps)
elif mbps is not None:
argv.append("--mbps=%f" % mbps)
elif realtime is not None:
argv.append("--multiplier=%i" % realtime)
else:
argv.append("--topspeed")
if not verbose:
argv.append("-q")
if loop:
argv.append("--loop=%i" % loop)
if file_cache:
argv.append("--enable-file-cache")
f = get_temp_file()
argv.append(f)
wrpcap(f, x)
with open(os.devnull, "wb") as null:
proc_output = null if not verbose else None
try:
subprocess.check_call(argv,
stdout=proc_output,
stderr=proc_output)
except KeyboardInterrupt:
log_interactive.info("Interrupted by user")
except Exception as e:
log_interactive.error("while trying to exec [%s]: %s" % (argv[0],e))
finally:
os.unlink(f) | Send packets at layer 2 using tcpreplay for performance
pps: packets per second
mbps: MBits per second
realtime: use packet's timestamp, bending time with realtime value
loop: number of times to process the packet list
file_cache: cache packets in RAM instead of reading from disk at each iteration
iface: output interface
verbose: if False, discard tcpreplay output |
14,334 | def extract_common_fields(self, data):
email = None
for curr_email in data.get("emails", []):
email = email or curr_email.get("email")
if curr_email.get("verified", False) and \
curr_email.get("primary", False):
email = curr_email.get("email")
return dict(
email=email,
id=data.get(),
name=data.get(),
first_name=data.get(),
last_name=data.get(),
image_url=data.get()
) | Extract fields from a basic user query. |
14,335 | def sphere_pos(self, x):
c = 0.0
if x[0] < c:
return np.nan
return -c**2 + sum((x + 0)**2) | Sphere (squared norm) test objective function |
14,336 | def get_location(self,callb=None):
if self.location is None:
mypartial=partial(self.resp_set_location)
if callb:
mycallb=lambda x,y:(mypartial(y),callb(x,y))
else:
mycallb=lambda x,y:mypartial(y)
response = self.req_with_resp(GetLocation, StateLocation,callb=mycallb )
return self.location | Convenience method to request the location from the device
This method will check whether the value has already been retrieved from the device,
if so, it will simply return it. If no, it will request the information from the device
and request that callb be executed when a response is received. The default callback
will simply cache the value.
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_label will be used.
:type callb: callable
:returns: The cached value
:rtype: str |
14,337 | def get_host_lock(url):
hostname = get_hostname(url)
return host_locks.setdefault(hostname, threading.Lock()) | Get lock object for given URL host. |
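Typical usage sketch: serialize requests to the same host by holding its lock around the network call (assumes the module-level `host_locks` dict and `get_hostname` helper that the function relies on):

```python
url = "https://example.com/robots.txt"
with get_host_lock(url):
    # Only one thread at a time talks to example.com while this block runs.
    pass  # perform the HTTP request here
```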
14,338 | def merge(self, conflicted, tables=[], diff_only=True):
if os.path.isfile(conflicted):
con, master, reassign = Database(conflicted), self.list("PRAGMA database_list").fetchall()[0][2], {}
con.modify("ATTACH DATABASE AS m".format(master), verbose=False)
self.modify("ATTACH DATABASE AS c".format(conflicted), verbose=False)
con.modify("ATTACH DATABASE AS c".format(conflicted), verbose=False)
self.modify("ATTACH DATABASE AS m".format(master), verbose=False)
for table in tables: self.modify("DROP TABLE IF EXISTS Backup_{0}".format(table), verbose=False)
import socket, datetime
if not diff_only: user = get_input()
machine_name = socket.gethostname()
date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
modified_tables = []
if not isinstance(tables, type(list())):
tables = [tables]
tables = tables or [] + [t for t in zip(*self.list(
"SELECT * FROM sqlite_master WHERE name NOT LIKE AND name!= AND type={}".format(
" AND name IN ({})".format(",") if tables else )))[1] if
t != ]
for table in tables:
metadata = self.query("PRAGMA table_info({})".format(table), fmt=)
columns, types, constraints = [np.array(metadata[n]) for n in [, , ]]
conflicted_cols = con.query("PRAGMA table_info({})".format(table), unpack=True)[1]
if any([i not in columns for i in conflicted_cols]):
print(
"\nMerge of {0} table aborted since conflicted copy has columns {1} not present in master.\nAdd new columns to master with astrodb.table() method and try again.\n".format(
table.upper(), [i for i in conflicted_cols if i not in columns]))
else:
if any([i not in conflicted_cols for i in columns]):
con.modify("DROP TABLE IF EXISTS Conflicted_{0}".format(table))
con.modify("ALTER TABLE {0} RENAME TO Conflicted_{0}".format(table))
con.modify("CREATE TABLE {0} ({1})".format(table, .join( \
[.format(c, t, r, if c == else ) \
for c, t, r in zip(columns, types, constraints * [])])))
con.modify("INSERT INTO {0} ({1}) SELECT {1} FROM Conflicted_{0}".format(table, .join(
conflicted_cols)))
con.modify("DROP TABLE Conflicted_{0}".format(table))
data = map(list, con.list(
"SELECT * FROM (SELECT 1 AS db, {0} FROM m.{2} UNION ALL SELECT 2 AS db, {0} FROM c.{2}) GROUP BY {1} HAVING COUNT(*)=1 AND db=2".format(
.join(columns), .join(columns[1:]), table)).fetchall())
if data:
if diff_only:
pprint(zip(*data)[1:], names=columns, title=.format(table.upper()))
else:
self.modify("DROP TABLE IF EXISTS Backup_{0}".format(table), verbose=False)
self.modify("ALTER TABLE {0} RENAME TO Backup_{0}".format(table), verbose=False)
self.modify("CREATE TABLE {0} ({1})".format(table, .join( \
[.format(c, t, r, if c == else ) \
for c, t, r in zip(columns, types, constraints * [])])), verbose=False)
self.modify(
"INSERT INTO {0} ({1}) SELECT {1} FROM Backup_{0}".format(table, .join(columns)),
verbose=False)
print("\nMerging {} tables.\n".format(table.upper()))
try:
count = self.query("SELECT MAX(id) FROM {}".format(table), fetch=)[0] + 1
except TypeError:
count = 1
for n, i in enumerate([d[1:] for d in data]):
if table == :
reassign[i[0]] = count
elif in columns and i[1] in reassign.keys():
i[1] = reassign[i[1]]
else:
pass
i[0] = count
data[n] = i
count += 1
for d in data: self.modify(
"INSERT INTO {} VALUES({})".format(table, .join([ for c in columns])), d,
verbose=False)
pprint(zip(*data), names=columns,
title="{} records added to {} table at :".format(len(data), table, master))
abort = self.clean_up(table)
if abort:
self.modify("DROP TABLE {0}".format(table), verbose=False)
self.modify("ALTER TABLE Backup_{0} RENAME TO {0}".format(table), verbose=False)
else:
self.modify("DROP TABLE Backup_{0}".format(table), verbose=False)
modified_tables.append(table.upper())
else:
print("\n{} tables identical.".format(table.upper()))
if not diff_only:
user_description = get_input()
self.list("INSERT INTO changelog VALUES(?, ?, ?, ?, ?, ?, ?)", \
(None, date, str(user), machine_name, .join(modified_tables), user_description,
os.path.basename(conflicted)))
if diff_only:
print("\nDiff complete. No changes made to either database. Set `diff_only=False` to apply the merge.")
else:
print("File '{}' not found!".format(conflicted)) | Merges specific **tables** or all tables of **conflicted** database into the master database.
Parameters
----------
conflicted: str
The path of the SQL database to be merged into the master.
tables: list (optional)
The list of tables to merge. If None, all tables are merged.
diff_only: bool
If True, only prints the differences of each table and doesn't actually merge anything. |
14,339 | def validate_data_table(data_table, sed=None):
if isinstance(data_table, Table) or isinstance(data_table, QTable):
data_table = [data_table]
try:
for dt in data_table:
if not isinstance(dt, Table) and not isinstance(dt, QTable):
raise TypeError(
"An object passed as data_table is not an astropy Table!"
)
except TypeError:
raise TypeError(
"Argument passed to validate_data_table is not a table and "
"not a list"
)
def dt_sed_conversion(dt, sed):
f_unit, sedf = sed_conversion(dt["energy"], dt["flux"].unit, sed)
t = Table(dt)
for col in ["flux", "flux_error_lo", "flux_error_hi"]:
t[col].unit = f_unit
ndt = QTable(t)
ndt["flux"] = (dt["flux"] * sedf).to(f_unit)
ndt["flux_error_lo"] = (dt["flux_error_lo"] * sedf).to(f_unit)
ndt["flux_error_hi"] = (dt["flux_error_hi"] * sedf).to(f_unit)
return ndt
data_list = []
for group, dt in enumerate(data_table):
dt_val = _validate_single_data_table(dt, group=group)
data_list.append(dt_val)
data_new = data_list[0].copy()
f_pt = data_new["flux"].unit.physical_type
if sed is None:
sed = f_pt in ["flux", "power"]
data_new = dt_sed_conversion(data_new, sed)
for dt in data_list[1:]:
nf_pt = dt["flux"].unit.physical_type
if ("flux" in nf_pt and "power" in f_pt) or (
"power" in nf_pt and "flux" in f_pt
):
raise TypeError(
"The physical types of the data tables could not be "
"matched: Some are in flux and others in luminosity units"
)
dt = dt_sed_conversion(dt, sed)
for row in dt:
data_new.add_row(row)
return data_new | Validate all columns of a data table. If a list of tables is passed, all
tables will be validated and then concatenated
Parameters
----------
data_table : `astropy.table.Table` or list of `astropy.table.Table`.
sed : bool, optional
Whether to convert the fluxes to SED. If unset, all data tables are
converted to the format of the first data table. |
14,340 | def render_footer(self, ctx, data):
if self.staticContent is None:
return ctx.tag
header = self.staticContent.getFooter()
if header is not None:
return ctx.tag[header]
else:
return ctx.tag | Render any required static content in the footer, from the C{staticContent}
attribute of this page. |
14,341 | def id_lookup(paper_id, idtype):
if idtype not in ('pmid', 'pmcid', 'doi'):
raise ValueError("Invalid idtype %s; must be 'pmid', 'pmcid', "
"or 'doi'." % idtype)
ids = {'doi': None, 'pmid': None, 'pmcid': None}
pmc_id_results = pmc_client.id_lookup(paper_id, idtype)
ids['pmid'] = pmc_id_results.get('pmid')
ids['pmcid'] = pmc_id_results.get('pmcid')
ids['doi'] = pmc_id_results.get('doi')
ids[idtype] = paper_id
if idtype == 'doi':
return ids
if ids.get('pmcid') and ids.get('doi') is None and ids.get('pmid') is None:
logger.warning('%s: PMCID without PMID or DOI' % ids.get('pmcid'))
return ids
assert ids.get('pmid') is not None
assert ids.get('doi') is None
ids['doi'] = crossref_client.doi_query(ids['pmid'])
return ids | Take an ID of type PMID, PMCID, or DOI and lookup the other IDs.
If the DOI is not found in Pubmed, try to obtain the DOI by doing a
reverse-lookup of the DOI in CrossRef using article metadata.
Parameters
----------
paper_id : str
ID of the article.
idtype : str
Type of the ID: 'pmid', 'pmcid', or 'doi'.
Returns
-------
ids : dict
A dictionary with the following keys: pmid, pmcid and doi. |
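A hypothetical call (the identifier is a placeholder; the function performs live lookups against PubMed Central and, if needed, CrossRef):

```python
ids = id_lookup("10.1000/example-doi", "doi")
# -> {'pmid': ..., 'pmcid': ..., 'doi': '10.1000/example-doi'}
```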
14,342 | def get_mime_message(subject, text):
message = MIMEText(
"<html>" +
str(text).replace("\n", "<br>") +
"</html>", "html"
)
message["subject"] = str(subject)
return message | Creates MIME message
:param subject: Subject of email
:param text: Email content
:return: Email formatted as HTML ready to be sent |
14,343 | def get_cache_key(brain_or_object):
key = [
get_portal_type(brain_or_object),
get_id(brain_or_object),
get_uid(brain_or_object),
get_url(brain_or_object),
get_modification_date(brain_or_object).micros(),
]
return "-".join(map(lambda x: str(x), key)) | Generate a cache key for a common brain or object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Cache Key
:rtype: str |
14,344 | def new_portfolio(self, portfolio_cookie=None):
_portfolio = QA_Portfolio(
user_cookie=self.user_cookie,
portfolio_cookie=portfolio_cookie
)
if _portfolio.portfolio_cookie not in self.portfolio_list.keys():
self.portfolio_list[_portfolio.portfolio_cookie] = _portfolio
return _portfolio
else:
print(
" prortfolio with user_cookie ",
self.user_cookie,
" already exist!!"
)
return self.portfolio_list[portfolio_cookie] | Create a portfolio based on self.user_cookie
:return:
If it does not already exist, return the newly created QA_Portfolio;
if it already exists, return the existing portfolio. |
14,345 | def _api_handler(self, *args, **kwargs):
keyword_arguments = {}
keyword_arguments.update(self.keywords)
keyword_arguments.update(kwargs)
return api_handler(*args, **keyword_arguments) | Thin wrapper around api_handler from `indicoio.utils.api` to add in stored keyword argument to the JSON body |
14,346 | def display_grid_scores(grid_scores, top=None):
grid_scores = sorted(grid_scores, key=lambda x: x[1], reverse=True)
if top is not None:
grid_scores = grid_scores[:top]
_, best_mean, best_scores = grid_scores[0]
threshold = best_mean - 2 * sem(best_scores)
for params, mean_score, scores in grid_scores:
append_star = mean_score + 2 * sem(scores) > threshold
print(display_scores(params, scores, append_star=append_star)) | Helper function to format a report on a grid of scores |
14,347 | def _get_nameservers(domain):
nameservers = []
rdtypes_ns = ['SOA', 'NS']
rdtypes_ip = ['A', 'AAAA']
for rdtype_ns in rdtypes_ns:
for rdata_ns in Provider._dns_lookup(domain, rdtype_ns):
for rdtype_ip in rdtypes_ip:
for rdata_ip in Provider._dns_lookup(rdata_ns.to_text().split()[0],
rdtype_ip):
if rdata_ip.to_text() not in nameservers:
nameservers.append(rdata_ip.to_text())
LOGGER.debug('Nameservers for domain %s: %s', domain, ', '.join(nameservers))
return nameservers | Looks for domain nameservers and returns the IPs of the nameservers as a list.
The list is empty if no nameservers were found. The associated domain zone
name is needed for the lookup. |
14,348 | def get_fastq_files(directory, lane, fc_name):
files = glob.glob(os.path.join(directory, "%s_*%s*txt*" % (lane, fc_name)))
files.sort()
if len(files) > 2 or len(files) == 0:
raise ValueError("Did not find correct files for %s %s %s %s" %
(directory, lane, fc_name, files))
ready_files = []
for fname in files:
if fname.endswith(".gz"):
cl = ["gunzip", fname]
subprocess.check_call(cl)
ready_files.append(os.path.splitext(fname)[0])
else:
ready_files.append(fname)
return ready_files[0], (ready_files[1] if len(ready_files) > 1 else None) | Retrieve fastq files for the given lane, ready to process. |
14,349 | def visit_tryfinally(self, node, parent):
newnode = nodes.TryFinally(node.lineno, node.col_offset, parent)
newnode.postinit(
[self.visit(child, newnode) for child in node.body],
[self.visit(n, newnode) for n in node.finalbody],
)
return newnode | visit a TryFinally node by returning a fresh instance of it |
14,350 | def _set_prior(self, prior):
if prior is None:
self._prior = None
else:
try:
_ = prior(self.value)
except:
raise NotCallableOrErrorInCall("Could not call the provided prior. " +
"Is it a function accepting the current value of the parameter?")
try:
prior.set_units(self.unit, u.dimensionless_unscaled)
except AttributeError:
raise NotCallableOrErrorInCall("It looks like the provided prior is not a astromodels function.")
self._prior = prior | Set prior for this parameter. The prior must be a function accepting the current value of the parameter
as input and giving the probability density as output. |
14,351 | def consecutiveness(password, consecutive_length=3):
consec = 0
for i in range(len(password) - consecutive_length):
if all([char.islower() for char in password[i:i+consecutive_length]]):
consec += 1
elif all([char.isupper() for char in password[i:i+consecutive_length]]):
consec += 1
try:
return consec / (len(password) - consecutive_length)
except ZeroDivisionError:
return 0 | Consecutiveness is the enemy of entropy, but makes it easier to remember.
:param str password:
:param int consecutive_length: length of the segment to be uniform to consider loss of entropy
:param int base_length: usual length of the password
:return int: in range 0-1
>>> Complexity.consecutiveness('password')
1.0
>>> Complexity.consecutiveness('PaSsWoRd')
0.0
>>> Complexity.consecutiveness('yio')
0 |
14,352 | def parse_root(raw):
"Efficiently parses the root element of a *raw* XML document, returning a tuple of its qualified name and attribute dictionary."
if sys.version < '3':
fp = StringIO(raw)
else:
fp = BytesIO(raw.encode())
for event, element in etree.iterparse(fp, events=('start',)):
return (element.tag, element.attrib) | Efficiently parses the root element of a *raw* XML document, returning a tuple of its qualified name and attribute dictionary. |
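Illustration of the root-only parse described above (assumes the function's own `etree` and `StringIO`/`BytesIO` imports are present in its module):

```python
tag, attrib = parse_root('<feed version="1.0"><entry/></feed>')
print(tag)      # 'feed'
print(attrib)   # {'version': '1.0'}
```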
14,353 | def taxids(self):
r = self.session.query(distinct(models.Entry.taxid)).all()
return [x[0] for x in r] | Distinct NCBI taxonomy identifiers (``taxid``) in :class:`.models.Entry`
:return: NCBI taxonomy identifiers
:rtype: list[int] |
14,354 | def typed_encode(self, r):
try:
value = r.get('value')
if "json" in r:
value = json2value(r["json"])
elif is_data(value) or value != None:
pass
else:
from mo_logs import Log
raise Log.error("Expecting every record given to have \"value\" or \"json\" property")
_buffer = UnicodeBuilder(1024)
net_new_properties = []
path = []
if is_data(value):
given_id = self.get_id(value)
value[] = None
version = self.get_version(value)
else:
given_id = None
version = None
if given_id:
record_id = r.get('id')
if record_id and record_id != given_id:
from mo_logs import Log
raise Log.error(
"expecting {{property}} of record ({{record_id|quote}}) to match one given ({{given|quote}})",
property=self.id_info,
record_id=record_id,
given=given_id
)
else:
record_id = r.get('id')
if record_id:
given_id = record_id
else:
given_id = random_id()
typed_encode(value, self.schema, path, net_new_properties, _buffer)
json = _buffer.build()
return given_id, version, json
except Exception as e:
from mo_logs import Log
Log.error("Serialization of JSON problems", cause=e) | :param record: expecting id and value properties
:return: dict with id and json properties |
14,355 | def exception_handler(job, *exc_info):
job_info = job.to_dict()
rollbar.report_exc_info(exc_info, extra_data=extra_data, payload_data=payload_data)
return True | Called by RQ when there is a failure in a worker.
NOTE: Make sure that in your RQ worker process, rollbar.init() has been called with
handler='blocking'. The default handler, 'thread', does not work from inside an RQ worker. |
14,356 | def validate(nanopub: dict, error_level: str = "WARNING") -> Tuple[str, str, str]:
v = []
bel_version = config["bel"]["lang"]["default_bel_version"]
try:
if not isinstance(nanopub["nanopub"]["assertions"], list):
msg = "Assertions must be a list/array"
v.append(
{
"level": "Error",
"section": "Structure",
"label": "Error-Structure",
"msg": msg,
"msg_html": msg,
}
)
except Exception as e:
msg = "Missing nanopub.assertions"
v.append(
{
"level": "Error",
"section": "Structure",
"label": "Error-Structure",
"msg": msg,
"msg_html": msg,
}
)
try:
if (
"name" in nanopub["nanopub"]["type"]
and "version" in nanopub["nanopub"]["type"]
):
pass
if nanopub["nanopub"]["type"]["name"].upper() == "BEL":
bel_version = nanopub["nanopub"]["type"]["version"]
except Exception as e:
msg = "Missing nanopub.type.name or nanopub.type.version"
v.append(
{
"level": "Error",
"section": "Structure",
"label": "Error-Structure",
"msg": msg,
"msg_html": msg,
}
)
try:
for key in ["uri", "database", "reference"]:
if key in nanopub["nanopub"]["citation"]:
break
else:
msg = "nanopub.citation must have either a uri, database or reference key"
v.append(
{
"level": "Error",
"section": "Structure",
"label": "Error-Structure",
"msg": msg,
"msg_html": msg,
}
)
except Exception as e:
msg = "Missing nanopub.citation"
v.append(
{
"level": "Error",
"section": "Structure",
"label": "Error-Structure",
"msg": msg,
"msg_html": msg,
}
)
if "assertions" in nanopub["nanopub"]:
for idx, assertion in enumerate(nanopub["nanopub"]["assertions"]):
bo = bel.lang.belobj.BEL(
bel_version, config["bel_api"]["servers"]["api_url"]
)
belstr = f'{assertion.get("subject")} {assertion.get("relation")} {assertion.get("object")}'
belstr = belstr.replace("None", "")
try:
messages = (
bo.parse(belstr)
.semantic_validation(error_level=error_level)
.validation_messages
)
for message in messages:
(level, msg) = message
if error_level == "ERROR":
if level == "ERROR":
v.append(
{
"level": f"{level.title()}",
"section": "Assertion",
"label": f"{level.title()}-Assertion",
"index": idx,
"msg": msg,
"msg_html": convert_msg_to_html(msg),
}
)
else:
v.append(
{
"level": f"{level.title()}",
"section": "Assertion",
"label": f"{level.title()}-Assertion",
"index": idx,
"msg": msg,
"msg_html": convert_msg_to_html(msg),
}
)
except Exception as e:
msg = f"Could not parse: {belstr}"
v.append(
{
"level": "Error",
"section": "Assertion",
"label": "Error-Assertion",
"index": idx,
"msg": msg,
"msg_html": msg,
}
)
log.exception(f"Could not parse: {belstr}")
if error_level == "WARNING":
for idx, annotation in enumerate(nanopub["nanopub"].get("annotations", [])):
term_type = annotation["type"]
term_id = annotation["id"]
log.info(f"Annotation: {term_type} ID: {term_id}")
search_body = {
"_source": ["src_id", "id", "name", "label", "annotation_types"],
"query": {"term": {"id": term_id}},
}
results = es.search(index="terms", doc_type="term", body=search_body)
if len(results["hits"]["hits"]) > 0:
result = results["hits"]["hits"][0]["_source"]
if term_type not in result["annotation_types"]:
msg = f"Annotation type: {term_type} for {term_id} does not match annotation types in database: {result['annotation_types']}"
v.append(
{
"level": "Warning",
"section": "Annotation",
"index": idx,
"label": "Warning-Annotation",
"msg": msg,
"msg_html": msg,
}
)
else:
msg = f"Annotation term: {term_id} not found in database"
v.append(
{
"level": "Warning",
"section": "Annotation",
"index": idx,
"label": "Warning-Annotation",
"msg": msg,
"msg_html": msg,
}
)
return v | Validate Nanopub
Error Levels are similar to log levels - selecting WARNING includes both
WARNING and ERROR, selecting ERROR just includes ERROR
The validation result is a list of objects containing
{
'level': 'Warning|Error',
'section': 'Assertion|Annotation|Structure',
'label': '{Error|Warning}-{Assertion|Annotation|Structure}', # to be used for faceting in Elasticsearch
'index': idx, # Index of Assertion or Annotation in Nanopub - starts at 0
'msg': msg, # Error or Warning message
}
Args:
nanopub: nanopub record starting with nanopub...
level: return WARNING or just ERROR? defaults to warnings and errors
Returns:
list(tuples): [{'level': 'Warning', 'section': 'Assertion', 'label': 'Warning-Assertion', 'index': 0, 'msg': <msg>}] |
14,357 | def createKeyboardTab(self):
_keyboardList = [
, , , , , , , ,
, ,
, , , , , , , ,
, ,
, , , , , , , ,
,
, , , , , , , ,
, ,
]
for keyboard in _keyboardList:
_cpb = ControlPanelButton(self.keyboardTab, self.culebron, self.printOperation, value=keyboard,
text=keyboard[8:],
width=Layout.BUTTON_WIDTH, bg=self.bg, fg=self.fg,
highlightbackground=self.highlightbackground)
_cpb.configure(command=_cpb.command)
_cpb.grid(column=self.childWindow.column, row=self.childWindow.row)
self.tabLayout() | KEYBOARD |
14,358 | def stage_http_response2(self, payload):
if not self._http_response_version and not payload:
return
if self.enabled and self.http_detail_level is not None and \
self.httplogger.isEnabledFor(logging.DEBUG):
if self._http_response_headers:
header_str = \
.join(.format(k, v)
for k, v in self._http_response_headers.items())
else:
header_str =
if self.http_detail_level == :
upayload = ""
elif self.http_maxlen and (len(payload) > self.http_maxlen):
upayload = (_ensure_unicode(payload[:self.http_maxlen]) +
)
else:
upayload = _ensure_unicode(payload)
self.httplogger.debug(,
self._http_response_conn_id,
self._http_response_status,
self._http_response_reason,
self._http_response_version,
header_str, upayload) | Log complete http response, including response1 and payload |
14,359 | def copy_format(self):
row, col, tab = self.grid.actions.cursor
code_array = self.grid.code_array
new_cell_attributes = []
selection = self.get_selection()
if not selection:
selection = Selection([], [], [], [], [(row, col)])
((top, left), (bottom, right)) = \
selection.get_grid_bbox(self.grid.code_array.shape)
cell_attributes = code_array.cell_attributes
for __selection, table, attrs in cell_attributes:
if tab == table:
new_selection = selection & __selection
if new_selection:
new_shifted_selection = new_selection.shifted(-top, -left)
if "merge_area" not in attrs:
selection_params = new_shifted_selection.parameters
cellattribute = selection_params, table, attrs
new_cell_attributes.append(cellattribute)
shifted_new_row_heights = {}
for row, table in code_array.row_heights:
if tab == table and top <= row <= bottom:
shifted_new_row_heights[row-top, table] = \
code_array.row_heights[row, table]
shifted_new_col_widths = {}
for col, table in code_array.col_widths:
if tab == table and left <= col <= right:
shifted_new_col_widths[col-left, table] = \
code_array.col_widths[col, table]
format_data = {
"cell_attributes": new_cell_attributes,
"row_heights": shifted_new_row_heights,
"col_widths": shifted_new_col_widths,
}
attr_string = repr(format_data)
self.grid.main_window.clipboard.set_clipboard(attr_string) | Copies the format of the selected cells to the Clipboard
Cells are shifted so that the top left bbox corner is at 0,0 |
14,360 | def handle_fail_rcs(self, req):
try:
logger.debug("HTTP Status Code: %s", req.status_code)
logger.debug("HTTP Response Text: %s", req.text)
logger.debug("HTTP Response Reason: %s", req.reason)
logger.debug("HTTP Response Content: %s", req.content)
except:
logger.error("Malformed HTTP Request.")
try:
logger.debug("HTTP Response Message: %s", req.json()["message"])
except:
logger.debug("No HTTP Response message present.")
if req.status_code >= 400:
logger.info("Debug Information:\nHTTP Status Code: %s",
req.status_code)
logger.info("HTTP Status Text: %s", req.reason)
if req.status_code == 401:
logger.error("Authorization Required.")
logger.error("Please ensure correct credentials "
"in " + constants.default_conf_file)
logger.debug("HTTP Response Text: %s", req.text)
if req.status_code == 402:
logger.debug()
try:
logger.error(req.json()["message"])
except LookupError:
logger.error("Got 402 but no message")
logger.debug("HTTP Response Text: %s", req.text)
except:
logger.error("Got 402 but no message")
logger.debug("HTTP Response Text: %s", req.text)
if req.status_code == 403 and self.auto_config:
rhsm_hostname = urlparse(self.base_url).hostname
if (rhsm_hostname != and
rhsm_hostname != ):
logger.error(
, rhsm_hostname)
if req.status_code == 412:
try:
unreg_date = req.json()["unregistered_at"]
logger.error(req.json()["message"])
write_unregistered_file(unreg_date)
except LookupError:
unreg_date = "412, but no unreg_date or message"
logger.debug("HTTP Response Text: %s", req.text)
except:
unreg_date = "412, but no unreg_date or message"
logger.debug("HTTP Response Text: %s", req.text)
return True
return False | Bail out if we get a 401 and leave a message |
14,361 | def _to_dict(self):
_dict = {}
if hasattr(self, 'document_retrieval_strategy') and self.document_retrieval_strategy is not None:
_dict['document_retrieval_strategy'] = self.document_retrieval_strategy
return _dict | Return a json dictionary representing this model. |
14,362 | def create(self, message, mid=None, age=60, force=True):
with self.session_lock:
if not hasattr(message, "id"):
message.__setattr__("id", "event-%s" % (uuid.uuid4().hex,))
if self.session_list.get(message.id, None) is not None:
if force is False:
raise SessionError("Message id: %s duplicate!" %
message.id)
else:
message = Message(message.to_dict(), generate_id=True)
session = {
"status": Status.CREATED,
"message": message,
"age": age,
"mid": mid,
"created_at": time(),
"is_published": Event(),
"is_resolved": Event()
}
self.session_list.update({
message.id: session
})
return session | create session
force if you pass `force = False`, it may raise SessionError
due to duplicate message id |
14,363 | def restore_gc_state():
old_isenabled = gc.isenabled()
old_flags = gc.get_debug()
try:
yield
finally:
gc.set_debug(old_flags)
(gc.enable if old_isenabled else gc.disable)() | Restore the garbage collector state on leaving the with block. |
14,364 | def _get_axis_data(self, bunch, dim, cluster_id=None, load_all=None):
if dim in self.attributes:
return self.attributes[dim](cluster_id, load_all=load_all)
masks = bunch.get('masks', None)
assert dim not in self.attributes
s = 'ABCDEFGHIJ'
c_rel = int(dim[:-1])
channel_id = self.channel_ids[c_rel % len(self.channel_ids)]
if channel_id not in bunch.channel_ids:
return None
c = list(bunch.channel_ids).index(channel_id)
d = s.index(dim[-1])
if masks is not None:
masks = masks[:, c]
return Bunch(data=bunch.data[:, c, d],
masks=masks,
) | Extract the points from the data on a given dimension.
bunch is returned by the features() function.
dim is the string specifying the dimensions to extract for the data. |
14,365 | def distb(self, tb=None, file=None):
if tb is None:
try:
tb = sys.last_traceback
except AttributeError:
raise RuntimeError("no last traceback to disassemble")
while tb.tb_next: tb = tb.tb_next
self.disassemble(tb.tb_frame.f_code, tb.tb_lasti, file=file) | Disassemble a traceback (default: last traceback). |
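The standard library's `dis` module offers the same entry point; a small self-contained way to see the disassembly of a failing frame:

```python
import dis
import sys

try:
    1 / 0
except ZeroDivisionError:
    dis.distb(sys.exc_info()[2])   # disassemble the traceback of the failed call
```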
14,366 | def plot_magseries(times,
mags,
magsarefluxes=False,
errs=None,
out=None,
sigclip=30.0,
normto=,
normmingap=4.0,
timebin=None,
yrange=None,
segmentmingap=100.0,
plotdpi=100):
zeros flux value to yield normalized fluxes with 1.0 as the
global median.
- if `normto` is , then the global median flux value
across the entire time series is multiplied with each measurement.
- if `norm` is set to a `float`, then this number is multiplied with the
flux value for each measurement.
errs : np.array or None
If this is provided, contains the measurement errors associated with
each measurement of flux/mag in time-series. Providing this kwarg will
add errbars to the output plot.
out : str or StringIO/BytesIO object or None
Sets the output type and target:
- If `out` is a string, will save the plot to the specified file name.
- If `out` is a StringIO/BytesIO object, will save the plot to that file
handle. This can be useful to carry out additional operations on the
output binary stream, or convert it to base64 text for embedding in
HTML pages.
- If `out` is None, will save the plot to a file called
in the current working directory.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
normto : {, } or a float
Sets the normalization target::
-> norms each mag to the global median of the LC column
-> norms each mag to zero
a float -> norms each mag to this specified float value.
normmingap : float
This defines how much the difference between consecutive measurements is
allowed to be to consider them as parts of different timegroups. By
default it is set to 4.0 days.
timebin : float or None
The bin size to use to group together measurements closer than this
amount in time. This is in seconds. If this is None, no time-binning
will be performed.
yrange : list of two floats or None
This is used to provide a custom y-axis range to the plot. If None, will
automatically determine y-axis range.
segmentmingap : float or None
This controls the minimum length of time (in days) required to consider
a timegroup in the light curve as a separate segment. This is useful
when the light curve consists of measurements taken over several
seasons, so there
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
if timebin and errs is not None:
binned = time_bin_magseries_with_errs(stimes, smags, serrs,
binsize=timebin)
btimes, bmags, berrs = (binned[],
binned[],
binned[])
elif timebin and errs is None:
binned = time_bin_magseries(stimes, smags,
binsize=timebin)
btimes, bmags, berrs = binned[], binned[], None
else:
btimes, bmags, berrs = stimes, smags, serrs
if normto is not False:
btimes, bmags = normalize_magseries(btimes, bmags,
normto=normto,
magsarefluxes=magsarefluxes,
mingap=normmingap)
btimeorigin = btimes.min()
btimes = btimes - btimeorigin
if yrange and isinstance(yrange,(list,tuple)) and len(yrange) == 2:
ymin, ymax = yrange
else:
ycov = bmags.max() - bmags.min()
ymin = bmags.min() - 0.02*ycov
ymax = bmags.max() + 0.02*ycov
ax.get_xaxis().get_major_formatter().set_useOffset(False)
if axind == 0:
ax.get_yaxis().get_major_formatter().set_useOffset(False)
ax.spines[].set_visible(False)
ax.yaxis.tick_left()
elif 0 < axind < (len(axes)-1):
ax.spines[].set_visible(False)
ax.spines[].set_visible(False)
ax.tick_params(right=, labelright=,
left=,labelleft=)
elif axind == (len(axes)-1):
ax.spines[].set_visible(False)
ax.spines[].set_visible(True)
ax.yaxis.tick_right()
if not magsarefluxes:
ax.set_ylim(ymax, ymin)
else:
ax.set_ylim(ymin, ymax)
tgrange = tgtimes.max() - tgtimes.min()
if tgrange < 10.0:
ticklocations = [tgrange/2.0]
ax.set_xlim(npmin(tgtimes) - 0.5, npmax(tgtimes) + 0.5)
elif 10.0 <= tgrange < 30.0:
ticklocations = np.linspace(tgtimes.min()+5.0,
tgtimes.max()-5.0,
num=2)
ax.set_xlim(npmin(tgtimes) - 2.0, npmax(tgtimes) + 2.0)
elif 30.0 <= tgrange < 100.0:
ticklocations = np.linspace(tgtimes.min()+10.0,
tgtimes.max()-10.0,
num=3)
ax.set_xlim(npmin(tgtimes) - 2.5, npmax(tgtimes) + 2.5)
else:
ticklocations = np.linspace(tgtimes.min()+20.0,
tgtimes.max()-20.0,
num=3)
ax.set_xlim(npmin(tgtimes) - 3.0, npmax(tgtimes) + 3.0)
ax.xaxis.set_ticks([int(x) for x in ticklocations])
plt.subplots_adjust(wspace=0.07)
fig.text(0.5, 0.00, %
(btimeorigin, segmentmingap), ha=)
if not magsarefluxes:
fig.text(0.02, 0.5, , va=, rotation=)
else:
fig.text(0.02, 0.5, , va=, rotation=)
else:
fig = plt.figure()
fig.set_size_inches(7.5,4.8)
plt.errorbar(btimes, bmags, fmt=, yerr=berrs,
markersize=2.0, markeredgewidth=0.0, ecolor=,
capsize=0)
plt.grid(color=,
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=)
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)
plt.xlabel( % btimeorigin)
if not magsarefluxes:
plt.ylim(ymax, ymin)
plt.ylabel()
else:
plt.ylim(ymin, ymax)
plt.ylabel()
if sys.version_info[:2] < (3,0):
is_Strio = isinstance(out, cStringIO.InputType)
else:
is_Strio = isinstance(out, Strio)
if out and not is_Strio:
if out.endswith():
plt.savefig(out,bbox_inches=,dpi=plotdpi)
else:
plt.savefig(out,bbox_inches=)
plt.close()
return os.path.abspath(out)
elif out and is_Strio:
plt.savefig(out, bbox_inches=, dpi=plotdpi, format=)
return out
elif not out and dispok:
plt.show()
plt.close()
return
else:
LOGWARNING(
)
outfile =
plt.savefig(outfile,bbox_inches=,dpi=plotdpi)
plt.close()
return os.path.abspath(outfile) | This plots a magnitude/flux time-series.
Parameters
----------
times,mags : np.array
The mag/flux time-series to plot as a function of time.
magsarefluxes : bool
Indicates if the input `mags` array is actually an array of flux
measurements instead of magnitude measurements. If this is set to True,
then the plot y-axis will be set as appropriate for mag or fluxes. In
addition:
- if `normto` is 'zero', then the median flux is divided from each
observation's flux value to yield normalized fluxes with 1.0 as the
global median.
- if `normto` is 'globalmedian', then the global median flux value
across the entire time series is multiplied with each measurement.
- if `normto` is set to a `float`, then this number is multiplied with the
flux value for each measurement.
errs : np.array or None
If this is provided, contains the measurement errors associated with
each measurement of flux/mag in time-series. Providing this kwarg will
add errbars to the output plot.
out : str or StringIO/BytesIO object or None
Sets the output type and target:
- If `out` is a string, will save the plot to the specified file name.
- If `out` is a StringIO/BytesIO object, will save the plot to that file
handle. This can be useful to carry out additional operations on the
output binary stream, or convert it to base64 text for embedding in
HTML pages.
- If `out` is None, will save the plot to a file called
'magseries-plot.png' in the current working directory.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
normto : {'globalmedian', 'zero'} or a float
Sets the normalization target::
'globalmedian' -> norms each mag to the global median of the LC column
'zero' -> norms each mag to zero
a float -> norms each mag to this specified float value.
normmingap : float
This defines how much the difference between consecutive measurements is
allowed to be to consider them as parts of different timegroups. By
default it is set to 4.0 days.
timebin : float or None
The bin size to use to group together measurements closer than this
amount in time. This is in seconds. If this is None, no time-binning
will be performed.
yrange : list of two floats or None
This is used to provide a custom y-axis range to the plot. If None, will
automatically determine y-axis range.
segmentmingap : float or None
This controls the minimum length of time (in days) required to consider
a timegroup in the light curve as a separate segment. This is useful
when the light curve consists of measurements taken over several
seasons, so there's lots of dead space in the plot that can be cut out
to zoom in on the interesting stuff. If `segmentmingap` is not None, the
magseries plot will be cut in this way and the x-axis will show these
breaks.
plotdpi : int
Sets the resolution in DPI for PNG plots (default = 100).
Returns
-------
str or BytesIO/StringIO object
Returns based on the input:
- If `out` is a str or None, the path to the generated plot file is
returned.
- If `out` is a StringIO/BytesIO object, will return the
StringIO/BytesIO object to which the plot was written. |
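A side note on the `sigclip` semantics documented above: for magnitudes, a dimming is a larger value and a brightening a smaller one, so an asymmetric clip keeps points between median - bright_sigma*stdev and median + dim_sigma*stdev. The sketch below only illustrates that rule from the docstring; it is not the library's own sigclip_magseries, and the plain standard deviation is an assumption (a robust estimator may be used in practice).

import numpy as np

def keep_mask_for_mags(mags, dim_sigma=10.0, bright_sigma=3.0):
    # larger magnitude = fainter (dimming), smaller magnitude = brighter
    med, std = np.nanmedian(mags), np.nanstd(mags)
    return (mags < med + dim_sigma * std) & (mags > med - bright_sigma * std)

mags = np.array([12.00, 12.01, 11.99, 12.05, 10.00])
print(keep_mask_for_mags(mags))   # boolean keep-mask over the input points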
14,367 | def frequency_psd_from_qd(self, tau0=1.0):
a = self.b + 2.0
return self.qd*2.0*pow(2.0*np.pi, a)*pow(tau0, a-1.0) | return frequency power spectral density coefficient h_a
for the noise type defined by (qd, b, tau0)
Colored noise generated with (qd, b, tau0) parameters will
show a frequency power spectral density of
S_y(f) = Frequency_PSD(f) = h_a * f^a
where the slope a comes from the phase PSD slope b:
a = b + 2
Kasdin & Walter eqn (39) |
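The coefficient above is just Kasdin & Walter's relation between the phase-PSD slope b and the frequency-PSD slope a = b + 2; a standalone restatement of the same arithmetic (no class state needed):

import numpy as np

def frequency_psd_coefficient(qd, b, tau0=1.0):
    # h_a = 2 * qd * (2*pi)^a * tau0^(a-1), with a = b + 2
    a = b + 2.0
    return qd * 2.0 * (2.0 * np.pi) ** a * tau0 ** (a - 1.0)

# white frequency noise: b = -2 gives a = 0, so h_0 = 2 * qd / tau0
print(frequency_psd_coefficient(qd=1e-20, b=-2, tau0=1.0))   # 2e-20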
14,368 | def positionToIntensityUncertainty(image, sx, sy, kernelSize=None):
psf_is_const = not isinstance(sx, np.ndarray)
if not psf_is_const:
assert image.shape == sx.shape == sy.shape, \
"Image and position uncertainty maps need to have same size"
if kernelSize is None:
kernelSize = _kSizeFromStd(max(sx.max(), sy.max()))
else:
assert type(sx) in (int, float) and type(sy) in (int, float), \
"Image and position uncertainty values need to be int OR float"
if kernelSize is None:
kernelSize = _kSizeFromStd(max(sx, sy))
if image.dtype.kind == :
image = image.astype(int)
size = kernelSize // 2
if size < 1:
size = 1
kernelSize = 1 + 2 * size
psf = np.zeros((kernelSize, kernelSize))
sint = np.zeros(image.shape)
if psf_is_const:
_calc_constPSF(image, sint, sx, sy, psf, size)
else:
_calc_variPSF(image, sint, sx, sy, psf, size)
return sint | calculates the estimated standard deviation map from the changes
of neighbouring pixels from a center pixel within a point spread function
defined by a std.dev. in x and y taken from the (sx, sy) maps
sx,sy -> either 2d array of same shape as [image]
or single values |
14,369 | def get_env_credential(env=):
url = .join([API_URL, , env])
credential_response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
assert credential_response.ok,
credential = credential_response.json()
LOG.debug(, credential)
return credential | Get Account Credential from Spinnaker for *env*.
Args:
env (str): Environment name to find credentials for.
Returns:
dict: Complete credentials for *env*::
{
'accountId': '123098123',
'accountType': 'dev',
'assumeRole': 'role/spinnakerManaged',
'bastionEnabled': False,
'challengeDestructiveActions': False,
'cloudProvider': 'aws',
'defaultKeyPair': 'dev_access',
'discoveryEnabled': False,
'eddaEnabled': False,
'environment': 'dev',
'front50Enabled': False,
'name': 'dev',
'primaryAccount': False,
'provider': 'aws',
'regions': [
{
'availabilityZones': ['us-east-1b', 'us-east-1c',
'us-east-1d', 'us-east-1e'],
'deprecated': False,
'name': 'us-east-1',
'preferredZones':
['us-east-1b', 'us-east-1c', 'us-east-1d', 'us-east-1e'
]
}, {
'availabilityZones':
['us-west-2a', 'us-west-2b', 'us-west-2c'],
'deprecated': False,
'name': 'us-west-2',
'preferredZones':
['us-west-2a', 'us-west-2b', 'us-west-2c']
}
],
'requiredGroupMembership': [],
'sessionName': 'Spinnaker',
'type': 'aws'
} |
14,370 | def references(self, env, object_name, model, assoc_class,
result_class_name, role, result_role, keys_only):
pass | Instrument Associations.
All four association-related operations (Associators, AssociatorNames,
References, ReferenceNames) are mapped to this method.
This method is a python generator
Keyword arguments:
env -- Provider Environment (pycimmb.ProviderEnvironment)
object_name -- A pywbem.CIMInstanceName that defines the source
CIM Object whose associated Objects are to be returned.
model -- A template pywbem.CIMInstance to serve as a model
of the objects to be returned. Only properties present on this
model need to be set.
assoc_class -- The pywbem.CIMClass.
result_class_name -- If not empty, this string acts as a filter on
the returned set of Instances by mandating that each returned
Instances MUST represent an association between object_name
and an Instance of a Class whose name matches this parameter
or a subclass.
role -- If not empty, MUST be a valid Property name. It acts as a
filter on the returned set of Instances by mandating that each
returned Instance MUST refer to object_name via a Property
whose name matches the value of this parameter.
result_role -- If not empty, MUST be a valid Property name. It acts
as a filter on the returned set of Instances by mandating that
each returned Instance MUST represent associations of
object_name to other Instances, where the other Instances play
the specified result_role in the association (i.e. the
name of the Property in the Association Class that refers to
the Object related to object_name MUST match the value of this
parameter).
keys_only -- A boolean. True if only the key properties should be
set on the generated instances.
The following diagram may be helpful in understanding the role,
result_role, and result_class_name parameters.
+------------------------+ +-------------------+
| object_name.classname | | result_class_name |
| ~~~~~~~~~~~~~~~~~~~~~ | | ~~~~~~~~~~~~~~~~~ |
+------------------------+ +-------------------+
| +-----------------------------------+ |
| | [Association] assoc_class | |
| object_name | ~~~~~~~~~~~~~~~~~~~~~~~~~ | |
+--------------+ object_name.classname REF role | |
(CIMInstanceName) | result_class_name REF result_role +------+
| |(CIMInstanceName)
+-----------------------------------+
Possible Errors:
CIM_ERR_ACCESS_DENIED
CIM_ERR_NOT_SUPPORTED
CIM_ERR_INVALID_NAMESPACE
CIM_ERR_INVALID_PARAMETER (including missing, duplicate, unrecognized
or otherwise incorrect parameters)
CIM_ERR_FAILED (some other unspecified error occurred) |
14,371 | def nvm_primer():
print( +
cyan())
print( +
cyan() + +
cyan() + +
cyan() +
+
cyan() +
)
print( +
cyan())
print( +
cyan())
print() | Getting started with nvm (cf. https://github.com/creationix/nvm#usage). |
14,372 | def _create_keywords_wizard_action(self):
icon = resources_path(, , )
self.action_keywords_wizard = QAction(
QIcon(icon),
self.tr(),
self.iface.mainWindow())
self.action_keywords_wizard.setStatusTip(self.tr(
))
self.action_keywords_wizard.setWhatsThis(self.tr(
))
self.action_keywords_wizard.setEnabled(False)
self.action_keywords_wizard.triggered.connect(
self.show_keywords_wizard)
self.add_action(self.action_keywords_wizard, add_to_legend=True) | Create action for keywords creation wizard. |
14,373 | def clean_videos(self):
if self.videos:
self.videos = [int(v) for v in self.videos if v is not None and is_valid_digit(v)] | Validates that all values in the video list are integer ids and removes all None values. |
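A quick illustration of the cleanup above; str(v).isdigit() stands in for the undefined is_valid_digit helper, which is an assumption about its behaviour:

videos = ["12", 7, None, "abc", "3"]
cleaned = [int(v) for v in videos if v is not None and str(v).isdigit()]
print(cleaned)   # [12, 7, 3]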
14,374 | def _sweep(self):
while self.running:
for am in list(self.activeMeasurements):
now = datetime.datetime.utcnow()
recordingDeviceCount = len(am.recordingDevices)
if recordingDeviceCount > 0:
if all(entry[] == RecordStatus.COMPLETE.name for entry in am.recordingDevices.values()):
logger.info("Detected completed measurement " + am.id)
self._moveToComplete(am)
if now > (am.endTime + datetime.timedelta(days=0, seconds=1)):
allFailed = all(entry[] == RecordStatus.FAILED.name
for entry in am.recordingDevices.values())
if (recordingDeviceCount > 0 and allFailed) or recordingDeviceCount == 0:
logger.warning("Detected failed measurement " + am.id + " with " + str(recordingDeviceCount)
+ " devices, allFailed: " + str(allFailed))
self._moveToFailed(am)
if now > (am.endTime + datetime.timedelta(days=0, seconds=self.maxTimeTilDeathbedSeconds)):
if any(entry[] == RecordStatus.FAILED.name for entry in am.recordingDevices.values()):
logger.warning("Detected failed and incomplete measurement " + am.id + ", assumed dead")
self._moveToFailed(am)
elif all(entry[] == RecordStatus.RECORDING.name for entry in am.recordingDevices.values()):
self._handleDeathbed(am)
time.sleep(0.1)
logger.warning("MeasurementCaretaker is now shutdown") | Checks the state of each active measurement: a measurement that is now complete is moved to the
completed set, a failed one to the failed set, and a failed measurement that is also old is evicted.
:return: |
14,375 | def getFreeEnergyDifferences(self, compute_uncertainty=True, uncertainty_method=None, warning_cutoff=1.0e-10, return_theta=False):
f_i = np.matrix(self.f_k)
Deltaf_ij = f_i - f_i.transpose()
self._zerosamestates(Deltaf_ij)
returns = []
returns.append(np.array(Deltaf_ij))
if compute_uncertainty or return_theta:
Theta_ij = self._computeAsymptoticCovarianceMatrix(
np.exp(self.Log_W_nk), self.N_k, method=uncertainty_method)
if compute_uncertainty:
diag = Theta_ij.diagonal()
d2DeltaF = diag + diag.transpose() - 2 * Theta_ij
self._zerosamestates(d2DeltaF)
if np.any(d2DeltaF < 0.0):
# warn only if a negative squared uncertainty exceeds warning_cutoff in magnitude
if np.any(d2DeltaF < -warning_cutoff):
print("A squared uncertainty is negative. min(d2DeltaF) = %e" % d2DeltaF[d2DeltaF < 0.0].min())
else:
# otherwise treat the small negative values as numerical noise and zero them
d2DeltaF[d2DeltaF < 0.0] = 0.0
dDeltaf_ij = np.sqrt(d2DeltaF)
returns.append(np.array(dDeltaf_ij))
if (return_theta):
returns.append(np.array(Theta_ij))
return returns | Get the dimensionless free energy differences and uncertainties among all thermodynamic states.
Parameters
----------
compute_uncertainty : bool, optional
If False, the uncertainties will not be computed (default: True)
uncertainty_method : string, optional
Choice of method used to compute asymptotic covariance method,
or None to use default. See help for computeAsymptoticCovarianceMatrix()
for more information on various methods. (default: svd)
warning_cutoff : float, optional
Warn if squared-uncertainty is negative and larger in magnitude
than this number (default: 1.0e-10)
return_theta : bool, optional
Whether or not to return the theta matrix. Can be useful for complicated differences.
Returns
-------
Deltaf_ij : np.ndarray, float, shape=(K, K)
Deltaf_ij[i,j] is the estimated free energy difference
dDeltaf_ij : np.ndarray, float, shape=(K, K)
dDeltaf_ij[i,j] is the estimated statistical uncertainty
(one standard deviation) in Deltaf_ij[i,j]
Notes
-----
Computation of the covariance matrix may take some time for large K.
The reported statistical uncertainty should, in the asymptotic limit, reflect one standard deviation for the normal distribution of the estimate.
The true free energy difference should fall within the interval [-df, +df] centered on the estimate 68% of the time, and within
the interval [-2 df, +2 df] centered on the estimate 95% of the time.
This will break down in cases where the number of samples is not large enough to reach the asymptotic normal limit.
See Section III of Reference [1].
Examples
--------
>>> from pymbar import testsystems
>>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn')
>>> mbar = MBAR(u_kn, N_k)
>>> [Deltaf_ij, dDeltaf_ij] = mbar.getFreeEnergyDifferences() |
14,376 | def emit(self, record):
try:
if self.max_messages:
p = self.redis_client.pipeline()
p.rpush(self.key, self.format(record))
p.ltrim(self.key, -self.max_messages, -1)
p.execute()
else:
self.redis_client.rpush(self.key, self.format(record))
except redis.RedisError:
pass | Publish record to redis logging list |
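A minimal sketch of how a handler built around this emit method could be wired into the logging module; the RedisListHandler name and constructor arguments are assumptions for illustration, not the library's actual API:

import logging
import redis

class RedisListHandler(logging.Handler):
    # hypothetical wrapper around the emit() shown above
    def __init__(self, key, redis_client, max_messages=None):
        super().__init__()
        self.key = key
        self.redis_client = redis_client
        self.max_messages = max_messages

    def emit(self, record):
        try:
            if self.max_messages:
                p = self.redis_client.pipeline()
                p.rpush(self.key, self.format(record))
                p.ltrim(self.key, -self.max_messages, -1)   # cap the list length
                p.execute()
            else:
                self.redis_client.rpush(self.key, self.format(record))
        except redis.RedisError:
            pass   # never let logging take the application down

log = logging.getLogger("app")
log.addHandler(RedisListHandler("app:logs", redis.Redis(), max_messages=1000))
log.warning("kept in a capped Redis list")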
14,377 | def monkey_patch():
ozmq = __import__()
ozmq.Socket = zmq.Socket
ozmq.Context = zmq.Context
ozmq.Poller = zmq.Poller
ioloop = __import__()
ioloop.Poller = zmq.Poller | Monkey patches `zmq.Context` and `zmq.Socket`
If test_suite is True, the pyzmq test suite will be patched for
compatibility as well. |
14,378 | def advance_job_status(namespace: str, job: Job, duration: float,
err: Optional[Exception]):
duration = human_duration(duration)
if not err:
job.status = JobStatus.SUCCEEDED
logger.info(, job, duration)
return
if job.should_retry:
job.status = JobStatus.NOT_SET
job.retries += 1
if isinstance(err, RetryException) and err.at is not None:
job.at = err.at
else:
job.at = (datetime.now(timezone.utc) +
exponential_backoff(job.retries))
signals.job_schedule_retry.send(namespace, job=job, err=err)
log_args = (
job.retries, job.max_retries + 1, job, duration,
human_duration(
(job.at - datetime.now(tz=timezone.utc)).total_seconds()
)
)
if isinstance(err, RetryException):
logger.info(
, *log_args)
else:
logger.warning(
, *log_args)
return
job.status = JobStatus.FAILED
signals.job_failed.send(namespace, job=job, err=err)
logger.error(
,
job.max_retries + 1, job.max_retries + 1, job, duration,
exc_info=err
) | Advance the status of a job depending on its execution.
This function is called after a job has been executed. It calculates its
next status and calls the appropriate signals. |
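exponential_backoff() is referenced above but not shown; a plausible illustrative implementation (an assumption, not necessarily what the library ships) returns a jittered timedelta that grows with the retry count:

import random
from datetime import timedelta

def exponential_backoff(attempt, base=3.0, cap=1200.0):
    # full jitter over an exponentially growing window, capped at `cap` seconds
    window = min(cap, base * (2 ** attempt))
    return timedelta(seconds=random.uniform(0, window))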
14,379 | def insert(self):
self.default_val = 0
return self.interface.insert(
self.schema,
self.fields
) | persist the .fields |
14,380 | def _get_leftMargin(self):
bounds = self.bounds
if bounds is None:
return None
xMin, yMin, xMax, yMax = bounds
return xMin | This must return an int or float.
If the glyph has no outlines, this must return `None`.
Subclasses may override this method. |
14,381 | def cbday_roll(self):
cbday = CustomBusinessDay(n=self.n, normalize=False, **self.kwds)
if self._prefix.endswith():
roll_func = cbday.rollforward
else:
roll_func = cbday.rollback
return roll_func | Define default roll function to be called in apply method. |
14,382 | def colored_level_name(self, levelname):
if self.colors_disabled:
return self.plain_levelname_format.format(levelname)
else:
return self.colored_levelname_format.format(self.color_map[levelname], levelname) | Colors the logging level in the logging record |
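The method above relies on a color_map and two levelname format strings defined elsewhere on the formatter; a plausible setup using ANSI escape codes (these exact values are assumptions for illustration):

color_map = {
    "DEBUG": "\033[36m", "INFO": "\033[32m", "WARNING": "\033[33m",
    "ERROR": "\033[31m", "CRITICAL": "\033[35m",
}
colored_levelname_format = "{}{}\033[0m"   # color code, levelname, then reset
plain_levelname_format = "{}"

print(colored_levelname_format.format(color_map["ERROR"], "ERROR"))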
14,383 | def create_port(self, context, network_id, port_id, **kwargs):
LOG.info("create_port %s %s %s" % (context.tenant_id, network_id,
port_id))
if not kwargs.get():
raise IronicException(msg=)
base_net_driver = kwargs[]
if not kwargs.get():
raise IronicException(msg=)
device_id = kwargs[]
if not kwargs.get():
raise IronicException(msg=)
instance_node_id = kwargs[]
if not kwargs.get():
raise IronicException(msg=)
mac_address = str(netaddr.EUI(kwargs["mac_address"]["address"]))
mac_address = mac_address.replace(, )
if kwargs.get():
msg =
raise IronicException(msg=msg)
fixed_ips = []
addresses = kwargs.get()
if not isinstance(addresses, list):
addresses = [addresses]
for address in addresses:
fixed_ips.append(self._make_fixed_ip_dict(context, address))
body = {
"id": port_id,
"network_id": network_id,
"device_id": device_id,
"device_owner": kwargs.get(, ),
"tenant_id": context.tenant_id or "quark",
"roles": context.roles,
"mac_address": mac_address,
"fixed_ips": fixed_ips,
"switch:hardware_id": instance_node_id,
"dynamic_network": not STRATEGY.is_provider_network(network_id)
}
net_info = self._get_base_network_info(
context, network_id, base_net_driver)
body.update(net_info)
try:
LOG.info("creating downstream port: %s" % (body))
port = self._create_port(context, body)
LOG.info("created downstream port: %s" % (port))
return {"uuid": port[][],
"vlan_id": port[][]}
except Exception as e:
msg = "failed to create downstream port. Exception: %s" % (e)
raise IronicException(msg=msg) | Create a port.
:param context: neutron api request context.
:param network_id: neutron network id.
:param port_id: neutron port id.
:param kwargs:
required keys - device_id: neutron port device_id (instance_id)
instance_node_id: nova hypervisor host id
mac_address: neutron port mac address
base_net_driver: the base network driver
optional keys - addresses: list of allocated IPAddress models
security_groups: list of associated security groups
:raises IronicException: If the client is unable to create the
downstream port for any reason, the exception will be logged
and IronicException raised. |
14,384 | def series_to_yaml_safe(series, ordered=False):
index = series.index.to_native_types(quoting=True)
values = series.values.tolist()
if ordered:
return OrderedDict(
tuple((k, v)) for k, v in zip(index, values))
else:
return {i: v for i, v in zip(index, values)} | Convert a pandas Series to a dict that will survive YAML serialization
and re-conversion back to a Series.
Parameters
----------
series : pandas.Series
ordered: bool, optional, default False
If True, an OrderedDict is returned.
Returns
-------
safe : dict or OrderedDict |
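A round-trip sketch of the helper above, assuming series_to_yaml_safe is in scope, PyYAML is installed, and a pandas version where Index.to_native_types is still available:

import pandas as pd
import yaml

s = pd.Series([1.5, 2.5, 3.0], index=["a", "b", "c"])
safe = series_to_yaml_safe(s)                       # plain dict of native types
restored = pd.Series(yaml.safe_load(yaml.safe_dump(safe)))
print(restored.sort_index())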
14,385 | def drawItem(self, item, painter, option):
dataset = item.dataset()
painter.save()
painter.setRenderHint(painter.Antialiasing)
pen = QPen(dataset.color())
pen.setWidth(0.75)
painter.setPen(pen)
for path in item.buildData(, []):
gradient = QLinearGradient()
clr = QColor(dataset.color())
clr.setAlpha(220)
gradient.setColorAt(0.0, clr.lighter(180))
gradient.setColorAt(0.1, clr.lighter(160))
gradient.setColorAt(0.25, clr.lighter(140))
gradient.setColorAt(1.0, clr.lighter(125))
if self.orientation() == Qt.Vertical:
gradient.setStart(0, path.boundingRect().bottom())
gradient.setFinalStop(0, path.boundingRect().top())
else:
gradient.setStart(path.boundingRect().left(), 0)
gradient.setFinalStop(path.boundingRect().right(), 0)
painter.setBrush(gradient)
painter.drawPath(path)
painter.restore() | Draws the inputed item as a bar graph.
:param item | <XChartDatasetItem>
painter | <QPainter>
option | <QStyleOptionGraphicsItem> |
14,386 | def negative_gradient(self, y, y_pred, sample_weight=None, **kwargs):
pred_time = y[] - y_pred.ravel()
mask = (pred_time > 0) | y[]
ret = numpy.zeros(y[].shape[0])
ret[mask] = pred_time.compress(mask, axis=0)
if sample_weight is not None:
ret *= sample_weight
return ret | Negative gradient of partial likelihood
Parameters
---------
y : tuple, len = 2
First element is boolean event indicator and second element survival/censoring time.
y_pred : np.ndarray, shape=(n,):
The predictions. |
14,387 | def firmware_manifest_retrieve(self, manifest_id, **kwargs):
kwargs[] = True
if kwargs.get():
return self.firmware_manifest_retrieve_with_http_info(manifest_id, **kwargs)
else:
(data) = self.firmware_manifest_retrieve_with_http_info(manifest_id, **kwargs)
return data | Get a manifest # noqa: E501
Retrieve a firmware manifest. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.firmware_manifest_retrieve(manifest_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str manifest_id: The firmware manifest ID (required)
:return: FirmwareManifest
If the method is called asynchronously,
returns the request thread. |
14,388 | def validate(self, input_string):
parsed_url = urlparse(url=input_string)
return bool(parsed_url.scheme and parsed_url.netloc) | Validate url
:return: True if match / False otherwise |
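The check above boils down to "both a scheme and a network location were parsed"; a standalone version with a couple of cases:

from urllib.parse import urlparse

def is_valid_url(s):
    parsed = urlparse(s)
    return bool(parsed.scheme and parsed.netloc)

print(is_valid_url("https://example.com/path"))   # True
print(is_valid_url("example.com"))                # False: no scheme
print(is_valid_url("mailto:user@example.com"))    # False: no netloc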
14,389 | def approve(group_id, user_id):
membership = Membership.query.get_or_404((user_id, group_id))
group = membership.group
if group.can_edit(current_user):
try:
membership.accept()
except Exception as e:
flash(str(e), )
return redirect(url_for(, group_id=membership.group.id))
flash(_(,
user=membership.user.email,
name=membership.group.name), )
return redirect(url_for(, group_id=membership.group.id))
flash(
_(
,
group_name=group.name
),
)
return redirect(url_for()) | Approve a user. |
14,390 | def pattern_match(pattern, string):
def backtrack(pattern, string, dic):
if len(pattern) == 0 and len(string) > 0:
return False
if len(pattern) == len(string) == 0:
return True
for end in range(1, len(string)-len(pattern)+2):
if pattern[0] not in dic and string[:end] not in dic.values():
dic[pattern[0]] = string[:end]
if backtrack(pattern[1:], string[end:], dic):
return True
del dic[pattern[0]]
elif pattern[0] in dic and dic[pattern[0]] == string[:end]:
if backtrack(pattern[1:], string[end:], dic):
return True
return False
return backtrack(pattern, string, {}) | :type pattern: str
:type string: str
:rtype: bool |
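Usage of the backtracking matcher above; the `not in dic.values()` check enforces a bijection between pattern letters and substrings:

assert pattern_match("abab", "redblueredblue")       # a -> "red", b -> "blue"
assert pattern_match("aaaa", "asdasdasdasd")         # a -> "asd"
assert not pattern_match("aabb", "xyzabcxzyabc")     # no consistent bijection
print("pattern checks passed")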
14,391 | async def _get_packet_from_stream(self, stream, existing_data, got_first_packet=True, psml_structure=None):
if self.use_json:
packet, existing_data = self._extract_packet_json_from_data(existing_data,
got_first_packet=got_first_packet)
else:
packet, existing_data = self._extract_tag_from_data(existing_data)
if packet:
if self.use_json:
packet = packet_from_json_packet(packet)
else:
packet = packet_from_xml_packet(packet, psml_structure=psml_structure)
return packet, existing_data
new_data = await stream.read(self.DEFAULT_BATCH_SIZE)
existing_data += new_data
if not new_data:
raise EOFError()
return None, existing_data | A coroutine which returns a single packet if it can be read from the given StreamReader.
:return a tuple of (packet, remaining_data). The packet will be None if there was not enough XML data to create
a packet. remaining_data is the leftover data which was not enough to create a packet from.
:raises EOFError if EOF was reached. |
14,392 | def dev_parameters_vs_axis(dnaRef, dnaSubj, parameter, bp, axis=, bp_range=True, windows=10, err_type=, tool=):
RefParam, ref_bp_idx = dnaRef.get_parameters(parameter, bp, bp_range)
RefAxis, dummy = dnaRef.get_parameters(
.format(axis), bp, bp_range)
SubjParam, subj_bp_idx = dnaSubj.get_parameters(parameter, bp, bp_range)
mean_axis = np.mean(RefAxis, axis=1)
meanRefParam = np.mean(RefParam, axis=1)
meanSubjParam = np.mean(SubjParam, axis=1)
maxAxis = np.amax(mean_axis)
minAxis = np.amin(mean_axis)
axis_range = (maxAxis - minAxis) / windows
Ref_param_error = get_error(dnaRef.time, RefParam, len(ref_bp_idx), err_type=err_type, tool=tool)
Ref_axis_error = get_error(dnaRef.time, RefAxis, len(ref_bp_idx), err_type=err_type, tool=tool)
subj_param_error = get_error(dnaSubj.time, SubjParam, len(subj_bp_idx), err_type=err_type, tool=tool)
merged_ref_param = []
merged_subj_Param = []
merged_Ref_param_error = []
merged_Ref_axis_error = []
merged_subj_param_error = []
final_axis = []
for i in range(windows):
start = minAxis + (i * axis_range)
end = start + axis_range
idx = []
for j in range(len(mean_axis)):
if((start <= mean_axis[j]) and (end > mean_axis[j])):
idx.append(j)
if(len(idx) > 0):
merged_ref_param.append(meanRefParam[idx])
merged_subj_Param.append(meanSubjParam[idx])
final_axis.append(start + (end - start) / 2)
merged_Ref_param_error.append(Ref_param_error[idx])
merged_Ref_axis_error.append(Ref_axis_error[idx])
merged_subj_param_error.append(subj_param_error[idx])
final_ref_param = []
final_subj_param = []
final_ref_param_error = []
final_ref_axis_error = []
final_subj_param_error = []
for i in range(len(merged_ref_param)):
final_ref_param.append(np.sum(merged_ref_param[i]))
final_subj_param.append(np.sum(merged_subj_Param[i]))
final_ref_axis_error.append(
np.sqrt((merged_Ref_axis_error[i]**2).sum()))
final_ref_param_error.append(
np.sqrt((merged_Ref_param_error[i]**2).sum()))
final_subj_param_error.append(
np.sqrt((merged_subj_param_error[i]**2).sum()))
deviation, error = get_deviation(
final_ref_param, final_ref_param_error, final_subj_param, final_subj_param_error)
return deviation, error, final_axis, final_ref_axis_error | To calculate deviation in the given parameters of a Subject DNA to Reference DNA along the given axis.
.. note:: Deviation = Reference_DNA(parameter) - Subject_DNA(parameter)
.. warning::
To calculate errors by using ``error = 'acf'`` or ``error = 'block'``,
GROMACS tool ``g_analyze`` or ``gmx analyze`` should be present in ``$PATH``.
Parameters
----------
dnaRef : :class:`DNA`
Reference DNA
dnaSubj : :class:`DNA`
Subject DNA. Number of base-pairs in Reference and Subject DNA **should be** same.
parameter : str
Name of a base-pair or base-step or helical base-step parameter
For details about accepted keywords, see ``parameter`` in the method :meth:`DNA.get_parameters`.
bp : 1D list or array
base-pairs to analyze
Example: ::
bp = [6] # bp_range = False
bp = [4,15] # bp_range = True
bp = range(4,15) # bp_range = False
bp = np.arange(4,15) # bp_range = False
bp = [2,5,6,7,9,12,18] # bp_range = False
bp_range : bool
``Default=True``: As shown above, if ``True``, bp is taken as a range otherwise list or numpy array.
axis : str
Axis along which DNA axis is parallel. Keywords: ``X``, ``Y`` and ``Z``.
windows : int
Number of bins along the axis
err_type : str
Method of error estimation.
Currently accepted method as follows:
* ``error = 'std'`` : Standard Deviation
* ``error = 'acf'`` : Standard error using autocorrelation time (requires: ``g_analyze`` or ``gmx analyze``)
* ``error = 'block'`` : Standard error using block averaging method (requires: ``g_analyze`` or ``gmx analyze``)
tool : str
Gromacs tool ``g_analyze`` or ``gmx analyze`` or ``gmx_mpi analyze`` etc.
It will be used to calculate autocorrelation time or block averaging error.
It should be present in ``$PATH``
Returns
-------
deviation : 1D array
length of no. of windows; Deviation in the parameter for two given DNA
deviation_error : 1D array
length of no. of windows; Standard error in deviation for each window/bin
axis : 1D array
length of no. of windows; average position of window/bin along given axis
axis_error : 1D array
length of no. of windows; Standard error in average position of window/bin along given axis |
14,393 | def apply(self, func, axis=0, subset=None, **kwargs):
self._todo.append((lambda instance: getattr(instance, ),
(func, axis, subset), kwargs))
return self | Apply a function column-wise, row-wise, or table-wise,
updating the HTML representation with the result.
Parameters
----------
func : function
``func`` should take a Series or DataFrame (depending
on ``axis``), and return an object with the same shape.
Must return a DataFrame with identical index and
column labels when ``axis=None``
axis : {0 or 'index', 1 or 'columns', None}, default 0
apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
Notes
-----
The output shape of ``func`` should match the input, i.e. if
``x`` is the input row, column, or table (depending on ``axis``),
then ``func(x).shape == x.shape`` should be true.
This is similar to ``DataFrame.apply``, except that ``axis=None``
applies the function to the entire DataFrame at once,
rather than column-wise or row-wise.
Examples
--------
>>> def highlight_max(x):
... return ['background-color: yellow' if v == x.max() else ''
for v in x]
...
>>> df = pd.DataFrame(np.random.randn(5, 2))
>>> df.style.apply(highlight_max) |
14,394 | def debit(self, amount, credit_account, description, debit_memo="", credit_memo="", datetime=None):
assert amount >= 0
return self.post(amount, credit_account, description, self_memo=debit_memo, other_memo=credit_memo, datetime=datetime) | Post a debit of 'amount' and a credit of -amount against this account and credit_account respectively.
note amount must be non-negative. |
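In double-entry terms, the call posts +amount against this account and -amount against credit_account, so the two legs always net to zero. A toy illustration of that invariant (just the arithmetic, not the real Account/post API):

balances = {"cash": 0, "revenue": 0}
amount = 100
balances["cash"] += amount        # debit leg on this account
balances["revenue"] -= amount     # matching credit leg
assert sum(balances.values()) == 0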
14,395 | def masked_rec_array_to_mgr(data, index, columns, dtype, copy):
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
index = get_names_from_index(fdata)
if index is None:
index = ibase.default_index(len(data))
index = ensure_index(index)
if columns is not None:
columns = ensure_index(columns)
arrays, arr_columns = to_arrays(fdata, columns)
new_arrays = []
for fv, arr, col in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype)
if copy:
mgr = mgr.copy()
return mgr | Extract from a masked rec array and create the manager. |
14,396 | def find_card_bundles(provider: Provider, deck: Deck) -> Optional[Iterator]:
if isinstance(provider, RpcNode):
if deck.id is None:
raise Exception("deck.id required to listtransactions")
p2th_account = provider.getaccount(deck.p2th_address)
batch_data = [(, [i["txid"], 1]) for
i in provider.listtransactions(p2th_account)]
result = provider.batch(batch_data)
if result is not None:
raw_txns = [i[] for i in result if result]
else:
raise EmptyP2THDirectory({: })
else:
if deck.p2th_address is None:
raise Exception("deck.p2th_address required to listtransactions")
try:
raw_txns = (provider.getrawtransaction(i, 1) for i in
provider.listtransactions(deck.p2th_address))
except TypeError:
raise EmptyP2THDirectory({: })
return (card_bundler(provider, deck, i) for i in raw_txns) | each blockchain transaction can contain multiple cards,
wrapped in bundles. This method finds and returns those bundles. |
14,397 | def Synchronized(f):
@functools.wraps(f)
def NewFunction(self, *args, **kw):
with self.lock:
return f(self, *args, **kw)
return NewFunction | Synchronization decorator. |
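A minimal usage sketch; the decorator assumes the instance exposes a self.lock, and Synchronized (with functools imported) is assumed to be in scope:

import threading

class Counter:
    def __init__(self):
        self.lock = threading.Lock()   # required by the decorator
        self.value = 0

    @Synchronized
    def increment(self):
        self.value += 1                # runs with self.lock held

c = Counter()
c.increment()
print(c.value)   # 1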
14,398 | def compare_dicts(old=None, new=None):
ret = {}
for key in set((new or {})).union((old or {})):
if key not in old:
ret[key] = {: ,
: new[key]}
elif key not in new:
ret[key] = {: ,
: old[key]}
elif new[key] != old[key]:
ret[key] = {: old[key],
: new[key]}
return ret | Compare before and after results from various salt functions, returning a
dict describing the changes that were made. |
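An illustrative restatement of the diff logic with explicit keys; the string keys are stripped in the dump above, so 'old'/'new' here is an assumption about the intended labels:

def diff_dicts(old, new):
    old, new = old or {}, new or {}
    out = {}
    for key in set(new) | set(old):
        if key not in old:
            out[key] = {"old": "", "new": new[key]}        # added key
        elif key not in new:
            out[key] = {"new": "", "old": old[key]}        # removed key
        elif new[key] != old[key]:
            out[key] = {"old": old[key], "new": new[key]}  # changed value
    return out

print(diff_dicts({"a": 1, "b": 2}, {"b": 3, "c": 4}))
# entries for "a" (removed), "b" (changed) and "c" (added)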
14,399 | def imbtree(ntips, treeheight=1.0):
rtree = toytree.tree()
rtree.treenode.add_child(name="0")
rtree.treenode.add_child(name="1")
for i in range(2, ntips):
cherry = toytree.tree()
cherry.treenode.add_child(name=str(i))
cherry.treenode.add_child(rtree.treenode)
rtree = cherry
tre = toytree.tree(rtree.write(tree_format=9))
tre = tre.mod.make_ultrametric()
self = tre.mod.node_scale_root_height(treeheight)
self._coords.update()
return self | Return an imbalanced (comb-like) tree topology. |