Unnamed: 0 (int64, 0–389k) | code (stringlengths 26–79.6k) | docstring (stringlengths 1–46.9k)
---|---|---
20,000 | def dock_json_has_plugin_conf(self, plugin_type, plugin_name):
try:
self.dock_json_get_plugin_conf(plugin_type, plugin_name)
return True
except (KeyError, IndexError):
return False | Check whether a plugin is configured. |
20,001 | def add_location_timezone_to_device(self, device_obj, location, timezone):
dictify_device_meta(device_obj)
device_obj[][][][] = location
device_obj[][][][] = location
device_obj[][][][] = timezone
device_obj[][][][] = timezone
return self.update_device(device_obj) | Returns 'device object' with updated location
http://docs.exosite.com/portals/#update-device
http://docs.exosite.com/portals/#device-object |
20,002 | def plate_rate_mc(pole1_plon, pole1_plat, pole1_kappa, pole1_N, pole1_age, pole1_age_error,
pole2_plon, pole2_plat, pole2_kappa, pole2_N, pole2_age, pole2_age_error,
ref_loc_lon, ref_loc_lat, samplesize=10000, random_seed=None, plot=True,
savefig=True, save_directory=, figure_name=):
ref_loc = [ref_loc_lon, ref_loc_lat]
pole1 = (pole1_plon, pole1_plat)
pole1_paleolat = 90 - pmag.angle(pole1, ref_loc)
pole2 = (pole2_plon, pole2_plat)
pole2_paleolat = 90 - pmag.angle(pole2, ref_loc)
print("The paleolatitude for ref_loc resulting from pole 1 is:" +
str(pole1_paleolat))
print("The paleolatitude for ref_loc resulting from pole 2 is:" +
str(pole2_paleolat))
rate = old_div(((pole1_paleolat - pole2_paleolat) * 111 *
100000), ((pole1_age - pole2_age) * 1000000))
print("The rate of paleolatitudinal change implied by the poles pairs in cm/yr is:" + str(rate))
if random_seed != None:
np.random.seed(random_seed)
pole1_MCages = np.random.normal(pole1_age, pole1_age_error, samplesize)
pole2_MCages = np.random.normal(pole2_age, pole2_age_error, samplesize)
plt.hist(pole1_MCages, 100, histtype=,
color=, label=)
plt.hist(pole2_MCages, 100, histtype=,
color=, label=)
plt.xlabel()
plt.ylabel()
plt.legend(loc=3)
if savefig == True:
plot_extension =
plt.savefig(save_directory + figure_name + plot_extension)
plt.show()
pole1_MCpoles = []
pole1_MCpole_lat = []
pole1_MCpole_long = []
pole1_MCpaleolat = []
for n in range(samplesize):
vgp_samples = []
for vgp in range(pole1_N):
direction_atN = pmag.fshdev(pole1_kappa)
tilt_direction = pole1_plon
tilt_amount = 90 - pole1_plat
direction = pmag.dotilt(
direction_atN[0], direction_atN[1], tilt_direction, tilt_amount)
vgp_samples.append([direction[0], direction[1], 1.])
mean = pmag.fisher_mean(vgp_samples)
mean_pole_position = (mean[], mean[])
pole1_MCpoles.append([mean[], mean[], 1.])
pole1_MCpole_lat.append(mean[])
pole1_MCpole_long.append(mean[])
paleolat = 90 - pmag.angle(mean_pole_position, ref_loc)
pole1_MCpaleolat.append(paleolat[0])
pole2_MCpoles = []
pole2_MCpole_lat = []
pole2_MCpole_long = []
pole2_MCpaleolat = []
for n in range(samplesize):
vgp_samples = []
for vgp in range(pole2_N):
direction_atN = pmag.fshdev(pole2_kappa)
tilt_direction = pole2_plon
tilt_amount = 90 - pole2_plat
direction = pmag.dotilt(
direction_atN[0], direction_atN[1], tilt_direction, tilt_amount)
vgp_samples.append([direction[0], direction[1], 1.])
mean = pmag.fisher_mean(vgp_samples)
mean_pole_position = (mean[], mean[])
pole2_MCpoles.append([mean[], mean[], 1.])
pole2_MCpole_lat.append(mean[])
pole2_MCpole_long.append(mean[])
paleolat = 90 - pmag.angle(mean_pole_position, ref_loc)
pole2_MCpaleolat.append(paleolat[0])
if plot is True:
plt.figure(figsize=(5, 5))
map_axis = make_mollweide_map()
plot_vgp(map_axis, pole1_MCpole_long, pole1_MCpole_lat, color=)
plot_vgp(map_axis, pole2_MCpole_long, pole2_MCpole_lat, color=)
if savefig == True:
plot_extension =
plt.savefig(save_directory + figure_name + plot_extension)
plt.show()
pole1_pole2_Delta_degrees = []
pole1_pole2_Delta_kilometers = []
pole1_pole2_Delta_myr = []
pole1_pole2_degrees_per_myr = []
pole1_pole2_cm_per_yr = []
for n in range(samplesize):
Delta_degrees = pole1_MCpaleolat[n] - pole2_MCpaleolat[n]
Delta_Myr = pole1_MCages[n] - pole2_MCages[n]
pole1_pole2_Delta_degrees.append(Delta_degrees)
degrees_per_myr = old_div(Delta_degrees, Delta_Myr)
cm_per_yr = old_div(((Delta_degrees * 111) * 100000),
(Delta_Myr * 1000000))
pole1_pole2_degrees_per_myr.append(degrees_per_myr)
pole1_pole2_cm_per_yr.append(cm_per_yr)
if plot is True:
plotnumber = 100
plt.figure(num=None, figsize=(10, 4))
plt.subplot(1, 2, 1)
for n in range(plotnumber):
plt.plot([pole1_MCpaleolat[n], pole2_MCpaleolat[n]],
[pole1_MCages[n], pole2_MCages[n]], , linewidth=0.1, alpha=0.3)
plt.scatter(pole1_MCpaleolat[:plotnumber],
pole1_MCages[:plotnumber], color=, s=3)
plt.scatter(pole1_paleolat, pole1_age, color=,
s=100, edgecolor=, zorder=10000)
plt.scatter(pole2_MCpaleolat[:plotnumber],
pole2_MCages[:plotnumber], color=, s=3)
plt.scatter(pole2_paleolat, pole2_age, color=,
s=100, edgecolor=, zorder=10000)
plt.plot([pole1_paleolat, pole2_paleolat], [
pole1_age, pole2_age], , linewidth=2)
plt.gca().invert_yaxis()
plt.xlabel(, size=14)
plt.ylabel(, size=14)
plt.subplot(1, 2, 2)
plt.hist(pole1_pole2_cm_per_yr, bins=600)
plt.ylabel(, size=14)
plt.xlabel(, size=14)
if savefig == True:
plot_extension =
plt.savefig(save_directory + figure_name + plot_extension)
plt.show()
twopointfive_percentile = stats.scoreatpercentile(
pole1_pole2_cm_per_yr, 2.5)
fifty_percentile = stats.scoreatpercentile(pole1_pole2_cm_per_yr, 50)
ninetysevenpointfive_percentile = stats.scoreatpercentile(
pole1_pole2_cm_per_yr, 97.5)
print("2.5th percentile is: " +
str(round(twopointfive_percentile, 2)) + " cm/yr")
print("50th percentile is: " + str(round(fifty_percentile, 2)) + " cm/yr")
print("97.5th percentile is: " +
str(round(ninetysevenpointfive_percentile, 2)) + " cm/yr")
return rate[0], twopointfive_percentile, ninetysevenpointfive_percentile | Determine the latitudinal motion implied by a pair of poles and utilize
the Monte Carlo sampling method of Swanson-Hysell (2014) to determine the
associated uncertainty.
Parameters:
------------
plon : longitude of pole
plat : latitude of pole
kappa : Fisher precision parameter for VGPs in pole
N : number of VGPs in pole
age : age assigned to pole in Ma
age_error : 1 sigma age uncertainty in million years
ref_loc_lon : longitude of reference location
ref_loc_lat : latitude of reference location
samplesize : number of draws from pole and age distributions (default set to 10000)
random_seed : set random seed for reproducible number generation (default is None)
plot : whether to make figures (default is True, optional)
savefig : whether to save figures (default is True, optional)
save_directory : default is the local directory (optional)
figure_name : prefix for file names (optional)
Returns
--------
rate : rate of latitudinal motion in cm/yr along with estimated 2.5 and 97.5
percentile rate estimates |
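A hedged usage sketch of the function above; the pole positions, ages, and the `ipmag` import path are illustrative assumptions, not values taken from this dataset row.

```python
# Illustrative call only: the pole parameters below are made up, and the import
# assumes this function lives in pmagpy's ipmag module.
import pmagpy.ipmag as ipmag

rate, lower, upper = ipmag.plate_rate_mc(
    pole1_plon=200.0, pole1_plat=30.0, pole1_kappa=50, pole1_N=25,
    pole1_age=1100.0, pole1_age_error=5.0,
    pole2_plon=180.0, pole2_plat=10.0, pole2_kappa=40, pole2_N=20,
    pole2_age=1090.0, pole2_age_error=5.0,
    ref_loc_lon=240.0, ref_loc_lat=45.0,
    samplesize=1000, random_seed=42, plot=False, savefig=False)
print(rate, lower, upper)  # point-estimate rate plus 2.5th/97.5th percentile bounds
```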
20,003 | def get_map_matrix(inputfile, sheet_name):
matrix = read_excel_file(inputfile, sheet_name)
output = [[, , ]]
for row in matrix:
if row[0] and not re.match(r, row[0]):
output.append([row[0], row[1], row[2]])
return output | Return the matrix representation of the genetic map.
:arg inputfile: the path to the input file from which to retrieve the
genetic map.
:arg sheet_name: the excel sheet containing the data on which to
retrieve the genetic map. |
20,004 | def domain_score(self, domains):
warn(
, DeprecationWarning,
)
url_path =
return self._multi_post(url_path, domains) | Calls the domain scores endpoint.
This method is deprecated since the OpenDNS Investigate API
endpoint is also deprecated. |
20,005 | def calc(args):
from jcvi.formats.fasta import translate
p = OptionParser(calc.__doc__)
p.add_option("--longest", action="store_true",
help="Get longest ORF, only works if no pep file, "\
"e.g. ESTs [default: %default]")
p.add_option("--msa", default="clustalw", choices=("clustalw", "muscle"),
help="software used to align the proteins [default: %default]")
p.add_option("--workdir", default=os.getcwd(), help="Work directory")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) == 1:
protein_file, dna_file = None, args[0]
elif len(args) == 2:
protein_file, dna_file = args
else:
print("Incorrect arguments", file=sys.stderr)
sys.exit(not p.print_help())
output_h = must_open(opts.outfile, "w")
print(fields, file=output_h)
work_dir = op.join(opts.workdir, "syn_analysis")
mkdir(work_dir)
if not protein_file:
protein_file = dna_file + ".pep"
translate_args = [dna_file, "--outfile=" + protein_file]
if opts.longest:
translate_args += ["--longest"]
dna_file, protein_file = translate(translate_args)
prot_iterator = SeqIO.parse(open(protein_file), "fasta")
dna_iterator = SeqIO.parse(open(dna_file), "fasta")
for p_rec_1, p_rec_2, n_rec_1, n_rec_2 in \
zip(prot_iterator, prot_iterator, dna_iterator, dna_iterator):
print("--------", p_rec_1.name, p_rec_2.name, file=sys.stderr)
if opts.msa == "clustalw":
align_fasta = clustal_align_protein((p_rec_1, p_rec_2), work_dir)
elif opts.msa == "muscle":
align_fasta = muscle_align_protein((p_rec_1, p_rec_2), work_dir)
mrtrans_fasta = run_mrtrans(align_fasta, (n_rec_1, n_rec_2), work_dir)
if mrtrans_fasta:
ds_subs_yn, dn_subs_yn, ds_subs_ng, dn_subs_ng = \
find_synonymous(mrtrans_fasta, work_dir)
if ds_subs_yn is not None:
pair_name = "%s;%s" % (p_rec_1.name, p_rec_2.name)
output_h.write("%s\n" % (",".join(str(x) for x in (pair_name,
ds_subs_yn, dn_subs_yn, ds_subs_ng, dn_subs_ng))))
output_h.flush()
sh("rm -rf 2YN.t 2YN.dN 2YN.dS rst rub rst1 syn_analysis") | %prog calc [prot.fasta] cds.fasta > out.ks
Protein file is optional. If only one file is given, it is assumed to
be CDS sequences with correct frame (frame 0). Results will be written to
stdout. Both protein file and nucleotide file are assumed to be Fasta format,
with adjacent records as the pairs to compare.
Author: Haibao Tang <[email protected]>, Brad Chapman, Jingping Li
Calculate synonymous mutation rates for gene pairs
This does the following:
1. Fetches a protein pair.
2. Aligns the protein pair with clustalw (default) or muscle.
3. Convert the output to Fasta format.
4. Use this alignment info to align gene sequences using PAL2NAL
5. Run PAML yn00 to calculate synonymous mutation rates. |
20,006 | def bundle_javascript(context: Context):
args = []
if context.verbosity > 0:
args.append()
if not context.use_colour:
args.append()
return context.node_tool(, *args) | Compiles javascript |
20,007 | def construct_variables(self, kwargs):
if isinstance(self.feedable_kwargs, dict):
warnings.warn("Using a dict for `feedable_kwargs is deprecated."
"Switch to using a tuple."
"It is not longer necessary to specify the types "
"of the arguments---we build a different graph "
"for each received type."
"Using a dict may become an error on or after "
"2019-04-18.")
feedable_names = tuple(sorted(self.feedable_kwargs.keys()))
else:
feedable_names = self.feedable_kwargs
if not isinstance(feedable_names, tuple):
raise TypeError("Attack.feedable_kwargs should be a tuple, but "
"for subclass " + str(type(self)) + " it is "
+ str(self.feedable_kwargs) + " of type "
+ str(type(self.feedable_kwargs)))
fixed = dict(
(k, v) for k, v in kwargs.items() if k in self.structural_kwargs)
feedable = {k: v for k, v in kwargs.items() if k in feedable_names}
for k in feedable:
if isinstance(feedable[k], (float, int)):
feedable[k] = np.array(feedable[k])
for key in kwargs:
if key not in fixed and key not in feedable:
raise ValueError(str(type(self)) + ": Undeclared argument: " + key)
feed_arg_type = arg_type(feedable_names, feedable)
if not all(isinstance(value, collections.Hashable)
for value in fixed.values()):
hash_key = None
else:
hash_key = tuple(sorted(fixed.items())) + tuple([feed_arg_type])
return fixed, feedable, feed_arg_type, hash_key | Construct the inputs to the attack graph to be used by generate_np.
:param kwargs: Keyword arguments to generate_np.
:return:
Structural arguments
Feedable arguments
Output of `arg_type` describing feedable arguments
A unique key |
20,008 | def check_response(headers: Headers, key: str) -> None:
connection = sum(
[parse_connection(value) for value in headers.get_all("Connection")], []
)
if not any(value.lower() == "upgrade" for value in connection):
raise InvalidUpgrade("Connection", " ".join(connection))
upgrade = sum([parse_upgrade(value) for value in headers.get_all("Upgrade")], [])
if not (len(upgrade) == 1 and upgrade[0].lower() == "websocket"):
raise InvalidUpgrade("Upgrade", ", ".join(upgrade))
try:
s_w_accept = headers["Sec-WebSocket-Accept"]
except KeyError:
raise InvalidHeader("Sec-WebSocket-Accept")
except MultipleValuesError:
raise InvalidHeader(
"Sec-WebSocket-Accept", "more than one Sec-WebSocket-Accept header found"
)
if s_w_accept != accept(key):
raise InvalidHeaderValue("Sec-WebSocket-Accept", s_w_accept) | Check a handshake response received from the server.
``key`` comes from :func:`build_request`.
If the handshake is valid, this function returns ``None``.
Otherwise it raises an :exc:`~websockets.exceptions.InvalidHandshake`
exception.
This function doesn't verify that the response is an HTTP/1.1 or higher
response with a 101 status code. These controls are the responsibility of
the caller. |
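For context, a minimal sketch of what the `accept(key)` helper used above presumably computes: the standard Sec-WebSocket-Accept digest from RFC 6455. The function name `accept_value` and the standalone form are assumptions for illustration; this is not the websockets package's own code.

```python
import base64
import hashlib

# RFC 6455 magic GUID appended to the client's Sec-WebSocket-Key.
_WS_GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"

def accept_value(key: str) -> str:
    # SHA-1 of key + GUID, base64-encoded, is what the server must echo back.
    digest = hashlib.sha1((key + _WS_GUID).encode()).digest()
    return base64.b64encode(digest).decode()

# check_response() passes only when the Sec-WebSocket-Accept header equals this value.
```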
20,009 | def db_list(**connection_args):
*
dbc = _connect(**connection_args)
if dbc is None:
return []
cur = dbc.cursor()
qry =
try:
_execute(cur, qry)
except MySQLdb.OperationalError as exc:
err = .format(*exc.args)
__context__[] = err
log.error(err)
return []
ret = []
results = cur.fetchall()
for dbs in results:
ret.append(dbs[0])
log.debug(ret)
return ret | Return a list of databases of a MySQL server using the output
from the ``SHOW DATABASES`` query.
CLI Example:
.. code-block:: bash
salt '*' mysql.db_list |
20,010 | def canonicalize_clusters(clusters: DefaultDict[int, List[Tuple[int, int]]]) -> List[List[Tuple[int, int]]]:
merged_clusters: List[Set[Tuple[int, int]]] = []
for cluster in clusters.values():
cluster_with_overlapping_mention = None
for mention in cluster:
for cluster2 in merged_clusters:
if mention in cluster2:
cluster_with_overlapping_mention = cluster2
break
if cluster_with_overlapping_mention is not None:
break
if cluster_with_overlapping_mention is not None:
cluster_with_overlapping_mention.update(cluster)
else:
merged_clusters.append(set(cluster))
return [list(c) for c in merged_clusters] | The CONLL 2012 data includes 2 annotated spans which are identical,
but have different ids. This checks all clusters for spans which are
identical, and if it finds any, merges the clusters containing the
identical spans. |
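A small hedged usage example for the function above; the span tuples are made-up coreference mentions.

```python
from collections import defaultdict

clusters = defaultdict(list)
clusters[0] = [(0, 1), (5, 6)]
clusters[1] = [(5, 6), (10, 12)]   # shares the span (5, 6) with cluster 0
clusters[2] = [(20, 22)]

merged = canonicalize_clusters(clusters)
# Clusters 0 and 1 are merged because they contain an identical span, e.g.:
# [[(0, 1), (5, 6), (10, 12)], [(20, 22)]]  (order within each list may vary)
```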
20,011 | def fetch_items(self, category, **kwargs):
from_date = kwargs[]
to_date = kwargs[]
if category == CATEGORY_ISSUE:
items = self.__fetch_issues(from_date, to_date)
elif category == CATEGORY_PULL_REQUEST:
items = self.__fetch_pull_requests(from_date, to_date)
else:
items = self.__fetch_repo_info()
return items | Fetch the items (issues or pull_requests)
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items |
20,012 | def _chi_lr(self,r, phi, nl,nr,beta):
m=int((nr-nl).real)
n=int((nr+nl).real)
p=int((n-abs(m))/2)
p2=int((n+abs(m))/2)
q=int(abs(m))
if p % 2==0:
prefac=1
else:
prefac=-1
prefactor=prefac/beta**(abs(m)+1)*np.sqrt(math.factorial(p)/(np.pi*math.factorial(p2)))
poly=self.poly[p][q]
return prefactor*r**q*poly((r/beta)**2)*np.exp(-(r/beta)**2/2)*np.exp(-1j*m*phi) | computes the generalized polar basis function in the convention of Massey&Refregier eqn 8
:param nl: left basis
:type nl: int
:param nr: right basis
:type nr: int
:param beta: beta -- the characteristic scale, typically chosen to be close to the size of the object.
:type beta: float.
:param coord: coordinates [r,phi]
:type coord: array(n,2)
:returns: values at positions of coordinates.
:raises: AttributeError, KeyError |
20,013 | def _AssertIsLocal(path):
from six.moves.urllib.parse import urlparse
if not _UrlIsLocal(urlparse(path)):
from ._exceptions import NotImplementedForRemotePathError
raise NotImplementedForRemotePathError | Checks if a given path is local, raise an exception if not.
This is used in filesystem functions that do not support remote operations yet.
:param unicode path:
:raises NotImplementedForRemotePathError:
If the given path is not local |
20,014 | def makeCubicxFunc(self,mLvl,pLvl,MedShk,xLvl):
pCount = mLvl.shape[1]
MedCount = mLvl.shape[0]
EndOfPrdvPP = self.DiscFacEff*self.Rfree*self.Rfree*np.sum(self.vPPfuncNext(self.mLvlNext,\
self.pLvlNext)*self.ShkPrbs_temp,axis=0)
EndOfPrdvPP = np.tile(np.reshape(EndOfPrdvPP,(1,pCount,EndOfPrdvPP.shape[1])),(MedCount,1,1))
dcda = EndOfPrdvPP/self.uPP(np.array(self.cLvlNow))
dMedda = EndOfPrdvPP/(self.MedShkVals_tiled*self.uMedPP(self.MedLvlNow))
dMedda[0,:,:] = 0.0
MPC = dcda/(1.0 + dcda + self.MedPrice*dMedda)
MPM = dMedda/(1.0 + dcda + self.MedPrice*dMedda)
MPX = MPC + self.MedPrice*MPM
MPX = np.concatenate((np.reshape(MPX[:,:,0],(MedCount,pCount,1)),MPX),axis=2)
MPX[0,:,0] = self.MPCmaxNow
xFunc_by_pLvl_and_MedShk = []
for i in range(pCount):
temp_list = []
pLvl_i = pLvl[0,i,0]
mLvlMin_i = self.BoroCnstNat(pLvl_i)
for j in range(MedCount):
m_temp = mLvl[j,i,:] - mLvlMin_i
x_temp = xLvl[j,i,:]
MPX_temp = MPX[j,i,:]
temp_list.append(CubicInterp(m_temp,x_temp,MPX_temp))
xFunc_by_pLvl_and_MedShk.append(deepcopy(temp_list))
pLvl_temp = pLvl[0,:,0]
MedShk_temp = MedShk[:,0,0]
xFuncUncBase = BilinearInterpOnInterp1D(xFunc_by_pLvl_and_MedShk,pLvl_temp,MedShk_temp)
xFuncUnc = VariableLowerBoundFunc3D(xFuncUncBase,self.BoroCnstNat)
return xFuncUnc | Constructs the (unconstrained) expenditure function for this period using
bilinear interpolation (over permanent income and the medical shock) among
an array of cubic interpolations over market resources.
Parameters
----------
mLvl : np.array
Corresponding market resource points for interpolation.
pLvl : np.array
Corresponding permanent income level points for interpolation.
MedShk : np.array
Corresponding medical need shocks for interpolation.
xLvl : np.array
Expenditure points for interpolation, corresponding to those in mLvl,
pLvl, and MedShk.
Returns
-------
xFuncUnc : BilinearInterpOnInterp1D
Unconstrained total expenditure function for this period. |
20,015 | def add(self, username, courseid, taskid, consumer_key, service_url, result_id):
search = {"username": username, "courseid": courseid,
"taskid": taskid, "service_url": service_url,
"consumer_key": consumer_key, "result_id": result_id}
entry = self._database.lis_outcome_queue.find_one_and_update(search, {"$set": {"nb_attempt": 0}},
return_document=ReturnDocument.BEFORE, upsert=True)
if entry is None:
self._add_to_queue(self._database.lis_outcome_queue.find_one(search)) | Add a job to the queue
:param username:
:param courseid:
:param taskid:
:param consumer_key:
:param service_url:
:param result_id: |
20,016 | def __alloc_raw_data(self, initial_values=None):
if self.__raw_data == None:
raw_data_type = c_ubyte * self.__raw_report_size
self.__raw_data = raw_data_type()
elif initial_values == self.__raw_data:
return
else:
ctypes.memset(self.__raw_data, 0, len(self.__raw_data))
if initial_values:
for index in range(len(initial_values)):
self.__raw_data[index] = initial_values[index] | Pre-allocate reusable memory |
20,017 | def _next(self, **kwargs):
spec = self._pagination_default_spec(kwargs)
spec.update(kwargs)
query = queries.build_query(spec)
query = queries.where_after_entry(query, self._record)
for record in query.order_by(model.Entry.local_date,
model.Entry.id)[:1]:
return Entry(record)
return None | Get the next item in any particular category |
20,018 | def displayHelp(self):
self.outputStream.write(self.linter.help())
sys.exit(32) | Output help message of twistedchecker. |
20,019 | def install_pip(env, requirements):
try:
installation_source_folder = config.installation_cache_folder()
options = setuptools_install_options(installation_source_folder)
if installation_source_folder is not None:
zip_eggs_in_folder(installation_source_folder)
env.execute(["-m", "easy_install"] + options + requirements)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
raise EnvironmentSetupError("pip installation failed.") | Install pip and its requirements using setuptools. |
20,020 | def unfollow(self, auth_secret, followee_username):
result = {pytwis_constants.ERROR_KEY: None}
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
with self._rc.pipeline() as pipe:
while True:
try:
pipe.watch(pytwis_constants.USERS_KEY)
followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
if followee_userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
format(followee_username)
return (False, result)
break
except WatchError:
continue
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
pipe.multi()
pipe.zrem(follower_zset_key, userid)
pipe.zrem(following_zset_key, followee_userid)
pipe.execute()
return (True, result) | Unfollow a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
followee_username: str
The username of the followee.
Returns
-------
bool
True if the unfollow is successful, False otherwise.
result
None if the unfollow is successful, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username) |
20,021 | def simulate():
nest.ResetKernel()
nest.SetKernelStatus({"resolution": dt, "print_time": True,
"overwrite_files": True})
print("Building network")
nest.SetDefaults("iaf_psc_alpha", neuron_params)
nest.SetDefaults("poisson_generator",{"rate": p_rate})
nodes_ex = nest.Create("iaf_psc_alpha",NE)
nodes_in = nest.Create("iaf_psc_alpha",NI)
noise = nest.Create("poisson_generator")
espikes = nest.Create("spike_detector")
ispikes = nest.Create("spike_detector")
print("first exc node: {}".format(nodes_ex[0]))
print("first inh node: {}".format(nodes_in[0]))
nest.SetStatus(nodes_ex, "V_m",
random.rand(len(nodes_ex))*neuron_params["V_th"])
nest.SetStatus(nodes_in, "V_m",
random.rand(len(nodes_in))*neuron_params["V_th"])
nest.SetStatus(espikes,[{
"label": os.path.join(spike_output_path, label + "-EX"),
"withtime": True,
"withgid": True,
"to_file": True,
}])
nest.SetStatus(ispikes,[{
"label": os.path.join(spike_output_path, label + "-IN"),
"withtime": True,
"withgid": True,
"to_file": True,}])
print("Connecting devices")
nest.CopyModel("static_synapse","excitatory",{"weight":J_ex, "delay":delay})
nest.CopyModel("static_synapse","inhibitory",{"weight":J_in, "delay":delay})
all_to_all
if Poisson:
nest.Connect(noise,nodes_ex, , "excitatory")
nest.Connect(noise,nodes_in,, "excitatory")
nest.Connect(nodes_ex,espikes, , "excitatory")
nest.Connect(nodes_in,ispikes, , "excitatory")
print("Connecting network")
print("Excitatory connections")
fixed_indegree
conn_params_ex = {: , : CE}
nest.Connect(nodes_ex, nodes_ex+nodes_in, conn_params_ex, "excitatory")
print("Inhibitory connections")
conn_params_in = {: , : CI}
nest.Connect(nodes_in, nodes_ex+nodes_in, conn_params_in, "inhibitory")
endbuild=time.time()
print("Simulating")
nest.Simulate(simtime)
endsimulate= time.time()
events_ex = nest.GetStatus(espikes,"n_events")[0]
events_in = nest.GetStatus(ispikes,"n_events")[0]
rate_ex = events_ex/simtime*1000.0/N_neurons
rate_in = events_in/simtime*1000.0/N_neurons
num_synapses = nest.GetDefaults("excitatory")["num_connections"]+\
nest.GetDefaults("inhibitory")["num_connections"]
build_time = endbuild-startbuild
sim_time = endsimulate-endbuild
print("Brunel network simulation (Python)")
print("Number of neurons : {0}".format(N_neurons))
print("Number of synapses: {0}".format(num_synapses))
print(" Exitatory : {0}".format(int(CE * N_neurons) + N_neurons))
print(" Inhibitory : {0}".format(int(CI * N_neurons)))
print("Excitatory rate : %.2f Hz" % rate_ex)
print("Inhibitory rate : %.2f Hz" % rate_in)
print("Building time : %.2f s" % build_time)
print("Simulation time : %.2f s" % sim_time)
if False:
nest.raster_plot.from_device(espikes, hist=True)
nest.raster_plot.from_device(ispikes, hist=True)
nest.raster_plot.show() | instantiate and execute network simulation |
20,022 | def build(config, services):
filtered_services = {name: service for name, service in services.iteritems() if in service}
_call_output(.format(.join(filtered_services.iterkeys())))
version = _get_version()
for service_name, service_dict in filtered_services.iteritems():
image=image,
version=version
)
) | Builds images and tags them appropriately.
Where "appropriately" means with the output of:
git describe --tags HEAD
and 'latest' as well (so the "latest" image for each will always be the
most recently built) |
20,023 | def get_credentials(env=None):
environ = env or os.environ
try:
username = environ["TEXTMAGIC_USERNAME"]
token = environ["TEXTMAGIC_AUTH_TOKEN"]
return username, token
except KeyError:
return None, None | Gets the TextMagic credentials from current environment
:param env: environment
:return: username, token |
20,024 | def _delete_file(fileName, n=10):
status = False
count = 0
while not status and count < n:
try:
_os.remove(fileName)
except OSError:
count += 1
_time.sleep(0.2)
else:
status = True
return status | Cleanly deletes a file in `n` attempts (if necessary) |
20,025 | def post_result_data(self, client, check, output, status):
data = {
: client,
: check,
: output,
: status,
}
self._request(, , data=json.dumps(data))
return True | Posts check result data. |
20,026 | def _is_bugged_tarfile(self):
try:
output = subprocess.check_output([, , self.destination]).decode()
return in output and in output
except subprocess.CalledProcessError:
return False | Check for tar file that tarfile library mistakenly reports as invalid.
Happens with tar files created on FAT systems. See:
http://stackoverflow.com/questions/25552162/tarfile-readerror-file-could-not-be-opened-successfully |
20,027 | def rel_path(self, uuid):
_assert_uuid(uuid)
filename = str(uuid)
return Path(filename[0:2], filename[2:4], filename) | Construct relative path from repository top directory to the file
named after this uuid.
:param:uuid: :class:`UUID` instance |
20,028 | def pipe_split(context, _INPUT, conf, splits, **kwargs):
return Split(context, _INPUT, conf, splits, **kwargs) | An operator that splits a source into identical copies. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : dict
splits : number of copies
Yields
------
_OUTPUT, _OUTPUT2... : copies of all source items |
20,029 | def money_receipts(pronac, dt):
df = verified_repeated_receipts_for_pronac(pronac)
comprovantes_saque = df[df[] == 3.0]
return metric_return(comprovantes_saque) | Checks how many items are in the same receipt when the payment type is
withdraw/money
- is_outlier: True if there are any receipts that have more than one
- itens_que_compartilham_comprovantes: List of items that share receipt |
20,030 | def _needs_download(self, f):
@wraps(f)
def wrapper(self, *args, **kwargs):
if not self.isdownloaded():
self.download()
return f(self, *args, **kwargs)
return wrapper | Decorator used to make sure that the downloading happens prior to running the task. |
20,031 | def retrieve_width(self, signum=None, frame=None):
for method_name, args in self.strategies:
method = getattr(self, "from_" + method_name)
width = method(*args)
if width and width > 0:
self.width = width
break
os.environ["COLUMNS"] = str(self.width) | Stores the terminal width into ``self.width``, if possible.
This function is also the SIGWINCH event handler. |
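Since the docstring notes this method doubles as the SIGWINCH handler, here is a hedged sketch of how it might be registered; the `install_resize_handler` wrapper and the `terminal` argument are assumptions, and SIGWINCH is only available on Unix.

```python
import signal

def install_resize_handler(terminal):
    # `terminal` is assumed to be an instance of the enclosing class.
    signal.signal(signal.SIGWINCH, terminal.retrieve_width)  # re-measure on resize
```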
20,032 | def create_download_specifications(ctx_cli_options, config):
cli_conf = ctx_cli_options[ctx_cli_options[]]
cli_options = cli_conf[]
specs = []
for conf in config[]:
if in conf:
conf_options = conf[]
else:
conf_options = {}
mode = _merge_setting(
cli_options, conf_options, , default=).lower()
if mode == :
mode = blobxfer.models.azure.StorageModes.Auto
elif mode == :
mode = blobxfer.models.azure.StorageModes.Append
elif mode == :
mode = blobxfer.models.azure.StorageModes.Block
elif mode == :
mode = blobxfer.models.azure.StorageModes.File
elif mode == :
mode = blobxfer.models.azure.StorageModes.Page
else:
raise ValueError(.format(mode))
rpk = _merge_setting(
cli_options, conf_options, , default=None)
if blobxfer.util.is_not_empty(rpk):
rpkp = _merge_setting(
cli_options, conf_options, ,
default=None)
rpk = blobxfer.operations.crypto.load_rsa_private_key_file(
rpk, rpkp)
else:
rpk = None
conf_sod = conf_options.get(, {})
cli_sod = cli_options[]
conf_rfp = conf_options.get(, {})
cli_rfp = cli_options[]
ds = blobxfer.models.download.Specification(
download_options=blobxfer.models.options.Download(
check_file_md5=_merge_setting(
cli_options, conf_options, ,
default=False),
chunk_size_bytes=_merge_setting(
cli_options, conf_options, ,
default=0),
delete_extraneous_destination=_merge_setting(
cli_options, conf_options,
, default=False),
max_single_object_concurrency=_merge_setting(
cli_options, conf_options,
, default=8),
mode=mode,
overwrite=_merge_setting(
cli_options, conf_options, , default=True),
recursive=_merge_setting(
cli_options, conf_options, , default=True),
rename=_merge_setting(
cli_options, conf_options, , default=False),
restore_file_properties=blobxfer.models.options.FileProperties(
attributes=_merge_setting(
cli_rfp, conf_rfp, ,
default=False),
cache_control=None,
lmt=_merge_setting(
cli_rfp, conf_rfp, , default=False),
md5=None,
),
rsa_private_key=rpk,
strip_components=_merge_setting(
cli_options, conf_options, ,
default=0),
),
skip_on_options=blobxfer.models.options.SkipOn(
filesize_match=_merge_setting(
cli_sod, conf_sod, , default=False),
lmt_ge=_merge_setting(
cli_sod, conf_sod, , default=False),
md5_match=_merge_setting(
cli_sod, conf_sod, , default=False),
),
local_destination_path=blobxfer.models.download.
LocalDestinationPath(
conf[]
)
)
for src in conf[]:
if len(src) != 1:
raise RuntimeError(
)
sa = next(iter(src))
asp = blobxfer.operations.azure.SourcePath()
asp.add_path_with_storage_account(src[sa], sa)
incl = _merge_setting(cli_conf, conf, , default=None)
if blobxfer.util.is_not_empty(incl):
asp.add_includes(incl)
excl = _merge_setting(cli_conf, conf, , default=None)
if blobxfer.util.is_not_empty(excl):
asp.add_excludes(excl)
ds.add_azure_source_path(asp)
specs.append(ds)
return specs | Create a list of Download Specification objects from configuration
:param dict ctx_cli_options: cli options
:param dict config: config dict
:rtype: list
:return: list of Download Specification objects |
20,033 | def topology_from_numpy(atoms, bonds=None):
if bonds is None:
bonds = np.zeros((0, 2))
for col in ["name", "element", "resSeq",
"resName", "chainID", "serial"]:
if col not in atoms.dtype.names:
raise ValueError( % col)
if "segmentID" not in atoms.dtype.names:
atoms["segmentID"] = ""
from mdtraj.core.topology import Atom
from mdtraj.core import element as elem
out = mdtraj.Topology()
out._atoms = [None for _ in range(len(atoms))]
N = np.arange(0, len(atoms))
for ci in np.unique(atoms[]):
chain_atoms = atoms[atoms[] == ci]
subN = N[atoms[] == ci]
c = out.add_chain()
for ri in np.unique(chain_atoms[]):
residue_atoms = chain_atoms[chain_atoms[] == ri]
mask = subN[chain_atoms[] == ri]
indices = N[mask]
rnames = residue_atoms[]
residue_name = np.array(rnames)[0]
segids = residue_atoms[]
segment_id = np.array(segids)[0]
if not np.all(rnames == residue_name):
raise ValueError(
% ri)
r = out.add_residue(residue_name.decode(), c, ri, segment_id.decode())
for ix, atom in enumerate(residue_atoms):
e = atom[].decode()
a = Atom(atom[].decode(), elem.get_by_symbol(e),
int(indices[ix]), r, serial=atom[])
out._atoms[indices[ix]] = a
r._atoms.append(a)
for ai1, ai2 in bonds:
out.add_bond(out.atom(ai1), out.atom(ai2))
out._numAtoms = out.n_atoms
return out | Create a mdtraj topology from numpy arrays
Parameters
----------
atoms : np.ndarray
The atoms in the topology, represented as a data frame. This data
frame should have columns "serial" (atom index), "name" (atom name),
"element" (atom's element), "resSeq" (index of the residue)
"resName" (name of the residue), "chainID" (index of the chain),
and optionally "segmentID", following the same conventions
as wwPDB 3.0 format.
bonds : np.ndarray, shape=(n_bonds, 2), dtype=int, optional
The bonds in the topology, represented as an n_bonds x 2 array
of the indices of the atoms involved in each bond. Specifying
bonds here is optional. To create standard protein bonds, you can
use `create_standard_bonds` to "fill in" the bonds on your newly
created Topology object
See Also
--------
create_standard_bonds |
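A hedged sketch of the kind of structured `atoms` array the docstring describes; the dtype field widths and the two-atom fragment are assumptions for illustration. Note that some string keys in the code cell above were lost in extraction, so this illustrates the intended interface rather than code that runs against the garbled cell as shown.

```python
import numpy as np

atom_dtype = np.dtype([
    ("serial", "i4"), ("name", "S4"), ("element", "S2"),
    ("resSeq", "i4"), ("resName", "S4"), ("chainID", "i4"),
    ("segmentID", "S8"),
])
atoms = np.array([
    (1, b"N",  b"N", 1, b"ALA", 0, b""),
    (2, b"CA", b"C", 1, b"ALA", 0, b""),
], dtype=atom_dtype)
bonds = np.array([[0, 1]])          # one bond between the two atoms

top = topology_from_numpy(atoms, bonds)
```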
20,034 | def update_dict(self, newdata: dict) -> 'Language':
return Language.make(
language=newdata.get('language', self.language),
extlangs=newdata.get('extlangs', self.extlangs),
script=newdata.get('script', self.script),
region=newdata.get('region', self.region),
variants=newdata.get('variants', self.variants),
extensions=newdata.get('extensions', self.extensions),
private=newdata.get('private', self.private)
) | Update the attributes of this Language from a dictionary. |
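A hedged usage example, assuming this method belongs to the `langcodes` `Language` class; the dictionary keys mirror the keyword names in the code above.

```python
from langcodes import Language

en = Language.get("en")
en_gb = en.update_dict({"region": "GB"})
# roughly equivalent to Language.make(language='en', region='GB')
```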
20,035 | def _is_did(did):
return d1_gmn.app.models.IdNamespace.objects.filter(did=did).exists() | Return True if ``did`` is recorded in a local context.
``did``=None is supported and returns False.
A DID can be classified with classify_identifier(). |
20,036 | def _lrepr_fallback(
o: Any,
human_readable: bool = False,
print_dup: bool = PRINT_DUP,
print_length: PrintCountSetting = PRINT_LENGTH,
print_level: PrintCountSetting = PRINT_LEVEL,
print_meta: bool = PRINT_META,
print_readably: bool = PRINT_READABLY,
) -> str:
kwargs = {
"human_readable": human_readable,
"print_dup": print_dup,
"print_length": print_length,
"print_level": print_level,
"print_meta": print_meta,
"print_readably": print_readably,
}
if isinstance(o, bool):
return _lrepr_bool(o)
elif o is None:
return _lrepr_nil(o)
elif isinstance(o, str):
return _lrepr_str(
o, human_readable=human_readable, print_readably=print_readably
)
elif isinstance(o, dict):
return _lrepr_py_dict(o, **kwargs)
elif isinstance(o, list):
return _lrepr_py_list(o, **kwargs)
elif isinstance(o, set):
return _lrepr_py_set(o, **kwargs)
elif isinstance(o, tuple):
return _lrepr_py_tuple(o, **kwargs)
elif isinstance(o, complex):
return _lrepr_complex(o)
elif isinstance(o, datetime.datetime):
return _lrepr_datetime(o)
elif isinstance(o, Decimal):
return _lrepr_decimal(o, print_dup=print_dup)
elif isinstance(o, Fraction):
return _lrepr_fraction(o)
elif isinstance(o, Pattern):
return _lrepr_pattern(o)
elif isinstance(o, uuid.UUID):
return _lrepr_uuid(o)
else:
return repr(o) | Fallback function for lrepr for subclasses of standard types.
The singledispatch used for standard lrepr dispatches using an exact
type match on the first argument, so we will only hit this function
for subclasses of common Python types like strings or lists. |
20,037 | def install(name=None,
refresh=False,
pkgs=None,
sources=None,
reinstall=False,
**kwargs):
**["foo", "bar"]*["foo", {"bar": "1.2.3-0ubuntu0"}]*[{"foo": "salt://foo.deb"},{"bar": "salt://bar.deb"}]<package>old<old-version>new<new-version>
refreshdb = salt.utils.data.is_true(refresh)
try:
pkg_params, pkg_type = __salt__[](
name, pkgs, sources, **kwargs
)
except MinionError as exc:
raise CommandExecutionError(exc)
old = list_pkgs()
cmd_prefix = [, ]
to_install = []
to_reinstall = []
to_downgrade = []
_append_noaction_if_testmode(cmd_prefix, **kwargs)
if not pkg_params:
return {}
elif pkg_type == :
if reinstall:
cmd_prefix.append()
if not kwargs.get(, False):
cmd_prefix.append()
to_install.extend(pkg_params)
elif pkg_type == :
if not kwargs.get(, True):
cmd_prefix.append()
for pkgname, pkgversion in six.iteritems(pkg_params):
if (name and pkgs is None and kwargs.get() and
len(pkg_params) == 1):
version_num = kwargs[]
else:
version_num = pkgversion
if version_num is None:
)
_process_restartcheck_result(rs_result, **kwargs)
return ret | Install the passed package, add refresh=True to update the opkg database.
name
The name of the package to be installed. Note that this parameter is
ignored if either "pkgs" or "sources" is passed. Additionally, please
note that this option can only be used to install packages from a
software repository. To install a package file manually, use the
"sources" option.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
refresh
Whether or not to refresh the package database before installing.
version
Install a specific version of the package, e.g. 1.2.3~0ubuntu0. Ignored
if "pkgs" or "sources" is passed.
.. versionadded:: 2017.7.0
reinstall : False
Specifying reinstall=True will use ``opkg install --force-reinstall``
rather than simply ``opkg install`` for requested packages that are
already installed.
If a version is specified with the requested package, then ``opkg
install --force-reinstall`` will only be used if the installed version
matches the requested version.
.. versionadded:: 2017.7.0
Multiple Package Installation Options:
pkgs
A list of packages to install from a software repository. Must be
passed as a python list.
CLI Example:
.. code-block:: bash
salt '*' pkg.install pkgs='["foo", "bar"]'
salt '*' pkg.install pkgs='["foo", {"bar": "1.2.3-0ubuntu0"}]'
sources
A list of IPK packages to install. Must be passed as a list of dicts,
with the keys being package names, and the values being the source URI
or local path to the package. Dependencies are automatically resolved
and marked as auto-installed.
CLI Example:
.. code-block:: bash
salt '*' pkg.install sources='[{"foo": "salt://foo.deb"},{"bar": "salt://bar.deb"}]'
install_recommends
Whether to install the packages marked as recommended. Default is True.
only_upgrade
Only upgrade the packages (disallow downgrades), if they are already
installed. Default is False.
.. versionadded:: 2017.7.0
always_restart_services
Whether to restart services even if a reboot is required. Default is True.
Returns a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}} |
20,038 | async def purge(self, *, limit=100, check=None, before=None, after=None, around=None, oldest_first=False, bulk=True):
if check is None:
check = lambda m: True
iterator = self.history(limit=limit, before=before, after=after, oldest_first=oldest_first, around=around)
ret = []
count = 0
minimum_time = int((time.time() - 14 * 24 * 60 * 60) * 1000.0 - 1420070400000) << 22
strategy = self.delete_messages if self._state.is_bot and bulk else _single_delete_strategy
while True:
try:
msg = await iterator.next()
except NoMoreItems:
if count >= 2:
to_delete = ret[-count:]
await strategy(to_delete)
elif count == 1:
await ret[-1].delete()
return ret
else:
if count == 100:
to_delete = ret[-100:]
await strategy(to_delete)
count = 0
await asyncio.sleep(1)
if check(msg):
if msg.id < minimum_time:
if count == 1:
await ret[-1].delete()
elif count >= 2:
to_delete = ret[-count:]
await strategy(to_delete)
count = 0
strategy = _single_delete_strategy
count += 1
ret.append(msg) | |coro|
Purges a list of messages that meet the criteria given by the predicate
``check``. If a ``check`` is not provided then all messages are deleted
without discrimination.
You must have the :attr:`~Permissions.manage_messages` permission to
delete messages even if they are your own (unless you are a user
account). The :attr:`~Permissions.read_message_history` permission is
also needed to retrieve message history.
Internally, this employs a different number of strategies depending
on the conditions met such as if a bulk delete is possible or if
the account is a user bot or not.
Examples
---------
Deleting bot's messages ::
def is_me(m):
return m.author == client.user
deleted = await channel.purge(limit=100, check=is_me)
await channel.send('Deleted {} message(s)'.format(len(deleted)))
Parameters
-----------
limit: Optional[:class:`int`]
The number of messages to search through. This is not the number
of messages that will be deleted, though it can be.
check: predicate
The function used to check if a message should be deleted.
It must take a :class:`Message` as its sole parameter.
before
Same as ``before`` in :meth:`history`.
after
Same as ``after`` in :meth:`history`.
around
Same as ``around`` in :meth:`history`.
oldest_first
Same as ``oldest_first`` in :meth:`history`.
bulk: :class:`bool`
If True, use bulk delete. bulk=False is useful for mass-deleting
a bot's own messages without manage_messages. When True, will fall
back to single delete if current account is a user bot, or if
messages are older than two weeks.
Raises
-------
Forbidden
You do not have proper permissions to do the actions required.
HTTPException
Purging the messages failed.
Returns
--------
List[:class:`.Message`]
The list of messages that were deleted. |
20,039 | def configure(self, *, hwm: int=None, rcvtimeo: int=None, sndtimeo: int=None, linger: int=None) -> :
if hwm is not None:
self.set_hwm(hwm)
if rcvtimeo is not None:
self.setsockopt(zmq.RCVTIMEO, rcvtimeo)
if sndtimeo is not None:
self.setsockopt(zmq.SNDTIMEO, sndtimeo)
if linger is not None:
self.setsockopt(zmq.LINGER, linger)
return self | Allows configuring some common socket options while supporting method chaining |
20,040 | def __add_hopscotch_tour_step(self, message, selector=None, name=None,
title=None, alignment=None):
arrow_offset_row = None
if not selector or selector == "html":
selector = "head"
alignment = "bottom"
arrow_offset_row = "arrowOffset: ,"
else:
arrow_offset_row = ""
step = ( % (selector, title, message, arrow_offset_row, alignment))
self._tour_steps[name].append(step) | Allows the user to add tour steps for a website.
@Params
message - The message to display.
selector - The CSS Selector of the Element to attach to.
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
title - Additional header text that appears above the message.
alignment - Choose from "top", "bottom", "left", and "right".
("bottom" is the default alignment). |
20,041 | def pub_connect(self):
s zmq socket. If a publisher socket
already exists "pub_close" is called before creating and connecting a
new socket.
ipc_modetcptcp://127.0.0.1:{0}tcp_master_publish_pullipc://{0}sock_dirpublish_pull.ipc')
)
log.debug("Connecting to pub server: %s", pull_uri)
self.pub_sock.connect(pull_uri)
return self._sock_data.sock | Create and connect this thread's zmq socket. If a publisher socket
already exists "pub_close" is called before creating and connecting a
new socket. |
20,042 | def from_date(cls, date):
try:
date = date.date()
except AttributeError:
pass
return cls(date.year, date.month) | Returns a Month instance from the given datetime.date or
datetime.datetime object |
20,043 | def check_lt(self):
lt_valid = self.request.session.get(, [])
lt_send = self.request.POST.get()
self.gen_lt()
if lt_send not in lt_valid:
return False
else:
self.request.session[].remove(lt_send)
self.request.session[] = self.request.session[]
return True | Check if the POSTed LoginTicket is valid; if so, invalidate it
:return: ``True`` if the LoginTicket is valid, ``False`` otherwise
:rtype: bool |
20,044 | def get_Generic_parameters(tp, generic_supertype):
try:
res = _select_Generic_superclass_parameters(tp, generic_supertype)
except TypeError:
res = None
if res is None:
raise TypeError("%s has no proper parameters defined by %s."%
(type_str(tp), type_str(generic_supertype)))
else:
return tuple(res) | tp must be a subclass of generic_supertype.
Retrieves the type values from tp that correspond to parameters
defined by generic_supertype.
E.g. get_Generic_parameters(tp, typing.Mapping) is equivalent
to get_Mapping_key_value(tp) except for the error message.
Note that get_Generic_itemtype(tp) is not exactly equal to
get_Generic_parameters(tp, typing.Container), as that method
additionally contains treatment for typing.Tuple and typing.Iterable. |
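A hedged example based on the docstring; the `StrIntMap` class is hypothetical, and the expected result follows from the description above rather than from tested output.

```python
import typing

class StrIntMap(typing.Mapping[str, int]):
    # Minimal concrete methods so the subclass is well-formed; contents don't matter here.
    def __getitem__(self, key): raise KeyError(key)
    def __iter__(self): return iter(())
    def __len__(self): return 0

params = get_Generic_parameters(StrIntMap, typing.Mapping)
# expected: (str, int), i.e. the same as get_Mapping_key_value(StrIntMap)
```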
20,045 | def get_authservers(self, domainid, page=None):
opts = {}
if page:
opts[] = page
return self.api_call(
ENDPOINTS[][],
dict(domainid=domainid), **opts) | Get Authentication servers |
20,046 | def get(self, sid):
return RoomRecordingContext(self._version, room_sid=self._solution[], sid=sid, ) | Constructs a RoomRecordingContext
:param sid: The sid
:returns: twilio.rest.video.v1.room.recording.RoomRecordingContext
:rtype: twilio.rest.video.v1.room.recording.RoomRecordingContext |
20,047 | def _get_name_filter(package, context="decorate", reparse=False):
global name_filters
pkey = (package, context)
if pkey in name_filters and not reparse:
return name_filters[pkey]
from acorn.config import settings
spack = settings(package)
sections = {
"decorate": ["tracking", "acorn.tracking"],
"time": ["timing", "acorn.timing"],
"analyze": ["analysis", "acorn.analysis"]
}
filters, rfilters = None, None
import re
if context in sections:
filters, rfilters = [], []
ignores, rignores = [], []
for section in sections[context]:
if spack.has_section(section):
options = spack.options(section)
if "filter" in options:
filters.extend(re.split(r"\s*\$\s*", spack.get(section, "filter")))
if "rfilter" in options:
pfilters = re.split(r"\s*\$\s*", spack.get(section, "rfilter"))
rfilters.extend([re.compile(p, re.I) for p in pfilters])
if "ignore" in options:
ignores.extend(re.split(r"\s*\$\s*", spack.get(section, "ignore")))
if "rignore" in options:
pignores = re.split(r"\s*\$\s*", spack.get(section, "rignore"))
rignores.extend([re.compile(p, re.I) for p in pfilters])
name_filters[pkey] = {
"filters": filters,
"rfilters": rfilters,
"ignores": ignores,
"rignores": rignores
}
else:
name_filters[pkey] = None
return name_filters[pkey] | Makes sure that the name filters for the specified package have been
loaded.
Args:
package (str): name of the package that this method belongs to.
context (str): one of ['decorate', 'time', 'analyze']; specifies which
section of the configuration settings to check. |
20,048 | def count_account(self, domain):
selector = domain.to_selector()
cos_list = self.request_list(, {: selector})
ret = []
for i in cos_list:
count = int(i[])
ret.append((zobjects.ClassOfService.from_dict(i), count))
return list(ret) | Count the number of accounts for a given domain, sorted by cos
:returns: a list of pairs <ClassOfService object>,count |
20,049 | def plot(self, colorbar=True, cb_orientation=,
cb_label=, show=True, **kwargs):
return self.geoid.plot(colorbar=colorbar,
cb_orientation=cb_orientation,
cb_label=cb_label, show=True, **kwargs) | Plot the geoid.
Usage
-----
x.plot([tick_interval, xlabel, ylabel, ax, colorbar, cb_orientation,
cb_label, show, fname, **kwargs])
Parameters
----------
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
colorbar : bool, optional, default = True
If True, plot a colorbar.
cb_orientation : str, optional, default = 'vertical'
Orientation of the colorbar: either 'vertical' or 'horizontal'.
cb_label : str, optional, default = 'geoid, m'
Text label for the colorbar.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
kwargs : optional
Keyword arguments that will be sent to the SHGrid.plot()
and plt.imshow() methods. |
20,050 | def allLocales(self):
if self._allLocales:
return self._allLocales
expr = re.compile()
locales = babel.core.localedata.locale_identifiers()
babel_locales = {}
for locale in locales:
if expr.match(locale):
babel_locale = babel.Locale.parse(locale)
if babel_locale.territory and babel_locale.language:
babel_locales[babel_locale.territory] = babel_locale
babel_locales = babel_locales.values()
babel_locales.sort(key=str)
self._allLocales = babel_locales
return self._allLocales | Returns all the locales that are defined within the babel
architecture.
:return [<str>, ..] |
20,051 | def convert_cg3_to_conll( lines, **kwargs ):
\""Rio de JaneiroRio_de_Janeiro
if not isinstance( lines, list ):
raise Exception()
fix_selfrefs = True
fix_open_punct = True
unesc_quotes = True
rep_spaces = False
error_on_unexp = False
for argName, argVal in kwargs.items() :
if argName in [, ] and argVal in [True, False]:
fix_selfrefs = argVal
if argName in [] and argVal in [True, False]:
fix_open_punct = argVal
if argName in [] and argVal in [True, False]:
error_on_unexp = argVal
if argName in [] and argVal in [True, False]:
unesc_quotes = argVal
if argName in [] and argVal in [True, False]:
rep_spaces = argVal
pat_empty_line = re.compile()
pat_token_line = re.compile()
pat_analysis_line = re.compile()
pat_ending_pos_form = re.compile()
pat_pos_form = re.compile()
pat_ending_pos = re.compile()
pat_opening_punct = re.compile()
analyses_added = 0
conll_lines = []
word_id = 1
i = 0
while ( i < len(lines) ):
line = lines[i]
if not (line.startswith() or line.startswith()):
if len(line)>0 and not (line.startswith() or \
line.startswith()) and not pat_empty_line.match(line):
if unesc_quotes:
line = line.replace( , )
if analyses_added == 0 and word_id > 1:
if error_on_unexp:
raise Exception(+str(i)++\
+lines[i-1])
else:
print(+str(i)++\
+lines[i-1], file=sys.stderr)
conll_lines[-1] +=
conll_lines[-1] +=
conll_lines[-1] +=
conll_lines[-1] +=
conll_lines[-1] += +str(word_id-2)
conll_lines[-1] +=
conll_lines[-1] +=
conll_lines[-1] +=
token_match = pat_token_line.match( line.rstrip() )
if token_match:
word = token_match.group(1)
else:
raise Exception(, line)
if rep_spaces and re.search(, word):
word = re.sub(, , word)
conll_lines.append( str(word_id) + + word )
analyses_added = 0
word_id += 1
if line.startswith():
conll_lines.append()
word_id = 1
else:
if line.count() > 2:
new_line = []
q_count = 0
for j in range( len(line) ):
if line[j]== and (j==0 or line[j-1]!=):
q_count += 1
if q_count < 3:
new_line.append(line[j])
else:
new_line.append(line[j])
line = .join( new_line )
if unesc_quotes:
line = line.replace( , )
analysis_match = pat_analysis_line.match( line )
if analysis_match and analyses_added==0:
lemma = analysis_match.group(1)
cats = analysis_match.group(2)
if cats.startswith():
postag =
else:
postag = (cats.split())[1] if len(cats.split())>1 else
deprels = re.findall( , cats )
deprel = deprels[0] if deprels else
heads = re.findall( , cats )
head = heads[0] if heads else str(word_id-2)
m1 = pat_ending_pos_form.match(cats)
m2 = pat_pos_form.match(cats)
m3 = pat_ending_pos.match(cats)
if m1:
forms = (m1.group(1)).split()
elif m2:
forms = (m2.group(1)).split()
elif m3:
forms = []
else:
if error_on_unexp:
raise Exception(+line)
else:
postag =
forms = []
print(+line, file=sys.stderr)
if fix_selfrefs and int(head) == word_id-1 and word_id-2>0:
head = str(word_id-2)
if fix_open_punct and pat_opening_punct.match(line):
head = str(word_id)
conll_lines[-1] += +lemma
conll_lines[-1] += +postag
conll_lines[-1] += +postag
conll_lines[-1] += +(.join(forms))
conll_lines[-1] += +head
conll_lines[-1] += +deprel
conll_lines[-1] +=
conll_lines[-1] +=
analyses_added += 1
i += 1
return conll_lines | Converts the output of VISL_CG3 based syntactic parsing into CONLL format.
Expects that the output has been cleaned ( via method cleanup_lines() ).
Returns a list of CONLL format lines;
Parameters
-----------
lines : list of str
The input text for the pipeline; Should be in same format as the output
of VISLCG3Pipeline;
fix_selfrefs : bool
Optional argument specifying whether self-references in syntactic
dependencies should be fixed;
Default:True
fix_open_punct : bool
Optional argument specifying whether opening punctuation marks should
be made dependents of the following token;
Default:True
unesc_quotes : bool
Optional argument specifying whether double quotes should be unescaped
in the output, i.e. converted from '\"' to '"';
Default:True
rep_spaces : bool
Optional argument specifying whether spaces in a multiword token (e.g.
'Rio de Janeiro') should be replaced with underscores ('Rio_de_Janeiro');
Default:False
error_on_unexp : bool
Optional argument specifying whether an exception should be raised in
case of missing or unexpected analysis line; if not, only prints warnings
in case of such lines;
Default:False
Example input
--------------
"<s>"
"<Öö>"
"öö" L0 S com sg nom @SUBJ #1->2
"<oli>"
"ole" Li V main indic impf ps3 sg ps af @FMV #2->0
"<täiesti>"
"täiesti" L0 D @ADVL #3->4
"<tuuletu>"
"tuuletu" L0 A pos sg nom @PRD #4->2
"<.>"
"." Z Fst CLB #5->5
"</s>"
Example output
---------------
1 Öö öö S S com|sg|nom 2 @SUBJ _ _
2 oli ole V V main|indic|impf|ps3|sg|ps|af 0 @FMV _ _
3 täiesti täiesti D D _ 4 @ADVL _ _
4 tuuletu tuuletu A A pos|sg|nom 2 @PRD _ _
5 . . Z Z Fst|CLB 4 xxx _ _ |
20,052 | def prj_create_atype(self, *args, **kwargs):
if not self.cur_prj:
return
atype = self.create_atype(projects=[self.cur_prj])
if atype:
atypedata = djitemdata.AtypeItemData(atype)
treemodel.TreeItem(atypedata, self.prj_atype_model.root) | Create a new atype for the current project
:returns: None
:rtype: None
:raises: None |
20,053 | def set_icon(self, icon):
self._icon = icon
return self._listitem.setIconImage(icon) | Sets the listitem's icon image |
20,054 | def close_comments(self, request, queryset):
queryset.update(comment_enabled=False)
self.message_user(
request, _()) | Close the comments for selected entries. |
20,055 | def report_command_error(self, error_dict):
error = dict(error_dict)
error["command"] = self.commands[error_dict["step"]]
error["target"] = self.frame
del error["index"]
del error["step"]
self.errors.append(error) | Report a server error executing a command.
We keep track of the command's position in the command list,
and we add annotation of what the command was, to the error.
:param error_dict: The server's error dict for the error encountered |
20,056 | def get(self, obj, cls):
method = self._cache.get(cls)
if not method:
name = "visit_" + cls.__name__.lower()
method = getattr(obj, name, obj.visit_default)
self._cache[cls] = method
return method | Using the lowercase name of the class as node_type, returns `obj.visit_{node_type}`,
or `obj.visit_default` if the type-specific method is not found. |
20,057 | def has_parser(self, url_info: URLInfo):
key = self.url_info_key(url_info)
return key in self._parsers | Return whether a parser has been created for the URL. |
20,058 | def min_base_quality(self):
try:
return min(self.base_qualities)
except ValueError:
assert self.offset_start == self.offset_end
adjacent_qualities = [
self.alignment.query_qualities[offset]
for offset in [self.offset_start - 1, self.offset_start]
if 0 <= offset < len(self.alignment.query_qualities)
]
return min(adjacent_qualities) | The minimum of the base qualities. In the case of a deletion, in which
case there are no bases in this PileupElement, the minimum is taken
over the sequenced bases immediately before and after the deletion. |
20,059 | def checkValidCell(self, index):
col = index.column()
row = index.row()
return self.model.isFieldValid(row, self._headers[index.column()]) | Asks the model if the value at *index* is valid
See :meth:`isFieldValid<sparkle.stim.auto_parameter_model.AutoParameterModel.isFieldValid>` |
20,060 | def _add_sj_index_commands(fq1, ref_file, gtf_file):
if _has_sj_index(ref_file):
return ""
else:
rlength = fastq.estimate_maximum_read_length(fq1)
cmd = " --sjdbGTFfile %s " % gtf_file
cmd += " --sjdbOverhang %s " % str(rlength - 1)
return cmd | newer versions of STAR can generate splice junction databases on the fly;
this is preferable since we can tailor it to the read lengths |
20,061 | def _parse_plan(self, match):
expected_tests = int(match.group("expected"))
directive = Directive(match.group("directive"))
if directive.text and not directive.skip:
return Unknown()
return Plan(expected_tests, directive) | Parse a matching plan line. |
20,062 | def _read_stdin():
line = sys.stdin.readline()
while line:
yield line
line = sys.stdin.readline() | Generator for reading from standard input in nonblocking mode.
Other ways of reading from ``stdin`` in Python wait until the buffer is
big enough, or until an EOF character is sent.
This function yields immediately after each line. |
20,063 | def toggle_aggregation_layer_combo(self):
selected_hazard_layer = layer_from_combo(self.hazard_layer_combo)
selected_exposure_layer = layer_from_combo(self.exposure_layer_combo)
if ((self.aggregation_layer_combo.count() > 1)
and (selected_hazard_layer is not None)
and (selected_exposure_layer is not None)):
self.aggregation_layer_combo.setEnabled(True)
else:
self.aggregation_layer_combo.setCurrentIndex(0)
self.aggregation_layer_combo.setEnabled(False) | Toggle the aggregation combo enabled status.
Whether the combo is toggled on or off will depend on the current dock
status. |
20,064 | def _add_method_setting(self, conn, api_id, stage_name, path, key, value,
op):
# log message reconstructed; the original string literal was lost in extraction
logger.debug('Updating method setting (%s): %s => %s',
op, path, str(value))
res = conn.update_stage(
restApiId=api_id,
stageName=stage_name,
patchOperations=[
{
'op': op,
'path': path,
'value': str(value)
}
]
)
# NOTE: the response-key lookup and log messages below are reconstructed;
# the original string literals were lost in extraction.
if res['methodSettings']['*/*'][key] != value:
logger.error('Failed setting %s to %s; stage reports %s', key, value,
res['methodSettings']['*/*'][key])
else:
logger.info('Successfully set %s to %s',
key, value) | Update a single method setting on the specified stage. This uses the
'add' operation to PATCH the resource.
:param conn: APIGateway API connection
:type conn: :py:class:`botocore:APIGateway.Client`
:param api_id: ReST API ID
:type api_id: str
:param stage_name: stage name
:type stage_name: str
:param path: path to patch (see https://docs.aws.amazon.com/apigateway/\
api-reference/resource/stage/#methodSettings)
:type path: str
:param key: the dictionary key this should update
:type key: str
:param value: new value to set
:param op: PATCH operation to perform, 'add' or 'replace'
:type op: str |
20,065 | def siret_validator():
def _validate_siret(form, field, siret=""):
if field is not None:
siret = (field.data or "").strip()
if len(siret) != 14:
msg = _("SIRET must have exactly 14 characters ({count})").format(
count=len(siret)
)
raise validators.ValidationError(msg)
if not all(("0" <= c <= "9") for c in siret):
if not siret[-3:] in SIRET_CODES:
msg = _(
"SIRET looks like special SIRET but geographical "
"code seems invalid (%(code)s)",
code=siret[-3:],
)
raise validators.ValidationError(msg)
elif not luhn(siret):
msg = _("SIRET number is invalid (length is ok: verify numbers)")
raise validators.ValidationError(msg)
return _validate_siret | Validate a SIRET: check its length (14), its final code, and pass it
through the Luhn algorithm. |
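A standalone sketch of the Luhn check this validator relies on; it is not the project's actual luhn helper, just the standard algorithm for reference.
def luhn_valid(number: str) -> bool:
    # Standard Luhn: from the rightmost digit, double every second digit,
    # subtract 9 when the doubled value exceeds 9, and sum everything.
    total = 0
    for i, ch in enumerate(reversed(number)):
        d = int(ch)
        if i % 2 == 1:
            d = d * 2 - 9 if d * 2 > 9 else d * 2
        total += d
    return total % 10 == 0

# A plain SIRET passes when it has 14 digits and a Luhn checksum of 0;
# "special" SIRETs are instead matched against the SIRET_CODES geographic codes.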
20,066 | def save_subresource(self, subresource):
data = deepcopy(subresource._resource)
# NOTE: popped keys reconstructed; the original string literals were lost
data.pop('id', None)
data.pop(self.resource_type + '_id', None)
subresources = getattr(self, subresource.parent_key, {})
subresources[subresource.id] = data
setattr(self, subresource.parent_key, subresources)
yield self._save() | Save the sub-resource
NOTE: Currently assumes subresources are stored within a dictionary,
keyed with the subresource's ID |
20,067 | def scan(self,
table_name,
index_name=None,
consistent_read=None,
projection_expression=None,
filter_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
segment=None,
total_segments=None,
select=None,
limit=None,
exclusive_start_key=None,
return_consumed_capacity=None):
payload = {'TableName': table_name}
if index_name:
payload['IndexName'] = index_name
if consistent_read is not None:
payload['ConsistentRead'] = consistent_read
if filter_expression:
payload['FilterExpression'] = filter_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if projection_expression:
payload['ProjectionExpression'] = projection_expression
if segment:
payload['Segment'] = segment
if total_segments:
payload['TotalSegments'] = total_segments
if select:
_validate_select(select)
payload['Select'] = select
if exclusive_start_key:
payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
if limit:
payload['Limit'] = limit
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
return self.execute('Scan', payload) | The `Scan`_ operation returns one or more items and item attributes
by accessing every item in a table or a secondary index.
If the total number of scanned items exceeds the maximum data set size
limit of 1 MB, the scan stops and results are returned to the user as a
``LastEvaluatedKey`` value to continue the scan in a subsequent
operation. The results also include the number of items exceeding the
limit. A scan can result in no table data meeting the filter criteria.
By default, Scan operations proceed sequentially; however, for faster
performance on a large table or secondary index, applications can
request a parallel *Scan* operation by providing the ``segment`` and
``total_segments`` parameters. For more information, see
`Parallel Scan <http://docs.aws.amazon.com/amazondynamodb/latest/
developerguide/QueryAndScan.html#QueryAndScanParallelScan>`_ in the
Amazon DynamoDB Developer Guide.
By default, *Scan* uses eventually consistent reads when accessing the
data in a table; therefore, the result set might not include the
changes to data in the table immediately before the operation began. If
you need a consistent copy of the data, as of the time that the *Scan*
begins, you can set the ``consistent_read`` parameter to ``True``.
:rtype: dict
.. _Scan: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_Scan.html |
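A hedged usage sketch of the wrapper above for a parallel scan; client construction is omitted, the table and filter are hypothetical, and the response is assumed to be the parsed dict (in the real library execute() may hand back a future that must be resolved first).
def scan_whole_segment(client, segment, total_segments):
    # Each worker scans one segment; LastEvaluatedKey drives pagination.
    items, start_key = [], None
    while True:
        result = client.scan(
            'my-table',                                   # hypothetical table name
            filter_expression='attribute_exists(#s)',
            expression_attribute_names={'#s': 'status'},
            segment=segment,
            total_segments=total_segments,
            exclusive_start_key=start_key,
        )
        items.extend(result.get('Items', []))
        start_key = result.get('LastEvaluatedKey')
        if not start_key:
            return items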
20,068 | def run(name,
cmd,
container_type=None,
exec_driver=None,
output=None,
no_start=False,
stdin=None,
python_shell=True,
output_loglevel='debug',
ignore_retcode=False,
path=None,
use_vt=False,
keep_env=None):
valid_output = ('stdout', 'stderr', 'retcode', 'all')
if output is None:
cmd_func = 'cmd.run'
elif output not in valid_output:
raise SaltInvocationError(
'\'output\' param is invalid, must be one of: {0}'
.format(', '.join(valid_output))
)
else:
cmd_func = 'cmd.run_all'
if keep_env is None or isinstance(keep_env, bool):
to_keep = []
elif not isinstance(keep_env, (list, tuple)):
try:
to_keep = keep_env.split(',')
except AttributeError:
log.warning('Invalid keep_env value, ignoring')
to_keep = []
else:
to_keep = keep_env
if exec_driver == 'lxc-attach':
full_cmd = 'lxc-attach '
if path:
full_cmd += '-P {0} '.format(pipes.quote(path))
if keep_env is not True:
full_cmd += '--clear-env '
if 'PATH' not in to_keep:
full_cmd += '--set-var {0} '.format(PATH)
full_cmd += ' '.join(
['--set-var {0}={1}'.format(x, pipes.quote(os.environ[x]))
for x in to_keep
if x in os.environ]
)
full_cmd += ' -n {0} -- {1}'.format(pipes.quote(name), cmd)
elif exec_driver == 'nsenter':
pid = __salt__['{0}.pid'.format(container_type)](name)
full_cmd = (
'nsenter --target {0} --mount --uts --ipc --net --pid -- '
.format(pid)
)
if keep_env is not True:
full_cmd += 'env -i '
if 'PATH' not in to_keep:
full_cmd += '{0} '.format(PATH)
full_cmd += ' '.join(
['{0}={1}'.format(x, pipes.quote(os.environ[x]))
for x in to_keep
if x in os.environ]
)
full_cmd += ' {0}'.format(cmd)
elif exec_driver == 'docker-exec':
full_cmd = 'docker exec '
if stdin:
full_cmd += '-i '
full_cmd += '{0} '.format(name)
if keep_env is not True:
full_cmd += 'env -i '
if 'PATH' not in to_keep:
full_cmd += '{0} '.format(PATH)
full_cmd += ' '.join(
['{0}={1}'.format(x, pipes.quote(os.environ[x]))
for x in to_keep
if x in os.environ]
)
full_cmd += ' {0}'.format(cmd)
if not use_vt:
ret = __salt__[cmd_func](full_cmd,
stdin=stdin,
python_shell=python_shell,
output_loglevel=output_loglevel,
ignore_retcode=ignore_retcode)
else:
stdout, stderr = '', ''
proc = salt.utils.vt.Terminal(
full_cmd,
shell=python_shell,
log_stdin_level='quiet' if output_loglevel == 'quiet' else 'info',
log_stdout_level=output_loglevel,
log_stderr_level=output_loglevel,
log_stdout=True,
log_stderr=True,
stream_stdout=False,
stream_stderr=False
)
try:
while proc.has_unread_data:
try:
cstdout, cstderr = proc.recv()
if cstdout:
stdout += cstdout
if cstderr:
if output is None:
stdout += cstderr
else:
stderr += cstderr
time.sleep(0.5)
except KeyboardInterrupt:
break
ret = stdout if output is None \
else {'retcode': proc.exitstatus,
'pid': 2,
'stdout': stdout,
'stderr': stderr}
except salt.utils.vt.TerminalException:
trace = traceback.format_exc()
log.error(trace)
ret = stdout if output is None \
else {'retcode': 127,
'pid': 2,
'stdout': stdout,
'stderr': stderr}
finally:
proc.terminate()
return ret | Common logic for running shell commands in containers
path
path to the container parent (for LXC only)
default: /var/lib/lxc (system default)
CLI Example:
.. code-block:: bash
salt myminion container_resource.run mycontainer 'ps aux' container_type=docker exec_driver=nsenter output=stdout |
20,069 | def resolve_address(endpoint_type=PUBLIC, override=True):
resolved_address = None
if override:
resolved_address = _get_address_override(endpoint_type)
if resolved_address:
return resolved_address
vips = config('vip')
if vips:
vips = vips.split()
net_type = ADDRESS_MAP[endpoint_type]['config']
net_addr = config(net_type)
net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
binding = ADDRESS_MAP[endpoint_type]['binding']
clustered = is_clustered()
if clustered and vips:
if net_addr:
for vip in vips:
if is_address_in_network(net_addr, vip):
resolved_address = vip
break
else:
try:
bound_cidr = resolve_network_cidr(
network_get_primary_address(binding)
)
for vip in vips:
if is_address_in_network(bound_cidr, vip):
resolved_address = vip
break
except (NotImplementedError, NoNetworkBinding):
resolved_address = vips[0]
else:
if config('prefer-ipv6'):
fallback_addr = get_ipv6_addr(exc_list=vips)[0]
else:
fallback_addr = unit_get(net_fallback)
if net_addr:
resolved_address = get_address_in_network(net_addr, fallback_addr)
else:
try:
resolved_address = network_get_primary_address(binding)
except (NotImplementedError, NoNetworkBinding):
resolved_address = fallback_addr
if resolved_address is None:
raise ValueError("Unable to resolve a suitable IP address based on "
"charm state and configuration. (net_type=%s, "
"clustered=%s)" % (net_type, clustered))
return resolved_address | Return unit address depending on net config.
If unit is clustered with vip(s) and has net splits defined, return vip on
correct network. If clustered with no nets defined, return primary vip.
If not clustered, return unit address ensuring address is on configured net
split if one is configured, or a Juju 2.0 extra-binding has been used.
:param endpoint_type: Network endpoint type
:param override: Accept hostname overrides or not |
20,070 | def count(self, conn, filters):
pipe, intersect, temp_id = self._prepare(conn, filters)
pipe.zcard(temp_id)
pipe.delete(temp_id)
return pipe.execute()[-2] | Returns the count of the items that match the provided filters.
For the meaning of what the ``filters`` argument means, see the
``.search()`` method docs. |
20,071 | def rank_subgraph_by_node_filter(graph: BELGraph,
node_predicates: Union[NodePredicate, Iterable[NodePredicate]],
annotation: str = 'Subgraph',
reverse: bool = True,
) -> List[Tuple[str, int]]:
r1 = group_nodes_by_annotation_filtered(graph, node_predicates=node_predicates, annotation=annotation)
r2 = count_dict_values(r1)
return sorted(r2.items(), key=itemgetter(1), reverse=reverse) | Rank sub-graphs by how many of their nodes match a given filter.
A use case for this function would be to identify which subgraphs contain the most differentially expressed
genes.
>>> from pybel import from_pickle
>>> from pybel.constants import GENE
>>> from pybel_tools.integration import overlay_type_data
>>> from pybel_tools.summary import rank_subgraph_by_node_filter
>>> import pandas as pd
>>> graph = from_pickle('~/dev/bms/aetionomy/alzheimers.gpickle')
>>> df = pd.read_csv('~/dev/bananas/data/alzheimers_dgxp.csv', columns=['Gene', 'log2fc'])
>>> data = {gene: log2fc for _, gene, log2fc in df.itertuples()}
>>> overlay_type_data(graph, data, 'log2fc', GENE, 'HGNC', impute=0.0)
>>> results = rank_subgraph_by_node_filter(graph, lambda g, n: 1.3 < abs(g[n]['log2fc'])) |
20,072 | def verify2(self, atv_public_key, data):
self._check_initialized()
# NOTE: the log labels and the signature prefix below are reconstructed;
# the original string literals were lost in extraction.
log_binary(_LOGGER, 'Verify2 input', PublicSecret=atv_public_key, Data=data)
public = curve25519.Public(atv_public_key)
shared = self._verify_private.get_shared_key(
public, hashfunc=lambda x: x)
log_binary(_LOGGER, 'Shared secret', Secret=shared)
aes_key = hash_sha512('Pair-Verify-AES-Key', shared)[0:16]
aes_iv = hash_sha512('Pair-Verify-AES-IV', shared)[0:16]
log_binary(_LOGGER, 'AES', Key=aes_key, IV=aes_iv)
signer = SigningKey(self._auth_private)
signed = signer.sign(self._verify_public.serialize() + atv_public_key)
signature, _ = aes_encrypt(modes.CTR, aes_key, aes_iv, data, signed)
log_binary(_LOGGER, 'Signature', Signature=signature)
return b'\x00\x00\x00\x00' + signature | Last device verification step. |
20,073 | def define_batch_env(constructor, num_agents, env_processes):
with tf.variable_scope('environments'):
if env_processes:
envs = [
tools.wrappers.ExternalProcess(constructor)
for _ in range(num_agents)]
else:
envs = [constructor() for _ in range(num_agents)]
batch_env = tools.BatchEnv(envs, blocking=not env_processes)
batch_env = tools.InGraphBatchEnv(batch_env)
return batch_env | Create environments and apply all desired wrappers.
Args:
constructor: Constructor of an OpenAI gym environment.
num_agents: Number of environments to combine in the batch.
env_processes: Whether to step environment in external processes.
Returns:
In-graph environments object. |
20,074 | def p_gate_op_3(self, program):
program[0] = node.CustomUnitary([program[1], program[4]])
self.verify_as_gate(program[1], program[4])
self.verify_bit_list(program[4])
self.verify_distinct([program[4]]) | gate_op : id '(' ')' id_list ';' |
20,075 | def get_operation_ast(
document_ast: DocumentNode, operation_name: Optional[str] = None
) -> Optional[OperationDefinitionNode]:
operation = None
for definition in document_ast.definitions:
if isinstance(definition, OperationDefinitionNode):
if not operation_name:
if operation:
return None
operation = definition
elif definition.name and definition.name.value == operation_name:
return definition
return operation | Get operation AST node.
Returns an operation AST given a document AST and optionally an operation
name. If a name is not provided, an operation is only returned if only one
is provided in the document. |
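A short usage sketch, assuming graphql-core's parse() is available alongside this helper.
from graphql import parse

doc = parse("""
    query GetUser { user { id } }
    mutation AddUser { addUser { id } }
""")

# Two operations and no name: there is no unambiguous default, so None is returned.
assert get_operation_ast(doc) is None
# Naming the operation selects it explicitly.
op = get_operation_ast(doc, "AddUser")
assert op is not None and op.name.value == "AddUser"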
20,076 | def append_to_path (path, directory):
if not os.path.isdir(directory) or directory in path:
return path
if not path.endswith(os.pathsep):
path += os.pathsep
return path + directory | Add a directory to the PATH environment variable, if it is a valid
directory. |
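A small usage sketch; the directory shown is just an example.
import os

# Existing entries and non-directories are left alone; a valid new directory
# is appended using the platform's os.pathsep.
os.environ["PATH"] = append_to_path(os.environ.get("PATH", ""), "/usr/local/bin")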
20,077 | def get_iso_time(date_part, time_part):
# NOTE: the strptime/strftime format strings are reconstructed assumptions;
# the originals were lost in extraction.
str_date = datetime.datetime.strptime(
date_part, '%m/%d/%Y').strftime('%Y-%m-%d')
str_time = datetime.datetime.strptime(
time_part, '%I:%M %p').strftime('%H:%M:%S')
return str_date + "T" + str_time + "-7:00" | Combine date and time into an ISO datetime. |
20,078 | def shutdown(self):
if not process.proc_alive(self.proc):
return
logger.info("Attempting to connect to %s", self.hostname)
client = self.connection
attempts = 2
for i in range(attempts):
logger.info("Attempting to send shutdown command to %s",
self.hostname)
try:
client.admin.command("shutdown", force=True)
except ConnectionFailure:
pass
try:
return process.wait_mprocess(self.proc, 5)
except TimeoutError as exc:
logger.info("Timed out waiting on process: %s", exc)
continue
raise ServersError("Server %s failed to shutdown after %s attempts" %
(self.hostname, attempts)) | Send shutdown command and wait for the process to exit. |
20,079 | def check_environment_temperature(the_session, the_helper, the_snmp_value, the_unit=1):
a_snmp_unit = snmpSessionBaseClass.get_data(
the_session,
apc_oid_environment_temperature_unit,
the_helper)
# unit labels reconstructed from the OID description: Celsius(1) or Fahrenheit(2)
snmp_units = {
'1': 'C',
'2': 'F'
}
a_unit = snmp_units.get(a_snmp_unit, '')
the_helper.add_metric(
label=the_helper.options.type,
value=the_snmp_value,
warn=the_helper.options.warning,
crit=the_helper.options.critical,
uom=a_unit)
the_helper.check_all_metrics()
the_helper.set_summary("Current environmental temperature is {}{}".format(the_snmp_value, a_unit)) | OID .1.3.6.1.4.1.318.1.1.10.2.3.2.1.4.1
MIB Excerpt
The current temperature reading from the probe displayed
in the units shown in the 'iemStatusProbeTempUnits' OID
(Celsius or Fahrenheit).
Description of unit OID
OID .1.3.6.1.4.1.318.1.1.10.2.3.2.1.5
The temperature scale used to display the temperature
thresholds of the probe, Celsius(1) or Fahrenheit(2).
This setting is based on the system preferences
configuration in the agent. |
20,080 | def ConvertFromWireFormat(self, value, container=None):
result = self.type()
ReadIntoObject(value[2], 0, result)
return result | The wire format is simply a string. |
20,081 | def get_request_token(
cls, consumer_key, redirect_uri='http://example.com/', state=None
):
headers = {
'X-Accept': 'application/json',
}
url = 'https://getpocket.com/v3/oauth/request'
payload = {
'consumer_key': consumer_key,
'redirect_uri': redirect_uri,
}
if state:
payload['state'] = state
return cls._make_request(url, payload, headers)[0]['code'] | Returns the request token that can be used to fetch the access token |
20,082 | def do_drawing(self, size, frame, cairo_ctx):
if self.get_window() and not self.bot_size:
self.set_size_request(*size)
self.bot_size = size
self.backing_store = BackingStore.get_backingstore(self.width, self.height)
cr = pycairo.Context(self.backing_store.surface)
if self.scale_fit:
self.scale_context_and_center(cr)
cairo_ctx = driver.ensure_pycairo_context(cairo_ctx)
cr.set_source_surface(cairo_ctx.get_target())
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.paint()
self.queue_draw()
while Gtk.events_pending():
Gtk.main_iteration_do(False) | Update the backing store from a cairo context and
schedule a redraw (expose event)
:param size: width, height in pixels of bot
:param frame: frame # that was drawn
:param cairo_ctx: cairo context the bot was drawn on |
20,083 | def prefix_fragment(self, nid):
sep = '_'
if nid.startswith('http'):
if '#' in nid:
sep = '#'
else:
sep = '/'
parts = nid.split(sep)
frag = parts.pop()
prefix = sep.join(parts)
return prefix, frag | Return prefix and fragment/localid for a node |
20,084 | def response(request, status, obj):
request.setResponseCode(status)
request.responseHeaders.setRawHeaders(
u"content-type", [u"application/json"],
)
body = dumps_bytes(obj)
return body | Generate a response.
:param IRequest request: The request being responsed to.
:param int status: The response status code to set.
:param obj: Something JSON-dumpable to write into the response body.
:return bytes: The response body to write out. eg, return this from a
*render_* method. |
20,085 | def use_in(ContentHandler):
ContentHandler = table.use_in(ContentHandler)
def startTable(self, parent, attrs, __orig_startTable = ContentHandler.startTable):
name = table.StripTableName(attrs[u"Name"])
if name in TableByName:
return TableByName[name](attrs)
return __orig_startTable(self, parent, attrs)
ContentHandler.startTable = startTable
return ContentHandler | Modify ContentHandler, a sub-class of
pycbc_glue.ligolw.LIGOLWContentHandler, to cause it to use the Table
classes defined in this module when parsing XML documents.
Example:
>>> from pycbc_glue.ligolw import ligolw
>>> class MyContentHandler(ligolw.LIGOLWContentHandler):
... pass
...
>>> use_in(MyContentHandler)
<class 'pycbc_glue.ligolw.lsctables.MyContentHandler'> |
20,086 | def isOriginalLocation(attr):
sourceModule = inspect.getmodule(attr.load())
if sourceModule is None:
return False
currentModule = attr
while not isinstance(currentModule, PythonModule):
currentModule = currentModule.onObject
return currentModule.name == sourceModule.__name__ | Attempt to discover if this appearance of a PythonAttribute
representing a class refers to the module where that class was
defined. |
20,087 | def next_chunk_boundaries_levels(self, buf, prepend_bytes=0):
boundaries = {}
for level_index, chunker in enumerate(self._chunkers):
boundaries.update(
dict([(boundary, level_index) for boundary in chunker.next_chunk_boundaries(buf, prepend_bytes)]))
return sorted(boundaries.items()) | Computes the next chunk boundaries within `buf`.
Similar to :meth:`.next_chunk_boundaries`, but information about which chunker led to a respective boundary is
included in the returned value.
Args:
buf (bytes): The message that is to be chunked.
prepend_bytes (Optional[int]): Optional number of zero bytes that should be input to the chunking algorithm
before `buf`.
Returns:
list: List of tuples (boundary, level), where boundary is a boundary position relative to `buf` and level is
the index of the chunker (i.e., the index of its chunk size specified during instantiation) that yielded
the boundary.
If multiple chunkers yield the same boundary, it is returned only once, along with the highest matching
chunker index. |
20,088 | def altitudes_send(self, time_boot_ms, alt_gps, alt_imu, alt_barometric, alt_optical_flow, alt_range_finder, alt_extra, force_mavlink1=False):
return self.send(self.altitudes_encode(time_boot_ms, alt_gps, alt_imu, alt_barometric, alt_optical_flow, alt_range_finder, alt_extra), force_mavlink1=force_mavlink1) | The altitude measured by sensors and IMU
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
alt_gps : GPS altitude in meters, expressed as * 1000 (millimeters), above MSL (int32_t)
alt_imu : IMU altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t)
alt_barometric : barometeric altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t)
alt_optical_flow : Optical flow altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t)
alt_range_finder : Rangefinder Altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t)
alt_extra : Extra altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t) |
20,089 | def xml2object(self, content):
content = self.xml_filter(content)
element = ET.fromstring(content)
tree = self.parse(element) if self.__options[] else self.parse_full(element)
if not self.__options[]:
node = self.get_node(element)
if not self.__options[]:
tree[] = node[]
return {node[]: tree}
return tree | Convert xml content to python object.
:param content: xml content
:rtype: dict
.. versionadded:: 1.2 |
20,090 | def newer(new_ver, old_ver, strict=False):
if old_ver == new_ver or old_ver + (0,) == new_ver:
return False
for n, o in zip(new_ver, old_ver):
if not isinstance(n, int):
o = str(o)
if o < n:
return True
elif o > n:
return False
return not strict | Determines if the first version tuple is newer than the second.
True if newer, False if older, None if difference is after specified version parts. |
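A few behaviour checks derived directly from the comparison rules above.
assert newer((1, 2, 1), (1, 2, 0)) is True          # later patch part wins
assert newer((1, 2, 0), (1, 2)) is False            # a trailing zero counts as equal
assert newer((1, 2), (1, 2), strict=True) is False  # identical versions are never newer
assert newer((1, 2, 0), (1, 3, 0)) is False         # older than the reference
assert newer((1, 2, 'b'), (1, 2)) is True           # parts beyond the shared prefix fall back to `not strict`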
20,091 | def chords(chord_labels, intervals, fs, **kwargs):
util.validate_intervals(intervals)
roots, interval_bitmaps, _ = chord.encode_many(chord_labels)
chromagram = np.array([np.roll(interval_bitmap, root)
for (interval_bitmap, root)
in zip(interval_bitmaps, roots)]).T
return chroma(chromagram, intervals, fs, **kwargs) | Synthesizes chord labels
Parameters
----------
chord_labels : list of str
List of chord label strings.
intervals : np.ndarray, shape=(len(chord_labels), 2)
Start and end times of each chord label
fs : int
Sampling rate to synthesize at
kwargs
Additional keyword arguments to pass to
:func:`mir_eval.sonify.time_frequency`
Returns
-------
output : np.ndarray
Synthesized chord labels |
20,092 | def smartplugs(self):
return [SmartPlug(self, plug.get())
for plug in self._state.get(,
{}).get(, [])
if plug.get()] | :return: A list of smartplug objects. |
20,093 | def setHandler(self,handler,cbfn):
if handler == "async-responses":
self.async_responses_callback = cbfn
elif handler == "registrations-expired":
self.registrations_expired_callback = cbfn
elif handler == "de-registrations":
self.de_registrations_callback = cbfn
elif handler == "reg-updates":
self.reg_updates_callback = cbfn
elif handler == "registrations":
self.registrations_callback = cbfn
elif handler == "notifications":
self.notifications_callback = cbfn
else:
self.log.warn("%s is not a legitimate notification channel option. Please check your spelling.", handler) | Register a handler for a particular notification type.
These are the types of notifications that are acceptable.
| 'async-responses'
| 'registrations-expired'
| 'de-registrations'
| 'reg-updates'
| 'registrations'
| 'notifications'
:param str handler: name of the notification type
:param fnptr cbfn: function to pass the notification channel messages to.
:return: Nothing. |
20,094 | def getHelpFileAsString(taskname,taskpath):
pathsplit=os.path.split(taskpath)
# NOTE: condition reconstructed (dotted task names keep only the final part);
# the original line was lost in extraction.
if '.' in taskname:
helpname = taskname.split('.')[-1]
else:
helpname = taskname
localdir = pathsplit[0]
if localdir == '':
localdir = '.'
helpfile=rglob(localdir,helpname+".help")[0]
if os.access(helpfile,os.R_OK):
fh=open(helpfile,'r')
ss=fh.readlines()
fh.close()
helpString=""
for line in ss:
helpString+=line
else:
helpString=''
return helpString | This functions will return useful help as a string read from a file
in the task's installed directory called "<module>.help".
If no such file can be found, it will simply return an empty string.
Notes
-----
The location of the actual help file will be found under the task's
installed directory using 'irafutils.rglob' to search all sub-dirs to
find the file. This allows the help file to be either in the tasks
installed directory or in any sub-directory, such as a "help/" directory.
Parameters
----------
taskname: string
Value of `__taskname__` for a module/task
taskpath: string
Value of `__file__` for an installed module which defines the task
Returns
-------
helpString: string
multi-line string read from the file '<taskname>.help' |
20,095 | def OnLineWidth(self, event):
linewidth_combobox = event.GetEventObject()
idx = event.GetInt()
width = int(linewidth_combobox.GetString(idx))
borders = self.bordermap[self.borderstate]
post_command_event(self, self.BorderWidthMsg, width=width,
borders=borders) | Line width choice event handler |
20,096 | def ogrn(self) -> str:
numbers = []
for _ in range(0, 12):
numbers.append(self.random.randint(1 if _ == 0 else 0, 9))
ogrn = ''.join([str(x) for x in numbers])
check_sum = str(int(ogrn) % 11 % 10)
return '{}{}'.format(ogrn, check_sum) | Generate random valid ``OGRN``.
:return: OGRN.
:Example:
4715113303725. |
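A standalone sketch of the OGRN check-digit rule used above (last digit = first 12 digits mod 11 mod 10); it is independent of the provider class the row's method belongs to.
import random

def is_valid_ogrn(ogrn: str) -> bool:
    # A 13-digit OGRN is valid when its last digit matches int(body) % 11 % 10.
    return len(ogrn) == 13 and ogrn.isdigit() and int(ogrn[-1]) == int(ogrn[:12]) % 11 % 10

def make_ogrn() -> str:
    # Mirror of the generator above, without the provider class.
    body = str(random.randint(1, 9)) + ''.join(str(random.randint(0, 9)) for _ in range(11))
    return body + str(int(body) % 11 % 10)

assert is_valid_ogrn(make_ogrn())
assert is_valid_ogrn("4715113303725")  # the docstring's example value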
20,097 | def read(self, n=None):
response = ""
while n is None or n > 0:
c = self.stream.read(1)
if c == "":
break
elif c == "<":
c += self.stream.read(1)
if c == "<?":
while True:
q = self.stream.read(1)
if q == ">":
break
else:
response += c
if n is not None:
n -= len(c)
else:
response += c
if n is not None:
n -= 1
return response | Read at most *n* characters from this stream.
If *n* is ``None``, return all available characters. |
20,098 | def lmfit_parameters(self):
p0 = []
for param in self.fitting_parameters:
opts = param['lmfit'].copy() if 'lmfit' in param else {}
if 'min' in opts: opts['min'] = prefix_factor(param) * opts['min']
if 'max' in opts: opts['max'] = prefix_factor(param) * opts['max']
p0.append((prefix_factor(param) * param['value'], opts))
params = lmfit.Parameters()
for p in zip(itertools.count(), p0):
params.add('p_' + "%05d" % p[0], value=p[1][0], **p[1][1])
return params | A [`lmfit.Parameters`][1] object built from `scipy_data_fitting.Fit.fitting_parameters`,
see `scipy_data_fitting.Fit.parameters`.
Each parameters is assigned a key of the form `p_00000`, `p_00001`, `p_00002`, etc.
Thus, `sorted(self.lmfit_parameters)` will give the keys in the same
order defined by `scipy_data_fitting.Fit.fitting_parameters`.
Parameter values are scaled by `prefix` before assignment.
The values of `min` and `max`, if specified in the `lmfit` key,
will be scaled by `prefix` before being used to add the parameter.
[1]: http://lmfit.github.io/lmfit-py/parameters.html#the-parameters-class |
20,099 | def make_label_index(self, stream_item):
labels = stream_item.body.labels.get(self.annotator_id)
if not labels:
labels = []
self.label_index = SortedCollection(
[l for l in labels if OffsetType.CHARS in l.offsets],
key=lambda label: label.offsets[OffsetType.CHARS].first) | make a sortedcollection on body.labels |