lang (stringclasses, 10 values) | seed (stringlengths, 5–2.12k)
---|---|
python |
df = DataFrame([[1, 2], [3, 4]], columns=['odds', 'evens'])
# This is how you can create your own labels on the fly
@test(EXAMPLE_LABEL_NAME, "My custom label test")
def test_custom_label():
    assert True
    return df
|
python | object.add_method(state, 'get_element3d_salinity')
object.add_method(state, 'get_element3d_density')
object.add_method('EDIT', 'set_node_coriolis_f')
object.add_method('EDIT', 'set_node_barotropic_vel')
object.add_method('EDIT', 'set_node_surface_state')
object.add_method('EDIT', 'set_node3d_velocity_xvel')
object.add_method('EDIT', 'set_node3d_velocity_yvel') |
python | self.check_if_cmd_runs(figure_dir, "fig4b")
def test_fig4c(self):
    self.check_if_cmd_runs(figure_dir, "fig4c")
def test_fig5a(self):
    self.check_if_cmd_runs(figure_dir, "fig5a")
def test_fig5b(self):
    self.check_if_cmd_runs(figure_dir, "fig5b")
def test_fig5c(self):
    self.check_if_cmd_runs(figure_dir, "fig5c")
def test_fig5d(self): |
python | match 42:
    case x:
        y
    case 42:
        y
z |
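The seed above is deliberately ill-formed: `case x` is an irrefutable capture pattern, and CPython raises a SyntaxError for any pattern that follows one. For reference, a minimal sketch of the valid ordering (literals first, capture last):

```python
def describe(value):
    match value:
        case 42:
            return "the answer"
        case x:  # the capture pattern must come last
            return f"got {x!r}"

print(describe(42))  # the answer
print(describe(7))   # got 7
```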
python | # and the second one as update.
elif type(res) is tuple and len(res) == 2 and type(res[1]) is dict:
    new_values_in_this_phase[key] = res[0]
    new_values_in_this_phase.update(res[1])
# Otherwise, it is not a valid result. |
python |
@app.route('/')
@app.route('/quotes/')
@app.route('/quotes/<int:quote_id>')
def show_quotes(quote_id=None):
    if quote_id:
        quote = query_db('SELECT id, quote, author FROM quotes WHERE id = ?', [quote_id], one=True)
        return render_template('show_quote.html', q=quote)
    quotes = query_db('SELECT id, quote, author FROM quotes ORDER BY id DESC') |
python | for k in rp:
    pw.append(pow(pw0, k, MODULE))
print('Primitive roots:', sorted(pw))
|
python |
    prob = models.FloatField(default=0.0)
    manual = models.BooleanField(default=False)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        abstract = True

class Category(Label):
    objects = CategoryManager()
    example = models.ForeignKey(
        to=Example, |
python |
        data = pd.read_csv(file_path, encoding='utf-8')
        return data

    def accuracy(self, label, predict):
        '''
        :param label: the observed values
        :param predict: the predicted values
        :return: None; prints the mean absolute error (MAE)
        '''
        error = label - predict
        average_error = np.mean(np.fabs(error.astype(float)))
        print("mae is : %.6f" % average_error) |
python |
    return padded

def random_cei(formatted=True):
    """Create a random, valid CEI identifier."""
    uf = random.randint(11, 53) |
python | #
# In addition to the permissions in the GNU General Public License, the
# authors give you unlimited permission to link or embed the compiled
# version of this file into combinations with other programs, and to
# distribute those combinations without any restriction coming from the
# use of this file. (The General Public License restrictions do apply in
# other respects; for example, they cover modification of the file, and
# distribution when not linked into a combined executable.)
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of |
python | from django.urls import path
from public.views import index
app_name = "public"
urlpatterns = [
path("", index, name="index"),
]
|
python | commands_with_2_args = {
    '--version': show_version,
    '--help': visit_digolds,
}

def run():
    argc = len(sys.argv)
    if argc == 2:
        commands_with_2_args.get(sys.argv[1], show_version)()
        return |
python |
# calculate the total return of our portfolio for the last x days
total_return = 0
for item in allocation:
    total_return = total_return + (allocation[item]['perc_position'] * allocation[item]['return'])
print(total_return) |
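For context, the loop in this seed assumes `allocation` maps each position to a dict carrying its portfolio weight and period return; a self-contained sketch with hypothetical numbers:

```python
# Hypothetical allocation; weights sum to 1.0, returns are fractions.
allocation = {
    "AAPL": {"perc_position": 0.6, "return": 0.04},
    "MSFT": {"perc_position": 0.4, "return": -0.01},
}

total_return = sum(p["perc_position"] * p["return"] for p in allocation.values())
print(total_return)  # 0.6*0.04 + 0.4*(-0.01) = 0.020
```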
python | import sys
sys.path.insert(0, './app/app')
from tools import decrypt # noqa
print(decrypt(sys.argv[1]))
|
python | from datetime import datetime
from infobip.util.models import DefaultObject, serializable
from infobip.util.TimeUnit import TimeUnit
class EmailData(DefaultObject):
    @property
    @serializable(name="subject", type='basestring')
    def subject(self):
        """
        Property is of type: 'basestring'
        """
        return self.get_field_value("subject")

    @subject.setter |
python | user1, user2, user3 = users
task = Task.objects.first()
field = task.items.first().template.fields.get(name='output')
item = task.items.first()
df_probs = get_votings(item.annotations.all(), field) |
python | from rest_framework import status
from api.uploaders.dropbox_upload import DropboxLocalUpload, \
DropboxGCPRemoteUpload
from api.tests.base import BaseAPITestCase
from api.tests import test_settings
class LocalDropboxUploadTests(BaseAPITestCase):
    def setUp(self):
        self.url = reverse('dropbox-upload') |
python |
def test_another_sample_pipeline_1():
    parser = Parser(another_sample)
    cmds = parser.consume()
|
python | elif any([self.grasped(obj) for obj in range(1,self.num_objects+1)]):
return
self.state[0] = False # set gripper_free False
self.state[obj] = True # grasp object
return |
python | data_iter_test = []
# data_entry_train is a dict; its field names are tagged with FieldName
# FieldName.FEAT_DYNAMIC_REAL produces data with the same length as the target
for k in range(num_ts):
    ts_length = randint(min_length, max_length)  # a randomly drawn length
    data_entry_train = {
        FieldName.START: start,
        FieldName.TARGET: [0.0] * ts_length,
    }
    if len(cardinality) > 0: |
python | from pm import app
# app.run(debug=False,host='0.0.0.0',port=9876)
app.run(debug=True, port=5000) |
python | # local imports
from .database import Database
class Parcel(Database):
    __table__ = 'parcels'
    return_columns = ('id', 'title', 'destination',
                      'current_location', 'quantity', 'status', 'date_ordered')
|
python | max_x = int(numpy.round(-270 + self.size_image[1]/2, 0))
min_x = int(numpy.round(-390 + self.size_image[1]/2, 0))
min_y = int(numpy.round(-820 + self.size_image[0]/2, 0))
max_y = int(numpy.round(-890 + self.size_image[0]/2, 0))
if min_x < 0:
    min_x = 0
if max_x > self.size_image[0] - 1:
    max_x = self.size_image[0] - 1
if min_y > self.size_image[1] - 1:
    min_y = self.size_image[1] - 1 |
python |
        return qmark_regex.join(fragment_to_regex(ff) for ff in f.split("?"))
    return re.escape(f)

twostars_regex = "(?:.*/)*"
onestar_regex = "[^/]*"
qmark_regex = "[^/]"

def fragments_to_regex(fragments):
    regex_str_pieces = []
    for fragment in fragments:
        if fragment == "":
            regex_str_pieces.append("/")
        elif fragment == "**":
            regex_str_pieces.append(twostars_regex)
        else: |
python | version_file = VSVersionInfo(
    ffi=FixedFileInfo(
        filevers=_ver_tuple,
        prodvers=_ver_tuple,
        mask=0x3F,
        flags=0x0,
        OS=0x4,
        fileType=0x1,
        subtype=0x0,
        date=(0, 0),
    ),
    kids=[
        StringFileInfo(
            [ |
python |
cars = data.cars()
fuel_efficiency = alt.Chart(cars).mark_area().encode(
    x='Year',
    y='mean(Miles_per_Gallon)').properties(title="Fuel efficiency over time") |
python |
        self._ACTIVE = False
        # Terminate context
        self._zmq_context.destroy(0)
        print("\n++ [INFO] Strategy safely terminated")

    ##########################################################################
    def set_status(self, _new_status=False):
        """
        Set Status (to enable/disable strategy manually)
        """ |
python | reason=("Not passing in CI although it works locally. Will handle it later.")
)
@pytest.mark.asyncio
async def test_api_manager_list_runtime_envs(state_api_manager):
    data_source_client = state_api_manager.data_source_client
    data_source_client.get_all_registered_agent_ids = MagicMock()
    data_source_client.get_all_registered_agent_ids.return_value = ["1", "2", "3"]
    data_source_client.get_runtime_envs_info.side_effect = [
        generate_runtime_env_info(RuntimeEnv(**{"pip": ["requests"]})),
        generate_runtime_env_info(
            RuntimeEnv(**{"pip": ["tensorflow"]}), creation_time=15
        ), |
python | from slack_bolt import App
from .sample_view import sample_view_callback
def register(app: App):
    app.view("sample_view_id")(sample_view_callback)
|
python | 0: 0,
1: 24,
2: 48,
3: 72,
4: 96,
5: 120,
6: 144,
7: 168,
8: 192,
9: 216 |
python | for index in range(commonHead, len1 - commonTail):
    self.result.append({
        #'debug' : 'common head & tail pretreatment processing -- only in array1',
        'remove': delim.join([path, str(index)]),
        'value': array1[index],
        'details': 'array-item'
    }) |
python | def create_plot():
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.set_xlabel('x axis')
    ax.set_ylabel('y axis')
    ax.set_zlabel('z axis')
    ax.set_autoscale_on(False)
    return fig, ax

def update_plot(X, Y, Z, X1, Y1, Z1, fig, ax):
    X = np.reshape(X, (1, 7))
|
python |
from pyspedas import tnames
from pytplot import options
def mms_eis_set_metadata(tplotnames, data_rate='srvy', datatype='extof', suffix=''): |
python | # step 1 - extract dataframe from dataset
step = PrimitiveStep(
    primitive_description=DatasetToDataFramePrimitive.metadata.query(),
    resolver=resolver,
)
step.add_argument(
    name="inputs",
    argument_type=ArgumentType.CONTAINER,
    data_reference="steps.0.produce",
) |
python | admin.site.register(ConferenceVars)
admin.site.register(User, UserAdmin)
admin.site.register(UserInfo, UserInfoAdmin)
admin.site.register(Workshops)
admin.site.register(Families) |
python |
class TestTable(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        d = {'ColA': np.linspace(0, 1, 100) + 1, 'ColB': np.random.normal(0, 1, 100) + 0}
        cls.df1 = pd.DataFrame(data=d)
        d = {'ColA': np.linspace(0, 1, 100) + 1, 'ColB': np.random.normal(0, 1, 100) + 0}
        cls.df2 = pd.DataFrame(data=d) |
python | import csv
import numpy as np
with open('qet_sart_checked.csv') as csvfile:
    reader = csv.DictReader(csvfile)
    ppts = {}
    for row in reader:
        ppt = row['observation']
        if ppt not in ppts:
            ppts[ppt] = [] |
python |
query_3 = "select array_binary_search([0,0,0,1,1,1,2,2,2], 2)"
verify_3 = "select 6"
query_4 = "select array_binary_search([0,0,1,1,1,2,2,2], 0)"
verify_4 = "select 0"
query_5 = "select array_binary_search([0,0,1,1,1,2,2,2], 1)"
verify_5 = "select 2"
query_6 = "select array_binary_search([0,0,1,1,1,2,2,2], 2)" |
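Judging from the verify queries, `array_binary_search` is expected to return the index of the first occurrence of the target (1 first appears at index 2, 2 at index 6). Assuming that semantics, a Python equivalent built on `bisect_left`:

```python
from bisect import bisect_left

def array_binary_search(arr, target):
    """Index of the first occurrence of target; -1 if absent (assumed behavior)."""
    i = bisect_left(arr, target)
    return i if i < len(arr) and arr[i] == target else -1

assert array_binary_search([0, 0, 0, 1, 1, 1, 2, 2, 2], 2) == 6
assert array_binary_search([0, 0, 1, 1, 1, 2, 2, 2], 0) == 0
assert array_binary_search([0, 0, 1, 1, 1, 2, 2, 2], 1) == 2
```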
python |
import logging

import voluptuous as vol
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_USERNAME)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.discovery import async_load_platform
_LOGGER = logging.getLogger(__name__)
|
python | # pylint: disable=protected-access
import unittest
from pylib.base import base_test_result
from pylib.base import mock_environment
from pylib.base import mock_test_instance
from pylib.local.device import local_device_instrumentation_test_run
|
python | parser_remove.add_argument("-scoutnet_adultgroup_without_accounts=", action="store", dest="awoa")
# Oauth2
parser_oauth2 = subparsers.add_parser("oauth2", help="Setup Oauth2 authentication for Google. "
                                                     "See https://github.com/eriste/scoutnet2google for "
                                                     "instructions about how to create a secret file.")
parser_oauth2.set_defaults(func=oauth2)
parser_oauth2.add_argument("-new-client-secret-file", help="Install a (new) client_secret file.",
                           action="store", dest="csf")
# Formalia
parser.add_argument(
    "-v", "--verbose", dest="verbose", action="store_true", help="Enable verbose output"
) |
python | df = pd.DataFrame(index=range(0,ROWS),columns=range(0,10))
print(df.memory_usage())
gc.enable()
for index, colLabel in enumerate(labels):
    df.iloc[:, index] = np.array(DATA)
print(df.memory_usage()) |
python | def render_list_csv(l):
    str_l = ["'%s'" % (v) for v in l]
    return ", ".join(str_l)

def render_list_csv_as_list(l):
    str_l = ["'%s'" % (v) for v in l]
    return "[%s]" % ", ".join(str_l)
|
python | )
train_ds, valid_ds, test_ds = data.TabularDataset.splits(
    path=data_dir,
    format='tsv', |
python | """
Provide constants for renter endpoint.
"""
SETTINGS_URL = '/renter'
PRICES_URL = '/renter/prices'
CONTRACTS_URL = '/renter/contracts'
DOWNLOADS_URL = '/renter/downloads'
|
python | except RuntimeError as e:
self.fail(e)
self.info(" * check that {} server started successfully.".format(builder))
self.small_sleep()
self.assertTrue(len(j.sal.process.getProcessPid(process)))
self.info(" * {} builder: run stop method.".format(builder))
try:
getattr(j.builders.db, builder).stop()
except RuntimeError as e: |
python | data_checksum,platform_msb,platform_lsb= i2c_read_transaction(handle,0x0,3)
print("data_checksum:",hex(data_checksum))
print("platform_msb:",hex(platform_msb))
print("platform_lsb:",hex(platform_lsb))
platform_ID = (platform_msb << 8) + platform_lsb
print("Platform ID:", hex(platform_ID))
if platform_ID != 0x4612:
print("platform_ID mismatch the GPU baseboard")
sys.exit(-1) |
python | )
measurements = self.process_data(
    self._meters_type[i], raw_measurements, date_to_get
)
if len(measurements) > 0:
    reports.update(measurements)
i += 1
time_series = TimeSeries((len(reports) > 0), date_to_get, reports)
return time_series |
python | )
t = time.time()
np.save("data/RBM/X_rbm_s" + str(int(sigma * 100)) + ".npy", X)
np.save("data/RBM/score_X_rbm_s" + str(int(sigma * 100)) + ".npy", score_X)
print("RBM data has been saved in data/RBM.")
|
python |
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('aldryn_social_addthis', '0002_links_google'),
    ]
    operations = [
        migrations.AddField( |
python | def __str__(*args, **kwargs):
    """__str__(self) -> String"""
    return _gdi_.NativeFontInfo___str__(*args, **kwargs)

def FromUserString(*args, **kwargs):
    """FromUserString(self, String s) -> bool"""
    return _gdi_.NativeFontInfo_FromUserString(*args, **kwargs)

def ToUserString(*args, **kwargs): |
python | dtype=torch.double).cuda()),
    requires_grad=True)
input_fp = Parameter(
    torch.randn(
        n_train_sample, channel_in, w_in, h_in,
        dtype=torch.float).cuda(), |
python |
def icecream_parlor(m: int, arr: List[int]) -> List[int]:
    n = len(arr)
    mapped = {}
    for i in range(0, n):
        mapped[arr[i]] = i
    for i in range(0, n):
        first = i |
python | # Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Print out observation for Japan
print(cars.loc['JAP'])
# Print out observations for Australia and Egypt
print(cars.loc[['AUS','EG']]) |
python | def count_bulls(secret_number, user_number):
    count_bulls = 0
    for i in range(0, 4):
        if user_number[i] == secret_number[i]:
            count_bulls += 1
    return count_bulls
|
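`count_bulls` counts positions where a four-digit guess matches the secret exactly (the "bulls" of Bulls and Cows). A quick usage sketch with made-up numbers:

```python
secret = "1234"
guess = "1379"
print(count_bulls(secret, guess))  # 1 -- only position 0 matches ('1')
```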
python | # Make Experience Replay
exp_replay = ExperienceReplay(intl_data=data, max_size=hyps['max_steps'])
# Make Models
h_size = hyps['h_size']
s_size = hyps['s_size']
a_size = data['actions'].shape[-1]
min_sigma = hyps['min_sigma']
obs_shape = data['observs'].shape[1:]
env_name = hyps['env_name']
bnorm = hyps['bnorm']
dynamics = Dynamics(obs_shape, h_size, s_size, a_size, bnorm=bnorm, env_name=env_name, min_sigma=min_sigma)
if env_name=="Pendulum-v0":
decoder = SimpleDecoder(dynamics.encoder.emb_shape, obs_shape, h_size, s_size, bnorm=bnorm) |
python |
        faces.append((i3, i4, i2, i1))
        i1 += 1
    return verts, faces

def main():
    Draw = Blender.Draw
    PREF_MAJOR_RAD = Draw.Create(1.0)
    PREF_MINOR_RAD = Draw.Create(0.25)
    PREF_MAJOR_SEG = Draw.Create(48)
    PREF_MINOR_SEG = Draw.Create(16)
|
python |
def get_kernel_layer_represent_name(node: BaseNode) -> str:
    """
    Returns the name under which a layer's type should appear in the visualization figure.
    We apply this function only to layer types that have kernels (other layers do not appear
    by name in the figures).

    Args: |
python | #
#
# For second class requirement 2b: rankAdvancementData["2"]["2"]["b"]
def readRankData(files):
    data = {}
    for file in files:
        filehdl = open(file, 'r')
        lines = filehdl.readlines()
        filehdl.close()
        for line in lines:
            (rank, requirement, subrequirement, reqtext) = line.split('%') |
python | top_data[idx] = maxval;
argmax_data[idx] = maxidx;
}
'''
kernel_backward = '''
extern "C"
__global__ void roi_backward(const float* const top_diff,
                             const int* const argmax_data, const float* const bottom_rois,
                             float* bottom_diff, const int num_rois,
                             const double spatial_scale, int channels,
                             int height, int width, int pooled_height,
                             int pooled_width, const int NN)
{
|
python | tab.append(lista[0]['id'])
print(test,len(df))
df['directorId']=pd.Series(tab)
df=df[df['directorId'].notnull()]
df=df.astype({'directorId':'int32'}) |
python | :param send_sns:
:return:
"""
log_data: dict = {
    "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
    "message": "Function is not configured.", |
python | for c in range(0, nr_cols):
    i = r * nr_cols + c
    if i < len(li_cat_feats):
        sns.boxplot(x=li_cat_feats[i], y=target, data=df, ax=axs[r][c])
plt.tight_layout() |
python |
while x != 0:
    if x > 0:
        remainder = x % 10
        x = x // 10
        if new_x > max_int // 10:
            return 0
        elif new_x == max_int // 10 and remainder > 7:
            return 0
    else:
        remainder = 10 - x % 10
        # e.g. -10 |
python |
# Error while fetching the recording list.
FAILEDOPERATION_DESCRIBERECORDSERROR = 'FailedOperation.DescribeRecordsError'
# Error while querying the task status.
FAILEDOPERATION_DESCRIBETASKSTATUSERROR = 'FailedOperation.DescribeTaskStatusError'
# Error while downloading the recording list.
FAILEDOPERATION_DOWNLOADRECORDLISTERROR = 'FailedOperation.DownloadRecordListError'
|
python | network = np.empty((4,len(params['N_subs'])))
kappas = np.empty((len(params['N_subs']),params['num_fits']))
for n, N_sub in enumerate(params['N_subs']):
    print('N_sub = %d' % N_sub)
    estimate = np.empty((params['B'], 2, params['num_fits']))
    network_obs = np.empty((4, params['num_fits']))
    for num_fit in range(params['num_fits']):
        alpha_sub, kappa_sub, A_sub, x_sub = subnetwork(N_sub)
        print('kappa_sub = %.2f' % kappa_sub)
        network_obs[0, num_fit] = 1.0 * np.sum(A_sub) / N_sub
        network_obs[1, num_fit] = np.max(np.sum(A_sub, axis=1))
        network_obs[2, num_fit] = np.max(np.sum(A_sub, axis=0))
        network_obs[3, num_fit] = 1.0 * np.sum(np.diagonal(A_sub)) / N_sub |
python | # MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
python | assert g.vertices == {0:(0.2,0.5), 5:(1,2)}
assert g.edges == [(0,5)]
b1 = network.PlanarGraphBuilder(g)
assert b1.add_vertex(5,6) == 6
b1.add_edge(5,6)
g1 = b1.build()
assert g1.vertices == {0:(0.2,0.5), 5:(1,2), 6:(5,6)}
assert g1.edges == [(0,5), (5,6)] |
python | from opentmi_client import OpenTmiClient, Result
client = OpenTmiClient()
result = Result()
result.tcid = "test-case-a"
result.verdict = "pass"
client.post_result(result)
|
python | """
d = {b: a for a, b in zip(widths, string.ascii_lowercase)}
lines, count = 1, 0
for c in S:
    if count + d[c] > 100:
        lines += 1
        count = 0
    count += d[c] |
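This seed is the classic "number of lines to write a string" exercise: `d` maps each letter to its width and lines wrap once they would exceed 100 units. A self-contained sketch, assuming `widths` lists the 26 lowercase letter widths in order:

```python
import string

def number_of_lines(widths, S):
    d = {b: a for a, b in zip(widths, string.ascii_lowercase)}
    lines, count = 1, 0
    for c in S:
        if count + d[c] > 100:
            lines, count = lines + 1, 0
        count += d[c]
    return lines, count

print(number_of_lines([10] * 26, "abcdefghijklmabcdefghijklm"))  # (3, 60)
```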
python | <gh_stars>1-10
/usr/lib/python2.6/_abcoll.py |
python | 'scripts': [],
'playbooks': [],
'integrations': [],
'TestPlaybooks': [],
'Classifiers': [],
'Dashboards': [],
'IncidentFields': [],
'IncidentTypes': [], |
python | batches=batches,
num_iters_per_epoch=3,
shuffle=True,
collate_fn=collate,
)
for i in range(1, 10): |
python | if False:
    yield self
return GetCertificateAuthorityActivationResult(
    complete_certificate_chain=self.complete_certificate_chain,
    status=self.status) |
python | def __init__(self):
    self._tasks: List[WorkerTask] = []
    self._context = None

def register_tasks(self, tasks: Union[List[WorkerTask], WorkerTask]) -> Union[bool, Exception]:
    if isinstance(tasks, list):
        if all(isinstance(x, WorkerTask) for x in tasks): |
python |
def convert(func):
    @wraps(func)
    def _convert(sequence, *args, **kwargs):
        if isinstance(sequence, torch.Tensor):
            return func(sequence, *args, **kwargs)
        elif isinstance(sequence, (list, tuple)):
            return [
                _convert(ip, *args, **kwargs) if ip is not None else None |
python | type=int,
    default=10,
    help='log interval, one log per n updates (default: 10)')
parser.add_argument(
    '--env-name',
    default='Cassie-v0', |
python | actions:
  - rename: "found-regex.txt"
""".format(
        b"Ertr\xc3\xa4gnisaufstellung\\.txt".decode("utf-8")
    ),
)
main(["run", "--config-file=%s" % (tmp_path / "config.yaml")])
assertdir(tmp_path, "found-regex.txt")

@pytest.mark.skip(reason="Todo")
def test_normalization_glob(tmp_path): |
python | 'molecular data matrices for phylogenetic inference '
'based on GenBank records.')
# Add version argument
ver = pkg_resources.require("MatPhylobi")[0].version
parser.add_argument('-v', '--version', action='version',
version='%(prog)s {version}'.format(version=ver)) # TODO add also citation to the version
# Prepare subparsers for different actions
actions = parser.add_subparsers(title="program action", help="either run an analysis or update an older run",
                                dest='action')
actions.required = True
parser_analyze = actions.add_parser('analyze') |
python |
    doc_1 = models.ImageField(upload_to="static/menu/images")
    doc_2 = models.ImageField(upload_to="static/menu/images")
    doc_3 = models.ImageField(upload_to="static/menu/images")
    nom_cite = models.CharField(max_length=60)
    batiment = models.CharField(max_length=60)
    inexistant = models.BooleanField(default=False)
    etage = models.IntegerField()
    porte = models.CharField(max_length=5)
    plan_masse_local = models.ImageField(upload_to="static/menu/images")

    def __str__(self):
        return self.site

class TypeArticleMoto(models.Model):
|
python |
if resname_b != "-":
resnum_b += 1
if resname_a != "-" and resname_b != "-":
numbering_dic[chain][resnum_b] = resnum_a
izone_fname = Path(output_path, "lovoalign.izone")
log.debug(f"Saving .izone to {izone_fname.name}")
dump_as_izone(izone_fname, numbering_dic)
return numbering_dic
|
python | chain_denoising (bool, optional): whether denoising should be performed during frame ordering
"""
# Read video frames and settings
frames, fps_setting, frame_setting = read_video(video_path)
|
python | # Fakeroot is required to create some character devices that reside in the /var/lib/docker folder.
subprocess.run(f"fakeroot tar xzf {gzip_with_files_for_integration_tests}",
shell=True,
check=True,
cwd=tmp_path)
else:
subprocess.run(f"tar xzf {gzip_with_files_for_integration_tests}",
shell=True,
check=True,
cwd=tmp_path)
return Path(tmp_path) |
python | self.__affiliations = Affiliations( )
self.__affiliations.setAncestor( self )
self.__note = Note( )
self.__note.setAncestor( self )
@property |
python | logging.info(f"Challenge: {challenge}")
logging.info(f"Type: {type}")
# URL verification
if type == 'url_verification':
    return func.HttpResponse(body=challenge,
                             status_code=200) |
python | n = len(list_obj)
for x in list_obj:
    skew += (x - mean_) ** 3
skew = skew / n if not sample else n * skew / ((n - 1) * (n - 2))
SD_ = SD(list_obj, sample)
skew = skew / (SD_ ** 3)
return skew
|
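The skewness fragment above depends on `mean_`, an initialized `skew` accumulator, and an `SD` helper defined earlier in its source file; a self-contained sketch of the same formula:

```python
def skewness(list_obj, sample=False):
    n = len(list_obj)
    mean_ = sum(list_obj) / n
    # population SD for sample=False, sample SD (n-1 denominator) otherwise
    sd = (sum((x - mean_) ** 2 for x in list_obj) / (n - (1 if sample else 0))) ** 0.5
    skew = sum((x - mean_) ** 3 for x in list_obj)
    skew = skew / n if not sample else n * skew / ((n - 1) * (n - 2))
    return skew / sd ** 3

print(skewness([2, 8, 0, 4, 1, 9, 9, 0]))
```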
python | from simple_ansible_api.callback import ResultsResultCallBack
def v1():
    cli = AnsiBleApi(hosts_list="/etc/ansible/hosts")
    # set custom callback object
    cli.set_callback(callback=ResultsResultCallBack())
    cli.ansible_playbook(playbooks=["test.yaml"])
    ret = cli.result(to_json=True)
    return ret |
python | if tokens[0] == 'create-student':
    tokens[0] = '/home/coder/containers/codeserver/create_student.sh'
if tokens[0] == 'delete-student':
    tokens[0] = '/home/coder/containers/codeserver/delete_student.sh'
|
python | self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.labelSearch = QtWidgets.QLabel(self.horizontalLayoutWidget) |
python | # ### commands auto generated by Alembic - please adjust! ###
op.create_table(
    "data_table_query_execution",
    sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
    sa.Column("table_id", sa.Integer(), nullable=False),
    sa.Column("cell_id", sa.Integer(), nullable=False),
    sa.Column("query_execution_id", sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(
        ["cell_id"], ["data_cell.id"], name="data_table_query_execution_ibfk_1"
    ),
    sa.ForeignKeyConstraint(
        ["query_execution_id"], |
python | SUCCESS_CODE = 1
# Dictionary of all possible error codes in send API
# and their corresponding exception.
INIT_RESPONSE_MAPPER = {
    '-1': ApiKeyNotFoundException,
    '-2': AmountNotFoundException,
    '-3': AmountNotIntegerException,
    '-4': AmountNotCorrect,
    '-5': RedirectUrlNotFoundException, |
python | F.conv3d(
        F.pad(x, list(self.padding)[::-1], mode='replicate'),
        self.kernel.repeat(CH, 1, 1, 1, 1).to(x.device, x.dtype),
        stride=1,
        groups=CH,
    )
    .view(B, CH, -1, D, H, W)
    .max(dim=2, keepdim=False)[0]
)
mask = x > max_non_center
if mask_only:
    return mask |
python | import argparse
sys.path.append(os.path.dirname(__file__) + '/../')
import vaetc
def main(checkpoint_path: str):
checkpoint = vaetc.load_checkpoint(checkpoint_path)
vaetc.evaluate(checkpoint)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("checkpoint_path", type=str, help="checkpoint_path")
|
python | scilla_data, evm_data = plot_data
# create plot
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.35
opacity = 0.8
rects1 = plt.bar(index, scilla_data, bar_width,
                 alpha=opacity,
                 color='#D7191C',
                 label='Scilla')
|
python | env_inc = _make_narrow_noise(bw, envrate, dur, fs, rise, rng)
env_inc[env_inc < 0] = 0.
env_inc = np.convolve(b, env_inc)[:len(t)]
if k in use_group:
    env = np.sqrt(rho) * env_coh + np.sqrt(1 - rho ** 2) * env_inc |
python |
list_filter = CustomUserAdmin.list_filter + ('department',)
list_display = CustomUserAdmin.list_display + ('department',)
fieldsets = CustomUserAdmin.fieldsets + (
    (None, {'fields': ('department',)}),
)
add_fieldsets = CustomUserAdmin.add_fieldsets + (
    (None, {
        'classes': ('wide',),
        'fields': ('department',)} |
python | """
Scene object base class.
Subclass this to properly receive on_*_event() messages automatically.
"""
log = LOG
FPS = 0
NAME = 'Unnamed Scene'
VERSION = '0.0' |
python |
    Returns
    -------
    Area of box determined by these two anchors
    """
    m = a2[0] - a1[0] + 1
    n = a2[1] - a1[1] + 1
    return m * n

def mrmsdtw(X, Y, tau, debug=False, refine=True):
    """
    An implementation of the approximate, memory-restricted |
python |
    return False

if __name__ == "__main__":
    my_parser = argparse.ArgumentParser(
        description="Returns indices of compounds that failed to generate conformers",
        allow_abbrev=False)
    my_parser.add_argument('--logfile', |