lang (stringclasses, 10 values) | seed (stringlengths, 5–2.12k) |
---|---|
python |
# These are simple smoke tests to ensure the parallel wrapper registers the backend.
# See the `parallel` module for more rigorous tests.
def test_learner_fit():
"""[Module | TransformerEstimator] test fit""" |
python | tmp = flag | (x << (8 * (ofs - 1)))
l.append(N - pol(tmp))
s, r = min(l), max(l)
if s <= N - r:
flag |= (l.index(s) << (8 * (ofs - 1)))
result += chr(l.index(s)) |
python | down_filename = 'vtgate-down.sh'
up_instance_template = 'vtgate-up-instance.sh'
down_instance_template = 'vtgate-down-instance.sh'
short_name = 'vtgate'
def __init__(self, hostname, ls):
self.hostname = hostname
self.ls = ls
self.ports = dict(web_port=15001, grpc_port=15991, mysql_server_port=15306)
self.configured_hosts = []
self.read_config()
|
python |
_OUT_RESOLUTION = (64, 64)
_SEQUENCE_LENGTH = 20
_URL = "http://www.cs.toronto.edu/~nitish/unsupervised_video/"
_CITATION = """\
@article{DBLP:journals/corr/SrivastavaMS15,
author = {<NAME> and
<NAME> and
<NAME>},
title = {Unsupervised Learning of Video Representations using LSTMs},
journal = {CoRR},
volume = {abs/1502.04681},
year = {2015},
url = {http://arxiv.org/abs/1502.04681},
archivePrefix = {arXiv}, |
python | import warnings
from commercetools.platform.models import * # noqa
warnings.warn(
"This `commercetools.types` module will be removed in the near future, "
"use the compatible `commercetools.platform.models`",
DeprecationWarning,
)
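# A migration sketch (hypothetical model name): imports that previously went through
# `commercetools.types` should switch to the new module path directly:
#
#     from commercetools.platform.models import Product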
|
python | def niDCPower_QueryOutputState(self, vi, channel_name, output_state, in_state): # noqa: N802
with self._func_lock:
if self.niDCPower_QueryOutputState_cfunc is None:
self.niDCPower_QueryOutputState_cfunc = self._get_library_function('niDCPower_QueryOutputState')
self.niDCPower_QueryOutputState_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViBoolean)] # noqa: F405
self.niDCPower_QueryOutputState_cfunc.restype = ViStatus # noqa: F405
return self.niDCPower_QueryOutputState_cfunc(vi, channel_name, output_state, in_state)
def niDCPower_ReadCurrentTemperature(self, vi, temperature): # noqa: N802
with self._func_lock:
if self.niDCPower_ReadCurrentTemperature_cfunc is None: |
python |
if batch_size == 1:
for item in generator:
yield item
return
if lazy:
# Lazy mode yields batches as generators, so objects are only materialized when actually consumed.
iterator = iter(generator)
try:
while True:
first = next(iterator)
def chunk(): |
python | print("#########################")
sqf_list = []
all_warnings = 0
all_errors = 0 |
python | from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='django-expiring-tokens',
version='0.1.2',
description='Add an API to your Django app using token-based authentication. Tokens expire on subsequent logins.',
long_description=read('README.md'),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/shebson/django-expiring-tokens',
packages=['tokenapi'],
license='Apache License, Version 2.0', |
python | self.assertTrue(isinstance(resp.json(), list))
resp = self.post(teams_url, data={}, expected_status_code=http_client.CREATED)
team_url = resp.json()["url"]
resp = self.get(team_url)
self.assertTrue(isinstance(resp.json()["players"], list))
self.assertEqual(len(resp.json()["players"]), 0)
resp = self.get("/matches/%s/teams/99999" % match_id,
expected_status_code=http_client.NOT_FOUND) |
python |
media = imjustgood("YOUR_APIKEY_HERE")
query = "ciuman" # example query
data = media.mimpi(query)
# Get attributes
result = "<NAME>"
for a in data["result"]:
result += "\n\nMimpi : {}".format(a["dream"])
result += "\nArti : {}".format(a["meaning"])
print(result)
# Get JSON results
print(data) |
python | """
Create the expected string representation of an instance.
Args:
instance:
The instance to create the expected repr of.
fields:
An array of field names that should be in the repr.
Returns:
The expected output of ``repr(instance)``.
"""
values = [f"{field}={repr(getattr(instance, field))}" for field in fields]
return f"{instance.__class__.__name__}({', '.join(values)})" |
python | assert os.path.exists(result_filename)
source_dir = OutputDirectories.getSourceDirectoryPath()
# Attach the binary blob as a Windows resource.
addResourceToFile(
target_filename=result_filename,
data=getFileContents(getConstantBlobFilename(source_dir), "rb"),
resource_kind=RT_RCDATA,
res_name=3,
lang_id=0,
)
|
python | text = takeCommand().lower()
print('Hanna: ¿Que nombre le pongo?')
func.takeNote(text)
speak('Listo!')
elif query in trig.TIME_TRIGGERS:
currentTime = datetime.datetime.now().strftime("%H:%M")
print(f"Hanna: Son las, {currentTime}, horas") |
python | print('step:%d, eq_delay:%e, orig_delay:%e, gain:%e'%(step, d_e, d_o, (d_e-d_o)/d_o))
record_file = open(txt_file, 'a')
record_file.write('%d %e %e %e\n'%(step, d_e, d_o, (d_e-d_o)/d_o))
record_file.close()
Orig_delays[step] = d_o
Eq_delays[step] = d_e
|
python |
# Note: use of this script may require installing the SMARTS package with the "[ros]" extensions.
mod_path = os.path.dirname(__file__)
install_arg = "" |
python | # proxy module
from __future__ import absolute_import
from apptools.template.impl.any_data_name_item import *
|
python | BaseBatchAcquisitionFunction
from genedisco.active_learning_methods.acquisition_functions.random_acquisition_function import \
RandomBatchAcquisitionFunction
class ActiveLearningLoop(sp.AbstractBaseApplication):
ACQUISITION_FUNCTIONS = [
"random", "topuncertain", "softuncertain", "marginsample", "coreset", "badge",
"kmeans_embedding", "kmeans_data", "adversarialBIM", "custom"
]
|
python | print('This is a print statement')
|
python |
def arrayExtractor(self, state, action):
features = self.dictExtractor(state, action)
arrayFeatures = np.zeros(self.prefix["tot"]) |
python | "`kernel` must be a `gpflow.mean_functions.MeanFunction`"
)
latent_inducing_points: Optional[int] = None
if isinstance(inducing_variable, FallbackSeparateIndependentInducingVariables): |
python |
@dynamic_form_field
class CaptchaField(BaseDynamicFormField):
cls = 'captcha.fields.CaptchaField'
display_label = _('CAPTCHA')
class Meta:
_exclude = ('required',)
@classonlymethod
def do_display_data(cls):
return False
|
python | # update url attribute of the metadata 'type' element
type_element = comp_res.metadata.type
type_element.url = '{0}/terms/{1}'.format(current_site_url(), to_resource_type)
type_element.save()
|
python | from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('calls', '0016_auto_20181015_0239'),
]
operations = [
migrations.AlterField(
model_name='call',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, unique=True), |
python | return decorator
def create_empty_json_response(status):
"""Create a JSON response with the given status code and an empty
object as its content.
"""
return Response('{}', status=status, mimetype='application/json')
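# e.g. create_empty_json_response(404) returns a response whose body is '{}',
# whose status code is 404, and whose Content-Type is application/json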
def jsonified(f): |
python | print("{} - {} sim = {}".format(f_name, s_name, sim))
if 'norm' in f_name:
print("{} range: min {} \t max {}".format(f_name, f_param.min(), f_param.max()))
print("{} range: min {} \t max {}".format(s_name, s_param.min(), s_param.max()))
return sims, x_names, y_names
def compute_merge(model, sims, x_names, perc=0.3):
"""takes output of calculate_sim_by_type and returns merged layers at perc rate"""
if perc > 1: perc /= 100
indices, Ns = [], []
model_name = model.__class__.__name__.lower().split('for')[0] |
python |
filt_records = filter(cond, fileIterator(inm4, it))
for r in map(recordToString, filt_records):
    print(r)
|
python | 'DUPE=dupe1\n'
'DUPE=dupe2\n')
expected = {
'foo': ['bar'],
'DUPE': ['dupe1', 'dupe2'],
} |
python | # 0.0-30.0 days);
26.5 #croute free scaling parameter [timestep^2/mm] (e.g., 0.0-50.0 days^2/mm);
]).reshape(15, 1)
incon = array([50, 0, 2.5, 2.5]).reshape(4, 1)
import tuwmodel as tuw
data = tuw.simulate(area=1, param=params, incon=incon, prec=prec, |
python |
with open('name.csv', 'r', encoding='gbk') as f:
reader = csv.reader(f)
csvfile = list(reader)
name2city = {}
for pair in csvfile: |
python | :type board: List[List[str]]
:type words: List[str]
:rtype: List[str]
"""
# build trie
self.trie = {}
for word in words:
node = self.trie
for c in word:
if c not in node:
node[c] = {} |
python | None, just sleep in base
"""
print('unknown context_status: %s, do nothing' % context['status'])
time.sleep(60)
|
python | most_common_element = collections.Counter(polymer_template).most_common(1)[0]
least_common_element = collections.Counter(polymer_template).most_common()[::-1][0]
diff = most_common_element[1] - least_common_element[1]
print(most_common_element) |
python | The slings and arrows of outrageous fortune
Or to take arms against a sea of troubles
HAMLET: To be, or not to be--that is the question:
Whether 'tis nobler in the mind to suffer
The slings and arrows of outrageous fortune
Or to take arms against a sea of troubles
And by opposing end them. To die, to sleep--
No more--and by a sleep to say we end
The heartache, and the thousand natural shocks |
python |
# init SQLAlchemy so we can use it later in our models
db = SQLAlchemy()
app = Flask(__name__)
app.config['SECRET_KEY'] = ''  # set a real, non-empty secret key before deploying
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
db.init_app(app)
login_manager = LoginManager() |
python |
# class ShopItemStorageHistory(models.Model):
# class Meta:
# verbose_name = u'Device item stock history record'
# verbose_name_plural = u'Device item stock history records'
# |
python | # :param date: datetime object
# :param prices: list of Price objects
# :param fx: an FX object
self.date = date
self.prices = prices
self.fx = fx
self.divs = divs
self.curves = curves
self.surfaces = surfaces
def simulate(self, shock: pd.Series):
new_env = deepcopy(self) |
python | ygains : PIDGains (default = PIDGains(1, 0, 0))
Returns:
vel : (class utils.states.Velocity) with the required velocity commands
"""
def __init__(
self, maxX=0.25, maxY=0.25, xgains=PIDGains(1, 0, 0), ygains=PIDGains(1, 0, 0)
):
super(OmniWheelPID, self).__init__() |
python | exist_value = current.value
next_value = current.next.value
print(next_value)
print(exist_value)
if exist_value == next_value:
current.next = current.next.next
if current.next is None:
break
if current.value != current.next.value:
print('possible error')
current = current.next
return dummy.next
|
python |
setup(name='prae',
version='0.1',
description='Code for Plannable Approximations to MDP Homomorphisms: Equivariance under Actions',
author='<NAME>', |
python | await asyncio.sleep(1)
async def discovery_test(*args):
rv = await BroadlinkUDP.discovery(local_ip_address=args[2],timeout=int(args[3]))
if rv:
_LOGGER.info("Discovery OK %s",rv)
else:
_LOGGER.warning("Discovery failed")
|
python |
logger = logging.getLogger(__name__)
def run(cfg_path):
"""Run vega automl.
:param cfg_path: config path.
"""
if sys.version_info < (3, 6):
sys.exit('Sorry, Python < 3.6 is not supported.')
_init_env(cfg_path)
_backup_cfg(cfg_path) |
python |
def tag_args(kwargs):
return "".join(f" {'class' if key == 'clazz' else key.replace('_', '-')}={repr(value)}" for key, value in kwargs.items())
class PairTag:
__slots__ = ["stream", "name", "kwargs"]
def __init__(self, stream, name, **kwargs):
self.stream = stream
self.name = name
self.kwargs = tag_args(kwargs)
def __enter__(self):
print(f"<{self.name}{self.kwargs}>", file=self.stream, end="")
def __exit__(self, _1, _2, _3):
print(f"</{self.name}>", file=self.stream, end="")
|
python | <gh_stars>0
from hacksport.problem_templates import CompiledBinary
Problem = CompiledBinary(
sources=["mybinary.c"],
share_source=True,
static_flag="this_is_the_flag",
remote=True)
|
python |
def run(self, repo: description.RepositoryDescription) -> bool:
if not repo.local.exists():
return False
|
python | self.table = {}
def insert(self, key: 'str', val: 'int') -> 'None':
diff = val - self.table.get(key, 0)
if diff != 0:
self.table[key] = val
node = self.root
for c in key:
node = node.next[c]
node.sum += diff
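# A hedged companion sketch (assuming the usual map-sum trie layout implied above,
# where each node carries a `next` mapping of children and a running `sum`):
def sum(self, prefix: 'str') -> 'int':
    node = self.root
    for c in prefix:
        if c not in node.next:
            return 0
        node = node.next[c]
    return node.sum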
|
python |
from .literals import COLOR_RED
from .models import Tag
TEST_DOCUMENT_PATH = os.path.join(settings.BASE_DIR, 'contrib', 'sample_documents', 'title_page.png')
class TagTestCase(TestCase):
def setUp(self):
self.document_type = DocumentType(name='test doc type')
self.document_type.save() |
python | from uqbar import book # noqa
from uqbar import containers # noqa
from uqbar import graphs # noqa
from uqbar._version import __version__, __version_info__ # noqa
|
python | from .alliance import Alliance
__red_end_user_data_statement__ = "This cog does not store user data or metadata."
|
python | 'description': 'Preprocessing modules for hatespeech detection project',
'author': '<NAME>',
'url': 'https://github.com/JherezTaylor/thesis-preprocessing',
'download_url': 'https://github.com/JherezTaylor/thesis-preprocessing',
'author_email': '<EMAIL>',
'long_description': README,
'license': LICENSE,
'version': '0.1',
'install_requires': ['nose'],
'packages': ['hatespeech_core'],
'name': 'Hatespeech Preprocessing' |
python | name='voting_close',
field=models.DateTimeField(verbose_name=b'Date voting closes'),
),
migrations.AlterField(
model_name='poll',
name='voting_open',
field=models.DateTimeField(verbose_name=b'Date voting begins'),
),
] |
python | current_user = g.current_user
return getattr(current_user, f, default)  # fall back to default when the attribute is absent
'''
Encrypt user information.
Multiple fields can be encrypted together, and state can be embedded as well, so that when the state changes the user is logged out immediately. |
python | from django.contrib.auth.models import User
from django.http import HttpResponse
def execute_sql(request):
list(User.objects.all())
return HttpResponse()
def resolving_view(request, arg1, arg2):
# see test_url_resolving in tests.py
return HttpResponse()
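# A hypothetical urlconf wiring for these test views (paths and converters assumed):
#     from django.urls import path
#     urlpatterns = [
#         path("execute_sql/", execute_sql),
#         path("resolve/<int:arg1>/<str:arg2>/", resolving_view),
#     ]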
|
python |
def _test_categorical_ordinal_encoder(
test_case, device_tag, dtype, size, capacity, num_tokens, num_iters
):
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float) |
python | from django.views.generic import View
from django.http import HttpResponse
class HelloWorldView(View):
    def get(self, request):
        return HttpResponse("Hello, World!")
|
python | <gh_stars>1-10
__version__="0.1.0"
|
python |
import subprocess
# communicate() just waits for the filter script to exit; stdout/stderr are not captured here
subprocess.Popen(["/usr/bin/perl", "filters/CreateInterval.pl", chrom, start, end, name, strand, out_file1]).communicate()
|
python | self.set_scip2()
self.get_parameter()
return True
def is_open(self):
'''Return True if the port is open.'''
return self.isOpen()
def flush_input_buf(self):
'''Clear input buffer.'''
self.flushInput()
def send_command(self, cmd):
'''Send command to device.'''
self.write(cmd) |
python | #Author: <NAME>
from myTables.ConfigTables import TaskMemoryTable
from myTables.ConfigTables import routeinfo
from myTables.ConfigTables import FpcInfoTable
from myTables.ConfigTables import reinfo
from lxml import etree
import yaml |
python | class Point():
def __init__(self, x, y):
self.x = x
self.y = y
def GetAreaOfPolyGon(points_x, points_y):
points = []
for index in range(len(points_x)):
points.append(Point(points_x[index], points_y[index]))
area = 0
if (len(points) < 3): |
python | }
for name, feature in custom_features.items():
example_features[name] = feature.raw_feature_type
# Native example parsing
example = tf.parse_single_example(serialized=raw_record, features=example_features)
|
python |
if online_norm:
self.onl_f = OnlineNormalizationLayer(torch.Size([self.n_features]))
else:
self.onl_f = lambda f: f
def forward(self, sim: ObsType) -> torch.Tensor: |
python | """
def load_only_poses_file_hydrogen_bond(sc, path_file_result_file_only_pose):
only_poseRDD = sc.textFile(path_file_result_file_only_pose)
header = only_poseRDD.first() #extract header
# Splitting the file by \t |
python | simulated_log.input(log.LocalLogInputEvent(wall_time=time.time()))
parameters_services.transform(store, simulated_log)
alarm_limits_services.transform(store, simulated_log)
service_event_log(
simulated_log, active_log_events, simulated_log_receiver
)
await trio.sleep(REQUEST_SERVICE_INTERVAL / 1000) |
python | from ..manager import SimpleManager
from ..base import MetricGenerator
from ...dataassociator import Associator
from ...types.association import Association
from ...types.detection import Detection |
python | <gh_stars>1-10
# Fill in the student IDs
stuIDs = ['1', '2', '3']
# In order: xxx
# Fill in the platform passwords
pwds = ['1', '2', '3']
|
python |
"""
A helper function that retrieves the user's input
and does basic data sanitization (like removing leading and trailing whitespace)
Args:
self: The class object
lowercase: The flag that determines if the returned value is
converted to all lowercase characters or not
Returns:
user_input: A string object from the user's input
in the terminal interface of the program |
python |
@celsius.route('/', methods=['POST'])
def form_post():
celsius = int(request.form['Celsius'])
return convert(celsius)
#return render_template('fahrenheit.html', input=celsius, output=fahrenheit)
@celsius.route('/<int:celsius>')
def convert(celsius):
fahrenheit = (celsius * 9) / 5 + 32  # true division; floor division would truncate the conversion
return render_template('fahrenheit.html', input=celsius, output=fahrenheit) |
python | else:
import shutil
shutil.rmtree(downpath)
with open(f'{path}/ManagerFiles/Bin/install2.bin', 'r+') as f:
leitura = f.readlines()
f.seek(0)
for i in leitura:
|
python | minor: Optional[int]
micro: Optional[int]
dev: Optional[str]
def __init__(
self,
major: Union[int, str],
minor: Optional[int] = None,
micro: Optional[int] = None,
dev: Optional[str] = None,
):
if isinstance(major, str):
if not all(arg is None for arg in (minor, micro, dev)):
raise ValueError( |
python | class EvaluationWarning(Enum):
CONNECTION_DEFINITION_DESCRIPTION_MISSING = auto()
DATASET_ABSTRACT_MISSING = auto()
DATASET_ABSTRACT_TOO_SHORT = auto()
DATASET_COVERAGE_MISSING = auto()
DATASET_METHOD_STEPS_MISSING = auto()
DATASET_PROJECT_MISSING = auto()
DATATABLE_DESCRIPTION_MISSING = auto()
DATATABLE_MISSING = auto()
DESIGN_DESCRIPTION_DESCRIPTION_MISSING = auto()
EMAIL_MISSING = auto()
INDIVIDUAL_NAME_INCOMPLETE = auto()
INTELLECTUAL_RIGHTS_MISSING = auto()
KEYWORDS_MISSING = auto() |
python | from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
MESSAGE_TEMPLATE = (
"You are going to derail from goal **{goal_name}** in **{time:0.1f} hours**. " |
python | import os
from passlib.apps import custom_app_context as context
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.security import Everyone, Authenticated
from pyramid.security import Allow
from stock_analysis.models.mymodel import User
def includeme(config): |
python | atservers = aternos.servers
mineserver = atservers[0]
addr = mineserver.address
permissions = [257541382627917825]
@bot.event
async def on_ready():
await bot.change_presence(
activity=discord.Activity( |
python | template = "Exception {0} in timer proc. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
self._logger.error("%s : traceback %s" % \
(message, traceback.format_exc()))
raise SystemExit(1)
if (curr - prev) < 1:
gevent.sleep(1 - (curr - prev))
self._logger.info("UVE Done")
else:
self._logger.info("UVE Process saturated")
gevent.sleep(0)
def stop_uve_partition(self, part):
if part not in self.ptab_info:
return |
python | private_key, key_id = keys.get_private_key_and_key_id(issuer="test-issuer")
self.assertIsInstance(private_key, _RSAPrivateKey)
self.assertEqual(key_id, "test-2048")
def test_second_key(self):
private_key, key_id = keys.get_private_key_and_key_id(issuer="test-issuer", key_id="test-1024")
self.assertIsInstance(private_key, _RSAPrivateKey)
self.assertEqual(key_id, "test-1024")
|
python |
def solution1():
l = [1]
while True:
yield l
l = [1] + [x + l[i + 1] for i, x in enumerate(l) if i < len(l) - 1] + [1]
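# A minimal usage sketch: solution1 yields successive rows of Pascal's
# triangle, each new row sandwiching pairwise sums between two 1s.
from itertools import islice
for row in islice(solution1(), 5):
    print(row)  # [1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]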
|
python | remote_cache_path = self._get_remote_cache_filepath(
dataset_id=dataset_id, dataset_version=dataset_version
)
with self._rw_client.write_lock(
storage_type=self._storage_type,
bucket=self._configurations.bucket,
cache_path=remote_cache_path,
):
# It is possible that the cache was created while
# the write lock was being acquired.
if not self._is_cloud_cache_present(
dataset_id=dataset_id, dataset_version=dataset_version
):
with self._lock_local_cache(
dataset_id=dataset_id, |
python | def can_update(self, perm_ctx: NamespaceScopedPermCtx, raise_exception: bool = True) -> bool:
perm_ctx.validate_resource_id()
return self.can_multi_actions(
perm_ctx, [NamespaceScopedAction.UPDATE, NamespaceScopedAction.VIEW, NamespaceAction.VIEW], raise_exception
)
@related_cluster_perm(method_name='can_view')
def can_delete(self, perm_ctx: NamespaceScopedPermCtx, raise_exception: bool = True) -> bool:
perm_ctx.validate_resource_id()
return self.can_multi_actions( |
python | el_found, abundances = abunds_func(x_0)
blobs_dtype = [('ep_r','f8'), ('rew_r','f8')]
for el in el_found:
blobs_dtype.append((el, 'f8'))
blobs_dtype.append((el+'_sigma_mean', 'f8'))
sampler = emcee.EnsembleSampler(n_walkers, n_dim, obj_func, blobs_dtype=blobs_dtype, args=(len(el_found),sun_el,sun_abs))
results = sampler.run_mcmc(x_ball, n_steps, progress=True)
flat_blob = sampler.get_blobs().reshape((n_walkers*n_steps)) |
python |
Optional Parameters:
- continuation_token -- The continuation token for getting the next page of results. Null for first page.
- filters -- List of filters.
- order_by -- List of OrderBy option.
'''
return _call_az("az synapse pipeline-run query-by-workspace", locals())
def show(run_id, workspace_name):
'''
Get a pipeline run by its run ID.
Required Parameters: |
python | # License: MIT (see LICENSE file)
import unittest
from datetime import date
import holidays
class TestJamaica(unittest.TestCase): |
python | # wait for the cogs to be ready as the bot methods are executed first
# to send a msg to a channel
# await self.stdout.send(ready_txt) # method to send messages, asynchronous
# await self.stdout.send(
# "https://www.youtube.com/watch?v=Dvut-96X-Ng&list=PLYeOw6sTSy6ZGyygcbta7GcpI8a5-Cooc&index=8&t=626s"
# )
# await self.stdout.send( |
python | from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
def main(args):
df = pd.load(args.df)
rfc_seed = int(time.time() * 1000)
estimators = [('reduce_dim', KernelPCA(kernel='linear')),
('rfc', RandomForestClassifier(32, random_state=rfc_seed))]
clf = Pipeline(estimators) |
python | self.uvs = []
self.triangles = []
self.has_normals = has_normals
self.has_uv = has_uv
|
python |
import numpy as np
# MSE is presumably defined in the truncated portion of this file; a standard definition:
def MSE(pred, true):
return np.mean(np.square(pred - true))
def RMSE(pred, true):
return np.sqrt(MSE(pred, true))
def MAPE(pred, true):
return np.mean(np.abs((pred - true) / true))
def MSPE(pred, true):
return np.mean(np.square((pred - true) / true))
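# A minimal usage sketch:
pred = np.array([2.5, 0.0, 2.0])
true = np.array([3.0, -0.5, 2.0])
print(RMSE(pred, true), MAPE(pred, true), MSPE(pred, true))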
|
python | taxa = float(input("Juros (Ex.: 3 para 3%): "))
pagamento = float(input("Pagamento mensal:"))
mês = 1
if (dívida * (taxa/100) > pagamento):
print("Sua dívida não será paga nunca, pois os juros são superiores ao pagamento mensal.")
else:
saldo = dívida
juros_pago = 0
while saldo > pagamento:
juros = saldo * taxa / 100
saldo = saldo + juros - pagamento |
python |
@staticmethod
def ws_status():
"""
Return the workspace status.
:return:
"""
return {i: Workspace.__dict__[i] for i in Workspace.__dict__ if str(i).startswith("STATUS_")}
@staticmethod
|
python |
mock_aovs.return_value = {mock_varname: mock_existing_aov}
mgr = init_manager()
mgr._init_reader_aovs(mock_reader)
mock_add.assert_called_with(mock_new_aov)
# _init_reader_groups
|
python |
EXAMPLE_CONFIG_DIR = './example-config'
class CapturingOutputSink:
def __init__(self, limit: int = 1):
self.images = [] |
python | # Remove any trailing slash in the base URL.
result["BASE_URL"] = result["BASE_URL"].rstrip("/")
return result
def get_hrefs(data):
"""Traversing the data recursively, return the list of values
for all 'href' keys in the dictionary.
If the value for an 'href' key is not a string, then ignore it.
"""
result = []
if isinstance(data, list):
    for value in data:
        result.extend(get_hrefs(value))  # extend, not append: get_hrefs returns a list |
python | for message in messages:
if message['update_id'] >= self.message_offset:
self.message_offset = message['update_id']
self.message_handler(message)
break
def register(self, command, handler):
self.commands[command] = handler
def set_default_handler(self, handler):
self.default_handler = handler
def set_sleep_btw_updates(self, sleep_time):
self.sleep_btw_updates = sleep_time |
python | }
'''
def test_write_config():
config = {}
options = json.loads(TEST_JSON)
|
python | class SerializerVotesFieldMixin(serializers.Serializer):
'''
Adds vote-related fields to the hyperlinked API for comments on a given object
'''
user_vote = serializers.SerializerMethodField()
votes_score_avg = serializers.SerializerMethodField()
votes_count = serializers.SerializerMethodField()
votes_score_distribution = serializers.SerializerMethodField()
def get_user_vote(self, obj):
context = getattr(self, 'context') |
python |
if __name__ == "__main__":
data = parseInput("input.txt")
s = data[0][:-1]
# s0 = 'dabAcCaCBAcCcaDA'
# s0 = '<KEY>'
# s0 = '<KEY>'
# s0 ='<KEY>'
# s = s0
s2 = elim3(s)
while len(s2) < len(s):
|
python | exec $GPHOME/bin/gp_primarymirror -h this-is-my-host -p 60002 -i /tmp/temporaryNamedFile0
sys.exit 0"""
)
driver.initOneHostConfiguration()
test_main( "syncNoWaitTest2", ["--mode", "sync", "--host", "this-is-my-host", "--role", "mirror", \
"-f", "resetFaultName", "-y", "status", "-c", "create table", "-d", "db1", \
"-t", "table10", "-o", "55", "-z", "11" \
], parserFn, commandFn, None, \ |
python | except:
continue
word2count[word] = word2count.get(word, 0) + count
for word in word2count.keys(): |
python | # Generated by Django 1.11 on 2018-06-29 09:02
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
|
python | if text:
if "card" in text.lower():
flag = True
if "credit" in text.lower():
flag = True |
python | class Todo(db.Model):
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.String(200), nullable=False)
date_created = db.Column(db.DateTime, default=datetime.utcnow)
def __repr__(self):
return '<Task %r>' % self.id |