blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
23cb6c73db0e3711ff0ecbd0b6aa7165e94b3584
|
a01fb7bb8e8738a3170083d84bc3fcfd40e7e44f
|
/python3/module/pandas/df/sql/join.py
|
540fb2077f46a30f47e810c2b98ebc2c0a79da73
|
[] |
no_license
|
jk983294/CommonScript
|
f07acf603611b4691b176aa4a02791ef7d4d9370
|
774bcbbae9c146f37312c771c9e867fb93a0c452
|
refs/heads/master
| 2023-08-21T17:50:19.036159 | 2023-08-16T00:22:03 | 2023-08-16T00:22:03 | 42,732,160 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 739 |
py
|
import pandas as pd
import numpy as np
# Two small frames sharing a 'key' column; 'value' entries are random draws,
# so printed numbers differ run to run (the join structure does not).
df1 = pd.DataFrame({'key': ['A', 'B', 'C', 'D'], 'value': np.random.randn(4)})
df2 = pd.DataFrame({'key': ['B', 'D', 'D', 'E'], 'value': np.random.randn(4)})
print(df1)
print(df2)
# SELECT * FROM df1 INNER JOIN df2 ON df1.key = df2.key;
# merge() defaults to an inner join; the duplicate 'D' in df2 yields two rows.
print(pd.merge(df1, df2, on='key'))
# in case join key is different
# (left_on/right_on name the join column on each side; both are 'key' here,
# so this is equivalent to on='key' except both key columns are kept)
print(pd.merge(df1, df2, left_on='key', right_on='key'))
# SELECT * FROM df1 LEFT OUTER JOIN df2 ON df1.key = df2.key;
print(pd.merge(df1, df2, on='key', how='left'))
# SELECT * FROM df1 RIGHT OUTER JOIN df2 ON df1.key = df2.key;
print(pd.merge(df1, df2, on='key', how='right'))
# SELECT * FROM df1 FULL OUTER JOIN df2 ON df1.key = df2.key;
print(pd.merge(df1, df2, on='key', how='outer'))
|
[
"[email protected]"
] | |
f46f3f29cb80c2826087623308da18f78f72a5fc
|
91f948b849a03f27c96aa6b76980a5fa68970b70
|
/experiments/__init__.py
|
de913a706b51dac74f50aafe9917d627f649419c
|
[
"MIT"
] |
permissive
|
satyam-cyc/MASS-Learning
|
3d987af7622f604db02b64313179590651285170
|
0d40de5227c94d1a5e4b18e44d16374e12821ad2
|
refs/heads/master
| 2022-01-10T02:23:06.670225 | 2019-06-11T19:41:35 | 2019-06-11T19:41:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 304 |
py
|
from .LogModelParameters import LogModelParameters
from .MASSLossTerms import MASSLossTerms
from .ModelLossAndAccuracy import ModelLossAndAccuracy
from .OODDetection import OODDetection
from .SaveModelParameters import SaveModelParameters
from .UncertaintyQuantification import UncertaintyQuantification
|
[
"[email protected]"
] | |
cfe436e359c52cb80c53b6b3d45d67431576f12c
|
16f173135e81215d05ee8f475c13a16e3796e1fa
|
/Deep_Learning_with_Keras_in_Python/3.Improving_Your_Model_Performance/Learning the digits.py
|
4219e773851dd4e8ea25cc68e96088e4bed25bb3
|
[] |
no_license
|
jerry-mkpong/DataCamp
|
1b53821f1a32b48efdc8465251401721ba75bb56
|
10445bad35ef11567910ffab6ac70a980555a1b7
|
refs/heads/master
| 2022-11-11T03:57:21.923366 | 2020-06-28T17:36:10 | 2020-06-28T17:36:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,060 |
py
|
'''
You're going to build a model on the digits dataset, a sample dataset that comes pre-loaded with scikit learn. The digits dataset consist of 8x8 pixel handwritten digits from 0 to 9:
You want to distinguish between each of the 10 possible digits given an image, so we are dealing with multi-class classification.
The dataset has already been partitioned into X_train, y_train, X_test, and y_test using 30% of the data as testing data. The labels are one-hot encoded vectors, so you don't need to use Keras to_categorical() function.
Let's build this new model!
'''
# NOTE(review): Sequential, Dense and X_train are supplied by the DataCamp
# exercise environment (presumably keras.models / keras.layers) -- confirm
# imports before running this standalone.
# Instantiate a Sequential model
model = Sequential()
# Input and hidden layer with input_shape, 16 neurons, and relu
# (each 8x8 image arrives flattened into a 64-element vector)
model.add(Dense(16, input_shape = (64,), activation = 'relu'))
# Output layer with 10 neurons (one per digit) and softmax
model.add(Dense(10, activation='softmax'))
# Compile your model
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
# Test if your model works and can process input data
# (the model is untrained here; this only sanity-checks the input shape)
print(model.predict(X_train))
|
[
"[email protected]"
] | |
cefea000be2b8713b9d4ea548c735c4984caf7de
|
3904a5773c5aa047692895dce1225be7d84f5cc7
|
/ML_AI_TechWithTim/K-Means/K_Means.py
|
f33bc323b87c4aba7ff873f2b6d3cbe38641d449
|
[] |
no_license
|
snehilk1312/ML_1
|
063038586296c4f6f0ab92422a6c60dd007c4068
|
8e3b081b1037ab999ca78fa282ce7041730d082a
|
refs/heads/master
| 2020-09-07T20:01:45.509060 | 2020-03-15T15:44:54 | 2020-03-15T15:44:54 | 220,898,676 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,057 |
py
|
# Importing modules
import numpy as np
import sklearn
from sklearn.preprocessing import scale
from sklearn.datasets import load_digits
from sklearn.cluster import KMeans
from sklearn import metrics
# Loading Data sets
digits = load_digits()
# Standardize features (zero mean, unit variance) before clustering.
data = scale(digits.data)
y = digits.target
k = len(np.unique(y)) # or here k=10
samples, features = data.shape
# Fit `estimator` on `data` and print its inertia plus clustering-quality
# scores measured against the true labels `y` (read from module globals).
# NOTE(review): '%i' truncates estimator.inertia_, which is a float.
def bench_k_means(estimator, name, data):
    estimator.fit(data)
    print('%-9s\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
          % (name, estimator.inertia_,
             metrics.homogeneity_score(y, estimator.labels_),
             metrics.completeness_score(y, estimator.labels_),
             metrics.v_measure_score(y, estimator.labels_),
             metrics.adjusted_rand_score(y, estimator.labels_),
             metrics.adjusted_mutual_info_score(y, estimator.labels_),
             metrics.silhouette_score(data, estimator.labels_,
                                      metric='euclidean')))
clf = KMeans(n_clusters=k, init="random", n_init=10)
bench_k_means(clf, "1", data)
|
[
"[email protected]"
] | |
00bb139bc7606403b576ce7cbadcf0745f8fc7fb
|
cc1eeb43eb9e4e83078f4c87e40a5c7fe56b109f
|
/Day05/shuixianhua.py
|
8cb8f1e3429d4bb2394b367a322d9a2886c2fb28
|
[] |
no_license
|
test-wsl/learn_100
|
d57ac4e8e7c062472273622351374decbae6d213
|
9fbb83455c15115b3cdec80d17c542e0aba2a6df
|
refs/heads/master
| 2020-08-29T22:43:10.800177 | 2019-11-04T08:17:38 | 2019-11-04T08:17:38 | 218,192,964 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 326 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Find the three-digit narcissistic (Armstrong) numbers.

A number qualifies when the cubes of its three digits sum to the
number itself, e.g. 153 = 1**3 + 5**3 + 3**3.
version: 0.1
"""
for candidate in range(100, 1000):
    ones = candidate % 10
    tens = candidate // 10 % 10
    hundreds = candidate // 100
    if candidate == ones ** 3 + tens ** 3 + hundreds ** 3:
        print(candidate)
|
[
"weishl"
] |
weishl
|
e953daf74af26ba80d58f622e7985c62eaf4cadd
|
76de53bd3923a57a36d0ed4b4a900b56050ebb31
|
/SW Expert Academy/190926/1263_사람 네트워크2.py
|
61dbab0dcf1c40b17376a408ca7e36d21934b1bb
|
[] |
no_license
|
Seungjin22/Algorithm
|
5b4fd53ae5742d830594d116e536531959b3454d
|
753dda47334e445f7a9e1e41df5e44564d99e79e
|
refs/heads/master
| 2020-09-04T08:54:01.359518 | 2020-02-03T10:41:05 | 2020-02-03T10:41:05 | 219,697,780 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 821 |
py
|
import sys
# Route input() to the bundled test-case file instead of the console.
sys.stdin = open('1263_input.txt')
def AllPairsShortest(D):
    """Floyd-Warshall: relax square matrix D in place to all-pairs shortest
    path costs.

    Fix: the original read the matrix size from the module global ``N``;
    deriving it from ``len(D)`` removes that hidden dependency while staying
    identical for the N x N matrices this script builds.
    """
    n = len(D)
    for k in range(n):
        for i in range(n):
            if i != k:
                for j in range(n):
                    # Skip trivial pairs; D[i][i] is already 0.
                    if j != k and j != i:
                        D[i][j] = min(D[i][k] + D[k][j], D[i][j])
T = int(input())
for tc in range(1, T + 1):
    # One line per test case: N followed by the N x N adjacency matrix
    # flattened in row order (0 means "no direct edge").
    data = list(map(int, input().split()))
    N = data.pop(0)
    # 987654321 serves as "infinity" for unreachable pairs.
    dist = [[987654321] * N for _ in range(N)]
    idx = 0
    for i in range(N):
        for j in range(N):
            if i == j:
                dist[i][j] = 0
            if data[idx]:
                dist[i][j] = data[idx]
            idx += 1
    AllPairsShortest(dist)
    # Answer: the minimum, over all start nodes, of the summed shortest
    # distances to every other node.
    mini = 987654321
    for i in range(N):
        if sum(dist[i]) < mini:
            mini = sum(dist[i])
    print('#{} {}'.format(tc, mini))
|
[
"[email protected]"
] | |
b4577f6dc2ca7a3c75449f92e21cad3aa1b6b5fe
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2814/60652/240209.py
|
19b8d713af73e09dfece90f18c9ba12646de0b4a
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 162 |
py
|
# Greedy scheduling: serve requests in increasing duration and count how
# many can start no earlier than the accumulated finish time.
n = int(input())
durations = list(map(int, input().split()))
durations.sort()
served = 0
finish_time = 0
for duration in durations:
    if duration >= finish_time:
        served += 1
        finish_time += duration
print(served)
|
[
"[email protected]"
] | |
8196a6d153f61f9ad7d3d169b3850fb382e2b167
|
6963f191a3574edcfaecc265a363bc10d4cdfc19
|
/osf/management/commands/osf_shell.py
|
11ed88684d009e3e84f839751c5ea9a4012a6410
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer",
"AGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"MPL-1.1",
"CPAL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-2.0"
] |
permissive
|
rdm-dev12/RDM-osf.io
|
8f3c2f7057b17512921292e84578d24ad4ca2bb5
|
14d9a924b8c6bc7d79fd34b87830ffa29acafed1
|
refs/heads/timestamp-v18.2.7.file_upload_x_of_y
| 2022-12-09T06:23:43.320341 | 2019-02-27T07:39:12 | 2019-02-27T07:39:12 | 172,862,723 | 0 | 0 |
Apache-2.0
| 2022-09-16T17:58:51 | 2019-02-27T07:07:48 |
Python
|
UTF-8
|
Python
| false | false | 7,816 |
py
|
"""Enhanced python shell.
Includes all features from django-extension's shell_plus command plus OSF-specific
niceties.
By default, sessions run in a transaction, so changes won't be commited until
you execute `commit()`.
All models are imported by default, as well as common OSF and Django objects.
To add more objects, set the `OSF_SHELL_USER_IMPORTS` Django setting
to a dictionary or a callable that returns a dictionary.
Example: ::
from django.apps import apps
def get_user_imports():
User = apps.get_model('osf.OSFUser')
Node = apps.get_model('osf.AbstractNode')
me = User.objects.get(username='[email protected]')
node = Node.objects.first()
return {
'me': me,
'node': node,
}
OSF_SHELL_USER_IMPORTS = get_user_imports
"""
from django.conf import settings
from django.db import transaction
from django.utils.termcolors import colorize
from django.db.models import Model
from django_extensions.management.commands import shell_plus
from django_extensions.management.utils import signalcommand
def header(text):
    """Return *text* rendered as a bold green section heading."""
    styling = {'fg': 'green', 'opts': ('bold', )}
    return colorize(text, **styling)
def format_imported_objects(models, osf, transaction, other, user):
    """Build the banner text listing every auto-imported name by group.

    Each argument is a dict of name -> object; only the (sorted) names are
    shown. The ``user`` section is appended only when non-empty.
    """
    def names_of(group):
        # Comma-separated, alphabetized names for one import group.
        return ', '.join(sorted(group.keys()))
    sections = {
        'models_header': header('Models:'),
        'models': names_of(models),
        'osf_header': header('OSF:'),
        'osf': names_of(osf),
        'transaction_header': header('Transaction:'),
        'transaction': names_of(transaction),
        'other_header': header('Django:'),
        'other': names_of(other),
    }
    ret = """
{models_header}
{models}
{osf_header}
{osf}
{transaction_header}
{transaction}
{other_header}
{other}""".format(**sections)
    if user:
        ret += '\n\n{user_header}\n{user}'.format(
            user_header=header('User Imports:'),
            user=names_of(user)
        )
    return ret
# kwargs will be the grouped imports, e.g. {'models': {...}, 'osf': {...}}
def make_banner(auto_transact=True, **kwargs):
    """Compose the colorized shell banner: OSF logo, greeting, imported
    names, and a transaction-mode warning chosen by ``auto_transact``."""
    # ASCII-art OSF logo (whitespace is significant -- do not reflow).
    logo = """
                           .+yhhys/`
                        `smmmmmmmmd:
                 `--.`  ommmmmmmmmmm.  `.--.
              `odmmmmmh/ smmmhhyhdmmm- :ymmmmmdo.
             -dmmmmmmmmmy .hho+++++sdo smmmmmmmmmm:
             smmmmmmmmmmm: `++++++++: -mmmmmmmmmmmy
             +mmmmmmmmmmmo: :+++++++.:+mmmmmmmmmmmo
              +dmmmmmmmds++. .://:-``++odmmmmmmmmo
               `:osyhys+++/ :+++oyhyso/`
            `/shddds/``.-::-. `-::-.``/shdddy/`
           -dmmmmmds++++/. ./++++sdmmmmmd:
           hmmmmmmo+++++++. .++++++++dmmmmmd`
           hmmmmmmo+++++++. .++++++++dmmmmmd`
           -dmmmmmds++++/. ./++++sdmmmmmd:
            `/shddhs/``.-::-. `-::-.``/shdddy/`
               `:osyhys+++/ :+++oyhyso/`
              +dmmmmmmmds++. .://:- `++odmmmmmmmmo
             +mmmmmmmmmmmo: /++++++/`:+mmmmmmmmmmmo
             smmmmmmmmmmm: `++++++++. -mmmmmmmmmmmy
             -dmmmmmmmmmy `s++++++y/ smmmmmmmmmm:
              `odmmmmmh/ hmmhyyhdmm/ :ymmmmmds.
                 `--.` `mmmmmmmmmmo `.--.
                        /mmmmmmmmh`
                           `+shhyo:
    """
    greeting = 'Welcome to the OSF Shell. Happy hacking!'
    imported_objects = format_imported_objects(**kwargs)
    transaction_warning = """
*** TRANSACTION AUTOMATICALLY STARTED ***
To persist changes, run 'commit()'.
Keep in mind that changing documents will lock them.
This feature can be disabled with the '--no-transaction' flag."""
    no_transaction_warning = """
*** AUTO-TRANSACTION DISABLED ***
All changes will persist. Transactions must be handled manually."""
    template = """{logo}
{greeting}
{imported_objects}
{warning}
"""
    # Yellow for the (default) transactional mode, red for persist-everything.
    if auto_transact:
        warning = colorize(transaction_warning, fg='yellow')
    else:
        warning = colorize(no_transaction_warning, fg='red')
    return template.format(
        logo=colorize(logo, fg='cyan'),
        greeting=colorize(greeting, opts=('bold', )),
        imported_objects=imported_objects,
        warning=warning,
    )
class Command(shell_plus.Command):
    """OSF-aware ``shell_plus``: the interactive Django shell with OSF
    helpers pre-imported and, by default, the whole session wrapped in a
    database transaction that only persists when ``commit()`` is run.
    """

    def add_arguments(self, parser):
        super(Command, self).add_arguments(parser)
        parser.add_argument(
            '--no-transaction', action='store_false', dest='transaction',
            help="Don't run session in transaction. Transactions must be "
            'started manually with start_transaction()'
        )

    def get_osf_imports(self):
        """Return a dictionary of common OSF objects and utilities."""
        from osf.management.utils import print_sql
        from website import settings as website_settings
        from framework.auth import Auth, get_user
        ret = {
            'print_sql': print_sql,
            'Auth': Auth,
            'get_user': get_user,
            'website_settings': website_settings,
        }
        try:  # faker isn't a prod requirement
            from faker import Factory
        except ImportError:
            pass
        else:
            fake = Factory.create()
            ret['fake'] = fake
        return ret

    def get_grouped_imports(self, options):
        """Return a dictionary of grouped import of the form:
        {
            'osf': {
                'Auth': <framework.auth.Auth>,
                ....
            }
            'models': {...}
            'transaction': {...}
            'other': {...}
        }
        """
        auto_transact = options.get('transaction', True)

        def start_transaction():
            self.atomic.__enter__()
            print('New transaction opened.')

        def commit():
            self.atomic.__exit__(None, None, None)
            print('Transaction committed.')
            # BUGFIX: this restart used to sit at function-body level, so the
            # transaction was entered again during setup (on top of the enter
            # in handle()) and commit() only closed the innermost block.
            # Restart here instead, so the session stays transactional after
            # a commit.
            if auto_transact:
                start_transaction()

        def rollback():
            # Exit the atomic block with a synthetic error so Django rolls
            # the transaction back instead of committing it.
            exc_type = RuntimeError
            exc_value = exc_type('Transaction rollback')
            self.atomic.__exit__(exc_type, exc_value, None)
            print('Transaction rolled back.')
            # BUGFIX: same as commit() -- open a fresh transaction after the
            # rollback rather than at setup time.
            if auto_transact:
                start_transaction()

        groups = {
            'models': {},
            'other': {},
            'osf': self.get_osf_imports(),
            'transaction': {
                'start_transaction': start_transaction,
                'commit': commit,
                'rollback': rollback,
            },
            'user': self.get_user_imports(),
        }
        # Import models and common django imports
        shell_plus_imports = shell_plus.Command.get_imported_objects(self, options)
        for name, object in shell_plus_imports.items():
            if isinstance(object, type) and issubclass(object, Model):
                groups['models'][name] = object
            else:
                groups['other'][name] = object
        return groups

    def get_user_imports(self):
        """Return extra imports from the OSF_SHELL_USER_IMPORTS setting,
        calling it first when it is a callable; {} when unset."""
        imports = getattr(settings, 'OSF_SHELL_USER_IMPORTS', None)
        if imports:
            if callable(imports):
                imports = imports()
            return imports
        else:
            return {}

    # Override shell_plus.Command
    def get_imported_objects(self, options):
        # Merge all the values of grouped_imports
        imported_objects = {}
        for imports in self.grouped_imports.values():
            imported_objects.update(imports)
        return imported_objects

    # Override shell_plus.Command
    @signalcommand
    def handle(self, *args, **options):
        self.atomic = transaction.atomic()
        auto_transact = options.get('transaction', True)
        options['quiet_load'] = True  # Don't show default shell_plus banner
        self.grouped_imports = self.get_grouped_imports(options)
        banner = make_banner(auto_transact=auto_transact, **self.grouped_imports)
        print(banner)
        # The session's initial transaction is opened exactly once, here.
        if auto_transact:
            self.atomic.__enter__()
        super(Command, self).handle(*args, **options)
|
[
"[email protected]"
] | |
4b07d1427059017a5efe9aaa2f4d709d14931aa8
|
d4ea1f9747799bf503523b86b8b5ee29bab65eff
|
/gyun/cli/iaas_client/actions/s2/modify_s2_shared_target_attributes.py
|
a1ea167b28dd9c16607678be1997a591d7b7c26d
|
[
"Apache-2.0"
] |
permissive
|
gyun-gome/gyun-cli
|
88b5493d90a19c5bf56a1bba4bf301d1b4a3156d
|
275b6664335e2ef21a01a48f8c06d6a89dd63467
|
refs/heads/master
| 2021-06-28T13:53:01.300135 | 2017-09-13T04:44:01 | 2017-09-13T04:44:01 | 103,353,093 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,206 |
py
|
# encoding: utf-8
# =========================================================================
# ©2017-2018 北京国美云服科技有限公司
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from gyun.cli.misc.utils import explode_array
from gyun.cli.iaas_client.actions.base import BaseAction
class ModifyS2SharedTargetAttributesAction(BaseAction):
    """CLI action: modify the attributes (parameters, initiators, permission
    group, export name) of an existing S2 shared target."""
    action = 'ModifyS2SharedTargetAttributes'
    command = 'modify-s2-shared-target-attributes'
    usage = '%(prog)s -s <shared_target> -o <operation> [-p <parameters> ...] [-f <conf_file>]'

    @classmethod
    def add_ext_arguments(cls, parser):
        """Register this action's command-line options on *parser*."""
        parser.add_argument("-s", "--shared-target", dest="shared_target",
                            action="store", type=str, default=None,
                            help="the ID of shared target.")
        parser.add_argument("-o", "--operation", dest="operation",
                            action="store", type=str, default=None,
                            help="valid values includes add, modify, delete, set.")
        parser.add_argument("-p", "--parameters", dest="parameters",
                            action="store", type=str, default=None,
                            help="please refer http://docs.qc.gyun.com/api/s2/describle_s2_default_parameters.html")
        parser.add_argument("-i", "--initiator-names", dest="initiator_names",
                            action="store", type=str, default=None,
                            help="client IQN.")
        parser.add_argument("-S", "--s2-group", dest="s2_group",
                            action="store", type=str, default=None,
                            help="the ID of permission group.")
        parser.add_argument("-n", "--export-name", dest="export_name",
                            action="store", type=str, default=None,
                            help="the name of shared target, available in vnas.")

    @classmethod
    def build_directive(cls, options):
        """Validate required options and build the API request directive.

        Returns None (after printing an error) when a required option is
        missing.
        """
        # BUGFIX: argparse always creates the attribute (default=None), so the
        # old hasattr() check could never fail and missing required options
        # slipped through. Check the value instead.
        for key in ['shared_target', 'operation']:
            if getattr(options, key, None) is None:
                print("error: [%s] should be specified." % key)
                return None
        directive = {
            "shared_target": options.shared_target,
            "operation": options.operation,
            "parameters": explode_array(options.parameters),
            "initiator_names": explode_array(options.initiator_names),
            "s2_group": options.s2_group,
            "export_name": options.export_name,
        }
        return directive
|
[
"[email protected]"
] | |
f686c14d3f3ccf88ac38fcd8a34d6d9f001befd4
|
3c0f50b6563e2c9c6306f7ca2216ff46c8250b96
|
/address/migrations/0003_usuario.py
|
b33eb28800063dbfeb0a7fb4e8513ef46fb1f55c
|
[] |
no_license
|
JoamirS/project-curriculo
|
895e72b34a8a51478c3fe5958d509bfa89be761e
|
490ed533dae740a7d2e1b652ce36fdb2af294eb3
|
refs/heads/master
| 2020-06-01T19:50:42.019259 | 2019-06-22T22:16:18 | 2019-06-22T22:16:18 | 190,904,296 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 576 |
py
|
# Generated by Django 2.2.2 on 2019-06-12 17:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Creates the 'Usuario' table with plain email/senha (password) columns.
    # NOTE(review): storing a password in a 30-char plain CharField suggests
    # no hashing -- worth confirming upstream.
    dependencies = [
        ('address', '0002_auto_20190612_1127'),
    ]
    operations = [
        migrations.CreateModel(
            name='Usuario',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.CharField(max_length=100)),
                ('senha', models.CharField(max_length=30)),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
bf604c5c88de4b1652ed6c32594f61c0e84a082f
|
b6a59c78b4143441077f9ce81c9a6951687f9103
|
/quiz/common/templatetags/common_tags.py
|
c22c495f3760396d2cbf01c3943b9cb2026abee6
|
[] |
no_license
|
EkaterinaEIvanova/quiz
|
7389bd26eb891ba5a7033b91698321cbba7d2d7d
|
6f93a5d6e604f127be0d29e8eebbb07c10eb9d47
|
refs/heads/master
| 2023-03-22T00:54:27.100204 | 2021-03-10T07:35:08 | 2021-03-10T07:35:08 | 346,270,553 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 179 |
py
|
from django import template
register = template.Library()
@register.simple_tag()
def get_name_or_email(user):
    """Template tag: the user's name, or their email when the name is unset."""
    return user.name or user.email
|
[
"[email protected]"
] | |
cff93f064b230f06153b1a99ce69e4f99f7623ed
|
82f5a3e139034da846db0c3516848e3a797a52f0
|
/sixteen.py
|
16eeac1b1be9cf18646774eb3f75c61a77c5b307
|
[] |
no_license
|
Yanl05/LeetCode-git
|
d98f807d05d80b7df6c0a4f69cf233e25b0695b5
|
ce617247645517f15d513c29e12c7fff33e1cccf
|
refs/heads/master
| 2020-04-12T08:33:30.416069 | 2018-12-19T06:07:36 | 2018-12-19T06:07:36 | 162,388,594 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,272 |
py
|
'''
Given an array nums of n integers and a target value target,
find the three integers in nums whose sum is closest to target and
return that sum. Each input is assumed to have exactly one answer.
Example: given nums = [-1, 2, 1, -4] and target = 1,
the closest sum to target is 2 (-1 + 2 + 1 = 2).
'''
class Solution:
    def threeSumClosest(self, nums, target):
        """Return the sum of three entries of nums closest to target.

        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        nums.sort()
        print(nums)  # kept: the original echoes the sorted input
        size = len(nums)
        best_gap = 9999999
        closest_sum = 0
        # Fix the smallest element, then squeeze the other two with a
        # two-pointer sweep over the sorted tail.
        for first in range(size - 2):
            lo, hi = first + 1, size - 1
            while lo < hi:
                delta = nums[first] + nums[lo] + nums[hi] - target
                if delta == 0:
                    # Exact hit -- nothing can be closer.
                    return target
                gap = abs(delta)
                if gap <= best_gap:
                    closest_sum = delta + target
                    best_gap = gap
                if delta < 0:
                    lo += 1
                else:
                    hi -= 1
        return closest_sum
# Quick manual check: the closest sum to 1 in [-1, 2, 1, -4] is 2 (-1 + 2 + 1).
print(Solution().threeSumClosest([-1,2,1,-4], 1))
|
[
"[email protected]"
] | |
d4877edd8d5a2d480e47bd50800d5ab3ebf850c1
|
411e5de8629d6449ff9aad2eeb8bb1dbd5977768
|
/AlgoExpert/array/largestRange/1.py
|
821c57749e45fe5a0fdc1c4e4747e82157bdece3
|
[
"MIT"
] |
permissive
|
Muzque/Leetcode
|
cd22a8f5a17d9bdad48f8e2e4dba84051e2fb92b
|
2c37b4426b7e8bfc1cd2a807240b0afab2051d03
|
refs/heads/master
| 2022-06-01T20:40:28.019107 | 2022-04-01T15:38:16 | 2022-04-01T15:39:24 | 129,880,002 | 1 | 1 |
MIT
| 2022-04-01T15:39:25 | 2018-04-17T09:28:02 |
Python
|
UTF-8
|
Python
| false | false | 395 |
py
|
def largestRange(array):
    """Scan the sorted array from both ends toward the middle, resetting the
    candidate bounds whenever a gap (neither duplicate nor consecutive value)
    is found, and return [low, high].

    Sorts *array* in place, exactly like the original implementation.
    """
    array.sort()
    size = len(array)
    # Mirror of int(size/2) (+1 when odd): how far the two-ended scan runs.
    half = (size + 1) // 2
    low, high = array[0], array[size - 1]
    for front in range(1, half):
        back = size - front - 1
        prev = array[front - 1]
        # Break in the ascending run from the left: restart the lower bound.
        if array[front] != prev and array[front] != prev + 1:
            low = array[front]
        nxt = array[back + 1]
        # Break in the descending run from the right: restart the upper bound.
        if array[back] != nxt and array[back] != nxt - 1:
            high = array[back]
    return [low, high]
|
[
"[email protected]"
] | |
df35d4e2bc4e83da4aa1b6939db8d9e229e0bd70
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/travelport/models/type_agency_profile_level_4.py
|
5ee1ce876f63b649381647bc034c48d77dea4ecb
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 |
Python
|
UTF-8
|
Python
| false | false | 318 |
py
|
from __future__ import annotations
from enum import Enum
# Target XML namespace for the generated schema types in this module.
__NAMESPACE__ = "http://www.travelport.com/schema/common_v37_0"
class TypeAgencyProfileLevel4(Enum):
    """
    Profile levels in the Agency Hierarchy.
    """
    # Values are the literal strings used on the wire by the schema.
    AGENCY = "Agency"
    BRANCH = "Branch"
    BRANCH_GROUP = "BranchGroup"
    AGENT = "Agent"
|
[
"[email protected]"
] | |
6ffac5ea208ba2d6e273b1fdd1775d31f9762364
|
9eab77cb998e94ceb2b2d08738b05a98982505f1
|
/sentiment-analysis/pythoncodes/01-text-to-id.py
|
16b8e56535efcf07addf12250c40f7bd8382a0a7
|
[] |
no_license
|
behrouzmadahian/python
|
1584dd13cde8531e69bb6fab76f148dc3fc0da57
|
5d4dbde8d570623fe785e78a3e45cd05ea80aa08
|
refs/heads/master
| 2021-06-28T16:53:09.927450 | 2020-09-21T14:02:55 | 2020-09-21T14:02:55 | 156,713,696 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 827 |
py
|
import re
from nltk.corpus import stopwords
from nltk import word_tokenize
stop_words = stopwords.words('english')
def remove_stop_words(word_list, stopwords):
    """Return word_list with every word that appears in stopwords removed."""
    return [word for word in word_list if word not in stopwords]
# Removes punctuation, parentheses, question marks, etc., leaving lowercase
# alphanumeric words separated by single spaces.
def clean_sentences(string):
    """Lowercase *string*, strip HTML ``<br />`` tags, and collapse every run
    of non-alphanumeric characters into a single space.

    BUGFIX: special-character runs are now replaced with ' ' instead of ''.
    Deleting them outright fused adjacent words ("Hello, World" ->
    "helloworld"), which made the downstream word tokenization meaningless.
    """
    strip_special_chars = re.compile("[^a-zA-Z0-9_]+")
    string = string.lower().replace("<br />", " ")
    return re.sub(strip_special_chars, " ", string.lower()).strip()
def text_to_ids(text, vocab_list):
    """Convert raw text into a list of vocabulary indices.

    Cleans the text, tokenizes it with nltk, drops stop words, then maps
    each remaining word to its first index in vocab_list.
    NOTE(review): list.index is O(len(vocab_list)) per word and raises
    ValueError for out-of-vocabulary words -- a precomputed dict lookup
    would be faster if that exception behavior isn't relied upon.
    """
    text_cleaned = clean_sentences(text)
    word_list = word_tokenize(text_cleaned)
    word_list = remove_stop_words(word_list, stop_words)
    word_inds = [vocab_list.index(w) for w in word_list]
    return word_inds
|
[
"[email protected]"
] | |
225ceeb7e8183ff4fe55fd640c53ec2f3624a6c8
|
2bcc421ee345b00cf805c543b37d18b5d019dc04
|
/adafruit-circuitpython-bundle-6.x-mpy-20201126/examples/matrixportal_simpletest.py
|
6acecd14cfce626edae83fa7e034c7cadbe1bf85
|
[] |
no_license
|
saewoonam/sc-current-source-titano
|
5a1ad46889c1b09c168424901fd71cb4eab5c61b
|
1c136aa8b61268d9ac0b5a682b30ece70ab87663
|
refs/heads/main
| 2023-03-02T22:12:26.685537 | 2021-02-09T03:28:01 | 2021-02-09T03:28:01 | 317,299,900 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,528 |
py
|
# SPDX-FileCopyrightText: 2020 Melissa LeBlanc-Williams, written for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
"""
This example checks the current Bitcoin price and displays it in the middle of the screen
"""
import time
import board
import terminalio
from adafruit_matrixportal.matrixportal import MatrixPortal
# You can display in 'GBP', 'EUR' or 'USD'
CURRENCY = "USD"
# Set up where we'll be fetching data from
DATA_SOURCE = "https://api.coindesk.com/v1/bpi/currentprice.json"
DATA_LOCATION = ["bpi", CURRENCY, "rate_float"]
def text_transform(val):
    """Format *val* with the symbol of the configured display currency,
    falling back to a bare integer for unknown currencies."""
    templates = {"USD": "$%d", "EUR": "€%d", "GBP": "£%d"}
    return templates.get(CURRENCY, "%d") % val
# the current working directory (where this file is)
cwd = ("/" + __file__).rsplit("/", 1)[0]
# Fetch JSON from DATA_SOURCE and drill into it via DATA_LOCATION.
matrixportal = MatrixPortal(
    url=DATA_SOURCE, json_path=DATA_LOCATION, status_neopixel=board.NEOPIXEL,
)
# One centered text field; text_transform prettifies the fetched price.
matrixportal.add_text(
    text_font=terminalio.FONT,
    text_position=(16, 16),
    text_color=0xFFFFFF,
    text_transform=text_transform,
)
matrixportal.preload_font(b"$012345789") # preload numbers
matrixportal.preload_font((0x00A3, 0x20AC)) # preload gbp/euro symbol
# Poll forever; fetch errors are logged and retried on the next cycle.
while True:
    try:
        value = matrixportal.fetch()
        print("Response is", value)
    except (ValueError, RuntimeError) as e:
        print("Some error occured, retrying! -", e)
    time.sleep(3 * 60)  # wait 3 minutes
|
[
"[email protected]"
] | |
6ba923e2897f1f34a8d9fefb279295ca6d447ad8
|
3c88b31090d6568435e811a455ce934604fa5c9f
|
/category/migrations/0004_auto_20210604_1515.py
|
1598b851990f23fb79ef630c2f5f22ca87780b4e
|
[] |
no_license
|
kamran1231/Great-Cart-Django-Website
|
09e0e7b5085737cf54614b45b5424ac5c273bb5b
|
a674593d5c8cb15be7b24dca397f9027659033e2
|
refs/heads/main
| 2023-05-17T08:52:31.092404 | 2021-06-09T20:21:08 | 2021-06-09T20:21:08 | 373,645,947 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 453 |
py
|
# Generated by Django 3.1.7 on 2021-06-04 09:45
from django.db import migrations, models
class Migration(migrations.Migration):
    # Re-declares the implicit auto 'id' primary key on the category model
    # (a no-op schema-wise; generated when Django's default PK type changed).
    dependencies = [
        ('category', '0003_alter_category_category_name'),
    ]
    operations = [
        migrations.AlterField(
            model_name='category',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
|
[
"[email protected]"
] | |
d8985677a59029b17e03e42069b38812e14ecf8d
|
743d58c35caf21568feddc86946bbee340174721
|
/leet_code/labuladong/stack/p0739_Daily_Temperatures.py
|
0d1b05a124037d6c13f214f35b0a1ee5f41b145f
|
[] |
no_license
|
klgentle/lc_python
|
38009ed82614c8f21ca9af6e3779a2e0898af09f
|
aabe56e690b40e4b93afef99bfe46d9a06e20cea
|
refs/heads/master
| 2022-12-02T05:23:55.585659 | 2022-08-07T12:11:38 | 2022-08-07T12:11:38 | 176,750,473 | 2 | 0 | null | 2022-11-15T23:42:06 | 2019-03-20T14:21:51 |
Python
|
UTF-8
|
Python
| false | false | 926 |
py
|
"""
739. Daily Temperatures
Medium
Given a list of daily temperatures T, return a list such that, for each day in the input, tells you how many days you would have to wait until a warmer temperature. If there is no future day for which this is possible, put 0 instead.
For example, given the list of temperatures T = [73, 74, 75, 71, 69, 72, 76, 73], your output should be [1, 1, 4, 2, 1, 1, 0, 0].
Note: The length of temperatures will be in the range [1, 30000]. Each temperature will be an integer in the range [30, 100].
"""
class Solution:
    def dailyTemperatures(self, T: List[int]) -> List[int]:
        """For each day, return how many days until a warmer temperature
        (0 when no warmer day follows)."""
        answer = [0] * len(T)
        pending = []  # indices of days still waiting for a warmer one
        for day, temp in enumerate(T):
            # Today resolves every pending day that was strictly colder.
            while pending and T[pending[-1]] < temp:
                resolved = pending.pop()
                answer[resolved] = day - resolved
            pending.append(day)
        return answer
|
[
"[email protected]"
] | |
ffd932dbd780505eb4bef606f414e3d7a4c848cc
|
fa93e53a9eee6cb476b8998d62067fce2fbcea13
|
/build/position_controllers/catkin_generated/pkg.installspace.context.pc.py
|
23b00e2290c58c2e5784fc5a4572705354fb4fd1
|
[] |
no_license
|
oyetripathi/ROS_conclusion_project
|
2947ee2f575ddf05480dabc69cf8af3c2df53f73
|
01e71350437d57d8112b6cec298f89fc8291fb5f
|
refs/heads/master
| 2023-06-30T00:38:29.711137 | 2021-08-05T09:17:54 | 2021-08-05T09:17:54 | 392,716,311 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 518 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# NOTE(review): auto-generated by the catkin build; edit package.xml /
# CMakeLists.txt instead of this file. The "x if x != '' else []" pattern
# is the template's way of emitting an empty list for empty values.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "controller_interface;forward_command_controller".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lposition_controllers".split(';') if "-lposition_controllers" != "" else []
PROJECT_NAME = "position_controllers"
PROJECT_SPACE_DIR = "/home/sandeepan/tiago_public_ws/install"
PROJECT_VERSION = "0.4.2"
|
[
"[email protected]"
] | |
430b886607c68f95ee1443b58e22c10b06ca0c36
|
b2135e3fc77666f043f0fbafd0d88ed9865d5b4f
|
/7183 Python Basics/32 Chapter 6 - About Properties/07 test_validation3/78794_01_code.py
|
2f8efd74a7afa4db194872d5c8b652ef492fbd27
|
[] |
no_license
|
Felienne/spea
|
164d05e9fbba82c7b7df8d00295f7157054f9248
|
ecb06c66aaf6a2dced3f141ca415be9efb7dbff5
|
refs/heads/master
| 2020-03-17T17:35:27.302219 | 2018-05-17T10:14:49 | 2018-05-17T10:14:49 | 133,794,299 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 568 |
py
|
#
class AboutAssignments(unittest.TestCase):
    """Koan exercise about properties with setter validation."""
    class Doctor:
        # Starts at 903 and, via the setter below, can only age forward.
        def __init__(self):
            self._age = 903
        @property
        def age(self):
            return self._age
        @age.setter
        def age(self, value):
            # Silently ignore attempts to lower the age.
            if value < self.age:
                pass
                # nice try! you can't get any younger
            else:
                self._age = value
    def test_validation3(self):
        jodie = self.Doctor()
        self.assertEqual(903, jodie.age)
        jodie.age += 9
        # `__` is the koan blank for the student to fill in (903 + 9).
        self.assertEqual(__, jodie.age)
|
[
"[email protected]"
] | |
7b14e461e9ba7105b24ef8d77b490e8ec0419f57
|
c0239d75a8199ec84ad683f945c21785c1b59386
|
/dingtalk/api/rest/OapiChatTagDeleteRequest.py
|
2292ed627d4873421afe37fd82864be50c362d9b
|
[] |
no_license
|
luss613/oauth_dingtalk
|
9f253a75ce914c577dbabfb84e97fd883e80e04b
|
1e2554642d2b16c642a031670d08efa4a74e8252
|
refs/heads/master
| 2023-04-23T01:16:33.450821 | 2020-06-18T08:22:57 | 2020-06-18T08:22:57 | 264,966,287 | 1 | 1 | null | 2020-06-18T08:31:24 | 2020-05-18T14:33:25 |
Python
|
UTF-8
|
Python
| false | false | 348 |
py
|
'''
Created by auto_sdk on 2019.10.31
'''
from dingtalk.api.base import RestApi
class OapiChatTagDeleteRequest(RestApi):
	"""Auto-generated request wrapper for the dingtalk.oapi.chat.tag.delete
	API. Callers set chatid / group_tag before sending."""
	def __init__(self,url=None):
		RestApi.__init__(self,url)
		# Request parameters, populated by the caller.
		self.chatid = None
		self.group_tag = None
	def getHttpMethod(self):
		return 'POST'
	def getapiname(self):
		return 'dingtalk.oapi.chat.tag.delete'
|
[
"[email protected]"
] | |
187ba8799480652d89c93f0faa7a2c97b7f99b6a
|
d61f7eda203a336868c010abb8f9a6f45dd51adb
|
/497. Random Point in Non-overlapping Rectangles.py
|
01542c98bf043ff665c52427319b5c46b11bdf49
|
[] |
no_license
|
Mschikay/leetcode
|
b91df914afc728c2ae1a13d3994568bb6c1dcffb
|
7c5e5fe76cee542f67cd7dd3a389470b02597548
|
refs/heads/master
| 2020-04-17T12:11:38.810325 | 2019-10-06T02:37:32 | 2019-10-06T02:37:32 | 166,570,922 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 761 |
py
|
class Solution:
def __init__(self, rects: List[List[int]]):
self.rects = rects
self.prefix = [0]
for x1, y1, x2, y2 in rects:
self.prefix.append((x2 - x1 + 1) * (y2 - y1 + 1) + self.prefix[-1])
def pick(self) -> List[int]:
num = random.randint(0, self.prefix[-1])
l, h = 0, len(self.prefix) - 1
while l <= h:
m = (l + h) // 2
if self.prefix[m] < num:
l = m + 1
else:
h = m - 1
x1, y1, x2, y2 = self.rects[l - 1]
x = random.randint(x1, x2)
y = random.randint(y1, y2)
return [x, y]
# Your Solution object will be instantiated and called as such:
# obj = Solution(rects)
# param_1 = obj.pick()
|
[
"[email protected]"
] | |
3d6198b0abdc87164e432fd09c0390ecba72de19
|
de1abd0ebbb817aa5f23d369e7dda360fd6f1c32
|
/chapter8/7-NltkAnalysis.py
|
486c02f2c7559694ee722504c06720e50861ed6a
|
[] |
no_license
|
CodedQuen/Web-Scraping-with-Python-
|
33aaa2e3733aa1f2b8c7a533d74f5d08ac868197
|
67f2d5f57726d5a943f5f044480e68c36076965b
|
refs/heads/master
| 2022-06-13T01:34:39.764531 | 2020-05-05T11:07:01 | 2020-05-05T11:07:01 | 261,435,932 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 486 |
py
|
from nltk import word_tokenize, sent_tokenize, pos_tag
sentences = sent_tokenize("Google is one of the best companies in the world. I constantly google myself to see what I'm up to.")
nouns = ['NN', 'NNS', 'NNP', 'NNPS']
for sentence in sentences:
if "google" in sentence.lower():
taggedWords = pos_tag(word_tokenize(sentence))
for word in taggedWords:
if word[0].lower() == "google" and word[1] in nouns:
print(sentence)
|
[
"[email protected]"
] | |
7b44412ce11d8c6c342152422abcba093327737b
|
3a48cfb0b43fe61f52355a67b2b5700aa8c5ddf2
|
/src/som/interpreter/ast/nodes/message/generic_node.py
|
5cfc38a7257dfdd24617ab9116a1996177084454
|
[
"MIT"
] |
permissive
|
SOM-st/RTruffleSOM
|
ce380d02985b0ef1f41f400409f61377dc3a583e
|
1efc698577830ff3fcd1607e7155d9c6423e8804
|
refs/heads/master
| 2021-01-17T07:25:19.895376 | 2020-12-08T18:56:50 | 2020-12-08T18:56:50 | 17,311,290 | 9 | 2 |
MIT
| 2020-09-02T16:08:31 | 2014-03-01T08:45:25 |
Python
|
UTF-8
|
Python
| false | false | 2,256 |
py
|
from rpython.rlib.debug import make_sure_not_resized
from rpython.rlib.jit import we_are_jitted
from ..dispatch import SuperDispatchNode, UninitializedDispatchNode, send_does_not_understand
from .abstract_node import AbstractMessageNode
class GenericMessageNode(AbstractMessageNode):
_immutable_fields_ = ['_dispatch?']
_child_nodes_ = ['_dispatch']
def __init__(self, selector, universe, rcvr_expr, arg_exprs,
source_section = None):
AbstractMessageNode.__init__(self, selector, universe, rcvr_expr,
arg_exprs, source_section)
if rcvr_expr.is_super_node():
dispatch = SuperDispatchNode(selector, rcvr_expr.get_super_class(),
universe)
else:
dispatch = UninitializedDispatchNode(selector, universe)
self._dispatch = self.adopt_child(dispatch)
def replace_dispatch_list_head(self, node):
self._dispatch.replace(node)
def execute(self, frame):
rcvr, args = self._evaluate_rcvr_and_args(frame)
return self.execute_evaluated(frame, rcvr, args)
def execute_evaluated(self, frame, rcvr, args):
assert frame is not None
assert rcvr is not None
assert args is not None
make_sure_not_resized(args)
if we_are_jitted():
return self._direct_dispatch(rcvr, args)
else:
return self._dispatch.execute_dispatch(rcvr, args)
def _direct_dispatch(self, rcvr, args):
method = self._lookup_method(rcvr)
if method:
return method.invoke(rcvr, args)
else:
return send_does_not_understand(rcvr, self._selector, args, self._universe)
def _lookup_method(self, rcvr):
rcvr_class = self._class_of_receiver(rcvr)
return rcvr_class.lookup_invokable(self._selector)
def _class_of_receiver(self, rcvr):
if self._rcvr_expr.is_super_node():
return self._rcvr_expr.get_super_class()
return rcvr.get_class(self._universe)
def __str__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
self._selector,
self._source_section)
|
[
"[email protected]"
] | |
e1f076a8b40ac225debbdfe4c6812b58dabf08a9
|
ef74d9ad851021bcb0ed12880e14269b6ed7f617
|
/Sample/Doudizhu/Server/src/ZyGames.Doudizhu.HostServer/PyScript/Action/Action12001.py
|
7d60a50f0c7331ca1c254a61ca9b33c5de93279d
|
[
"BSD-2-Clause-Views",
"MIT"
] |
permissive
|
sunyuping/Scut
|
b5e5798e9b519941f0ac3a08a3263dc0f45beb47
|
ec2ea35c0e4de1f2da49c50d14e119a4f17cd93a
|
refs/heads/master
| 2020-12-25T23:19:26.597830 | 2013-11-16T07:50:01 | 2013-11-16T07:50:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,188 |
py
|
import clr, sys
from action import *
from System import *
from mathUtils import MathUtils
clr.AddReference('ZyGames.Framework');
clr.AddReference('ZyGames.Framework.Common');
clr.AddReference('ZyGames.Framework.Game');
clr.AddReference('ZyGames.Doudizhu.Bll');
clr.AddReference('ZyGames.Doudizhu.Model');
clr.AddReference('ZyGames.Doudizhu.Lang');
from System.Collections.Generic import *
from ZyGames.Framework.SyncThreading import *
from ZyGames.Framework.Common import *
from ZyGames.Framework.Game.Cache import *
from ZyGames.Framework.Game.Com.Rank import *
from ZyGames.Framework.Game.Service import *
from ZyGames.Doudizhu.Bll import *
from ZyGames.Doudizhu.Bll.Logic import *
from ZyGames.Doudizhu.Bll.Com.Chat import *
from ZyGames.Doudizhu.Lang import *
from ZyGames.Doudizhu.Model import *
from ZyGames.Framework.Cache.Generic import *
from ZyGames.Framework.Game.Runtime import *
from ZyGames.Framework.Cache import *
#12001_转盘界面接口
class UrlParam(HttpParam):
def __init__(self):
HttpParam.__init__(self)
class ActionResult(DataResult):
def __init__(self):
DataResult.__init__(self)
self.IsFree = 0
self.FreeNum = 0
self.DailList = List[DialInfo]
self.UserCoin = 0
self.UserGold = 0
def getUrlElement(httpGet, parent):
urlParam = UrlParam()
if True:
urlParam.Result = True
else:
urlParam.Result = False
return urlParam
def takeAction(urlParam, parent):
actionResult = ActionResult()
userId = parent.Current.User.PersonalId;
user = parent.Current.User
gameRoom = GameRoom.Current
dailyFreeNum = ConfigEnvSet.GetInt("User.DailyFreeNum", 3);
useNum = 0
userRestrain = GameDataCacheSet[UserDailyRestrain]().FindKey(userId)
if userRestrain!=None:
gameRoom.RefreshRestrain(userRestrain)
if userRestrain.RestrainProperty!= None:
useNum = userRestrain.RestrainProperty.DialFreeNum
if dailyFreeNum > useNum:
actionResult.FreeNum = MathUtils.Subtraction(dailyFreeNum,useNum)
else:
actionResult.IsFree = 1;
actionResult.DailList = ConfigCacheSet[DialInfo]().FindAll();
actionResult.UserCoin = user.GameCoin
gameHall = GameHall(user)
actionResult.UserGold = gameHall.UserGold
#需要实现
return actionResult
def buildPacket(writer, urlParam, actionResult):
postion = 0
writer.PushShortIntoStack(actionResult.IsFree)
writer.PushIntoStack(actionResult.FreeNum)
writer.PushIntoStack(len(actionResult.DailList))
for info in actionResult.DailList:
postion = MathUtils.Addition(postion, 1);
Probability = PythonHelper.TransformString(info.Probability)
dsItem = DataStruct()
dsItem.PushIntoStack(postion)
dsItem.PushIntoStack(MathUtils.ToNotNullString(info.HeadID))
dsItem.PushIntoStack(MathUtils.ToNotNullString(Probability))
dsItem.PushIntoStack(MathUtils.ToNotNullString(info.ItemDesc))
dsItem.PushIntoStack(info.GameCoin)
writer.PushIntoStack(dsItem)
writer.PushIntoStack(actionResult.UserCoin)
writer.PushIntoStack(actionResult.UserGold)
return True
|
[
"[email protected]"
] | |
33984d775374f698a16233b294ee3e505d447c22
|
75519d2a9bf55e2d9376ea08a36676948a8b232c
|
/ui/uikits/TextSteam.py
|
222dfb2dcd7959a0cc728b523b9bf881ec8afbf0
|
[
"MIT"
] |
permissive
|
CGFanTuan/damgteam
|
9c32d59cbd0ecb9d3acffd9b902b918c40797e14
|
aec414f084f6ab6ec5897314390605aaa8380d62
|
refs/heads/master
| 2020-09-17T00:29:24.832648 | 2019-11-25T09:51:13 | 2019-11-25T09:51:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,118 |
py
|
# -*- coding: utf-8 -*-
"""
Script Name: TextSteam.py
Author: Do Trinh/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
from __future__ import absolute_import, unicode_literals
from PyQt5.QtCore import QTextStream
from appData import __copyright__
class TextStream(QTextStream):
Type = 'DAMGSTREAM'
key = 'TextStream'
_name = 'DAMG Text Stream'
_copyright = __copyright__
@property
def copyright(self):
return self._copyright
@property
def name(self):
return self._name
@name.setter
def name(self, newName):
self._name = newName
# -------------------------------------------------------------------------------------------------------------
# Created by panda on 15/11/2019 - 5:43 PM
# © 2017 - 2018 DAMGteam. All rights reserved
|
[
"[email protected]"
] | |
5bc9f7cb725e608b584db5bb260968104795a451
|
8aefdf04c115c6c6ab64997576ced97d4727dd06
|
/curation-api/src/users/migrations/0003_auto_20170809_0921.py
|
b1d063c42c10db300647e9e67f63a3b2095bfcd5
|
[] |
no_license
|
mohanj1919/django_app_test
|
a0d47bc98c604d81253c74488dcdbc2ccd039863
|
5d5bc4c1eecbf627d38260e4d314d8451d67a4f5
|
refs/heads/master
| 2021-05-08T06:01:21.712986 | 2017-10-11T12:12:07 | 2017-10-11T12:12:07 | 106,544,537 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 485 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-08-09 09:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20170703_1555'),
]
operations = [
migrations.AlterField(
model_name='curationuser',
name='phone_number',
field=models.CharField(max_length=15, null=True, unique=True),
),
]
|
[
"[email protected]"
] | |
59919a9d9900991467fcaabb4cc8e2acaff0e9e0
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/aphotomanager/testcase/firstcases/testcase5_028.py
|
6856a16cc6fb6a518aa1c467766e72d1e3596a1c
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,391 |
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'de.k3b.android.androFotoFinder',
'appActivity' : 'de.k3b.android.androFotoFinder.FotoGalleryActivity',
'resetKeyboard' : True,
'androidCoverage' : 'de.k3b.android.androFotoFinder/de.k3b.android.androFotoFinder.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase028
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"More options\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Show in new gallery\")", "new UiSelector().className(\"android.widget.TextView\").instance(5)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"android:id/home\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"android:id/home\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"/storage/sdcard/Pictures/Wikipedia/Michael Mosman District Judge.jpg\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).long_press(element).release().perform()
element = getElememtBack(driver, "new UiSelector().text(\"/storage/sdcard/pic4.jpg\")", "new UiSelector().className(\"android.widget.TextView\").instance(7)")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"de.k3b.android.androFotoFinder:id/action_edit\").className(\"android.widget.TextView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"de.k3b.android.androFotoFinder:id/menu_item_share\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"5_028\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'de.k3b.android.androFotoFinder'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
|
[
"[email protected]"
] | |
b15a144177a3426684ef389cecaaf365fc24dcb7
|
f54070cd3048a3645cb25f301592a904d387a1c9
|
/python_prgrams/testpython/file7.py
|
d8e98c05bbd14af3e9bf261e2d23c7dc207b2a22
|
[] |
no_license
|
mak705/Python_interview
|
02bded60417f1e6e2d81e1f6cde6961d95da2a8e
|
aff2d6018fd539dbcde9e3a6b3f8a69167ffca0d
|
refs/heads/master
| 2020-03-22T21:03:34.018919 | 2019-11-15T08:51:34 | 2019-11-15T08:51:34 | 140,653,056 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 138 |
py
|
##counting lines in a file
fhand = open('mul.py')
for line in fhand:
line = line.rstrip()
if not line.startswith('#From'):
print line
|
[
"[email protected]"
] | |
fe616439df2cf983c744ea323919525c2e94cbb2
|
814fd0bea5bc063a4e34ebdd0a5597c9ff67532b
|
/chrome/common/extensions/docs/server2/refresh_tracker_test.py
|
f1f596f1afefe93317d8fa365571a158aa4abe97
|
[
"BSD-3-Clause"
] |
permissive
|
rzr/chromium-crosswalk
|
1b22208ff556d69c009ad292bc17dca3fe15c493
|
d391344809adf7b4f39764ac0e15c378169b805f
|
refs/heads/master
| 2021-01-21T09:11:07.316526 | 2015-02-16T11:52:21 | 2015-02-16T11:52:21 | 38,887,985 | 0 | 0 |
NOASSERTION
| 2019-08-07T21:59:20 | 2015-07-10T15:35:50 |
C++
|
UTF-8
|
Python
| false | false | 1,941 |
py
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from object_store_creator import ObjectStoreCreator
from refresh_tracker import RefreshTracker
class RefreshTrackerTest(unittest.TestCase):
def setUp(self):
self._refresh_tracker = RefreshTracker(ObjectStoreCreator.ForTest())
def testNonExistentRefreshIsIncomplete(self):
self.assertFalse(self._refresh_tracker.GetRefreshComplete('unicorns').Get())
def testEmptyRefreshIsComplete(self):
refresh_id = 'abcdefghijklmnopqrstuvwxyz'
self._refresh_tracker.StartRefresh(refresh_id, []).Get()
self.assertTrue(self._refresh_tracker.GetRefreshComplete(refresh_id).Get())
def testRefreshCompletion(self):
refresh_id = 'this is fun'
self._refresh_tracker.StartRefresh(refresh_id, ['/do/foo', '/do/bar']).Get()
self._refresh_tracker.MarkTaskComplete(refresh_id, '/do/foo').Get()
self.assertFalse(self._refresh_tracker.GetRefreshComplete(refresh_id).Get())
self._refresh_tracker.MarkTaskComplete(refresh_id, '/do/bar').Get()
self.assertTrue(self._refresh_tracker.GetRefreshComplete(refresh_id).Get())
def testUnknownTasksAreIrrelevant(self):
refresh_id = 'i am a banana'
self._refresh_tracker.StartRefresh(refresh_id, ['a', 'b', 'c', 'd']).Get()
self._refresh_tracker.MarkTaskComplete(refresh_id, 'a').Get()
self._refresh_tracker.MarkTaskComplete(refresh_id, 'b').Get()
self._refresh_tracker.MarkTaskComplete(refresh_id, 'c').Get()
self._refresh_tracker.MarkTaskComplete(refresh_id, 'q').Get()
self.assertFalse(self._refresh_tracker.GetRefreshComplete(refresh_id).Get())
self._refresh_tracker.MarkTaskComplete(refresh_id, 'd').Get()
self.assertTrue(self._refresh_tracker.GetRefreshComplete(refresh_id).Get())
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
a13bd7f9da7ea032c84dac021788da7cb8446ba9
|
ac2c3e8c278d0aac250d31fd023c645fa3984a1b
|
/saleor/saleor/wishlist/error_codes.py
|
5f77c477ea3948543085f5817a1d759cf6bc6e85
|
[
"BSD-3-Clause",
"CC-BY-4.0"
] |
permissive
|
jonndoe/saleor-test-shop
|
152bc8bef615382a45ca5f4f86f3527398bd1ef9
|
1e83176684f418a96260c276f6a0d72adf7dcbe6
|
refs/heads/master
| 2023-01-21T16:54:36.372313 | 2020-12-02T10:19:13 | 2020-12-02T10:19:13 | 316,514,489 | 1 | 1 |
BSD-3-Clause
| 2020-11-27T23:29:20 | 2020-11-27T13:52:33 |
TypeScript
|
UTF-8
|
Python
| false | false | 196 |
py
|
from enum import Enum
class WishlistErrorCode(str, Enum):
GRAPHQL_ERROR = "graphql_error"
INVALID = "invalid"
NOT_FOUND = "not_found"
REQUIRED = "required"
UNIQUE = "unique"
|
[
"[email protected]"
] | |
b9a387605d577d71f54a61961bb4e49480104471
|
0180b1a8e19c0a02e7c00ebe1a58e17347ad1996
|
/BCR2000/consts.py
|
a1a23805ec9ecae2ff31a2bf1a642c416c9ebe69
|
[] |
no_license
|
cce/buttons
|
e486af364c6032b4be75ab9de26f42b8d882c5b0
|
7d4936c91df99f4c6e08f7e347de64361c75e652
|
refs/heads/master
| 2021-01-17T06:56:55.859306 | 2014-12-22T05:03:00 | 2015-11-25T03:42:28 | 46,657,841 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,666 |
py
|
# Embedded file name: /Users/versonator/Jenkins/live/Binary/Core_Release_64_static/midi-remote-scripts/BCR2000/consts.py
""" The following consts should be substituted with the Sys Ex messages for requesting
a controller's ID response and that response to allow for automatic lookup"""
ID_REQUEST = 0
ID_RESP = 0
GENERIC_STOP = 105
GENERIC_PLAY = 106
GENERIC_REC = 107
GENERIC_LOOP = 108
GENERIC_RWD = -1
GENERIC_FFWD = -1
GENERIC_TRANSPORT = (GENERIC_STOP,
GENERIC_PLAY,
GENERIC_REC,
GENERIC_LOOP,
GENERIC_RWD,
GENERIC_FFWD)
GENERIC_ENC1 = 1
GENERIC_ENC2 = 2
GENERIC_ENC3 = 3
GENERIC_ENC4 = 4
GENERIC_ENC5 = 5
GENERIC_ENC6 = 6
GENERIC_ENC7 = 7
GENERIC_ENC8 = 8
GENERIC_ENCODERS = (GENERIC_ENC1,
GENERIC_ENC2,
GENERIC_ENC3,
GENERIC_ENC4,
GENERIC_ENC5,
GENERIC_ENC6,
GENERIC_ENC7,
GENERIC_ENC8)
GENERIC_SLI1 = 81
GENERIC_SLI2 = 82
GENERIC_SLI3 = 83
GENERIC_SLI4 = 84
GENERIC_SLI5 = 85
GENERIC_SLI6 = 86
GENERIC_SLI7 = 87
GENERIC_SLI8 = 88
GENERIC_SLIDERS = (GENERIC_SLI1,
GENERIC_SLI2,
GENERIC_SLI3,
GENERIC_SLI4,
GENERIC_SLI5,
GENERIC_SLI6,
GENERIC_SLI7,
GENERIC_SLI8)
GENERIC_BUT1 = 73
GENERIC_BUT2 = 74
GENERIC_BUT3 = 75
GENERIC_BUT4 = 76
GENERIC_BUT5 = 77
GENERIC_BUT6 = 78
GENERIC_BUT7 = 79
GENERIC_BUT8 = 80
GENERIC_BUT9 = -1
GENERIC_BUTTONS = (GENERIC_BUT1,
GENERIC_BUT2,
GENERIC_BUT3,
GENERIC_BUT4,
GENERIC_BUT5,
GENERIC_BUT6,
GENERIC_BUT7,
GENERIC_BUT8)
GENERIC_PAD1 = 65
GENERIC_PAD2 = 66
GENERIC_PAD3 = 67
GENERIC_PAD4 = 68
GENERIC_PAD5 = 69
GENERIC_PAD6 = 70
GENERIC_PAD7 = 71
GENERIC_PAD8 = 72
GENERIC_PADS = (GENERIC_PAD1,
GENERIC_PAD2,
GENERIC_PAD3,
GENERIC_PAD4,
GENERIC_PAD5,
GENERIC_PAD6,
GENERIC_PAD7,
GENERIC_PAD8)
|
[
"[email protected]"
] | |
62fed4f8d716eb544aca34dbe492a0dfcc899225
|
4da57c6e9efb0a884449e019ce5c9b5d516d2bb1
|
/exp/kernel_benchmark/bin_clean/amarel_aggr_data.py
|
6d0a278193addea1d73a624d1f74908838af8828
|
[] |
no_license
|
radical-experiments/affinity_model
|
dc848fe1666b2f017d37ba041890462890eba9b5
|
fc67420a2278020eee770680fa7ccef76ed2dfa5
|
refs/heads/master
| 2021-04-06T16:56:26.847920 | 2018-09-25T03:15:47 | 2018-09-25T03:15:47 | 83,361,464 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,431 |
py
|
import os
import sys
import csv
from pprint import pprint
from files_dir_mod import *
def amarel_aggregate(src_path, dst_path):
for dirname in dirnames:
if not os.path.isdir(src_path+'/'+dirname):
print "{0} does not exist".format(dirname)
continue
dir_keywords = dirname.split('/')
pprint(dir_keywords)
machine = dir_keywords[1]
if machine != "amarel":
continue
dir_list = os.listdir(src_path+'/'+dirname)
if dir_list:
kernel = dir_keywords[0]
node_type = dir_keywords[2]
usage = dir_keywords[3]
for meas in measurements:
fd_out = open(dst_path+'/'+dirname+'/'+meas+'.csv', 'w')
writer = csv.writer(fd_out)
for session in dir_list:
with open(src_path+'/'+dirname+'/'+session+'/'+meas+'.csv') as fd_in:
reader = csv.reader(fd_in)
for row in reader:
cleaned_row = row
cleaned_row[0] = session + "__" + cleaned_row[0]
writer.writerow(cleaned_row)
fd_out.close()
pprint(dirname)
pprint(dir_list)
if __name__ == "__main__":
src_path = sys.argv[1]
dst_path = sys.argv[2]
amarel_aggregate(src_path, dst_path)
|
[
"[email protected]"
] | |
0255e46bd31fd1ecc2393fdf7322e84db39abf47
|
97e60d0ca572d0dc3fc80f8719cd57a707ab6069
|
/bias_zebra_print/stock.py
|
dd94d374c932338a87ab830754b76fb7b1fe5b94
|
[] |
no_license
|
josepato/bias_trunk_v6
|
0c7c86493c88f015c049a139360478cabec7f698
|
b6ab6fc2ff3dc832f26effdba421bcc76d5cabac
|
refs/heads/master
| 2020-06-12T14:18:31.101513 | 2016-12-15T22:55:54 | 2016-12-15T22:55:54 | 75,803,957 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,033 |
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
#Bias Product / PriceList
#
from osv import osv
from osv import fields
import time
import netsvc
#----------------------------------------------------------
# Price List
#----------------------------------------------------------
OLDPRINTSTR = """XA~TA000~JSN^LT0^MNW^MTT^PON^PMN^LH0,0^JMA^PR4,4^MD0^JUS^LRN^CI0^XZ
^XA
^MMT
^LL0142
^LS0
^FT246,119^A0N,17,16^FH\^FD%s^FS
^FT164,18^A0N,17,16^FH\^FD%s^FS
^FT286,110^A0B,17,16^FH\^FD%s^FS
^FT21,136^A0N,17,16^FH\^FD%s^FS
^FT4,123^A0N,17,16^FH\^FD%s^FS
^FT193,51^A0N,17,16^FH\^FD%s^FS
^FT4,67^A0N,17,16^FH\^FD%s/%s^FS
^FT3,51^A0N,17,16^FH\^FD%s/%s^FS
^FT3,34^A0N,17,16^FH\^FD%s^FS
^FT8,18^A0N,17,16^FH\^FD%s^FS
^PQ%i,0,1,Y^XZ"""
PRINTSTR = """^XA~TA000~JSN^LT0^MNW^MTT^PON^PMN^LH0,0^JMA^PR4,4^MD0^JUS^LRN^CI0^XZ
^XA
^MMT
^LL0850
^LS0
^FT48,731^A0I,17,16^FH\^F%sB^FS
^FT131,831^A0I,17,16^FH\^FD%s^FS
^FT8,740^A0R,17,16^FH\^FD%s^FS
^FT273,713^A0I,17,16^FH\^FD%s^FS
^FT290,727^A0I,17,16^FH\^FD%s^FS
^FT101,799^A0I,17,16^FH\^FD%s^FS
^FT291,782^A0I,17,16^FH\^FD%s/%s^FS
^FT291,799^A0I,17,16^FH\^FD%s/%s^FS
^FT291,815^A0I,17,16^FH\^FD%s^FS
^FT287,832^A0I,17,16^FH\^FD%s^FS
^BY1,3,22^FT291,755^BCI,,Y,N
^FD>:LA>50001>6BB^FS
^PQ%i,0,1,Y^XZ
"""
class stock_picking(osv.osv):
_inherit = "stock.picking"
def getZebraData(self, cr, uid, ids):
if isinstance(ids, (int, long)):
ids = [ids]
res = []
move_obj = self.pool.get('stock.move')
for picking in self.browse(cr, uid, ids):
mydict = {'id': picking.id}
mylines = []
for move in picking.move_lines:
mystr = PRINTSTR %(move.product_id.product_writing_kind_id.name,
move.product_id.product_colection_id.name,
move.product_id.default_code,
move.product_id.product_tmpl_id.categ_id.parent_id.name,
move.product_id.product_writing_metaerial_id.name,
(move.product_id.product_hardware_ids and move.product_id.product_hardware_ids[0].name) or "-",
(move.product_id.product_top_material_ids and move.product_id.product_top_material_ids[0].name) or "-",
(move.product_id.product_bottom_material_ids and move.product_id.product_bottom_material_ids[0].name) or "-",
(move.product_id.product_top_color_ids and move.product_id.product_top_color_ids[0].name) or "-",
(move.product_id.product_bottom_color_ids and move.product_id.product_bottom_color_ids[0].name) or "-",
move.product_id.product_line_id.name,
move.product_id.product_brand_id.name,
move.product_qty)
mylines.append(mystr)
mydict['lines'] = mylines
res.append(mydict)
return res
stock_picking()
|
[
"[email protected]"
] | |
b563672c1f0906584832778d726b6ba3cac18c7f
|
060ce17de7b5cdbd5f7064d1fceb4ded17a23649
|
/fn_microsoft_defender/fn_microsoft_defender/util/customize.py
|
bb2e546adca2b9b9f81794d806d0518c8a1f2dd2
|
[
"MIT"
] |
permissive
|
ibmresilient/resilient-community-apps
|
74bbd770062a22801cef585d4415c29cbb4d34e2
|
6878c78b94eeca407998a41ce8db2cc00f2b6758
|
refs/heads/main
| 2023-06-26T20:47:15.059297 | 2023-06-23T16:33:58 | 2023-06-23T16:33:58 | 101,410,006 | 81 | 107 |
MIT
| 2023-03-29T20:40:31 | 2017-08-25T14:07:33 |
Python
|
UTF-8
|
Python
| false | false | 6,691 |
py
|
# -*- coding: utf-8 -*-
"""Generate the Resilient customizations required for fn_microsoft_defender"""
import base64
import os
import io
try:
from resilient import ImportDefinition
except ImportError:
# Support Apps running on resilient-circuits < v35.0.195
from resilient_circuits.util import ImportDefinition
RES_FILE = "data/export.res"
def codegen_reload_data():
"""
Parameters required reload codegen for the fn_microsoft_defender package
"""
return {
"package": u"fn_microsoft_defender",
"message_destinations": [u"fn_microsoft_defender"],
"functions": [u"defender_alert_search", u"defender_app_execution", u"defender_collect_machine_investigation_package", u"defender_delete_indicator", u"defender_find_machines", u"defender_find_machines_by_file", u"defender_find_machines_by_filter", u"defender_get_file_information", u"defender_get_incident", u"defender_get_related_alert_information", u"defender_list_indicators", u"defender_machine_isolation", u"defender_machine_scan", u"defender_machine_vulnerabilities", u"defender_quarantine_file", u"defender_set_indicator", u"defender_update_alert", u"defender_update_incident"],
"workflows": [u"defender_atp_app_execution", u"defender_atp_collect_machine_investigation_package", u"defender_atp_delete_indicator", u"defender_atp_find_machines", u"defender_atp_find_machines_by_file_hash", u"defender_atp_get_file_information", u"defender_atp_machine_isolation", u"defender_atp_machine_scan", u"defender_atp_machine_vulnerabilities", u"defender_atp_set_indicator", u"defender_atp_update_alert", u"defender_atp_update_indicator", u"defender_close_incident", u"defender_find_machines_by_filter", u"defender_get_incident", u"defender_get_updated_machine_information", u"defender_list_indicators", u"defender_quarantine_file", u"defender_refresh_incident", u"defender_sync_comment", u"defender_sync_incident"],
"actions": [u"Create Artifact from Indicator", u"Defender Close Incident", u"Defender Find Machine by DNS name", u"Defender Find Machines by File Hash", u"Defender Find Machines by Internal IP Address", u"Defender Get File Information", u"Defender Get Incident", u"Defender List Indicators", u"Defender Machine App Execution Restriction", u"Defender Machine Collect Investigation Package", u"Defender Machine Isolate Action", u"Defender Machine Quarantine File", u"Defender Machine Refresh Information", u"Defender Machine Scan", u"Defender Machine Update Information", u"Defender Machine Vulnerabilities", u"Defender Refresh Incident", u"Defender Set Indicator", u"Defender Sync Comment", u"Defender Sync Incident", u"Defender Update Alert", u"Delete Indicator", u"Update Indicator"],
"incident_fields": [u"defender_classification", u"defender_determination", u"defender_incident_createtime", u"defender_incident_id", u"defender_incident_lastupdatetime", u"defender_incident_url", u"defender_tags"],
"incident_artifact_types": [],
"incident_types": [],
"datatables": [u"defender_alerts", u"defender_indicators", u"defender_machines"],
"automatic_tasks": [],
"scripts": [u"Create Artifact from Indicator"],
}
def customization_data(client=None):
    """
    Returns a Generator of ImportDefinitions (Customizations).
    Install them using `resilient-circuits customize`

    IBM Resilient Platform Version: 39.0.6328

    Contents:
    - Message Destinations:
        - fn_microsoft_defender
    - Functions:
        - defender_alert_search
        - defender_app_execution
        - defender_collect_machine_investigation_package
        - defender_delete_indicator
        - defender_find_machines
        - defender_find_machines_by_file
        - defender_find_machines_by_filter
        - defender_get_file_information
        - defender_get_incident
        - defender_get_related_alert_information
        - defender_list_indicators
        - defender_machine_isolation
        - defender_machine_scan
        - defender_machine_vulnerabilities
        - defender_quarantine_file
        - defender_set_indicator
        - defender_update_alert
        - defender_update_incident
    - Workflows:
        - defender_atp_app_execution
        - defender_atp_collect_machine_investigation_package
        - defender_atp_delete_indicator
        - defender_atp_find_machines
        - defender_atp_find_machines_by_file_hash
        - defender_atp_get_file_information
        - defender_atp_machine_isolation
        - defender_atp_machine_scan
        - defender_atp_machine_vulnerabilities
        - defender_atp_set_indicator
        - defender_atp_update_alert
        - defender_atp_update_indicator
        - defender_close_incident
        - defender_find_machines_by_filter
        - defender_get_incident
        - defender_get_updated_machine_information
        - defender_list_indicators
        - defender_quarantine_file
        - defender_refresh_incident
        - defender_sync_comment
        - defender_sync_incident
    - Rules:
        - Create Artifact from Indicator
        - Defender Close Incident
        - Defender Find Machine by DNS name
        - Defender Find Machines by File Hash
        - Defender Find Machines by Internal IP Address
        - Defender Get File Information
        - Defender Get Incident
        - Defender List Indicators
        - Defender Machine App Execution Restriction
        - Defender Machine Collect Investigation Package
        - Defender Machine Isolate Action
        - Defender Machine Quarantine File
        - Defender Machine Refresh Information
        - Defender Machine Scan
        - Defender Machine Update Information
        - Defender Machine Vulnerabilities
        - Defender Refresh Incident
        - Defender Set Indicator
        - Defender Sync Comment
        - Defender Sync Incident
        - Defender Update Alert
        - Delete Indicator
        - Update Indicator
    - Incident Fields:
        - defender_classification
        - defender_determination
        - defender_incident_createtime
        - defender_incident_id
        - defender_incident_lastupdatetime
        - defender_incident_url
        - defender_tags
    - Data Tables:
        - defender_alerts
        - defender_indicators
        - defender_machines
    - Scripts:
        - Create Artifact from Indicator
    """
    # The serialized customization payload ships alongside this module.
    definition_path = os.path.join(os.path.dirname(__file__), RES_FILE)
    if not os.path.isfile(definition_path):
        raise FileNotFoundError("{} not found".format(RES_FILE))
    # Read the resource file as text and hand the platform a base64 payload,
    # which is the encoding ImportDefinition expects.
    with io.open(definition_path, mode='rt') as res_handle:
        encoded_definition = base64.b64encode(res_handle.read().encode('utf-8'))
    yield ImportDefinition(encoded_definition)
|
[
"[email protected]"
] | |
5f5e98e0204db775e5b06fd86453f2a62c41f96b
|
6dc685fdb6f4a556225f13a1d26170ee203e9eb6
|
/Windows2016Lab/scripts/Service_Windows2016_Action___create___Task_set_parameters.py
|
57d63f95d0ebaa657302006a67576086a8cb18df
|
[
"MIT"
] |
permissive
|
amaniai/calm
|
dffe6227af4c9aa3d95a08b059eac619b2180889
|
fefc8b9f75e098daa4c88c7c4570495ce6be9ee4
|
refs/heads/master
| 2023-08-15T17:52:50.555026 | 2021-10-10T08:33:01 | 2021-10-10T08:33:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 549 |
py
|
# Nutanix Calm task script: emits credentials and an email address as
# KEY=VALUE lines on stdout, which Calm captures as task output variables.
# NOTE(review): `_construct_random_password` is not defined in this script —
# presumably injected by the Calm runtime/library; confirm before reuse.
# Username: 8 random chars (4 digits + 4 lowercase letters), prefixed 'user-'.
username = 'user-{}'.format(_construct_random_password(8,numDigits=4, numLetters=4, numPuncs=0, numCaps=0).lower())
# Password: 10 chars including digits and uppercase.
password = _construct_random_password(10,upper=14, numDigits=4)
print('ACCESS_USERNAME={}'.format(username))
print('ACCESS_PASSWORD={}'.format(password))
# @@{...}@@ placeholders are substituted by Calm before the script runs.
# calm_array_index identifies this replica within a scaled-out service.
calm_index = int('@@{calm_array_index}@@')
email_list = '''@@{EMAIL_LIST}@@'''
# Keep only non-blank lines from the configured email list.
clean_list = [x for x in email_list.splitlines() if x.strip(' ')]
# Assign the email matching this replica's index; fall back to the first
# entry when there are fewer emails than replicas.
if calm_index < len(clean_list):
    print('EMAIL={}'.format(clean_list[calm_index]))
else:
    print('EMAIL={}'.format(clean_list[0]))
|
[
"[email protected]"
] | |
dd0eb441e105f56c21813d7d9263c17466d46938
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/217/usersdata/274/113684/submittedfiles/av2_p3_m2.py
|
56a351331cc54ba12f7e3c1497129b302fa40d64
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 916 |
py
|
# -*- coding: utf-8 -*-
# Reads an n x n square (n >= 3) in which exactly one element is wrong,
# then prints the correct value (O) and the wrong value (P) at that position.
# Fixes over the submitted version: `while notn>=3` -> `while not n >= 3`,
# undefined `o` in range(), missing closing paren on append, invalid
# assignments (`ct+ct=1`, `VC+somaL[0]`), and uninitialized counters.
n = int(input("Dimensão do Quadrado: "))
while not n >= 3:
    n = int(input("Dimensão do Quadrado: "))

# Read the n x n matrix row by row.
M = []
for i in range(0, n, 1):
    L = []
    for j in range(0, n, 1):
        L.append(int(input("Elemento da Linha: ")))
    M.append(L)

# Sum of each row.
somaL = []
for i in range(0, n, 1):
    somaL.append(sum(M[i]))

# Sum of each column.
somaC = []
for j in range(0, n, 1):
    C = 0
    for i in range(0, n, 1):
        C = C + M[i][j]
    somaC.append(C)

# With n >= 3 and a single wrong element, the correct sum is the one that
# appears more than once among the first three entries.
if somaL[0] == somaL[1] or somaL[0] == somaL[2]:
    VC = somaL[0]          # correct row sum
else:
    VC = somaL[1]
k = 0                      # index of the divergent row
for i in range(0, n, 1):
    if somaL[i] != VC:
        k = i
VE = somaL[k]              # sum of the wrong row

# Same majority argument for the columns.
if somaC[0] == somaC[1] or somaC[0] == somaC[2]:
    VC2 = somaC[0]         # correct column sum
else:
    VC2 = somaC[1]
k2 = 0                     # index of the divergent column
for j in range(0, n, 1):
    if somaC[j] != VC2:
        k2 = j

P = M[k][k2]               # the wrong value stored in the matrix
O = VC - (VE - P)          # the value that should be there instead
print(O)
print(P)
|
[
"[email protected]"
] | |
db889d7c5e5cba1d1b2ed71e137b42acf283c13f
|
b89ec2839b4a6bd4e2d774f64be9138f4b71a97e
|
/dataent/patches/v7_2/set_doctype_engine.py
|
6de22a5c653dc5755560998976ce23c246a2026d
|
[
"MIT"
] |
permissive
|
dataent/dataent
|
ec0e9a21d864bc0f7413ea39670584109c971855
|
c41bd5942ffe5513f4d921c4c0595c84bbc422b4
|
refs/heads/master
| 2022-12-14T08:33:48.008587 | 2019-07-09T18:49:21 | 2019-07-09T18:49:21 | 195,729,981 | 0 | 0 |
MIT
| 2022-12-09T17:23:49 | 2019-07-08T03:26:28 |
Python
|
UTF-8
|
Python
| false | false | 231 |
py
|
from __future__ import unicode_literals
import dataent
def execute():
    """Copy each database table's storage engine onto its DocType record."""
    for table_status in dataent.db.sql('show table status'):
        table_name = table_status[0]
        if not table_name.startswith('tab'):
            continue
        # DocType name is the table name minus the 'tab' prefix;
        # column 1 of `show table status` is the engine.
        dataent.db.sql('update tabDocType set engine=%s where name=%s',
                       (table_status[1], table_name[3:]))
|
[
"[email protected]"
] | |
6a4c16868431e1e23eb5da001f0272c6e45ae97e
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/7ECZC8CBEhy5QkvN3_15.py
|
b7cee2eac0f62400c8ad19d3b56c9c8b2daff2e8
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 151 |
py
|
def how_many_walls(n, w, h):
    """Return how many complete walls of area w * h can be covered with n units.

    Accumulates one wall's area at a time until the paint budget `n` is
    exceeded, then reports the number of fully covered walls.
    """
    wall_area = w * h
    walls_painted = -1   # offset so the final over-shooting pass is not counted
    paint_spent = 0
    while paint_spent <= n:
        paint_spent += wall_area
        walls_painted += 1
    return walls_painted
|
[
"[email protected]"
] | |
677d22f42d470e7e6fab11f89b82637deaaa0fb6
|
be80a2468706ab99c838fa85555c75db8f38bdeb
|
/app/reward/migrations/0002_auto_20180822_0903.py
|
2e25721da289ed95493031d61d3ce8c3cf1f9c9a
|
[] |
no_license
|
kimdohwan/Wadiz
|
5468d218ba069387deabf83376b42a4f69360881
|
91f85f09a7c9a59864b69990127911a112d4bdbd
|
refs/heads/master
| 2021-06-24T06:41:04.111305 | 2019-07-03T12:51:18 | 2019-07-03T12:51:18 | 143,955,968 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,105 |
py
|
# Generated by Django 2.1 on 2018-08-22 00:03
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Move order/contact fields off Funding into a new FundingOrder model.

    Creates FundingOrder, removes the corresponding columns from Funding,
    and links Funding to its order via a required ForeignKey.
    """

    dependencies = [
        ('reward', '0001_initial'),
    ]

    operations = [
        # New model carrying the orderer's contact and shipping details.
        migrations.CreateModel(
            name='FundingOrder',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=20)),
                ('phone_number', models.CharField(blank=True, max_length=11, validators=[django.core.validators.RegexValidator(message='Phone number must be 11 numbers', regex='\\d{11}')])),
                ('address1', models.CharField(max_length=30)),
                ('address2', models.CharField(max_length=30)),
                ('comment', models.TextField()),
                ('requested_at', models.DateTimeField(auto_now_add=True)),
                ('cancel_at', models.DateTimeField(null=True)),
            ],
        ),
        # Strip the now-duplicated columns from Funding.
        migrations.RemoveField(
            model_name='funding',
            name='address1',
        ),
        migrations.RemoveField(
            model_name='funding',
            name='address2',
        ),
        migrations.RemoveField(
            model_name='funding',
            name='cancel_at',
        ),
        migrations.RemoveField(
            model_name='funding',
            name='comment',
        ),
        migrations.RemoveField(
            model_name='funding',
            name='requested_at',
        ),
        migrations.RemoveField(
            model_name='funding',
            name='username',
        ),
        # Link Funding to the new order record. `default=''` only satisfies
        # the one-off schema migration and is dropped via preserve_default.
        migrations.AddField(
            model_name='funding',
            name='order',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='order', to='reward.FundingOrder'),
            preserve_default=False,
        ),
    ]
|
[
"[email protected]"
] | |
f44f6d9972814a4e7a1f84001a60cf2ac08ac418
|
5c26eafece0ee85a7ed4b6a34ee52753d7c86e49
|
/polyaxon/estimators/hooks/step_hooks.py
|
0e177575b29f1a02195d3439137b45db2c0d2a1a
|
[
"MIT"
] |
permissive
|
StetHD/polyaxon
|
345257076d484b2267ba20d9d346f1367cdd92d3
|
dabddb9b6ea922a0549e3c6fd7711231f7462fa3
|
refs/heads/master
| 2021-03-19T06:45:51.806485 | 2017-09-26T14:31:26 | 2017-09-26T14:36:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,184 |
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from tensorflow.python.training import basic_session_run_hooks
from polyaxon.estimators.hooks.utils import can_run_hook
class StepLoggingTensorHook(basic_session_run_hooks.LoggingTensorHook):
    """Log the given tensors once every N local steps or once every N seconds.

    A Polyaxon variant of TensorFlow's ``LoggingTensorHook`` that first checks
    the run context (via ``can_run_hook``) so logging can be suppressed when a
    ``no_run_hooks_op`` is in effect. Values are logged at ``INFO`` severity.

    Args:
        tensors: `dict` mapping string tags to tensors/tensor names, or an
            `iterable` of tensors/tensor names.
        every_n_iter: `int`, log once every N local steps on this worker.
        every_n_secs: `int` or `float`, log once every N seconds. Exactly one
            of `every_n_iter` and `every_n_secs` should be provided.
        formatter: callable taking a dict of `tag` -> `Tensor` and returning a
            string; defaults to printing all tensors.

    Raises:
        ValueError: if `every_n_iter` is non-positive.
    """

    def __init__(self, tensors, every_n_iter=None, every_n_secs=None, formatter=None):
        super(StepLoggingTensorHook, self).__init__(tensors, every_n_iter, every_n_secs, formatter)

    def before_run(self, run_context):  # pylint: disable=unused-argument
        # Consult the context first; skip the parent hook entirely when
        # hooks are disabled for this run.
        self._should_trigger = can_run_hook(run_context)
        if not self._should_trigger:
            return None
        return super(StepLoggingTensorHook, self).before_run(run_context)
class StopAtStepHook(basic_session_run_hooks.StopAtStepHook):
    """Monitor to request stop at a specified step.
    (A mirror to tensorflow.python.training.basic_session_run_hooks StopAtStepHook.)

    This hook requests stop after either a number of steps have been
    executed or a last step has been reached. Only one of the two options can be
    specified.

    if `num_steps` is specified, it indicates the number of steps to execute
    after `begin()` is called. If instead `last_step` is specified, it
    indicates the last step we want to execute, as passed to the `after_run()`
    call.

    Args:
        num_steps: Number of steps to execute.
        last_step: Step after which to stop.

    Raises:
        ValueError: If one of the arguments is invalid.
    """

    # Pure delegation: this subclass exists so the hook can be registered
    # under Polyaxon's STEP_HOOKS name registry.
    def __init__(self, num_steps=None, last_step=None):
        super(StopAtStepHook, self).__init__(num_steps, last_step)
class StepCheckpointSaverHook(basic_session_run_hooks.CheckpointSaverHook):
    """Saves checkpoints every N steps or seconds.
    (A mirror to tensorflow.python.training.basic_session_run_hooks CheckpointSaverHook.)

    Args:
        checkpoint_dir: `str`, base directory for the checkpoint files.
        save_secs: `int`, save every N secs.
        save_steps: `int`, save every N steps.
        saver: `Saver` object, used for saving.
        checkpoint_basename: `str`, base name for the checkpoint files.
        scaffold: `Scaffold`, use to get saver object.
        listeners: List of `CheckpointSaverListener` subclass instances.
            Used for callbacks that run immediately after the corresponding
            CheckpointSaverHook callbacks, only in steps where the
            CheckpointSaverHook was triggered.

    Raises:
        ValueError: One of `save_steps` or `save_secs` should be set.
        ValueError: Exactly one of saver or scaffold should be set.
    """

    # Pure delegation: registered under Polyaxon's STEP_HOOKS name registry.
    # Arguments are forwarded positionally in the parent's declared order.
    def __init__(self, checkpoint_dir, save_secs=None, save_steps=None, saver=None,
                 checkpoint_basename="model.ckpt", scaffold=None, listeners=None):
        super(StepCheckpointSaverHook, self).__init__(checkpoint_dir, save_secs, save_steps, saver,
                                                      checkpoint_basename, scaffold, listeners)
class StepCounterHook(basic_session_run_hooks.StepCounterHook):
    """Steps per second monitor.
    (A mirror to tensorflow.python.training.basic_session_run_hooks CheckpointSaverHook.)
    """

    # Pure delegation with the parent's defaults restated; registered under
    # Polyaxon's STEP_HOOKS name registry.
    def __init__(self, every_n_steps=100, every_n_secs=None, output_dir=None, summary_writer=None):
        super(StepCounterHook, self).__init__(
            every_n_steps, every_n_secs, output_dir, summary_writer)
class StepSummarySaverHook(basic_session_run_hooks.SummarySaverHook):
    """Saves summaries every N steps.
    (A mirror to tensorflow.python.training.basic_session_run_hooks NanTensorHook.)

    Args:
        save_steps: `int`, save summaries every N steps. Exactly one of
            `save_secs` and `save_steps` should be set.
        save_secs: `int`, save summaries every N seconds.
        output_dir: `string`, the directory to save the summaries to. Only used
            if no `summary_writer` is supplied.
        summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
            one will be created accordingly.
        scaffold: `Scaffold` to get summary_op if it's not provided.
        summary_op: `Tensor` of type `string` containing the serialized `Summary`
            protocol buffer or a list of `Tensor`. They are most likely an output
            by TF summary methods like `tf.summary.scalar` or
            `tf.summary.merge_all`. It can be passed in as one tensor; if more
            than one, they must be passed in as a list.

    Raises:
        ValueError: Exactly one of scaffold or summary_op should be set.
    """

    # Pure delegation: registered under Polyaxon's STEP_HOOKS name registry.
    def __init__(self, save_steps=None, save_secs=None, output_dir=None, summary_writer=None,
                 scaffold=None, summary_op=None):
        super(StepSummarySaverHook, self).__init__(
            save_steps, save_secs, output_dir, summary_writer, scaffold, summary_op)
#: Registry of step-based hooks keyed by public name. An OrderedDict keeps
#: the declaration order stable for configuration listings and iteration.
STEP_HOOKS = OrderedDict([
    ('StepLoggingTensorHook', StepLoggingTensorHook),
    ('StopAtStepHook', StopAtStepHook),
    ('StepCheckpointSaverHook', StepCheckpointSaverHook),
    ('StepCounterHook', StepCounterHook),
    ('StepSummarySaverHook', StepSummarySaverHook),
])
|
[
"[email protected]"
] | |
1e30a64ef30c526d7e94b66f205c369d97dd8da2
|
fa04309288a0f8b2daae2fd73c8224a1c0ad4d95
|
/eventkit_cloud/tasks/tests/test_task_factory.py
|
b02b4477e9ad630dbbdb95b91ae28bb1c39b5c47
|
[] |
no_license
|
jj0hns0n/eventkit-cloud
|
7bb828c57f29887621e47fe7ce0baa14071ef39e
|
2f749090baf796b507e79251a4c4b30cb0b4e126
|
refs/heads/master
| 2021-01-01T19:45:32.464729 | 2017-07-24T19:01:24 | 2017-07-24T19:01:24 | 98,675,805 | 0 | 0 | null | 2017-07-28T18:16:34 | 2017-07-28T18:16:34 | null |
UTF-8
|
Python
| false | false | 7,545 |
py
|
# -*- coding: utf-8 -*-
import logging
import os
import uuid
from django.contrib.auth.models import Group, User
from django.contrib.gis.geos import GEOSGeometry, Polygon
from django.db import DatabaseError
from django.test import TestCase
from eventkit_cloud.jobs.models import Job, Region, ProviderTask, ExportProvider, License, UserLicense
from eventkit_cloud.tasks.models import ExportRun
from eventkit_cloud.tasks.task_factory import (TaskFactory, create_run, create_finalize_run_task_collection,
get_invalid_licenses)
from mock import patch, Mock, MagicMock
logger = logging.getLogger(__name__)
class TestExportTaskFactory(TestCase):
    """
    Test cases for the TaskFactory.
    """
    fixtures = ('insert_provider_types.json', 'osm_provider.json',)

    def setUp(self,):
        """Build a Job with one licensed OSM provider task and a consenting user."""
        self.path = os.path.dirname(os.path.realpath(__file__))
        Group.objects.create(name='TestDefaultExportExtentGroup')
        self.user = User.objects.create(username='demo', email='[email protected]', password='demo')
        bbox = Polygon.from_bbox((-10.85, 6.25, -10.62, 6.40))
        the_geom = GEOSGeometry(bbox, srid=4326)
        self.job = Job.objects.create(name='TestJob', description='Test description', user=self.user,
                                      the_geom=the_geom)
        provider = ExportProvider.objects.get(slug='osm')
        # Attach a license to the provider and record the user's agreement so
        # parse_tasks does not reject the run by default.
        self.license = License.objects.create(slug='odbl-test', name='test_osm_license')
        provider.license = self.license
        provider.save()
        UserLicense.objects.create(license=self.license, user=self.user)
        provider_task = ProviderTask.objects.create(provider=provider)
        self.job.provider_tasks.add(provider_task)
        self.region = Region.objects.get(name='Africa')
        self.job.region = self.region
        self.uid = str(provider_task.uid)
        self.job.save()

    def test_create_run_success(self):
        """create_run returns a uid for a persisted ExportRun."""
        run_uid = create_run(job_uid=self.job.uid)
        self.assertIsNotNone(run_uid)
        self.assertIsNotNone(ExportRun.objects.get(uid=run_uid))

    @patch('eventkit_cloud.tasks.task_factory.ExportRun')
    def test_create_run_failure(self, ExportRun):
        """A DatabaseError during creation propagates and no uid is produced."""
        ExportRun.objects.create.side_effect = DatabaseError('FAIL')
        with self.assertRaises(DatabaseError):
            run_uid = create_run(job_uid=self.job.uid)
            self.assertIsNone(run_uid)

    # NOTE: @patch decorators apply bottom-up, so the mock arguments below are
    # in reverse order of the decorator stack.
    @patch('eventkit_cloud.tasks.task_factory.get_invalid_licenses')
    @patch('eventkit_cloud.tasks.task_factory.finalize_export_provider_task')
    @patch('eventkit_cloud.tasks.task_factory.create_task')
    @patch('eventkit_cloud.tasks.task_factory.chain')
    def test_task_factory(self, task_factory_chain, create_task,
                          finalize_task, mock_invalid_licenses):
        """parse_tasks builds the celery chain when licenses are valid, and
        rejects (and deletes) the run when they are not."""
        mock_invalid_licenses.return_value = []
        run_uid = create_run(job_uid=self.job.uid)
        self.assertIsNotNone(run_uid)
        self.assertIsNotNone(ExportRun.objects.get(uid=run_uid))
        worker = "some_worker"
        provider_uuid = uuid.uuid4()
        task_runner = MagicMock()
        task = Mock()
        task_runner().run_task.return_value = (provider_uuid, task)
        create_task.return_value = task
        task_factory = TaskFactory()
        # Route both provider types through the mocked runner.
        task_factory.type_task_map = {'osm-generic': task_runner, 'osm': task_runner}

        task_factory.parse_tasks(run_uid=run_uid, worker=worker)
        task_factory_chain.assert_called()
        create_task.assert_called()
        finalize_task.s.assert_called()

        # Test that run is prevented and deleted if the user has not agreed to the licenses.
        mock_invalid_licenses.return_value = ['invalid-licenses']
        with self.assertRaises(Exception):
            task_factory.parse_tasks(run_uid=run_uid, worker=worker)
        run = ExportRun.objects.filter(uid=run_uid).first()
        self.assertIsNone(run)

    def test_get_invalid_licenses(self):
        # The license should not be returned if the user has agreed to it.
        expected_invalid_licenses = []
        invalid_licenses = get_invalid_licenses(self.job)
        self.assertEquals(invalid_licenses, expected_invalid_licenses)

        # A license should be returned if the user has not agreed to it.
        UserLicense.objects.get(license=self.license, user=self.user).delete()
        expected_invalid_licenses = [self.license.name]
        invalid_licenses = get_invalid_licenses(self.job)
        self.assertEquals(invalid_licenses, expected_invalid_licenses)
        UserLicense.objects.create(license=self.license, user=self.user)
class CreateFinalizeRunTaskCollectionTests(TestCase):
    """Tests for create_finalize_run_task_collection's chain construction."""

    # NOTE: @patch decorators apply bottom-up, so the mock arguments below are
    # in reverse order of the decorator stack.
    @patch('eventkit_cloud.tasks.task_factory.example_finalize_run_hook_task')
    @patch('eventkit_cloud.tasks.task_factory.prepare_for_export_zip_task')
    @patch('eventkit_cloud.tasks.task_factory.zip_file_task')
    @patch('eventkit_cloud.tasks.task_factory.finalize_run_task_as_errback')
    @patch('eventkit_cloud.tasks.task_factory.finalize_run_task')
    @patch('eventkit_cloud.tasks.task_factory.chain')
    def test_create_finalize_run_task_collection(
            self, chain, finalize_run_task, finalize_run_task_as_errback, zip_file_task, prepare_for_export_zip_task, example_finalize_run_hook_task):
        """ Checks that all of the expected tasks were prepared and combined in a chain for return.
        """
        chain.return_value = 'When not mocked, this would be a celery chain'
        # None of these need correspond to real things, they're just to check the inner calls.
        run_uid = 1
        run_dir = 'test_dir'
        worker = 'test_worker'
        # Every task signature is expected to be routed to the given worker
        # with these retry/priority settings.
        expected_task_settings = {
            'interval': 1, 'max_retries': 10, 'queue': worker, 'routing_key': worker, 'priority': 70}

        # This should return a chain of tasks ending in the finalize_run_task, plus a task sig for just the
        # finalize_run_task.
        finalize_chain, errback = create_finalize_run_task_collection(run_uid=run_uid, run_dir=run_dir, worker=worker)

        example_finalize_run_hook_task.si.assert_called_once_with([], run_uid=run_uid)
        example_finalize_run_hook_task.si.return_value.set.assert_called_once_with(**expected_task_settings)

        prepare_for_export_zip_task.s.assert_called_once_with(run_uid=run_uid)
        prepare_for_export_zip_task.s.return_value.set.assert_called_once_with(**expected_task_settings)

        zip_file_task.s.assert_called_once_with(run_uid=run_uid)
        zip_file_task.s.return_value.set.assert_called_once_with(**expected_task_settings)

        finalize_run_task.si.assert_called_once_with(run_uid=run_uid, stage_dir=run_dir)
        finalize_run_task.si.return_value.set.assert_called_once_with(**expected_task_settings)

        self.assertEqual(finalize_chain, 'When not mocked, this would be a celery chain')
        self.assertEqual(errback, finalize_run_task_as_errback.si())
        self.assertEqual(chain.call_count, 1)

        # Grab the args for the first (only) call
        chain_inputs = chain.call_args[0]
        # The result of setting the args & settings for each task,
        # which unmocked would be a task signature, should be passed to celery.chain
        expected_chain_inputs = (
            example_finalize_run_hook_task.si.return_value.set.return_value,
            prepare_for_export_zip_task.s.return_value.set.return_value,
            zip_file_task.s.return_value.set.return_value,
            finalize_run_task.si.return_value.set.return_value,
        )
        self.assertEqual(chain_inputs, expected_chain_inputs)
|
[
"[email protected]"
] | |
bfe394598000549c8aa731dc5185e43ee6e450f1
|
15581a76b36eab6062e71d4e5641cdfaf768b697
|
/Leetcode Contests/Biweekly Contest 24/Minimum Value to Get Positive Step by Step Sum.py
|
ed393ceda76cec842051a7cd8dd259618306c947
|
[] |
no_license
|
MarianDanaila/Competitive-Programming
|
dd61298cc02ca3556ebc3394e8d635b57f58b4d2
|
3c5a662e931a5aa1934fba74b249bce65a5d75e2
|
refs/heads/master
| 2023-05-25T20:03:18.468713 | 2023-05-16T21:45:08 | 2023-05-16T21:45:08 | 254,296,597 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 318 |
py
|
from typing import List
class Solution:
    def minStartValue(self, nums: List[int]) -> int:
        """Return the minimum positive startValue so that every prefix sum of
        startValue + nums stays >= 1.

        The answer is 1 - min(0, lowest prefix sum of nums). Rewritten to stop
        shadowing the builtins `sum` and `min` and to collapse the duplicated
        positive/negative return branches into one expression.
        """
        running = 0   # current prefix sum of nums
        lowest = 0    # lowest prefix sum seen, clamped to at most 0
        for value in nums:
            running += value
            if running < lowest:
                lowest = running
        # lowest <= 0, so this is always >= 1 (matches the original's
        # `return 1` when no prefix goes negative).
        return 1 - lowest
|
[
"[email protected]"
] | |
06ce341e0e7626e2104a0667155275b069268653
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Kivy/pycon2013/html5slides/scripts/md/render.py
|
b5ef0975e20eb201985c57c5b48cd150050171da
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:6e3940fcf589334234bc7943dfc2c0d8e860fc139a432eae485128714022232c
size 1807
|
[
"[email protected]"
] | |
e5b0887d810d27576528bafda388fdfd915d3c4f
|
c6320d68968de93ce9d686f5a59bb34909d089bb
|
/03_Polynomial_Regression/polynomial_regression_rad.py
|
fafb65739a4f26fa1c7981097fe77412704b96b8
|
[] |
no_license
|
rbartosinski/MachineLearningRes
|
0835e6b9f94c309bf2ce8ff7ceb73912a7eeea63
|
5a1af15e77d589149aa1cb22cb96f56956fd9a0f
|
refs/heads/master
| 2020-04-07T00:58:03.692579 | 2019-01-11T13:49:12 | 2019-01-11T13:49:12 | 157,925,825 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,313 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 20 15:04:28 2018

@author: radek

Compare a plain linear fit against a degree-4 polynomial fit of
position level vs. salary.

Fixes: the last line referenced undefined `lin_reg_2` (NameError; the model
is named `lin_reg2`), and sklearn's `predict` requires a 2-D array, not a
bare scalar, so predictions now pass `[[6.5]]`.
"""
# Load libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Load the data: column 1 is the position level, column 2 the salary.
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values   # 1:2 keeps X two-dimensional for sklearn
y = dataset.iloc[:, 2].values

# Fit plain linear regression to the whole set
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)

# Fit degree-4 polynomial regression to the whole set
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
lin_reg2 = LinearRegression()
lin_reg2.fit(X_poly, y)

# Visualise the linear fit
plt.scatter(X, y, color='red')
plt.plot(X, lin_reg.predict(X), color='blue')
plt.title('Position level vs. Salary (Linear Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()

# Visualise the polynomial fit on a dense grid for a smooth curve
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='red')
plt.plot(X_grid, lin_reg2.predict(poly_reg.fit_transform(X_grid)), color='blue')
plt.title('Position level vs. Salary (Polynomial Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()

# Prediction for level 6.5 from the linear model
print(lin_reg.predict([[6.5]]))

# Prediction for level 6.5 from the polynomial model
print(lin_reg2.predict(poly_reg.fit_transform([[6.5]])))
|
[
"[email protected]"
] | |
e4ae96c0131406c2419a148c0186b3269acfa42f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03964/s755365360.py
|
9f2a66cabd6d3f24f2aafce6d59b731dbfbc227f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 619 |
py
|
import bisect
import collections
import copy
import functools
import heapq
import math
import sys
from collections import deque
from collections import defaultdict
# Fast input: rebinding input to sys.stdin.readline is a deliberate
# competitive-programming speed trick (shadows the builtin on purpose).
input = sys.stdin.readline

MOD = 10**9+7  # unused here; boilerplate constant from the contest template

# Read N ratio reports; T[i]:A[i] is the i-th reported vote ratio.
N = int(input())
T = [0]*N
A = [0]*N
for i in range(N):
    T[i],A[i] = map(int,(input().split()))
# (t, a) tracks the smallest vote counts consistent with all ratios so far.
t,a = T[0],A[0]
for i in range(1,N):
    s = T[i] + A[i]
    # Binary search the smallest multiplier `now` with T[i]*now >= t and
    # A[i]*now >= a; l is kept strictly below the answer, r at or above it.
    now = 1
    l = 1
    r = 10**18//s + 1
    mae = -1
    # `mae` ("previous") detects the fixed point where the search converges.
    while now != mae:
        mae = now
        if T[i]*now < t or A[i]*now < a:
            l = now
        else:
            r = now
        now = (l+r+1)//2
    t,a = T[i]*now,A[i]*now
print(t+a)
|
[
"[email protected]"
] | |
5a1e071972d89f69b241aff120e8fcd705ae1ca1
|
cc0d06e2aad3d30152c4a3f3356befdc58748313
|
/2.til8.oktober/plot_wavepacket.py
|
a4583b0987077f652a46aaf25eff8dbe8cd4c6bb
|
[] |
no_license
|
lasse-steinnes/IN1900
|
db0bb4da33fa024d4fe9207337c0f1d956197c50
|
c8d97c2903078471f8e419f88cc8488d9b8fc7da
|
refs/heads/master
| 2020-12-14T15:34:36.429764 | 2020-01-18T19:59:46 | 2020-01-18T19:59:46 | 234,789,653 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 473 |
py
|
# Plot a Gaussian wave packet at time t = 0.
from numpy import exp, sin, pi, linspace


def bølge(x, t=0):
    """Gaussian envelope times a sine carrier; the packet moves with t."""
    return exp(-(x - 3*t)**2) * sin(3*pi*(x - t))


# Sample x densely on [-4, 4] and evaluate the packet at t = 0.
x_values = linspace(-4, 4, 1500)
amplitudes = bølge(x_values)

# Draw the result.
import matplotlib.pyplot as plt
plt.plot(x_values, amplitudes, label='bølgepakke for t=0')
plt.legend()
plt.xlabel("x")
plt.ylabel("Amplitude")
plt.show()

# Example run
"""
>> python plot_wavepacket.py
(plot)
"""
|
[
"[email protected]"
] | |
3701dcb0526d0abec2a1850baf3176ed362ec0d1
|
d0eb582894eff3c44e3de4bd50f571f9d9ab3a02
|
/venv/lib/python3.7/site-packages/flake8/plugins/pyflakes.py
|
018d1c98a1f847fa743d847fa6d66a99ac4dbc0c
|
[
"MIT"
] |
permissive
|
tdle94/app-store-scrapper
|
159187ef3825213d40425215dd9c9806b415769e
|
ed75880bac0c9ef685b2c1bf57a6997901abface
|
refs/heads/master
| 2022-12-20T21:10:59.621305 | 2020-10-28T00:32:21 | 2020-10-28T00:32:21 | 247,291,364 | 1 | 2 |
MIT
| 2022-12-08T03:53:08 | 2020-03-14T14:25:44 |
Python
|
UTF-8
|
Python
| false | false | 6,021 |
py
|
"""Plugin built-in to Flake8 to treat pyflakes as a plugin."""
# -*- coding: utf-8 -*-
from __future__ import absolute_import
try:
# The 'demandimport' breaks pyflakes and flake8.plugins.pyflakes
from mercurial import demandimport
except ImportError:
pass
else:
demandimport.disable()
import os
from typing import List
import pyflakes
import pyflakes.checker
from flake8 import utils
#: Map from pyflakes message class names to Flake8 "F" error codes.
#: Message classes missing from this table are reported as F999 (see
#: FlakesChecker.run below).
FLAKE8_PYFLAKES_CODES = {
    "UnusedImport": "F401",
    "ImportShadowedByLoopVar": "F402",
    "ImportStarUsed": "F403",
    "LateFutureImport": "F404",
    "ImportStarUsage": "F405",
    "ImportStarNotPermitted": "F406",
    "FutureFeatureNotDefined": "F407",
    "MultiValueRepeatedKeyLiteral": "F601",
    "MultiValueRepeatedKeyVariable": "F602",
    "TooManyExpressionsInStarredAssignment": "F621",
    "TwoStarredExpressions": "F622",
    "AssertTuple": "F631",
    "IsLiteral": "F632",
    "InvalidPrintSyntax": "F633",
    "BreakOutsideLoop": "F701",
    "ContinueOutsideLoop": "F702",
    "ContinueInFinally": "F703",
    "YieldOutsideFunction": "F704",
    "ReturnWithArgsInsideGenerator": "F705",
    "ReturnOutsideFunction": "F706",
    "DefaultExceptNotLast": "F707",
    "DoctestSyntaxError": "F721",
    "ForwardAnnotationSyntaxError": "F722",
    "CommentAnnotationSyntaxError": "F723",
    "RedefinedWhileUnused": "F811",
    "RedefinedInListComp": "F812",
    "UndefinedName": "F821",
    "UndefinedExport": "F822",
    "UndefinedLocal": "F823",
    "DuplicateArgument": "F831",
    "UnusedVariable": "F841",
    "RaiseNotImplemented": "F901",
}
class FlakesChecker(pyflakes.checker.Checker):
    """Subclass the Pyflakes checker to conform with the flake8 API."""

    name = "pyflakes"
    version = pyflakes.__version__
    # Class-level configuration, populated once by parse_options() and shared
    # by every per-file instance.
    with_doctest = False
    include_in_doctest = []  # type: List[str]
    exclude_from_doctest = []  # type: List[str]

    def __init__(self, tree, file_tokens, filename):
        """Initialize the PyFlakes plugin with an AST tree and filename."""
        filename = utils.normalize_path(filename)
        with_doctest = self.with_doctest
        # Include/exclude are prefix matches on the normalized path; the most
        # specific (overlapping) include wins over an exclude.
        included_by = [
            include
            for include in self.include_in_doctest
            if include != "" and filename.startswith(include)
        ]
        if included_by:
            with_doctest = True

        for exclude in self.exclude_from_doctest:
            if exclude != "" and filename.startswith(exclude):
                with_doctest = False
                overlaped_by = [
                    include
                    for include in included_by
                    if include.startswith(exclude)
                ]

                if overlaped_by:
                    with_doctest = True

        super(FlakesChecker, self).__init__(
            tree,
            filename=filename,
            withDoctest=with_doctest,
            file_tokens=file_tokens,
        )

    @classmethod
    def add_options(cls, parser):
        """Register options for PyFlakes on the Flake8 OptionManager."""
        parser.add_option(
            "--builtins",
            parse_from_config=True,
            comma_separated_list=True,
            help="define more built-ins, comma separated",
        )
        parser.add_option(
            "--doctests",
            default=False,
            action="store_true",
            parse_from_config=True,
            help="check syntax of the doctests",
        )
        parser.add_option(
            "--include-in-doctest",
            default="",
            dest="include_in_doctest",
            parse_from_config=True,
            comma_separated_list=True,
            normalize_paths=True,
            help="Run doctests only on these files",
            type="string",
        )
        parser.add_option(
            "--exclude-from-doctest",
            default="",
            dest="exclude_from_doctest",
            parse_from_config=True,
            comma_separated_list=True,
            normalize_paths=True,
            help="Skip these files when running doctests",
            type="string",
        )

    @classmethod
    def parse_options(cls, options):
        """Parse option values from Flake8's OptionManager."""
        if options.builtins:
            cls.builtIns = cls.builtIns.union(options.builtins)
        cls.with_doctest = options.doctests

        # Relative paths get an explicit "./" prefix so the later
        # startswith() prefix matching in __init__ behaves predictably.
        included_files = []
        for included_file in options.include_in_doctest:
            if included_file == "":
                continue
            if not included_file.startswith((os.sep, "./", "~/")):
                included_files.append("./" + included_file)
            else:
                included_files.append(included_file)
        cls.include_in_doctest = utils.normalize_paths(included_files)

        excluded_files = []
        for excluded_file in options.exclude_from_doctest:
            if excluded_file == "":
                continue
            if not excluded_file.startswith((os.sep, "./", "~/")):
                excluded_files.append("./" + excluded_file)
            else:
                excluded_files.append(excluded_file)
        cls.exclude_from_doctest = utils.normalize_paths(excluded_files)

        # A path listed in both options is ambiguous; refuse to guess.
        inc_exc = set(cls.include_in_doctest).intersection(
            cls.exclude_from_doctest
        )
        if inc_exc:
            raise ValueError(
                '"%s" was specified in both the '
                "include-in-doctest and exclude-from-doctest "
                "options. You are not allowed to specify it in "
                "both for doctesting." % inc_exc
            )

    def run(self):
        """Run the plugin.

        Yields (line, col, "CODE message", type) tuples as Flake8 expects;
        unknown pyflakes message classes fall back to code F999.
        """
        for message in self.messages:
            col = getattr(message, "col", 0)
            yield (
                message.lineno,
                col,
                "{} {}".format(
                    FLAKE8_PYFLAKES_CODES.get(type(message).__name__, "F999"),
                    message.message % message.message_args,
                ),
                message.__class__,
            )
|
[
"[email protected]"
] | |
1d0479b10748363c8598f680dd8ac691974f0c9e
|
11060ca244940baef96a51d794d73aab44fc31c6
|
/src/brainstorming/tornado/modbus/pymodbus/__init__.py
|
0bb3d9b53e2360b44fb5246e72a6c065e1fdb427
|
[] |
no_license
|
D3f0/txscada
|
eb54072b7311068a181c05a03076a0b835bb0fe1
|
f8e1fd067a1d001006163e8c3316029f37af139c
|
refs/heads/master
| 2020-12-24T06:27:17.042056 | 2016-07-27T17:17:56 | 2016-07-27T17:17:56 | 3,565,335 | 9 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,280 |
py
|
"""
Pymodbus: Modbus Protocol Implementation
-----------------------------------------
This package can supply modbus clients and servers:
client:
- Can perform single get/set on discretes and registers
- Can perform multiple get/set on discretes and registers
- Working on diagnostic/file/pipe/setting/info requets
- Can fully scrape a host to be cloned
server:
- Can function as a fully implemented TCP modbus server
- Working on creating server control context
- Working on serial communication
- Working on funtioning as a RTU/ASCII
- Can mimic a server based on the supplied input data
TwistedModbus is built on top of the Pymodbus developed from code by:
Copyright (c) 2001-2005 S.W.A.C. GmbH, Germany.
Copyright (c) 2001-2005 S.W.A.C. Bohemia s.r.o., Czech Republic.
Hynek Petrak <[email protected]>
Released under the the GPLv2
"""
from pymodbus.version import _version
__version__ = _version.short().split('+')[0]
#---------------------------------------------------------------------------#
# Block unhandled logging
#---------------------------------------------------------------------------#
import logging
class NullHandler(logging.Handler):
    # No-op handler: discards every record so that applications that have
    # not configured logging do not get "No handlers could be found"
    # warnings from the "pymodbus" logger.  (logging.NullHandler exists
    # only from Python 2.7 on, hence the local definition.)
    def emit(self, record):
        pass
h = NullHandler()
logging.getLogger("pymodbus").addHandler(h)
|
[
"devnull@localhost"
] |
devnull@localhost
|
ac8fcee7be310f87e1cf6a7479d7dec05c585cc6
|
6413fe58b04ac2a7efe1e56050ad42d0e688adc6
|
/tempenv/lib/python3.7/site-packages/dash_bootstrap_components/_components/CardText.py
|
c0146873ce75910bc6733eabc85670d925f82320
|
[
"MIT"
] |
permissive
|
tytechortz/Denver_temperature
|
7f91e0ac649f9584147d59193568f6ec7efe3a77
|
9d9ea31cd7ec003e8431dcbb10a3320be272996d
|
refs/heads/master
| 2022-12-09T06:22:14.963463 | 2019-10-09T16:30:52 | 2019-10-09T16:30:52 | 170,581,559 | 1 | 0 |
MIT
| 2022-06-21T23:04:21 | 2019-02-13T21:22:53 |
Python
|
UTF-8
|
Python
| false | false | 3,332 |
py
|
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class CardText(Component):
    """A CardText component (auto-generated Dash component wrapper).

    Keyword arguments:
    - children (dash component(s), string or number; optional): the children
      of this component.
    - id (string; optional): component ID used to identify it in callbacks;
      must be unique across the app.
    - style (dict; optional): CSS styles overriding previously set styles.
    - className (string; optional): CSS class name(s) for styling.
    - key (string; optional): React key used to improve re-render
      performance.
    - tag (string; optional): HTML tag to use for the card text, default: p.
    - color (string; optional): text color, options: primary, secondary,
      success, warning, danger, info, muted, light, dark, body, white,
      black-50, white-50.
    """
    @_explicitize_args
    def __init__(self, children=None, id=Component.UNDEFINED, style=Component.UNDEFINED, className=Component.UNDEFINED, key=Component.UNDEFINED, tag=Component.UNDEFINED, color=Component.UNDEFINED, **kwargs):
        self._prop_names = ['children', 'id', 'style', 'className', 'key', 'tag', 'color']
        self._type = 'CardText'
        self._namespace = 'dash_bootstrap_components/_components'
        self._valid_wildcard_attributes = []
        self.available_properties = ['children', 'id', 'style', 'className', 'key', 'tag', 'color']
        self.available_wildcard_properties = []
        # _explicit_args is injected by the @_explicitize_args decorator and
        # lists the keyword arguments the caller actually supplied.
        _explicit_args = kwargs.pop('_explicit_args')
        _locals = locals()
        _locals.update(kwargs)  # For wildcard attrs
        args = {k: _locals[k] for k in _explicit_args if k != 'children'}
        # This component has no required arguments (empty list), but the
        # generator template keeps the validation loop for uniformity.
        for k in []:
            if k not in args:
                raise TypeError(
                    'Required argument `' + k + '` was not specified.')
        super(CardText, self).__init__(children=children, **args)
    def __repr__(self):
        # Render "CardText(prop=value, ...)" listing only props that are set;
        # falls back to repr of the first prop (children) when none are.
        if(any(getattr(self, c, None) is not None
               for c in self._prop_names
               if c is not self._prop_names[0])
           or any(getattr(self, c, None) is not None
                  for c in self.__dict__.keys()
                  if any(c.startswith(wc_attr)
                         for wc_attr in self._valid_wildcard_attributes))):
            props_string = ', '.join([c+'='+repr(getattr(self, c, None))
                                      for c in self._prop_names
                                      if getattr(self, c, None) is not None])
            wilds_string = ', '.join([c+'='+repr(getattr(self, c, None))
                                      for c in self.__dict__.keys()
                                      if any([c.startswith(wc_attr)
                                              for wc_attr in
                                              self._valid_wildcard_attributes])])
            return ('CardText(' + props_string +
                    (', ' + wilds_string if wilds_string != '' else '') + ')')
        else:
            return (
                'CardText(' +
                repr(getattr(self, self._prop_names[0], None)) + ')')
|
[
"[email protected]"
] | |
f2acacf75129142364d47c4372031342a19566a9
|
1554150a9720ebf35cd11c746f69169b595dca10
|
/tk_practise/shape_display_view.py
|
908a4219294e3677bf29d3a5afa33665d56b7ca5
|
[] |
no_license
|
andrewili/shape-grammar-engine
|
37a809f8cf78b133f8f1c3f9cf13a7fbbb564713
|
2859d8021442542561bdd1387deebc85e26f2d03
|
refs/heads/master
| 2021-01-18T22:46:51.221257 | 2016-05-31T21:15:28 | 2016-05-31T21:15:28 | 14,129,359 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,640 |
py
|
# shape_display_view.py
import Tkinter as tk
import tkFileDialog
import tkFont
import ttk
class Observable(object):
    """Minimal subject of an observer pattern: registered observers are
    notified through their ``respond`` method whenever ``broadcast`` fires."""
    def __init__(self):
        # Observers are kept in registration order.
        self.observers = []
    def broadcast(self, widget):
        """Notify every registered observer, passing the originating widget."""
        for subscriber in self.observers:
            subscriber.respond(widget)
    def add_observer(self, observer):
        """Register *observer*; it must expose a ``respond(widget)`` method."""
        self.observers.append(observer)
class View(tk.Toplevel, Observable):
    # Top-level Tk window with three shape panels (A, B, result C) and a
    # column of operation buttons; user actions are reported to observers
    # through the Observable broadcast mechanism.
    def __init__(self, master):
        tk.Toplevel.__init__(self, master)
        # Closing this window tears down the whole application root.
        self.protocol('WM_DELETE_WINDOW', self.master.destroy)
        Observable.__init__(self)
        self.title('Shape display 2014-04-03')
        # One StringVar per text panel (A, B and the result C).
        self.text_var_a = tk.StringVar()
        self.text_var_b = tk.StringVar()
        self.text_var_c = tk.StringVar()
        # Shared appearance settings for the three text labels.
        self.label_width = 28
        self.label_height = 15
        self.label_font = ('Andale Mono', '11')
        self.background_color = '#EEEEEE'
        self._make_main_frame()
        # Columns 0..6: panel A, spacer, panel B, spacer, buttons, spacer, panel C.
        self._make_label_frame_a( 0, 0)
        self._make_spacer( 1, 0)
        self._make_label_frame_b( 2, 0)
        self._make_spacer( 3, 0)
        self._make_label_frame_buttons( 4, 0)
        self._make_spacer( 5, 0)
        self._make_label_frame_c( 6, 0)
    def _make_main_frame(self):
        # Outer padded frame that hosts all other widgets.
        self.mainframe = ttk.Frame(
            self,
            padding='10 10 10 10')
        self.mainframe.grid(
            column=0,
            row=0,
            sticky='NSEW')
        self.mainframe.rowconfigure(
            0,
            weight=1)
        self.mainframe.columnconfigure(
            0,
            weight=1)
    def _make_label_frame_a(self, column_in, row_in):
        # Panel A: canvas + "Get A" button + text label bound to text_var_a.
        self.label_frame_a = ttk.LabelFrame(
            self.mainframe)
        self.label_frame_a.grid(
            column=column_in,
            row=row_in,
            sticky='EW')
        self.canvas_a = self.make_canvas(
            self.label_frame_a,
            0, 0)
        self.get_lshape_a_button = ttk.Button(
            self.label_frame_a,
            width=15,
            text='Get A',
            command=(self.get_lshape_a))
        self.get_lshape_a_button.grid(
            column=0,
            row=2)
        self.label_a = tk.Label(
            self.label_frame_a,
            width=self.label_width,
            height=self.label_height,
            textvariable=self.text_var_a,
            anchor=tk.NW,
            justify=tk.LEFT,
            font=self.label_font)
        self.label_a.grid(
            column=0,
            row=3)
    def _make_label_frame_b(self, column_in, row_in):
        # Panel B: mirror of panel A, bound to text_var_b.
        self.label_frame_b = ttk.LabelFrame(
            self.mainframe)
        self.label_frame_b.grid(
            column=column_in,
            row=row_in,
            sticky='EW')
        self.canvas_b = self.make_canvas(
            self.label_frame_b,
            0, 0)
        self.get_lshape_b_button = ttk.Button(
            self.label_frame_b,
            width=15,
            text='Get B',
            command=self.get_lshape_b)
        self.get_lshape_b_button.grid(
            column=0,
            row=2)
        self.label_b = tk.Label(
            self.label_frame_b,
            width=self.label_width,
            height=self.label_height,
            textvariable=self.text_var_b,
            anchor=tk.NW,
            justify=tk.LEFT,
            font=self.label_font)
        self.label_b.grid(
            column=0,
            row=3)
    def _make_label_frame_buttons(self, column_in, row_in):
        # Middle column: the three operation buttons (A+B, A-B, A<=B)
        # vertically centered between two spacer labels.
        self.label_frame_buttons = ttk.LabelFrame(
            self.mainframe)
        self.label_frame_buttons.grid(
            column=column_in,
            row=row_in,
            sticky='NEW')
        self.result_button_frame_spacer_upper = tk.Label(
            self.label_frame_buttons,
            height=5,
            background=self.background_color)
        self.result_button_frame_spacer_upper.grid(
            column=0,
            row=0)
        self.get_lshape_a_plus_b_button = ttk.Button(
            self.label_frame_buttons,
            width=15,
            text='A + B',
            command=self.get_lshape_a_plus_b)
        self.get_lshape_a_plus_b_button.grid(
            column=0,
            row=1)
        self.get_lshape_a_minus_b_button = ttk.Button(
            self.label_frame_buttons,
            width=15,
            text='A - B',
            command=self.get_lshape_a_minus_b)
        self.get_lshape_a_minus_b_button.grid(
            column=0,
            row=2)
        self.get_lshape_a_sub_lshape_b_button = ttk.Button(
            self.label_frame_buttons,
            width=15,
            text='A <= B',
            command=self.get_lshape_a_sub_lshape_b)
        self.get_lshape_a_sub_lshape_b_button.grid(
            column=0,
            row=3)
        self.result_button_frame_spacer_lower = tk.Label(
            self.label_frame_buttons,
            height=17,
            background=self.background_color)
        self.result_button_frame_spacer_lower.grid(
            column=0,
            row=4)
    def _make_label_frame_c(self, column_in, row_in):
        # Panel C: result canvas + text label bound to text_var_c (no button).
        self.label_frame_c = ttk.LabelFrame(
            self.mainframe)
        self.label_frame_c.grid(
            column=column_in,
            row=row_in,
            sticky='NEW')
        self.canvas_c = self.make_canvas(
            self.label_frame_c,
            0, 0)
        self.spacer_c = tk.Label(
            self.label_frame_c,
            width=2,
            background=self.background_color,
            text=' ')
        self.spacer_c.grid(
            column=0,
            row=1)
        self.label_c = tk.Label(
            self.label_frame_c,
            width=self.label_width,
            height=self.label_height,
            textvariable=self.text_var_c,
            anchor=tk.NW,
            justify=tk.LEFT,
            font=self.label_font)
        self.label_c.grid(
            column=0,
            row=2)
    def make_canvas(self, parent, column_in, row_in):
        # Build and grid a fixed-size drawing canvas; returns the canvas
        # so callers can keep a reference for later drawing.
        canvas = tk.Canvas(
            parent,
            width=200,
            height=200,
            background='#DDDDDD')  # use constant
        canvas.xview_moveto(0)  # move origin to visible area
        canvas.yview_moveto(0)
        canvas.grid(
            column=column_in,
            row=row_in,
            sticky='EW')
        return canvas
    def _make_spacer(self, column_in, row_in):
        # Thin background-colored column used between the main frames.
        self.spacer = tk.Label(
            self.mainframe,
            width=2,
            background=self.background_color,
            text=' ')
        self.spacer.grid(
            column=column_in,
            row=row_in)
##    def make_spacer_above_buttons(self, column_in, row_in):
##        spacer = tk.Label(
##            self.mainframe,
##            width=2,
##            height=5,
##            text='  ')
##        spacer.grid(
##            column=column_in,
##            row=row_in)
    def get_lshape_a(self):
        # Prompt for shape A's file, then notify observers which button fired.
        self.file_a = tkFileDialog.askopenfile()
        self.broadcast(self.get_lshape_a_button)
    def get_lshape_b(self):
        # Prompt for shape B's file, then notify observers which button fired.
        self.file_b = tkFileDialog.askopenfile()
        self.broadcast(self.get_lshape_b_button)
    def get_lshape_a_plus_b(self):
        self.broadcast(self.get_lshape_a_plus_b_button)
    def get_lshape_a_minus_b(self):
        self.broadcast(self.get_lshape_a_minus_b_button)
    def get_lshape_a_sub_lshape_b(self):
        self.broadcast(self.get_lshape_a_sub_lshape_b_button)
if __name__ == '__main__':
import doctest
doctest.testfile('tests/shape_display_view_test.txt')
|
[
"[email protected]"
] | |
3609cbd86fe366108bed83305f57d5ac02c3ce24
|
a2dc75a80398dee58c49fa00759ac99cfefeea36
|
/bluebottle/bb_projects/migrations/0018_auto_20210302_1417.py
|
69d49b4e0234875309c1a920a6cf0af3e76ba9e8
|
[
"BSD-2-Clause"
] |
permissive
|
onepercentclub/bluebottle
|
e38b0df2218772adf9febb8c6e25a2937889acc0
|
2b5f3562584137c8c9f5392265db1ab8ee8acf75
|
refs/heads/master
| 2023-08-29T14:01:50.565314 | 2023-08-24T11:18:58 | 2023-08-24T11:18:58 | 13,149,527 | 15 | 9 |
BSD-3-Clause
| 2023-09-13T10:46:20 | 2013-09-27T12:09:13 |
Python
|
UTF-8
|
Python
| false | false | 956 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2021-03-02 13:17
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Removes the ProjectTheme/ProjectThemeTranslation models from Django's
    # migration *state only* (SeparateDatabaseAndState leaves the tables in
    # the database), then actually deletes the ProjectPhase models.
    dependencies = [
        ('bb_projects', '0017_auto_20210302_1417'),
        ('projects', '0095_auto_20210302_1417'),
        ('suggestions', '0005_auto_20210302_1417'),
        ('initiatives', '0030_auto_20210302_1405'),
        ('members', '0041_auto_20210302_1416'),
    ]
    # State-only operations: no SQL is emitted for these deletions.
    state_operations = [
        migrations.DeleteModel(
            name='ProjectTheme',
        ),
        migrations.DeleteModel(
            name='ProjectThemeTranslation',
        ),
    ]
    operations = [
        migrations.SeparateDatabaseAndState(
            state_operations=state_operations
        ),
        migrations.DeleteModel(
            name='ProjectPhase',
        ),
        migrations.DeleteModel(
            name='ProjectPhaseTranslation',
        ),
    ]
|
[
"[email protected]"
] | |
9b73114f7ea4cb451dfbd939500b3c97b30e2d8a
|
673440c09033912157d1c3767d5308f95755e76a
|
/ManachersAlgo.py
|
34e2ae34f01f3af98fb2e6b72aa5e397af5e4c02
|
[] |
no_license
|
jagadeshwarrao/programming
|
414193b1c538e37684378233d0532bd786d63b32
|
1b343251a8ad6a81e307d31b2025b11e0b28a707
|
refs/heads/master
| 2023-02-02T19:26:21.187561 | 2020-12-21T18:21:00 | 2020-12-21T18:21:00 | 274,644,612 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,482 |
py
|
def findLongestPalindromicString(text):
    # Manacher's algorithm (Python 2: xrange, integer '/', print statements).
    # Works on a conceptual transformed string of length 2N+1 in which
    # positions alternate between gaps and characters, so even- and
    # odd-length palindromes are handled uniformly.  L[i] is the palindrome
    # radius at transformed position i; C and R are the center and right
    # edge of the rightmost palindrome found so far.
    N = len(text)
    if N == 0:
        return
    N = 2*N+1
    L = [0] * N
    L[0] = 0
    L[1] = 1
    C = 1
    R = 2
    i = 0
    iMirror = 0
    maxLPSLength = 0
    maxLPSCenterPosition = 0
    start = -1
    end = -1
    diff = -1
    for i in xrange(2,N):
        # Mirror of i about the current center C.
        iMirror = 2*C-i
        L[i] = 0
        diff = R - i
        # If i lies within the right edge R, reuse the mirrored radius.
        if diff > 0:
            L[i] = min(L[iMirror], diff)
        # Expand around i while characters match.  Odd transformed indices
        # are character positions, even ones are gaps (always "match").
        # NOTE(review): the bare except silently truncates expansion on any
        # error (e.g. an index issue) -- confirm this is intentional.
        try:
            while ((i+L[i]) < N and (i-L[i]) > 0) and \
                (((i+L[i]+1) % 2 == 0) or \
                (text[(i+L[i]+1)/2] == text[(i-L[i]-1)/2])):
                L[i]+=1
        except Exception as e:
            pass
        if L[i] > maxLPSLength:
            maxLPSLength = L[i]
            maxLPSCenterPosition = i
        # Palindrome at i expands past R: adopt it as the new rightmost one.
        if i + L[i] > R:
            C = i
            R = i + L[i]
    # Map the transformed center/length back to original-string indices
    # (Python 2 integer division).
    start = (maxLPSCenterPosition - maxLPSLength) / 2
    end = start + maxLPSLength - 1
    print "LPS of string is " + text + " : ",
    print text[start:end+1],
    print "\n",
# Ad-hoc demonstration: run Manacher's algorithm on sample strings covering
# odd/even lengths, whole-string palindromes and embedded palindromes, and
# print each result.
text1 = "babcbabcbaccba"
findLongestPalindromicString(text1)
text2 = "abaaba"
findLongestPalindromicString(text2)
text3 = "abababa"
findLongestPalindromicString(text3)
text4 = "abcbabcbabcba"
findLongestPalindromicString(text4)
text5 = "forgeeksskeegfor"
findLongestPalindromicString(text5)
text6 = "caba"
findLongestPalindromicString(text6)
text7 = "abacdfgdcaba"
findLongestPalindromicString(text7)
text8 = "abacdfgdcabba"
findLongestPalindromicString(text8)
text9 = "abacdedcaba"
findLongestPalindromicString(text9)
|
[
"[email protected]"
] | |
a40784738ed092668081456e1b724bb29a5780e8
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2790/60589/243105.py
|
230152d093784ddcfff077a0a0b37bbdb892f405
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 350 |
py
|
import bisect

# Read n (length of list a) and m (number of queries in b).
nm = input().split(' ')
n = int(nm[0])
m = int(nm[1])
a = list(map(int, input().split(' ')))
b = list(map(int, input().split(' ')))
a.sort()
# For each query e the answer is the index of the first element of the
# sorted list strictly greater than e (or n when there is none) -- that is
# exactly bisect_right, the count of elements <= e.  O(log n) per query
# instead of the original O(n) linear scan.
ans = [str(bisect.bisect_right(a, e)) for e in b]
print(' '.join(ans))
|
[
"[email protected]"
] | |
6677355c1c7383d94b434226fae40b8cf76ba2d0
|
bdf86d69efc1c5b21950c316ddd078ad8a2f2ec0
|
/venv/Lib/site-packages/twisted/plugins/twisted_core.py
|
a66ad7f0104dc02e960fa9fecfcfe59830bb8d40
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
DuaNoDo/PythonProject
|
543e153553c58e7174031b910fd6451399afcc81
|
2c5c8aa89dda4dec2ff4ca7171189788bf8b5f2c
|
refs/heads/master
| 2020-05-07T22:22:29.878944 | 2019-06-14T07:44:35 | 2019-06-14T07:44:35 | 180,941,166 | 1 | 1 | null | 2019-06-04T06:27:29 | 2019-04-12T06:05:42 |
Python
|
UTF-8
|
Python
| false | false | 588 |
py
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import absolute_import, division
from twisted.internet.endpoints import (
_SystemdParser, _TCP6ServerParser, _StandardIOParser,
_TLSClientEndpointParser)
from twisted.protocols.haproxy._parser import (
HAProxyServerParser as _HAProxyServerParser
)
# Module-level parser instances; given this module's location under
# twisted/plugins, these are presumably discovered by Twisted's plugin
# system to register endpoint-string prefixes (systemd:, tcp6:, stdio:,
# tls:, haproxy:) -- confirm against Twisted's plugin docs.
systemdEndpointParser = _SystemdParser()
tcp6ServerEndpointParser = _TCP6ServerParser()
stdioEndpointParser = _StandardIOParser()
tlsClientEndpointParser = _TLSClientEndpointParser()
_haProxyServerEndpointParser = _HAProxyServerParser()
|
[
"[email protected]"
] | |
d2f61390d6b2c4b81f9dcb27acbe7b81d9e4cc13
|
16734d189c2bafa9c66fdc989126b7d9aa95c478
|
/Python/flask/counter/server.py
|
1c6c4ef1a9e0e7f137e799fcf25543dac002609e
|
[] |
no_license
|
Ericksmith/CD-projects
|
3dddd3a3819341be7202f11603cf793a2067c140
|
3b06b6e289d241c2f1115178c693d304280c2502
|
refs/heads/master
| 2021-08-15T17:41:32.329647 | 2017-11-18T01:18:04 | 2017-11-18T01:18:04 | 104,279,162 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 534 |
py
|
from flask import Flask, session, render_template, request, redirect
app = Flask(__name__)
app.secret_key = "Dojo"
@app.route('/')
def index():
    """Increment the per-session counter and render the index page with it."""
    # Initialise the counter on a first visit.  `is None` is the idiomatic
    # identity test (PEP 8) -- the original used `== None`.
    if session.get('counter') is None:
        session['counter'] = 0
    session['counter'] += 1
    return render_template('index.html', counter=session['counter'])
@app.route('/doubleCount')
def doubleCount():
    """Advance the session counter by two, then return to the index page."""
    # Assumes the counter was initialised by a prior visit to '/'.
    session['counter'] = session['counter'] + 2
    return redirect('/')
@app.route('/countReset')
def countReset():
    """Zero the session counter, then return to the index page."""
    session['counter'] = 0
    return redirect('/')
app.run(debug=True)
|
[
"[email protected]"
] | |
c702a1355b9688ac31eb5f513f2d151be4f47134
|
f242b489b9d3db618cf04415d4a7d490bac36db0
|
/Archives_Homework/src/archivesziped.py
|
2b15451a84885e66b830d42976855d566e4d935e
|
[] |
no_license
|
LABETE/Python2_Homework
|
e33d92d4f8a1867a850430600ccc7baf7ebc6dad
|
b24207b74c7883c220efc28d315e386dedead41d
|
refs/heads/master
| 2016-08-12T19:04:05.304348 | 2015-05-27T04:05:18 | 2015-05-27T04:05:18 | 36,182,485 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 710 |
py
|
import zipfile
import os
import glob
def zippedfiles(zipfilename):
    """Zip the plain files found directly inside *zipfilename*.

    Creates ``<basename>.zip`` in the current working directory.  Each
    archived file is read from the current working directory (by basename,
    as in the original) and stored under ``<basename>/<file>``.  Returns the
    archive's member names with backslash separators.
    """
    path = os.getcwd()
    zip_file = os.path.join(path, os.path.basename(zipfilename) + ".zip")
    # os.path.join makes the glob pattern portable; the original hard-coded
    # a backslash ("\*") which only matched on Windows.
    files_to_zip = [os.path.basename(fn)
                    for fn in glob.glob(os.path.join(zipfilename, "*"))
                    if os.path.isfile(fn)]
    arc_dir = os.path.basename(zipfilename)
    # Context manager guarantees the archive is closed even if write()
    # raises -- the original leaked the handle on error.
    with zipfile.ZipFile(zip_file, "w", zipfile.ZIP_DEFLATED) as zf:
        for name in files_to_zip:
            zf.write(os.path.join(path, name), os.path.join(arc_dir, name))
        list_ziped_files = zf.namelist()
    # Preserve the original's Windows-style separators in the return value.
    return [member.replace("/", "\\") for member in list_ziped_files]
|
[
"[email protected]"
] | |
0a79edc64c01026d73147c2ba199040dde418acb
|
0d75e69be45600c5ef5f700e409e8522b9678a02
|
/IWDjangoAssignment1/settings.py
|
fbefbb96d64fcd6e7f9d12d0300504134dbaecd7
|
[] |
no_license
|
sdrsnadkry/IWDjangoAssignment1
|
28d4d6c264aac250e66a7be568fee29f1700464b
|
6eb533d8bbdae68a6952113511626405e718cac6
|
refs/heads/master
| 2022-11-29T07:53:37.374821 | 2020-07-18T03:27:52 | 2020-07-18T03:27:52 | 280,572,491 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,138 |
py
|
"""
Django settings for IWDjangoAssignment1 project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%g^opnnuo)*09sbtnne1)v9%b%r&k$166ox+no@$%eeshu42ho'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'IWDjangoAssignment1.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'IWDjangoAssignment1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
5bc8073cfa36a998bb67cbfb0078c319d984d68b
|
f561a219c57bd75790d3155acac6f54299a88b08
|
/city/migrations/0010_auto_20170406_1957.py
|
c4a0d52767af5f7c0852ea55762bea83e23cf8ea
|
[] |
no_license
|
ujjwalagrawal17/OfferCartServer
|
1e81cf2dc17f19fa896062c2a084e6b232a8929e
|
b3cd1c5f8eecc167b6f4baebed3c4471140d905f
|
refs/heads/master
| 2020-12-30T15:31:04.380084 | 2017-05-24T18:26:20 | 2017-05-24T18:26:20 | 91,155,405 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 472 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-06 19:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Relaxes CityData.name to an optional CharField (blank/null allowed,
    # max 255 characters).
    dependencies = [
        ('city', '0009_auto_20170406_1951'),
    ]
    operations = [
        migrations.AlterField(
            model_name='citydata',
            name='name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
|
[
"[email protected]"
] | |
00525edd9f91bf0763fa8d35db247a55724a0f90
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/models/nist_data/atomic/duration/schema_instance/nistschema_sv_iv_atomic_duration_enumeration_2_xsd/__init__.py
|
aa2a5364c28c8263b4cba85ac2516304e22deade
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 |
MIT
| 2023-07-25T14:19:04 | 2020-02-10T21:59:47 |
Python
|
UTF-8
|
Python
| false | false | 381 |
py
|
from output.models.nist_data.atomic.duration.schema_instance.nistschema_sv_iv_atomic_duration_enumeration_2_xsd.nistschema_sv_iv_atomic_duration_enumeration_2 import (
NistschemaSvIvAtomicDurationEnumeration2,
NistschemaSvIvAtomicDurationEnumeration2Type,
)
__all__ = [
"NistschemaSvIvAtomicDurationEnumeration2",
"NistschemaSvIvAtomicDurationEnumeration2Type",
]
|
[
"[email protected]"
] | |
04c9978ad6a95cfed263e81ffc0cdeaba8a93b6c
|
ab460d3c0c3cbc4bd45542caea46fed8b1ee8c26
|
/dprs/common/sftp/PySFTPAuthException.py
|
a6d9b4cf620bb2d34ac77e41957792eefe8c126a
|
[
"Unlicense"
] |
permissive
|
sone777/automl-dprs
|
8c7f977402f6819565c45acd1cb27d8d53c40144
|
63572d1877079d8390b0e4a3153edf470056acf0
|
refs/heads/main
| 2023-09-03T21:54:43.440111 | 2021-11-02T14:44:35 | 2021-11-02T14:44:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 288 |
py
|
# -*- coding: utf-8 -*-
# Author : Jin Kim
# e-mail : [email protected]
# Powered by Seculayer © 2020 AI Service Model Team, R&D Center.
class PySFTPAuthException(Exception):
    """Raised when SFTP authentication is rejected for the given credentials."""

    # Fixed diagnostic text reported for every instance.
    _MESSAGE = "[ERROR-C0001] Authentication failed. check username and password!"

    def __str__(self):
        # Always return the canonical message, regardless of constructor args.
        return self._MESSAGE
|
[
"[email protected]"
] | |
5406a0bd1a39311a7a0f09d7800aa9d20636919f
|
c631e9756210bab774afda2b228853cb93ae28fe
|
/src/test/test_trainer_attention.py
|
5e4bbec6ab8ba0a3a905e64b3e3157bbcaafa0c8
|
[] |
no_license
|
AIRob/pytorch-chat-bot
|
9a9af2078ef4ee6b5ce5a10a75977fb0b5adfe6a
|
1b604f9fecee70e519a930525afaa83facbfaf68
|
refs/heads/master
| 2020-03-27T10:00:35.117537 | 2017-12-09T01:38:40 | 2017-12-09T01:38:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,541 |
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from unittest import TestCase
from data.data_loader_attention import DataLoaderAttention
from models.encoder import Encoder
from models.decoder import Decoder
from models.trainer import Trainer
class TestTrainerAttention(TestCase):
    # End-to-end smoke test for the attention seq2seq training loop using a
    # small fixture file and a pre-trained GloVe model for fine-tuning.
    def test_train_method(self):
        file_name = 'test/test_data/attention_test.txt'
        fine_tune_model_name = '../models/glove_model_40.pth'
        self.test_data_loader_attention = DataLoaderAttention(file_name=file_name)
        # NOTE(review): load_data() is called twice and the first result is
        # discarded -- confirm whether the first call is a needed side
        # effect or redundant.
        self.test_data_loader_attention.load_data()
        source2index, index2source, target2index, index2target, train_data = \
            self.test_data_loader_attention.load_data()
        EMBEDDING_SIZE = 50
        HIDDEN_SIZE = 32
        # Decoder input size is doubled, presumably because the encoder is
        # bidirectional (final True flag) -- verify against Encoder's API.
        encoder = Encoder(len(source2index), EMBEDDING_SIZE, HIDDEN_SIZE, 3, True)
        decoder = Decoder(len(target2index), EMBEDDING_SIZE, HIDDEN_SIZE*2)
        self.trainer = Trainer(
            fine_tune_model=fine_tune_model_name
        )
        self.trainer.train_attention(train_data=train_data,
                                     source2index=source2index,
                                     target2index=target2index,
                                     index2source=index2source,
                                     index2target=index2target,
                                     encoder_model=encoder,
                                     decoder_model=decoder,
                                     )
|
[
"[email protected]"
] | |
8403194c971606033bb11b869b9d4c323b5903ff
|
2e00546708761532e0081dc9be928b58307c5941
|
/setup.py
|
6f30596ede067a7daf4e98a7a4a82ac3164c7708
|
[
"BSD-3-Clause"
] |
permissive
|
gijs/bulbs
|
5f16b9d748face55f514f73c849745af91a8bd97
|
650e03d1ee635d0d8f40557f4697b3a85b88cdff
|
refs/heads/master
| 2021-01-18T06:23:04.496132 | 2011-07-15T15:00:49 | 2011-07-15T15:00:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,726 |
py
|
"""
Bulbs
-----
Bulbs is a Python persistence framework for graph databases that
connects to Rexster.
"""
from setuptools import Command, setup
class run_audit(Command):
    """Audits source code using PyFlakes for following issues:
        - Names which are used but not defined or used before they are defined.
        - Names which are redefined without having been used.
    """
    # distutils custom command (Python 2: print statements below).
    description = "Audit source code with PyFlakes"
    user_options = []
    def initialize_options(self):
        # NOTE(review): assigns a local named 'all' (shadows the builtin)
        # and has no effect; the hook merely needs to exist for distutils.
        all = None
    def finalize_options(self):
        pass
    def run(self):
        # Walk the top-level .py files of each listed directory and sum the
        # per-file warning counts reported by pyflakes.
        import os, sys
        try:
            import pyflakes.scripts.pyflakes as flakes
        except ImportError:
            print "Audit requires PyFlakes installed in your system."""
            sys.exit(-1)
        dirs = ['bulbs', 'tests']
        # Add example directories
        #for dir in ['blog',]:
        #    dirs.append(os.path.join('examples', dir))
        # TODO: Add test subdirectories
        warns = 0
        for dir in dirs:
            for filename in os.listdir(dir):
                if filename.endswith('.py') and filename != '__init__.py':
                    warns += flakes.checkPath(os.path.join(dir, filename))
        if warns > 0:
            print ("Audit finished with total %d warnings." % warns)
        else:
            print ("No problems found in sourcecode.")
def run_tests():
    # Build the Bulbs unit-test suite for setup()'s test_suite hook; the
    # imports are local so importing this module doesn't require the tests
    # package to be importable.
    import os, sys
    sys.path.append(os.path.join(os.path.dirname(__file__), 'tests'))
    from bulbs_tests import suite
    return suite()
setup (
name = 'Bulbs',
version = '0.2-dev',
url = 'http://bulbflow.com',
license = 'BSD',
author = 'James Thornton',
author_email = '[email protected]',
description = 'A Python persistence framework for graph databases that '
'connects to Rexster.',
long_description = __doc__,
keywords = "graph database DB persistence framework rexster gremlin",
packages = ['bulbs'],
zip_safe=False,
platforms='any',
install_requires=[
'httplib2>=0.7.1',
'simplejson>=2.1.6',
],
classifiers = [
"Programming Language :: Python",
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Database",
"Topic :: Database :: Front-Ends",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Distributed Computing",
],
cmdclass={'audit': run_audit},
test_suite='__main__.run_tests'
)
|
[
"[email protected]"
] | |
2e6261677ddc3501e9d60c2a0868e8ae1938e26e
|
f33e2e9e10a7c8a5ecc9997f86548bad071ce33e
|
/alerta/app/exceptions.py
|
6c0b0caccf525a16c4431797256c413948898f77
|
[
"Apache-2.0"
] |
permissive
|
sasha-astiadi/alerta
|
01f1136adbfc26f79935c1c44e9ca3d49efd6f00
|
f9a33f50af562e5d0a470e1091e9d696d76558f4
|
refs/heads/master
| 2023-03-16T10:35:42.300274 | 2018-01-23T14:06:42 | 2018-01-23T14:06:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 431 |
py
|
class AlertaException(IOError):
    """Root of the Alerta exception hierarchy."""


class RejectException(AlertaException):
    """The alert was rejected because the format did not meet the required policy."""


class RateLimit(AlertaException):
    """Too many alerts have been received for a resource or from an origin."""


class BlackoutPeriod(AlertaException):
    """Alert was not processed because it was sent during a blackout period."""
|
[
"[email protected]"
] | |
b71941a91b5406892fc0962d46ddbf6b15406fb4
|
64d923ab490341af97c4e7f6d91bf0e6ccefdf4b
|
/tensorforce/core/networks/auto.py
|
6a445b051b06ae683e4435e6f34e5c608037ef5b
|
[
"Apache-2.0"
] |
permissive
|
tensorforce/tensorforce
|
38d458fedeeaa481adf083397829cea434d020cd
|
1bf4c3abb471062fb66f9fe52852437756fd527b
|
refs/heads/master
| 2023-08-17T17:35:34.578444 | 2023-08-14T20:14:08 | 2023-08-14T20:14:08 | 85,491,050 | 1,312 | 246 |
Apache-2.0
| 2023-08-14T20:14:10 | 2017-03-19T16:24:22 |
Python
|
UTF-8
|
Python
| false | false | 7,625 |
py
|
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
from tensorforce import TensorforceError
from tensorforce.core.networks import LayeredNetwork
class AutoNetwork(LayeredNetwork):
    """
    Network whose architecture is automatically configured based on input types and shapes,
    offering high-level customization (specification key: `auto`).
    Args:
        size (int > 0): Layer size, before concatenation if multiple states
            (<span style="color:#00C000"><b>default</b></span>: 64).
        depth (int > 0): Number of layers per state, before concatenation if multiple states
            (<span style="color:#00C000"><b>default</b></span>: 2).
        final_size (int > 0): Layer size after concatenation if multiple states
            (<span style="color:#00C000"><b>default</b></span>: layer size).
        final_depth (int > 0): Number of layers after concatenation if multiple states
            (<span style="color:#00C000"><b>default</b></span>: 1).
        rnn (false | [parameter](../modules/parameters.html), int >= 0): Whether to add an LSTM cell
            with internal state as last layer, and if so, horizon of the LSTM for truncated
            backpropagation through time
            (<span style="color:#00C000"><b>default</b></span>: false).
        device (string): Device name
            (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
        l2_regularization (float >= 0.0): Scalar controlling L2 regularization
            (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
        name (string): <span style="color:#0000C0"><b>internal use</b></span>.
        inputs_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>.
        outputs (iter[string]): <span style="color:#0000C0"><b>internal use</b></span>.
    """
    def __init__(
        self, *, size=64, depth=2, final_size=None, final_depth=1, rnn=False, device=None,
        l2_regularization=None, name=None, inputs_spec=None, outputs=None,
        # Deprecated
        internal_rnn=None
    ):
        # 'internal_rnn' was renamed to 'rnn'; fail loudly rather than silently ignore it.
        if internal_rnn is not None:
            raise TensorforceError.deprecated(
                name='AutoNetwork', argument='internal_rnn', replacement='rnn'
            )
        # With a single state component there is no concatenation stage, so the
        # 'final_*' arguments are meaningless and therefore rejected.
        if len(inputs_spec) == 1:
            if final_size is not None:
                raise TensorforceError.invalid(
                    name='AutoNetwork', argument='final_size', condition='input size = 1'
                )
            if final_depth is not None and final_depth != 1:
                raise TensorforceError.invalid(
                    name='AutoNetwork', argument='final_depth', condition='input size = 1'
                )
        # One layer tower is generated per state component, so very many
        # components blow up the graph size.
        if len(inputs_spec) > 8:
            logging.warning("Large number of state components {} which may cause poor performance, "
                            "consider merging components where possible.".format(len(inputs_spec)))
        # This network always produces exactly one embedding output.
        if outputs is not None:
            raise TensorforceError.invalid(
                name='policy', argument='single_output', condition='AutoNetwork'
            )
        if final_size is None:
            final_size = size
        if final_depth is None:
            final_depth = 0
        # Build one sub-stack of layer specifications per state component.
        layers = list()
        for input_name, spec in inputs_spec.items():
            # Single input: build directly into the top-level layer list.
            if len(inputs_spec) == 1:
                state_layers = layers
            else:
                state_layers = list()
                layers.append(state_layers)
            # Retrieve input state
            if input_name is None:
                prefix = ''
            else:
                prefix = input_name + '_'
            state_layers.append(dict(
                type='retrieve', name=(prefix + 'retrieve'), tensors=(input_name,)
            ))
            # Embed bool and int states
            requires_embedding = (spec.type == 'bool' or spec.type == 'int')
            if spec.type == 'int' and spec.num_values is None:
                if input_name is None:
                    raise TensorforceError.required(
                        name='state', argument='num_values', condition='state type is int'
                    )
                else:
                    raise TensorforceError.required(
                        name=(input_name + ' state'), argument='num_values',
                        condition='state type is int'
                    )
            if requires_embedding:
                state_layers.append(dict(
                    type='embedding', name=(prefix + 'embedding'), size=size
                ))
            # Shape-specific layer type
            # NOTE: requires_embedding (a bool) is used as 0/1 arithmetic here --
            # an embedding adds one dimension, shifting which layer type matches.
            if spec.rank == 1 - requires_embedding:
                layer = 'dense'
            elif spec.rank == 2 - requires_embedding:
                layer = 'conv1d'
            elif spec.rank == 3 - requires_embedding:
                layer = 'conv2d'
            elif spec.rank == 0:
                state_layers.append(dict(type='flatten', name=(prefix + 'flatten')))
                layer = 'dense'
            else:
                raise TensorforceError.value(
                    name='AutoNetwork', argument='input rank', value=spec.rank, hint='>= 3'
                )
            # Repeat layer according to depth (one less if embedded)
            for n in range(depth - requires_embedding):
                state_layers.append(dict(
                    type=layer, name='{}{}{}'.format(prefix, layer, n), size=size
                ))
            # Max pool if rank greater than one
            if spec.rank > 1 - requires_embedding:
                state_layers.append(dict(
                    type='pooling', name=(prefix + 'pooling'), reduction='max'
                ))
            # Register state-specific embedding so the final stage can retrieve it.
            if input_name is not None:
                state_layers.append(dict(
                    type='register', name=(prefix + 'register'), tensor=(input_name + '-embedding')
                ))
        # Final combined layers
        if len(inputs_spec) == 1:
            final_layers = layers
        else:
            final_layers = list()
            layers.append(final_layers)
            # Retrieve state-specific embeddings and concatenate them.
            final_layers.append(dict(
                type='retrieve', name='retrieve',
                tensors=tuple(input_name + '-embedding' for input_name in inputs_spec),
                aggregation='concat'
            ))
        # Repeat layer according to depth
        for n in range(final_depth):
            final_layers.append(dict(type='dense', name=('dense' + str(n)), size=final_size))
        # Optional LSTM with truncated-BPTT horizon 'rnn' as the last layer.
        if rnn is not None and rnn is not False:
            final_layers.append(dict(type='lstm', name='lstm', size=final_size, horizon=rnn))
        super().__init__(
            layers=layers, device=device, l2_regularization=l2_regularization, name=name,
            inputs_spec=inputs_spec, outputs=outputs
        )
|
[
"[email protected]"
] | |
4aadfcd20a040ed6e5cbe84affd38b0320fa6928
|
e12385c85e41d98bc3104f3e4dde22025a0b6365
|
/m5stack-u105/examples/test_saw.py
|
f4472bdcdd32643ae248bca4c8a0e8d2eb67553a
|
[] |
no_license
|
mchobby/esp8266-upy
|
6ee046856ec03c900ebde594967dd50c5f0a8e21
|
75184da49e8578315a26bc42d9c3816ae5d5afe8
|
refs/heads/master
| 2023-08-04T15:11:03.031121 | 2023-07-27T15:43:08 | 2023-07-27T15:43:08 | 72,998,023 | 47 | 30 | null | 2021-06-20T16:12:59 | 2016-11-06T15:00:57 |
Python
|
UTF-8
|
Python
| false | false | 515 |
py
|
"""
Test the MicroPython driver for M5Stack U105, DDS unit (AD9833), I2C grove.
Set SAWTOOTH signal output (this have fixed frequency)
* Author(s):
30 may 2021: Meurisse D. (shop.mchobby.be) - Initial Writing
"""
from machine import I2C
from mdds import *
from time import sleep
# Pico - I2C(0) - sda=GP8, scl=GP9
i2c = I2C(0)
# M5Stack core
# i2c = I2C( sda=Pin(21), scl=Pin(22) )
dds = DDS(i2c)
# Generates the SAW TOOTH signal at 55.9Hz (fixed frequency)
dds.quick_out( SAWTOOTH_MODE, freq=1, phase=0 )
|
[
"[email protected]"
] | |
d04e9de9a1c3e8805f81d233500ea425bbc2a27d
|
55646e56d6bb31ae0913eb71879f49efdfaf904f
|
/scribbli/profiles/constants.py
|
1dc3127b96a3d1fc9dcffb567188491d639a6e3d
|
[] |
no_license
|
jacobbridges/scribbli-mvp
|
2d8851aba018b54431af0eb8cb030d02d35f173f
|
c24f2f1a2a19480a6b5f69ffbcccf0269d156140
|
refs/heads/master
| 2023-02-22T11:37:12.239845 | 2021-06-17T04:10:30 | 2021-06-17T04:10:30 | 156,637,826 | 0 | 0 | null | 2023-02-15T20:18:03 | 2018-11-08T02:20:53 |
Python
|
UTF-8
|
Python
| false | false | 328 |
py
|
class RoleChoices(object):
    """Integer codes for the user-role hierarchy, ordered by privilege."""

    Guest = 0
    User = 1
    Moderator = 2
    Admin = 3

    @staticmethod
    def as_choices():
        """Return ``(value, label)`` pairs for every role, suitable for choice fields."""
        labels = ("Guest", "User", "Moderator", "Admin")
        return tuple((getattr(RoleChoices, label), label) for label in labels)
|
[
"[email protected]"
] | |
2bfe0ce34f0883cb0a19b9e1ddc4a134e88153f8
|
bbea9b1f64284c9ca95d9f72f35e06aa39522c67
|
/Scripts/plot_MS-FIGURE_4b_v2.py
|
179017277abe54d6e9bf27d6a766bc9dfc223aaa
|
[
"MIT"
] |
permissive
|
zmlabe/ModelBiasesANN
|
1e70c150bd8897fa5fb822daf8ffad0ee581c5f1
|
cece4a4b01ca1950f73c4d23fb379458778c221e
|
refs/heads/main
| 2023-05-23T06:05:23.826345 | 2022-07-22T18:36:27 | 2022-07-22T18:36:27 | 339,145,668 | 7 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,625 |
py
|
"""
Script to plot figure 4b
Author : Zachary M. Labe
Date : 12 July 2021
Version : 2
"""
### Import packages
import sys
import matplotlib.pyplot as plt
import matplotlib.colors as c
import numpy as np
import palettable.cubehelix as cm
import palettable.scientific.sequential as sss
import palettable.cartocolors.qualitative as cc
import cmocean as cmocean
import cmasher as cmr
import calc_Utilities as UT
import scipy.stats as sts
### Plotting defaults (LaTeX text rendering; requires a TeX installation)
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
### Set parameters (hard-coded local paths of the original author's machine)
directorydata = '/Users/zlabe/Documents/Research/ModelComparison/Data/MSFigures_v2/'
directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/MSFigures_v2/'
variablesall = ['T2M']
yearsall = np.arange(1950,2019+1,1)
allDataLabels = ['CanESM2','MPI','CSIRO-MK3.6','EC-EARTH','GFDL-CM3','GFDL-ESM2M','LENS','MM-Mean']
letters = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p"]
### Read in frequency data (global-domain and lower-Arctic label counts)
globef = np.load(directorydata + 'CountingIterations_%s.npz' % ('SMILEGlobe'))
arcticf = np.load(directorydata + 'CountingIterations_%s.npz' % ('LowerArctic'))
# 'mmean' presumably holds the multi-model-mean label frequency and 'gfdlcm'
# the GFDL-CM3 label frequency per year -- TODO confirm against the npz writer.
gmeanff = globef['mmean']
ggfdlff = globef['gfdlcm']
ameanff = arcticf['mmean']
agfdlff = arcticf['gfdlcm']
###############################################################################
###############################################################################
###############################################################################
def adjust_spines(ax, spines):
    """Show only the named spines on *ax*, offset 5 points outward; hide the rest.

    Tick marks are kept only on the axes whose spine ('left'/'bottom') survives.
    """
    keep = set(spines)
    for position, spine in ax.spines.items():
        if position in keep:
            spine.set_position(('outward', 5))
        else:
            spine.set_color('none')
    if 'left' in keep:
        ax.yaxis.set_ticks_position('left')
    else:
        ax.yaxis.set_ticks([])
    if 'bottom' in keep:
        ax.xaxis.set_ticks_position('bottom')
    else:
        ax.xaxis.set_ticks([])
### Begin plot: two stacked panels, (a) global domain and (b) Arctic domain.
fig = plt.figure(figsize=(8,6))
ax = plt.subplot(211)
adjust_spines(ax, ['left', 'bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_color('dimgrey')
ax.spines['bottom'].set_color('dimgrey')
ax.spines['left'].set_linewidth(2)
ax.spines['bottom'].set_linewidth(2)
ax.tick_params('both',length=4,width=2,which='major',color='dimgrey')
ax.yaxis.grid(zorder=1,color='darkgrey',alpha=0.35,clip_on=False,linewidth=0.5)
x=np.arange(1950,2019+1,1)
# Panel (a): multi-model-mean label frequency over the global domain.
plt.plot(yearsall,gmeanff,linewidth=5,color='k',alpha=1,zorder=3,clip_on=False)
plt.yticks(np.arange(0,101,10),map(str,np.round(np.arange(0,101,10),2)),size=9)
plt.xticks(np.arange(1950,2030+1,10),map(str,np.arange(1950,2030+1,10)),size=9)
plt.xlim([1950,2020])
plt.ylim([0,100])
plt.text(1949,104,r'\textbf{[a]}',color='dimgrey',
         fontsize=7,ha='center')
plt.text(2022,50,r'\textbf{GLOBAL}',color='dimgrey',fontsize=25,rotation=270,
         ha='center',va='center')
plt.ylabel(r'\textbf{Frequency of Label}',color='k',fontsize=10)
###############################################################################
ax = plt.subplot(212)
adjust_spines(ax, ['left', 'bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_color('dimgrey')
ax.spines['bottom'].set_color('dimgrey')
ax.spines['left'].set_linewidth(2)
ax.spines['bottom'].set_linewidth(2)
ax.tick_params('both',length=4,width=2,which='major',color='dimgrey')
ax.yaxis.grid(zorder=1,color='darkgrey',alpha=0.35,clip_on=False,linewidth=0.5)
x=np.arange(1950,2019+1,1)
# Panel (b): Arctic domain, with the GFDL-CM3 curve overlaid for comparison.
plt.plot(yearsall,ameanff,linewidth=5,color='k',alpha=1,zorder=3,clip_on=False,label=r'\textbf{MM-Mean}')
plt.plot(yearsall,agfdlff,linewidth=4,color=plt.cm.CMRmap(0.6),alpha=1,zorder=3,clip_on=False,label=r'\textbf{GFDL-CM3}',
         linestyle='--',dashes=(1,0.3))
plt.yticks(np.arange(0,101,10),map(str,np.round(np.arange(0,101,10),2)),size=9)
plt.xticks(np.arange(1950,2030+1,10),map(str,np.arange(1950,2030+1,10)),size=9)
plt.xlim([1950,2020])
plt.ylim([0,100])
plt.text(1949,104,r'\textbf{[b]}',color='dimgrey',
         fontsize=7,ha='center')
leg = plt.legend(shadow=False,fontsize=11,loc='upper center',
             bbox_to_anchor=(0.5,1.22),fancybox=True,ncol=4,frameon=False,
             handlelength=5,handletextpad=1)
plt.ylabel(r'\textbf{Frequency of Label}',color='k',fontsize=10)
plt.text(2022,50,r'\textbf{ARCTIC}',color='dimgrey',fontsize=25,rotation=270,
         ha='center',va='center')
plt.tight_layout()
plt.subplots_adjust(hspace=0.4)
plt.savefig(directoryfigure + 'MS-Figure_4b_v2_Poster.png',dpi=1000)
|
[
"[email protected]"
] | |
6db9b78246aef370efc8ef609a33b1dadab124a8
|
53e58c213232e02250e64f48b97403ca86cd02f9
|
/18/mc/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysisM4500_R_0-7.py
|
fc7eba74e67b0348603262470fab519845902f68
|
[] |
no_license
|
xdlyu/fullRunII_ntuple_102X
|
32e79c3bbc704cfaa00c67ab5124d40627fdacaf
|
d420b83eb9626a8ff1c79af5d34779cb805d57d8
|
refs/heads/master
| 2020-12-23T15:39:35.938678 | 2020-05-01T14:41:38 | 2020-05-01T14:41:38 | 237,192,426 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,160 |
py
|
# CRAB3 job configuration: runs 'analysis_sig.py' over the M4500 R=0.7
# signal MiniAOD sample and stages output to the STEAM group area at CERN.
from WMCore.Configuration import Configuration
name = 'WWW'
steam_dir = 'xulyu'
config = Configuration()
config.section_("General")
config.General.requestName = 'M4500_R0-7_off'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
# Jet-energy-correction text files shipped alongside each job (Autumn18 V19 MC).
config.JobType.inputFiles = ['Autumn18_V19_MC_L1FastJet_AK4PFchs.txt','Autumn18_V19_MC_L2Relative_AK4PFchs.txt','Autumn18_V19_MC_L3Absolute_AK4PFchs.txt','Autumn18_V19_MC_L1FastJet_AK8PFchs.txt','Autumn18_V19_MC_L2Relative_AK8PFchs.txt','Autumn18_V19_MC_L3Absolute_AK8PFchs.txt','Autumn18_V19_MC_L1FastJet_AK8PFPuppi.txt','Autumn18_V19_MC_L2Relative_AK8PFPuppi.txt','Autumn18_V19_MC_L3Absolute_AK8PFPuppi.txt','Autumn18_V19_MC_L1FastJet_AK4PFPuppi.txt','Autumn18_V19_MC_L2Relative_AK4PFPuppi.txt','Autumn18_V19_MC_L3Absolute_AK4PFPuppi.txt']
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis_sig.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/WkkToWRadionToWWW_M4500-R0-7_TuneCP5_13TeV-madgraph/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM'
#config.Data.inputDBS = 'global'
config.Data.inputDBS = 'global'
# Split by input file, 20 files per job, over the whole dataset.
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob =20
config.Data.totalUnits = -1
config.Data.publication = False
config.Data.outLFNDirBase = '/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/' + steam_dir + '/' + name + '/'
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'M4500_R0-7_off'
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
|
[
"[email protected]"
] | |
87934c23053f09c259a1ce2e6270ea821fc90da6
|
520baeba0e86b0bab3c5590f40b868ca4306dc7e
|
/hazelcast/protocol/codec/count_down_latch_get_count_codec.py
|
345de04de3a44161676bfb0d96b360bac2e606ad
|
[
"Apache-2.0"
] |
permissive
|
mustafaiman/hazelcast-python-client
|
69f27367162045bbfa4e66e7adadcfd254dfab21
|
85f29f975c91520075d0461327e38ab93c2e78c2
|
refs/heads/master
| 2021-01-18T04:23:10.740371 | 2015-12-11T14:26:06 | 2015-12-11T14:26:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,038 |
py
|
from hazelcast.serialization.data import *
from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.custom_codec import *
from hazelcast.protocol.codec.count_down_latch_message_type import *
# Wire-protocol identifiers for the CountDownLatch.getCount operation.
REQUEST_TYPE = COUNTDOWNLATCH_GETCOUNT
RESPONSE_TYPE = 102  # response frame type carrying a single int payload
RETRYABLE = True  # read-only operation, safe to retry on another member
def calculate_size(name):
    """Calculate the request payload size in bytes (just the encoded name)."""
    return calculate_size_str(name)
def encode_request(name):
    """Encode a get-count request for latch *name* into a ClientMessage."""
    message = ClientMessage(payload_size=calculate_size(name))
    message.set_message_type(REQUEST_TYPE)
    message.set_retryable(RETRYABLE)
    message.append_str(name)
    # Frame length must be recomputed after the payload is appended.
    message.update_frame_length()
    return message
def decode_response(client_message):
    """Decode the latch count (a single int) from a response ClientMessage."""
    return dict(response=client_message.read_int())
|
[
"[email protected]"
] | |
fd8a249b1f44b14a3c11896e5a12e1c86a1988e9
|
372a0eb8d3be3d40b9dfb5cf45a7df2149d2dd0d
|
/charles/Week 07/lab08/lab08.py
|
198fffad72e36dfcdfe4b7505ec51e6fe007c177
|
[] |
no_license
|
charlesfrye/cs61a-summer2015
|
5d14b679e5bea53cfa26c2a6a86720e8e77c322c
|
1f5c0fbf5dce5d1322285595ca964493d9adbdfe
|
refs/heads/master
| 2016-08-07T06:06:09.335913 | 2015-08-21T00:33:25 | 2015-08-21T00:33:25 | 38,509,126 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,460 |
py
|
## Linked Lists and Sets ##
# Linked Lists
class Link:
    """A singly linked list built from (first, rest) pairs.

    >>> s = Link(1, Link(2, Link(3, Link(4))))
    >>> len(s)
    4
    >>> s[2]
    3
    >>> s
    Link(1, Link(2, Link(3, Link(4))))
    """
    empty = ()

    def __init__(self, first, rest=empty):
        assert rest is Link.empty or isinstance(rest, Link)
        self.first = first
        self.rest = rest

    def __getitem__(self, i):
        # Recursive indexing: out-of-range i eventually indexes the empty
        # tuple, which raises, matching list-style failure.
        return self.first if i == 0 else self.rest[i - 1]

    def __len__(self):
        count, node = 0, self
        while node is not Link.empty:
            count, node = count + 1, node.rest
        return count

    def __repr__(self):
        suffix = '' if self.rest is Link.empty else ', ' + repr(self.rest)
        return 'Link({0}{1})'.format(repr(self.first), suffix)
def slice_link(link, start, end):
    """Slices a Link from start to end (as with a normal Python list).

    >>> link = Link(3, Link(1, Link(4, Link(1, Link(5, Link(9))))))
    >>> slice_link(link, 1, 4)
    Link(1, Link(4, Link(1)))
    """
    if start == end:
        return Link.empty
    # BUG FIX: the recursion previously restarted at index 0 of link.rest
    # (slice_link(link.rest, 0, end-1-start)), which duplicated link[start]
    # and shifted the remaining window, e.g. yielding 1,1,4 instead of the
    # doctest's 1,4,1.  Keep walking the same window instead: drop one node
    # and shrink the window by one.
    return Link(link[start], slice_link(link.rest, start, end - 1))
# Sets
def union(s1, s2):
    """Returns the union of two sets.

    >>> r = {0, 6, 6}
    >>> s = {1, 2, 3, 4}
    >>> t = union(s, {1, 6})
    >>> t
    {1, 2, 3, 4, 6}
    >>> union(r, t)
    {0, 1, 2, 3, 4, 6}
    """
    # Copy both inputs into a fresh set; neither argument is mutated.
    return set(s1) | set(s2)
def intersection(s1, s2):
    """Returns the intersection of two sets.

    >>> r = {0, 1, 4, 0}
    >>> s = {1, 2, 3, 4}
    >>> t = intersection(s, {3, 4, 2})
    >>> t
    {2, 3, 4}
    >>> intersection(r, t)
    {4}
    """
    # Keep exactly the members of s1 that s2 also contains.
    return {element for element in s1 if element in s2}
def extra_elem(a,b):
    """B contains every element in A, and has one additional member, find
    the additional member.

    >>> extra_elem(['dog', 'cat', 'monkey'], ['dog', 'cat', 'monkey', 'giraffe'])
    'giraffe'
    >>> extra_elem([1, 2, 3, 4, 5], [1, 2, 3, 4, 5, 6])
    6
    """
    # Set difference leaves exactly the one extra member; pop() extracts it.
    return (set(b) - set(a)).pop()
def find_duplicates(lst):
    """Returns True if lst has any duplicates and False if it does not.

    >>> find_duplicates([1, 2, 3, 4, 5])
    False
    >>> find_duplicates([1, 2, 3, 4, 2])
    True
    """
    # Short-circuit on the first repeated element.
    seen = set()
    for item in lst:
        if item in seen:
            return True
        seen.add(item)
    return False
|
[
"[email protected]"
] | |
95303882335933bf48710ea4c6a92ec77ab6fa8b
|
71748e7379548d75fcf6713f0e6d66d6db1c2bbd
|
/4AL16IS051_SHETTY _TANVI/Jaishma Ma'am/Challenge 1/p1.py
|
94f3b31275c157f72cf5617f380da3fcadaba83b
|
[] |
no_license
|
alvas-education-foundation/ISE_4th_Year_Coding_challenge
|
fcf78c755cc924bea7e905e67c1e30385cf5af0b
|
96cfc92d679576dab15ef7d1cb6773f2082abfb2
|
refs/heads/master
| 2022-11-17T09:19:23.851817 | 2020-07-21T09:59:40 | 2020-07-21T09:59:40 | 265,195,858 | 5 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 233 |
py
|
# FIX: stripped the leftover unified-diff header ("@@ -0,0 +1,26 @@") and the
# stray "1)" line that made this file a SyntaxError; logic is unchanged.
# Classic "Weird / Not Weird" exercise: read an integer n from stdin and
# classify it (odd -> Weird; even in [2,5] -> Not Weird; even in [6,20] ->
# Weird; even above 20 -> Not Weird).
x = input("")
n = int(x)
if n % 2 == 1:
    print("Weird")
elif n % 2 == 0 and 2 <= n <= 5:
    print("Not Weird")
elif n % 2 == 0 and 6 <= n <= 20:
    print("Weird")
else:
    print("Not Weird")
[
"[email protected]"
] | |
6b17dc10db7ef000a03c12afdf0d7cd7b9821e29
|
4904acd900496b4883c2f5b4aa6b45d1ef6654c0
|
/graphgallery/datasets/tu_dataset.py
|
a2979a79643bdb0f96d5d8b81ba2af4af7188b33
|
[
"MIT"
] |
permissive
|
blindSpoter01/GraphGallery
|
aee039edd759be9272d123463b0ad73a57e561c7
|
e41caeb32a07da95364f15b85cad527a67763255
|
refs/heads/master
| 2023-06-17T11:42:27.169751 | 2021-07-15T03:07:39 | 2021-07-15T03:07:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,887 |
py
|
import os
import glob
import requests
import os.path as osp
import numpy as np
import pickle as pkl
import pandas as pd
from urllib.error import URLError
from typing import Optional, List
from .in_memory_dataset import InMemoryDataset
from ..data.edge_graph import EdgeGraph
from ..data.io import makedirs, extractall, remove
_DATASET_URL = 'https://ls11-www.cs.tu-dortmund.de/people/morris/graphkerneldatasets'
_DATASET_CLEAN_URL = 'https://raw.githubusercontent.com/nd7141/graph_datasets/master/datasets'
class TUDataset(InMemoryDataset):
    r"""A variety of graph kernel benchmark datasets, *.e.g.* "IMDB-BINARY",
    "REDDIT-BINARY" or "PROTEINS", collected from the `TU Dortmund University
    <https://chrsmrrs.github.io/datasets>`_.
    In addition, this dataset wrapper provides `cleaned dataset versions
    <https://github.com/nd7141/graph_datasets>`_ as motivated by the
    `"Understanding Isomorphism Bias in Graph Data Sets"
    <https://arxiv.org/abs/1910.12091>`_ paper, containing only non-isomorphic
    graphs.
    """
    def __init__(self,
                 name,
                 root=None,
                 *,
                 transform=None,
                 verbose=True,
                 url=None,
                 remove_download=True):
        # A trailing '_clean' suffix selects the de-duplicated mirror; the
        # suffix is stripped so directory/file names stay the same either way.
        if name.endswith('_clean'):
            name = name[:-6]
            self._url = _DATASET_CLEAN_URL
        else:
            self._url = _DATASET_URL
        super().__init__(name=name, root=root,
                         transform=transform,
                         verbose=verbose, url=url,
                         remove_download=remove_download)
    @staticmethod
    def available_datasets():
        """Scrape the TU Dortmund index page for the list of dataset names."""
        try:
            return [
                d[:-4] for d in pd.read_html(_DATASET_URL)
                [0].Name[2:-1].values.tolist()
            ]
        except URLError:
            # No internet, don't panic
            print('No connection. See {}'.format(_DATASET_URL))
            return []
    def _download(self):
        """Download the dataset zip, extract it, and optionally delete the zip."""
        req = requests.get(self.url)
        # The server answers 404 for unknown dataset names.
        if req.status_code == 404:
            raise ValueError(
                f"Unknown dataset {self.name}. See '{self.__class__.__name__}.available_datasets()'"
                " for a list of available datasets.")
        makedirs(self.download_dir)
        with open(self.download_paths[0], 'wb') as f:
            f.write(req.content)
        extractall(self.download_paths, osp.split(self.download_dir)[0])
        if self.remove_download:
            remove(self.download_paths)
    def _process(self):
        """Parse the extracted TU text files into an EdgeGraph and cache it.

        The TU format uses 1-based indices; edge endpoints and graph
        indicators are shifted to 0-based below.  Optional files (node/edge/
        graph attributes and labels) are loaded only when present.
        """
        folder = self.download_dir
        prefix = self.name
        files = glob.glob(osp.join(folder, f'{prefix}_*.txt'))
        # File-name suffixes after '<prefix>_', e.g. 'A', 'node_labels', ...
        names = [f.split(os.sep)[-1][len(prefix) + 1:-4] for f in files]
        edge_index = genfromtxt(osp.join(folder, prefix + '_A.txt'),
                                dtype=np.int64).T - 1
        node_graph_label = genfromtxt(osp.join(folder, prefix + '_graph_indicator.txt'),
                                      dtype=np.int64) - 1
        # Each edge belongs to the graph of its source node.
        edge_graph_label = node_graph_label[edge_index[0]]
        node_attr = node_label = None
        if 'node_attributes' in names:
            node_attr = genfromtxt(osp.join(folder,
                                            prefix + '_node_attributes.txt'),
                                   dtype=np.float32)
        if 'node_labels' in names:
            node_label = genfromtxt(osp.join(folder,
                                             prefix + '_node_labels.txt'),
                                    dtype=np.int64)
            node_label = node_label - node_label.min(0)
        edge_attr = edge_label = None
        if 'edge_attributes' in names:
            edge_attr = genfromtxt(osp.join(folder,
                                            prefix + '_edge_attributes.txt'),
                                   dtype=np.float32)
        if 'edge_labels' in names:
            edge_label = genfromtxt(osp.join(folder,
                                             prefix + '_edge_labels.txt'),
                                    dtype=np.int64)
            edge_label = edge_label - edge_label.min(0)
        graph_attr = graph_label = None
        if 'graph_attributes' in names:  # Regression problem.
            graph_attr = np.genfromtxt(osp.join(
                folder, prefix + '_graph_attributes.txt'),
                dtype=np.float32)
        if 'graph_labels' in names:  # Classification problem.
            graph_label = np.genfromtxt(osp.join(folder,
                                                 prefix + '_graph_labels.txt'),
                                        dtype=np.int64)
            # Remap arbitrary label values to contiguous 0..C-1 class indices.
            _, graph_label = np.unique(graph_label, return_inverse=True)
        graph = EdgeGraph(edge_index,
                          edge_attr=edge_attr,
                          edge_label=edge_label,
                          edge_graph_label=edge_graph_label,
                          node_attr=node_attr,
                          node_label=node_label,
                          node_graph_label=node_graph_label,
                          graph_attr=graph_attr,
                          graph_label=graph_label)
        cache = {'graph': graph}
        with open(self.process_path, 'wb') as f:
            pkl.dump(cache, f)
        return cache
    @property
    def download_dir(self):
        # All TU datasets live under '<root>/TU/<name>'.
        return osp.join(self.root, "TU", self.name)
    def split_graphs(self,
                     train_size=None,
                     val_size=None,
                     test_size=None,
                     split_by=None,
                     random_state: Optional[int] = None):
        # Graph-level splitting is not supported for TU datasets (yet).
        raise NotImplementedError
    @property
    def url(self) -> str:
        return '{}/{}.zip'.format(self._url, self.name)
    @property
    def process_filename(self):
        return f'{self.name}.pkl'
    @property
    def raw_filenames(self) -> List[str]:
        # Only the mandatory files are checked for; optional ones may exist too.
        names = ['A', 'graph_indicator']  # and more
        return ['{}_{}.txt'.format(self.name, name) for name in names]
    @property
    def download_paths(self):
        return [osp.join(self.download_dir, self.name + '.zip')]
    @property
    def raw_paths(self) -> List[str]:
        return [
            osp.join(self.download_dir, raw_filename)
            for raw_filename in self.raw_filenames
        ]
def genfromtxt(path, sep=',', start=0, end=None, dtype=None, device=None):
    """Load a delimited text file as a squeezed numpy array.

    Args:
        path: Path of the text file to read.
        sep: Column delimiter (default ``','``).
        start, end: Accepted for backward compatibility but currently
            IGNORED -- the previous pure-Python implementation that sliced
            columns ``[start:end]`` was replaced by ``np.loadtxt`` (the old
            code is gone; this keeps its exact current behavior).
        dtype: Target dtype the loaded array is cast to.
        device: Accepted for backward compatibility; unused.

    Returns:
        ``np.ndarray`` with singleton dimensions squeezed out.
    """
    return np.loadtxt(path, delimiter=sep).astype(dtype).squeeze()
|
[
"[email protected]"
] | |
c4acc48e7f4000ebbf4268909ad39fdf1dab8ec8
|
ae11eda73ad0a61f8f7f894314bd9aa40798b50a
|
/MyAnalysis/IsolationTools/python/muonDirectionalPFIsolations_cff.py
|
8d48cf05750f63c872425cdbbf934b676a67f71b
|
[] |
no_license
|
hbakhshi/NTupleProducer
|
087a7286f7352e9f6c517d257d7f195280db058d
|
eec377339008d2139128059d7127f9a2184c080c
|
refs/heads/master
| 2021-01-22T14:32:44.891691 | 2014-06-10T12:48:12 | 2014-06-10T12:48:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,644 |
py
|
import FWCore.ParameterSet.Config as cms
import MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi
# The original file spelled out 21 nearly identical producers by hand
# (3 particle-flow flavours x 7 cone sizes), a copy-paste maintenance hazard.
# Generate them instead.  The resulting module-level names
# (muonDirPFIsoChHad01 ... muonDirPFIsoPhoton07) and their settings are
# identical to the hand-written originals, so downstream configs that do
# `process.load(...)` and reference these names are unaffected.
_PF_TYPE_CODES = (
    ('ChHad', 1),   # charged hadrons (PFCandidate type code 1)
    ('NHad', 5),    # neutral hadrons (type code 5)
    ('Photon', 4),  # photons (type code 4)
)
for _dr in (0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7):
    # Cone-size suffix used in the producer name, e.g. 0.3 -> '03'.
    _suffix = '%02d' % int(round(_dr * 10))
    for _label, _pf_type in _PF_TYPE_CODES:
        _prod = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
        _prod.pfTypes = cms.untracked.vint32(_pf_type)
        _prod.deltaR = _dr
        _prod.directional = True
        globals()['muonDirPFIso%s%s' % (_label, _suffix)] = _prod
# Clean up loop temporaries so they do not linger as module attributes.
del _PF_TYPE_CODES, _dr, _suffix, _label, _pf_type, _prod
|
[
"[email protected]"
] | |
a9625efecd45a7fb3e4a24d22d5c9bdcebcb29c7
|
fe3265b72e691c6df8ecd936c25b6d48ac33b59a
|
/homeassistant/components/homekit/type_triggers.py
|
b239d67877c7d22f4ee6b162d2d1ac3e503fea4d
|
[
"Apache-2.0"
] |
permissive
|
bdraco/home-assistant
|
dcaf76c0967783a08eec30ce704e5e9603a2f0ca
|
bfa315be51371a1b63e04342a0b275a57ae148bd
|
refs/heads/dev
| 2023-08-16T10:39:15.479821 | 2023-02-21T22:38:50 | 2023-02-21T22:38:50 | 218,684,806 | 13 | 7 |
Apache-2.0
| 2023-02-21T23:40:57 | 2019-10-31T04:33:09 |
Python
|
UTF-8
|
Python
| false | false | 4,485 |
py
|
"""Class to hold all sensor accessories."""
from __future__ import annotations
import logging
from typing import Any
from pyhap.const import CATEGORY_SENSOR
from homeassistant.core import CALLBACK_TYPE, Context
from homeassistant.helpers import entity_registry
from homeassistant.helpers.trigger import async_initialize_triggers
from .accessories import TYPES, HomeAccessory
from .aidmanager import get_system_unique_id
from .const import (
CHAR_NAME,
CHAR_PROGRAMMABLE_SWITCH_EVENT,
CHAR_SERVICE_LABEL_INDEX,
CHAR_SERVICE_LABEL_NAMESPACE,
SERV_SERVICE_LABEL,
SERV_STATELESS_PROGRAMMABLE_SWITCH,
)
from .util import cleanup_name_for_homekit
_LOGGER = logging.getLogger(__name__)
@TYPES.register("DeviceTriggerAccessory")
class DeviceTriggerAccessory(HomeAccessory):
    """Generate a Programmable switch.

    Exposes each device trigger as a HomeKit stateless programmable switch;
    firing the underlying Home Assistant trigger presses the corresponding
    button.
    """
    def __init__(
        self,
        *args: Any,
        device_triggers: list[dict[str, Any]] | None = None,
        device_id: str | None = None,
    ) -> None:
        """Initialize a Programmable switch accessory object."""
        super().__init__(*args, category=CATEGORY_SENSOR, device_id=device_id)
        assert device_triggers is not None
        self._device_triggers = device_triggers
        # Unsubscribe callback returned by async_initialize_triggers; set in run().
        self._remove_triggers: CALLBACK_TYPE | None = None
        # One programmable-switch-event characteristic per trigger, indexed
        # in the same order as device_triggers (used by async_trigger).
        self.triggers = []
        # NOTE(review): duplicate assert -- the identical assert above already
        # guarantees this.
        assert device_triggers is not None
        ent_reg = entity_registry.async_get(self.hass)
        for idx, trigger in enumerate(device_triggers):
            type_: str = trigger["type"]
            subtype: str | None = trigger.get("subtype")
            # Stable per-trigger service id: type + subtype, plus the entity's
            # system unique id when the trigger is bound to an entity.
            unique_id = f'{type_}-{subtype or ""}'
            if (entity_id := trigger.get("entity_id")) and (
                entry := ent_reg.async_get(entity_id)
            ):
                unique_id += f"-entity_unique_id:{get_system_unique_id(entry)}"
            # Human-readable button name: "<entity name> <Type> <Subtype>".
            trigger_name_parts = []
            if entity_id and (state := self.hass.states.get(entity_id)):
                trigger_name_parts.append(state.name)
            trigger_name_parts.append(type_.replace("_", " ").title())
            if subtype:
                trigger_name_parts.append(subtype.replace("_", " ").title())
            trigger_name = cleanup_name_for_homekit(" ".join(trigger_name_parts))
            serv_stateless_switch = self.add_preload_service(
                SERV_STATELESS_PROGRAMMABLE_SWITCH,
                [CHAR_NAME, CHAR_SERVICE_LABEL_INDEX],
                unique_id=unique_id,
            )
            # Only the single-press event (value 0) is supported.
            self.triggers.append(
                serv_stateless_switch.configure_char(
                    CHAR_PROGRAMMABLE_SWITCH_EVENT,
                    value=0,
                    valid_values={"Trigger": 0},
                )
            )
            serv_stateless_switch.configure_char(CHAR_NAME, value=trigger_name)
            # Service label indices are 1-based in HomeKit.
            serv_stateless_switch.configure_char(
                CHAR_SERVICE_LABEL_INDEX, value=idx + 1
            )
            serv_service_label = self.add_preload_service(
                SERV_SERVICE_LABEL, unique_id=unique_id
            )
            serv_service_label.configure_char(CHAR_SERVICE_LABEL_NAMESPACE, value=1)
            serv_stateless_switch.add_linked_service(serv_service_label)
    async def async_trigger(
        self,
        run_variables: dict[str, Any],
        context: Context | None = None,
        skip_condition: bool = False,
    ) -> None:
        """Trigger button press.

        This method is a coroutine.
        """
        reason = ""
        if "trigger" in run_variables and "description" in run_variables["trigger"]:
            reason = f' by {run_variables["trigger"]["description"]}'
        _LOGGER.debug("Button triggered%s - %s", reason, run_variables)
        # 'idx' identifies which of our triggers fired; press that button.
        idx = int(run_variables["trigger"]["idx"])
        self.triggers[idx].set_value(0)
    # Attach the trigger using the helper in async run
    # and detach it in async stop
    async def run(self) -> None:
        """Handle accessory driver started event."""
        self._remove_triggers = await async_initialize_triggers(
            self.hass,
            self._device_triggers,
            self.async_trigger,
            "homekit",
            self.display_name,
            _LOGGER.log,
        )
    async def stop(self) -> None:
        """Handle accessory driver stop event."""
        if self._remove_triggers:
            self._remove_triggers()
    @property
    def available(self) -> bool:
        """Return available."""
        # Device triggers have no backing entity state, so always available.
        return True
|
[
"[email protected]"
] | |
5f3f39608a38d86ff22999affdb2aa8d25fb22ae
|
e3eead40e93fdf5186269536edefab4f08e9a5a2
|
/LeetCode/75-sort_colors.py
|
f9126b00d4c74a0e97e76d064217b730e50cc3d7
|
[] |
no_license
|
davll/practical-algorithms
|
bbc930b42363cae00ce39e8a686854c19131d334
|
0e35e4cc87bd41144b8e34302aafe776fec1b356
|
refs/heads/master
| 2021-08-22T13:12:34.555074 | 2020-03-28T08:56:13 | 2020-03-28T08:56:13 | 147,224,029 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 883 |
py
|
# https://leetcode.com/problems/sort-colors/
def sort_colours_v1(nums):
    """Sort a list of 0/1/2 values in place using counting sort.

    Two passes: count each colour, then overwrite ``nums`` with the counted
    runs. O(n) time, O(1) extra space.
    """
    count = [0] * 3
    for x in nums:
        count[x] += 1
    # NOTE: the original printed ``count`` here — leftover debug output
    # removed so the helper does not pollute stdout.
    start = 0
    for k in range(3):
        for i in range(count[k]):
            nums[i + start] = k
        start += count[k]
def sort_colours_v2(nums):
    """Sort a list of 0/1/2 values in place in one pass.

    Dutch-national-flag partition: 0s are swapped behind ``low``, 2s are
    swapped beyond ``high``, 1s are left where the two regions meet.
    """
    if len(nums) < 2:
        return
    low = 0
    high = len(nums) - 1
    cur = 0
    while cur <= high:
        value = nums[cur]
        if value == 0:
            nums[cur] = nums[low]
            nums[low] = 0
            low += 1
            cur += 1
        elif value == 2:
            # The element swapped in from ``high`` is unexamined, so do not
            # advance ``cur`` here.
            nums[cur] = nums[high]
            nums[high] = 2
            high -= 1
        else:
            cur += 1
class Solution:
    """LeetCode 75 adapter — delegates to the in-place one-pass sort above."""
    def sortColors(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        sort_colours_v2(nums)
|
[
"[email protected]"
] | |
e617b920f9e2568d05f0b9b81923724255ed6437
|
dcbef06d5a00f07756339b9e62c684dec2fee425
|
/nuitka/build/inline_copy/lib/scons-4.3.0/SCons/Platform/darwin.py
|
f997a7d9e6f3b45fabc0a4a6ede8551a69bcf4bd
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Nuitka/Nuitka
|
f9543d8d95bfa0b81d4e60af0dfad99fb72893a4
|
d87faf2f7e1d6ed9bfe4cf8c1d648f34307e33f2
|
refs/heads/develop
| 2023-08-28T14:00:32.861328 | 2023-08-27T09:16:45 | 2023-08-27T09:16:45 | 9,626,741 | 8,573 | 599 |
Apache-2.0
| 2023-09-13T02:49:41 | 2013-04-23T15:40:33 |
Python
|
UTF-8
|
Python
| false | false | 2,630 |
py
|
# MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Platform-specific initialization for Mac OS X systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
from . import posix
import os
def generate(env):
    """Add Mac OS X (darwin) platform settings to the construction environment."""
    posix.generate(env)
    env['SHLIBSUFFIX'] = '.dylib'
    env['HOST_OS'] = 'darwin'
    # Gather the system path configuration: /etc/paths plus every fragment
    # under /etc/paths.d (which is absent on Tiger and earlier releases).
    path_files = ['/etc/paths']
    try:
        fragments = os.listdir('/etc/paths.d')
    except FileNotFoundError:
        fragments = []
    path_files.extend('/etc/paths.d/' + name for name in fragments)
    # Record every entry found in those files under env['ENV']['PATHOSX'].
    for path_file in path_files:
        if not os.path.isfile(path_file):
            continue
        with open(path_file, 'r') as handle:
            for line in handle.readlines():
                if line:
                    env.AppendENVPath('PATHOSX', line.strip('\n'))
    # Only extend PATH when the user explicitly opts in via the environment.
    if env['ENV'].get('PATHOSX', False) and os.environ.get('SCONS_USE_MAC_PATHS', False):
        env.AppendENVPath('PATH', env['ENV']['PATHOSX'])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
[
"[email protected]"
] | |
545c240dc43ec38cffd97004bd6125bf765692d6
|
5e49afd9c6ca73d7074c7ae220d5186fe4f44c08
|
/setup.py
|
100a3637c77fb07f8f43449aadc017a221620a02
|
[
"MIT"
] |
permissive
|
pylover/sharedlists
|
c2842618e7f6f9fea9dfefd710b9f94f36c19e7c
|
b020be26d3a64a3cdb9417a066a454b5b92006c5
|
refs/heads/master
| 2020-06-22T02:55:47.892070 | 2019-08-04T20:37:20 | 2019-08-04T20:37:20 | 197,615,918 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 801 |
py
|
"""Packaging script for the ``sharedlists`` distribution."""
import re
from os.path import join, dirname
from setuptools import setup, find_packages
# reading package version (same way the sqlalchemy does)
with open(join(dirname(__file__), 'sharedlists', '__init__.py')) as v_file:
    package_version = re.compile('.*__version__ = \'(.*?)\'', re.S).\
        match(v_file.read()).group(1)
# Runtime requirements; gunicorn is only needed for deployment.
dependencies = [
    'restfulpy >= 3.4, < 4',
    'easycli >= 1.4, < 2',
    # Deployment
    'gunicorn',
]
setup(
    name='sharedlists',
    version=package_version,
    packages=find_packages(exclude=['tests']),
    install_requires=dependencies,
    include_package_data=True,
    license='MIT',
    # Two console entry points: the server daemon and the "bee" client.
    entry_points={
        'console_scripts': [
            'sharedlists = sharedlists:server_main',
            'bee = sharedlists:client_main'
        ]
    }
)
|
[
"[email protected]"
] | |
87e06c5f092bc078e57470af3c2e97ccb8e14638
|
6c137e70bb6b1b618fbbceddaeb74416d387520f
|
/lantz/lantz/drivers/agilent/__init__.py
|
6cb05fee840da445ceb8ceea76d2bfa2c2dd3fe9
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
zhong-lab/code
|
fe497c75662f8c3b7ab3c01e7e351bff6d5e8d15
|
b810362e06b44387f0768353c602ec5d29b551a2
|
refs/heads/master
| 2023-01-28T09:46:01.448833 | 2022-06-12T22:53:47 | 2022-06-12T22:53:47 | 184,670,765 | 2 | 7 |
BSD-2-Clause
| 2022-12-08T21:46:15 | 2019-05-02T23:37:39 |
Python
|
UTF-8
|
Python
| false | false | 593 |
py
|
# -*- coding: utf-8 -*-
"""
lantz.drivers.agilent
~~~~~~~~~~~~~~~~~~~~~~
:company: Agilent Technologies.
:description: Manufactures test instruments for research and industrial applications
:website: http://www.agilent.com/home
----
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from .n51xx import N51xx
from .ag33220A import Ag33220A
from .ag81130a import Ag81130A
from .e8257c import E8257C
from .AG33522a import AG33522A
__all__ = ['N51xx', 'Ag33220A', 'Ag81130A', 'AG33522A', 'E8257C']
|
[
"none"
] |
none
|
7107ab73e45047060a6a8580092971ab13b86db0
|
ab616e26a623fe7e81d30ba7b86fabe4a3658794
|
/LibriSpeech/Get_Meta_LibriSpeech.py
|
39a801bf12ffad5efc95d8bb95ea6ef3ab2b9afa
|
[] |
no_license
|
ruclion/linears_decoder
|
1d2367fbfa8fdde3ae0a8c53e5e82ed7035d1eed
|
93cf874f87a601584c07ba5e4b673e401e9e7c90
|
refs/heads/master
| 2022-12-16T14:25:34.373534 | 2020-09-22T14:42:58 | 2020-09-22T14:42:58 | 289,808,115 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,973 |
py
|
import os
# import numpy as np
# from audio import wav2mfcc_v2, load_wav
wavs_dir = 'wavs'
ppgs_dir = 'alignments'
zhaoxt_train = 'train.txt'
zhaoxt_test = 'test.txt'
meta_list_fromWavs = []
meta_list_fromPPGs = []
meta_list_fromZhaoxt = []
meta_list = []
meta_path = 'meta.txt'
def main():
    """Write ``meta.txt`` with utterance ids present in all three sources:
    the train/test id lists, the wav directory tree, and the PPG files.
    """
    # Utterance ids (e.g. "2391-145015-0048") from the train and test lists.
    with open(zhaoxt_train, 'r') as f:
        meta_list_fromZhaoxt.extend(t.strip() for t in f.readlines())
    with open(zhaoxt_test, 'r') as f:
        meta_list_fromZhaoxt.extend(t.strip() for t in f.readlines())
    print('Zhaoxts:', len(meta_list_fromZhaoxt), meta_list_fromZhaoxt[0])
    # Wav basenames two directory levels below wavs_dir.
    for second_dir in os.listdir(wavs_dir):
        for third_dir in os.listdir(os.path.join(wavs_dir, second_dir)):
            third_wavs_dir = os.path.join(wavs_dir, second_dir, third_dir)
            wav_files = [f[:-4] for f in os.listdir(third_wavs_dir) if f.endswith('.wav')]
            meta_list_fromWavs.extend(wav_files)
    print('Wavs:', len(meta_list_fromWavs), meta_list_fromWavs[0])
    # First token of each PPG line, e.g. "100-121669-0000 1 1 1 ...".
    for f_path in os.listdir(ppgs_dir):
        with open(os.path.join(ppgs_dir, f_path), 'r') as f:
            for line in f.readlines():
                meta_list_fromPPGs.append(line.strip().split(' ')[0])
    print('PPGs:', len(meta_list_fromPPGs), meta_list_fromPPGs[0])
    # Keep ids present in all three sources. Sets make each membership test
    # O(1); the original scanned both lists for every id (O(n^2) overall).
    ppg_ids = set(meta_list_fromPPGs)
    wav_ids = set(meta_list_fromWavs)
    for idx in meta_list_fromZhaoxt:
        if idx in ppg_ids and idx in wav_ids:
            meta_list.append(idx)
        else:
            print('为什么不用:', idx)
    with open(meta_path, 'w') as f:
        for idx in meta_list:
            f.write(idx + '\n')
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
39b4713bb06e115f5fef7f696c1b2c73fcf47adf
|
1ed536ef1527e6655217e731f622d643ece49c2b
|
/scripts/align_all_vs_all.py
|
c152b8b783b8dffd40812fc5cb7771efc2c163fb
|
[] |
no_license
|
siping/cgat
|
de0f7af124eb38c72d7dece78fff83ff92ddbf96
|
aa4cc85ffdc53998ea1a5ac5516df2d16c254d2e
|
refs/heads/master
| 2021-01-22T13:03:18.060139 | 2013-10-07T15:53:55 | 2013-10-07T15:53:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,781 |
py
|
'''
align_all_vs_all.py - all-vs-all pairwise alignment
===================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
This script computes all-vs-all alignments between
sequences in a :term:`fasta` formatted file.
Currently only Smith-Waterman protein alignment is
implemented.
Usage
-----
Example::
python align_all_vs_all.py --help
Type::
python align_all_vs_all.py --help
for command line help.
Command line options
--------------------
'''
import os
import sys
import string
import re
import getopt
import time
import optparse
import math
import tempfile
import CGAT.Experiment as E
import alignlib
import CGAT.FastaIterator as FastaIterator
""" program $Id: align_all_vs_all.py 2782 2009-09-10 11:40:29Z andreas $
"""
if __name__ == "__main__":
    parser = E.OptionParser( version = "%prog version: $Id: align_all_vs_all.py 2782 2009-09-10 11:40:29Z andreas $")
    parser.add_option("-s", "--sequences", dest="filename_sequences", type="string",
                      help="input file with sequences" )
    parser.set_defaults(
        filename_sequences = None,
        gop = -10.0,
        gep = -1.0,
        )
    (options, args) = E.Start( parser, add_pipe_options = True )
    if options.filename_sequences:
        infile = open(options.filename_sequences, "r")
    else:
        infile = sys.stdin
    # BUG FIX: the iterator was assigned to ``parser`` (clobbering the option
    # parser) while the loop below read from an undefined name ``iterator``.
    iterator = FastaIterator.FastaIterator( infile )
    # Collect (title, sequence-object) pairs; whitespace is stripped from
    # each sequence before conversion.
    sequences = []
    while 1:
        cur_record = iterator.next()
        if cur_record is None: break
        sequences.append( (cur_record.title, alignlib.makeSequence(re.sub( " ", "", cur_record.sequence)) ) )
    if options.filename_sequences:
        infile.close()
    # Pairwise Smith-Waterman over every unordered pair of sequences.
    alignator = alignlib.makeAlignatorFullDP( options.gop, options.gep )
    map_a2b = alignlib.makeAlignataVector()
    nsequences = len(sequences)
    for x in range(0,nsequences-1):
        for y in range(x+1, nsequences):
            alignator.Align( sequences[x][1], sequences[y][1], map_a2b)
            row_ali, col_ali = alignlib.writeAlignataCompressed( map_a2b )
            options.stdout.write( "%s\t%s\t%i\t%i\t%i\t%s\t%i\t%i\t%s\t%i\t%i\t%i\t%i\n" % (\
                sequences[x][0], sequences[y][0],
                map_a2b.getScore(),
                map_a2b.getRowFrom(),
                map_a2b.getRowTo(),
                row_ali,
                map_a2b.getColFrom(),
                map_a2b.getColTo(),
                col_ali,
                map_a2b.getScore(),
                100 * alignlib.calculatePercentIdentity( map_a2b, sequences[x][1], sequences[y][1]),
                sequences[x][1].getLength(),
                sequences[y][1].getLength() ))
    E.Stop()
|
[
"[email protected]"
] | |
ede6a7910e34d87a8089ec7a7a792cc145ae0a44
|
ec700463d9af81f68a477535ac233646f4d262f7
|
/python/__main__.py
|
2966615ac56651c81b277b34316ddc91361aca73
|
[] |
no_license
|
gregjhansell97/grid-map
|
36579afa7beadb78a4b8cc53e2c7f45c75ac28a2
|
7d4c25b583474ec45265b01e524ed0884aaa2937
|
refs/heads/master
| 2020-03-16T20:42:24.156940 | 2018-10-02T23:29:58 | 2018-10-02T23:29:58 | 132,969,337 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 453 |
py
|
from grid_map import GridMap
import timeit
if __name__ == "__main__":
    # Populate a 1000x1000 grid, then descend through nested sub-grids,
    # printing each level along the way.
    grid = GridMap(5, bit_depth=10)
    for col in range(1000):
        for row in range(1000):
            grid.add(col, row, "loc:" + str((col, row)))
    grid = grid.sub_grids[1][0]
    print(grid)
    for _ in range(4):
        grid = grid.sub_grids[0][0]
        print(grid)
    # Final descent without printing, matching the original script.
    grid = grid.sub_grids[0][0]
|
[
"[email protected]"
] | |
d2856e764575cdb8308c02b69d2303ddf1692b83
|
c6d852e5842cf6f74123445d20ff03876377ae26
|
/lemon/python22/lemon_14_190918_测试框架_unittest/test_练习相减02.py
|
447882bd4b22fb5aed635fbc7eb95a77abf6e076
|
[] |
no_license
|
songyongzhuang/PythonCode_office
|
0b3d35ca5d58bc305ae90fea8b1e8c7214619979
|
cfadd3132c2c7c518c784589e0dab6510a662a6c
|
refs/heads/master
| 2023-02-13T14:06:10.610935 | 2021-01-14T09:11:32 | 2021-01-14T09:11:32 | 327,183,429 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,436 |
py
|
# --*-- coding : utf-8 --*--
# Project : python22
# Current file : test_练习相减02.py
# Author : Administrator
# Create time : 2019-09-19 10:22
# IDE : PyCharm
# TODO 成长很苦, 进步很甜, 加油!
import unittest
def minus(a, b):
    """Return the difference ``a - b``.

    (The original comment said "add" and a stray ``''`` literal was
    concatenated onto the docstring; both cleaned up.)
    """
    return a - b
x = 3
y = 5
expected = -2
class TestMinus(unittest.TestCase):
    """Tests for minus(), demonstrating class- and method-level fixtures."""
    # Class-level fixtures: run once per test class.
    @classmethod
    def setUpClass(cls):
        print('每一个测试类之前只运行一次')
    @classmethod
    def tearDownClass(cls):
        print('每一个测试类之后只运行一次')
    # Method-level fixtures: run around every individual test method.
    def setUp(self):
        """Runs automatically before each test method."""
        print('每个测试用例执行前置条件')
    def tearDown(self):
        """Runs automatically after each test method."""
        print('每个测试用例执行后置条件')
    def test_add_success(self):
        """Assert the comparison expression is true."""
        self.assertTrue(expected == minus(x, y))
    def test_add_error(self):
        """Fail if the two values are not equal."""
        # The original wrapped this in try/except SyntaxError — dead code:
        # assertEqual raises AssertionError on mismatch, never SyntaxError.
        self.assertEqual(-2, minus(x, y))
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
4cb569f1636bfc4eae939e6f9a0744d37db16326
|
20899d453bc61c169153338ac9d22d324df089c1
|
/abc/abc162/B.py
|
9eb9826bfab9e83ccd7c92096c9c66a9611d1f39
|
[] |
no_license
|
mui-nyan/AtCoder
|
b2d926b113963915426af679bf9b28430569707c
|
a702280f11a5b0b1b29dd099dbfc7b1c31fb89fd
|
refs/heads/master
| 2022-07-04T16:32:41.164564 | 2022-06-19T07:24:11 | 2022-06-19T07:24:11 | 182,425,947 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 778 |
py
|
import math
from functools import reduce
from collections import deque
import sys
sys.setrecursionlimit(10**7)
# スペース区切りの入力を読み込んで数値リストにして返します。
def get_nums_l():
    """Read one space-separated line from stdin and return it as a list of ints."""
    return list(map(int, input().split(" ")))
# 改行区切りの入力をn行読み込んで数値リストにして返します。
def get_nums_n(n):
    """Read ``n`` newline-separated integers from stdin and return them as a list."""
    values = []
    for _ in range(n):
        values.append(int(input()))
    return values
# 改行またはスペース区切りの入力をすべて読み込んでイテレータを返します。
def get_all_int():
    """Read every remaining stdin token (space or newline separated) as ints."""
    tokens = open(0).read().split()
    return map(int, tokens)
def log(*items):
    """Write a DEBUG-prefixed line to stderr so stdout stays clean for answers."""
    print("DEBUG:", *items, file=sys.stderr)
n = int(input())
ans = 0
for i in range(1, n+1):
if i%3 == 0 or i%5 == 0:
continue
ans += i
print(ans)
|
[
"[email protected]"
] | |
85e88feb381eeaebe8cd19e82b3cf2a9e88051bc
|
c8d7f2da5ff9e13a5bb6f92b9387a336e7059644
|
/dolo/numeric/matrix_equations.py
|
0d3eb87483d5360957fdf884ec03b391a427d468
|
[
"BSD-2-Clause"
] |
permissive
|
TomAugspurger/dolo
|
675e5c051e7fdcc8d0af441335d526408128b71f
|
5d9f0f772860eadf3b9df79e47d158155835bd6b
|
refs/heads/master
| 2020-12-25T12:47:30.156775 | 2013-02-11T20:13:56 | 2013-02-11T20:13:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,339 |
py
|
from dolo.numeric.tensor import sdot,mdot
import numpy as np
TOL = 1e-10
# credits : second_order_solver is adapted from Sven Schreiber's port of Uhlig's Toolkit.
def second_order_solver(FF, GG, HH):
    """Solve the quadratic matrix problem defined by FF, GG, HH via a QZ
    decomposition, returning ``[selected_eigenvalues, PP]`` where PP is the
    stable solution matrix.

    Adapted from Sven Schreiber's port of Uhlig's toolkit.
    Raises BKError('generic') when the Blanchard-Kahn conditions fail.
    """
    from scipy.linalg import qz
    from dolo.numeric.extern.qz import qzdiv
    from numpy import array, c_, r_, eye, zeros, real_if_close, diag, allclose, where, diagflat
    from numpy.linalg import solve
    Psi_mat = array(FF)
    Gamma_mat = array(-GG)
    Theta_mat = array(-HH)
    m_states = FF.shape[0]
    # Companion-form pencil (Delta, Xi) of twice the state dimension.
    Xi_mat = r_[c_[Gamma_mat, Theta_mat],
                c_[eye(m_states), zeros((m_states, m_states))]]
    Delta_mat = r_[c_[Psi_mat, zeros((m_states, m_states))],
                   c_[zeros((m_states, m_states)), eye(m_states)]]
    AAA, BBB, Q, Z = qz(Delta_mat, Xi_mat)
    Delta_up, Xi_up, UUU, VVV = [real_if_close(mm) for mm in (AAA, BBB, Q, Z)]
    # Generalized eigenvalues, guarding against division by ~0.
    Xi_eigval = diag(Xi_up)/where(diag(Delta_up)>TOL, diag(Delta_up), TOL)
    Xi_sortindex = abs(Xi_eigval).argsort()
    Xi_sortval = Xi_eigval[Xi_sortindex]
    Xi_select = slice(0, m_states)
    stake = (abs(Xi_sortval[Xi_select])).max() + TOL
    Delta_up, Xi_up, UUU, VVV = qzdiv(stake, Delta_up, Xi_up, UUU, VVV)
    # Blanchard-Kahn conditions: the narrowed except no longer masks
    # unrelated errors the way the original bare ``except:`` did.
    try:
        # check that all unused roots are unstable
        assert abs(Xi_sortval[m_states]) > (1-TOL)
        # check that all used roots are stable
        assert abs(Xi_sortval[Xi_select]).max() < 1+TOL
    except AssertionError:
        raise BKError('generic')
    Lambda_mat = diagflat(Xi_sortval[Xi_select])
    VVVH = VVV.T
    VVV_2_1 = VVVH[m_states:2*m_states, :m_states]
    VVV_2_2 = VVVH[m_states:2*m_states, m_states:2*m_states]
    UUU_2_1 = UUU[m_states:2*m_states, :m_states]
    PP = - solve(VVV_2_1, VVV_2_2)
    # slightly different check than in the original toolkit:
    assert allclose(real_if_close(PP), PP.real)
    PP = PP.real
    # (Leftover ``print(PP.__class__)`` debug output removed.)
    return [Xi_sortval[Xi_select], PP]
def solve_sylvester(A, B, C, D, Ainv=None):
    """Solve the generalized Sylvester equation A X + B X [C,...,C] + D = 0.

    X is a multilinear map whose dimensions are determined by D. The inverse
    of A may optionally be supplied as ``Ainv`` to skip a linear solve.
    """
    import slycot
    n_d = D.ndim - 1
    n_v = C.shape[1]
    # Integer division: n_c is used as a reshape dimension below, and true
    # division would produce a float (a TypeError under Python 3).
    n_c = D.size // n_v**n_d
    DD = D.reshape( n_c, n_v**n_d )
    # Kronecker power of C, one factor per tensor dimension of X.
    if n_d == 1:
        CC = C
    else:
        CC = np.kron(C, C)
        for i in range(n_d-2):
            CC = np.kron(CC, C)
    if Ainv is not None:
        Q = sdot(Ainv, B)
        S = sdot(Ainv, DD)
    else:
        Q = np.linalg.solve(A, B)
        S = np.linalg.solve(A, DD)
    n = n_c
    m = n_v**n_d
    XX = slycot.sb04qd(n, m, Q, CC, -S)
    X = XX.reshape( (n_c,)+(n_v,)*(n_d) )
    return X
class BKError(Exception):
    """Raised when the Blanchard-Kahn conditions are violated."""
    def __init__(self, type):
        # Keep the failure category so callers can inspect it.
        self.type = type
    def __str__(self):
        return 'Blanchard-Kahn error ({0})'.format(self.type)
|
[
"[email protected]"
] | |
9eb155ab168b320e301794c6d06721d8159379c8
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/329/usersdata/297/91364/submittedfiles/dec2bin.py
|
f499b6f8e6c0b866d68629df150aa2c83d3d617b
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 203 |
py
|
# -*- coding: utf-8 -*-
# BUG FIX: ``true`` is not defined in Python — the builtin constant is ``True``;
# the original loop raised NameError on its first iteration.
while True:
    p = int(input('digite um numero p: '))
    q = int(input('digite um numero q: '))
    if q >= p:
        break
# Print 'S' when the digits of p appear as a substring of q, else 'N'.
if str(p) in str(q):
    print('S')
else:
    print('N')
|
[
"[email protected]"
] | |
91ed919fe4f82d66d4c1e181233dc01892ee1182
|
420376c5a1fbf8a4572545a9c891a0f8f204ed5b
|
/scrapy_amazon/items.py
|
d2aeed20eb2ea2833ebfb79da6fce00b903d6891
|
[] |
no_license
|
kishoresurana/scrapy_amazon
|
946fb8fe198736ba4233a2f3727ca1a1873ae937
|
bbb72cdb5f468d5c8b605d273bb5c93b9a2b249a
|
refs/heads/master
| 2020-12-25T21:55:35.192394 | 2014-07-27T20:09:24 | 2014-07-27T20:09:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 451 |
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ScrapyAmazonItem(scrapy.Item):
    """Item with the fields populated for one scraped Amazon offer.

    Field semantics are defined by the spider that fills them in —
    presumably one record per seller offer; verify against the spider.
    """
    # define the fields for your item here like:
    # name = scrapy.Field()
    price = scrapy.Field()
    condition = scrapy.Field()
    seller = scrapy.Field()
    delivery = scrapy.Field()
    title = scrapy.Field()
    date = scrapy.Field()
|
[
"[email protected]"
] | |
c4e8389d93f36f8805d8c3cdf58cabc747343f84
|
91fe8f479fa921fa84111d19222a5c6aa6eff030
|
/basis/execute-unit/aiohttp-and-asyncio-test.py
|
25312be5c6ecba564f33a7ed14ddc40b68021a95
|
[] |
no_license
|
romanticair/python
|
2055c9cdaa46894c9788d5797643283786ed46dd
|
6f91fe5e7cbedcdf4b8f7baa7641fd615b4d6141
|
refs/heads/master
| 2022-11-03T17:17:17.608786 | 2019-07-05T07:07:29 | 2019-07-05T07:07:29 | 195,356,190 | 0 | 1 | null | 2022-10-14T20:51:14 | 2019-07-05T07:00:33 |
Python
|
UTF-8
|
Python
| false | false | 1,355 |
py
|
"""
asyncio 可以实现单线程并发IO操作。如果仅用在客户端,发挥的威力不大。
如果把asyncio用在服务器端,例如Web服务器,由于HTTP连接就是IO操作,
因此可以用单线程+coroutine实现多用户的高并发支持
asyncio实现了TCP、UDP、SSL等协议,aiohttp则是基于asyncio实现的HTTP框架
aiohttp的初始化函数init()也是一个coroutine,loop.create_server()则利用asyncio创建TCP服务
编写一个HTTP服务器,分别处理以下URL
1. / - 首页返回b'<h1>Index</h1>';
2. /hello/{name} - 根据URL参数返回文本hello, %s!
"""
import asyncio
from aiohttp import web
async def index(request):
    """Handle GET / — return a static HTML index page (after a 0.5s delay
    that simulates slow I/O)."""
    await asyncio.sleep(0.5)
    return web.Response(body=b'<h1>Index</h1>')
async def hello(request):
    """Handle GET /hello/{name} — greet the caller using the URL parameter."""
    await asyncio.sleep(0.5)
    name = request.match_info['name']
    return web.Response(body=('<h1>hello, %s!</h1>' % name).encode('utf-8'))
async def init(loop):
    """Build the aiohttp application, register routes, and bind the TCP
    server to 127.0.0.1:3000. Returns the created server object."""
    app = web.Application(loop=loop)
    app.router.add_route('GET', '/', index)
    app.router.add_route('GET', '/hello/{name}', hello)
    srv = await loop.create_server(app.make_handler(), '127.0.0.1', 3000)
    print('Server started at http://127.0.0.1:3000...')
    return srv
if __name__ == '__main__':
    # Start the server on the event loop, then serve until interrupted.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(init(loop))
    loop.run_forever()
|
[
"[email protected]"
] | |
6a42d49d7d83b0b0520c6e6d394d79b1e6c4fd48
|
b8302a17ad124b2432380c7274e4780ec5adfe55
|
/exercises/de/solution_04_03.py
|
e63f7c7a9d4320eaae8436a4c058573e32639ff4
|
[
"MIT",
"CC-BY-NC-4.0"
] |
permissive
|
FrankGrimm/spacy-course
|
10da4ebf976d93aec50aa1b200019b4217f4043e
|
5e09ef9d296dad2b0fd5ff1945f4cf9a55109906
|
refs/heads/master
| 2022-04-24T18:18:06.202131 | 2020-04-21T19:17:09 | 2020-04-21T19:17:09 | 257,692,388 | 1 | 0 |
MIT
| 2020-04-21T19:14:21 | 2020-04-21T19:14:20 | null |
UTF-8
|
Python
| false | false | 650 |
py
|
import json
from spacy.matcher import Matcher
from spacy.lang.de import German
with open("exercises/de/iphone.json") as f:
TEXTS = json.loads(f.read())
nlp = German()
matcher = Matcher(nlp.vocab)
# Zwei Tokens, deren kleingeschriebene Formen "iphone" und "x" sind
pattern1 = [{"LOWER": "iphone"}, {"LOWER": "x"}]
# Token mit der kleingeschriebenen Form "iphone" und eine Ziffer
pattern2 = [{"LOWER": "iphone"}, {"IS_DIGIT": True}]
# Füge Patterns zum Matcher hinzu und überprüfe die Resultate
matcher.add("GADGET", None, pattern1, pattern2)
for doc in nlp.pipe(TEXTS):
print([doc[start:end] for match_id, start, end in matcher(doc)])
|
[
"[email protected]"
] | |
e9056dcc8a8628a344e0ddf4e9add6e257ddabae
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_310/ch25_2019_03_01_00_00_25_791523.py
|
70bb03eaebe4809ffcc0bcea7e9b4073d6f8312b
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 160 |
py
|
km=int(input(distancia):
if km <=200:
preco= km*0.5
print("{:.2f}".format(preco))
else:
preco= km*0.45
print("{:.2f}".format(preco))
|
[
"[email protected]"
] | |
3cc7dc94fdb029bb70bc409a3dc8ffef0368bf06
|
2cec0797981b73c497866a75fb6d33f4c3a4c06c
|
/brain_tumor_classification/modules/data/utils.py
|
e5cd18bf3458f2de6aa299ac09b545c77cfc04b4
|
[] |
no_license
|
Vadbeg/brain_tumor_classification
|
ed44e50076627a0682e2eca13cf115716c510ed1
|
ba87b65717cd1fe75871f3108db1394de271c62d
|
refs/heads/master
| 2023-08-01T13:46:27.176780 | 2021-09-19T15:14:32 | 2021-09-19T15:14:32 | 397,667,617 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,718 |
py
|
"""Module with utilities for dataset"""
from pathlib import Path
from typing import List, Optional, Tuple, Union
import numpy as np
from monai.transforms import (
AddChanneld,
Compose,
LoadImaged,
Resized,
ScaleIntensityRanged,
Transform,
)
from torch.utils.data import DataLoader, Dataset
def get_train_val_paths(
    train_path: Union[str, Path],
    train_split_percent: float = 0.7,
    ct_file_extension: str = '*.nii.gz',
    item_limit: Optional[int] = None,
    shuffle: bool = True,
) -> Tuple[List[Path], List[Path]]:
    """Split the CT files found under ``train_path`` into train/val lists.

    ``ct_file_extension`` is a glob pattern; ``item_limit`` truncates both
    splits; ``shuffle`` randomizes order before splitting.
    """
    all_paths = list(Path(train_path).glob(ct_file_extension))
    if shuffle:
        np.random.shuffle(all_paths)
    split_idx = int(train_split_percent * len(all_paths))
    train_paths = all_paths[:split_idx]
    val_paths = all_paths[split_idx:]
    if item_limit:
        train_paths = train_paths[:item_limit]
        val_paths = val_paths[:item_limit]
    return train_paths, val_paths
def create_data_loader(
    dataset: Dataset, batch_size: int = 1, shuffle: bool = True, num_workers: int = 2
) -> DataLoader:
    """Wrap ``dataset`` in a DataLoader with pinned host memory enabled."""
    return DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=True,
    )
def get_load_transforms(
    img_key: str,
    original_min: float = 0.0,
    original_max: float = 200.0,
    res_min: float = 0.0,
    res_max: float = 1.0,
    spatial_size: Tuple[int, int, int] = (196, 196, 128),
) -> Compose:
    """Build the full loading pipeline: read the image from disk as float32,
    then apply the preprocessing transforms (channel, intensity scaling,
    resize) built by get_preprocessing_transforms."""
    preprocessing_transforms = get_preprocessing_transforms(
        img_key=img_key,
        original_min=original_min,
        original_max=original_max,
        res_min=res_min,
        res_max=res_max,
        spatial_size=spatial_size,
    )
    load_transforms = Compose(
        [LoadImaged(keys=[img_key], dtype=np.float32), preprocessing_transforms]
    )
    return load_transforms
def get_preprocessing_transforms(
    img_key: str,
    original_min: float = 0.0,
    original_max: float = 200.0,
    res_min: float = 0.0,
    res_max: float = 1.0,
    spatial_size: Tuple[int, int, int] = (196, 196, 128),
) -> Compose:
    """Build preprocessing for an already-loaded image under ``img_key``:
    add a channel dimension, clip-and-rescale intensities from
    [original_min, original_max] to [res_min, res_max], then resize to
    ``spatial_size``."""
    preprocessing_transforms = Compose(
        [
            AddChanneld(keys=[img_key]),
            ScaleIntensityRanged(
                keys=[img_key],
                a_min=original_min,
                a_max=original_max,
                b_min=res_min,
                b_max=res_max,
                clip=True,
            ),
            Resized(keys=[img_key], spatial_size=spatial_size),
        ]
    )
    return preprocessing_transforms
|
[
"[email protected]"
] | |
e3f9b9ccd9704d797def23c50f582b8c877f8f37
|
9059d9cbad4188ed2980f551151b9678ffb68b44
|
/Chapter12_logging/12-3.logging_config_example.py
|
0262db2fa4267b523bc6fa234849422e7c5042d2
|
[] |
no_license
|
mhee4321/python_basic
|
ad0e64fa21ecfab231a6627ba6abeea82d725690
|
86031975a9121efe5785e83f663255a7b4e4ba77
|
refs/heads/master
| 2023-02-11T20:31:54.353219 | 2021-01-07T05:44:31 | 2021-01-07T05:44:31 | 326,850,491 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 750 |
py
|
"""Example: configure the logging module from a config file and emit one
message at every severity level."""
import logging  # load the logging module
import logging.config  # load the logging configuration module
# Read the logger configuration from file
logging.config.fileConfig('12-2.logging.conf')
# Create a logger for this module
logger = logging.getLogger(__name__)  # create the logger
# Emit one message per severity level (lowest to highest)
logger.debug('이 메시지는 개발자만 이해해요.')  # DEBUG log
logger.info('생각대로 동작 하고 있어요.')  # INFO log
logger.warning('곧 문제가 생길 가능성이 높습니다.')  # WARNING log
logger.error('문제가 생겼어요.기능이 동작 안해요.')  # ERROR log
logger.critical('시스템이 다운됩니다!!!!')  # CRITICAL log
|
[
"[email protected]"
] | |
8a4871b4d661ef4a0a122394b00d6b5f55566f2e
|
9d2bafb07baf657c447d09a6bc5a6e551ba1806d
|
/ros2_ws/build/std_msgs/rosidl_generator_py/std_msgs/msg/_multi_array_layout.py
|
e830a59dc03efc5d1893c4f8d32f97cabca4ecd6
|
[] |
no_license
|
weidafan/ros2_dds
|
f65c4352899a72e1ade662b4106e822d80a99403
|
c0d9e6ff97cb7cc822fe25a62c0b1d56f7d12c59
|
refs/heads/master
| 2021-09-05T20:47:49.088161 | 2018-01-30T21:03:59 | 2018-01-30T21:03:59 | 119,592,597 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,630 |
py
|
# generated from rosidl_generator_py/resource/_msg.py.em
# generated code does not contain a copyright notice
import logging
import traceback
class Metaclass(type):
    """Metaclass of message 'MultiArrayLayout'."""
    # C-extension hooks, filled in lazily by __import_type_support__.
    _CONVERT_FROM_PY = None
    _CONVERT_TO_PY = None
    _DESTROY_ROS_MESSAGE = None
    _TYPE_SUPPORT = None
    __constants = {
    }
    @classmethod
    def __import_type_support__(cls):
        # Bind the generated C type-support functions; failure is logged
        # (not raised) so pure-Python use of the message still works.
        try:
            from rosidl_generator_py import import_type_support
            module = import_type_support('std_msgs')
        except ImportError:
            logger = logging.getLogger('rosidl_generator_py.MultiArrayLayout')
            logger.debug(
                'Failed to import needed modules for type support:\n' + traceback.format_exc())
        else:
            cls._CONVERT_FROM_PY = module.convert_from_py_msg_multi_array_layout
            cls._CONVERT_TO_PY = module.convert_to_py_msg_multi_array_layout
            cls._TYPE_SUPPORT = module.type_support_msg_multi_array_layout
            cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg_multi_array_layout
            # Nested field type must have its type support loaded too.
            from std_msgs.msg import MultiArrayDimension
            if MultiArrayDimension.__class__._TYPE_SUPPORT is None:
                MultiArrayDimension.__class__.__import_type_support__()
    @classmethod
    def __prepare__(cls, name, bases, **kwargs):
        # list constant names here so that they appear in the help text of
        # the message class under "Data and other attributes defined here:"
        # as well as populate each message instance
        return {
        }
class MultiArrayLayout(metaclass=Metaclass):
    """Message class 'MultiArrayLayout'."""
    __slots__ = [
        '_dim',
        '_data_offset',
    ]
    def __init__(self, **kwargs):
        """Initialize fields from keyword arguments; unknown names are rejected."""
        assert all(['_' + key in self.__slots__ for key in kwargs.keys()]), \
            'Invalid arguments passed to constructor: %r' % kwargs.keys()
        self.dim = kwargs.get('dim', list())
        self.data_offset = kwargs.get('data_offset', int())
    def __repr__(self):
        typename = self.__class__.__module__.split('.')
        typename.pop()
        typename.append(self.__class__.__name__)
        args = [s[1:] + '=' + repr(getattr(self, s, None)) for s in self.__slots__]
        return '%s(%s)' % ('.'.join(typename), ', '.join(args))
    @property
    def dim(self):
        """Message field 'dim'."""
        return self._dim
    @dim.setter
    def dim(self, value):
        from std_msgs.msg import MultiArrayDimension
        # FIX: Sequence and Set must come from collections.abc — importing
        # them from ``collections`` fails on Python 3.10+ (aliases removed).
        from collections.abc import Sequence
        from collections.abc import Set
        from collections import UserList
        from collections import UserString
        assert \
            ((isinstance(value, Sequence) or
              isinstance(value, Set) or
              isinstance(value, UserList)) and
             not isinstance(value, str) and
             not isinstance(value, UserString) and
             all([isinstance(v, MultiArrayDimension) for v in value]) and
             True), \
            "The 'dim' field must be a set or sequence and each value of type 'MultiArrayDimension'"
        self._dim = value
    @property
    def data_offset(self):
        """Message field 'data_offset'."""
        return self._data_offset
    @data_offset.setter
    def data_offset(self, value):
        assert \
            isinstance(value, int), \
            "The 'data_offset' field must of type 'int'"
        # uint32 range check.
        assert value >= 0 and value < 4294967296, \
            "The 'data_offset' field must be an unsigned integer in [0, 4294967296)"
        self._data_offset = value
|
[
"[email protected]"
] | |
4b32a00c650bafd26ad85ee0f76ed96d200dfce0
|
d99ac626d62c663704444a9cce7e7fc793a9e75e
|
/crypto_implementations/virgil-crypto-c/wrappers/python/virgil_crypto_lib/foundation/_c_bridge/_vscf_alg_info_der_serializer.py
|
222936908c80c90638db7d52f3cdf4d1a644e7ae
|
[
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Experiment5X/CryptoFunctionDetection
|
3ab32d5573a249d24db1faf772721bc80b8d905d
|
dac700193e7e84963943593e36844b173211a8a1
|
refs/heads/master
| 2023-04-19T09:12:35.828268 | 2021-05-13T22:39:27 | 2021-05-13T22:39:27 | 355,299,557 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,174 |
py
|
# Copyright (C) 2015-2020 Virgil Security, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Lead Maintainer: Virgil Security Inc. <[email protected]>
from virgil_crypto_lib._libs import *
from ctypes import *
from ._vscf_impl import vscf_impl_t
from virgil_crypto_lib.common._c_bridge import vsc_buffer_t
class vscf_alg_info_der_serializer_t(Structure):
    """Opaque handle to the C-side serializer context.

    No ``_fields_`` are declared: Python never inspects the struct layout —
    it is only passed around behind ``POINTER(...)``.
    """
    pass
class VscfAlgInfoDerSerializer(object):
    """Provide DER serializer of algorithm information.

    Thin ctypes bridge: each method looks up the matching C symbol in the
    foundation shared library, declares its signature, and invokes it.
    """

    def __init__(self):
        """Create underlying C context."""
        self._ll = LowLevelLibs()
        self._lib = self._ll.foundation

    def vscf_alg_info_der_serializer_new(self):
        """Allocate a fresh serializer context in the C library."""
        fn = self._lib.vscf_alg_info_der_serializer_new
        fn.argtypes = []
        fn.restype = POINTER(vscf_alg_info_der_serializer_t)
        return fn()

    def vscf_alg_info_der_serializer_delete(self, ctx):
        """Destroy a context previously obtained from *_new()."""
        fn = self._lib.vscf_alg_info_der_serializer_delete
        fn.argtypes = [POINTER(vscf_alg_info_der_serializer_t)]
        fn.restype = None
        return fn(ctx)

    def vscf_alg_info_der_serializer_use_asn1_writer(self, ctx, asn1_writer):
        """Attach an ASN.1 writer implementation to the context."""
        fn = self._lib.vscf_alg_info_der_serializer_use_asn1_writer
        fn.argtypes = [POINTER(vscf_alg_info_der_serializer_t), POINTER(vscf_impl_t)]
        fn.restype = None
        return fn(ctx, asn1_writer)

    def vscf_alg_info_der_serializer_serialized_len(self, ctx, alg_info):
        """Return buffer size enough to hold serialized algorithm."""
        fn = self._lib.vscf_alg_info_der_serializer_serialized_len
        fn.argtypes = [POINTER(vscf_alg_info_der_serializer_t), POINTER(vscf_impl_t)]
        fn.restype = c_size_t
        return fn(ctx, alg_info)

    def vscf_alg_info_der_serializer_serialize(self, ctx, alg_info, out):
        """Serialize algorithm info to buffer class."""
        fn = self._lib.vscf_alg_info_der_serializer_serialize
        fn.argtypes = [POINTER(vscf_alg_info_der_serializer_t), POINTER(vscf_impl_t), POINTER(vsc_buffer_t)]
        fn.restype = None
        return fn(ctx, alg_info, out)

    def vscf_alg_info_der_serializer_setup_defaults(self, ctx):
        """Setup predefined values to the uninitialized class dependencies."""
        fn = self._lib.vscf_alg_info_der_serializer_setup_defaults
        fn.argtypes = [POINTER(vscf_alg_info_der_serializer_t)]
        fn.restype = None
        return fn(ctx)

    def vscf_alg_info_der_serializer_serialize_inplace(self, ctx, alg_info):
        """Serialize by using internal ASN.1 writer.

        Note, that caller code is responsible to reset ASN.1 writer with
        an output buffer.
        """
        fn = self._lib.vscf_alg_info_der_serializer_serialize_inplace
        fn.argtypes = [POINTER(vscf_alg_info_der_serializer_t), POINTER(vscf_impl_t)]
        fn.restype = c_size_t
        return fn(ctx, alg_info)

    def vscf_alg_info_der_serializer_shallow_copy(self, ctx):
        """Return another handle to the same context (see C API for ownership)."""
        fn = self._lib.vscf_alg_info_der_serializer_shallow_copy
        fn.argtypes = [POINTER(vscf_alg_info_der_serializer_t)]
        fn.restype = POINTER(vscf_alg_info_der_serializer_t)
        return fn(ctx)

    def vscf_alg_info_der_serializer_impl(self, ctx):
        """Expose the context as a generic vscf_impl_t pointer."""
        fn = self._lib.vscf_alg_info_der_serializer_impl
        fn.argtypes = [POINTER(vscf_alg_info_der_serializer_t)]
        fn.restype = POINTER(vscf_impl_t)
        return fn(ctx)
|
[
"[email protected]"
] | |
519e6d1ad5bda54f6ed5b6ff5dc4202c57d10141
|
6f0d8416daeb787b13938d5fa49c3d2e08d15e02
|
/tests/test_cam.py
|
5a5dbc61b10d60caf62b858b4f880f2bed62d9ec
|
[
"MIT"
] |
permissive
|
MartinHjelmare/matrixscreener
|
cbfc0ba95614c7dd6e152bb63a24b67ed03045ca
|
b6e93d9c96139cf5f2b8942d61681e45d7b6b4e5
|
refs/heads/master
| 2021-01-22T14:21:16.758654 | 2015-02-19T11:53:46 | 2015-02-19T11:53:46 | 57,959,734 | 0 | 0 | null | 2016-05-03T10:03:40 | 2016-05-03T10:03:40 | null |
UTF-8
|
Python
| false | false | 1,526 |
py
|
from matrixscreener.cam import *
import pytest
class EchoSocket:
    """In-memory socket double: recv() plays back whatever send() stored."""

    # Class-level default so recv() works even before the first send().
    msg = ''

    def send(self, msg):
        # Remember the payload for later playback; report it fully "sent".
        self.msg = msg
        return len(msg)

    def recv(self, buffer_size):
        # Echo back at most buffer_size characters of the stored payload.
        return self.msg[:buffer_size]

    def connect(self, where):
        pass  # no real connection to establish

    def settimeout(self, timeout):
        pass  # timeouts are meaningless for the in-memory double
# TEST
#- key (here cli) overrided if defined several times
#- prefix added
#- types (integer, float) should be converted to strings
def test_echo(monkeypatch):
    """Prefix + command sent should be same as echoed socket message."""
    # Swap the real socket for the in-memory echo double before CAM connects.
    monkeypatch.setattr("socket.socket", EchoSocket)
    cam = CAM()
    command = [('cli', 'custom'), ('cmd', 'enableall'), ('value', 'true'),
               ('integer', 1234), ('float', 0.00234)]
    # The monkeypatched EchoSocket never flushes, so stub out flushing.
    cam.flush = lambda: None
    echoed = cam.send(command)[0]
    sent = tuples_as_dict(cam.prefix + command)
    assert sent == echoed
def test_commands(monkeypatch):
    """short hand commands should work as intended"""
    # Swap the real socket for the in-memory echo double before CAM connects.
    monkeypatch.setattr("socket.socket", EchoSocket)
    cam = CAM()
    # The monkeypatched EchoSocket never flushes, so stub out flushing.
    cam.flush = lambda: None
    # get_information should issue "cmd getinfo / dev stage" with the prefix.
    information = cam.get_information()
    expected = tuples_as_dict(cam.prefix + [('cmd', 'getinfo'), ('dev', 'stage')])
    assert information == expected
|
[
"[email protected]"
] | |
20d215ab84216efee4da368d5a8ad6e24ed57fc4
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03679/s358798230.py
|
083bf4ccd4da704fe0bfff938691cf5dbc1ec004
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 133 |
py
|
# AtCoder ABC065 A: X = allowed overrun, A = days until drinking,
# B = best-by window. delicious if drunk within the window, safe if the
# overrun stays within X days, dangerous otherwise.
x, a, b = map(int, input().split())
print('delicious' if a >= b else 'dangerous' if a + x < b else 'safe')
|
[
"[email protected]"
] | |
da878145baa16b59947043420038f917d29d43bd
|
e7b483d88f80703c89553e1b9e2f5dd0322f7e38
|
/sketch/util/http.py
|
e69fe5f151af3818aae7e26ffc6a7d32826a3f52
|
[
"BSD-2-Clause"
] |
permissive
|
nikcub/Sketch
|
0f559ff9948bd355407257c25c261c1e0f237021
|
5d2d5f7e51c3eed374a8b12441dc8577b16c101e
|
refs/heads/master
| 2016-09-09T23:32:10.243530 | 2011-11-04T13:56:03 | 2011-11-04T13:56:03 | 2,592,091 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,451 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=2:sw=2:expandtab
#
# Copyright (c) 2011, Nik Cubrilovic. All rights reserved.
#
# <[email protected]> <http://nikcub.appspot.com>
#
# Licensed under a BSD license. You may obtain a copy of the License at
#
# http://nikcub.appspot.com/bsd-license
#
"""
Sketch - TM_FILENAME}
desc
"""
import webob
import urlparse
def extract_dataurl(dataurl):
    """Split a ``data:`` URL into its decoded payload and media type.

    :param dataurl: string of the form ``data:<mime>[;base64],<payload>``.
    :returns: ``(decoded_bytes, mime_type)``, or ``(None, None)`` when the
        input is not a well-formed data URL.
    """
    if not dataurl[:5] == 'data:':
        return (None, None)
    # BUGFIX: str.index() raises ValueError when ',' is absent, crashing
    # instead of returning the documented (None, None); use find().
    img_index = dataurl.find(',')
    if img_index < 0:
        return (None, None)
    # Media type is everything between "data:" and the first ';' or ','.
    img_type = dataurl[5:img_index].split(';')[0]
    img_dat_enc = dataurl[img_index + 1:]
    import base64
    # BUGFIX: base64.decodestring was deprecated and removed in Python 3.9;
    # b64decode is the portable equivalent.
    img_dat = base64.b64decode(img_dat_enc)
    return (img_dat, img_type)
def urlunsplit(scheme=None, netloc=None, path=None, query=None, fragment=None):
    """Similar to ``urlparse.urlunsplit``, but will escape values and
    urlencode and sort query arguments.
    :param scheme:
        URL scheme, e.g., `http` or `https`.
    :param netloc:
        Network location, e.g., `localhost:8080` or `www.google.com`.
    :param path:
        URL path.
    :param query:
        URL query as an escaped string, or a dictionary or list of key-values
        tuples to build a query.
    :param fragment:
        Fragment identifier, also known as "anchor".
    :returns:
        An assembled absolute or relative URL.
    """
    # NOTE(review): Python 2 code (`basestring`, `urllib.quote`). `urllib`
    # and the `to_utf8` helper are used below but neither is imported or
    # defined in this module as shown — calling this would raise NameError
    # unless they are provided elsewhere; confirm the imports.
    if not scheme or not netloc:
        # Without both scheme and netloc, drop them so the result is relative.
        scheme = None
        netloc = None
    if path:
        path = urllib.quote(to_utf8(path))
    if query and not isinstance(query, basestring):
        if isinstance(query, dict):
            query = query.items()
        query_args = []
        for key, values in query:
            # Wrap a scalar so single- and multi-valued keys share one path.
            if isinstance(values, basestring):
                values = (values,)
            for value in values:
                query_args.append((to_utf8(key), to_utf8(value)))
        # Sorting should be optional? Sorted args are commonly needed to build
        # URL signatures for services.
        query_args.sort()
        query = urllib.urlencode(query_args)
    if fragment:
        fragment = urllib.quote(to_utf8(fragment))
    return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
def test_normalize_url():
    """Ad-hoc smoke check: prints each sample URL next to its normalized
    form for manual inspection (not an automated assertion-based test).
    Uses the Python 2 print statement."""
    # Most cases were commented out during development; only the active
    # entries below are exercised.
    urls = [
        # 'example.com',
        # 'example.com/',
        # 'http://example.com/',
        # 'http://example.com',
        # 'http://example.com?',
        # 'http://example.com/?',
        # 'http://example.com//',
        # 'http://example.com/a',
        # 'http://example.com/a/',
        # 'http://example.com/a/?',
        # 'http://example.com/a/../',
        # 'http://example.com/a/../?',
        # 'http://example.com/a/b/../?',
        # 'http://example.com/a/../',
        # 'http://example.com/a/b/?z=1',
        'http://example.com/a/?',
        'http://@example.com/a/?',
        'http://example.com:/a/?',
        'http://@example.com:/a/?',
        'http://example.com:80/a/?',
    ]
    for url in urls:
        print "%s \t\t\t\t\t\tclean: %s" % (url, normalize_url(url))
def normalize_url(s, charset='utf-8'):
    """
    function that attempts to mimic browser URL normalization.
    Partly taken from werkzeug.utils
    <http://www.bitbucket.org/mitsuhiko/werkzeug-main/src/tip/werkzeug/utils.py>
    There is a lot to URL normalization, see:
    <http://en.wikipedia.org/wiki/URL_normalization>
    :param charset: The target charset for the URL if the url was
    given as unicode string.
    """
    # NOTE(review): Python 2-only code (`unicode`, `urllib.unquote`).
    # `urllib` and `posixpath` are used below but not imported in this
    # module as shown — calling this would raise NameError; confirm.
    if isinstance(s, unicode):
        s = s.encode(charset, 'ignore')
    scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
    # print "scheme: %s\n netloc:%s\n path:%s\n qs:%s\n anchor:%s\n" % (scheme, netloc, path, qs, anchor)
    path = urllib.unquote(path)
    if not netloc:
        # Input like "example.com/a" parses with an empty netloc: treat the
        # stripped path as the host and reset the path to "/".
        netloc = path.strip("/\\:?&")
        path = '/'
    if not scheme:
        scheme = "http"
    if not path:
        path = '/'
    netloc = netloc.strip("/\\:@?&")
    # Collapse "." / ".." segments, then re-anchor the path at the root.
    path = posixpath.normpath(path)
    path = urlparse.urljoin('/', path)
    # path = urllib.quote(path, '/%')
    qs = urllib.quote_plus(qs, ':&=')
    # print "scheme: %s\n netloc:%s\n path:%s\n qs:%s\n anchor:%s\n" % (scheme, netloc, path, qs, anchor)
    return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
def redirect(location, code=302):
    """Build an HTTP redirect Response pointing at *location*.

    :param location: target URL for the Location header and fallback link.
    :param code: one of the redirect status codes 301/302/303/305/307.
    :returns: a ``sketch.Response`` with the Location header set.
    """
    assert code in (301, 302, 303, 305, 307), 'invalid code'
    from sketch import Response
    display_location = location
    # Minimal HTML fallback body for clients that ignore the header.
    body = (
        '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
        '<title>Redirecting...</title>\n'
        '<h1>Redirecting...</h1>\n'
        '<p>You should be redirected automatically to target URL: '
        '<a href="%s">%s</a>. If not click the link.' %
        (location, display_location)
    )
    resp = Response(body, code, mimetype='text/html')
    resp.headers['Location'] = location
    return resp
def abort_old(code, *args, **kwargs):
    """Raise the ``HTTPException`` registered for *code*.

    The exception class is looked up in ``webob.exc.status_map`` and
    instantiated with *args* and *kwargs*.

    :param code:
        A valid HTTP error code from ``webob.exc.status_map``.
    :param args:
        Arguments to be used to instantiate the exception.
    :param kwargs:
        Keyword arguments to be used to instantiate the exception.
    :raises KeyError: when no exception class is registered for *code*.
    """
    exc_class = webob.exc.status_map.get(code)
    if not exc_class:
        raise KeyError('No exception is defined for code %r.' % code)
    raise exc_class(*args, **kwargs)
def get_valid_methods(handler):
    """Returns a list of HTTP methods supported by a handler.
    :param handler:
        A :class:`RequestHandler` instance.
    :returns:
        A list of HTTP methods supported by the handler.
    """
    # NOTE(review): `Application` is not imported in this module as shown,
    # so this raises NameError unless it is injected elsewhere — confirm.
    # A method counts as supported when the handler defines an attribute
    # named after it (lowercased, '-' replaced by '_').
    return [method for method in Application.ALLOWED_METHODS if getattr(handler,
      method.lower().replace('-', '_'), None)]
|
[
"[email protected]"
] | |
2182531e49175062ac8b030e998b5c2c6ca3ae8d
|
cad91ae76d2746a6c28ddda0f33a58f9d461378f
|
/PyTorch/Recommendation/NCF/feature_spec.py
|
40d56a0e310d345e17261e9bbfbd4618f5acb691
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/DeepLearningExamples
|
fe677521e7e2a16e3cb0b77e358f9aab72f8c11a
|
a5388a45f71a949639b35cc5b990bd130d2d8164
|
refs/heads/master
| 2023-08-31T20:57:08.798455 | 2023-08-23T10:09:12 | 2023-08-23T10:09:12 | 131,881,622 | 11,838 | 3,124 | null | 2023-08-28T16:57:33 | 2018-05-02T17:04:05 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,943 |
py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import os
from typing import List, Dict
class FeatureSpec:
    """The four sections of a feature-spec YAML plus its on-disk location.

    ``base_directory`` anchors relative paths; it is carried alongside the
    parsed sections but excluded from serialization.
    """

    def __init__(self, feature_spec, source_spec, channel_spec, metadata, base_directory):
        self.feature_spec: Dict = feature_spec
        self.source_spec: Dict = source_spec
        self.channel_spec: Dict = channel_spec
        self.metadata: Dict = metadata
        self.base_directory: str = base_directory

    @classmethod
    def from_yaml(cls, path):
        """Parse *path* as YAML; the file's directory becomes base_directory."""
        with open(path, 'r') as feature_spec_file:
            parsed = yaml.safe_load(feature_spec_file)
        return cls.from_dict(parsed, base_directory=os.path.dirname(path))

    @classmethod
    def from_dict(cls, source_dict, base_directory):
        """Build a spec from an already-parsed mapping of the four sections."""
        return cls(base_directory=base_directory, **source_dict)

    def to_dict(self) -> Dict:
        """Return only the serializable sections (base_directory excluded)."""
        return {name: getattr(self, name)
                for name in ('feature_spec', 'source_spec', 'channel_spec', 'metadata')}

    def to_string(self):
        """Render the serializable sections as a YAML string."""
        return yaml.dump(self.to_dict())

    def to_yaml(self, output_path=None):
        """Write the spec as YAML; defaults to <base_directory>/feature_spec.yaml."""
        if not output_path:
            output_path = self.base_directory + '/feature_spec.yaml'
        with open(output_path, 'w') as output_file:
            print(yaml.dump(self.to_dict()), file=output_file)
|
[
"[email protected]"
] | |
ac4cec9c23d857374e16c812fac948e0c272797e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03108/s870352488.py
|
0b87a41dcc411c3fbc8ae14366e08bef4bb0f7fc
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,652 |
py
|
import sys
import collections
# Bind stdin's raw readline once — the standard fast-input idiom for
# reading many lines in competitive programming.
readline = sys.stdin.readline
class UnionFind():
    """Disjoint-set forest with union by rank, path compression, and
    per-component size tracking (valid at roots only)."""

    def __init__(self, n):
        self.n = n
        self.parents = [-1] * n  # negative value marks a root
        self.rank = [0] * n
        self.size = [1] * n

    def find(self, x):
        # Two-pass iterative path compression: locate the root, then point
        # every node on the path directly at it.
        root = x
        while self.parents[root] >= 0:
            root = self.parents[root]
        while x != root:
            x, self.parents[x] = self.parents[x], root
        return root

    def union(self, x, y):
        x = self.find(x)
        y = self.find(y)
        if x == y:
            return
        if self.rank[x] < self.rank[y]:
            # Keep the higher-rank root in x so it becomes the new root.
            x, y = y, x
        self.size[x] += self.size[y]
        self.parents[y] = x
        if self.rank[x] == self.rank[y]:
            self.rank[x] += 1

    def msize(self, x):
        # Negated component size (useful as an ascending sort key).
        return -self.size[self.find(x)]
def main():
    """Count disconnected pairs after each bridge removal (offline).

    Removing edges forward equals adding them in reverse: start from the
    fully-disconnected graph and merge components edge by edge.
    """
    n, m = map(int, readline().split())
    edges = []
    for _ in range(m):
        a, b = map(int, readline().split())
        edges.append((a - 1, b - 1))
    uf = UnionFind(n)
    # With every edge removed, all n*(n-1)/2 pairs are disconnected.
    answers = [n * (n - 1) // 2]
    for a, b in reversed(edges):
        ra = uf.find(a)
        rb = uf.find(b)
        # Merging two distinct components connects size(ra)*size(rb) pairs.
        newly_connected = 0 if ra == rb else uf.size[ra] * uf.size[rb]
        uf.union(a, b)
        answers.append(answers[-1] - newly_connected)
    # answers[i] is the count after removing the first m-i edges; print in
    # removal order (the state after adding all edges is never printed).
    for value in reversed(answers[:-1]):
        print(value)


if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
9d31dd701cf90d929170893cddab05db06011ba7
|
c4544c22c0618451746795090e07c80bc85a0877
|
/file_upload/article/forms.py
|
fd00ffba0492b96c7d39b7f2448d488bfccf1d67
|
[] |
no_license
|
RelaxedDong/Django_course
|
35f7027dc552ad148d2dc8679a19a1ffb12b8d14
|
2965089d15e4c80cd6402d362ee37f8cc675c08b
|
refs/heads/master
| 2022-01-09T14:28:40.503099 | 2019-05-24T07:07:03 | 2019-05-24T07:07:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 379 |
py
|
#encoding:utf-8
# __author__ = 'donghao'
# __time__ = 2019/5/13 21:52
from django import forms
from .models import Book
from django.core import validators
class BookForm(forms.ModelForm):
    """ModelForm for Book records; restricts the cover upload to JPEG files."""
    # Override the model field so uploads are rejected unless the file
    # extension is .jpg or .jpeg (extension check only, not content).
    cover_url = forms.FileField(validators=[validators.FileExtensionValidator(allowed_extensions=['jpg','jpeg'])])
    class Meta:
        model = Book
        fields = ['title','cover_url']
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.