blob_id: string (40) | directory_id: string (40) | path: string (3–281) | content_id: string (40) | detected_licenses: list (0–57) | license_type: 2 classes | repo_name: string (6–116) | snapshot_id: string (40) | revision_id: string (40) | branch_name: 313 classes | visit_date: timestamp[us] | revision_date: timestamp[us] | committer_date: timestamp[us] | github_id: int64 (18.2k–668M, nullable) | star_events_count: int64 (0–102k) | fork_events_count: int64 (0–38.2k) | gha_license_id: 17 classes | gha_event_created_at: timestamp[us] | gha_created_at: timestamp[us] | gha_language: 107 classes | src_encoding: 20 classes | language: 1 class | is_vendor: bool | is_generated: bool | length_bytes: int64 (4–6.02M) | extension: 78 classes | content: string (2–6.02M) | authors: list (1) | author: string (0–175)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c8dcc3f1ad262de6fac4180e5c8b6618dde5d9f
|
5ebe164e1cf64e03b13298d16d6e865720ec0b45
|
/supervised_learning/0x0A-object_detection/7-main.py
|
81c7712ed0de7d821e8164f8ee59fdae3152d908
|
[] |
no_license
|
rodrigocruz13/holbertonschool-machine_learning
|
c17a0df1f692eeaa6c3195d00908a0c95cfc8bc3
|
eb47cd4d12e2f0627bb5e5af28cc0802ff13d0d9
|
refs/heads/master
| 2020-12-22T13:56:18.216499 | 2020-11-14T08:21:05 | 2020-11-14T08:21:05 | 236,808,432 | 4 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 618 |
py
|
#!/usr/bin/env python3

if __name__ == '__main__':
    import numpy as np

    Yolo = __import__('7-yolo').Yolo

    np.random.seed(0)
    anchors = np.array([[[116, 90], [156, 198], [373, 326]],
                        [[30, 61], [62, 45], [59, 119]],
                        [[10, 13], [16, 30], [33, 23]]])
    yolo = Yolo('../data/yolo.h5', '../data/coco_classes.txt', 0.6, 0.5, anchors)
    predictions, image_paths = yolo.predict('../data/yolo')

    for i, name in enumerate(image_paths):
        if "dog.jpg" in name:
            ind = i
            break

    print(image_paths[ind])
    print(predictions[ind])
|
[
"rodrigocruz13"
] |
rodrigocruz13
|
8c225e4d22f42a09cb512b35d97f30b69e592e2c
|
389d64906ac8371878b0a020618dae9dfe47e691
|
/mysite/blog/migrations/0005_auto_20190403_1214.py
|
de0b4b03724b29762e81112e8382d959ff3f5620
|
[] |
no_license
|
czhang86/blog
|
507e5a45ef5c19c6c5c78e3214f4822c132255f1
|
324e49c03806499a982566317ee68d99f787e646
|
refs/heads/master
| 2020-05-04T19:57:40.926193 | 2019-04-04T04:20:36 | 2019-04-04T04:20:36 | 179,414,965 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 721 |
py
|
# Generated by Django 2.1.2 on 2019-04-03 16:14

import datetime

from django.db import migrations, models
from django.utils.timezone import utc


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0004_auto_20190403_1153'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comment',
            name='created_date',
            field=models.DateTimeField(default=datetime.datetime(2019, 4, 3, 16, 14, 29, 723774, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='post',
            name='created_date',
            field=models.DateTimeField(default=datetime.datetime(2019, 4, 3, 16, 14, 29, 723774, tzinfo=utc)),
        ),
    ]
|
[
"[email protected]"
] | |
3780b8a8f5c3e1035a2fb5be411f5088ff20ded0
|
e303169e4fedbd9d69d76361333ee40d556a247d
|
/hyperboost/opt.py
|
13302eb665b0c0a1de54e07ef1ee0854ce67663e
|
[] |
no_license
|
rpietrusinski/hyperboost
|
23dce3a986fd54b2e972cc8d5263d79ee6be819d
|
580ad28f8bd8f0825361875c8a849280a31adaf7
|
refs/heads/master
| 2020-03-30T20:54:27.028159 | 2018-10-08T19:12:19 | 2018-10-08T19:12:19 | 151,609,371 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,786 |
py
|
import numpy as np
import xgboost as xgb
import pickle
import os
from sklearn.metrics import roc_auc_score
from hyperopt import tpe, Trials, space_eval, fmin
from sklearn.model_selection import KFold


class HyperparametersOptimizer(object):

    def __init__(self, x: np.ndarray, y: np.ndarray, path: str):
        """A HyperparametersOptimizer performs XGBoost hyperparameter optimization using
        Hyperopt's Tree-structured Parzen Estimators. During each iteration 5-fold
        cross-validation is performed and the algorithm optimizes test AUC.

        :param x: X data of type numpy ndarray
        :param y: y data of type numpy ndarray
        :param path: Experiment path, which is where the Trials object is saved/loaded
        """
        self.x: np.ndarray = x
        self.y: np.ndarray = y
        self.trials: Trials = None
        self.best: dict = None
        self.path: str = os.path.join(path, 'trials.p')
        self.max_evals: int = None

    def cross_validation(self, params: dict):
        """Performs a single run of 5-fold cross-validation.

        :param params: Dictionary of parameters
        :return: 1 minus the average AUC (a loss for fmin to minimize)
        """
        auc = []
        for train_index, test_index in KFold(n_splits=5, shuffle=True).split(self.x):
            train = xgb.DMatrix(self.x[train_index], self.y[train_index])
            test = xgb.DMatrix(self.x[test_index], self.y[test_index])
            model = xgb.train(params, train)
            preds = model.predict(test)
            auc.append(roc_auc_score(test.get_label(), preds))
        return 1 - np.mean(auc)

    def run_experiment(self, space: dict, evals: int):
        """Either loads the Trials object and continues previous experiments or starts from the beginning.

        :param space: Parameters space
        :param evals: Number of evals in the experiment
        :return: Saves the trials.p pickle object in the experiment path.
        """
        try:
            self.trials = pickle.load(open(self.path, "rb"))
        except FileNotFoundError:
            self.trials = Trials()
        self.max_evals = len(self.trials.trials) + evals
        self.best = fmin(fn=self.cross_validation, space=space, algo=tpe.suggest, max_evals=self.max_evals,
                         trials=self.trials)
        pickle.dump(self.trials, open(self.path, "wb"))

    def opt_params(self, space: dict):
        """Returns the best parameter set based on previous experiments.

        :param space: parameters space
        :return: dictionary of parameter values
        """
        if self.trials is None:
            print("No experiment has been conducted!")
        else:
            return space_eval(space, self.trials.argmin)
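

# Added usage sketch (not part of the original module). The search-space keys
# and ranges below are assumptions chosen for illustration; any valid XGBoost
# parameter space works, and the synthetic data stands in for real x, y.
if __name__ == '__main__':
    from hyperopt import hp

    rng = np.random.RandomState(0)
    x_demo = rng.rand(200, 5)                  # 200 samples, 5 features
    y_demo = (x_demo[:, 0] > 0.5).astype(int)  # synthetic binary target

    space = {
        'objective': 'binary:logistic',
        'eta': hp.uniform('eta', 0.01, 0.3),
        'max_depth': hp.choice('max_depth', [3, 5, 7]),
    }

    opt = HyperparametersOptimizer(x_demo, y_demo, path='.')
    opt.run_experiment(space, evals=5)  # TPE suggests 5 more parameter sets
    print(opt.opt_params(space))        # decodes trials.argmin back into the space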
|
[
"[email protected]"
] | |
5760b13b612b0a305cedcee911b325427d81504d
|
2807aa56f6bae8923d78c10ee4f22fe1970b12f3
|
/build/pymongo/test/test_threads.py
|
ec22ffc3476a4f5d74b1d4f8bb95b02e35a7bea3
|
[
"Apache-2.0"
] |
permissive
|
rutanmedellin/swmanager
|
7b965b75d419e9be1e25b815b58021ec8e80f9d0
|
27349c9d85e8de089f29eed1b0413b234a4c1f37
|
refs/heads/master
| 2020-06-04T02:27:51.466087 | 2012-08-21T03:57:13 | 2012-08-21T03:57:13 | 3,830,545 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,420 |
py
|
# Copyright 2009-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test that pymongo is thread safe."""

import unittest
import threading
import traceback

from nose.plugins.skip import SkipTest

from test.utils import server_started_with_auth, joinall
from test.test_connection import get_connection
from pymongo.connection import Connection
from pymongo.replica_set_connection import ReplicaSetConnection
from pymongo.pool import SocketInfo, _closed
from pymongo.errors import (AutoReconnect,
                            OperationFailure,
                            DuplicateKeyError)


def get_pool(connection):
    if isinstance(connection, Connection):
        return connection._Connection__pool
    elif isinstance(connection, ReplicaSetConnection):
        writer = connection._ReplicaSetConnection__writer
        pools = connection._ReplicaSetConnection__pools
        return pools[writer]['pool']
    else:
        raise TypeError(str(connection))


class AutoAuthenticateThreads(threading.Thread):

    def __init__(self, collection, num):
        threading.Thread.__init__(self)
        self.coll = collection
        self.num = num
        self.success = True
        self.setDaemon(True)

    def run(self):
        try:
            for i in xrange(self.num):
                self.coll.insert({'num': i}, safe=True)
                self.coll.find_one({'num': i})
        except Exception:
            traceback.print_exc()
            self.success = False


class SaveAndFind(threading.Thread):

    def __init__(self, collection):
        threading.Thread.__init__(self)
        self.collection = collection
        self.setDaemon(True)

    def run(self):
        sum = 0
        for document in self.collection.find():
            sum += document["x"]
        assert sum == 499500, "sum was %d not 499500" % sum


class Insert(threading.Thread):

    def __init__(self, collection, n, expect_exception):
        threading.Thread.__init__(self)
        self.collection = collection
        self.n = n
        self.expect_exception = expect_exception
        self.setDaemon(True)

    def run(self):
        for _ in xrange(self.n):
            error = True
            try:
                self.collection.insert({"test": "insert"}, safe=True)
                error = False
            except:
                if not self.expect_exception:
                    raise
            if self.expect_exception:
                assert error


class Update(threading.Thread):

    def __init__(self, collection, n, expect_exception):
        threading.Thread.__init__(self)
        self.collection = collection
        self.n = n
        self.expect_exception = expect_exception
        self.setDaemon(True)

    def run(self):
        for _ in xrange(self.n):
            error = True
            try:
                self.collection.update({"test": "unique"},
                                       {"$set": {"test": "update"}}, safe=True)
                error = False
            except:
                if not self.expect_exception:
                    raise
            if self.expect_exception:
                assert error


class IgnoreAutoReconnect(threading.Thread):

    def __init__(self, collection, n):
        threading.Thread.__init__(self)
        self.c = collection
        self.n = n
        self.setDaemon(True)

    def run(self):
        for _ in range(self.n):
            try:
                self.c.find_one()
            except AutoReconnect:
                pass


class FindPauseFind(threading.Thread):
    """See test_server_disconnect() for details"""

    @classmethod
    def shared_state(cls, nthreads):
        class SharedState(object):
            pass

        state = SharedState()

        # Number of threads total
        state.nthreads = nthreads

        # Number of threads that have arrived at rendezvous point
        state.arrived_threads = 0
        state.arrived_threads_lock = threading.Lock()

        # set when all threads reach rendezvous
        state.ev_arrived = threading.Event()

        # set from outside FindPauseFind to let threads resume after
        # rendezvous
        state.ev_resume = threading.Event()
        return state

    def __init__(self, collection, state):
        """Params: A collection, an event to signal when all threads have
        done the first find(), an event to signal when threads should resume,
        and the total number of threads
        """
        super(FindPauseFind, self).__init__()
        self.collection = collection
        self.state = state
        self.passed = False

        # If this thread fails to terminate, don't hang the whole program
        self.setDaemon(True)

    def rendezvous(self):
        # pause until all threads arrive here
        s = self.state
        s.arrived_threads_lock.acquire()
        s.arrived_threads += 1
        if s.arrived_threads == s.nthreads:
            s.arrived_threads_lock.release()
            s.ev_arrived.set()
        else:
            s.arrived_threads_lock.release()
            s.ev_arrived.wait()

    def run(self):
        try:
            # acquire a socket
            list(self.collection.find())
            pool = get_pool(self.collection.database.connection)
            socket_info = pool._get_request_state()
            assert isinstance(socket_info, SocketInfo)
            self.request_sock = socket_info.sock
            assert not _closed(self.request_sock)

            # Dereference socket_info so it can potentially return to the pool
            del socket_info
        finally:
            self.rendezvous()

        # all threads have passed the rendezvous, wait for
        # test_server_disconnect() to disconnect the connection
        self.state.ev_resume.wait()

        # test_server_disconnect() has closed this socket, but that's ok
        # because it's not our request socket anymore
        assert _closed(self.request_sock)

        # if disconnect() properly closed all threads' request sockets, then
        # this won't raise AutoReconnect because it will acquire a new socket
        assert self.request_sock == pool._get_request_state().sock
        list(self.collection.find())
        assert self.collection.database.connection.in_request()
        assert self.request_sock != pool._get_request_state().sock
        self.passed = True


class BaseTestThreads(object):
    """
    Base test class for TestThreads and TestThreadsReplicaSet. (This is not
    itself a unittest.TestCase, otherwise it'd be run twice -- once when nose
    imports this module, and once when nose imports
    test_threads_replica_set_connection.py, which imports this module.)
    """

    def setUp(self):
        self.db = self._get_connection().pymongo_test

    def tearDown(self):
        pass

    def _get_connection(self):
        """
        Intended for overriding in TestThreadsReplicaSet. This method
        returns a Connection here, and a ReplicaSetConnection in
        test_threads_replica_set_connection.py.
        """
        # Regular test connection
        return get_connection()

    def test_threading(self):
        self.db.drop_collection("test")
        for i in xrange(1000):
            self.db.test.save({"x": i}, safe=True)

        threads = []
        for i in range(10):
            t = SaveAndFind(self.db.test)
            t.start()
            threads.append(t)

        joinall(threads)

    def test_safe_insert(self):
        self.db.drop_collection("test1")
        self.db.test1.insert({"test": "insert"})
        self.db.drop_collection("test2")
        self.db.test2.insert({"test": "insert"})

        self.db.test2.create_index("test", unique=True)
        self.db.test2.find_one()

        okay = Insert(self.db.test1, 2000, False)
        error = Insert(self.db.test2, 2000, True)

        error.start()
        okay.start()
        error.join()
        okay.join()

    def test_safe_update(self):
        self.db.drop_collection("test1")
        self.db.test1.insert({"test": "update"})
        self.db.test1.insert({"test": "unique"})
        self.db.drop_collection("test2")
        self.db.test2.insert({"test": "update"})
        self.db.test2.insert({"test": "unique"})

        self.db.test2.create_index("test", unique=True)
        self.db.test2.find_one()

        okay = Update(self.db.test1, 2000, False)
        error = Update(self.db.test2, 2000, True)

        error.start()
        okay.start()
        error.join()
        okay.join()

    def test_low_network_timeout(self):
        db = None
        i = 0
        n = 10
        while db is None and i < n:
            try:
                db = get_connection(network_timeout=0.0001).pymongo_test
            except AutoReconnect:
                i += 1
        if i == n:
            raise SkipTest()

        threads = []
        for _ in range(4):
            t = IgnoreAutoReconnect(db.test, 100)
            t.start()
            threads.append(t)

        joinall(threads)

    def test_server_disconnect(self):
        # PYTHON-345, we need to make sure that threads' request sockets are
        # closed by disconnect().
        #
        # 1. Create a connection with auto_start_request=True
        # 2. Start N threads and do a find() in each to get a request socket
        # 3. Pause all threads
        # 4. In the main thread close all sockets, including threads' request
        #    sockets
        # 5. In main thread, do a find(), which raises AutoReconnect and resets
        #    pool
        # 6. Resume all threads, do a find() in them
        #
        # If we've fixed PYTHON-345, then only one AutoReconnect is raised,
        # and all the threads get new request sockets.
        cx = self.db.connection
        self.assertTrue(cx.auto_start_request)
        collection = self.db.pymongo_test

        # acquire a request socket for the main thread
        collection.find_one()
        pool = get_pool(collection.database.connection)
        socket_info = pool._get_request_state()
        assert isinstance(socket_info, SocketInfo)
        request_sock = socket_info.sock

        state = FindPauseFind.shared_state(nthreads=40)

        threads = [
            FindPauseFind(collection, state)
            for _ in range(state.nthreads)
        ]

        # Each thread does a find(), thus acquiring a request socket
        for t in threads:
            t.start()

        # Wait for the threads to reach the rendezvous
        state.ev_arrived.wait(10)
        self.assertTrue(state.ev_arrived.isSet(), "Thread timeout")

        try:
            self.assertEqual(state.nthreads, state.arrived_threads)

            # Simulate an event that closes all sockets, e.g. primary stepdown
            for t in threads:
                t.request_sock.close()

            # Finally, ensure the main thread's socket's last_checkout is
            # updated:
            collection.find_one()

            # ... and close it:
            request_sock.close()

            # Doing an operation on the connection raises an AutoReconnect and
            # resets the pool behind the scenes
            self.assertRaises(AutoReconnect, collection.find_one)
        finally:
            # Let threads do a second find()
            state.ev_resume.set()
            joinall(threads)

            for t in threads:
                self.assertTrue(t.passed, "%s threw exception" % t)


class BaseTestThreadsAuth(object):
    """
    Base test class for TestThreadsAuth and TestThreadsAuthReplicaSet. (This is
    not itself a unittest.TestCase, otherwise it'd be run twice -- once when
    nose imports this module, and once when nose imports
    test_threads_replica_set_connection.py, which imports this module.)
    """

    def _get_connection(self):
        """
        Intended for overriding in TestThreadsAuthReplicaSet. This method
        returns a Connection here, and a ReplicaSetConnection in
        test_threads_replica_set_connection.py.
        """
        # Regular test connection
        return get_connection()

    def setUp(self):
        self.conn = self._get_connection()
        if not server_started_with_auth(self.conn):
            raise SkipTest("Authentication is not enabled on server")
        self.conn.admin.system.users.remove({})
        self.conn.admin.add_user('admin-user', 'password')
        self.conn.admin.authenticate("admin-user", "password")
        self.conn.auth_test.system.users.remove({})
        self.conn.auth_test.add_user("test-user", "password")

    def tearDown(self):
        # Remove auth users from databases
        self.conn.admin.authenticate("admin-user", "password")
        self.conn.admin.system.users.remove({})
        self.conn.auth_test.system.users.remove({})

    def test_auto_auth_login(self):
        conn = self._get_connection()
        self.assertRaises(OperationFailure, conn.auth_test.test.find_one)

        # Admin auth
        conn = self._get_connection()
        conn.admin.authenticate("admin-user", "password")

        nthreads = 10
        threads = []
        for _ in xrange(nthreads):
            t = AutoAuthenticateThreads(conn.auth_test.test, 100)
            t.start()
            threads.append(t)
        joinall(threads)
        for t in threads:
            self.assertTrue(t.success)

        # Database-specific auth
        conn = self._get_connection()
        conn.auth_test.authenticate("test-user", "password")
        threads = []
        for _ in xrange(nthreads):
            t = AutoAuthenticateThreads(conn.auth_test.test, 100)
            t.start()
            threads.append(t)
        joinall(threads)
        for t in threads:
            self.assertTrue(t.success)


class TestThreads(BaseTestThreads, unittest.TestCase):
    pass


class TestThreadsAuth(BaseTestThreadsAuth, unittest.TestCase):
    pass


if __name__ == "__main__":
    unittest.main()
|
[
"[email protected]"
] | |
d944222d39aa2c0f4eb6c53856e08e6f051fae7a
|
df541a802b2dfa89d3aab14af627358dc7c76e6e
|
/接口自动化/Frame5/httpUnittest.py
|
21a2012f8446211b06c3e9b5b336e248861a73a5
|
[] |
no_license
|
gupan2018/PyAutomation
|
de966aff91f750c7207c9d3f3dfb488698492342
|
230aebe3eca5799c621673afb647d35a175c74f1
|
refs/heads/master
| 2021-09-07T19:44:20.710574 | 2017-12-22T15:58:23 | 2017-12-22T15:58:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,024 |
py
|
__author__ = 'Administrator'

import unittest
import mysql.connector


class Http_Unittest(unittest.TestCase):

    def __init__(self, test_case_id, test_method, http_method, http, test_url, test_data, cousor):
        super(Http_Unittest, self).__init__(test_method)
        self.test_case_id = test_case_id
        self.test_method = test_method
        self.http = http
        self.test_url = test_url
        self.test_data = test_data
        self.http_method = http_method
        self.mobilephone = test_data["mobilephone"]
        self.regname = test_data["regname"]
        self.cursor = cousor

    def test_register(self):
        if self.http_method == "GET":
            response = self.http.get_req(self.test_url, self.test_data)
        elif self.http_method == "POST":
            response = self.http.post_req(self.test_url, self.test_data)
        else:
            print("error in class Http_Unittest")

        try:
            # Store the execution result in the database
            sql_insert = 'INSERT INTO test_result ' \
                         '(case_id, http_method, request_name, request_url, mobilephone, regname, test_method, test_desc, status, code, msg) ' \
                         'VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'
            insert_data = (self.test_case_id, self.http_method, 'register', self.test_url, self.mobilephone, self.regname, self.test_method, "test the register API", response["status"], response["code"], response["msg"])
            self.cursor.execute(sql_insert, insert_data)
            self.cursor.execute("commit")
        except mysql.connector.Error as e:
            print(e)
            self.cursor.execute("rollback")

        try:
            self.assertEqual(response["code"], "10001", "register request failed")
        except AssertionError as e:
            print(str(e))
            # pass


# Test code below
'''
path_http = "http.conf"
http = HttpRequest(path_http)
test_Demo = Http_Unittest("test_register", "GET", http)
test_Demo.test_register()
'''
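
# Added note: the commented demo above predates the current constructor, which
# takes seven arguments. A sketch of a matching call -- HttpRequest, the URL,
# and the MySQL connection details are assumptions for illustration only:
#
# http = HttpRequest("http.conf")
# conn = mysql.connector.connect(user='root', password='...', database='test')
# case = Http_Unittest(1, "test_register", "GET", http,
#                      "http://example.com/api/register",
#                      {"mobilephone": "13800000000", "regname": "demo"},
#                      conn.cursor())
# case.test_register()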
|
[
"[email protected]"
] | |
435ce25fccf4bd20dbf5ae423dd02ada727c70e2
|
b07ea8c5a075e3c7e7a0f9aca6bec73a22cdb7df
|
/PART 1/ch03/10_is_anagram_using_ord.py
|
469076cacf26d0facbbfc5e8a9ede66cabd8f11c
|
[] |
no_license
|
jaeehooon/data_structure_and_algorithm_python
|
bb721bdbcff1804c04b944b4a01ed6be93124462
|
6d07438bfaaa1ec5283cb350ef4904eb94826c48
|
refs/heads/master
| 2023-02-21T10:08:20.765399 | 2021-01-22T13:37:11 | 2021-01-22T13:37:11 | 323,367,191 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 972 |
py
|
# 3.4.2 Anagrams (2)
"""
When its argument is a Unicode object, ord() returns the integer representing
the character's Unicode code point; for an 8-bit string it returns the byte value.
If the sums of ord() over all characters of two strings are equal, the strings
are treated as anagrams. (Strictly, equal sums are only a necessary condition --
this is a hash check, and unrelated strings can collide; see the note below.)
"""
import string


def hash_func(astring):
    """
    :param astring:
    :return:
    """
    s = 0
    for one in astring:
        if one in string.whitespace:
            continue
        s += ord(one)
    return s


def find_anagram_hash_function(word1, word2):
    return hash_func(word1) == hash_func(word2)


def test_find_anagram_hash_function():
    word1 = "buffy"
    word2 = "bffyu"
    word3 = "bffya"
    assert(find_anagram_hash_function(word1, word2) is True)
    assert(find_anagram_hash_function(word1, word3) is False)
    print("Test passed!")


if __name__ == '__main__':
    test_find_anagram_hash_function()
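
# Added note: a counterexample showing why the ord()-sum check alone is not
# sufficient. "ad" and "bc" are not anagrams, yet both sum to 197
# (97 + 100 == 98 + 99), so hash_func cannot tell them apart:
#
# assert hash_func("ad") == hash_func("bc")   # 197 == 197, a collision
# assert sorted("ad") != sorted("bc")         # sorting, by contrast, distinguishes them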
|
[
"[email protected]"
] | |
ed9bb04561e90339701c63eaba88122fa21c0fd6
|
be5deac90813d7e9c600f6d01f57bc10ade6a158
|
/tests/demos/demo_number_and_quantifier_recognition.py
|
6a8eee91e653fc39cb145be92718227cc60617c4
|
[
"Apache-2.0"
] |
permissive
|
hankcs/pyhanlp
|
a49ed4ae3e32b30ed8d9c8010c0a3e87a4e413d0
|
cda01245d68b3e94d16f37d979427433b20d7c3e
|
refs/heads/master
| 2023-08-19T07:04:14.413190 | 2023-04-04T17:59:25 | 2023-04-04T17:59:25 | 125,806,243 | 3,169 | 888 |
Apache-2.0
| 2022-01-11T16:33:55 | 2018-03-19T05:34:36 |
Python
|
UTF-8
|
Python
| false | false | 1,203 |
py
|
# -*- coding:utf-8 -*-
# Author: wancong
# Date: 2018-04-30
from pyhanlp import *


def demo_number_and_quantifier_recognition(sentences):
    """ Demonstrates number and quantifier recognition.

    >>> sentences = [
    ...     "十九元套餐包括什么",
    ...     "九千九百九十九朵玫瑰",
    ...     "壹佰块都不给我",
    ...     "9012345678只蚂蚁",
    ...     "牛奶三〇〇克*2",
    ...     "ChinaJoy“扫黄”细则露胸超2厘米罚款",
    ... ]
    >>> demo_number_and_quantifier_recognition(sentences)
    [十九元/mq, 套餐/n, 包括/v, 什么/ry]
    [九千九百九十九朵/mq, 玫瑰/n]
    [壹佰块/mq, 都/d, 不/d, 给/p, 我/rr]
    [9012345678只/mq, 蚂蚁/n]
    [牛奶/nf, 三〇〇克/mq, */w, 2/m]
    [ChinaJoy/nx, “/w, 扫黄/vi, ”/w, 细则/n, 露/v, 胸/ng, 超/v, 2厘米/mq, 罚款/vi]
    """
    StandardTokenizer = JClass("com.hankcs.hanlp.tokenizer.StandardTokenizer")
    StandardTokenizer.SEGMENT.enableNumberQuantifierRecognize(True)
    for sentence in sentences:
        print(StandardTokenizer.segment(sentence))


if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=True)
|
[
"[email protected]"
] | |
1352914267c4014c90ed3edb12bed5ff3dcde7ef
|
f237ca25c78fd1c89a08cce60f70fe1dc7206d89
|
/Py/src/ueb/stub_aufg15_brackets_test.py
|
79e64d715d7d8ad0816009e4b162cc49c1032ffa
|
[] |
no_license
|
KonstantinTwardzik/Theoretical-Computer-Science
|
2d08632ad26cd572a853e64b9826e254fd7150ea
|
b33a60ad7a34c090e05e03cf35333ab25aab6238
|
refs/heads/master
| 2021-08-22T23:45:31.724166 | 2017-12-01T18:33:30 | 2017-12-01T18:33:30 | 105,768,326 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 240 |
py
|
# Your solution for exercise 15

import unittest
import ueb.aufg10_valid_brackets as candidate


# Test cases for candidate
class Test_valid_brackets(unittest.TestCase):
    #
    def test_1(self):
        pass  # stub: the test body is left for the exercise
|
[
"[email protected]"
] | |
d81d21379e5af810c27b2b1d3e4c8f32d8faec6d
|
9d454ae0d5dd1d7e96e904ced80ca502019bb659
|
/198_rob.py
|
9c17186c04b2ad05f74577de361aeef0ece28d64
|
[] |
no_license
|
zzz686970/leetcode-2018
|
dad2c3db3b6360662a90ea709e58d7facec5c797
|
16e4343922041929bc3021e152093425066620bb
|
refs/heads/master
| 2021-08-18T08:11:10.153394 | 2021-07-22T15:58:52 | 2021-07-22T15:58:52 | 135,581,395 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 196 |
py
|
def rob(nums):
    ## too naive
    # return max(sum(nums[0::2]), sum(nums[1::2]), sum(nums[0::3], sum(nums[1::3])))
    l = r = 0
    for n in nums:
        l, r = r, max(n + l, r)
    return r


assert 4 == rob([2, 1, 1, 2])
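
# Added note: `l` and `r` track the best haul up to the house before last and
# the last house, so each step applies rob(i) = max(nums[i] + l, r).
# Trace for [2, 1, 1, 2] (values follow the recurrence above):
#   n=2: l, r = 0, max(2+0, 0) -> (0, 2)
#   n=1: l, r = 2, max(1+0, 2) -> (2, 2)
#   n=1: l, r = 2, max(1+2, 2) -> (2, 3)
#   n=2: l, r = 3, max(2+2, 3) -> (3, 4)   # answer: 4 (rob houses 0 and 3)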
|
[
"[email protected]"
] | |
9ff318e046b87d76579e6d5b06d8f22e909203d4
|
1b596568ef6ced06173e60c71f01141682329ac4
|
/version-example
|
0c6ba046e0d538c2d3d1a402526ebec6ad7fb3c5
|
[] |
no_license
|
pfuntner/gists
|
4eb1847ef22d3d9cb1e17e870a8434c376c4dbfc
|
3322c922bd43480b4cc2759b1c31e5c76668c7ef
|
refs/heads/master
| 2020-04-17T08:40:29.444378 | 2019-01-18T16:23:49 | 2019-01-18T16:23:49 | 166,421,209 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,957 |
#! /usr/bin/env python

import os
import re
import sys
import logging
import argparse
import datetime
import subprocess


def run(cmd):
    (rc, stdout, stderr) = (None, '', '')
    if isinstance(cmd, basestring):
        cmd = cmd.split()
    try:
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except Exception as e:
        log.debug('Ignoring `{e!s}` from {cmd}'.format(**locals()))
    else:
        (stdout, stderr) = p.communicate()
        rc = p.wait()
        log.debug('{cmd}: {rc}, {stdout!r}, {stderr!r}'.format(**locals()))
    if (rc == 0) and (not stdout):
        rc = None
    return (rc, stdout, stderr)


def get_version():
    git_used = False
    ret = '?'

    dir = os.path.dirname(sys.argv[0])
    base = os.path.basename(sys.argv[0])
    cwd = os.getcwd()

    try:
        os.chdir(dir)
    except:
        pass
    else:
        (rc, stdout, stderr) = run(['git', 'log', '-1', base])
        """
        commit {SHA1}
        Author: {FIRST_NAME} {LAST_NAME} <{EMAIL_ADDRESS}>
        Date:   Wed Jan 16 09:32:03 2019 -0500
        .
        .
        .
        """
        match = re.search(r'^commit\s+(\S+).*\nDate:\s+(([A-Z][a-z]{2} ){2}[ 0123]\d (\d{2}:){2}\d{2} \d{4})', stdout, re.DOTALL)
        log.debug('`git log -1` search groups: {groups}'.format(groups=match.groups() if match else None))
        if match:
            commit = match.group(1)[:6]
            timestamp = datetime.datetime.strptime(match.group(2), '%a %b %d %H:%M:%S %Y')
            log.debug('timestamp: {timestamp!s}'.format(**locals()))
            (rc, stdout, stderr) = run('git branch')
            match = re.search(r'\*\s(\S+)', stdout, re.DOTALL)
            log.debug('`git branch` search groups: {groups}'.format(groups=match.groups() if match else None))
            if match:
                branch = match.group(1)
                (rc, stdout, stderr) = run('git remote -v')
                """
                origin  https://github.com/pfuntner/gists.git (fetch)
                """
                hits = list(re.finditer(r'(\S+)\s(https?://\S+)\s\(fetch\)', stdout))
                log.debug('`git remote -v` hits: {hits}'.format(hits=[hit.groups() for hit in hits]))
                if hits:
                    hits = ['{name}:{url}'.format(name=hit.group(1), url=hit.group(2)) for hit in hits]
                    ret = '{commit}, {branch}, {timestamp!s}, {hits}'.format(**locals())
                    git_used = True
        os.chdir(cwd)

    if not git_used:
        ret = str(datetime.datetime.fromtimestamp(os.path.getmtime(sys.argv[0])))

    return ret


logging.basicConfig(format='%(asctime)s %(levelname)s %(pathname)s:%(lineno)d %(msg)s')
log = logging.getLogger()
log.setLevel(logging.WARNING)

parser = argparse.ArgumentParser(description='Example of doing a nifty --version')
parser.add_argument('-v', '--verbose', dest='verbose', action='count', help='Print more messages')
parser.add_argument('--version', action='version', version=get_version(), help='See wonderful version information')
args = parser.parse_args()

log.setLevel(logging.WARNING - (args.verbose or 0) * 10)

# print repr(get_version())
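
# Added usage note (assumed invocation; the output shape follows the format
# string built in get_version() above):
#
#   $ ./version-example --version
#   <commit[:6]>, <branch>, <timestamp>, ['origin:https://...']   # inside a git clone
#   <mtime of the script>                                         # otherwise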
|
[
"[email protected]"
] | ||
909120dd6bc0e5a9a8652a04195635298bb7c5b1
|
08c25b97c357d4422ae4e7fc893211d46523b82b
|
/scripts/make_table.py
|
240f81de22750522e8f3633acac713d8196221ce
|
[
"MIT"
] |
permissive
|
trulfos/spatial-benchmarker
|
ecad2e8d89d7e01681df45bddc12df9c7ebced09
|
44ff7f9a60b9a2d5cd98def48ddff43dd0d64220
|
refs/heads/master
| 2021-09-02T04:39:42.450867 | 2017-12-30T11:32:42 | 2017-12-30T11:34:30 | 115,791,961 | 5 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,384 |
py
|
#!/usr/bin/python3
from database import Database
import argparse


def parse_arguments():
    parser = argparse.ArgumentParser(
        description='Make a table similar to the one from the RR* article'
    )

    parser.add_argument(
        'index', metavar='<index name>',
        help='Index for which to generate the table'
    )

    parser.add_argument(
        'suite', metavar='<suite_id>',
        help='Suite to collect numbers from'
    )

    parser.add_argument(
        '--database', '-d', metavar='filename', default='results',
        help='Path to sqlite database file'
    )

    return parser.parse_args()


def to_dict(results):
    return dict((r['name'], r['value']) for r in results)


def main():
    args = parse_arguments()
    db = Database(args.database)

    # Identify relevant runs
    benchmark_runs = db.connection.execute(
        """
        select * from `latest_run`
        inner join `suite_member` using (`benchmark_id`, `config_id`)
        where `index` = ? and `suite_id` = ? and exists (
            select * from reporter
            where `benchmark_id` = `latest_run`.`benchmark_id` and
                `name` in ('struct', 'stats', 'correctness')
        )
        """,
        [args.index, args.suite]
    ).fetchall()

    benchmark_runs.sort(key=lambda x: x['dataset'])

    values = ['benchmark'] \
        + 3 * ['results', 'leafAcc'] \
        + ['Leafs'] + (
            ['Perimspls'] if args.index == 'rtree-rstar' else []
        ) + ['commit', 'id']

    # TODO: Guarantee sort order?
    for run in benchmark_runs:
        values.append(run['dataset'].split('/')[-1])

        reporters = db.connection.execute(
            """
            select * from reporter
            where `benchmark_id` = ? and
                `name` in ('struct', 'stats', 'correctness')
            """,
            (run['benchmark_id'],)
        )

        for reporter in reporters:
            results = to_dict(db.get_where(
                'result',
                run_id=run['run_id'],
                reporter_id=reporter['reporter_id']
            ))

            if reporter['name'] == 'stats':
                values += [
                    '%.2e' % results['results'],
                    '%.2e' % results['leaf_accesses']
                ]
            elif reporter['name'] == 'struct':
                h = results['height']
                values.append('%.2e' % (results['level_2'] / 1000))
                if args.index == 'rtree-rstar':
                    values.append(
                        '%.2e' % (
                            100 * results['perimeter_splits'] /
                            (results['nodes'] - h)
                        )
                    )
            elif reporter['name'] == 'correctness' and len(results):
                print('Incorrect results detected! Go hunt more bugs.')
                exit(1)

        values += [
            run['commit'].decode('utf-8')[:6],
            '%d' % run['benchmark_id']
        ]

    width = len(benchmark_runs) + 1
    height = len(values) // width
    for i in range(0, len(values) // width):
        print('\t'.join(v for v in values[i::height]))


main()
|
[
"[email protected]"
] | |
065afde0ad990602c145f176bbbaf950115db7e7
|
4d03e487b60afc85d1f3372fe43f2a7b081f0e41
|
/file_list/thumbnail_cache.py
|
88b1134b24f906a6286c8193055960e31d5d945b
|
[] |
no_license
|
hal1932/Explorer
|
d051bd0bb09b0952bad35deeeec0d4ad00947666
|
869ce3323aee499048f98f33910fc05126947942
|
refs/heads/master
| 2021-01-19T13:27:22.485124 | 2017-04-18T14:03:17 | 2017-04-18T14:03:17 | 82,392,096 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,203 |
py
|
# encoding: utf-8
from lib import *
import cv2
import os
import threading
import Queue


class ThumbnailCache(QObject):

    load_item_async = Signal()

    def __init__(self, enable_load_async=False):
        super(ThumbnailCache, self).__init__()
        self.__items_dic = {}
        if enable_load_async:
            self.__load_queue = Queue.Queue()
            self.__items_lock = threading.Lock()
            self.__load_thread = threading.Thread(target=self.__load_async_impl)
            self.__load_thread.daemon = True
            self.__load_thread.start()
        self.__enable_async = enable_load_async

    def get_cached_pixmap(self, path):
        if self.__enable_async:
            with self.__items_lock:
                if path not in self.__items_dic:
                    return None
                image = self.__items_dic[path]
            if isinstance(image, QPixmap):
                return image
            height, width, dim = image.shape
            image = QImage(
                image.data,
                width, height, dim * width,
                QImage.Format_RGB888)
            pixmap = QPixmap.fromImage(image)
            with self.__items_lock:
                self.__items_dic[path] = pixmap
            return pixmap
        else:
            if path not in self.__items_dic:
                return None
            return self.__items_dic[path]

    def load(self, path, size):
        if self.__enable_async:
            raise ValueError('load_sync is not enabled')
        if os.path.splitext(path)[1].lower() in ThumbnailCache.__image_exts:
            pixmap = QPixmap(path)
            pixmap_size = qt.fitting_scale_down(size, pixmap.size())
            pixmap = pixmap.scaled(pixmap_size)
        else:
            icon = qt.get_file_icon(path)
            size = icon.actualSize(size)
            pixmap = icon.pixmap(size)
        self.__items_dic[path] = pixmap
        return pixmap

    def load_async(self, path, size):
        if not self.__enable_async:
            raise ValueError('load_async is not enabled')
        if os.path.splitext(path)[1].lower() in ThumbnailCache.__image_exts:
            self.__load_queue.put((path, size))
        else:
            icon = qt.get_file_icon(path)
            size = icon.actualSize(size)
            pixmap = icon.pixmap(size)
            with self.__items_lock:
                self.__items_dic[path] = pixmap

    def __load_async_impl(self):
        while True:
            path, size = self.__load_queue.get()
            image = cv2.imread(path)
            height, width = image.shape[:2]
            if width != size.width() or height != size.height():
                size = qt.fitting_scale_down(size, QSize(width, height))
                image = cv2.resize(image, (size.width(), size.height()))
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            with self.__items_lock:
                self.__items_dic[path] = image
            self.__load_queue.task_done()
            self.load_item_async.emit()
            print(path)

    __initialized = False
    __directory_thumbnail = None
    __image_exts = (u'.png', u'.jpg', u'.jpeg', u'.gif', u'.bmp')
|
[
"[email protected]"
] | |
4f659d9fc0303fc1ebfcc69527ae63585c957b79
|
e397e996e6ecbda3742980352f8642dfb058fbdb
|
/src/nodescripts/corenodes/blend/__init__.py
|
e7c9684c39899c9ddf33aec8b393b96d49b08834
|
[
"Apache-2.0"
] |
permissive
|
dephora/GimelStudio
|
bdcac2155b0021af0a60df4ed4df045a86353ab7
|
0cdaed3ffa93fd735ca8d65a0d99f1be64c2c522
|
refs/heads/master
| 2023-08-13T02:53:46.269443 | 2021-10-08T15:35:39 | 2021-10-08T15:35:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 72 |
py
|
from .mix_node import MixNode
from .alpha_over_node import AlphaOverNode
|
[
"[email protected]"
] | |
0b697bf8ee814996d74fb061231aeabb70a184c9
|
70fa6468c768d4ec9b4b14fc94fa785da557f1b5
|
/lib/surface/compute/ssl_policies/describe.py
|
0546d3f6604bd3a747040e4520dae448783faf92
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
kylewuolle/google-cloud-sdk
|
d43286ef646aec053ecd7eb58566ab2075e04e76
|
75f09ebe779e99fdc3fd13b48621fe12bfaa11aa
|
refs/heads/master
| 2020-04-20T22:10:41.774132 | 2019-01-26T09:29:26 | 2019-01-26T09:29:26 | 169,131,028 | 0 | 0 |
NOASSERTION
| 2019-02-04T19:04:40 | 2019-02-04T18:58:36 |
Python
|
UTF-8
|
Python
| false | false | 2,150 |
py
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to describe SSL policies."""

from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute.ssl_policies import ssl_policies_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.ssl_policies import flags

_SSL_POLICY_ARG = flags.GetSslPolicyArgument()


class Describe(base.DescribeCommand):
  """Describe a Google Compute Engine ssl policy.

  *{command}* is used to display all data associated with a Google Compute
  Engine SSL policy in a project.

  An SSL policy specifies the server-side support for SSL features. An SSL
  policy can be attached to a TargetHttpsProxy or a TargetSslProxy. This affects
  connections between clients and the HTTPS or SSL proxy load balancer. SSL
  policies do not affect the connection between the load balancers and the
  backends.
  """

  @staticmethod
  def Args(parser):
    _SSL_POLICY_ARG.AddArgument(parser, operation_type='describe')

  def Run(self, args):
    """Issues the request to describe a SSL policy."""
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    helper = ssl_policies_utils.SslPolicyHelper(holder)
    ref = _SSL_POLICY_ARG.ResolveAsResource(
        args,
        holder.resources,
        scope_lister=compute_flags.GetDefaultScopeLister(holder.client))
    return helper.Describe(ref)
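
# Added note: given this file's path (lib/surface/compute/ssl_policies/
# describe.py), the CLI surface backed by this class is the standard
#
#   $ gcloud compute ssl-policies describe MY_POLICY
#
# where MY_POLICY is a placeholder for an existing SSL policy name.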
|
[
"[email protected]"
] | |
0666f46140d410d989a463d0a2545a97377dfda1
|
dc349cfb2b18f7abdae47b892c03e2d6bb77fe11
|
/cube_reinforcement_learning/cube2X2/env_cube.py
|
4d0b6bd1651f3f31a94f275f25581acfac0e0a7a
|
[] |
no_license
|
vanviet93/ML
|
6e88cb4db728af000437042b59155c5efaa7de46
|
13336226e91dcb3751caa9fb19fbfb6b916124c5
|
refs/heads/master
| 2023-01-03T23:36:22.109552 | 2020-10-25T14:50:41 | 2020-10-25T14:50:41 | 307,107,860 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,545 |
py
|
# This environment can be applied to a 2x2 or a 3x3 cube
import numpy as np

CUBE_SIZE = 2
REWARD_NOT_SOLVED = -0.10
REWARD_SOLVED = 1.00


class Environment:
    # actions
    ROTATE_FRONT = 0
    ROTATE_RIGHT = 1
    ROTATE_UP = 2
    ROTATE_LEFT = 3
    ROTATE_BACK = 4
    ROTATE_DOWN = 5
    ROTATE_REVERSE_FRONT = 6
    ROTATE_REVERSE_RIGHT = 7
    ROTATE_REVERSE_UP = 8
    ROTATE_REVERSE_LEFT = 9
    ROTATE_REVERSE_BACK = 10
    ROTATE_REVERSE_DOWN = 11

    def __init__(self):
        self._up = np.ones([CUBE_SIZE, CUBE_SIZE], dtype=np.int8) * 1
        self._left = np.ones([CUBE_SIZE, CUBE_SIZE], dtype=np.int8) * 2
        self._back = np.ones([CUBE_SIZE, CUBE_SIZE], dtype=np.int8) * 3
        self._right = np.ones([CUBE_SIZE, CUBE_SIZE], dtype=np.int8) * 4
        self._front = np.ones([CUBE_SIZE, CUBE_SIZE], dtype=np.int8) * 5
        self._down = np.ones([CUBE_SIZE, CUBE_SIZE], dtype=np.int8) * 6
        self.n_action = 12  # only rotate sides clockwise; when displayed, inside faces rotate anti-clockwise
        self.action_space = list(range(self.n_action))
        self.state_size = [CUBE_SIZE * CUBE_SIZE * 6, 6]
        self._lookup_table = self._make_lookup_table()  # converts sticker numbers to one-hot format

    def reset(self):
        self._up = np.ones([CUBE_SIZE, CUBE_SIZE], dtype=np.int8) * 1
        self._left = np.ones([CUBE_SIZE, CUBE_SIZE], dtype=np.int8) * 2
        self._back = np.ones([CUBE_SIZE, CUBE_SIZE], dtype=np.int8) * 3
        self._right = np.ones([CUBE_SIZE, CUBE_SIZE], dtype=np.int8) * 4
        self._front = np.ones([CUBE_SIZE, CUBE_SIZE], dtype=np.int8) * 5
        self._down = np.ones([CUBE_SIZE, CUBE_SIZE], dtype=np.int8) * 6

    def _to_string(self):
        sides = [self._up, self._left, self._back, self._right, self._front, self._down]
        s = ''
        space = ' ' * CUBE_SIZE
        for i in range(CUBE_SIZE):
            s += space
            for j in range(CUBE_SIZE):
                s += '[' + str(sides[0][i, j]) + ']'
            s += '\n'
        for m in range(CUBE_SIZE):
            for i in range(1, 5):
                for n in range(CUBE_SIZE):
                    s += '[' + str(sides[i][m, n]) + ']'
            s += '\n'
        for i in range(CUBE_SIZE):
            s += space
            for j in range(CUBE_SIZE):
                s += '[' + str(sides[5][i, j]) + ']'
            if i != CUBE_SIZE - 1:
                s += '\n'
        return s

    def _make_lookup_table(self):
        return np.float32(np.eye(6))

    def render(self):
        print(self._to_string())

    def sample_action(self):
        return np.random.choice(self.n_action)

    def get_result(self):
        for i in range(CUBE_SIZE):
            for j in range(CUBE_SIZE):
                if self._up[i, j] != 1 or self._left[i, j] != 2 or self._back[i, j] != 3 or self._right[i, j] != 4 or self._front[i, j] != 5 or self._down[i, j] != 6:
                    return REWARD_NOT_SOLVED
        return REWARD_SOLVED

    def get_state(self):
        flattened_sides = np.concatenate([
            self._up.flatten(),
            self._left.flatten(),
            self._back.flatten(),
            self._right.flatten(),
            self._front.flatten(),
            self._down.flatten()])
        return np.take(self._lookup_table, flattened_sides - 1, axis=0)

    # return 1 -> end
    # return 0 -> not end
    def step(self, action):
        if action == Environment.ROTATE_FRONT:
            self._front = np.rot90(self._front, 1)
            temp = self._left[:, 0].copy()
            self._left[:, 0] = self._down[CUBE_SIZE - 1]
            self._down[CUBE_SIZE - 1] = self._right[:, CUBE_SIZE - 1][::-1]
            self._right[:, CUBE_SIZE - 1] = self._up[0]
            self._up[0] = temp[::-1]
        elif action == Environment.ROTATE_REVERSE_FRONT:
            self._front = np.rot90(self._front, 3)
            temp = self._left[:, 0].copy()
            self._left[:, 0] = self._up[0][::-1]
            self._up[0] = self._right[:, CUBE_SIZE - 1]
            self._right[:, CUBE_SIZE - 1] = self._down[CUBE_SIZE - 1][::-1]
            self._down[CUBE_SIZE - 1] = temp
        elif action == Environment.ROTATE_RIGHT:
            self._right = np.rot90(self._right, 1)
            temp = self._front[:, 0].copy()
            self._front[:, 0] = self._down[:, CUBE_SIZE - 1][::-1]
            self._down[:, CUBE_SIZE - 1] = self._back[:, CUBE_SIZE - 1]
            self._back[:, CUBE_SIZE - 1] = self._up[:, CUBE_SIZE - 1]
            self._up[:, CUBE_SIZE - 1] = temp[::-1]
        elif action == Environment.ROTATE_REVERSE_RIGHT:
            self._right = np.rot90(self._right, 3)
            temp = self._front[:, 0].copy()
            self._front[:, 0] = self._up[:, CUBE_SIZE - 1][::-1]
            self._up[:, CUBE_SIZE - 1] = self._back[:, CUBE_SIZE - 1]
            self._back[:, CUBE_SIZE - 1] = self._down[:, CUBE_SIZE - 1]
            self._down[:, CUBE_SIZE - 1] = temp[::-1]
        elif action == Environment.ROTATE_UP:
            self._up = np.rot90(self._up, 1)
            temp = self._front[0].copy()
            self._front[0] = self._right[0]
            self._right[0] = self._back[0]
            self._back[0] = self._left[0]
            self._left[0] = temp
        elif action == Environment.ROTATE_REVERSE_UP:
            self._up = np.rot90(self._up, 3)
            temp = self._front[0].copy()
            self._front[0] = self._left[0]
            self._left[0] = self._back[0]
            self._back[0] = self._right[0]
            self._right[0] = temp
        elif action == Environment.ROTATE_LEFT:
            self._left = np.rot90(self._left, 1)
            temp = self._front[:, CUBE_SIZE - 1].copy()
            self._front[:, CUBE_SIZE - 1] = self._up[:, 0][::-1]
            self._up[:, 0] = self._back[:, 0]
            self._back[:, 0] = self._down[:, 0]
            self._down[:, 0] = temp[::-1]
        elif action == Environment.ROTATE_REVERSE_LEFT:
            self._left = np.rot90(self._left, 3)
            temp = self._front[:, CUBE_SIZE - 1].copy()
            self._front[:, CUBE_SIZE - 1] = self._down[:, 0][::-1]
            self._down[:, 0] = self._back[:, 0]
            self._back[:, 0] = self._up[:, 0]
            self._up[:, 0] = temp[::-1]
        elif action == Environment.ROTATE_BACK:
            self._back = np.rot90(self._back, 1)
            temp = self._left[:, CUBE_SIZE - 1].copy()
            self._left[:, CUBE_SIZE - 1] = self._up[CUBE_SIZE - 1][::-1]
            self._up[CUBE_SIZE - 1] = self._right[:, 0]
            self._right[:, 0] = self._down[0][::-1]
            self._down[0] = temp
        elif action == Environment.ROTATE_REVERSE_BACK:
            self._back = np.rot90(self._back, 3)
            temp = self._left[:, CUBE_SIZE - 1].copy()
            self._left[:, CUBE_SIZE - 1] = self._down[0]
            self._down[0] = self._right[:, 0][::-1]
            self._right[:, 0] = self._up[CUBE_SIZE - 1]
            self._up[CUBE_SIZE - 1] = temp[::-1]
        elif action == Environment.ROTATE_DOWN:
            self._down = np.rot90(self._down, 1)
            temp = self._front[CUBE_SIZE - 1].copy()
            self._front[CUBE_SIZE - 1] = self._left[CUBE_SIZE - 1]
            self._left[CUBE_SIZE - 1] = self._back[CUBE_SIZE - 1]
            self._back[CUBE_SIZE - 1] = self._right[CUBE_SIZE - 1]
            self._right[CUBE_SIZE - 1] = temp
        elif action == Environment.ROTATE_REVERSE_DOWN:
            self._down = np.rot90(self._down, 3)
            temp = self._front[CUBE_SIZE - 1].copy()
            self._front[CUBE_SIZE - 1] = self._right[CUBE_SIZE - 1]
            self._right[CUBE_SIZE - 1] = self._back[CUBE_SIZE - 1]
            self._back[CUBE_SIZE - 1] = self._left[CUBE_SIZE - 1]
            self._left[CUBE_SIZE - 1] = temp
        return self.get_result()

    def undo(self, action):
        if action >= 6:
            action -= 6
        else:
            action += 6
        return self.step(action)

    def get_sides(self):
        return [
            self._up.copy(),
            self._left.copy(),
            self._back.copy(),
            self._right.copy(),
            self._front.copy(),
            self._down.copy()]

    def init_from_sides(self, sides):
        self._up = sides[0]
        self._left = sides[1]
        self._back = sides[2]
        self._right = sides[3]
        self._front = sides[4]
        self._down = sides[5]


'''
env = Environment()
env.reset()
actions = []
for i in range(10):
    action = env.sample_action()
    env.step(action)
    if action >= 6:
        action -= 6
    else:
        action += 6
    actions.insert(0, action)
sides = env.get_sides()
env.reset()
env.render()
env.init_from_sides(sides)
env.render()
for action in actions:
    env.step(action)
env.render()
'''
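
# Added demo: undo() inverts step() (a rotation and its reverse differ by 6 in
# the action encoding), so scrambling and then undoing in reverse order must
# restore the solved state:
#
# env = Environment()
# env.reset()
# actions = [env.sample_action() for _ in range(20)]
# for a in actions:
#     env.step(a)
# for a in reversed(actions):
#     env.undo(a)
# assert env.get_result() == REWARD_SOLVED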
|
[
"[email protected]"
] | |
046c39ef298843d7d823a4400810349921c3ac4a
|
a20e3fbd68ef3a788509bf09f072ad1ff3412735
|
/app/app.py
|
515c21ad88f912246877df8c4046ea39278ae514
|
[] |
no_license
|
datasorcery/genre_classification
|
4b2f4e11a34dd04eeb59a128a2180689c26ecac0
|
5026d76f932a9ea7035945330845f53debf28d1a
|
refs/heads/master
| 2021-01-11T13:53:33.457105 | 2017-06-20T23:01:32 | 2017-06-20T23:01:32 | 94,877,420 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,050 |
py
|
from flask import Flask
from flask import render_template
from flask import request
import os
import string
from sklearn.externals import joblib
from collections import Counter

app = Flask(__name__)

MODEL = os.path.join('.', 'model')
vec = joblib.load(os.path.join(MODEL, 'vetorizador.pkl.bz2'))
mdl = joblib.load(os.path.join(MODEL, 'modelo.pkl.bz2'))
stopwords = joblib.load(os.path.join(MODEL, 'stopwords.pkl.bz2'))
# TODO: pickle stopwords


def prepara_estrofes(frase, translator=str.maketrans('', '', string.punctuation)):
    # Remove punctuation
    frase = frase.translate(translator)
    # Ensure all words are lowercase
    palavras = frase.lower().split()
    # The set of stopwords we will use
    stop_w = set(stopwords)
    # Keep only the words that are not stopwords
    palavras_filtradas = [palavra for palavra in palavras if palavra not in stop_w]
    # Return the prepared verse
    return " ".join(palavras_filtradas)


@app.route('/', methods=['GET', 'POST'])
def index():
    if request.method == 'POST':
        letra_post = request.form['letra']
    else:
        letra_post = 'Digite a letra aqui'
    return render_template('index.html', letra=letra_post)


@app.route('/predict', methods=['POST'])
def predict():
    if request.method == 'POST':
        letra_post = request.form['letra']
        # Build the list of verses
        versos = letra_post.split('\n')
        # Prepare the verses by removing punctuation and stopwords
        versos = [prepara_estrofes(v) for v in versos]
        new_pred = vec.transform(versos).toarray()
        # Predict per verse
        prediction = mdl.predict(new_pred)
        # Find the most common class
        count = Counter(prediction)
        pred_result = count.most_common(1)[0][0]
    return render_template('predict.html', letra=letra_post, pred=pred_result)


#@app.route('/hello/')
#@app.route('/hello/<name>')
#def hello(name=None):
#    return render_template('hello.html', name=name)
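
# Added usage sketch: one way to run this app and exercise /predict. The host,
# port, and flask invocation are assumptions (the module never calls app.run()):
#
#   $ FLASK_APP=app.py flask run   # serves on http://127.0.0.1:5000 by default
#   $ curl -X POST http://127.0.0.1:5000/predict --data-urlencode "letra=minha letra aqui"
#
# The form field name 'letra' matches request.form['letra'] above.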
|
[
"[email protected]"
] | |
35fa6737055a0ebc29fc0f78d895ee7d05208758
|
c138f28eb1bb3be2227911b80af67bc5b3c29685
|
/example/server
|
6f0b9d6eb220a312664a104f7068b3be1d9005f9
|
[] |
no_license
|
sheepslinky/python-websocketd
|
39d892c0ee46fa08a19adeda2ff98e2954911b1d
|
8b6b0edc8a0e9d80d48b78099ce3a78775fa783c
|
refs/heads/master
| 2021-04-15T10:12:12.693499 | 2018-03-17T09:41:37 | 2018-03-17T09:41:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,109 |
#!/usr/bin/python3
# vim: set fileencoding=utf-8 foldmethod=marker :

# {{{ Copyright 2013-2016 Bas Wijnen <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# }}}

import websocketd
from gi.repository import GLib


class Rpc:
    'An object of this class is instanced for every accepted WebSocket.'

    def __init__(self, remote):
        'remote is an object like this one, to call remote functions on.'
        self.remote = remote

    def test1(self, argument):
        'This function is called from the client.'
        print('test1 called with %s' % argument)
        # Return a value.
        return 'a value'

    def test2(self):
        'This function is called from the client and makes a call to the client.'
        print('test2 called; sending quit in two seconds.')
        GLib.timeout_add_seconds(2, self.remote.quit)


# Run the server on port 1234, serving html pages from the current directory.
# The Rpc class is used for handling WebSockets.
# Note that the server defaults to https connections; for this example,
# encryption is not used. This is done by adding 'tls = False' as an
# argument.
# In normal use, encryption should almost always be used. The network module
# automatically generates an encryption key for you and puts it in
# ~/.local/share/network/. If you need a certificate for your encryption key,
# see http://letsencrypt.org.
server = websocketd.RPChttpd(1234, Rpc, httpdirs = ('.',), tls = False)

# Tell the user that things are set up.
websocketd.log('running')

# Run the main loop.
websocketd.fgloop()
|
[
"[email protected]"
] | ||
b7aed369d0aa8d680c47f718ba7ec76754ae21c9
|
6ee33870daa76b5188cb41851b0d73a8fc08e583
|
/Case 2 -- Muddy River/Other Code/simu.py
|
a397c186f9e532c622c33dbb31aecd665624d087
|
[] |
no_license
|
kid3night/FinancialEngineerCaseStudies
|
423f0e598905f432f6c7fac4f69c1b4940a864e8
|
73b3b30b0db91b2f44bb63b81713e39b93ad8219
|
refs/heads/master
| 2020-08-23T23:17:07.278735 | 2019-10-22T04:19:07 | 2019-10-22T04:19:07 | 216,722,062 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,266 |
py
|
# Note: the numpy and scipy.stats imports below are added here; the original
# file relied on them being in scope. `valuation`, `upper_` and `lower_` are
# still expected to be provided by the surrounding project.
import numpy as np
from scipy.stats import norm, uniform


def new_path(eval_para_pos, n=5000):
    # N_month = 97
    N = 24
    N *= 4
    dt = 1 / 48
    thetap1 = [39.28, 40.65, 40.48, 42.75, 46.62, 71.88, 86.92, 103.63, 60.78, 35.55, 39.67, 44.47]
    thetag1 = [2.51, 2.49, 2.66, 2.59, 5.28, 6.33, 6.39, 5.93, 3.12, 2.73, 2.02, 3.07]
    p_theta_std = np.array([1.45, 1.92, 2.36, 2.65, 5.01, 8.66, 12.21, 13.79, 12.78, 14.01, 12.77, 10.9])
    g_theta_std = np.array([0.17, 0.2, 0.28, 0.4, 0.43, 0.67, 0.48, 0.53, 0.45, 0.78, 0.84, 0.71])
    variable_list = [7, 20, 3, 75, 0.083, 35, 0, 3, 75, 7.22, 0.3, 28.87, 10.83, np.array(thetap1), np.array(thetag1)]
    variables_names = ['alphaCC', 'alphaJC', 'alphaG',
                       'm', 'p', 'CC0', 'JC0', 'G0',
                       'spike_thres', 'sigmaG', 'rho',
                       'v_summer', 'v_winter', 'PowerTheta', 'GasTheta']
    vec_res_p = list()
    vec_res_g = list()
    month_list = [i for i in range(1, 98)]
    num_ses = np.arange(-3, 3.2, 0.2)
    len_muls = len(num_ses)
    result_ = list()
    for num_se in num_ses:
        # One consistent if/elif chain (the original mixed `if` and `elif`,
        # which let the final `else` overwrite the cases for positions 0-2).
        if eval_para_pos == 0:
            new_variable = 7 - num_se * 0.5
        elif eval_para_pos == 1:
            new_variable = 20 - num_se * 1.3
        elif eval_para_pos == 2:
            new_variable = 3 - num_se * 0.2
        elif eval_para_pos == 3:
            new_variable = 75 - num_se * 5
        elif eval_para_pos == 4:
            new_variable = 0.083 - num_se * 0.005
        elif eval_para_pos == 9:
            new_variable = 7.22 - num_se * 0.05
        elif eval_para_pos == 10:
            new_variable = 0.3 - num_se * 0.05
        elif eval_para_pos == 11:
            new_variable = 28.87 - num_se * 2
        elif eval_para_pos == 12:
            new_variable = 10.83 - num_se * 2
        elif eval_para_pos == 13:
            new_variable = variable_list[13] - num_se * p_theta_std
        elif eval_para_pos == 14:
            new_variable = variable_list[14] - num_se * g_theta_std
        else:
            new_variable = variable_list[eval_para_pos]
        variable_list[eval_para_pos] = new_variable

        alphaCC = variable_list[0]
        alphaJC = variable_list[1]
        alphaG = variable_list[2]
        m = variable_list[3]
        p = variable_list[4]
        CC0 = variable_list[5]
        JC0 = variable_list[6]
        G0 = variable_list[7]
        spike_thres = variable_list[8]
        sigmaG = variable_list[9] / np.sqrt(dt) / 100
        rho = variable_list[10]
        v_summer = variable_list[11] / np.sqrt(dt) / 100
        v_winter = variable_list[12] / np.sqrt(dt) / 100
        PowerTheta = variable_list[13]
        GasTheta = variable_list[14]

        V = np.zeros((N + 1, n))
        W = norm.rvs(size=(N + 1, n)) * np.sqrt(dt)
        Wtilde = norm.rvs(size=(N + 1, n)) * np.sqrt(dt)
        B = rho * W + np.sqrt(1 - rho ** 2) * Wtilde
        CC = np.zeros((N + 1, n))
        CC[0, :] = CC0
        JC = np.zeros((N + 1, n))
        JC[0, :] = JC0
        G = np.zeros((N + 1, n))
        G[0, :] = G0
        PC = np.zeros((N + 1, n))
        PC[0, :] = CC[0, :]
        Power_MSE, Gas_MSE = 0, 0
        Power_Price_Fit, Gas_Price_Fit, CC_Price_Fit, JC_Price_Fit = list(), list(), list(), list()
        for i in range(1, N + 1):
            month1 = month_list[i] % 12
            monthIndicator = (month1 > 3) & (month1 < 8)
            V[i, :] = monthIndicator * v_summer + (1 - monthIndicator) * v_winter
            CC[i, :] = alphaCC * (thetap1[month1 - 1] - CC[i - 1, :]) * dt + V[i, :] * CC[i - 1, :] * W[i, :] + CC[i - 1, :]
            JC[i, :] = alphaJC * (0 - JC[i - 1, :]) * dt + m * (uniform.rvs() < p) + JC[i - 1, :]
            # Power price
            PC[i, :] = CC[i, :] + JC[i, :] * (PC[i - 1, :] > spike_thres)
            # Gas price
            G[i, :] = alphaG * (thetag1[month1 - 1] - G[i - 1, :]) * dt + sigmaG * G[i - 1, :] * B[i, :] + G[i - 1, :]
        result_.append(valuation(PC.T, G.T, upper_, lower_))
    results = np.array(result_) / result_[int(len_muls / 2)]
    return (variables_names[eval_para_pos], np.mean(np.abs(results - 1)), result_[int(len_muls / 2)])
    # plt.plot(num_ses, results, 'o')
    # plt.title('Final Profit Sensitivity Analysis - {}'.format(variables_names[eval_para_pos]))
    # plt.xlabel('multiplier of SE')
    # plt.ylabel('Profit')
    # return PC, G
|
[
"[email protected]"
] | |
518bc4aa64f4e5aac711a4ed163b4a5f8f2a09f8
|
0cf269af0e6f8266c26b3bc68e57368e8c3d9edb
|
/src/outpost/django/thesis/migrations/0002_discipline_doctoralschool_thesis.py
|
4dd83c63267a93ce7139bdb7ee8f8290691ea608
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
medunigraz/outpost.django.thesis
|
c1518aa516d2177b0cacf381432bcdde41f2b5e1
|
1f0dbaa6edb6d91216d9bd97c79ee8b3bbc153cc
|
refs/heads/master
| 2021-09-25T16:47:59.469921 | 2020-08-04T19:16:07 | 2020-08-04T19:16:07 | 184,580,281 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,819 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-08 09:47
from __future__ import unicode_literals

import django.contrib.postgres.fields
from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [("thesis", "0001_initial")]

    operations = [
        migrations.CreateModel(
            name="Discipline",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("name", models.CharField(blank=True, max_length=256, null=True)),
                ("number", models.CharField(blank=True, max_length=256, null=True)),
                ("thesistype", models.CharField(blank=True, max_length=256, null=True)),
            ],
            options={"db_table": "thesis_discipline", "managed": False},
        ),
        migrations.CreateModel(
            name="DoctoralSchool",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("name", models.CharField(blank=True, max_length=256, null=True)),
                (
                    "emails",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=models.EmailField(
                            blank=True, max_length=254, null=True
                        ),
                        size=None,
                    ),
                ),
            ],
            options={"db_table": "thesis_doctoralschool", "managed": False},
        ),
        migrations.CreateModel(
            name="Thesis",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("topic", models.CharField(blank=True, max_length=256, null=True)),
                ("created", models.DateTimeField(blank=True, null=True)),
                ("description", models.TextField(blank=True, null=True)),
                ("prerequisites", models.TextField(blank=True, null=True)),
                ("processstart", models.DateTimeField(blank=True, null=True)),
                ("goals", models.TextField(blank=True, null=True)),
                ("hypothesis", models.TextField(blank=True, null=True)),
                ("methods", models.TextField(blank=True, null=True)),
                ("schedule", models.TextField(blank=True, null=True)),
                (
                    "milestones",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=models.TextField(blank=True, null=True), size=None
                    ),
                ),
            ],
            options={
                "db_table": "thesis_thesis",
                "permissions": (("view_thesis", "View thesis"),),
                "managed": False,
            },
        ),
    ]
|
[
"[email protected]"
] | |
1f3d51045015821dd085d687dab26a5d6b6a1d8c
|
ac7c7f114f9ae9086c29f13fe81e9294d0fbb9cc
|
/py/collections_deque.py
|
f91a65c21cf7002d2255ffc1b0a22da080be87b0
|
[] |
no_license
|
yyltwin/backupFile
|
fa5fe5dd519ea17b1a83cceacf8f052e5405d7e5
|
0f13a1fbab09d42955b80b424186da213074fc6a
|
refs/heads/master
| 2020-04-27T16:45:44.008040 | 2019-03-08T07:51:36 | 2019-03-08T07:51:36 | 174,492,502 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,133 |
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# deque , 使用c语言实现
# 可以从队列前和后操作队列
# queue.Queue 内部使用 deque 实现
from collections import deque
# -----------
# deque 是线程安全的
# -----------
# 双端队列 deque 已经在字节码级别上达到 线程安全
from collections import deque
import time
nums = deque(maxlen=30) # 设置最大长度
def get(nums_p):
while 1:
try:
n = nums_p.pop()
if n is None:
break
time.sleep(0.5)
print("get func :", n)
except IndexError:
pass
def put(nums_p):
for i in range(10):
time.sleep(0.3)
nums_p.append(i)
nums_p.append(None)
print(nums_p)
if __name__ == '__main__':
# t1 = threading.Thread(target=put, args=(nums,))
# t1.start()
#
# t2 = threading.Thread(target=get, args=(nums,))
# t2.start()
#
# t2.join()
# t1.join()
pass
# 取文件 最后 maxLen 行,
# maxLen 固定长度时, 后入的数据会把先入的挤出去,
def tail(file, n=10):
with open(file) as f:
return deque(f, maxlen=10)
# print(tail("read_file"))
user_list1 = deque(["wp1", "wp2"]) # 传入字典时默认取key值存入
user_list2 = deque(["wp3", "wp4"])
user_list2.reverse() # 反转
user_list2.count("wp3") # 统计个数
user_list2.rotate(3) # 旋转移动元素 正数向右, 负数向左
user_list2.rotate(-3)
# extend ; extendleft 对当前元素进行修改
user_list1.extend(user_list2)
user_list1.extendleft(user_list2)
'''
from collections import deque
def search(lines, pattern, history=5):
previous_lines = deque(maxlen=history)
for li in lines:
if pattern in li:
yield li, previous_lines
previous_lines.append(li)
# Example use on a file
if __name__ == '__main__':
with open(r'read_file') as f:
for line, prevlines in search(f, 'python', 5):
for pline in prevlines:
print(pline, end='')
print(line, end='')
print('-' * 20)
'''
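# A minimal sketch added for illustration (not in the original file),
# showing the maxlen/rotate semantics described above:
demo = deque(range(5), maxlen=5)  # deque([0, 1, 2, 3, 4])
demo.rotate(2)                    # deque([3, 4, 0, 1, 2]) - elements move right
demo.append(9)                    # maxlen reached, so 3 falls off the left end
print(demo)                       # deque([4, 0, 1, 2, 9], maxlen=5)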
|
[
"[email protected]"
] | |
6dd2391b1a566786d194913b72e160c7c445f124
|
79f6873839b54a21dff11ceeef5160e7b9330864
|
/Project Euler/009.py
|
4ebd2beb7a9bb309da5bd351727beba8a2474343
|
[] |
no_license
|
paperwraith/Project_Euler
|
45c0890a8e42d0277712da8289972c4d2542c663
|
07d27231f0137bee0b419e5474173ef086ae529e
|
refs/heads/main
| 2023-08-11T14:10:39.373972 | 2021-09-25T23:11:42 | 2021-09-25T23:11:42 | 410,400,150 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 368 |
py
|
# A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
# a^2 + b^2 = c^2
# For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
# There exists exactly one Pythagorean triplet for which a + b + c = 1000.
# Find the product abc.
# TODO:
# Generate Triples
# Check if sum of triples equals 1000
def triple_gen():
    # Completed from the stub using Euclid's formula:
    # a = m*m - n*n, b = 2*m*n, c = m*m + n*n for every m > n >= 1.
    m, n = 2, 1
    while True:
        yield m * m - n * n, 2 * m * n, m * m + n * n
        n += 1
        if n == m:
            m, n = m + 1, 1
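# Driver added as one possible completion: under Euclid's formula
# a + b + c = 2m(m + n), so the target sum 1000 is reached at m = 20, n = 5.
for a, b, c in triple_gen():
    if a + b + c == 1000:
        print(a * b * c)  # 31875000
        break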
|
[
"[email protected]"
] | |
0caab767fb4e0d37798b0ac6117ce887812f3619
|
1cf63e84bf8c87414baed3983b2fa4a07ac7c338
|
/app1/ml_load_graph.py
|
16d7518062d969f49e728392bf98560d1668fba4
|
[] |
no_license
|
mahesh-dilhan/deep-learning
|
d941349bf09765b3b14b7542268eeaf5d03329d6
|
427446a7c9d151d33629980c496cdaaae990ab5c
|
refs/heads/master
| 2023-06-07T06:46:51.609322 | 2021-07-04T07:43:21 | 2021-07-04T07:43:21 | 382,750,175 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,157 |
py
|
# Load libraries
from pandas import read_csv
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
# visualize the data
# Load dataset
url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/iris.csv"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = read_csv(url, names=names)
# box and whisker plots
dataset.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)
pyplot.show()
# histograms
dataset.hist()
pyplot.show()
# scatter plot matrix
scatter_matrix(dataset)
pyplot.show()
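# The unused sklearn imports above suggest a modeling step was planned; a
# minimal sketch of that step (added here as an assumption, not original code):
array = dataset.values
X, y = array[:, 0:4], array[:, 4]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
model = GaussianNB().fit(X_train, y_train)
print(accuracy_score(y_test, model.predict(X_test)))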
|
[
"[email protected]"
] | |
a8687da378ff2d8a5dc8dd39449c21aaa0e7210e
|
7c6e8857b42615a8517f5660d15bd7d0e41afdd5
|
/proxy/proxyauth.cgi
|
284c6689d84517de615da4760419969d53d17f18
|
[] |
no_license
|
geobretagne/cacheadmin
|
12c32a3c1cb74048fc7fe962dec7665fd9ca8afa
|
4589c2be8320d5461149533b35a63796aaff2d72
|
refs/heads/master
| 2020-06-01T12:38:57.229588 | 2014-04-30T14:44:27 | 2014-04-30T14:44:27 | 17,907,193 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,596 |
cgi
|
#!/usr/bin/env python
"""This is a blind proxy that we use to get around browser
restrictions that prevent the Javascript from loading pages not on the
same server as the Javascript. This has several problems: it's less
efficient, it might break some sites, and it's a security risk because
people can use this proxy to browse the web and possibly do bad stuff
with it. It only loads pages via http and https, but it can load any
content type. It supports GET and POST requests."""
import urllib2
import cgi
import sys, os
user =""
pwd=""
# Designed to prevent Open Proxy type stuff.
allowedHosts = ['www.openlayers.org', 'openlayers.org',
'labs.metacarta.com', 'world.freemap.in',
'prototype.openmnnd.org', 'geo.openplans.org',
'sigma.openplans.org', 'demo.opengeo.org',
'www.openstreetmap.org', 'sample.azavea.com',
'v2.suite.opengeo.org', 'v-swe.uni-muenster.de:8080',
'vmap0.tiles.osgeo.org', 'www.openrouteservice.org',
'geobretagne.fr', 'test.geobretagne.fr',
'tile.geobretagne.fr', 'osm.geobretagne.fr']
method = os.environ["REQUEST_METHOD"]
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
top_level_url = ""
password_mgr.add_password(None, top_level_url, user, pwd)
opener = urllib2.build_opener(
urllib2.HTTPHandler(),
urllib2.HTTPSHandler(),
urllib2.HTTPBasicAuthHandler(password_mgr))
urllib2.install_opener(opener)
if method == "POST":
qs = os.environ["QUERY_STRING"]
d = cgi.parse_qs(qs)
if d.has_key("url"):
params = d["url"][0].split("@")
url = params[0]
if len(params)>1:
user = params[1]
pwd = params[2]
else:
url = "http://www.openlayers.org"
else:
fs = cgi.FieldStorage()
tmp = fs.getvalue('url', "http://www.openlayers.org")
params = tmp.split("@")
url = params[0]
if len(params)>1:
user = params[1]
pwd = params[2]
try:
tmp = url.split("/")
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
top_level_url = "http://" + tmp[2] + "/"
password_mgr.add_password(None, top_level_url, user, pwd)
opener = urllib2.build_opener(
urllib2.HTTPHandler(),
urllib2.HTTPSHandler(),
urllib2.HTTPBasicAuthHandler(password_mgr))
urllib2.install_opener(opener)
host = url.split("/")[2]
if allowedHosts and not host in allowedHosts:
print "Status: 502 Bad Gateway"
print "Content-Type: text/plain"
print
print "This proxy does not allow you to access that location (%s)." % (host,)
print
print os.environ
elif url.startswith("http://") or url.startswith("https://"):
if method == "POST":
length = int(os.environ["CONTENT_LENGTH"])
headers = {"Content-Type": os.environ["CONTENT_TYPE"]}
body = sys.stdin.read(length)
r = urllib2.Request(url, body, headers)
y = urllib2.urlopen(r)
else:
y = urllib2.urlopen(url)
# print content type header
i = y.info()
if i.has_key("Content-Type"):
print "Content-Type: %s" % (i["Content-Type"])
else:
print "Content-Type: text/plain"
print
print y.read()
y.close()
else:
print "Content-Type: text/plain"
print
print "Illegal request."
except Exception, E:
print "Status: 500 Unexpected Error"
print "Content-Type: text/plain"
print
print "Erreur:", E
|
[
"[email protected]"
] | |
1433ed9a66cf8f030d0107507d432670a7d51f0f
|
58baf0dd6a9aa51ef5a7cf4b0ee74c9cb0d2030f
|
/tools/testrunner/standard_runner.py
|
a59fe0839665fe1699fff41e3e9e4b837c952af2
|
[
"bzip2-1.0.6",
"BSD-3-Clause",
"SunPro"
] |
permissive
|
eachLee/v8
|
cce8d6e620625c97a2e969ee8a52cc5eb77444ce
|
1abeb0caa21301f5ace7177711c4f09f2d6447d9
|
refs/heads/master
| 2021-08-14T08:21:44.549890 | 2017-11-14T20:35:38 | 2017-11-14T23:06:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 21,667 |
py
|
#!/usr/bin/env python
#
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import OrderedDict
from os.path import join
import multiprocessing
import os
import random
import shlex
import subprocess
import sys
import time
# Adds testrunner to the path, hence it has to be imported at the beginning.
import base_runner
from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.local.variants import ALL_VARIANTS
from testrunner.objects import context
TIMEOUT_DEFAULT = 60
# Variants ordered by expected runtime (slowest first).
VARIANTS = ["default"]
MORE_VARIANTS = [
"stress",
"stress_incremental_marking",
"nooptimization",
"stress_asm_wasm",
"wasm_traps",
]
EXHAUSTIVE_VARIANTS = MORE_VARIANTS + VARIANTS
VARIANT_ALIASES = {
# The default for developer workstations.
"dev": VARIANTS,
# Additional variants, run on all bots.
"more": MORE_VARIANTS,
# TODO(machenbach): Deprecate this after the step is removed on infra side.
# Additional variants, run on a subset of bots.
"extra": [],
}
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
"--concurrent-recompilation-queue-length=64",
"--concurrent-recompilation-delay=500",
"--concurrent-recompilation"]
# Double the timeout for these:
SLOW_ARCHS = ["arm",
"mips",
"mipsel",
"mips64",
"mips64el",
"s390",
"s390x",
"arm64"]
class StandardTestRunner(base_runner.BaseTestRunner):
def __init__(self):
super(StandardTestRunner, self).__init__()
self.sancov_dir = None
def _do_execute(self, options, args):
if options.swarming:
      # Swarming doesn't print how isolated commands are called. Let's make
      # this less cryptic by printing it ourselves.
print ' '.join(sys.argv)
if utils.GuessOS() == "macos":
# TODO(machenbach): Temporary output for investigating hanging test
# driver on mac.
print "V8 related processes running on this host:"
try:
print subprocess.check_output(
"ps -e | egrep 'd8|cctest|unittests'", shell=True)
except Exception:
pass
suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))
# Use default tests if no test configuration was provided at the cmd line.
if len(args) == 0:
args = ["default"]
# Expand arguments with grouped tests. The args should reflect the list
# of suites as otherwise filters would break.
def ExpandTestGroups(name):
if name in base_runner.TEST_MAP:
return [suite for suite in base_runner.TEST_MAP[name]]
else:
return [name]
args = reduce(lambda x, y: x + y,
[ExpandTestGroups(arg) for arg in args],
[])
args_suites = OrderedDict() # Used as set
for arg in args:
args_suites[arg.split('/')[0]] = True
suite_paths = [ s for s in args_suites if s in suite_paths ]
suites = []
for root in suite_paths:
suite = testsuite.TestSuite.LoadTestSuite(
os.path.join(base_runner.BASE_DIR, "test", root))
if suite:
suites.append(suite)
for s in suites:
s.PrepareSources()
try:
return self._execute(args, options, suites)
except KeyboardInterrupt:
return 2
def _add_parser_options(self, parser):
parser.add_option("--sancov-dir",
help="Directory where to collect coverage data")
parser.add_option("--cfi-vptr",
help="Run tests with UBSAN cfi_vptr option.",
default=False, action="store_true")
parser.add_option("--novfp3",
help="Indicates that V8 was compiled without VFP3"
" support",
default=False, action="store_true")
parser.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
parser.add_option("--slow-tests",
help="Regard slow tests (run|skip|dontcare)",
default="dontcare")
parser.add_option("--pass-fail-tests",
help="Regard pass|fail tests (run|skip|dontcare)",
default="dontcare")
parser.add_option("--gc-stress",
help="Switch on GC stress mode",
default=False, action="store_true")
parser.add_option("--command-prefix",
help="Prepended to each shell command used to run a"
" test",
default="")
parser.add_option("--extra-flags",
help="Additional flags to pass to each test command",
action="append", default=[])
parser.add_option("--isolates", help="Whether to test isolates",
default=False, action="store_true")
parser.add_option("-j", help="The number of parallel tasks to run",
default=0, type="int")
parser.add_option("--no-harness", "--noharness",
help="Run without test harness of a given suite",
default=False, action="store_true")
parser.add_option("--no-presubmit", "--nopresubmit",
help='Skip presubmit checks (deprecated)',
default=False, dest="no_presubmit", action="store_true")
parser.add_option("--no-sorting", "--nosorting",
help="Don't sort tests according to duration of last"
" run.",
default=False, dest="no_sorting", action="store_true")
parser.add_option("--no-variants", "--novariants",
help="Don't run any testing variants",
default=False, dest="no_variants", action="store_true")
parser.add_option("--variants",
help="Comma-separated list of testing variants;"
" default: \"%s\"" % ",".join(VARIANTS))
parser.add_option("--exhaustive-variants",
default=False, action="store_true",
help="Use exhaustive set of default variants:"
" \"%s\"" % ",".join(EXHAUSTIVE_VARIANTS))
parser.add_option("-p", "--progress",
help=("The style of progress indicator"
" (verbose, dots, color, mono)"),
choices=progress.PROGRESS_INDICATORS.keys(),
default="mono")
parser.add_option("--quickcheck", default=False, action="store_true",
help=("Quick check mode (skip slow tests)"))
parser.add_option("--report", help="Print a summary of the tests to be"
" run",
default=False, action="store_true")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
parser.add_option("--flakiness-results",
help="Path to a file for storing flakiness json.")
parser.add_option("--rerun-failures-count",
help=("Number of times to rerun each failing test case."
" Very slow tests will be rerun only once."),
default=0, type="int")
parser.add_option("--rerun-failures-max",
help="Maximum number of failing test cases to rerun.",
default=100, type="int")
parser.add_option("--shard-count",
help="Split testsuites into this number of shards",
default=1, type="int")
parser.add_option("--shard-run",
help="Run this shard from the split up tests.",
default=1, type="int")
parser.add_option("--dont-skip-slow-simulator-tests",
help="Don't skip more slow tests when using a"
" simulator.",
default=False, action="store_true",
dest="dont_skip_simulator_slow_tests")
parser.add_option("--swarming",
help="Indicates running test driver on swarming.",
default=False, action="store_true")
parser.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
parser.add_option("-t", "--timeout", help="Timeout in seconds",
default=TIMEOUT_DEFAULT, type="int")
parser.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
parser.add_option("--junitout", help="File name of the JUnit output")
parser.add_option("--junittestsuite",
help="The testsuite name in the JUnit output file",
default="v8tests")
parser.add_option("--random-seed", default=0, dest="random_seed",
help="Default seed for initializing random generator",
type=int)
parser.add_option("--random-seed-stress-count", default=1, type="int",
dest="random_seed_stress_count",
help="Number of runs with different random seeds")
def _process_options(self, options):
global VARIANTS
if options.sancov_dir:
self.sancov_dir = options.sancov_dir
if not os.path.exists(self.sancov_dir):
print("sancov-dir %s doesn't exist" % self.sancov_dir)
raise base_runner.TestRunnerError()
options.command_prefix = shlex.split(options.command_prefix)
options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
if options.gc_stress:
options.extra_flags += GC_STRESS_FLAGS
if self.build_config.asan:
options.extra_flags.append("--invoke-weak-callbacks")
options.extra_flags.append("--omit-quit")
if options.novfp3:
options.extra_flags.append("--noenable-vfp3")
if options.exhaustive_variants:
# This is used on many bots. It includes a larger set of default
# variants.
# Other options for manipulating variants still apply afterwards.
VARIANTS = EXHAUSTIVE_VARIANTS
# TODO(machenbach): Figure out how to test a bigger subset of variants on
# msan.
if self.build_config.msan:
VARIANTS = ["default"]
if options.j == 0:
options.j = multiprocessing.cpu_count()
if options.random_seed_stress_count <= 1 and options.random_seed == 0:
options.random_seed = self._random_seed()
def excl(*args):
"""Returns true if zero or one of multiple arguments are true."""
return reduce(lambda x, y: x + y, args) <= 1
if not excl(options.no_variants, bool(options.variants)):
print("Use only one of --no-variants or --variants.")
raise base_runner.TestRunnerError()
if options.quickcheck:
VARIANTS = ["default", "stress"]
options.slow_tests = "skip"
options.pass_fail_tests = "skip"
if options.no_variants:
VARIANTS = ["default"]
if options.variants:
VARIANTS = options.variants.split(",")
# Resolve variant aliases.
VARIANTS = reduce(
list.__add__,
(VARIANT_ALIASES.get(v, [v]) for v in VARIANTS),
[],
)
if not set(VARIANTS).issubset(ALL_VARIANTS):
print "All variants must be in %s" % str(ALL_VARIANTS)
raise base_runner.TestRunnerError()
if self.build_config.predictable:
VARIANTS = ["default"]
options.extra_flags.append("--predictable")
options.extra_flags.append("--verify_predictable")
options.extra_flags.append("--no-inline-new")
# Dedupe.
VARIANTS = list(set(VARIANTS))
def CheckTestMode(name, option):
if not option in ["run", "skip", "dontcare"]:
print "Unknown %s mode %s" % (name, option)
raise base_runner.TestRunnerError()
CheckTestMode("slow test", options.slow_tests)
CheckTestMode("pass|fail test", options.pass_fail_tests)
if self.build_config.no_i18n:
base_runner.TEST_MAP["bot_default"].remove("intl")
base_runner.TEST_MAP["default"].remove("intl")
def _setup_env(self):
super(StandardTestRunner, self)._setup_env()
symbolizer_option = self._get_external_symbolizer_option()
if self.sancov_dir:
os.environ['ASAN_OPTIONS'] = ":".join([
'coverage=1',
'coverage_dir=%s' % self.sancov_dir,
symbolizer_option,
"allow_user_segv_handler=1",
])
def _random_seed(self):
seed = 0
while not seed:
seed = random.SystemRandom().randint(-2147483648, 2147483647)
return seed
def _execute(self, args, options, suites):
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_name))
# Populate context object.
# Simulators are slow, therefore allow a longer timeout.
if self.build_config.arch in SLOW_ARCHS:
options.timeout *= 2
options.timeout *= self.mode_options.timeout_scalefactor
if self.build_config.predictable:
# Predictable mode is slower.
options.timeout *= 2
ctx = context.Context(self.build_config.arch,
self.mode_options.execution_mode,
self.outdir,
self.mode_options.flags,
options.verbose,
options.timeout,
options.isolates,
options.command_prefix,
options.extra_flags,
self.build_config.no_i18n,
options.random_seed,
options.no_sorting,
options.rerun_failures_count,
options.rerun_failures_max,
self.build_config.predictable,
options.no_harness,
use_perf_data=not options.swarming,
sancov_dir=self.sancov_dir)
# TODO(all): Combine "simulator" and "simulator_run".
# TODO(machenbach): In GN we can derive simulator run from
# target_arch != v8_target_arch in the dumped build config.
simulator_run = (
not options.dont_skip_simulator_slow_tests and
self.build_config.arch in [
'arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', 'ppc',
'ppc64', 's390', 's390x'] and
bool(base_runner.ARCH_GUESS) and
self.build_config.arch != base_runner.ARCH_GUESS)
# Find available test suites and read test cases from them.
variables = {
"arch": self.build_config.arch,
"asan": self.build_config.asan,
"byteorder": sys.byteorder,
"dcheck_always_on": self.build_config.dcheck_always_on,
"deopt_fuzzer": False,
"gc_fuzzer": False,
"gc_stress": options.gc_stress,
"gcov_coverage": self.build_config.gcov_coverage,
"isolates": options.isolates,
"mode": self.mode_options.status_mode,
"msan": self.build_config.msan,
"no_harness": options.no_harness,
"no_i18n": self.build_config.no_i18n,
"no_snap": self.build_config.no_snap,
"novfp3": options.novfp3,
"predictable": self.build_config.predictable,
"simulator": utils.UseSimulator(self.build_config.arch),
"simulator_run": simulator_run,
"system": utils.GuessOS(),
"tsan": self.build_config.tsan,
"ubsan_vptr": self.build_config.ubsan_vptr,
}
all_tests = []
num_tests = 0
for s in suites:
s.ReadStatusFile(variables)
s.ReadTestCases(ctx)
if len(args) > 0:
s.FilterTestCasesByArgs(args)
all_tests += s.tests
# First filtering by status applying the generic rules (independent of
# variants).
s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
options.pass_fail_tests)
if options.cat:
verbose.PrintTestSource(s.tests)
continue
variant_gen = s.CreateVariantGenerator(VARIANTS)
variant_tests = [ t.CopyAddingFlags(v, flags)
for t in s.tests
for v in variant_gen.FilterVariantsByTest(t)
for flags in variant_gen.GetFlagSets(t, v) ]
if options.random_seed_stress_count > 1:
# Duplicate test for random seed stress mode.
def iter_seed_flags():
for _ in range(0, options.random_seed_stress_count):
# Use given random seed for all runs (set by default in
# execution.py) or a new random seed if none is specified.
if options.random_seed:
yield []
else:
yield ["--random-seed=%d" % self._random_seed()]
s.tests = [
t.CopyAddingFlags(t.variant, flags)
for t in variant_tests
for flags in iter_seed_flags()
]
else:
s.tests = variant_tests
# Second filtering by status applying the variant-dependent rules.
s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
options.pass_fail_tests, variants=True)
s.tests = self._shard_tests(s.tests, options)
num_tests += len(s.tests)
if options.cat:
return 0 # We're done here.
if options.report:
verbose.PrintReport(all_tests)
# Run the tests.
start_time = time.time()
progress_indicator = progress.IndicatorNotifier()
progress_indicator.Register(
progress.PROGRESS_INDICATORS[options.progress]())
if options.junitout:
progress_indicator.Register(progress.JUnitTestProgressIndicator(
options.junitout, options.junittestsuite))
if options.json_test_results:
progress_indicator.Register(progress.JsonTestProgressIndicator(
options.json_test_results,
self.build_config.arch,
self.mode_options.execution_mode,
ctx.random_seed))
if options.flakiness_results:
progress_indicator.Register(progress.FlakinessTestProgressIndicator(
options.flakiness_results))
runner = execution.Runner(suites, progress_indicator, ctx)
exit_code = runner.Run(options.j)
overall_duration = time.time() - start_time
if options.time:
verbose.PrintTestDurations(suites, overall_duration)
if num_tests == 0:
print("Warning: no tests were run!")
if exit_code == 1 and options.json_test_results:
print("Force exit code 0 after failures. Json test results file "
"generated with failure information.")
exit_code = 0
if self.sancov_dir:
# If tests ran with sanitizer coverage, merge coverage files in the end.
try:
print "Merging sancov files."
subprocess.check_call([
sys.executable,
join(
base_runner.BASE_DIR, "tools", "sanitizers", "sancov_merger.py"),
"--coverage-dir=%s" % self.sancov_dir])
except:
print >> sys.stderr, "Error: Merging sancov files failed."
exit_code = 1
return exit_code
def _shard_tests(self, tests, options):
# Read gtest shard configuration from environment (e.g. set by swarming).
# If none is present, use values passed on the command line.
shard_count = int(
os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
shard_run = os.environ.get('GTEST_SHARD_INDEX')
if shard_run is not None:
# The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
shard_run = int(shard_run) + 1
else:
shard_run = options.shard_run
if options.shard_count > 1:
# Log if a value was passed on the cmd line and it differs from the
# environment variables.
if options.shard_count != shard_count:
print("shard_count from cmd line differs from environment variable "
"GTEST_TOTAL_SHARDS")
if options.shard_run > 1 and options.shard_run != shard_run:
print("shard_run from cmd line differs from environment variable "
"GTEST_SHARD_INDEX")
if shard_count < 2:
return tests
if shard_run < 1 or shard_run > shard_count:
print "shard-run not a valid number, should be in [1:shard-count]"
print "defaulting back to running all tests"
return tests
count = 0
shard = []
for test in tests:
if count % shard_count == shard_run - 1:
shard.append(test)
count += 1
return shard
if __name__ == '__main__':
sys.exit(StandardTestRunner().execute())
|
[
"[email protected]"
] | |
26088ff7733bb15d42a931600351936eaacec3cc
|
6f401aed6b736b07224c5da774fef5124536e4c3
|
/python bible/health.py
|
ec10fb386d8e0e6610e608e3b0a5f7eb0320a013
|
[] |
no_license
|
Fumitus/First_Python_lessons
|
871a417cdfe828c68da1003e24d5e93de5a466dd
|
07a161c886a00ddb86eca3ede32e82df00b938a8
|
refs/heads/master
| 2020-04-10T06:49:20.666396 | 2018-12-07T19:23:41 | 2018-12-07T19:23:41 | 160,865,229 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 150 |
py
|
import random
health = 50
difficulty = 1
potion_health = int(random.randint(25, 50) / difficulty)
health = health + potion_health
print(health)
|
[
"[email protected]"
] | |
a666762fd34411a901f443d2ec06dd10658e150c
|
e787a46d354e3bf9666cb0d8b0c7d5f8ed0a8169
|
/ccdproc/tests/make_mef.py
|
a871eaab6869b53192f486e57ffb6a99680fc3eb
|
[] |
permissive
|
astropy/ccdproc
|
25270fec41e64e635f7f22bcf340b2dee9ef88ac
|
5af6ee5eee16a99591dd9fcbe81735e70c1cc681
|
refs/heads/main
| 2023-09-01T11:48:06.969582 | 2023-06-08T18:01:43 | 2023-06-08T18:01:43 | 13,384,007 | 81 | 88 |
BSD-3-Clause
| 2023-06-08T18:01:45 | 2013-10-07T13:05:51 |
Python
|
UTF-8
|
Python
| false | false | 2,156 |
py
|
import numpy as np
from astropy.utils.misc import NumpyRNGContext
from astropy.io import fits
from astropy.nddata import CCDData
from ccdproc import flat_correct
def make_sample_mef(science_name, flat_name, size=10, dtype='float32'):
"""
Make a multi-extension FITS image with random data
and a MEF flat.
Parameters
----------
science_name : str
Name of the science image created by this function.
flat_name : str
Name of the flat image created by this function.
size : int, optional
Size of each dimension of the image; images created are square.
dtype : str or numpy dtype, optional
dtype of the generated images.
"""
with NumpyRNGContext(1234):
number_of_image_extensions = 3
science_image = [fits.PrimaryHDU()]
flat_image = [fits.PrimaryHDU()]
for _ in range(number_of_image_extensions):
# Simulate a cloudy night, average pixel
# value of 100 with a read_noise of 1 electron.
data = np.random.normal(100., 1.0, [size, size]).astype(dtype)
hdu = fits.ImageHDU(data=data)
# Make a header that is at least somewhat realistic
hdu.header['unit'] = 'electron'
hdu.header['object'] = 'clouds'
hdu.header['exptime'] = 30.0
hdu.header['date-obs'] = '1928-07-23T21:03:27'
hdu.header['filter'] = 'B'
hdu.header['imagetyp'] = 'LIGHT'
science_image.append(hdu)
# Make a perfect flat
flat = np.ones_like(data, dtype=dtype)
flat_hdu = fits.ImageHDU(data=flat)
flat_hdu.header['unit'] = 'electron'
flat_hdu.header['filter'] = 'B'
flat_hdu.header['imagetyp'] = 'FLAT'
flat_hdu.header['date-obs'] = '1928-07-23T21:03:27'
flat_image.append(flat_hdu)
science_image = fits.HDUList(science_image)
science_image.writeto(science_name)
flat_image = fits.HDUList(flat_image)
flat_image.writeto(flat_name)
if __name__ == '__main__':
make_sample_mef('data/science-mef.fits', 'data/flat-mef.fits')
|
[
"[email protected]"
] | |
1dc48a51f14dfb066f1271301f6c7b6f8c4048ff
|
16f34a866a0fba5352dc9d7c83de741fdbc13df9
|
/lecture_1/exercise_1.py
|
7b4f719055861ca0fd3133587ff7c37b0847ebc2
|
[] |
no_license
|
ethanbar11/haskal_28_6_21
|
02fd6ae22b83d4e4678a9bba0dd0b90af147a05e
|
2c6a2a5fc1db2cafdfde88593b3d55048ed86bc3
|
refs/heads/main
| 2023-07-10T17:33:20.513451 | 2021-08-23T16:57:15 | 2021-08-23T16:57:15 | 383,196,071 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 62 |
py
|
num1=5
num2=10
result=num1+num2
print('The result is:',result)
|
[
"[email protected]"
] | |
5215a084044fb39cce1d96120767a0cf0684d3fe
|
72fd9d49d89a9fc23ca896154fa54cba836c41ca
|
/tasks.py
|
0ea3f55768a7233a886cb6707e616c923561b8c6
|
[
"MIT"
] |
permissive
|
envobe/pydash
|
15066046fbc07458c29b6b33b1489aaadda5d074
|
6c0f778f6a2535397706aab68636485702ff3565
|
refs/heads/master
| 2023-01-05T18:14:09.923169 | 2020-10-29T02:16:34 | 2020-10-29T02:16:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,593 |
py
|
"""
This module provides the CLI interface for invoke tasks.
All tasks can be executed from this file's directory using:
$ inv <task>
Where <task> is a function defined below with the @task decorator.
"""
from __future__ import print_function
from functools import partial
from invoke import Exit, UnexpectedExit, run as _run, task
PACKAGE_SOURCE = "src/pydash"
TEST_TARGETS = "{} tests".format(PACKAGE_SOURCE)
LINT_TARGETS = "{} tasks.py".format(PACKAGE_SOURCE)
EXIT_EXCEPTIONS = (Exit, UnexpectedExit, SystemExit)
# Set pty=True to enable colored output when available.
run = partial(_run, pty=True)
@task
def black(ctx, quiet=False):
"""Autoformat code using black."""
run("black {}".format(LINT_TARGETS), hide=quiet)
@task
def isort(ctx, quiet=False):
"""Autoformat Python imports."""
run("isort {}".format(LINT_TARGETS), hide=quiet)
@task
def docformatter(ctx):
"""Autoformat docstrings using docformatter."""
run(
"docformatter -r {} "
"--in-place --pre-summary-newline --wrap-descriptions 100 --wrap-summaries 100".format(
LINT_TARGETS
)
)
@task
def fmt(ctx):
"""Autoformat code and docstrings."""
print("Running docformatter")
docformatter(ctx)
print("Running isort")
isort(ctx, quiet=True)
print("Running black")
black(ctx, quiet=True)
@task
def flake8(ctx):
"""Check code for PEP8 violations using flake8."""
run("flake8 --format=pylint {}".format(LINT_TARGETS))
@task
def pylint(ctx):
"""Check code for static errors using pylint."""
run("pylint {}".format(LINT_TARGETS))
@task
def lint(ctx):
"""Run linters."""
linters = {"flake8": flake8, "pylint": pylint}
failures = []
for name, linter in linters.items():
print("Running {}".format(name))
try:
linter(ctx)
except EXIT_EXCEPTIONS:
failures.append(name)
result = "FAILED"
else:
result = "PASSED"
print("{}\n".format(result))
if failures:
failed = ", ".join(failures)
raise Exit("ERROR: Linters that failed: {}".format(failed))
@task(help={"args": "Override default pytest arguments"})
def unit(ctx, args="--cov={} {}".format(PACKAGE_SOURCE, TEST_TARGETS)):
"""Run unit tests using pytest."""
run("pytest {}".format(args))
@task
def test(ctx):
"""Run linters and tests."""
print("Building package")
build(ctx)
print("Building docs")
docs(ctx)
print("Running unit tests")
unit(ctx)
@task
def docs(ctx, serve=False, bind="127.0.0.1", port=8000):
"""Build docs."""
run("rm -rf docs/_build")
run("sphinx-build -q -W -b html docs docs/_build/html")
if serve:
print(
"Serving docs on {bind} port {port} (http://{bind}:{port}/) ...".format(
bind=bind, port=port
)
)
run(
"python -m http.server -b {bind} --directory docs/_build/html {port}".format(
bind=bind, port=port
),
hide=True,
)
@task
def build(ctx):
"""Build Python package."""
run("rm -rf dist build docs/_build")
run("python setup.py -q sdist bdist_wheel")
@task
def clean(ctx):
"""Remove temporary files related to development."""
run("find . -type f -name '*.py[cod]' -delete -o -type d -name __pycache__ -delete")
run("rm -rf .tox .coverage .cache .pytest_cache **/.egg* **/*.egg* dist build")
@task(pre=[build])
def release(ctx):
"""Release Python package."""
run("twine upload dist/*")
|
[
"[email protected]"
] | |
c9768e8e9428435fc33c6fdc6fdb30504230dd3f
|
d5f4d4dcad6217a2736cbefb5d091b7b4e822aee
|
/dk-crypto.py
|
69a215778e15e4454d6251936df3677c93da6d44
|
[] |
no_license
|
dknific/dk-crypto
|
288761e820e60046759247713b0d64c07f189a63
|
295fb0ff2a683adf942d9514268fa9372f43c546
|
refs/heads/main
| 2023-03-15T17:57:29.773739 | 2021-03-08T23:15:17 | 2021-03-08T23:15:17 | 345,809,118 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 761 |
py
|
import helpers
running = True
helpers.printIntro()
helpers.printOptions()
while running:
userIn = input('\nType the currency symbol you want a rate for: \n> ')
if (helpers.validateUserIn(userIn)):
response = helpers.getCurrencies(userIn.upper())
print('\n' + response[0]["name"] + ' (' + response[0]["symbol"] + ') is currently evaluated at USD $' + helpers.roundValue(response[0]["price"]) + '.')
check = input('Convert another coin? (Y/N):\n> ')
if check.upper() == 'N' or check.upper() == 'NO':
running = False
else:
print('\nWhoops! \'' + userIn + '\' wasn\'t valid input.')
        helpers.printOptions()
print('\n---------')
print('Quitting...')
print('Have a great day!')
print('-dk')
|
[
"[email protected]"
] | |
6d45bf858d903c9299f1bee1a02048ff896e95e0
|
f6cc91efee7fdb865923586152ecdee5f73e3214
|
/image_loader/wsgi.py
|
2e6383d87daf5e5390fad9ecec1a086b4902efbe
|
[
"MIT"
] |
permissive
|
PiochU19/image-loader
|
5e9186cbdd12eae46bcb66b49f604e7dd652e906
|
7abec95f5d41e859fe65607ab7bd442a855bc2a0
|
refs/heads/master
| 2023-05-04T05:41:41.362774 | 2021-05-18T20:26:44 | 2021-05-18T20:26:44 | 368,221,416 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 401 |
py
|
"""
WSGI config for image_loader project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'image_loader.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
e9db39b888b252aed531dfd86ac972d05320c65e
|
980a0013679dd53bc4385087f22b2eb42e84be29
|
/3-BE_Firefighter_Robot_game/PyPOMDP/pypomdp/replay.py
|
74a15aea9824cf4affa40dddaf2d586c9d16acaa
|
[] |
no_license
|
LudovicSterlin/nia-ai-tools
|
9cb084c74dea3ed1f10e66b63e9814b81a89a8f5
|
8ceebc77709a8fe9a0e2ed63db175c39729ff4ea
|
refs/heads/master
| 2023-04-13T12:44:28.421150 | 2021-04-09T09:23:21 | 2021-04-09T09:23:21 | 320,511,319 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,800 |
py
|
import argparse
import os
import json
import multiprocessing
from pomdp_runreplay import PomdpRunReplay
from util import ReplayParams
if __name__ == '__main__':
"""
Parse generic params for the POMDP runner, and configurations for the chosen algorithm.
Algorithm configurations the JSON files in ./configs
Example usage:
> python main.py pomcp --env Tiger-2D.POMDP
> python main.py pbvi --env Tiger-2D.POMDP
"""
parser = argparse.ArgumentParser(description='Solve pomdp')
parser.add_argument('config', type=str, help='The file name of algorithm configuration (without JSON extension)')
parser.add_argument('--env', type=str, default='GridWorld.POMDP', help='The name of environment\'s config file')
    parser.add_argument('--budget', type=float, default=float('inf'), help='The total action budget (defaults to inf)')
parser.add_argument('--snapshot', type=bool, default=False, help='Whether to snapshot the belief tree after each episode')
parser.add_argument('--logfile', type=str, default=None, help='Logfile path')
parser.add_argument('--random_prior', type=bool, default=False,
help='Whether or not to use a randomly generated distribution as prior belief, default to False')
parser.add_argument('--max_play', type=int, default=100, help='Maximum number of play steps')
parser.add_argument('--sim', type=int, default=100, help='Maximum number of simulations')
parser.add_argument('--policyfile', type=str, default='alphavecfile.policy', help='alphaVec policy file')
    parser.add_argument('--option', type=str, default='onsolve',
                        help='please choose between: onsolve - for online solving; offsolve - for offline solving; simulate - for simulating a policyfile; replay - for an experience replay')
parser.add_argument('--expfile', type=str, default='data/dfsub_19alldataproc.csv', help='experimental data processed file')
parser.add_argument('--classif', type=str, default='data/classifier.joblib', help='classifier model')
parser.add_argument('--fnames', type=str, default='HRV,HRnorm,mode,nav,tank,nbAOI1,nbAOI2,nbAOI3,nbAOI4,nbAOI5', help='a string with a list of features names separated by comma ex. HRV,HRnorm')
args = vars(parser.parse_args())
params = ReplayParams(**args)
with open(params.algo_config) as algo_config:
algo_params = json.load(algo_config)
runner = PomdpRunReplay(params)
if params.option == 'onsolve':
runner.run(**algo_params)
if params.option == 'offsolve':
runner.offsolving(**algo_params)
if params.option == 'simulate':
runner.policy_eval(**algo_params)
if params.option == 'replay' :
runner.replay(**algo_params)
|
[
"[email protected]"
] | |
872575137733eefdd93c6b8133bae73913127c46
|
819897ab3c1aefd1cc966312e722d6e18226697b
|
/samples/baxter/baxter_mugs.py
|
e6a78678f666c70fa8310be4131172a83b0fdc8f
|
[
"MIT"
] |
permissive
|
msieb1/mask-rcnn
|
29bda1d79add2d83af9412e701bdf30cb085a751
|
535e6d02b22edc5624516a4eb139ddd480f75a34
|
refs/heads/master
| 2020-04-02T23:20:46.047757 | 2019-05-23T16:26:34 | 2019-05-23T16:26:34 | 154,864,394 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,558 |
py
|
"""
Mask R-CNN
Configurations and data loading code for the synthetic Shapes dataset.
This is a duplicate of the code in the noteobook train_shapes.ipynb for easy
import into other notebooks, such as inspect_model.ipynb.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import sys
import math
import random
import numpy as np
import cv2
import matplotlib.pyplot as plt
# Root directory of the project
ROOT_DIR = os.path.abspath("/home/msieb/projects/Mask_RCNN/")
DATASET_DIR = '/home/msieb/projects/Mask_RCNN/datasets/baxter_mugs'
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
from ipdb import set_trace
class BaxterConfig(Config):
"""Configuration for training on the toy shapes dataset.
Derives from the base Config class and overrides values specific
to the toy baxter dataset.
"""
# Give the configuration a recognizable name
NAME = "baxter"
# Train on 1 GPU and 8 images per GPU. We can put multiple images on each
# GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
GPU_COUNT = 1
IMAGES_PER_GPU = 2
# Number of classes (including background)
    NUM_CLASSES = 1 + 1 # background + 1 object (mug)
# Use small images for faster training. Set the limits of the small side
# the large side, and that determines the image shape.
# IMAGE_MIN_DIM = 128
# IMAGE_MAX_DIM = 128
# Use smaller anchors because our image and objects are small
# RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels
# Reduce training ROIs per image because the images are small and have
# few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
# TRAIN_ROIS_PER_IMAGE = 32
# Use a small epoch since the data is simple
STEPS_PER_EPOCH = 200
BACKBONE = "resnet101"
# use small validation steps since the epoch is small
VALIDATION_STEPS = 5
config = BaxterConfig()
config.display()
class InferenceConfig(BaxterConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
CLASS_NAMES = ['BG', 'mug', 'robot']
TARGET_IDS = [1]
class BaxterDataset(utils.Dataset):
"""Generates the baxter synthetic dataset. The dataset consists of simple
baxter (triangles, squares, circles) placed randomly on a blank surface.
The images are generated on the fly. No file access required.
"""
def load_baxter(self, dataset_dir, subset):
"""Generate the requested number of synthetic images.
count: number of images to generate.
height, width: the size of the generated images.
"""
# Add classes
self.add_class("baxter", 1, "mug")
# self.add_class("baxter", 2, "mug")
# self.add_class("baxter", 3, "brown_mug")
# self.add_class("baxter", 4, "black_mug")
# self.add_class("baxter", 5, "hand")
# self.add_class("baxter", 2 , "robot")
# self.idx2class = {1: 'green_ring', 2: 'blue_ring'} # ,3: 'hand', 4: 'robot'}
# self.class2idx = {val: key for key, val in self.idx2class.items()}
# Train or validation dataset?
assert subset in ["train", "val", "test"]
dataset_dir = os.path.join(dataset_dir, subset)
filenames = os.listdir(dataset_dir)
filenames = [file for file in filenames if '.jpg' in file]
# Add images
for i, filename in enumerate(filenames):
            # Get the x, y coordinates of points of the polygons that make up
            # the outline of each object instance. These are stored in the
            # shape_attributes (see json format above)
            # load_mask() needs the image size to convert polygons to masks.
            # Unfortunately, VIA doesn't include it in JSON, so we must read
            # the image. This is only manageable since the dataset is tiny.
image_path = os.path.join(dataset_dir, filename)
height = 480
width = 640
if len(filename.split('_')) == 3:
try:
label_path = os.path.join(dataset_dir, '_' + filename.split('_')[0] + '.txt')
with open(label_path, 'r') as fp:
line = fp.readline().strip('\n')
if line != 'robot':
line = 'mug'
classes = [line]
except:
classes =[-1]
self.add_image(
"baxter",
image_id=filename, # use file name as a unique image id
path=image_path,
width=width, height=height, classes=classes)
else:
# # Synthetic image
# label_path_1 = os.path.join(dataset_dir, '_' + filename[0] + '.txt')
# label_path_2 = os.path.join(dataset_dir, '_' + filename.split('_')[3] + '.txt')
# try:
# with open(label_path_1, 'r') as fp:
# line = fp.readline().strip('\n')
# classes = [line]
# with open(label_path_2, 'r') as fp:
# line = fp.readline().strip('\n')
# classes.append(line)
# except:
# classes = [-1]
# self.add_image(
# "baxter",
# image_id=filename, # use file name as a unique image id
# path=image_path,
# width=width, height=height, classes=classes)
# Synthetic image
_split = filename.split('_')
n_added_objects = int((len(_split) - 2) / 3)
label_path = os.path.join(dataset_dir, '_' + filename.split('_')[0] + '.txt')
with open(label_path, 'r') as fp:
line = fp.readline().strip('\n')
if line != 'robot':
line = 'mug'
classes = [line]
for i in range(n_added_objects):
label_path = os.path.join(dataset_dir, '_' + filename.split('_')[3 + i*3] + '.txt')
with open(label_path, 'r') as fp:
line = fp.readline().strip('\n')
if line != 'robot':
line = 'mug'
classes.append(line)
self.add_image(
"baxter",
image_id=filename, # use file name as a unique image id
path=image_path,
width=width, height=height, classes=classes)
def load_image(self, image_id):
"""Generate an image from the specs of the given image ID.
Typically this function loads the image from a file, but
in this case it generates the image on the fly from the
specs in image_info.
"""
image_info = self.image_info[image_id]
if image_info["source"] != "baxter":
return super(self.__class__, self).load_mask(image_id)
# print("image info: {}".format(image_info))
# Convert polygons to a bitmap mask of shape
# [height, width, instance_count]
info = self.image_info[image_id]
# mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
# dtype=np.uint8)
image_path = info['path']
image = plt.imread(image_path)
return image
def image_reference(self, image_id):
"""Return the baxter data of the image."""
info = self.image_info[image_id]
if info["source"] == "baxter":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
def load_mask(self, image_id):
"""Generate instance masks for baxter of the given image ID.
"""
image_info = self.image_info[image_id]
if image_info["source"] != "baxter":
return super(self.__class__, self).load_mask(image_id)
# print("image info: {}".format(image_info))
# Convert polygons to a bitmap mask of shape
# [height, width, instance_count]
info = self.image_info[image_id]
# mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
# dtype=np.uint8)
image_path = info['path']
image_ = plt.imread(image_path)
mask_path = image_path.split('.')[0] + '.npy'
mask = np.load(mask_path)
if len(mask.shape) == 2:
mask = mask[:, :, np.newaxis]
mask_ids = np.ones((1,1), dtype=np.int32) * self.class_names.index(info["classes"][0])
else:
# Synthetic image
mask_ids = np.ones([mask.shape[-1]], dtype=np.int32)
for i in range(mask.shape[-1]):
mask_ids[i] = self.class_names.index(info["classes"][i])
# print(info["classes"])
# mask_ids = np.ones([mask.shape[-1]], dtype=np.int32)
# print("class id: ", self.class2idx[info["classes"]])
# print(type(self.class2idx[info["classes"]])) # Return mask, and array of class IDs of each instance. Since we have
# one class ID only, we return an array of 1s
# print ("mask: {}".format(np.where(mask.astype(np.bool) == True)))
# print("class ids: {}".format(mask_ids))
return mask.astype(np.bool), mask_ids
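# Minimal usage sketch added for illustration (assumes the directory layout
# implied by DATASET_DIR above; not part of the original file):
if __name__ == '__main__':
    dataset_train = BaxterDataset()
    dataset_train.load_baxter(DATASET_DIR, "train")
    dataset_train.prepare()  # provided by mrcnn.utils.Dataset
    print("images: {}, classes: {}".format(
        len(dataset_train.image_ids), dataset_train.class_names))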
|
[
"[email protected]"
] | |
842240a63093b1ea755d9ef1824ad3d6792f4177
|
9e658976a6fdfbe031fc3452c69243dc66359f6a
|
/pythonExercise/four.py
|
b4fadd64058df0da705a77f23dd57f2e54e2cff1
|
[] |
no_license
|
zyyxydwl/Python-Learning
|
b2ed0f80121b284e5fb65cc212ccb84a0eb14cb6
|
6a5d36aa8805da647229fa747fa96452638d830e
|
refs/heads/master
| 2018-10-04T23:42:21.076668 | 2018-06-08T03:19:33 | 2018-06-08T03:19:33 | 107,348,411 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,088 |
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#@Time :2017/12/2 9:57
#@Author :zhouyuyao
#@File :four.py
# Task: given a year, month and day, determine which day of the year it is.
# Analysis: take March 5 as an example - add up the days of the first two
# months, then add 5. Special case: in a leap year, one extra day must be
# added when the input month is greater than 2.
year = int(input('year:\n'))
month = int(input('month:\n'))
day = int(input('day:\n'))
months = (0,31,59,90,120,151,181,212,243,273,304,334)
if 0 < month <= 12:
    total = months[month - 1]  # days contributed by the preceding months
else:
    total = 0
    print('data error')
total += day
leap = 0
if (year % 400 == 0) or ((year % 4 == 0) and (year % 100 != 0)):
    leap = 1
if (leap == 1) and (month > 2):
    total += 1  # leap-year correction after February
print('it is the %dth day.' % total)
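# Optional cross-check added for illustration (not in the original): the
# standard library computes the same day-of-year value via tm_yday.
import datetime
print(datetime.date(year, month, day).timetuple().tm_yday)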
|
[
"[email protected]"
] | |
ad8ace3d9d368e692a32a964c2ad9c4e137f94dd
|
474b53e2dfbecab16d92fc44e8246b44fa2d840b
|
/NaiveBayes_Titanic/NaiveBayesTitanic.py
|
af1366cf7a34b0eeda5db147f20bb21ac4da3be2
|
[] |
no_license
|
YYJIANG/classPatternRecognition
|
db98493b53f28149ad3b828a8e0cd95e6c0e0920
|
093dfd9d0a80abb126e73a8c3de8ff74d3e49699
|
refs/heads/main
| 2023-08-27T10:40:06.280524 | 2021-10-28T01:23:36 | 2021-10-28T01:23:36 | 413,449,657 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,097 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 4 16:15:26 2021
@author: JYY
"""
import csv
import numpy as np
def dataProcess(filename='train.csv',a=0,b=1,c=4,d=5):
    '''Read the CSV file.'''
train_data = []
with open(filename,'r',newline='') as f:
csvreader = csv.reader(f)
for line in csvreader:
train_data.append(line)
    '''Reassemble the needed features into a matrix: one row per passenger, columns = (id, survived, sex, age).'''
train = []
male_age = female_age = 0
male_num = female_num = 0
for i in range(len(train_data)-1):
train_row= []
train_row.append(eval(train_data[i+1][a]))
train_row.append(eval(train_data[i+1][b]))
train_row.append(1 if train_data[i+1][c]=='male' else 0)
age = train_data[i+1][d]
if age.isdigit():
train_row.append(eval(age))
if train_row[2] == 1:
male_num += 1
male_age += eval(age)
else:
female_num += 1
female_age += eval(age)
else:
train_row.append(-1)
train.append(train_row)
male_age = male_age/male_num
female_age = female_age/female_num
    '''Age buckets: 0 (0-5), 1 (6-15), 2 (16-25), 3 (26-35), 4 (36-45), 5 (46+); missing ages default to bucket 3.'''
for i in range(len(train)):
if train[i][3] == -1:
train[i][3] = 3
elif train[i][3] <=5:
train[i][3] = 0
elif train[i][3] <=15:
train[i][3] =1
elif train[i][3] <= 25:
train[i][3] = 2
elif train[i][3] <=35:
train[i][3] =3
elif train[i][3] <=45:
train[i][3] = 4
else:
train[i][3] = 5
return train
'''Load the training data.'''
train = dataProcess('train.csv',0,1,4,5)
'''Compute the prior and conditional probabilities.'''
sur = np.zeros(2)
sur_sex = np.zeros([2,2])
sur_age = np.zeros([2,6])
'''sur: [died, survived]; sur_sex: [outcome][sex 0-1]; sur_age: [outcome][age bucket 0-5]'''
for i in range(len(train)):
sur[train[i][1]] += 1
sur_sex[train[i][1]][train[i][2]] += 1
sur_age[train[i][1]][train[i][3]] += 1
sur = sur/sur.sum()
for i in range(2):
sur_sex[i] = sur_sex[i]/sur_sex[i].sum()
sur_age[i] = sur_age[i]/sur_age[i].sum()
print(sur)
print(sur_sex)
print(sur_age)
'''Load the test data.'''
test = dataProcess('test.csv',0,0,3,4)
'''Predict survival for each test passenger with the naive Bayes rule.'''
pre = []
for i in range(len(test)):
precision_row = [str(test[i][0])]
p_sur_0 = sur[0]*sur_sex[0][test[i][2]]*sur_age[0][test[i][3]]
p_sur_1 = sur[1]*sur_sex[1][test[i][2]]*sur_age[1][test[i][3]]
precision_row.append('0' if p_sur_0 > p_sur_1 else '1')
pre.append(precision_row)
print(pre)
with open('precision.csv','w',newline='') as f:
csvwriter=csv.writer(f)
csvwriter.writerow(['PassengerId','Survived'])
csvwriter.writerows(pre)
# print(train)
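# Added for illustration (not original code): the decision rule in the loop
# above compares unnormalized posteriors under the naive independence
# assumption P(survived|sex, age) ~ P(survived) * P(sex|survived) * P(age|survived).
def posterior(survived, sex, age_bucket):
    return sur[survived] * sur_sex[survived][sex] * sur_age[survived][age_bucket]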
|
[
"[email protected]"
] | |
8a62ea9c003ba8f15a6395525cc0affc3dd20b47
|
2b0f76f83969d5403e169909e95ec122bc6ac97f
|
/junk/vector2D.py
|
768387c6d9e87bf70a2403370b0e8b3499f2f15f
|
[] |
no_license
|
Ankitdulani/Face-Potriat
|
64a65131876e3ddae4a0f5bea9e7fd84c1bb290a
|
322234ae350ab672a2aafcdbb536af23b5d2ca7c
|
refs/heads/master
| 2020-04-27T21:04:23.265249 | 2020-01-18T14:10:24 | 2020-01-18T14:10:24 | 174,683,235 | 0 | 0 | null | 2020-01-18T14:10:25 | 2019-03-09T11:03:29 |
Python
|
UTF-8
|
Python
| false | false | 545 |
py
|
import math
class vector2D:  # renamed from "vector" so the vector2D(...) calls below resolve
def __init__(self, x = 0, y =0 ):
self.x = (x)
self.y = (y)
def unitVector(self):
mag = self.getModulus()
if mag == float(0):
return (vector2D())
return vector2D(self.x/mag, self.y/mag)
def getModulus(self):
return math.sqrt( self.x **2 + self.y **2)
## return vector2D
def addVector( self, A):
return ( vector2D(self.x+A.x,self.y+A.y))
def scaledVector(self ,mag):
n = self.unitVector()
return (vector2D( mag * n.x , mag * n.y ))
def printVector(self):
print ( self.x , self.y)
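# Brief usage sketch added for illustration (not in the original file):
if __name__ == '__main__':
    v = vector2D(3, 4).addVector(vector2D(1, 2))  # -> (4, 6)
    v.scaledVector(10).printVector()              # (4, 6) rescaled to length 10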
|
[
"[email protected]"
] | |
a458806ec8505b651615e5e6abd60a4ff5f646ed
|
89f8181a659ecaf3af77740eb60e4031b654f728
|
/onestage/步骤2/utils/trans/__init__.py
|
c96e724b71b1a5dfd10448b3616e3d224f1afc96
|
[] |
no_license
|
sealandsigh/python4stage
|
c6a3eb40ffdb54f7b77a9ae1b2406232b705d0e6
|
f2e08e7207e796cbb874f8a15f8fedbb24e7c7b0
|
refs/heads/master
| 2020-12-01T20:31:52.754076 | 2020-04-23T09:08:16 | 2020-04-23T09:08:16 | 230,760,233 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 67 |
py
|
# -*- coding: utf-8 -*-
# __author__="jiajun.zhu"
# DATE:2020/1/15
|
[
"[email protected]"
] | |
c81f32fd9551171eca3f5765147895606e3573ff
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2759/60610/245707.py
|
0d1aca295915e41bef3bdf5a5262c94f0f29f52f
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 278 |
py
|
num = input()
for i in range(num):
    numList = raw_input().split()
    a = int(numList[2])
    b = int(numList[3])
    count = 0
    for j in range(int(numList[0]), int(numList[1]) + 1):
        if j % a == 0 or j % b == 0:
            count += 1
    print(count)
|
[
"[email protected]"
] | |
2c041d0cdb86f298fb695188dd4cdb2a03070fe3
|
fa8016fd0f971a94bd1206b70ff19b9e709d8fa3
|
/First_site/First_site_2.py
|
c27f6e399ff29a669577ac0cf2346343611ec28e
|
[] |
no_license
|
hannahphp/Web-Project-1
|
fea4276531ed484d45282e7c5bcd947b2ffd31f5
|
814c998dfb1f09127b44e0dd67012130fe529d31
|
refs/heads/master
| 2020-03-23T18:07:35.980453 | 2018-07-19T18:09:06 | 2018-07-19T18:09:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12 |
py
|
First_site_2
|
[
"[email protected]"
] | |
3d5664d5e503269e5557e6b98623f3cb0c80edbc
|
e211000d0d843fd944266892f49a7649c7e8918d
|
/abc/065/python/code_c.py
|
fc52911095e41bda42258728f4b59ac2a5a9d1b0
|
[] |
no_license
|
habroptilus/atcoder-src
|
63dfa16c6d4b80d1e36618377d3201888183281f
|
4cd54202037996b3f4a4442b1bd19d42d8a46db1
|
refs/heads/master
| 2020-04-26T07:14:38.322156 | 2019-06-08T14:44:26 | 2019-06-08T14:44:26 | 173,388,602 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 228 |
py
|
import math
N, M = map(int, input().split())
if abs(N - M) == 1:
print(math.factorial(N) * math.factorial(M) % (10**9 + 7))
elif N == M:
print(math.factorial(N) * math.factorial(M) * 2 % (10**9 + 7))
else:
print(0)
|
[
"[email protected]"
] | |
2af082e346df1acaa0004afb1d856e2c49d3ee69
|
6ac0b1f627976e7fcf69d41b723e155a01d689ce
|
/m.py
|
e14330dc53678e40d61224761ef1ec6a03c6b378
|
[] |
no_license
|
MrFaxel/Sec
|
954ad750284fe6e33e820e6fb8f2a51941c14d3e
|
bf7746ab6199f8d746a1c6197b44f982aad01224
|
refs/heads/master
| 2022-12-04T12:32:57.340518 | 2020-09-02T22:41:40 | 2020-09-02T22:41:40 | 292,390,782 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,268 |
py
|
# Compiled by Faxel
#Twitter https://twitter.com/Faxel2020
import zlib , marshal , base64
from base64 import *
exec(zlib.decompress(base64.b16decode('789CCD574B6FE33610BE1BF07F983A073B8B95B3B637ED26410EC576736A4F057A4916022D535E1614A925A9D42E7A29F6984B0BD82850A0A71E8B6C5CF4987FE35FD2A19E9622BF80855B82B0257238CF6F469CA3CF4E22AD4E864C9C8453F34E8A41B371E4C91113E3CBC8F8CE2B7CDD36E0B5A22366346CA55C3B500A7C4DC18B19412823051EBECA11855B0204BE51704526945BBA6F19DCCA48DB1F4E7F845051315274DDF1EFE41489AE883654C197C8A2BB83459F60341B5C8EA5EB5BADE1B2D36AB56E5E0C06D7835717A717FD975F04CBF9FD72F6F372FE71E5E16F9C2B54BD971955323392BA798FBB37A22262F6C1CEF962397FC89E6B44D8E54C0A12AE131133A81311CB4E7F677F268AD688586486FC533CA4D6E72B7FAD11519956D6C3062B904FAECEFCCECE98F33E22625F3D1591B9B0B0BB568475F9D3702481C87F678F89BBAB115FAC44A1421B3FA4363E26F142601DA758D3D44B9136E90DAF7B1783FEEEF82901A9B2585610F6C564A14C1E23EBB70F3B689278EBE32665F6446F59993A4CD682D38638C1F85DA6CFFD1398C3BE385F09D3A2F0670AC00CC619FD0AF6EFCA2B99751B3CB34346D478262F1B39AA0B783F9473655110C45B35CAEC993B35CEC961BFEEA194238F457DAD560BD837CBCAFA6C4DA23578CE9DF309B8E5807948EB4CEFE2CCD698D972FECBFF63FE5656ECD7ECA5175C678F83E0D9B362F96D11A4B3D300E2F115BDA55C8621C54F7AC1EDBC12CEE47200C5D85156AE5A49D3DFFF6BCFE5F38FA4BA1FE6BE7204CE1BDF271EDE9338053DC51B5340810A781F516D98140E12B12094CA80D4CF434E8C2F55D06C8CA80FAEC775E7F8BCD9B0DE673E127405C1E39797D016A69D6EC403B712E69D361E6A1F275B946BBA968A12D53E981F8C9A6676E4C61A16D0E7A84FB21E2A264CA79561A61FC09B091B53E1510D23A64329D89053DD4A4D4B7CD36CD0894743735E5EACE1F77900AFA5F0D93852C4FA1D147D1F314D938B2D6F33A10DE13CDEEBE228B07B23329985FB5A2434CE981A488F818A86533092715C4321F85773684C83FC0097DC23A66ACD618281504295BAB75469B4F6FAC55BD40ADAFDF65A2F16B7EE0C5813667D2B6E4C7D51A8AD09D8366087216297DFDA268368880CE31805955161894A5AA6FECAB938447442BDC8F61BD884684FB1D01414DDBD35A111E3B6D1E1ED9C2F26A521211106CA57CCB4854B607098001D42CA4A614FAA4D40BECF8BCD2A0456009C02DC41FC048114F2CCF6908EE3336E1D3826D3564DF2A58E3C0DEA32C2769611A683546C8C200CE296725F2E855A56A95E1F9C2B184A35A269830B3F55D22D659CBDDA42D4D59CD2B033386412BAAE2DE7AE6B0B7ACB7503C284EBB6D210C4E13818E02A31F1E2521987A6ABE890609D342AA271066C27F5097E77B6D1FAF83D137CBA81CC71C65C0E09DF48A2DFC91F9C043D87CBCF7F0100EAFB80')))
|
[
"[email protected]"
] | |
4c32d982753b0e4db0881f6e9c9fa2a743f9a9eb
|
a26b60ba7be3251b0cf95542b5b2bb539ca8cc7b
|
/donatingthings/migrations/0001_initial.py
|
3cdec032487be9d97bdf7f3f6fb213ee09d7d068
|
[] |
no_license
|
rokesh27/wadd
|
3e663def9cf1c5bfa583fc7c99feb4130f6c68a9
|
ed50633af4d5b89b80d467a9330d3f0588332bb6
|
refs/heads/main
| 2023-07-07T00:54:10.970420 | 2021-08-11T21:19:16 | 2021-08-11T21:19:16 | 395,082,237 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,044 |
py
|
# Generated by Django 3.2.1 on 2021-05-09 11:13
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='contactform',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('phone', models.BigIntegerField()),
('email', models.EmailField(max_length=254)),
('subject', models.CharField(max_length=50)),
('message', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='regformdthings',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fname', models.CharField(max_length=50)),
('lname', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254)),
('gender', models.CharField(choices=[('m', 'male'), ('f', 'female'), ('u', 'other')], max_length=1, null=True)),
('age', models.IntegerField()),
('pnumber', models.BigIntegerField()),
('ntoys', models.CharField(max_length=50)),
('nbooks', models.CharField(max_length=50)),
('nblankets', models.CharField(max_length=50)),
('dress', models.CharField(max_length=50)),
('cgender', models.CharField(choices=[('M', 'boy'), ('F', 'girl')], max_length=1, null=True)),
('others', models.CharField(max_length=50)),
('adress', models.CharField(max_length=5000)),
('landmark', models.CharField(max_length=50)),
('city', models.CharField(max_length=50)),
('district', models.CharField(max_length=50)),
],
),
]
|
[
"[email protected]"
] | |
fd6858277f5f415ab9122b3ca391257c09bda9a1
|
acaeef460a66ac90e01c28bdec80aa82828f2df6
|
/src/sort/__init__.py
|
45f40284a8c3f0e8badc2e9dfa812ce70e060458
|
[
"Apache-2.0"
] |
permissive
|
dmvieira/algs
|
e1acd58c2f744dc4e7495ee2ab255eb4ceb7586f
|
bf88d026ecf7210f0a1d601b36d7fc82364bd46f
|
refs/heads/master
| 2020-05-02T01:58:14.427951 | 2019-04-13T16:55:53 | 2019-04-13T16:55:53 | 177,695,293 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 102 |
py
|
MAX_NUMBER_SIZE=100
class Sort(object):
def __init__(self, sample):
self.sample = sample
|
[
"[email protected]"
] | |
6eb9b2055a467de99b0a35b99f5c540e841ed140
|
7c135c7d34b1dd6dbad71e2ffa8d5b132a5e3fef
|
/Chat-dupe.py
|
bb4e67fd39066f6faf0168ce4a1a4d7adb804877
|
[] |
no_license
|
theminshew/simplechat
|
02ef7a9c1683faaaea9e15f4204189fec4b19c4e
|
a8304782963a54d2ed9bd8c088bb2164e988ce70
|
refs/heads/Master-Primary
| 2021-01-20T06:54:32.800617 | 2018-02-07T21:08:41 | 2018-02-07T21:08:41 | 89,943,944 | 1 | 0 | null | 2017-05-18T19:58:58 | 2017-05-01T17:05:12 |
Python
|
UTF-8
|
Python
| false | false | 1,787 |
py
|
# initial prompting, basic raw_input to variable mapping
age = raw_input(" How old are you? ")
weight = raw_input(" How much do you weigh? ")
real_weight = raw_input(" No really, how much do you really weigh? ")
print " Hmm, ok let's see what I can do with that?"
# time.sleep inserts a pause for a simulated thinking effect
import time
time.sleep(3)
print "Gimme a moment, I'm still crunching numbers"
time.sleep(3)
# variables for adjusting weight, for use in the functions below
fudged_answer = int(real_weight) - 10
alternate_answer = int(real_weight) - 25
# prompt the user with a basic menu and give results
# (raw_input returns a string, so choices are compared against "1"/"2")
print "ok how does this look? %s is that number better?" % fudged_answer
def first_menu():
    choice = raw_input("1.) Yes \n2.) No\n")
    if choice == "1":
        print "Excellent, Glad I could find a number that works"
    elif choice == "2":
        recalculate()
    else:
        print "That is not an option, Please choose 1 or 2."
        first_menu()
def recalculate():
    print "Finally, ok let's try again, is %s lbs better?" % alternate_answer
    choice = raw_input("1.) Yes \n2.) No\n")
    if choice == "1":
        abouttime()
    elif choice == "2":
        letsmoveonagain()
    else:
        print "That is not an option, Please choose 1 or 2."
        recalculate()
def abouttime():
    print "Geez, about time"
def letsmoveonagain():
    print "Excellent, Glad I could find a number that works"
first_menu()
time.sleep(3)
print " Alright, so you're %s old and weigh about '%r' " % (
    age, fudged_answer)
def final_menu():
    print ("are those numbers about right?")
    choice = raw_input("1.) Yes \n2.) No\n")
    if choice == "1":
        good()
    if choice == "2":
        bad()
def good():
    print ("Ok, based on those calculations I think you're a 10 ")
    time.sleep(1)
    print
    print "hope this made you smile"
def bad():
    print ("I am sorry if I offended you, I am going to leave now")
final_menu()
|
[
"[email protected]"
] | |
070d2ffacad8dbdcc16c98b9921ba3c9c2b5c0ca
|
3a21eac318260972a0f50aa6517bebd62d9634f3
|
/minimarket/settings.py
|
a6e40467c5437d3caa279c03850dc038c10d6db9
|
[] |
no_license
|
alviandk/ahp
|
adaf735c2ad14cfffee41eca37df5ff2452e8812
|
60764c12bb30cd134bbce53d62cda835503191d2
|
refs/heads/master
| 2016-09-05T19:16:02.907235 | 2015-03-27T09:42:43 | 2015-03-27T09:42:43 | 32,963,488 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,267 |
py
|
"""
Django settings for minimarket project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4z+05z^%!e=1p&*2uyz^_tel^5($l##z8f80t^@=60%*4z$#4m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ahp_aps',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'minimarket.urls'
WSGI_APPLICATION = 'minimarket.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'ahp',
'USER': 'root',
'PASSWORD':'',
'HOST':'localhost'
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_PATH = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
STATIC_PATH,
)
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
|
[
"[email protected]"
] | |
9ef217d20fb794abbb33d9876fd5f7161ca24886
|
5323eac10ecfbbd415c84333e2758c4b05e99aa0
|
/Practice_Class1.py
|
5d09060d922d8b41dd7f523003e915a6f228b862
|
[] |
no_license
|
StanleyCY/Practice
|
f49b6cf8c501cf91a2ad69fa57c5025e53b786b4
|
ee05c3e1bde97fefe8bb45e6fae3eafb0df45b34
|
refs/heads/master
| 2020-03-30T09:48:57.780739 | 2018-10-03T00:58:23 | 2018-10-03T00:58:23 | 151,093,625 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 658 |
py
|
class personal():
    __number = 18  # two leading underscores name-mangle the attribute, sealing it inside the class so outside code cannot access it directly
    def __init__(self,name,grade,age):
        self.name = name
        self.grade = grade
        self.age = age
    def walk(self):
        print('I always with',self.name)
    def talk(self):
        print('I always talk with other I\'m',self.grade,'now')
    def care(self):
        print('when I\'m',self.age,',I can take myself')
PersonA = personal('Stanley',6,25)
PersonA.walk()
PersonA.talk()
PersonA.care()
print (PersonA._personal__number)  # to reach a name-mangled member from outside, it must be accessed as _ClassName__member
|
[
"[email protected]"
] | |
10e5d0ab383ecab54b9aadc82335bbeb4294a2c2
|
90a81de919ad18a2cd88c7f488a8b88136832be2
|
/data_ub_tasks/__init__.py
|
dfbaba9df0cd43e290583cabe5521a22ae2d6406
|
[] |
no_license
|
scriptotek/data_ub_tasks
|
0305557ed8cebfc28e5cecbd83c3b4f83c708e58
|
66e4cfc3ffa94bdd2903d04e71aec5491fd43ffc
|
refs/heads/master
| 2023-01-10T10:49:00.809131 | 2023-01-01T20:53:06 | 2023-01-01T20:53:06 | 51,712,304 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 28 |
py
|
from .data_ub_tasks import *
|
[
"[email protected]"
] | |
c5c561e0a70c1027a7c149cd7ffb4e4f5bb38d0f
|
9a9f31265c65bec0060271cd337580e7b4f3a7e9
|
/project/pokupka.py
|
81819a11dce3c4e08f65498d21c98238d72d5f98
|
[] |
no_license
|
AnatolyDomrachev/1kurs
|
efaabde4852172b61d3584237611fe19b9faa462
|
84ed0dceb670ec64c958bf1901636a02baf8f533
|
refs/heads/master
| 2023-02-19T21:42:53.286190 | 2021-01-19T07:41:15 | 2021-01-19T07:41:15 | 292,637,199 | 0 | 1 | null | 2020-09-16T02:29:14 | 2020-09-03T17:32:29 |
Python
|
UTF-8
|
Python
| false | false | 586 |
py
|
import magazin
import etc
magazin = magazin.Magazin('magazin.conf')
korzina = []
net_v_magazine = []
def pokupka(spisok):
    for slovar in spisok:
        est_v_mag = 'No'
        for tovar in magazin.tovary:
            if slovar['name'] == tovar['name']:
                kupil = etc.beru(slovar, tovar)
                korzina.append(kupil)
                est_v_mag = 'Yes'
        if est_v_mag == 'No':
            print(slovar, "is not in the store")
    print("Bought: ", korzina)
    print()
    print("Remaining: ", magazin.tovary)
|
[
"[email protected]"
] | |
84e8e2a34adc392dbabc3541f6defc2c829bdb23
|
a40f749cb8e876f49890ab8fbbbbf2c07a0dd210
|
/examples/ad_manager/v201902/adjustment_service/update_traffic_adjustments.py
|
60a54bd299660da60a8ece16a64cfb2643030b0a
|
[
"Apache-2.0"
] |
permissive
|
ale180192/googleads-python-lib
|
77afff4c352ac3f342fc8b3922ec08873d6da5be
|
783a2d40a49956fb16ed73280708f6f9e322aa09
|
refs/heads/master
| 2020-08-10T15:20:06.051974 | 2019-10-11T07:06:58 | 2019-10-11T07:06:58 | 214,367,074 | 0 | 0 |
Apache-2.0
| 2019-10-11T07:04:21 | 2019-10-11T07:04:20 | null |
UTF-8
|
Python
| false | false | 3,009 |
py
|
#!/usr/bin/env python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a historical adjustment of 110% for New Years Day traffic.
"""
from __future__ import print_function
import datetime
# Import appropriate modules from the client library.
from googleads import ad_manager
ADJUSTMENT_ID = 'INSERT_ADJUSTMENT_ID_HERE'
def main(client, adjustment_id):
# Initialize the adjustment service.
adjustment_service = client.GetService('AdjustmentService', version='v201902')
# Create a statement to select a single traffic forecast adjustment by id.
statement = (
ad_manager.StatementBuilder(
version='v201902').Where('id = :id').WithBindVariable(
'id', adjustment_id))
# Get the forecast traffic adjustment.
response = adjustment_service.getTrafficAdjustmentsByStatement(
statement.ToStatement())
# Create a new historical adjustment segment for New Year's Day.
this_new_years = datetime.date(datetime.date.today().year, 12, 31)
next_new_years = datetime.date(datetime.date.today().year + 1, 12, 31)
new_years_segment = {
'basisType': 'HISTORICAL',
'historicalAdjustment': {
'targetDateRange': {
'startDate': next_new_years,
'endDate': next_new_years
},
'referenceDateRange': {
'startDate': this_new_years,
'endDate': this_new_years
},
'milliPercentMultiplier': 110000
}
}
if 'results' in response and len(response['results']):
# Update each local traffic adjustment.
updated_adjustments = []
for adjustment in response['results']:
adjustment['forecastAdjustmentSegments'].append(new_years_segment)
updated_adjustments.append(adjustment)
# Update traffic adjustments remotely.
adjustments = adjustment_service.updateTrafficAdjustments(
updated_adjustments)
# Display the results.
if adjustments:
for adjustment in adjustments:
print('Traffic forecast adjustment with id %d and %d segments was '
'created.' % (adjustment['id'],
len(adjustment['forecastAdjustmentSegments'])))
else:
print('No traffic adjustments were updated.')
else:
print('No traffic adjustments found to update.')
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, ADJUSTMENT_ID)
|
[
"[email protected]"
] | |
068e2f98b0b9c31dd0661f4df08b136ee519f3a0
|
2978e1838c010007972e18e015ef7ae364d7ad1f
|
/jump7.py
|
83e200bf486a0e808abbc8c21d39617d57bb4afa
|
[] |
no_license
|
GodLikesJ/shiyanlou-code
|
251f9441f11f371aa299ea2f991d60de27776024
|
d2e0c82d3a34599d7ebe5cecf07fb2fc1ecac2bc
|
refs/heads/master
| 2020-07-30T06:56:57.403696 | 2019-09-22T10:07:10 | 2019-09-22T10:07:10 | 210,125,592 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 72 |
py
|
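# "Jump 7" game: print 1..100, skipping multiples of 7 and any number that
# contains the digit 7 (i % 10 checks the ones digit, i // 10 the tens digit).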
for i in range(1, 101):
if i%7!=0 and i%10!=7 and i//10!=7:
print(i)
|
[
"[email protected]"
] | |
979286ffb46a102ab49df74f8383e498329ab818
|
e5eec1428da1d24d3e9b86f5723c51cd2ca636cd
|
/dynamic_programming/백준/가장큰정사각형_백준.py
|
4db92f7d4eee1a5199ea97cc10a52e85fa483fca
|
[] |
no_license
|
jamwomsoo/Algorithm_prac
|
3c36c381f59277721517d331a8f1640399d80c1d
|
8393f3cc2f950214c47f3cf0b2c1271791f115d0
|
refs/heads/master
| 2023-06-09T06:49:14.739255 | 2021-06-18T06:41:01 | 2021-06-18T06:41:01 | 325,227,295 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 671 |
py
|
# Baekjoon DP problem, difficulty Gold 5
# A classic DP problem
# dp[i][j] = the cell's own value plus the minimum of the cells above,
# to the left, and diagonally up-left
# -> because a square must have all of its sides the same length
# 1 1 1      1 1 1
# 1 1 1  ->  1 2 2
# 1 1 1      1 2 3
n, m = map(int, input().split())
arr = []
dp = [[0]*(m+1) for _ in range(n+1)]
for i in range(n):
arr.append(list(map(int, input())))
for j in range(m):
dp[i+1][j+1] = arr[i][j]
for i in range(n+1):
for j in range(m+1):
if dp[i][j] != 0:
dp[i][j] += min(dp[i-1][j-1], dp[i][j-1], dp[i-1][j])
res = 0
for row in dp:
res = max(res, max(row))
print(res**2)
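# Example session (assumed input format: "n m", then n rows of digit strings):
#   3 3
#   111
#   111
#   111
# dp fills in as in the sketch above, the largest side is 3, and 9 is printed.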
|
[
"[email protected]"
] | |
320c673db21bce86bbbec9d35d068e467d904230
|
ce79f454483beef0007337219d9415e5feabb0cc
|
/6/scripts/py_bash.py
|
93089bb40f79bad8f789d185757f81b179562cdd
|
[
"MIT"
] |
permissive
|
mohisen/zdotfiles
|
5be5417dca38f93f44443566737effc0cd0d3514
|
8c127d0e9ffdc43f9bc12a9d79a1690df063c2b4
|
refs/heads/master
| 2021-01-01T17:02:58.296952 | 2017-01-30T02:08:44 | 2017-01-30T02:08:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 51 |
py
|
import commands
result = commands.getoutput("ls")
|
[
"[email protected]"
] | |
41b484284addb15b16578a1ec7343aec2a1fc131
|
24f85f41cc42f6829d9e711b8cf5355a1b944284
|
/rest/schema.py
|
791cedf334f2df5d8319fd7ac50282173d7fe81d
|
[] |
no_license
|
djm158/rest
|
4a03d87b3625917cd591b055cde2c64403a08b3c
|
d52631781adbec7a2a0a848c7f62e8797d1ae3d3
|
refs/heads/master
| 2020-12-18T14:55:31.471706 | 2017-07-14T15:28:04 | 2017-07-14T15:28:04 | 235,427,617 | 0 | 0 | null | 2020-01-21T19:45:23 | 2020-01-21T19:45:22 | null |
UTF-8
|
Python
| false | false | 2,413 |
py
|
class Schema(object):
@classmethod
    def combined_errors(cls, *args):
errors = []
for schema in args:
errors.extend(schema._errors.items())
return dict(errors)
def __init__(self, **kwargs):
self._fields = self._get_fields()
self._errors = {}
for name, field in self._fields.items():
if name in kwargs:
field.set(kwargs.pop(name))
else:
field.reset()
def get_namespace(self):
if hasattr(self, '__namespace__'):
return self.__namespace__
return self.__class__.__name__.replace('Schema', '')
def __call__(self, data=None):
if data is None:
data = {}
errors = {}
def add_errors(name, errs):
if errs:
field_errors = errors.get(name, [])
field_errors += errs
errors[name] = field_errors
# First, set the values on the field. If anything goes wrong, it'll return
# a list of errors
for name, value in data.items():
if name not in self._fields:
continue
try:
errs = self._fields[name].set(value)
except ValueError as err:
                errs = [str(err)]
add_errors(name, errs)
        # Check to see if we need to fall back to default values
for name, field in self._fields.items():
if not field.serialize:
continue
field.default()
        # Then, now that everything's been set, run all the field validators.
for name, field in self._fields.items():
if not field.serialize:
continue
errs = field.validate()
add_errors(name, errs)
validator = 'validate_%s' % name
if hasattr(self, validator):
try:
getattr(self, validator)(field.get())
except ValueError as v:
add_errors(name, v.args)
self._errors = errors
return not bool(errors)
def __repr__(self):
return str(self._get())
def _get(self):
rep = {}
for name, field in self._fields.items():
if field.serialize:
name = getattr(field, 'name', name)
rep[name] = field.get_simplified()
rep['__namespace__'] = self.get_namespace()
return rep
def dict(self):
return self._get()
def _get_fields(self):
fields = {}
for field_name in dir(self):
if not field_name.startswith('_'):
field = getattr(self, field_name)
if not hasattr(field, '__call__'):
fields[field_name] = field
return fields
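# A minimal usage sketch (StringField is hypothetical; the real field classes
# providing set/get/validate/default/serialize live elsewhere in this package):
#
#   class UserSchema(Schema):
#       name = StringField()
#
#       def validate_name(self, value):
#           if not value:
#               raise ValueError('name is required')
#
#   schema = UserSchema()
#   ok = schema({'name': 'Ada'})  # False populates schema._errors
#   data = schema.dict()          # simplified dict incl. '__namespace__'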
|
[
"[email protected]"
] | |
1449e09c9233293ca4022000600e95acfe938497
|
f24b229ac4ee0c0a94e48c400be4b52a7a585871
|
/home/apps.py
|
0a6d0b19dce6f1c25e2ae31d407f6fa52c5004ec
|
[] |
no_license
|
RudreshVeerkhare/GroupChat
|
462c0f9c98949d418fa10cb2aaf173c8352419ba
|
39ddaaee0e5d0f36d4e44ae2b16f3a6440171259
|
refs/heads/master
| 2021-06-19T11:29:34.372870 | 2021-03-26T14:40:55 | 2021-03-26T14:40:55 | 193,379,367 | 28 | 6 | null | 2021-03-26T14:40:56 | 2019-06-23T17:57:19 |
CSS
|
UTF-8
|
Python
| false | false | 134 |
py
|
from django.apps import AppConfig
class HomeConfig(AppConfig):
name = "home"
def ready(self):
import home.signals
|
[
"[email protected]"
] | |
09e856c62ec6b5f6165f16429120e91e6ae6aea2
|
019c733ea5c77d04003cd7d828b1e016d7b838b5
|
/contrib/wallettools/walletunlock.py
|
a1390d55baac73b4b10acfaa83266f406eb8ce4b
|
[] |
no_license
|
rsjudge17/Appcoins
|
ee986ddb60096d7516f84999aa9f67badf0ab9d7
|
f1311cd289d8c5f55a6efed5922e88c286c90f6e
|
refs/heads/master
| 2021-01-15T20:18:52.857056 | 2014-03-20T11:05:36 | 2014-03-20T11:05:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 159 |
py
|
from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:16556")
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
|
[
"[email protected]"
] | |
132158a21c498725862cc23ae626f36d7f28db28
|
0c41f2fd4c1ad9b954097b0662e556b3eb288987
|
/cellbender/remove_background/data/priors.py
|
3989769165ab538647ccca8e672a97fca80bd06d
|
[] |
permissive
|
broadinstitute/CellBender
|
e884a5520fc3e0fc2f422f8cd6dcdc6c594b5094
|
4990df713f296256577c92cab3314daeeca0f3d7
|
refs/heads/master
| 2023-08-21T14:55:33.619290 | 2023-08-08T18:40:14 | 2023-08-08T18:40:14 | 171,951,233 | 207 | 40 |
BSD-3-Clause
| 2023-08-30T05:27:18 | 2019-02-21T21:53:57 |
Python
|
UTF-8
|
Python
| false | false | 15,821 |
py
|
"""Functionality for estimating various priors from the data"""
import numpy as np
import torch
from scipy.stats import gaussian_kde
from cellbender.remove_background import consts
from typing import Dict, Tuple, Union
import logging
logger = logging.getLogger('cellbender')
def _threshold_otsu(umi_counts: np.ndarray, n_bins: int = 256) -> float:
"""Return threshold value based on fast implementation of Otsu's method.
From skimage, with slight modifications:
https://github.com/scikit-image/scikit-image/blob/
a4e533ea2a1947f13b88219e5f2c5931ab092413/skimage/filters/thresholding.py#L312
Args:
umi_counts: Array of UMI counts
n_bins: Number of bins used to calculate histogram
Returns:
threshold: Upper threshold value. All droplets with UMI counts greater
than this value are assumed to contain cells.
References
----------
.. [1] Wikipedia, https://en.wikipedia.org/wiki/Otsu's_Method
.. [2] https://scikit-image.org/docs/stable/auto_examples/applications/plot_thresholding.html
Notes
-----
    Adapted from a grayscale-image implementation; here the "image" is the 1D array of log UMI counts.
"""
# create a UMI count histogram
counts, bin_centers = _create_histogram(umi_counts=umi_counts, n_bins=n_bins)
# class probabilities for all possible thresholds
weight1 = np.cumsum(counts)
weight2 = np.cumsum(counts[::-1])[::-1]
# class means for all possible thresholds
mean1 = np.cumsum(counts * bin_centers) / weight1
mean2 = (np.cumsum((counts * bin_centers)[::-1]) / weight2[::-1])[::-1]
# Clip ends to align class 1 and class 2 variables:
# The last value of ``weight1``/``mean1`` should pair with zero values in
# ``weight2``/``mean2``, which do not exist.
variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
idx = np.argmax(variance12)
threshold = bin_centers[idx]
return threshold
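# Sketch of the criterion used above: Otsu's method picks the threshold t that
# maximizes the between-class variance w1(t) * w2(t) * (mu1(t) - mu2(t))**2;
# the vectorized `variance12` evaluates this for every candidate bin center at once.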
def _create_histogram(umi_counts: np.ndarray, n_bins: int) -> Tuple[np.ndarray, np.ndarray]:
"""Return a histogram.
Args:
umi_counts: Array of UMI counts
n_bins: Number of bins used to calculate histogram
Returns:
counts: Each element is the number of droplets falling in each UMI
count bin
bin_centers: Each element is the value corresponding to the center of
each UMI count bin
"""
counts, bin_edges = np.histogram(umi_counts.reshape(-1), n_bins)
bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
return counts.astype('float32', copy=False), bin_centers
def _peak_density_given_cutoff(umi_counts: np.ndarray,
cutoff: float,
cell_count_low_limit: float) -> Tuple[float, float]:
"""Run scipy.stats gaussian_kde on part of the UMI curve"""
# get the UMI count values we are including
noncell_counts = umi_counts[umi_counts <= cutoff]
# resample them: the magic of looking at a log log plot
n_putative_cells = (umi_counts > cell_count_low_limit).sum()
n_putative_empties = len(noncell_counts)
inds = np.logspace(np.log10(n_putative_cells),
np.log10(n_putative_cells + n_putative_empties),
num=1000,
base=10)
inds = [max(0, min(int(ind - n_putative_cells), len(noncell_counts) - 1)) for ind in inds]
noncell_counts = np.sort(noncell_counts)[::-1][inds]
# find the peak density: that is the empty count prior
# calculate range of data, rounding out to make sure we cover everything
log_noncell_counts = np.log(noncell_counts)
x = np.arange(
np.floor(log_noncell_counts.min()) - 0.01,
np.ceil(log_noncell_counts.max()) + 0.01,
0.1
)
# fit a KDE to estimate density
k = gaussian_kde(log_noncell_counts)
density = k.evaluate(x)
# the density peak is almost surely the empty droplets
log_peak_ind = np.argmax(density)
log_peak = x[log_peak_ind]
empty_count_prior = np.exp(log_peak)
# try to go about 1 stdev up from the peak
peak_density = np.max(density)
one_std_density = 0.6 * peak_density
one_std_inds = np.where(density[log_peak_ind:] < one_std_density)[0]
if len(one_std_inds) > 0:
one_std_ind = one_std_inds[0]
else:
one_std_ind = len(density[log_peak_ind:]) - 1
empty_count_upper_limit = np.exp(x[log_peak_ind:][one_std_ind])
return empty_count_prior, empty_count_upper_limit
def get_cell_count_given_expected_cells(umi_counts: np.ndarray,
expected_cells: int) -> Dict[str, float]:
"""In the case where a prior is passed in as input, use it
Args:
umi_counts: Array of UMI counts per droplet, in no particular order
expected_cells: Input by user
Returns:
Dict with keys ['cell_counts']
"""
order = np.argsort(umi_counts)[::-1]
cell_counts = np.exp(np.mean(np.log(umi_counts[order][:expected_cells]))).item()
return {'cell_counts': cell_counts}
def get_empty_count_given_expected_cells_and_total_droplets(
umi_counts: np.ndarray,
expected_cells: int,
total_droplets: int,
) -> Dict[str, float]:
"""In the case where a prior is passed in as input, use it
Args:
umi_counts: Array of UMI counts per droplet, in no particular order
expected_cells: Input by user, or prior estimate
total_droplets: Input by user
Returns:
Dict with keys ['empty_counts', 'empty_count_upper_limit']
"""
order = np.argsort(umi_counts)[::-1]
starting_point = max(expected_cells, total_droplets - 500)
empty_counts = np.median(umi_counts[order]
[int(starting_point):int(total_droplets)]).item()
# need to estimate here
cell_counts = np.exp(np.mean(np.log(umi_counts[order][:expected_cells]))).item()
middle = np.sqrt(cell_counts * empty_counts)
empty_count_upper_limit = min(middle, 1.5 * empty_counts)
return {'empty_counts': empty_counts,
'empty_count_upper_limit': empty_count_upper_limit}
def get_cell_count_empty_count(umi_counts: np.ndarray,
low_count_threshold: float = 15) -> Dict[str, float]:
"""Obtain priors on cell counts and empty droplet counts from a UMI curve
using heuristics, and without applying any other prior information.
Heuristics:
0. Ignore droplets with counts below low_count_threshold
1. Use Otsu's method to threshold the log UMI count data (ignoring droplets
past 1/4 of the total droplets above low_count_threshold, as we go down
the UMI curve). This is used as a lower limit on cell counts.
It seems quite robust.
2. Use the following iterative approach, until converged:
a. Establish an upper cutoff on possible empty droplets, using the
current estimate of empty counts and our cell count prior (the
estimate is 3/4 of the geometric mean of the two).
b. Use gaussian_kde from scipy.stats to create a smooth histogram of
the log UMI counts, for droplets with counts below the cutoff.
- A trick is used to resample the droplets before creating the
histogram, so that it looks more like a log-log plot
c. Identify the peak density of the histogram as the empty count
estimate.
- Convergence happens when our estimate of empty counts stops changing.
Args:
umi_counts: Array of UMI counts per droplet, in no particular order
low_count_threshold: Ignore droplets with counts below this value
Returns:
Dict with keys ['cell_counts', 'empty_counts']
"""
logger.debug('Beginning priors.get_cell_count_empty_count()')
reverse_sorted_umi_counts = np.sort(umi_counts)[::-1]
umi_counts_for_otsu = reverse_sorted_umi_counts[:(umi_counts > low_count_threshold).sum() // 4]
log_cell_count_low_limit = _threshold_otsu(np.log(umi_counts_for_otsu))
cell_count_low_limit = np.exp(log_cell_count_low_limit)
logger.debug(f'cell_count_low_limit is {cell_count_low_limit}')
cell_count_prior = np.mean(umi_counts[umi_counts > cell_count_low_limit])
umi_counts_for_kde = reverse_sorted_umi_counts[reverse_sorted_umi_counts > low_count_threshold]
# initial conditions for the loop
# start low, but have a failsafe (especially for simulated data)
cutoff = max(0.1 * cell_count_low_limit, umi_counts_for_kde[-100])
empty_count_prior = -100
empty_count_upper_limit = None
delta = np.inf
a = 0
# iterate to convergence, at most 5 times
while delta > 10:
logger.debug(f'cutoff = {cutoff}')
# use gaussian_kde to find the peak in the histogram
new_empty_count_prior, empty_count_upper_limit = _peak_density_given_cutoff(
umi_counts=umi_counts_for_kde,
cutoff=cutoff,
cell_count_low_limit=cell_count_low_limit,
)
logger.debug(f'new_empty_count_prior = {new_empty_count_prior}')
# 3/4 of the geometric mean is our new upper cutoff
cutoff = 0.75 * np.sqrt(cell_count_prior * new_empty_count_prior)
delta = np.abs(new_empty_count_prior - empty_count_prior)
logger.debug(f'delta = {delta}')
empty_count_prior = new_empty_count_prior
a += 1
if a >= 5:
logger.debug('Heuristics for determining empty counts exceeded 5 '
'iterations without converging')
break
# do a final estimation of cell counts:
# go to the halfway point and then take the median of the droplets above
count_crossover = np.sqrt(cell_count_prior * empty_count_prior)
cell_count_prior = np.median(umi_counts[umi_counts > count_crossover])
logger.debug(f'cell_count_prior is {cell_count_prior}')
logger.debug(f'empty_count_prior is {empty_count_prior}')
logger.debug('End of priors.get_cell_count_empty_count()')
return {'cell_counts': cell_count_prior,
'empty_counts': empty_count_prior,
'empty_count_upper_limit': empty_count_upper_limit}
def get_expected_cells_and_total_droplets(umi_counts: np.ndarray,
cell_counts: float,
empty_counts: float,
empty_count_upper_limit: float,
max_empties: int = consts.MAX_EMPTIES_TO_INCLUDE) \
-> Dict[str, int]:
"""Obtain priors on cell counts and empty droplet counts from a UMI curve
using heuristics, and without applying any other prior information.
NOTE: to be run using inputs from get_cell_count_empty_count()
Args:
umi_counts: Array of UMI counts per droplet, in no particular order
cell_counts: Prior from get_cell_count_empty_count()
empty_counts: Prior from get_cell_count_empty_count()
empty_count_upper_limit: Prior from get_cell_count_empty_count()
max_empties: Do not include more putative empty droplets than this
Returns:
Dict with keys ['expected_cells', 'total_droplets', 'transition_point']
Example:
>>> priors = get_cell_count_empty_count(umi_counts)
>>> priors.update(get_expected_cells_and_total_droplets(umi_counts, **priors))
"""
# expected cells does well when you give it a very conservative estimate
expected_cells = (umi_counts >= cell_counts).sum()
# total droplets will be between empty_count_prior and its upper limit
total_droplets_count_value = np.sqrt(empty_counts * empty_count_upper_limit)
total_droplets = (umi_counts >= total_droplets_count_value).sum()
# find the transition point
count_crossover = np.sqrt(cell_counts * empty_counts)
transition_point = (umi_counts >= count_crossover).sum()
logger.debug(f'In get_expected_cells_and_total_droplets(), found transition '
f'point at droplet {transition_point}')
    # ensure our heuristics don't go too far on datasets with many cells
total_droplets = min(total_droplets, transition_point + max_empties)
return {'expected_cells': expected_cells,
'total_droplets': total_droplets,
'transition_point': transition_point}
def get_priors(umi_counts: np.ndarray,
low_count_threshold: float,
max_total_droplets: int = consts.MAX_TOTAL_DROPLETS_GUESSED) \
-> Dict[str, Union[int, float]]:
"""Get all priors using get_cell_count_empty_count() and
get_expected_cells_and_total_droplets(), employing a failsafe if
total_droplets is improbably large.
Args:
umi_counts: Array of UMI counts per droplet, in no particular order
low_count_threshold: Ignore droplets with counts below this value
max_total_droplets: If the initial heuristics come up with a
total_droplets value greater than this, we re-run the heuristics
with higher low_count_threshold
Returns:
Dict with keys ['cell_counts', 'empty_counts',
'empty_count_upper_limit', 'surely_empty_counts',
'expected_cells', 'total_droplets', 'log_counts_crossover']
"""
logger.debug("Computing priors from the UMI curve")
priors = get_cell_count_empty_count(
umi_counts=umi_counts,
low_count_threshold=low_count_threshold,
)
priors.update(get_expected_cells_and_total_droplets(umi_counts=umi_counts, **priors))
logger.debug(f'Automatically computed priors: {priors}')
a = 0
while priors['total_droplets'] > max_total_droplets:
logger.debug(f'Heuristics for estimating priors resulted in '
f'{priors["total_droplets"]} total_droplets, which is '
f'typically too large. Recomputing with '
f'low_count_threshold = {priors["empty_count_upper_limit"]:.0f}')
priors = get_cell_count_empty_count(
umi_counts=umi_counts,
low_count_threshold=priors['empty_count_upper_limit'],
)
priors.update(get_expected_cells_and_total_droplets(umi_counts=umi_counts, **priors))
logger.debug(f'Automatically computed priors: {priors}')
a += 1
if a > 5:
break
# compute a few last things
compute_crossover_surely_empty_and_stds(umi_counts=umi_counts, priors=priors)
return priors
def compute_crossover_surely_empty_and_stds(umi_counts, priors):
"""Given cell_counts and total_droplets, compute a few more quantities
Args:
umi_counts: Array of UMI counts per droplet, in no particular order
priors: Dict of priors
Returns:
None. Modifies priors dict in place.
"""
assert 'total_droplets' in priors.keys(), \
'Need total_droplets in priors to run compute_crossover_surely_empty_and_stds()'
assert 'cell_counts' in priors.keys(), \
'Need cell_counts in priors to run compute_crossover_surely_empty_and_stds()'
# Compute a crossover point in log count space.
reverse_sorted_counts = np.sort(umi_counts)[::-1]
surely_empty_counts = reverse_sorted_counts[priors['total_droplets']]
log_counts_crossover = (np.log(surely_empty_counts) + np.log(priors['cell_counts'])) / 2
priors.update({'log_counts_crossover': log_counts_crossover,
'surely_empty_counts': surely_empty_counts})
# Compute several other priors.
log_nonzero_umi_counts = np.log(umi_counts[umi_counts > 0])
d_std = np.std(log_nonzero_umi_counts[log_nonzero_umi_counts > log_counts_crossover]).item() / 5.
d_empty_std = 0.01 # this is basically turned off in favor of epsilon
priors.update({'d_std': d_std, 'd_empty_std': d_empty_std})
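# A minimal usage sketch (the simulated UMI counts below are made up purely
# to illustrate the API defined in this file):
#
#   import numpy as np
#   umi_counts = np.concatenate([np.random.lognormal(8, 0.3, 5000),    # "cells"
#                                np.random.lognormal(4, 0.3, 50000)])  # "empties"
#   priors = get_priors(umi_counts, low_count_threshold=15)
#   # -> dict with 'cell_counts', 'empty_counts', 'expected_cells',
#   #    'total_droplets', 'log_counts_crossover', ...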
|
[
"[email protected]"
] | |
e6f76f3ae72d3982282bf5277e7aba4a0ec5c316
|
218ad2898e2851f97eb2fb166d2811c80da11a34
|
/e17-01.py
|
1eb5fbf035bf21f213d2dec84766b24c80d5963e
|
[] |
no_license
|
mariane-sm/python_scripts
|
650b862ab797fa5bfa05fe1052814597ccdf352c
|
d8ba8c998cb635b155519cd18b7be7cc034395a0
|
refs/heads/master
| 2021-01-02T22:30:40.089088 | 2015-06-06T21:50:41 | 2015-06-06T21:50:41 | 35,857,281 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 316 |
py
|
import copy
def calc(n, out, solutions):
if sum(out) == n:
out.sort()
solutions.add(tuple(out))
else:
for number in [2,3,7]:
x = copy.deepcopy(out)
x.append(number)
if is_valid(x, n):
calc(n, x, solutions)
def is_valid(out, n):
return sum(out) <= n
z = set()
calc(12, [], z)
print z
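# e.g. calc(12, [], z) collects every sorted multiset of 2s, 3s and 7s that
# sums to 12, such as (2, 3, 7), (3, 3, 3, 3) and (2, 2, 2, 3, 3).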
|
[
"[email protected]"
] | |
47186bd9b8e23ac196e3ad4e78bae3502606a8d1
|
ed72519cec8f7ca796cd7871c3e4875d852381f7
|
/layers.py
|
34addd78fd1d4c842197f01bf208d0683709133f
|
[
"MIT"
] |
permissive
|
kivicode/keras_layers
|
140b23228caab68a8eee05dbbed8d65c5bae3f45
|
e37f70bee1ff39b574895eebc4eee1f2acb4993e
|
refs/heads/master
| 2023-07-09T23:54:00.669055 | 2021-08-10T15:51:44 | 2021-08-10T15:51:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 55,154 |
py
|
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.layers import Layer, Lambda
from tensorflow.python.keras.layers import InputSpec
from tensorflow.python.ops import nn_ops
from tensorflow.python.keras import initializers, regularizers, constraints, activations
from tensorflow.python.keras.utils import conv_utils
def gaussian_init(shape, dtype=None, partition_info=None):
v = np.random.randn(*shape)
v = np.clip(v, -3, +3)
return K.constant(v, dtype=dtype)
def conv_init_linear(shape, dtype=None, partition_info=None):
v = np.random.randn(*shape)
v = np.clip(v, -3, +3)
fan_in = np.prod(shape[:3])
v = v / (fan_in**0.5)
return K.constant(v, dtype=dtype)
def conv_init_relu(shape, dtype=None, partition_info=None):
v = np.random.randn(*shape)
v = np.clip(v, -3, +3)
fan_in = np.prod(shape[:3])
v = v / (fan_in**0.5) * 2**0.5
return K.constant(v, dtype=dtype)
def conv_init_relu2(shape, dtype=None, partition_info=None):
v = np.random.randn(*shape)
v = np.clip(v, -3, +3)
fan_in = np.prod(shape[:3])
v = v / (fan_in**0.5) * 2
return K.constant(v, dtype=dtype)
def depthwiseconv_init_linear(shape, dtype=None, partition_info=None):
v = np.random.randn(*shape)
v = np.clip(v, -3, +3)
fan_in = np.prod(shape[:2])
v = v / (fan_in**0.5)
return K.constant(v, dtype=dtype)
def depthwiseconv_init_relu(shape, dtype=None, partition_info=None):
v = np.random.randn(*shape)
v = np.clip(v, -3, +3)
fan_in = np.prod(shape[:2])
v = v / (fan_in**0.5) * 2**0.5
return K.constant(v, dtype=dtype)
class Covn2DBaseLayer(Layer):
"""Basic Conv2D class from which other layers inherit.
"""
def __init__(self,
kernel_size,
strides=(1, 1),
padding='valid',
#data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=False,
kernel_initializer='glorot_uniform',
kernel_regularizer=None,
kernel_constraint=None,
bias_initializer='zeros',
bias_regularizer=None,
bias_constraint=None,
activity_regularizer=None,
**kwargs):
super(Covn2DBaseLayer, self).__init__(
activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
self.rank = rank = 2
self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_initializer = initializers.get(bias_initializer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.bias_constraint = constraints.get(bias_constraint)
def get_config(self):
config = super(Covn2DBaseLayer, self).get_config()
config.update({
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_initializer': initializers.serialize(self.bias_initializer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'bias_constraint': constraints.serialize(self.bias_constraint),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
})
return config
class Conv2D(Covn2DBaseLayer):
"""Conv2D Layer with Weight Normalization.
# Arguments
They are the same as for the normal Conv2D layer.
weightnorm: Boolean flag, whether Weight Normalization is used or not.
# References
[Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks](http://arxiv.org/abs/1602.07868)
"""
def __init__(self, filters, kernel_size, weightnorm=False, eps=1e-6, **kwargs):
super(Conv2D, self).__init__(kernel_size, **kwargs)
self.filters = filters
self.weightnorm = weightnorm
self.eps = eps
def build(self, input_shape):
if type(input_shape) is list:
feature_shape = input_shape[0]
else:
feature_shape = input_shape
self.kernel_shape = (*self.kernel_size, feature_shape[-1], self.filters)
self.kernel = self.add_weight(name='kernel',
shape=self.kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.weightnorm:
self.wn_g = self.add_weight(name='wn_g',
shape=(self.filters,),
initializer=initializers.Ones(),
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
super(Conv2D, self).build(input_shape)
def call(self, inputs, **kwargs):
if type(inputs) is list:
features = inputs[0]
else:
features = inputs
if self.weightnorm:
norm = tf.sqrt(tf.reduce_sum(tf.square(self.kernel), (0,1,2)) + self.eps)
kernel = self.kernel / norm * self.wn_g
else:
kernel = self.kernel
features = K.conv2d(features, kernel,
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate)
if self.use_bias:
features = tf.add(features, self.bias)
if self.activation is not None:
features = self.activation(features)
return features
def get_config(self):
config = super(Conv2D, self).get_config()
config.update({
'filters': self.filters,
'weightnorm': self.weightnorm,
'eps': self.eps,
})
return config
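# A minimal weight-normalization usage sketch (shapes illustrative):
#   y = Conv2D(64, 3, weightnorm=True, padding='same', activation='relu')(x)
# With weightnorm=True, call() rescales the kernel as
#   kernel / sqrt(sum(kernel**2 over h, w, in_channels) + eps) * wn_g
# so the learned wn_g controls the norm of each filter directly.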
class SparseConv2D(Covn2DBaseLayer):
"""2D Sparse Convolution layer for sparse input data.
# Arguments
They are the same as for the normal Conv2D layer.
binary: Boolean flag, whether the sparsity is propagated as binary
mask or as float values.
# Input shape
features: 4D tensor with shape (batch_size, rows, cols, channels)
mask: 4D tensor with shape (batch_size, rows, cols, 1)
If no mask is provided, all input pixels with features unequal
to zero are considered as valid.
# Example
x, m = SparseConv2D(32, 3, padding='same')(x)
x = Activation('relu')(x)
x, m = SparseConv2D(32, 3, padding='same')([x,m])
x = Activation('relu')(x)
# Notes
Sparse Convolution propagates the sparsity of the input data
through the network using a 2D mask.
# References
[Sparsity Invariant CNNs](https://arxiv.org/abs/1708.06500)
"""
def __init__(self, filters, kernel_size,
kernel_initializer=conv_init_relu,
binary=True,
**kwargs):
super(SparseConv2D, self).__init__(kernel_size, kernel_initializer=kernel_initializer, **kwargs)
self.filters = filters
self.binary = binary
def build(self, input_shape):
if type(input_shape) is list:
feature_shape = input_shape[0]
else:
feature_shape = input_shape
self.kernel_shape = (*self.kernel_size, feature_shape[-1], self.filters)
self.kernel = self.add_weight(name='kernel',
shape=self.kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
self.mask_kernel_shape = (*self.kernel_size, 1, 1)
self.mask_kernel = tf.ones(self.mask_kernel_shape)
self.mask_fan_in = tf.reduce_prod(self.mask_kernel_shape[:3])
if self.use_bias:
self.bias = self.add_weight(name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
super(SparseConv2D, self).build(input_shape)
def call(self, inputs, **kwargs):
if type(inputs) is list:
features = inputs[0]
mask = inputs[1]
else:
# if no mask is provided, get it from the features
features = inputs
mask = tf.where(tf.equal(tf.reduce_sum(features, axis=-1, keepdims=True), 0), 0.0, 1.0)
features = tf.multiply(features, mask)
features = nn_ops.convolution(features, self.kernel, self.padding.upper(), self.strides, self.dilation_rate)
norm = nn_ops.convolution(mask, self.mask_kernel, self.padding.upper(), self.strides, self.dilation_rate)
mask_fan_in = tf.cast(self.mask_fan_in, 'float32')
if self.binary:
mask = tf.where(tf.greater(norm,0), 1.0, 0.0)
else:
mask = norm / mask_fan_in
#ratio = tf.where(tf.equal(norm,0), 0.0, 1/norm) # Note: The authors use this in the paper, but it would require special initialization...
ratio = tf.where(tf.equal(norm,0), 0.0, mask_fan_in/norm)
features = tf.multiply(features, ratio)
if self.use_bias:
features = tf.add(features, self.bias)
if self.activation is not None:
features = self.activation(features)
return [features, mask]
def compute_output_shape(self, input_shape):
if type(input_shape) is list:
feature_shape = input_shape[0]
else:
feature_shape = input_shape
space = feature_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
feature_shape = [feature_shape[0], *new_space, self.filters]
mask_shape = [*feature_shape[:-1], 1]
return [feature_shape, mask_shape]
def get_config(self):
config = super(SparseConv2D, self).get_config()
config.update({
'filters': self.filters,
'binary': self.binary,
})
return config
class PartialConv2D(Covn2DBaseLayer):
"""2D Partial Convolution layer for sparse input data.
# Arguments
They are the same as for the normal Conv2D layer.
binary: Boolean flag, whether the sparsity is propagated as binary
mask or as float values.
# Input shape
features: 4D tensor with shape (batch_size, rows, cols, channels)
mask: 4D tensor with shape (batch_size, rows, cols, channels)
If the shape is (batch_size, rows, cols, 1), the mask is repeated
for each channel. If no mask is provided, all input elements
unequal to zero are considered as valid.
# Example
x, m = PartialConv2D(32, 3, padding='same')(x)
x = Activation('relu')(x)
x, m = PartialConv2D(32, 3, padding='same')([x,m])
x = Activation('relu')(x)
# Notes
In contrast to Sparse Convolution, Partial Convolution propagates
the sparsity for each channel separately. This makes it possible
to concatenate the features and the masks from different branches
    of the architecture.
# References
[Image Inpainting for Irregular Holes Using Partial Convolutions](https://arxiv.org/abs/1804.07723)
[Sparsity Invariant CNNs](https://arxiv.org/abs/1708.06500)
"""
def __init__(self, filters, kernel_size,
kernel_initializer=conv_init_relu,
binary=True,
weightnorm=False,
eps=1e-6,
**kwargs):
super(PartialConv2D, self).__init__(kernel_size, kernel_initializer=kernel_initializer, **kwargs)
self.filters = filters
self.binary = binary
self.weightnorm = weightnorm
self.eps = eps
def build(self, input_shape):
if type(input_shape) is list:
feature_shape = input_shape[0]
mask_shape = input_shape[1]
self.mask_shape = mask_shape
else:
feature_shape = input_shape
self.mask_shape = feature_shape
self.kernel_shape = (*self.kernel_size, feature_shape[-1], self.filters)
self.kernel = self.add_weight(name='kernel',
shape=self.kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
self.mask_kernel_shape = (*self.kernel_size, feature_shape[-1], self.filters)
self.mask_kernel = tf.ones(self.mask_kernel_shape)
self.mask_fan_in = tf.reduce_prod(self.mask_kernel_shape[:3])
if self.weightnorm:
self.wn_g = self.add_weight(name='wn_g',
shape=(self.filters,),
initializer=initializers.Ones(),
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
super(PartialConv2D, self).build(input_shape)
def call(self, inputs, **kwargs):
if type(inputs) is list:
features = inputs[0]
mask = inputs[1]
# if mask has only one channel, repeat
if self.mask_shape[-1] == 1:
mask = tf.repeat(mask, tf.shape(features)[-1], axis=-1)
else:
# if no mask is provided, get it from the features
features = inputs
mask = tf.where(tf.equal(features, 0), 0.0, 1.0)
if self.weightnorm:
norm = tf.sqrt(tf.reduce_sum(tf.square(self.kernel), (0,1,2)) + self.eps)
kernel = self.kernel / norm * self.wn_g
else:
kernel = self.kernel
mask_kernel = self.mask_kernel
features = tf.multiply(features, mask)
features = nn_ops.convolution(features, kernel, self.padding.upper(), self.strides, self.dilation_rate)
norm = nn_ops.convolution(mask, mask_kernel, self.padding.upper(), self.strides, self.dilation_rate)
mask_fan_in = tf.cast(self.mask_fan_in, 'float32')
if self.binary:
mask = tf.where(tf.greater(norm,0), 1.0, 0.0)
else:
mask = norm / mask_fan_in
ratio = tf.where(tf.equal(norm,0), 0.0, mask_fan_in/norm)
features = tf.multiply(features, ratio)
if self.use_bias:
features = tf.add(features, self.bias)
if self.activation is not None:
features = self.activation(features)
return [features, mask]
def compute_output_shape(self, input_shape):
if type(input_shape) is list:
feature_shape = input_shape[0]
else:
feature_shape = input_shape
space = feature_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
feature_shape = [feature_shape[0], *new_space, self.filters]
mask_shape = [feature_shape[0], *new_space, self.filters]
return [feature_shape, mask_shape]
def get_config(self):
config = super(PartialConv2D, self).get_config()
config.update({
'filters': self.filters,
'binary': self.binary,
'weightnorm': self.weightnorm,
'eps': self.eps,
})
return config
class GroupConv2D(Covn2DBaseLayer):
"""2D Group Convolution layer that shares weights over symmetries.
Group Convolution provides discrete rotation equivariance. It reduces the number
    of parameters and typically leads to better results.
The following two finite groups are supported:
Cyclic Group C4 (p4, 4 rotational symmetries)
Dihedral Group D4 (p4m, 4 rotational and 4 reflection symmetries)
# Arguments
They are the same as for the normal Conv2D layer.
filters: int, The effective number of filters is this value multiplied by the
number of transformations in the group (4 for C4 and 8 for D4)
kernel_size: int, Only odd values are supported
group: 'C4' or 'D4', Stay with one group when stacking layers
# Input shape
        features: 4D tensor with shape (batch_size, rows, cols, in_channels)
or 5D tensor with shape (batch_size, rows, cols, num_transformations, in_channels)
# Output shape
        features: 5D tensor with shape (batch_size, rows, cols, num_transformations, out_channels)
# Notes
        - BatchNormalization works as expected and shares the statistics over symmetries.
- Spatial Pooling can be done via AvgPool3D.
- Pooling along the group dimension can be done via MaxPool3D.
- Concatenation along the group dimension can be done via Reshape.
- To get a model with the inference time of a normal CNN, you can load the
expanded kernel into a normal Conv2D layer. The kernel expansion is
done in the 'call' method and the expanded kernel is stored in the
'transformed_kernel' attribute.
# Example
x = Input((16,16,3))
x = GroupConv2D(12, 3, group='D4', padding='same', activation='relu')(x)
x = BatchNormalization()(x)
x = GroupConv2D(12, 3, group='D4', padding='same', activation='relu')(x)
x = AvgPool3D(pool_size=(2,2,1), strides=(2,2,1), padding='same')(x)
x = GroupConv2D(12, 3, group='D4', padding='same', activation='relu')(x)
x = MaxPool3D(pool_size=(1,1,x.shape[-2]))(x)
s = x.shape
x = Reshape((s[1],s[2],s[3]*s[4]))(x)
# References
[Group Equivariant Convolutional Networks](https://arxiv.org/abs/1602.07576)
[Rotation Equivariant CNNs for Digital Pathology](https://arxiv.org/abs/1806.03962)
https://github.com/tscohen/GrouPy
https://github.com/basveeling/keras-gcnn
"""
def __init__(self, filters, kernel_size, group='D4', **kwargs):
super(GroupConv2D, self).__init__(kernel_size, **kwargs)
if not self.kernel_size[0] == self.kernel_size[1]:
raise ValueError('Requires square kernel')
if self.kernel_size[0] % 2 != 1:
raise ValueError('Requires odd kernel size')
group = group.upper()
if group == 'C4':
self.num_transformations = 4
elif group == 'D4':
self.num_transformations = 8
else:
raise ValueError('Unknown group')
self.filters = filters
self.group = group
self.input_spec = InputSpec(min_ndim=4, max_ndim=5)
def compute_output_shape(self, input_shape):
space = input_shape[1:3]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0], *new_space, self.num_transformations, self.filters)
def build(self, input_shape):
if len(input_shape) == 4:
self.first = True
num_in_channels = input_shape[-1]
else:
self.first = False
num_in_channels = input_shape[-2] * input_shape[-1]
self.kernel = self.add_weight(name='kernel',
shape=(*self.kernel_size, num_in_channels, self.filters),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, features):
ni = features.shape[-1]
no = self.filters
if self.group == 'C4':
nt = 4
elif self.group == 'D4':
nt = 8
nti = 1 if self.first else nt
nto = nt
k = self.kernel_size[0]
t = np.reshape(np.arange(nti*k*k), (nti,k,k))
trafos = [np.rot90(t,k,axes=(1, 2)) for k in range(4)]
if nt == 8:
trafos = trafos + [np.flip(t,1) for t in trafos]
self.trafos = trafos = np.array(trafos)
        # index magic: build, for each output transformation, the index of the
        # matching (transformed) input-kernel slice
if nti == 1:
indices = trafos
elif nti == 4:
indices = [[trafos[l, (m-l)%4 ,:,:] for m in range(4)] for l in range(4)]
elif nti == 8:
indices = [[trafos[l, (m-l)%4 if ((m < 4) == (l < 4)) else (m+l)%4+4 ,:,:] for m in range(8)] for l in range(8)]
self.indices = indices = np.reshape(indices, (nto,nti,k,k))
# transform the kernel
kernel = self.kernel
kernel = tf.reshape(kernel, (nti*k*k, ni, no))
kernel = tf.gather(kernel, indices, axis=0)
kernel = tf.reshape(kernel, (nto, nti, k,k, ni, no))
kernel = tf.transpose(kernel, (2,3,1,4,0,5))
kernel = tf.reshape(kernel, (k,k, nti*ni, nto*no))
self.transformed_kernel = kernel
if self.first:
x = features
else:
s = features.shape
x = tf.reshape(features, (-1,s[1],s[2],s[3]*s[4]))
x = K.conv2d(x, kernel, strides=self.strides, padding=self.padding, dilation_rate=self.dilation_rate)
s = x.shape
x = tf.reshape(x, (-1,s[1],s[2],nto,no))
features = x
if self.use_bias:
features = tf.add(features, self.bias)
if self.activation is not None:
features = self.activation(features)
return features
def get_config(self):
config = super(GroupConv2D, self).get_config()
config.update({
'filters': self.filters,
'group': self.group,
})
return config
class DeformableConv2D(Covn2DBaseLayer):
"""2D Deformable Convolution layer that learns the spatial offsets where
the input elements of the convolution are sampled.
The layer is basically a updated version of An Jiaoyang's code.
# Notes
- A layer does not use a native CUDA kernel which would have better
performance https://github.com/tensorflow/addons/issues/179
# References
[Deformable Convolutional Networks](https://arxiv.org/abs/1703.06211)
# related code
https://github.com/DHZS/tf-deformable-conv-layer (An Jiaoyang, 2018-10-11)
"""
def __init__(self, filters, kernel_size, num_deformable_group=None, **kwargs):
"""`kernel_size`, `strides` and `dilation_rate` must have the same value in both axis.
:param num_deformable_group: split output channels into groups, offset shared in each group. If
this parameter is None, then set num_deformable_group=filters.
"""
super(DeformableConv2D, self).__init__(kernel_size, **kwargs)
if not self.kernel_size[0] == self.kernel_size[1]:
raise ValueError('Requires square kernel')
if not self.strides[0] == self.strides[1]:
raise ValueError('Requires equal stride')
if not self.dilation_rate[0] == self.dilation_rate[1]:
raise ValueError('Requires equal dilation')
self.filters = filters
if num_deformable_group is None:
num_deformable_group = filters
if filters % num_deformable_group != 0:
raise ValueError('"filters" mod "num_deformable_group" must be zero')
self.num_deformable_group = num_deformable_group
self.kernel = None
self.bias = None
self.offset_layer_kernel = None
self.offset_layer_bias = None
def build(self, input_shape):
input_dim = input_shape[-1]
# kernel_shape = self.kernel_size + (input_dim, self.filters)
# we want to use depth-wise conv
kernel_shape = self.kernel_size + (self.filters * input_dim, 1)
self.kernel = self.add_weight(name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
# create offset conv layer
offset_num = self.kernel_size[0] * self.kernel_size[1] * self.num_deformable_group
self.offset_layer_kernel = self.add_weight(name='offset_layer_kernel',
shape=self.kernel_size + (input_dim, offset_num * 2), # 2 means x and y axis
initializer=tf.zeros_initializer(),
regularizer=self.kernel_regularizer,
trainable=True,
dtype=self.dtype)
self.offset_layer_bias = self.add_weight(name='offset_layer_bias',
shape=(offset_num * 2,),
initializer=tf.zeros_initializer(),
# initializer=tf.random_uniform_initializer(-5, 5),
regularizer=self.bias_regularizer,
trainable=True,
dtype=self.dtype)
self.built = True
def call(self, inputs, training=None, **kwargs):
        # get offset, shape [batch_size, out_h, out_w, filter_h * filter_w * channel_out * 2]
offset = tf.nn.conv2d(inputs,
filters=self.offset_layer_kernel,
strides=[1, *self.strides, 1],
padding=self.padding.upper(),
dilations=[1, *self.dilation_rate, 1])
offset += self.offset_layer_bias
# add padding if needed
inputs = self._pad_input(inputs)
# some length
batch_size = tf.shape(inputs)[0]
channel_in = int(inputs.shape[-1])
in_h, in_w = [int(i) for i in inputs.shape[1: 3]] # input feature map size
out_h, out_w = [int(i) for i in offset.shape[1: 3]] # output feature map size
filter_h, filter_w = self.kernel_size
# get x, y axis offset
offset = tf.reshape(offset, [batch_size, out_h, out_w, -1, 2])
y_off, x_off = offset[:, :, :, :, 0], offset[:, :, :, :, 1]
        # input feature map grid coordinates
y, x = self._get_conv_indices([in_h, in_w])
y, x = [tf.expand_dims(i, axis=-1) for i in [y, x]]
y, x = [tf.tile(i, [batch_size, 1, 1, 1, self.num_deformable_group]) for i in [y, x]]
y, x = [tf.reshape(i, [batch_size, *i.shape[1: 3], -1]) for i in [y, x]]
y, x = [tf.cast(i, 'float32') for i in [y, x]]
# add offset
y, x = y + y_off, x + x_off
y = tf.clip_by_value(y, 0, in_h - 1)
x = tf.clip_by_value(x, 0, in_w - 1)
# get four coordinates of points around (x, y)
y0, x0 = [tf.cast(tf.floor(i), 'int32') for i in [y, x]]
y1, x1 = y0 + 1, x0 + 1
# clip
y0, y1 = [tf.clip_by_value(i, 0, in_h - 1) for i in [y0, y1]]
x0, x1 = [tf.clip_by_value(i, 0, in_w - 1) for i in [x0, x1]]
# get pixel values
indices = [[y0, x0], [y0, x1], [y1, x0], [y1, x1]]
p0, p1, p2, p3 = [DeformableConv2D._get_pixel_values_at_point(inputs, i) for i in indices]
# cast to float
x0, x1, y0, y1 = [tf.cast(i, 'float32') for i in [x0, x1, y0, y1]]
# weights
w0 = (y1 - y) * (x1 - x)
w1 = (y1 - y) * (x - x0)
w2 = (y - y0) * (x1 - x)
w3 = (y - y0) * (x - x0)
# expand dim for broadcast
w0, w1, w2, w3 = [tf.expand_dims(i, axis=-1) for i in [w0, w1, w2, w3]]
# bilinear interpolation
pixels = tf.add_n([w0 * p0, w1 * p1, w2 * p2, w3 * p3])
# reshape the "big" feature map
pixels = tf.reshape(pixels, [batch_size, out_h, out_w, filter_h, filter_w, self.num_deformable_group, channel_in])
pixels = tf.transpose(pixels, [0, 1, 3, 2, 4, 5, 6])
pixels = tf.reshape(pixels, [batch_size, out_h * filter_h, out_w * filter_w, self.num_deformable_group, channel_in])
# copy channels to same group
feat_in_group = self.filters // self.num_deformable_group
pixels = tf.tile(pixels, [1, 1, 1, 1, feat_in_group])
pixels = tf.reshape(pixels, [batch_size, out_h * filter_h, out_w * filter_w, -1])
# depth-wise conv
out = tf.nn.depthwise_conv2d(pixels, self.kernel, [1, filter_h, filter_w, 1], 'VALID')
# add the output feature maps in the same group
out = tf.reshape(out, [batch_size, out_h, out_w, self.filters, channel_in])
out = tf.reduce_sum(out, axis=-1)
if self.use_bias:
out += self.bias
return self.activation(out)
def _pad_input(self, inputs):
"""Check if input feature map needs padding, because we don't use the standard Conv() function.
:param inputs:
:return: padded input feature map
"""
# When padding is 'same', we should pad the feature map.
# if padding == 'same', output size should be `ceil(input / stride)`
if self.padding == 'same':
in_shape = inputs.shape.as_list()[1:3]
padding_list = []
for i in range(2):
filter_size = self.kernel_size[i]
dilation = self.dilation_rate[i]
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
same_output = (in_shape[i] + self.strides[i] - 1) // self.strides[i]
valid_output = (in_shape[i] - dilated_filter_size + self.strides[i]) // self.strides[i]
if same_output == valid_output:
padding_list += [0, 0]
else:
p = dilated_filter_size - 1
p_0 = p // 2
padding_list += [p_0, p - p_0]
if sum(padding_list) != 0:
padding = [[0, 0],
[padding_list[0], padding_list[1]], # top, bottom padding
[padding_list[2], padding_list[3]], # left, right padding
[0, 0]]
inputs = tf.pad(inputs, padding)
return inputs
def _get_conv_indices(self, feature_map_size):
"""the x, y coordinates in the window when a filter sliding on the feature map
:param feature_map_size:
:return: y, x with shape [1, out_h, out_w, filter_h * filter_w]
"""
feat_h, feat_w = [int(i) for i in feature_map_size[0: 2]]
x, y = tf.meshgrid(tf.range(feat_w), tf.range(feat_h))
x, y = [tf.reshape(i, [1, *i.get_shape(), 1]) for i in [x, y]] # shape [1, h, w, 1]
x, y = [tf.image.extract_patches(i,
[1, *self.kernel_size, 1],
[1, *self.strides, 1],
[1, *self.dilation_rate, 1],
'VALID')
for i in [x, y]] # shape [1, out_h, out_w, filter_h * filter_w]
return y, x
@staticmethod
def _get_pixel_values_at_point(inputs, indices):
"""get pixel values
:param inputs:
:param indices: shape [batch_size, H, W, I], I = filter_h * filter_w * channel_out
:return:
"""
y, x = indices
batch, h, w, n = y.shape.as_list()[0: 4]
y_shape = tf.shape(y)
batch, n = y_shape[0], y_shape[3]
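        # h and w come from the static shape above; batch and n are re-read
        # dynamically so the gather also works with an unknown batch size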
batch_idx = tf.reshape(tf.range(0, batch), (batch, 1, 1, 1))
b = tf.tile(batch_idx, (1, h, w, n))
pixel_idx = tf.stack([b, y, x], axis=-1)
return tf.gather_nd(inputs, pixel_idx)
class DepthwiseConv2D(Covn2DBaseLayer):
"""2D depthwise convolution layer.
# Notes
A DepthwiseConv2D layer followed by an 1x1 Conv2D layer is equivalent
to the SeparableConv2D layer provided by Keras.
# References
[Xception: Deep Learning with Depthwise Separable Convolutions](http://arxiv.org/abs/1610.02357)
"""
def __init__(self, depth_multiplier, kernel_size,
kernel_initializer=depthwiseconv_init_relu,
**kwargs):
super(DepthwiseConv2D, self).__init__(kernel_size, kernel_initializer=kernel_initializer, **kwargs)
self.depth_multiplier = depth_multiplier
def build(self, input_shape):
if type(input_shape) is list:
feature_shape = input_shape[0]
else:
feature_shape = input_shape
kernel_shape = (*self.kernel_size, feature_shape[-1], self.depth_multiplier)
self.kernel = self.add_weight(name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(name='bias',
shape=(feature_shape[-1]*self.depth_multiplier,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
super(DepthwiseConv2D, self).build(input_shape)
def call(self, inputs, **kwargs):
if type(inputs) is list:
features = inputs[0]
else:
features = inputs
features = K.depthwise_conv2d(features, self.kernel,
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate)
if self.use_bias:
features = tf.add(features, self.bias)
if self.activation is not None:
features = self.activation(features)
return features
def compute_output_shape(self, input_shape):
if type(input_shape) is list:
feature_shape = input_shape[0]
else:
feature_shape = input_shape
space = feature_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
feature_shape = [feature_shape[0], *new_space, feature_shape[-1]*self.depth_multiplier]
return feature_shape
def get_config(self):
config = super(DepthwiseConv2D, self).get_config()
config.update({
'depth_multiplier': self.depth_multiplier,
})
return config
class MaxPoolingWithArgmax2D(Layer):
'''MaxPooling for unpooling with indices.
# References
[SegNet: A Deep Convolutional Encoder-Decoder Architecture for Image Segmentation](http://arxiv.org/abs/1511.00561)
# related code:
https://github.com/PavlosMelissinos/enet-keras
https://github.com/ykamikawa/SegNet
'''
def __init__(self, pool_size=(2, 2), strides=(2, 2), padding='same', **kwargs):
super(MaxPoolingWithArgmax2D, self).__init__(**kwargs)
self.pool_size = conv_utils.normalize_tuple(pool_size, 2, 'pool_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
def call(self, inputs, **kwargs):
ksize = [1, self.pool_size[0], self.pool_size[1], 1]
strides = [1, self.strides[0], self.strides[1], 1]
padding = self.padding.upper()
output, argmax = nn_ops.max_pool_with_argmax(inputs, ksize, strides, padding)
argmax = tf.cast(argmax, K.floatx())
return [output, argmax]
    def compute_output_shape(self, input_shape):
        # respect the configured pool size, strides and padding instead of a
        # hard-coded factor-2 downsampling
        space = [conv_utils.conv_output_length(input_shape[i + 1], self.pool_size[i],
                                               padding=self.padding, stride=self.strides[i])
                 for i in range(2)]
        output_shape = (input_shape[0], *space, input_shape[3])
        return [output_shape, output_shape]
def compute_mask(self, inputs, mask=None):
return 2 * [None]
def get_config(self):
config = super(MaxPoolingWithArgmax2D, self).get_config()
config.update({
'pool_size': self.pool_size,
'strides': self.strides,
'padding': self.padding,
})
return config
class MaxUnpooling2D(Layer):
'''Inversion of MaxPooling with indices.
# References
[SegNet: A Deep Convolutional Encoder-Decoder Architecture for Image Segmentation](http://arxiv.org/abs/1511.00561)
# related code:
https://github.com/PavlosMelissinos/enet-keras
https://github.com/ykamikawa/SegNet
'''
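    # A minimal encoder/decoder sketch (assumption: SegNet-style pairing via the
    # Keras functional API); the argmax mask from pooling drives the unpooling:
    #   x, mask = MaxPoolingWithArgmax2D()(x)
    #   x = Conv2D(64, 3, padding='same', activation='relu')(x)
    #   x = MaxUnpooling2D()([x, mask])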
def __init__(self, size=(2, 2), **kwargs):
super(MaxUnpooling2D, self).__init__(**kwargs)
self.size = conv_utils.normalize_tuple(size, 2, 'size')
def call(self, inputs, output_shape=None):
updates, mask = inputs[0], inputs[1]
mask = tf.cast(mask, 'int32')
input_shape = tf.shape(updates, out_type='int32')
# calculation new shape
if output_shape is None:
output_shape = (input_shape[0], input_shape[1] * self.size[0], input_shape[2] * self.size[1], input_shape[3])
# calculation indices for batch, height, width and feature maps
one_like_mask = K.ones_like(mask, dtype='int32')
batch_shape = K.concatenate([[input_shape[0]], [1], [1], [1]], axis=0)
batch_range = K.reshape(tf.range(output_shape[0], dtype='int32'), shape=batch_shape)
b = one_like_mask * batch_range
y = mask // (output_shape[2] * output_shape[3])
x = (mask // output_shape[3]) % output_shape[2]
feature_range = tf.range(output_shape[3], dtype='int32')
f = one_like_mask * feature_range
# transpose indices & reshape update values to one dimension
updates_size = tf.size(updates)
indices = K.transpose(K.reshape(K.stack([b, y, x, f]), [4, updates_size]))
values = K.reshape(updates, [updates_size])
ret = tf.scatter_nd(indices, values, output_shape)
return ret
def compute_output_shape(self, input_shape):
mask_shape = input_shape[1]
output_shape = [mask_shape[0], mask_shape[1] * self.size[0], mask_shape[2] * self.size[1], mask_shape[3]]
return tuple(output_shape)
def get_config(self):
config = super(MaxUnpooling2D, self).get_config()
config.update({
'size': self.size,
})
return config
class AddCoords2D(Layer):
"""Add coords to a tensor as described in CoordConv paper.
# Arguments
with_r: Boolean flag, whether the r coordinate is added or not. See paper for more details.
# Input shape
        features: 4D tensor with shape (batch_size, rows, cols, channels)
    # Output shape
        features: same as input, except channels + 2 (channels + 3 if with_r is True)
# Example
x = Conv2D(32, 3, padding='same', activation='relu')(x)
x = AddCoords2D()(x)
x = Conv2D(32, 3, padding='same', activation='relu')(x)
# Notes
        Semi-convolutional operators are an approach closely related to CoordConv.
# References
[An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution](http://arxiv.org/abs/1807.03247)
[Semi-convolutional Operators for Instance Segmentation](https://arxiv.org/abs/1807.10712)
"""
def __init__(self, with_r=False, **kwargs):
super(AddCoords2D, self).__init__(**kwargs)
self.with_r = with_r
def call(self, features):
y_dim = features.shape[1]
x_dim = features.shape[2]
ones = tf.ones_like(features[:,:,:,:1])
y_range = tf.range(y_dim, dtype='float32') / tf.cast(y_dim-1, 'float32') * 2 - 1
x_range = tf.range(x_dim, dtype='float32') / tf.cast(x_dim-1, 'float32') * 2 - 1
yy = ones * y_range[None, :, None, None]
xx = ones * x_range[None, None, :, None]
if self.with_r:
rr = tf.sqrt(tf.square(yy-0.5) + tf.square(xx-0.5))
features = tf.concat([features, yy, xx, rr], axis=-1)
else:
features = tf.concat([features, yy, xx], axis=-1)
return features
def compute_output_shape(self, input_shape):
output_shape = list(input_shape)
output_shape[3] = output_shape[3] + 2
if self.with_r:
output_shape[3] = output_shape[3] + 1
return tuple(output_shape)
def get_config(self):
config = super(AddCoords2D, self).get_config()
config.update({
'with_r': self.with_r,
})
return config
class LayerNormalization(Layer):
"""Layer Normalization Layer.
# References
[Layer Normalization](http://arxiv.org/abs/1607.06450)
"""
def __init__(self, eps=1e-6, **kwargs):
super(LayerNormalization, self).__init__(**kwargs)
self.eps = eps
def build(self, input_shape):
self.gamma = self.add_weight(name='gamma', shape=input_shape[-1:],
initializer=initializers.Ones(), trainable=True)
self.beta = self.add_weight(name='beta', shape=input_shape[-1:],
initializer=initializers.Zeros(), trainable=True)
super(LayerNormalization, self).build(input_shape)
def call(self, x):
mean = K.mean(x, axis=-1, keepdims=True)
std = K.std(x, axis=-1, keepdims=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super(LayerNormalization, self).get_config()
config.update({
'eps': self.eps,
})
return config
class InstanceNormalization(Layer):
"""Instance Normalization Layer.
# References
[Instance Normalization: The Missing Ingredient for Fast Stylization](https://arxiv.org/abs/1607.08022)
"""
def __init__(self, eps=1e-6, **kwargs):
super(InstanceNormalization, self).__init__(**kwargs)
self.eps = eps
def build(self, input_shape):
self.gamma = self.add_weight(name='gamma', shape=input_shape[-1:],
initializer=initializers.Ones(), trainable=True)
self.beta = self.add_weight(name='beta', shape=input_shape[-1:],
initializer=initializers.Zeros(), trainable=True)
super(InstanceNormalization, self).build(input_shape)
def call(self, x):
mean = K.mean(x, axis=list(range(len(x.shape))[1:-1]), keepdims=True)
std = K.std(x, axis=list(range(len(x.shape))[1:-1]), keepdims=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super(InstanceNormalization, self).get_config()
config.update({
'eps': self.eps,
})
return config
def Resize2D(size, method='bilinear'):
"""Spatial resizing layer.
# Arguments
size: spatial output size (rows, cols)
method: 'bilinear', 'bicubic', 'nearest', ...
"""
return Lambda(lambda x: tf.image.resize(x, size, method=method))
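# Usage sketch for Resize2D (assumption: 4D image/feature tensor input):
#   x = Resize2D((64, 64), method='nearest')(x)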
class Blur2D(Layer):
"""2D Blur Layer as used in Antialiased CNNs for Subsampling.
# Notes
        The layer handles boundary effects similarly to AvgPool2D.
# References
[Making Convolutional Networks Shift-Invariant Again](https://arxiv.org/abs/1904.11486)
# related code
https://github.com/adobe/antialiased-cnns
https://github.com/adobe/antialiased-cnns/issues/10
"""
def __init__(self, filter_size=3, strides=2, padding='valid', **kwargs):
rank = 2
self.filter_size = filter_size
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if self.filter_size == 1:
self.a = np.array([1.,])
elif self.filter_size == 2:
self.a = np.array([1., 1.])
elif self.filter_size == 3:
self.a = np.array([1., 2., 1.])
elif self.filter_size == 4:
self.a = np.array([1., 3., 3., 1.])
elif self.filter_size == 5:
self.a = np.array([1., 4., 6., 4., 1.])
elif self.filter_size == 6:
self.a = np.array([1., 5., 10., 10., 5., 1.])
        elif self.filter_size == 7:
            self.a = np.array([1., 6., 15., 20., 15., 6., 1.])
        else:
            raise ValueError('filter_size must be an integer between 1 and 7')
super(Blur2D, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
feature_shape = input_shape
space = feature_shape[1:-1]
new_space = []
for i in range(len(space)):
            # the blur kernel is square (filter_size x filter_size) and undilated
            new_dim = conv_utils.conv_output_length(
                space[i],
                self.filter_size,
                padding=self.padding,
                stride=self.strides[i],
                dilation=1)
new_space.append(new_dim)
feature_shape = [feature_shape[0], *new_space, feature_shape[3]]
return feature_shape
def build(self, input_shape):
k = self.a[:,None] * self.a[None,:]
k = np.tile(k[:,:,None,None], (1,1,input_shape[-1],1))
self.kernel = K.constant(k, dtype=K.floatx())
def call(self, x):
features = K.depthwise_conv2d(x, self.kernel, strides=self.strides, padding=self.padding)
# normalize the features
mask = tf.ones_like(x)
norm = K.depthwise_conv2d(mask, self.kernel, strides=self.strides, padding=self.padding)
features = tf.multiply(features, 1./norm)
return features
def get_config(self):
config = super(Blur2D, self).get_config()
config.update({
'filter_size': self.filter_size,
'strides': self.strides,
'padding': self.padding,
})
return config
class Scale(Layer):
"""Layer to learn a linear feature scaling.
"""
def __init__(self,
use_shift=True,
use_scale=True,
shift_initializer='zeros',
shift_regularizer=None,
shift_constraint=None,
scale_initializer='ones',
scale_regularizer=None,
scale_constraint=None,
**kwargs):
super(Scale, self).__init__(**kwargs)
self.use_shift = use_shift
self.use_scale = use_scale
self.shift_initializer = initializers.get(shift_initializer)
self.shift_regularizer = regularizers.get(shift_regularizer)
self.shift_constraint = constraints.get(shift_constraint)
self.scale_initializer = initializers.get(scale_initializer)
self.scale_regularizer = regularizers.get(scale_regularizer)
self.scale_constraint = constraints.get(scale_constraint)
def compute_output_shape(self, input_shape):
return input_shape
def build(self, input_shape):
if self.use_shift:
            self.shift = self.add_weight(name='shift',
shape=(input_shape[-1],),
initializer=self.shift_initializer,
regularizer=self.shift_regularizer,
constraint=self.shift_constraint,
trainable=True,
dtype=self.dtype)
else:
            self.shift = None
if self.use_scale:
            self.scale = self.add_weight(name='scale',
shape=(input_shape[-1],),
initializer=self.scale_initializer,
regularizer=self.scale_regularizer,
constraint=self.scale_constraint,
trainable=True,
dtype=self.dtype)
else:
self.scale = None
super(Scale, self).build(input_shape)
def call(self, inputs, **kwargs):
x = inputs
if self.use_scale:
x = tf.multiply(x, self.scale)
if self.use_shift:
x = tf.add(x, self.shift)
return x
def get_config(self):
config = super(Scale, self).get_config()
config.update({
'use_shift': self.use_shift,
'use_scale': self.use_scale,
'shift_initializer': initializers.serialize(self.shift_initializer),
'shift_regularizer': regularizers.serialize(self.shift_regularizer),
'shift_constraint': constraints.serialize(self.shift_constraint),
'scale_initializer': initializers.serialize(self.scale_initializer),
'scale_regularizer': regularizers.serialize(self.scale_regularizer),
'scale_constraint': constraints.serialize(self.scale_constraint),
})
return config
|
[
"[email protected]"
] | |
71240c639014721fc67dd2c7ff9f05d6c32de443
|
095a1c126ffaf703d923431ce5279a0dac384740
|
/timecard/views/auth_views.py
|
f01ca3273a967bcb926fb3d487993405f8ebdcb9
|
[] |
no_license
|
patpio/timecard
|
8bc5c6dbfc3877157dc8bfca7f9f5debd1e7b486
|
f4a2f2db69410a2b98d9815fbac5048ba8c47126
|
refs/heads/master
| 2023-03-22T15:51:06.658738 | 2021-01-12T22:42:44 | 2021-01-12T22:42:44 | 321,773,318 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,926 |
py
|
from flask import Blueprint, render_template, url_for, flash, request, abort
from flask_login import login_user, logout_user, login_required, current_user
from werkzeug.utils import redirect
from timecard import db
from ..models import User
from ..forms import SignUpForm, LoginForm
bp_auth = Blueprint('auth', __name__, url_prefix='/auth')
@bp_auth.route('/signup', methods=['GET', 'POST'])
@login_required
def signup():
if current_user != User.query.filter_by(username='admin').first():
abort(403)
form = SignUpForm()
if form.validate_on_submit():
user = User(username=form.username.data, email=form.email.data, password=form.password.data)
db.session.add(user)
db.session.commit()
return redirect(url_for('main.home'))
return render_template('signup.html', form=form)
@bp_auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.get_by_username(form.username.data)
        if user is not None and user.check_password(form.password.data):
            login_user(user, form.remember_me.data)
            flash(f'Logged in successfully as {user.username}', 'success')
            return redirect(request.args.get('next') or url_for('main.home'))
        flash('Invalid username or password', 'danger')
    return render_template('login.html', form=form)
@bp_auth.route('/logout', methods=['GET'])
def logout():
logout_user()
flash('Logged out successfully.', 'success')
return redirect(url_for('main.home'))
@bp_auth.route('/admin', methods=['GET', 'POST'])
def admin():
if User.query.all():
abort(403)
form = SignUpForm()
if form.validate_on_submit():
user = User(username='admin', email=form.email.data, password=form.password.data)
db.session.add(user)
db.session.commit()
return redirect(url_for('auth.login'))
return render_template('signup.html', form=form)
|
[
"[email protected]"
] | |
3b08d5e5012c6be97d95a34bfaa3fb73f3314425
|
de9272f47f10a4dd35a10be5c3e0cab31b66feb4
|
/udpClient-message.py
|
4b3f72f94452f8abe65373e64c56cbc0646628d2
|
[] |
no_license
|
ipwave-hackathon-ietf/ipwave-hackathon-ietf-106
|
1dafcb7de3f3f60a89fa056dea6a44ba9eea12b9
|
f2015c07bba4e15f4c42012bc3a8dcff9a8840e2
|
refs/heads/master
| 2021-06-27T12:18:48.770298 | 2021-03-03T06:55:04 | 2021-03-03T06:55:04 | 220,438,550 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 524 |
py
|
#!/usr/bin/python2
# by Yiwen Shen (SKKU)
# Email: [email protected]
import socket
import time
# IPv4
# server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# IPv6
server = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# server.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
server.settimeout(0.2)
# server.bind(("", 44444))
message = b"OCB Test Message in a DSRC channel."
while True:
server.sendto(message, ('2001:db8:100:15a::3', 37020)) # 2001:db8:100:15a::3
print("message sent!")
time.sleep(1)
|
[
"[email protected]"
] | |
dafd10119274e9bc8f0ee8f596204f2095fbc05a
|
f823db6961fd815b10a8828188d7b2ab58a1f699
|
/testchild.py
|
e8b0c714f9ca3ceb6f4cb88343760c6f8f210842
|
[] |
no_license
|
mariosebastian-bit/testrepo
|
26f9262c52b96ac93f2fd41cee3f137353b45b84
|
22eea3f0b58e5e9ceb44d1d0729ab3ae39ef9d1e
|
refs/heads/main
| 2023-03-03T15:29:43.840495 | 2021-02-08T09:21:27 | 2021-02-08T09:21:27 | 336,845,049 | 0 | 0 | null | 2021-02-08T09:21:28 | 2021-02-07T17:22:07 | null |
UTF-8
|
Python
| false | false | 44 |
py
|
## TESTCHILD
print ("inside the testchild")
|
[
"[email protected]"
] | |
5399e23352d99fa49189fb77253df88e8639566e
|
eb82022c0cfc7c8747661cff9624ad2099fa1c3f
|
/dev_accounting_report/report/sales_delivery_out_rekap_xls.py
|
195552276cf5b3294ff059aa939ef9c184ff83a4
|
[] |
no_license
|
dadysuarsa/Odoo
|
8d026a066c390cc8f72805d2672212e61260c1cb
|
c9becd0c192fa239520ad3e1a11d81f70832eddf
|
refs/heads/master
| 2023-03-11T06:02:06.011575 | 2021-02-26T02:17:37 | 2021-02-26T02:17:37 | 276,346,540 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,299 |
py
|
import time
import xlwt
from odoo.report import report_sxw
from report_engine_xls import report_xls
from odoo.tools.translate import _
from datetime import datetime
import pytz
class ReportStatus(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context=None):
super(ReportStatus, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'cr': cr,
'uid': uid,
'time': time,
})
_xs = report_xls.xls_styles
style_title = xlwt.easyxf(_xs['xls_title'])
style_blue = xlwt.easyxf(_xs['wrap'] + _xs['bold'] + _xs['fill_blue'] + _xs['borders_all'], num_format_str=report_xls.decimal_format)
style_blue_center = xlwt.easyxf(_xs['bold'] + _xs['fill_blue'] + _xs['center'] + _xs['borders_all'])
style_blue_center.alignment.middle = 1
style_yellow = xlwt.easyxf(_xs['bold'] + _xs['fill'] + _xs['borders_all'], num_format_str=report_xls.decimal_format)
style_yellow_right = xlwt.easyxf(_xs['bold'] + _xs['fill'] + _xs['borders_all'] + _xs['right'], num_format_str=report_xls.decimal_format)
style_yellow_percent = xlwt.easyxf(_xs['bold'] + _xs['fill'] + _xs['borders_all'], num_format_str=report_xls.percentage_format)
style_normal_bold = xlwt.easyxf(_xs['bold'] + _xs['borders_all'], num_format_str=report_xls.decimal_format)
style_normal = xlwt.easyxf(_xs['borders_all'], num_format_str=report_xls.decimal_format)
style_normal_date = xlwt.easyxf(_xs['borders_all'], num_format_str=report_xls.date_format)
style_normal_center = xlwt.easyxf(_xs['wrap'] + _xs['top'] + _xs['center'] + _xs['borders_all'])
style_normal_italic = xlwt.easyxf(_xs['italic'] + _xs['borders_all'])
style_normal_percent = xlwt.easyxf(_xs['borders_all'], num_format_str=report_xls.percentage_format)
columns = [
['Tanggal Kirim', 13],
['No SJ/DO', 15],
['Satuan', 8],
['QTY Kirim', 17],
['Buyer', 45],
['No SC', 15],
['No Invoice', 15],
['Tgl Invoice', 12],
['Mata Uang', 10],
['Qty Invoice', 17],
['PPN VALAS', 17],
['PPN IDR', 17],
['DPP VALAS', 17],
['DPP IDR', 17],
['TOTAL VALAS', 17],
['TOTAL IDR', 17],
]
class sales_delivery_out_rekap_xls(report_xls):
def generate_xls_report(self, parser, _xs, data, obj, wb):
ws = wb.add_sheet(('Rekap Sales Detail Delivery'))
ws.panes_frozen = True
ws.remove_splits = True
ws.portrait = 0 # Landscape
ws.fit_width_to_pages = 1
ws.set_horz_split_pos(7)
ws.write_merge(0, 0, 0, 5, 'REKAP SALES DELIVERY', style_title)
ws.write_merge(1, 1, 0, 3, (('Downloaded Date : %s') %(datetime.strptime(str(datetime.now(pytz.timezone('Asia/Jakarta')))[:18], "%Y-%m-%d %H:%M:%S").strftime("%d-%m-%Y %H:%M:%S"))), style_normal_date)
ws.write_merge(2, 2, 0, 3, 'Tanggal', style_blue_center)
ws.write_merge(2, 2, 4, 4, 'Divisi', style_blue_center)
ws.row(3).height_mismatch = True
ws.row(3).height = 20 * 28
ws.write_merge(3, 3, 0, 3, data['date_from'] + ' - ' + data['date_to'], style_normal_center)
ws.write_merge(3, 3, 4, 4, data['divisi'], style_normal_center)
ws.write_merge(5, 5, 0, 4, 'Delivery', style_blue_center)
ws.write_merge(5, 5, 5, 15, 'SO & Invoice', style_blue_center)
c_hdr_cell_style = xlwt.easyxf(_xs['bold'] + _xs['fill'] + _xs['borders_all'],
num_format_str=report_xls.decimal_format)
c_hdr_cell_style_right = xlwt.easyxf(_xs['bold'] + _xs['fill'] + _xs['borders_all'] + _xs['right'],
num_format_str=report_xls.decimal_format)
c_cell_style = xlwt.easyxf(_xs['borders_all'],
num_format_str=report_xls.decimal_format)
c_hdr_cell_style_grey = xlwt.easyxf(_xs['bold'] + _xs['fill_grey'] + _xs['borders_all'],
num_format_str=report_xls.decimal_format)
row_count = 6
col_count = 0
for column in columns:
ws.col(col_count).width = 256 * column[1]
ws.write(row_count, col_count, column[0], c_hdr_cell_style)
col_count += 1
row_count += 1
col_count = 0
row_start = row_count
for lines in data['csv']:
for line in lines:
ws.write(row_count, col_count, line, c_cell_style)
col_count += 1
row_count += 1
col_count = 0
row_count += 1
ws.write_merge(row_count, row_count, 6, 8, 'GRAND TOTAL', c_hdr_cell_style_grey)
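        # write a grand-total SUM formula for each numeric column (9..15),
        # spanning the detail rows written above (row_start .. row_count - 2)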
col_count = 9
while col_count <= 15:
sum_cell_start = xlwt.Utils.rowcol_to_cell(row_start, col_count)
sum_cell_end = xlwt.Utils.rowcol_to_cell(row_count - 2, col_count)
ws.write(row_count, col_count, xlwt.Formula('sum(' + sum_cell_start + ':' + sum_cell_end + ')'), c_hdr_cell_style_grey)
col_count += 1
pass
sales_delivery_out_rekap_xls('report.sales.delivery.out.rekap.xls','stock.picking','addons/dev_accounting_report/report/report_excel.mako', parser=ReportStatus, header=False)
|
[
"[email protected]"
] | |
bc8a973ecdbf81c7111b5f9094c7a8318ec65860
|
5d1106528200b8c55e4147dbd7ba7a987dbfa368
|
/src/warzone/warzone.py
|
bc5187e00aa789289d9dc1c1d870399445b401f8
|
[
"WTFPL"
] |
permissive
|
Tyewrwerree/warzone
|
d5d1683dbac559e6cf9689395905e987bc099140
|
f2d4e0944138ef82f63a2212e9804b3a1e920749
|
refs/heads/master
| 2023-05-03T14:35:43.615159 | 2014-05-26T06:34:45 | 2014-05-26T06:34:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 808 |
py
|
#!/usr/bin/env python
import argparse
import warmerise
import interpreter as warzonecmd
def main():
parser = create_parser()
args = parser.parse_args()
#if args.email and args.password:
# session = warmerise.login(email=args.email, password=args.password)
# pprint(vars(session))
interpreter = warzonecmd.WarzoneCmd()
if args.email or args.password:
interpreter.prompt_login(email=args.email, password=args.password)
interpreter.start()
def create_parser():
parser = argparse.ArgumentParser(add_help=True,
description="tool to mess with Warmerise")
parser.add_argument("-l", "--email", metavar="EMAIL", help="e-mail address to login with")
parser.add_argument("-p", "--password", metavar="PASSWORD", help="password to login with")
return parser
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
866b56f8009fd7f8a034eff87f37008c86df78d1
|
c79bc3b25aac5f958da011119bf71fcca534bd1a
|
/hostedpi/cli.py
|
9b5ae581de90ba5a42336748f8ef5c6dfe1b4a90
|
[
"BSD-3-Clause"
] |
permissive
|
gnuchu/hostedpi
|
c4ff4d398bcc8fde0d2d421f8a67b315c40fcc33
|
325e8035e0bf671daeabb4d696eb5b36a6daa12d
|
refs/heads/main
| 2023-03-31T04:07:17.604847 | 2021-03-28T00:10:22 | 2021-03-28T00:10:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 20,699 |
py
|
import os
import sys
import argparse
from .picloud import PiCloud
from .utils import read_ssh_key, ssh_import_id
from .exc import HostedPiException
from .__version__ import __version__
class CLI:
def __init__(self):
self._args = None
self._commands = None
self._config = None
self._parser = None
self._output = None
self._store = None
self._cloud = None
self._pis = None
def __call__(self, args=None):
self._args = self.parser.parse_args(args)
try:
return self._args.func()
except HostedPiException as e:
sys.stderr.write("hostedpi error: {e}\n".format(e=e))
return 2
except KeyboardInterrupt:
print("Operation cancelled during process")
@property
def cloud(self):
if self._cloud is None:
API_ID = os.environ.get('HOSTEDPI_ID')
API_SECRET = os.environ.get('HOSTEDPI_SECRET')
if API_ID is None or API_SECRET is None:
print("HOSTEDPI_ID and HOSTEDPI_SECRET environment variables "
"must be set")
self._cloud = PiCloud(API_ID, API_SECRET)
return self._cloud
@property
def pis(self):
if self._pis is None:
self._pis = self.cloud.pis
return self._pis
@property
def parser(self):
"""
The parser for all the sub-commands that the script accepts. Returns the
newly constructed argument parser.
"""
if self._parser is None:
self._parser, self._commands = self._get_parser()
return self._parser
@property
def commands(self):
"A dictionary mapping command names to their sub-parser."
if self._commands is None:
self._parser, self._commands = self._get_parser()
return self._commands
def _get_parser(self):
parser = argparse.ArgumentParser(
description=(
"hostedpi is a tool for provisioning and managing Raspberry Pis "
"in the Mythic Beasts Pi Cloud"))
parser.add_argument(
'--version', action='version', version=__version__)
parser.set_defaults(func=self.do_help, cmd=None)
commands = parser.add_subparsers(title=("commands"))
help_cmd = commands.add_parser(
"help", aliases=["h"],
description=(
"With no arguments, displays the list of hostedpi "
"commands. If a command name is given, displays the "
"description and options for the named command. If a "
"setting name is given, displays the description and "
"default value for that setting."),
help=("Displays help about the specified command or setting"))
help_cmd.add_argument(
"cmd", metavar="cmd", nargs='?',
help=("The name of the command to output help for")
)
help_cmd.set_defaults(func=self.do_help)
test_cmd = commands.add_parser(
"test", aliases=["connect"],
description=(
"Test a connection to the Mythic Beasts API using API ID and "
"secret in environment variables."),
help=("Test a connection to the Mythic Beasts API"))
test_cmd.set_defaults(func=self.do_test)
get_images_cmd = commands.add_parser(
"images",
description=("Retrieve the list of operating system images available for the given Pi model."),
help=("Retrieve the list of operating system images available for the given Pi model"))
get_images_cmd.add_argument(
"model", metavar="model", type=int,
help=("The Pi model number (3 or 4) to get operating systems for")
)
get_images_cmd.set_defaults(func=self.do_get_images)
list_cmd = commands.add_parser(
"list", aliases=["ls"],
description=("List all Pis in the account"),
help=("List all Pis in the account"))
list_cmd.set_defaults(func=self.do_list)
show_cmd = commands.add_parser(
"show", aliases=["cat"],
description=("Show the information about one or more Pis in the account"),
help=("Show the information about one or more Pis in the account"))
show_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The names of the Pis to show information for")
)
show_cmd.set_defaults(func=self.do_show_pis)
create_cmd = commands.add_parser(
"create",
description=("Provision a new Pi in the account"),
help=("Provision a new Pi in the account"))
create_cmd.add_argument(
"name", metavar="name",
help=("The name of the new Pi to provision")
)
create_cmd.add_argument(
"--model", metavar="model", type=int, nargs='?',
help=("The model of the new Pi to provision (3 or 4)")
)
create_cmd.add_argument(
"--disk", metavar="disk", type=int, nargs='?',
help=("The disk size in GB")
)
create_cmd.add_argument(
"--image", metavar="image", type=str, nargs='?',
help=("The operating system image to use")
)
create_cmd.add_argument(
"--ssh-key-path", metavar="ssh_key_path", nargs='?',
help=("The path to an SSH public key file to add to the Pi")
)
create_cmd.set_defaults(func=self.do_create)
provision_status_cmd = commands.add_parser(
"status",
description=("Get the provision status of one or more Pis"),
help=("Get the provision status of one or more Pis"))
provision_status_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The names of the Pis to get the provision status for")
)
provision_status_cmd.set_defaults(func=self.do_provision_status)
power_status_cmd = commands.add_parser(
"power",
description=("Get the power status for one or more Pis"),
help=("Get the power status (on/off) for one or more Pis"))
power_status_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The names of the Pis to get the power status for")
)
power_status_cmd.set_defaults(func=self.do_power_status)
reboot_cmd = commands.add_parser(
"reboot",
description=("Reboot one or more Pis in the account"),
help=("Reboot one or more Pis in the account"))
reboot_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The name of the Pi to reboot")
)
reboot_cmd.set_defaults(func=self.do_reboot)
power_on_cmd = commands.add_parser(
"on", aliases=["poweron"],
description=("Power on one or more Pis in the account"),
help=("Power on one or more Pis in the account"))
power_on_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The name of the Pi to power on")
)
power_on_cmd.set_defaults(func=self.do_power_on)
power_off_cmd = commands.add_parser(
"off", aliases=["poweroff"],
description=("Power off one or more Pis in the account"),
help=("Power off one or more Pis in the account"))
power_off_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The name of the Pi to power off")
)
power_off_cmd.set_defaults(func=self.do_power_off)
cancel_cmd = commands.add_parser(
"cancel",
description=("Cancel one or more Pis in the account"),
help=("Cancel one or more Pis in the account"))
cancel_cmd.add_argument(
"names", metavar="names", nargs='+',
help=("The names of the Pis to cancel")
)
cancel_cmd.add_argument(
"-y", "--yes",
action="store_true",
help=("Proceed without confirmation")
)
cancel_cmd.set_defaults(func=self.do_cancel)
count_keys_cmd = commands.add_parser(
"count-keys", aliases=["num-keys"],
description=("Show the number of SSH keys currently on one or more Pis"),
help=("Show the number of SSH keys currently on one or more Pis"))
count_keys_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The names of the Pis to get keys for")
)
count_keys_cmd.set_defaults(func=self.do_count_keys)
show_keys_cmd = commands.add_parser(
"keys",
description=("Show the SSH keys currently on a Pi"),
help=("Show the SSH keys currently on a Pi"))
show_keys_cmd.add_argument(
"name", metavar="name",
help=("The name of the Pi to get keys for")
)
show_keys_cmd.set_defaults(func=self.do_show_keys)
add_key_cmd = commands.add_parser(
"add-key",
description=("Add an SSH key from a public key file to one or more Pis"),
help=("Add an SSH key from a public key file to one or more Pis"))
add_key_cmd.add_argument(
"ssh_key_path", metavar="ssh_key_path", nargs='?',
help=("The path to an SSH public key file to add to the Pi")
)
add_key_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The name of the Pis to add keys to")
)
add_key_cmd.set_defaults(func=self.do_add_key)
copy_keys_cmd = commands.add_parser(
"copy-keys", aliases=["cp"],
description=("Copy all SSH keys from one Pi to one or more others"),
help=("Copy all SSH keys from one Pi to one or more others"))
copy_keys_cmd.add_argument(
"name_src", metavar="name_src",
help=("The name of the Pi to copy keys from")
)
copy_keys_cmd.add_argument(
"names_dest", metavar="names_dest", nargs='*',
help=("The name of the Pis to copy keys to")
)
copy_keys_cmd.set_defaults(func=self.do_copy_keys)
remove_keys_cmd = commands.add_parser(
"remove-keys",
description=("Remove all SSH keys from one or more Pis"),
help=("Remove all SSH keys from one or more Pis"))
remove_keys_cmd.add_argument(
"names", metavar="names", nargs='+',
help=("The names of the Pis to remove keys from")
)
remove_keys_cmd.set_defaults(func=self.do_remove_keys)
ssh_import_id_cmd = commands.add_parser(
"ssh-import-id",
description=("Import SSH keys from GitHub or Launchpad and add them to one or more Pis"),
help=("Import SSH keys from GitHub or Launchpad and add them to one or more Pis"))
ssh_import_id_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The names of the Pis to import keys onto")
)
ssh_import_id_cmd.add_argument(
"--gh", metavar="github username", nargs='?',
help=("The GitHub username to import keys from")
)
ssh_import_id_cmd.add_argument(
"--lp", metavar="launchpad username", nargs='?',
help=("The Launchpad username to import keys from")
)
ssh_import_id_cmd.set_defaults(func=self.do_ssh_import_id)
ssh_command_cmd = commands.add_parser(
"ssh-command",
description=("Output the SSH command for one or more Pis in the account"),
help=("Output the (IPv4 or IPv6) SSH command for one or more Pis in the account"))
ssh_command_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The names of the Pis to get SSH commands for")
)
ssh_command_cmd.add_argument(
"--ipv6",
action="store_true",
help=("Show IPv6 command")
)
ssh_command_cmd.set_defaults(func=self.do_ssh_command)
ssh_config_cmd = commands.add_parser(
"ssh-config",
description=("Output the SSH config for one or more Pis in the account"),
help=("Output the (IPv4 or IPv6) SSH config for one or more Pis in the account"))
ssh_config_cmd.add_argument(
"names", metavar="names", nargs='*',
help=("The names of the Pis to get SSH config for")
)
ssh_config_cmd.add_argument(
"--ipv6",
action="store_true",
help=("Show IPv6 command")
)
ssh_config_cmd.set_defaults(func=self.do_ssh_config)
return parser, commands.choices
def get_pi(self, name):
pi = self.pis.get(name)
if not pi:
self.print_not_found(name)
return
return pi
def get_pis(self, names):
if not names:
return self.pis.items()
return {name: self.pis.get(name) for name in names}.items()
def print_not_found(self, name):
sys.stderr.write("{name} not found\n".format(name=name))
def do_help(self):
if self._args.cmd:
self.parser.parse_args([self._args.cmd, '-h'])
else:
self.parser.parse_args(['-h'])
def do_test(self):
if self.cloud:
print("Connected to the Mythic Beasts API")
return
return 2
def do_get_images(self):
images = self.cloud.get_operating_systems(model=self._args.model)
col_width = max(len(name) for name in images.values()) + 1
for id, name in images.items():
print("{name:{col_width}}: {id}".format(name=name, id=id, col_width=col_width))
def do_list(self):
for name in self.pis:
print(name)
def do_show_pis(self):
for name, pi in self.get_pis(self._args.names):
if pi:
print(pi, end='\n\n')
else:
self.print_not_found(name)
def do_create(self):
name = self._args.name
model = self._args.model
disk_size = self._args.disk
ssh_key_path = self._args.ssh_key_path
        os_image = self._args.image  # the '--image' option is stored under 'image'
args = {
'model': model,
'disk_size': disk_size,
'ssh_key_path': ssh_key_path,
'os_image': os_image,
}
kwargs = {k: v for k, v in args.items() if v is not None}
pi = self.cloud.create_pi(name, **kwargs)
print("Pi {} provisioned successfully".format(name))
print()
print(pi)
def do_reboot(self):
for name, pi in self.get_pis(self._args.names):
if pi:
pi.reboot()
print("{name} rebooted".format(name=name))
else:
self.print_not_found(name)
def do_power_on(self):
for name, pi in self.get_pis(self._args.names):
if pi:
pi.on()
print("{name} powered on".format(name=name))
else:
self.print_not_found(name)
def do_power_off(self):
for name, pi in self.get_pis(self._args.names):
if pi:
pi.off()
print("{name} powered off".format(name=name))
else:
self.print_not_found(name)
def do_cancel(self):
if not self._args.yes:
num_pis = len(self._args.names)
try:
s = '' if num_pis == 1 else 's'
y = input("Cancelling {n} Pi{s}. Proceed? [Y/n]".format(n=num_pis, s=s))
except KeyboardInterrupt:
print()
print("Not cancelled")
return
if y.lower() not in 'y':
print("Not cancelled")
return
for name, pi in self.get_pis(self._args.names):
if pi:
pi.cancel()
print("{name} cancelled".format(name=name))
else:
self.print_not_found(name)
def do_show_keys(self):
pi = self.get_pi(self._args.name)
if not pi:
return 2
print(*pi.ssh_keys, sep='\n')
def do_count_keys(self):
for name, pi in self.get_pis(self._args.names):
            if pi:
                num_keys = len(pi.ssh_keys)
                s = '' if num_keys == 1 else 's'
                print("{name}: {n} key{s}".format(name=name, n=num_keys, s=s))
            else:
                self.print_not_found(name)
    def do_add_key(self):
        ssh_key = read_ssh_key(self._args.ssh_key_path)
        for name, pi in self.get_pis(self._args.names):
            if pi:
                keys_before = len(pi.ssh_keys)
                pi.ssh_keys |= {ssh_key}
                keys_after = len(pi.ssh_keys)
                num_keys = keys_after - keys_before
                s = '' if num_keys == 1 else 's'
                print("{n} key{s} added to {name}".format(n=num_keys, name=name, s=s))
            else:
                self.print_not_found(name)
def do_copy_keys(self):
src_pi = self.get_pi(self._args.name_src)
if not src_pi:
return 2
ssh_keys = src_pi.ssh_keys
for name, pi in self.get_pis(self._args.names_dest):
if pi:
keys_before = len(pi.ssh_keys)
pi.ssh_keys |= ssh_keys
keys_after = len(pi.ssh_keys)
num_keys = keys_after - keys_before
s = '' if num_keys == 1 else 's'
print("{n} key{s} added to {name}".format(n=num_keys, name=name, s=s))
def do_remove_keys(self):
for name, pi in self.get_pis(self._args.names):
if pi:
num_keys = len(pi.ssh_keys)
pi.ssh_keys = set()
s = '' if num_keys == 1 else 's'
print("{n} key{s} removed from {name}".format(n=num_keys, name=name, s=s))
else:
self.print_not_found(name)
def do_ssh_import_id(self):
github = self._args.gh
launchpad = self._args.lp
github_keys = set()
launchpad_keys = set()
if github:
github_keys |= ssh_import_id(github=github)
s = '' if len(github_keys) == 1 else 's'
print("{n} key{s} retrieved from GitHub".format(n=len(github_keys), s=s))
if launchpad:
launchpad_keys |= ssh_import_id(launchpad=launchpad)
s = '' if len(launchpad_keys) == 1 else 's'
print("{n} key{s} retrieved from Launchpad".format(n=len(launchpad_keys), s=s))
print()
new_keys = github_keys | launchpad_keys
if len(new_keys) < (len(github_keys) + len(launchpad_keys)):
s = '' if len(new_keys) == 1 else 's'
print("{n} key{s} to add".format(n=len(new_keys), s=s))
if new_keys:
for name, pi in self.get_pis(self._args.names):
if pi:
keys_before = len(pi.ssh_keys)
pi.ssh_keys |= new_keys
keys_after = len(pi.ssh_keys)
num_keys = keys_after - keys_before
s = '' if num_keys == 1 else 's'
print("{n} key{s} added to {name}".format(n=num_keys, name=name, s=s))
else:
self.print_not_found(name)
else:
print("No keys to add")
def do_ssh_command(self):
for name, pi in self.get_pis(self._args.names):
if pi:
if self._args.ipv6:
print(pi.ipv6_ssh_command)
else:
print(pi.ipv4_ssh_command)
else:
self.print_not_found(name)
def do_ssh_config(self):
for name, pi in self.get_pis(self._args.names):
if pi:
if self._args.ipv6:
print(pi.ipv6_ssh_config)
else:
print(pi.ipv4_ssh_config)
else:
self.print_not_found(name)
def do_provision_status(self):
for name, pi in self.get_pis(self._args.names):
if pi:
print("{pi.name}: {pi.provision_status}".format(pi=pi))
else:
self.print_not_found(name)
def do_power_status(self):
for name, pi in self.get_pis(self._args.names):
if pi:
on_off = "on" if pi.power else "off"
print("{name}: powered {on_off}".format(name=name, on_off=on_off))
else:
self.print_not_found(name)
main = CLI()
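# Example invocations (hypothetical shell usage, assuming HOSTEDPI_ID and
# HOSTEDPI_SECRET are exported and a 'hostedpi' console entry point exists):
#   hostedpi test
#   hostedpi create mypi --model 3 --disk 10 --ssh-key-path ~/.ssh/id_rsa.pub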
|
[
"[email protected]"
] | |
84fbbae6519e6ef37d423e06fad72516f396cfc5
|
38902540746c70149ffdfe8e8dc11be0afa14e27
|
/Homework2/code/ui/pages/segments_page.py
|
66541a94e77ca1c249dfb03929ec816a1c5a928f
|
[] |
no_license
|
bubenchikus/2021-1-MAILRU-SDET-Python-V-Tarasenko
|
a214835bf20c2b28a86450e3809d24350703d48d
|
9053f430010fcdc221b815028ad79c8a743117db
|
refs/heads/main
| 2023-05-12T06:54:09.209535 | 2021-06-01T00:25:52 | 2021-06-01T00:25:52 | 351,537,046 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,157 |
py
|
from ui.pages.base_page import BasePage
from ui.locators.page_locators import SegmentsPageLocators
class SegmentsPage(BasePage):
locators = SegmentsPageLocators()
def go_to_create_new_segment(self):
self.driver.get('https://target.my.com/segments/segments_list/new/')
def create_segment(self, segment_name):
self.go_to_create_new_segment()
self.click(self.locators.SEGMENTS_CREATE_APPS_LOCATOR)
self.click(self.locators.SEGMENTS_CREATE_GAMERS_LOCATOR)
self.click(self.locators.SEGMENTS_CREATE_GAMERS_CHOOSE_LOCATOR)
self.click(self.locators.SEGMENTS_CREATE_ADD_LOCATOR)
self.fill_field(self.locators.SEGMENTS_CREATE_NAME_LOCATOR, segment_name)
self.click(self.locators.SEGMENTS_CREATE_SUBMIT_LOCATOR)
self.find(self.locators.SEGMENTS_SEARCH_LOCATOR) # wait segments list page content to load
def delete_segments(self):
self.find(self.locators.SEGMENTS_TABLE_FRAGMENT_LOCATOR)
self.click(self.locators.SEGMENTS_CHOOSE_ALL_LOCATOR)
self.click(self.locators.SEGMENTS_ACTION_LOCATOR)
self.click(self.locators.SEGMENTS_DELETE_LOCATOR)
|
[
"[email protected]"
] | |
eea30e56bd7ad538bbff6138eacfdae145a7a558
|
72ca10a09bb5d760b2ad6aaed1fdf3b1d2a11421
|
/manage.py
|
7b5306d285cedc439c308386e1ed5f95a76ab651
|
[] |
no_license
|
zhaluza/django-polls-app
|
fd4002a19dcae30b6ebb1293cbd6aae3b650eaf0
|
fa9504467541b25b363937b07fb4811c92221884
|
refs/heads/master
| 2021-04-14T20:44:19.919929 | 2020-03-30T02:02:44 | 2020-03-30T02:02:44 | 249,265,020 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 633 |
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'docs_tutorial.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
bab08e55bdf13a41319af498b14816e44e7fcf85
|
b6c4fdb823af6b3295b33949f37b8a008610db7a
|
/3jet/3jet_UFO/decays.py
|
953903333a52d441a4395a6b50645028ef995063
|
[] |
no_license
|
tymorrison/gridpacks
|
d041f6390ea34a340ae0d7805de3c6ee26708c7e
|
3909df96fd87b290be5edab995a7e4f167c401c2
|
refs/heads/master
| 2021-07-03T18:38:52.863659 | 2017-09-24T19:49:14 | 2017-09-24T19:49:14 | 104,660,009 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,760 |
py
|
# This file was automatically created by FeynRules 2.3.21
# Mathematica version: 10.3.1 for Mac OS X x86 (64-bit) (December 9, 2015)
# Date: Thu 20 Oct 2016 00:50:38
from object_library import all_decays, Decay
import particles as P
Decay_b = Decay(name = 'Decay_b',
particle = P.b,
partial_widths = {(P.g,P.omega):'((MB**2 - Momega**2)*((coef**2*I2b3*MB**4*complexconjugate(I2b3))/(8.*cmath.pi**4*moct**2) - (coef**2*I2b3*MB**2*Momega**2*complexconjugate(I2b3))/(4.*cmath.pi**4*moct**2) + (coef**2*I2b3*Momega**4*complexconjugate(I2b3))/(8.*cmath.pi**4*moct**2)))/(96.*cmath.pi*abs(MB)**3)',
(P.W__minus__,P.t):'(((3*ee**2*MB**2)/(2.*sw**2) + (3*ee**2*MT**2)/(2.*sw**2) + (3*ee**2*MB**4)/(2.*MW**2*sw**2) - (3*ee**2*MB**2*MT**2)/(MW**2*sw**2) + (3*ee**2*MT**4)/(2.*MW**2*sw**2) - (3*ee**2*MW**2)/sw**2)*cmath.sqrt(MB**4 - 2*MB**2*MT**2 + MT**4 - 2*MB**2*MW**2 - 2*MT**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MB)**3)',
(P.Zp,P.omega):'(((3*gz**2*I2b3*MB**2*zomega**2*complexconjugate(I2b3))/4. + (3*gz**2*I2b3*Momega**2*zomega**2*complexconjugate(I2b3))/4. + (3*gz**2*I2b3*MB**4*zomega**2*complexconjugate(I2b3))/(4.*MZp**2) - (3*gz**2*I2b3*MB**2*Momega**2*zomega**2*complexconjugate(I2b3))/(2.*MZp**2) + (3*gz**2*I2b3*Momega**4*zomega**2*complexconjugate(I2b3))/(4.*MZp**2) - (3*gz**2*I2b3*MZp**2*zomega**2*complexconjugate(I2b3))/2.)*cmath.sqrt(MB**4 - 2*MB**2*Momega**2 + Momega**4 - 2*MB**2*MZp**2 - 2*Momega**2*MZp**2 + MZp**4))/(96.*cmath.pi*abs(MB)**3)'})
Decay_H = Decay(name = 'Decay_H',
particle = P.H,
partial_widths = {(P.b,P.b__tilde__):'((-12*MB**2*yb**2 + 3*MH**2*yb**2)*cmath.sqrt(-4*MB**2*MH**2 + MH**4))/(16.*cmath.pi*abs(MH)**3)',
(P.t,P.t__tilde__):'((3*MH**2*yt**2 - 12*MT**2*yt**2)*cmath.sqrt(MH**4 - 4*MH**2*MT**2))/(16.*cmath.pi*abs(MH)**3)',
(P.ta__minus__,P.ta__plus__):'((MH**2*ytau**2 - 4*MTA**2*ytau**2)*cmath.sqrt(MH**4 - 4*MH**2*MTA**2))/(16.*cmath.pi*abs(MH)**3)',
(P.W__minus__,P.W__plus__):'(((3*ee**4*vev**2)/(4.*sw**4) + (ee**4*MH**4*vev**2)/(16.*MW**4*sw**4) - (ee**4*MH**2*vev**2)/(4.*MW**2*sw**4))*cmath.sqrt(MH**4 - 4*MH**2*MW**2))/(16.*cmath.pi*abs(MH)**3)',
(P.Z,P.Z):'(((9*ee**4*vev**2)/2. + (3*ee**4*MH**4*vev**2)/(8.*MZ**4) - (3*ee**4*MH**2*vev**2)/(2.*MZ**2) + (3*cw**4*ee**4*vev**2)/(4.*sw**4) + (cw**4*ee**4*MH**4*vev**2)/(16.*MZ**4*sw**4) - (cw**4*ee**4*MH**2*vev**2)/(4.*MZ**2*sw**4) + (3*cw**2*ee**4*vev**2)/sw**2 + (cw**2*ee**4*MH**4*vev**2)/(4.*MZ**4*sw**2) - (cw**2*ee**4*MH**2*vev**2)/(MZ**2*sw**2) + (3*ee**4*sw**2*vev**2)/cw**2 + (ee**4*MH**4*sw**2*vev**2)/(4.*cw**2*MZ**4) - (ee**4*MH**2*sw**2*vev**2)/(cw**2*MZ**2) + (3*ee**4*sw**4*vev**2)/(4.*cw**4) + (ee**4*MH**4*sw**4*vev**2)/(16.*cw**4*MZ**4) - (ee**4*MH**2*sw**4*vev**2)/(4.*cw**4*MZ**2))*cmath.sqrt(MH**4 - 4*MH**2*MZ**2))/(32.*cmath.pi*abs(MH)**3)'})
Decay_omega = Decay(name = 'Decay_omega',
particle = P.omega,
partial_widths = {(P.g,P.b):'((-MB**2 + Momega**2)*((coef**2*I1b3*MB**4*complexconjugate(I1b3))/(8.*cmath.pi**4*moct**2) - (coef**2*I1b3*MB**2*Momega**2*complexconjugate(I1b3))/(4.*cmath.pi**4*moct**2) + (coef**2*I1b3*Momega**4*complexconjugate(I1b3))/(8.*cmath.pi**4*moct**2)))/(96.*cmath.pi*abs(Momega)**3)',
(P.g,P.d):'(coef**2*I1b1*Momega**6*complexconjugate(I1b1))/(768.*cmath.pi**5*moct**2*abs(Momega)**3)',
(P.g,P.s):'(coef**2*I1b2*Momega**6*complexconjugate(I1b2))/(768.*cmath.pi**5*moct**2*abs(Momega)**3)',
(P.Zp,P.b):'(((3*gz**2*I1b3*MB**2*zomega**2*complexconjugate(I1b3))/4. + (3*gz**2*I1b3*Momega**2*zomega**2*complexconjugate(I1b3))/4. + (3*gz**2*I1b3*MB**4*zomega**2*complexconjugate(I1b3))/(4.*MZp**2) - (3*gz**2*I1b3*MB**2*Momega**2*zomega**2*complexconjugate(I1b3))/(2.*MZp**2) + (3*gz**2*I1b3*Momega**4*zomega**2*complexconjugate(I1b3))/(4.*MZp**2) - (3*gz**2*I1b3*MZp**2*zomega**2*complexconjugate(I1b3))/2.)*cmath.sqrt(MB**4 - 2*MB**2*Momega**2 + Momega**4 - 2*MB**2*MZp**2 - 2*Momega**2*MZp**2 + MZp**4))/(96.*cmath.pi*abs(Momega)**3)',
(P.Zp,P.d):'((Momega**2 - MZp**2)*((3*gz**2*I1b1*Momega**2*zomega**2*complexconjugate(I1b1))/4. + (3*gz**2*I1b1*Momega**4*zomega**2*complexconjugate(I1b1))/(4.*MZp**2) - (3*gz**2*I1b1*MZp**2*zomega**2*complexconjugate(I1b1))/2.))/(96.*cmath.pi*abs(Momega)**3)',
(P.Zp,P.s):'((Momega**2 - MZp**2)*((3*gz**2*I1b2*Momega**2*zomega**2*complexconjugate(I1b2))/4. + (3*gz**2*I1b2*Momega**4*zomega**2*complexconjugate(I1b2))/(4.*MZp**2) - (3*gz**2*I1b2*MZp**2*zomega**2*complexconjugate(I1b2))/2.))/(96.*cmath.pi*abs(Momega)**3)'})
Decay_t = Decay(name = 'Decay_t',
particle = P.t,
partial_widths = {(P.W__plus__,P.b):'(((3*ee**2*MB**2)/(2.*sw**2) + (3*ee**2*MT**2)/(2.*sw**2) + (3*ee**2*MB**4)/(2.*MW**2*sw**2) - (3*ee**2*MB**2*MT**2)/(MW**2*sw**2) + (3*ee**2*MT**4)/(2.*MW**2*sw**2) - (3*ee**2*MW**2)/sw**2)*cmath.sqrt(MB**4 - 2*MB**2*MT**2 + MT**4 - 2*MB**2*MW**2 - 2*MT**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MT)**3)'})
Decay_ta__minus__ = Decay(name = 'Decay_ta__minus__',
particle = P.ta__minus__,
partial_widths = {(P.W__minus__,P.vt):'((MTA**2 - MW**2)*((ee**2*MTA**2)/(2.*sw**2) + (ee**2*MTA**4)/(2.*MW**2*sw**2) - (ee**2*MW**2)/sw**2))/(32.*cmath.pi*abs(MTA)**3)'})
Decay_W__plus__ = Decay(name = 'Decay_W__plus__',
particle = P.W__plus__,
partial_widths = {(P.c,P.s__tilde__):'(ee**2*MW**4)/(16.*cmath.pi*sw**2*abs(MW)**3)',
(P.t,P.b__tilde__):'(((-3*ee**2*MB**2)/(2.*sw**2) - (3*ee**2*MT**2)/(2.*sw**2) - (3*ee**2*MB**4)/(2.*MW**2*sw**2) + (3*ee**2*MB**2*MT**2)/(MW**2*sw**2) - (3*ee**2*MT**4)/(2.*MW**2*sw**2) + (3*ee**2*MW**2)/sw**2)*cmath.sqrt(MB**4 - 2*MB**2*MT**2 + MT**4 - 2*MB**2*MW**2 - 2*MT**2*MW**2 + MW**4))/(48.*cmath.pi*abs(MW)**3)',
(P.u,P.d__tilde__):'(ee**2*MW**4)/(16.*cmath.pi*sw**2*abs(MW)**3)',
(P.ve,P.e__plus__):'(ee**2*MW**4)/(48.*cmath.pi*sw**2*abs(MW)**3)',
(P.vm,P.mu__plus__):'(ee**2*MW**4)/(48.*cmath.pi*sw**2*abs(MW)**3)',
(P.vt,P.ta__plus__):'((-MTA**2 + MW**2)*(-(ee**2*MTA**2)/(2.*sw**2) - (ee**2*MTA**4)/(2.*MW**2*sw**2) + (ee**2*MW**2)/sw**2))/(48.*cmath.pi*abs(MW)**3)'})
Decay_Z = Decay(name = 'Decay_Z',
particle = P.Z,
partial_widths = {(P.b,P.b__tilde__):'((-7*ee**2*MB**2 + ee**2*MZ**2 - (3*cw**2*ee**2*MB**2)/(2.*sw**2) + (3*cw**2*ee**2*MZ**2)/(2.*sw**2) - (17*ee**2*MB**2*sw**2)/(6.*cw**2) + (5*ee**2*MZ**2*sw**2)/(6.*cw**2))*cmath.sqrt(-4*MB**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)',
(P.c,P.c__tilde__):'(MZ**2*(-(ee**2*MZ**2) + (3*cw**2*ee**2*MZ**2)/(2.*sw**2) + (17*ee**2*MZ**2*sw**2)/(6.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.d,P.d__tilde__):'(MZ**2*(ee**2*MZ**2 + (3*cw**2*ee**2*MZ**2)/(2.*sw**2) + (5*ee**2*MZ**2*sw**2)/(6.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.e__minus__,P.e__plus__):'(MZ**2*(-(ee**2*MZ**2) + (cw**2*ee**2*MZ**2)/(2.*sw**2) + (5*ee**2*MZ**2*sw**2)/(2.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.mu__minus__,P.mu__plus__):'(MZ**2*(-(ee**2*MZ**2) + (cw**2*ee**2*MZ**2)/(2.*sw**2) + (5*ee**2*MZ**2*sw**2)/(2.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.s,P.s__tilde__):'(MZ**2*(ee**2*MZ**2 + (3*cw**2*ee**2*MZ**2)/(2.*sw**2) + (5*ee**2*MZ**2*sw**2)/(6.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.t,P.t__tilde__):'((-11*ee**2*MT**2 - ee**2*MZ**2 - (3*cw**2*ee**2*MT**2)/(2.*sw**2) + (3*cw**2*ee**2*MZ**2)/(2.*sw**2) + (7*ee**2*MT**2*sw**2)/(6.*cw**2) + (17*ee**2*MZ**2*sw**2)/(6.*cw**2))*cmath.sqrt(-4*MT**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)',
(P.ta__minus__,P.ta__plus__):'((-5*ee**2*MTA**2 - ee**2*MZ**2 - (cw**2*ee**2*MTA**2)/(2.*sw**2) + (cw**2*ee**2*MZ**2)/(2.*sw**2) + (7*ee**2*MTA**2*sw**2)/(2.*cw**2) + (5*ee**2*MZ**2*sw**2)/(2.*cw**2))*cmath.sqrt(-4*MTA**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)',
(P.u,P.u__tilde__):'(MZ**2*(-(ee**2*MZ**2) + (3*cw**2*ee**2*MZ**2)/(2.*sw**2) + (17*ee**2*MZ**2*sw**2)/(6.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.ve,P.ve__tilde__):'(MZ**2*(ee**2*MZ**2 + (cw**2*ee**2*MZ**2)/(2.*sw**2) + (ee**2*MZ**2*sw**2)/(2.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.vm,P.vm__tilde__):'(MZ**2*(ee**2*MZ**2 + (cw**2*ee**2*MZ**2)/(2.*sw**2) + (ee**2*MZ**2*sw**2)/(2.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.vt,P.vt__tilde__):'(MZ**2*(ee**2*MZ**2 + (cw**2*ee**2*MZ**2)/(2.*sw**2) + (ee**2*MZ**2*sw**2)/(2.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.W__minus__,P.W__plus__):'(((-12*cw**2*ee**2*MW**2)/sw**2 - (17*cw**2*ee**2*MZ**2)/sw**2 + (4*cw**2*ee**2*MZ**4)/(MW**2*sw**2) + (cw**2*ee**2*MZ**6)/(4.*MW**4*sw**2))*cmath.sqrt(-4*MW**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)'})
Decay_Zp = Decay(name = 'Decay_Zp',
particle = P.Zp,
partial_widths = {(P.b,P.b__tilde__):'(((2*gz**2*MB**2)/3. + (gz**2*MZp**2)/3.)*cmath.sqrt(-4*MB**2*MZp**2 + MZp**4))/(48.*cmath.pi*abs(MZp)**3)',
(P.b,P.omega__tilde__):'(((-3*gz**2*I1b3*MB**2*zomega**2*complexconjugate(I1b3))/4. - (3*gz**2*I1b3*Momega**2*zomega**2*complexconjugate(I1b3))/4. - (3*gz**2*I1b3*MB**4*zomega**2*complexconjugate(I1b3))/(4.*MZp**2) + (3*gz**2*I1b3*MB**2*Momega**2*zomega**2*complexconjugate(I1b3))/(2.*MZp**2) - (3*gz**2*I1b3*Momega**4*zomega**2*complexconjugate(I1b3))/(4.*MZp**2) + (3*gz**2*I1b3*MZp**2*zomega**2*complexconjugate(I1b3))/2.)*cmath.sqrt(MB**4 - 2*MB**2*Momega**2 + Momega**4 - 2*MB**2*MZp**2 - 2*Momega**2*MZp**2 + MZp**4))/(48.*cmath.pi*abs(MZp)**3)',
(P.c,P.c__tilde__):'(gz**2*MZp**4)/(144.*cmath.pi*abs(MZp)**3)',
(P.d,P.d__tilde__):'(gz**2*MZp**4)/(144.*cmath.pi*abs(MZp)**3)',
(P.d,P.omega__tilde__):'((-Momega**2 + MZp**2)*((-3*gz**2*I1b1*Momega**2*zomega**2*complexconjugate(I1b1))/4. - (3*gz**2*I1b1*Momega**4*zomega**2*complexconjugate(I1b1))/(4.*MZp**2) + (3*gz**2*I1b1*MZp**2*zomega**2*complexconjugate(I1b1))/2.))/(48.*cmath.pi*abs(MZp)**3)',
(P.omega,P.b__tilde__):'(((-3*gz**2*I2b3*MB**2*zomega**2*complexconjugate(I2b3))/4. - (3*gz**2*I2b3*Momega**2*zomega**2*complexconjugate(I2b3))/4. - (3*gz**2*I2b3*MB**4*zomega**2*complexconjugate(I2b3))/(4.*MZp**2) + (3*gz**2*I2b3*MB**2*Momega**2*zomega**2*complexconjugate(I2b3))/(2.*MZp**2) - (3*gz**2*I2b3*Momega**4*zomega**2*complexconjugate(I2b3))/(4.*MZp**2) + (3*gz**2*I2b3*MZp**2*zomega**2*complexconjugate(I2b3))/2.)*cmath.sqrt(MB**4 - 2*MB**2*Momega**2 + Momega**4 - 2*MB**2*MZp**2 - 2*Momega**2*MZp**2 + MZp**4))/(48.*cmath.pi*abs(MZp)**3)',
(P.omega,P.d__tilde__):'((-Momega**2 + MZp**2)*((-3*gz**2*I2b1*Momega**2*zomega**2*complexconjugate(I2b1))/4. - (3*gz**2*I2b1*Momega**4*zomega**2*complexconjugate(I2b1))/(4.*MZp**2) + (3*gz**2*I2b1*MZp**2*zomega**2*complexconjugate(I2b1))/2.))/(48.*cmath.pi*abs(MZp)**3)',
(P.omega,P.s__tilde__):'((-Momega**2 + MZp**2)*((-3*gz**2*I2b2*Momega**2*zomega**2*complexconjugate(I2b2))/4. - (3*gz**2*I2b2*Momega**4*zomega**2*complexconjugate(I2b2))/(4.*MZp**2) + (3*gz**2*I2b2*MZp**2*zomega**2*complexconjugate(I2b2))/2.))/(48.*cmath.pi*abs(MZp)**3)',
(P.s,P.omega__tilde__):'((-Momega**2 + MZp**2)*((-3*gz**2*I1b2*Momega**2*zomega**2*complexconjugate(I1b2))/4. - (3*gz**2*I1b2*Momega**4*zomega**2*complexconjugate(I1b2))/(4.*MZp**2) + (3*gz**2*I1b2*MZp**2*zomega**2*complexconjugate(I1b2))/2.))/(48.*cmath.pi*abs(MZp)**3)',
(P.s,P.s__tilde__):'(gz**2*MZp**4)/(144.*cmath.pi*abs(MZp)**3)',
(P.t,P.t__tilde__):'(((2*gz**2*MT**2)/3. + (gz**2*MZp**2)/3.)*cmath.sqrt(-4*MT**2*MZp**2 + MZp**4))/(48.*cmath.pi*abs(MZp)**3)',
(P.u,P.u__tilde__):'(gz**2*MZp**4)/(144.*cmath.pi*abs(MZp)**3)'})
|
[
"[email protected]"
] | |
08842649a48eb36c8cf0554d9be65a5eb137f4a6
|
006ff11fd8cfd5406c6f4318f1bafa1542095f2a
|
/CondTools/L1Trigger/test/L1ConfigWriteRSOnline_cfg.py
|
64fa0d8ce4623c21378f8483b5e825899465cb4e
|
[] |
permissive
|
amkalsi/cmssw
|
8ac5f481c7d7263741b5015381473811c59ac3b1
|
ad0f69098dfbe449ca0570fbcf6fcebd6acc1154
|
refs/heads/CMSSW_7_4_X
| 2021-01-19T16:18:22.857382 | 2016-08-09T16:40:50 | 2016-08-09T16:40:50 | 262,608,661 | 0 | 0 |
Apache-2.0
| 2020-05-09T16:10:07 | 2020-05-09T16:10:07 | null |
UTF-8
|
Python
| false | false | 8,202 |
py
|
# This script doesn't work yet. PoolDBESSource does not see the IOV updates made earlier in the
# same event.
import FWCore.ParameterSet.Config as cms
process = cms.Process("L1ConfigWriteRSOnline")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cout.placeholder = cms.untracked.bool(False)
process.MessageLogger.cout.threshold = cms.untracked.string('DEBUG')
process.MessageLogger.debugModules = cms.untracked.vstring('*')
import FWCore.ParameterSet.VarParsing as VarParsing
options = VarParsing.VarParsing()
options.register('runNumber',
0, #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.int,
"Run number")
options.register('outputDBConnect',
'sqlite_file:l1config.db', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Connection string for output DB")
options.register('outputDBAuth',
'.', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Authentication path for outputDB")
options.register('keysFromDB',
1, #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.int,
"1 = read keys from OMDS, 0 = read keys from command line")
options.register('overwriteKeys',
0, #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.int,
"Overwrite existing keys")
options.register('logTransactions',
1, #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.int,
"Record transactions in log DB")
# arguments for setting object keys by hand
options.register('L1MuDTTFMasksRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1MuGMTChannelMaskRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1RCTChannelMaskRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1GctChannelMaskRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1GtPrescaleFactorsAlgoTrigRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1GtPrescaleFactorsTechTrigRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1GtTriggerMaskAlgoTrigRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1GtTriggerMaskTechTrigRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1GtTriggerMaskVetoTechTrigRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.parseArguments()
# Define CondDB tags
from CondTools.L1Trigger.L1CondEnum_cfi import L1CondEnum
from CondTools.L1Trigger.L1O2OTags_cfi import initL1O2OTags
initL1O2OTags()
if options.keysFromDB == 1:
process.load("CondTools.L1Trigger.L1ConfigRSKeys_cff")
else:
process.load("CondTools.L1Trigger.L1TriggerKeyDummy_cff")
from CondTools.L1Trigger.L1RSSubsystemParams_cfi import initL1RSSubsystems
initL1RSSubsystems( tagBaseVec = initL1O2OTags.tagBaseVec,
L1MuDTTFMasksRcdKey = options.L1MuDTTFMasksRcdKey,
L1MuGMTChannelMaskRcdKey = options.L1MuGMTChannelMaskRcdKey,
L1RCTChannelMaskRcdKey = options.L1RCTChannelMaskRcdKey,
L1GctChannelMaskRcdKey = options.L1GctChannelMaskRcdKey,
L1GtPrescaleFactorsAlgoTrigRcdKey = options.L1GtPrescaleFactorsAlgoTrigRcdKey,
L1GtPrescaleFactorsTechTrigRcdKey = options.L1GtPrescaleFactorsTechTrigRcdKey,
L1GtTriggerMaskAlgoTrigRcdKey = options.L1GtTriggerMaskAlgoTrigRcdKey,
L1GtTriggerMaskTechTrigRcdKey = options.L1GtTriggerMaskTechTrigRcdKey,
L1GtTriggerMaskVetoTechTrigRcdKey = options.L1GtTriggerMaskVetoTechTrigRcdKey,
includeL1RCTNoisyChannelMask = False )
process.L1TriggerKeyDummy.objectKeys = initL1RSSubsystems.params.recordInfo
# Get L1TriggerKeyList from DB
process.load("CondCore.DBCommon.CondDBCommon_cfi")
process.outputDB = cms.ESSource("PoolDBESSource",
process.CondDBCommon,
toGet = cms.VPSet(cms.PSet(
record = cms.string('L1TriggerKeyListRcd'),
tag = cms.string('L1TriggerKeyList_' + initL1O2OTags.tagBaseVec[ L1CondEnum.L1TriggerKeyList ] )
)),
RefreshEachRun=cms.untracked.bool(True)
)
process.outputDB.connect = options.outputDBConnect
process.outputDB.DBParameters.authenticationPath = options.outputDBAuth
# Generate configuration data
process.load("CondTools.L1Trigger.L1ConfigRSPayloads_cff")
# writer modules
from CondTools.L1Trigger.L1CondDBPayloadWriter_cff import initPayloadWriter
initPayloadWriter( process,
outputDBConnect = options.outputDBConnect,
outputDBAuth = options.outputDBAuth,
tagBaseVec = initL1O2OTags.tagBaseVec )
process.L1CondDBPayloadWriter.writeL1TriggerKey = cms.bool(False)
if options.logTransactions == 1:
# initPayloadWriter.outputDB.logconnect = cms.untracked.string('oracle://cms_orcon_prod/CMS_COND_31X_POPCONLOG')
initPayloadWriter.outputDB.logconnect = cms.untracked.string('sqlite_file:l1o2o-log.db')
process.L1CondDBPayloadWriter.logTransactions = True
if options.overwriteKeys == 0:
process.L1CondDBPayloadWriter.overwriteKeys = False
else:
process.L1CondDBPayloadWriter.overwriteKeys = True
from CondTools.L1Trigger.L1CondDBIOVWriter_cff import initIOVWriter
initIOVWriter( process,
outputDBConnect = options.outputDBConnect,
outputDBAuth = options.outputDBAuth,
tagBaseVec = initL1O2OTags.tagBaseVec,
tscKey = '' )
process.L1CondDBIOVWriter.logKeys = True
if options.logTransactions == 1:
# initIOVWriter.outputDB.logconnect = cms.untracked.string('oracle://cms_orcon_prod/CMS_COND_31X_POPCONLOG')
initIOVWriter.outputDB.logconnect = cms.untracked.string('sqlite_file:l1o2o-log.db')
process.L1CondDBIOVWriter.logTransactions = True
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptyIOVSource",
timetype = cms.string('runnumber'),
firstValue = cms.uint64(options.runNumber),
lastValue = cms.uint64(options.runNumber),
interval = cms.uint64(1)
)
# CORAL debugging
#process.outputDB.DBParameters.messageLevel = cms.untracked.int32(3)
process.p = cms.Path(process.L1CondDBPayloadWriter*process.L1CondDBIOVWriter)
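# Example invocation (added note; the run number and connect string below are
# placeholders -- standard cmsRun VarParsing syntax):
#   cmsRun L1ConfigWriteRSOnline_cfg.py runNumber=123456 \
#       outputDBConnect=sqlite_file:l1config.db keysFromDB=1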
|
[
"[email protected]"
] | |
55303a17c04c8a0efbd951d112b3225f0d9cb8b7
|
48983b88ebd7a81bfeba7abd6f45d6462adc0385
|
/MOG/50.py
|
4f0d7d569452389c938806754ec6d5d1f0269de2
|
[] |
no_license
|
lozdan/oj
|
c6366f450bb6fed5afbaa5573c7091adffb4fa4f
|
79007879c5a3976da1e4713947312508adef2e89
|
refs/heads/master
| 2018-09-24T01:29:49.447076 | 2018-06-19T14:33:37 | 2018-06-19T14:33:37 | 109,335,964 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 420 |
py
|
# author: Daniel Lozano
# source: MatcomOnlineGrader (MOG) ( http://matcomgrader.com )
# problem name: El Numero Decodificado
# problem url: http://matcomgrader.com/problem/50/el-numero-decodificado/
n = int(input())
count = 1
def digits_sum(num):
add = 0
while num != 0:
add += num % 10
num = num // 10
return add
while count != digits_sum(n - count):
count += 1
print(n - count)
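# Worked example (added illustration): for n = 21 the loop stops at count = 6,
# since digits_sum(21 - 6) = digits_sum(15) = 6, so the program prints 15
# (and indeed 15 + 1 + 5 = 21).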
|
[
"[email protected]"
] | |
e8ee91c193ae03c4cb6b080a67303d266cfd31f9
|
29fba8902c6af23622c675992cab8e3d85cf94de
|
/AIver3.py
|
91e3c28701f04826af0a3ba9b235ea1257cb77f1
|
[] |
no_license
|
gulmohar96/Backup_holly
|
04b078885c97d439b2723f947db071f3b46935d5
|
66fd8bffce4a08061393e3966f09dd9f44834bed
|
refs/heads/master
| 2021-01-10T11:03:00.198410 | 2016-01-11T03:28:19 | 2016-01-11T03:28:19 | 47,603,289 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,142 |
py
|
## _____________OOP Representation____________ ##
class Resources(object):
def __init__(self,food = 100,sleep = 100,water = 100):
self.sleep = sleep
self.water = water
self.food = food
def copy(self):
return Resources(self.food, self.sleep, self.water)
## _____IGNORE THIS AS OF NOW______##
class Locations(object):
## Problems with location recording:
    # don't know how sensors are set up
# Especially where a single room satisfies multiple needs such as the kitchen supplies food and water
# Also how to have a global empty data structure like location = [] be a global empty variable to store
# different locations through different methods under a class
# How to link to things that are kept in different rooms
# 0 - bedroom , 1- bathroom, 2- Living Room , 3- Kitchen
location = []
def __init__(self,bedroom,bathroom,livingRoom,kitchen,location):
self.bedroom = bedroom
self.bathroom = bathroom
self.livingRoom = livingRoom
self.kitchen = kitchen
self.location = location
    # NOTE: the attribute assignments in __init__ above shadow the methods of
    # the same names on each instance -- kept as in the original sketch
    def bedroom(self, sensor_active=False):
        # placeholder flag standing in for the real sleep sensor signal
        if sensor_active:
            self.location.append(0)
    def bathroom(self, sensor_active=False):
        # placeholder flag standing in for the real water sensor signal
        if sensor_active:
            self.location.append(1)
    def livingRoom(self, sensor_active=False):
        # later: recreational stuff/entertainment etc.
        if sensor_active:
            self.location.append(2)
    def kitchen(self, water_sensor=False, food_sensor=False):
        # the kitchen satisfies both water and food needs
        if water_sensor or food_sensor:
            self.location.append(3)
class Provider(object):
# The provider provides different offers from all the possible resources in the house
def __init__(self,name,time_consumed,resources):
self.name = name
self.time_consumed = time_consumed
self.resources = resources # Gives the status of teh resources at after the usage of a particular object
# that is making a offer
    def make_offer(self):
        # wrap the provider's terms in an Offer object (utility() below reads
        # offer.resources, so a bare tuple would not work)
        return Offer(self.name, self.time_consumed, self.resources)
class Offer(object):
# Intermediate...Just represents the offers from the provider for easy code usage
def __init__(self, name, time_consumed, resources):
self.name = name
self.time_consumed = time_consumed
self.resources = resources
        # __init__ must return None, so the original `return` tuple was dropped
## _____Objects in the house______##
# Put this in class provider factory later
#FOOD
microwave = Provider('Microwave', 10, Resources(5, -0.009, -0.04))  # confirm resource depletion design
# 10 minutes consumed, 5 food points provided, with small sleep and water depletion over the simulated time
oven = Provider('Oven', 30, Resources(10, -0.003 , -0.012) )
toaster = Provider('toaster', 8, Resources(7, -0.001 , -0.08) )
#WATER
kitchen_tap = Provider('Kitchen Tap', 2, Resources(6, -0.005,-0.02))
bathroom_tap = Provider('Bathroom Tap', 3, Resources(4, -0.003,-0.04))
#SLEEP
bed = Provider('Bed', 2, Resources(6, -0.005,-0.02))
couch = Provider('Couch', 2, Resources(6, -0.005,-0.02))
class Actor(object):
def __init__(self):
self.resources = Resources(100, 100, 100) # internal level
self.interacting_provider = None
    @staticmethod
    def deplete(resources, delta):
        # passive decay of the actor's needs over `delta` time units
        # (renamed from `update`, which collided with the instance method below)
        resources.food -= 1 * delta
        resources.water -= 1 * delta
        resources.sleep -= 1 * delta
    def update(self, delta):
        # assumes the interacting provider carries a mutable `time_left` counter
        if self.interacting_provider is not None:
            self.interacting_provider.time_left -= delta
            if self.interacting_provider.time_left <= 0:
                self.interacting_provider = None
def utility(self, offer):
newResources = self.resources.copy()
        Actor.deplete(newResources, offer.time_consumed)
offeredFood = newResources.food + offer.resources.food
offeredWater = newResources.water + offer.resources.water
offeredSleep = newResources.sleep + offer.resources.sleep
return offeredFood * offeredWater * offeredSleep
def rank_providers(actor, providers):
    # `sort` is not a Python builtin; sorted() with a key ranks the offers
    # from lowest to highest utility
    return sorted(providers, key=lambda provider: actor.utility(provider.make_offer()))
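# Minimal demo (added sketch; the actor and provider choices are illustrative,
# not part of the original design):
if __name__ == '__main__':
    holly = Actor()
    ranked = rank_providers(holly, [microwave, oven, toaster])
    print([p.name for p in ranked])  # offers ordered worst-to-best by utility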
|
[
"[email protected]"
] | |
7e45cbd57f56b0f0355d5458456b103b18c6e5ce
|
aefba5494d3eb3a04ee7c3d11058023447710529
|
/api/models.py
|
5de18ecac8682d7653abe1898ab2a52416ad5d63
|
[] |
no_license
|
752217726/jwt
|
511691bc9e6381b558bf3859b91186e70e12d161
|
4b80f60a2e6b811a3661c0d0749e1cc1182af390
|
refs/heads/master
| 2022-11-20T11:27:59.198913 | 2020-07-07T17:47:58 | 2020-07-07T17:47:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 733 |
py
|
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class User(AbstractUser):
phone=models.CharField(max_length=11,unique=True)
class Meta:
db_table="api_user"
verbose_name="用户"
verbose_name_plural=verbose_name
def __str__(self):
return self.username
class Computer(models.Model):
name=models.CharField(max_length=80,unique=True)
price=models.DecimalField(max_digits=8,decimal_places=2)
    brand=models.CharField(max_length=16,verbose_name="品牌")  # verbose_name: "brand"
class Meta:
db_table="computer"
verbose_name="电脑"
verbose_name_plural=verbose_name
def __str__(self):
return self.name
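# Note (added): a custom user model subclassing AbstractUser only takes effect
# if settings.py declares it before the first migration is created, e.g.
#     AUTH_USER_MODEL = 'api.User'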
|
[
"[email protected]"
] | |
8edeeff5764b44cd45bbf5cb023c0d6da482a2ff
|
3f8cce815acefa97deb3d971415c6f869e106bee
|
/firmware/python/handlers.py
|
bc6bf6b6742fc75e5aeb8eb054c699a8b42aaeb3
|
[] |
no_license
|
enricocirignaco/berry_case
|
ced80e2140d8b75b87c3952476d82f50152b0eef
|
8f60dcbe62bf01d76fa8928c66271d89aa8d7e08
|
refs/heads/master
| 2023-06-11T09:50:46.820588 | 2021-06-29T12:14:12 | 2021-06-29T12:14:12 | 271,569,305 | 1 | 3 | null | 2021-03-09T09:24:21 | 2020-06-11T14:34:48 |
Python
|
UTF-8
|
Python
| false | false | 8,620 |
py
|
# Project: berry_case
# File: handlers
# Author:   Enrico Cirignaco
# Created: 29.11.2020
# Import modules
#############################################################################
import time
import oled_display
import parameters
import globals
import subprocess
import fan
is_yes_state = False
is_fan_mode_auto = False
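# Menu layout as inferred from the handlers below (added note):
#   menu_depth 0 -> main menu; main_menu_entry: 0=network, 1=system info,
#                   2=reboot, 3=shutdown, 4=fan control
#   menu_depth 1 -> submenu of the selected main entry
#   menu_depth 2 -> manual fan-speed adjustment (fan entry only)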
#############################################################################
# GPIO Callbacks
# Callback right button
def btn_right_callback(arg):
globals.display_counter = 0
global is_yes_state
global is_fan_mode_auto
    # Check if Display was off, then turn it on and do nothing else
if(globals.is_display_on == False):
oled_display.draw_entry(parameters.DEPTH_0_LABELS[globals.main_menu_entry], parameters.MAIN_ENTRY_FONT_SIZE)
return
# submenu depth 0
# if in main menu go inside submenu
if globals.menu_depth == 0:
globals.menu_depth+= 1
update_submenu()
elif globals.menu_depth == 1 and globals.main_menu_entry > 1:
is_yes_state = False
is_fan_mode_auto = False
update_submenu()
elif globals.menu_depth == 2 and globals.main_menu_entry == 4:
if(globals.fan_speed_man < 10):
globals.fan_speed_man+= 1
fan.run_fan_man(globals.fan_speed_man)
# Callback left button
def btn_left_callback(arg):
global is_yes_state
global is_fan_mode_auto
globals.display_counter = 0
#if depth=0 do nothing
    # Check if Display was off, then turn it on and do nothing else
if(globals.is_display_on == False):
oled_display.draw_entry(parameters.DEPTH_0_LABELS[globals.main_menu_entry], parameters.MAIN_ENTRY_FONT_SIZE)
return
if globals.menu_depth == 1:
#if depth=0 and Main entry is network or system, go back.
if globals.main_menu_entry == 0 or globals.main_menu_entry == 1:
globals.menu_depth-= 1
globals.network_menu_entry = 0
globals.system_info_menu_entry = 0
oled_display.draw_entry(parameters.DEPTH_0_LABELS[globals.main_menu_entry], parameters.MAIN_ENTRY_FONT_SIZE)
else:
is_yes_state = True
is_fan_mode_auto = True
update_submenu()
elif globals.menu_depth == 2 and globals.main_menu_entry == 4:
if(globals.fan_speed_man > 0):
globals.fan_speed_man-= 1
fan.run_fan_man(globals.fan_speed_man)
# Callback down button
def btn_down_callback(arg):
globals.display_counter = 0
    # Check if Display was off, then turn it on and do nothing else
if(globals.is_display_on == False):
oled_display.draw_entry(parameters.DEPTH_0_LABELS[globals.main_menu_entry], parameters.MAIN_ENTRY_FONT_SIZE)
return
    # if in main menu scroll to next entry
if globals.menu_depth == 0:
if globals.main_menu_entry < parameters.MAIN_MENU_ENTRY_CNT-1:
globals.main_menu_entry+= 1
oled_display.draw_entry(parameters.DEPTH_0_LABELS[globals.main_menu_entry], parameters.MAIN_ENTRY_FONT_SIZE)
    elif globals.menu_depth == 1:
#network submenu
if globals.main_menu_entry == 0:
if globals.network_menu_entry < parameters.NETWORK_MENU_ENTRY_CNT-1:
globals.network_menu_entry+= 1
oled_display.draw_entry(parameters.DEPTH_1_NETWORK_LABELS[globals.network_menu_entry], parameters.NETWORK_ENTRY_FONT_SIZE)
# system info submenu
elif globals.main_menu_entry == 1:
if globals.system_info_menu_entry < parameters.SYSTEM_INFO_MENU_ENTRY_CNT-1:
globals.system_info_menu_entry+= 1
oled_display.draw_entry(parameters.DEPTH_1_SYSTEM_INFO_LABELS[globals.system_info_menu_entry], parameters.NETWORK_ENTRY_FONT_SIZE)
# Callback up button
def btn_up_callback(arg):
globals.display_counter = 0
    # Check if Display was off, then turn it on and do nothing else
if(globals.is_display_on == False):
oled_display.draw_entry(parameters.DEPTH_0_LABELS[globals.main_menu_entry], parameters.MAIN_ENTRY_FONT_SIZE)
return
    # if in main menu scroll to previous entry
if globals.menu_depth == 0:
if globals.main_menu_entry > 0:
globals.main_menu_entry-= 1
oled_display.draw_entry(parameters.DEPTH_0_LABELS[globals.main_menu_entry], parameters.MAIN_ENTRY_FONT_SIZE)
elif globals.menu_depth == 1:
#network submenu
if globals.main_menu_entry == 0:
if globals.network_menu_entry > 0:
globals.network_menu_entry-= 1
oled_display.draw_entry(parameters.DEPTH_1_NETWORK_LABELS[globals.network_menu_entry], parameters.NETWORK_ENTRY_FONT_SIZE)
# system info submenu
elif globals.main_menu_entry == 1:
if globals.system_info_menu_entry > 0:
globals.system_info_menu_entry-= 1
oled_display.draw_entry(parameters.DEPTH_1_SYSTEM_INFO_LABELS[globals.system_info_menu_entry], parameters.NETWORK_ENTRY_FONT_SIZE)
# Callback center button
def btn_center_callback(arg):
globals.display_counter = 0
global is_yes_state
global is_fan_mode_auto
    # Check if Display was off, then turn it on and do nothing else
if(globals.is_display_on == False):
oled_display.draw_entry(parameters.DEPTH_0_LABELS[globals.main_menu_entry], parameters.MAIN_ENTRY_FONT_SIZE)
return
# submenu depth 0
# if in main menu go inside submenu
if globals.menu_depth == 0:
globals.menu_depth+= 1
update_submenu()
# submenu depth 1
elif globals.menu_depth == 1:
if globals.main_menu_entry == 0 or globals.main_menu_entry == 1:
globals.menu_depth-= 1
globals.network_menu_entry = 0
globals.system_info_menu_entry = 0
oled_display.draw_entry(parameters.DEPTH_0_LABELS[globals.main_menu_entry], parameters.MAIN_ENTRY_FONT_SIZE)
else:
if(is_yes_state == True and globals.main_menu_entry == 2):
#reboot
oled_display.draw_entry("Shutting Down...", parameters.SHUTDOWN_FONT_SIZE)
time.sleep(2)
oled_display.draw_turn_off()
subprocess.run("reboot", shell=True)
exit()
elif(is_yes_state == True and globals.main_menu_entry == 3):
#shutdown
oled_display.draw_entry("Shutting Down...", parameters.SHUTDOWN_FONT_SIZE)
time.sleep(2)
oled_display.draw_turn_off()
subprocess.run("systemctl poweroff -i", shell=True)
exit()
elif(globals.main_menu_entry == 4):
if(is_fan_mode_auto == True):
#set auto and escape submenu
globals.is_fan_auto_enabled = True
globals.menu_depth-= 1
oled_display.draw_entry(parameters.DEPTH_0_LABELS[globals.main_menu_entry], parameters.MAIN_ENTRY_FONT_SIZE)
else:
#enter fan speed menu
globals.is_fan_auto_enabled = False
fan.run_fan_man(globals.fan_speed_man)
globals.menu_depth+= 1
else:
globals.menu_depth-= 1
oled_display.draw_entry(parameters.DEPTH_0_LABELS[globals.main_menu_entry], parameters.MAIN_ENTRY_FONT_SIZE)
# submenu depth 2
elif(globals.menu_depth == 2):
if(globals.main_menu_entry == 4):
globals.menu_depth = 0
oled_display.draw_entry(parameters.DEPTH_0_LABELS[globals.main_menu_entry], parameters.MAIN_ENTRY_FONT_SIZE)
# other functions
#############################################################################
def update_submenu():
global is_yes_state
global is_fan_mode_auto
if globals.main_menu_entry == 0:
#net
oled_display.draw_entry(parameters.DEPTH_1_NETWORK_LABELS[globals.network_menu_entry], parameters.NETWORK_ENTRY_FONT_SIZE)
elif globals.main_menu_entry == 1:
#system info
oled_display.draw_entry(parameters.DEPTH_1_SYSTEM_INFO_LABELS[globals.system_info_menu_entry], parameters.SYSTEM_INFO_ENTRY_FONT_SIZE)
# reboot submenu
elif globals.main_menu_entry == 2 :
oled_display.draw_selection("yes", "no", is_yes_state)
elif globals.main_menu_entry == 3:
oled_display.draw_selection("yes", "no", is_yes_state)
elif globals.main_menu_entry == 4:
oled_display.draw_selection("auto", "man", is_fan_mode_auto)
|
[
"[email protected]"
] | |
517bd5acebde99e8880d3c59fab298bdb6e1de60
|
6032d8dd4ce8e80c68f5a543fdd367c2f05b0681
|
/SConstruct
|
188cd329642b28f85b35351a7a6a754d3fc96322
|
[
"BSD-2-Clause"
] |
permissive
|
drakedevel/nethatch
|
dc8b6f478884457345c7ee6d90954a4ee30c3378
|
0187871129d223f9905ac50367a0e30cac285ef7
|
refs/heads/master
| 2021-06-08T08:55:40.509717 | 2011-05-09T23:51:03 | 2011-05-09T23:51:03 | 2,227,979 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 135 |
# -*- Mode: Python -*-
Program('nethatch',
['Main.cc'],
CC='clang',
CXX='clang++',
LIBS=['pcap', 'net'],
CXXFLAGS=['-g'])
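# Typical usage (added note): run `scons` in this directory to build the
# `nethatch` binary, and `scons -c` to remove the build products.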
|
[
"[email protected]"
] | ||
22b9583d4e86075bcd2f54a1ae3c118d1a1510ef
|
0bb49acb7bb13a09adafc2e43e339f4c956e17a6
|
/OpenAssembler/Gui/OAS_Window/oas_main04.py
|
92052edb6cc3e03aebcd64f5fd56a89706efd491
|
[] |
no_license
|
all-in-one-of/openassembler-7
|
94f6cdc866bceb844246de7920b7cbff9fcc69bf
|
69704d1c4aa4b1b99f484c8c7884cf73d412fafe
|
refs/heads/master
| 2021-01-04T18:08:10.264830 | 2010-07-02T10:50:16 | 2010-07-02T10:50:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 28,262 |
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'oas_main04.ui'
#
# Created: Wed Jul 15 10:23:49 2009
# by: PyQt4 UI code generator 4.4.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_oasWindow(object):
def setupUi(self, oasWindow):
oasWindow.setObjectName("oasWindow")
oasWindow.resize(885, 533)
self.oas_centralwidget = QtGui.QWidget(oasWindow)
self.oas_centralwidget.setObjectName("oas_centralwidget")
self.gridLayout_6 = QtGui.QGridLayout(self.oas_centralwidget)
self.gridLayout_6.setMargin(2)
self.gridLayout_6.setSpacing(2)
self.gridLayout_6.setObjectName("gridLayout_6")
self.oas_splitter = QtGui.QSplitter(self.oas_centralwidget)
self.oas_splitter.setOrientation(QtCore.Qt.Horizontal)
self.oas_splitter.setObjectName("oas_splitter")
self.oas_splitter02 = QtGui.QSplitter(self.oas_splitter)
self.oas_splitter02.setOrientation(QtCore.Qt.Vertical)
self.oas_splitter02.setObjectName("oas_splitter02")
self.oas_menuline_frame = QtGui.QFrame(self.oas_splitter02)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.oas_menuline_frame.sizePolicy().hasHeightForWidth())
self.oas_menuline_frame.setSizePolicy(sizePolicy)
self.oas_menuline_frame.setMinimumSize(QtCore.QSize(0, 30))
self.oas_menuline_frame.setMaximumSize(QtCore.QSize(16777215, 30))
self.oas_menuline_frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.oas_menuline_frame.setFrameShadow(QtGui.QFrame.Raised)
self.oas_menuline_frame.setObjectName("oas_menuline_frame")
self.gridLayout_3 = QtGui.QGridLayout(self.oas_menuline_frame)
self.gridLayout_3.setMargin(2)
self.gridLayout_3.setSpacing(2)
self.gridLayout_3.setObjectName("gridLayout_3")
self.oas_horizontalLayout_3 = QtGui.QHBoxLayout()
self.oas_horizontalLayout_3.setSpacing(2)
self.oas_horizontalLayout_3.setObjectName("oas_horizontalLayout_3")
self.oas_new_bu = QtGui.QToolButton(self.oas_menuline_frame)
self.oas_new_bu.setObjectName("oas_new_bu")
self.oas_horizontalLayout_3.addWidget(self.oas_new_bu)
spacerItem = QtGui.QSpacerItem(10, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
self.oas_horizontalLayout_3.addItem(spacerItem)
self.oas_open_bu = QtGui.QToolButton(self.oas_menuline_frame)
self.oas_open_bu.setObjectName("oas_open_bu")
self.oas_horizontalLayout_3.addWidget(self.oas_open_bu)
self.oas_save_bu = QtGui.QToolButton(self.oas_menuline_frame)
self.oas_save_bu.setObjectName("oas_save_bu")
self.oas_horizontalLayout_3.addWidget(self.oas_save_bu)
self.oas_saveas_bu = QtGui.QToolButton(self.oas_menuline_frame)
self.oas_saveas_bu.setObjectName("oas_saveas_bu")
self.oas_horizontalLayout_3.addWidget(self.oas_saveas_bu)
spacerItem1 = QtGui.QSpacerItem(15, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
self.oas_horizontalLayout_3.addItem(spacerItem1)
self.oas_run_bu = QtGui.QToolButton(self.oas_menuline_frame)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(86, 255, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(56, 255, 26))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 255, 56))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(86, 255, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(56, 255, 26))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 255, 56))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(28, 28, 28))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(56, 255, 26))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(28, 28, 28))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.oas_run_bu.setPalette(palette)
self.oas_run_bu.setObjectName("oas_run_bu")
self.oas_horizontalLayout_3.addWidget(self.oas_run_bu)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.oas_horizontalLayout_3.addItem(spacerItem2)
self.oas_search_entry = QtGui.QLineEdit(self.oas_menuline_frame)
self.oas_search_entry.setObjectName("oas_search_entry")
self.oas_horizontalLayout_3.addWidget(self.oas_search_entry)
self.oas_search_bu = QtGui.QToolButton(self.oas_menuline_frame)
self.oas_search_bu.setObjectName("oas_search_bu")
self.oas_horizontalLayout_3.addWidget(self.oas_search_bu)
self.gridLayout_3.addLayout(self.oas_horizontalLayout_3, 0, 0, 1, 1)
self.oas_graphicsView = QtGui.QGraphicsView(self.oas_splitter02)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(225, 225, 225))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(187, 187, 187))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(75, 75, 75))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(100, 100, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(202, 202, 202))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(225, 225, 225))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(187, 187, 187))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(75, 75, 75))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(100, 100, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(202, 202, 202))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(75, 75, 75))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(225, 225, 225))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(187, 187, 187))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(75, 75, 75))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(100, 100, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(75, 75, 75))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(75, 75, 75))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.oas_graphicsView.setPalette(palette)
self.oas_graphicsView.setFocusPolicy(QtCore.Qt.StrongFocus)
self.oas_graphicsView.setObjectName("oas_graphicsView")
self.oas_timeline_frame = QtGui.QFrame(self.oas_splitter02)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.oas_timeline_frame.sizePolicy().hasHeightForWidth())
self.oas_timeline_frame.setSizePolicy(sizePolicy)
self.oas_timeline_frame.setMinimumSize(QtCore.QSize(0, 70))
self.oas_timeline_frame.setMaximumSize(QtCore.QSize(16777215, 70))
self.oas_timeline_frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.oas_timeline_frame.setFrameShadow(QtGui.QFrame.Raised)
self.oas_timeline_frame.setObjectName("oas_timeline_frame")
self.gridLayout = QtGui.QGridLayout(self.oas_timeline_frame)
self.gridLayout.setMargin(2)
self.gridLayout.setSpacing(2)
self.gridLayout.setObjectName("gridLayout")
self.oas_verticalLayout = QtGui.QVBoxLayout()
self.oas_verticalLayout.setObjectName("oas_verticalLayout")
self.oas_horizontalLayout = QtGui.QHBoxLayout()
self.oas_horizontalLayout.setObjectName("oas_horizontalLayout")
self.oas_sframe_spin = QtGui.QSpinBox(self.oas_timeline_frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.oas_sframe_spin.sizePolicy().hasHeightForWidth())
self.oas_sframe_spin.setSizePolicy(sizePolicy)
self.oas_sframe_spin.setMinimum(-100000)
self.oas_sframe_spin.setMaximum(100000)
self.oas_sframe_spin.setObjectName("oas_sframe_spin")
self.oas_horizontalLayout.addWidget(self.oas_sframe_spin)
self.oas_time_slider = QtGui.QSlider(self.oas_timeline_frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.oas_time_slider.sizePolicy().hasHeightForWidth())
self.oas_time_slider.setSizePolicy(sizePolicy)
self.oas_time_slider.setSliderPosition(0)
self.oas_time_slider.setOrientation(QtCore.Qt.Horizontal)
self.oas_time_slider.setTickPosition(QtGui.QSlider.TicksBothSides)
self.oas_time_slider.setTickInterval(0)
self.oas_time_slider.setObjectName("oas_time_slider")
self.oas_horizontalLayout.addWidget(self.oas_time_slider)
self.oas_eframe_spin = QtGui.QSpinBox(self.oas_timeline_frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.oas_eframe_spin.sizePolicy().hasHeightForWidth())
self.oas_eframe_spin.setSizePolicy(sizePolicy)
self.oas_eframe_spin.setMinimum(-100000)
self.oas_eframe_spin.setMaximum(100000)
self.oas_eframe_spin.setObjectName("oas_eframe_spin")
self.oas_horizontalLayout.addWidget(self.oas_eframe_spin)
self.oas_verticalLayout.addLayout(self.oas_horizontalLayout)
self.oas_horizontalLayout_2 = QtGui.QHBoxLayout()
self.oas_horizontalLayout_2.setObjectName("oas_horizontalLayout_2")
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.oas_horizontalLayout_2.addItem(spacerItem3)
self.oas_firstF = QtGui.QToolButton(self.oas_timeline_frame)
self.oas_firstF.setMinimumSize(QtCore.QSize(20, 20))
self.oas_firstF.setMaximumSize(QtCore.QSize(20, 20))
self.oas_firstF.setObjectName("oas_firstF")
self.oas_horizontalLayout_2.addWidget(self.oas_firstF)
self.oas_prewF = QtGui.QToolButton(self.oas_timeline_frame)
self.oas_prewF.setMinimumSize(QtCore.QSize(20, 20))
self.oas_prewF.setMaximumSize(QtCore.QSize(20, 20))
self.oas_prewF.setObjectName("oas_prewF")
self.oas_horizontalLayout_2.addWidget(self.oas_prewF)
self.oas_cframe_spin = QtGui.QSpinBox(self.oas_timeline_frame)
self.oas_cframe_spin.setMinimumSize(QtCore.QSize(0, 20))
self.oas_cframe_spin.setMaximumSize(QtCore.QSize(16777215, 20))
self.oas_cframe_spin.setMinimum(-100000)
self.oas_cframe_spin.setMaximum(100000)
self.oas_cframe_spin.setObjectName("oas_cframe_spin")
self.oas_horizontalLayout_2.addWidget(self.oas_cframe_spin)
self.oas_nextF = QtGui.QToolButton(self.oas_timeline_frame)
self.oas_nextF.setMinimumSize(QtCore.QSize(20, 20))
self.oas_nextF.setMaximumSize(QtCore.QSize(20, 20))
self.oas_nextF.setObjectName("oas_nextF")
self.oas_horizontalLayout_2.addWidget(self.oas_nextF)
self.oas_lastF = QtGui.QToolButton(self.oas_timeline_frame)
self.oas_lastF.setMinimumSize(QtCore.QSize(20, 20))
self.oas_lastF.setMaximumSize(QtCore.QSize(20, 20))
self.oas_lastF.setObjectName("oas_lastF")
self.oas_horizontalLayout_2.addWidget(self.oas_lastF)
spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.oas_horizontalLayout_2.addItem(spacerItem4)
self.oas_verticalLayout.addLayout(self.oas_horizontalLayout_2)
self.gridLayout.addLayout(self.oas_verticalLayout, 0, 0, 1, 1)
self.oas_splitter03 = QtGui.QSplitter(self.oas_splitter)
self.oas_splitter03.setOrientation(QtCore.Qt.Vertical)
self.oas_splitter03.setObjectName("oas_splitter03")
self.oas_attribute_frame = QtGui.QFrame(self.oas_splitter03)
self.oas_attribute_frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.oas_attribute_frame.setFrameShadow(QtGui.QFrame.Raised)
self.oas_attribute_frame.setObjectName("oas_attribute_frame")
self.gridLayout_2 = QtGui.QGridLayout(self.oas_attribute_frame)
self.gridLayout_2.setMargin(2)
self.gridLayout_2.setSpacing(2)
self.gridLayout_2.setObjectName("gridLayout_2")
self.oas_verticalLayout_2 = QtGui.QVBoxLayout()
self.oas_verticalLayout_2.setObjectName("oas_verticalLayout_2")
self.oas_nodeName = QtGui.QLineEdit(self.oas_attribute_frame)
self.oas_nodeName.setObjectName("oas_nodeName")
self.oas_verticalLayout_2.addWidget(self.oas_nodeName)
self.oas_horizontalLayout_4 = QtGui.QHBoxLayout()
self.oas_horizontalLayout_4.setObjectName("oas_horizontalLayout_4")
self.oas_label_2 = QtGui.QLabel(self.oas_attribute_frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.oas_label_2.sizePolicy().hasHeightForWidth())
self.oas_label_2.setSizePolicy(sizePolicy)
self.oas_label_2.setObjectName("oas_label_2")
self.oas_horizontalLayout_4.addWidget(self.oas_label_2)
self.oas_attribute_nodetype = QtGui.QLabel(self.oas_attribute_frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.oas_attribute_nodetype.sizePolicy().hasHeightForWidth())
self.oas_attribute_nodetype.setSizePolicy(sizePolicy)
self.oas_attribute_nodetype.setObjectName("oas_attribute_nodetype")
self.oas_horizontalLayout_4.addWidget(self.oas_attribute_nodetype)
spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.oas_horizontalLayout_4.addItem(spacerItem5)
self.oas_attribute_cache = QtGui.QCheckBox(self.oas_attribute_frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.oas_attribute_cache.sizePolicy().hasHeightForWidth())
self.oas_attribute_cache.setSizePolicy(sizePolicy)
self.oas_attribute_cache.setObjectName("oas_attribute_cache")
self.oas_horizontalLayout_4.addWidget(self.oas_attribute_cache)
self.oas_verticalLayout_2.addLayout(self.oas_horizontalLayout_4)
self.oas_attribute_area = QtGui.QScrollArea(self.oas_attribute_frame)
self.oas_attribute_area.setWidgetResizable(True)
self.oas_attribute_area.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.oas_attribute_area.setObjectName("oas_attribute_area")
self.oas_attribute_areaContents = QtGui.QWidget(self.oas_attribute_area)
self.oas_attribute_areaContents.setGeometry(QtCore.QRect(0, 0, 267, 127))
self.oas_attribute_areaContents.setLayoutDirection(QtCore.Qt.LeftToRight)
self.oas_attribute_areaContents.setObjectName("oas_attribute_areaContents")
self.gridLayout_5 = QtGui.QGridLayout(self.oas_attribute_areaContents)
self.gridLayout_5.setMargin(2)
self.gridLayout_5.setSpacing(2)
self.gridLayout_5.setObjectName("gridLayout_5")
self.oas_attribute_layout = QtGui.QVBoxLayout()
self.oas_attribute_layout.setObjectName("oas_attribute_layout")
self.place_to_widgets = QtGui.QVBoxLayout()
self.place_to_widgets.setObjectName("place_to_widgets")
self.oas_attribute_layout.addLayout(self.place_to_widgets)
spacerItem6 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.oas_attribute_layout.addItem(spacerItem6)
self.gridLayout_5.addLayout(self.oas_attribute_layout, 0, 0, 1, 1)
self.oas_attribute_area.setWidget(self.oas_attribute_areaContents)
self.oas_verticalLayout_2.addWidget(self.oas_attribute_area)
self.gridLayout_2.addLayout(self.oas_verticalLayout_2, 0, 0, 1, 1)
self.frame = QtGui.QFrame(self.oas_splitter03)
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName("frame")
self.gridLayout_4 = QtGui.QGridLayout(self.frame)
self.gridLayout_4.setMargin(2)
self.gridLayout_4.setSpacing(2)
self.gridLayout_4.setObjectName("gridLayout_4")
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.consoleOutArea = QtGui.QTextEdit(self.frame)
self.consoleOutArea.setReadOnly(True)
self.consoleOutArea.setObjectName("consoleOutArea")
self.verticalLayout.addWidget(self.consoleOutArea)
self.consoleInArea = QtGui.QLineEdit(self.frame)
self.consoleInArea.setObjectName("consoleInArea")
self.verticalLayout.addWidget(self.consoleInArea)
self.gridLayout_4.addLayout(self.verticalLayout, 0, 0, 1, 1)
self.gridLayout_6.addWidget(self.oas_splitter, 0, 0, 1, 1)
oasWindow.setCentralWidget(self.oas_centralwidget)
self.oas_menubar = QtGui.QMenuBar(oasWindow)
self.oas_menubar.setEnabled(False)
self.oas_menubar.setGeometry(QtCore.QRect(0, 0, 885, 25))
self.oas_menubar.setObjectName("oas_menubar")
oasWindow.setMenuBar(self.oas_menubar)
self.retranslateUi(oasWindow)
QtCore.QObject.connect(self.oas_nextF, QtCore.SIGNAL("clicked()"), self.oas_cframe_spin.stepUp)
QtCore.QObject.connect(self.oas_prewF, QtCore.SIGNAL("clicked()"), self.oas_cframe_spin.stepDown)
QtCore.QObject.connect(self.oas_time_slider, QtCore.SIGNAL("sliderMoved(int)"), self.oas_cframe_spin.setValue)
QtCore.QObject.connect(self.oas_cframe_spin, QtCore.SIGNAL("valueChanged(int)"), self.oas_time_slider.setValue)
QtCore.QMetaObject.connectSlotsByName(oasWindow)
def retranslateUi(self, oasWindow):
oasWindow.setWindowTitle(QtGui.QApplication.translate("oasWindow", "OpenAssembler", None, QtGui.QApplication.UnicodeUTF8))
self.oas_new_bu.setText(QtGui.QApplication.translate("oasWindow", "New", None, QtGui.QApplication.UnicodeUTF8))
self.oas_open_bu.setText(QtGui.QApplication.translate("oasWindow", "Open", None, QtGui.QApplication.UnicodeUTF8))
self.oas_save_bu.setText(QtGui.QApplication.translate("oasWindow", "Save", None, QtGui.QApplication.UnicodeUTF8))
self.oas_saveas_bu.setText(QtGui.QApplication.translate("oasWindow", "SaveAs", None, QtGui.QApplication.UnicodeUTF8))
self.oas_run_bu.setText(QtGui.QApplication.translate("oasWindow", " RUN ", None, QtGui.QApplication.UnicodeUTF8))
self.oas_search_bu.setText(QtGui.QApplication.translate("oasWindow", "Search", None, QtGui.QApplication.UnicodeUTF8))
self.oas_firstF.setText(QtGui.QApplication.translate("oasWindow", "<<", None, QtGui.QApplication.UnicodeUTF8))
self.oas_prewF.setText(QtGui.QApplication.translate("oasWindow", "<", None, QtGui.QApplication.UnicodeUTF8))
self.oas_nextF.setText(QtGui.QApplication.translate("oasWindow", ">", None, QtGui.QApplication.UnicodeUTF8))
self.oas_lastF.setText(QtGui.QApplication.translate("oasWindow", ">>", None, QtGui.QApplication.UnicodeUTF8))
self.oas_label_2.setText(QtGui.QApplication.translate("oasWindow", "Node Type:", None, QtGui.QApplication.UnicodeUTF8))
self.oas_attribute_nodetype.setText(QtGui.QApplication.translate("oasWindow", "empty", None, QtGui.QApplication.UnicodeUTF8))
self.oas_attribute_cache.setText(QtGui.QApplication.translate("oasWindow", "cache", None, QtGui.QApplication.UnicodeUTF8))
|
[
"laszlo.mates@732492aa-5b49-0410-a19c-07a6d82ec771"
] |
laszlo.mates@732492aa-5b49-0410-a19c-07a6d82ec771
|
acc5140eed459a8d0031c36fe860025d7d8e74cc
|
a05107c0603a4e5bc4015da023ff8b3f3d3feb43
|
/scratch/zip_func.py
|
0196e0f033348c9e6488f92b90fc70364f97343a
|
[] |
no_license
|
kokomyoheinhtet/justAScratch
|
408377bd521c398aee79b4b55517e1867e1009aa
|
f7aa385131c8083f1f18b6f1f94797a85c98c943
|
refs/heads/master
| 2023-05-05T19:44:56.519003 | 2021-05-28T02:50:37 | 2021-05-28T02:50:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 177 |
py
|
name = ["Max", "Mike", "Dustin"]
roll_num = [10, 11, 12]
house_num = [22, "2B", "GF"]
z = list(zip(name, roll_num))
print(z)
z = list(zip(name, roll_num, house_num))
print(z)
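# Unzipping (added illustration): zip(*z) regroups the tuples back into the
# original sequences:
names2, rolls2, houses2 = zip(*z)
print(names2)  # ('Max', 'Mike', 'Dustin')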
|
[
"[email protected]"
] | |
0cbdef8a37a86fe2d3b1635440bd6bb69bce25a1
|
c516970a5be83f1267876ae389ac724e91d324d1
|
/tests/sample.py
|
44499a6ce912a84202bba16d6bd25ff8db6b9e3d
|
[
"Apache-2.0"
] |
permissive
|
thaichat04/conciliator-python
|
5938f6bec5da85057099501223930b0f2fb9e559
|
30063c658bd234a10674098711c3651ceca1c5ef
|
refs/heads/main
| 2023-04-29T07:51:02.543089 | 2021-05-18T14:27:28 | 2021-05-18T14:27:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 775 |
py
|
import conciliator as cc
import json
import datetime
config_file = 'password.json'
config = json.load(open(config_file, 'r'))
#cc.connect(config['onetenant']['username'], config['onetenant']['password'])
cc.connect(config['selecttenant']['username'], config['selecttenant']['password'],
config['selecttenant']['selectedtenant'])
cs = cc.Connector.list()
for c in cs:
if c.type == 'CSV':
print(c.param('purchaseCode'))
#c.config[name = 'purchasCode'] .value
pass
e = cc.Entity.load(query='DH')
es = cc.Entity.list()
e.set_current()
#cc.current_entity = e
#f = cc.File.load(name='in1HnhltCcKlYJxALVbRmifsM2.pdf')
#i = cc.Invoice.list("1442", filter={'date': datetime.date(2020, 11, 19)})
invoices = cc.Invoice.search("intercom")
print(invoices)
|
[
"[email protected]"
] | |
d7addf1ab910c3b3d0915f35ba5d16cbad4c059c
|
cbcb602f3ce73794b926323381dfc9ca65c6f8d4
|
/python_practise/strings.py
|
410cc9372f127bd079937dfe0be4e4dd6a0f2d76
|
[] |
no_license
|
medesiv/ds_algo
|
820ab7eaab64e5f34b211daae58dce961ca0ef11
|
70cb1ee0cdc1ddec93861aef56610f7def1472e1
|
refs/heads/master
| 2023-07-16T21:41:04.682282 | 2021-08-31T17:12:32 | 2021-08-31T17:12:32 | 372,052,748 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 142 |
py
|
#Strings: ordered, immutable, text representation
my_string = 'I\'m a programmer\n'
who_rules = """"who
""""rules"
print(my_string+who_rules)
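# Note (added): the four quotes above open a triple-quoted string whose first
# character is '"'; the adjacent "rules" literal is concatenated onto it, so
# who_rules evaluates to '"who\nrules'.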
|
[
"[email protected]"
] | |
6d7a2f0794f08d46abf1e59b99592168d8106756
|
824fc4e1cd5369d8049f34d6913d605ef5440db3
|
/chap3_11_practice.py
|
a5a6da87503a4f3f7f35829373ce866885d3f55f
|
[] |
no_license
|
rainbowchang/PythonProject
|
dcd57e77ec80c1afe423af5bf7df1f143dd7cd0a
|
4362786fc265926b1f8259a5e2bbf016b26fb518
|
refs/heads/master
| 2022-12-05T04:14:38.119357 | 2020-08-26T11:56:39 | 2020-08-26T11:56:39 | 284,892,367 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,441 |
py
|
import requests
import smtplib
import schedule
import time
from bs4 import BeautifulSoup
from email.mime.text import MIMEText
from email.header import Header
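# Note (added): smtp.qq.com authenticates with an SMTP authorization code
# generated in the QQ Mail settings, not the regular account password.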
account = input('Enter your email address: ')
password = input('Enter your password: ')
receiver = input("Enter the recipient's email address: ")
index = 0
# `index` is used to stop the program after the job has run twice
def movie_spider():
res_movies = requests.get('https://movie.douban.com/chart')
bs_movies = BeautifulSoup(res_movies.text, 'html.parser')
list_movies = bs_movies.find_all('div', class_='pl2')
list_all = []
for movie in list_movies:
tag_a = movie.find('a')
        name = tag_a.text.replace(' ', '').replace('\n', '')
        # movie title; replace() strips the extra spaces and newlines
        url = tag_a['href']
        # link to the movie's detail page
        tag_p = movie.find('p', class_='pl')
        # extract the <p> tag from the parent element
        information = tag_p.text.replace(' ', '').replace('\n', '')
        # basic movie info; replace() strips the extra spaces and newlines
        tag_div = movie.find('div', class_='star clearfix')
        # extract the <div> tag from the parent element
        rating = tag_div.text.replace(' ', '').replace('\n', '')
        # rating info; replace() strips the extra spaces and newlines
        list_all.append(name + url + information + rating)
        # append the title, URL, basic info and rating to list_all
return list_all
def send_email(movie_list):
global account, password, receiver
mailhost = 'smtp.qq.com'
qqmail = smtplib.SMTP_SSL()
qqmail.connect(mailhost, 465)
qqmail.login(account, password)
content = '\n'.join(movie_list)
print(content)
message = MIMEText(content, 'plain', 'utf-8')
    subject = "This week's Douban new movie chart"
message['Subject'] = Header(subject, 'utf-8')
try:
qqmail.sendmail(account, receiver, message.as_string())
        print('Email sent successfully')
    except:
        print('Failed to send email')
qqmail.quit()
def job():
global index
    print('Starting job')
movie_list = movie_spider()
send_email(movie_list)
print(movie_list)
    print('Job finished')
index += 1
schedule.every().second.do(job)
while index != 2:
    # the loop (and the program) ends once index == 2
schedule.run_pending()
time.sleep(1)
|
[
"[email protected]"
] | |
454b50fa8f0f2c29380704440f1aae41aa362ceb
|
45fc9f8a678deb918e7e2c71116812e7e9793047
|
/cms_test/wsgi.py
|
e9d452fb71367d70dd646842b013ccd1cf962103
|
[] |
no_license
|
SanjivVajra/Django-CMS-test
|
ce48852f0cdb7abdb225f26731bdec1e87588a30
|
a7d76242861f2b8cea854c976b868d25dbe91a1c
|
refs/heads/master
| 2021-06-14T14:23:54.624077 | 2018-05-07T09:03:52 | 2018-05-07T09:03:52 | 131,136,988 | 0 | 0 | null | 2021-06-10T20:16:14 | 2018-04-26T10:05:38 |
Python
|
UTF-8
|
Python
| false | false | 394 |
py
|
"""
WSGI config for cms_test project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cms_test.settings")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
9f64b6fde8ce5918644f9e426104b18db422e7c5
|
f881c10e0d654da82218403dbd2adbdc606dc455
|
/apps/user_login/models.py
|
96fff17dd9d564cfa7fed5ca4f762658b6b74462
|
[] |
no_license
|
alialwahish/restfull_users
|
1732dceeddf4367d678ff6cdf2668dbc95463182
|
24d00811b2b46b33e5cf5c311367bd153344dc70
|
refs/heads/master
| 2020-03-17T15:37:13.562082 | 2018-05-16T20:05:21 | 2018-05-16T20:05:21 | 133,717,438 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 419 |
py
|
from __future__ import unicode_literals
from django.db import models
class dojo(models.Model):
name=models.CharField(max_length=255)
city=models.CharField(max_length=255)
state=models.CharField(max_length=2)
class ninjas(models.Model):
first_name=models.CharField(max_length=255)
last_name=models.CharField(max_length=255)
    dojo = models.ForeignKey(dojo, on_delete=models.CASCADE, related_name="ninjas")  # on_delete expects a models.* handler, not a bool
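# Example usage (added sketch, e.g. from `python manage.py shell`):
#   d = dojo.objects.create(name='CodingDojo', city='Seattle', state='WA')
#   ninjas.objects.create(first_name='Ada', last_name='Lovelace', dojo=d)
#   d.ninjas.all()  # reverse lookup through related_name="ninjas"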
|
[
"[email protected]"
] | |
44097da54a0bb03ac14196712111a1489a956689
|
172d8623d20d374bee782c3eb08a5e2b5382f412
|
/python/s3_boto3.py
|
22a5c98b6f60bb3b7dd4c85ed335cfc1011560b7
|
[] |
no_license
|
Abhishek24094/dev
|
8124702e3e7da04eb626bd88cbfcc1a0645a8cae
|
17d6bbc1bc371e60a69f646340e2d851a8a94899
|
refs/heads/master
| 2021-06-30T15:56:35.505092 | 2020-09-25T08:10:52 | 2020-09-25T08:10:52 | 162,133,457 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 374 |
py
|
# proper clarification for requirement is required
import boto3
# placeholder names (assumptions) -- substitute real bucket/file names
YOUR_BUCKET_NAME = first_bucket_name = 'my-example-bucket'
second_bucket_name = 'my-other-bucket'
first_file_name = 'example.txt'
s3_resource = boto3.resource('s3')
# buckets outside us-east-1 need an explicit LocationConstraint
s3_resource.create_bucket(Bucket=YOUR_BUCKET_NAME, CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})
s3_resource.Bucket(first_bucket_name).upload_file(Filename=first_file_name, Key=first_file_name)
s3_resource.Object(second_bucket_name, first_file_name).delete()
|
[
"[email protected]"
] | |
6e5bfeee02160589220079caf6d6e3e3b76ab585
|
629090051b975b5814b4b48e2cb2c784fa6705e4
|
/pgsmo/objects/sequence/sequence.py
|
58b4198fa17dee038f943fed6dd518f8db8054e6
|
[
"MIT"
] |
permissive
|
microsoft/pgtoolsservice
|
3d3597821c7cae1d216436d4f8143929e2c8a82a
|
24a048226f7f30c775bbcbab462d499a465be5da
|
refs/heads/master
| 2023-08-28T12:55:47.817628 | 2023-08-25T22:47:53 | 2023-08-25T22:47:53 | 80,681,087 | 68 | 35 |
NOASSERTION
| 2023-09-13T21:46:55 | 2017-02-02T01:00:33 |
Python
|
UTF-8
|
Python
| false | false | 6,637 |
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from typing import Optional, List, Dict
from smo.common.node_object import NodeObject, NodeLazyPropertyCollection, NodeCollection
from smo.common.scripting_mixins import ScriptableCreate, ScriptableDelete, ScriptableUpdate
from pgsmo.objects.server import server as s # noqa
import smo.utils.templating as templating
class Sequence(NodeObject, ScriptableCreate, ScriptableDelete, ScriptableUpdate):
TEMPLATE_ROOT = templating.get_template_root(__file__, 'templates')
MACRO_ROOT = templating.get_template_root(__file__, 'macros')
GLOBAL_MACRO_ROOT = templating.get_template_root(__file__, '../global_macros')
@classmethod
def _from_node_query(cls, server: 's.Server', parent: NodeObject, **kwargs) -> 'Sequence':
"""
Creates a Sequence object from the result of a sequence node query
:param server: Server that owns the sequence
:param parent: Parent object of the sequence
:param kwargs: Row from a sequence node query
Kwargs:
oid int: Object ID of the sequence
name str: Name of the sequence
:return: A Sequence instance
"""
seq = cls(server, parent, kwargs['name'])
seq._oid = kwargs['oid']
seq._schema = kwargs['schema']
seq._scid = kwargs['schemaoid']
seq._is_system = kwargs['is_system']
return seq
def __init__(self, server: 's.Server', parent: NodeObject, name: str):
self._server = server
self._parent: Optional['NodeObject'] = parent
self._name: str = name
self._oid: Optional[int] = None
self._is_system: bool = False
self._child_collections: Dict[str, NodeCollection] = {}
self._property_collections: List[NodeLazyPropertyCollection] = []
        # Use _sequence_property_generator instead of the generic _property_generator
self._full_properties: NodeLazyPropertyCollection = self._register_property_collection(self._sequence_property_generator)
ScriptableCreate.__init__(self, self._template_root(server), self._macro_root(), server.version)
ScriptableDelete.__init__(self, self._template_root(server), self._macro_root(), server.version)
ScriptableUpdate.__init__(self, self._template_root(server), self._macro_root(), server.version)
self._schema: str = None
self._scid: int = None
self._def: dict = None
def _sequence_property_generator(self):
template_root = self._template_root(self._server)
# Setup the parameters for the query
template_vars = self.template_vars
# Render and execute the template
sql = templating.render_template(
templating.get_template_path(template_root, 'properties.sql', self._server.version),
self._macro_root(),
**template_vars
)
cols, rows = self._server.connection.execute_dict(sql)
if len(rows) > 0:
return rows[0]
# PROPERTIES ###########################################################
@property
def schema(self):
return self._schema
@property
def scid(self):
return self._scid
# -FULL OBJECT PROPERTIES ##############################################
@property
def cycled(self):
return self._full_properties.get("cycled", "")
@property
def increment(self):
return self._full_properties.get("increment", "")
@property
def start(self):
return self._full_properties.get("start", "")
@property
def current_value(self):
return self._full_properties.get("current_value", "")
@property
def minimum(self):
return self._full_properties.get("minimum", "")
@property
def maximum(self):
return self._full_properties.get("maximum", "")
@property
def cache(self):
return self._full_properties.get("cache", "")
@property
def cascade(self):
return self._full_properties.get("cascade", "")
@property
def seqowner(self):
return self._full_properties.get("seqowner", "")
@property
def comment(self):
return self._full_properties.get("comment", "")
# IMPLEMENTATION DETAILS ###############################################
@classmethod
def _macro_root(cls) -> List[str]:
return [cls.MACRO_ROOT, cls.GLOBAL_MACRO_ROOT]
@classmethod
def _template_root(cls, server: 's.Server') -> str:
return cls.TEMPLATE_ROOT
# HELPER METHODS ##################################################################
def _create_query_data(self):
""" Gives the data object for create query """
return {"data": {
"schema": self.schema,
"name": self.name,
"cycled": self.cycled,
"increment": self.increment,
"start": self.start,
"current_value": self.current_value,
"minimum": self.minimum,
"maximum": self.maximum,
"cache": self.cache
}}
def _update_query_data(self):
""" Gives the data object for update query """
return {
"data": {
"schema": self.schema,
"name": self.name,
"cycled": self.cycled,
"increment": self.increment,
"start": self.start,
"current_value": self.current_value,
"minimum": self.minimum,
"maximum": self.maximum,
"cache": self.cache
},
"o_data": {
"schema": self.schema,
"name": self.name,
"seqowner": self.seqowner,
"comment": self.comment
}
}
def _delete_query_data(self):
""" Gives the data object for update query """
return {
"data": {
"schema": self.schema,
"name": self.name,
"cycled": self.cycled,
"increment": self.increment,
"start": self.start,
"current_value": self.current_value,
"minimum": self.minimum,
"maximum": self.maximum,
"cache": self.cache
},
"cascade": self.cascade
}
|
[
"[email protected]"
] | |
ce9b8310408ce87449d91271d25e7ac0f517de18
|
ec2028b4aa5c75559bb5aced9bc3edfcd4b3ad10
|
/lists/urls.py
|
ebf149f51b5fd1777abc83ac451abf4722d959b5
|
[] |
no_license
|
macvatsal/Superlists
|
04a59b9c62673904df52f8e6828d8b582b58e82f
|
d0d14b9f1d6628bf19b1383027598772e3ace927
|
refs/heads/master
| 2022-12-20T14:53:58.964981 | 2020-10-01T07:01:44 | 2020-10-01T07:01:44 | 300,179,608 | 0 | 0 | null | 2020-10-01T07:00:34 | 2020-10-01T07:00:33 | null |
UTF-8
|
Python
| false | false | 845 |
py
|
"""superlists URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from lists import views
urlpatterns = [
    url(r'^(\d+)/$', views.view_list, name='view_list'),
    url(r'^new$', views.new_list, name='new_list'),
]
|
[
"[email protected]"
] | |
5133ca2f594fe1d6dbb95d3a5eabeedc56a82b41
|
114b9483d72bb6027d6f88a00fab43972177f2e9
|
/setup.py
|
942ef5db97bc494cc7d2110177e34d3a0916a319
|
[] |
no_license
|
dmose/Selenium-Proxy
|
f1326b054dbe7e24631f76c419d04d927364e856
|
e959332a08f4d9d56957ff8ae6370aabc4daec8d
|
refs/heads/master
| 2021-01-18T07:48:55.882296 | 2012-08-10T14:53:51 | 2012-08-10T14:53:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,217 |
py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from setuptools import setup
setup(name='SeleniumProxy',
version='0.1.0',
description='A shim that allows the use of Selenium Bindings with Marionette',
author='David Burns',
author_email='dburns at mozilladotcom',
url='https://github.com/AutomatedTester/Selenium-Proxy',
classifiers=['Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
'Programming Language :: Python'],
py_modules=['selenium_proxy'],
# TODO(David) add Marionette when that is up on Pypi
install_requires=["mozrunner==5.2","mozprofile==0.2", "selenium"]
)
|
[
"[email protected]"
] | |
826d4da8cc659583c4853fb4e2bd9de5bafa1d8d
|
3d12ddf42d8bf8503792506ff52e7f2e45ecbc76
|
/prep/evaluate.py
|
cdfe6218d7ec5552c36ac475e40ec33cf01889f6
|
[] |
no_license
|
Jinhojeong/Thermal_Display
|
5f4849410864dc2ddfa43e7d76ae8b51dd31abf8
|
16f64f373899db381b68c95e274363b9c1aec5fd
|
refs/heads/main
| 2023-06-13T03:15:44.526154 | 2021-07-08T01:29:04 | 2021-07-08T01:29:04 | 371,365,383 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,239 |
py
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from config import config
#'LSTM40_2hidden_tanhtanh_6432_201109'
date = 201130
model_name = '10_2LSTM4020_4hidden_tanhs_641286432_sgfon_201116'
model = tf.keras.models.load_model(
'./models/' + model_name
)
benchmark_name = 'rawdata_Al_400_2.npy'
benchmark_data = np.load(config.npy_dest_dir+benchmark_name)
if config.eval_windowing:
init_data = benchmark_data[0,1:].reshape(1,1,config.num_features)
benchmark_traj = np.repeat(init_data, config.n_steps, axis=1)
extp_size = 1
outputs_ = benchmark_data[0,1:3].reshape(-1,2)
else:
benchmark_traj = benchmark_data[:config.n_steps,1:].reshape(1,config.n_steps,config.num_features)
extp_size = config.n_steps
outputs_ = benchmark_data[:config.n_steps,1:3].reshape(-1,2)
timesteps = np.shape(benchmark_data)[0]
for idx, item in enumerate(config.scaler):
benchmark_traj[:,:,idx] = benchmark_traj[:,:,idx]/item
for t in tqdm(range(timesteps-extp_size)):
predictions = model.predict(
benchmark_traj
)[0]
pre_part = benchmark_traj[:,1:,:]
post_part = np.array([predictions[0], predictions[1], benchmark_data[t+extp_size,3]/config.scaler[2], benchmark_data[t+extp_size,4]/config.scaler[3], benchmark_data[t+extp_size,5]/config.scaler[4]]).reshape(1,1,np.shape(benchmark_traj)[2])
benchmark_traj = np.concatenate((pre_part, post_part), axis=1)
outputs_ = np.vstack((outputs_, predictions))
x_axis = benchmark_data[:,0][:timesteps]
true_hf = benchmark_data[:,1][:timesteps]
true_temp = benchmark_data[:,2][:timesteps]
est_hf = outputs_[:,0]*config.scaler[0]
est_temp = outputs_[:,1]*config.scaler[1]
plt.figure()
plt.subplot(211)
plt.plot(x_axis, true_hf, 'b-', label='True heat flux')
plt.plot(x_axis, est_hf, 'g--', label='Estimated heat flux')
plt.legend()
plt.subplot(212)
plt.plot(x_axis, true_temp, 'r-', label='True temperature')
plt.plot(x_axis, est_temp, 'k--', label='Estimated temperature')
plt.legend()
plt.savefig('../figure/{0}/{1}_{2}_extp{3}.png'.format(date, benchmark_name[:-4], model_name, extp_size), dpi=300)
plt.show()
|
[
"[email protected]"
] | |
cf524498c3c354af1507470178228b9d04709912
|
9ab59fd67ef2ab343cc3036d2f9d0ad68ab887c5
|
/Python/word order.py
|
16a8fbd52ea859f51078c0bf030ace2c343c62d0
|
[] |
no_license
|
Arifuzzaman-Munaf/HackerRank
|
ff094dfc6d8a618ab9c54db4460c5190c81583bf
|
5fd9b6ffa3caf3afddba5d6f35978becf989d3b8
|
refs/heads/main
| 2023-06-16T01:09:11.371654 | 2021-07-14T15:28:53 | 2021-07-14T15:28:53 | 385,230,007 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 278 |
py
|
n = int(input())
distinct = {}
for i in range(n):
word = input()
if word not in distinct :
distinct[word] = 1
else :
distinct[word] = distinct[word] + 1
print(len(distinct))
for i in distinct.values():
    print(i, end=' ')
|
[
"[email protected]"
] | |
048383c825e457044d8a854820e5a6f4faa3051e
|
76b49bab52dc088828816bf00605471af4536dbc
|
/cliente/tags/releases/es/rel-1.0.0/password/formulario.py
|
b9161472b2b974dd775f15370ce37cd505855af9
|
[] |
no_license
|
mboscovich/Kerberus-Control-Parental
|
a5352d53c588854805f124febf578345f3033ed2
|
5025078af9ebdcf7b3feb4b0d5efa6cbde6e6a37
|
refs/heads/master
| 2020-12-24T16:58:51.288519 | 2015-05-25T15:18:25 | 2015-05-25T15:18:25 | 23,494,363 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,546 |
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'formulario.ui'
#
# Created: Sat Dec 24 18:02:56 2011
# by: PyQt4 UI code generator 4.8.5
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.setWindowModality(QtCore.Qt.ApplicationModal)
Form.resize(607, 343)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
Form.setSizePolicy(sizePolicy)
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Password de administrador", None, QtGui.QApplication.UnicodeUTF8))
self.verticalLayoutWidget = QtGui.QWidget(Form)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 20, 581, 311))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Verdana"))
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setText(QtGui.QApplication.translate("Form", "Configure la password de Administrador.", None, QtGui.QApplication.UnicodeUTF8))
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
self.label_2 = QtGui.QLabel(self.verticalLayoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Verdana"))
font.setPointSize(10)
self.label_2.setFont(font)
self.label_2.setText(QtGui.QApplication.translate("Form", "Esta password le permitirá deshabilitar temporalmente el filtrado de\n"
"kerberus y a su vez, le será requerida para desinstalarlo.\n"
"Es muy recomendable que la recuerde.", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setScaledContents(False)
self.label_2.setWordWrap(False)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.verticalLayout.addWidget(self.label_2)
self.label_passwordActual = QtGui.QLabel(self.verticalLayoutWidget)
self.label_passwordActual.setEnabled(True)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Verdana"))
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_passwordActual.setFont(font)
self.label_passwordActual.setText(QtGui.QApplication.translate("Form", "Password actual", None, QtGui.QApplication.UnicodeUTF8))
self.label_passwordActual.setObjectName(_fromUtf8("label_passwordActual"))
self.verticalLayout.addWidget(self.label_passwordActual)
self.password_actual = QtGui.QLineEdit(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Verdana"))
font.setPointSize(10)
self.password_actual.setFont(font)
self.password_actual.setObjectName(_fromUtf8("password_actual"))
self.verticalLayout.addWidget(self.password_actual)
self.label_3 = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Verdana"))
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setText(QtGui.QApplication.translate("Form", "Password", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.verticalLayout.addWidget(self.label_3)
self.password1 = QtGui.QLineEdit(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Verdana"))
font.setPointSize(10)
self.password1.setFont(font)
self.password1.setEchoMode(QtGui.QLineEdit.Password)
self.password1.setObjectName(_fromUtf8("password1"))
self.verticalLayout.addWidget(self.password1)
self.label_4 = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Verdana"))
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setText(QtGui.QApplication.translate("Form", "Vuelva a ingresar la password", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.verticalLayout.addWidget(self.label_4)
self.password2 = QtGui.QLineEdit(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Verdana"))
font.setPointSize(10)
self.password2.setFont(font)
self.password2.setEchoMode(QtGui.QLineEdit.Password)
self.password2.setObjectName(_fromUtf8("password2"))
self.verticalLayout.addWidget(self.password2)
self.boton = QtGui.QPushButton(self.verticalLayoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.boton.sizePolicy().hasHeightForWidth())
self.boton.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Verdana"))
font.setPointSize(10)
self.boton.setFont(font)
self.boton.setText(QtGui.QApplication.translate("Form", "Aceptar", None, QtGui.QApplication.UnicodeUTF8))
self.boton.setObjectName(_fromUtf8("boton"))
self.verticalLayout.addWidget(self.boton)
self.label_3.setBuddy(self.password1)
self.label_4.setBuddy(self.password2)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
pass
|
[
"[email protected]"
] | |
b010903e25916100bf14853c50f2dc3195a2f161
|
c773ff87e9b0f5e444332b48850f63beee033066
|
/tests/test_pipeline.py
|
25b00e475021eacce1654c771166a594e14fc529
|
[
"Apache-2.0"
] |
permissive
|
DavydovDmitry/absa
|
f3a3ecd2fbb520c004d98251d22411301ec9aa45
|
d9da45c2070c00de9ec379dd59f43ff4b5116267
|
refs/heads/master
| 2022-12-08T12:27:47.921273 | 2020-09-05T23:28:39 | 2020-09-05T23:28:39 | 293,034,019 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,686 |
py
|
import os
from absa.utils.embedding import Embeddings
from absa.utils.nlp import NLPPipeline
from absa.utils.dump import make_dump, load_dump
from absa.io.input.semeval2016 import from_xml
from absa.preprocess.dependency import dep_parse_reviews
from . import SemEval2016_filename, test_dumps_path, RAW_POSTFIX, DEP_POSTFIX, SemEval2016_pathway
def test_xml_parsing_basic():
vocabulary = Embeddings.vocabulary
reviews = from_xml(vocabulary=vocabulary, pathway=SemEval2016_pathway)
make_dump(obj=reviews,
pathway=os.path.join(test_dumps_path, SemEval2016_filename + RAW_POSTFIX))
def test_dep_parsing_basic():
nlp = NLPPipeline.nlp
reviews = load_dump(pathway=os.path.join(test_dumps_path, SemEval2016_filename +
RAW_POSTFIX))
parsed_reviews = dep_parse_reviews(texts=reviews, nlp=nlp)
make_dump(obj=parsed_reviews,
pathway=os.path.join(test_dumps_path, SemEval2016_filename + DEP_POSTFIX))
def test_dep_parsing():
reviews = load_dump(pathway=os.path.join(test_dumps_path, SemEval2016_filename +
RAW_POSTFIX))
parsed_reviews = load_dump(pathway=os.path.join(test_dumps_path, SemEval2016_filename +
DEP_POSTFIX))
for raw_review, parsed_review in zip(reviews, parsed_reviews):
for raw_sentence, parsed_sentence in zip(raw_review, parsed_review):
if not parsed_sentence.is_opinions_contain_unknown():
parsed_sentence = parsed_sentence.to_specified_sentence(text=raw_sentence.text)
assert raw_sentence == parsed_sentence
|
[
"[email protected]"
] | |
b36eebd7908fc5bfc69460c3026340ba9b970673
|
79bd286af3547115f59944f6aee24d0a8d45db04
|
/ex/sml231n/W1516_A3/cs231n/classifiers/rnn.py
|
c65d423806ea6927f22c6dd18575e2bd71f1bb18
|
[] |
no_license
|
rxl194/cpcv102
|
aa003c15cbc25d86582b7804af55a28322403f4f
|
be200650701d34087afabf3cbe97e1f1ea5c383f
|
refs/heads/master
| 2023-02-25T15:01:54.214787 | 2022-09-24T23:16:23 | 2022-09-24T23:16:23 | 82,126,640 | 0 | 0 | null | 2023-02-10T20:22:41 | 2017-02-16T01:47:21 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 12,694 |
py
|
import numpy as np
from cs231n.layers import *
from cs231n.rnn_layers import *
class CaptioningRNN(object):
"""
A CaptioningRNN produces captions from image features using a recurrent
neural network.
The RNN receives input vectors of size D, has a vocab size of V, works on
sequences of length T, has an RNN hidden dimension of H, uses word vectors
of dimension W, and operates on minibatches of size N.
Note that we don't use any regularization for the CaptioningRNN.
"""
def __init__(self, word_to_idx, input_dim=512, wordvec_dim=128,
hidden_dim=128, cell_type='rnn', dtype=np.float32):
"""
Construct a new CaptioningRNN instance.
Inputs:
- word_to_idx: A dictionary giving the vocabulary. It contains V entries,
and maps each string to a unique integer in the range [0, V).
- input_dim: Dimension D of input image feature vectors.
- wordvec_dim: Dimension W of word vectors.
- hidden_dim: Dimension H for the hidden state of the RNN.
- cell_type: What type of RNN to use; either 'rnn' or 'lstm'.
- dtype: numpy datatype to use; use float32 for training and float64 for
numeric gradient checking.
"""
if cell_type not in {'rnn', 'lstm'}:
raise ValueError('Invalid cell_type "%s"' % cell_type)
self.cell_type = cell_type
self.dtype = dtype
self.word_to_idx = word_to_idx
self.idx_to_word = {i: w for w, i in word_to_idx.iteritems()}
self.params = {}
vocab_size = len(word_to_idx)
self._null = word_to_idx['<NULL>']
self._start = word_to_idx.get('<START>', None)
self._end = word_to_idx.get('<END>', None)
# Initialize word vectors
self.params['W_embed'] = np.random.randn(vocab_size, wordvec_dim)
self.params['W_embed'] /= 100
# Initialize CNN -> hidden state projection parameters
self.params['W_proj'] = np.random.randn(input_dim, hidden_dim)
self.params['W_proj'] /= np.sqrt(input_dim)
self.params['b_proj'] = np.zeros(hidden_dim)
# Initialize parameters for the RNN
dim_mul = {'lstm': 4, 'rnn': 1}[cell_type]
self.params['Wx'] = np.random.randn(wordvec_dim, dim_mul * hidden_dim)
self.params['Wx'] /= np.sqrt(wordvec_dim)
self.params['Wh'] = np.random.randn(hidden_dim, dim_mul * hidden_dim)
self.params['Wh'] /= np.sqrt(hidden_dim)
self.params['b'] = np.zeros(dim_mul * hidden_dim)
# Initialize output to vocab weights
self.params['W_vocab'] = np.random.randn(hidden_dim, vocab_size)
self.params['W_vocab'] /= np.sqrt(hidden_dim)
self.params['b_vocab'] = np.zeros(vocab_size)
# Cast parameters to correct dtype
for k, v in self.params.iteritems():
self.params[k] = v.astype(self.dtype)
def loss(self, features, captions):
"""
Compute training-time loss for the RNN. We input image features and
ground-truth captions for those images, and use an RNN (or LSTM) to compute
loss and gradients on all parameters.
Inputs:
- features: Input image features, of shape (N, D)
- captions: Ground-truth captions; an integer array of shape (N, T) where
each element is in the range 0 <= y[i, t] < V
Returns a tuple of:
- loss: Scalar loss
- grads: Dictionary of gradients parallel to self.params
"""
# Cut captions into two pieces: captions_in has everything but the last word
# and will be input to the RNN; captions_out has everything but the first
# word and this is what we will expect the RNN to generate. These are offset
# by one relative to each other because the RNN should produce word (t+1)
# after receiving word t. The first element of captions_in will be the START
# token, and the first element of captions_out will be the first word.
captions_in = captions[:, :-1]
captions_out = captions[:, 1:]
# You'll need this
mask = (captions_out != self._null)
# Weight and bias for the affine transform from image features to initial
# hidden state
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
# Word embedding matrix
W_embed = self.params['W_embed']
# Input-to-hidden, hidden-to-hidden, and biases for the RNN
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
# Weight and bias for the hidden-to-vocab transformation.
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
loss, grads = 0.0, {}
############################################################################
# TODO: Implement the forward and backward passes for the CaptioningRNN. #
# In the forward pass you will need to do the following: #
# (1) Use an affine transformation to compute the initial hidden state #
# from the image features. This should produce an array of shape (N, H)#
# (2) Use a word embedding layer to transform the words in captions_in #
# from indices to vectors, giving an array of shape (N, T, W). #
# (3) Use either a vanilla RNN or LSTM (depending on self.cell_type) to #
# process the sequence of input word vectors and produce hidden state #
# vectors for all timesteps, producing an array of shape (N, T, H). #
# (4) Use a (temporal) affine transformation to compute scores over the #
# vocabulary at every timestep using the hidden states, giving an #
# array of shape (N, T, V). #
# (5) Use (temporal) softmax to compute loss using captions_out, ignoring #
# the points where the output word is <NULL> using the mask above. #
# #
# In the backward pass you will need to compute the gradient of the loss #
# with respect to all model parameters. Use the loss and grads variables #
# defined above to store loss and gradients; grads[k] should give the #
# gradients for self.params[k]. #
############################################################################
# Forward pass
# Step 1
h0 = np.dot(features, W_proj) + b_proj
# Step 2
x, cache_embedding = word_embedding_forward(captions_in, W_embed)
# Step 3
if self.cell_type == 'rnn':
h, cache_rnn = rnn_forward(x, h0, Wx, Wh, b)
elif self.cell_type == 'lstm':
h, cache_rnn = lstm_forward(x, h0, Wx, Wh, b)
else:
raise ValueError('%s not implemented' % (self.cell_type))
# Step 4
scores, cache_scores = temporal_affine_forward(h, W_vocab, b_vocab)
# Step 5
loss, dscores = temporal_softmax_loss(
scores, captions_out, mask, verbose=False)
# Backward pass
grads = dict.fromkeys(self.params)
        # Backward into step 4
dh, dW_vocab, db_vocab = temporal_affine_backward(
dscores, cache_scores)
# Backward into step 3
if self.cell_type == 'rnn':
dx, dh0, dWx, dWh, db = rnn_backward(dh, cache_rnn)
elif self.cell_type == 'lstm':
dx, dh0, dWx, dWh, db = lstm_backward(dh, cache_rnn)
else:
raise ValueError('%s not implemented' % (self.cell_type))
# Backward into step 2
dW_embed = word_embedding_backward(dx, cache_embedding)
# Backward into step 1
dW_proj = np.dot(features.T, dh0)
db_proj = np.sum(dh0, axis=0)
        # Gather everything in the grads dict
grads['W_proj'] = dW_proj
grads['b_proj'] = db_proj
grads['W_embed'] = dW_embed
grads['Wx'] = dWx
grads['Wh'] = dWh
grads['b'] = db
grads['W_vocab'] = dW_vocab
grads['b_vocab'] = db_vocab
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
def sample(self, features, max_length=30):
"""
Run a test-time forward pass for the model, sampling captions for input
feature vectors.
At each timestep, we embed the current word, pass it and the previous hidden
state to the RNN to get the next hidden state, use the hidden state to get
scores for all vocab words, and choose the word with the highest score as
the next word. The initial hidden state is computed by applying an affine
transform to the input image features, and the initial word is the <START>
token.
For LSTMs you will also have to keep track of the cell state; in that case
the initial cell state should be zero.
Inputs:
- features: Array of input image features of shape (N, D).
- max_length: Maximum length T of generated captions.
Returns:
- captions: Array of shape (N, max_length) giving sampled captions,
where each element is an integer in the range [0, V). The first element
of captions should be the first sampled word, not the <START> token.
"""
N = features.shape[0]
captions = self._null * np.ones((N, max_length), dtype=np.int32)
# Unpack parameters
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
W_embed = self.params['W_embed']
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
###########################################################################
# TODO: Implement test-time sampling for the model. You will need to #
# initialize the hidden state of the RNN by applying the learned affine #
# transform to the input image features. The first word that you feed to #
# the RNN should be the <START> token; its value is stored in the #
        # variable self._start. At each timestep you will need to:               #
# (1) Embed the previous word using the learned word embeddings #
# (2) Make an RNN step using the previous hidden state and the embedded #
# current word to get the next hidden state. #
# (3) Apply the learned affine transformation to the next hidden state to #
# get scores for all words in the vocabulary #
# (4) Select the word with the highest score as the next word, writing it #
# to the appropriate slot in the captions variable #
# #
# For simplicity, you do not need to stop generating after an <END> token #
# is sampled, but you can if you want to. #
# #
# HINT: You will not be able to use the rnn_forward or lstm_forward #
# functions; you'll need to call rnn_step_forward or lstm_step_forward in #
# a loop. #
###########################################################################
# Get first hidden state
h0 = np.dot(features, W_proj) + b_proj
captions[:, 0] = self._start
prev_h = h0 # Previous hidden state
prev_c = np.zeros_like(h0) # Previous cell state
# Current word (start word)
capt = self._start * np.ones((N, 1), dtype=np.int32)
for t in xrange(max_length): # Let's go over the sequence
word_embed, _ = word_embedding_forward(
capt, W_embed) # Embedded current word
if self.cell_type == 'rnn':
# Run a step of rnn
h, _ = rnn_step_forward(np.squeeze(
word_embed), prev_h, Wx, Wh, b)
elif self.cell_type == 'lstm':
# Run a step of lstm
h, c, _ = lstm_step_forward(np.squeeze(
word_embed), prev_h, prev_c, Wx, Wh, b)
else:
raise ValueError('%s not implemented' % (self.cell_type))
# Compute the score distrib over the dictionary
scores, _ = temporal_affine_forward(
h[:, np.newaxis, :], W_vocab, b_vocab)
            # Squeeze the unnecessary dimension and get the best word idx
idx_best = np.squeeze(np.argmax(scores, axis=2))
# Put it in the captions
captions[:, t] = idx_best
# Update the hidden state, the cell state (if lstm) and the current
# word
prev_h = h
if self.cell_type == 'lstm':
prev_c = c
capt = captions[:, t]
############################################################################
# END OF YOUR CODE #
############################################################################
return captions
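# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the assignment code): a minimal
# smoke test, assuming the cs231n layer implementations imported above are on
# the path. The vocabulary, dimensions and random data below are made up.
#
#   word_to_idx = {'<NULL>': 0, '<START>': 1, '<END>': 2, 'cat': 3, 'sat': 4}
#   model = CaptioningRNN(word_to_idx, input_dim=8, wordvec_dim=4,
#                         hidden_dim=6, cell_type='lstm')
#   features = np.random.randn(2, 8)                  # N=2 images, D=8 features
#   captions = np.random.randint(5, size=(2, 7))      # N=2 captions, T=7 words
#   loss, grads = model.loss(features, captions)      # training-time loss/grads
#   samples = model.sample(features, max_length=10)   # test-time captions
# ---------------------------------------------------------------------------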
|
[
"[email protected]"
] | |
00a381aeb7167105dd238935079539892169d20b
|
903cbb9e85a4a236086b07a9a76413431955359e
|
/2015/Day22/day22.py
|
667f344c9a0fe9346b8699e997820d53f6705f4a
|
[] |
no_license
|
mjmeli/AdventOfCode
|
80a07cd18404bf315a6dd19f384f178ec47cf8b1
|
5f7baedbb6bec4e53bc7e94a5c268b79d70cd159
|
refs/heads/master
| 2021-01-10T06:46:08.010431 | 2016-12-02T16:56:32 | 2016-12-02T16:56:32 | 47,277,975 | 0 | 0 | null | 2015-12-07T00:08:10 | 2015-12-02T17:48:20 |
C
|
UTF-8
|
Python
| false | false | 3,618 |
py
|
import sys, random, time, copy
from spells import *
from players import *
from enum import Enum
""" Possible results from each turn """
class TurnResult(Enum):
none = 0
player_cant_cast = 1
player_dead = 2
boss_dead = 3
""" Execute a Player's turn """
def playerTurn(player, boss, spell, hardMode):
# If in hard mode (part 2), reduce HP by 1 at beginning
if hardMode:
player.health = player.health - 1
if player.isDead():
return TurnResult.player_dead
# Cast all effects at the beginning of the turn
player.applyEffects(boss)
# Ensure the player can cast; if so, cast the spell
if player.canCast(spell):
player.cast(spell, boss)
else:
return TurnResult.player_cant_cast
# Check if this cast has killed the boss
if boss.isDead():
return TurnResult.boss_dead
# Remove any effects that expire in this turn
player.removeEffects()
return TurnResult.none
""" Execute a Boss' turn """
def bossTurn(player, boss):
# Cast all effects at the beginning of the turn and see if they kill the boss
player.applyEffects(boss)
if boss.isDead():
return TurnResult.boss_dead
# Have the boss attack the player and see if the player is dead
boss.attack(player)
if player.isDead():
        return TurnResult.player_dead
    return TurnResult.none
"""
Driving code for solving the problem. First, read command line input for
the starting conditions. Then, simulate parts 1 and 2 by randomly carrying
out a battle. In each player's turn, a random spell is chosen and cast.
When the boss is killed, the amount of mana spent is tallied and compared
to the running minimum.
"""
# Read command line input for starting conditions
playerHP = int(sys.argv[1])
playerMana = int(sys.argv[2])
bossHP = int(sys.argv[3])
bossDamage = int(sys.argv[4])
# Randomly simulate the battle many times to converge on the solution
numIterations = 1000000
spells = [MagicMissile(), Drain(), Shield(), Poison(), Recharge()]
for j in range(1,3):
minMana = 9999
hardMode = (j == 2)
for i in range(numIterations):
completion = i / numIterations
print("Progress: {:2.2%}".format(completion), end="\r")
# Reset players to starting conditions
player = Player(playerHP, playerMana)
boss = Boss(bossHP, bossDamage)
# Play until a result ends the game
while True:
# Choose a random spell to cast
choice = copy.deepcopy(random.choice(spells))
# Player goes first. Check if the boss is dead or mana runs out.
playerRes = playerTurn(player, boss, choice, hardMode)
if playerRes == TurnResult.boss_dead and player.manaSpent < minMana:
minMana = player.manaSpent # update minMana if boss dies
break
elif playerRes == TurnResult.player_cant_cast or playerRes == TurnResult.player_dead:
break # player loses if can't cast or dead
# Boss goes next. Check if the boss or player dies.
bossRes = bossTurn(player, boss)
            if bossRes == TurnResult.boss_dead and player.manaSpent < minMana:
minMana = player.manaSpent # update minMana if boss dies
break
elif bossRes == TurnResult.player_dead:
break # player loses if dies
# Stop early if current mana spent is larger than minimum
if (player.manaSpent > minMana):
break
# Print results
print("Part %d: Minimum mana is %d." % (j, minMana))
|
[
"[email protected]"
] | |
32a9080820f79c628edcd8a11fb345d860e9800a
|
28b1ed1359bd9539f9a15b64663652ec4eb3f284
|
/Week_12/matplotlib_example.py
|
301f43223dac2683ae8891d160b23ec806636397
|
[] |
no_license
|
achapkowski/Python_for_GIS_and_RS
|
5fb68cbe1d46f28487e2a41099cf42b942587afa
|
9b5d8da6b7bdbbfaa2f45b20d8704c317a86e785
|
refs/heads/master
| 2021-01-20T02:12:01.785780 | 2017-04-24T22:44:08 | 2017-04-24T22:44:08 | 89,385,947 | 1 | 0 | null | 2017-04-25T17:02:35 | 2017-04-25T17:02:35 | null |
UTF-8
|
Python
| false | false | 1,115 |
py
|
import xlrd
file_and_path = r"C:\Users\greg6750\Documents\IPython Notebooks\Python_for_GIS_and_RS\Week_12\SENZA_0_SUNAA_0_CORN.xlsx"
print("Reading Workbook")
workbook = xlrd.open_workbook(file_and_path)
worksheet = workbook.sheet_by_index(0)
freq = []
g = []
t = []
print("Creating Arrays")
for row in range(worksheet.nrows):
if row>0:
#Frequency
freq_cell = worksheet.cell(row,0)
freq.append(freq_cell.value)
GRR_cell = worksheet.cell(row,8)
g.append(GRR_cell.value)
TOA_cell = worksheet.cell(row,14)
t.append(TOA_cell.value)
#For plotting, import matplotlib
from matplotlib import pyplot as plt
#import matplotlib.pyplot as plt
##Basic single plot
#plt.plot(freq, g)
#plt.show()
####Multiple plots
##plt.subplot(211)
###plt.figure(1)
##plt.plot(freq, g, 'b-o')
##plt.subplot(2, 1, 2)
##plt.plot(freq, t, 'r-o')
##plt.show()
##Tying numpy and matplotlib together
import numpy as np
gaussian = np.random.normal(0, 1, 100000)
plt.hist(gaussian, bins=100)
#print "Mean: %f Standard Deviation: %f" % (gaussian.mean(), gaussian.std())
plt.show()
|
[
"[email protected]"
] | |
d86627e50f23f2b9e75214dbd1da6239295523e1
|
4e7faaa81c7d46e3a3f98fe8ae3686809485a5f5
|
/main.spec
|
a2b2a2f4ecdca508da6f590f7771e7bc466aa96c
|
[] |
no_license
|
sayntan4u/phoenix
|
7122f32f9ba8c6e8dbe6507ef3ad0f7806898f2d
|
438903301cc057dc425f04cd120ae3f373e58e30
|
refs/heads/main
| 2023-07-11T07:40:30.676620 | 2021-08-22T16:43:52 | 2021-08-22T16:43:52 | 398,310,782 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,213 |
spec
|
# -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['main.py'],
pathex=['C:\\Users\\Sayantan\\Desktop\\test\\generated_code'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
[],
exclude_binaries=True,
name='main',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
console=True,
disable_windowed_traceback=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None )
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
upx_exclude=[],
name='main')
|
[
"[email protected]"
] | |
cc2c2fe551e045dfe76ee595c5264c0e7f1b2859
|
1fc4a82d425568b983faaf15dd45caff43d557e5
|
/src/pythongraph/pygraph/algorithms/heuristics/Euclidean.py
|
e4f32f024ab19f0dfdbf620fb78b56d5c7e43179
|
[
"MIT"
] |
permissive
|
rajeshhcu/TemporalPlanning
|
72464adb39fba88c7ed7827c5d1e659892477857
|
caa87d40e5d427c48b2a3442d1d0235f4864ba9c
|
refs/heads/master
| 2020-12-31T07:33:53.185920 | 2017-03-29T02:42:08 | 2017-03-29T02:42:08 | 86,528,906 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,469 |
py
|
# Copyright (c) 2008-2009 Pedro Matiello <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
A* heuristic for euclidean graphs.
"""
# Imports
import warnings
class euclidean(object):
"""
A* heuristic for Euclidean graphs.
This heuristic has three requirements:
1. All nodes should have the attribute 'position';
2. The weight of all edges should be the euclidean distance between the nodes it links;
3. The C{optimize()} method should be called before the heuristic search.
A small example for clarification:
>>> g = graph.graph()
>>> g.add_nodes(['A','B','C'])
>>> g.add_node_attribute('A', ('position',(0,0)))
>>> g.add_node_attribute('B', ('position',(1,1)))
>>> g.add_node_attribute('C', ('position',(0,2)))
>>> g.add_edge('A','B', wt=2)
>>> g.add_edge('B','C', wt=2)
>>> g.add_edge('A','C', wt=4)
>>> h = graph.heuristics.euclidean()
>>> h.optimize(g)
>>> g.heuristic_search('A', 'C', h)
"""
def __init__(self):
"""
Initialize the heuristic object.
"""
self.distances = {}
def optimize(self, graph):
"""
        Build a dictionary mapping each pair of nodes to a number (the squared
        euclidean distance between them, computed from their 'position' attributes).
@type graph: graph
@param graph: Graph.
"""
for start in graph.nodes():
for end in graph.nodes():
for each in graph.node_attributes(start):
if (each[0] == 'position'):
start_attr = each[1]
break
for each in graph.node_attributes(end):
if (each[0] == 'position'):
end_attr = each[1]
break
dist = 0
for i in xrange(len(start_attr)):
dist = dist + (float(start_attr[i]) - float(end_attr[i]))**2
self.distances[(start,end)] = dist
def __call__(self, start, end):
"""
Estimate how far start is from end.
@type start: node
@param start: Start node.
@type end: node
@param end: End node.
"""
assert len(self.distances.keys()) > 0, "You need to optimize this heuristic for your graph before it can be used to estimate."
return self.distances[(start,end)]
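# Worked example (hypothetical): for two nodes placed at (0, 0) and (1, 2),
# optimize() stores dist = (0 - 1)**2 + (0 - 2)**2 = 5 for that pair, and the
# heuristic call then just looks up that precomputed squared distance.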
|
[
"[email protected]"
] | |
1d2727bcd9c1d0f2832ab7c4d16b0e3365b13eaa
|
5a28841b8598ad3bfc9aac824a2d556278f006aa
|
/rgbd_seg/models/encoders/enhance_modules/registry.py
|
771c2051c5d35fef2e2cab50b2a5a6a802d6120a
|
[
"Apache-2.0"
] |
permissive
|
thanyu-hub/ShapeConv
|
ac60b332c89ea7c51f6d1e3fd310ea200123e9a2
|
25bee65af4952c10ed4e24f6556765654e56575f
|
refs/heads/master
| 2023-07-15T02:02:49.077406 | 2021-08-30T11:13:21 | 2021-08-30T11:13:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 82 |
py
|
from rgbd_seg.utils import Registry
ENHANCE_MODULES = Registry('enhance_module')
|
[
"[email protected]"
] | |
c674b0a58e029302461e9515a02b8d8294b99a98
|
e0045eec29aab56212c00f9293a21eb3b4b9fe53
|
/website_sale/models/product.py
|
a5439d9d8d7175fb044e2fceb1e9f39ddba127a7
|
[] |
no_license
|
tamam001/ALWAFI_P1
|
a3a9268081b9befc668a5f51c29ce5119434cc21
|
402ea8687c607fbcb5ba762c2020ebc4ee98e705
|
refs/heads/master
| 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 20,408 |
py
|
# -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, tools, _
from odoo.addons import decimal_precision as dp
from odoo.addons.website.models import ir_http
from odoo.tools.translate import html_translate
class ProductStyle(models.Model):
_name = "product.style"
_description = 'Product Style'
name = fields.Char(string='Style Name', required=True)
html_class = fields.Char(string='HTML Classes')
class ProductPricelist(models.Model):
_inherit = "product.pricelist"
def _default_website(self):
""" Find the first company's website, if there is one. """
company_id = self.env.user.company_id.id
domain = [('company_id', '=', company_id)]
return self.env['website'].search(domain, limit=1)
website_id = fields.Many2one('website', string="Website", default=_default_website)
code = fields.Char(string='E-commerce Promotional Code', groups="base.group_user")
selectable = fields.Boolean(help="Allow the end user to choose this price list")
def clear_cache(self):
# website._get_pl_partner_order() is cached to avoid to recompute at each request the
# list of available pricelists. So, we need to invalidate the cache when
# we change the config of website price list to force to recompute.
website = self.env['website']
website._get_pl_partner_order.clear_cache(website)
@api.model
def create(self, data):
res = super(ProductPricelist, self).create(data)
self.clear_cache()
return res
@api.multi
def write(self, data):
res = super(ProductPricelist, self).write(data)
self.clear_cache()
return res
@api.multi
def unlink(self):
res = super(ProductPricelist, self).unlink()
self.clear_cache()
return res
def _get_partner_pricelist_multi_search_domain_hook(self):
domain = super(ProductPricelist, self)._get_partner_pricelist_multi_search_domain_hook()
website = ir_http.get_request_website()
if website:
domain += self._get_website_pricelists_domain(website.id)
return domain
def _get_partner_pricelist_multi_filter_hook(self):
res = super(ProductPricelist, self)._get_partner_pricelist_multi_filter_hook()
website = ir_http.get_request_website()
if website:
res = res.filtered(lambda pl: pl._is_available_on_website(website.id))
return res
@api.multi
def _is_available_on_website(self, website_id):
""" To be able to be used on a website, a pricelist should either:
- Have its `website_id` set to current website (specific pricelist).
- Have no `website_id` set and should be `selectable` (generic pricelist)
or should have a `code` (generic promotion).
Note: A pricelist without a website_id, not selectable and without a
code is a backend pricelist.
Change in this method should be reflected in `_get_website_pricelists_domain`.
"""
self.ensure_one()
return self.website_id.id == website_id or (not self.website_id and (self.selectable or self.sudo().code))
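        # Illustrative outcomes of the rule above for website_id=1
        # (hypothetical pricelist records):
        #   website_id=1                              -> available (specific)
        #   website_id=2                              -> not available
        #   no website_id, selectable=True            -> available (generic)
        #   no website_id, code='PROMO'               -> available (promotion)
        #   no website_id, not selectable, no code    -> backend only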
def _get_website_pricelists_domain(self, website_id):
''' Check above `_is_available_on_website` for explanation.
Change in this method should be reflected in `_is_available_on_website`.
'''
return [
'|', ('website_id', '=', website_id),
'&', ('website_id', '=', False),
'|', ('selectable', '=', True), ('code', '!=', False),
]
def _get_partner_pricelist_multi(self, partner_ids, company_id=None):
''' If `property_product_pricelist` is read from website, we should use
the website's company and not the user's one.
Passing a `company_id` to super will avoid using the current user's
company.
'''
website = ir_http.get_request_website()
if not company_id and website:
company_id = website.company_id.id
return super(ProductPricelist, self)._get_partner_pricelist_multi(partner_ids, company_id)
class ProductPublicCategory(models.Model):
_name = "product.public.category"
_inherit = ["website.seo.metadata", "website.multi.mixin"]
_description = "Website Product Category"
_order = "sequence, name"
name = fields.Char(required=True, translate=True)
parent_id = fields.Many2one('product.public.category', string='Parent Category', index=True)
child_id = fields.One2many('product.public.category', 'parent_id', string='Children Categories')
sequence = fields.Integer(help="Gives the sequence order when displaying a list of product categories.")
# NOTE: there is no 'default image', because by default we don't show
# thumbnails for categories. However if we have a thumbnail for at least one
# category, then we display a default image on the other, so that the
# buttons have consistent styling.
# In this case, the default image is set by the js code.
image = fields.Binary(attachment=True, help="This field holds the image used as image for the category, limited to 1024x1024px.")
image_medium = fields.Binary(string='Medium-sized image', attachment=True,
help="Medium-sized image of the category. It is automatically "
"resized as a 128x128px image, with aspect ratio preserved. "
"Use this field in form views or some kanban views.")
image_small = fields.Binary(string='Small-sized image', attachment=True,
help="Small-sized image of the category. It is automatically "
"resized as a 64x64px image, with aspect ratio preserved. "
"Use this field anywhere a small image is required.")
@api.model
def create(self, vals):
tools.image_resize_images(vals)
return super(ProductPublicCategory, self).create(vals)
@api.multi
def write(self, vals):
tools.image_resize_images(vals)
return super(ProductPublicCategory, self).write(vals)
@api.constrains('parent_id')
def check_parent_id(self):
if not self._check_recursion():
raise ValueError(_('Error ! You cannot create recursive categories.'))
@api.multi
def name_get(self):
res = []
for category in self:
names = [category.name]
parent_category = category.parent_id
while parent_category:
names.append(parent_category.name)
parent_category = parent_category.parent_id
res.append((category.id, ' / '.join(reversed(names))))
return res
class ProductTemplate(models.Model):
_inherit = ["product.template", "website.seo.metadata", 'website.published.multi.mixin', 'rating.mixin']
_name = 'product.template'
_mail_post_access = 'read'
website_description = fields.Html('Description for the website', sanitize_attributes=False, translate=html_translate)
alternative_product_ids = fields.Many2many('product.template', 'product_alternative_rel', 'src_id', 'dest_id',
                                               string='Alternative Products', help='Suggest alternatives to your customer '
                                               '(upsell strategy). These products show up on the product page.')
accessory_product_ids = fields.Many2many('product.product', 'product_accessory_rel', 'src_id', 'dest_id',
                                             string='Accessory Products', help='Accessories show up when the customer '
                                             'reviews the cart before payment (cross-sell strategy).')
website_size_x = fields.Integer('Size X', default=1)
website_size_y = fields.Integer('Size Y', default=1)
website_style_ids = fields.Many2many('product.style', string='Styles')
website_sequence = fields.Integer('Website Sequence', help="Determine the display order in the Website E-commerce",
default=lambda self: self._default_website_sequence())
public_categ_ids = fields.Many2many('product.public.category', string='Website Product Category',
help="The product will be available in each mentioned e-commerce category. Go to"
"Shop > Customize and enable 'E-commerce categories' to view all e-commerce categories.")
product_image_ids = fields.One2many('product.image', 'product_tmpl_id', string='Images')
# website_price deprecated, directly use _get_combination_info instead
website_price = fields.Float('Website price', compute='_website_price', digits=dp.get_precision('Product Price'))
# website_public_price deprecated, directly use _get_combination_info instead
website_public_price = fields.Float('Website public price', compute='_website_price', digits=dp.get_precision('Product Price'))
# website_price_difference deprecated, directly use _get_combination_info instead
website_price_difference = fields.Boolean('Website price difference', compute='_website_price')
def _website_price(self):
current_website = self.env['website'].get_current_website()
for template in self.with_context(website_id=current_website.id):
res = template._get_combination_info()
template.website_price = res.get('price')
template.website_public_price = res.get('list_price')
template.website_price_difference = res.get('has_discounted_price')
@api.multi
def _has_no_variant_attributes(self):
"""Return whether this `product.template` has at least one no_variant
attribute.
:return: True if at least one no_variant attribute, False otherwise
:rtype: bool
"""
self.ensure_one()
return any(a.create_variant == 'no_variant' for a in self._get_valid_product_attributes())
@api.multi
    def _has_is_custom_values(self):
        """Return whether this `product.template` has at least one is_custom
        attribute value.
        :return: True if at least one is_custom attribute value, False otherwise
        :rtype: bool
        """
        self.ensure_one()
        return any(v.is_custom for v in self._get_valid_product_attribute_values())
@api.multi
def _is_quick_add_to_cart_possible(self, parent_combination=None):
"""
It's possible to quickly add to cart if there's no optional product,
there's only one possible combination and no value is set to is_custom.
Attributes set to dynamic or no_variant don't have to be tested
specifically because they will be taken into account when checking for
the possible combinations.
:param parent_combination: combination from which `self` is an
optional or accessory product
:type parent_combination: recordset `product.template.attribute.value`
:return: True if it's possible to quickly add to cart, else False
:rtype: bool
"""
self.ensure_one()
if not self._is_add_to_cart_possible(parent_combination):
return False
gen = self._get_possible_combinations(parent_combination)
first_possible_combination = next(gen)
if next(gen, False) is not False:
# there are at least 2 possible combinations.
return False
if self._has_is_custom_values():
return False
if self.optional_product_ids.filtered(lambda p: p._is_add_to_cart_possible(first_possible_combination)):
return False
return True
@api.multi
def _get_possible_variants_sorted(self, parent_combination=None):
"""Return the sorted recordset of variants that are possible.
The order is based on the order of the attributes and their values.
See `_get_possible_variants` for the limitations of this method with
dynamic or no_variant attributes, and also for a warning about
performances.
:param parent_combination: combination from which `self` is an
optional or accessory product
:type parent_combination: recordset `product.template.attribute.value`
:return: the sorted variants that are possible
:rtype: recordset of `product.product`
"""
self.ensure_one()
def _sort_key_attribute_value(value):
# if you change this order, keep it in sync with _order from `product.attribute`
return (value.attribute_id.sequence, value.attribute_id.id)
def _sort_key_variant(variant):
"""
We assume all variants will have the same attributes, with only one value for each.
- first level sort: same as "product.attribute"._order
- second level sort: same as "product.attribute.value"._order
"""
keys = []
            for value in variant.attribute_value_ids.sorted(_sort_key_attribute_value):
                # if you change this order, keep it in sync with _order from `product.attribute.value`
                keys.append(value.sequence)
                keys.append(value.id)
return keys
return self._get_possible_variants(parent_combination).sorted(_sort_key_variant)
@api.multi
def _get_combination_info(self, combination=False, product_id=False, add_qty=1, pricelist=False, parent_combination=False, only_template=False):
"""Override for website, where we want to:
- take the website pricelist if no pricelist is set
- apply the b2b/b2c setting to the result
This will work when adding website_id to the context, which is done
automatically when called from routes with website=True.
"""
self.ensure_one()
current_website = False
if self.env.context.get('website_id'):
current_website = self.env['website'].get_current_website()
if not pricelist:
pricelist = current_website.get_current_pricelist()
combination_info = super(ProductTemplate, self)._get_combination_info(
combination=combination, product_id=product_id, add_qty=add_qty, pricelist=pricelist,
parent_combination=parent_combination, only_template=only_template)
if self.env.context.get('website_id'):
partner = self.env.user.partner_id
company_id = current_website.company_id
product = self.env['product.product'].browse(combination_info['product_id']) or self
tax_display = self.env.user.has_group('account.group_show_line_subtotals_tax_excluded') and 'total_excluded' or 'total_included'
taxes = partner.property_account_position_id.map_tax(product.sudo().taxes_id.filtered(lambda x: x.company_id == company_id), product, partner)
# The list_price is always the price of one.
quantity_1 = 1
price = taxes.compute_all(combination_info['price'], pricelist.currency_id, quantity_1, product, partner)[tax_display]
if pricelist.discount_policy == 'without_discount':
list_price = taxes.compute_all(combination_info['list_price'], pricelist.currency_id, quantity_1, product, partner)[tax_display]
else:
list_price = price
has_discounted_price = pricelist.currency_id.compare_amounts(list_price, price) == 1
combination_info.update(
price=price,
list_price=list_price,
has_discounted_price=has_discounted_price,
)
return combination_info
@api.multi
def _create_first_product_variant(self, log_warning=False):
"""Create if necessary and possible and return the first product
variant for this template.
:param log_warning: whether a warning should be logged on fail
:type log_warning: bool
:return: the first product variant or none
:rtype: recordset of `product.product`
"""
return self._create_product_variant(self._get_first_possible_combination(), log_warning)
@api.multi
def _get_current_company_fallback(self, **kwargs):
"""Override: if a website is set on the product or given, fallback to
the company of the website. Otherwise use the one from parent method."""
res = super(ProductTemplate, self)._get_current_company_fallback(**kwargs)
website = self.website_id or kwargs.get('website')
return website and website.company_id or res
def _default_website_sequence(self):
self._cr.execute("SELECT MIN(website_sequence) FROM %s" % self._table)
min_sequence = self._cr.fetchone()[0]
return min_sequence and min_sequence - 1 or 10
def set_sequence_top(self):
self.website_sequence = self.sudo().search([], order='website_sequence desc', limit=1).website_sequence + 1
def set_sequence_bottom(self):
self.website_sequence = self.sudo().search([], order='website_sequence', limit=1).website_sequence - 1
def set_sequence_up(self):
previous_product_tmpl = self.sudo().search(
[('website_sequence', '>', self.website_sequence), ('website_published', '=', self.website_published)],
order='website_sequence', limit=1)
if previous_product_tmpl:
previous_product_tmpl.website_sequence, self.website_sequence = self.website_sequence, previous_product_tmpl.website_sequence
else:
self.set_sequence_top()
def set_sequence_down(self):
        next_product_tmpl = self.search([('website_sequence', '<', self.website_sequence), ('website_published', '=', self.website_published)], order='website_sequence desc', limit=1)
        if next_product_tmpl:
            next_product_tmpl.website_sequence, self.website_sequence = self.website_sequence, next_product_tmpl.website_sequence
else:
return self.set_sequence_bottom()
def _default_website_meta(self):
res = super(ProductTemplate, self)._default_website_meta()
res['default_opengraph']['og:description'] = res['default_twitter']['twitter:description'] = self.description_sale
res['default_opengraph']['og:title'] = res['default_twitter']['twitter:title'] = self.name
res['default_opengraph']['og:image'] = res['default_twitter']['twitter:image'] = "/web/image/product.template/%s/image" % (self.id)
return res
@api.multi
def _compute_website_url(self):
super(ProductTemplate, self)._compute_website_url()
for product in self:
product.website_url = "/shop/product/%s" % (product.id,)
class Product(models.Model):
_inherit = "product.product"
website_id = fields.Many2one(related='product_tmpl_id.website_id', readonly=False)
# website_price deprecated, directly use _get_combination_info instead
website_price = fields.Float('Website price', compute='_website_price', digits=dp.get_precision('Product Price'))
# website_public_price deprecated, directly use _get_combination_info instead
website_public_price = fields.Float('Website public price', compute='_website_price', digits=dp.get_precision('Product Price'))
# website_price_difference deprecated, directly use _get_combination_info instead
website_price_difference = fields.Boolean('Website price difference', compute='_website_price')
def _website_price(self):
for product in self:
res = product._get_combination_info_variant()
product.website_price = res.get('price')
product.website_public_price = res.get('list_price')
product.website_price_difference = res.get('has_discounted_price')
@api.multi
def website_publish_button(self):
self.ensure_one()
return self.product_tmpl_id.website_publish_button()
class ProductImage(models.Model):
_name = 'product.image'
_description = 'Product Image'
name = fields.Char('Name')
image = fields.Binary('Image', attachment=True)
product_tmpl_id = fields.Many2one('product.template', 'Related Product', copy=True)
|
[
"[email protected]"
] | |
b9d1aac37da008912ab199a5366e09629d26f0ea
|
90d9af671b450db0c601b97a09a7b83d48093474
|
/base/urls.py
|
37891abf98597cde7635fb5896b1001857819e38
|
[] |
no_license
|
Kailash-Sankar/agog
|
24a4ff95f607d2c43ce7fe4d435b8ff769ac04bb
|
2a94bdb471f38de693a98fd5b07c4c84dbada278
|
refs/heads/master
| 2020-12-24T12:32:56.995381 | 2018-11-11T09:32:12 | 2018-11-11T09:32:12 | 72,984,097 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,352 |
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
#Home
url(r'^$', views.home, name='home'),
url(r'^dashboard/$', views.dashboard, name='dashboard'),
#Auth
url(r'^login/$', views.user_login, name='login'),
url(r'^logout/$', views.user_logout, name='logout'),
#meta debug
url(r'^meta/$', views.display_meta, name='meta'),
#Views
url(r'^ask/$', views.ask, name='ask'),
url(r'^question/([0-9]+)$', views.view_question, name='view_question'),
#Q&A
url(r'^q/([0-9]+)$', views.question, name='question'),
url(r'^question/([0-9]+)/answers/([0-9]*)$', views.answers, name='answers'),
url(r'^tags/([a-zA-Z]+)$', views.tags, name='tags'),
    #Trending
url(r'^trending/([a-zA-Z]+)$', views.trending, name='trending'),
#like
url(r'^question/([0-9]+)/like$', views.qlike, name='qlike'),
url(r'^answer/([0-9]+)/like$', views.alike, name='alike'),
#save
url(r'^question/save$', views.saveQuestion, name='saveq'),
url(r'^question/([0-9]+)/answer/save$', views.saveAnswer, name='savea'),
#profile
url(r'^profile/$', views.profile, name='profile'),
url(r'^me/$', views.me, name='me'),
url(r'^me/tags$', views.userTags, name='userTags'),
url(r'^me/tags/save$', views.saveUserTags, name='saveUserTags'),
]
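# Resolving the named routes above (illustrative sketch, assuming these
# patterns are mounted at the site root of a configured Django project):
#
#     from django.urls import reverse
#     reverse('view_question', args=[42])   # -> '/question/42'
#     reverse('tags', args=['django'])      # -> '/tags/django'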
|
[
"[email protected]"
] | |
25081511c362930e4fc1566bff2d190c95181841
|
7fe74e56cd991cb5f4af96daf99193bfbf5af065
|
/GetData/GetSPECvsHS06data.py
|
0a239187ec2f8c2f4671e23ae7a36dc80158e82f
|
[] |
no_license
|
TristanSullivan/CSBS_BMPaper_Analysis
|
a42f844fb53044f1b0679b8aa108039ece9bfa49
|
4d1e990d79dc721782190bb06987c3cfd458b443
|
refs/heads/main
| 2023-07-23T15:25:11.548398 | 2021-09-03T22:32:50 | 2021-09-03T22:32:50 | 402,901,176 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,567 |
py
|
# Code to extract the data for Figure 1 of the 2021 CSBS Paper
import datetime
import pandas as pd
import json
# Load the pickled data file and normalize column names (dashes -> underscores)
filename = 'es_bmk_2018-06-27.pickle'
pdf0 = pd.read_pickle(filename)
pdf0.columns = [i.replace('-', '_') for i in pdf0.columns]
# Print keys
#for x in pdf0.keys():
# print(x)
# Cuts from SPEC-CPU2017-Analysis-Include-AMD.ipynb
pdf1 = pdf0.query('cloud != "CERN-wig-project-011"')
pdf2 = pdf1.query('tstart > datetime.date(2018,3,15)')
# For next set of cuts
pdf = pdf2
pdf['rstart'] = pdf.tstart.apply(lambda x: x.replace(microsecond=0,second=0,minute=0))
pdf['spec2017_bmks_541_leela_r'].head()
pdf_threads_socket = pdf.groupby(['cpuname']).cpunum.quantile(.75).reset_index().rename(columns={'cpunum':'totThreads'}).copy()
npdf = pdf.merge(pdf_threads_socket,how='left',on='cpuname')
npdf.groupby(['cloud','pnode'])[['cpunum','totThreads']].describe()
to_be_excluded = npdf[(npdf.cpunum<npdf.totThreads) & ~(npdf.cloud == 'GridKa')].groupby(['cloud','pnode','totThreads','rstart']).cpunum.agg(['mean','count']).reset_index().query('mean*count<totThreads')
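# Illustration: totThreads is the 75th-percentile cpunum per CPU model, so a
# 16-thread node whose benchmarked VMs in a given hour cover only
# mean * count = 4 * 2 = 8 threads satisfies mean*count < totThreads and is
# flagged here, then dropped from npdf below.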
#print(to_be_excluded['pnode'].unique())
#print(to_be_excluded['cloud'].unique())
tmp_pdf = npdf.merge(to_be_excluded,how='left',on=['cloud','pnode','totThreads'],suffixes=('','_y'))
#define fpdf: keep only the rows not flagged for exclusion
fpdf = tmp_pdf[tmp_pdf.rstart_y.isnull()]
vm1 = 'bmk12-slc6-iuogw8ce6s_d7b47da5-b2d1-46e0-b0aa-f5b0adc25b9d'
vm2 = 'bmk12-slc6-o5jvcmjxsm_8b179c55-7967-4979-a4db-470d891c1ba9'
fpdf = fpdf[~(((fpdf.UID==vm1) | (fpdf.UID==vm2)) & (fpdf.hs06_32_avg_core_score>10.7))]
vm1 = 'bmk8-slc6-pvdd93rtag_61dfe509-27cd-44b7-8022-33d804baf902'
vm2 = 'bmk8-slc6-pdv4w02yj5_37c8109e-a34b-41de-939d-43020cebbcac'
vm3 = 'bmk8-slc6-oxddsmf2pl_bc675907-0fd8-4891-bc40-1a67bf138333'
#fpdf = fpdf[~(((fpdf.UID==vm1) | (fpdf.UID==vm2) | (fpdf.UID==vm3)) & (fpdf.hs06_32_avg_core_score>13))]
fpdf = fpdf[~(((fpdf.UID==vm1) | (fpdf.UID==vm2) | (fpdf.UID==vm3)))]
fpdf = fpdf.query('pnode!="p06253971a08885"')
fpdf[fpdf.cpunum<fpdf.totThreads].groupby(['cloud','pnode','totThreads','rstart']).cpunum.agg(['mean','count']).reset_index().query('mean*count<totThreads')
fpdf = fpdf[(fpdf.cloud != 'GridKa') | (fpdf.rstart > '2018-07-02')]
pdf = fpdf
# summarize input file
# --------------------
"""
print()
print("**********************************************************************")
print("\nInitial number of entries in file = ", len(pdf0))
print("------> remove bad cloud = ", len(pdf1))
print("------> Time cut = ", len(pdf2))
print("-------------------------------------------------")
print("------> Final number for analysis = ", len(pdf))
print()
print("Unique CPUs = ", len(pdf.cpuname))
print(pdf.groupby(['cpuname','cpunum']).size())
print(pdf.groupby(['cpuname','cloud']).size())
"""
# make a dataframe for each cpuname
# ---------------------------------
cpu_collection = {}
cpuname = pdf.cpuname.unique()
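# Note (illustrative): the scores printed below are normalized per logical
# core, e.g. an hs06_64_score of 320.0 measured on a 20-thread VM is reported
# as 320.0 / 20 = 16.0; the standard deviation is scaled by the same factor.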
# Loop to write out benchmark scores
for cpu in cpuname:
#print(cpu)
cpu_collection[cpu] = pdf.query('cpuname == @cpu')
cpunum = cpu_collection[cpu].cpunum.unique()
# This if else block is here because the E5-2630 v4 was profiled in two different ways, and the data need to be separated
if cpu == "Intel(R) Xeon(R) CPU E5-2630 v4 @ 2.20GHz":
clouds = cpu_collection[cpu].cloud.unique()
print(cpu, end = '')
for cloud in clouds:
print(cloud)
cpu_collection_cloud = cpu_collection[cpu].query('cloud == @cloud')
num = 20
print(num)
# Different options for error bars
print(cpu_collection_cloud['hs06_32_score'].mean() / float(num))
print(cpu_collection_cloud['hs06_32_score'].std() / float(num))
#print(cpu_collection_cloud['hs06_32_score'].quantile(0.75) / float(num) - cpu_collection_cloud['hs06_32_score'].quantile(0.25) / float(num))
#print(cpu_collection_cloud['hs06_32_score'].quantile(0.95) / float(num) - cpu_collection_cloud['hs06_32_score'].quantile(0.05) / float(num))
print(cpu_collection_cloud['hs06_64_score'].mean() / float(num))
print(cpu_collection_cloud['hs06_64_score'].std() / float(num))
#print(cpu_collection_cloud['hs06_64_score'].quantile(0.75) / float(num) - cpu_collection_cloud['hs06_64_score'].quantile(0.25) / float(num))
#print(cpu_collection_cloud['hs06_64_score'].quantile(0.95) / float(num) - cpu_collection_cloud['hs06_64_score'].quantile(0.05) / float(num))
print(cpu_collection_cloud['spec2017_score'].mean() / float(num))
print(cpu_collection_cloud['spec2017_score'].std() / float(num))
#print(cpu_collection_cloud['spec2017_score'].quantile(0.75) / float(num) - cpu_collection_cloud['spec2017_score'].quantile(0.25) / float(num))
#print(cpu_collection_cloud['spec2017_score'].quantile(0.95) / float(num) - cpu_collection_cloud['spec2017_score'].quantile(0.05) / float(num))
else:
print(cpu)
for num in cpunum:
print(num)
cpu_collection_num = cpu_collection[cpu].query('cpunum == @num')
# Different options for error bars
print(cpu_collection_num['hs06_32_score'].mean() / float(num))
print(cpu_collection_num['hs06_32_score'].std() / float(num))
#print(cpu_collection_num['hs06_32_score'].quantile(0.75) / float(num) - cpu_collection_num['hs06_32_score'].quantile(0.25) / float(num))
#print(cpu_collection_num['hs06_32_score'].quantile(0.95) / float(num) - cpu_collection_num['hs06_32_score'].quantile(0.05) / float(num))
print(cpu_collection_num['hs06_64_score'].mean() / float(num))
print(cpu_collection_num['hs06_64_score'].std() / float(num))
#print(cpu_collection_num['hs06_64_score'].quantile(0.75) / float(num) - cpu_collection_num['hs06_64_score'].quantile(0.25) / float(num))
#print(cpu_collection_num['hs06_64_score'].quantile(0.95) / float(num) - cpu_collection_num['hs06_64_score'].quantile(0.05) / float(num))
print(cpu_collection_num['spec2017_score'].mean() / float(num))
print(cpu_collection_num['spec2017_score'].std() / float(num))
#print(cpu_collection_num['spec2017_score'].quantile(0.75) / float(num) - cpu_collection_num['spec2017_score'].quantile(0.25) / float(num))
#print(cpu_collection_num['spec2017_score'].quantile(0.95) / float(num) - cpu_collection_num['spec2017_score'].quantile(0.05) / float(num))
exit()
|
[
"[email protected]"
] | |
595b484f51cb641a44fd9d996752f386eb520b68
|
49f5d57221f9715a635669380da1facad93d29cf
|
/rest_api_calculator/calc/utils.py
|
064db2f2b216d24ef6f191166f6b241ddb04cd0c
|
[] |
no_license
|
marimuthuei/django-rest
|
35aef6253b8b7c6d5cbd348accee304ad40a3e9c
|
8398ef89f7f728b9d6d7af0384ee54667b7466e1
|
refs/heads/master
| 2022-04-30T17:42:09.286819 | 2021-04-15T07:39:22 | 2021-04-15T07:39:22 | 209,269,881 | 1 | 0 | null | 2022-04-22T22:24:18 | 2019-09-18T09:27:21 |
Python
|
UTF-8
|
Python
| false | false | 897 |
py
|
import math
from rest_framework.exceptions import APIException, ValidationError
from core.settings import FACTORIAl_MAX
def calculator(operator, a, b):
result = None
try:
if operator == "add":
result = a + b
elif operator == "sub":
result = a - b
elif operator == "mul":
result = a * b
elif operator == "div":
result = a / b
elif operator == "sqrt":
result = math.sqrt(a)
elif operator == "pow":
result = math.pow(a, b)
elif operator == "fact":
if 0 <= a <= FACTORIAl_MAX:
result = math.factorial(a)
else:
                raise ValidationError("Factorial computation limited to %d." % FACTORIAl_MAX)
except Exception as ex:
raise APIException("calc error : " + str(ex))
return result
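
# Example usage (illustrative sketch; runnable only where rest_framework and
# core.settings resolve, and assuming FACTORIAl_MAX >= 5):
if __name__ == "__main__":
    print(calculator("add", 2, 3))    # 5
    print(calculator("pow", 2, 10))   # 1024.0
    print(calculator("fact", 5, 0))   # 120 (b is ignored for 'fact')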
|
[
"[email protected]"
] | |
a20f2c17ed8c730672b800ca5e82bdf5b3c6fd1e
|
b377977d942a7af3052374af064fc17f30b1e76e
|
/_objective_function.py
|
a3697cfbd7454caf9905839ece971886ab23cdbd
|
[] |
no_license
|
qhyseni/CB-CTT
|
4c510c3c74fa0849a01a23bf963d0cbb68988594
|
2ddd098987aefc2eebbec748b7945036ea0340a9
|
refs/heads/master
| 2021-06-15T17:40:19.238246 | 2021-02-27T21:13:36 | 2021-02-27T21:13:36 | 158,017,374 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,169 |
py
|
import xml_to_od
from Models.penalty import penalty
from Experiments.statistics import statistics
class objective_function:
def __init__(self, type, instance_data):
self.instance_data = instance_data
self.type = type
# Penalty scores
self.penalties = penalty(type)
    # Objective function is the sum of all penalties; which penalties apply
    # depends on the formulation type (UD2 or UD4).
def cost(self, current_solution):
# initialize a dictionary that will help to keep track of courses penalties
course_penalties = dict()
for i in range(self.instance_data.courses_count):
course_penalties[i] = 0
# initialize a dictionary that will help to keep track of curricula penalties
curriculum_penalties = dict()
for curriculum in self.instance_data.curricula:
curriculum_penalties[curriculum.id] = 0
if self.type == 'UD4':
room_capacity_penalty, course_penalties = self.room_capacity_penalties(current_solution, course_penalties)
min_wdays_penalty, course_penalties = self.min_wdays_penalties(current_solution, course_penalties)
windows_penalty, curriculum_penalties = self.windows_penalties(current_solution, curriculum_penalties)
minmax_load_penalty = self.minmax_load_penalties(current_solution)
double_lectures_penalty = self.double_lectures_penalties(current_solution)
cost = room_capacity_penalty + min_wdays_penalty + windows_penalty + minmax_load_penalty + double_lectures_penalty
return cost, course_penalties, curriculum_penalties
elif self.type == 'UD2':
room_capacity_penalty, course_penalties = self.room_capacity_penalties(current_solution, course_penalties)
min_wdays_penalty, course_penalties = self.min_wdays_penalties(current_solution, course_penalties)
isolated_lectures_penalty, curriculum_penalties = self.isolated_lectures_penalties(current_solution, curriculum_penalties)
room_stability_penalty, course_penalties = self.room_stability_penalties(current_solution, course_penalties)
cost = room_capacity_penalty + min_wdays_penalty + isolated_lectures_penalty + room_stability_penalty
return cost, course_penalties, curriculum_penalties
# For each lecture, the number of students that attend the course must be less
# than or equal the number of seats of all the rooms that host its lectures.
# Each student above the capacity counts as 1 point of penalty.
def room_capacity_penalties(self, current_solution, course_penalties):
penalty = 0
for i in range(self.instance_data.days):
for j in range(self.instance_data.periods_per_day):
for k in range(self.instance_data.rooms_count):
if current_solution[i][j][k] != -1:
course_index = current_solution[i][j][k]
students = self.instance_data.courses_students[course_index]
room_size = int(self.instance_data.rooms[k].size)
if students - room_size > 0:
extra_students = students - room_size
penalty += extra_students
course_penalties[course_index] += extra_students
return penalty, course_penalties
# The lectures of each course must be spread into a given minimum number of days.
# Each day below the minimum counts as 1 violation.
def min_wdays_penalties(self, current_solution, course_penalties):
penalty = 0
for course in range(self.instance_data.courses_count):
min_days = self.instance_data.courses_wdays[course]
course_days = 0
for i in range(self.instance_data.days):
course_in_day = False
for j in range(self.instance_data.periods_per_day):
for k in range(self.instance_data.rooms_count):
if course == current_solution[i][j][k]:
course_in_day = True
break
if course_in_day:
break
if course_in_day:
course_days += 1
if course_days < min_days:
cost = (min_days - course_days) * self.penalties.P_DAYS
penalty += cost
course_penalties[course] += cost
return penalty, course_penalties
    # Lectures belonging to a curriculum should not have time windows
    # (i.e., periods without teaching) between them. For a given curriculum we account for
    # a violation every time there is a window between two lectures within the same day.
    # Each time window in a curriculum counts as many violations as its length (in periods).
def windows_penalties(self, current_solution, curriculum_penalties):
penalty = 0
for curriculum in self.instance_data.curricula:
for i in range(self.instance_data.days):
check_for_windows = False
curriculum_windows = 0
for j in range(self.instance_data.periods_per_day):
if check_for_windows:
curriculum_windows += 1
for k in range(self.instance_data.rooms_count):
course = current_solution[i][j][k]
if course != -1:
if check_for_windows and curriculum in self.instance_data.courses_curricula[course]:
penalty += curriculum_windows - 1
curriculum_penalties[curriculum.id] += curriculum_windows - 1
curriculum_windows = 0
break
else:
check_for_windows = True
break
return penalty, curriculum_penalties
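    # Worked example: lectures of a curriculum at periods 0 and 3 of a day,
    # with periods 1-2 free, drive curriculum_windows to 3 by period 3, so the
    # window adds 3 - 1 = 2 penalty points (one per empty period).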
# For each curriculum the number of daily lectures should be within a given range.
# Each lecture below the minimum or above the maximum counts as 1 violation.
def minmax_load_penalties(self, current_solution):
penalty = 0
for curriculum in self.instance_data.curricula:
for i in range(self.instance_data.days):
daily_lectures = 0
for j in range(self.instance_data.periods_per_day):
for k in range(self.instance_data.rooms_count):
if current_solution[i][j][k] != -1 and curriculum in self.instance_data.courses_curricula[current_solution[i][j][k]]:
daily_lectures += 1
break
if daily_lectures > self.instance_data.daily_max_lectures:
penalty += daily_lectures - self.instance_data.daily_max_lectures
elif daily_lectures < self.instance_data.daily_min_lectures:
penalty += self.instance_data.daily_min_lectures - daily_lectures
return penalty
    # Some courses require that lectures in the same day are grouped together
    # (double lectures). For such a course, whenever it has more than one
    # lecture in a day, every lecture must be grouped with another one. Two
    # lectures are grouped if they are adjacent and in the same room. Each
    # non-grouped lecture counts as 1 violation.
def double_lectures_penalties(self, current_solution):
penalty = 0
        for course in range(self.instance_data.courses_count):
            # NOTE: `course` is an integer index, so it has no attributes; this
            # assumes instance_data exposes a `courses` list whose items carry a
            # `double_lectures` attribute, mirroring how `rooms` objects are
            # accessed elsewhere in this class.
            if self.instance_data.courses[course].double_lectures == 'yes':
for i in range(self.instance_data.days):
course_periods = []
for j in range(self.instance_data.periods_per_day):
course_in_day = False
for k in range(self.instance_data.rooms_count):
if course == current_solution[i][j][k]:
course_in_day = True
break
if course_in_day:
course_periods.append(1)
else:
course_periods.append(0)
for l in range(self.instance_data.periods_per_day):
if course_periods[l] == 1:
if l == 0 and course_periods[l+1] == 0:
penalty += 1
elif l == self.instance_data.periods_per_day - 1 and course_periods[l-1] == 0:
penalty += 1
elif course_periods[l-1] == 0 and course_periods[l+1] == 0:
penalty += 1
return penalty
# Lectures belonging to a curriculum should be adjacent to each other
# (i.e., in consecutive periods). For a given curriculum we account for a
# violation every time there is one lecture not adjacent to any other lecture within the same day.
# Each isolated lecture in a curriculum counts as 1 violation.
def isolated_lectures_penalties(self, current_solution, curriculum_penalties):
penalty = 0
for curriculum in self.instance_data.curricula:
for i in range(self.instance_data.days):
curriculum_periods = []
for j in range(self.instance_data.periods_per_day):
curriculum_in_day = False
for k in range(self.instance_data.rooms_count):
course = current_solution[i][j][k]
if course != -1 and curriculum.id in self.instance_data.courses_curricula[course]:
curriculum_in_day = True
break
if curriculum_in_day:
curriculum_periods.append(1)
else:
curriculum_periods.append(0)
for l in range(self.instance_data.periods_per_day):
if curriculum_periods[l] == 1:
if l == 0 and curriculum_periods[l+1] == 0:
penalty += self.penalties.P_COMP
curriculum_penalties[curriculum.id] += self.penalties.P_COMP
elif l == self.instance_data.periods_per_day - 1 and curriculum_periods[l-1] == 0:
penalty += self.penalties.P_COMP
curriculum_penalties[curriculum.id] += self.penalties.P_COMP
elif curriculum_periods[l-1] == 0 and curriculum_periods[l+1] == 0:
curriculum_penalties[curriculum.id] += self.penalties.P_COMP
penalty += self.penalties.P_COMP
return penalty, curriculum_penalties
# All lectures of a course should be given in the same room. Each distinct
# room used for the lectures of a course, but the first, counts as 1 violation.
def room_stability_penalties(self, current_solution, course_penalties):
penalty = 0
for course in range(self.instance_data.courses_count):
course_room = None
course_rooms = []
for i in range(self.instance_data.days):
for j in range(self.instance_data.periods_per_day):
for k in range(self.instance_data.rooms_count):
if current_solution[i][j][k] == course:
room = self.instance_data.rooms[k].id
if course_room is None:
course_room = room
break
elif course_room != room and room not in course_rooms:
course_rooms.append(room)
penalty += self.penalties.P_STAB
course_penalties[course] += self.penalties.P_STAB
return penalty, course_penalties
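    # Worked example: a course hosted in rooms A, A, B, C uses two distinct
    # rooms beyond the first (B and C), adding 2 * P_STAB penalty points.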
|
[
"Sw1086Xk2797"
] |
Sw1086Xk2797
|
9c8e67cc920ea70707ebc82a24b5f442408fbdc6
|
07e6d38e8ad4f0b6c327e720f73f9cdf7567b1d0
|
/src/main/java/com/ipnet/bl/evaluationbl/python/SmartEvaluationDao.py
|
9e0988f65bdf96143cf4bbb31f981bf3d3a8e482
|
[] |
no_license
|
YottaLee/IPNET
|
851bfde982b6ec9059ff468caf830cbb6fd38baa
|
80c9ce618c917dafe6083c34530d81e1408f2cc6
|
refs/heads/master
| 2020-03-22T19:06:03.944672 | 2018-11-17T06:54:52 | 2018-11-17T06:54:52 | 140,504,866 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 840 |
py
|
from setup import mysql
# Execute SQL
def saveEvaluation(patent,value):
with mysql() as cursor:
cursor.execute("select patent_id from patent where patent_name = \""+patent+"\"")
patentId = cursor.fetchall()[0]['patent_id']
cursor.execute("select * from evaluation where patentid = \""+patentId+"\"")
newId = cursor.fetchall()
if len(newId) == 0:
cursor.execute("select max(id) from evaluation")
newId = cursor.fetchall()[0]['max(id)']+1
executeStr = "INSERT INTO evaluation(id,patentid,money,over,evaluation) values ("+str(newId)+",\""+patentId+"\",0,true,"+str(value)+")"
else:
            print(newId)
executeStr = "UPDATE evaluation SET evaluation = '"+str(value)+"' WHERE patentid = \""+patentId+"\" "
cursor.execute(executeStr)
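# A safer variant would bind values instead of concatenating SQL strings
# (illustrative sketch, same schema assumed, DB-API %s placeholders):
#
#     cursor.execute("SELECT patent_id FROM patent WHERE patent_name = %s", (patent,))
#     cursor.execute("UPDATE evaluation SET evaluation = %s WHERE patentid = %s",
#                    (value, patentId))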
|
[
"[email protected]"
] | |
a7d182b1f7306bd17a60d4829a749ec58ebd4878
|
cce056258115ff589a658d5cb3187e4145471e3f
|
/2020 Spring:natural language process/EXP 4:CNN-based NMT/代码/model_embeddings.py
|
5df1c84c17500d1f365c21c18debea7b972341ff
|
[] |
no_license
|
huochf/Course-Experiments
|
cd74c2de92a02bea9565d349fefefd8fa76997b4
|
a91fd21582b3ac5d8fcaf1f12c4f0814cc4675db
|
refs/heads/master
| 2023-06-08T08:43:49.962942 | 2020-09-24T01:53:24 | 2020-09-24T01:53:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,488 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CS224N 2019-20: Homework 5
model_embeddings.py: Embeddings for the NMT model
Pencheng Yin <[email protected]>
Sahil Chopra <[email protected]>
Anand Dhoot <[email protected]>
Michael Hahn <[email protected]>
"""
import torch
import torch.nn as nn
# Do not change these imports; your module names should be
# `CNN` in the file `cnn.py`
# `Highway` in the file `highway.py`
# Uncomment the following two imports once you're ready to run part 1(j)
from cnn import CNN, WordCNN, WordLSTM
from highway import Highway, SelectiveConnect
# End "do not change"
class ModelEmbeddings(nn.Module):
"""
Class that converts input words to their CNN-based embeddings.
"""
def __init__(self, word_embed_size, vocab):
"""
Init the Embedding layer for one language
@param word_embed_size (int): Embedding size (dimensionality) for the output word
@param vocab (VocabEntry): VocabEntry object. See vocab.py for documentation.
        Hints: - You may find len(self.vocab.char2id) useful when creating the embedding
"""
super(ModelEmbeddings, self).__init__()
### YOUR CODE HERE for part 1h
self.word_embed_size = word_embed_size
self.vocab = vocab
self.e_char = 50
padding_idx = self.vocab.char_pad
self.char_embedding = nn.Embedding(len(self.vocab.char2id), self.e_char, padding_idx=padding_idx)
self.cnn = CNN(embed_size=self.e_char, num_filter=self.word_embed_size)
self.highway = Highway(embedd_size=self.word_embed_size)
self.dropout = nn.Dropout(p=0.3)
### END YOUR CODE
def forward(self, input):
"""
Looks up character-based CNN embeddings for the words in a batch of sentences.
@param input: Tensor of integers of shape (sentence_length, batch_size, max_word_length) where
each integer is an index into the character vocabulary
        @returns output: Tensor of shape (sentence_length, batch_size, word_embed_size), containing the
CNN-based embeddings for each word of the sentences in the batch
"""
### YOUR CODE HERE for part 1h
X_words_emb = []
for X_padded in input:
X_emb = self.char_embedding(X_padded) # batch_size x max_word_length x char_embed_size
X_reshaped = torch.transpose(X_emb, dim0=1, dim1=2)
X_conv_out = self.cnn(X_reshaped)
X_highway = self.highway(X_conv_out)
X_word_emb = self.dropout(X_highway)
X_words_emb.append(X_word_emb)
X_words_emb = torch.stack(X_words_emb)
return X_words_emb
### END YOUR CODE
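    # Shape walkthrough (illustrative values, CNN behavior per cnn.py): with
    # sentence_length=10, batch_size=4, max_word_length=21 and e_char=50, each
    # X_padded embeds to (4, 21, 50), transposes to (4, 50, 21), the CNN maps
    # it to (4, word_embed_size), Highway preserves the shape, and stacking
    # over the sentence dimension yields (10, 4, word_embed_size).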
class ModelEmbeddings_2(nn.Module):
def __init__(self, word_embed_size, vocab):
super(ModelEmbeddings_2, self).__init__()
self.word_embed_size = word_embed_size
self.vocab = vocab
self.e_char = 50
padding_idx = self.vocab.char_pad
self.char_embedding = nn.Embedding(len(self.vocab.char2id), self.e_char, padding_idx=padding_idx)
self.cnn = CNN(embed_size=self.e_char, num_filter=self.word_embed_size)
self.highway = Highway(embedd_size=self.word_embed_size)
def forward(self, input):
X_words_emb = []
for X_padded in input:
X_emb = self.char_embedding(X_padded) # batch_size x max_word_length x char_embed_size
X_reshaped = torch.transpose(X_emb, dim0=1, dim1=2)
X_conv_out = self.cnn(X_reshaped)
X_highway = self.highway(X_conv_out)
X_words_emb.append(X_highway)
X_words_emb = torch.stack(X_words_emb)
return X_words_emb
class ContexAwareEmbeddings(nn.Module):
def __init__(self, word_embed_size, vocab):
super(ContexAwareEmbeddings, self).__init__()
self.word_embed_size = word_embed_size
self.word_embedding = ModelEmbeddings_2(word_embed_size, vocab)
self.contex_cnn = WordCNN(word_embed_size=word_embed_size, num_filter=word_embed_size)
self.connect = SelectiveConnect(word_embed_size)
self.dropout = nn.Dropout(p=0.3)
def forward(self, input):
X_words_emb = self.word_embedding(input) # sentence_L x batch_size x word_embed_size
X_words_emb_reshaped = X_words_emb.permute(1, 2, 0) # batch_size x word_embed_size x sentence_L
X_contex = self.contex_cnn(X_words_emb_reshaped) # batch_size x word_embed_size x sentence_L
X_contex = X_contex.permute(2, 0, 1) # sentence_L x batch_size x word_embed_size
X_contex_embedding = self.connect(X_contex, X_words_emb)
#X_contex_embedding = self.dropout(X_contex_embedding)
return X_contex_embedding
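    # Note (illustrative): X_words_emb is (L, B, E); permuting to (B, E, L)
    # lets the word-level CNN convolve over the sentence dimension, after
    # which the selective connect gate merges context back into each word
    # embedding at the original (L, B, E) shape.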
class ContexAwareEmbeddings_LSTM(nn.Module):
def __init__(self, word_embed_size, vocab):
super(ContexAwareEmbeddings_LSTM, self).__init__()
self.word_embed_size = word_embed_size
self.word_embedding = ModelEmbeddings_2(word_embed_size, vocab)
self.contex_lstm = WordLSTM(word_embed_size=word_embed_size)
self.connect = SelectiveConnect(word_embed_size)
self.dropout = nn.Dropout(p=0.3)
def forward(self, input):
X_words_emb = self.word_embedding(input)
X_contex_lstm = self.contex_lstm(X_words_emb)
X_contex_embedding = self.connect(X_contex_lstm, X_words_emb)
#X_contex_embedding = self.dropout(X_contex_embedding)
return X_contex_embedding
|
[
"[email protected]"
] |