| column | dtype | values |
| --- | --- | --- |
| blob_id | stringlengths | 40 to 40 |
| directory_id | stringlengths | 40 to 40 |
| path | stringlengths | 3 to 616 |
| content_id | stringlengths | 40 to 40 |
| detected_licenses | listlengths | 0 to 112 |
| license_type | stringclasses | 2 values |
| repo_name | stringlengths | 5 to 115 |
| snapshot_id | stringlengths | 40 to 40 |
| revision_id | stringlengths | 40 to 40 |
| branch_name | stringclasses | 777 values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M ⌀ |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | stringclasses | 22 values |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 ⌀ |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 ⌀ |
| gha_language | stringclasses | 149 values |
| src_encoding | stringclasses | 26 values |
| language | stringclasses | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | stringclasses | 188 values |
| content | stringlengths | 3 to 10.2M |
| authors | listlengths | 1 to 1 |
| author_id | stringlengths | 1 to 132 |
5a3f0953a5f5150f348eba1aafea1639a6d67dde | 7c14aff73d1266b81944db01c7c4d374f40668a4 | /RNN_MINST_cls.py | 704de130697e4b2b32c21f0d94c27097ceac8eec | [
"Apache-2.0"
]
| permissive | oushu1zhangxiangxuan1/learn-tensorflow | ec19d20cf41dc186b9ac7f7de47d5574f30d6ff9 | e83f8633fcbfd428ee3495b18b75ca78c7a25331 | refs/heads/master | 2020-08-02T10:28:46.143332 | 2019-10-30T08:05:23 | 2019-10-30T08:05:23 | 211,318,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,776 | py |
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
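# TF1-style LSTM classifier for MNIST: each 28x28 image is fed row by row as a
# 28-step sequence of 28-pixel inputs, and the final hidden state is projected
# onto the 10 digit classes.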
tf.set_random_seed(1)
# load the MNIST data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# hyperparameters
lr = 0.001 # learning rate
training_iters = 100000
batch_size = 128
n_inputs = 28
n_steps = 28 # time steps
n_hidden_units = 128 # neurons in hidden layer
n_classes = 10
x = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_classes])
# weights biases init
weights = {
# shape (28,128)
'in': tf.Variable(tf.random_normal([n_inputs, n_hidden_units])),
# shape (128,10)
'out': tf.Variable(tf.random_normal([n_hidden_units, n_classes]))
}
biases = {
# shape(128,)
'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_units, ])),
# shape(10, )
'out': tf.Variable(tf.constant(0.1, shape=[n_classes, ]))
}
def RNN(X, weights, biases):
# X==>(128 batches * 28 steps, 28 inputs)
X = tf.reshape(X, [-1, n_inputs])
# X_in = W*X + b
X_in = tf.matmul(X, weights['in']) + biases['in']
# X_in ==> (128 batches, 28 steps, 128 hidden), reshaped back to 3 dimensions
X_in = tf.reshape(X_in, [-1, n_steps, n_hidden_units])
lstm_cell = tf.contrib.rnn.BasicLSTMCell(
n_hidden_units, forget_bias=1.0, state_is_tuple=True)
init_state = lstm_cell.zero_state(batch_size, dtype=tf.float32)
outputs, final_state = tf.nn.dynamic_rnn(
lstm_cell, X_in, initial_state=init_state, time_major=False)
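# outputs has shape (batch, n_steps, n_hidden_units) since time_major=False;
# final_state is an LSTMStateTuple (c, h), so final_state[1] below is the last
# hidden state h of shape (batch, n_hidden_units).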
# Method 1: use the final hidden state
results = tf.matmul(final_state[1], weights['out']) + biases['out']
# Method 2: take the output at the last time step
# outputs = tf.unstack(tf.transpose(outputs, [1, 0, 2]))
# results = tf.matmul(outputs[-1], weights['out']) + biases['out']
return results
pred = RNN(x, weights, biases)
# def softmax_cross_entropy_with_logits_v2(
# _sentinel=None, # pylint: disable=invalid-name
# labels=None,
# logits=None,
# dim=-1,
# name=None)
cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=pred))
train_op = tf.train.AdamOptimizer(lr).minimize(cost)
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
init = tf.global_variables_initializer()
with tf.Session() as s:
s.run(init)
step = 0
while step*batch_size < training_iters:
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
batch_xs = batch_xs.reshape([batch_size, n_steps, n_inputs])
s.run([train_op], feed_dict={
x: batch_xs,
y: batch_ys,
})
if step % 20 == 0:
print(s.run(accuracy, feed_dict={
x: batch_xs,
y: batch_ys,
}))
step += 1
| ["[email protected]"] | |
00ee778ca3fd1e9d239eac579284ef16cb9f8a5f | 6dd08ec6b4f6351de8450a3d7e592fd6b4994119 | /cbase/server/cbase-1.8.1/testrunner/pytests/viewquerytests.py | fc69c35e6f2724380fe604ee486dcf7a70afbd76 | []
| no_license | zhgwenming/appstack | d015e96b911fe318f9fba1bdeeea9d888d57dfba | 8fe6c1dfc2f5ed4a36c335e86ae28c17b3769276 | refs/heads/master | 2021-01-23T13:30:19.507537 | 2015-11-09T06:48:35 | 2015-11-09T06:48:35 | 7,576,644 | 1 | 2 | null | 2016-01-05T09:16:22 | 2013-01-12T15:13:21 | C | UTF-8 | Python | false | false | 70,316 | py |
import uuid
import logger
import time
import unittest
import json
import sys
import copy
from threading import Thread
from couchbase.document import View
from membase.api.rest_client import RestConnection, RestHelper
from viewtests import ViewBaseTests
from memcached.helper.data_helper import VBucketAwareMemcached, DocumentGenerator, KVStoreAwareSmartClient
from membase.helper.failover_helper import FailoverHelper
from membase.helper.rebalance_helper import RebalanceHelper
from old_tasks import task, taskmanager
from memcached.helper.old_kvstore import ClientKeyValueStore
from TestInput import TestInputSingleton
class ViewQueryTests(unittest.TestCase):
skip_setup_failed = False
@unittest.skipIf(skip_setup_failed, "setup was failed")
def setUp(self):
try:
ViewBaseTests.common_setUp(self)
self.limit = TestInputSingleton.input.param("limit", None)
self.reduce_fn = TestInputSingleton.input.param("reduce_fn", None)
self.error = None
self.task_manager = taskmanager.TaskManager()
self.task_manager.start()
except Exception as ex:
skip_setup_failed = True
self.fail(ex)
def tearDown(self):
ViewBaseTests.common_tearDown(self)
self.task_manager.cancel()
def test_simple_dataset_stale_queries(self):
# init dataset for test
data_set = SimpleDataSet(self._rconn(), self.num_docs, self.limit)
data_set.add_stale_queries()
self._query_test_init(data_set)
def test_simple_dataset_startkey_endkey_queries(self):
data_set = SimpleDataSet(self._rconn(), self.num_docs, limit=self.limit)
data_set.add_startkey_endkey_queries()
self._query_test_init(data_set)
def test_simple_dataset_all_queries(self):
data_set = SimpleDataSet(self._rconn(), self.num_docs, limit=self.limit)
data_set.add_all_query_sets()
self._query_test_init(data_set)
def test_simple_dataset_include_queries(self):
data_set = SimpleDataSet(self._rconn(), self.num_docs, limit=self.limit)
data_set.add_include_docs_queries([data_set.views[0]])
self._query_test_init(data_set, tm = self.task_manager)
def test_simple_dataset_reduce_queries(self):
data_set = SimpleDataSet(self._rconn(), self.num_docs,limit = self.limit,reduce_fn = self.reduce_fn)
data_set.add_reduce_queries()
self._query_test_init(data_set)
def test_simple_dataset_negative_queries(self):
# init dataset for test
query_param = TestInputSingleton.input.param("query_param", None)
value = TestInputSingleton.input.param("value", None)
error = TestInputSingleton.input.param("error", None)
data_set = SimpleDataSet(self._rconn(), self.num_docs)
data_set.add_negative_query(query_param, value, error)
self._query_test_init(data_set)
def test_employee_dataset_startkey_endkey_queries(self):
docs_per_day = self.input.param('docs-per-day', 200)
data_set = EmployeeDataSet(self._rconn(), docs_per_day, limit=self.limit)
data_set.add_startkey_endkey_queries()
self._query_test_init(data_set)
def test_employee_dataset_alldocs_queries(self):
docs_per_day = self.input.param('docs-per-day', 200)
data_set = EmployeeDataSet(self._rconn(), docs_per_day)
data_set.add_all_docs_queries()
self._query_test_init(data_set)
def test_employee_dataset_key_quieres(self):
docs_per_day = self.input.param('docs-per-day', 200)
data_set = EmployeeDataSet(self._rconn(), docs_per_day, limit=self.limit)
data_set.add_key_queries()
self._query_test_init(data_set)
def test_employee_dataset_negative_queries(self):
# init dataset for test
query_param = TestInputSingleton.input.param("query_param", None)
value = TestInputSingleton.input.param("value", None)
error = TestInputSingleton.input.param("error", None)
docs_per_day = self.input.param('docs-per-day', 20)
data_set = EmployeeDataSet(self._rconn(), docs_per_day)
data_set.add_negative_query(query_param, value, error)
self._query_test_init(data_set)
def test_employee_dataset_alldocs_queries_rebalance_in(self):
docs_per_day = self.input.param('docs-per-day', 200)
num_nodes_to_add = self.input.param('num_nodes_to_add',0)
data_set = EmployeeDataSet(self._rconn(), docs_per_day)
data_set.add_all_docs_queries()
self._query_test_init(data_set, False)
# rebalance_in and verify loaded data
ViewBaseTests._begin_rebalance_in(self, howmany=num_nodes_to_add + 1)
self._query_all_views(data_set.views)
ViewBaseTests._end_rebalance(self)
#verify queries after rebalance
self._query_test_init(data_set)
def test_employee_dataset_alldocs_failover_queries(self):
ViewBaseTests._begin_rebalance_in(self)
ViewBaseTests._end_rebalance(self)
docs_per_day = self.input.param('docs-per-day', 200)
data_set = EmployeeDataSet(self._rconn(), docs_per_day)
data_set.add_all_docs_queries()
self._query_test_init(data_set, False)
master = self.servers[0]
RebalanceHelper.wait_for_persistence(master, "default")
# failover and verify loaded data
failover_helper = FailoverHelper(self.servers, self)
failover_nodes = failover_helper.failover(self.failover_factor)
self.log.info("10 seconds sleep after failover before invoking rebalance...")
time.sleep(10)
rest=RestConnection(self.servers[0])
nodes = rest.node_statuses()
rest.rebalance(otpNodes=[node.id for node in nodes],
ejectedNodes=[node.id for node in failover_nodes])
self._query_all_views(data_set.views)
msg = "rebalance failed while removing failover nodes {0}".format(failover_nodes)
self.assertTrue(rest.monitorRebalance(), msg=msg)
#verify queries after failover
self._query_all_views(data_set.views)
def test_employee_dataset_alldocs_incremental_failover_queries(self):
ViewBaseTests._begin_rebalance_in(self)
ViewBaseTests._end_rebalance(self)
docs_per_day = self.input.param('docs-per-day', 200)
data_set = EmployeeDataSet(self._rconn(), docs_per_day)
data_set.add_all_docs_queries()
self._query_test_init(data_set, False)
servers = self.servers
# incrementally failover nodes and verify loaded data
for i in range(self.failover_factor):
failover_helper = FailoverHelper(servers, self)
failover_nodes = failover_helper.failover(1)
self.log.info("10 seconds sleep after failover before invoking rebalance...")
time.sleep(10)
rest=RestConnection(self.servers[0])
nodes = rest.node_statuses()
rest.rebalance(otpNodes=[node.id for node in nodes],
ejectedNodes=[node.id for node in failover_nodes])
self._query_all_views(data_set.views)
temp=[]
for server in servers:
rest = RestConnection(server)
if not RestHelper(rest).is_ns_server_running(timeout_in_seconds=1):
continue
temp.append(server)
servers=temp
msg = "rebalance failed while removing failover nodes {0}".format(failover_nodes)
self.assertTrue(RestConnection(self.servers[0]).monitorRebalance(), msg=msg)
def test_employee_dataset_alldocs_queries_start_stop_rebalance_in_incremental(self):
docs_per_day = self.input.param('docs-per-day', 20)
data_set = EmployeeDataSet(self._rconn(), docs_per_day)
data_set.add_all_docs_queries()
self._query_test_init(data_set, False)
master = self.servers[0]
RebalanceHelper.wait_for_persistence(master, "default")
rest=RestConnection(self.servers[0])
nodes = rest.node_statuses()
for server in self.servers[1:]:
self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
self.log.info("adding node {0}:{1} to cluster".format(server.ip, server.port))
otpNode = rest.add_node(master.rest_username, master.rest_password, server.ip, server.port)
msg = "unable to add node {0}:{1} to the cluster"
self.assertTrue(otpNode, msg.format(server.ip, server.port))
# Just doing 2 iterations
for i in [1, 2]:
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
expected_progress = 30*i
reached = RestHelper(rest).rebalance_reached(expected_progress)
self.assertTrue(reached, "rebalance failed or did not reach {0}%".format(expected_progress))
stopped = rest.stop_rebalance()
self.assertTrue(stopped, msg="unable to stop rebalance")
self._query_all_views(data_set.views)
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
self.assertTrue(rest.monitorRebalance(), msg="rebalance operation failed restarting")
self._query_all_views(data_set.views)
self.assertTrue(len(rest.node_statuses()) -len(nodes)==1, msg="number of cluster's nodes is not correct")
nodes = rest.node_statuses()
def test_employee_dataset_alldocs_queries_start_stop_rebalance_out_incremental(self):
ViewBaseTests._begin_rebalance_in(self)
ViewBaseTests._end_rebalance(self)
docs_per_day = self.input.param('docs-per-day', 20)
data_set = EmployeeDataSet(self._rconn(), docs_per_day)
data_set.add_all_docs_queries()
self._query_test_init(data_set, False)
master = self.servers[0]
RebalanceHelper.wait_for_persistence(master, "default")
rest=RestConnection(self.servers[0])
nodes = rest.node_statuses()
for server in self.servers[1:]:
ejectedNodes=[]
self.log.info("removing node {0}:{1} from cluster".format(server.ip, server.port))
for node in nodes:
if "{0}:{1}".format(node.ip, node.port) == "{0}:{1}".format(server.ip, server.port):
ejectedNodes.append(node.id)
break
# Just doing 2 iterations
for i in [1, 2]:
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=ejectedNodes)
expected_progress = 30*i
reached = RestHelper(rest).rebalance_reached(expected_progress)
self.assertTrue(reached, "rebalance failed or did not reach {0}%".format(expected_progress))
stopped = rest.stop_rebalance()
self.assertTrue(stopped, msg="unable to stop rebalance")
self._query_all_views(data_set.views)
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=ejectedNodes)
self.assertTrue(rest.monitorRebalance(), msg="rebalance operation failed restarting")
self._query_all_views(data_set.views)
self.assertTrue(len(nodes) - len(rest.node_statuses()) == 1, msg="number of cluster's nodes is not correct")
nodes = rest.node_statuses()
def test_employee_dataset_startkey_endkey_docid_queries(self):
docs_per_day = self.input.param('docs-per-day', 200)
data_set = EmployeeDataSet(self._rconn(), docs_per_day, limit=self.limit)
data_set.add_startkey_endkey_docid_queries()
self._query_test_init(data_set)
def test_employee_dataset_group_queries(self):
docs_per_day = self.input.param('docs-per-day', 200)
data_set = EmployeeDataSet(self._rconn(), docs_per_day, limit=self.limit)
data_set.add_group_count_queries()
self._query_test_init(data_set)
def test_employee_dataset_startkey_endkey_queries_rebalance_in(self):
docs_per_day = self.input.param('docs-per-day', 200)
num_nodes_to_add = self.input.param('num_nodes_to_add',0)
data_set = EmployeeDataSet(self._rconn(), docs_per_day)
data_set.add_startkey_endkey_queries()
self._query_test_init(data_set, False)
# rebalance_in and verify loaded data
ViewBaseTests._begin_rebalance_in(self, howmany=num_nodes_to_add + 1)
self._query_all_views(data_set.views)
ViewBaseTests._end_rebalance(self)
def test_employee_dataset_startkey_endkey_queries_rebalance_out(self):
ViewBaseTests._begin_rebalance_in(self)
ViewBaseTests._end_rebalance(self)
docs_per_day = self.input.param('docs-per-day', 200)
num_nodes_to_add = self.input.param('num_nodes_to_add',0)
data_set = EmployeeDataSet(self._rconn(), docs_per_day)
data_set.add_startkey_endkey_queries()
self._query_test_init(data_set, False)
# rebalance_out and verify loaded data
ViewBaseTests._begin_rebalance_out(self, howmany=num_nodes_to_add + 1)
self._query_all_views(data_set.views)
ViewBaseTests._end_rebalance(self)
def test_employee_dataset_stale_queries(self):
ViewBaseTests._begin_rebalance_in(self)
ViewBaseTests._end_rebalance(self)
docs_per_day = self.input.param('docs-per-day', 200)
data_set = EmployeeDataSet(self._rconn(), docs_per_day, limit=self.limit)
data_set.add_stale_queries()
self._query_test_init(data_set)
def test_employee_dataset_all_queries(self):
ViewBaseTests._begin_rebalance_in(self)
ViewBaseTests._end_rebalance(self)
docs_per_day = self.input.param('docs-per-day', 200)
data_set = EmployeeDataSet(self._rconn(), docs_per_day, limit=self.limit)
data_set.add_all_query_sets()
self._query_test_init(data_set)
def test_all_datasets_all_queries(self):
ViewBaseTests._begin_rebalance_in(self)
ViewBaseTests._end_rebalance(self)
ds1 = EmployeeDataSet(self._rconn())
ds2 = SimpleDataSet(self._rconn(), self.num_docs)
data_sets = [ds1, ds2]
# load and query all views and datasets
test_threads = []
for ds in data_sets:
ds.add_all_query_sets()
t = Thread(target=self._query_test_init,
name=ds.name,
args=(ds, False))
test_threads.append(t)
t.start()
[t.join() for t in test_threads]
ViewBaseTests._begin_rebalance_out(self)
ViewBaseTests._end_rebalance(self)
# verify
[self._query_all_views(ds.views) for ds in data_sets]
###
# load the data defined for this dataset.
# create views and query the data as it loads.
# verification is optional, and best practice is to
# set to False if you plan on running _query_all_views()
# later in the test case
###
def _query_test_init(self, data_set, verify_results = True, tm = None):
views = data_set.views
rest = self._rconn()
load_task = None
if tm is None:
# start loading data using old method
load_task = Thread(target=data_set.load,
name="load_data_set",
args=(self, views[0]))
load_task.start()
else:
load_task = data_set.load_with_tm(tm, rest)
time.sleep(2)
# run queries while loading data
while(load_task.is_alive()):
self._query_all_views(views, False, limit=data_set.limit)
time.sleep(5)
if 'result' in dir(load_task):
load_task.result()
else:
load_task.join()
# results will be verified if verify_results set
if verify_results:
self._query_all_views(views, verify_results, data_set.kv_store, limit = data_set.limit)
else:
self._check_view_integrity(views)
##
# run all queries for all views in parallel
##
def _query_all_views(self, views, verify_results = True, kv_store = None, limit=None):
query_threads = []
for view in views:
t = Thread(target=view.run_queries,
name="query-{0}".format(view.name),
args=(self, verify_results, kv_store, limit))
query_threads.append(t)
t.start()
[t.join() for t in query_threads]
self._check_view_integrity(views)
##
# if an error occurred while loading or querying data for a view
# it is queued and checked here
##
def _check_view_integrity(self, views):
for view in views:
self.assertEquals(view.results.failures, [],
[ex[1] for ex in view.results.failures])
self.assertEquals(view.results.errors, [],
[ex[1] for ex in view.results.errors])
# retrieve default rest connection associated with the master server
def _rconn(self):
return RestConnection(self.servers[0])
class QueryView:
def __init__(self, rest,
index_size,
bucket = "default",
prefix=None,
name = None,
fn_str = None,
reduce_fn = None,
queries = None,
create_on_init = True,
type_filter = None):
self.index_size = index_size
self.log = logger.Logger.get_logger()
default_prefix = str(uuid.uuid4())[:7]
default_fn_str = 'function (doc) {if(doc.name) { emit(doc.name, doc);}}'
self.bucket = bucket
self.prefix = (prefix, default_prefix)[prefix is None]
default_name = "dev_test_view-{0}".format(self.prefix)
self.name = (name, default_name)[name is None]
self.fn_str = (fn_str, default_fn_str)[fn_str is None]
self.reduce_fn = reduce_fn
self.results = unittest.TestResult()
self.type_filter = type_filter or None
# queries defined for this view
self.queries = (queries, list())[queries is None]
if create_on_init:
rest.create_view(self.name, self.bucket, [View(self.name, self.fn_str, self.reduce_fn)])
# query this view
def run_queries(self, tc, verify_results = False, kv_store = None, limit=None):
rest = tc._rconn()
if not len(self.queries) > 0 :
self.log.info("No queries to run for this view")
return
view_name = self.name
max_dupe_result_count = tc.input.param('max-dupe-result-count', 5)
num_verified_docs = tc.input.param('num-verified-docs', 20)
for query in self.queries:
params = query.params
params["debug"] = "true"
if self.reduce_fn is not None and "include_docs" in params:
del params["include_docs"]
expected_num_docs = query.expected_num_docs
num_keys = -1
if expected_num_docs is not None and verify_results:
attempt = 0
delay = 15
results = None
# first verify all doc_names get reported in the view
# on Windows this can take more than 20 attempts
result_count_stats = {}
while attempt < 15 and num_keys != expected_num_docs:
if attempt > 11:
params["stale"] = 'false'
self.log.info("Querying view {0} with params: {1}".format(view_name, params))
results = ViewBaseTests._get_view_results(tc, rest, self.bucket, view_name,
limit=limit, extra_params=params,
type_ = query.type_)
# check if this is a reduced query using _count
if self.reduce_fn and (not query.params.has_key("reduce") or query.params.has_key("reduce") and query.params["reduce"] == "true"):
if self.reduce_fn == "_count":
num_keys = self._verify_count_reduce_helper(query, results)
keys = ["group", "group_level", "key", "start_key", "end_key"]
if [key for key in keys if key in params]:
self.log.info("{0}: attempt {1} reduced {2} group(s) to value {3} expected: {4}" \
.format(view_name, attempt + 1, query.expected_num_groups,
num_keys, expected_num_docs))
else:
self.log.info("{0}: attempt {1} reduced {2} group(s) to value {3} expected: {4}" \
.format(view_name, attempt + 1, query.expected_num_groups,
num_keys, self.index_size))
if self.index_size != num_keys or expected_num_docs != num_keys:
attempt += 1
continue
else:
break
else:
num_keys = len(ViewBaseTests._get_keys(self, results))
self.log.info("{0}: attempt {1} retrieved value {2} expected: {3}" \
.format(view_name, attempt + 1, num_keys, expected_num_docs))
attempt += 1
if num_keys not in result_count_stats:
result_count_stats[num_keys] = 1
else:
if result_count_stats[num_keys] == max_dupe_result_count:
break
else:
result_count_stats[num_keys] += 1
time.sleep(delay)
try:
if(num_keys != expected_num_docs):
# debug query results
if self.reduce_fn is not None:
# query again with reduce false
params["reduce"] = "false"
# remove any grouping
if "group" in params:
del params["group"]
if "group_level" in params:
del params["group_level"]
if "key" in params and "limit" in params:
expected_num_docs = min(expected_num_docs, limit)
results = ViewBaseTests._get_view_results(tc, rest,
self.bucket,
view_name,
limit=limit,
extra_params=params,
type_ = query.type_)
# verify keys
key_failures = QueryHelper.verify_query_keys(rest, query,
results, self.bucket,
num_verified_docs, limit=limit)
msg = "unable to retrieve expected results: {0}".format(key_failures)
tc.assertEquals(len(key_failures), 0, msg)
# verify values for include_docs tests
if('include_docs' in params):
failures = QueryHelper.verify_query_values(rest, query, results, self.bucket)
msg = "data integrity failed: {0}".format(failures)
tc.assertEquals(len(failures), 0, msg)
except:
self.log.error("Query failed: see test result logs for details")
self.results.addFailure(tc, sys.exc_info())
else:
# query without verification
self.log.info("Querying view {0} with params: {1}".format(view_name, params))
try:
results = ViewBaseTests._get_view_results(tc, rest, self.bucket, view_name,
limit=limit, extra_params=params,
type_ = query.type_,
invalid_results=query.error and True or False)
except Exception as ex:
if query.error:
if ex.message.find(query.error) > -1:
self.log.info("View results contain '{0}' error as expected".format(query.error))
return
else:
self.log.error("View results expect '{0}' error but {1} raised".format(query.error, ex.message))
self.results.addFailure(tc,(type(ex), ex.message, sys.exc_info()[2]))
return
if query.error:
self.log.error("No error raised for negative case. Expected error '{0}'".format(query.error))
self.results.addFailure(tc, (Exception, "No error raised for negative case", sys.exc_info()[2]))
"""
helper function for verifying results when _count reduce is used.
by default 1 group is expected, but when more are specified
a summation is derived.
the result is compared with the expected_num_docs of the query.
TODO: _sum,_stats? :)
"""
def _verify_count_reduce_helper(self, query, results):
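# e.g. with {"group_level": "2"} a _count reduce returns one row per [year, month]
# group, each shaped like {"key": [2008, 1], "value": <count>}; the loop below
# sums the value of the first expected_num_groups rows.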
num_keys = 0
for i in xrange(query.expected_num_groups):
if i < len(results["rows"]):
num_keys += results["rows"][i]["value"]
return num_keys
class EmployeeDataSet:
def __init__(self, rest, docs_per_day = 200, bucket = "default", limit=None):
self.docs_per_day = docs_per_day
self.years = 1
self.months = 12
self.days = 28
self.sys_admin_info = {"title" : "System Administrator and heliport manager",
"desc" : "...Last but not least, as the heliport manager, you will help maintain our growing fleet of remote controlled helicopters, that crash often due to inexperienced pilots. As an independent thinker, you may be free to replace some of the technologies we currently use with ones you feel are better. If so, you should be prepared to discuss and debate the pros and cons of suggested technologies with other stakeholders",
"type" : "admin"}
self.ui_eng_info = {"title" : "UI Engineer",
"desc" : "Couchbase server UI is one of the crown jewels of our product, which makes the Couchbase NoSQL database easy to use and operate, reports statistics on real time across large clusters, and much more. As a Member of Couchbase Technical Staff, you will design and implement front-end software for cutting-edge distributed, scale-out data infrastructure software systems, which is a pillar for the growing cloud infrastructure.",
"type" : "ui"}
self.senior_arch_info = {"title" : "Senior Architect",
"desc" : "As a Member of Technical Staff, Senior Architect, you will design and implement cutting-edge distributed, scale-out data infrastructure software systems, which is a pillar for the growing cloud infrastructure. More specifically, you will bring Unix systems and server tech kung-fu to the team.",
"type" : "arch"}
self.views = self.create_views(rest)
self.bucket = bucket
self.rest = rest
self.name = "employee_dataset"
self.kv_store = None
self.doc_id_map = {}
self.limit = limit
def calc_total_doc_count(self):
return self.years * self.months * self.days * self.docs_per_day * len(self.get_data_sets())
def add_negative_query(self, query_param, value, error, views=None):
views = views or self.views
for view in views:
view.queries += [QueryHelper({query_param : value}, None, error=error)]
def add_startkey_endkey_queries(self, views=None, limit=None):
if views is None:
views = self.views
if limit is None:
limit = self.limit
for view in views:
index_size = view.index_size
offset = self.docs_per_day
# offset includes all data types if
# indexing entire data_set
if index_size == self.calc_total_doc_count():
offset = offset * len(self.get_data_sets())
expected_num_docs = index_size/2
expected_num_docs_offset = index_size/2 + offset
if limit and not view.reduce_fn:
expected_num_docs = min(expected_num_docs, limit)
expected_num_docs_offset = min(expected_num_docs_offset, limit)
view.queries += [QueryHelper({"start_key" : "[2008,7,null]"},
expected_num_docs),
QueryHelper({"start_key" : "[2008,0,1]",
"end_key" : "[2008,7,1]",
"inclusive_end" : "false"},
expected_num_docs),
QueryHelper({"start_key" : "[2008,0,1]",
"end_key" : "[2008,7,1]",
"inclusive_end" : "true"},
expected_num_docs_offset),
QueryHelper({"start_key" : "[2008,7,1]",
"end_key" : "[2008,1,1]",
"descending" : "true",
"inclusive_end" : "false"},
expected_num_docs),
QueryHelper({"start_key" : "[2008,7,1]",
"end_key" : "[2008,1,1]",
"descending" : "true",
"inclusive_end" : "true"},
expected_num_docs_offset),
QueryHelper({"start_key" : "[2008,1,1]",
"end_key" : "[2008,7,1]",
"descending" : "false",
"inclusive_end" : "false"},
expected_num_docs),
QueryHelper({"start_key" : "[2008,1,1]",
"end_key" : "[2008,7,1]",
"descending" : "false",
"inclusive_end" : "true"},
expected_num_docs_offset)]
if limit:
for query in view.queries:
query.params["limit"] = limit
def add_key_queries(self, views=None, limit=None):
if views is None:
views = self.views
if limit is None:
limit = self.limit
for view in views:
# offset includes all data types if
# indexing entire data_set
if view.index_size == self.calc_total_doc_count():
expected_num_docs = self.docs_per_day * len(self.get_data_sets())
else:
expected_num_docs = self.docs_per_day
if limit:
if not view.reduce_fn:
expected_num_docs = min(expected_num_docs, limit)
view.queries += [QueryHelper({"key" : "[2008,7,1]", "limit" : limit},
expected_num_docs)]
else:
view.queries += [QueryHelper({"key" : "[2008,7,1]"},
expected_num_docs)]
def add_all_docs_queries(self, views = None):
if views is None:
views = []
# only select views that will index entire dataset
# and do not have a reduce function
# if user doesn't override
for view in self.views:
if view.index_size == self.calc_total_doc_count():
if view.reduce_fn is None:
views.append(view)
for view in views:
index_size = view.index_size
section_size = index_size/len(self.get_data_sets())
view.queries += [QueryHelper({"start_key": '"arch0000-2008_10_01"'},
index_size - section_size - self.days*9),
QueryHelper({"start_key" : '"ui0000-2008_10_01"'},
index_size - section_size*2 - self.days*9),
QueryHelper({"start_key" : '"arch0000-2008_10_01"',
"end_key" : '"ui0000-2008_10_01"',
"inclusive_end" : "false"},
index_size - section_size*2)]
# test design docs are included when start_key not specified
# TODO: cannot verify this query unless we store view names in
# doc_id_map
#QueryHelper({"end_key" : '"ui0000-2008_10_01"',
# "inclusive_end" : "false"},
# index_size - section_size + 9*self.days + len(self.views))]
# set all_docs flag
for query in view.queries:
query.type_ = "all_docs"
"""
Create queries for testing docids on duplicate start_key result ids.
Only pass in view that indexes all employee types as they are
expected in the query params...i.e (ui,admin,arch)
"""
def add_startkey_endkey_docid_queries(self, views=None, limit=None):
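# e.g. all three doc types emit the same key [2008,7,1], so a startkey_docid such
# as "arch0000-2008_07_01" tells the view where to start inside that run of
# duplicate keys, which is why the expected counts below shift by multiples of
# docs_per_day.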
if limit is None:
limit = self.limit
if views is None:
views = []
# only select views that will index entire dataset
# if user doesn't override
for view in self.views:
if view.index_size == self.calc_total_doc_count():
views.append(view)
for view in views:
index_size = view.index_size
offset = self.docs_per_day
# pre-calculating expected key size of query between
# [2008,2,20] and [2008,7,1] with descending set
# based on dataset
all_docs_per_day = len(self.get_data_sets()) * offset
complex_query_key_count = 9*all_docs_per_day + 4*self.days*all_docs_per_day \
+ all_docs_per_day
if limit:
view.queries += [QueryHelper(
{"start_key" : "[2008,7,1]",
"startkey_docid" : "arch0000-2008_07_01",
"limit" : limit}, view.reduce_fn and index_size/2 - offset or min(limit, index_size/2 - offset)),
QueryHelper(
{"start_key" : "[2008,7,1]",
"startkey_docid" : "arch0000-2008_07_01",
"descending" : "false",
"limit" : limit}, view.reduce_fn and index_size/2 - offset or min(limit, index_size/2 - offset)),
QueryHelper(
{"start_key" : "[2008,7,1]",
"startkey_docid" : "arch0000-2008_07_01",
"descending" : "true",
"limit" : limit}, view.reduce_fn and index_size/2 + offset + 1 or min(limit, index_size/2 + offset + 1)),
QueryHelper({"start_key" : "[2008,7,1]",
"startkey_docid" : "ui0000-2008_07_01",
"limit" : limit}, view.reduce_fn and index_size/2 - offset*2 or min(limit, index_size/2 - offset*2)),
QueryHelper({"start_key" : "[2008,7,1]",
"startkey_docid" : "ui0000-2008_07_01",
"descending" : "false",
"limit" : limit}, view.reduce_fn and index_size/2 - offset*2 or min(limit, index_size/2 - offset*2)),
QueryHelper({"start_key" : "[2008,7,1]",
"startkey_docid" : "ui0000-2008_07_01",
"descending" : "true",
"limit" : limit}, view.reduce_fn and index_size/2 + offset*2 +1 or min(limit, index_size/2 + offset*2 + 1)),
# +endkey_docid
QueryHelper({"start_key" : "[2008,0,1]",
"end_key" : "[2008,7,1]",
"endkey_docid" : "arch0000-2008_07_01",
"inclusive_end" : "false",
"limit" : limit}, view.reduce_fn and index_size/2 + offset or min(limit, index_size/2 + offset)),
QueryHelper({"end_key" : "[2008,7,1]",
"endkey_docid" : "ui0000-2008_07_01",
"inclusive_end" : "false",
"limit" : limit}, view.reduce_fn and index_size/2 + offset*2 or min(limit, index_size/2 + offset*2)),
# + inclusive_end
QueryHelper({"end_key" : "[2008,7,1]",
"endkey_docid" : "arch0000-2008_07_01",
"inclusive_end" : "true",
"limit" : limit}, view.reduce_fn and index_size/2 + offset + 1 or min(limit, index_size/2 + offset + 1)),
# + single bounded and descending
QueryHelper({"start_key" : "[2008,7,1]",
"end_key" : "[2008,2,20]",
"endkey_docid" : "ui0000-2008_02_20",
"descending" : "true",
"inclusive_end" : "true",
"limit" : limit}, view.reduce_fn and complex_query_key_count - offset * 2 or min(limit, complex_query_key_count - offset * 2)),
QueryHelper({"start_key" : "[2008,7,1]",
"end_key" : "[2008,2,20]",
"endkey_docid" : "arch0000-2008_02_20",
"descending" : "true",
"inclusive_end" : "false",
"limit" : limit}, view.reduce_fn and complex_query_key_count - offset - 1 or min(limit, complex_query_key_count - offset - 1)),
# + double bounded and descending
QueryHelper({"start_key" : "[2008,7,1]",
"startkey_docid" : "admin0000-2008_07_01",
"end_key" : "[2008,2,20]",
"endkey_docid" : "arch0000-2008_02_20",
"descending" : "true",
"inclusive_end" : "false",
"limit" : limit},
view.reduce_fn and complex_query_key_count - offset - all_docs_per_day or min(limit, complex_query_key_count - offset - all_docs_per_day))]
else:
view.queries += [QueryHelper(
{"start_key" : "[2008,7,1]",
"startkey_docid" : "arch0000-2008_07_01"}, index_size/2 - offset),
QueryHelper({"start_key" : "[2008,7,1]",
"startkey_docid" : "ui0000-2008_07_01"}, index_size/2 - offset*2),
# +endkey_docid
QueryHelper({"start_key" : "[2008,0,1]",
"end_key" : "[2008,7,1]",
"endkey_docid" : "arch0000-2008_07_01",
"inclusive_end" : "false"}, index_size/2 + offset),
QueryHelper({"end_key" : "[2008,7,1]",
"endkey_docid" : "ui0000-2008_07_01",
"inclusive_end" : "false"}, index_size/2 + offset*2),
# + inclusive_end
QueryHelper({"end_key" : "[2008,7,1]",
"endkey_docid" : "arch0000-2008_07_01",
"inclusive_end" : "true"}, index_size/2 + offset + 1),
# + single bounded and descending
QueryHelper({"start_key" : "[2008,7,1]",
"end_key" : "[2008,2,20]",
"endkey_docid" : "ui0000-2008_02_20",
"descending" : "true",
"inclusive_end" : "true"}, complex_query_key_count - offset * 2),
QueryHelper({"start_key" : "[2008,7,1]",
"end_key" : "[2008,2,20]",
"endkey_docid" : "arch0000-2008_02_20",
"descending" : "true",
"inclusive_end" : "false"}, complex_query_key_count - offset - 1),
# + double bounded and descending
QueryHelper({"start_key" : "[2008,7,1]",
"startkey_docid" : "admin0000-2008_07_01",
"end_key" : "[2008,2,20]",
"endkey_docid" : "arch0000-2008_02_20",
"descending" : "true",
"inclusive_end" : "false"},
complex_query_key_count - offset - all_docs_per_day)]
def add_stale_queries(self, views=None, limit=None):
if views is None:
views = self.views
if limit is None:
limit = self.limit
for view in views:
if limit:
view.queries += [QueryHelper({"stale" : "false", "limit" : limit}, min(limit, view.index_size)),
QueryHelper({"stale" : "ok", "limit" : limit}, min(limit, view.index_size)),
QueryHelper({"stale" : "update_after", "limit" : limit}, min(limit, view.index_size))]
else:
view.queries += [QueryHelper({"stale" : "false"}, view.index_size),
QueryHelper({"stale" : "ok"}, view.index_size),
QueryHelper({"stale" : "update_after"}, view.index_size)]
"""
group queries should only be added to views with reduce functions.
in this particular case, the _count function is expected.
if no specific views are passed in, we'll just figure it out.
verification requires that the expected number of groups generated
by the query is provided. each group will generate a value that
when summed, should add up to the number of indexed docs
"""
def add_group_count_queries(self, views=None, limit=None):
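# the map functions emit [join_yr, join_mo, join_day] as the key, so group_level=1
# groups by year, group_level=2 by [year, month], and group_level=3 by
# [year, month, day].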
if views is None:
views = [self.views[-1]]
if limit is None:
limit = self.limit
for view in views:
if limit:
view.queries += [QueryHelper({"group" : "true", "limit" : limit},
min(limit,view.index_size),
min(limit, self.years * self.months * self.days)),
QueryHelper({"group_level" : "1", "limit" : limit},
min(limit,view.index_size),
min(limit, self.years)),
QueryHelper({"group_level" : "2", "limit" : limit},
min(limit,view.index_size),
min(limit, self.years * self.months)),
QueryHelper({"group_level" : "3", "limit" : limit},
min(limit,view.index_size),
min(limit, self.years * self.months * self.days))]
else:
view.queries += [QueryHelper({"group" : "true"}, view.index_size,
self.years * self.months * self.days),
QueryHelper({"group_level" : "1"}, view.index_size,
self.years),
QueryHelper({"group_level" : "2"}, view.index_size,
self.years * self.months),
QueryHelper({"group_level" : "3"}, view.index_size,
self.years * self.months * self.days)]
def add_all_query_sets(self, views=None, limit=None):
self.add_stale_queries(views, limit)
self.add_startkey_endkey_queries(views, limit)
self.add_startkey_endkey_docid_queries(views, limit)
self.add_group_count_queries(views)
# views for this dataset
def create_views(self, rest):
vfn1 = 'function (doc) { if(doc.job_title !== undefined) { var myregexp = new RegExp("^UI "); if(doc.job_title.match(myregexp)){ emit([doc.join_yr, doc.join_mo, doc.join_day], [doc.name, doc.email] );}}}'
vfn2 = 'function (doc) { if(doc.job_title !== undefined) { var myregexp = new RegExp("^System "); if(doc.job_title.match(myregexp)){ emit([doc.join_yr, doc.join_mo, doc.join_day], [doc.name, doc.email] );}}}'
vfn3 = 'function (doc) { if(doc.job_title !== undefined) { var myregexp = new RegExp("^Senior "); if(doc.job_title.match(myregexp)){ emit([doc.join_yr, doc.join_mo, doc.join_day], [doc.name, doc.email] );}}}'
vfn4 = 'function (doc) { if(doc.job_title !== undefined) emit([doc.join_yr, doc.join_mo, doc.join_day], [doc.name, doc.email] ); }'
full_index_size = self.calc_total_doc_count()
partial_index_size = full_index_size/3
return [QueryView(rest, full_index_size, fn_str = vfn4),
QueryView(rest, partial_index_size, fn_str = vfn1, type_filter = "ui"),
QueryView(rest, partial_index_size, fn_str = vfn2, type_filter = "admin"),
QueryView(rest, partial_index_size, fn_str = vfn3, type_filter = "arch"),
QueryView(rest, full_index_size, fn_str = vfn4, reduce_fn="_count")]
def get_data_sets(self):
return [self.sys_admin_info, self.ui_eng_info, self.senior_arch_info]
def load(self, tc, view, verify_docs_loaded = True):
data_threads = []
for info in self.get_data_sets():
self.doc_id_map.update({info['type'] : {"years" : self._doc_map_array()}})
t = Thread(target=self._iterative_load,
name="iterative_load",
args=(info, tc, view, self.docs_per_day, verify_docs_loaded))
data_threads.append(t)
t.start()
for t in data_threads:
t.join()
self.preload_matching_query_keys()
# create new array with a None item at index 0 for
# doc_map_id which is used for '1' based lookups
def _doc_map_array(self):
array_ = []
array_.append(None)
return array_
def _iterative_load(self, info, tc, view, loads_per_iteration, verify_docs_loaded):
try:
smart = VBucketAwareMemcached(self.rest, self.bucket)
for i in range(1,self.years + 1):
self.doc_id_map[info['type']]['years'].append(i)
self.doc_id_map[info['type']]['years'][i] =\
{ "months" : self._doc_map_array()}
for j in range(1, self.months + 1):
self.doc_id_map[info['type']]['years'][i]['months'].append(j)
self.doc_id_map[info['type']]['years'][i]['months'][j] =\
{"days" : self._doc_map_array()}
doc_sets = []
for k in range(1, self.days + 1):
self.doc_id_map[info['type']]['years'][i]['months'][j]['days'].append(k)
self.doc_id_map[info['type']]['years'][i]['months'][j]['days'][k]=\
{"docs" : []}
kv_template = {"name": "employee-${prefix}-${seed}",
"join_yr" : 2007+i, "join_mo" : j, "join_day" : k,
"email": "${prefix}@couchbase.com",
"job_title" : info["title"].encode("utf-8","ignore"),
"type" : info["type"].encode("utf-8","ignore"),
"desc" : info["desc"].encode("utf-8", "ignore")}
options = {"size": 256, "seed": str(uuid.uuid4())[:7]}
docs = DocumentGenerator.make_docs(loads_per_iteration, kv_template, options)
doc_sets.append(docs)
# load docs
self._load_chunk(smart, doc_sets)
except Exception as ex:
view.results.addError(tc, sys.exc_info())
raise ex
def _load_chunk(self, smart, doc_sets):
doc_ids = []
for docs in doc_sets:
idx = 0
for value in docs:
value = value.encode("utf-8", "ignore")
json_map = json.loads(value, encoding="utf-8")
type_ = json_map["type"]
year = json_map["join_yr"]
month = json_map["join_mo"]
day = json_map["join_day"]
doc_id = "{0}{1}-{2}_{3}_{4}".format(type_,
str(idx).zfill(4),
year,
str(month).rjust(2,'0'),
str(day).rjust(2,'0'))
del json_map["_id"]
smart.memcached(doc_id).set(doc_id, 0, 0, json.dumps(json_map))
doc_ids.append(doc_id)
# update doc_id map
self.doc_id_map[type_]['years'][year-2007]\
['months'][month]['days'][day]['docs'].append(doc_id)
idx += 1
return doc_ids
def preload_matching_query_keys(self):
# get all queries defined in this data_set
for v in self.views:
for q in v.queries:
self._preload_matching_query_keys(q, v.type_filter)
def _preload_matching_query_keys(self, query, type_filter = None):
inclusive_end = True
descending = False
q_start_yr = 1
q_start_mo = 1
q_start_day = 1
q_end_yr = self.years
q_end_mo = self.months
q_end_day = self.days
q_params = copy.deepcopy(query.params)
if query.type_ == "all_docs":
if 'start_key' in q_params:
q_params['startkey_docid'] = q_params['start_key']
del q_params['start_key']
if 'end_key' in q_params:
q_params['endkey_docid'] = q_params['end_key']
del q_params['end_key']
if 'start_key' in q_params:
params = json.loads(q_params['start_key'])
if params[0]:
q_start_yr = params[0] - 2007
if params[1]:
q_start_mo = params[1]
if params[2]:
q_start_day = params[2]
if 'end_key' in q_params:
params = json.loads(q_params['end_key'])
if params[0]:
q_end_yr = params[0] - 2007
if params[1]:
q_end_mo = params[1]
if params[2]:
q_end_day = params[2]
if 'descending' in q_params:
descending = json.loads(q_params['descending'])
if descending == True:
q_start_yr, q_end_yr = q_end_yr, q_start_yr
q_start_mo, q_end_mo = q_end_mo, q_start_mo
q_start_day, q_end_day = q_end_day, q_start_day
# note: inclusive end check must occur after descending
if 'inclusive_end' in q_params:
inclusive_end = json.loads(q_params['inclusive_end'])
if inclusive_end == False and 'endkey_docid' not in q_params:
if descending == False:
# decrement end_key
if q_end_day <= 1:
q_end_mo -= 1
q_end_day = 28
else:
q_end_day -= 1
else:
# increment start_key
if q_start_day == 28:
q_start_mo += 1
q_start_day = 1
else:
q_start_day += 1
if type_filter is None:
types = [type_ for type_ in self.doc_id_map]
else:
types = [type_filter]
type_idx = 0
for doc_type in types:
for years in self.doc_id_map[doc_type]['years'][q_start_yr:q_end_yr + 1]:
mo_idx = 1
days_skipped_offset = 0
for months in years['months'][q_start_mo:q_end_mo + 1]:
# at end month only include up to N days
if (mo_idx + q_start_mo - 1) == q_end_mo:
mo_days = months['days'][1:q_end_day + 1]
else:
if mo_idx == 1:
# at beginning of month skip first N docs
mo_days = months['days'][q_start_day:]
if q_start_day > 1:
days_skipped_offset = (q_start_day)*self.docs_per_day*(type_idx + 1) \
- 2*self.docs_per_day
else:
mo_days = months['days'][1:]
day_idx = 0
for days in mo_days:
# insert expected keys for query
doc_idx = 0
for id in days['docs']:
# order insertion according to view collation algorithm
# so that we can do easy comparison and docid matches later
# TODO: require pyicu package in testrunner and sort
if type_idx > 0:
day_offset = (type_idx)*self.docs_per_day
month_offset = 0
if day_idx > 0:
day_offset = day_offset*(day_idx + 1) + self.docs_per_day*(day_idx)
day_offset += doc_idx
if mo_idx > 1:
month_offset = 2*(mo_idx - 1)*self.docs_per_day*self.days\
+ self.docs_per_day*self.days*(type_idx - 1)*(mo_idx - 1)\
- days_skipped_offset
ins_pos = day_offset + month_offset
# add record to expected output
query.expected_keys.insert(ins_pos, id)
else:
query.expected_keys.append(id)
doc_idx += 1
day_idx += 1
mo_idx += 1
type_idx += 1
if query.type_ == "all_docs":
# use ascii sort
query.expected_keys.sort()
if descending == True:
query.expected_keys.reverse()
if 'startkey_docid' in q_params:
startkey_docid = q_params['startkey_docid'].strip("\"")
try:
start_idx = query.expected_keys.index(startkey_docid)
query.expected_keys = query.expected_keys[start_idx:]
except ValueError:
pass
if 'endkey_docid' in q_params:
endkey_docid = q_params['endkey_docid'].strip("\"")
try:
end_idx = query.expected_keys.index(endkey_docid)
query.expected_keys = query.expected_keys[:end_idx + 1]
except ValueError:
pass
if inclusive_end == False:
query.expected_keys = query.expected_keys[:-1]
class SimpleDataSet:
def __init__(self, rest, num_docs, limit = None, reduce_fn=None):
self.num_docs = num_docs
self.views = self.create_views(rest,reduce = reduce_fn)
self.name = "simple_dataset"
self.kv_store = ClientKeyValueStore()
self.kv_template = {"name": "doc-${prefix}-${seed}-", "age": "${prefix}"}
self.limit = limit
self.reduce_fn = reduce_fn
def create_views(self, rest, reduce=None):
view_fn = 'function (doc) {if(doc.age !== undefined) { emit(doc.age, doc.name);}}'
return [QueryView(rest, self.num_docs, fn_str=view_fn, reduce_fn=reduce)]
def load(self, tc, view, verify_docs_loaded = True):
doc_names = ViewBaseTests._load_docs(tc, self.num_docs, view.prefix, verify_docs_loaded)
return doc_names
def load_with_tm(self, task_manager, rest,
bucket = "default",
seed = None,
monitor = False):
return \
DataLoadHelper.add_doc_gen_task(task_manager, rest,
self.num_docs,
bucket = bucket,
kv_template = self.kv_template,
kv_store = self.kv_store,
seed = seed,
monitor = monitor)
def add_negative_query(self, query_param, value, error, views=None):
views = views or self.views
for view in views:
view.queries += [QueryHelper({query_param : value}, None, error=error)]
def add_include_docs_queries(self, views=None, limit=None):
views = views or self.views
if limit is None:
limit = self.limit
for view in views:
if limit is not None:
view.queries += [QueryHelper({"include_docs" : "true", "limit" : limit}, limit>view.index_size and limit or view.index_size)]
else:
view.queries += [QueryHelper({"include_docs" : "true"}, view.index_size)]
def add_startkey_endkey_queries(self, views=None, limit=None):
if views is None:
views = self.views
for view in views:
start_key = view.index_size/2
end_key = view.index_size - 1000
if limit is None:
limit = self.limit
if limit is not None:
view.queries += [QueryHelper({"start_key" : end_key,
"end_key" : start_key,
"descending" : "true",
"limit" : str(limit)},
min(limit, end_key - start_key + 1)),
QueryHelper({"start_key" : end_key,
"end_key" : start_key,
"descending" : "true",
"limit" : str(limit)},
min(limit, end_key - start_key + 1)),
QueryHelper({"end_key" : end_key,
"limit" : str(limit)},
min(limit, end_key + 1)),
QueryHelper({"end_key" : end_key,
"inclusive_end" : "false",
"limit" : limit}, min(limit, end_key)),
QueryHelper({"start_key" : start_key,
"limit" : str(limit)},
min(limit, view.index_size - start_key))]
else:
view.queries += [QueryHelper({"start_key" : end_key,
"end_key" : start_key,
"descending" : "true"},
end_key - start_key + 1),
QueryHelper({"start_key" : end_key,
"end_key" : start_key,
"descending" : "true"},
end_key - start_key + 1),
QueryHelper({"end_key" : end_key},
end_key + 1),
QueryHelper({"end_key" : end_key,
"inclusive_end" : "false"}, end_key),
QueryHelper({"start_key" : start_key},
view.index_size - start_key)]
def add_stale_queries(self, views = None, limit= None):
if views is None:
views = self.views
for view in views:
index_size = view.index_size
if limit is None:
limit = self.limit
limit = limit < index_size and limit or index_size
view.queries += [QueryHelper({"stale" : "false" , "limit" : str(limit)}, limit),
QueryHelper({"stale" : "ok" , "limit" : str(limit)}, limit),
QueryHelper({"stale" : "update_after" , "limit" : str(limit)}, limit)]
def add_reduce_queries(self, views = None, limit= None):
if views is None:
views = self.views
for view in views:
index_size = view.index_size
if limit is None:
limit = self.limit
view.queries += [QueryHelper({"reduce" : "false" , "limit" : str(limit)}, min(limit, index_size)),
QueryHelper({"reduce" : "true" , "limit" : str(limit)}, min(limit, index_size))]
def add_all_query_sets(self, views=None, limit=None):
self.add_startkey_endkey_queries(views, limit)
self.add_stale_queries(views, limit)
class DataLoadHelper:
@staticmethod
def add_doc_gen_task(tm, rest, count, bucket = "default",
kv_store = None, store_enabled = True,
kv_template = None, seed = None,
sizes = None, expiration = None,
loop = False, monitor = False,
doc_generators = None):
doc_generators = doc_generators or \
DocumentGenerator.get_doc_generators(count, kv_template, seed, sizes)
t = task.LoadDocGeneratorTask(rest, doc_generators, bucket, kv_store,
store_enabled, expiration, loop)
tm.schedule(t)
if monitor:
return t.result()
return t
class QueryHelper:
def __init__(self, params,
expected_num_docs,
expected_num_groups = 1,
type_ = "view",
error=None):
self.params = params
# number of docs this query should return
self.expected_num_docs = expected_num_docs
self.expected_num_groups = expected_num_groups
self.type_ = type_ # "view" or "all_docs"
self.expected_keys = []
self.error = error
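# e.g. QueryHelper({"stale": "false", "limit": "10"}, 10) pairs one set of view
# query params with the number of rows the test expects that query to return.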
# less open clients
@staticmethod
def verify_query_keys(rest, query, results, bucket = "default", num_verified_docs = 20, limit=None):
failures = []
if(len(query.expected_keys) == 0):
return failures
kv_client = KVStoreAwareSmartClient(rest, bucket)
ids=[str(doc['id']) for doc in results['rows']]
couch_set = set(ids)
expected_set = limit and set(query.expected_keys[:limit]) or set(query.expected_keys)
missing_item_set = expected_set - couch_set
extra_item_set = couch_set - expected_set
# treat duplicate doc_ids as extra_items
if len(ids)!=len(couch_set):
for id_ in couch_set:
if ids.count(id_) > 1:
extra_item_set.add(id_)
if(len(extra_item_set) > 0 ):
# report unexpected/duplicate documents
copy_ids = copy.deepcopy(ids)
for doc_id in extra_item_set:
for id_count in range(copy_ids.count(doc_id)):
ex_doc_idx = copy_ids.index(doc_id)
ex_doc_row = results['rows'][ex_doc_idx + id_count]
failures.append("extra documents detected in result: %s " % (ex_doc_row))
copy_ids.pop(ex_doc_idx)
if(len(missing_item_set) > 0):
# debug missing documents
for doc_id in list(missing_item_set)[:num_verified_docs]:
# attempt to retrieve doc from memcached
mc_item = kv_client.mc_get_full(doc_id)
if mc_item == None:
failures.append("document %s missing from memcached" % (doc_id))
# attempt to retrieve doc from disk
else:
num_vbuckets = len(rest.get_vbuckets(bucket))
doc_meta = kv_client.get_doc_metadata(num_vbuckets, doc_id)
if(doc_meta != None):
if (doc_meta['key_valid'] != 'valid'):
msg = "Error expected in results for key with invalid state %s" % doc_meta
failures.append(msg)
else:
msg = "query doc_id: %s doesn't exist in bucket: %s" % \
(doc_id, bucket)
failures.append(msg)
if(len(failures) == 0):
msg = "view engine failed to index doc: %s \n query: %s" % (doc_id, query.params)
failures.append(msg)
return failures
@staticmethod
def verify_query_values(rest, query, results, bucket = "default"):
failures = []
kv_client = KVStoreAwareSmartClient(rest, bucket)
if('include_docs' in query.params):
docs = [row['doc'] for row in results['rows']]
# retrieve doc from view result and compare with memcached
for view_doc in docs:
doc_id = str(view_doc['_id'])
mc_item = kv_client.mc_get_full(doc_id)
if mc_item is not None:
mc_doc = json.loads(mc_item["value"])
# compare doc content
for key in mc_doc.keys():
if(mc_doc[key] != view_doc[key]):
err_msg =\
"error verifying document id %s: retrieved value %s expected %s \n" % \
(doc_id, mc_doc[key], view_doc[key])
failures.append(err_msg)
else:
failures.append("doc_id %s could not be retrieved for verification \n" % doc_id)
else:
failures.append("cannot verify view result values without include_docs filter \n")
return failures
| ["[email protected]"] | |
ba5ea63f191283a9688c147dff9cfe8c6b3cf2a8 | 668188f5368680567be8c4af55a731e45a2380ba | /util_fix_pct_party.py | e6921735cd7015ca3905a22159709c6fda0e8daf | []
| no_license | wrishel/Ballot_tally | ec6ff128e61d6ebfe91574c9b55e083849665502 | 3e4ed8c4fe0503ead9b55fac77d3cfcd97c73c41 | refs/heads/master | 2023-06-24T17:35:51.537643 | 2021-07-28T18:38:32 | 2021-07-28T18:38:32 | 387,261,878 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,189 | py |
#!/usr/bin/env python3
"""Reset the unnormalized precinct and party fields in the Images table
by reinterpreting the barcode_upper field.
"""
import dbase
import datetime
from ETP_util import fullpath_to_image, subpath_to_image
import GLB_globs
import etpconfig
import os
import sys
import time
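# NOTE: the barcode reader used below as hgbt.getBallotBarcodes(), the page_num()
# parser, and the stopflag interrupt flag are referenced but not defined or
# imported in this file; they presumably come from elsewhere in the project.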
TESTING = True
pid = os.getpid()
if __name__ == '__main__':
GLB = GLB_globs.get()
config = GLB.config
PATH_TO_IMAGES = config['Election']['pathtoimages']
tot_processed = 0
start_time = datetime.datetime.now()
if TESTING:
db = dbase.ETPdb()
db.connect('testing')
else:
db = dbase.ETPdb()
db.connect('production')
decode_bc = dict()
for row in db.get_barcodes():
assert row.barcode not in decode_bc
decode_bc[row.barcode] = (row.precinct_id, row.party_id)
tries = 0
# get a list of rows to fix
#
rows_to_fix = db.get_images_for_barcode(pid, 10) # batch of 10
fixes = [] # tuples of (precinct, pagenum, party_id, barcodes[0], barcodes[1], image_num)
last_img_num = None
for row in rows_to_fix:
image_num = row.image_number
last_img_num = image_num
pth = fullpath_to_image(image_num)
precinct = None
party_id = None
pagenum = None
# outputs
# Possible errors precinct party page
# -------- ----- ----
# 1) no file for this number MISSING MISSING MSG
# 2) upper barcode missing UNKNOWN UNKNOWN --
# 3) lower barcode missing -- -- UNK
# 4) upper barcode doesn't UNREC UNREC --
# translate
try:
barcodes = hgbt.getBallotBarcodes(pth)
except IOError as e:
print(f'{e}', file=sys.stderr)
barcodes = (None, None)
precinct = party_id = 'MISSNG'
pagenum = 'MSG'
else:
pagenum = page_num(barcodes[1]) # may be None
if pagenum is None: pagenum = 'UNK'
if barcodes[0] is None:
precinct = party_id = 'UNKNOWN'
else:
try:
(precinct, party_id) = decode_bc[barcodes[0]]
except KeyError:
# print(image_num, 'Unknown', barcode)
precinct = party_id = 'UNREC'
fixes.append((precinct, pagenum, party_id, barcodes[0], barcodes[1], image_num))
time.sleep(.15) # avoid starving the UI
tot_processed += len(fixes)
if len(fixes) != 0:
print(pid, 'processed', tot_processed, last_img_num,
datetime.datetime.now() - start_time)
db.update_unscanned_images(fixes)
if stopflag:
t = datetime.datetime.now() - start_time
print(f'===> pid {pid} exiting after interrupt, total processed={tot_processed}, time={t}')
exit(0)
# t = .20 starves the UI in fast simulation. Probably not in operation
# if len(fixes) != 0:
# t = 1
# print(f'{pid} dozing')
# else:
t = .25
time.sleep(t) # give other processes a chance
| ["[email protected]"] | |
801805d1bb72e7d65eb1a65e54954bc3e43ec6b5 | 31aaa9c750827e534d53a9cccd93b14034c740d8 | /torchx/specs/test/file_linter_test.py | 104b273377e5141525858cce307071cd1f0076ba | [
"BSD-3-Clause"
]
| permissive | gaocegege/torchx | 1648e06d951f11eac26817fbcb7919fbc8932094 | 6e85e9fc023083447ce261d01e16ec0a0799f598 | refs/heads/master | 2023-06-27T21:33:48.776638 | 2021-07-28T22:27:12 | 2021-07-28T22:28:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,017 | py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import ast
import os
import unittest
from typing import Dict, List, Optional, cast
from pyre_extensions import none_throws
from torchx.specs.file_linter import get_fn_docstring, parse_fn_docstring, validate
# Note if the function is moved, the tests need to be updated with new lineno
# pyre-ignore[11]: Ignore unknown type "AppDef"
def _test_empty_fn() -> "AppDef":
pass
# Note if the function is moved, the tests need to be updated with new lineno
# pyre-ignore[3]: Omit return value for testing purposes
def _test_fn_no_return():
"""
Function description
"""
pass
def _test_fn_return_int() -> int:
"""
Function description
"""
return 0
def _test_docstring_empty(arg: str) -> "AppDef":
""" """
pass
def _test_docstring_func_desc() -> "AppDef":
"""
Function description
"""
pass
def _test_docstring_no_args(arg: str) -> "AppDef":
"""
Test description
"""
pass
def _test_docstring_correct(arg0: str, arg1: int, arg2: Dict[int, str]) -> "AppDef":
"""Short Test description
Long funct description
Args:
arg0: arg0 desc
arg1: arg1 desc
arg2: arg2 desc
"""
pass
# pyre-ignore[2]: Omit return value for testing purposes
def _test_args_no_type_defs(arg0, arg1, arg2: Dict[int, str]) -> "AppDef":
"""
Test description
Args:
arg0: arg0 desc
arg1: arg1 desc
arg2: arg2 desc
"""
pass
def _test_args_dict_list_complex_types(
# pyre-ignore[2]: Omit return value for testing purposes
arg0,
# pyre-ignore[2]: Omit return value for testing purposes
arg1,
arg2: Dict[int, List[str]],
arg3: List[List[str]],
arg4: Optional[Optional[str]],
) -> "AppDef":
"""
Test description
Args:
arg0: arg0 desc
arg1: arg1 desc
arg2: arg2 desc
arg3: arg2 desc
"""
pass
def current_file_path() -> str:
return os.path.join(os.path.dirname(__file__), __file__)
class SpecsFileValidatorTest(unittest.TestCase):
def setUp(self) -> None:
self._path = current_file_path()
with open(self._path, "r") as fp:
source = fp.read()
self._file_content = source
def test_validate_docstring_func_desc(self) -> None:
linter_errors = validate(
self._file_content, self._path, torchx_function="_test_docstring_func_desc"
)
self.assertEqual(0, len(linter_errors))
def test_validate_no_return(self) -> None:
linter_errors = validate(
self._file_content, self._path, torchx_function="_test_fn_no_return"
)
self.assertEqual(1, len(linter_errors))
expected_desc = (
"Function: _test_fn_no_return missing return annotation or "
"has unknown annotations. Supported return annotation: AppDef"
)
self.assertEqual(expected_desc, linter_errors[0].description)
def test_validate_incorrect_return(self) -> None:
linter_errors = validate(
self._file_content, self._path, torchx_function="_test_fn_return_int"
)
self.assertEqual(1, len(linter_errors))
expected_desc = (
"Function: _test_fn_return_int has incorrect return annotation, "
"supported annotation: AppDef"
)
self.assertEqual(expected_desc, linter_errors[0].description)
def test_validate_empty_fn(self) -> None:
linter_errors = validate(
self._file_content, self._path, torchx_function="_test_empty_fn"
)
self.assertEqual(1, len(linter_errors))
linter_error = linter_errors[0]
self.assertEqual("TorchxFunctionValidator", linter_error.name)
expected_desc = (
"`_test_empty_fn` is missing a Google Style docstring, please add one. "
"For more information on the docstring format see: "
"https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html"
)
self.assertEqual(expected_desc, linter_error.description)
self.assertEqual(18, linter_error.line)
def test_validate_docstring_empty(self) -> None:
linter_errors = validate(
self._file_content, self._path, torchx_function="_test_docstring_empty"
)
self.assertEqual(1, len(linter_errors))
linter_error = linter_errors[0]
self.assertEqual("TorchxFunctionValidator", linter_error.name)
expected_desc = (
"`_test_docstring_empty` is missing a Google Style docstring, please add one. "
"For more information on the docstring format see: "
"https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html"
)
self.assertEqual(expected_desc, linter_error.description)
def test_validate_docstring_no_args(self) -> None:
linter_errors = validate(
self._file_content, self._path, torchx_function="_test_docstring_no_args"
)
self.assertEqual(1, len(linter_errors))
linter_error = linter_errors[0]
self.assertEqual("TorchxFunctionValidator", linter_error.name)
expected_desc = (
"`_test_docstring_no_args` not all function arguments"
" are present in the docstring. Missing args: ['arg']"
)
self.assertEqual(expected_desc, linter_error.description)
def test_validate_docstring_correct(self) -> None:
linter_errors = validate(
self._file_content, self._path, torchx_function="_test_docstring_correct"
)
self.assertEqual(0, len(linter_errors))
def test_validate_args_no_type_defs(self) -> None:
linter_errors = validate(
self._file_content, self._path, torchx_function="_test_args_no_type_defs"
)
print(linter_errors)
self.assertEqual(2, len(linter_errors))
self.assertEqual(
"Arg arg0 missing type annotation", linter_errors[0].description
)
self.assertEqual(
"Arg arg1 missing type annotation", linter_errors[1].description
)
def test_validate_args_no_type_defs_complex(self) -> None:
linter_errors = validate(
self._file_content,
self._path,
torchx_function="_test_args_dict_list_complex_types",
)
self.assertEqual(6, len(linter_errors))
expected_desc = (
"`_test_args_dict_list_complex_types` not all function arguments"
" are present in the docstring. Missing args: ['arg4']"
)
self.assertEqual(
expected_desc,
linter_errors[0].description,
)
self.assertEqual(
"Arg arg0 missing type annotation", linter_errors[1].description
)
self.assertEqual(
"Arg arg1 missing type annotation", linter_errors[2].description
)
self.assertEqual(
"Dict can only have primitive types", linter_errors[3].description
)
self.assertEqual(
"List can only have primitive types", linter_errors[4].description
)
self.assertEqual(
"`_test_args_dict_list_complex_types` allows only Dict, List as complex types.Argument `arg4` has: Optional",
linter_errors[5].description,
)
def _get_function_def(self, function_name: str) -> ast.FunctionDef:
module: ast.Module = ast.parse(self._file_content)
for expr in module.body:
if type(expr) == ast.FunctionDef:
func_def = cast(ast.FunctionDef, expr)
if func_def.name == function_name:
return func_def
raise RuntimeError(f"No function found: {function_name}")
def test_validate_docstring_full(self) -> None:
func_def = self._get_function_def("_test_docstring_correct")
docstring = none_throws(ast.get_docstring(func_def))
func_desc, param_desc = parse_fn_docstring(docstring)
self.assertEqual("Short Test description", func_desc)
self.assertEqual("arg0 desc", param_desc["arg0"])
self.assertEqual("arg1 desc", param_desc["arg1"])
self.assertEqual("arg2 desc", param_desc["arg2"])
def test_get_fn_docstring(self) -> None:
function_desc, _ = none_throws(
get_fn_docstring(self._file_content, "_test_args_dict_list_complex_types")
)
self.assertEqual("Test description", function_desc)
def test_unknown_function(self) -> None:
linter_errors = validate(
self._file_content, self._path, torchx_function="unknown_function"
)
self.assertEqual(1, len(linter_errors))
self.assertEqual(
"Function unknown_function not found", linter_errors[0].description
)
| [
"[email protected]"
]
| |
7f1435e4caee3329efb4e7ef57227350fbfdcdc6 | 40f92e067b7705acac1a01709fbef49b7e98af05 | /tutorial/L42更多的资源类型情况/2图片改进.py | cb1ef3d3a819cb7c108736cd55a653e47a384984 | []
| no_license | liuxiaoxiao666/python_study | bd2a2a6122cf316f3fa7ee6926675bc96deb773c | 9afe9ec7809529af05a971993d9ee17421364f76 | refs/heads/master | 2021-10-19T15:47:47.729728 | 2019-02-22T08:27:40 | 2019-02-22T08:27:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,958 | py | # coding:utf-8
# Case 2: fetching images from a purely static web page
# Solution: the requests package receives the binary data of the response; extract it and write it to a local file.
# Analyzing the page is a bit more work than plain static text: images usually reach the page through src attributes or a tags. Step 1: parse the page to get the URL of each image resource. Step 2: request that static resource URL to obtain the binary image data.
import requests
from lxml import etree
import os
import time
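# --- Added illustrative sketch --------------------------------------------------
# A minimal, self-contained sketch of the two-step approach described in the
# comments above: parse the page for image src URLs, then request each URL and
# write the raw bytes to disk. The list_url / out_dir values are placeholders
# (not part of the original tutorial), absolute src URLs are assumed, and the
# function is only defined here for reference -- it is never called below.
def _download_images_sketch(list_url='http://example.com/gallery.html', out_dir='.'):
    page = requests.get(list_url, timeout=10)      # step 1: fetch the static HTML page
    tree = etree.HTML(page.content.decode())
    src_list = tree.xpath('//img/@src')            # collect candidate image URLs
    for k, src in enumerate(src_list):
        img_resp = requests.get(src, timeout=10)   # step 2: fetch the binary image data
        if img_resp.status_code != 200:
            continue
        with open(os.path.join(out_dir, '{}.jpg'.format(k)), mode='wb') as fh:
            fh.write(img_resp.content)             # write the bytes straight to a local file
# ---------------------------------------------------------------------------------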
# Scenery category / paginated list
resp = requests.get('http://www.ivsky.com/tupian/ziranfengguang/index_2.html')
html_content = resp.content.decode()
print(html_content)
# Get the link of every gallery on the list page
basedir = os.path.dirname(__file__)
# html_content = """
# <body rel="art" data-id="49681"><div id="header"><div class="box"><div id="logo"><a href="http://www.ivsky.com">天堂图片网</a></div><ul id="menu"><li><a href="/">首页</a></li><li><a href="/tupian/" class="a_now">图片大全</a></li><li><a href="/bizhi/">桌面壁纸</a></li><li><a href="/Photo/" target="_blank">旧版</a></li></ul><div id="search"><div class="inp"><input type="text" id="ser_inp" class="ser_inp" value="找图,要善用搜索"></div><div class="inp-btn"><input type="submit" value="" id="ser_btn" class="ser_btn"></div></div><div id="login" style="display: block;">2018年12月24日<br> 周一 17:05:49</div></div><div class="hbg"></div></div><div class="box"><div id="alltop"><script>dy("alltop");</script></div><div id="tpimgtop1"><script>dy("tpimgtop1");</script><div style=""><iframe width="728" frameborder="0" height="90" scrolling="no" src="http://pos.baidu.com/s?hei=90&wid=728&di=u1729247&ltu=http%3A%2F%2Fwww.ivsky.com%2Ftupian%2Fyueliang_v49681%2F%3Ftdsourcetag%3Ds_pcqq_aiomsg&psi=3517a35963ad0d457ffc8be3083a3672&dai=1&par=1366x728&cce=true&pis=-1x-1&cmi=4&dc=3&ti=%E5%A4%9C%E7%A9%BA%E4%B8%AD%E7%9A%84%E6%9C%88%E4%BA%AE%E5%9B%BE%E7%89%87%2013%E5%BC%A0%20(%E5%A4%A9%E5%A0%82%E5%9B%BE%E7%89%87%E7%BD%91)&cfv=0&cja=false&psr=1366x768&exps=111000,110012&chi=1&ari=2&pcs=1349x657&ccd=24&drs=1&pss=1349x664&dis=0&ant=0&dri=0&tpr=1545639748905&tlm=1544580040&dtm=HTML_POST&cpl=3&tcn=1545639749&ps=92x174&cec=UTF-8&col=zh-CN&cdo=-1"></iframe></div><script type="text/javascript" src="http://a.lanrentuku.com/kfogunubgimhnlou.js"></script>
# # # </div><div id="tpimgtop2"><script>dy("tpimgtop2");</script><div style="width: 100%;"><iframe width="267" frameborder="0" height="90" scrolling="no" src="http://pos.baidu.com/s?hei=90&wid=267&di=u1628852&ltu=http%3A%2F%2Fwww.ivsky.com%2Ftupian%2Fyueliang_v49681%2F%3Ftdsourcetag%3Ds_pcqq_aiomsg&psi=3517a35963ad0d457ffc8be3083a3672&cpl=3&tcn=1545639749&cdo=-1&col=zh-CN&par=1366x728&dai=2&pss=1349x664&ccd=24&pcs=1349x657&dri=0&tpr=1545639748905&ti=%E5%A4%9C%E7%A9%BA%E4%B8%AD%E7%9A%84%E6%9C%88%E4%BA%AE%E5%9B%BE%E7%89%87%2013%E5%BC%A0%20(%E5%A4%A9%E5%A0%82%E5%9B%BE%E7%89%87%E7%BD%91)&chi=1&cfv=0&ps=92x907&dis=0&dtm=HTML_POST&ant=0&cmi=4&dc=3&cce=true&cja=false&tlm=1544580040&cec=UTF-8&psr=1366x768&ari=2&pis=-1x-1&drs=1&exps=111000,114011,110012"></iframe></div><script type="text/javascript" src="http://a.lanrentuku.com/kfoeuhhcuimhnlou.js"></script>
# # # </div></div><div class="box"><div class="pos"><a href="http://www.ivsky.com/">首页</a> > <a href="/tupian/">图片大全</a> > <a href="/tupian/ziranfengguang/">自然风光</a> > <a href="/tupian/yueliang_v49681/">夜空中的月亮图片</a> 13张 </div><div class="sort"><ul class="tpmenu tong"><li class="s1"><a href="/tupian/" title="图片大全 - 唯美图片 - 好看的图片">所有图片</a></li><li class="s2on active"><a href="/tupian/ziranfengguang/" title="自然风光图片 - 自然风景图片">自然风光</a></li><li class="s3"><a href="/tupian/chengshilvyou/" title="城市旅游图片 - 世界各国城市图片">城市旅游</a></li><li class="s4"><a href="/tupian/dongwutupian/" title="动物图片 - 宠物图片 - 野生动物图片">动物图片</a></li><li class="s5"><a href="/tupian/zhiwuhuahui/" title="植物花卉图片 - 花图片 - 花草图片 - 植物图片">植物花卉</a></li><li class="s6"><a href="/tupian/haiyangshijie/" title="海洋世界 - 海洋风光图片 - 海底世界图片">海洋世界</a></li><li class="s7"><a href="/tupian/renwutupian/" title="人物图片 - 人物图片大全">人物图片</a></li><li class="s8"><a href="/tupian/meishishijie/" title="美食图片 - 国外美食甜点图片 - 中国传统美食小吃图片">美食世界</a></li><li class="s9"><a href="/tupian/wupin/" title="物品物件大全 - 生活用品 - 学习用品图片">物品物件</a></li><li class="s10"><a href="/tupian/yundongtiyu/" title="运动图片 - 体育、休闲、竞赛图片">运动体育</a></li><li class="s11"><a href="/tupian/jiaotongyunshu/" title="公路铁路运输 - 桥梁码头图片 - 车船飞机图片">交通运输</a></li><li class="s12"><a href="/tupian/jianzhuhuanjing/" title="世界著名建筑 - 各国建筑图片 - 建筑环境图片">建筑环境</a></li><li class="s13"><a href="/tupian/jiaju/" title="家居装饰设计图片 - 装修效果图 - 时尚家居图片">装饰装修</a></li><li class="s14"><a href="/tupian/guanggaosheji/" title="品牌广告设计 - 广告设计创意图片">广告设计</a></li><li class="s15"><a href="/tupian/katongtupian/" title="卡通插画图片 - 卡通人物形象、卡通风景图片">卡通图片</a></li><li class="s16"><a href="/tupian/jieritupian/" title="中国传统节日图片 - 国外知名节日图片">节日图片</a></li><li class="s17"><a href="/tupian/shejisucai/" title="设计素材图片 - 背景图片 - 花纹、底纹、边框素材图片">设计素材</a></li><li class="s18"><a href="/tupian/yishu/" title="艺术 - 绘画 - 民族艺术">艺术绘画</a></li><li class="s19"><a href="/tupian/qita/" title="其他类别图片">其他类别</a></li></ul></div><div class="album"><div class="al_tit"><h1>夜空中的月亮图片(13张) </h1><div class="al_h3">小鹏 <span id="arc_pubtime" rel="1544580020">1个星期前上传</span> / <span id="arc_click" rel="49681"> 您是第 1020 位浏览者 / </span> 本图集共有 13 张图片</div></div><div id="artinfo"><script>dy("artinfo");</script></div><div id="tpimgleft1"><script>dy("tpimgleft1");</script><div id="_7nfr6y4ygpl" style=""><iframe width="336" frameborder="0" height="280" scrolling="no" src="http://pos.baidu.com/s?hei=280&wid=336&di=u1266263&ltu=http%3A%2F%2Fwww.ivsky.com%2Ftupian%2Fyueliang_v49681%2F%3Ftdsourcetag%3Ds_pcqq_aiomsg&psi=3517a35963ad0d457ffc8be3083a3672&col=zh-CN&exps=111000,110012&dtm=HTML_POST&cdo=-1&ari=2&ti=%E5%A4%9C%E7%A9%BA%E4%B8%AD%E7%9A%84%E6%9C%88%E4%BA%AE%E5%9B%BE%E7%89%87%2013%E5%BC%A0%20(%E5%A4%A9%E5%A0%82%E5%9B%BE%E7%89%87%E7%BD%91)&ps=397x174&cfv=0&drs=1&ccd=24&ant=0&tlm=1544580040&cpl=3&cec=UTF-8&pss=1349x697&dri=0&dai=3&dc=3&psr=1366x768&dis=0&par=1366x728&chi=1&pcs=1349x657&cmi=4&tcn=1545639749&cja=false&cce=true&pis=-1x-1&tpr=1545639748905"></iframe></div><script type="text/javascript" src="http://a.lanrentuku.com/ezioyyoyrcgorvwy.js"></script>
# # # </div><div class="al_info"><div class="al_h4">介绍</div><div class="al_p"><p> 月亮总是那么的温暖,就像一只爱的手,每个寂静的夜晚都轻抚着大地妈妈,洒落在每个人的身上,洒落在每一寸肌肤,让人觉得暖融融的。</p></div></div></div><div class="left"><div id="tpimgleft2"><script>dy("tpimgleft2");</script></div><ul class="pli"> <li><div class="il_img" style="opacity: 1;"><a href="/tupian/yueliang_v49681/pic_782239.html" title="夜空中的月亮图片 3637x2424" target="_blank"><img src="http://img.ivsky.com/img/tupian/t/201806/15/yueliang.jpg" alt="夜空中的月亮图片"></a></div><p><a href="/tupian/yueliang_v49681/pic_782239.html" title="夜空中的月亮图片 3637x2424" target="_blank">夜空中的月亮图片 </a></p></li><li><div class="il_img" style="opacity: 1;"><a href="/tupian/yueliang_v49681/pic_782240.html" title="夜空中的月亮图片 4608x3456" target="_blank"><img src="http://img.ivsky.com/img/tupian/t/201806/15/yueliang-001.jpg" alt="夜空中的月亮图片"></a></div><p><a href="/tupian/yueliang_v49681/pic_782240.html" title="夜空中的月亮图片 4608x3456" target="_blank">夜空中的月亮图片 </a></p></li><li><div class="il_img" style="opacity: 1;"><a href="/tupian/yueliang_v49681/pic_782241.html" title="夜空中的月亮图片 4608x3456" target="_blank"><img src="http://img.ivsky.com/img/tupian/t/201806/15/yueliang-002.jpg" alt="夜空中的月亮图片"></a></div><p><a href="/tupian/yueliang_v49681/pic_782241.html" title="夜空中的月亮图片 4608x3456" target="_blank">夜空中的月亮图片 </a></p></li><li><div class="il_img" style="opacity: 1;"><a href="/tupian/yueliang_v49681/pic_782242.html" title="夜空中的月亮图片 4608x2592" target="_blank"><img src="http://img.ivsky.com/img/tupian/t/201806/15/yueliang-003.jpg" alt="夜空中的月亮图片"></a></div><p><a href="/tupian/yueliang_v49681/pic_782242.html" title="夜空中的月亮图片 4608x2592" target="_blank">夜空中的月亮图片 </a></p></li><li><div class="il_img" style="opacity: 1;"><a href="/tupian/yueliang_v49681/pic_782243.html" title="夜空中的月亮图片 4608x3456" target="_blank"><img src="http://img.ivsky.com/img/tupian/t/201806/15/yueliang-004.jpg" alt="夜空中的月亮图片"></a></div><p><a href="/tupian/yueliang_v49681/pic_782243.html" title="夜空中的月亮图片 4608x3456" target="_blank">夜空中的月亮图片 </a></p></li><li><div class="il_img" style="opacity: 1;"><a href="/tupian/yueliang_v49681/pic_782244.html" title="夜空中的月亮图片 4272x2848" target="_blank"><img src="http://img.ivsky.com/img/tupian/t/201806/15/yueliang-005.jpg" alt="夜空中的月亮图片"></a></div><p><a href="/tupian/yueliang_v49681/pic_782244.html" title="夜空中的月亮图片 4272x2848" target="_blank">夜空中的月亮图片 </a></p></li><li><div class="il_img" style="opacity: 1;"><a href="/tupian/yueliang_v49681/pic_782245.html" title="夜空中的月亮图片 2560x1920" target="_blank"><img src="http://img.ivsky.com/img/tupian/t/201806/15/yueliang-006.jpg" alt="夜空中的月亮图片"></a></div><p><a href="/tupian/yueliang_v49681/pic_782245.html" title="夜空中的月亮图片 2560x1920" target="_blank">夜空中的月亮图片 </a></p></li><li><div class="il_img" style="opacity: 1;"><a href="/tupian/yueliang_v49681/pic_782246.html" title="夜空中的月亮图片 4000x3000" target="_blank"><img src="http://img.ivsky.com/img/tupian/t/201806/15/yueliang-007.jpg" alt="夜空中的月亮图片"></a></div><p><a href="/tupian/yueliang_v49681/pic_782246.html" title="夜空中的月亮图片 4000x3000" target="_blank">夜空中的月亮图片 </a></p></li><li><div class="il_img" style="opacity: 1;"><a href="/tupian/yueliang_v49681/pic_782247.html" title="夜空中的月亮图片 5184x3888" target="_blank"><img src="http://img.ivsky.com/img/tupian/t/201806/15/yueliang-008.jpg" alt="夜空中的月亮图片"></a></div><p><a href="/tupian/yueliang_v49681/pic_782247.html" title="夜空中的月亮图片 5184x3888" target="_blank">夜空中的月亮图片 </a></p></li><li><div class="il_img" style="opacity: 1;"><a href="/tupian/yueliang_v49681/pic_782248.html" title="夜空中的月亮图片 4608x3456" 
target="_blank"><img src="http://img.ivsky.com/img/tupian/t/201806/15/yueliang-009.jpg" alt="夜空中的月亮图片"></a></div><p><a href="/tupian/yueliang_v49681/pic_782248.html" title="夜空中的月亮图片 4608x3456" target="_blank">夜空中的月亮图片 </a></p></li><li><div class="il_img" style="opacity: 1;"><a href="/tupian/yueliang_v49681/pic_782249.html" title="夜空中的月亮图片 3264x2176" target="_blank"><img src="http://img.ivsky.com/img/tupian/t/201806/15/yueliang-010.jpg" alt="夜空中的月亮图片"></a></div><p><a href="/tupian/yueliang_v49681/pic_782249.html" title="夜空中的月亮图片 3264x2176" target="_blank">夜空中的月亮图片 </a></p></li><li><div class="il_img" style="opacity: 1;"><a href="/tupian/yueliang_v49681/pic_782250.html" title="夜空中的月亮图片 4608x3456" target="_blank"><img src="http://img.ivsky.com/img/tupian/t/201806/15/yueliang-011.jpg" alt="夜空中的月亮图片"></a></div><p><a href="/tupian/yueliang_v49681/pic_782250.html" title="夜空中的月亮图片 4608x3456" target="_blank">夜空中的月亮图片 </a></p></li><li><div class="il_img" style="opacity: 1;"><a href="/tupian/yueliang_v49681/pic_782251.html" title="夜空中的月亮图片 4608x3456" target="_blank"><img src="http://img.ivsky.com/img/tupian/t/201806/15/yueliang-012.jpg" alt="夜空中的月亮图片"></a></div><p><a href="/tupian/yueliang_v49681/pic_782251.html" title="夜空中的月亮图片 4608x3456" target="_blank">夜空中的月亮图片 </a></p></li> </ul><div id="tpimgleft3"><script>dy("tpimgleft3");</script></div><div id="tpimgleft4"><script>dy("tpimgleft4");</script></div><div class="page_c"></div><div id="tpimgleft5"><script>dy("tpimgleft5");</script><div style=""><iframe width="760" frameborder="0" height="90" scrolling="no" src="//pos.baidu.com/s?hei=90&wid=760&di=u1265346&ltu=http%3A%2F%2Fwww.ivsky.com%2Ftupian%2Fyueliang_v49681%2F%3Ftdsourcetag%3Ds_pcqq_aiomsg&psi=3517a35963ad0d457ffc8be3083a3672&dtm=HTML_POST&pis=-1x-1&cec=UTF-8&ant=0&psr=1366x768&par=1366x728&cpl=3&chi=1&dai=4&ari=2&cja=false&dis=0&cmi=4&tlm=1544580040&drs=1&cdo=-1&exps=111000,110012&pcs=1349x657&pss=1349x1845&cce=true&dri=0&tcn=1545639749&col=zh-CN&dc=3&cfv=0&tpr=1545639748905&ti=%E5%A4%9C%E7%A9%BA%E4%B8%AD%E7%9A%84%E6%9C%88%E4%BA%AE%E5%9B%BE%E7%89%87%2013%E5%BC%A0%20(%E5%A4%A9%E5%A0%82%E5%9B%BE%E7%89%87%E7%BD%91)&ps=1459x174&ccd=24"></iframe></div><script type="text/javascript" src="http://a.lanrentuku.com/fajpzxswzdh.js"></script>
# # # </div><div id="tpimgleft6"><script>dy("tpimgleft6");</script></div><div class="lxg"><div class="lxg_tit">相关的图集...</div><ul class="lxg_ul"><li><div class="xg_img"><a href="/tupian/weimei_de_yueqiu_v28784/" target="_blank"><img src="http://img.ivsky.com/img/tupian/m/201507/24/weimei_de_yueqiu-004.jpg" alt="唯美的月亮图片"></a></div><div class="lxg_info"><p><a href="/tupian/weimei_de_yueqiu_v28784/" title="唯美的月亮图片(11张)" target="_blank">唯美的月亮图片(11张)</a></p></div></li><li><div class="xg_img"><a href="/tupian/weimei_yueqiu_v28875/" target="_blank"><img src="http://img.ivsky.com/img/tupian/m/201507/27/weimei_yueqiu.jpg" alt="航拍月球图片"></a></div><div class="lxg_info"><p><a href="/tupian/weimei_yueqiu_v28875/" title="航拍月球图片(12张)" target="_blank">航拍月球图片(12张)</a></p></div></li><li><div class="xg_img"><a href="/tupian/yueliang_v49517/" target="_blank"><img src="http://img.ivsky.com/img/tupian/m/201806/05/yueliang-013.jpg" alt="夜空中唯美的月亮图片"></a></div><div class="lxg_info"><p><a href="/tupian/yueliang_v49517/" title="夜空中唯美的月亮图片(14张)" target="_blank">夜空中唯美的月亮图片(14张)</a></p></div></li><li class="lxg_line"></li><li><div class="xg_img"><a href="/tupian/yueliang_v49681/" target="_blank"><img src="http://img.ivsky.com/img/tupian/m/201806/15/yueliang.jpg" alt="夜空中的月亮图片"></a></div><div class="lxg_info"><p><a href="/tupian/yueliang_v49681/" title="夜空中的月亮图片(13张)" target="_blank">夜空中的月亮图片(13张)</a></p></div></li><li><div class="xg_img"><a href="/tupian/yueliang_v49362/" target="_blank"><img src="http://img.ivsky.com/img/tupian/m/201805/29/yueliang-005.jpg" alt="皎洁的月亮图片"></a></div><div class="lxg_info"><p><a href="/tupian/yueliang_v49362/" title="皎洁的月亮图片(14张)" target="_blank">皎洁的月亮图片(14张)</a></p></div></li><li><div class="xg_img"><a href="/tupian/canque_yueliang_v49330/" target="_blank"><img src="http://img.ivsky.com/img/tupian/m/201805/27/canque_yueliang-006.jpg" alt="残缺的月亮图片"></a></div><div class="lxg_info"><p><a href="/tupian/canque_yueliang_v49330/" title="残缺的月亮图片(15张)" target="_blank">残缺的月亮图片(15张)</a></p></div></li><li class="lxg_line"></li><li><div class="xg_img"><a href="/tupian/yueliang_v46642/" target="_blank"><img src="http://img.ivsky.com/img/tupian/m/201801/31/yueliang-006.jpg" alt="圆圆的月亮图片"></a></div><div class="lxg_info"><p><a href="/tupian/yueliang_v46642/" title="圆圆的月亮图片(13张)" target="_blank">圆圆的月亮图片(13张)</a></p></div></li><li><div class="xg_img"><a href="/tupian/yueliang_v43949/" target="_blank"><img src="http://img.ivsky.com/img/tupian/m/201709/25/yueliang-006.jpg" alt="一轮弯弯的月亮图片"></a></div><div class="lxg_info"><p><a href="/tupian/yueliang_v43949/" title="一轮弯弯的月亮图片(12张)" target="_blank">一轮弯弯的月亮图片(12张)</a></p></div></li><li><div class="xg_img"><a href="/tupian/yueliang_v46644/" target="_blank"><img src="http://img.ivsky.com/img/tupian/m/201801/31/yueliang-022.jpg" alt="皎洁的月亮图片"></a></div><div class="lxg_info"><p><a href="/tupian/yueliang_v46644/" title="皎洁的月亮图片(12张)" target="_blank">皎洁的月亮图片(12张)</a></p></div></li><li class="lxg_line"></li></ul></div><div id="tpimgleft7"><script>dy("tpimgleft7");</script></div><div id="tpimgleft8"><script>dy("tpimgleft8");</script></div></div><div class="right"><div id="tpimgr1"><script>dy("tpimgr1");</script><div id="_qmchnn5i778" style=""><iframe width="160" frameborder="0" height="600" scrolling="no" 
src="http://pos.baidu.com/s?hei=600&wid=160&di=u3467566&ltu=http%3A%2F%2Fwww.ivsky.com%2Ftupian%2Fyueliang_v49681%2F%3Ftdsourcetag%3Ds_pcqq_aiomsg&psi=3517a35963ad0d457ffc8be3083a3672&col=zh-CN&par=1366x728&psr=1366x768&ccd=24&cdo=-1&cce=true&cja=false&cfv=0&tlm=1544580040&cec=UTF-8&drs=1&dai=5&dc=3&ps=717x1014&pcs=1349x657&ant=0&dtm=HTML_POST&pss=1349x2114&tcn=1545639749&ti=%E5%A4%9C%E7%A9%BA%E4%B8%AD%E7%9A%84%E6%9C%88%E4%BA%AE%E5%9B%BE%E7%89%87%2013%E5%BC%A0%20(%E5%A4%A9%E5%A0%82%E5%9B%BE%E7%89%87%E7%BD%91)&tpr=1545639748905&chi=1&pis=-1x-1&dis=0&dri=0&cpl=3&exps=111000,110012&ari=2&cmi=4"></iframe></div><script type="text/javascript" src="http://a.lanrentuku.com/pkcgjlhjj.js"></script>
# # # </div> <div class="rb"><div class="rtit">所属小分类</div><ul class="timg_ul"><li><div class="t_img"><a href="/tupian/weimei_t5095/" target="_blank"><img src="http://img.ivsky.com/img/tupian/m/201101/20/haibian_weimei_riluo-002.jpg" width="125" height="77" alt="唯美图片_唯美图片大全"></a></div><div class="t_info"><p><a href="/tupian/weimei_t5095/" title="唯美图片_唯美图片大全" target="_blank">唯美图片(2678张)</a></p></div></li><li><div class="t_img"><a href="/tupian/yekong_t19444/" target="_blank"><img src="http://img.ivsky.com/img/tupian/m/201308/30/yueyuanzhiye-006.jpg" width="125" height="77" alt="夜空图片"></a></div><div class="t_info"><p><a href="/tupian/yekong_t19444/" title="夜空图片" target="_blank">夜空图片(689张)</a></p></div></li><li><div class="t_img"><a href="/tupian/yueliang_t20603/" target="_blank"><img src="http://img.ivsky.com/img/tupian/m/201308/30/yueyuanzhiye-007.jpg" width="125" height="77" alt="月亮图片"></a></div><div class="t_info"><p><a href="/tupian/yueliang_t20603/" title="月亮图片" target="_blank">月亮图片(387张)</a></p></div></li></ul></div><div id="tpimgr2"><script>dy("tpimgr2");</script></div></div></div><div class="box"><div class="f_rec"><b>推荐内容:</b><a href="/tupian/shilulu_t42843/" title="湿漉漉图片">湿漉漉图片</a><a href="/tupian/maitian_t268/" title="麦田图片">麦田图片</a><a href="/tupian/xueshe_t29850/" title="雪舌图片">雪舌图片</a><a href="/tupian/xinggui_t27924/" title="星轨图片">星轨图片</a><a href="/tupian/xueshan_dingbujing_t35820/" title="雪山顶部景图片">雪山顶部景图片</a><a href="/tupian/lvyi_angran_t44016/" title="绿意盎然图片">绿意盎然图片</a><a href="/tupian/huashu_t28962/" title="桦树图片">桦树图片</a><a href="/tupian/hongse_yanshi_t44769/" title="红色岩石图片">红色岩石图片</a><a href="/tupian/caodi_t2229/" title="草地图片">草地图片</a><a href="/tupian/mingyue_t20604/" title="明月图片">明月图片</a><a href="/tupian/haichao_t36540/" title="海潮图片">海潮图片</a><a href="/tupian/eke_tuosai_hegu_t29810/" title="鄂克托赛河谷图片">鄂克托赛河谷图片</a><a href="/tupian/ziran_zaihai_t38633/" title="自然灾害图片">自然灾害图片</a><a href="/tupian/dacong_t9259/" title="大葱图片">大葱图片</a><a href="/tupian/lantian_t1484/" title="蓝天图片">蓝天图片</a><a href="/tupian/wuyun_t2967/" title="乌云图片">乌云图片</a><a href="/tupian/tianqi_xianxiang_t41109/" title="天气现象图片">天气现象图片</a><a href="/tupian/tudi_t4728/" title="土地图片">土地图片</a><a href="/tupian/sha_t35456/" title="沙图片">沙图片</a><a href="/tupian/bilv_de_dahai_t39376/" title="碧绿的大海图片">碧绿的大海图片</a><div><b>手机版:</b><a href="http://m.ivsky.com/tupian/yueliang_v49681/" target="_blank">夜空中的月亮图片(13张)</a> </div></div></div><div class="box"><div id="tpimgbtm"><script>dy("tpimgbtm");</script></div><div id="tpimgbtm2"><script>dy("tpimgbtm2");</script></div><div id="tppagebtm"><script>dy("tppagebtm");</script></div><div id="tppagebtm2"><script>dy("tppagebtm2");</script></div></div><div id="footer"><div class="box"><div id="fl"><dl><dt>关于</dt><dd><a href="/about/about.html" rel="nofollow">关于天堂</a></dd><dd><a href="/about/disclaimer.html" rel="nofollow">免责声明</a></dd> </dl><dl><dt>帮助</dt><dd><a href="/about/tougao.html" rel="nofollow">用户投稿</a></dd><dd><a href="/about/faq.html" rel="nofollow">常见问题</a></dd></dl><dl><dt>联系</dt><dd><a href="/about/contact.html" rel="nofollow">联系我们</a></dd><dd><a href="/about/guestbook.html" rel="nofollow">留言反馈</a></dd><dd><a href="/about/ad.html" rel="nofollow">广告投放</a></dd></dl><dl><dt>关注</dt><dd class="sina"><a href="http://weibo.com/ivskycom" target="_blank" rel="nofollow">新浪微博</a></dd><dd>公众号<br><img src="http://img.ivsky.com/img/images/qrcode_gzh.jpg" width="75" height="75"></dd></dl></div><div id="fr"><p>© 2005-2018 天堂图片网 闽ICP备05021777号-1 闽公网安备 
35020302011402号</p><p>本站图片收集自网络,仅供个人学习交流使用,版权归原作者所有,请勿用于任何商业用途</p><p>主机服务商:<a href="http://www.aliyun.com/cps/rebate?from_uid=jJfSOSVZ99F3GCoDuIogFvjezZSHMCm8" target="_blank">阿里云</a> <a href="http://www.chinaccnet.com/" target="_blank">中电云集</a></p></div></div></div><script>dy("tbox");</script><div id="tbox"><a href="http://www.miibeian.gov.cn/" target="_blank" rel="nofollow">
# </a><a id="gotop" href="javascript:void(0)" target="_top" rel="nofollow" style="display: block;"></a>
# <a id="jy" href="/about/guestbook.html" rel="nofollow" style="display: block;"></a>
# </div>
# <div id="tj"><script>dy("tj");</script></div><script src="/sky.php?do=viewarticle&id=49681&show=1&callback=viewcallback1545639749165&_t=1545639749165"></script></body>
# """
# Get the list of image src URLs
html_tree = etree.HTML(html_content)
pattern = '//ul[@class="ali"]/li/div/a/@href'
pattern2 = '//ul[@class="ali"]/li/p/a/text()'
# Lists of gallery links and gallery names
img_set_url_list = html_tree.xpath(pattern)
img_set_name_list = html_tree.xpath(pattern2)
print(img_set_url_list)
print(img_set_name_list) # print the addresses of the images to be crawled
# Loop over every gallery and fetch all of its thumbnails
for i in range(0,len(img_set_url_list)):
print('开始本页第{}个图集'.format(i+1))
# Create a folder named after the gallery in the directory of this script
if not os.path.exists(os.path.join(basedir,img_set_name_list[i])):
# Create a subfolder based on the gallery name
os.mkdir(os.path.join(basedir,img_set_name_list[i]))
print('创建图集目录',os.path.join(basedir,img_set_name_list[i]))
# Request the gallery detail page. The code below is the same as in the previous lesson
resp = requests.get(('http://www.ivsky.com'+img_set_url_list[i]))
sub_html_content = resp.text
sub_html_etree = etree.HTML(sub_html_content)
# Get the src links of the image resources
img_src_list = sub_html_etree.xpath("//ul[@class='pli']/li/div/a/img/@src")
img_name_list = sub_html_etree.xpath("//ul[@class='pli']/li/div/a/img/@alt")
print(img_src_list)
for j,img_src in enumerate(img_src_list):
# Request the binary image data
print('开始下载本图集第{}张图片'.format(j))
resp = requests.get(img_src,timeout=10)
if not resp.status_code == 200:
print('请求失败')
img_bytes = resp.content
print(img_bytes)
# Build the absolute path for the image and save it
print(os.path.join(img_set_name_list[i],f'{j}.jpg'))
with open(os.path.join(img_set_name_list[i],f'{j}.jpg'),mode='wb') as f :
f.write(img_bytes)
break # comment out this line to crawl all galleries
| [
"[email protected]"
]
| |
055237bb5f10dbc3a7803b958039f4f1c9d1f70c | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/cloud/retail/v2alpha/retail-v2alpha-py/google/cloud/retail_v2alpha/services/catalog_service/transports/grpc.py | 93bc9af6f89d459cc141a577a3ac5750430668d6 | [
"Apache-2.0"
]
| permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,604 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.retail_v2alpha.types import catalog as gcr_catalog
from google.cloud.retail_v2alpha.types import catalog_service
from google.protobuf import empty_pb2 # type: ignore
from .base import CatalogServiceTransport, DEFAULT_CLIENT_INFO
class CatalogServiceGrpcTransport(CatalogServiceTransport):
"""gRPC backend transport for CatalogService.
Service for managing catalog configuration.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'retail.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'retail.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def list_catalogs(self) -> Callable[
[catalog_service.ListCatalogsRequest],
catalog_service.ListCatalogsResponse]:
r"""Return a callable for the list catalogs method over gRPC.
Lists all the [Catalog][google.cloud.retail.v2alpha.Catalog]s
associated with the project.
Returns:
Callable[[~.ListCatalogsRequest],
~.ListCatalogsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_catalogs' not in self._stubs:
self._stubs['list_catalogs'] = self.grpc_channel.unary_unary(
'/google.cloud.retail.v2alpha.CatalogService/ListCatalogs',
request_serializer=catalog_service.ListCatalogsRequest.serialize,
response_deserializer=catalog_service.ListCatalogsResponse.deserialize,
)
return self._stubs['list_catalogs']
@property
def update_catalog(self) -> Callable[
[catalog_service.UpdateCatalogRequest],
gcr_catalog.Catalog]:
r"""Return a callable for the update catalog method over gRPC.
Updates the [Catalog][google.cloud.retail.v2alpha.Catalog]s.
Returns:
Callable[[~.UpdateCatalogRequest],
~.Catalog]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_catalog' not in self._stubs:
self._stubs['update_catalog'] = self.grpc_channel.unary_unary(
'/google.cloud.retail.v2alpha.CatalogService/UpdateCatalog',
request_serializer=catalog_service.UpdateCatalogRequest.serialize,
response_deserializer=gcr_catalog.Catalog.deserialize,
)
return self._stubs['update_catalog']
@property
def set_default_branch(self) -> Callable[
[catalog_service.SetDefaultBranchRequest],
empty_pb2.Empty]:
r"""Return a callable for the set default branch method over gRPC.
Set a specified branch id as default branch. API methods such as
[SearchService.Search][google.cloud.retail.v2alpha.SearchService.Search],
[ProductService.GetProduct][google.cloud.retail.v2alpha.ProductService.GetProduct],
[ProductService.ListProducts][google.cloud.retail.v2alpha.ProductService.ListProducts]
will route requests that use "default_branch" to the actual
branch id that has been set as default.
For example, if ``projects/*/locations/*/catalogs/*/branches/1``
is set as default, setting
[SearchRequest.branch][google.cloud.retail.v2alpha.SearchRequest.branch]
to ``projects/*/locations/*/catalogs/*/branches/default_branch``
is equivalent to setting
[SearchRequest.branch][google.cloud.retail.v2alpha.SearchRequest.branch]
to ``projects/*/locations/*/catalogs/*/branches/1``.
Using multiple branches can be useful when developers would like
to have a staging branch to test and verify for future usage.
When it is ready, developers switch to the staging branch
using this API while continuing to use
``projects/*/locations/*/catalogs/*/branches/default_branch`` as
[SearchRequest.branch][google.cloud.retail.v2alpha.SearchRequest.branch]
to route the traffic to this staging branch.
CAUTION: If you have live predict/search traffic, switching the
default branch could potentially cause outages if the ID space
of the new branch is very different from the old one.
More specifically:
- PredictionService will only return product IDs from branch
{newBranch}.
- SearchService will only return product IDs from branch
{newBranch} (if branch is not explicitly set).
- UserEventService will only join events with products from
branch {newBranch}.
This feature is only available for users who have Retail Search
enabled. Please submit a form
`here <https://cloud.google.com/contact>`__ to contact cloud
sales if you are interested in using Retail Search.
Returns:
Callable[[~.SetDefaultBranchRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'set_default_branch' not in self._stubs:
self._stubs['set_default_branch'] = self.grpc_channel.unary_unary(
'/google.cloud.retail.v2alpha.CatalogService/SetDefaultBranch',
request_serializer=catalog_service.SetDefaultBranchRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['set_default_branch']
@property
def get_default_branch(self) -> Callable[
[catalog_service.GetDefaultBranchRequest],
catalog_service.GetDefaultBranchResponse]:
r"""Return a callable for the get default branch method over gRPC.
Get which branch is currently default branch set by
[CatalogService.SetDefaultBranch][google.cloud.retail.v2alpha.CatalogService.SetDefaultBranch]
method under a specified parent catalog.
This feature is only available for users who have Retail Search
enabled. Please submit a form
`here <https://cloud.google.com/contact>`__ to contact cloud
sales if you are interested in using Retail Search.
Returns:
Callable[[~.GetDefaultBranchRequest],
~.GetDefaultBranchResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_default_branch' not in self._stubs:
self._stubs['get_default_branch'] = self.grpc_channel.unary_unary(
'/google.cloud.retail.v2alpha.CatalogService/GetDefaultBranch',
request_serializer=catalog_service.GetDefaultBranchRequest.serialize,
response_deserializer=catalog_service.GetDefaultBranchResponse.deserialize,
)
return self._stubs['get_default_branch']
__all__ = (
'CatalogServiceGrpcTransport',
)
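# --- Added illustrative sketch ---------------------------------------------------
# Rough usage sketch only: it shows how a caller would normally reach the
# SetDefaultBranch / GetDefaultBranch RPCs through the public CatalogServiceClient
# instead of instantiating this transport class directly. The catalog resource
# name is a placeholder, and the request field names ("catalog", "branch_id") are
# assumptions based on the request messages referenced above, not verified here.
# The function is only defined for reference and is never called.
def _default_branch_usage_sketch():
    from google.cloud import retail_v2alpha

    client = retail_v2alpha.CatalogServiceClient()  # credentials resolved from the environment
    catalog_name = "projects/my-project/locations/global/catalogs/default_catalog"  # placeholder

    # Point "default_branch" at branch "1", then read the current setting back.
    client.set_default_branch(request={"catalog": catalog_name, "branch_id": "1"})
    return client.get_default_branch(request={"catalog": catalog_name})
# -----------------------------------------------------------------------------------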
| [
"bazel-bot-development[bot]@users.noreply.github.com"
]
| bazel-bot-development[bot]@users.noreply.github.com |
7f83ccac64e8a53e8d25e677d1b7ce5405fae100 | f5624e046836723fa065d47d11d52b8b3e448e7b | /espy/splitlist.py | a67fd9453ad4756a09f5179de45f048c6c7f82be | []
| no_license | esheldon/espy | 99daafcf9f81b1465994a0e717095a8ea0bcbfc8 | 090e6f4c65c2dc3acef3c2d98b2971d8c1002787 | refs/heads/master | 2023-07-20T23:49:18.662509 | 2023-07-12T15:40:24 | 2023-07-12T15:40:24 | 1,374,930 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | def split_list(els, nchunks):
nel = len(els)
chunksize = nel // nchunks
extra_items = nel % nchunks
chunks = []
start = 0
for i in range(nchunks):
this_chunksize = chunksize
if i < extra_items:
this_chunksize += 1
end = start + this_chunksize
chunk = els[start:end]
chunks.append(chunk)
start = start + this_chunksize
return [
chunk for chunk in chunks if len(chunk) > 0
]
def write_chunks(chunks, prefix, suffix):
nchunks = len(chunks)
cformat = '%0' + str(len(str(nchunks))) + 'i'
name_format = prefix + cformat + suffix
for i, chunk in enumerate(chunks):
fname = name_format % i
print(fname)
with open(fname, 'w') as fobj:
for el in chunk:
fobj.write(el)
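# --- Added usage sketch --------------------------------------------------------
# Minimal usage sketch for the two helpers above: split 10 lines into 3 chunks of
# sizes 4/3/3 and write them to chunk0.txt, chunk1.txt, chunk2.txt. The prefix and
# suffix are arbitrary example values, not part of the original module.
if __name__ == '__main__':
    lines = ['item %d\n' % i for i in range(10)]
    chunks = split_list(lines, 3)
    print([len(c) for c in chunks])   # -> [4, 3, 3]
    write_chunks(chunks, 'chunk', '.txt')
# ---------------------------------------------------------------------------------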
| [
"[email protected]"
]
| |
1ae6af2cce4447358311e46e19be97470cce2616 | b9a131dd85fe5f2d2f5b16c97b1f859ede5a4914 | /Curso_em_Vídeo/CalculoMedia.py | 99126bab46b183c7dfc1ae6d976078cbf62a1650 | []
| no_license | juancassioo/python-sistemas | 131f218bf8fa1bebf1bc6e5fbe3222571ca7a42f | 378596d1c630357b1b1958a3b4e3e7f6f96dd5d1 | refs/heads/main | 2023-07-04T20:27:22.859839 | 2021-08-09T01:10:37 | 2021-08-09T01:10:37 | 394,105,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | nota1 = float(input('Digite a primeira nota: '))
nota2 = float(input('Digite a segunda nota: '))
media = (nota1+nota2)/2
if media >= 4 and media < 7:
print('Apto a avaliação final')
elif media < 4:
print('Reprovado')
else:
print('Aprovado')
print(media) | [
"[email protected]"
]
| |
7245d605b82b94a01fd481256157eec9bc376170 | 67612c27c6d79ae180a5bc266833899abfefe9f5 | /152. Maximum Product Subarray.py | c0dac2b1ad7f683ccdf47d6b5d6e01df83f751e2 | []
| no_license | Katherinaxxx/leetcode | 7e9d0bd7dc613a824116f1247f42bfc33e485ff3 | dcebf49d1e024b9e69c4d9606c8afb32b9d07029 | refs/heads/master | 2023-01-27T20:14:09.459296 | 2023-01-08T07:01:53 | 2023-01-08T07:01:53 | 215,688,672 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 882 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2019/11/13 上午11:57
@Author : Catherinexxx
@Site :
@File : 152. Maximum Product Subarray.py
@Software: PyCharm
"""
# DP: the sign of the running product can flip, so keep both the current max and min products, and swap them when the current number is negative
# class Solution:
# def maxProduct(self, nums: List[int]) -> int:
# imax = 1
# imin = 1
# res = float('-inf')
# n = len(nums)
# for i in range(n):
# if(nums[i]<=0):
# imax, imin = imin, imax
# imax = max(imax*nums[i], nums[i])
# imin = min(imin*nums[i], nums[i])
# res = max(res, imax)
# return res
# Solution seen on the international site (leetcode.com)
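# Added worked example: a single forward prefix-product pass can miss the answer
# when a leading negative blocks it, which is why the reversed copy B is scanned too.
# For A = [3, -1, 4]:
#   forward running products:  [3, -3, -12]  -> best seen is 3
#   backward running products: [4, -4, -12]  -> best seen is 4 (the true answer)
# The `or 1` resets the running product after a zero element.
# Quick checks (sketch): Solution().maxProduct([2, 3, -2, 4]) -> 6,
#                        Solution().maxProduct([3, -1, 4])    -> 4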
class Solution:
def maxProduct(self, A):
B = A[::-1] # 倒序
for i in range(1, len(A)):
A[i] *= A[i - 1] or 1
B[i] *= B[i - 1] or 1
return max(A + B) | [
"[email protected]"
]
| |
52838f675ec154a16649220f797aee36f8262712 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/IBus/__class__.py | 39b144f32a6b76f1cc80ecf5f82044a5df077f98 | []
| no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 3,623 | py | # encoding: utf-8
# module gi.repository.IBus
# from /usr/lib64/girepository-1.0/IBus-1.0.typelib
# by generator 1.147
# no doc
# imports
import gi as __gi
import gi.overrides as __gi_overrides
import gi.overrides.Gio as __gi_overrides_Gio
import gi.repository.GObject as __gi_repository_GObject
import gobject as __gobject
class __class__(__gi_overrides.OverridesProxyModule):
# no doc
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self): # reliably restored by inspect
# no doc
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __getattr__(self, name): # reliably restored by inspect
# no doc
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, introspection_module): # reliably restored by inspect
# no doc
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self): # reliably restored by inspect
# no doc
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
__class__ = type
__dict__ = mappingproxy({'__module__': 'gi.overrides', '__doc__': None})
| [
"[email protected]"
]
| |
2f26fdc0fa04f60227982e45b6f007e513c9edb9 | 9f387c703dbf4d970d0259424c7b299108c369f5 | /dd_sdk_1_0/dd_sdk_1_0/models/network_nic_link_duplex_mode20.py | a1f674f04d95c2db1e574d1bd892e439069155e9 | []
| no_license | gcezaralmeida/datadomain_sdk_python | c989e6846bae9435c523ab09e230fc12d020f7f1 | e102ec85cea5d888c8329626892347571832e079 | refs/heads/main | 2023-08-23T22:42:47.083754 | 2021-10-25T21:52:49 | 2021-10-25T21:52:49 | 370,805,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,924 | py | # coding: utf-8
"""
DataDomain Rest API Documentation
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from dd_sdk_1_0.configuration import Configuration
class NetworkNicLinkDuplexMode20(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
UNKNOWN = "unknown"
FULL = "full"
HALF = "half"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self, _configuration=None): # noqa: E501
"""NetworkNicLinkDuplexMode20 - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(NetworkNicLinkDuplexMode20, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NetworkNicLinkDuplexMode20):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, NetworkNicLinkDuplexMode20):
return True
return self.to_dict() != other.to_dict()
| [
"[email protected]"
]
| |
8f07d9cbe7ec45103b3e054e55955ac2957f7213 | c5d553e68de3d5c730f5fe2550209de759eabc8c | /1929 소수 구하기.py | 0598ecfbae20cc2a1f69cbce7d06bbde3eb36502 | []
| no_license | KimMooHyeon/Algorithm-Studying | 6bb23b971b0c46c35f4cdde133148f2c5cfaa0f4 | e4417aadf209fd22f960239623bed542744fd374 | refs/heads/master | 2023-08-08T02:28:02.460332 | 2023-07-15T14:22:53 | 2023-07-15T14:22:53 | 198,966,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
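# Added note: this solves Baekjoon problem 1929 ("소수 구하기" = finding primes):
# read M and N, then print every prime p with M <= p <= N. The loop below is a
# Sieve of Eratosthenes -- check_dp[i] == 0 means i is still unmarked (prime),
# and each prime then marks all of its multiples up to N. sosu_arr is unused here.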
M,N=map(int,input().split())
check_dp=[0]*(N+1)
sosu_arr=[]
for i in range(2,N+1):
if check_dp[i] == 0 :
if i >=M:
print(i)
i_num=i
for j in range(i,N+1,i_num):
check_dp[j]=1
| [
"[email protected]"
]
| |
cd072d8feaa75ce59e3e079c13cb5d3970e11f7a | 04803c70bb97012b7d500a177ac0240fb2ddbe38 | /1heptane/pdep/network3476_1.py | 732677e6d7a93000358eea7f799c0da66a3811a4 | []
| no_license | shenghuiqin/chpd | 735e0415f6688d88579fc935459c1b0f53596d1d | 396ba54629036e3f2be0b3fabe09b78c90d56939 | refs/heads/master | 2023-03-01T23:29:02.118150 | 2019-10-05T04:02:23 | 2019-10-05T04:02:23 | 192,084,217 | 0 | 0 | null | 2019-06-18T18:33:13 | 2019-06-15T13:52:28 | HTML | UTF-8 | Python | false | false | 63,739 | py | species(
label = '[CH2]CO[CH]CCC=O(18425)',
structure = SMILES('[CH2]CO[CH]CCC=O'),
E0 = (-11.6877,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,2782.5,750,1395,475,1775,1000,3000,3100,440,815,1455,1000,3025,407.5,1350,352.5,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.177386,0.0940963,-0.000102859,4.69802e-08,3.01876e-12,-1277.55,31.3897], Tmin=(100,'K'), Tmax=(584.095,'K')), NASAPolynomial(coeffs=[9.90448,0.0447925,-2.06952e-05,3.93642e-09,-2.73405e-13,-2709.12,-12.8353], Tmin=(584.095,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-11.6877,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(407.409,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + radical(CJCO) + radical(CCsJOCs)"""),
)
species(
label = 'C2H4(21)',
structure = SMILES('C=C'),
E0 = (42.0619,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0532,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2334.71,'J/mol'), sigma=(3.971,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.5, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.9592,-0.00757051,5.7099e-05,-6.91588e-08,2.69884e-11,5089.78,4.0973], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.99183,0.0104834,-3.71721e-06,5.94628e-10,-3.5363e-14,4268.66,-0.269082], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(42.0619,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(133.032,'J/(mol*K)'), label="""C2H4""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = 'O=CCCC=O(5767)',
structure = SMILES('O=CCCC=O'),
E0 = (-309.903,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2695,2870,700,800,1380,1410,450,500,1750,1800,900,1100,180],'cm^-1')),
HinderedRotor(inertia=(0.18601,'amu*angstrom^2'), symmetry=1, barrier=(4.27673,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.186498,'amu*angstrom^2'), symmetry=1, barrier=(4.28796,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.186203,'amu*angstrom^2'), symmetry=1, barrier=(4.28117,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (86.0892,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3653.08,'J/mol'), sigma=(5.8998,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=570.60 K, Pc=40.36 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.98042,0.0494518,-6.06096e-05,5.52155e-08,-2.1639e-11,-37204.9,20.2227], Tmin=(100,'K'), Tmax=(774.657,'K')), NASAPolynomial(coeffs=[2.99129,0.0355848,-1.7014e-05,3.28724e-09,-2.30123e-13,-37102,17.2786], Tmin=(774.657,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-309.903,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(270.22,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-O2d)CsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cds-OdCsH) + group(Cds-OdCsH)"""),
)
species(
label = '[CH2]COC1CCC1[O](18465)',
structure = SMILES('[CH2]COC1CCC1[O]'),
E0 = (94.0802,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.332062,0.0669395,-2.12054e-05,-1.88843e-08,1.13546e-11,11459.2,29.9723], Tmin=(100,'K'), Tmax=(1038.2,'K')), NASAPolynomial(coeffs=[16.6203,0.0327756,-1.31552e-05,2.47301e-09,-1.75935e-13,6536.17,-56.6409], Tmin=(1038.2,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(94.0802,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(419.881,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-CsH) + group(Cs-CsCsOsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + ring(Cyclobutane) + radical(CJCO) + radical(CC(C)OJ)"""),
)
species(
label = '[O]C1CC[CH]OCC1(18466)',
structure = SMILES('[O]C1CC[CH]OCC1'),
E0 = (-4.95588,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.742061,0.0540866,2.71982e-05,-8.09689e-08,3.83205e-11,-462.329,23.7926], Tmin=(100,'K'), Tmax=(904.882,'K')), NASAPolynomial(coeffs=[17.2798,0.0276938,-6.48371e-06,8.93992e-10,-5.7683e-14,-5367.66,-64.9064], Tmin=(904.882,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-4.95588,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(432.353,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-CsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + ring(oxepane) + radical(CCsJOCs) + radical(CC(C)OJ)"""),
)
species(
label = 'H(3)',
structure = SMILES('[H]'),
E0 = (211.792,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = '[CH2]COC=CCC=O(18467)',
structure = SMILES('[CH2]COC=CCC=O'),
E0 = (-109.206,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2995,3025,975,1000,1300,1375,400,500,1630,1680,3000,3100,440,815,1455,1000,2782.5,750,1395,475,1775,1000,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (113.134,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0422945,0.0719826,-2.78811e-05,-2.12488e-08,1.41444e-11,-12974.1,30.1344], Tmin=(100,'K'), Tmax=(1019.71,'K')), NASAPolynomial(coeffs=[21.7466,0.0230278,-9.58365e-06,1.90667e-09,-1.4236e-13,-19316.3,-84.7189], Tmin=(1019.71,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-109.206,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(386.623,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(Cs-(Cds-O2d)(Cds-Cds)HH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsOsH) + group(Cds-OdCsH) + radical(CJCO)"""),
)
species(
label = 'C=CO[CH]CCC=O(18468)',
structure = SMILES('C=CO[CH]CCC=O'),
E0 = (-121.939,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,2782.5,750,1395,475,1775,1000,3025,407.5,1350,352.5,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (113.134,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.809442,0.0933091,-9.31901e-05,4.63472e-08,-8.94938e-12,-14482.2,31.9461], Tmin=(100,'K'), Tmax=(1273.04,'K')), NASAPolynomial(coeffs=[22.4925,0.0200929,-6.92133e-06,1.1702e-09,-7.75727e-14,-20415.1,-86.0978], Tmin=(1273.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-121.939,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(386.623,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsOsHH) + group(Cds-OdCsH) + group(Cds-CdsOsH) + group(Cds-CdsHH) + radical(CCsJOC(O))"""),
)
species(
label = 'C2H4(T)(94)',
structure = SMILES('[CH2][CH2]'),
E0 = (318.146,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,180,1436.54,1437.15,2688.96,2689.16],'cm^-1')),
HinderedRotor(inertia=(0.0257549,'amu*angstrom^2'), symmetry=1, barrier=(17.2441,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (28.0532,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.40736,0.0100312,6.40927e-06,-1.41291e-08,5.92671e-12,38288.2,6.11703], Tmin=(100,'K'), Tmax=(954.26,'K')), NASAPolynomial(coeffs=[5.52249,0.00856173,-2.90743e-06,5.02353e-10,-3.44572e-14,37547.8,-5.75276], Tmin=(954.26,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(318.146,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), label="""C2H4(T)""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = 'CH2CHO(40)',
structure = SMILES('[CH2]C=O'),
E0 = (1.22925,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,526.75,532.597,975.94,1639.13,1641.45],'cm^-1')),
HinderedRotor(inertia=(0.00114821,'amu*angstrom^2'), symmetry=1, barrier=(2.1986,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (43.0446,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3625.12,'J/mol'), sigma=(3.97,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=2.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.66874,0.0096233,1.60617e-05,-2.87682e-08,1.2503e-11,219.438,12.5694], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[5.91637,0.0088465,-3.14955e-06,5.05413e-10,-3.01305e-14,-1047.8,-6.1065], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(1.22925,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), label="""CH2CHO""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = '[CH2]COC=C(1028)',
structure = SMILES('[CH2]COC=C'),
E0 = (26.0821,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,3000,3100,440,815,1455,1000,198.59,198.767,201.413],'cm^-1')),
HinderedRotor(inertia=(0.757629,'amu*angstrom^2'), symmetry=1, barrier=(21.285,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.763991,'amu*angstrom^2'), symmetry=1, barrier=(21.2917,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.753524,'amu*angstrom^2'), symmetry=1, barrier=(21.2921,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (71.0978,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.39677,0.0428258,3.10342e-06,-4.61229e-08,2.41714e-11,3244.03,18.6616], Tmin=(100,'K'), Tmax=(926.093,'K')), NASAPolynomial(coeffs=[18.0716,0.00801341,-7.79595e-07,5.81055e-11,-7.24124e-15,-1440.09,-69.1192], Tmin=(926.093,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(26.0821,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(270.22,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-CdsOsH) + group(Cds-CdsHH) + radical(CJCO)"""),
)
species(
label = '[CH2]COC[CH]CC=O(18469)',
structure = SMILES('[CH2]COC[CH]CC=O'),
E0 = (7.75837,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,2782.5,750,1395,475,1775,1000,3000,3100,440,815,1455,1000,3025,407.5,1350,352.5,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.634914,0.0826083,-7.96566e-05,3.4417e-08,1.79551e-12,1046.23,32.7832], Tmin=(100,'K'), Tmax=(584.55,'K')), NASAPolynomial(coeffs=[7.3736,0.0484444,-2.26487e-05,4.36709e-09,-3.07104e-13,54.284,2.14499], Tmin=(584.55,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(7.75837,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(407.409,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + radical(CCJCO) + radical(CJCO)"""),
)
species(
label = 'C[CH]O[CH]CCC=O(18470)',
structure = SMILES('C[CH]O[CH]CCC=O'),
E0 = (-42.8208,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.368333,0.101892,-0.000131204,9.62802e-08,-2.88833e-11,-4998.17,32.089], Tmin=(100,'K'), Tmax=(810.139,'K')), NASAPolynomial(coeffs=[11.9676,0.0409806,-1.84177e-05,3.46241e-09,-2.39032e-13,-6996.83,-24.8268], Tmin=(810.139,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-42.8208,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(407.409,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + radical(CCsJOCs) + radical(CCsJOCs)"""),
)
species(
label = '[CH2][CH]OCCCC=O(18471)',
structure = SMILES('[CH2][CH]OCCCC=O'),
E0 = (-11.6877,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,2782.5,750,1395,475,1775,1000,3000,3100,440,815,1455,1000,3025,407.5,1350,352.5,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.177386,0.0940963,-0.000102859,4.69802e-08,3.01876e-12,-1277.55,31.3897], Tmin=(100,'K'), Tmax=(584.095,'K')), NASAPolynomial(coeffs=[9.90448,0.0447925,-2.06952e-05,3.93642e-09,-2.73405e-13,-2709.12,-12.8353], Tmin=(584.095,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-11.6877,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(407.409,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + radical(CJCO) + radical(CCsJOCs)"""),
)
species(
label = '[CH2]COCC[CH]C=O(18472)',
structure = SMILES('[CH2]COCCC=C[O]'),
E0 = (-48.8921,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.39169,0.0874158,-7.77501e-05,3.56143e-08,-6.50144e-12,-5714.61,33.6201], Tmin=(100,'K'), Tmax=(1320.37,'K')), NASAPolynomial(coeffs=[18.9608,0.0287889,-1.11481e-05,1.98671e-09,-1.34439e-13,-10825.2,-65.1232], Tmin=(1320.37,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-48.8921,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-(Cds-Cd)H) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsOsH) + radical(CJCO) + radical(C=COJ)"""),
)
species(
label = '[CH2]COCCC[C]=O(18473)',
structure = SMILES('[CH2]COCCC[C]=O'),
E0 = (-32.1831,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0806489,0.0972262,-0.00013275,1.10349e-07,-3.77342e-11,-3730.96,33.6078], Tmin=(100,'K'), Tmax=(801.013,'K')), NASAPolynomial(coeffs=[8.28099,0.0466608,-2.15618e-05,4.07885e-09,-2.81265e-13,-4787.88,-3.11282], Tmin=(801.013,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-32.1831,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(407.409,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + radical(CCCJ=O) + radical(CJCO)"""),
)
species(
label = 'CCO[CH][CH]CC=O(18474)',
structure = SMILES('CCO[CH][CH]CC=O'),
E0 = (-23.3747,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.254295,0.0880617,-9.73215e-05,6.44594e-08,-1.81455e-11,-2681.45,32.9121], Tmin=(100,'K'), Tmax=(847.278,'K')), NASAPolynomial(coeffs=[9.46566,0.0445751,-2.03342e-05,3.88362e-09,-2.71899e-13,-4242.37,-10.0009], Tmin=(847.278,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-23.3747,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(407.409,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + radical(CCsJOCs) + radical(CCJCO)"""),
)
species(
label = 'CCO[CH]C[CH]C=O(18475)',
structure = SMILES('CCO[CH]CC=C[O]'),
E0 = (-80.0252,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.734188,0.0914926,-8.49112e-05,4.00917e-08,-7.43507e-12,-9443.72,33.6637], Tmin=(100,'K'), Tmax=(1315.46,'K')), NASAPolynomial(coeffs=[21.2355,0.0246881,-8.73522e-06,1.48635e-09,-9.82427e-14,-15223.8,-78.3514], Tmin=(1315.46,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-80.0252,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-(Cds-Cd)H) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsOsH) + radical(CCsJOCs) + radical(C=COJ)"""),
)
species(
label = 'CCO[CH]CC[C]=O(18476)',
structure = SMILES('CCO[CH]CC[C]=O'),
E0 = (-63.3162,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.222052,0.0924239,-0.000101014,4.79485e-08,8.28953e-13,-7487.91,31.4066], Tmin=(100,'K'), Tmax=(594.923,'K')), NASAPolynomial(coeffs=[9.9406,0.0436454,-1.97926e-05,3.73378e-09,-2.58162e-13,-8937.41,-12.8964], Tmin=(594.923,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-63.3162,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(407.409,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + radical(CCsJOCs) + radical(CCCJ=O)"""),
)
species(
label = '[O][CH]CCC=O(5764)',
structure = SMILES('[O][CH]CCC=O'),
E0 = (19.0721,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2782.5,750,1395,475,1775,1000,3025,407.5,1350,352.5,180,180,2092.49],'cm^-1')),
HinderedRotor(inertia=(0.166661,'amu*angstrom^2'), symmetry=1, barrier=(3.83187,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.164989,'amu*angstrom^2'), symmetry=1, barrier=(3.79342,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.166144,'amu*angstrom^2'), symmetry=1, barrier=(3.81999,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (86.0892,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.56305,0.0641173,-0.000107371,1.07453e-07,-4.10375e-11,2371.43,23.494], Tmin=(100,'K'), Tmax=(845.992,'K')), NASAPolynomial(coeffs=[1.74033,0.0387928,-1.90537e-05,3.64333e-09,-2.50242e-13,3217.68,27.8472], Tmin=(845.992,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(19.0721,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(270.22,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsOsHH) + group(Cds-OdCsH) + radical(CCsJOH) + radical(CCOJ)"""),
)
species(
label = '[CH2]COC1CC[CH]O1(18477)',
structure = SMILES('[CH2]COC1CC[CH]O1'),
E0 = (-23.5499,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.445704,0.0656209,-1.33816e-05,-3.56393e-08,2.12922e-11,-2692.73,26.6818], Tmin=(100,'K'), Tmax=(908.477,'K')), NASAPolynomial(coeffs=[16.4831,0.0286856,-8.00187e-06,1.21729e-09,-7.90494e-14,-6996.39,-56.7988], Tmin=(908.477,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-23.5499,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(419.881,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-CsCs) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + ring(Tetrahydrofuran) + radical(CCsJOCs) + radical(CJCO)"""),
)
species(
label = '[CH]1CC[CH]OCCO1(18478)',
structure = SMILES('[CH]1CC[CH]OCCO1'),
E0 = (6.70559,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.632804,0.0594285,-1.62018e-06,-3.69214e-08,1.72633e-11,940.487,23.9373], Tmin=(100,'K'), Tmax=(1018.24,'K')), NASAPolynomial(coeffs=[15.2121,0.0348921,-1.36991e-05,2.56036e-09,-1.82224e-13,-3725.64,-54.9963], Tmin=(1018.24,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(6.70559,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(432.353,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-CsCs) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + ring(Cyclooctane) + radical(CCsJOCs) + radical(CCsJOCs)"""),
)
species(
label = 'C=COCCCC=O(18479)',
structure = SMILES('C=COCCCC=O'),
E0 = (-315.867,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.221991,0.0813895,-5.78506e-05,1.32875e-08,1.67494e-12,-37828.1,30.3597], Tmin=(100,'K'), Tmax=(1038.57,'K')), NASAPolynomial(coeffs=[18.7727,0.0285527,-1.08873e-05,1.98048e-09,-1.38157e-13,-42869.4,-67.2737], Tmin=(1038.57,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-315.867,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsOsHH) + group(Cds-CdsOsH) + group(Cds-OdCsH) + group(Cds-CdsHH)"""),
)
species(
label = 'CCOC=CCC=O(18480)',
structure = SMILES('CCOC=CCC=O'),
E0 = (-320.795,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.115907,0.0667182,-7.83995e-06,-4.08327e-08,2.05046e-11,-38426.5,28.9331], Tmin=(100,'K'), Tmax=(1015.14,'K')), NASAPolynomial(coeffs=[20.9416,0.0267945,-1.11105e-05,2.20494e-09,-1.64367e-13,-44825.8,-82.5455], Tmin=(1015.14,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-320.795,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(Cs-(Cds-O2d)(Cds-Cds)HH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsOsH) + group(Cds-OdCsH)"""),
)
species(
label = 'CO(12)',
structure = SMILES('[C-]#[O+]'),
E0 = (-119.219,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2084.51],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0101,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(762.44,'J/mol'), sigma=(3.69,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.5971,-0.00102424,2.83336e-06,-1.75825e-09,3.42587e-13,-14343.2,3.45822], Tmin=(100,'K'), Tmax=(1669.93,'K')), NASAPolynomial(coeffs=[2.92796,0.00181931,-8.35308e-07,1.51269e-10,-9.88872e-15,-14292.7,6.51157], Tmin=(1669.93,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-119.219,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""CO""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = '[CH2]CO[CH]CC(10575)',
structure = SMILES('[CH2]CO[CH]CC'),
E0 = (94.892,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3025,407.5,1350,352.5,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (86.1323,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3205.85,'J/mol'), sigma=(5.93905,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=500.75 K, Pc=34.72 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.765495,0.0727997,-7.10958e-05,3.93372e-08,-9.07339e-12,11528,24.9265], Tmin=(100,'K'), Tmax=(1031.23,'K')), NASAPolynomial(coeffs=[10.817,0.0338109,-1.4383e-05,2.67335e-09,-1.84932e-13,9454.91,-23.8753], Tmin=(1031.23,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(94.892,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(361.68,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCsJOCs) + radical(CJCO)"""),
)
species(
label = '[CH2]COC([CH2])CC=O(18481)',
structure = SMILES('[CH2]COC([CH2])CC=O'),
E0 = (7.81548,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2782.5,750,1395,475,1775,1000,1380,1390,370,380,2900,435,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.400351,0.105313,-0.000151966,1.28446e-07,-4.3879e-11,1090.24,33.3428], Tmin=(100,'K'), Tmax=(814.153,'K')), NASAPolynomial(coeffs=[9.24012,0.0462622,-2.16399e-05,4.09793e-09,-2.82013e-13,-92.2083,-8.80613], Tmin=(814.153,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(7.81548,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(407.409,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + longDistanceInteraction_noncyclic(OsCs-ST) + group(Cs-CsCsOsH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + radical(CJCO) + radical(CJC(C)OC)"""),
)
species(
label = 'O=CCCC1CCO1(18426)',
structure = SMILES('O=CCCC1CCO1'),
E0 = (-263.102,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.905414,0.0593888,-1.29759e-05,-2.31368e-08,1.3584e-11,-31524.6,27.0002], Tmin=(100,'K'), Tmax=(926.499,'K')), NASAPolynomial(coeffs=[11.212,0.0369261,-1.22823e-05,2.03325e-09,-1.34056e-13,-34380.1,-27.0401], Tmin=(926.499,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-263.102,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(419.881,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsOsHH) + group(Cds-OdCsH) + ring(Oxetane)"""),
)
species(
label = '[CH2]CO[CH]COC=C(18482)',
structure = SMILES('[CH2]CO[CH]COC=C'),
E0 = (29.9088,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,3000,3100,440,815,1455,1000,3025,407.5,1350,352.5,200,800,960,1120,1280,1440,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-1.10242,0.102335,-0.000102109,4.66269e-08,-6.88403e-12,3789.68,32.9469], Tmin=(100,'K'), Tmax=(989.279,'K')), NASAPolynomial(coeffs=[23.2784,0.021819,-7.41511e-06,1.27114e-09,-8.65771e-14,-1918.14,-88.8811], Tmin=(989.279,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(29.9088,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(407.409,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-Cs(Cds-Cd)) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-CdsOsH) + group(Cds-CdsHH) + radical(CJCO) + radical(CCsJOCs)"""),
)
species(
label = '[CH2]CO[CH]CC=CO(18483)',
structure = SMILES('[CH2]CO[CH]CC=CO'),
E0 = (-9.89868,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3025,407.5,1350,352.5,3615,1277.5,1000,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-1.39105,0.104869,-0.000112908,6.02166e-08,-1.23251e-11,-984.742,35.2373], Tmin=(100,'K'), Tmax=(1257.45,'K')), NASAPolynomial(coeffs=[24.7819,0.0180072,-4.99219e-06,7.23091e-10,-4.36941e-14,-7282.03,-95.895], Tmin=(1257.45,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-9.89868,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(407.409,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-(Cds-Cd)H) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsOsH) + radical(CJCO) + radical(CCsJOCs)"""),
)
species(
label = '[CH2]C[O](96)',
structure = SMILES('[CH2]C[O]'),
E0 = (188.892,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,1398.33],'cm^-1')),
HinderedRotor(inertia=(0.00547724,'amu*angstrom^2'), symmetry=1, barrier=(7.58298,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (44.0526,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.57171,0.0102136,5.90913e-06,-7.99869e-09,2.07078e-12,22733,11.7517], Tmin=(100,'K'), Tmax=(1490.84,'K')), NASAPolynomial(coeffs=[4.741,0.01502,-6.91914e-06,1.31179e-09,-8.9824e-14,21501.6,2.68291], Tmin=(1490.84,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(188.892,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(199.547,'J/(mol*K)'), comment="""Thermo library: Klippenstein_Glarborg2016 + radical(CJCO) + radical(CCOJ)"""),
)
species(
label = '[CH]CCC=O(18484)',
structure = SMILES('[CH]CCC=O'),
E0 = (221.112,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2782.5,750,1395,475,1775,1000,338.309,338.317,1520,1520.03],'cm^-1')),
HinderedRotor(inertia=(0.00147232,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.09193,'amu*angstrom^2'), symmetry=1, barrier=(7.47087,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0919401,'amu*angstrom^2'), symmetry=1, barrier=(7.47082,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (70.0898,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.39867,0.0383489,-2.66828e-05,1.05264e-08,-1.89984e-12,26648.5,18.3896], Tmin=(100,'K'), Tmax=(1182.31,'K')), NASAPolynomial(coeffs=[5.79639,0.0268536,-1.20987e-05,2.30283e-09,-1.60963e-13,25845.1,1.42861], Tmin=(1182.31,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(221.112,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(245.277,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + radical(CCJ2_triplet)"""),
)
species(
label = 'CH2(19)',
structure = SMILES('[CH2]'),
E0 = (381.563,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1032.72,2936.3,3459],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.8328,0.000224446,4.68033e-06,-6.04743e-09,2.59009e-12,45920.8,1.40666], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16229,0.00281798,-7.56235e-07,5.05446e-11,5.65236e-15,46099.1,4.77656], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(381.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = '[CH2]O[CH]CCC=O(18485)',
structure = SMILES('[CH2]O[CH]CCC=O'),
E0 = (2.04251,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2782.5,750,1395,475,1775,1000,3000,3100,440,815,1455,1000,3025,407.5,1350,352.5,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (100.116,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.787429,0.0749119,-8.90054e-05,6.27045e-08,-1.84232e-11,357.485,28.3533], Tmin=(100,'K'), Tmax=(820.645,'K')), NASAPolynomial(coeffs=[9.05276,0.0346248,-1.53673e-05,2.88312e-09,-1.9927e-13,-999.093,-9.88836], Tmin=(820.645,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(2.04251,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(336.736,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsOsHH) + group(Cs-OsHHH) + group(Cds-OdCsH) + radical(CCsJOCs) + radical(CsJOCC)"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.69489,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
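# The transitionState() entries below are specified by their E0 values only (no
# frequency data); the corresponding rate coefficients are supplied by the
# Arrhenius fits attached to each reaction() block further down.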
transitionState(
label = 'TS1',
E0 = (-11.6877,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (94.0802,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (58.1851,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (109.322,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (96.6224,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (44.1297,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (63.4545,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (116.542,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (146.676,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (153.831,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS11',
E0 = (115.924,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS12',
E0 = (84.9627,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS13',
E0 = (34.7547,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS14',
E0 = (70.5214,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS15',
E0 = (69.296,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS16',
E0 = (337.218,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS17',
E0 = (46.4699,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS18',
E0 = (59.7478,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS19',
E0 = (51.7125,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS20',
E0 = (13.2856,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS21',
E0 = (333.405,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS22',
E0 = (166.442,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS23',
E0 = (-3.40336,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS24',
E0 = (343.709,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS25',
E0 = (133.968,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS26',
E0 = (410.003,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS27',
E0 = (383.605,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
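# Path reactions of network 3476; each kinetics entry is a high-pressure-limit
# Arrhenius estimate drawn from RMG rate rules or training reactions, as recorded
# in the accompanying comment strings.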
reaction(
label = 'reaction1',
reactants = ['[CH2]CO[CH]CCC=O(18425)'],
products = ['C2H4(21)', 'O=CCCC=O(5767)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission"""),
)
reaction(
label = 'reaction2',
reactants = ['[CH2]CO[CH]CCC=O(18425)'],
products = ['[CH2]COC1CCC1[O](18465)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(2.13771e+06,'s^-1'), n=1.58803, Ea=(105.768,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5_SS;multiplebond_intra;radadd_intra_csHNd] for rate rule [R5_SS_CO;carbonylbond_intra_H;radadd_intra_csHNd]
Euclidian distance = 2.2360679775
family: Intra_R_Add_Exocyclic
Ea raised from 101.2 to 105.8 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction3',
reactants = ['[CH2]CO[CH]CCC=O(18425)'],
products = ['[O]C1CC[CH]OCC1(18466)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(1.19e+11,'s^-1'), n=0.08, Ea=(69.8728,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R8;multiplebond_intra;radadd_intra_cs2H] for rate rule [R8;carbonylbond_intra_H;radadd_intra_cs2H]
Euclidian distance = 2.0
family: Intra_R_Add_Exocyclic"""),
)
reaction(
label = 'reaction4',
reactants = ['H(3)', '[CH2]COC=CCC=O(18467)'],
products = ['[CH2]CO[CH]CCC=O(18425)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(3.72e+08,'cm^3/(mol*s)'), n=1.477, Ea=(6.73624,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2000,'K'), comment="""From training reaction 2825 used for Cds-CsH_Cds-OsH;HJ
Exact match found for rate rule [Cds-CsH_Cds-OsH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction5',
reactants = ['H(3)', 'C=CO[CH]CCC=O(18468)'],
products = ['[CH2]CO[CH]CCC=O(18425)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(2.182e+10,'cm^3/(mol*s)'), n=0.859, Ea=(6.76971,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2000,'K'), comment="""Estimated using an average for rate rule [Cds-OsH_Cds;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction6',
reactants = ['C2H4(T)(94)', 'O=CCCC=O(5767)'],
products = ['[CH2]CO[CH]CCC=O(18425)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(1.6e+10,'cm^3/(mol*s)'), n=1.39, Ea=(35.8862,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2000,'K'), comment="""Estimated using template [Od_CO-CsH;YJ] for rate rule [Od_CO-CsH;CJ]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 4.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction7',
reactants = ['CH2CHO(40)', '[CH2]COC=C(1028)'],
products = ['[CH2]CO[CH]CCC=O(18425)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(0.0114756,'m^3/(mol*s)'), n=2.44484, Ea=(36.1431,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Cds-HH_Cds;CsJ-OneDeHH] for rate rule [Cds-HH_Cds-OsH;CsJ-COHH]
Euclidian distance = 1.41421356237
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction8',
reactants = ['[CH2]COC[CH]CC=O(18469)'],
products = ['[CH2]CO[CH]CCC=O(18425)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(5.4e-20,'s^-1'), n=9.13, Ea=(108.784,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""From training reaction 341 used for R2H_S;C_rad_out_H/NonDeC;Cs_H_out_H/NonDeO
Exact match found for rate rule [R2H_S;C_rad_out_H/NonDeC;Cs_H_out_H/NonDeO]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction9',
reactants = ['[CH2]CO[CH]CCC=O(18425)'],
products = ['C[CH]O[CH]CCC=O(18470)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(3.7e+13,'s^-1','+|-',2), n=-0.1, Ea=(158.364,'kJ/mol'), T0=(1,'K'), Tmin=(700,'K'), Tmax=(1800,'K'), comment="""From training reaction 347 used for R2H_S;C_rad_out_2H;Cs_H_out_H/NonDeO
Exact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/NonDeO]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction10',
reactants = ['[CH2][CH]OCCCC=O(18471)'],
products = ['[CH2]CO[CH]CCC=O(18425)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(4.53164e+09,'s^-1'), n=1.09, Ea=(165.519,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS_O;Y_rad_out;Cs_H_out_H/(NonDeC/Cs)]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction11',
reactants = ['[CH2]CO[CH]CCC=O(18425)'],
products = ['[CH2]COCC[CH]C=O(18472)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(6.82e+09,'s^-1'), n=0.73, Ea=(127.612,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3H_SS_Cs;C_rad_out_1H;Cs_H_out_H/OneDe] for rate rule [R3H_SS_Cs;C_rad_out_H/NonDeO;Cs_H_out_H/CO]
Euclidian distance = 1.41421356237
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction12',
reactants = ['[CH2]CO[CH]CCC=O(18425)'],
products = ['[CH2]COCCC[C]=O(18473)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(27900,'s^-1'), n=1.97, Ea=(96.6504,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""Estimated using template [R4H_SSS;C_rad_out_H/NonDeO;XH_out] for rate rule [R4H_SSS;C_rad_out_H/NonDeO;CO_H_out]
Euclidian distance = 1.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction13',
reactants = ['[CH2]CO[CH]CCC=O(18425)'],
products = ['CCO[CH][CH]CC=O(18474)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(262000,'s^-1'), n=1.62, Ea=(46.4424,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5Hall;C_rad_out_2H;Cs_H_out_H/NonDeC] for rate rule [R5HJ_3;C_rad_out_2H;Cs_H_out_H/NonDeC]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction14',
reactants = ['[CH2]CO[CH]CCC=O(18425)'],
products = ['CCO[CH]C[CH]C=O(18475)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(54838.5,'s^-1'), n=1.94766, Ea=(82.2091,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R6Hall;C_rad_out_2H;Cs_H_out_H/OneDe] for rate rule [R6HJ_3;C_rad_out_2H;Cs_H_out_H/CO]
Euclidian distance = 1.41421356237
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction15',
reactants = ['[CH2]CO[CH]CCC=O(18425)'],
products = ['CCO[CH]CC[C]=O(18476)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(60975.7,'s^-1'), n=1.58648, Ea=(80.9836,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R7Hall;C_rad_out_2H;XH_out] for rate rule [R7HJ_3;C_rad_out_2H;CO_H_out]
Euclidian distance = 1.41421356237
family: intra_H_migration"""),
)
reaction(
label = 'reaction16',
reactants = ['C2H4(T)(94)', '[O][CH]CCC=O(5764)'],
products = ['[CH2]CO[CH]CCC=O(18425)'],
transitionState = 'TS16',
kinetics = Arrhenius(A=(1.49215e+07,'m^3/(mol*s)'), n=0.027223, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: R_Recombination
Ea raised from -14.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction17',
reactants = ['[CH2]CO[CH]CCC=O(18425)'],
products = ['[CH2]COC1CC[CH]O1(18477)'],
transitionState = 'TS17',
kinetics = Arrhenius(A=(4.64e+06,'s^-1'), n=1.15, Ea=(58.1576,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5_SS;multiplebond_intra;radadd_intra_csHNd] for rate rule [R5_SS_CO;carbonyl_intra_H;radadd_intra_csHO]
Euclidian distance = 2.44948974278
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction18',
reactants = ['[CH2]CO[CH]CCC=O(18425)'],
products = ['[CH]1CC[CH]OCCO1(18478)'],
transitionState = 'TS18',
kinetics = Arrhenius(A=(1.18057e+11,'s^-1'), n=0.420859, Ea=(71.4354,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R6plus;multiplebond_intra;radadd_intra_cs2H] + [R6plus;carbonyl_intra_H;radadd_intra] for rate rule [R8_linear;carbonyl_intra_H;radadd_intra_cs2H]
Euclidian distance = 2.82842712475
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction19',
reactants = ['[CH2]CO[CH]CCC=O(18425)'],
products = ['C=COCCCC=O(18479)'],
transitionState = 'TS19',
kinetics = Arrhenius(A=(1.4874e+09,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3radExo;Y_rad_NDe;XH_Rrad]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction20',
reactants = ['[CH2]CO[CH]CCC=O(18425)'],
products = ['CCOC=CCC=O(18480)'],
transitionState = 'TS20',
kinetics = Arrhenius(A=(4.25221e+09,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction21',
reactants = ['CO(12)', '[CH2]CO[CH]CC(10575)'],
products = ['[CH2]CO[CH]CCC=O(18425)'],
transitionState = 'TS21',
kinetics = Arrhenius(A=(274200,'cm^3/(mol*s)'), n=2.53, Ea=(357.732,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 4 used for CO;C_pri/NonDeC
Exact match found for rate rule [CO;C_pri/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: 1,2_Insertion_CO"""),
)
reaction(
label = 'reaction22',
reactants = ['[CH2]COC([CH2])CC=O(18481)'],
products = ['[CH2]CO[CH]CCC=O(18425)'],
transitionState = 'TS22',
kinetics = Arrhenius(A=(2.95289e+09,'s^-1'), n=1, Ea=(158.627,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [cCsCJ;CsJ-HH;C] + [cCs(-HR!H)CJ;CsJ;C] for rate rule [cCs(-HR!H)CJ;CsJ-HH;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction23',
reactants = ['[CH2]CO[CH]CCC=O(18425)'],
products = ['O=CCCC1CCO1(18426)'],
transitionState = 'TS23',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_single;Cpri_rad_out_2H] for rate rule [R4_SSS;C_rad_out_H/NonDeC;Cpri_rad_out_2H]
Euclidian distance = 2.0
family: Birad_recombination"""),
)
reaction(
label = 'reaction24',
reactants = ['[CH2]CO[CH]COC=C(18482)'],
products = ['[CH2]CO[CH]CCC=O(18425)'],
transitionState = 'TS24',
kinetics = Arrhenius(A=(7040,'s^-1'), n=2.66, Ea=(313.8,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 7 used for R_ROR;R1_doublebond_CH2;R2_doublebond_H;R_O_C
Exact match found for rate rule [R_ROR;R1_doublebond_CH2;R2_doublebond_H;R_O_C]
Euclidian distance = 0
family: ketoenol"""),
)
reaction(
label = 'reaction25',
reactants = ['[CH2]CO[CH]CC=CO(18483)'],
products = ['[CH2]CO[CH]CCC=O(18425)'],
transitionState = 'TS25',
kinetics = Arrhenius(A=(605.045,'s^-1'), n=2.96, Ea=(143.867,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R_ROR;R1_doublebond;R2_doublebond_H;R_O_H] for rate rule [R_ROR;R1_doublebond_CHR;R2_doublebond_H;R_O_H]
Euclidian distance = 1.0
family: ketoenol"""),
)
reaction(
label = 'reaction26',
reactants = ['[CH2]C[O](96)', '[CH]CCC=O(18484)'],
products = ['[CH2]CO[CH]CCC=O(18425)'],
transitionState = 'TS26',
kinetics = Arrhenius(A=(1355.7,'m^3/(mol*s)'), n=1.40819, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using an average for rate rule [O_rad/NonDe;Birad]
Euclidian distance = 0
family: Birad_R_Recombination
Ea raised from -12.0 to 0 kJ/mol."""),
)
reaction(
label = 'reaction27',
reactants = ['CH2(19)', '[CH2]O[CH]CCC=O(18485)'],
products = ['[CH2]CO[CH]CCC=O(18425)'],
transitionState = 'TS27',
kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_rad/H2/O;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
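# Pressure-dependent network definition: the single isomer [CH2]CO[CH]CCC=O(18425)
# with the bimolecular channel C2H4(21) + O=CCCC=O(5767), in an N2/Ne bath gas.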
network(
label = '3476',
isomers = [
'[CH2]CO[CH]CCC=O(18425)',
],
reactants = [
('C2H4(21)', 'O=CCCC=O(5767)'),
],
bathGas = {
'N2': 0.5,
'Ne': 0.5,
},
)
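# Master-equation job settings: temperature/pressure grids, energy-grain controls,
# the modified strong collision approximation, and Chebyshev fitting of k(T,P).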
pressureDependence(
label = '3476',
Tmin = (300,'K'),
Tmax = (2000,'K'),
Tcount = 8,
Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
Pmin = (0.01,'bar'),
Pmax = (100,'bar'),
Pcount = 5,
Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 250,
method = 'modified strong collision',
interpolationModel = ('Chebyshev', 6, 4),
activeKRotor = True,
activeJRotor = True,
rmgmode = True,
)
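# Note (hedged): each Arrhenius(A, n, Ea, T0) entry above parameterizes the modified
# Arrhenius rate expression k(T) = A * (T/T0)**n * exp(-Ea / (R*T)), as used by RMG;
# the embedded comment strings record which rate rule or training reaction each
# estimate came from.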
# ====== /tests/artificial/transf_cumsum/trend_linear/cycle_30/ar_12/test_artificial_1024_cumsum_linear_30_12_100.py | repo: Henri-Lo/pyaf | license: BSD-3-Clause ======
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
dataset = tsds.generate_random_TS(N = 1024 , FREQ = 'D', seed = 0, trendtype = "linear", cycle_length = 30, transform = "cumsum", sigma = 0.0, exog_count = 100, ar_order = 12);
art.process_dataset(dataset);
# ====== /code/tmp_rtrip/lib2to3/fixes/fix_raise.py | repo: emilyemorehouse/ast-and-me | license: MIT ======
"""Fixer for 'raise E, V, T'
raise -> raise
raise E -> raise E
raise E, V -> raise E(V)
raise E, V, T -> raise E(V).with_traceback(T)
raise E, None, T -> raise E.with_traceback(T)
raise (((E, E'), E''), E'''), V -> raise E(V)
raise "foo", V, T -> warns about string exceptions
CAVEATS:
1) "raise E, V" will be incorrectly translated if V is an exception
instance. The correct Python 3 idiom is
raise E from V
but since we can't detect instance-hood by syntax alone and since
any client code would have to be changed as well, we don't automate
this.
"""
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, Attr, ArgList, is_tuple
class FixRaise(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
"""
def transform(self, node, results):
syms = self.syms
exc = results['exc'].clone()
if exc.type == token.STRING:
msg = 'Python 3 does not support string exceptions'
self.cannot_convert(node, msg)
return
if is_tuple(exc):
while is_tuple(exc):
exc = exc.children[1].children[0].clone()
exc.prefix = ' '
if 'val' not in results:
new = pytree.Node(syms.raise_stmt, [Name('raise'), exc])
new.prefix = node.prefix
return new
val = results['val'].clone()
if is_tuple(val):
args = [c.clone() for c in val.children[1:-1]]
else:
val.prefix = ''
args = [val]
if 'tb' in results:
tb = results['tb'].clone()
tb.prefix = ''
e = exc
if val.type != token.NAME or val.value != 'None':
e = Call(exc, args)
with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
new = pytree.Node(syms.simple_stmt, [Name('raise')] + with_tb)
new.prefix = node.prefix
return new
else:
return pytree.Node(syms.raise_stmt, [Name('raise'), Call(exc,
args)], prefix=node.prefix)
# ====== /C3CTF/2017/lfa/server.py | repo: TheusZer0/ctf-archives | license: MIT ======
#!/usr/bin/python
import tempfile
import os
import string
import random
def randstr():
return ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _ in range(10))
code = "require 'LFA'\n"
code += "syscall 1, 1, \"hello\\n\", 6\n\n"
max = 600 # 600 lines should be more than enough ;)
print "Enter your code, enter the string END_OF_PWN to finish "
while max:
new_code = raw_input("code> ")
if new_code == "END_OF_PWN":
break
code += new_code + "\n"
max -= 1
name = "/tmp/%s" % randstr()
with open(name, "w+") as f:
f.write(code)
flag = open("flag", "r")
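# The open flag file is duplicated onto descriptor 1023, which the child process
# spawned by os.system() below inherits; the flag is never written to stdout here.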
os.dup2(flag.fileno(), 1023)
flag.close()
cmd = "timeout 40 ruby %s" % name
os.system(cmd)
# ====== /wildcard/test/test_data/exceptions.py | repo: kickstandproject/wildcard | license: Apache-2.0 ======
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import exceptions as keystone_exceptions
from wildcard.test.test_data import utils
def create_stubbed_exception(cls, status_code=500):
msg = "Expected failure."
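    # The exception class is monkeypatched below so tests can build instances from
    # just (status_code, message) and keep their logging quiet, independent of the
    # real client exception's constructor signature.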
def fake_init_exception(self, code, message, **kwargs):
self.code = code
self.message = message
def fake_str(self):
return str(self.message)
def fake_unicode(self):
return unicode(self.message)
cls.__init__ = fake_init_exception
cls.__str__ = fake_str
cls.__unicode__ = fake_unicode
cls.silence_logging = True
return cls(status_code, msg)
def data(TEST):
TEST.exceptions = utils.TestDataContainer()
unauth = keystone_exceptions.Unauthorized
TEST.exceptions.keystone_unauthorized = create_stubbed_exception(unauth)
keystone_exception = keystone_exceptions.ClientException
TEST.exceptions.keystone = create_stubbed_exception(keystone_exception)
# ====== /agesprot/apps/activity/migrations/0001_initial.py | repo: agesprot1/agesprot | license: none ======
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-16 03:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('base', '0001_initial'),
('project', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Actividad',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre_actividad', models.CharField(max_length=45)),
('descripcion_actividad', models.CharField(max_length=100)),
('fecha_creacion', models.DateField(auto_now=True)),
('fecha_entrega', models.DateField()),
('estado', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.Tipo_estado')),
('prioridad', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.Tipo_prioridad')),
('proyecto', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Proyecto')),
],
),
migrations.CreateModel(
name='Actividad_role',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('actividad', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='activity.Actividad')),
('role', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Roles_project')),
],
),
]
# ====== /hackerRank/algorithms/SolveMeFirst/solve_me_first.py | repo: kruart/coding_challenges | license: none ======
# https://www.hackerrank.com/challenges/solve-me-first/problem
def solve_me_first(a, b):
return a + b
a = int(input())
b = int(input())
res = solve_me_first(a, b)
print(res)
# ====== /dnacentersdk/models/validators/v2_1_2/jsd_bf859ac64a0ba19c.py | repo: sandhjos/dnacentersdk | license: MIT ======
# -*- coding: utf-8 -*-
"""DNA Center Create HTTP read credentials data model.
Copyright (c) 2019-2020 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorBf859Ac64A0BA19C(object):
"""Create HTTP read credentials request schema definition."""
def __init__(self):
super(JSONSchemaValidatorBf859Ac64A0BA19C, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"items": {
"properties": {
"comments": {
"description":
"",
"type": [
"string",
"null"
]
},
"credentialType": {
"description":
"",
"enum": [
"GLOBAL",
"APP",
null
],
"type": [
"string",
"null"
]
},
"description":
{
"description":
"",
"type": [
"string",
"null"
]
},
"id": {
"description":
"",
"type": [
"string",
"null"
]
},
"instanceTenantId": {
"description":
"",
"type": [
"string",
"null"
]
},
"instanceUuid": {
"description":
"",
"type": [
"string",
"null"
]
},
"password": {
"description":
"",
"type": [
"string",
"null"
]
},
"port": {
"type": [
"number",
"null"
]
},
"secure": {
"type": [
"boolean",
"null"
]
},
"username": {
"description":
"",
"type": [
"string",
"null"
]
}
},
"type": [
"object",
"null"
]
},
"type": "array"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
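        # Hedged usage sketch (the payload values below are invented; only the field
        # names and types come from the schema defined above):
        #     validator = JSONSchemaValidatorBf859Ac64A0BA19C()
        #     validator.validate([{'credentialType': 'GLOBAL', 'port': 443, 'secure': True}])
        #     # a request that does not match the schema raises MalformedRequest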
# ====== /biomixer-venv/lib/python3.9/site-packages/pylint/epylint.py | repo: Shellowb/BioMixer | license: MIT ======
# mode: python; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4
# -*- vim:fenc=utf-8:ft=python:et:sw=4:ts=4:sts=4
# Copyright (c) 2008-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2014 Jakob Normark <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Manuel Vázquez Acosta <[email protected]>
# Copyright (c) 2014 Derek Harland <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2015 Mihai Balint <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2017, 2020 hippo91 <[email protected]>
# Copyright (c) 2017 Daniela Plascencia <[email protected]>
# Copyright (c) 2018 Sushobhit <[email protected]>
# Copyright (c) 2018 Ryan McGuire <[email protected]>
# Copyright (c) 2018 thernstig <[email protected]>
# Copyright (c) 2018 Radostin Stoyanov <[email protected]>
# Copyright (c) 2019, 2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2019 Hugo van Kemenade <[email protected]>
# Copyright (c) 2020 Damien Baty <[email protected]>
# Copyright (c) 2020 Anthony Sottile <[email protected]>
# Copyright (c) 2021 Andreas Finkler <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""Emacs and Flymake compatible Pylint.
This script is for integration with emacs and is compatible with flymake mode.
epylint walks out of python packages before invoking pylint. This avoids
reporting import errors that occur when a module within a package uses the
absolute import path to get another module within this package.
For example:
- Suppose a package is structured as
a/__init__.py
a/b/x.py
a/c/y.py
- Then if y.py imports x as "from a.b import x" the following produces pylint
errors
cd a/c; pylint y.py
- The following obviously doesn't
pylint a/c/y.py
- As this script will be invoked by emacs within the directory of the file
we are checking we need to go out of it to avoid these false positives.
You may also use py_run to run pylint with desired options and get back (or not)
its output.
"""
import os
import shlex
import sys
from io import StringIO
from subprocess import PIPE, Popen
def _get_env():
"""Extracts the environment PYTHONPATH and appends the current sys.path to
those."""
env = dict(os.environ)
env["PYTHONPATH"] = os.pathsep.join(sys.path)
return env
def lint(filename, options=()):
"""Pylint the given file.
When run from emacs we will be in the directory of a file, and passed its
filename. If this file is part of a package and is trying to import other
modules from within its own package or another package rooted in a directory
below it, pylint will classify it as a failed import.
To get around this, we traverse down the directory tree to find the root of
the package this module is in. We then invoke pylint from this directory.
Finally, we must correct the filenames in the output generated by pylint so
Emacs doesn't become confused (it will expect just the original filename,
while pylint may extend it with extra directories if we've traversed down
the tree)
"""
# traverse downwards until we are out of a python package
full_path = os.path.abspath(filename)
parent_path = os.path.dirname(full_path)
child_path = os.path.basename(full_path)
while parent_path != "/" and os.path.exists(
os.path.join(parent_path, "__init__.py")
):
child_path = os.path.join(os.path.basename(parent_path), child_path)
parent_path = os.path.dirname(parent_path)
# Start pylint
# Ensure we use the python and pylint associated with the running epylint
run_cmd = "import sys; from pylint.lint import Run; Run(sys.argv[1:])"
cmd = (
[sys.executable, "-c", run_cmd]
+ [
"--msg-template",
"{path}:{line}: {category} ({msg_id}, {symbol}, {obj}) {msg}",
"-r",
"n",
child_path,
]
+ list(options)
)
with Popen(
cmd, stdout=PIPE, cwd=parent_path, env=_get_env(), universal_newlines=True
) as process:
for line in process.stdout:
# remove pylintrc warning
if line.startswith("No config file found"):
continue
            # modify the file name that's output to reverse the path traversal we made
parts = line.split(":")
if parts and parts[0] == child_path:
line = ":".join([filename] + parts[1:])
print(line, end=" ")
process.wait()
return process.returncode
def py_run(command_options="", return_std=False, stdout=None, stderr=None):
"""Run pylint from python
``command_options`` is a string containing ``pylint`` command line options;
``return_std`` (boolean) indicates return of created standard output
and error (see below);
``stdout`` and ``stderr`` are 'file-like' objects in which standard output
could be written.
Calling agent is responsible for stdout/err management (creation, close).
Default standard output and error are those from sys,
or standalone ones (``subprocess.PIPE``) are used
if they are not set and ``return_std``.
If ``return_std`` is set to ``True``, this function returns a 2-uple
containing standard output and error related to created process,
as follows: ``(stdout, stderr)``.
To silently run Pylint on a module, and get its standard output and error:
>>> (pylint_stdout, pylint_stderr) = py_run( 'module_name.py', True)
"""
# Detect if we use Python as executable or not, else default to `python`
executable = sys.executable if "python" in sys.executable else "python"
# Create command line to call pylint
epylint_part = [executable, "-c", "from pylint import epylint;epylint.Run()"]
options = shlex.split(command_options, posix=not sys.platform.startswith("win"))
cli = epylint_part + options
# Providing standard output and/or error if not set
if stdout is None:
if return_std:
stdout = PIPE
else:
stdout = sys.stdout
if stderr is None:
if return_std:
stderr = PIPE
else:
stderr = sys.stderr
# Call pylint in a subprocess
with Popen(
cli,
shell=False,
stdout=stdout,
stderr=stderr,
env=_get_env(),
universal_newlines=True,
) as process:
proc_stdout, proc_stderr = process.communicate()
# Return standard output and error
if return_std:
return StringIO(proc_stdout), StringIO(proc_stderr)
return None
def Run():
if len(sys.argv) == 1:
print("Usage: %s <filename> [options]" % sys.argv[0])
sys.exit(1)
elif not os.path.exists(sys.argv[1]):
print("%s does not exist" % sys.argv[1])
sys.exit(1)
else:
sys.exit(lint(sys.argv[1], sys.argv[2:]))
if __name__ == "__main__":
Run()
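# Hedged usage sketch (the module name is made up; py_run is defined above and this
# import path is the documented way to call epylint programmatically):
#     from pylint import epylint as lint
#     (pylint_stdout, pylint_stderr) = lint.py_run('my_module.py', return_std=True)
#     print(pylint_stdout.getvalue())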
# ====== /solutions_python/Problem_200/2732.py | repo: dr-dos-ok/Code_Jam_Webscraper | license: none ======
#!/usr/bin/python3
from math import log
lines = int(input())
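# Greedy "tidy number" strategy: scan digit positions from least to most significant;
# whenever a digit is smaller than the digit to its left, subtract (N % 10**(place+1)) + 1
# so that this position and everything below it become 9s and the digit to the left
# drops by one, then keep scanning upward.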
for attempt in range(1,lines+1):
N = int(input())
digits = int(log(N, 10))
for place in range(0,digits+1):
digit = (N % (10 ** (place + 1)) // (10 ** place))
next_digit = (N % (10 ** (place + 2)) // (10 ** (place + 1)))
if digit < next_digit:
N -= (N % (10 ** (place + 1)) + 1)
print('Case #' + str(attempt) + ': ' + str(N))
# ====== /pyeccodes/defs/grib2/template_3_2_def.py | repo: ecmwf/pyeccodes | license: Apache-2.0 ======
import pyeccodes.accessors as _
def load(h):
h.add(_.Codetable('shapeOfTheEarth', 1, "3.2.table", _.Get('masterDir'), _.Get('localDir')))
h.add(_.Unsigned('scaleFactorOfRadiusOfSphericalEarth', 1))
h.add(_.Unsigned('scaledValueOfRadiusOfSphericalEarth', 4))
h.add(_.Unsigned('scaleFactorOfEarthMajorAxis', 1))
h.alias('scaleFactorOfMajorAxisOfOblateSpheroidEarth', 'scaleFactorOfEarthMajorAxis')
h.add(_.Unsigned('scaledValueOfEarthMajorAxis', 4))
h.alias('scaledValueOfMajorAxisOfOblateSpheroidEarth', 'scaledValueOfEarthMajorAxis')
h.add(_.Unsigned('scaleFactorOfEarthMinorAxis', 1))
h.alias('scaleFactorOfMinorAxisOfOblateSpheroidEarth', 'scaleFactorOfEarthMinorAxis')
h.add(_.Unsigned('scaledValueOfEarthMinorAxis', 4))
h.alias('scaledValueOfMinorAxisOfOblateSpheroidEarth', 'scaledValueOfEarthMinorAxis')
h.alias('earthIsOblate', 'one')
if (h.get_l('shapeOfTheEarth') == 0):
h.add(_.Transient('radius', 6367470))
h.alias('radiusOfTheEarth', 'radius')
h.alias('radiusInMetres', 'radius')
h.alias('earthIsOblate', 'zero')
if (h.get_l('shapeOfTheEarth') == 1):
h.add(_.From_scale_factor_scaled_value('radius', _.Get('scaleFactorOfRadiusOfSphericalEarth'), _.Get('scaledValueOfRadiusOfSphericalEarth')))
h.alias('radiusOfTheEarth', 'radius')
h.alias('radiusInMetres', 'radius')
h.alias('earthIsOblate', 'zero')
if (h.get_l('shapeOfTheEarth') == 6):
h.add(_.Transient('radius', 6371229))
h.alias('radiusOfTheEarth', 'radius')
h.alias('radiusInMetres', 'radius')
h.alias('earthIsOblate', 'zero')
if (h.get_l('shapeOfTheEarth') == 8):
h.add(_.Transient('radius', 6371200))
h.alias('radiusOfTheEarth', 'radius')
h.alias('radiusInMetres', 'radius')
h.alias('earthIsOblate', 'zero')
if (h.get_l('shapeOfTheEarth') == 2):
h.add(_.Transient('earthMajorAxis', 6.37816e+06))
h.add(_.Transient('earthMinorAxis', 6.35678e+06))
h.alias('earthMajorAxisInMetres', 'earthMajorAxis')
h.alias('earthMinorAxisInMetres', 'earthMinorAxis')
if (h.get_l('shapeOfTheEarth') == 3):
h.add(_.From_scale_factor_scaled_value('earthMajorAxis', _.Get('scaleFactorOfEarthMajorAxis'), _.Get('scaledValueOfEarthMajorAxis')))
h.add(_.From_scale_factor_scaled_value('earthMinorAxis', _.Get('scaleFactorOfEarthMinorAxis'), _.Get('scaledValueOfEarthMinorAxis')))
h.add(_.Divdouble('earthMajorAxisInMetres', _.Get('earthMajorAxis'), 0.001))
h.add(_.Divdouble('earthMinorAxisInMetres', _.Get('earthMinorAxis'), 0.001))
if (h.get_l('shapeOfTheEarth') == 7):
h.add(_.From_scale_factor_scaled_value('earthMajorAxis', _.Get('scaleFactorOfEarthMajorAxis'), _.Get('scaledValueOfEarthMajorAxis')))
h.add(_.From_scale_factor_scaled_value('earthMinorAxis', _.Get('scaleFactorOfEarthMinorAxis'), _.Get('scaledValueOfEarthMinorAxis')))
h.alias('earthMajorAxisInMetres', 'earthMajorAxis')
h.alias('earthMinorAxisInMetres', 'earthMinorAxis')
if ((h.get_l('shapeOfTheEarth') == 4) or (h.get_l('shapeOfTheEarth') == 5)):
h.add(_.Transient('earthMajorAxis', 6.37814e+06))
h.add(_.Transient('earthMinorAxis', 6.35675e+06))
h.alias('earthMajorAxisInMetres', 'earthMajorAxis')
h.alias('earthMinorAxisInMetres', 'earthMinorAxis')
if (h.get_l('shapeOfTheEarth') == 9):
h.add(_.Transient('earthMajorAxis', 6.37756e+06))
h.add(_.Transient('earthMinorAxis', 6.35626e+06))
h.alias('earthMajorAxisInMetres', 'earthMajorAxis')
h.alias('earthMinorAxisInMetres', 'earthMinorAxis')
h.add(_.Unsigned('Ni', 4))
h.alias('numberOfPointsAlongAParallel', 'Ni')
h.alias('Nx', 'Ni')
h.add(_.Unsigned('Nj', 4))
h.alias('numberOfPointsAlongAMeridian', 'Nj')
h.alias('Ny', 'Nj')
h.alias('geography.Ni', 'Ni')
h.alias('geography.Nj', 'Nj')
h.add(_.Unsigned('basicAngleOfTheInitialProductionDomain', 4))
h.add(_.Transient('mBasicAngle', (_.Get('basicAngleOfTheInitialProductionDomain') * _.Get('oneMillionConstant'))))
h.add(_.Transient('angleMultiplier', 1))
h.add(_.Transient('mAngleMultiplier', 1000000))
pass # when block
h.add(_.Unsigned('subdivisionsOfBasicAngle', 4))
h.add(_.Transient('angleDivisor', 1000000))
pass # when block
h.add(_.Signed('latitudeOfFirstGridPoint', 4))
h.alias('La1', 'latitudeOfFirstGridPoint')
h.add(_.Signed('longitudeOfFirstGridPoint', 4))
h.alias('Lo1', 'longitudeOfFirstGridPoint')
h.add(_.Codeflag('resolutionAndComponentFlags', 1, "grib2/tables/[tablesVersion]/3.3.table"))
h.add(_.Bit('resolutionAndComponentFlags1', _.Get('resolutionAndComponentFlags'), 7))
h.add(_.Bit('resolutionAndComponentFlags2', _.Get('resolutionAndComponentFlags'), 6))
h.add(_.Bit('iDirectionIncrementGiven', _.Get('resolutionAndComponentFlags'), 5))
h.add(_.Bit('jDirectionIncrementGiven', _.Get('resolutionAndComponentFlags'), 4))
h.add(_.Bit('uvRelativeToGrid', _.Get('resolutionAndComponentFlags'), 3))
h.add(_.Bit('resolutionAndComponentFlags6', _.Get('resolutionAndComponentFlags'), 7))
h.add(_.Bit('resolutionAndComponentFlags7', _.Get('resolutionAndComponentFlags'), 6))
h.add(_.Bit('resolutionAndComponentFlags8', _.Get('resolutionAndComponentFlags'), 6))
def ijDirectionIncrementGiven_inline_concept(h):
def wrapped(h):
iDirectionIncrementGiven = h.get_l('iDirectionIncrementGiven')
jDirectionIncrementGiven = h.get_l('jDirectionIncrementGiven')
if iDirectionIncrementGiven == 1 and jDirectionIncrementGiven == 1:
return 1
if iDirectionIncrementGiven == 1 and jDirectionIncrementGiven == 0:
return 0
if iDirectionIncrementGiven == 0 and jDirectionIncrementGiven == 1:
return 0
if iDirectionIncrementGiven == 0 and jDirectionIncrementGiven == 0:
return 0
return wrapped
h.add(_.Concept('ijDirectionIncrementGiven', None, concepts=ijDirectionIncrementGiven_inline_concept(h)))
h.alias('DiGiven', 'iDirectionIncrementGiven')
h.alias('DjGiven', 'jDirectionIncrementGiven')
h.add(_.Signed('latitudeOfLastGridPoint', 4))
h.alias('La2', 'latitudeOfLastGridPoint')
h.add(_.Signed('longitudeOfLastGridPoint', 4))
h.alias('Lo2', 'longitudeOfLastGridPoint')
h.add(_.Unsigned('iDirectionIncrement', 4))
h.alias('Di', 'iDirectionIncrement')
h.alias('Dx', 'iDirectionIncrement')
h.add(_.Unsigned('jDirectionIncrement', 4))
h.alias('Dj', 'jDirectionIncrement')
h.alias('Dy', 'jDirectionIncrement')
h.add(_.Codeflag('scanningMode', 1, "grib2/tables/[tablesVersion]/3.4.table"))
h.add(_.Bit('iScansNegatively', _.Get('scanningMode'), 7))
h.add(_.Bit('jScansPositively', _.Get('scanningMode'), 6))
h.add(_.Bit('jPointsAreConsecutive', _.Get('scanningMode'), 5))
h.add(_.Bit('alternativeRowScanning', _.Get('scanningMode'), 4))
if h.get_l('jPointsAreConsecutive'):
h.alias('numberOfRows', 'Ni')
h.alias('numberOfColumns', 'Nj')
else:
h.alias('numberOfRows', 'Nj')
h.alias('numberOfColumns', 'Ni')
h.alias('geography.iScansNegatively', 'iScansNegatively')
h.alias('geography.jScansPositively', 'jScansPositively')
h.alias('geography.jPointsAreConsecutive', 'jPointsAreConsecutive')
h.add(_.Transient('iScansPositively', _.Not(_.Get('iScansNegatively'))))
h.add(_.Bit('scanningMode5', _.Get('scanningMode'), 3))
h.add(_.Bit('scanningMode6', _.Get('scanningMode'), 2))
h.add(_.Bit('scanningMode7', _.Get('scanningMode'), 1))
h.add(_.Bit('scanningMode8', _.Get('scanningMode'), 0))
h.add(_.Change_scanning_direction('swapScanningX', _.Get('values'), _.Get('Ni'), _.Get('Nj'), _.Get('iScansNegatively'), _.Get('jScansPositively'), _.Get('xFirst'), _.Get('xLast'), _.Get('x')))
h.alias('swapScanningLon', 'swapScanningX')
h.add(_.Change_scanning_direction('swapScanningY', _.Get('values'), _.Get('Ni'), _.Get('Nj'), _.Get('iScansNegatively'), _.Get('jScansPositively'), _.Get('yFirst'), _.Get('yLast'), _.Get('y')))
h.alias('swapScanningLat', 'swapScanningY')
h.add(_.G2grid('g2grid', _.Get('latitudeOfFirstGridPoint'), _.Get('longitudeOfFirstGridPoint'), _.Get('latitudeOfLastGridPoint'), _.Get('longitudeOfLastGridPoint'), _.Get('iDirectionIncrement'), _.Get('jDirectionIncrement'), _.Get('basicAngleOfTheInitialProductionDomain'), _.Get('subdivisionsOfBasicAngle')))
h.add(_.G2latlon('latitudeOfFirstGridPointInDegrees', _.Get('g2grid'), 0))
h.alias('geography.latitudeOfFirstGridPointInDegrees', 'latitudeOfFirstGridPointInDegrees')
h.add(_.G2latlon('longitudeOfFirstGridPointInDegrees', _.Get('g2grid'), 1))
h.alias('geography.longitudeOfFirstGridPointInDegrees', 'longitudeOfFirstGridPointInDegrees')
h.add(_.G2latlon('latitudeOfLastGridPointInDegrees', _.Get('g2grid'), 2))
h.alias('geography.latitudeOfLastGridPointInDegrees', 'latitudeOfLastGridPointInDegrees')
h.add(_.G2latlon('longitudeOfLastGridPointInDegrees', _.Get('g2grid'), 3))
h.alias('geography.longitudeOfLastGridPointInDegrees', 'longitudeOfLastGridPointInDegrees')
h.alias('xFirst', 'longitudeOfFirstGridPointInDegrees')
h.alias('yFirst', 'latitudeOfFirstGridPointInDegrees')
h.alias('xLast', 'longitudeOfLastGridPointInDegrees')
h.alias('yLast', 'latitudeOfLastGridPointInDegrees')
h.add(_.G2latlon('iDirectionIncrementInDegrees', _.Get('g2grid'), 4, _.Get('iDirectionIncrementGiven')))
h.alias('geography.iDirectionIncrementInDegrees', 'iDirectionIncrementInDegrees')
h.add(_.G2latlon('jDirectionIncrementInDegrees', _.Get('g2grid'), 5, _.Get('jDirectionIncrementGiven')))
h.alias('geography.jDirectionIncrementInDegrees', 'jDirectionIncrementInDegrees')
h.alias('latitudeFirstInDegrees', 'latitudeOfFirstGridPointInDegrees')
h.alias('longitudeFirstInDegrees', 'longitudeOfFirstGridPointInDegrees')
h.alias('latitudeLastInDegrees', 'latitudeOfLastGridPointInDegrees')
h.alias('longitudeLastInDegrees', 'longitudeOfLastGridPointInDegrees')
h.alias('DiInDegrees', 'iDirectionIncrementInDegrees')
h.alias('DxInDegrees', 'iDirectionIncrementInDegrees')
h.alias('DjInDegrees', 'jDirectionIncrementInDegrees')
h.alias('DyInDegrees', 'jDirectionIncrementInDegrees')
if (h._missing('Ni') and (h.get_l('PLPresent') == 1)):
h.add(_.Iterator('ITERATOR', _.Get('latlon_reduced'), _.Get('numberOfPoints'), _.Get('missingValue'), _.Get('values'), _.Get('latitudeFirstInDegrees'), _.Get('longitudeFirstInDegrees'), _.Get('latitudeLastInDegrees'), _.Get('longitudeLastInDegrees'), _.Get('Nj'), _.Get('DjInDegrees'), _.Get('pl')))
h.add(_.Nearest('NEAREST', _.Get('latlon_reduced'), _.Get('values'), _.Get('radius'), _.Get('Nj'), _.Get('pl'), _.Get('longitudeFirstInDegrees'), _.Get('longitudeLastInDegrees')))
else:
h.add(_.Transient('iteratorDisableUnrotate', 0))
h.add(_.Iterator('ITERATOR', _.Get('latlon'), _.Get('numberOfPoints'), _.Get('missingValue'), _.Get('values'), _.Get('longitudeFirstInDegrees'), _.Get('DiInDegrees'), _.Get('Ni'), _.Get('Nj'), _.Get('iScansNegatively'), _.Get('latitudeFirstInDegrees'), _.Get('DjInDegrees'), _.Get('jScansPositively'), _.Get('jPointsAreConsecutive'), _.Get('isRotatedGrid'), _.Get('angleOfRotation'), _.Get('latitudeOfSouthernPoleInDegrees'), _.Get('longitudeOfSouthernPoleInDegrees')))
h.add(_.Nearest('NEAREST', _.Get('regular'), _.Get('values'), _.Get('radius'), _.Get('Ni'), _.Get('Nj')))
h.add(_.Latlonvalues('latLonValues', _.Get('values')))
h.alias('latitudeLongitudeValues', 'latLonValues')
h.add(_.Latitudes('latitudes', _.Get('values'), 0))
h.add(_.Longitudes('longitudes', _.Get('values'), 0))
h.add(_.Latitudes('distinctLatitudes', _.Get('values'), 1))
h.add(_.Longitudes('distinctLongitudes', _.Get('values'), 1))
h.add(_.Signed('latitudeOfThePoleOfStretching', 4))
h.add(_.Signed('longitudeOfThePoleOfStretching', 4))
h.add(_.Scale('latitudeOfStretchingPoleInDegrees', _.Get('latitudeOfThePoleOfStretching'), _.Get('oneConstant'), _.Get('grib2divider'), _.Get('truncateDegrees')))
h.alias('geography.latitudeOfStretchingPoleInDegrees', 'latitudeOfStretchingPoleInDegrees')
h.add(_.Scale('longitudeOfStretchingPoleInDegrees', _.Get('longitudeOfThePoleOfStretching'), _.Get('oneConstant'), _.Get('grib2divider'), _.Get('truncateDegrees')))
h.alias('geography.longitudeOfStretchingPoleInDegrees', 'longitudeOfStretchingPoleInDegrees')
h.add(_.Unsigned('stretchingFactorScaled', 4))
h.add(_.Scale('stretchingFactor', _.Get('stretchingFactorScaled'), _.Get('oneConstant'), _.Get('grib2divider')))
h.alias('geography.stretchingFactor', 'stretchingFactor')
# ====== /docs/source/conf.py | repo: Zwork101/backpack.py | license: MIT ======
import sphinx_rtd_theme
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# backpack.py-docs documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 25 17:00:12 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.rsttemplates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'backpack.py-docs'
copyright = '2017, Zwork101'
author = 'Zwork101'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'collapse_navigation': False,
'display_version': False,
'navigation_depth': 3,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.rststatic']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'backpackpy-docsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'backpackpy-docs.tex', 'backpack.py-docs Documentation',
'Zwork101', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'backpackpy-docs', 'backpack.py-docs Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'backpackpy-docs', 'backpack.py-docs Documentation',
author, 'backpackpy-docs', 'One line description of project.',
'Miscellaneous'),
]
# ====== /azure-cognitiveservices-search-customsearch/azure/cognitiveservices/search/customsearch/models/answer.py | repo: lmazuel/azure-sdk-for-python | license: MIT ======
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .response import Response
class Answer(Response):
"""Answer.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SearchResultsAnswer
Variables are only populated by the server, and will be ignored when
sending a request.
:param _type: Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:ivar follow_up_queries:
:vartype follow_up_queries:
list[~azure.cognitiveservices.search.customsearch.models.Query]
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'follow_up_queries': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
}
_subtype_map = {
'_type': {'SearchResultsAnswer': 'SearchResultsAnswer'}
}
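    # msrest uses _subtype_map for polymorphic deserialization: a payload whose
    # "_type" discriminator is "SearchResultsAnswer" is materialized as that
    # subclass instead of this base Answer model.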
def __init__(self):
super(Answer, self).__init__()
self.follow_up_queries = None
self._type = 'Answer'
# ====== /pypureclient/flashblade/FB_2_2/models/alert_get_response.py | repo: PureStorage-OpenConnect/py-pure-client | license: BSD-2-Clause ======
# coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.2, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_2 import models
class AlertGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'continuation_token': 'str',
'total_item_count': 'int',
'items': 'list[Alert]'
}
attribute_map = {
'continuation_token': 'continuation_token',
'total_item_count': 'total_item_count',
'items': 'items'
}
required_args = {
}
def __init__(
self,
continuation_token=None, # type: str
total_item_count=None, # type: int
items=None, # type: List[models.Alert]
):
"""
Keyword args:
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the `continuation_token` to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The `continuation_token` is generated if the `limit` is less than the remaining number of items, and the default sort is used (no sort is specified).
total_item_count (int): Total number of items after applying `filter` params.
items (list[Alert]): A list of alert objects.
"""
if continuation_token is not None:
self.continuation_token = continuation_token
if total_item_count is not None:
self.total_item_count = total_item_count
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `AlertGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AlertGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AlertGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
# ====== /azext_iot/sdk/digitaltwins/controlplane/operations/private_endpoint_connections_operations.py | repo: montgomp/azure-iot-cli-extension | license: MIT ======
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class PrivateEndpointConnectionsOperations(object):
"""PrivateEndpointConnectionsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Version of the DigitalTwinsInstance Management API. Constant value: "2020-12-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2020-12-01"
self.config = config
def list(
self, resource_group_name, resource_name, custom_headers=None, raw=False, **operation_config):
"""List private endpoint connection properties.
:param resource_group_name: The name of the resource group that
contains the DigitalTwinsInstance.
:type resource_group_name: str
:param resource_name: The name of the DigitalTwinsInstance.
:type resource_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PrivateEndpointConnectionsResponse or ClientRawResponse if
raw=true
:rtype: ~controlplane.models.PrivateEndpointConnectionsResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<controlplane.models.ErrorResponseException>`
"""
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=3, pattern=r'^(?!-)[A-Za-z0-9-]{3,63}(?<!-)$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=10)
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnectionsResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DigitalTwins/digitalTwinsInstances/{resourceName}/privateEndpointConnections'}
def get(
self, resource_group_name, resource_name, private_endpoint_connection_name, custom_headers=None, raw=False, **operation_config):
"""Get private endpoint connection properties for the given private
endpoint.
:param resource_group_name: The name of the resource group that
contains the DigitalTwinsInstance.
:type resource_group_name: str
:param resource_name: The name of the DigitalTwinsInstance.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private
endpoint connection.
:type private_endpoint_connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PrivateEndpointConnection or ClientRawResponse if raw=true
:rtype: ~controlplane.models.PrivateEndpointConnection or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<controlplane.models.ErrorResponseException>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=3, pattern=r'^(?!-)[A-Za-z0-9-]{3,63}(?<!-)$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=10)
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DigitalTwins/digitalTwinsInstances/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}
def _delete_initial(
self, resource_group_name, resource_name, private_endpoint_connection_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=3, pattern=r'^(?!-)[A-Za-z0-9-]{3,63}(?<!-)$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=10)
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
raise models.ErrorResponseException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, resource_name, private_endpoint_connection_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Delete private endpoint connection with the specified name.
:param resource_group_name: The name of the resource group that
contains the DigitalTwinsInstance.
:type resource_group_name: str
:param resource_name: The name of the DigitalTwinsInstance.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private
endpoint connection.
:type private_endpoint_connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
:class:`ErrorResponseException<controlplane.models.ErrorResponseException>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DigitalTwins/digitalTwinsInstances/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}
def _create_or_update_initial(
self, resource_group_name, resource_name, private_endpoint_connection_name, properties, custom_headers=None, raw=False, **operation_config):
private_endpoint_connection = models.PrivateEndpointConnection(properties=properties)
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=3, pattern=r'^(?!-)[A-Za-z0-9-]{3,63}(?<!-)$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=10)
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(private_endpoint_connection, 'PrivateEndpointConnection')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', response)
if response.status_code == 202:
deserialized = self._deserialize('PrivateEndpointConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, resource_name, private_endpoint_connection_name, properties, custom_headers=None, raw=False, polling=True, **operation_config):
"""Update the status of a private endpoint connection with the given name.
:param resource_group_name: The name of the resource group that
contains the DigitalTwinsInstance.
:type resource_group_name: str
:param resource_name: The name of the DigitalTwinsInstance.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private
endpoint connection.
:type private_endpoint_connection_name: str
:param properties:
:type properties:
~controlplane.models.PrivateEndpointConnectionProperties
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
PrivateEndpointConnection or
ClientRawResponse<PrivateEndpointConnection> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~controlplane.models.PrivateEndpointConnection]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~controlplane.models.PrivateEndpointConnection]]
:raises:
:class:`ErrorResponseException<controlplane.models.ErrorResponseException>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
properties=properties,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('PrivateEndpointConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DigitalTwins/digitalTwinsInstances/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}
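# Illustrative usage only (added note, not part of the generated client);
# names below are hypothetical:
#
#     props = models.PrivateEndpointConnectionProperties(...)   # status update payload
#     poller = client.private_endpoint_connections.create_or_update(
#         "my-rg", "my-digitaltwins-instance", "my-connection", properties=props)
#     connection = poller.result()   # deserialized PrivateEndpointConnection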
| [
"[email protected]"
]
| |
ecc2abe94866c5f7103035ef171af8b7d9358fc3 | 96fe253e9a740b51dcd7f83d6ab01bb248c2bf4b | /patrones_poma/cohesive_modules/cohesivo_aciclico/tst_cohesivo_aciclico.py | 0415e8c1c409d6b307ab03f7c4ca238275c21b6d | []
| no_license | vvalotto/Patrones_Disenio_Python | 7574470752a5f14214434a927c2c5e0faaa592ba | 7ab6a74e9b008c3434af0a56d4c2b6b7de3617bf | refs/heads/master | 2021-04-28T19:16:21.535998 | 2018-10-21T14:05:36 | 2018-10-21T14:05:36 | 121,891,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | from POMA.cohesive_modules.cohesivo_aciclico.factura.factura import *
from POMA.cohesive_modules.cohesivo_aciclico.ruteador.ruteador_tipoA import *
mi_factura = Factura("A", "1", 200.20, RuteadorTipoA())
print(mi_factura.rutear())
print(mi_factura.obtener_prioridad())
| [
"[email protected]"
]
| |
79dfcc06309bb5e28a4290371c43aab711ed6714 | 61d434953d55af170c4abb023686256509a4bffc | /restapi/schemas/replies/ReplySchema.py | 04c71429ee46d94f129a9d811dfb265abba5a929 | []
| no_license | mentimun-mentah/tridatu-backend | c6e471bc08c010ebc5b3fcdf5ef5bac0c33758de | 32cf22c24327b228cba57782ffd4740906e8e7d8 | refs/heads/master | 2023-03-14T13:32:31.264593 | 2021-03-09T13:30:12 | 2021-03-09T13:30:12 | 302,246,645 | 4 | 1 | null | 2021-03-09T13:30:13 | 2020-10-08T06:04:50 | Python | UTF-8 | Python | false | false | 755 | py | from pydantic import BaseModel, constr, validator
from typing import List
from datetime import datetime
class ReplySchema(BaseModel):
class Config:
min_anystr_length = 1
anystr_strip_whitespace = True
class ReplyCreate(ReplySchema):
message: constr(strict=True, min_length=5)
comment_id: constr(strict=True, regex=r'^[0-9]*$')
@validator('comment_id')
def parse_str_to_int(cls, v):
return int(v) if v else None
class ReplyData(ReplySchema):
replies_id: str
replies_message: str
replies_user_id: str
replies_created_at: datetime
users_username: str
users_avatar: str
users_role: str
class ReplyCommentData(ReplySchema):
comments_id: str
comments_replies: List[ReplyData]
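# Rough usage sketch (added, not part of this module): ReplyCreate strips
# whitespace, enforces the constraints above and coerces comment_id to int.
#
#     payload = ReplyCreate(message="nice product", comment_id="12")
#     assert payload.comment_id == 12   # parsed by the validator above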
| [
"[email protected]"
]
| |
cc51f88f7e67cce097fbe723ea57b273d945e573 | eb2df6020f5759feee3d6d78c5f8c78999454a09 | /migrations/versions/d8e9859f3cbc_.py | 03229422b0101f909d63df429c85801810a4f7f5 | []
| no_license | mywork-dragon/dave-energy | 7a08f855d245c2d90a9c13aa85fc3b9f28ae9294 | 4b3430be6ef6957389ab05be3a17a0245f5d6662 | refs/heads/master | 2023-07-28T02:55:26.791724 | 2021-09-06T11:44:30 | 2021-09-06T11:44:30 | 365,872,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | # type: ignore
"""Devices
Revision ID: d8e9859f3cbc
Revises: 228cb8a5f8d4
Create Date: 2020-03-06 11:48:38.819954
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "d8e9859f3cbc"
down_revision = "228cb8a5f8d4"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"devices",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("name", sa.String(length=255), nullable=False),
sa.Column("unit", sa.String(length=255), nullable=True),
sa.Column("building_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["building_id"],
["buildings.id"],
name=op.f("fk_devices_building_id_buildings"),
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_devices")),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("devices")
# ### end Alembic commands ###
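# Added note: like any Alembic revision, this migration is normally applied with
# `alembic upgrade head` and rolled back with `alembic downgrade -1`.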
| [
"[email protected]"
]
| |
cad6d289e8b1c0a1fd07a1687f78153f63bc7a2a | cbb2dde4b4695d6a3012a5c2540265c45a840b32 | /bin/evtest.py | b99ad5422931e97f5de044fb5f7cfbc34233287d | [
"BSD-2-Clause"
]
| permissive | sil2100/python-evdev | 806d1c8e158976446cf01b84972f15b5badc9e23 | d076168037ac6b78e7e8243918c4d3ce0e0923aa | refs/heads/master | 2021-01-24T05:47:36.899225 | 2013-05-28T13:22:07 | 2013-05-28T13:22:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,379 | py | #!/usr/bin/env python
# encoding: utf-8
'''
evdev example - input device event monitor
'''
from sys import argv, exit
from select import select
from evdev import ecodes, InputDevice, list_devices, AbsInfo
usage = 'usage: evtest <device> [<type> <value>]'
evfmt = 'time {:<16} type {} ({}), code {:<4} ({}), value {}'
device_dir = '/dev/input/'
query_type = None
query_value = None
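# Example invocations (illustrative), matching the usage string above:
#   evtest.py                    -> pick a device interactively from the listing
#   evtest.py /dev/input/event0  -> monitor a specific device node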
def select_device():
''' Select a device from the list of accessible input devices '''
devices = [InputDevice(i) for i in reversed(list_devices(device_dir))]
dev_fmt = '{0:<3} {1.fn:<20} {1.name:<35} {1.phys}'
dev_lns = [dev_fmt.format(n, d) for n, d in enumerate(devices)]
print('ID {:<20} {:<35} {}'.format('Device', 'Name', 'Phys'))
print('-' * len(max(dev_lns, key=len)))
print('\n'.join(dev_lns))
print('')
choice = input('Select device [0-{}]:'.format(len(dev_lns)-1))
return devices[int(choice)]
def print_event(e):
if e.type == ecodes.EV_SYN:
if e.code == ecodes.SYN_MT_REPORT:
print('time {:<16} +++++++++ {} ++++++++'.format(e.timestamp(), ecodes.SYN[e.code]))
else:
print('time {:<16} --------- {} --------'.format(e.timestamp(), ecodes.SYN[e.code]))
else:
if e.type in ecodes.bytype:
codename = ecodes.bytype[e.type][e.code]
else:
codename = '?'
print(evfmt.format(e.timestamp(), e.type, ecodes.EV[e.type], e.code, codename, e.value))
if len(argv) == 1:
device = select_device()
elif len(argv) == 2:
device = InputDevice(argv[1])
elif len(argv) == 4:
device = InputDevice(argv[1])
query_type = argv[2]
query_value = argv[3]
else:
print(usage) ; exit(1)
print('Device name: {.name}'.format(device))
print('Device info: {.info}'.format(device))
print('Repeat settings: {}'.format(device.repeat))
print('Device capabilities:')
for type, codes in device.capabilities(verbose=True).items():
print(' Type {} {}:'.format(*type))
for i in codes:
if isinstance(i[1], AbsInfo):
print(' Code {:<4} {}:'.format(*i[0]))
print(' {}'.format(i[1]))
else:
print(' Code {:<4} {}'.format(*i))
print('')
print('Listening for events ...\n')
while True:
r, w, e = select([device], [], [])
for ev in device.read():
print_event(ev)
| [
"[email protected]"
]
| |
6b868d1463284f7e4da356224d0455120fc4d8f2 | f16326f33b286ac10740016c67446dd3279ee60e | /sklearn/tutorials/demo.py | 772c25525fda01237488f7277447a5bd37811e95 | []
| no_license | zhuliyi10/ml_demo | da1b4189a941e079cb780bcf6ab5ae710d407556 | 04303ea37dbfc0ba8dd57e77ff53ccdcae1e5ce5 | refs/heads/master | 2020-03-31T08:23:16.677823 | 2019-01-24T08:32:49 | 2019-01-24T08:32:49 | 152,054,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | import matplotlib.pyplot as plt
from sklearn import datasets
iris = datasets.load_digits()
data = iris.images
output = iris.target
print(data.shape)
data1 = data.reshape((data.shape[0], -1))
print(data1.shape)
plt.imshow(data[0], cmap=plt.cm.gray_r)
plt.show()
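# Added context: load_digits() images are 8x8, so the reshape above flattens each
# one into a 64-feature row - the (n_samples, n_features) layout scikit-learn
# estimators expect. A hypothetical follow-up step would be, for example:
#
#     from sklearn.linear_model import LogisticRegression
#     clf = LogisticRegression(max_iter=1000).fit(data1, output)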
| [
"[email protected]"
]
| |
2080139d54365d3f68b0112f690fbba5d900f8c9 | 635c9f0501039f5a099849a3108e19de76092aea | /lecture/ssafy_190322/todo/todo/settings.py | b197b4b94c64ba9757a523c9448b11e6fa9af024 | []
| no_license | Hansung-Lee/SSAFY | 87ebea0808bb40381678d678e1035dc5fa2c2eb0 | cdb7ae1bba0e98e733eed703da2c62217c319462 | refs/heads/master | 2020-04-14T20:03:05.975040 | 2019-05-16T08:57:21 | 2019-05-16T08:57:21 | 164,080,393 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,248 | py | """
Django settings for todo project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@9oh55pu^-bogvo(eb)ndu9!!@0^76cv043ln-fv9-hujzvafp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'users',
'todos',
'shout',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
| [
"[email protected]"
]
| |
fc8db3e4cc9c4ef2ecf999f752fac8a6ddd96c26 | ece3a452b51d2cbcac6c20481ab6660b77c3f955 | /Eigenbasis.py | 1dca2aade14eaf471cc74f426bd77d2aae761a2a | []
| no_license | ilmoi/imperial_math | ef1d220a361c4dab6fde436b482941d27e97f8f3 | a3e8537dc201fef486a17f7a5e024fa1d60e2644 | refs/heads/master | 2022-08-09T20:29:13.805569 | 2020-05-14T16:42:13 | 2020-05-14T16:42:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | import numpy as np
#in numpy "matrix" - strictly 2d. need to use arrays instead.
# in numpy, "matrix" is strictly 2d - use arrays instead
# in numpy, numbers are written row by row (horizontally), not vertically
v = np.array([-1,1]) #vector to check
Ans1 = T @ T @ v
C = np.array([[2.73,1],[-1,1.366]]) #two eigenvectors
C_inv = np.linalg.inv(C)
D = np.array([[1.866,0],[0,0.134]]) # [[eigenvalue1, 0],[0, eigenvalue2]]
new_T = C @ D @ D @ C_inv
Ans2 = new_T @ v
print('method 1 results in: ' + str(Ans1))
print('method 2 results in: ' + str(Ans2))
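# --- optional check (added for illustration, not in the original script) ---
# np.linalg.eig should reproduce the eigenvalues in D (~1.866 and ~0.134) and,
# up to scaling, the hand-entered eigenvector columns of C.
eig_vals, eig_vecs = np.linalg.eig(T)
print('numpy eigenvalues: ' + str(eig_vals))
print('numpy eigenvectors (columns): ' + str(eig_vecs))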
"[email protected]"
]
| |
2f05b4b9c701ea5fb7a5fb4e59cda30ae79ab86c | 30291450c064006f1bd9bc5c432b8a869e2166bb | /tags/0.6/tests/test_types_cn.py | 6a1c00d8014fa97165b2840de092c295ce17b0f9 | [
"MIT"
]
| permissive | BGCX261/zhpy-svn-to-git | 96f04e2f72c61671324219a85939137ff5cd9ef6 | 70da095393fe13543433ab5115cb6c1a519d64b0 | refs/heads/master | 2021-01-22T22:49:04.898314 | 2015-08-25T15:44:00 | 2015-08-25T15:44:00 | 41,587,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | #coding=utf-8
"""
test build-in types
"""
from zhpy import convertor
def test_int():
"""
test int type
"""
assert convertor("整数(2.0)") == "int(2.0)"
def test_float():
"""
test float type
"""
assert convertor("小数(2)") == "float(2)"
def test_boolean():
"""
test boolean type
"""
assert convertor("n = 真") == "n = True"
assert convertor("p = 假") == "p = False"
assert convertor("q = 实") == "q = True"
assert convertor("r = 虛") == "r = False"
def test_string():
"""
same as print test
"""
assert convertor("s.开始为('he')") == "s.startswith('he')"
assert convertor("s.结束为('he')") == "s.endswith('he')"
def test_list():
"""
test list type
"""
assert convertor("列表((1,2,3,4)) == [1,2,3,4]") == \
"list((1,2,3,4)) == [1,2,3,4]"
assert convertor("a = []; a.加入(2); 宣告 a == [2]") == \
"a = []; a.append(2); assert a == [2]"
p = "h,e,l,l,o"
assert convertor('p.分离(",")') == 'p.split(",")'
def test_dict():
"""
test dict type
"""
assert convertor("字典(a=1, b=2) == {'a':1, 'b':2}") == \
"dict(a=1, b=2) == {'a':1, 'b':2}"
def test_tuple():
"""
test tuple type
"""
assert convertor("数组([1,2,3,4]) == (1,2,3,4)") == \
"tuple([1,2,3,4]) == (1,2,3,4)"
def test_set():
"""
test set type
"""
assert convertor("类组([1,2,3,4]) = set([1, 2, 3, 4])") == \
"set([1,2,3,4]) = set([1, 2, 3, 4])"
def test_file():
"""
test file type
"""
assert convertor('fd = 打开("ReadMe_test.txt", "r")') == \
'fd = open("ReadMe_test.txt", "r")'
assert convertor('temp = fd.读一行()') == 'temp = fd.readline()'
assert convertor('temp = fd.读多行()') == 'temp = fd.readlines()'
assert convertor('temp = fd.读取()') == 'temp = fd.read()'
assert convertor('fd.写入(temp)') == 'fd.write(temp)'
assert convertor('fd.关闭()') == 'fd.close()' | [
"[email protected]"
]
| |
5fbe66864d69107584b51660b80c9014d7f093c0 | 6967eccf98ad8c51e69606287279c9ed1c0d344f | /tests/components/sensor/test_mqtt.py | b59ea867c5886f709ca8a4032536d58c11395eec | [
"MIT"
]
| permissive | plucena24/home-assistant | 6c146daceff147db2e22b74a538f4c68f20029ca | 060cbaf66b3722480e6bca54c2c32111179e7067 | refs/heads/dev | 2023-08-18T07:09:55.539600 | 2015-10-23T22:38:19 | 2015-10-23T22:38:19 | 44,849,509 | 0 | 0 | MIT | 2023-08-08T18:37:20 | 2015-10-24T02:50:00 | Python | UTF-8 | Python | false | false | 1,220 | py | """
tests.components.sensor.test_mqtt
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests mqtt sensor.
"""
import unittest
import homeassistant.core as ha
import homeassistant.components.sensor as sensor
from tests.common import mock_mqtt_component, fire_mqtt_message
class TestSensorMQTT(unittest.TestCase):
""" Test the MQTT sensor. """
def setUp(self): # pylint: disable=invalid-name
self.hass = ha.HomeAssistant()
mock_mqtt_component(self.hass)
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_setting_sensor_value_via_mqtt_message(self):
self.assertTrue(sensor.setup(self.hass, {
'sensor': {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'test-topic',
'unit_of_measurement': 'fav unit'
}
}))
fire_mqtt_message(self.hass, 'test-topic', '100')
self.hass.pool.block_till_done()
state = self.hass.states.get('sensor.test')
self.assertEqual('100', state.state)
self.assertEqual('fav unit',
state.attributes.get('unit_of_measurement'))
| [
"[email protected]"
]
| |
9c444fdadbfca4ef40a3f4389874729e20364461 | 45c170fb0673deece06f3055979ece25c3210380 | /toontown/toontowngui/FeatureComingSoonDialog.py | 3a5555537952b8d9f862f80f1ae3ae4fb12c852c | []
| no_license | MTTPAM/PublicRelease | 5a479f5f696cfe9f2d9dcd96f378b5ce160ec93f | 825f562d5021c65d40115d64523bb850feff6a98 | refs/heads/master | 2021-07-24T09:48:32.607518 | 2018-11-13T03:17:53 | 2018-11-13T03:17:53 | 119,129,731 | 2 | 6 | null | 2018-11-07T22:10:10 | 2018-01-27T03:43:39 | Python | UTF-8 | Python | false | false | 794 | py | #Embedded file name: toontown.toontowngui.FeatureComingSoonDialog
from direct.fsm import ClassicFSM, State
from toontown.toonbase.ToontownGlobals import OptionsPageHotkey
from toontown.toontowngui import TTDialog
class FeatureComingSoonDialog:
def __init__(self, text = 'Woah! That feature will be enabled in \n\x01textShadow\x01beta\x02! Sorry about that!'):
self.dialog = TTDialog.TTGlobalDialog(dialogName='ComingSoon', doneEvent='exitDialog', style=TTDialog.Acknowledge, text=text, text_wordwrap=24, text_pos=(0, 0, -0.8), suppressKeys=True, suppressMouse=True)
self.dialog.accept('exitDialog', self.exitDialog)
base.transitions.fadeScreen(0.2)
def exitDialog(self):
base.transitions.noFade()
self.dialog.cleanup()
del self.dialog
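# Usage sketch (added, illustrative only): the dialog is shown with its default
# text via FeatureComingSoonDialog(), or with a custom message such as
# FeatureComingSoonDialog(text='...'); it tears itself down in exitDialog above
# once the player acknowledges it.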
| [
"[email protected]"
]
| |
f26d03d5110d3947d2d23a41ebbf1f0641052bf0 | 15d3a10db27128c06f84c30fa8d64b2e1c629fd9 | /express/pallets/migrations/0013_airwaybill_channel.py | c0115cf07beb503bf25d4c75061716ce3a2dd652 | []
| no_license | yiyuhao/exp | 7cba6650e3113ba05698f90a7baf75b680dd6435 | 866a90b2e6f0d113559b0674f514cdd56020f7d6 | refs/heads/master | 2020-03-19T20:20:04.799355 | 2018-07-15T14:55:24 | 2018-07-15T14:55:24 | 136,897,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-18 01:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pallets', '0012_auto_20170317_1722'),
]
operations = [
migrations.AddField(
model_name='airwaybill',
name='channel',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='air_waybills', to='pallets.Channel', verbose_name='\u6e20\u9053'),
),
]
| [
"[email protected]"
]
| |
cc105435957503dedf2f458f6b7088c268b060db | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_blazons.py | c2fbb9176bef4ff2bfd573dca5916e841fcd4995 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.verbs._blazon import _BLAZON
# class header
class _BLAZONS(_BLAZON, ):
def __init__(self,):
_BLAZON.__init__(self)
self.name = "BLAZONS"
self.specie = 'verbs'
self.basic = "blazon"
self.jsondata = {}
| [
"[email protected]"
]
| |
5abbf7a3192c7ba05c5bdb22a2842df0746fbb98 | 4027d8dafb6f60568f03357e329c09262161e963 | /machinelearn/seaborn_learn/scatter_utils.py | 5eecf26b2ce5406a5b958ec0d3a628b8ff7ca668 | []
| no_license | pentiumCM/machinelearn | a2bfa15d6e9f20fd604116f77186da76ebcc4f27 | 329bb9521b5e06e3471aa209fc87ca47f8d5fdcb | refs/heads/master | 2022-12-08T23:43:05.784930 | 2021-05-24T04:02:23 | 2021-05-24T04:02:23 | 216,704,188 | 7 | 1 | null | 2022-12-08T09:30:07 | 2019-10-22T02:13:45 | Python | UTF-8 | Python | false | false | 19,770 | py | #!/usr/bin/env python
# encoding: utf-8
'''
@Author : pentiumCM
@Email : [email protected]
@Software: PyCharm
@File : scatter_utils.py
@Time : 2020/9/8 9:05
@desc :
'''
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
wheel_parms = [
{'left_wheel_center_x': 450.0, 'right_wheel_center_x': 484.0, 'wheel_last_coo': (516, 278), 'car_type': 'truck',
'wheel_center_list': [450.0, 484.0]},
{'left_wheel_center_x': 448.5, 'right_wheel_center_x': 448.5, 'wheel_last_coo': (518, 280), 'car_type': 'truck',
'wheel_center_list': [448.5]},
{'left_wheel_center_x': 448.0, 'right_wheel_center_x': 482.0, 'wheel_last_coo': (519, 280), 'car_type': 'truck',
'wheel_center_list': [448.0, 482.0]},
{'left_wheel_center_x': 447.0, 'right_wheel_center_x': 481.5, 'wheel_last_coo': (519, 280), 'car_type': 'truck',
'wheel_center_list': [481.5, 447.0]},
{'left_wheel_center_x': 446.0, 'right_wheel_center_x': 481.5, 'wheel_last_coo': (519, 281), 'car_type': 'truck',
'wheel_center_list': [481.5, 446.0]},
{'left_wheel_center_x': 444.5, 'right_wheel_center_x': 479.0, 'wheel_last_coo': (516, 283), 'car_type': 'truck',
'wheel_center_list': [444.5, 479.0]},
{'left_wheel_center_x': 442.0, 'right_wheel_center_x': 481.0, 'wheel_last_coo': (516, 283), 'car_type': 'truck',
'wheel_center_list': [481.0, 442.0]},
{'left_wheel_center_x': 442.0, 'right_wheel_center_x': 478.5, 'wheel_last_coo': (515, 283), 'car_type': 'truck',
'wheel_center_list': [442.0, 478.5]},
{'left_wheel_center_x': 441.0, 'right_wheel_center_x': 478.5, 'wheel_last_coo': (513, 283), 'car_type': 'truck',
'wheel_center_list': [441.0, 478.5]},
{'left_wheel_center_x': 440.5, 'right_wheel_center_x': 477.5, 'wheel_last_coo': (514, 290), 'car_type': 'truck',
'wheel_center_list': [440.5, 477.5]},
{'left_wheel_center_x': 439.5, 'right_wheel_center_x': 477.5, 'wheel_last_coo': (507, 293), 'car_type': 'truck',
'wheel_center_list': [477.5, 439.5]},
{'left_wheel_center_x': 438.5, 'right_wheel_center_x': 477.0, 'wheel_last_coo': (507, 294), 'car_type': 'truck',
'wheel_center_list': [477.0, 438.5]},
{'left_wheel_center_x': 435.5, 'right_wheel_center_x': 475.0, 'wheel_last_coo': (510, 294), 'car_type': 'truck',
'wheel_center_list': [435.5, 475.0]},
{'left_wheel_center_x': 434.5, 'right_wheel_center_x': 475.0, 'wheel_last_coo': (511, 294), 'car_type': 'truck',
'wheel_center_list': [434.5, 475.0]},
{'left_wheel_center_x': 432.5, 'right_wheel_center_x': 474.5, 'wheel_last_coo': (502, 297), 'car_type': 'truck',
'wheel_center_list': [432.5, 474.5]},
{'left_wheel_center_x': 432.0, 'right_wheel_center_x': 473.5, 'wheel_last_coo': (503, 299), 'car_type': 'truck',
'wheel_center_list': [432.0, 473.5]},
{'left_wheel_center_x': 430.0, 'right_wheel_center_x': 473.0, 'wheel_last_coo': (503, 301), 'car_type': 'truck',
'wheel_center_list': [473.0, 430.0]},
{'left_wheel_center_x': 428.5, 'right_wheel_center_x': 471.5, 'wheel_last_coo': (502, 302), 'car_type': 'truck',
'wheel_center_list': [428.5, 471.5]},
{'left_wheel_center_x': 428.0, 'right_wheel_center_x': 471.5, 'wheel_last_coo': (505, 307), 'car_type': 'truck',
'wheel_center_list': [428.0, 471.5]},
{'left_wheel_center_x': 427.0, 'right_wheel_center_x': 471.5, 'wheel_last_coo': (505, 308), 'car_type': 'truck',
'wheel_center_list': [427.0, 471.5]},
{'left_wheel_center_x': 425.5, 'right_wheel_center_x': 471.5, 'wheel_last_coo': (505, 308), 'car_type': 'truck',
'wheel_center_list': [425.5, 471.5]},
{'left_wheel_center_x': 423.5, 'right_wheel_center_x': 470.0, 'wheel_last_coo': (503, 307), 'car_type': 'truck',
'wheel_center_list': [470.0, 423.5]},
{'left_wheel_center_x': 422.0, 'right_wheel_center_x': 470.0, 'wheel_last_coo': (502, 311), 'car_type': 'truck',
'wheel_center_list': [422.0, 470.0]},
{'left_wheel_center_x': 420.5, 'right_wheel_center_x': 469.5, 'wheel_last_coo': (503, 312), 'car_type': 'truck',
'wheel_center_list': [420.5, 469.5]},
{'left_wheel_center_x': 417.5, 'right_wheel_center_x': 469.0, 'wheel_last_coo': (501, 314), 'car_type': 'truck',
'wheel_center_list': [417.5, 469.0]},
{'left_wheel_center_x': 416.5, 'right_wheel_center_x': 466.0, 'wheel_last_coo': (501, 316), 'car_type': 'truck',
'wheel_center_list': [416.5, 466.0]},
{'left_wheel_center_x': 415.5, 'right_wheel_center_x': 466.0, 'wheel_last_coo': (500, 316), 'car_type': 'truck',
'wheel_center_list': [466.0, 415.5]},
{'left_wheel_center_x': 413.5, 'right_wheel_center_x': 466.0, 'wheel_last_coo': (500, 317), 'car_type': 'truck',
'wheel_center_list': [466.0, 413.5]},
{'left_wheel_center_x': 413.0, 'right_wheel_center_x': 465.0, 'wheel_last_coo': (500, 319), 'car_type': 'truck',
'wheel_center_list': [465.0, 413.0]},
{'left_wheel_center_x': 410.0, 'right_wheel_center_x': 464.0, 'wheel_last_coo': (494, 322), 'car_type': 'truck',
'wheel_center_list': [464.0, 410.0]},
{'left_wheel_center_x': 408.5, 'right_wheel_center_x': 463.0, 'wheel_last_coo': (494, 323), 'car_type': 'truck',
'wheel_center_list': [408.5, 463.0]},
{'left_wheel_center_x': 405.0, 'right_wheel_center_x': 462.5, 'wheel_last_coo': (494, 326), 'car_type': 'truck',
'wheel_center_list': [405.0, 462.5]},
{'left_wheel_center_x': 404.5, 'right_wheel_center_x': 462.5, 'wheel_last_coo': (493, 328), 'car_type': 'truck',
'wheel_center_list': [404.5, 462.5]},
{'left_wheel_center_x': 403.0, 'right_wheel_center_x': 462.0, 'wheel_last_coo': (494, 333), 'car_type': 'truck',
'wheel_center_list': [403.0, 462.0]},
{'left_wheel_center_x': 400.0, 'right_wheel_center_x': 462.0, 'wheel_last_coo': (492, 336), 'car_type': 'truck',
'wheel_center_list': [400.0, 462.0]},
{'left_wheel_center_x': 392.5, 'right_wheel_center_x': 459.0, 'wheel_last_coo': (497, 341), 'car_type': 'truck',
'wheel_center_list': [392.5, 459.0]},
{'left_wheel_center_x': 391.5, 'right_wheel_center_x': 458.0, 'wheel_last_coo': (497, 341), 'car_type': 'truck',
'wheel_center_list': [458.0, 391.5]},
{'left_wheel_center_x': 388.0, 'right_wheel_center_x': 456.5, 'wheel_last_coo': (497, 343), 'car_type': 'truck',
'wheel_center_list': [456.5, 388.0]},
{'left_wheel_center_x': 387.0, 'right_wheel_center_x': 455.5, 'wheel_last_coo': (497, 344), 'car_type': 'truck',
'wheel_center_list': [387.0, 455.5]},
{'left_wheel_center_x': 382.5, 'right_wheel_center_x': 454.0, 'wheel_last_coo': (498, 349), 'car_type': 'truck',
'wheel_center_list': [382.5, 454.0]},
{'left_wheel_center_x': 380.5, 'right_wheel_center_x': 453.5, 'wheel_last_coo': (498, 353), 'car_type': 'truck',
'wheel_center_list': [380.5, 453.5]},
{'left_wheel_center_x': 377.5, 'right_wheel_center_x': 452.5, 'wheel_last_coo': (498, 354), 'car_type': 'truck',
'wheel_center_list': [377.5, 452.5]},
{'left_wheel_center_x': 372.0, 'right_wheel_center_x': 451.5, 'wheel_last_coo': (495, 362), 'car_type': 'truck',
'wheel_center_list': [451.5, 372.0]},
{'left_wheel_center_x': 371.0, 'right_wheel_center_x': 450.0, 'wheel_last_coo': (494, 365), 'car_type': 'truck',
'wheel_center_list': [450.0, 371.0]},
{'left_wheel_center_x': 364.5, 'right_wheel_center_x': 449.0, 'wheel_last_coo': (497, 371), 'car_type': 'truck',
'wheel_center_list': [449.0, 364.5]},
{'left_wheel_center_x': 361.5, 'right_wheel_center_x': 447.5, 'wheel_last_coo': (496, 375), 'car_type': 'truck',
'wheel_center_list': [361.5, 447.5]},
{'left_wheel_center_x': 356.5, 'right_wheel_center_x': 445.0, 'wheel_last_coo': (496, 382), 'car_type': 'truck',
'wheel_center_list': [356.5, 445.0]},
{'left_wheel_center_x': 354.0, 'right_wheel_center_x': 444.5, 'wheel_last_coo': (494, 385), 'car_type': 'truck',
'wheel_center_list': [354.0, 444.5]},
{'left_wheel_center_x': 342.5, 'right_wheel_center_x': 439.0, 'wheel_last_coo': (491, 394), 'car_type': 'truck',
'wheel_center_list': [342.5, 439.0]},
{'left_wheel_center_x': 337.5, 'right_wheel_center_x': 437.5, 'wheel_last_coo': (491, 399), 'car_type': 'truck',
'wheel_center_list': [337.5, 437.5]},
{'left_wheel_center_x': 331.0, 'right_wheel_center_x': 436.5, 'wheel_last_coo': (488, 409), 'car_type': 'truck',
'wheel_center_list': [331.0, 436.5]},
{'left_wheel_center_x': 326.0, 'right_wheel_center_x': 436.5, 'wheel_last_coo': (487, 416), 'car_type': 'truck',
'wheel_center_list': [436.5, 326.0]},
{'left_wheel_center_x': 321.0, 'right_wheel_center_x': 435.0, 'wheel_last_coo': (487, 419), 'car_type': 'truck',
'wheel_center_list': [321.0, 435.0]},
{'left_wheel_center_x': 310.5, 'right_wheel_center_x': 435.5, 'wheel_last_coo': (487, 431), 'car_type': 'truck',
'wheel_center_list': [435.5, 310.5]},
{'left_wheel_center_x': 305.0, 'right_wheel_center_x': 433.0, 'wheel_last_coo': (486, 437), 'car_type': 'truck',
'wheel_center_list': [305.0, 433.0]},
{'left_wheel_center_x': 292.5, 'right_wheel_center_x': 431.5, 'wheel_last_coo': (479, 447), 'car_type': 'truck',
'wheel_center_list': [292.5, 431.5]},
{'left_wheel_center_x': 287.0, 'right_wheel_center_x': 431.0, 'wheel_last_coo': (478, 456), 'car_type': 'truck',
'wheel_center_list': [287.0, 431.0]},
{'left_wheel_center_x': 272.5, 'right_wheel_center_x': 427.5, 'wheel_last_coo': (478, 476), 'car_type': 'truck',
'wheel_center_list': [272.5, 427.5]},
{'left_wheel_center_x': 264.5, 'right_wheel_center_x': 425.5, 'wheel_last_coo': (478, 480), 'car_type': 'truck',
'wheel_center_list': [264.5, 425.5]},
{'left_wheel_center_x': 248.0, 'right_wheel_center_x': 422.5, 'wheel_last_coo': (477, 481), 'car_type': 'truck',
'wheel_center_list': [248.0, 422.5]},
{'left_wheel_center_x': 236.5, 'right_wheel_center_x': 420.0, 'wheel_last_coo': (475, 477), 'car_type': 'truck',
'wheel_center_list': [420.0, 236.5]},
{'left_wheel_center_x': 213.0, 'right_wheel_center_x': 416.5, 'wheel_last_coo': (472, 479), 'car_type': 'truck',
'wheel_center_list': [416.5, 213.0]},
{'left_wheel_center_x': 200.5, 'right_wheel_center_x': 415.0, 'wheel_last_coo': (472, 479), 'car_type': 'truck',
'wheel_center_list': [200.5, 415.0]},
{'left_wheel_center_x': 191.0, 'right_wheel_center_x': 413.5, 'wheel_last_coo': (472, 479), 'car_type': 'truck',
'wheel_center_list': [413.5, 191.0]},
{'left_wheel_center_x': 167.5, 'right_wheel_center_x': 410.0, 'wheel_last_coo': (471, 480), 'car_type': 'truck',
'wheel_center_list': [410.0, 167.5]},
{'left_wheel_center_x': 155.0, 'right_wheel_center_x': 407.5, 'wheel_last_coo': (471, 478), 'car_type': 'truck',
'wheel_center_list': [155.0, 407.5]},
{'left_wheel_center_x': 126.0, 'right_wheel_center_x': 402.0, 'wheel_last_coo': (469, 475), 'car_type': 'truck',
'wheel_center_list': [402.0, 126.0]},
{'left_wheel_center_x': 112.0, 'right_wheel_center_x': 397.5, 'wheel_last_coo': (470, 477), 'car_type': 'truck',
'wheel_center_list': [112.0, 397.5]},
{'left_wheel_center_x': 95.0, 'right_wheel_center_x': 397.5, 'wheel_last_coo': (470, 475), 'car_type': 'truck',
'wheel_center_list': [397.5, 95.0]},
{'left_wheel_center_x': 396.0, 'right_wheel_center_x': 396.0, 'wheel_last_coo': (473, 476), 'car_type': 'truck',
'wheel_center_list': [396.0]},
{'left_wheel_center_x': 391.0, 'right_wheel_center_x': 391.0, 'wheel_last_coo': (473, 475), 'car_type': 'truck',
'wheel_center_list': [391.0]},
{'left_wheel_center_x': 45.5, 'right_wheel_center_x': 382.5, 'wheel_last_coo': (470, 475), 'car_type': 'truck',
'wheel_center_list': [45.5, 382.5]},
{'left_wheel_center_x': 382.0, 'right_wheel_center_x': 382.0, 'wheel_last_coo': (468, 476), 'car_type': 'truck',
'wheel_center_list': [382.0]},
{'left_wheel_center_x': 377.5, 'right_wheel_center_x': 377.5, 'wheel_last_coo': (465, 474), 'car_type': 'truck',
'wheel_center_list': [377.5]},
{'left_wheel_center_x': 375.0, 'right_wheel_center_x': 375.0, 'wheel_last_coo': (465, 474), 'car_type': 'truck',
'wheel_center_list': [375.0]},
{'left_wheel_center_x': 366.0, 'right_wheel_center_x': 366.0, 'wheel_last_coo': (464, 475), 'car_type': 'truck',
'wheel_center_list': [366.0]},
{'left_wheel_center_x': 361.5, 'right_wheel_center_x': 361.5, 'wheel_last_coo': (463, 475), 'car_type': 'truck',
'wheel_center_list': [361.5]},
{'left_wheel_center_x': 362.5, 'right_wheel_center_x': 362.5, 'wheel_last_coo': (460, 474), 'car_type': 'truck',
'wheel_center_list': [362.5]},
{'left_wheel_center_x': 353.5, 'right_wheel_center_x': 353.5, 'wheel_last_coo': (458, 472), 'car_type': 'truck',
'wheel_center_list': [353.5]},
{'left_wheel_center_x': 350.0, 'right_wheel_center_x': 350.0, 'wheel_last_coo': (457, 473), 'car_type': 'truck',
'wheel_center_list': [350.0]},
{'left_wheel_center_x': 338.5, 'right_wheel_center_x': 338.5, 'wheel_last_coo': (451, 473), 'car_type': 'truck',
'wheel_center_list': [338.5]},
{'left_wheel_center_x': 336.0, 'right_wheel_center_x': 336.0, 'wheel_last_coo': (446, 474), 'car_type': 'truck',
'wheel_center_list': [336.0]},
{'left_wheel_center_x': 328.0, 'right_wheel_center_x': 328.0, 'wheel_last_coo': (441, 471), 'car_type': 'truck',
'wheel_center_list': [328.0]},
{'left_wheel_center_x': 316.0, 'right_wheel_center_x': 316.0, 'wheel_last_coo': (438, 476), 'car_type': 'truck',
'wheel_center_list': [316.0]},
{'left_wheel_center_x': 307.0, 'right_wheel_center_x': 307.0, 'wheel_last_coo': (435, 476), 'car_type': 'truck',
'wheel_center_list': [307.0]},
{'left_wheel_center_x': 304.0, 'right_wheel_center_x': 304.0, 'wheel_last_coo': (430, 474), 'car_type': 'truck',
'wheel_center_list': [304.0]},
{'left_wheel_center_x': 300.0, 'right_wheel_center_x': 300.0, 'wheel_last_coo': (427, 477), 'car_type': 'truck',
'wheel_center_list': [300.0]},
{'left_wheel_center_x': 282.5, 'right_wheel_center_x': 282.5, 'wheel_last_coo': (420, 477), 'car_type': 'truck',
'wheel_center_list': [282.5]},
{'left_wheel_center_x': 269.0, 'right_wheel_center_x': 269.0, 'wheel_last_coo': (418, 475), 'car_type': 'truck',
'wheel_center_list': [269.0]},
{'left_wheel_center_x': 251.5, 'right_wheel_center_x': 251.5, 'wheel_last_coo': (407, 477), 'car_type': 'truck',
'wheel_center_list': [251.5]},
{'left_wheel_center_x': 237.5, 'right_wheel_center_x': 237.5, 'wheel_last_coo': (402, 478), 'car_type': 'truck',
'wheel_center_list': [237.5]},
{'left_wheel_center_x': 211.5, 'right_wheel_center_x': 211.5, 'wheel_last_coo': (395, 477), 'car_type': 'truck',
'wheel_center_list': [211.5]},
{'left_wheel_center_x': 190.0, 'right_wheel_center_x': 190.0, 'wheel_last_coo': (394, 478), 'car_type': 'truck',
'wheel_center_list': [190.0]},
{'left_wheel_center_x': 185.0, 'right_wheel_center_x': 185.0, 'wheel_last_coo': (392, 480), 'car_type': 'truck',
'wheel_center_list': [185.0]},
{'left_wheel_center_x': 153.0, 'right_wheel_center_x': 153.0, 'wheel_last_coo': (383, 474), 'car_type': 'truck',
'wheel_center_list': [153.0]},
{'left_wheel_center_x': 131.5, 'right_wheel_center_x': 131.5, 'wheel_last_coo': (376, 477), 'car_type': 'truck',
'wheel_center_list': [131.5]},
{'left_wheel_center_x': 102.5, 'right_wheel_center_x': 102.5, 'wheel_last_coo': (363, 477), 'car_type': 'truck',
'wheel_center_list': [102.5]}]
def generate_all_wheel_fig(wheel_parms, fig_path, csv_path):
"""
Generate a scatter plot of all wheel center coordinates
:param wheel_parms: per-frame wheel detection results
:param fig_path: path suffix for the generated scatter plot
:param csv_path: path suffix for the exported CSV data
:return:
"""
# wheel_parms = [
# {'left_wheel_center_x': 518.5, 'right_wheel_center_x': 518.5, 'wheel_last_coo': (527, 234), 'car_type': 'bus',
# 'wheel_center_list': [518.5]},
# {'left_wheel_center_x': 519.0, 'right_wheel_center_x': 519.0, 'wheel_last_coo': (529, 234), 'car_type': 'bus',
# 'wheel_center_list': [519.0]}]
# create an empty dataframe
wheel_data = pd.DataFrame(columns=["frame_index", "wheel_centers"])
# iterate over the wheel coordinate list and load the data into the dataframe
for frame in range(len(wheel_parms)):
wheel_center_list = wheel_parms[frame]['wheel_center_list']
wheel_center_list.sort()
for item in range(len(wheel_center_list)):
wheel_data.loc[wheel_data.shape[0]] = {"frame_index": frame, "wheel_centers": wheel_center_list[item]}
# apply the default seaborn theme, scaling and color palette
sns.set()
# seaborn expects the raw input data as a pandas DataFrame or a numpy array
data = wheel_data
fig = sns.scatterplot(x=data['frame_index'], y=data['wheel_centers'],
data=data, s=100)
scatter_fig = fig.get_figure()
# fig_path is the folder or path the figure is saved to
scatter_fig.savefig('all_wheel' + fig_path, dpi=400)
wheel_data.to_csv('all_wheel' + csv_path)
plt.clf()
def generate_left_wheel_fig(wheel_parms, fig_path, csv_path):
"""
Generate a scatter plot of the leftmost wheel's x coordinate
:param wheel_parms: per-frame wheel detection results
:param fig_path: path suffix for the generated scatter plot
:param csv_path: path suffix for the exported CSV data
:return:
"""
# create an empty dataframe
wheel_data = pd.DataFrame(columns=["frame_index", "left_wheel_center_x"])
# iterate over the wheel coordinate list and load the data into the dataframe
for frame in range(len(wheel_parms)):
left_wheel_center_x = wheel_parms[frame]['left_wheel_center_x']
wheel_data.loc[wheel_data.shape[0]] = {"frame_index": frame, "left_wheel_center_x": left_wheel_center_x}
# apply the default seaborn theme, scaling and color palette
sns.set()
# seaborn expects the raw input data as a pandas DataFrame or a numpy array
data = wheel_data
fig = sns.scatterplot(x=data['frame_index'], y=data['left_wheel_center_x'],
data=data, s=50)
scatter_fig = fig.get_figure()
# fig_path is the folder or path the figure is saved to
scatter_fig.savefig('left_wheel_center_x_' + fig_path, dpi=400)
wheel_data.to_csv('left_wheel_center_x_' + csv_path)
plt.clf()
def generate_diffVal_wheel_fig(wheel_parms, fig_path, csv_path):
"""
Generate a scatter plot of the gap between the leftmost wheel and the second wheel
:param wheel_parms: per-frame wheel detection results
:param fig_path: path suffix for the generated scatter plot
:param csv_path: path suffix for the exported CSV data
:return:
"""
# create an empty dataframe
wheel_data = pd.DataFrame(columns=["frame_index", "diffVal"])
# iterate over the wheel coordinate list and load the data into the dataframe
for frame in range(len(wheel_parms)):
wheel_center_list = wheel_parms[frame]['wheel_center_list']
wheel_center_list.sort()
if (len(wheel_center_list) > 1):
diffVal = wheel_center_list[1] - wheel_center_list[0]
else:
diffVal = 0
wheel_data.loc[wheel_data.shape[0]] = {"frame_index": frame, "diffVal": diffVal}
# apply the default seaborn theme, scaling and color palette
sns.set()
# seaborn expects the raw input data as a pandas DataFrame or a numpy array
data = wheel_data
fig = sns.scatterplot(x=data['frame_index'], y=data['diffVal'],
data=data, s=50)
scatter_fig = fig.get_figure()
# fig_path is the folder or path the figure is saved to
scatter_fig.savefig('diffVal_' + fig_path, dpi=400)
wheel_data.to_csv('diffVal_' + csv_path)
plt.clf()
generate_all_wheel_fig(wheel_parms, 'fig3.png', 'fig3.csv')
generate_left_wheel_fig(wheel_parms, 'fig3.png', 'fig3.csv')
generate_diffVal_wheel_fig(wheel_parms, 'fig3.png', 'fig3.csv')
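# Added note: the functions prepend their own prefixes to fig_path/csv_path, so
# this run writes 'all_wheelfig3.png', 'left_wheel_center_x_fig3.png' and
# 'diffVal_fig3.png' (plus the matching CSV files) rather than a single fig3.png.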
| [
"[email protected]"
]
| |
b9b81d1b217f65aab200611e5db505cd665c0456 | c1d03f41b6c80ef1e0a42b1bb710ba90d680e4c2 | /src/softfab/waiting.py | 2015d3a7b6b5ba0ba4af698aa869be68ff7dda67 | [
"BSD-3-Clause"
]
| permissive | boxingbeetle/softfab | 4f96fc389dec5cd3dc987a427c2f491a19cbbef4 | 0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14 | refs/heads/master | 2021-06-22T15:42:38.857018 | 2020-11-23T22:53:21 | 2020-11-23T22:53:21 | 169,245,088 | 20 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,963 | py | # SPDX-License-Identifier: BSD-3-Clause
'''Implementation of the "reason for waiting" feature.
This informs the user of why a task isn't running yet.
'''
from abc import ABC
from enum import IntEnum
from typing import (
TYPE_CHECKING, AbstractSet, Callable, ClassVar, Iterable, List, Optional,
Sequence, Union
)
from softfab.connection import ConnectionStatus
from softfab.resreq import ResourceSpec
from softfab.utils import abstract, pluralize
if TYPE_CHECKING:
# pylint: disable=cyclic-import
from softfab.resourcelib import ResourceBase, TaskRunner
else:
ResourceBase = None
TaskRunner = None
# Implementation of "Reason for waiting" feature:
StatusLevel = IntEnum('StatusLevel', 'FREE RESERVED SUSPENDED MISSING')
def statusLevelForResource(resource: ResourceBase) -> StatusLevel:
if resource.getConnectionStatus() in (
ConnectionStatus.LOST, ConnectionStatus.NEW
):
return StatusLevel.MISSING
elif resource.isReserved():
return StatusLevel.RESERVED
elif resource.isSuspended():
return StatusLevel.SUSPENDED
else:
return StatusLevel.FREE
def _describeLevel(level: Union[StatusLevel, int]) -> str:
if level == StatusLevel.RESERVED:
return 'in use'
elif level == StatusLevel.SUSPENDED:
return 'suspended'
elif level == StatusLevel.MISSING:
return 'unavailable'
else:
return 'not defined'
class ReasonForWaiting:
"""Describes a reason why a task isn't running.
"""
def __str__(self) -> str:
return f'{self.__class__.__name__}({self.description})'
@property
def priority(self) -> Sequence[int]:
"""A tuple of integers that indicates the priority of this reason:
the reason with highest the tuple value will be presented to the user.
"""
raise NotImplementedError
@property
def description(self) -> str:
"""A message for the user that describes this reason.
"""
raise NotImplementedError
class InputReason(ReasonForWaiting):
def __init__(self, inputs: Sequence[str]):
super().__init__()
self.__inputs = inputs
@property
def priority(self) -> Sequence[int]:
return (6,)
@property
def description(self) -> str:
inputs = self.__inputs
return 'waiting for %s: %s' % (
pluralize('input', len(inputs)),
', '.join(inputs)
)
class ResourceMissingReason(ReasonForWaiting):
def __init__(self, resourceId: str):
super().__init__()
self.__resourceId = resourceId
@property
def priority(self) -> Sequence[int]:
return (0, 2)
@property
def description(self) -> str:
return f'resource was deleted: {self.__resourceId}'
class ResourceReason(ReasonForWaiting, ABC):
prioMinor: ClassVar[int] = abstract
def __init__(self, level: StatusLevel):
super().__init__()
self._level = level
@property
def priority(self) -> Sequence[int]:
return (5, self._level, self.prioMinor)
@property
def description(self) -> str:
raise NotImplementedError
class ResourceCapsReason(ResourceReason):
prioMinor = 0
def __init__(self, typeName: str, level: StatusLevel):
super().__init__(level)
self.__type = typeName
@property
def description(self) -> str:
return 'resources of type "%s" with required capabilities are %s' % (
self.__type, _describeLevel(self._level + 1)
)
class ResourceSpecReason(ResourceReason):
prioMinor = 1
def __init__(self, spec: ResourceSpec, level: StatusLevel):
super().__init__(level)
self.__spec = spec
@property
def description(self) -> str:
return 'resources matching reference "%s" are %s' % (
self.__spec.reference, _describeLevel(self._level + 1)
)
class ResourceTypeReason(ResourceReason):
prioMinor = 2
def __init__(self, typeName: str, shortage: int, level: StatusLevel):
super().__init__(level)
self.__type = typeName
self.__shortage = shortage
@property
def description(self) -> str:
level = self._level
if level == StatusLevel.FREE:
return 'waiting for %s: %s' % (
pluralize('resource', self.__shortage), self.__type
)
elif level == StatusLevel.RESERVED:
return 'required %s suspended: %s' % (
'resource is' if self.__shortage == 1 else 'resources are',
self.__type
)
else:
return f'not enough resources: {self.__type}'
class BoundReason(ReasonForWaiting):
def __init__(self, boundRunnerId: str):
super().__init__()
self.__boundRunnerId = boundRunnerId
@property
def priority(self) -> Sequence[int]:
return (4,)
@property
def description(self) -> str:
return f'waiting for bound Task Runner: {self.__boundRunnerId}'
class _CapabilitiesReason(ReasonForWaiting, ABC):
selectorMajor: ClassVar[int] = abstract
def __init__(self,
missingOnAll: AbstractSet[str],
missingOnAny: AbstractSet[str]
):
super().__init__()
self._missingOnAll = missingOnAll
self._missingOnAny = missingOnAny
@property
def priority(self) -> Sequence[int]:
return (
self.selectorMajor,
1,
len(self._missingOnAll),
len(self._missingOnAny)
)
@property
def description(self) -> str:
raise NotImplementedError
class TRCapsReason(_CapabilitiesReason):
selectorMajor = 2
@property
def description(self) -> str:
if self._missingOnAll:
return 'no Task Runner has any of these capabilities: ' + \
', '.join(sorted(self._missingOnAll))
else:
return 'no Task Runner has all of these capabilities: ' + \
', '.join(sorted(self._missingOnAny))
class TRStateReason(ReasonForWaiting):
def __init__(self, level: StatusLevel):
super().__init__()
self.__level = level
@property
def priority(self) -> Sequence[int]:
return (2, 0, self.__level.value)
@property
def description(self) -> str:
return 'all suitable Task Runners are ' + _describeLevel(self.__level)
class UnboundGroupCapsReason(_CapabilitiesReason):
selectorMajor = 1
@property
def description(self) -> str:
if self._missingOnAll:
return 'no Task Runner has any of these group capabilities: ' + \
', '.join(sorted(self._missingOnAll))
else:
return 'no Task Runner has all of these group capabilities: ' + \
', '.join(sorted(self._missingOnAny))
class UnboundGroupStateReason(ReasonForWaiting):
def __init__(self, level: StatusLevel):
super().__init__()
self.__level = level
@property
def priority(self) -> Sequence[int]:
return (1, 0, self.__level.value)
@property
def description(self) -> str:
return 'all Task Runners suitable for the task group are %s' % \
_describeLevel(self.__level)
class BoundGroupTargetReason(ReasonForWaiting):
def __init__(self, boundRunnerId: str, target: str):
super().__init__()
self.__boundRunnerId = boundRunnerId
self.__target = target
@property
def priority(self) -> Sequence[int]:
return (3, 2)
@property
def description(self) -> str:
return f'bound Task Runner "{self.__boundRunnerId}" ' \
f'does not support target "{self.__target}"'
class BoundGroupCapsReason(_CapabilitiesReason):
selectorMajor = 3
severity = 7
def __init__(self, boundRunnerId: str, *args: AbstractSet[str]):
self.__boundRunnerId = boundRunnerId
super().__init__(*args)
@property
def description(self) -> str:
assert self._missingOnAll == self._missingOnAny
return 'bound Task Runner "%s" does not have group capabilities: %s' % (
self.__boundRunnerId,
', '.join(sorted(self._missingOnAll or self._missingOnAny))
)
class BoundGroupStateReason(ReasonForWaiting):
def __init__(self, boundRunnerId: str, level: StatusLevel):
super().__init__()
self.__boundRunnerId = boundRunnerId
self.__level = level
@property
def priority(self) -> Sequence[int]:
return (3, 0, self.__level.value)
@property
def description(self) -> str:
return f'bound Task Runner "{self.__boundRunnerId}" ' \
f'is {_describeLevel(self.__level)}'
def topWhyNot(whyNot: Iterable[ReasonForWaiting]) -> ReasonForWaiting:
"""Returns the highest priority reason for waiting from `whyNot`.
"""
return max(whyNot, key=lambda reason: reason.priority)
# TODO: This algorithm is similar to the one in
# resourcelib.reserveResources(), maybe they can be combined?
# (Another clue that a Task Runner is a special kind of resource.)
def _checkCapabilities(
runners: Sequence[TaskRunner],
whyNot: List[ReasonForWaiting],
reasonFactory: Callable[[AbstractSet[str], AbstractSet[str]],
ReasonForWaiting],
neededCaps: AbstractSet[str]
) -> Sequence[TaskRunner]:
'''Filter out Task Runners without the required capabilities.
'''
foundRunners = []
missingOnAny: AbstractSet[str] = set()
missingOnAll: Optional[AbstractSet[str]] = None
for runner in runners:
missingCaps = neededCaps - runner.capabilities
if missingCaps:
missingOnAny |= missingCaps
if missingOnAll is None:
missingOnAll = missingCaps
else:
missingOnAll &= missingCaps
else:
foundRunners.append(runner)
if not foundRunners:
# There are no Task Runners with the right capabilities.
if runners:
assert missingOnAll is not None
whyNot.append(reasonFactory(missingOnAll, missingOnAny))
return foundRunners
def _checkState(
runners: Sequence[TaskRunner],
whyNot: List[ReasonForWaiting],
reasonFactory: Callable[[StatusLevel], ReasonForWaiting],
) -> None:
'''Report when Task Runner state is a reason a task is not running.
'''
level = min(
(statusLevelForResource(runner) for runner in runners),
default=StatusLevel.FREE
)
if level != StatusLevel.FREE:
whyNot.append(reasonFactory(level))
def checkRunners(
runners: Sequence[TaskRunner],
neededCaps: AbstractSet[str],
whyNot: List[ReasonForWaiting]
) -> Sequence[TaskRunner]:
runners = _checkCapabilities(runners, whyNot, TRCapsReason, neededCaps)
_checkState(runners, whyNot, TRStateReason)
return runners
def checkGroupRunners(
runners: Sequence[TaskRunner],
neededCaps: AbstractSet[str],
whyNot: List[ReasonForWaiting]
) -> Sequence[TaskRunner]:
runners = _checkCapabilities(
runners, whyNot, UnboundGroupCapsReason, neededCaps
)
_checkState(runners, whyNot, UnboundGroupStateReason)
return runners
def checkBoundGroupRunner(
boundRunner: TaskRunner,
neededCaps: AbstractSet[str],
whyNot: List[ReasonForWaiting]
) -> Sequence[TaskRunner]:
boundRunnerId = boundRunner.getId()
runners: Sequence[TaskRunner] = [ boundRunner ]
runners = _checkCapabilities(
runners, whyNot,
lambda *args: BoundGroupCapsReason(boundRunnerId, *args),
neededCaps
)
_checkState(
runners, whyNot,
lambda level: BoundGroupStateReason(boundRunnerId, level)
)
return runners
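# Illustrative only (added, not part of the module): choosing the message to show.
#
#     reasons = [InputReason(['image']), TRStateReason(StatusLevel.SUSPENDED)]
#     print(topWhyNot(reasons).description)   # "waiting for input: image"
#
# InputReason's priority tuple (6,) outranks TRStateReason's (2, 0, ...), so the
# input message is the one reported to the user.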
| [
"[email protected]"
]
| |
222a2fb37a868059da32a081e15ecbce85b1ff49 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_peeked.py | 8d7bc05fa8a2f971e389bfaba31a98cad7940000 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
from xai.brain.wordbase.verbs._peek import _PEEK
# class header
class _PEEKED(_PEEK, ):
def __init__(self,):
_PEEK.__init__(self)
self.name = "PEEKED"
self.specie = 'verbs'
self.basic = "peek"
self.jsondata = {}
| [
"[email protected]"
]
| |
c6fb5bf9e1f88c1482a354106390c40ed92f2fb0 | adfb9b91518752b361713594eacd850557f7721b | /tests/test_html_formatter.py | 9e0fd4ac17549f8581a9e9139cf292adde5af931 | [
"BSD-2-Clause"
]
| permissive | erickt/pygments | 6223c688cbb6ef81ab3f73d6aa7da9fa796bb6c4 | 05d4b6ce7e51501d2ac22919386017c08c9f5547 | refs/heads/master | 2021-01-10T18:59:43.144002 | 2009-05-17T17:05:11 | 2009-05-17T17:05:11 | 1,125,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,348 | py | # -*- coding: utf-8 -*-
"""
Pygments HTML formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2009 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import unittest
import StringIO
import tempfile
from os.path import join, dirname, isfile, abspath
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter, NullFormatter
from pygments.formatters.html import escape_html
import support
TESTFILE, TESTDIR = support.location(__file__)
tokensource = list(PythonLexer(encoding='utf-8').get_tokens(open(TESTFILE).read()))
class HtmlFormatterTest(unittest.TestCase):
def test_correct_output(self):
hfmt = HtmlFormatter(nowrap=True)
houtfile = StringIO.StringIO()
hfmt.format(tokensource, houtfile)
nfmt = NullFormatter()
noutfile = StringIO.StringIO()
nfmt.format(tokensource, noutfile)
stripped_html = re.sub('<.*?>', '', houtfile.getvalue())
escaped_text = escape_html(noutfile.getvalue())
self.assertEquals(stripped_html, escaped_text)
def test_external_css(self):
# test correct behavior
# CSS should be in /tmp directory
fmt1 = HtmlFormatter(full=True, cssfile='fmt1.css', outencoding='utf-8')
# CSS should be in TESTDIR (TESTDIR is absolute)
fmt2 = HtmlFormatter(full=True, cssfile=join(TESTDIR, 'fmt2.css'),
outencoding='utf-8')
tfile = tempfile.NamedTemporaryFile(suffix='.html')
fmt1.format(tokensource, tfile)
try:
fmt2.format(tokensource, tfile)
self.assert_(isfile(join(TESTDIR, 'fmt2.css')))
except IOError:
# test directory not writable
pass
tfile.close()
self.assert_(isfile(join(dirname(tfile.name), 'fmt1.css')))
os.unlink(join(dirname(tfile.name), 'fmt1.css'))
try:
os.unlink(join(TESTDIR, 'fmt2.css'))
except OSError:
pass
def test_all_options(self):
for optdict in [dict(nowrap=True),
dict(linenos=True),
dict(linenos=True, full=True),
dict(linenos=True, full=True, noclasses=True)]:
outfile = StringIO.StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
def test_valid_output(self):
# test all available wrappers
fmt = HtmlFormatter(full=True, linenos=True, noclasses=True,
outencoding='utf-8')
handle, pathname = tempfile.mkstemp('.html')
tfile = os.fdopen(handle, 'w+b')
fmt.format(tokensource, tfile)
tfile.close()
catname = os.path.join(TESTDIR, 'dtds', 'HTML4.soc')
try:
try:
import subprocess
ret = subprocess.Popen(['nsgmls', '-s', '-c', catname, pathname],
stdout=subprocess.PIPE).wait()
except ImportError:
# Python 2.3 - no subprocess module
ret = os.popen('nsgmls -s -c "%s" "%s"' % (catname, pathname)).close()
if ret == 32512: raise OSError # not found
except OSError:
# nsgmls not available
pass
else:
self.failIf(ret, 'nsgmls run reported errors')
os.unlink(pathname)
def test_get_style_defs(self):
fmt = HtmlFormatter()
sd = fmt.get_style_defs()
self.assert_(sd.startswith('.'))
fmt = HtmlFormatter(cssclass='foo')
sd = fmt.get_style_defs()
self.assert_(sd.startswith('.foo'))
sd = fmt.get_style_defs('.bar')
self.assert_(sd.startswith('.bar'))
sd = fmt.get_style_defs(['.bar', '.baz'])
fl = sd.splitlines()[0]
self.assert_('.bar' in fl and '.baz' in fl)
def test_unicode_options(self):
fmt = HtmlFormatter(title=u'Föö',
cssclass=u'bär',
cssstyles=u'div:before { content: \'bäz\' }',
encoding='utf-8')
handle, pathname = tempfile.mkstemp('.html')
tfile = os.fdopen(handle, 'w+b')
fmt.format(tokensource, tfile)
tfile.close()
| [
"devnull@localhost"
]
| devnull@localhost |
8018c52bfae85b474764db99ee57fd7438b91010 | bc441bb06b8948288f110af63feda4e798f30225 | /scheduler_sdk/model/topology/link_pb2.pyi | dd2a9cad5ae92103824e305f7a2d316b07a017af | [
"Apache-2.0"
]
| permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,043 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from scheduler_sdk.model.topology.linkStyle_pb2 import (
LinkStyle as scheduler_sdk___model___topology___linkStyle_pb2___LinkStyle,
)
from typing import (
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class Link(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
source = ... # type: typing___Text
target = ... # type: typing___Text
@property
def style(self) -> scheduler_sdk___model___topology___linkStyle_pb2___LinkStyle: ...
def __init__(self,
*,
source : typing___Optional[typing___Text] = None,
target : typing___Optional[typing___Text] = None,
style : typing___Optional[scheduler_sdk___model___topology___linkStyle_pb2___LinkStyle] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> Link: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Link: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"style",b"style"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"source",b"source",u"style",b"style",u"target",b"target"]) -> None: ...
| [
"[email protected]"
]
| |
6abcd1f7e1f19b6f90c46dbd20e5481f4ebc5940 | 35fb652b0b20e7352cacdc078e23464fad40ccf3 | /web/controllers/api/member.py | 1e2ff1b387e4e2d46f19dd619234d93e17030ffd | []
| no_license | xiaoheng14/flask_wx_order | 52f8fe01a473855c22a43c2651b102c291dbde04 | be3314fdb0266eecf4ca7f5a55b2ea24078857c9 | refs/heads/master | 2020-08-23T03:59:19.006943 | 2018-11-19T12:21:25 | 2018-11-19T12:21:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,226 | py | # _*_ coding: utf-8 _*_
"""
__author__ = 'lawtech'
__date__ = '2018/11/14 3:17 PM'
"""
from flask import request, jsonify
from web.controllers.api import route_api
from application import app, db
from common.models.member.member import Member
from common.models.member.oauth_member_bind import OauthMemberBind
from common.libs.helper import get_current_date
from common.libs.member.member_service import MemberService
@route_api.route("/member/login", methods=["GET", "POST"])
def login():
resp = {"code": 200, "msg": "操作成功", "data": {}}
req = request.values
code = req["code"] if "code" in req else ""
if not code or len(code) < 1:
resp["code"] = -1
resp["msg"] = "需要code"
return jsonify(resp)
openid = MemberService.get_wx_openid(code)
if openid is None:
resp["code"] = -1
resp["msg"] = "调用微信出错"
return jsonify(resp)
nickname = req["nickName"] if "nickName" in req else ""
sex = req["gender"] if "gender" in req else 0
avatar = req["avatarUrl"] if "avatarUrl" in req else ""
    # Check whether this openid is already registered; if so, return its info directly
bind_info = OauthMemberBind.query.filter_by(openid=openid, type=1).first()
if not bind_info:
model_member = Member()
model_member.nickname = nickname
model_member.sex = sex
model_member.avatar = avatar
model_member.salt = MemberService.gen_salt()
model_member.updated_time = model_member.created_time = get_current_date()
db.session.add(model_member)
db.session.commit()
model_bind = OauthMemberBind()
model_bind.member_id = model_member.id
model_bind.type = 1
model_bind.openid = openid
model_bind.extra = ""
model_bind.updated_time = model_bind.created_time = get_current_date()
db.session.add(model_bind)
db.session.commit()
bind_info = model_bind
member_info = Member.query.filter_by(id=bind_info.member_id).first()
token = "{}#{}".format(MemberService.gen_auth_code(member_info), member_info.id)
resp["data"] = {"token": token}
return jsonify(resp)
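# Illustrative request/response sketch (explanatory note, not part of the original
# module; the URL prefix depends on how route_api is registered):
#   POST .../member/login   params: code, nickName, gender, avatarUrl
#   success -> {"code": 200, "msg": "...", "data": {"token": "<auth_code>#<member_id>"}}
#   missing code or WeChat error -> {"code": -1, "msg": "...", "data": {}}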
@route_api.route("/member/check-reg", methods=["GET", "POST"])
def check_reg():
resp = {"code": 200, "msg": "操作成功", "data": {}}
req = request.values
code = req["code"] if "code" in req else ""
if not code or len(code) < 1:
resp["code"] = -1
resp["msg"] = "需要code"
return jsonify(resp)
openid = MemberService.get_wx_openid(code)
if openid is None:
resp["code"] = -1
resp["msg"] = "调用微信出错"
return jsonify(resp)
bind_info = OauthMemberBind.query.filter_by(openid=openid, type=1).first()
if not bind_info:
resp['code'] = -1
resp['msg'] = "未绑定"
return jsonify(resp)
member_info = Member.query.filter_by(id=bind_info.member_id).first()
if not member_info:
resp['code'] = -1
resp['msg'] = "未查询到绑定信息"
return jsonify(resp)
token = "{}#{}".format(MemberService.gen_auth_code(member_info), member_info.id)
resp["data"] = {"token": token}
return jsonify(resp)
| [
"[email protected]"
]
| |
5b7a6d3de8a6a095f5ae66152608704c05e1b35f | 3b6b6a580bf6127b288a42ab4519565adc720fbd | /days/081-084-unit-testing/demo/billtracker/billtracker/views/default.py | 3f33c6eaf1a96ca4e7eb9e56dae0090be863f2f8 | []
| no_license | talkpython/100daysofweb-with-python-course | f1f296a5e52670fccba895e078318a5098f96e2f | c6f2fb22a29f74284b2d52ee019e0ace6a6353fc | refs/heads/master | 2023-07-19T11:21:46.515974 | 2023-04-25T21:34:27 | 2023-04-25T21:34:27 | 134,765,291 | 627 | 495 | null | 2023-04-25T21:34:28 | 2018-05-24T20:28:21 | JavaScript | UTF-8 | Python | false | false | 1,271 | py | from pyramid.httpexceptions import HTTPFound
from pyramid.request import Request
from pyramid.response import Response
from pyramid.view import view_config
from billtracker.data import repository
from billtracker.viewmodels.default.index_viewmodel import IndexViewModel
from billtracker.viewmodels.default.bill_details_viewmodel import BillDetailsViewModel
@view_config(route_name='home', renderer='../templates/home/default.pt')
def home(request: Request):
vm = IndexViewModel(request, user_id=1)
return vm.to_dict()
@view_config(route_name='details',
renderer='../templates/home/details.pt',
request_method='GET')
def details_get(request: Request):
vm = BillDetailsViewModel(request, user_id=1)
if not vm.bill:
return Response(status=404)
return vm.to_dict()
@view_config(route_name='details',
renderer='../templates/home/details.pt',
request_method='POST')
def details_post(request: Request):
vm = BillDetailsViewModel(request, user_id=1)
if not vm.bill:
return Response(status=404)
vm.from_form()
if vm.error:
return vm.to_dict()
repository.add_payment(vm.amount, vm.bill_id)
return HTTPFound(location='/bill/{}'.format(vm.bill_id))
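# Flow sketch (explanatory note): GET renders the bill details template, POST
# validates the submitted amount via the viewmodel, records a payment and then
# redirects back to /bill/<id>; both return 404 when the bill id is unknown.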
| [
"[email protected]"
]
| |
47168d33475c995b581c6678f725b0552cf48aa9 | 3611880ca0cb9266ca30aeaa318342e64bf5302e | /workers/record_cleaner/appcast.py | 8dcb0d2b24ab40fb5236dc864ddb7d386e77ff1d | []
| no_license | Roychenlei/Algorithms_Learning | dff9311ae2ec61db8188e880e19a1e6432fb3fd8 | 468135e10d490dd861d3826bfbd4776d9261dbb7 | refs/heads/master | 2021-06-01T20:21:17.307450 | 2019-08-06T09:32:45 | 2019-08-06T09:32:45 | 96,177,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | from .base_record_parser import BaseParser
class Parser(BaseParser):
source_name = 'AC'
desc_tag_name = 'body'
def build_id(self):
return self.orig_data.get('job_reference')
def build_industry(self):
# category may exist in orig data and the value is null
# the output is a list with at least 1 industry
return [self.orig_data.get('appcast_category')]
def build_price(self):
return 'PAY_SCALE_2'
| [
"[email protected]"
]
| |
dfe8082e2726ca32bc985e3483fbc0681f060f9e | 4ef80242cf22a1ccd0d7a2042476b5b6ac1eb03e | /scadparser/grammar/ScadModel.py | a8c274c58936bf26cbee79f06bab9ab9de132def | []
| no_license | rblack42/ScadParser | 71081adb99ec03e78bc78b4101562b7fa1bab134 | a9cc10b23c6515a53065dfb58b23881d0145f88d | refs/heads/master | 2023-07-11T03:51:53.434534 | 2021-08-27T02:03:37 | 2021-08-27T02:03:37 | 397,718,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | #!/usr/bin/env python
# CAVEAT UTILITOR
#
# This file was automatically generated by TatSu.
#
# https://pypi.python.org/pypi/tatsu/
#
# Any changes you make to it will be overwritten the next time
# the file is generated.
from __future__ import annotations
from tatsu.objectmodel import Node
from tatsu.semantics import ModelBuilderSemantics
class ModelBase(Node):
pass
class SCADModelBuilderSemantics(ModelBuilderSemantics):
def __init__(self, context=None, types=None):
types = [
t for t in globals().values()
if type(t) is type and issubclass(t, ModelBase)
] + (types or [])
super(SCADModelBuilderSemantics, self).__init__(context=context, types=types)
| [
"[email protected]"
]
| |
78ffa04b42257652a8e823e4c1740983e643782b | aea2c3d54f3524e58356ce02ff3dd72e00cb1551 | /2018/fakephoton_crab/crab_help.py | 0a538a0bbf0c71c0d0d94b8cca69aa88b16c5543 | []
| no_license | gqlcms/WWG_Analysis | e04424597815b0bd5fd1285d8275768badb0f381 | 21843b4e5355ec7e6a2df0aa62623d9ad21a9032 | refs/heads/master | 2023-04-17T14:06:34.604914 | 2021-04-23T05:34:00 | 2021-04-23T05:34:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,923 | py | import os,sys
import argparse
import json
import subprocess
import shutil
parser = argparse.ArgumentParser(description='prepare crab code')
parser.add_argument('-f', dest='file', default='', help='json file input')
parser.add_argument('-m', dest='mode', default='', help='work mode')
args = parser.parse_args()
def get_abbre(name,sample_type,year):
if sample_type == 'MC':
return name.split('/')[1] + '_' + year
elif sample_type == 'data':
return name.split('/')[1] + '_' + name.split('/')[2].split('-')[0]
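# Examples (illustrative dataset names, not real samples):
#   MC:   get_abbre('/WWG_sample/RunIISummer19/NANOAODSIM', 'MC', '2018')
#         -> 'WWG_sample_2018'
#   data: get_abbre('/SingleMuon/Run2018A-UL2018-v1/NANOAOD', 'data', '2018')
#         -> 'SingleMuon_Run2018A'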
def prepare_crab(name,sample_type,year):
abbre_name = get_abbre(name,sample_type,year)
if not os.path.exists('crabcode_' + year):
os.mkdir("crabcode_" + year)
print ("------> preparing submit code for",abbre_name)
with open('crabcode_' + year + '/' + abbre_name + '_cfg.py', 'w+') as f:
f.write('from WMCore.Configuration import Configuration \n\n')
f.write('config = Configuration()\n')
f.write('config.section_("General")\n')
f.write('config.General.requestName = "' + abbre_name + '"\n')
f.write('config.General.transferLogs = False \n')
f.write('config.General.workArea = "crab' + year + '"\n\n')
f.write('config.section_("JobType")\n')
f.write('config.JobType.pluginName = "Analysis"\n')
f.write('config.JobType.psetName = "PSet.py"\n')
f.write('config.JobType.scriptExe = "./WWG_crab_script.sh" \n')
f.write('config.JobType.inputFiles = ["../../../scripts/haddnano.py","../WWG_fakephoton/WWG_postproc.py","../WWG_fakephoton/WWGfakephoton_Module.py","../WWG_fakephoton/WWG_keep_and_drop.txt","../WWG_fakephoton/WWG_output_branch.txt","../WWG_fakephoton/DAS_filesearch.py"] #hadd nano will not be needed once nano tools are in cmssw \n')
f.write('config.JobType.scriptArgs = ["isdata=' + sample_type + '","year=' + year + '"] \n')
f.write('config.JobType.sendPythonFolder = True\n')
f.write('config.JobType.allowUndistributedCMSSW = True \n\n')
f.write('config.section_("Data")\n')
f.write('config.Data.inputDataset = "' + name + '" \n')
f.write('#config.Data.inputDBS = "phys03"\n')
f.write('config.Data.inputDBS = "global"\n')
f.write('# config.Data.splitting = "LumiBased"\n')
f.write('config.Data.splitting = "FileBased"\n')
f.write('#config.Data.splitting = "EventAwareLumiBased" \n')
f.write('#config.Data.splitting = "Automatic" \n')
f.write('config.Data.unitsPerJob = 1\n')
if sample_type == 'MC':
pass
elif year == '2018':
f.write('config.Data.lumiMask = "/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions18/13TeV/Legacy_2018/Cert_314472-325175_13TeV_Legacy2018_Collisions18_JSON.txt" \n\n')
elif year == '2017':
f.write('config.Data.lumiMask = "/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions17/13TeV/Legacy_2017/Cert_294927-306462_13TeV_UL2017_Collisions17_GoldenJSON.txt" \n\n')
elif year == '2016':
f.write('config.Data.lumiMask = "/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions16/13TeV/Legacy_2016/Cert_271036-284044_13TeV_Legacy2016_Collisions16_JSON.txt" \n\n')
# f.write('config.Data.outLFNDirBase ="/store/user/sdeng/WWG_analysis/' + sample_type + '/' + year + '"\n')
f.write('config.Data.publication = False\n')
f.write('config.Data.ignoreLocality = True\n')
f.write('config.Data.allowNonValidInputDataset = True\n')
f.write('config.Data.outputDatasetTag = "' + abbre_name + '" \n\n')
f.write('config.section_("Site")\n')
f.write('config.Site.storageSite = "T3_CH_CERNBOX"\n')
f.write('config.Site.whitelist = ["T2_US_MIT","T2_US_Wisconsin","T2_US_Purdue","T2_US_UCSD","T2_US_Caltech","T2_US_Nebraska"] \n')
f.close()
def submit(name,sample_type,year):
abbre_name = get_abbre(name,sample_type,year)
if not os.path.exists(f'crabcode_{year}/{abbre_name}_cfg.py'):
print ("crabcode for ",abbre_name," not existed, skipping")
return True
r=subprocess.run(args=f"crab submit -c crabcode_{year}/{abbre_name}_cfg.py",shell=True,stdout=subprocess.PIPE,encoding='utf-8')
if 'Success' in r.stdout:
print ("--------> submit info:","submit crab jobs for",abbre_name)
else:
print ("--------> submit info:","\033[31mfail\033[0m to submit for",abbre_name)
def kill(name,sample_type,year):
abbre_name = get_abbre(name,sample_type,year)
if not os.path.exists(f'crab{year}/crab_{abbre_name}'):
print ("crab log for ",abbre_name," not existed, skipping \n")
return True
r=subprocess.run(args=f"crab kill -d crab{year}/crab_{abbre_name}" ,shell=True,stdout=subprocess.PIPE,encoding='utf-8')
print (r.stdout,'\n')
shutil.rmtree(f'crab{year}/crab_{abbre_name}')
print (f'crab{year}/crab_{abbre_name} has been removed')
def status(name,sample_type,year):
abbre_name = get_abbre(name,sample_type,year)
if not os.path.exists(f'crab{year}/crab_{abbre_name}'):
print ("crab log for ",abbre_name," not existed, skipping \n")
return True
r=subprocess.run(args=f"crab status -d crab{year}/crab_{abbre_name}" ,shell=True,stdout=subprocess.PIPE,encoding='utf-8')
print (r.stdout,'\n')
def hadd_help(name,sample_type,year):
abbre_name = get_abbre(name,sample_type,year)
store_path = '/eos/user/s/sdeng/WWG_analysis'
first_name = name.split('/')[1]
if os.path.exists(f'{abbre_name}.root'):
print (f'{abbre_name} already existed, skipping')
return True
if not (os.path.exists(f'{store_path}/{sample_type}/{year}/{first_name}/{abbre_name}')):
print (f'results for {abbre_name} not existed in {store_path}/{sample_type}/{year}/{first_name}/{abbre_name}, skipping\n')
return True
if not (len(os.listdir(f'{store_path}/{sample_type}/{year}/{first_name}/{abbre_name}')) == 1 ):
print (f'more than 1 result for {abbre_name}, Please check {store_path}/{sample_type}/{year}/{first_name}/{abbre_name}\n')
return True
run_number = os.listdir(f'{store_path}/{sample_type}/{year}/{first_name}/{abbre_name}')[0]
path = f'{store_path}/{sample_type}/{year}/{first_name}/{abbre_name}/{run_number}/0000/'
print (f'hadding root files in {path}')
r=subprocess.run(args=f"python $CMSSW_BASE/src/PhysicsTools/NanoAODTools/scripts/haddnano.py {abbre_name}.root {path}/*.root ", shell=True,stdout=subprocess.PIPE,encoding='utf-8')
if os.path.exists(f'{abbre_name}.root'):
print (f'hadd complete, please check {abbre_name}.root\n')
else:
print (f'hadd \033[31mfail\033[0m!!')
def report_lumi(name,sample_type,year):
abbre_name = get_abbre(name,sample_type,year)
if not os.path.exists(f'crab{year}/crab_{abbre_name}'):
print ("crab log for ",abbre_name," not existed, skipping \n")
return True
r=subprocess.run(args=f"crab report -d crab{year}/crab_{abbre_name}" ,shell=True,stdout=subprocess.PIPE,encoding='utf-8')
print (r.stdout,'\n')
if not os.path.exists(f'lumi_{year}'):
os.mkdir(f'lumi_{year}')
shutil.copy(f'crab{year}/crab_{abbre_name}/results/notFinishedLumis.json', f'lumi_{year}/{abbre_name}.json')
def resubmit(name,sample_type,year):
abbre_name = get_abbre(name,sample_type,year)
if not os.path.exists(f'crab{year}/crab_{abbre_name}'):
print ("crab log for ",abbre_name," not existed, skipping \n")
return True
print (f"resubmitting {abbre_name}\n")
r = subprocess.run(args=f"crab resubmit -d crab{year}/crab_{abbre_name}" ,shell=True,stdout=subprocess.PIPE,encoding='utf-8')
print (r.stdout,"\n")
if __name__=='__main__':
with open(args.file, "r") as f:
jsons = json.load(f)
f.close()
if args.mode == 'prepare':
for dataset in jsons:
prepare_crab(dataset['name'], dataset['type'], str(dataset['year']))
if args.mode == 'submit':
for dataset in jsons:
submit(dataset['name'], dataset['type'], str(dataset['year']))
if args.mode == 'kill':
for dataset in jsons:
kill(dataset['name'], dataset['type'], str(dataset['year']))
if args.mode == 'status':
for dataset in jsons:
status(dataset['name'], dataset['type'], str(dataset['year']))
if args.mode == 'hadd':
for dataset in jsons:
hadd_help(dataset['name'], dataset['type'], str(dataset['year']))
if args.mode == 'report':
for dataset in jsons:
if dataset['type'] == 'data':
report_lumi(dataset['name'], dataset['type'], str(dataset['year']))
if args.mode == 'resubmit':
for dataset in jsons:
resubmit(dataset['name'], dataset['type'], str(dataset['year']))
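# Typical invocations (sketch; the json file is a list of objects with
# "name", "type" and "year" keys, as read in __main__ above):
#   python crab_help.py -f samples_2018.json -m prepare
#   python crab_help.py -f samples_2018.json -m submit
#   python crab_help.py -f samples_2018.json -m status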
| [
"[email protected]"
]
| |
887bddb1e754a1c0a5b8fd3772556ac6728480ea | 9c73dd3043f7db7c9ec76d560484e99ad134fdb6 | /students/douglas_klos/lesson10/assignment/l05-decorator/src/database_operations.py | b5f1eeff53951ceb2b75cff5870e1c2c48f12788 | []
| no_license | UWPCE-PythonCert-ClassRepos/py220-online-201904-V2 | 546b316025b680ca28d24b523663095398616b13 | ac12beeae8aa57135bbcd03ac7a4f977fa3bdb56 | refs/heads/master | 2022-12-10T03:14:25.514630 | 2019-06-11T02:14:17 | 2019-06-11T02:14:17 | 179,139,181 | 1 | 19 | null | 2022-12-08T01:43:38 | 2019-04-02T18:49:10 | Python | UTF-8 | Python | false | false | 10,397 | py | #!/usr/bin/env python3
# pylint: disable=W0613
""" HPNorton API for accessing MongoDB collections """
from datetime import datetime
from multiprocessing import Process, Queue
from os.path import splitext, basename
from time import time
from wrapt import decorator
from loguru import logger
from pymongo import ASCENDING
from pymongo.errors import DuplicateKeyError
import src.mongodb_conn as mdb_conn
MONGO = mdb_conn.MongoDBConnection()
@decorator
def timer_logger(function, instance, args, kwargs):
""" Times functions and writes results to timings.txt """
start = datetime.now()
result = function(*args, **kwargs)
end = datetime.now()
with open("timings.txt", "a") as log_file:
log_file.write(
f"{function.__name__} "
f"called at {start.strftime('%Y-%m-%d %H:%M:%S')}\n"
)
if (
function.__name__ == "insert_to_mongo" or
function.__name__ == "get_line_from_file"
):
record_count = 0
for _ in open(args[0]):
record_count += 1
log_file.write(f"\tCalled on {record_count} records\n")
else:
log_file.write(f"\targs:{args}, kwargs:{kwargs}\n")
log_file.write(f"\tExecution time {end - start} seconds\n")
return result
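# Example of the lines appended to timings.txt by the decorator above
# (timestamps and counts are illustrative):
#   insert_to_mongo called at 2019-01-01 12:00:00
#       Called on 10000 records
#       Execution time 0:00:02.345678 seconds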
@timer_logger
def linear(files):
""" Import csv files into mongodatabase.
Arguments:
[file1, file2, file3, ...] -- list of files to import
Returns:
{{},{},,} -- {csv_file: {elapsed, fail, success, total_records},}
"""
return list(map(insert_to_mongo, files))
@timer_logger
def parallel(files):
""" Import csv files into mongodatabase.
Arguments:
[file1, file2, file3, ...] -- list of files to import
Returns:
{{},{},,} -- {csv_file: {elapsed, fail, success, total_records},}
"""
return list(map(join_process, list(map(start_process, files))))
@timer_logger
def start_process(csv_file):
""" Start process on given csv_file
Arguments:
csv_file {string} -- csv_file to start insert process on
Returns:
process, Queue -- process started, Queue with dict of results
"""
results = Queue()
process = Process(target=insert_to_mongo, args=(csv_file, results))
logger.info(f"Starting {process} : {csv_file}")
process.start()
return process, results
@timer_logger
def join_process(process):
""" Joins processes in process argument
Arguments:
[process1, process2, process3, ...] -- list of processes to join
Returns:
{collection_name: {"success", "fail", "total_records", "elapsed"}}
"""
logger.info(f"Joining process {process[0]}")
process[0].join()
return process[1].get()
# pylint: disable=R0914
@timer_logger
def insert_to_mongo(filename, results=None):
""" Inserts given csv file into mongo
Arguments:
filename {string} -- csv filename to import
Returns:
{collection_name: {"success", "fail", "total_records", "elapsed"}}
"""
success = 0
fail = 0
start = time()
collection_name, _ = splitext(basename(filename))
with MONGO as mdb:
logger.info(f"Inserting {collection_name} into Mongo...")
collection = mdb[collection_name]
iter_lines = get_line_from_file(filename)
header = next(iter_lines).split(",")
# Create the indicies for the collection
if collection.name[:6] != "rental":
collection.create_index(header[0], unique=True)
else:
collection.create_index(
[(header[0], ASCENDING), (header[5], ASCENDING)], unique=True
)
# Iterate through lines and insert records
for line in iter_lines:
line = line.split(",")
new_addition = {}
for num, field in enumerate(header):
new_addition[field] = line[num]
try:
collection.insert_one(new_addition)
success += 1
except DuplicateKeyError:
fail += 1
# This allows us to use the same insert function
# for both linear and parallel inserts.
return_dict = {
collection_name: {
"success": success,
"fail": fail,
"total_records": collection.count_documents({}),
"elapsed": time() - start,
}
}
# We get AttributeError for None.put() if in linear since we
# don't pass in a queue object.
try:
results.put(return_dict)
return 0
except AttributeError:
return return_dict
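# Example summary for a hypothetical "product.csv" import (values illustrative):
#   {"product": {"success": 10000, "fail": 0, "total_records": 10000, "elapsed": 2.3}}
# In parallel mode the same dict is put on the multiprocessing Queue instead.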
@timer_logger
def show_available_products():
""" Creates a list of currently available products
Returns:
{product_id: {"description", "product_type", "quantity_available"}}
"""
logger.info(f"Preparing dict of available prodcuts...")
available_products = {}
with MONGO as mdb:
products = mdb["product"]
for doc in products.find():
del doc["_id"]
if int(doc["quantity_available"]) > 0:
product_id = doc["product_id"]
del doc["product_id"]
available_products[product_id] = doc
return available_products
@timer_logger
def list_all_products():
""" Prepares a dictionary of all products
Returns:
{product_id: {"description", "product_type", "quantity_available"}}
"""
logger.info(f"Perparing dict of all products...")
all_products_dict = {}
with MONGO as mdb:
products = mdb["product"]
all_products = products.find({})
for product in all_products:
product_id = product["product_id"]
del product["_id"]
del product["product_id"]
all_products_dict[product_id] = product
return all_products_dict
@timer_logger
def list_all_rentals():
""" Prepares a dictionary of all rentals
Returns:
{user_id: {"address", "email", "name", "phone_number", "product_id"}}
"""
logger.info(f"Perparing dict of all rentals...")
all_rentals_dict = {}
with MONGO as mdb:
rentals = mdb["rental"]
all_rentals = rentals.find({})
for rental in all_rentals:
customer_id = rental["user_id"]
del rental["_id"]
del rental["user_id"]
all_rentals_dict[customer_id] = rental
return all_rentals_dict
@timer_logger
def list_all_customers():
""" Prepares a dictionary of all customers
Returns:
{user_id: {"credit_limit",
"email_address",
"home_address",
"last_name",
"name",
"phone_number",
"status"}}
"""
logger.info(f"Perparing dict of all customers...")
all_customers_dict = {}
with MONGO as mdb:
customers = mdb["customers"]
all_customers = customers.find({})
for customer in all_customers:
user_id = customer["user_id"]
del customer["_id"]
del customer["user_id"]
all_customers_dict[user_id] = customer
return all_customers_dict
@timer_logger
def rentals_for_customer(user_id):
"""Prepares a dict of products rented by user_id
Arguments:
user_id {string} -- user_id reference into product collection
Returns:
[{"description", "product_id", "product_type"}, {...}, ...]
"""
logger.info(f"Perparing customer dict for user_id: {user_id}...")
rentals_for_user = []
with MONGO as mdb:
rentals = mdb["rental"]
products = mdb["product"]
query = {"user_id": user_id}
# First we get a list of rentals for the specified user_id
for rental in rentals.find(query):
# Now we get product details from products via the product_id
query = {"product_id": rental["product_id"]}
for product in products.find(query):
del product["_id"]
del product["quantity_available"]
rentals_for_user.append(product)
return rentals_for_user
@timer_logger
def customers_renting_product(product_id):
"""Prepares a dict of customers renting product_id
Arguments:
product_id {string} -- product_id reference into rental collection
Returns:
[{"credit_limit",
"email_address",
"last_name",
"name",
"phone_number",
"status",
"user_id"}, {...}, ...]
"""
logger.info(f"Perparing rental dict for product_id: {product_id}...")
users_renting_product = []
with MONGO as mdb:
rentals = mdb["rental"]
customers = mdb["customers"]
query = {"product_id": product_id}
# First we get a list of customers for the specified product_id
for rental in rentals.find(query):
# Now we get customer details from customers via user_id
query = {"user_id": rental["user_id"]}
logger.info(rental["user_id"])
for customer in customers.find(query):
logger.info(customer)
del customer["_id"]
users_renting_product.append(customer)
return users_renting_product
@timer_logger
def get_line_from_file(filename):
""" Opens the file specified from the command line
Arguments:
filename {string} -- Name of CSV file to import
Returns:
list containing lines of customer data from csv file
"""
with open(filename, "rb") as content:
lines = content.read().decode("utf-8", errors="ignore").split("\n")
for line in lines:
yield line
@timer_logger
def drop_database():
""" Drops database """
logger.warning(f"Dropping {MONGO.database_name} database")
mdb = mdb_conn.MongoClient()
mdb.drop_database(MONGO.database_name)
@timer_logger
def drop_collections():
""" Drops collections from Mongo that are used for this program """
with MONGO as mdb:
logger.info(mdb.list_collection_names())
collections = list(
filter(lambda x: x != "system.indexes", mdb.list_collection_names())
)
for collection in collections:
logger.info(f"Dropping {collection}...")
mdb.drop_collection(collection)
logger.warning("Purge complete!")
| [
"[email protected]"
]
| |
e9497da1b7d035087b8eb5ad7e528fddf548b62a | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/pose_estimation/DeepPose/mmpose/apis/train.py | 76ad0f82d5df6af09e71b3f5aea7b724e26599de | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 6,446 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import warnings
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import DistSamplerSeedHook, EpochBasedRunner, OptimizerHook
from mmpose.core import DistEvalHook, EvalHook, build_optimizers
from mmpose.core.distributed_wrapper import DistributedDataParallelWrapper
from mmpose.datasets import build_dataloader, build_dataset
from mmpose.utils import get_root_logger
from apex import amp
from apex.optimizers import NpuFusedAdam
try:
from mmcv.runner import Fp16OptimizerHook
except ImportError:
warnings.warn('Fp16OptimizerHook from mmpose will be deprecated from '
'v0.15.0. Please install mmcv>=1.1.4')
from mmpose.core import Fp16OptimizerHook
from mmcv.runner.optimizer.builder import OPTIMIZERS
def train_model(model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None):
"""Train model entry function.
Args:
model (nn.Module): The model to be trained.
dataset (Dataset): Train dataset.
cfg (dict): The config dict for training.
distributed (bool): Whether to use distributed training.
Default: False.
validate (bool): Whether to do evaluation. Default: False.
timestamp (str | None): Local time for runner. Default: None.
meta (dict | None): Meta dict to record some important information.
Default: None
"""
logger = get_root_logger(cfg.log_level)
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
dataloader_setting = dict(
samples_per_gpu=cfg.data.get('samples_per_gpu', {}),
workers_per_gpu=cfg.data.get('workers_per_gpu', {}),
# cfg.gpus will be ignored if distributed
num_gpus=meta['world_size'],
dist=distributed,
seed=cfg.seed)
dataloader_setting = dict(dataloader_setting,
**cfg.data.get('train_dataloader', {}))
data_loaders = [
build_dataloader(ds, **dataloader_setting) for ds in dataset
]
    # determine whether to use the adversarial training process or not
    use_adverserial_train = cfg.get('use_adversarial_train', False)
    OPTIMIZERS.register_module(None, True, NpuFusedAdam)
# build runner
optimizer = build_optimizers(model, cfg.optimizer)
#apex
model.backbone, optimizer = amp.initialize(model.backbone,
optimizer,
opt_level='O2',
loss_scale=128,
combine_grad=True)
# put model on gpus
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', True)
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
if use_adverserial_train:
# Use DistributedDataParallelWrapper for adversarial training
model = DistributedDataParallelWrapper(
model,
device_ids=[meta['rank']],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
else:
model = MMDistributedDataParallel(
model,
device_ids=[meta['rank']],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
runner = EpochBasedRunner(
model,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta)
# an ugly workaround to make .log and .log.json filenames the same
runner.timestamp = timestamp
if use_adverserial_train:
# The optimizer step process is included in the train_step function
# of the model, so the runner should NOT include optimizer hook.
optimizer_config = None
else:
# fp16 setting
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
optimizer_config = Fp16OptimizerHook(
**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
elif distributed and 'type' not in cfg.optimizer_config:
optimizer_config = OptimizerHook(**cfg.optimizer_config)
else:
optimizer_config = cfg.optimizer_config
# register hooks
runner.register_training_hooks(cfg.lr_config, optimizer_config,
cfg.checkpoint_config, cfg.log_config,
cfg.get('momentum_config', None))
if distributed:
runner.register_hook(DistSamplerSeedHook())
# register eval hooks
if validate:
eval_cfg = cfg.get('evaluation', {})
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
dataloader_setting = dict(
samples_per_gpu=1,
workers_per_gpu=cfg.data.get('workers_per_gpu', {}),
# cfg.gpus will be ignored if distributed
num_gpus=len(cfg.gpu_ids),
dist=distributed,
drop_last=False,
shuffle=False)
dataloader_setting = dict(dataloader_setting,
**cfg.data.get('val_dataloader', {}))
val_dataloader = build_dataloader(val_dataset, **dataloader_setting)
eval_hook = DistEvalHook if distributed else EvalHook
runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
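# Minimal call sketch (illustrative; the config path and model construction are
# assumptions, not part of this file):
#   cfg = mmcv.Config.fromfile('configs/my_deeppose_config.py')
#   datasets = [build_dataset(cfg.data.train)]
#   train_model(model, datasets, cfg, distributed=False, validate=True,
#               meta=dict(world_size=1, rank=0))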
| [
"[email protected]"
]
| |
bd8e4b98babedd08b66d1135954ac48de6ff66b9 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R4/benchmark/startQiskit327.py | 8781c3f761d36325fd575c80bf67b3f550e6def0 | [
"BSD-3-Clause"
]
| permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,521 | py | # qubit number=3
# total number=12
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.swap(input_qubit[1],input_qubit[0]) # number=7
prog.y(input_qubit[3]) # number=8
prog.y(input_qubit[3]) # number=9
prog.cx(input_qubit[1],input_qubit[0]) # number=10
prog.cx(input_qubit[1],input_qubit[0]) # number=11
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit327.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| [
"[email protected]"
]
| |
1284b69fe8bdaab6e0a57bb5e72c81d69541cd45 | 59642dc8f6d4059d2d36f4f64a92d8edf30c33c6 | /plone/app/blocks/layoutbehavior.py | 0911c4725ca5b68aec437581eabf16baac0d6cec | []
| no_license | lrowe/plone.app.blocks | a855691f2b41ef8ad4b70d8a03c1076bcb1031f8 | 7a4df3a0aff953fe872f85b904ff5f51826ff7b1 | refs/heads/master | 2021-01-24T02:39:12.443662 | 2011-09-22T16:18:50 | 2011-09-22T16:18:50 | 2,428,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,444 | py | from zope.interface import implements, alsoProvides, Interface
from zope import schema
from plone.app.blocks.interfaces import IOmittedField
from plone.app.blocks.interfaces import ILayoutField
from plone.app.blocks.interfaces import _
class LayoutField(schema.Text):
"""A field used to store layout information
"""
implements(ILayoutField)
class ILayoutAware(Interface):
"""Behavior interface to make a type support layout.
"""
content = LayoutField(
title=_(u"Content"),
description=_(u"Content of the object"),
required=False,
)
pageSiteLayout = schema.Choice(
title=_(u"Page site layout"),
description=_(u"Site layout to apply to the current page"),
vocabulary="plone.availableSiteLayouts",
required=False,
)
sectionSiteLayout = schema.Choice(
title=_(u"Section site layout"),
description=_(u"Site layout to apply to pages under this section"),
vocabulary="plone.availableSiteLayouts",
required=False,
)
try:
from plone.autoform.interfaces import IFormFieldProvider
alsoProvides(ILayoutAware, IFormFieldProvider)
except ImportError:
pass
alsoProvides(ILayoutAware['content'], IOmittedField)
alsoProvides(ILayoutAware['pageSiteLayout'], IOmittedField)
alsoProvides(ILayoutAware['sectionSiteLayout'], IOmittedField)
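# Usage sketch (explanatory note): once this behavior is enabled for a Dexterity
# type, the fields can be read through adaptation, e.g.
#   layout = ILayoutAware(context)
#   layout.content          # stored page layout markup
#   layout.pageSiteLayout   # name of a registered site layout, or None
# Rendering of the stored layout is handled elsewhere in plone.app.blocks.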
| [
"[email protected]"
]
| |
87f332437c9bc34c9d50b234bcc08bcea6e2ca64 | 23fddc940a266c2d1d0e0b1687c36cdbcc9d54d9 | /shared/db_opear/configs_data/equipment/equipment_strengthen_config.py | 90fb9e642feb720d0537cc1c61fd07df596fbc39 | []
| no_license | Cuick/traversing | 210fcfb1c780037de59343fffeb4fa4d3f2eae32 | c78982580af7f63c8bff4dcb37005b7f7c682b5b | refs/heads/master | 2021-01-10T17:38:37.899460 | 2016-11-18T06:06:55 | 2016-11-18T06:06:55 | 55,397,540 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # -*- coding:utf-8 -*-
"""
created by server on 2014-07-07 at 2:15 PM.
"""
from shared.db_opear.configs_data.common_item import CommonItem
class EquipmentStrengthenConfig(object):
def __init__(self):
self._equipment_strengthen = {}
def parser(self, config_value):
for row in config_value:
self._equipment_strengthen[row.get('level')] = CommonItem(row)
return self._equipment_strengthen
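# Usage sketch (field names besides 'level' are hypothetical):
#   cfg = EquipmentStrengthenConfig()
#   table = cfg.parser([{'level': 1, 'cost': 100}, {'level': 2, 'cost': 250}])
#   table[2]  # -> CommonItem wrapping the level-2 row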
| [
"[email protected]"
]
| |
b528cfce7e6dc4f36240451dbd03f49e575f7fc2 | 753a70bc416e8dced2853f278b08ef60cdb3c768 | /include/tensorflow/lite/testing/op_tests/unfused_gru.py | 786a429fe9aa2232dff76f93ef089a132bee077a | [
"MIT"
]
| permissive | finnickniu/tensorflow_object_detection_tflite | ef94158e5350613590641880cb3c1062f7dd0efb | a115d918f6894a69586174653172be0b5d1de952 | refs/heads/master | 2023-04-06T04:59:24.985923 | 2022-09-20T16:29:08 | 2022-09-20T16:29:08 | 230,891,552 | 60 | 19 | MIT | 2023-03-25T00:31:18 | 2019-12-30T09:58:41 | C++ | UTF-8 | Python | false | false | 2,400 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for unfused_gru."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_unfused_gru_tests(options):
"""Make a set of tests for unfused gru op."""
test_parameters = [{
"units": [2, 5],
"batch_size": [1, 2],
"time": [3],
}]
def build_graph(parameters):
"""Build the graph for unfused_gru."""
inputs = [
tf.compat.v1.placeholder(
tf.float32, [parameters["batch_size"], parameters["units"]])
for _ in range(parameters["time"])
]
cell_fw = tf.compat.v1.nn.rnn_cell.GRUCell(parameters["units"])
cell_bw = tf.compat.v1.nn.rnn_cell.GRUCell(parameters["units"])
outputs, _, _ = tf.compat.v1.nn.static_bidirectional_rnn(
cell_fw, cell_bw, inputs, dtype=tf.float32)
return inputs, outputs
def build_inputs(parameters, sess, inputs, outputs):
"""Build the inputs for unfused_gru."""
input_values = [
create_tensor_data(tf.float32,
[parameters["batch_size"], parameters["units"]])
for _ in range(parameters["time"])
]
init = tf.compat.v1.global_variables_initializer()
sess.run(init)
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
| [
"[email protected]"
]
| |
a2a61cea90544794a4610062d9a3c01c4da83557 | 9b1c5c3fb40ca4fbd2123a321296d6b7924a84ad | /core/models.py | e2db994d3c85f0abeee175ad5900bd6f46e758b7 | []
| no_license | thepsalmist/theStore | 4ab3874605d45014ebe72bbf2303bf453afd0f17 | 7586b1f425925ecf6b08f8ac6b11e4381604d616 | refs/heads/master | 2022-03-27T08:12:01.020333 | 2019-12-18T09:00:40 | 2019-12-18T09:00:40 | 226,167,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,033 | py | from django.db import models
from django.conf import settings
from django.shortcuts import reverse
from django.utils import timezone
from PIL import Image
# create label choices
LABEL_CHOICES = (("P", "primary"), ("S", "secondary"), ("D", "danger"))
class Category(models.Model):
    # choices tuples: the first value is stored in the DB, the second is displayed on screen
CATEGOTY_CHOICES = (
("EDUCATION", "Education"),
("BUSINESS", "Business"),
("DESIGN", "Design"),
("SECURITY", "Security"),
("GAMES", "Games"),
)
title = models.CharField(choices=CATEGOTY_CHOICES,
max_length=20, default="BUSINESS")
slug = models.SlugField(max_length=200, db_index=True)
class Meta:
ordering = ("title",)
verbose_name = "category"
verbose_name_plural = "categories"
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("core:home", args=[self.slug])
class Brand(models.Model):
BRAND_CHOICES = (
("MICROSOFT", "Microsoft"),
("ADOBE", "Adobe"),
("AVAST", "Avast"),
("KASPERSKY", "Kaspersky"),
("NORTON", "Norton"),
)
title = models.CharField(choices=BRAND_CHOICES,
max_length=20, default="MICROSOFT")
slug = models.SlugField(max_length=200, db_index=True)
class Meta:
ordering = ("title",)
verbose_name = "brand"
verbose_name_plural = "brands"
def __str__(self):
return self.title
class Item(models.Model):
title = models.CharField(max_length=100)
description = models.TextField()
price = models.FloatField()
available = models.BooleanField(default=True)
timestamp = models.DateTimeField(auto_now_add=True)
discount_price = models.FloatField(blank=True, null=True)
category = models.ForeignKey(
Category, on_delete=models.CASCADE, blank=True, default=1
)
brand = models.ForeignKey(
Brand, on_delete=models.CASCADE, blank=True, default=1)
label = models.CharField(choices=LABEL_CHOICES, max_length=1, default="P")
image = models.ImageField(default="default.jpg",
upload_to="Items/%Y/%M/%d")
slug = models.SlugField(max_length=200, db_index=True)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("core:product", kwargs={"slug": self.slug, "id": self.id})
def get_add_to_cart_url(self):
return reverse("core:add_to_cart", kwargs={"slug": self.slug, "id": self.id})
def remove_from_cart_url(self):
return reverse(
"core:remove_from_cart", kwargs={"slug": self.slug, "id": self.id}
)
class OrderItem(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE, blank=True, null=True
)
item = models.ForeignKey(Item, on_delete=models.CASCADE)
ordered = models.BooleanField(default=False)
quantity = models.IntegerField(default=1)
def __str__(self):
return f"{self.quantity} of {self.item.title}"
def get_total_item_price(self):
return self.quantity * self.item.price
def get_total_discount_item_price(self):
return self.quantity * self.item.discount_price
def get_final_price(self):
if self.item.discount_price:
return self.get_total_discount_item_price()
return self.get_total_item_price()
class Order(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
items = models.ManyToManyField(OrderItem)
startdate = models.DateTimeField(auto_now_add=True)
ordered_date = models.DateTimeField()
ordered = models.BooleanField(default=False)
def __str__(self):
return self.user.username
def get_total(self):
total = 0
for order_item in self.items.all():
total += order_item.get_final_price()
return total
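# Illustrative price-calculation sketch (unsaved example objects, not fixtures):
#   item = Item(title="Office Suite", price=99.0, discount_price=79.0)
#   line = OrderItem(item=item, quantity=2)
#   line.get_total_item_price()   # -> 198.0
#   line.get_final_price()        # -> 158.0, the discount price is used when set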
| [
"[email protected]"
]
| |
16530aaeb3ec40a88224b23b12eeb67446521cef | 49c2e3ebf7f5d2f79af6e26c44b4d07ec14a20d5 | /Hello World/venv/Lib/site-packages/pip/_internal/download.py | 68881b39d435c5938b8997336579a8eec65ffbbd | []
| no_license | TaylorHoll/Python_Projects | a0d86642463bdc5b3ea67dae0146c115185c1db2 | a8285b058ed0b4e0a366753d61526056dab23cd3 | refs/heads/master | 2020-06-13T09:04:29.666639 | 2020-01-07T03:40:25 | 2020-01-07T03:40:25 | 194,608,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,146 | py | from __future__ import absolute_import
import os
import re
import sys
import cgi
import email.utils
import getpass
import json
import logging
import mimetypes
import pip
import platform
import shutil
from pip._internal.exceptions import HashMismatch, InstallationError
from pip._internal.locations import write_delete_marker_file
from pip._internal.models.index import PyPI
from pip._internal.utils.encoding import auto_decode
from pip._internal.utils.filesystem import check_path_owner
from pip._internal.utils.glibc import libc_ver
from pip._internal.utils.misc import (
ARCHIVE_EXTENSIONS, ask_path_exists, backup_dir, consume, display_path,
format_size, get_installed_version, rmtree, split_auth_from_netloc,
splitext, unpack_file,
)
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.ui import DownloadProgressProvider
from pip._internal.vcs import vcs
from pip._vendor import requests, six, urllib3
from pip._vendor.cachecontrol import CacheControlAdapter
from pip._vendor.cachecontrol.caches import FileCache
from pip._vendor.lockfile import LockError
from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
from pip._vendor.requests.structures import CaseInsensitiveDict
from pip._vendor.requests.utils import get_netrc_auth
# NOTE: XMLRPC Client is not annotated in typeshed as on 2017-07-17, which is
# why we ignore the type on this import
from pip._vendor.six.moves import xmlrpc_client # type: ignore
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip._vendor.urllib3.util import IS_PYOPENSSL
if MYPY_CHECK_RUNNING:
from typing import (
Optional, Tuple, Dict, IO, Text, Union
)
from pip._internal.models.link import Link
from pip._internal.utils.hashes import Hashes
from pip._internal.vcs import AuthInfo
try:
import ssl # noqa
except ImportError:
ssl = None
HAS_TLS = (ssl is not None) or IS_PYOPENSSL
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url',
'unpack_http_url', 'unpack_url']
logger = logging.getLogger(__name__)
# These are environment variables present when running under various
# CI systems. For each variable, some CI systems that use the variable
# are indicated. The collection was chosen so that for each of a number
# of popular systems, at least one of the environment variables is used.
# This list is used to provide some indication of and lower bound for
# CI traffic to PyPI. Thus, it is okay if the list is not comprehensive.
# For more background, see: https://github.com/pypa/pip/issues/5499
CI_ENVIRONMENT_VARIABLES = (
# Azure Pipelines
'BUILD_BUILDID',
# Jenkins
'BUILD_ID',
# AppVeyor, CircleCI, Codeship, Gitlab CI, Shippable, Travis CI
'CI',
)
def looks_like_ci():
# type: () -> bool
"""
Return whether it looks like pip is running under CI.
"""
# We don't use the method of checking for a tty (e.g. using isatty())
# because some CI systems mimic a tty (e.g. Travis CI). Thus that
# method doesn't provide definitive information in either direction.
return any(name in os.environ for name in CI_ENVIRONMENT_VARIABLES)
def user_agent():
"""
Return a string representing the user agent.
"""
data = {
"installer": {"name": "pip", "version": pip.__version__},
"python": platform.python_version(),
"implementation": {
"name": platform.python_implementation(),
},
}
if data["implementation"]["name"] == 'CPython':
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'PyPy':
if sys.pypy_version_info.releaselevel == 'final':
pypy_version_info = sys.pypy_version_info[:3]
else:
pypy_version_info = sys.pypy_version_info
data["implementation"]["version"] = ".".join(
[str(x) for x in pypy_version_info]
)
elif data["implementation"]["name"] == 'Jython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'IronPython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
if sys.platform.startswith("linux"):
from pip._vendor import distro
distro_infos = dict(filter(
lambda x: x[1],
zip(["name", "version", "id"], distro.linux_distribution()),
))
libc = dict(filter(
lambda x: x[1],
zip(["lib", "version"], libc_ver()),
))
if libc:
distro_infos["libc"] = libc
if distro_infos:
data["distro"] = distro_infos
if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]}
if platform.system():
data.setdefault("system", {})["name"] = platform.system()
if platform.release():
data.setdefault("system", {})["release"] = platform.release()
if platform.machine():
data["cpu"] = platform.machine()
if HAS_TLS:
data["openssl_version"] = ssl.OPENSSL_VERSION
setuptools_version = get_installed_version("setuptools")
if setuptools_version is not None:
data["setuptools_version"] = setuptools_version
# Use None rather than False so as not to give the impression that
# pip knows it is not being run under CI. Rather, it is a null or
# inconclusive result. Also, we include some value rather than no
# value to make it easier to know that the check has been run.
data["ci"] = True if looks_like_ci() else None
user_data = os.environ.get("PIP_USER_AGENT_USER_DATA")
if user_data is not None:
data["user_data"] = user_data
return "{data[installer][name]}/{data[installer][version]} {json}".format(
data=data,
json=json.dumps(data, separators=(",", ":"), sort_keys=True),
)
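# Example of the resulting User-Agent value (versions and platform details are
# illustrative, and the JSON is abridged):
#   pip/19.0.3 {"ci":null,"cpu":"x86_64","implementation":{"name":"CPython",
#   "version":"3.7.3"},"installer":{"name":"pip","version":"19.0.3"},
#   "python":"3.7.3","system":{"name":"Linux","release":"4.15.0"},...}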
class MultiDomainBasicAuth(AuthBase):
def __init__(self, prompting=True):
# type: (bool) -> None
self.prompting = prompting
self.passwords = {} # type: Dict[str, AuthInfo]
def __call__(self, req):
parsed = urllib_parse.urlparse(req.url)
# Split the credentials from the netloc.
netloc, url_user_password = split_auth_from_netloc(parsed.netloc)
# Set the url of the request to the url without any credentials
req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
# Use any stored credentials that we have for this netloc
username, password = self.passwords.get(netloc, (None, None))
# Use the credentials embedded in the url if we have none stored
if username is None:
username, password = url_user_password
# Get creds from netrc if we still don't have them
if username is None and password is None:
netrc_auth = get_netrc_auth(req.url)
username, password = netrc_auth if netrc_auth else (None, None)
if username or password:
# Store the username and password
self.passwords[netloc] = (username, password)
# Send the basic auth with this request
req = HTTPBasicAuth(username or "", password or "")(req)
# Attach a hook to handle 401 responses
req.register_hook("response", self.handle_401)
return req
def handle_401(self, resp, **kwargs):
# We only care about 401 responses, anything else we want to just
# pass through the actual response
if resp.status_code != 401:
return resp
# We are not able to prompt the user so simply return the response
if not self.prompting:
return resp
parsed = urllib_parse.urlparse(resp.url)
# Prompt the user for a new username and password
username = six.moves.input("User for %s: " % parsed.netloc)
password = getpass.getpass("Password: ")
# Store the new username and password to use for future requests
if username or password:
self.passwords[parsed.netloc] = (username, password)
# Consume content and release the original connection to allow our new
# request to reuse the same one.
resp.content
resp.raw.release_conn()
# Add our new username and password to the request
req = HTTPBasicAuth(username or "", password or "")(resp.request)
req.register_hook("response", self.warn_on_401)
# Send our new request
new_resp = resp.connection.send(req, **kwargs)
new_resp.history.append(resp)
return new_resp
def warn_on_401(self, resp, **kwargs):
# warn user that they provided incorrect credentials
if resp.status_code == 401:
logger.warning('401 Error, Credentials not correct for %s',
resp.request.url)
class LocalFSAdapter(BaseAdapter):
def send(self, request, stream=None, timeout=None, verify=None, cert=None,
proxies=None):
pathname = url_to_path(request.url)
resp = Response()
resp.status_code = 200
resp.url = request.url
try:
stats = os.stat(pathname)
except OSError as exc:
resp.status_code = 404
resp.raw = exc
else:
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
resp.headers = CaseInsensitiveDict({
"Content-Type": content_type,
"Content-Length": stats.st_size,
"Last-Modified": modified,
})
resp.raw = open(pathname, "rb")
resp.close = resp.raw.close
return resp
def close(self):
pass
class SafeFileCache(FileCache):
"""
A file based cache which is safe to use even when the target directory may
not be accessible or writable.
"""
def __init__(self, *args, **kwargs):
super(SafeFileCache, self).__init__(*args, **kwargs)
# Check to ensure that the directory containing our cache directory
# is owned by the user current executing pip. If it does not exist
# we will check the parent directory until we find one that does exist.
# If it is not owned by the user executing pip then we will disable
# the cache and log a warning.
if not check_path_owner(self.directory):
logger.warning(
"The directory '%s' or its parent directory is not owned by "
"the current user and the cache has been disabled. Please "
"check the permissions and owner of that directory. If "
"executing pip with sudo, you may want sudo's -H flag.",
self.directory,
)
# Set our directory to None to disable the Cache
self.directory = None
def get(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).get(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
def set(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).set(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
def delete(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).delete(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
class InsecureHTTPAdapter(HTTPAdapter):
def cert_verify(self, conn, url, verify, cert):
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
class PipSession(requests.Session):
timeout = None # type: Optional[int]
def __init__(self, *args, **kwargs):
retries = kwargs.pop("retries", 0)
cache = kwargs.pop("cache", None)
insecure_hosts = kwargs.pop("insecure_hosts", [])
super(PipSession, self).__init__(*args, **kwargs)
# Attach our User Agent to the request
self.headers["User-Agent"] = user_agent()
# Attach our Authentication handler to the session
self.auth = MultiDomainBasicAuth()
# Create our urllib3.Retry instance which will allow us to customize
# how we handle retries.
retries = urllib3.Retry(
# Set the total number of retries that a particular request can
# have.
total=retries,
# A 503 error from PyPI typically means that the Fastly -> Origin
# connection got interrupted in some way. A 503 error in general
# is typically considered a transient error so we'll go ahead and
# retry it.
# A 500 may indicate transient error in Amazon S3
# A 520 or 527 - may indicate transient error in CloudFlare
status_forcelist=[500, 503, 520, 527],
# Add a small amount of back off between failed requests in
# order to prevent hammering the service.
backoff_factor=0.25,
)
# We want to _only_ cache responses on securely fetched origins. We do
# this because we can't validate the response of an insecurely fetched
# origin, and we don't want someone to be able to poison the cache and
# require manual eviction from the cache to fix it.
if cache:
secure_adapter = CacheControlAdapter(
cache=SafeFileCache(cache, use_dir_lock=True),
max_retries=retries,
)
else:
secure_adapter = HTTPAdapter(max_retries=retries)
# Our Insecure HTTPAdapter disables HTTPS validation. It does not
# support caching (see above) so we'll use it for all http:// URLs as
# well as any https:// host that we've marked as ignoring TLS errors
# for.
insecure_adapter = InsecureHTTPAdapter(max_retries=retries)
self.mount("https://", secure_adapter)
self.mount("http://", insecure_adapter)
# Enable file:// urls
self.mount("file://", LocalFSAdapter())
# We want to use a non-validating adapter for any requests which are
# deemed insecure.
for host in insecure_hosts:
self.mount("https://{}/".format(host), insecure_adapter)
def request(self, method, url, *args, **kwargs):
# Allow setting a default timeout on a session
kwargs.setdefault("timeout", self.timeout)
# Dispatch the actual request
return super(PipSession, self).request(method, url, *args, **kwargs)
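# A minimal construction sketch (illustrative only -- the cache path and host
# below are made-up values, not defaults used by pip itself):
#
#   session = PipSession(
#       retries=3,                           # wrapped into the urllib3.Retry above
#       cache="/tmp/pip-http-cache",         # enables the caching HTTPS adapter
#       insecure_hosts=["pypi.internal"],    # https://pypi.internal/ skips TLS verification
#   )
#   session.timeout = 30
#   resp = session.get("https://pypi.org/simple/")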
def get_file_content(url, comes_from=None, session=None):
# type: (str, Optional[str], Optional[PipSession]) -> Tuple[str, Text]
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode.
:param url: File path or url.
:param comes_from: Origin description of requirements.
:param session: Instance of pip.download.PipSession.
"""
if session is None:
raise TypeError(
"get_file_content() missing 1 required keyword argument: 'session'"
)
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from and
comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib_parse.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
# FIXME: catch some errors
resp = session.get(url)
resp.raise_for_status()
return resp.url, resp.text
try:
with open(url, 'rb') as f:
content = auto_decode(f.read())
except IOError as exc:
raise InstallationError(
'Could not open requirements file: %s' % str(exc)
)
return url, content
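# Illustrative calls (the paths and URLs are hypothetical):
#
#   _, text = get_file_content("requirements.txt", session=session)              # local file
#   _, text = get_file_content("file:///tmp/requirements.txt", session=session)  # file: URL
#   _, text = get_file_content("https://example.com/reqs.txt", session=session)  # fetched via the session
#
# Calling it without a session raises TypeError, as enforced above.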
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
# type: (Union[str, Text]) -> bool
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
# type: (str) -> str
"""
Convert a file: URL to a path.
"""
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not %r)" % url)
_, netloc, path, _, _ = urllib_parse.urlsplit(url)
if not netloc or netloc == 'localhost':
# According to RFC 8089, same as empty authority.
netloc = ''
elif sys.platform == 'win32':
# If we have a UNC path, prepend UNC share notation.
netloc = '\\\\' + netloc
else:
raise ValueError(
'non-local file URIs are not supported on this platform: %r'
% url
)
path = urllib_request.url2pathname(netloc + path)
return path
def path_to_url(path):
# type: (Union[str, Text]) -> str
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path))
return url
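# Rough round-trip sketch (POSIX-style example values):
#
#   path_to_url("/home/user/pkgs/demo-1.0.tar.gz")       -> "file:///home/user/pkgs/demo-1.0.tar.gz"
#   url_to_path("file:///home/user/pkgs/demo-1.0.tar.gz") -> "/home/user/pkgs/demo-1.0.tar.gz"
#
# On Windows, a non-empty host such as "file://server/share/pkg.whl" is treated
# as a UNC share by url_to_path(); on other platforms it raises ValueError.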
def is_archive_file(name):
# type: (str) -> bool
"""Return True if `name` is a considered as an archive file."""
ext = splitext(name)[1].lower()
if ext in ARCHIVE_EXTENSIONS:
return True
return False
def unpack_vcs_link(link, location):
vcs_backend = _get_used_vcs_backend(link)
vcs_backend.unpack(location)
def _get_used_vcs_backend(link):
for backend in vcs.backends:
if link.scheme in backend.schemes:
vcs_backend = backend(link.url)
return vcs_backend
def is_vcs_url(link):
# type: (Link) -> bool
return bool(_get_used_vcs_backend(link))
def is_file_url(link):
# type: (Link) -> bool
return link.url.lower().startswith('file:')
def is_dir_url(link):
# type: (Link) -> bool
"""Return whether a file:// Link points to a directory.
``link`` must not have any other scheme but file://. Call is_file_url()
first.
"""
link_path = url_to_path(link.url_without_fragment)
return os.path.isdir(link_path)
def _progress_indicator(iterable, *args, **kwargs):
return iterable
def _download_url(
resp, # type: Response
link, # type: Link
content_file, # type: IO
hashes, # type: Hashes
progress_bar # type: str
):
# type: (...) -> None
try:
total_length = int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
total_length = 0
cached_resp = getattr(resp, "from_cache", False)
if logger.getEffectiveLevel() > logging.INFO:
show_progress = False
elif cached_resp:
show_progress = False
elif total_length > (40 * 1000):
show_progress = True
elif not total_length:
show_progress = True
else:
show_progress = False
show_url = link.show_url
def resp_read(chunk_size):
try:
# Special case for urllib3.
for chunk in resp.raw.stream(
chunk_size,
# We use decode_content=False here because we don't
# want urllib3 to mess with the raw bytes we get
# from the server. If we decompress inside of
# urllib3 then we cannot verify the checksum
# because the checksum will be of the compressed
# file. This breakage will only occur if the
# server adds a Content-Encoding header, which
# depends on how the server was configured:
# - Some servers will notice that the file isn't a
# compressible file and will leave the file alone
# and with an empty Content-Encoding
# - Some servers will notice that the file is
# already compressed and will leave the file
# alone and will add a Content-Encoding: gzip
# header
# - Some servers won't notice anything at all and
# will take a file that's already been compressed
# and compress it again and set the
# Content-Encoding: gzip header
#
# By setting this not to decode automatically we
# hope to eliminate problems with the second case.
decode_content=False):
yield chunk
except AttributeError:
# Standard file-like object.
while True:
chunk = resp.raw.read(chunk_size)
if not chunk:
break
yield chunk
def written_chunks(chunks):
for chunk in chunks:
content_file.write(chunk)
yield chunk
progress_indicator = _progress_indicator
if link.netloc == PyPI.netloc:
url = show_url
else:
url = link.url_without_fragment
if show_progress: # We don't show progress on cached responses
progress_indicator = DownloadProgressProvider(progress_bar,
max=total_length)
if total_length:
logger.info("Downloading %s (%s)", url, format_size(total_length))
else:
logger.info("Downloading %s", url)
elif cached_resp:
logger.info("Using cached %s", url)
else:
logger.info("Downloading %s", url)
logger.debug('Downloading from URL %s', link)
downloaded_chunks = written_chunks(
progress_indicator(
resp_read(CONTENT_CHUNK_SIZE),
CONTENT_CHUNK_SIZE
)
)
if hashes:
hashes.check_against_chunks(downloaded_chunks)
else:
consume(downloaded_chunks)
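# The download above is a lazy generator pipeline: resp_read() yields raw,
# undecoded chunks, written_chunks() writes each chunk to content_file as it
# passes through, and the hash checker (or consume()) is what actually drains
# the iterator -- roughly equivalent to:
#
#   for chunk in downloaded_chunks:
#       pass  # hashing happens per chunk when `hashes` is provided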
def _copy_file(filename, location, link):
copy = True
download_location = os.path.join(location, link.filename)
if os.path.exists(download_location):
response = ask_path_exists(
            'The file %s exists. (i)gnore, (w)ipe, (b)ackup, (a)bort' %
display_path(download_location), ('i', 'w', 'b', 'a'))
if response == 'i':
copy = False
elif response == 'w':
logger.warning('Deleting %s', display_path(download_location))
os.remove(download_location)
elif response == 'b':
dest_file = backup_dir(download_location)
logger.warning(
'Backing up %s to %s',
display_path(download_location),
display_path(dest_file),
)
shutil.move(download_location, dest_file)
elif response == 'a':
sys.exit(-1)
if copy:
shutil.copy(filename, download_location)
logger.info('Saved %s', display_path(download_location))
def unpack_http_url(
link, # type: Link
location, # type: str
download_dir=None, # type: Optional[str]
session=None, # type: Optional[PipSession]
hashes=None, # type: Optional[Hashes]
progress_bar="on" # type: str
):
# type: (...) -> None
if session is None:
raise TypeError(
"unpack_http_url() missing 1 required keyword argument: 'session'"
)
with TempDirectory(kind="unpack") as temp_dir:
# If a download dir is specified, is the file already downloaded there?
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link,
download_dir,
hashes)
if already_downloaded_path:
from_path = already_downloaded_path
content_type = mimetypes.guess_type(from_path)[0]
else:
# let's download to a tmp dir
from_path, content_type = _download_http_url(link,
session,
temp_dir.path,
hashes,
progress_bar)
# unpack the archive to the build dir location. even when only
# downloading archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified; let's copy the archive there
if download_dir and not already_downloaded_path:
_copy_file(from_path, download_dir, link)
if not already_downloaded_path:
os.unlink(from_path)
def unpack_file_url(
link, # type: Link
location, # type: str
download_dir=None, # type: Optional[str]
hashes=None # type: Optional[Hashes]
):
# type: (...) -> None
"""Unpack link into location.
If download_dir is provided and link points to a file, make a copy
of the link file inside download_dir.
"""
link_path = url_to_path(link.url_without_fragment)
# If it's a url to a local directory
if is_dir_url(link):
if os.path.isdir(location):
rmtree(location)
shutil.copytree(link_path, location, symlinks=True)
if download_dir:
logger.info('Link is a directory, ignoring download_dir')
return
# If --require-hashes is off, `hashes` is either empty, the
# link's embedded hash, or MissingHashes; it is required to
# match. If --require-hashes is on, we are satisfied by any
# hash in `hashes` matching: a URL-based or an option-based
# one; no internet-sourced hash will be in `hashes`.
if hashes:
hashes.check_against_path(link_path)
# If a download dir is specified, is the file already there and valid?
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link,
download_dir,
hashes)
if already_downloaded_path:
from_path = already_downloaded_path
else:
from_path = link_path
content_type = mimetypes.guess_type(from_path)[0]
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified and not already downloaded
if download_dir and not already_downloaded_path:
_copy_file(from_path, download_dir, link)
class PipXmlrpcTransport(xmlrpc_client.Transport):
"""Provide a `xmlrpclib.Transport` implementation via a `PipSession`
object.
"""
def __init__(self, index_url, session, use_datetime=False):
xmlrpc_client.Transport.__init__(self, use_datetime)
index_parts = urllib_parse.urlparse(index_url)
self._scheme = index_parts.scheme
self._session = session
def request(self, host, handler, request_body, verbose=False):
parts = (self._scheme, host, handler, None, None, None)
url = urllib_parse.urlunparse(parts)
try:
headers = {'Content-Type': 'text/xml'}
response = self._session.post(url, data=request_body,
headers=headers, stream=True)
response.raise_for_status()
self.verbose = verbose
return self.parse_response(response.raw)
except requests.HTTPError as exc:
logger.critical(
"HTTP error %s while getting %s",
exc.response.status_code, url,
)
raise
def unpack_url(
link, # type: Optional[Link]
location, # type: Optional[str]
download_dir=None, # type: Optional[str]
only_download=False, # type: bool
session=None, # type: Optional[PipSession]
hashes=None, # type: Optional[Hashes]
progress_bar="on" # type: str
):
# type: (...) -> None
"""Unpack link.
If link is a VCS link:
if only_download, export into download_dir and ignore location
else unpack into location
for other types of link:
- unpack into location
- if download_dir, copy the file into download_dir
- if only_download, mark location for deletion
:param hashes: A Hashes object, one of whose embedded hashes must match,
or HashMismatch will be raised. If the Hashes is empty, no matches are
required, and unhashable types of requirements (like VCS ones, which
would ordinarily raise HashUnsupported) are allowed.
"""
# non-editable vcs urls
if is_vcs_url(link):
unpack_vcs_link(link, location)
# file urls
elif is_file_url(link):
unpack_file_url(link, location, download_dir, hashes=hashes)
# http urls
else:
if session is None:
session = PipSession()
unpack_http_url(
link,
location,
download_dir,
session,
hashes=hashes,
progress_bar=progress_bar
)
if only_download:
write_delete_marker_file(location)
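# Dispatch sketch: a VCS link such as "git+https://example.com/repo.git" goes to
# unpack_vcs_link(), a "file:" link to unpack_file_url(), and anything else to
# unpack_http_url() over the given (or a freshly created) PipSession.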
def _download_http_url(
link, # type: Link
session, # type: PipSession
temp_dir, # type: str
hashes, # type: Hashes
progress_bar # type: str
):
# type: (...) -> Tuple[str, str]
"""Download link url into temp_dir using provided session"""
target_url = link.url.split('#', 1)[0]
try:
resp = session.get(
target_url,
# We use Accept-Encoding: identity here because requests
# defaults to accepting compressed responses. This breaks in
# a variety of ways depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible
# file and will leave the file alone and with an empty
# Content-Encoding
# - Some servers will notice that the file is already
# compressed and will leave the file alone and will add a
# Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take
# a file that's already been compressed and compress it again
# and set the Content-Encoding: gzip header
# By setting this to request only the identity encoding We're
# hoping to eliminate the third case. Hopefully there does not
# exist a server which when given a file will notice it is
# already compressed and that you're not asking for a
# compressed file and will then decompress it before sending
# because if that's the case I don't think it'll ever be
# possible to make this work.
headers={"Accept-Encoding": "identity"},
stream=True,
)
resp.raise_for_status()
except requests.HTTPError as exc:
logger.critical(
"HTTP error %s while getting %s", exc.response.status_code, link,
)
raise
content_type = resp.headers.get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.headers.get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param.
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != resp.url:
ext = os.path.splitext(resp.url)[1]
if ext:
filename += ext
file_path = os.path.join(temp_dir, filename)
with open(file_path, 'wb') as content_file:
_download_url(resp, link, content_file, hashes, progress_bar)
return file_path, content_type
def _check_download_dir(link, download_dir, hashes):
# type: (Link, str, Hashes) -> Optional[str]
""" Check download_dir for previously downloaded file with correct hash
If a correct file is found return its path else None
"""
download_path = os.path.join(download_dir, link.filename)
if os.path.exists(download_path):
# If already downloaded, does its hash match?
logger.info('File was already downloaded %s', download_path)
if hashes:
try:
hashes.check_against_path(download_path)
except HashMismatch:
logger.warning(
'Previously-downloaded file %s has bad hash. '
'Re-downloading.',
download_path
)
os.unlink(download_path)
return None
return download_path
return None
| [
"[email protected]"
]
| |
4099f77a7a036f3c253bc37298b7830e20d7ebb7 | 1c4ec7ac50dd40056263bf039020df50524481ee | /tensorflow/python/kernel_tests/partitioned_variables_test.py | 0c0465619694996d9f05c96266f45d8ce8f55a9f | [
"Apache-2.0"
]
| permissive | Gruschwick/tensorflow | 51a039fb842d69cbade5927c7716f6e83b08f2a5 | d9a888df836864ea59132df2fad5bf466b02cd09 | refs/heads/master | 2020-04-08T07:46:29.048999 | 2019-01-07T22:03:15 | 2019-01-07T22:03:15 | 159,151,026 | 1 | 0 | Apache-2.0 | 2019-01-20T19:07:51 | 2018-11-26T10:24:39 | C++ | UTF-8 | Python | false | false | 26,420 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for partitioned_variables.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
class PartitionerCreatorsTest(test.TestCase):
def testFixedSizePartitioner(self):
with self.cached_session():
partitioner = partitioned_variables.fixed_size_partitioner(5, axis=0)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable(
"v0", dtype=dtypes.float32, shape=(10, 10))
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), 5)
self.assertAllEqual(v0_part, (5, 1))
def testFixedSizePartitionerInt64(self):
with self.cached_session():
partitioner = partitioned_variables.fixed_size_partitioner(4, axis=0)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable("v0", dtype=dtypes.int64, shape=[20])
v0_list = v0._get_variable_list()
self.assertEqual(len(v0_list), 4)
def testResourceFixedSizePartitioner(self):
with self.cached_session():
partitioner = partitioned_variables.fixed_size_partitioner(5, axis=0)
with variable_scope.variable_scope(
"root", partitioner=partitioner, use_resource=True):
v0 = variable_scope.get_variable(
"v0", dtype=dtypes.float32, shape=(10, 10))
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), 5)
self.assertAllEqual(v0_part, (5, 1))
def _testVariableAxisSizePartitioner(self,
name,
axis,
max_shard_bytes,
expected_axis_shards,
expected_partitions,
max_shards=None):
partitioner = partitioned_variables.variable_axis_size_partitioner(
axis=axis, max_shard_bytes=max_shard_bytes, max_shards=max_shards)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable(
name, dtype=dtypes.float32, shape=(4, 8, 16, 32))
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), expected_axis_shards)
self.assertAllEqual(v0_part, expected_partitions)
def testVariableAxisSizePartitioner(self):
with self.cached_session():
# Create a partitioned variable of shape (4, 8, 16, 32) type float32
# Bytes per slice along the given axes:
# 8 * 16 * 32 * sizeof(float32) = 16384 / slice on axis 0
# 4 * 16 * 32 * sizeof(float32) = 8192 / slice on axis 1
# 4 * 8 * 32 * sizeof(float32) = 4096 / slice on axis 2
# 4 * 8 * 16 * sizeof(float32) = 2048 / slice on axis 3
# Now partition it in different ways...
# No need to slice: bytes_per_slice * dim0 = 65536 < max_shard_bytes
self._testVariableAxisSizePartitioner(
"v0",
axis=0,
max_shard_bytes=131072,
expected_axis_shards=1,
expected_partitions=(1, 1, 1, 1))
# Slice exactly once: bytes_per_slice * dim1 = 65536 = max_shard_bytes
self._testVariableAxisSizePartitioner(
"v1",
axis=1,
max_shard_bytes=65536,
expected_axis_shards=1,
expected_partitions=(1, 1, 1, 1))
# Slice into 2 parts:
# bytes_per_slice = 4096
# slices_per_shard = 32768 / 4096 = 8
# axis_shards = 16 / 8 = 2
self._testVariableAxisSizePartitioner(
"v2",
axis=2,
max_shard_bytes=32768,
expected_axis_shards=2,
expected_partitions=(1, 1, 2, 1))
# This partitioner makes sure we maximize the number of shards along
# axis 3. Slice it into 32 parts:
# bytes_per_slice = 2048
# slices_per_shard = 2048 / 2048 = 1
# axis_shards = 32 / 1 = 32
self._testVariableAxisSizePartitioner(
"v3a",
axis=3,
max_shard_bytes=2048,
expected_axis_shards=32,
expected_partitions=(1, 1, 1, 32))
# This partitioner makes sure we do not go past the bound of allowable
# number of shards along axis 3.
# Slice into 32 parts:
# bytes_per_slice = 2048
# slices_per_shard = max(1, 1024 / 2048) = 1
# axis_shards = 32 / 1 = 32
# Slice into max of 32 parts because: max_shard_bytes < bytes_per_slice
self._testVariableAxisSizePartitioner(
"v3b",
axis=3,
max_shard_bytes=1024,
expected_axis_shards=32,
expected_partitions=(1, 1, 1, 32))
# Specify max_shards so that it won't affect sharding.
self._testVariableAxisSizePartitioner(
"v3c",
axis=3,
max_shard_bytes=1024,
expected_axis_shards=32,
expected_partitions=(1, 1, 1, 32),
max_shards=33)
# Specify max_shards so that it will affect sharding.
self._testVariableAxisSizePartitioner(
"v3d",
axis=3,
max_shard_bytes=1024,
expected_axis_shards=2,
expected_partitions=(1, 1, 1, 2),
max_shards=2)
# Use the partitioner with strings
partitioner_axis3_str = partitioned_variables.variable_axis_size_partitioner( # pylint: disable=line-too-long
axis=3,
max_shard_bytes=32768,
bytes_per_string_element=8)
with variable_scope.variable_scope(
"root", partitioner=partitioner_axis3_str):
v3str = variable_scope.get_variable(
"v3str",
initializer=np.array([""] * 4 * 8 * 16 * 32).reshape(4, 8, 16, 32),
dtype=dtypes.string,
shape=(4, 8, 16, 32))
v3str_list = v3str._get_variable_list()
v3str_part = v3str._get_partitions()
# Now the estimated bytes_per_slice = 4*8*16*bytes_per_string_element
# which is equal to 4096. Setting a max_shard_bytes of 32768
# and we should get a split of 4.
# Slice into 4 parts:
# bytes_per_slice = 4096
# slices_per_shard = 32768 / 4096 = 8
# axis_shards = 32 / 8 = 4
self.assertEqual(len(v3str_list), 4)
self.assertAllEqual(v3str_part, (1, 1, 1, 4))
def _testMinMaxVariablePartitioner(self, max_partitions, axis, min_slice_size,
var_name, var_shape, expected_axis_shards,
expected_partitions):
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=max_partitions, axis=axis, min_slice_size=min_slice_size)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable(
var_name, dtype=dtypes.float32, shape=var_shape)
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), expected_axis_shards)
self.assertAllEqual(v0_part, expected_partitions)
def testMinMaxVariablePartitioner(self):
with self.cached_session():
# Partitioning a variable of shape=[2048] with a minimum of 2K per slice.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=2 << 10,
var_name="v0_0",
var_shape=[2048],
expected_axis_shards=4,
expected_partitions=[4])
# Partitioning a variable of shape=[2048, 1024] with a minimum of 256K per
# slice.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=256 << 10,
var_name="v0",
var_shape=[2048, 1024],
expected_axis_shards=32,
expected_partitions=[32, 1])
# max_partitions restricts partitioning of the variable.
self._testMinMaxVariablePartitioner(
max_partitions=16,
axis=0,
min_slice_size=256 << 10,
var_name="v1_max",
var_shape=[2048, 1024],
expected_axis_shards=16,
expected_partitions=[16, 1])
self._testMinMaxVariablePartitioner(
max_partitions=1,
axis=0,
min_slice_size=256 << 10,
var_name="v2_max",
var_shape=[2048, 1024],
expected_axis_shards=1,
expected_partitions=[1, 1])
# Reducing/Increasing min_slice_size proportionately increases/reduces the
# number of partitions.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=128 << 10,
var_name="v3_slice",
var_shape=[2048, 1024],
expected_axis_shards=64,
expected_partitions=[64, 1])
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=512 << 10,
var_name="v4_slice",
var_shape=[2048, 1024],
expected_axis_shards=16,
expected_partitions=[16, 1])
# Partitioning the variable along a different axis.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=1,
min_slice_size=256 << 10,
var_name="v5_axis",
var_shape=[64, 1024, 1, 3],
expected_axis_shards=3,
expected_partitions=[1, 3, 1, 1])
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=3,
min_slice_size=256 << 10,
var_name="v6_axis",
var_shape=[64, 1024, 1, 3],
expected_axis_shards=3,
expected_partitions=[1, 1, 1, 3])
# Can not partition the variable more than what its shape allows.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=256 << 10,
var_name="v7_shape",
var_shape=[16, 128, 1024],
expected_axis_shards=16,
expected_partitions=[16, 1, 1])
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=256 << 10,
var_name="v8_shape",
var_shape=[4, 512, 1024],
expected_axis_shards=4,
expected_partitions=[4, 1, 1])
def _IotaInitializer(shape, dtype=dtypes.float32, partition_info=None):
assert dtype == dtypes.float32
if len(shape) == 1:
return range(shape[0])
else:
val = _IotaInitializer(shape[1:], dtype)
return [[(10**i) * v for v in val] for i in range(shape[0])]
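# For reference: _IotaInitializer([4]) yields 0..3, and _IotaInitializer([4, 2])
# yields [[0, 1], [0, 10], [0, 100], [0, 1000]] -- each outer row scales the base
# iota by a successive power of ten, matching the assertions in
# testIotaInitializer below.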
class PartitionedVariablesTestCase(test.TestCase):
def _TestSaveSpec(self, slices, expected_specs):
self.assertEqual(len(expected_specs), len(slices))
for i in xrange(len(expected_specs)):
self.assertEquals(expected_specs[i], slices[i]._save_slice_info.spec)
def testVecConstantInit(self):
with self.cached_session():
rnd_par = constant_op.constant([1, 2, 3, 4])
vs = partitioned_variables.create_partitioned_variables([4], [4], rnd_par)
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 0).eval()
rnd = self.evaluate(rnd_par)
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.int32] * 4, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, ["4 0,1", "4 1,1", "4 2,1", "4 3,1"])
def testConstantInit(self):
with self.cached_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
vs = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 1).eval()
rnd = self.evaluate(rnd_par)
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.int32] * 2, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, ["2 4 0,2:0,2", "2 4 0,2:2,2"])
def _testNameHelper(self, use_resource=False):
with self.cached_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
with variable_scope.variable_scope("hi", use_resource=use_resource):
vs1 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
variables.global_variables_initializer().run()
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
self.assertEqual("hi/PartitionedVariable", var1_name)
self.assertEqual("hi/PartitionedVariable_1", var2_name)
self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
# Test same variable.
with self.cached_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
with variable_scope.variable_scope(
"hola", use_resource=use_resource) as vs:
vs1 = partitioned_variables.create_partitioned_variables(
[2, 4], [1, 2], rnd_par, dtype=dtypes.int32)
with variable_scope.variable_scope(
vs, reuse=True, use_resource=use_resource):
vs2 = partitioned_variables.create_partitioned_variables(
[2, 4], [1, 2], rnd_par, dtype=dtypes.int32)
variables.global_variables_initializer().run()
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
self.assertEqual("hola/PartitionedVariable", var1_name)
self.assertEqual("hola/PartitionedVariable", var2_name)
self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
# Test name_scope
with self.cached_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
with ops.name_scope("ola"):
vs1 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
variables.global_variables_initializer().run()
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
# Currently, the name scope 'ola' has no effect.
self.assertEqual("PartitionedVariable", var1_name)
self.assertEqual("PartitionedVariable_1", var2_name)
self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
def testName(self):
self._testNameHelper(use_resource=False)
def testResourceName(self):
self._testNameHelper(use_resource=True)
def testRandomInitValue(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([200, 40]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, 10], rnd.initialized_value())
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 1).eval()
rnd = self.evaluate(rnd)
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.float32] * 10, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, [
"200 40 0,200:0,4", "200 40 0,200:4,4", "200 40 0,200:8,4",
"200 40 0,200:12,4", "200 40 0,200:16,4", "200 40 0,200:20,4",
"200 40 0,200:24,4", "200 40 0,200:28,4", "200 40 0,200:32,4",
"200 40 0,200:36,4"
])
def testRandomInitUnevenPartitions(self):
with self.cached_session():
rnd = variables.Variable(
random_ops.random_uniform([20, 43], dtype=dtypes.float64))
var_lists = [
partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, i], rnd.initialized_value())
for i in xrange(1, 10)
]
variables.global_variables_initializer().run()
rnd_val = self.evaluate(rnd)
# Only check the slice save specs for the first 5 tf.
save_specs = [
# One slice
["20 43 0,20:0,43"],
# Two slices
["20 43 0,20:0,22", "20 43 0,20:22,21"],
# Three slices
["20 43 0,20:0,15", "20 43 0,20:15,14", "20 43 0,20:29,14"],
# Four slices
[
"20 43 0,20:0,11", "20 43 0,20:11,11", "20 43 0,20:22,11",
"20 43 0,20:33,10"
],
# Five slices
[
"20 43 0,20:0,9", "20 43 0,20:9,9", "20 43 0,20:18,9",
"20 43 0,20:27,8", "20 43 0,20:35,8"
]
]
for i, vs in enumerate(var_lists):
var_val = array_ops.concat(vs, 1).eval()
self.assertAllClose(rnd_val, var_val)
self.assertEqual([dtypes.float64] * len(vs),
[v.dtype.base_dtype for v in vs])
if i < len(save_specs):
self._TestSaveSpec(vs, save_specs[i])
def testDegenerate(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, 1], rnd.initialized_value())
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 0).eval()
rnd = self.evaluate(rnd)
self.assertAllClose(rnd, val)
self._TestSaveSpec(vs, ["10 43 0,10:0,43"])
def testSliceSizeOne(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [10, 1], rnd.initialized_value())
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 0).eval()
rnd = self.evaluate(rnd)
self.assertAllClose(rnd, val)
self._TestSaveSpec(vs, [
"10 43 0,1:0,43", "10 43 1,1:0,43", "10 43 2,1:0,43",
"10 43 3,1:0,43", "10 43 4,1:0,43", "10 43 5,1:0,43",
"10 43 6,1:0,43", "10 43 7,1:0,43", "10 43 8,1:0,43", "10 43 9,1:0,43"
])
def testIotaInitializer(self):
self.assertAllClose([0., 1., 2., 3.], _IotaInitializer([4]))
self.assertAllClose([[0., 1.], [0., 10.], [0., 100.], [0., 1000.]],
_IotaInitializer([4, 2]))
with self.cached_session():
vs = partitioned_variables.create_partitioned_variables([13, 5], [3, 1],
_IotaInitializer)
variables.global_variables_initializer().run()
slice0 = _IotaInitializer([5, 5])
slice1 = _IotaInitializer([4, 5])
slice2 = _IotaInitializer([4, 5])
val = array_ops.concat(vs, 0).eval()
self.assertAllClose(slice0 + slice1 + slice2, val)
self._TestSaveSpec(vs, ["13 5 0,5:0,5", "13 5 5,4:0,5", "13 5 9,4:0,5"])
def testRandomInitializer(self):
# Sanity check that the slices uses a different seed when using a random
# initializer function.
with self.cached_session():
var0, var1 = partitioned_variables.create_partitioned_variables(
[20, 12], [1, 2], init_ops.random_uniform_initializer())
variables.global_variables_initializer().run()
val0, val1 = self.evaluate(var0).flatten(), self.evaluate(var1).flatten()
self.assertTrue(np.linalg.norm(val0 - val1) > 1e-6)
# Negative test that proves that slices have the same values if
# the random initializer uses a seed.
with self.cached_session():
var0, var1 = partitioned_variables.create_partitioned_variables(
[20, 12], [1, 2], init_ops.random_uniform_initializer(seed=201))
variables.global_variables_initializer().run()
val0, val1 = self.evaluate(var0).flatten(), self.evaluate(var1).flatten()
self.assertAllClose(val0, val1)
def testSomeErrors(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10], [1, 1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 20], [1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [1, 2, 3], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [11, 1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [20, 1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [1, 50], rnd.initialized_value())
def testControlDepsNone(self):
with self.cached_session() as session:
c = constant_op.constant(1.0)
with ops.control_dependencies([c]):
# d get the control dependency.
d = constant_op.constant(2.0)
# Partitioned variables do not.
var_x = variable_scope.get_variable(
"x",
shape=[2],
initializer=init_ops.ones_initializer(),
partitioner=partitioned_variables.variable_axis_size_partitioner(4))
ops_before_read = session.graph.get_operations()
var_x.as_tensor() # Caches the ops for subsequent reads.
reading_ops = [
op for op in session.graph.get_operations()
if op not in ops_before_read
]
self.assertEqual([c.op], d.op.control_inputs)
# Tests that no control dependencies are added to reading a partitioned
# variable which is similar to reading a variable.
for op in reading_ops:
self.assertEqual([], op.control_inputs)
def testConcat(self):
with self.cached_session() as session:
var_x = variable_scope.get_variable(
"x",
initializer=constant_op.constant([1., 2.]),
partitioner=partitioned_variables.variable_axis_size_partitioner(4))
c = constant_op.constant(1.0)
with ops.control_dependencies([c]):
ops_before_concat = session.graph.get_operations()
value = var_x._concat() # pylint: disable=protected-access
concat_ops = [
op for op in session.graph.get_operations()
if op not in ops_before_concat
]
concat_control_inputs = [
ci for op in concat_ops for ci in op.control_inputs
]
self.assertTrue(
c.op in concat_control_inputs,
"var_x._concat() should get control dependencies from its scope.")
variables.global_variables_initializer().run()
self.assertAllClose(value.eval(), var_x.as_tensor().eval())
def testMetaGraphSaveLoad(self):
save_prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph) as session:
partitioner = partitioned_variables.fixed_size_partitioner(5, axis=0)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable(
"v0", dtype=dtypes.float32, shape=(10, 10))
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), 5)
self.assertAllEqual(v0_part, (5, 1))
variables.global_variables_initializer().run()
save_graph.get_collection_ref("partvar").append(v0)
saver = saver_lib.Saver()
save_graph.finalize()
save_path = saver.save(sess=session, save_path=save_prefix)
previous_value = session.run(
save_graph.get_tensor_by_name(v0.name + ":0"))
restore_graph = ops.Graph()
with restore_graph.as_default(), self.session(
graph=restore_graph) as session:
saver = saver_lib.import_meta_graph(save_path + ".meta")
saver.restore(sess=session, save_path=save_path)
v0, = save_graph.get_collection_ref("partvar")
self.assertIsInstance(v0, variables.PartitionedVariable)
self.assertAllEqual(
previous_value,
session.run(restore_graph.get_tensor_by_name(v0.name + ":0")))
if __name__ == "__main__":
test.main()
| [
"[email protected]"
]
| |
71a7c241fcfdd2288e94c907df7bf4fcf021f70a | 844564c24b1757110e00f6acff5658eea52d540a | /train_lgbm_model.py | 3b41c9c94b8c1e4a0db203573e717778bb377aab | []
| no_license | pekkipo/earth_quakes_challenge | df9dd375607ec72eea42985f284c46f221c9048b | be5c44e83102e6cf0b95ad7eb4002bc82748535e | refs/heads/master | 2020-07-26T14:44:24.145905 | 2019-09-16T00:59:14 | 2019-09-16T00:59:14 | 208,679,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,949 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 16:32:47 2019
@author: aleks
"""
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import mean_absolute_error
import lightgbm as lgb
import os
import gc
from datetime import datetime
from imblearn.over_sampling import SMOTE
from sklearn.pipeline import Pipeline
import feature_engineering as fe
import get_models
import params
gc.enable()
NAME = 'lgbm'
def fit_lgb(X_fit, y_fit, X_val, y_val, counter, lgb_path):
model = get_models.get_lgbm_2() # Lol this will not work
model.fit(X_fit, y_fit,
eval_set=[(X_val, y_val)],
verbose=3500,
              early_stopping_rounds=3500)  # stop a fold early if the validation score has not improved for 3500 rounds
cv_val = model.predict(X_val)
#Save LightGBM Model
save_to = '{}{}_fold{}.txt'.format(lgb_path, NAME, counter+1)
model.booster_.save_model(save_to)
return cv_val
def train_stage(df, y_df, df_ids, lgb_path):
lgb_cv_result = np.zeros(df.shape[0])
    # Stratified K-fold splitter; shuffling with a fixed seed keeps the folds reproducible.
    skf = StratifiedKFold(n_splits=params.num_folds, shuffle=True, random_state=42)
    skf.get_n_splits(df_ids, y_df)  # reports the number of folds; the actual train/validation index split happens in skf.split() below
print('\nModel Fitting...')
for counter, ids in enumerate(skf.split(df_ids, y_df)):
print('\nFold {}'.format(counter+1))
X_fit, y_fit = df.values[ids[0]], y_df[ids[0]]
X_val, y_val = df.values[ids[1]], y_df[ids[1]]
lgb_cv_result[ids[1]] += fit_lgb(X_fit, y_fit, X_val, y_val, counter, lgb_path)
del X_fit, X_val, y_fit, y_val
gc.collect()
mae_lgb = round(mean_absolute_error(y_df, lgb_cv_result), 6)
print('\nLightGBM VAL MAE: {}'.format(mae_lgb))
return 0
def prediction_stage(df, lgb_path, submit=True):
lgb_models = sorted(os.listdir(lgb_path))
lgb_result = np.zeros(df.shape[0])
print('\nMake predictions...\n')
for m_name in lgb_models:
#Load LightGBM Model
model = lgb.Booster(model_file='{}{}'.format(lgb_path, m_name))
lgb_result += model.predict(df.values)
lgb_result /= len(lgb_models)
if submit:
submission = pd.read_csv(params.submission_file)
submission['time_to_failure'] = lgb_result
submission.to_csv(params.submission_out_file, index=False)
return 0
############ RUN
train_path = 'data/train.csv'
test_path = 'data/test.csv'
print('Load Train Data.')
df_train = pd.read_csv(train_path)
print('\nShape of Train Data: {}'.format(df_train.shape))
print('Load Test Data.')
df_test = pd.read_csv(test_path)
print('\nShape of Test Data: {}'.format(df_test.shape))
#Create dir for models
#os.mkdir(lgb_path)
print('Train Stage.\n')
# train_stage() expects (features, target, ids, model_dir). Assuming the target column is
# 'time_to_failure' (the column written to the submission in prediction_stage) and using
# the row index as the id array for the fold splitter.
y_train = df_train['time_to_failure'].values
train_stage(df_train.drop('time_to_failure', axis=1), y_train, df_train.index.values, params.lgb_path)
print('Prediction Stage.\n')
prediction_stage(df_test, params.lgb_path, False)
print('\nDone.') | [
"[email protected]"
]
| |
30a5b8c7cc8bd95a82e561e28a5d7be11faff95e | 3cd18a3e789d3a0739768f1ae848d9f74b9dbbe7 | /mounth001/day04/exercise03.py | 4d2f9b5972212719de0f5a3f68193db7e92b29ea | []
| no_license | Molly-l/66 | 4bfe2f93e726d3cc059222c93a2bb3460b21ad78 | fae24a968f590060522d30f1b278fcfcdab8b36f | refs/heads/master | 2020-09-28T12:50:18.590794 | 2019-11-27T04:42:28 | 2019-11-27T04:42:28 | 226,782,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py |
sum=0
for i in range(5):
num1 = int(input('输入第一个数'))
num2 = int(input('输入第二个数'))
sum=str(num1)+'+'+(num2)+'+?'
sum=num1+num2
sum +=20
print(sum) | [
"[email protected]"
]
| |
baa83d0b02e9df48bb9be99967b9f99428558a1f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_277/ch164_2020_06_20_22_09_38_618562.py | 3df906c7e0595b431ca66d73d3e74969660f4e0f | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | def traduz(lista, dicionario):
lista2 = []
for i in range(len(lista)):
ingles = lista[i]
for j in dicionario.keys():
traducao = dicionario[ingles]
lista2.append(traducao)
return lista2 | [
"[email protected]"
]
| |
a501db98c6eb3c2225a4f5809c45cf52064f7c14 | 625574700973e0d52c5435c0fa60007a0a8cc0a1 | /contest/HHKB/d.py | 6185adaed6dd05d0bb42d27752175732161d8922 | []
| no_license | konchanksu/AtCoder-practice | b1c63fb6f6da8a409617b23438edf5469773049d | 46a7d9b9b33d4fbbcffeb6bb90d4bfca8d5dfa2a | refs/heads/main | 2023-08-24T17:35:30.193120 | 2021-09-29T05:38:40 | 2021-09-29T05:38:40 | 311,109,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | MOD = 10 ** 9 + 7
for _ in range(int(input())):
N, A, B = map(int, input().split())
pt = (pow((N - A + 1), 2, MOD) * pow((N - B + 1), 2, MOD)) % MOD
for i in range((N + 1) // 2):
t = ((N - i * 2) - A) * 4
if t < 0:
break
elif t == 0:
t = 1
| [
"[email protected]"
]
| |
db75fe7c8c874f31cb4f81cf4169470d68c8a848 | efe3c9ad40200e6a4cc54ade2867e455687eb11b | /games/migrations/0008_auto_20180331_2134.py | 3d437be4fabb2fe3d4a517cd5225dc4bec5a3e20 | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
]
| permissive | andrewhstead/stream-three-project | bec3b70b354b812d1a875ee4e305377038fe179b | 60e5f946455f12019a266b8231737435702ff95e | refs/heads/master | 2023-06-23T17:53:09.379297 | 2023-06-13T16:09:22 | 2023-06-13T16:09:22 | 126,410,294 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-31 20:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('games', '0007_auto_20180331_2132'),
]
operations = [
migrations.AlterField(
model_name='season',
name='champion',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='champion', to='teams.Team'),
),
migrations.AlterField(
model_name='season',
name='finalist',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='finalist', to='teams.Team'),
),
]
| [
"[email protected]"
]
| |
c9e5897028bc1541f680eaddccfb613b06578b55 | 3bbcda4d74d9aa65e5c705352a4a60d9db0c6a42 | /third_party/github.com/ansible/awx/awxkit/awxkit/api/pages/page.py | 3ee1c38490e2309578b3024c43a4a2656c0a4f1f | [
"Apache-2.0"
]
| permissive | mzachariasz/sap-deployment-automation | 82ecccb5a438eaee66f14b4448d4abb15313d989 | cb4710f07bb01248de4255a0dc5e48eda24e2d63 | refs/heads/master | 2023-06-25T15:09:53.505167 | 2021-07-23T18:47:21 | 2021-07-23T18:47:21 | 388,017,328 | 1 | 0 | Apache-2.0 | 2021-07-23T18:47:22 | 2021-07-21T06:29:55 | HCL | UTF-8 | Python | false | false | 20,803 | py | import inspect
import logging
import json
import re
from requests import Response
import http.client as http
from awxkit.utils import (
PseudoNamespace,
is_relative_endpoint,
are_same_endpoint,
super_dir_set,
suppress,
is_list_or_tuple,
to_str
)
from awxkit.api import utils
from awxkit.api.client import Connection
from awxkit.api.registry import URLRegistry
from awxkit.config import config
import awxkit.exceptions as exc
log = logging.getLogger(__name__)
_page_registry = URLRegistry()
get_registered_page = _page_registry.get
def is_license_invalid(response):
if re.match(r".*Invalid license.*", response.text):
return True
if re.match(r".*Missing 'eula_accepted' property.*", response.text):
return True
if re.match(r".*'eula_accepted' must be True.*", response.text):
return True
if re.match(r".*Invalid license data.*", response.text):
return True
def is_license_exceeded(response):
if re.match(
r".*license range of.*instances has been exceeded.*",
response.text):
return True
if re.match(
r".*License count of.*instances has been reached.*",
response.text):
return True
if re.match(
r".*License count of.*instances has been exceeded.*",
response.text):
return True
if re.match(r".*License has expired.*", response.text):
return True
if re.match(r".*License is missing.*", response.text):
return True
def is_duplicate_error(response):
if re.match(r".*already exists.*", response.text):
return True
def register_page(urls, page_cls):
if not _page_registry.default:
from awxkit.api.pages import Base
_page_registry.setdefault(Base)
if not is_list_or_tuple(urls):
urls = [urls]
# Register every methodless page with wildcard method
# until more granular page objects exist (options, head, etc.)
updated_urls = []
for url_method_pair in urls:
if isinstance(url_method_pair, str):
url = url_method_pair
method = '.*'
else:
url, method = url_method_pair
updated_urls.append((url, method))
page_cls.endpoint = updated_urls[0][0]
return _page_registry.register(updated_urls, page_cls)
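# Hypothetical registration sketch (the endpoint and class names are illustrative):
#
#   class Widget(base.Base):
#       pass
#
#   register_page([resources.widget,             # matches any HTTP method ('.*')
#                  (resources.widget_copy, 'post')], Widget)
#
# get_registered_page(url, method) then resolves a request URL back to Widget.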
def objectify_response_json(response):
"""return a PseudoNamespace() from requests.Response.json()."""
try:
json = response.json()
except ValueError:
json = dict()
# PseudoNamespace arg must be a dict, and json can be an array.
# TODO: Assess if list elements should be PseudoNamespace
if isinstance(json, dict):
return PseudoNamespace(json)
return json
class Page(object):
endpoint = ''
def __init__(self, connection=None, *a, **kw):
if 'endpoint' in kw:
self.endpoint = kw['endpoint']
self.connection = connection or Connection(
config.base_url, kw.get(
'verify', not config.assume_untrusted))
self.r = kw.get('r', None)
self.json = kw.get(
'json', objectify_response_json(
self.r) if self.r else {})
self.last_elapsed = kw.get('last_elapsed', None)
def __getattr__(self, name):
if 'json' in self.__dict__ and name in self.json:
value = self.json[name]
if not isinstance(
value,
TentativePage) and is_relative_endpoint(value):
value = TentativePage(value, self.connection)
elif isinstance(value, dict):
for key, item in value.items():
if not isinstance(
item, TentativePage) and is_relative_endpoint(item):
value[key] = TentativePage(item, self.connection)
return value
raise AttributeError(
"{!r} object has no attribute {!r}".format(
self.__class__.__name__, name))
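    # Illustrative effect of the lookup above (field names are hypothetical):
    # an entry of `page.related` (e.g. page.related['launch']) comes back as a
    # TentativePage wrapping the relative URL, so it can be .get()/.post()ed
    # lazily, while plain JSON fields such as `page.name` are ordinary values.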
def __setattr__(self, name, value):
if 'json' in self.__dict__ and name in self.json:
# Update field only. For new field use explicit patch
self.patch(**{name: value})
else:
self.__dict__[name] = value
def __str__(self):
if hasattr(self, 'json'):
return json.dumps(self.json, indent=4)
return str(super(Page, self).__repr__())
__repr__ = __str__
def __dir__(self):
attrs = super_dir_set(self.__class__)
if 'json' in self.__dict__ and hasattr(self.json, 'keys'):
attrs.update(self.json.keys())
return sorted(attrs)
def __getitem__(self, key):
return getattr(self, key)
def __iter__(self):
return iter(self.json)
@property
def __item_class__(self):
"""Returns the class representing a single 'Page' item"""
return self.__class__
@classmethod
def from_json(cls, raw, connection=None):
resp = Response()
data = json.dumps(raw)
resp._content = bytes(data, 'utf-8')
resp.encoding = 'utf-8'
resp.status_code = 200
return cls(r=resp, connection=connection)
def page_identity(self, response, request_json=None):
"""Takes a `requests.Response` and
returns a new __item_class__ instance if the request method is not a get, or returns
a __class__ instance if the request path is different than the caller's `endpoint`.
"""
request_path = response.request.path_url
if request_path == '/migrations_notran/':
raise exc.IsMigrating('You have been redirected to the migration-in-progress page.')
request_method = response.request.method.lower()
self.last_elapsed = response.elapsed
if isinstance(request_json, dict) and 'ds' in request_json:
ds = request_json.ds
else:
ds = None
try:
data = response.json()
except ValueError as e: # If there was no json to parse
data = dict()
if response.text or response.status_code not in (200, 202, 204):
text = response.text
if len(text) > 1024:
text = text[:1024] + '... <<< Truncated >>> ...'
log.debug(
"Unable to parse JSON response ({0.status_code}): {1} - '{2}'".format(response, e, text))
exc_str = "%s (%s) received" % (
http.responses[response.status_code], response.status_code)
exception = exception_from_status_code(response.status_code)
if exception:
raise exception(exc_str, data)
if response.status_code in (
http.OK,
http.CREATED,
http.ACCEPTED):
# Not all JSON responses include a URL. Grab it from the request
# object, if needed.
if 'url' in data:
endpoint = data['url']
else:
endpoint = request_path
data = objectify_response_json(response)
if request_method in ('get', 'patch', 'put'):
# Update existing resource and return it
if are_same_endpoint(self.endpoint, request_path):
self.json = data
self.r = response
return self
registered_type = get_registered_page(request_path, request_method)
return registered_type(
self.connection,
endpoint=endpoint,
json=data,
last_elapsed=response.elapsed,
r=response,
ds=ds)
elif response.status_code == http.FORBIDDEN:
if is_license_invalid(response):
raise exc.LicenseInvalid(exc_str, data)
elif is_license_exceeded(response):
raise exc.LicenseExceeded(exc_str, data)
else:
raise exc.Forbidden(exc_str, data)
elif response.status_code == http.BAD_REQUEST:
if is_license_invalid(response):
raise exc.LicenseInvalid(exc_str, data)
if is_duplicate_error(response):
raise exc.Duplicate(exc_str, data)
else:
raise exc.BadRequest(exc_str, data)
else:
raise exc.Unknown(exc_str, data)
def update_identity(self, obj):
"""Takes a `Page` and updates attributes to reflect its content"""
self.endpoint = obj.endpoint
self.json = obj.json
self.last_elapsed = obj.last_elapsed
self.r = obj.r
return self
def delete(self):
r = self.connection.delete(self.endpoint)
with suppress(exc.NoContent):
return self.page_identity(r)
def get(self, all_pages=False, **query_parameters):
r = self.connection.get(self.endpoint, query_parameters)
page = self.page_identity(r)
if all_pages and getattr(page, 'next', None):
paged_results = [r.json()['results']]
while page.next:
r = self.connection.get(self.next, query_parameters)
page = self.page_identity(r)
paged_results.append(r.json()['results'])
json = r.json()
json['results'] = []
for page in paged_results:
json['results'].extend(page)
page = self.__class__.from_json(json, connection=self.connection)
return page
def head(self):
r = self.connection.head(self.endpoint)
return self.page_identity(r)
def options(self):
r = self.connection.options(self.endpoint)
return self.page_identity(r)
def patch(self, **json):
r = self.connection.patch(self.endpoint, json)
return self.page_identity(r, request_json=json)
def post(self, json={}):
r = self.connection.post(self.endpoint, json)
return self.page_identity(r, request_json=json)
def put(self, json=None):
"""If a payload is supplied, PUT the payload. If not, submit our existing page JSON as our payload."""
json = self.json if json is None else json
r = self.connection.put(self.endpoint, json=json)
return self.page_identity(r, request_json=json)
def get_related(self, related_name, **kwargs):
assert related_name in self.json.get('related', [])
endpoint = self.json['related'][related_name]
return self.walk(endpoint, **kwargs)
def walk(self, endpoint, **kw):
page_cls = get_registered_page(endpoint)
return page_cls(self.connection, endpoint=endpoint).get(**kw)
def get_natural_key(self, cache=None):
if cache is None:
cache = PageCache()
if not getattr(self, 'NATURAL_KEY', None):
log.warning("This object does not have a natural key: %s", getattr(self, 'endpoint', ''))
return None
natural_key = {}
for key in self.NATURAL_KEY:
if key in self.related:
related_endpoint = cache.get_page(self.related[key])
if related_endpoint is not None:
natural_key[key] = related_endpoint.get_natural_key(cache=cache)
else:
natural_key[key] = None
elif key in self:
natural_key[key] = self[key]
natural_key['type'] = self['type']
return natural_key
_exception_map = {http.NO_CONTENT: exc.NoContent,
http.NOT_FOUND: exc.NotFound,
http.INTERNAL_SERVER_ERROR: exc.InternalServerError,
http.BAD_GATEWAY: exc.BadGateway,
http.METHOD_NOT_ALLOWED: exc.MethodNotAllowed,
http.UNAUTHORIZED: exc.Unauthorized,
http.PAYMENT_REQUIRED: exc.PaymentRequired,
http.CONFLICT: exc.Conflict}
def exception_from_status_code(status_code):
return _exception_map.get(status_code, None)
class PageList(object):
NATURAL_KEY = None
@property
def __item_class__(self):
"""Returns the class representing a single 'Page' item
        With an inheritance of OrgListSubClass -> OrgList -> PageList -> Org -> Base -> Page, the following
will return the parent class of the current object (e.g. 'Org').
Obtaining a page type by registered endpoint is highly recommended over using this method.
"""
mro = inspect.getmro(self.__class__)
bl_index = mro.index(PageList)
return mro[bl_index + 1]
@property
def results(self):
items = []
for item in self.json['results']:
endpoint = item.get('url')
if endpoint is None:
registered_type = self.__item_class__
else:
registered_type = get_registered_page(endpoint)
items.append(
registered_type(
self.connection,
endpoint=endpoint,
json=item,
r=self.r))
return items
def go_to_next(self):
if self.next:
next_page = self.__class__(self.connection, endpoint=self.next)
return next_page.get()
def go_to_previous(self):
if self.previous:
prev_page = self.__class__(self.connection, endpoint=self.previous)
return prev_page.get()
def create(self, *a, **kw):
return self.__item_class__(self.connection).create(*a, **kw)
def get_natural_key(self, cache=None):
log.warning("This object does not have a natural key: %s", getattr(self, 'endpoint', ''))
return None
class TentativePage(str):
def __new__(cls, endpoint, connection):
return super(TentativePage, cls).__new__(cls, to_str(endpoint))
def __init__(self, endpoint, connection):
self.endpoint = to_str(endpoint)
self.connection = connection
def _create(self):
return get_registered_page(
self.endpoint)(
self.connection,
endpoint=self.endpoint)
def get(self, **params):
return self._create().get(**params)
def create_or_replace(self, **query_parameters):
"""Create an object, and if any other item shares the name, delete that one first.
Generally, requires 'name' of object.
Exceptions:
- Users are looked up by username
- Teams need to be looked up by name + organization
"""
page = None
# look up users by username not name
if 'users' in self:
assert query_parameters.get(
'username'), 'For this resource, you must call this method with a "username" to look up the object by'
page = self.get(username=query_parameters['username'])
else:
assert query_parameters.get(
'name'), 'For this resource, you must call this method with a "name" to look up the object by'
if query_parameters.get('organization'):
if isinstance(query_parameters.get('organization'), int):
page = self.get(
name=query_parameters['name'],
organization=query_parameters.get('organization'))
else:
page = self.get(
name=query_parameters['name'],
organization=query_parameters.get('organization').id)
else:
page = self.get(name=query_parameters['name'])
if page and page.results:
for item in page.results:
# We found a duplicate item, we will delete it
# Some things, like inventory scripts, allow multiple scripts
# by same name as long as they have different organization
item.delete()
# Now that we know that there is no duplicate, we create a new object
return self.create(**query_parameters)
def get_or_create(self, **query_parameters):
"""Get an object by this name or id if it exists, otherwise create it.
Exceptions:
- Users are looked up by username
- Teams need to be looked up by name + organization
"""
page = None
# look up users by username not name
if query_parameters.get('username') and 'users' in self:
page = self.get(username=query_parameters['username'])
if query_parameters.get('name'):
if query_parameters.get('organization'):
if isinstance(query_parameters.get('organization'), int):
page = self.get(
name=query_parameters['name'],
organization=query_parameters.get('organization'))
else:
page = self.get(
name=query_parameters['name'],
organization=query_parameters.get('organization').id)
else:
page = self.get(name=query_parameters['name'])
elif query_parameters.get('id'):
page = self.get(id=query_parameters['id'])
if page and page.results:
item = page.results.pop()
return item.url.get()
else:
# We did not find it given these params, we will create it instead
return self.create(**query_parameters)
def post(self, payload={}):
return self._create().post(payload)
def put(self):
return self._create().put()
def patch(self, **payload):
return self._create().patch(**payload)
def delete(self):
return self._create().delete()
def options(self):
return self._create().options()
def create(self, *a, **kw):
return self._create().create(*a, **kw)
def payload(self, *a, **kw):
return self._create().payload(*a, **kw)
def create_payload(self, *a, **kw):
return self._create().create_payload(*a, **kw)
def __str__(self):
if hasattr(self, 'endpoint'):
return self.endpoint
return super(TentativePage, self).__str__()
__repr__ = __str__
def __eq__(self, other):
return self.endpoint == other
def __ne__(self, other):
return self.endpoint != other
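# PageCache memoizes OPTIONS responses and fetched pages by URL (and by natural
# key), skipping endpoints that raise errors or carry a '299 ... deprecated' warning.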
class PageCache(object):
def __init__(self):
self.options = {}
self.pages_by_url = {}
self.pages_by_natural_key = {}
def get_options(self, page):
url = page.endpoint if isinstance(page, Page) else str(page)
if url in self.options:
return self.options[url]
try:
options = page.options()
except exc.Common:
log.error("This endpoint raised an error: %s", url)
return self.options.setdefault(url, None)
warning = options.r.headers.get('Warning', '')
if '299' in warning and 'deprecated' in warning:
log.warning("This endpoint is deprecated: %s", url)
return self.options.setdefault(url, None)
return self.options.setdefault(url, options)
def set_page(self, page):
log.debug("set_page: %s %s", type(page), page.endpoint)
self.pages_by_url[page.endpoint] = page
if getattr(page, 'NATURAL_KEY', None):
log.debug("set_page has natural key fields.")
natural_key = page.get_natural_key(cache=self)
if natural_key is not None:
log.debug("set_page natural_key: %s", repr(natural_key))
self.pages_by_natural_key[utils.freeze(natural_key)] = page.endpoint
if 'results' in page:
for p in page.results:
self.set_page(p)
return page
def get_page(self, page):
url = page.endpoint if isinstance(page, Page) else str(page)
if url in self.pages_by_url:
return self.pages_by_url[url]
try:
page = page.get(all_pages=True)
except exc.Common:
log.error("This endpoint raised an error: %s", url)
return self.pages_by_url.setdefault(url, None)
warning = page.r.headers.get('Warning', '')
if '299' in warning and 'deprecated' in warning:
log.warning("This endpoint is deprecated: %s", url)
return self.pages_by_url.setdefault(url, None)
log.debug("get_page: %s", page.endpoint)
return self.set_page(page)
def get_by_natural_key(self, natural_key):
endpoint = self.pages_by_natural_key.get(utils.freeze(natural_key))
log.debug("get_by_natural_key: %s, endpoint: %s", repr(natural_key), endpoint)
if endpoint:
return self.get_page(endpoint)
| [
"[email protected]"
]
| |
3012e511cff29465645c8f3cb56a997f403179db | ebde97e71f06f46075b585c92e984d7eb3f700c7 | /lib/ocr/clstm_cni_v3.py | cbd632d57adc681d5d443d64e7f251c80b36c08d | []
| no_license | manhcuogntin4/New_cni_project | 3ccc36d0d536c7e7a9082025b8977419feb3d5c5 | 90441e10df8ea5ed7e56fb2b6083721da19ae839 | refs/heads/master | 2021-04-27T17:07:45.493046 | 2018-03-01T14:51:54 | 2018-03-01T14:51:54 | 122,315,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,317 | py | import cv2
import numpy as np
from matplotlib import pyplot as plt
import pyclstm
from PIL import Image
import sys, getopt
import os
import difflib
import sys
#Process parallel
from multiprocessing import Pool
import multiprocessing
import subprocess
from multiprocessing import Manager
from functools import partial
import multiprocessing.pool
import glob
from pandas import DataFrame
import subprocess
import argparse
#IMAGES_FOLDER="./"
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--folder", required=True,
help="images folder")
ap.add_argument("-c", "--class", required=True,
help="model path")
ap.add_argument("-o", "--output", required=True,
help="excel output")
args = vars(ap.parse_args())
IMAGES_FOLDER=args["folder"]
CLASS=args["class"]
excel_outfile=args["output"]
def readFileImages(strFolderName):
print strFolderName
image_list = []
st=strFolderName+"*.png"
	for filename in glob.glob(st): #assuming png
image_list.append(filename)
return image_list
ls_images=readFileImages(IMAGES_FOLDER)
reload(sys)
sys.setdefaultencoding('utf-8')
#CACHE_FOLDER = '/tmp/caffe_demos_uploads/cache'
CACHE_FOLDER = './cache'
this_dir = os.path.dirname(__file__)
def get_similar(str_verify, isLieu=False, score=0.6):
words = []
if not os.path.exists(CACHE_FOLDER):
os.makedirs(CACHE_FOLDER)
if(isLieu):
lieu_path=os.path.join(this_dir, 'lieu.txt')
else:
lieu_path=os.path.join(this_dir, 'nom.txt')
f=open(lieu_path,'r')
for line in f:
words.append(line)
f.close()
print str_verify
index = 0
if(str_verify.find(u':') != -1 and str_verify.index(u':') < 5):
index = str_verify.index(u':')+1
str_verify=str_verify[index:]
print str_verify
simi=difflib.get_close_matches(str_verify, words,5,score)
#print(simi)
return simi
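# convert_to_binary: grayscale + Otsu threshold, resize to a 60-pixel-high image,
# denoise, write the result into the cache folder and return (path, image).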
def convert_to_binary(img):
cv2.setNumThreads(0)
	if len(img.shape) >= 3:
img=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, imgBinary = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
height = np.size(img, 0)
width = np.size(img, 1)
height=60
r,c=img.shape[:2]
res = cv2.resize(imgBinary,((int)(height*c)/r, height), interpolation = cv2.INTER_CUBIC)
res = cv2.fastNlMeansDenoising(res,20, 7, 21)
out_path = os.path.join(CACHE_FOLDER, str(os.getpid())+ "out.png")
cv2.imwrite(out_path,res)
return out_path, res
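# extract_text: run the CLSTM model over the image, multiply the per-character
# confidences (after any leading ':' or space prefix) into a single probability,
# and also record the x position of the right-most ':' character.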
def extract_text(img_path, model_path):
ocr = pyclstm.ClstmOcr()
ocr.load(model_path)
imgFile = Image.open(img_path)
text = ocr.recognize(imgFile)
text.encode('utf-8')
chars = ocr.recognize_chars(imgFile)
prob = 1
index = 0
x=0
#print text
if(text.find(u':') != -1 and text.index(u':') < 5):
index = text.index(u':')+1
if(text.find(u' ') != -1 and (text.index(u' ') <= 3)):
if(len(text)>text.index(u' ')+1):
index = text.index(u' ')+1
for ind, j in enumerate(chars):
#print j
if ind >= index:
prob *= j.confidence
if j.char==u':':
if j.x_position >x:
x=j.x_position
#return text[index:], prob, index
return text, prob, index, x
def crop_image(img,cropX=0, cropY=0, cropWidth=0, cropHeight=0):
h = np.size(img, 0)
w = np.size(img, 1)
if(h-cropHeight>cropY) and (w-cropWidth>cropX):
res=img[cropY:h-cropHeight, cropX:w-cropWidth]
else:
res=img[cropY:h,cropX:w]
#print str(os.getpid())
out_path = os.path.join(CACHE_FOLDER, str(os.getpid())+"croped.png")
#out_path = os.path.join(CACHE_FOLDER, "croped.png")
cv2.imwrite(out_path,res)
return out_path
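# clstm_ocr / clstm_ocr_parallel: pick the model for the requested field class,
# then retry the OCR over a grid of small crops and keep the highest-confidence
# text; 'lieu' results are additionally matched against the lieu.txt vocabulary.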
def clstm_ocr(img, cls):
if not os.path.exists(CACHE_FOLDER):
os.makedirs(CACHE_FOLDER)
model_path = os.path.join(this_dir, 'model_nomprenom_cni.clstm')
if cls=="lieu":
model_path = os.path.join(this_dir, 'model_lieu_cni.clstm')
if cls=="datenaissance":
model_path = os.path.join(this_dir, 'model_date_cni.clstm')
if cls=="mrz1" or cls=="mrz2":
model_path = os.path.join(this_dir, 'model_mrz_cni_new.clstm')
islieu=False
if cls=="lieu":
islieu=True
print model_path
converted_image_path, image = convert_to_binary(img)
#maxPro = 0
#ocr_result = ""
ocr_result, maxPro, index,x=extract_text(converted_image_path, model_path)
if(index>0):
image=image[10:,:]
cropX=1
#cropY=8
cropY=1
cropWidth=1
#cropHeight=10
cropHeight=1
if(islieu):
cropHeight=3
cropY=3
cropX=3
cropWidth=3
for i in range (0,cropX,1):
for j in range (0,cropY):
for k in range (0,cropWidth):
for h in range (0, cropHeight):
img_path = crop_image(image, i, j, k, h)
text, prob, index,x = extract_text(img_path, model_path)
#print text, prob
if(prob > maxPro) and (len(text)>=2):
maxPro = prob
ocr_result = text
if (maxPro > 0.95) and (len(text) >= 2):
break
#print maxPro, ocr_result
if(islieu and maxPro<1):
if(maxPro<0.9):
ocr=get_similar(ocr_result,islieu,0.5)
if(len(ocr)>0):
ocr_result=ocr[0].encode('utf-8')+" : "+ ocr_result
else:
ocr=get_similar(ocr_result,islieu,0.6)
if(len(ocr)>0):
ocr_result=ocr[0].encode('utf-8') +" : "+ ocr_result
return (ocr_result, maxPro)
def clstm_ocr_parallel(img, cls):
try:
os.mkdir(CACHE_FOLDER)
except OSError as e:
		if e.errno == 17: # errno.EEXIST
os.chmod(CACHE_FOLDER, 0755)
model_path = os.path.join(this_dir, 'model_nomprenom_cni.clstm')
if cls=="lieu":
model_path = os.path.join(this_dir, 'model_lieu_cni.clstm')
if cls=="datenaissance":
model_path = os.path.join(this_dir, 'model_date_cni.clstm')
if cls=="mrz1" or cls=="mrz2":
model_path = os.path.join(this_dir, 'model_mrz_cni_new.clstm')
print model_path
if(img.size>0):
print "size >0"
converted_image_path, image = convert_to_binary(img)
ocr_result, maxPro, index,x=extract_text(converted_image_path, model_path)
print "first extract finished"
else:
return ("",0)
#maxPro = 0
#ocr_result = ""
if(index>0):
image=image[10:,:]
cropX=1
cropY=8
cropWidth=1
cropHeight=10
islieu=False
if (cls=="lieu"):
islieu=True
if(islieu):
cropHeight=3
cropY=3
cropX=3
cropWidth=3
q={}
p={}
txt={}
prob={}
for j in range (0,cropY):
q[j] = multiprocessing.Queue()
p[j] = multiprocessing.Process(target=calib_clstm_height_low_queue, args=(cropX,j,cropWidth,cropHeight,image,model_path,q[j]))
p[j].start()
for j in range (0,cropY):
#p[j].join()
txt[j],prob[j]=q[j].get()
for j in range (0,cropY):
p[j].join()
#txt[j],prob[j]=q[j].get()
for j in range (0,cropY):
if(prob[j]>maxPro):
maxPro=prob[j]
ocr_result=txt[j]
#print "Here:", os.getpid()
#print maxPro, ocr_result
if(islieu and maxPro<1):
if(maxPro<0.9):
ocr=get_similar(ocr_result,islieu,0.5)
if(len(ocr)>0):
ocr_result=ocr[0].encode('utf-8')+" : "+ ocr_result
else:
ocr=get_similar(ocr_result,islieu,0.6)
if(len(ocr)>0):
ocr_result=ocr[0].encode('utf-8') +" : "+ ocr_result
return (ocr_result, maxPro)
def clstm_ocr_calib(img, cls):
if not os.path.exists(CACHE_FOLDER):
os.makedirs(CACHE_FOLDER)
model_path = os.path.join(this_dir, 'model_nomprenom_cni.clstm')
if cls=="lieu":
model_path = os.path.join(this_dir, 'model_lieu_cni.clstm')
if cls=="datenaissance":
model_path = os.path.join(this_dir, 'model_date_cni.clstm')
if cls=="mrz1" or cls=="mrz2":
model_path = os.path.join(this_dir, 'model_mrz_cni_new.clstm')
ocr_result, maxPro="",0
islieu=False
if cls=="lieu":
islieu=True
if(img.size>0):
converted_image_path, image = convert_to_binary(img)
#maxPro = 0
#ocr_result = ""
ocr_result, maxPro, index,x=extract_text(converted_image_path, model_path)
if(islieu):
if(maxPro<0.9):
ocr=get_similar(ocr_result,islieu,0.5)
if(len(ocr)>0):
ocr_result=ocr[0].encode('utf-8')+" : "+ ocr_result
else:
ocr=get_similar(ocr_result,islieu,0.6)
if(len(ocr)>0):
ocr_result=ocr[0].encode('utf-8')+" : "+ ocr_result
else:
return ("",0)
return (ocr_result, maxPro)
def calib_clstm_height_low(cropX, y, cropWidth, cropHeight, image, model_path):
maxPro=0
ocr_result=""
xmin, ymin, xmax, ymax,x_crop=0,0,0,0,0
for i in range (0,cropX):
for k in range (0,cropWidth):
for h in range (0, cropHeight):
img_path = crop_image(image, i, y, k, h)
text, prob, index,x = extract_text(img_path, model_path)
os.remove(img_path)
if(prob > maxPro) and (len(text)>=2):
maxPro = prob
ocr_result = text
xmin,ymin,xmax,ymax, x_crop=i,y,k,h,x
if (maxPro > 0.95) and (len(text) >= 2):
break
print xmin, ymin, xmax, ymax
if maxPro<0.95:
xmin=xmin+x_crop+10
print "xmin:", xmin
model_path = os.path.join(this_dir, 'model-lieu2911-binary-bak.clstm')
img_path = crop_image(image, xmin, ymin, xmax, ymax)
text, prob, index,x = extract_text(img_path, model_path)
print ocr_result, maxPro, ":",text, prob
os.remove(img_path)
if(prob > maxPro) and (len(text)>=2):
print "in old version"
maxPro = prob
ocr_result = text
return ocr_result, maxPro
def calib_clstm_height_low_queue(cropX, y, cropWidth, cropHeight, image, model_path, q):
q.put(calib_clstm_height_low(cropX, y, cropWidth, cropHeight, image, model_path))
ls_txt, ls_prob=[],[]
for filename in ls_images:
img=cv2.imread(filename)
txt, prob=clstm_ocr_parallel(img, CLASS)
ls_txt.append(txt)
ls_prob.append(prob)
df=DataFrame({'File ID':ls_images, CLASS: ls_txt, 'Prob': ls_prob})
df.to_excel(excel_outfile, sheet_name='Match_ID_File', index=False)
| [
"[email protected]"
]
| |
0b5aa09befc5d6623bbda3bceae433a27773e85b | 154f24699c63858703b4538afc0213594f08b0b6 | /scripts/make_csv.py | 25e613e9b21cdb2dd288b978acb42fd68ac8a778 | []
| no_license | inpho/loc-ucsd | 225f936d2ba88df057ea5f82f60053141686a80e | a9c7e7fded841bd0376f944b102f151260ae5e44 | refs/heads/master | 2020-05-30T03:53:19.006317 | 2014-03-03T23:45:55 | 2014-03-03T23:45:55 | 8,609,923 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,453 | py | import csv
import json
htrc1314 = []
htrc86 = []
htrc6 = []
volumes6=["uc2.ark+=13960=t5w66bs1h",
"uc2.ark+=13960=t6057f659",
"uc2.ark+=13960=t74t6gs0m",
"uc2.ark+=13960=t0ht2h954",
"uc2.ark+=13960=t05x26n0d",
"uc2.ark+=13960=t5p84550z"]
with open('htrc86.json') as jsonfile:
data = json.load(jsonfile)
volumes86 = data.keys()
with open('../data/csv/htrc_lcco.csv', 'rb') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['id'] in volumes6:
htrc6.append({'id' : row['id'],
'lccn' : row['id2'],
'full_call' : row['full_call'],
'collection' : 'htrc6'})
elif row['id'] in volumes86:
htrc86.append({'id' : row['id'],
'lccn' : row['id2'],
'full_call' : row['full_call'],
'collection' : 'htrc86'})
else:
htrc1314.append({'id' : row['id'],
'lccn' : row['id2'],
'full_call' : row['full_call'],
'collection': 'htrc1314'})
print len(htrc1314), len(htrc86), len(htrc6)
with open('../data/htrc/all.csv', 'w') as newfile:
writer = csv.DictWriter(newfile, ['collection', 'id', 'lccn'], extrasaction='ignore')
for row in htrc6:
writer.writerow(row)
for row in htrc86:
writer.writerow(row)
| [
"[email protected]"
]
| |
7810e6d0ddb5c42ab6ac22acf76b020cae5fbfe5 | 4d6975caece0acdc793a41e8bc6d700d8c2fec9a | /hackerrank/stacks_and_queues/minMaxRiddle.py | 1213aa229e6ad165d187dd216a80db0c7da76999 | []
| no_license | guiconti/workout | 36a3923f2381d6e7023e127100409b3a2e7e4ccb | 5162d14cd64b720351eb30161283e8727cfcf376 | refs/heads/master | 2021-08-03T10:32:02.108714 | 2021-07-26T04:38:14 | 2021-07-26T04:38:14 | 221,025,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the riddle function below.
def riddle(arr):
size = 1
mxwindows = []
    # try every window size from 1 up to len(arr)
while size <= len(arr):
windows = []
        # slide a window of the current size across the array
for i in range(len(arr) - size + 1):
s = 0
window = []
# make a list - size of s
while s < size:
window.append(arr[i + s])
s += 1
            # append the minimum of this window to the windows list
windows.append(min(window))
size += 1
# get max from all mins of windows
mxwindows.append(max(windows))
return mxwindows
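# A minimal O(n) alternative sketch (not called by the main block below): for each
# element, a monotonic stack finds the widest window in which it is the minimum,
# then suffix maxima give the best minimum for every window size.
def riddle_stack(arr):
    n = len(arr)
    left = [-1] * n   # index of the previous strictly smaller element
    right = [n] * n   # index of the next smaller-or-equal element
    stack = []
    for i, v in enumerate(arr):
        while stack and arr[stack[-1]] >= v:
            right[stack.pop()] = i
        left[i] = stack[-1] if stack else -1
        stack.append(i)
    best = [0] * (n + 1)
    for i, v in enumerate(arr):
        span = right[i] - left[i] - 1  # widest window where v is the minimum
        best[span] = max(best[span], v)
    for size in range(n - 1, 0, -1):   # a wider window's best also fits smaller sizes
        best[size] = max(best[size], best[size + 1])
    return best[1:]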
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
arr = list(map(int, input().rstrip().split()))
res = riddle(arr)
fptr.write(' '.join(map(str, res)))
fptr.write('\n')
fptr.close()
| [
"[email protected]"
]
| |
94e38d57514d665ad47c7511ea77607377e6c50f | 9962f123178e8ecd13ab806957c759092f8e4dea | /runner | 4ca1228feec61ade89bed3167b63eae599d86ff9 | []
| no_license | mrcleanspleen/Otto | b1a40af241459a00d5b9c43fc3d94401940cd835 | ca5662bd57a672f76eb52b9138892ec6658a9aa2 | refs/heads/master | 2021-01-01T17:40:48.157654 | 2017-08-06T22:52:54 | 2017-08-06T22:52:54 | 98,131,827 | 0 | 0 | null | 2017-07-23T23:34:27 | 2017-07-23T23:34:27 | null | UTF-8 | Python | false | false | 308 | #!/usr/bin/env python3
import os, sys
if len(sys.argv) > 1:
message = ' '.join(sys.argv[1:])
else:
message = input('message > ')
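# Note: message is interpolated into the shell command below unescaped;
# shlex.quote(message) would avoid quoting and injection problems.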
os.system("cd ~/go/src/Otto;go run main.go \"{}|~|{}|~|{}|~|{}\"".format(message,"someone","iMessage;+;chat508017006480766416","/Users/Peter/go/src/Otto/settings.json"))
| [
"[email protected]"
]
| ||
1686c97446c59e59d5ac28c92c003dd5acb9e0d9 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2692487_0/Python/jessethegame/jam.py | 1d7b96443962d2c3ac3bc0d727483d6e97400d75 | []
| no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | import sys
def run(func):
with open(sys.argv[1]) as handle:
count = int(handle.readline())
for x in range(count):
yield func(handle)
def output(data):
return '\n'.join('Case #{}: {}'.format(*pair) for pair in enumerate(data, start=1))
def test(func):
print output(run(func))
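# func solves one case greedily: absorb the smallest remaining mote whenever the
# current size A allows it; otherwise either add a mote of size A-1 (A += A - 1)
# or fall back to deleting everything left, keeping whichever costs fewer operations.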
def func(handle):
A, N = map(int, handle.readline().split(' '))
S = map(int, handle.readline().split(' '))
ss = sorted(S)
ops = 0
eat = 0
stk = 0
last = len(ss)
while ss:
if A == 1:
return last
if A > ss[0]:
A += ss.pop(0)
eat += 1
stk = max(stk - 1, 0)
if not stk:
last = ops + len(ss)
else:
ops += 1
stk += 1
A += A - 1
return min(last, ops)
test(func)
| [
"[email protected]"
]
| |
f3fd5b33b3f748d30f2c5d9c8263f923560685a8 | 728e9c50fa2c1d71f1c3b40185646ef9fcafa632 | /Tests/Crawl.py | 26775c3c89265e86b899d60d2fe24ab1de8ee8c2 | []
| no_license | ChethanaSR/Python_SDK | 7d9d8bc16ae092f695cf79c43d8d5b12e37ba193 | 524b17a70c91c58a591fe9a70f81e3f8b513546e | refs/heads/master | 2020-07-28T00:46:35.691339 | 2019-09-18T08:39:36 | 2019-09-18T08:39:36 | 209,258,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,782 | py | import os
import openpyxl
import time
import dicttoxml
from datetime import datetime
from xmldiff import main
import pandas
import csv
import xml.etree.ElementTree as et
from dateutil.parser import parse
def xml_csv_comp_filter_for_timepoint():
#csv_file = "C:\\Users\\rchethana\\Documents\\SDK_Automation\\XML-TestData\\Keyword_SeriesID\\386599657,210461702,117868108,384678537,118117308.csv"
xml_file = ":\\Users\\rchethana\\Documents\\SDK_Automation\\XML-ActualData\\Keyword_SeriesID\\2019-05-28,12-23-09 PM\\2019-05-28,12-23-09 PM.xml"
series_id = []
tree = et.parse( xml_file )
root = tree.getroot()
series = root.findall( ".//entity_id" )
for id in range (len(series)):
series_id.append(series[id].text)
print series_id
xml_csv_comp_filter_for_timepoint()
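# The commented-out block below was meant to compare the XML time points against
# the CSV test data; as committed, only the entity_id collection above actually runs.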
# verifyoption = "No. of Obs"
# series = root.findall( ".//entity_id" )
#
#
#
#
# series_date =[]
# series_value = []
# series_id_cvs = []
# I = 1
# for id in range (len(series)):
# filterdate = []
# filetrvalue = []
# date = root.findall( ".//item[" +str(I)+ "]/time_points/item/date" )
# value = root.findall( ".//item["+str(I)+"]/time_points/item/value" )
# year_in_date = datetime.strptime( date[id].text, '%Y-%m-%d' ).year
# series_id.append( series[id].text )
# with open(csv_file ) as csvfile:
# readCSV = csv.reader( csvfile, delimiter=',' )
# for row in readCSV:
# if row[0] == "Series ID":
# for num in range( 1, len( series ) + 1 ): series_id_cvs.append( row[num] )
#
# if row[0]== str(year_in_date) and series_id_cvs[id]== series[id].text:
# print value[id].text | [
"[email protected]"
]
| |
c45bf780b6a96b8e03e6aa3578470e50da49f78a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03062/s756793407.py | 4429176fdd3b72784590ffe51d12a6c3b81dbeca | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | N = int(input())
A = list(map(int,input().split()))
minus_cnt = 0
abs_sum = 0
min_abs_val = 1000000000
for i in range(N):
val = A[i]
abs_val = abs(val)
abs_sum += abs_val
if abs_val < min_abs_val:
min_abs_val = abs_val
if val < 0:
minus_cnt += 1
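# The sum of |a_i| is achievable when the number of negatives is even; an odd
# count forces one element to stay negative, so 2 * min|a_i| is subtracted.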
B_max_sum = abs_sum
if minus_cnt % 2 != 0:
B_max_sum -= (min_abs_val * 2)
print(B_max_sum)
| [
"[email protected]"
]
| |
c7469e01212ca09d05235f188a78d379f4e444ea | 14a77f28e43f078a0737b2dbaf682210739ba6c0 | /decisions/factories/event.py | 85bb70e33993b14609466b85a83255812030ddd6 | []
| no_license | tuomas777/paatos | f58e15ee6c5c7bd87d6e015f6bf9eae020aba77b | 79fe20768f6dd6c7ed8bae22eaf20961bbba8fa7 | refs/heads/master | 2020-04-05T22:43:21.254187 | 2016-11-22T07:40:58 | 2016-11-22T07:40:58 | 68,003,356 | 0 | 0 | null | 2016-11-24T14:01:26 | 2016-09-12T11:30:31 | Python | UTF-8 | Python | false | false | 315 | py | import factory
from faker import Faker
from decisions.models import Event
fake = Faker()
fake.seed(7)
class EventFactory(factory.django.DjangoModelFactory):
class Meta:
model = Event
name = fake.text(max_nb_chars=20)
description = fake.paragraph(nb_sentences=5)
start_date = fake.date()
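    # Note: fake.text(), fake.paragraph() and fake.date() run once when the class
    # body is evaluated, so every EventFactory instance shares the same values;
    # factory.Faker or factory.LazyAttribute declarations would vary them per instance.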
| [
"[email protected]"
]
| |
f7c1891ca323b5da4d72a7b2ea564c4f7f9cb834 | 6a44e772dfdec969f5e2af430f0bf3a35eb73c4e | /src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/StorageManagementClient/storagemanagementclient/models/usage_name.py | a1f3796d70a9efdf4d82846aaa69862e31effa9d | [
"MIT"
]
| permissive | lurumad/autorest | ecc4b1de223e4b4cdd226a3cf922a6940dbddd34 | fef0c4c9e7fdb5c851bdb095d5a2ff93572d452e | refs/heads/master | 2021-01-12T11:07:39.298341 | 2016-11-04T03:12:08 | 2016-11-04T03:12:08 | 72,835,570 | 1 | 0 | null | 2016-11-04T09:58:50 | 2016-11-04T09:58:50 | null | UTF-8 | Python | false | false | 1,088 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class UsageName(Model):
"""The Usage Names.
:param value: Gets a string describing the resource name.
:type value: str
:param localized_value: Gets a localized string describing the resource
name.
:type localized_value: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(self, value=None, localized_value=None):
self.value = value
self.localized_value = localized_value
| [
"[email protected]"
]
| |
a9055249ac9757566ae617d69563511b808fd3eb | 8e2404c7bcfd28329bed789839192b2c4e85ea1b | /Hackerrank/QueuesUsingTwoStacks.py | 9b00afd7b1e775d9d4a9da9534e24dd79876baf2 | []
| no_license | Pabitra-26/Problem-Solved | 408bd51bbffc69f8c5e1def92797c2e6f027f91d | c27de1dd6c4ad14444fa5ee911a16186c200a7f9 | refs/heads/master | 2023-07-30T16:51:28.062349 | 2021-09-27T06:06:54 | 2021-09-27T06:06:54 | 269,935,039 | 2 | 0 | null | 2021-09-27T06:06:55 | 2020-06-06T09:39:33 | Python | UTF-8 | Python | false | false | 1,000 | py | # Problem name: Queues using two stacks
# Description: A queue is an abstract data type that maintains the order in which elements were added to it, allowing the oldest elements to be removed from the front and new elements to be added to the rear. This is called a First-In-First-Out (FIFO) data structure because the first element added to the queue
# (i.e., the one that has been waiting the longest) is always the first one to be removed.
# Strategy: a single Python list can stand in for the two stacks: append() enqueues and pop(0) dequeues.
# pop(0) is O(n), so collections.deque (or the two-stack trick sketched below) would be faster.
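# For reference, a sketch of the two-stack approach the title refers to (not used
# by the solution below): push onto an inbox stack and refill the outbox from it
# only when the outbox is empty, so each element moves at most once (amortized O(1)).
class TwoStackQueue(object):
    def __init__(self):
        self.inbox = []
        self.outbox = []
    def enqueue(self, element):
        self.inbox.append(element)
    def _shift(self):
        if not self.outbox:
            while self.inbox:
                self.outbox.append(self.inbox.pop())
    def dequeue(self):
        self._shift()
        return self.outbox.pop()
    def front(self):
        self._shift()
        return self.outbox[-1]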
class Queue(object):
def __init__(self):
self.S1=[]
def enQueue(self,element):
self.S1.append(element)
def deQueue(self):
self.S1.pop(0)
def print_q(self):
print(self.S1[0])
if __name__=="__main__":
n=int(input())
que=Queue()
for i in range(n):
l=list(map(int,input().split()))
if(l[0]==1):
que.enQueue(l[1])
elif(l[0]==2):
que.deQueue()
else:
que.print_q()
| [
"[email protected]"
]
| |
1a1e4043f7335ec73f9c0f3e8eae6e2840b83d9e | 9af2ae16b962c5dbed8497df9513f4f39dc4a3c4 | /muke_class/chp2/a5_keras_dnn.py | 6b2757395bffe590dce2acc95d0470020230563b | []
| no_license | Freshield/LEARN_Tensorflow2 | 4170d2158c2e73485fcc7828b09ea96834efe838 | e2e139525aeac504949929330ef4b58cf91816c6 | refs/heads/master | 2021-07-04T05:38:47.709291 | 2020-11-02T09:18:19 | 2020-11-02T09:18:19 | 194,585,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,844 | py | #coding=utf-8
"""
@Author: Freshield
@License: (C) Copyright 2018, BEIJING LINKING MEDICAL TECHNOLOGY CO., LTD.
@Contact: [email protected]
@File: a5_keras_dnn.py
@Time: 2019-04-07 15:17
@Last_update: 2019-04-07 15:17
@Desc: None
@==============================================@
@ _____ _ _ _ _ @
@ | __|___ ___ ___| |_|_|___| |_| | @
@ | __| _| -_|_ -| | | -_| | . | @
@ |__| |_| |___|___|_|_|_|___|_|___| @
@ Freshield @
@==============================================@
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from tensorflow.python import keras
print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
print(module.__name__, module.__version__)
fashion_mnist = keras.datasets.fashion_mnist
(x_train_all, y_train_all), (x_test, y_test) = fashion_mnist.load_data()
x_valid, x_train = x_train_all[:5000], x_train_all[5000:]
y_valid, y_train = y_train_all[:5000], y_train_all[5000:]
print(x_valid.shape, y_valid.shape)
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(
x_train.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28)
x_valid_scaled = scaler.transform(
x_valid.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28)
x_test_scaled = scaler.transform(
x_test.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28)
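# StandardScaler expects 2-D input, so each image set is flattened to a single
# column for fitting/transforming and then reshaped back to 28x28.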
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=x_train_scaled.shape[1:]))
for i in range(20):
model.add(keras.layers.Dense(100, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
print(model.summary())
logdir = 'data/dnn-callbacks'
if not os.path.exists(logdir):
os.mkdir(logdir)
output_model_file = os.path.join(logdir, 'fashion_minst_model.h5')
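# Callbacks: TensorBoard logging, checkpointing of the best model seen so far,
# and early stopping once 5 epochs pass with less than 1e-3 improvement.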
callbacks = [
keras.callbacks.TensorBoard(logdir),
keras.callbacks.ModelCheckpoint(output_model_file,
save_best_only=True),
keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3)
]
history = model.fit(x_train_scaled, y_train, epochs=10,
validation_data=(x_valid_scaled, y_valid),
callbacks=callbacks)
def plot_learning_curves(history):
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 3)
plt.show()
plot_learning_curves(history)
model.evaluate(x_test_scaled, y_test)
| [
"[email protected]"
]
| |
32507525464d06c8ae7fcde041d434d4c05239d5 | 2154d0221e29a86850a1b83e4302f6e3e3f7fa5d | /mock_example/MySQLdb_fun.py | b9a062f6ef8f96908705df3b76dba64e7143ce26 | []
| no_license | aaqqxx/simple_for_life | 3b8805c6791da6a3a7f42c069dc1ee7d2b8d3649 | 9ad6d61a56216d04250cd89aeaeda63c11942d0a | refs/heads/master | 2020-04-04T09:18:59.396540 | 2015-04-28T11:22:55 | 2015-04-28T11:22:55 | 20,906,518 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | __author__ = 'XingHua'
import MySQLdb
def Foo():
conn = MySQLdb.connect(host='localhost',
user='root', passwd='abc123', db='test')
cursor = conn.cursor()
cursor.execute('SELECT * FROM people')
id, name = cursor.fetchone()
print id, name
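# Note: neither the cursor nor the connection is closed; a try/finally or
# contextlib.closing wrapper would release them cleanly.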
if __name__ == '__main__':
Foo() | [
"[email protected]"
]
| |
7a362af386f8e7f440301bda660837fbb6449fd7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04000/s468061454.py | c59bfc6f1c8081389083fc960c22dd4c3ba08cda | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | import sys
input=sys.stdin.readline
h,w,n = map(int, input().split())
l=[list(map(int,input().split())) for i in range(n)]
from collections import defaultdict
d = defaultdict(int)
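# Every painted cell contributes to each fully-inside 3x3 window centred at
# (x+i, y+j); d counts black cells per window, and windows that are never touched
# remain at zero and are accounted for in ans[0] below.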
for tmp in l:
y=tmp[0]-1
x=tmp[1]-1
for i in [-1,0,1]:
for j in [-1,0,1]:
if 1<=x+i<w-1 and 1<=y+j <h-1:
s = str(x+i) + "_" + str(y+j)
d[s]+=1
import collections
lis = list(d.values())
c = collections.Counter(lis)
ans=[0]*10
for itm in c.items():
ans[itm[0]]=itm[1]
ans[0]=(h-2)*(w-2)-sum(ans)
for a in ans:
print(a) | [
"[email protected]"
]
| |
050fdaa4b74799a5ef129833fa9542b5f9cd5b3f | 479ad544171d18a4da1bd109a44fa5c42e075a17 | /fantom_util/fantom_util/graph_tools/named_entity_merging.py | 4906e466e823da456fda67718befeee0e58b34ec | []
| no_license | kth-social-robotics/fantombot | c329a7ec716dff4e74ad2420c46239705d285647 | 1fef1bc98a6aab2a6534e74f7d464b758162fe87 | refs/heads/master | 2020-06-03T00:29:36.239228 | 2019-08-27T13:41:51 | 2019-08-27T13:41:51 | 191,361,328 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | # want to look through utterances of the children of this node, match them with movies in named entity model and
# merge those that are in there and don't have any children
from fantom_util.database import db_session
from fantom_util.database.models import Node
from fantom_util.feature_extraction.named_entities import named_entities_model
from sqlalchemy.orm import joinedload
# For testing NODE_ID = 649613
nem = named_entities_model()
def named_entity_merge(node_id):
nodes = (
db_session.query(Node)
.options(joinedload(Node.utterances), joinedload(Node.node_utterances))
.filter(Node.parent_id == node_id)
.all()
)
to_merge = []
categories = ["movies", "musicians", "bands"]
for node in nodes:
done = False
if not node.children:
for utterance in node.utterances:
utterance_text = utterance.utterance_text
# print(utterance_text)
for category in categories:
for item in nem[category]:
if f" {item.lower()} " in f" {utterance_text} ":
print(f"found {item} in {utterance_text}")
to_merge.append(node)
done = True
if done:
break
if done:
break
if done:
break
return to_merge
| [
"[email protected]"
]
| |
5738a33d087f1c4d0d34dd5308dddb244d93680c | 7246faf9a222269ce2612613f58dc5ff19091f10 | /baekjoon/1000~2999/1764_듣보잡.py | 5de8248dba6fbb77fcee6925ba8bf1bb62089cef | []
| no_license | gusdn3477/Algorithm_Study | 87a2eb72a8488d9263a86db70dadc7944434d41d | 3fefe1dcb40122157845ffc542f41cb097711cc8 | refs/heads/main | 2023-08-30T12:18:21.412945 | 2021-09-28T13:00:11 | 2021-09-28T13:00:11 | 308,364,230 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | N, M = map(int, input().split())
arr = []
arr2 = []
for i in range(N):
arr.append(input())
for i in range(M):
arr2.append(input())
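# The answer is simply the intersection of the two name lists, printed in sorted
# order with its size first.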
a = list(set(arr) & set(arr2))
a.sort()
print(len(a))
for i in a:
print(i) | [
"[email protected]"
]
| |
305d1679552a9d89dedd2a734ef75295fd35cf55 | e811c41caa55559d3b482f26c31fcef02ec66138 | /venv/Lib/site-packages/sqlalchemy/dialects/sqlite/base.py | 64bcaade781770128e54aec7ac201b10387378dc | [
"MIT"
]
| permissive | 1SouravGhosh/POC_REPO | 929ea865d60a51597966ffcfc4a7a3a350a00f54 | e486d9a1fe0e1215f24bac3aaf97517cda21a066 | refs/heads/master | 2022-11-01T09:53:56.443500 | 2019-02-17T16:21:48 | 2019-02-17T16:21:48 | 171,133,391 | 0 | 1 | MIT | 2022-10-22T04:17:54 | 2019-02-17T14:45:39 | Python | UTF-8 | Python | false | false | 62,052 | py | # sqlite/base.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: sqlite
:name: SQLite
.. _sqlite_datetime:
Date and Time Types
-------------------
SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does
not provide out of the box functionality for translating values between Python
`datetime` objects and a SQLite-supported format. SQLAlchemy's own
:class:`~sqlalchemy.types.DateTime` and related types provide date formatting
and parsing functionality when SQlite is used. The implementation classes are
:class:`~.sqlite.DATETIME`, :class:`~.sqlite.DATE` and :class:`~.sqlite.TIME`.
These types represent dates and times as ISO formatted strings, which also
nicely support ordering. There's no reliance on typical "libc" internals for
these functions so historical dates are fully supported.
Ensuring Text affinity
^^^^^^^^^^^^^^^^^^^^^^
The DDL rendered for these types is the standard ``DATE``, ``TIME``
and ``DATETIME`` indicators. However, custom storage formats can also be
applied to these types. When the
storage format is detected as containing no alpha characters, the DDL for
these types is rendered as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``,
so that the column continues to have textual affinity.
.. seealso::
`Type Affinity <http://www.sqlite.org/datatype3.html#affinity>`_ -
in the SQLite documentation
.. _sqlite_autoincrement:
SQLite Auto Incrementing Behavior
----------------------------------
Background on SQLite's autoincrement is at: http://sqlite.org/autoinc.html
Key concepts:
* SQLite has an implicit "auto increment" feature that takes place for any
non-composite primary-key column that is specifically created using
"INTEGER PRIMARY KEY" for the type + primary key.
* SQLite also has an explicit "AUTOINCREMENT" keyword, that is **not**
equivalent to the implicit autoincrement feature; this keyword is not
recommended for general use. SQLAlchemy does not render this keyword
unless a special SQLite-specific directive is used (see below). However,
it still requires that the column's type is named "INTEGER".
Using the AUTOINCREMENT Keyword
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To specifically render the AUTOINCREMENT keyword on the primary key column
when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table
construct::
Table('sometable', metadata,
Column('id', Integer, primary_key=True),
sqlite_autoincrement=True)
Allowing autoincrement behavior SQLAlchemy types other than Integer/INTEGER
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
SQLite's typing model is based on naming conventions. Among other things, this
means that any type name which contains the substring ``"INT"`` will be
determined to be of "integer affinity". A type named ``"BIGINT"``,
``"SPECIAL_INT"`` or even ``"XYZINTQPR"``, will be considered by SQLite to be
of "integer" affinity. However, **the SQLite autoincrement feature, whether
implicitly or explicitly enabled, requires that the name of the column's type
is exactly the string "INTEGER"**. Therefore, if an application uses a type
like :class:`.BigInteger` for a primary key, on SQLite this type will need to
be rendered as the name ``"INTEGER"`` when emitting the initial ``CREATE
TABLE`` statement in order for the autoincrement behavior to be available.
One approach to achieve this is to use :class:`.Integer` on SQLite
only using :meth:`.TypeEngine.with_variant`::
table = Table(
"my_table", metadata,
Column("id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True)
)
Another is to use a subclass of :class:`.BigInteger` that overrides its DDL
name to be ``INTEGER`` when compiled against SQLite::
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
class SLBigInteger(BigInteger):
pass
@compiles(SLBigInteger, 'sqlite')
def bi_c(element, compiler, **kw):
return "INTEGER"
@compiles(SLBigInteger)
def bi_c(element, compiler, **kw):
return compiler.visit_BIGINT(element, **kw)
table = Table(
"my_table", metadata,
Column("id", SLBigInteger(), primary_key=True)
)
.. seealso::
:meth:`.TypeEngine.with_variant`
:ref:`sqlalchemy.ext.compiler_toplevel`
`Datatypes In SQLite Version 3 <http://sqlite.org/datatype3.html>`_
.. _sqlite_concurrency:
Database Locking Behavior / Concurrency
---------------------------------------
SQLite is not designed for a high level of write concurrency. The database
itself, being a file, is locked completely during write operations within
transactions, meaning exactly one "connection" (in reality a file handle)
has exclusive access to the database during this period - all other
"connections" will be blocked during this time.
The Python DBAPI specification also calls for a connection model that is
always in a transaction; there is no ``connection.begin()`` method,
only ``connection.commit()`` and ``connection.rollback()``, upon which a
new transaction is to be begun immediately. This may seem to imply
that the SQLite driver would in theory allow only a single filehandle on a
particular database file at any time; however, there are several
factors both within SQlite itself as well as within the pysqlite driver
which loosen this restriction significantly.
However, no matter what locking modes are used, SQLite will still always
lock the database file once a transaction is started and DML (e.g. INSERT,
UPDATE, DELETE) has at least been emitted, and this will block
other transactions at least at the point that they also attempt to emit DML.
By default, the length of time on this block is very short before it times out
with an error.
This behavior becomes more critical when used in conjunction with the
SQLAlchemy ORM. SQLAlchemy's :class:`.Session` object by default runs
within a transaction, and with its autoflush model, may emit DML preceding
any SELECT statement. This may lead to a SQLite database that locks
more quickly than is expected. The locking mode of SQLite and the pysqlite
driver can be manipulated to some degree, however it should be noted that
achieving a high degree of write-concurrency with SQLite is a losing battle.
For more information on SQLite's lack of write concurrency by design, please
see
`Situations Where Another RDBMS May Work Better - High Concurrency
<http://www.sqlite.org/whentouse.html>`_ near the bottom of the page.
The following subsections introduce areas that are impacted by SQLite's
file-based architecture and additionally will usually require workarounds to
work when using the pysqlite driver.
.. _sqlite_isolation_level:
Transaction Isolation Level
----------------------------
SQLite supports "transaction isolation" in a non-standard way, along two
axes. One is that of the
`PRAGMA read_uncommitted <http://www.sqlite.org/pragma.html#pragma_read_uncommitted>`_
instruction. This setting can essentially switch SQLite between its
default mode of ``SERIALIZABLE`` isolation, and a "dirty read" isolation
mode normally referred to as ``READ UNCOMMITTED``.
SQLAlchemy ties into this PRAGMA statement using the
:paramref:`.create_engine.isolation_level` parameter of :func:`.create_engine`.
Valid values for this parameter when used with SQLite are ``"SERIALIZABLE"``
and ``"READ UNCOMMITTED"`` corresponding to a value of 0 and 1, respectively.
SQLite defaults to ``SERIALIZABLE``, however its behavior is impacted by
the pysqlite driver's default behavior.
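As an illustrative sketch (the database path here is arbitrary), an engine
using the non-default mode can be configured as::
    from sqlalchemy import create_engine
    engine = create_engine(
        "sqlite:///some.db", isolation_level="READ UNCOMMITTED")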
The other axis along which SQLite's transactional locking is impacted is
via the nature of the ``BEGIN`` statement used. The three varieties
are "deferred", "immediate", and "exclusive", as described at
`BEGIN TRANSACTION <http://sqlite.org/lang_transaction.html>`_. A straight
``BEGIN`` statement uses the "deferred" mode, where the database file is
not locked until the first read or write operation, and read access remains
open to other transactions until the first write operation. But again,
it is critical to note that the pysqlite driver interferes with this behavior
by *not even emitting BEGIN* until the first write operation.
.. warning::
SQLite's transactional scope is impacted by unresolved
issues in the pysqlite driver, which defers BEGIN statements to a greater
degree than is often feasible. See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
SAVEPOINT Support
----------------------------
SQLite supports SAVEPOINTs, which only function once a transaction is
begun. SQLAlchemy's SAVEPOINT support is available using the
:meth:`.Connection.begin_nested` method at the Core level, and
:meth:`.Session.begin_nested` at the ORM level. However, SAVEPOINTs
won't work at all with pysqlite unless workarounds are taken.
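As an illustrative Core-level sketch (``some_table`` is a placeholder, and the
pysqlite workarounds referenced below are assumed to be in place)::
    with engine.begin() as conn:
        savepoint = conn.begin_nested()
        conn.execute(some_table.insert(), {"data": 1})
        savepoint.rollback()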
.. warning::
SQLite's SAVEPOINT feature is impacted by unresolved
issues in the pysqlite driver, which defers BEGIN statements to a greater
degree than is often feasible. See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
Transactional DDL
----------------------------
The SQLite database supports transactional :term:`DDL` as well.
In this case, the pysqlite driver is not only failing to start transactions,
it also ends any existing transaction when DDL is detected, so again,
workarounds are required.
.. warning::
SQLite's transactional DDL is impacted by unresolved issues
in the pysqlite driver, which fails to emit BEGIN and additionally
forces a COMMIT to cancel any transaction when DDL is encountered.
See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
.. _sqlite_foreign_keys:
Foreign Key Support
-------------------
SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables,
however by default these constraints have no effect on the operation of the
table.
Constraint checking on SQLite has three prerequisites:
* At least version 3.6.19 of SQLite must be in use
* The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
or SQLITE_OMIT_TRIGGER symbols enabled.
* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all
connections before use.
SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for
new connections through the usage of events::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
.. warning::
When SQLite foreign keys are enabled, it is **not possible**
to emit CREATE or DROP statements for tables that contain
mutually-dependent foreign key constraints;
to emit the DDL for these tables requires that ALTER TABLE be used to
create or drop these constraints separately, for which SQLite has
no support.
.. seealso::
`SQLite Foreign Key Support <http://www.sqlite.org/foreignkeys.html>`_
- on the SQLite web site.
:ref:`event_toplevel` - SQLAlchemy event API.
:ref:`use_alter` - more information on SQLAlchemy's facilities for handling
mutually-dependent foreign key constraints.
.. _sqlite_type_reflection:
Type Reflection
---------------
SQLite types are unlike those of most other database backends, in that
the string name of the type usually does not correspond to a "type" in a
one-to-one fashion. Instead, SQLite links per-column typing behavior
to one of five so-called "type affinities" based on a string matching
pattern for the type.
SQLAlchemy's reflection process, when inspecting types, uses a simple
lookup table to link the keywords returned to provided SQLAlchemy types.
This lookup table is present within the SQLite dialect as it is for all
other dialects. However, the SQLite dialect has a different "fallback"
routine for when a particular type name is not located in the lookup map;
it instead implements the SQLite "type affinity" scheme located at
http://www.sqlite.org/datatype3.html section 2.1.
The provided typemap will make direct associations from an exact string
name match for the following types:
:class:`~.types.BIGINT`, :class:`~.types.BLOB`,
:class:`~.types.BOOLEAN`, :class:`~.types.BOOLEAN`,
:class:`~.types.CHAR`, :class:`~.types.DATE`,
:class:`~.types.DATETIME`, :class:`~.types.FLOAT`,
:class:`~.types.DECIMAL`, :class:`~.types.FLOAT`,
:class:`~.types.INTEGER`, :class:`~.types.INTEGER`,
:class:`~.types.NUMERIC`, :class:`~.types.REAL`,
:class:`~.types.SMALLINT`, :class:`~.types.TEXT`,
:class:`~.types.TIME`, :class:`~.types.TIMESTAMP`,
:class:`~.types.VARCHAR`, :class:`~.types.NVARCHAR`,
:class:`~.types.NCHAR`
When a type name does not match one of the above types, the "type affinity"
lookup is used instead:
* :class:`~.types.INTEGER` is returned if the type name includes the
string ``INT``
* :class:`~.types.TEXT` is returned if the type name includes the
string ``CHAR``, ``CLOB`` or ``TEXT``
* :class:`~.types.NullType` is returned if the type name includes the
string ``BLOB``
* :class:`~.types.REAL` is returned if the type name includes the string
``REAL``, ``FLOA`` or ``DOUB``.
* Otherwise, the :class:`~.types.NUMERIC` type is used.
.. versionadded:: 0.9.3 Support for SQLite type affinity rules when reflecting
columns.
.. _sqlite_partial_index:
Partial Indexes
---------------
A partial index, e.g. one which uses a WHERE clause, can be specified
with the DDL system using the argument ``sqlite_where``::
tbl = Table('testtbl', m, Column('data', Integer))
idx = Index('test_idx1', tbl.c.data,
sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10))
The index will be rendered at create time as::
CREATE INDEX test_idx1 ON testtbl (data)
WHERE data > 5 AND data < 10
.. versionadded:: 0.9.9
.. _sqlite_dotted_column_names:
Dotted Column Names
-------------------
Using table or column names that explicitly have periods in them is
**not recommended**. While this is generally a bad idea for relational
databases in general, as the dot is a syntactically significant character,
the SQLite driver up until version **3.10.0** of SQLite has a bug which
requires that SQLAlchemy filter out these dots in result sets.
.. versionchanged:: 1.1
The following SQLite issue has been resolved as of version 3.10.0
of SQLite. SQLAlchemy as of **1.1** automatically disables its internal
workarounds based on detection of this version.
The bug, entirely outside of SQLAlchemy, can be illustrated thusly::
import sqlite3
assert sqlite3.sqlite_version_info < (3, 10, 0), "bug is fixed in this version"
conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("create table x (a integer, b integer)")
cursor.execute("insert into x (a, b) values (1, 1)")
cursor.execute("insert into x (a, b) values (2, 2)")
cursor.execute("select x.a, x.b from x")
assert [c[0] for c in cursor.description] == ['a', 'b']
cursor.execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert [c[0] for c in cursor.description] == ['a', 'b'], \
[c[0] for c in cursor.description]
The second assertion fails::
Traceback (most recent call last):
File "test.py", line 19, in <module>
[c[0] for c in cursor.description]
AssertionError: ['x.a', 'x.b']
Where above, the driver incorrectly reports the names of the columns
including the name of the table, which is entirely inconsistent vs.
when the UNION is not present.
SQLAlchemy relies upon column names being predictable in how they match
to the original statement, so the SQLAlchemy dialect has no choice but
to filter these out::
from sqlalchemy import create_engine
eng = create_engine("sqlite://")
conn = eng.connect()
conn.execute("create table x (a integer, b integer)")
conn.execute("insert into x (a, b) values (1, 1)")
conn.execute("insert into x (a, b) values (2, 2)")
result = conn.execute("select x.a, x.b from x")
assert result.keys() == ["a", "b"]
result = conn.execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert result.keys() == ["a", "b"]
Note that above, even though SQLAlchemy filters out the dots, *both
names are still addressable*::
>>> row = result.first()
>>> row["a"]
1
>>> row["x.a"]
1
>>> row["b"]
1
>>> row["x.b"]
1
Therefore, the workaround applied by SQLAlchemy only impacts
:meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()` in the public API. In
the very specific case where an application is forced to use column names that
contain dots, and the functionality of :meth:`.ResultProxy.keys` and
:meth:`.RowProxy.keys()` is required to return these dotted names unmodified,
the ``sqlite_raw_colnames`` execution option may be provided, either on a
per-:class:`.Connection` basis::
result = conn.execution_options(sqlite_raw_colnames=True).execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert result.keys() == ["x.a", "x.b"]
or on a per-:class:`.Engine` basis::
engine = create_engine("sqlite://", execution_options={"sqlite_raw_colnames": True})
When using the per-:class:`.Engine` execution option, note that
**Core and ORM queries that use UNION may not function properly**.
""" # noqa
import datetime
import re
from ... import exc
from ... import processors
from ... import schema as sa_schema
from ... import sql
from ... import types as sqltypes
from ... import util
from ...engine import default
from ...engine import reflection
from ...sql import compiler
from ...types import BLOB # noqa
from ...types import BOOLEAN # noqa
from ...types import CHAR # noqa
from ...types import DECIMAL # noqa
from ...types import FLOAT # noqa
from ...types import INTEGER # noqa
from ...types import NUMERIC # noqa
from ...types import REAL # noqa
from ...types import SMALLINT # noqa
from ...types import TEXT # noqa
from ...types import TIMESTAMP # noqa
from ...types import VARCHAR # noqa
class _DateTimeMixin(object):
_reg = None
_storage_format = None
def __init__(self, storage_format=None, regexp=None, **kw):
super(_DateTimeMixin, self).__init__(**kw)
if regexp is not None:
self._reg = re.compile(regexp)
if storage_format is not None:
self._storage_format = storage_format
@property
def format_is_text_affinity(self):
"""return True if the storage format will automatically imply
a TEXT affinity.
If the storage format contains no non-numeric characters,
it will imply a NUMERIC storage format on SQLite; in this case,
the type will generate its DDL as DATE_CHAR, DATETIME_CHAR,
TIME_CHAR.
.. versionadded:: 1.0.0
"""
spec = self._storage_format % {
"year": 0,
"month": 0,
"day": 0,
"hour": 0,
"minute": 0,
"second": 0,
"microsecond": 0,
}
return bool(re.search(r"[^0-9]", spec))
def adapt(self, cls, **kw):
if issubclass(cls, _DateTimeMixin):
if self._storage_format:
kw["storage_format"] = self._storage_format
if self._reg:
kw["regexp"] = self._reg
return super(_DateTimeMixin, self).adapt(cls, **kw)
def literal_processor(self, dialect):
bp = self.bind_processor(dialect)
def process(value):
return "'%s'" % bp(value)
return process
class DATETIME(_DateTimeMixin, sqltypes.DateTime):
r"""Represent a Python datetime object in SQLite using a string.
The default string storage format is::
"%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:%(second)02d.%(microsecond)06d"
e.g.::
2011-03-15 12:05:57.10558
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATETIME
dt = DATETIME(storage_format="%(year)04d/%(month)02d/%(day)02d "
"%(hour)02d:%(min)02d:%(second)02d",
regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)"
)
:param storage_format: format string which will be applied to the dict
with keys year, month, day, hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to incoming result
rows. If the regexp contains named groups, the resulting match dict is
applied to the Python datetime() constructor as keyword arguments.
Otherwise, if positional groups are used, the datetime() constructor
is called with positional arguments via
``*map(int, match_obj.groups(0))``.
""" # noqa
_storage_format = (
"%(year)04d-%(month)02d-%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
)
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop("truncate_microseconds", False)
super(DATETIME, self).__init__(*args, **kwargs)
if truncate_microseconds:
assert "storage_format" not in kwargs, (
"You can specify only "
"one of truncate_microseconds or storage_format."
)
assert "regexp" not in kwargs, (
"You can specify only one of "
"truncate_microseconds or regexp."
)
self._storage_format = (
"%(year)04d-%(month)02d-%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d"
)
def bind_processor(self, dialect):
datetime_datetime = datetime.datetime
datetime_date = datetime.date
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_datetime):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
"hour": value.hour,
"minute": value.minute,
"second": value.second,
"microsecond": value.microsecond,
}
elif isinstance(value, datetime_date):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
"hour": 0,
"minute": 0,
"second": 0,
"microsecond": 0,
}
else:
raise TypeError(
"SQLite DateTime type only accepts Python "
"datetime and date objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.datetime
)
else:
return processors.str_to_datetime
class DATE(_DateTimeMixin, sqltypes.Date):
r"""Represent a Python date object in SQLite using a string.
The default string storage format is::
"%(year)04d-%(month)02d-%(day)02d"
e.g.::
2011-03-15
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATE
d = DATE(
storage_format="%(month)02d/%(day)02d/%(year)04d",
regexp=re.compile("(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)")
)
:param storage_format: format string which will be applied to the
dict with keys year, month, and day.
:param regexp: regular expression which will be applied to
incoming result rows. If the regexp contains named groups, the
resulting match dict is applied to the Python date() constructor
as keyword arguments. Otherwise, if positional groups are used, the
date() constructor is called with positional arguments via
``*map(int, match_obj.groups(0))``.
"""
_storage_format = "%(year)04d-%(month)02d-%(day)02d"
def bind_processor(self, dialect):
datetime_date = datetime.date
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_date):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
}
else:
raise TypeError(
"SQLite Date type only accepts Python "
"date objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.date
)
else:
return processors.str_to_date
class TIME(_DateTimeMixin, sqltypes.Time):
r"""Represent a Python time object in SQLite using a string.
The default string storage format is::
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
e.g.::
12:05:57.10558
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import TIME
t = TIME(storage_format="%(hour)02d-%(minute)02d-"
"%(second)02d-%(microsecond)06d",
regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?")
)
:param storage_format: format string which will be applied to the dict
with keys hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to incoming result
rows. If the regexp contains named groups, the resulting match dict is
applied to the Python time() constructor as keyword arguments. Otherwise,
if positional groups are used, the time() constructor is called with
positional arguments via ``*map(int, match_obj.groups(0))``.
"""
_storage_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop("truncate_microseconds", False)
super(TIME, self).__init__(*args, **kwargs)
if truncate_microseconds:
assert "storage_format" not in kwargs, (
"You can specify only "
"one of truncate_microseconds or storage_format."
)
assert "regexp" not in kwargs, (
"You can specify only one of "
"truncate_microseconds or regexp."
)
self._storage_format = "%(hour)02d:%(minute)02d:%(second)02d"
def bind_processor(self, dialect):
datetime_time = datetime.time
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_time):
return format_ % {
"hour": value.hour,
"minute": value.minute,
"second": value.second,
"microsecond": value.microsecond,
}
else:
raise TypeError(
"SQLite Time type only accepts Python "
"time objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.time
)
else:
return processors.str_to_time
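# Illustrative sketch (not part of SQLAlchemy): the effect of the
# ``truncate_microseconds`` flag handled by TIME above; the truncated storage
# format simply omits the microsecond field. The helper name is hypothetical.
def _example_time_truncation():
    full_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
    truncated_format = "%(hour)02d:%(minute)02d:%(second)02d"
    parts = {"hour": 12, "minute": 5, "second": 57, "microsecond": 105580}
    # returns ("12:05:57.105580", "12:05:57")
    return full_format % parts, truncated_format % parts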
colspecs = {
sqltypes.Date: DATE,
sqltypes.DateTime: DATETIME,
sqltypes.Time: TIME,
}
ischema_names = {
"BIGINT": sqltypes.BIGINT,
"BLOB": sqltypes.BLOB,
"BOOL": sqltypes.BOOLEAN,
"BOOLEAN": sqltypes.BOOLEAN,
"CHAR": sqltypes.CHAR,
"DATE": sqltypes.DATE,
"DATE_CHAR": sqltypes.DATE,
"DATETIME": sqltypes.DATETIME,
"DATETIME_CHAR": sqltypes.DATETIME,
"DOUBLE": sqltypes.FLOAT,
"DECIMAL": sqltypes.DECIMAL,
"FLOAT": sqltypes.FLOAT,
"INT": sqltypes.INTEGER,
"INTEGER": sqltypes.INTEGER,
"NUMERIC": sqltypes.NUMERIC,
"REAL": sqltypes.REAL,
"SMALLINT": sqltypes.SMALLINT,
"TEXT": sqltypes.TEXT,
"TIME": sqltypes.TIME,
"TIME_CHAR": sqltypes.TIME,
"TIMESTAMP": sqltypes.TIMESTAMP,
"VARCHAR": sqltypes.VARCHAR,
"NVARCHAR": sqltypes.NVARCHAR,
"NCHAR": sqltypes.NCHAR,
}
class SQLiteCompiler(compiler.SQLCompiler):
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
"month": "%m",
"day": "%d",
"year": "%Y",
"second": "%S",
"hour": "%H",
"doy": "%j",
"minute": "%M",
"epoch": "%s",
"dow": "%w",
"week": "%W",
},
)
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_localtimestamp_func(self, func, **kw):
return 'DATETIME(CURRENT_TIMESTAMP, "localtime")'
def visit_true(self, expr, **kw):
return "1"
def visit_false(self, expr, **kw):
return "0"
def visit_char_length_func(self, fn, **kw):
return "length%s" % self.function_argspec(fn)
def visit_cast(self, cast, **kwargs):
if self.dialect.supports_cast:
return super(SQLiteCompiler, self).visit_cast(cast, **kwargs)
else:
return self.process(cast.clause, **kwargs)
def visit_extract(self, extract, **kw):
try:
return "CAST(STRFTIME('%s', %s) AS INTEGER)" % (
self.extract_map[extract.field],
self.process(extract.expr, **kw),
)
except KeyError:
raise exc.CompileError(
"%s is not a valid extract argument." % extract.field
)
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += "\n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
text += "\n LIMIT " + self.process(sql.literal(-1))
text += " OFFSET " + self.process(select._offset_clause, **kw)
else:
text += " OFFSET " + self.process(sql.literal(0), **kw)
return text
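    # Illustrative note (not part of SQLAlchemy): SQLite accepts OFFSET only
    # together with LIMIT, which is why limit_clause above injects "LIMIT -1"
    # when a query has an offset but no limit. Roughly (values usually render
    # as bound parameters):
    #   .limit(10)            ->  LIMIT 10 OFFSET 0
    #   .limit(10).offset(20) ->  LIMIT 10 OFFSET 20
    #   .offset(20)           ->  LIMIT -1 OFFSET 20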
def for_update_clause(self, select, **kw):
# sqlite has no "FOR UPDATE" AFAICT
return ""
def visit_is_distinct_from_binary(self, binary, operator, **kw):
return "%s IS NOT %s" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_isnot_distinct_from_binary(self, binary, operator, **kw):
return "%s IS %s" % (
self.process(binary.left),
self.process(binary.right),
)
class SQLiteDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
coltype = self.dialect.type_compiler.process(
column.type, type_expression=column
)
colspec = self.preparer.format_column(column) + " " + coltype
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
if column.primary_key:
if (
column.autoincrement is True
and len(column.table.primary_key.columns) != 1
):
raise exc.CompileError(
"SQLite does not support autoincrement for "
"composite primary keys"
)
if (
column.table.dialect_options["sqlite"]["autoincrement"]
and len(column.table.primary_key.columns) == 1
and issubclass(column.type._type_affinity, sqltypes.Integer)
and not column.foreign_keys
):
colspec += " PRIMARY KEY AUTOINCREMENT"
return colspec
def visit_primary_key_constraint(self, constraint):
# for columns with sqlite_autoincrement=True,
# the PRIMARY KEY constraint can only be inline
# with the column itself.
if len(constraint.columns) == 1:
c = list(constraint)[0]
if (
c.primary_key
and c.table.dialect_options["sqlite"]["autoincrement"]
and issubclass(c.type._type_affinity, sqltypes.Integer)
and not c.foreign_keys
):
return None
return super(SQLiteDDLCompiler, self).visit_primary_key_constraint(
constraint
)
def visit_foreign_key_constraint(self, constraint):
local_table = constraint.elements[0].parent.table
remote_table = constraint.elements[0].column.table
if local_table.schema != remote_table.schema:
return None
else:
return super(SQLiteDDLCompiler, self).visit_foreign_key_constraint(
constraint
)
def define_constraint_remote_table(self, constraint, table, preparer):
"""Format the remote table clause of a CREATE CONSTRAINT clause."""
return preparer.format_table(table, use_schema=False)
def visit_create_index(
self, create, include_schema=False, include_table_schema=True
):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX %s ON %s (%s)" % (
self._prepared_index_name(index, include_schema=True),
preparer.format_table(index.table, use_schema=False),
", ".join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True
)
for expr in index.expressions
),
)
whereclause = index.dialect_options["sqlite"]["where"]
if whereclause is not None:
where_compiled = self.sql_compiler.process(
whereclause, include_table=False, literal_binds=True
)
text += " WHERE " + where_compiled
return text
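# Illustrative sketch (not part of SQLAlchemy): how the "sqlite_autoincrement"
# table option and the "sqlite_where" index option consumed by the DDL compiler
# above are supplied from user code. Table, column and index names are
# hypothetical and the helper is never called by the dialect.
def _example_sqlite_ddl_options():
    from sqlalchemy import Column, Index, Integer, MetaData, Table
    metadata = MetaData()
    t = Table(
        "t",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("x", Integer),
        # single Integer primary key -> "... PRIMARY KEY AUTOINCREMENT"
        sqlite_autoincrement=True,
    )
    # partial index -> roughly "CREATE INDEX ix_t_x ON t (x) WHERE x > 10"
    ix = Index("ix_t_x", t.c.x, sqlite_where=t.c.x > 10)
    return t, ix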
class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_)
def visit_DATETIME(self, type_, **kw):
if (
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
return super(SQLiteTypeCompiler, self).visit_DATETIME(type_)
else:
return "DATETIME_CHAR"
def visit_DATE(self, type_, **kw):
if (
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
return super(SQLiteTypeCompiler, self).visit_DATE(type_)
else:
return "DATE_CHAR"
def visit_TIME(self, type_, **kw):
if (
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
return super(SQLiteTypeCompiler, self).visit_TIME(type_)
else:
return "TIME_CHAR"
class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = set(
[
"add",
"after",
"all",
"alter",
"analyze",
"and",
"as",
"asc",
"attach",
"autoincrement",
"before",
"begin",
"between",
"by",
"cascade",
"case",
"cast",
"check",
"collate",
"column",
"commit",
"conflict",
"constraint",
"create",
"cross",
"current_date",
"current_time",
"current_timestamp",
"database",
"default",
"deferrable",
"deferred",
"delete",
"desc",
"detach",
"distinct",
"drop",
"each",
"else",
"end",
"escape",
"except",
"exclusive",
"explain",
"false",
"fail",
"for",
"foreign",
"from",
"full",
"glob",
"group",
"having",
"if",
"ignore",
"immediate",
"in",
"index",
"indexed",
"initially",
"inner",
"insert",
"instead",
"intersect",
"into",
"is",
"isnull",
"join",
"key",
"left",
"like",
"limit",
"match",
"natural",
"not",
"notnull",
"null",
"of",
"offset",
"on",
"or",
"order",
"outer",
"plan",
"pragma",
"primary",
"query",
"raise",
"references",
"reindex",
"rename",
"replace",
"restrict",
"right",
"rollback",
"row",
"select",
"set",
"table",
"temp",
"temporary",
"then",
"to",
"transaction",
"trigger",
"true",
"union",
"unique",
"update",
"using",
"vacuum",
"values",
"view",
"virtual",
"when",
"where",
]
)
def format_index(self, index, use_schema=True, name=None):
"""Prepare a quoted index and schema name."""
if name is None:
name = index.name
result = self.quote(name, index.quote)
if (
not self.omit_schema
and use_schema
and getattr(index.table, "schema", None)
):
result = (
self.quote_schema(index.table.schema, index.table.quote_schema)
+ "."
+ result
)
return result
class SQLiteExecutionContext(default.DefaultExecutionContext):
@util.memoized_property
def _preserve_raw_colnames(self):
return (
not self.dialect._broken_dotted_colnames
or self.execution_options.get("sqlite_raw_colnames", False)
)
def _translate_colname(self, colname):
# TODO: detect SQLite version 3.10.0 or greater;
# see [ticket:3633]
# adjust for dotted column names. SQLite
# in the case of UNION may store col names as
# "tablename.colname", or if using an attached database,
# "database.tablename.colname", in cursor.description
if not self._preserve_raw_colnames and "." in colname:
return colname.split(".")[-1], colname
else:
return colname, None
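# Illustrative sketch (not part of SQLAlchemy): the name translation performed
# by _translate_colname above for SQLite versions that report dotted column
# names such as "tablename.colname" in cursor.description.
def _example_translate_colname():
    colname = "tablename.colname"
    translated = colname.split(".")[-1]  # "colname"; the raw name is kept too
    return translated, colname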
class SQLiteDialect(default.DefaultDialect):
name = "sqlite"
supports_alter = False
supports_unicode_statements = True
supports_unicode_binds = True
supports_default_values = True
supports_empty_insert = False
supports_cast = True
supports_multivalues_insert = True
default_paramstyle = "qmark"
execution_ctx_cls = SQLiteExecutionContext
statement_compiler = SQLiteCompiler
ddl_compiler = SQLiteDDLCompiler
type_compiler = SQLiteTypeCompiler
preparer = SQLiteIdentifierPreparer
ischema_names = ischema_names
colspecs = colspecs
isolation_level = None
supports_cast = True
supports_default_values = True
construct_arguments = [
(sa_schema.Table, {"autoincrement": False}),
(sa_schema.Index, {"where": None}),
]
_broken_fk_pragma_quotes = False
_broken_dotted_colnames = False
def __init__(self, isolation_level=None, native_datetime=False, **kwargs):
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
# this flag used by pysqlite dialect, and perhaps others in the
# future, to indicate the driver is handling date/timestamp
# conversions (and perhaps datetime/time as well on some hypothetical
# driver ?)
self.native_datetime = native_datetime
if self.dbapi is not None:
self.supports_right_nested_joins = (
self.dbapi.sqlite_version_info >= (3, 7, 16)
)
self._broken_dotted_colnames = self.dbapi.sqlite_version_info < (
3,
10,
0,
)
self.supports_default_values = self.dbapi.sqlite_version_info >= (
3,
3,
8,
)
self.supports_cast = self.dbapi.sqlite_version_info >= (3, 2, 3)
self.supports_multivalues_insert = (
# http://www.sqlite.org/releaselog/3_7_11.html
self.dbapi.sqlite_version_info
>= (3, 7, 11)
)
# see http://www.sqlalchemy.org/trac/ticket/2568
# as well as http://www.sqlite.org/src/info/600482d161
self._broken_fk_pragma_quotes = self.dbapi.sqlite_version_info < (
3,
6,
14,
)
_isolation_lookup = {"READ UNCOMMITTED": 1, "SERIALIZABLE": 0}
def set_isolation_level(self, connection, level):
try:
isolation_level = self._isolation_lookup[level.replace("_", " ")]
except KeyError:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute("PRAGMA read_uncommitted = %d" % isolation_level)
cursor.close()
def get_isolation_level(self, connection):
cursor = connection.cursor()
cursor.execute("PRAGMA read_uncommitted")
res = cursor.fetchone()
if res:
value = res[0]
else:
# http://www.sqlite.org/changes.html#version_3_3_3
# "Optional READ UNCOMMITTED isolation (instead of the
# default isolation level of SERIALIZABLE) and
# table level locking when database connections
# share a common cache.""
# pre-SQLite 3.3.0 default to 0
value = 0
cursor.close()
if value == 0:
return "SERIALIZABLE"
elif value == 1:
return "READ UNCOMMITTED"
else:
assert False, "Unknown isolation level %s" % value
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
@reflection.cache
def get_schema_names(self, connection, **kw):
s = "PRAGMA database_list"
dl = connection.execute(s)
return [db[1] for db in dl if db[1] != "temp"]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
else:
master = "sqlite_master"
s = ("SELECT name FROM %s " "WHERE type='table' ORDER BY name") % (
master,
)
rs = connection.execute(s)
return [row[0] for row in rs]
@reflection.cache
def get_temp_table_names(self, connection, **kw):
s = (
"SELECT name FROM sqlite_temp_master "
"WHERE type='table' ORDER BY name "
)
rs = connection.execute(s)
return [row[0] for row in rs]
@reflection.cache
def get_temp_view_names(self, connection, **kw):
s = (
"SELECT name FROM sqlite_temp_master "
"WHERE type='view' ORDER BY name "
)
rs = connection.execute(s)
return [row[0] for row in rs]
def has_table(self, connection, table_name, schema=None):
info = self._get_table_pragma(
connection, "table_info", table_name, schema=schema
)
return bool(info)
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
else:
master = "sqlite_master"
s = ("SELECT name FROM %s " "WHERE type='view' ORDER BY name") % (
master,
)
rs = connection.execute(s)
return [row[0] for row in rs]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
s = ("SELECT sql FROM %s WHERE name = '%s'" "AND type='view'") % (
master,
view_name,
)
rs = connection.execute(s)
else:
try:
s = (
"SELECT sql FROM "
" (SELECT * FROM sqlite_master UNION ALL "
" SELECT * FROM sqlite_temp_master) "
"WHERE name = '%s' "
"AND type='view'"
) % view_name
rs = connection.execute(s)
except exc.DBAPIError:
s = (
"SELECT sql FROM sqlite_master WHERE name = '%s' "
"AND type='view'"
) % view_name
rs = connection.execute(s)
result = rs.fetchall()
if result:
return result[0].sql
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
info = self._get_table_pragma(
connection, "table_info", table_name, schema=schema
)
columns = []
for row in info:
(name, type_, nullable, default, primary_key) = (
row[1],
row[2].upper(),
not row[3],
row[4],
row[5],
)
columns.append(
self._get_column_info(
name, type_, nullable, default, primary_key
)
)
return columns
def _get_column_info(self, name, type_, nullable, default, primary_key):
coltype = self._resolve_type_affinity(type_)
if default is not None:
default = util.text_type(default)
return {
"name": name,
"type": coltype,
"nullable": nullable,
"default": default,
"autoincrement": "auto",
"primary_key": primary_key,
}
def _resolve_type_affinity(self, type_):
"""Return a data type from a reflected column, using affinity tules.
SQLite's goal for universal compatibility introduces some complexity
during reflection, as a column's defined type might not actually be a
type that SQLite understands - or indeed, my not be defined *at all*.
Internally, SQLite handles this with a 'data type affinity' for each
column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER',
'REAL', or 'NONE' (raw bits). The algorithm that determines this is
listed in http://www.sqlite.org/datatype3.html section 2.1.
This method allows SQLAlchemy to support that algorithm, while still
providing access to smarter reflection utilities by regcognizing
column definitions that SQLite only supports through affinity (like
DATE and DOUBLE).
"""
match = re.match(r"([\w ]+)(\(.*?\))?", type_)
if match:
coltype = match.group(1)
args = match.group(2)
else:
coltype = ""
args = ""
if coltype in self.ischema_names:
coltype = self.ischema_names[coltype]
elif "INT" in coltype:
coltype = sqltypes.INTEGER
elif "CHAR" in coltype or "CLOB" in coltype or "TEXT" in coltype:
coltype = sqltypes.TEXT
elif "BLOB" in coltype or not coltype:
coltype = sqltypes.NullType
elif "REAL" in coltype or "FLOA" in coltype or "DOUB" in coltype:
coltype = sqltypes.REAL
else:
coltype = sqltypes.NUMERIC
if args is not None:
args = re.findall(r"(\d+)", args)
try:
coltype = coltype(*[int(a) for a in args])
except TypeError:
util.warn(
"Could not instantiate type %s with "
"reflected arguments %s; using no arguments."
% (coltype, args)
)
coltype = coltype()
else:
coltype = coltype()
return coltype
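    # Illustrative examples of the affinity resolution above (not part of
    # SQLAlchemy; the reflected type strings are hypothetical):
    #   "VARCHAR(30)"      -> VARCHAR(30) via ischema_names
    #   "MEDIUMINT"        -> INTEGER ("INT" substring)
    #   "CLOB"             -> TEXT ("CLOB" substring)
    #   ""                 -> NullType (no declared type)
    #   "DOUBLE PRECISION" -> REAL ("DOUB" substring)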
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
constraint_name = None
table_data = self._get_table_sql(connection, table_name, schema=schema)
if table_data:
PK_PATTERN = r"CONSTRAINT (\w+) PRIMARY KEY"
result = re.search(PK_PATTERN, table_data, re.I)
constraint_name = result.group(1) if result else None
cols = self.get_columns(connection, table_name, schema, **kw)
pkeys = []
for col in cols:
if col["primary_key"]:
pkeys.append(col["name"])
return {"constrained_columns": pkeys, "name": constraint_name}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
# sqlite makes this *extremely difficult*.
# First, use the pragma to get the actual FKs.
pragma_fks = self._get_table_pragma(
connection, "foreign_key_list", table_name, schema=schema
)
fks = {}
for row in pragma_fks:
(numerical_id, rtbl, lcol, rcol) = (row[0], row[2], row[3], row[4])
if rcol is None:
rcol = lcol
if self._broken_fk_pragma_quotes:
rtbl = re.sub(r"^[\"\[`\']|[\"\]`\']$", "", rtbl)
if numerical_id in fks:
fk = fks[numerical_id]
else:
fk = fks[numerical_id] = {
"name": None,
"constrained_columns": [],
"referred_schema": schema,
"referred_table": rtbl,
"referred_columns": [],
"options": {},
}
fks[numerical_id] = fk
fk["constrained_columns"].append(lcol)
fk["referred_columns"].append(rcol)
def fk_sig(constrained_columns, referred_table, referred_columns):
return (
tuple(constrained_columns)
+ (referred_table,)
+ tuple(referred_columns)
)
# then, parse the actual SQL and attempt to find DDL that matches
# the names as well. SQLite saves the DDL in whatever format
# it was typed in as, so need to be liberal here.
keys_by_signature = dict(
(
fk_sig(
fk["constrained_columns"],
fk["referred_table"],
fk["referred_columns"],
),
fk,
)
for fk in fks.values()
)
table_data = self._get_table_sql(connection, table_name, schema=schema)
if table_data is None:
# system tables, etc.
return []
def parse_fks():
FK_PATTERN = (
r"(?:CONSTRAINT (\w+) +)?"
r"FOREIGN KEY *\( *(.+?) *\) +"
r'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\((.+?)\) *'
r"((?:ON (?:DELETE|UPDATE) "
r"(?:SET NULL|SET DEFAULT|CASCADE|RESTRICT|NO ACTION) *)*)"
)
for match in re.finditer(FK_PATTERN, table_data, re.I):
(
constraint_name,
constrained_columns,
referred_quoted_name,
referred_name,
referred_columns,
onupdatedelete,
) = match.group(1, 2, 3, 4, 5, 6)
constrained_columns = list(
self._find_cols_in_sig(constrained_columns)
)
if not referred_columns:
referred_columns = constrained_columns
else:
referred_columns = list(
self._find_cols_in_sig(referred_columns)
)
referred_name = referred_quoted_name or referred_name
options = {}
for token in re.split(r" *\bON\b *", onupdatedelete.upper()):
if token.startswith("DELETE"):
options["ondelete"] = token[6:].strip()
elif token.startswith("UPDATE"):
options["onupdate"] = token[6:].strip()
yield (
constraint_name,
constrained_columns,
referred_name,
referred_columns,
options,
)
fkeys = []
for (
constraint_name,
constrained_columns,
referred_name,
referred_columns,
options,
) in parse_fks():
sig = fk_sig(constrained_columns, referred_name, referred_columns)
if sig not in keys_by_signature:
util.warn(
"WARNING: SQL-parsed foreign key constraint "
"'%s' could not be located in PRAGMA "
"foreign_keys for table %s" % (sig, table_name)
)
continue
key = keys_by_signature.pop(sig)
key["name"] = constraint_name
key["options"] = options
fkeys.append(key)
# assume the remainders are the unnamed, inline constraints, just
# use them as is as it's extremely difficult to parse inline
# constraints
fkeys.extend(keys_by_signature.values())
return fkeys
def _find_cols_in_sig(self, sig):
for match in re.finditer(r'(?:"(.+?)")|([a-z0-9_]+)', sig, re.I):
yield match.group(1) or match.group(2)
@reflection.cache
def get_unique_constraints(
self, connection, table_name, schema=None, **kw
):
auto_index_by_sig = {}
for idx in self.get_indexes(
connection,
table_name,
schema=schema,
include_auto_indexes=True,
**kw
):
if not idx["name"].startswith("sqlite_autoindex"):
continue
sig = tuple(idx["column_names"])
auto_index_by_sig[sig] = idx
table_data = self._get_table_sql(
connection, table_name, schema=schema, **kw
)
if not table_data:
return []
unique_constraints = []
def parse_uqs():
UNIQUE_PATTERN = r'(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)'
INLINE_UNIQUE_PATTERN = (
r'(?:(".+?")|([a-z0-9]+)) ' r"+[a-z0-9_ ]+? +UNIQUE"
)
for match in re.finditer(UNIQUE_PATTERN, table_data, re.I):
name, cols = match.group(1, 2)
yield name, list(self._find_cols_in_sig(cols))
# we need to match inlines as well, as we seek to differentiate
# a UNIQUE constraint from a UNIQUE INDEX, even though these
# are kind of the same thing :)
for match in re.finditer(INLINE_UNIQUE_PATTERN, table_data, re.I):
cols = list(
self._find_cols_in_sig(match.group(1) or match.group(2))
)
yield None, cols
for name, cols in parse_uqs():
sig = tuple(cols)
if sig in auto_index_by_sig:
auto_index_by_sig.pop(sig)
parsed_constraint = {"name": name, "column_names": cols}
unique_constraints.append(parsed_constraint)
# NOTE: auto_index_by_sig might not be empty here,
# the PRIMARY KEY may have an entry.
return unique_constraints
@reflection.cache
def get_check_constraints(self, connection, table_name, schema=None, **kw):
table_data = self._get_table_sql(
connection, table_name, schema=schema, **kw
)
if not table_data:
return []
CHECK_PATTERN = r"(?:CONSTRAINT (\w+) +)?" r"CHECK *\( *(.+) *\),? *"
check_constraints = []
# NOTE: we aren't using re.S here because we actually are
# taking advantage of each CHECK constraint being all on one
# line in the table definition in order to delineate. This
# necessarily makes assumptions as to how the CREATE TABLE
# was emitted.
for match in re.finditer(CHECK_PATTERN, table_data, re.I):
check_constraints.append(
{"sqltext": match.group(2), "name": match.group(1)}
)
return check_constraints
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
pragma_indexes = self._get_table_pragma(
connection, "index_list", table_name, schema=schema
)
indexes = []
include_auto_indexes = kw.pop("include_auto_indexes", False)
for row in pragma_indexes:
# ignore implicit primary key index.
# http://www.mail-archive.com/[email protected]/msg30517.html
if not include_auto_indexes and row[1].startswith(
"sqlite_autoindex"
):
continue
indexes.append(dict(name=row[1], column_names=[], unique=row[2]))
# loop thru unique indexes to get the column names.
for idx in list(indexes):
pragma_index = self._get_table_pragma(
connection, "index_info", idx["name"]
)
for row in pragma_index:
if row[2] is None:
util.warn(
"Skipped unsupported reflection of "
"expression-based index %s" % idx["name"]
)
indexes.remove(idx)
break
else:
idx["column_names"].append(row[2])
return indexes
@reflection.cache
def _get_table_sql(self, connection, table_name, schema=None, **kw):
if schema:
schema_expr = "%s." % (
self.identifier_preparer.quote_identifier(schema)
)
else:
schema_expr = ""
try:
s = (
"SELECT sql FROM "
" (SELECT * FROM %(schema)ssqlite_master UNION ALL "
" SELECT * FROM %(schema)ssqlite_temp_master) "
"WHERE name = '%(table)s' "
"AND type = 'table'"
% {"schema": schema_expr, "table": table_name}
)
rs = connection.execute(s)
except exc.DBAPIError:
s = (
"SELECT sql FROM %(schema)ssqlite_master "
"WHERE name = '%(table)s' "
"AND type = 'table'"
% {"schema": schema_expr, "table": table_name}
)
rs = connection.execute(s)
return rs.scalar()
def _get_table_pragma(self, connection, pragma, table_name, schema=None):
quote = self.identifier_preparer.quote_identifier
if schema is not None:
statement = "PRAGMA %s." % quote(schema)
else:
statement = "PRAGMA "
qtable = quote(table_name)
statement = "%s%s(%s)" % (statement, pragma, qtable)
cursor = connection.execute(statement)
if not cursor._soft_closed:
# work around SQLite issue whereby cursor.description
# is blank when PRAGMA returns no rows:
# http://www.sqlite.org/cvstrac/tktview?tn=1884
result = cursor.fetchall()
else:
result = []
return result
| [
"[email protected]"
]
| |
482c535cae406f32ff74169addb0665a8ded5130 | 1c6866a37fddb455d5dd9a9db100415cd83b2429 | /sanic官方教程/请求数据/1-query_string.py | ea1c30bdd144eb58accc93598360f7fde417f8ef | []
| no_license | zb14755456464/sanic | e860bc140eab8725aa1410096edecd511163121d | 66462923b367c52edab15df6f33705b215f75174 | refs/heads/master | 2021-01-24T08:12:36.021336 | 2019-05-21T12:41:30 | 2019-05-21T12:41:30 | 122,973,882 | 0 | 1 | null | 2019-05-21T12:39:57 | 2018-02-26T13:29:12 | Python | UTF-8 | Python | false | false | 455 | py | from sanic.response import json
from sanic import Sanic
app = Sanic(__name__)
@app.route("/query_string")
def query_string(request):
# http://127.0.0.1:8000/query_string/?a=1&b=2&a=2
# {"args":{"a":["1","2"],"b":["2"]},"url":"http:\/\/0.0.0.0:8000\/query_string\/?a=1&b=2","query_string":"a=1&b=2"}
return json({"args": request.args, "url": request.url, "query_string": request.query_string})
if __name__ == '__main__':
app.run()
| [
"[email protected]"
]
| |
f3beb823bc4165ce7b933f2a83ac81d6221ac32d | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1/Smetterleen/qual1.py | 654f790f055710cb8fefb7ee19fcf863c683bdcd | []
| no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 891 | py | '''
Created on Apr 9, 2016
@author: joep
'''
import os
ds_type = 'large'
BASE = os.path.dirname(os.path.realpath(__file__))
inf = open(os.path.join(BASE, 'A-large.in'.format(ds_type)), 'r')
outf = open(os.path.join(BASE, '{}.out'.format(ds_type)), 'w+')
cases = int(inf.readline())
for case in range(cases):
n = int(inf.readline())
if n == 0:
c_n = 'INSOMNIA'
else:
i = 1
digs = set()
while True:
c_n = str(i * n)
digs.update(set(c_n))
done = True
for dig in '0123456789':
if dig not in digs:
done = False
break
if done:
break
i += 1
outf.write('Case #{}: {}\n'.format(case + 1, c_n))
print('Finished {}'.format(case + 1))
| [
"[[email protected]]"
]
| |
61c1eed9e08a33eaff443e4f2b977508ffb9b5d3 | f47ec2cffc71196679bb165d4c7d6de3b6884e33 | /src/pretalx/submission/forms/tag.py | 183749f9de658767e5f72ff6820a9051bcd32ec1 | [
"Apache-2.0"
]
| permissive | toshywoshy/pretalx | 874bed725df48db47f118ff021340d0d34eca98a | 14619a4cb7d46df1434c8835abbac6f155c37626 | refs/heads/master | 2023-01-08T11:16:44.992557 | 2021-11-02T12:24:30 | 2021-11-02T12:24:30 | 179,450,372 | 0 | 0 | NOASSERTION | 2023-01-06T22:04:49 | 2019-04-04T08:00:38 | Python | UTF-8 | Python | false | false | 933 | py | from django import forms
from django.utils.translation import ugettext_lazy as _
from i18nfield.forms import I18nModelForm
from pretalx.common.mixins.forms import I18nHelpText, ReadOnlyFlag
from pretalx.submission.models import Tag
class TagForm(ReadOnlyFlag, I18nHelpText, I18nModelForm):
def __init__(self, *args, event=None, **kwargs):
self.event = event
super().__init__(*args, **kwargs)
self.fields["color"].widget.attrs["class"] = "colorpickerfield"
def clean_tag(self):
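        # Reject a tag whose (localized) name duplicates another tag of the
        # same event, ignoring the instance currently being edited.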
tag = self.cleaned_data["tag"]
qs = self.event.tags.all()
if self.instance and self.instance.pk:
qs = qs.exclude(pk=self.instance.pk)
if any(str(s.tag) == str(tag) for s in qs):
raise forms.ValidationError(_("You already have a tag by this name!"))
return tag
class Meta:
model = Tag
fields = ("tag", "description", "color", "public")
| [
"[email protected]"
]
| |
cfb3ff4f069ff67a31f55f2ba1d579cd20f195c9 | 1cf380b819a399c3f58a7ad13f5daeb5659cead3 | /wrf_management/real.py | 72fdb321a3d7e8e409ac03baf1527db36cdaafb4 | []
| no_license | daliagachc/wrf_management | dd88cf5d6279457f4e2b414acfa0d0cbaaad3873 | 4ee88c668ed0252e68713aa756b74344ecada615 | refs/heads/master | 2021-06-13T09:39:08.477315 | 2021-04-09T14:43:21 | 2021-04-09T14:43:21 | 171,271,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py | # project name: wrf_management
# created by diego aliaga daliaga_at_chacaltaya.edu.bo
import glob
import os
import pathlib
from collections import OrderedDict
import wrf_management.project_global_constants as gc
import sqlite3 as sq
import pandas as pd
import wrf_management.run_utilities as ru
import f90nml
def skim_namelist_copy_real(
input_path, output_path, *, date, duration_h
):
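    # Read namelist.input from input_path, set the start_*/end_* entries of
    # &time_control to `date` and `date + duration_h` (repeated for the four
    # domain columns), and write the updated namelist to output_path.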
old_dic = f90nml.read(os.path.join(input_path, 'namelist.input'))
s_dt = pd.to_datetime(date)
e_dt = s_dt + pd.Timedelta(duration_h, 'h')
d_list = [
['start_year', s_dt.year],
['start_month', s_dt.month],
['start_day', s_dt.day],
['start_hour', s_dt.hour],
['end_year', e_dt.year],
['end_month', e_dt.month],
['end_day', e_dt.day],
['end_hour', e_dt.hour],
# ['end_second', e_dt.second],
]
for k, v in d_list:
# print(k)
# print(v)
old_dic['time_control'][k] = 4*[v]
f90nml.write(
old_dic,
os.path.join(output_path, 'namelist.input'),
force=True
)
return old_dic
def get_met_files(
*, job_path, met_pref
):
met_path = pathlib.Path(job_path).parent
pre_path = os.path.join(met_path, met_pref)
# print(pre_path)
file_list = glob.glob(os.path.join(pre_path, 'met_em.d' + '*'))
return file_list
def link_met_files(
*,
job_path, met_pref
):
met_list = get_met_files(
job_path=job_path,
met_pref=met_pref)
df = pd.DataFrame(met_list, columns=['source'])
df['base_name'] = df['source'].apply(
lambda p: os.path.basename(p)
)
df['dest'] = df['base_name'].apply(
lambda bn: os.path.join(job_path, bn)
)
df.apply(
lambda r: ru.relink(r['source'], r['dest']),
axis=1
)
| [
"[email protected]"
]
| |
8d938e45ee0ced99172d7b4a614d66203d5bb8f6 | 933ed73cdf117fc6c88c1ebba7a17b82807a16e8 | /docs/02.AI_ML/code-1905/day06/demo09_cv.py | c74d27eecf9adec52575b547693ae52533582b4a | [
"Apache-2.0"
]
| permissive | wan230114/PythonNote | c4fff80f6f3849ed0b0346526d3c6197a4867d2c | f4989a44c03efdcea3f7aa3484e3470e7fd983eb | refs/heads/master | 2023-01-28T14:51:07.304066 | 2023-01-12T18:38:16 | 2023-01-12T18:38:16 | 242,367,069 | 5 | 6 | Apache-2.0 | 2021-01-05T23:35:10 | 2020-02-22T15:45:01 | JavaScript | UTF-8 | Python | false | false | 395 | py | """
demo09_cv.py 词袋模型
"""
import nltk.tokenize as tk
import sklearn.feature_extraction.text as ft
doc = 'The brown dog is running. ' \
'The black dog is in the black room. ' \
'Running in the room is forbidden.'
# Split the doc into sentences
sents = tk.sent_tokenize(doc)
# Build the bag-of-words model
cv = ft.CountVectorizer()
bow = cv.fit_transform(sents)
print(bow.toarray()) | [
"[email protected]"
]
| |
648086fac69850c8b63b8de728580fbc618e210f | 6061ebee9fbce8eb5b48ed7ccd2aecb196156598 | /modulo03-estruturascondicionais/exercicios/exercicio11.py | 19004de489822a3a78156123ac1ad86c8d4f0533 | []
| no_license | DarioCampagnaCoutinho/logica-programacao-python | fdc64871849bea5f5bbf2c342db5fda15778110b | b494bb6ef226c89f4bcfc66f964987046aba692d | refs/heads/master | 2023-02-24T11:45:29.551278 | 2021-01-26T22:02:49 | 2021-01-26T22:02:49 | 271,899,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | horas = int(input('Horas:'))
if horas >= 0 and horas <= 11:
print('Bom Dia')
elif horas >= 12 and horas <= 17:
print("Boa Tarde")
elif horas >= 18 and horas <= 23:
print("Boa Noite")
else:
print("Digite Novamente")
| [
"[email protected]"
]
| |
3c0bfa561bf63fedd740a9fc81b331d90ccc697b | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/0b6c5e6d25ab9cef83b75fab1cc1443c85468427-<fc>-fix.py | 4bfb79d0d7698b3efca58ea99ebec0336227cf4c | []
| no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,120 | py |
def fc(input, size, num_flatten_dims=1, param_attr=None, bias_attr=None, act=None, name=None):
'\n **Fully Connected Layer**\n\n The fully connected layer can take multiple tensors as its inputs. It\n creates a variable (one for each input tensor) called weights for each input\n tensor, which represents a fully connected weight matrix from each input\n unit to each output unit. The fully connected layer multiplies each input\n tensor with its coresponding weight to produce an output Tensor. If\n multiple input tensors are given, the results of multiple multiplications\n will be sumed up. If bias_attr is not None, a biases variable will be\n created and added to the output. Finally, if activation is not None,\n it will be applied to the output as well.\n\n This process can be formulated as follows:\n\n .. math::\n\n Out = Act({\\sum_{i=0}^{N-1}W_iX_i + b})\n\n In the above equation:\n\n * :math:`N`: Number of the input.\n * :math:`X_i`: The input tensor.\n * :math:`W`: The weights created by this layer.\n * :math:`b`: The bias parameter created by this layer (if needed).\n * :math:`Act`: The activation funtion.\n * :math:`Out`: The output tensor.\n\n Args:\n input(Variable|list): The input tensor(s) to the fully connected layer.\n size(int): The number of output units in the fully connected layer.\n num_flatten_dims(int): The fc layer can accept an input tensor with more\n than two dimensions. If this happens, the\n multidimensional tensor will first be flattened\n into a 2-dimensional matrix. The parameter\n `num_flatten_dims` determines how the input tensor\n is flattened: the first `num_flatten_dims`\n dimensions will be flatten to form the first\n dimension of the final matrix (height of the\n matrix), and the rest `rank(X) - num_flatten_dims`\n dimensions are flattened to form the second\n dimension of the final matrix (width of the matrix).\n For example, suppose `X` is a 6-dimensional tensor\n with a shape [2, 3, 4, 5, 6], and\n `num_flatten_dims` = 3. Then, the flattened matrix\n will have a shape [2 x 3 x 4, 5 x 6] = [24, 30].\n By default, `num_flatten_dims` is set to 1.\n param_attr(ParamAttr|list): The parameter attribute for learnable\n parameters/weights of the fully connected\n layer.\n param_initializer(ParamAttr|list): The initializer used for the\n weight/parameter. If set None,\n XavierInitializer() will be used.\n bias_attr(ParamAttr|list): The parameter attribute for the bias parameter\n for this layer. If set None, no bias will be\n added to the output units.\n bias_initializer(ParamAttr|list): The initializer used for the bias.\n If set None, then ConstantInitializer()\n will be used.\n act(str): Activation to be applied to the output of the fully connected\n layer.\n name(str): Name/alias of the fully connected layer.\n\n\n Returns:\n Variable: The output tensor variable.\n\n Raises:\n ValueError: If rank of the input tensor is less than 2.\n\n Examples:\n .. code-block:: python\n\n data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")\n fc = fluid.layers.fc(input=data, size=1000, act="tanh")\n '
helper = LayerHelper('fc', **locals())
dtype = helper.input_dtype()
mul_results = []
for (input_var, param_attr) in helper.iter_inputs_and_params():
input_shape = input_var.shape
param_shape = ([reduce((lambda a, b: (a * b)), input_shape[num_flatten_dims:], 1)] + [size])
w = helper.create_parameter(attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False)
tmp = helper.create_tmp_variable(dtype)
helper.append_op(type='mul', inputs={
'X': input_var,
'Y': w,
}, outputs={
'Out': tmp,
}, attrs={
'x_num_col_dims': num_flatten_dims,
'y_num_col_dims': 1,
})
mul_results.append(tmp)
if (len(mul_results) == 1):
pre_bias = mul_results[0]
else:
pre_bias = helper.create_tmp_variable(dtype)
helper.append_op(type='sum', inputs={
'X': mul_results,
}, outputs={
'Out': pre_bias,
})
pre_activation = helper.append_bias_op(pre_bias)
return helper.append_activation(pre_activation)
| [
"[email protected]"
]
| |
939bf81f702844980e7c9e5256af2fa6085d426a | 2c7b6ceffd09dae72d18a573a82d3a4c1d105e06 | /EXAMPLES/defaultdict_fruitnames.py | 14e449c1024ce3c9c3eee0cfc15d25f4b4c44901 | []
| no_license | feleHaile/20180813JPL | c23144a2139bc256e86a81a4402dc6ace0bb2791 | 09af77d98a9eeea193760aacff52b21fac8fc920 | refs/heads/master | 2020-05-15T22:31:14.922844 | 2018-08-16T21:59:40 | 2018-08-16T21:59:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | #!/usr/bin/env python
#
from collections import defaultdict
from pprint import pprint
fruits = ["pomegranate","cherry","apricot","date","apple","lemon","kiwi",
"orange","lime","watermelon","guava","papaya","fig","pear","banana",
"tamarind","persimmon","elderberry","peach","blueberry","lychee",
"grape" ]
fruits_by_first_letter = defaultdict(list) # <1>
for fruit in fruits:
first_letter = fruit[0] # <2>
fruits_by_first_letter[first_letter].append(fruit) # <3>
pprint(fruits_by_first_letter) # <4>
| [
"[email protected]"
]
| |
f897bc4ebb70621584bc23fe028dcaa3e6e152ec | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_6695.py | 6799ccfe76a1aae3be3037e8f110140980d21e5d | []
| no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | # What is the proper way to track indexes in python?
for index, entry in enumerate(longList):
if entry == 'foo':
print index
| [
"[email protected]"
]
| |
0a7c1810b71bc1d74a6427857b8fba73dcd596f5 | e9ed8174f0e2f52f858f0dd8b9206eb57388ece2 | /JssProject/JssProject/asgi.py | 54480fbeb22c73b1ed8de2d0c993cab07e905d18 | []
| no_license | Tedhoon/JasoseolProject | bb061dc1ed0cf0a0842a2c046c4434ccb80263a5 | 9500edabb35242f2974443a8b0fa43e5e3435484 | refs/heads/master | 2022-11-11T03:40:04.564877 | 2020-06-28T07:02:28 | 2020-06-28T07:02:28 | 275,432,188 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | """
ASGI config for JssProject project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'JssProject.settings')
application = get_asgi_application()
| [
"[email protected]"
]
| |
62f6fefadf2e85580056e2d9ccbd8b06d40759b8 | 0203e5a6d7beb1e0f83113dac4c167b171756f24 | /lib/spdk-19.10/test/json_config/clear_config.py | 5328e6dc19c40496c6295697584810b4c20d3e50 | [
"BSD-3-Clause"
]
| permissive | Wonchul08Lee/poseidonos | eaafe277fc56a0f5b5fcca3b70acc9bfe5d5d1ae | 6fe410cdf88f3243ad9210f763c2b5a2f7e8b46a | refs/heads/main | 2023-03-30T13:41:09.660647 | 2021-04-08T06:43:26 | 2021-04-08T06:43:26 | 355,819,746 | 0 | 0 | BSD-3-Clause | 2021-04-08T08:17:27 | 2021-04-08T08:17:26 | null | UTF-8 | Python | false | false | 7,593 | py | #!/usr/bin/env python3
import os
import sys
import argparse
import logging
sys.path.append(os.path.join(os.path.dirname(__file__), "../../scripts"))
import rpc # noqa
from rpc.client import print_dict, JSONRPCException # noqa
def get_bdev_name_key(bdev):
bdev_name_key = 'name'
if 'method' in bdev and bdev['method'] == 'bdev_split_create':
bdev_name_key = "base_bdev"
return bdev_name_key
def get_bdev_name(bdev):
bdev_name = None
if 'params' in bdev:
if 'name' in bdev['params']:
bdev_name = bdev['params']['name']
elif 'base_name' in bdev['params']:
bdev_name = bdev['params']['base_name']
elif 'base_bdev' in bdev['params']:
bdev_name = bdev['params']['base_bdev']
if 'method' in bdev and bdev['method'] == 'bdev_error_create':
bdev_name = "EE_%s" % bdev_name
return bdev_name
def get_bdev_delete_method(bdev):
delete_method_map = {'bdev_malloc_create': "bdev_malloc_delete",
'bdev_null_create': "bdev_null_delete",
'bdev_rbd_create': "bdev_rbd_delete",
'bdev_pmem_create': "bdev_pmem_delete",
'bdev_aio_create': "bdev_aio_delete",
'bdev_error_create': "bdev_error_delete",
'construct_split_vbdev': "destruct_split_vbdev",
'bdev_virtio_attach_controller': "remove_virtio_bdev",
'bdev_crypto_create': "bdev_crypto_delete",
'bdev_delay_create': "bdev_delay_delete",
'bdev_passthru_create': "bdev_passthru_delete",
'bdev_compress_create': 'bdev_compress_delete',
}
destroy_method = None
if 'method' in bdev:
construct_method = bdev['method']
if construct_method in list(delete_method_map.keys()):
destroy_method = delete_method_map[construct_method]
return destroy_method
def clear_bdev_subsystem(args, bdev_config):
rpc_bdevs = args.client.call("bdev_get_bdevs")
for bdev in bdev_config:
bdev_name_key = get_bdev_name_key(bdev)
bdev_name = get_bdev_name(bdev)
destroy_method = get_bdev_delete_method(bdev)
if destroy_method:
args.client.call(destroy_method, {bdev_name_key: bdev_name})
nvme_controllers = args.client.call("bdev_nvme_get_controllers")
for ctrlr in nvme_controllers:
args.client.call('bdev_nvme_detach_controller', {'name': ctrlr['name']})
''' Disable and reset hotplug '''
rpc.bdev.bdev_nvme_set_hotplug(args.client, False)
def get_nvmf_destroy_method(nvmf):
delete_method_map = {'nvmf_create_subsystem': "nvmf_delete_subsystem"}
try:
return delete_method_map[nvmf['method']]
except KeyError:
return None
def clear_nvmf_subsystem(args, nvmf_config):
for nvmf in nvmf_config:
destroy_method = get_nvmf_destroy_method(nvmf)
if destroy_method:
args.client.call(destroy_method, {'nqn': nvmf['params']['nqn']})
def get_iscsi_destroy_method(iscsi):
delete_method_map = {'iscsi_create_portal_group': "iscsi_delete_portal_group",
'iscsi_create_initiator_group': "iscsi_delete_initiator_group",
'iscsi_create_target_node': "iscsi_delete_target_node",
'iscsi_set_options': None
}
return delete_method_map[iscsi['method']]
def get_iscsi_name(iscsi):
if 'name' in iscsi['params']:
return iscsi['params']['name']
else:
return iscsi['params']['tag']
def get_iscsi_name_key(iscsi):
if iscsi['method'] == 'iscsi_create_target_node':
return "name"
else:
return 'tag'
def clear_iscsi_subsystem(args, iscsi_config):
for iscsi in iscsi_config:
destroy_method = get_iscsi_destroy_method(iscsi)
if destroy_method:
args.client.call(destroy_method, {get_iscsi_name_key(iscsi): get_iscsi_name(iscsi)})
def get_nbd_destroy_method(nbd):
delete_method_map = {'nbd_start_disk': "nbd_stop_disk"
}
return delete_method_map[nbd['method']]
def clear_nbd_subsystem(args, nbd_config):
for nbd in nbd_config:
destroy_method = get_nbd_destroy_method(nbd)
if destroy_method:
args.client.call(destroy_method, {'nbd_device': nbd['params']['nbd_device']})
def clear_net_framework_subsystem(args, net_framework_config):
pass
def clear_copy_subsystem(args, copy_config):
pass
def clear_interface_subsystem(args, interface_config):
pass
def clear_vhost_subsystem(args, vhost_config):
for vhost in reversed(vhost_config):
if 'method' in vhost:
method = vhost['method']
if method in ['vhost_scsi_controller_add_target']:
args.client.call("vhost_scsi_controller_remove_target",
{"ctrlr": vhost['params']['ctrlr'],
"scsi_target_num": vhost['params']['scsi_target_num']})
elif method in ['vhost_create_scsi_controller', 'vhost_create_blk_controller',
'vhost_create_nvme_controller']:
args.client.call("vhost_delete_controller", {'ctrlr': vhost['params']['ctrlr']})
def call_test_cmd(func):
def rpc_test_cmd(*args, **kwargs):
try:
func(*args, **kwargs)
except JSONRPCException as ex:
print((ex.message))
exit(1)
return rpc_test_cmd
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Clear config command')
parser.add_argument('-s', dest='server_addr', default='/var/tmp/spdk.sock')
parser.add_argument('-p', dest='port', default=5260, type=int)
parser.add_argument('-t', dest='timeout', default=60.0, type=float)
parser.add_argument('-v', dest='verbose', action='store_const', const="INFO",
help='Set verbose mode to INFO', default="ERROR")
parser.add_argument('--verbose', dest='verbose', choices=['DEBUG', 'INFO', 'ERROR'],
help="""Set verbose level. """)
subparsers = parser.add_subparsers(help='RPC methods')
@call_test_cmd
def clear_config(args):
for subsystem_item in reversed(args.client.call('framework_get_subsystems')):
args.subsystem = subsystem_item['subsystem']
clear_subsystem(args)
p = subparsers.add_parser('clear_config', help="""Clear configuration of all SPDK subsystems and targets using JSON RPC""")
p.set_defaults(func=clear_config)
@call_test_cmd
def clear_subsystem(args):
config = args.client.call('framework_get_config', {"name": args.subsystem})
if config is None:
return
if args.verbose:
print("Calling clear_%s_subsystem" % args.subsystem)
globals()["clear_%s_subsystem" % args.subsystem](args, config)
p = subparsers.add_parser('clear_subsystem', help="""Clear configuration of SPDK subsystem using JSON RPC""")
p.add_argument('--subsystem', help="""Subsystem name""")
p.set_defaults(func=clear_subsystem)
args = parser.parse_args()
with rpc.client.JSONRPCClient(args.server_addr, args.port, args.timeout, log_level=getattr(logging, args.verbose.upper())) as client:
try:
args.client = client
args.func(args)
except JSONRPCException as ex:
print((ex.message))
exit(1)
| [
"[email protected]"
]
| |
a38336c56207fb0e1d51bcc216cb54f334e4f6c4 | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/test/python/35ca52050200eb7a7d7dee4ba4da4e4a6c9da1320007_populate_computer.py | 35ca52050200eb7a7d7dee4ba4da4e4a6c9da132 | [
"MIT"
]
| permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 658 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def initial_data(apps, schema_editor):
SC = apps.get_model('ship', 'ShipComputer')
SC(name='None', beamattack=0).save()
SC(name='Electronic', beamattack=25).save()
SC(name='Optronic', beamattack=50).save()
SC(name='Positronic', beamattack=75).save()
SC(name='Cybertronic', beamattack=100).save()
SC(name='Moleculartronic', beamattack=125).save()
class Migration(migrations.Migration):
dependencies = [
('ship', '0006_auto_20141004_0839'),
]
operations = [
migrations.RunPython(initial_data),
]
| [
"[email protected]"
]
| |
463f2b8c0b5c8c46db28eb503109fe4db542aa86 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/common/dossiers2/custom/helpers.py | f05bfc017fe3b9514391e58a82f275ee50d3af3d | []
| no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,786 | py | # 2017.05.04 15:28:51 Střední Evropa (letní čas)
# Embedded file name: scripts/common/dossiers2/custom/helpers.py
from dossiers2.custom.records import RECORDS, RECORD_INDICES
from dossiers2.custom.cache import getCache
def getTankExpertRequirements(vehTypeFrags, nationID = -1):
cache = getCache()
killedVehTypes = set(vehTypeFrags.iterkeys())
res = {'tankExpert': cache['vehiclesInTrees'] - killedVehTypes}
if nationID == -1:
nationIDs = cache['nationsWithVehiclesInTree']
else:
nationIDs = [nationID]
vehiclesInTreesByNation = cache['vehiclesInTreesByNation']
for nationIdx in nationIDs:
res[''.join(['tankExpert', str(nationIdx)])] = vehiclesInTreesByNation[nationIdx] - killedVehTypes
return res
def getMechanicEngineerRequirements(defaultUnlocks, unlocks, nationID = -1):
cache = getCache()
vehiclesInTreesByNation = cache['vehiclesInTreesByNation']
res = {'mechanicEngineer': cache['vehiclesInTrees'] - defaultUnlocks - unlocks}
if nationID == -1:
nationIDs = cache['nationsWithVehiclesInTree']
else:
nationIDs = [nationID]
for nationIdx in nationIDs:
res[''.join(['mechanicEngineer', str(nationIdx)])] = vehiclesInTreesByNation[nationIdx] - defaultUnlocks - unlocks
return res
def getRecordMaxValue(block, record):
recordPacking = RECORDS[RECORD_INDICES[block, record]]
if recordPacking[2] == 'b' or recordPacking[2] == 'bs':
return 1
    assert recordPacking[2] == 'p'
return recordPacking[4]
def updateTankExpert(dossierDescr, vehTypeFrags, nationID):
res = getTankExpertRequirements(vehTypeFrags, nationID)
for record, value in res.iteritems():
if len(value) == 0:
dossierDescr['achievements'][record] = True
dossierDescr.addPopUp('achievements', record, True)
def updateMechanicEngineer(dossierDescr, defaultUnlocks, unlocks, nationID):
res = getMechanicEngineerRequirements(defaultUnlocks, unlocks, nationID)
for record, value in res.iteritems():
if len(value) == 0:
dossierDescr['achievements'][record] = True
dossierDescr.addPopUp('achievements', record, True)
def updateRareAchievements(dossierDescr, achievements):
block = dossierDescr['rareAchievements']
for achievement in achievements:
if achievement > 0:
block.append(achievement)
elif achievement < 0:
try:
block.remove(abs(achievement))
except:
pass
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\dossiers2\custom\helpers.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:28:52 Central Europe (summer time)
| [
"[email protected]"
]
| |
cd04e4a74fe6f82b608519b7387f62b22d627744 | b643abbcfb5dc46a2d311f179f740cbe44f6a922 | /manage.py | 420d5ab4bec8ad6485ebd9ef3286367cc3685146 | []
| no_license | safwanvk/productivity | e7126d2ce77649e80ada365ab4616baa91b289ec | 141598632da0acd6c47ff34446ccbef9f7b980ac | refs/heads/main | 2023-03-10T19:06:57.589215 | 2021-03-01T14:41:24 | 2021-03-01T14:41:24 | 342,141,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ProductivityApp.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
7c22ed802147373cbca2af023716a1833bc12ada | 1adf769cf9234f9b6c619f808d2723b99451d679 | /rusentrel/classic_cv/ctx/ian_ef.py | 63e565c7b73f84faf7767733f0cbdada3e14c82b | [
"MIT"
]
| permissive | DAVMARROS/attitude-extraction-with-attention-and-ds | 4e85fa154ead0cd9499aaedf5d752ac565f37b92 | fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d | refs/heads/master | 2023-02-09T04:56:24.090380 | 2020-12-30T10:09:34 | 2020-12-30T10:09:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
sys.path.append('../../../')
from rusentrel.classic.ctx.ian_ef import run_testing_ian_ef
from rusentrel.classic_cv.common import CV_COUNT, \
classic_cv_common_callback_modification_func, \
CV_NAME_PREFIX
if __name__ == "__main__":
run_testing_ian_ef(
name_prefix=CV_NAME_PREFIX,
cv_count=CV_COUNT,
custom_callback_func=classic_cv_common_callback_modification_func)
| [
"[email protected]"
]
| |
aa37bb217f03cac2488f3606a6f1a5a26f41559f | 3d37f595a8aaaa7c5723ddbd6758ecac5147dce2 | /maximum-subarray/maximum-subarray.py | bf5f661dd48593b947036ae83c3e2e79e0d320f9 | []
| no_license | baggy2797/Leetcode | ec218b155ebb972cd793253f25c3e18117216703 | 469c1541579401768f7a1da55d504a9e8656b21e | refs/heads/main | 2023-06-24T17:03:42.708935 | 2021-07-16T22:31:24 | 2021-07-16T22:31:24 | 342,979,700 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | class Solution:
def maxSubArray(self, nums: List[int]) -> int:
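        # Kadane's algorithm: currMax is the best subarray sum ending at the
        # current index, globalMax the best sum seen over any subarray so far.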
globalMax = nums[0]
currMax = nums[0]
for i in range(1,len(nums)):
currMax = max(currMax+nums[i],nums[i])
globalMax = max(globalMax,currMax)
return globalMax | [
"[email protected]"
]
| |
d2303bbedd1e575b6704b5320400a954d0ca9015 | a5718006e28b394633c4e84e75e7941cb4c11a08 | /TD 1 probleme 55 Arthur Lambert.py | 16a3dfdd20ce6ec98cfeb59b80e0126829370564 | []
| no_license | mines-nancy-tcss5ac-2018/td1-ArthurLambert1 | ab883b105e7a5341524032a5bf89c866861f5dd4 | 4e65a396b52e2fe5a7452d78ca0f739260beb854 | refs/heads/master | 2020-03-31T00:16:09.032475 | 2018-10-05T20:41:03 | 2018-10-05T20:41:03 | 151,733,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | def nombreMiroir(x): #Retourne le nombre à l'envers
chaine = str(x)
chaineRenversee = ''
for elt in chaine:
chaineRenversee = elt + chaineRenversee
return int(chaineRenversee)
def testPalindrome(x):  # Tests whether a number is a palindrome or not
return x == nombreMiroir(x)
def testLychrel(n):
for i in range(50):
n += nombreMiroir(n)
if testPalindrome(n):
return 1
return 0
def solve55(n):
l = list(range(10, n + 1))
resultat = 0
for x in l:
if testLychrel(x) == 0:
resultat += 1
return resultat
assert solve55(10000) == 249
print(solve55(10000))
| [
"[email protected]"
]
| |
aac8cd6d37866c9041fe21351488a9a7928086af | c0ba52c370f3c41471308588d49ae75f975d9b49 | /qa/rpc-tests/listtransactions.py | 3c07a0c0926626b959fa9c77d87e7da67b1cdf6d | [
"MIT"
]
| permissive | mirzaei-ce/core-aghilbit | 7f318a7487675ef7a38280d7b19284c3227cea52 | 4a4ce7b0da3fe01246f300a6809cda68d0708ef6 | refs/heads/master | 2021-07-12T08:21:29.796955 | 2017-10-16T16:56:05 | 2017-10-16T16:56:05 | 107,156,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,725 | py | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework.test_framework import AghilbitTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction
import cStringIO
import binascii
def txFromHex(hexstring):
tx = CTransaction()
f = cStringIO.StringIO(binascii.unhexlify(hexstring))
tx.deserialize(f)
return tx
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
class ListTransactionsTest(AghilbitTestFramework):
def run_test(self):
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
check_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
check_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
self.nodes[0].generate(1)
self.sync_all()
check_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
check_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
check_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
check_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = { self.nodes[0].getnewaddress() : 0.11,
self.nodes[1].getnewaddress() : 0.22,
self.nodes[0].getaccountaddress("from1") : 0.33,
self.nodes[1].getaccountaddress("toself") : 0.44 }
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
check_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
check_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
check_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
{"category":"receive","amount":Decimal("0.1")},
{"txid":txid, "account" : "watchonly"} )
self.run_rbf_opt_in_test()
# Check that the opt-in-rbf flag works properly, for sent and received
# transactions.
def run_rbf_opt_in_test(self):
# Check whether a transaction signals opt-in RBF itself
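        # (Per BIP125, a transaction signals replaceability if any of its
        # inputs has nSequence below 0xfffffffe.)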
def is_opt_in(node, txid):
rawtx = node.getrawtransaction(txid, 1)
for x in rawtx["vin"]:
if x["sequence"] < 0xfffffffe:
return True
return False
# Find an unconfirmed output matching a certain txid
def get_unconfirmed_utxo_entry(node, txid_to_match):
utxo = node.listunspent(0, 0)
for i in utxo:
if i["txid"] == txid_to_match:
return i
return None
# 1. Chain a few transactions that don't opt-in.
txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
assert(not is_opt_in(self.nodes[0], txid_1))
check_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
check_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
# Tx2 will build off txid_1, still not opting in to RBF.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
# Create tx2 using createrawtransaction
inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.999}
tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
# ...and check the result
assert(not is_opt_in(self.nodes[1], txid_2))
check_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
check_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
# Tx3 will opt-in to RBF
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[1].getnewaddress(): 0.998}
tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
tx3_modified = txFromHex(tx3)
tx3_modified.vin[0].nSequence = 0
tx3 = binascii.hexlify(tx3_modified.serialize()).decode('utf-8')
tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
assert(is_opt_in(self.nodes[0], txid_3))
check_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
check_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
# Tx4 will chain off tx3. Doesn't signal itself, but depends on one
# that does.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.997}
tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
assert(not is_opt_in(self.nodes[1], txid_4))
check_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
check_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
# Replace tx3, and check that tx4 becomes unknown
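        # (Replacing tx3 evicts its descendant tx4 from the mempool, so the
        # wallet can no longer determine tx4's signaling status.)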
tx3_b = tx3_modified
        tx3_b.vout[0].nValue -= int(0.004 * 100000000)  # bump the fee; keep nValue an integer satoshi amount
tx3_b = binascii.hexlify(tx3_b.serialize()).decode('utf-8')
tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
assert(is_opt_in(self.nodes[0], txid_3b))
check_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
sync_mempools(self.nodes)
check_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
# Check gettransaction as well:
for n in self.nodes[0:2]:
assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
# After mining a transaction, it's no longer BIP125-replaceable
self.nodes[0].generate(1)
assert(txid_3b not in self.nodes[0].getrawmempool())
assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
ListTransactionsTest().main()
| [
"[email protected]"
]
| |
594c3f8d10249ae15900bb0604641c8dd1f64ec3 | 933a4f98b3ab1df987bce525d20ca904b225140f | /scripts/slave/recipe_modules/buildbucket/tests/put.py | cd433ecae9b207c24dc57c1e52b530ea49713fe2 | [
"BSD-3-Clause"
]
| permissive | mcgreevy/chromium-build | 3881c489b4d9be2f113da755487808b3593f8156 | f8e42c70146c1b668421ee6358dc550a955770a3 | refs/heads/master | 2020-12-30T12:32:15.685191 | 2017-05-17T06:58:18 | 2017-05-17T06:58:18 | 91,419,271 | 0 | 2 | NOASSERTION | 2020-07-22T09:27:35 | 2017-05-16T05:52:45 | Python | UTF-8 | Python | false | false | 1,518 | py | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'buildbucket',
'service_account',
'recipe_engine/properties',
]
def RunSteps(api):
example_bucket = 'master.user.username'
build_parameters = {
'builder_name': 'linux_perf_bisect',
'properties': {
'bisect_config': {
'bad_revision': '351054',
'bug_id': 537649,
'command': ('src/tools/perf/run_benchmark -v '
'--browser=release --output-format=chartjson '
'--also-run-disabled-tests speedometer'),
'good_revision': '351045',
'gs_bucket': 'chrome-perf',
'max_time_minutes': '20',
'metric': 'Total/Total',
'recipe_tester_name': 'linux_perf_bisect',
'repeat_count': '10',
'test_type': 'perf'
},
}
}
build_tags = {'master': 'overriden.master.url',
'builder': 'overriden_builder'}
service_account = api.service_account.get_json_path('username')
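  # Schedule the build described above via buildbucket, authenticating with the
  # 'username' service account; client_operation_id lets the service deduplicate
  # retried requests.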
api.buildbucket.put(
[{'bucket': example_bucket,
'parameters': build_parameters,
'tags': build_tags,
'client_operation_id': 'random_client_op_id'}],
service_account)
def GenTests(api):
yield (
api.test('basic') +
api.properties(buildername='example_builder', buildnumber=123)
)
| [
"[email protected]"
]
| |
6a927d7f4c1bd6bbb8a99700a2b23ada0d6bd2ab | 91ff6fdf7b2ccc58869d6ad41842f230644952c1 | /requirements/venky_task/important/textwrap.py | 11f82c6c9c166647506b48126e9b1c74f97a3d77 | []
| no_license | KONASANI-0143/Dev | dd4564f54117f54ccfa003d1fcec4220e6cbe1f9 | 23d31fbeddcd303a7dc90ac9cfbe2c762d61c61e | refs/heads/master | 2023-08-14T15:59:59.012414 | 2021-10-13T14:54:49 | 2021-10-13T15:10:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | import textwrap
sample_text = '''
Python is a widely used high-level, general-purpose, interpreted,
dynamic programming language. Its design philosophy emphasizes
code readability, and its syntax allows programmers to express
concepts in fewer lines of code than possible in languages such
as C++ or Java.
'''
# fill() collapses the whitespace and re-wraps the text into lines of at most 50 characters.
print(textwrap.fill(sample_text, width=50))
# dedent() only strips whitespace that is common to the start of every line.
print(textwrap.dedent(sample_text))
| [
"[email protected]"
]
|