Dataset schema (one row per source file):

| Column | Type | Range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3-616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0-112 |
| license_type | string | 2 classes |
| repo_name | string | length 5-115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1-132 |
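The column list above is easiest to sanity-check programmatically. A minimal sketch of loading one shard of a dump with this schema using the Hugging Face `datasets` library (the Parquet file name is an assumption, not part of the dump):

```python
# Minimal sketch: load one shard of the dump and inspect the columns listed above.
# Assumes a local Parquet file "data.parquet" with this schema; adjust the path as needed.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="data.parquet", split="train")

print(ds.features)                       # blob_id, path, content, length_bytes, ...
print(ds[0]["path"], ds[0]["language"])  # e.g. /hadoop-python/TopPopularLinksMapper.py Python

# Keep only small, non-vendored, hand-written Python files.
small_py = ds.filter(
    lambda row: row["language"] == "Python"
    and not row["is_vendor"]
    and not row["is_generated"]
    and row["length_bytes"] < 10_000
)
print(len(small_py))
```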
92cdb2d2c042b5c2f5c428f0f9f04b40d75ffe1f | 772a82205af92d2f2d2b490ac6bc23fdb7456124 | /hadoop-python/TopPopularLinksMapper.py | e761e3029501002bdccab3db35d35e3963e3d614 | [] | no_license | atashi/LLL | 4f777b3a06c6ed38eab4323d2072dbbec22eee92 | 857b8c7fccfe8216da59228c1cf3675444855673 | refs/heads/master | 2021-05-17T10:11:28.946779 | 2019-11-25T15:56:14 | 2019-11-25T15:56:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | #!/usr/bin/env python
import sys
from collections import Counter
counter = Counter()
for line in sys.stdin:
    line = line.strip()
    k, v = line.split('\t')
    try:
        v = int(v)
    except ValueError:
        continue
    counter.update({k: v})

counter_list = counter.items()
sort_counter = sorted(counter_list, key=lambda x: x[1], reverse=True)
for k, v in sort_counter[:10]:
    print("%s\t%d" % (k, v))
| [
"[email protected]"
] | |
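A Hadoop-streaming mapper like the one above can be exercised locally by piping tab-separated lines into it. A small test harness sketch (the script path and the sample lines are assumptions):

```python
# Minimal local test for the streaming mapper: feed "key\tcount" lines on stdin
# and print what the mapper emits. The script path below is an assumption.
import subprocess

sample = "a\t3\nb\t10\na\t2\nc\tnot_a_number\n"
result = subprocess.run(
    ["python", "TopPopularLinksMapper.py"],
    input=sample,
    capture_output=True,
    text=True,
)
print(result.stdout)  # keys sorted by summed count (non-integer counts skipped), at most 10 lines
```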
540660b234ff1db182a1e336b27f5d1fc440103d | fac96b4c97150e02f1405f7430c89b115e4c27f7 | /ch03/ex3-9.motorcycles.py | f1492ae35d421f4f18f5f5c3779654508b24f7b5 | [] | no_license | gustavonvp/PYTHON-CRASH-COURSE | 37478990ff3c3c368da505eb9e5a35dee5d1960b | 8033e2eb84cf6d85fd4ff42ae0550f38dcd23f62 | refs/heads/master | 2023-04-03T00:42:20.333183 | 2017-10-24T05:47:01 | 2017-10-24T05:47:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | # Appending Elements to the End of a List
motorcycles = []
motorcycles.append('honda')
motorcycles.append('yamaha')
motorcycles.append('suzuki')
print(motorcycles)
| [
"[email protected]"
] | |
50f391a8148e8c120e820f8f5d3e3228e7e44471 | a88afb87020530b8736841f3570fc125b5ded163 | /Python_Scripts/model_numbers.py | a5a6cc938421db752e5e93130f08208165847e52 | [
"MIT"
] | permissive | ICB-DCM/solverstudy | efb915189b63fb2ca005097b9d27054fbfbfb2c1 | 0aea105c115bbf92d13cc19d88ab554438abdd38 | refs/heads/master | 2023-01-19T17:36:08.394305 | 2020-11-23T22:53:03 | 2020-11-23T22:53:03 | 307,409,328 | 0 | 0 | MIT | 2020-11-23T22:53:05 | 2020-10-26T14:59:53 | Python | UTF-8 | Python | false | false | 824 | py | """Extract some basic information on model numbers."""
import os
import pandas as pd
from C import DIR_MODELS
df = pd.read_csv(os.path.join(DIR_MODELS, 'model_summary.tsv'), sep='\t')
print("Column names", df.columns)
print("Number of models:", df.shape[0])
print("Number of model groups:", len(df.short_id.unique()))
print("Number of AMICI importable models:", sum(df.amici_import == 'OK'))
print("Number of COPASI importable models:", sum(~pd.isnull(df.copasi_path)))
df_imp = df[(df.amici_import == 'OK') & (~pd.isnull(df.copasi_path))]
print("Number of importable models:", df_imp.shape[0])
print("Number of model groups:", len(df_imp.short_id.unique()))
df_acc = df_imp[df_imp.accepted]
print("Number of accepted models:", df_acc.shape[0])
print("Number of accepted model groups:", len(df_acc.short_id.unique())) | [
"[email protected]"
] | |
9a6db226d7bcbf0cc22bb1d38a97777b59a02fb7 | d3afd01b844f314a25e231d49eb18419b38de40b | /NotMnist/make_datasets.py | f3bd5286b969e7137ed08ebede391fdd13bdef7d | [] | no_license | nanigasi-san/Chainer_DL | 35cb6f58ef05ce45f371990af5f7ebc675ee2472 | ee8fb1faec9e5aad67a5d366681acc3979946c4b | refs/heads/master | 2020-04-29T14:40:00.004462 | 2019-06-20T08:16:51 | 2019-06-20T08:16:51 | 176,203,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,891 | py | from skimage import io
import glob
import numpy as np
from chainer.datasets import TupleDataset
from random import randint

# Build a TupleDataset
def make_tupledata_set_train(size=100):
    alphabet_list = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    image_list = []
    answer_list = []

    def make_image_set():
        image_path_list = glob.glob("F://notMnist_large/{0}/*".format(alphabet))
        count = 0
        _dataset = []
        for image_path in image_path_list[:size+100]:
            try:
                _dataset.append(io.imread(image_path)/255)
                count += 1
            except:
                continue
            if count == size:
                break
        return _dataset

    def make_answer_set():
        return np.array([alphabet_list.index(alphabet)] * size)

    for alphabet in alphabet_list[:10]:
        image_list.extend(make_image_set())
        answer_list.extend(make_answer_set())
    return TupleDataset(np.array(image_list, dtype=np.float32), np.array(answer_list))

def make_tupledata_set_test(size=10):
    alphabet_list = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    image_list = []
    answer_list = []

    def make_image_set():
        image_path_list = glob.glob("F://notMnist_large/{0}/*".format(alphabet))
        count = 0
        _dataset = []
        for i in range(size+50):
            try:
                _dataset.append(io.imread(image_path_list[randint(0, 30000)])/255)
                count += 1
            except:
                continue
            if count == size:
                break
        return _dataset

    def make_answer_set():
        return np.array([alphabet_list.index(alphabet)] * size)

    for alphabet in alphabet_list[:10]:
        image_list.extend(make_image_set())
        answer_list.extend(make_answer_set())
    return TupleDataset(np.array(image_list, dtype=np.float32), np.array(answer_list))
| [
"[email protected]"
] | |
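For context, a TupleDataset built this way plugs straight into Chainer's iterators. A minimal usage sketch (the import path, batch size, and use of SerialIterator are assumptions, not part of the original script):

```python
# Sketch: build the train/test datasets defined above and wrap them in Chainer iterators.
# Assumes the notMNIST images exist at the hard-coded F:// path used by the module,
# and that the module is importable as make_datasets.
import chainer
from make_datasets import make_tupledata_set_train, make_tupledata_set_test

train = make_tupledata_set_train(size=100)   # 10 classes x 100 images
test = make_tupledata_set_test(size=10)

train_iter = chainer.iterators.SerialIterator(train, batch_size=32)
test_iter = chainer.iterators.SerialIterator(test, batch_size=32, repeat=False, shuffle=False)

image, label = train[0]
print(image.shape, label)    # e.g. (28, 28) and an int label in 0..9
```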
c7256651b31945c3782d5e628fbc0571bb324f0e | a1da48c4376c8676cda8872443461e84fff6dc13 | /torchblocks/processor/sequence_labeling_processor.py | d57f31eb8b14683380853117be282ddb9580c728 | [
"MIT"
] | permissive | topDreamer/TorchBlocks | 6f9b2dc3be1dae143f0aeaa07057a53071ac841a | a5baecb9a2470ff175087475630f2b7db3f7ef51 | refs/heads/master | 2022-12-29T15:13:42.779220 | 2020-10-18T02:18:11 | 2020-10-18T02:18:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,137 | py | import logging
from .base import DataProcessor
from .utils import InputFeatures
logger = logging.getLogger(__name__)
class SequenceLabelingProcessor(DataProcessor):
    '''
    special_token_label: the label assigned to [CLS] and [SEP], default: 'O'
    pad_label_id: the label id used for padding; the 'X' label is used by default, i.e. default: 0
    '''
    def __init__(self, tokenizer, data_dir,
                 prefix='',
                 encode_mode='one',
                 truncate_label=True,
                 special_token_label='O',
                 add_special_tokens=True,
                 pad_to_max_length=True,
                 pad_label_id=0):
        super().__init__(data_dir=data_dir,
                         prefix=prefix,
                         tokenizer=tokenizer,
                         encode_mode=encode_mode,
                         pad_to_max_length=pad_to_max_length,
                         add_special_tokens=add_special_tokens,
                         truncate_label=truncate_label)
        self.pad_label_id = pad_label_id
        self.special_token_label = special_token_label

    def convert_to_features(self, examples, label_list, max_seq_length, **kwargs):
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for (ex_index, example) in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d/%d" % (ex_index, len(examples)))
            texts = example.texts
            inputs = self.encode(texts=texts, max_seq_length=max_seq_length)
            label_ids = example.label_ids
            if label_ids is None or not isinstance(label_ids, list):
                raise ValueError("label_ids is not correct")
            special_tokens_num = 2 if self.add_special_tokens else 0
            if len(label_ids) > max_seq_length - special_tokens_num:  # [CLS] and [SEP]
                label_ids = label_ids[:(max_seq_length - special_tokens_num)]
            label_ids = [label_map[x] for x in label_ids]
            label_ids = [label_map[self.special_token_label]] + label_ids + [label_map[self.special_token_label]]
            label_ids += [self.pad_label_id] * (max_seq_length - len(label_ids))  # padding
            inputs['guid'] = example.guid
            inputs['label_ids'] = label_ids
            if ex_index < 5:
                self.print_examples(**inputs)
            features.append(InputFeatures(**inputs))
        return features

class SequenceLabelingSpanProcessor(DataProcessor):
    '''
    span sequence labeling
    '''
    def __init__(self, tokenizer, data_dir,
                 prefix='',
                 encode_mode='one',
                 truncate_label=True,
                 add_special_tokens=True,
                 pad_to_max_length=True,
                 pad_label_id=0):
        super().__init__(data_dir=data_dir,
                         prefix=prefix,
                         encode_mode=encode_mode,
                         tokenizer=tokenizer,
                         pad_to_max_length=pad_to_max_length,
                         add_special_tokens=add_special_tokens,
                         truncate_label=truncate_label)
        self.pad_label_id = pad_label_id

    def get_batch_keys(self):
        return ['input_ids', 'attention_mask', 'token_type_ids', 'start_positions', 'end_positions']

    def convert_to_features(self, examples, label_list, max_seq_length):
        label2id = {label: i for i, label in enumerate(label_list)}
        features = []
        for (ex_index, example) in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d/%d" % (ex_index, len(examples)))
            texts = example.texts
            inputs = self.encode(texts=texts, max_seq_length=max_seq_length)
            start_positions = [self.pad_label_id] * max_seq_length
            end_positions = [self.pad_label_id] * max_seq_length
            for span in example.label_ids:
                label = span[0]
                if self.add_special_tokens:
                    start = span[1] + 1  # cls
                    end = span[2] + 1  # cls
                    special_num = 2
                else:
                    start = span[1]
                    end = span[2]
                    special_num = 0
                if start > max_seq_length - special_num:
                    continue
                start_positions[start] = label2id[label]
                if end > max_seq_length - special_num:
                    continue
                end_positions[end] = label2id[label]
            assert len(start_positions) == max_seq_length
            assert len(end_positions) == max_seq_length
            inputs['guid'] = example.guid
            inputs['start_positions'] = start_positions
            inputs['end_positions'] = end_positions
            if ex_index < 5:
                self.print_examples(**inputs)
            features.append(InputFeatures(**inputs))
        return features
| [
"[email protected]"
] | |
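The label alignment done in `convert_to_features` above (truncate to leave room for [CLS]/[SEP], map labels to ids, wrap with the special-token label, pad) is easy to check in isolation. A standalone sketch of just that step; the label set, lengths, and values are made up for illustration:

```python
# Standalone sketch of the label alignment used above: truncate to fit [CLS]/[SEP],
# map labels to ids, wrap with the special-token label, then pad to max_seq_length.
label_list = ['X', 'O', 'B-PER', 'I-PER']        # 'X' is the padding label, id 0
label_map = {label: i for i, label in enumerate(label_list)}

max_seq_length = 8
special_token_label = 'O'
pad_label_id = 0

labels = ['B-PER', 'I-PER', 'O', 'O', 'O', 'O', 'O', 'O', 'O']   # longer than fits

label_ids = [label_map[x] for x in labels[:max_seq_length - 2]]  # leave room for [CLS]/[SEP]
label_ids = [label_map[special_token_label]] + label_ids + [label_map[special_token_label]]
label_ids += [pad_label_id] * (max_seq_length - len(label_ids))

print(label_ids)                         # [1, 2, 3, 1, 1, 1, 1, 1]
assert len(label_ids) == max_seq_length
```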
d3f68f0ea82a4d0dbaf2ae04832775a6e8124729 | 92db89aaa332d2a0ea0318932c635c27e2ac5ff7 | /chap04_Classification/lecture_1x/step05_softmax_classifier.py | b2a0c2b0c34af50a17fb6d5bf97458f396355a39 | [] | no_license | DominKim/Tensorflow_DNN_CNN_RNN_Basic | daf40100c777a9d154996e4a02c8e19c35daa5fb | 1de11219800169b3bc0c95872d5952e76cbc3227 | refs/heads/master | 2022-11-09T11:16:06.576660 | 2020-06-30T05:50:04 | 2020-06-30T05:50:04 | 275,949,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,404 | py | # -*- coding: utf-8 -*-
"""
step05_softmax_classifier
- 활성함수 : Softmax(model)
- 손실함수 : Cross Entropy
"""
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from sklearn.metrics import accuracy_score
# 1. x, y 공급 data
# [털, 날개]
x_data = np.array([[0, 0], [1, 0], [1, 1], [0, 0], [0, 1], [1, 1]]) # [6, 2]
# [기타, 포유류, 조류] : [6, 3] -> one hot encoding
y_data = np.array([
[1, 0, 0], # 기타[0]
[0, 1, 0], # 포유류[1]
[0, 0, 1], # 조류[2]
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]
])
# 2. X, Y 변수 정의
X = tf.placeholder(dtype = tf.float32, shape = [None, 2]) # [관측치, 입력수]
Y = tf.placeholder(dtype = tf.float32, shape = [None, 3]) # [관측치, 출력수]
# 3. w, b
w = tf.Variable(tf.random_normal([2,3])) # [입력수, 출력수]
b = tf.Variable(tf.random_normal([3])) # [출력수]
# 4. softmax 분류기
# 1) 회귀방정식 : 예측치
model = tf.matmul(X, w) + b # 회귀모델
# softmax(예측치)
softmax = tf.nn.softmax(model) # 활성함수 적용(0 ~ 1) : y1 : 0.8, y2 : 0.1, y3 : 0.1
# (2) loss function : Entropy 이용 : -sum(Y * log(model))
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels = Y, logits = model))
loss = -tf.reduce_mean(Y * tf.log(softmax) + (1 - Y) * tf.log(1 - softmax))
# 3) optimizer : 오차 최소화(w, b update)
train = tf.train.AdamOptimizer(0.1).minimize(loss) # 오차 최소화
# 4) argmax() : encoding(2) -> decoding(10)
y_pred = tf.argmax(softmax, axis = 1)
y_true = tf.argmax(Y, axis = 1)
# 5.
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# 반복학습 : 500회
for step in range(500):
_, loss_val = sess.run([train, loss], feed_dict = {X:x_data, Y: y_data})
if (step + 1) % 50 == 0:
print(f"step = {step + 1}, loss = {loss_val}")
# mode result
print(sess.run(softmax, feed_dict = {X:x_data}))
y_pred_re = sess.run(y_pred, feed_dict = {X:x_data}) # 예측치
y_true_re = sess.run(y_true, feed_dict = {Y:y_data}) # 정답
acc = accuracy_score(y_true_re, y_pred_re)
print("y_pred =", y_pred_re)
print("y_true =", y_true_re)
print("accuracy =", acc)
'''
y_pred = [0 1 1 0 0 1]
y_true = [0 1 2 0 0 2]
accuracy = 0.6666666666666666
'''
| [
"[email protected]"
] | |
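Note that the second `loss` assignment in the script overrides the first, so the loss actually trained is the elementwise cross-entropy formula applied to the softmax outputs. A worked numpy check makes that formula concrete (the probabilities below are made-up numbers, not model output):

```python
# Worked check of the cross-entropy formula used above, in plain numpy.
import numpy as np

y_true = np.array([[1, 0, 0],
                   [0, 1, 0]])
y_prob = np.array([[0.8, 0.1, 0.1],     # rows sum to 1, as softmax output would
                   [0.2, 0.7, 0.1]])

# same expression as the script: -mean over all elements of Y*log(p) + (1-Y)*log(1-p)
loss = -np.mean(y_true * np.log(y_prob) + (1 - y_true) * np.log(1 - y_prob))
print(loss)   # ~ 0.19 for these numbers
```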
79cee0eb377abf4334aa24cfa9d979838ccb273e | b3b88d3ad0e23b4712059bdfd56fa89b0801faf3 | /neutron_plugin_contrail/plugins/opencontrail/loadbalancer/v2/loadbalancer_pool.py | 727c45cd819355edd8184956b8f947364a63b38f | [
"Apache-2.0"
] | permissive | tungstenfabric/tf-neutron-plugin | 3630c52db005c7efbc713cbdcc16064d7ca53f1b | 890e65d0091f81c8b55fb65fc574b257f813cea4 | refs/heads/master | 2023-08-17T14:24:14.134426 | 2023-08-13T18:00:50 | 2023-08-13T18:01:48 | 237,743,263 | 4 | 8 | Apache-2.0 | 2022-09-20T05:22:42 | 2020-02-02T08:51:02 | Python | UTF-8 | Python | false | false | 8,849 | py | #
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
import uuid
from neutron_lbaas.extensions import loadbalancerv2
try:
from neutron.api.v2.attributes import ATTR_NOT_SPECIFIED
except Exception:
from neutron_lib.constants import ATTR_NOT_SPECIFIED
try:
from neutron.common.exceptions import NotAuthorized
except ImportError:
from neutron_lib.exceptions import NotAuthorized
try:
from neutron.openstack.common import uuidutils
except ImportError:
from oslo_utils import uuidutils
from vnc_api.vnc_api import (
LoadbalancerPoolType, KeyValuePair, IdPermsType, LoadbalancerPool,
KeyValuePairs, NoIdError)
from vnc_api import exceptions as vnc_exc
from .. resource_manager import ResourceManager, EntityInUse
from .. resource_manager import LoadbalancerMethodInvalid
class LoadbalancerPoolManager(ResourceManager):
_loadbalancer_pool_type_mapping = {
'admin_state': 'admin_state_up',
'protocol': 'protocol',
'loadbalancer_method': 'lb_algorithm',
'subnet_id': 'subnet_id'
}
@property
def property_type_mapping(self):
return self._loadbalancer_pool_type_mapping
def make_properties(self, pool):
props = LoadbalancerPoolType()
for key, mapping in self._loadbalancer_pool_type_mapping.iteritems():
if mapping in pool:
setattr(props, key, pool[mapping])
sp = pool['session_persistence']
if sp is not None:
if 'type' in sp:
props.session_persistence = sp['type']
if 'cookie_name' in sp:
props.persistence_cookie_name = sp['cookie_name']
return props
def create_update_custom_attributes(self, custom_attributes, kvps):
kvp_array = []
for custom_attribute in custom_attributes or []:
for key, value in custom_attribute.iteritems():
kvp = KeyValuePair(key, value)
kvp_array.append(kvp)
kvps.set_key_value_pair(kvp_array)
return True
def _get_listeners(self, pool):
ll_list = []
ll = {}
ll_back_refs = pool.get_loadbalancer_listener_refs()
if ll_back_refs is None:
return None
ll['id'] = ll_back_refs[0]['uuid']
ll_list.append(ll)
return ll_list
def make_dict(self, pool, fields=None):
res = {
'id': pool.uuid,
'tenant_id': pool.parent_uuid.replace('-', ''),
'name': pool.display_name,
'description': self._get_object_description(pool),
'status': self._get_object_status(pool),
'listeners': self._get_listeners(pool),
'session_persistence': None,
}
if res['listeners']:
res['listener_id'] = res['listeners'][0]['id']
props = pool.get_loadbalancer_pool_properties()
for key, mapping in self._loadbalancer_pool_type_mapping.iteritems():
value = getattr(props, key, None)
if value is not None:
res[mapping] = value
custom_attributes = []
kvps = pool.get_loadbalancer_pool_custom_attributes()
if kvps:
custom_attributes = [
{kvp.get_key(): kvp.get_value()}
for kvp in kvps.get_key_value_pair() or []]
res['custom_attributes'] = [custom_attributes]
if props.session_persistence:
sp = {'type': props.session_persistence}
if props.session_persistence == 'APP_COOKIE':
sp['cookie_name'] = props.persistence_cookie_name
res['session_persistence'] = sp
# members
res['members'] = []
members = pool.get_loadbalancer_members()
if members is not None:
res['members'] = [{'id': member['uuid']} for member in members]
# health_monitor
hm_refs = pool.get_loadbalancer_healthmonitor_refs()
if hm_refs is not None:
res['healthmonitor_id'] = hm_refs[0]['uuid']
return self._fields(res, fields)
def resource_read(self, id):
return self._api.loadbalancer_pool_read(id=id)
def resource_list(self, tenant_id=None):
if tenant_id:
parent_id = str(uuid.UUID(tenant_id))
else:
parent_id = None
return self._api.loadbalancer_pools_list(parent_id=parent_id)
def resource_update(self, obj):
try:
return self._api.loadbalancer_pool_update(obj)
except vnc_exc.HttpError as e:
if 'LoadbalancerMethodType' in e.content:
pool_props = obj.get_loadbalancer_pool_properties()
lb_method = pool_props.get_loadbalancer_method()
raise LoadbalancerMethodInvalid(lb_method=lb_method,
pool_id=obj.uuid)
def resource_delete(self, id):
return self._api.loadbalancer_pool_delete(id=id)
def get_exception_notfound(self, id=None):
return loadbalancerv2.EntityNotFound(name=self.neutron_name, id=id)
def get_exception_inuse(self, id=None):
return EntityInUse(name=self.neutron_name, id=id)
@property
def neutron_name(self):
return "pool"
@property
def resource_name_plural(self):
return "loadbalancer-pools"
def create(self, context, pool):
"""
Create a loadbalancer_pool object.
"""
p = pool['pool']
tenant_id = self._get_tenant_id_for_create(context, p)
project = self._project_read(project_id=tenant_id)
if p['listener_id']:
try:
ll = self._api.loadbalancer_listener_read(id=p['listener_id'])
except NoIdError:
raise loadbalancerv2.EntityNotFound(name='Listener',
id=p['listener_id'])
project_id = ll.parent_uuid
if str(uuid.UUID(tenant_id)) != project_id:
raise NotAuthorized()
else:
ll = None
pool_uuid = uuidutils.generate_uuid()
name = self._get_resource_name('loadbalancer-pool', project,
p['name'], pool_uuid)
props = self.make_properties(p)
id_perms = IdPermsType(enable=True,
description=p['description'])
pool = LoadbalancerPool(name, project, uuid=pool_uuid,
loadbalancer_pool_properties=props,
id_perms=id_perms, display_name=p['name'])
if ll:
pool_exists = ll.get_loadbalancer_pool_back_refs()
if pool_exists is not None:
raise loadbalancerv2.OnePoolPerListener(
listener_id=p['listener_id'],
pool_id=pool_exists[0]['uuid'])
pool.set_loadbalancer_listener(ll)
# Custom attributes
if p['custom_attributes'] != ATTR_NOT_SPECIFIED:
custom_attributes = KeyValuePairs()
self.create_update_custom_attributes(p['custom_attributes'],
custom_attributes)
pool.set_loadbalancer_pool_custom_attributes(custom_attributes)
self._api.loadbalancer_pool_create(pool)
return self.make_dict(pool)
def _update_pool_properties(self, props, pool):
change = self.update_properties_subr(props, pool)
if 'session_persistence' in pool:
sp = pool['session_persistence']
new_type = sp.get('type', None)
if props.session_persistence != new_type:
props.session_persistence = new_type
change = True
new_cookie_name = sp.get('cookie_name', None)
if props.persistence_cookie_name != new_cookie_name and \
props.session_persistence == 'APP_COOKIE':
props.persistence_cookie_name = new_cookie_name
change = True
return change
def update_properties(self, pool_db, id, p):
props = pool_db.get_loadbalancer_pool_properties()
change = False
if self._update_pool_properties(props, p):
pool_db.set_loadbalancer_pool_properties(props)
change = True
if 'custom_attributes' in p:
custom_attributes = pool_db.get_loadbalancer_pool_custom_attributes()
# Make sure to initialize custom_attributes
if not custom_attributes:
custom_attributes = KeyValuePairs()
if self.create_update_custom_attributes(p['custom_attributes'],
custom_attributes):
pool_db.set_loadbalancer_pool_custom_attributes(custom_attributes)
change = True
return change
| [
"[email protected]"
] | |
5dda0c6d02518f56ac74730f1e83e27e23506133 | 4d718292ec9f90444eeda13d18febb10757da894 | /Exercices/6/Q Sauvegarde.py | a8020c51bbc6b578e78ff326e200ba24eb36fbe6 | [] | no_license | rverschuren/Info | b40fb04a6260dacfc95d12e63c99abd82b140e06 | c9aa0bdc1b026c8ba8134b878b5fae7d49d75e19 | refs/heads/master | 2020-04-16T07:29:49.847812 | 2019-01-14T14:50:18 | 2019-01-14T14:50:18 | 165,389,281 | 1 | 2 | null | 2019-01-12T18:56:01 | 2019-01-12T13:12:46 | Python | UTF-8 | Python | false | false | 389 | py | #Wiaux Bastien
def save_data(filename, life, mana, position_x, position_y):
    with open(filename, "w") as fichier:
        fichier.write("{}\n{}\n{}\n{}".format(life, mana, position_x, position_y))

def load_data(filename):
    with open(filename, 'r') as fichier:
        data = [int(i) for i in fichier.read().strip().split("\n")]
    return data[0], data[1], data[2], data[3]
| [
"[email protected]"
] | |
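A short round-trip shows how the two helpers above are meant to be used together (the file name is arbitrary):

```python
# Round-trip example for the save/load helpers above; "savegame.txt" is an arbitrary name.
save_data("savegame.txt", life=80, mana=45, position_x=12, position_y=7)

life, mana, x, y = load_data("savegame.txt")
print(life, mana, x, y)   # 80 45 12 7
```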
e9fe621b0279a36d6e766bad4eb8aebbfc560b6d | 3f01eb21ce140e6e8d6e9f6c037a0ed3acfd0e1b | /home/context_processors.py | fa9f1a4cf3ebef508c48420baf109b26c74ba6b1 | [
"MIT"
] | permissive | manushah17/Capstone_2019 | 01d45e3d8f925dac88c1911d853ec1b8762d5b1f | 381094fc778906810e13d7611bfdb2c74cac326e | refs/heads/master | 2022-12-16T21:08:29.385969 | 2019-09-07T15:08:42 | 2019-09-07T15:08:42 | 206,984,224 | 0 | 0 | MIT | 2022-12-08T01:22:56 | 2019-09-07T15:04:22 | HTML | UTF-8 | Python | false | false | 142 | py | from django.conf import settings
def global_settings(request):
    return {
        'GOOGLE_MAPS_API_KEY': settings.GOOGLE_MAPS_API_KEY
    } | [
"[email protected]"
] | |
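To take effect, a context processor like `global_settings` has to be registered in the project's template settings. A sketch of the relevant `settings.py` fragment; the project layout (dotted path `home.context_processors.global_settings`) matches the file path shown above, while the key value itself is a placeholder:

```python
# settings.py sketch: register the context processor and define the value it exposes.
GOOGLE_MAPS_API_KEY = "replace-with-a-real-key"   # placeholder, not a real key

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                # makes GOOGLE_MAPS_API_KEY available in every template
                "home.context_processors.global_settings",
            ],
        },
    },
]
```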
f413f0f6bb08ff5c76498ec2113b6004d38abe5c | 28a462a28f443c285ca5efec181ebe36b147c167 | /tests/compile/basic/recent/FromPropertyDescriptor.spec | 900befebba85c916d2b96653e1366f4b02b2d2c9 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | kaist-plrg/jstar | 63e71f9156860dc21cccc33a9f6c638dfee448ea | 1282919127ea18a7e40c7a55e63a1ddaaf7d9db4 | refs/heads/main | 2022-07-22T08:12:34.947712 | 2022-02-27T04:19:33 | 2022-02-27T11:06:14 | 384,045,526 | 6 | 4 | NOASSERTION | 2022-02-27T11:05:26 | 2021-07-08T07:53:21 | Python | UTF-8 | Python | false | false | 1,107 | spec | 1. If _Desc_ is *undefined*, return *undefined*.
1. Let _obj_ be ! OrdinaryObjectCreate(%Object.prototype%).
1. Assert: _obj_ is an extensible ordinary object with no own properties.
1. If _Desc_ has a [[Value]] field, then
  1. Perform ! CreateDataPropertyOrThrow(_obj_, *"value"*, _Desc_.[[Value]]).
1. If _Desc_ has a [[Writable]] field, then
  1. Perform ! CreateDataPropertyOrThrow(_obj_, *"writable"*, _Desc_.[[Writable]]).
1. If _Desc_ has a [[Get]] field, then
  1. Perform ! CreateDataPropertyOrThrow(_obj_, *"get"*, _Desc_.[[Get]]).
1. If _Desc_ has a [[Set]] field, then
  1. Perform ! CreateDataPropertyOrThrow(_obj_, *"set"*, _Desc_.[[Set]]).
1. If _Desc_ has an [[Enumerable]] field, then
  1. Perform ! CreateDataPropertyOrThrow(_obj_, *"enumerable"*, _Desc_.[[Enumerable]]).
1. If _Desc_ has a [[Configurable]] field, then
  1. Perform ! CreateDataPropertyOrThrow(_obj_, *"configurable"*, _Desc_.[[Configurable]]).
1. Return _obj_. | [
"[email protected]"
] | |
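The FromPropertyDescriptor algorithm above is mechanical: copy each present descriptor field onto a fresh plain object under its lowercase name. A rough Python analogue, using a dict in place of an ECMAScript object (an illustration only, not an engine implementation):

```python
# Rough Python analogue of FromPropertyDescriptor: a dict stands in for the
# ordinary object, and fields absent from the descriptor are simply left out.
def from_property_descriptor(desc):
    if desc is None:                      # step 1: undefined descriptor
        return None
    obj = {}                              # step 2: fresh object with no own properties
    for field in ("value", "writable", "get", "set", "enumerable", "configurable"):
        if field in desc:                 # only copy fields the descriptor actually has
            obj[field] = desc[field]
    return obj

print(from_property_descriptor({"value": 42, "writable": True, "enumerable": False}))
# {'value': 42, 'writable': True, 'enumerable': False}
```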
cefbf59aba6168247c1a7dd09e5d28cd50a6b679 | 941cbcc815da9927c16291fd0cf341fdf26d4b4b | /Web網頁框架/框架(Django)/200502_cookie&session/mysite/app01/views.py | bea45499ae8ecefde206c362d44a68fecc8ee90c | [] | no_license | narru888/PythonWork-py37- | 27de004157efdf42972f66b20872e17de8bc676c | f9cb1670fb84b9eb8aaaf7cd5cf9139ab4ef4053 | refs/heads/master | 2022-12-09T04:58:06.068302 | 2020-09-23T09:47:40 | 2020-09-23T09:47:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,612 | py | from django.shortcuts import render, redirect
def login(request):
    print('COOKIES', request.COOKIES)
    print('SESSION', request.session)
    if request.method == 'POST':
        name = request.POST.get('user')
        pwd = request.POST.get('pwd')
        if name == 'sb' and pwd == '123':
            # Cookie only (insecure)
            # ret = redirect('/index/')
            # # set_cookie(key, values, lifetime in seconds)
            # ret.set_cookie('data', {'user': name, 'pwd': pwd}, max_age=5)  # store the info in the cookie
            # return ret
            # COOKIE & SESSION
            request.session['is_login'] = True
            request.session['user'] = name
            # request.session.set_expiry(value)
            # If value is an integer, the session expires after that many seconds.
            # If value is a datetime or timedelta, the session expires at that point in time.
            # If value is 0, the session expires when the user closes the browser.
            # If value is None, the session falls back to the global session expiry policy.
            request.session.set_expiry(10)
            return redirect('/index/')
    return render(request, 'login.html')

def index(request):
    # Cookie only (insecure)
    # if request.COOKIES.get('data', None):  # returns None if the cookie is missing
    #     user = request.COOKIES.get('data')
    #     return render(request, 'index.html', locals())
    # COOKIE & SESSION
    if request.session.get('is_login', None):
        user = request.session.get('user')
        return render(request, 'index.html', locals())
    else:
        return redirect('/login/')
| [
"[email protected]"
] | |
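A sketch of how the two views above behave, written as it might appear inside a Django test case; the URL wiring to `/login/` and `/index/` and the existence of the templates are assumptions:

```python
# Sketch: exercising the login/index views with Django's test client.
from django.test import Client

client = Client()

# Wrong credentials: login() falls through and re-renders the login page.
resp = client.post("/login/", {"user": "someone", "pwd": "wrong"})
print(resp.status_code)                      # 200

# Correct credentials: session keys are set and we are redirected to /index/.
resp = client.post("/login/", {"user": "sb", "pwd": "123"})
print(resp.status_code, resp["Location"])    # 302 /index/

# Within the 10-second expiry window the session grants access to index().
resp = client.get("/index/")
print(resp.status_code)                      # 200
```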
218116f516a12b14cfa3aac8e28571b20389bf31 | 019b885fb971359524943730af2d6b67e6d322d5 | /build/lib/presalytics_story/api/default_api.py | 945c5dc3a25429f711f782303f7797e3bc6ad453 | [
"MIT"
] | permissive | presalytics/story-python-client | ab9ce85e680dad2ceb70832935cc03318b6f0b3f | 48ac7830b85d65b94a9f6bbfc0c7ee8344327084 | refs/heads/master | 2020-08-04T23:22:34.877485 | 2019-12-27T22:54:30 | 2019-12-27T22:54:30 | 212,312,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81,410 | py | # coding: utf-8
"""
Communications
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from presalytics_story.api_client import ApiClient
from presalytics_story.exceptions import (
ApiTypeError,
ApiValueError
)
class DefaultApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def story_get(self, **kwargs): # noqa: E501
"""story_get # noqa: E501
Returns a list of stories for this user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_get(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[Story]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.story_get_with_http_info(**kwargs) # noqa: E501
def story_get_with_http_info(self, **kwargs): # noqa: E501
"""story_get # noqa: E501
Returns a list of stories for this user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(list[Story], status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method story_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Story]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def story_id_collaborators_get(self, id, **kwargs): # noqa: E501
"""story_id_collaborators_get # noqa: E501
Returns a list of that collaborators on the story # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_collaborators_get(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[StoryCollaborator]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.story_id_collaborators_get_with_http_info(id, **kwargs) # noqa: E501
def story_id_collaborators_get_with_http_info(self, id, **kwargs): # noqa: E501
"""story_id_collaborators_get # noqa: E501
Returns a list of that collaborators on the story # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_collaborators_get_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(list[StoryCollaborator], status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method story_id_collaborators_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `story_id_collaborators_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/{id}/collaborators', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[StoryCollaborator]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def story_id_collaborators_post(self, id, unknown_base_type, **kwargs): # noqa: E501
"""story_id_collaborators_post # noqa: E501
Add a collaborator to this story # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_collaborators_post(id, unknown_base_type, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param UNKNOWN_BASE_TYPE unknown_base_type: Collaborator user id and permission type (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: StoryCollaborator
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.story_id_collaborators_post_with_http_info(id, unknown_base_type, **kwargs) # noqa: E501
def story_id_collaborators_post_with_http_info(self, id, unknown_base_type, **kwargs): # noqa: E501
"""story_id_collaborators_post # noqa: E501
Add a colloborator to this story # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_collaborators_post_with_http_info(id, unknown_base_type, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param UNKNOWN_BASE_TYPE unknown_base_type: Collaborator user id and permission type (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(StoryCollaborator, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id', 'unknown_base_type'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method story_id_collaborators_post" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `story_id_collaborators_post`") # noqa: E501
# verify the required parameter 'unknown_base_type' is set
if ('unknown_base_type' not in local_var_params or
local_var_params['unknown_base_type'] is None):
raise ApiValueError("Missing the required parameter `unknown_base_type` when calling `story_id_collaborators_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'unknown_base_type' in local_var_params:
body_params = local_var_params['unknown_base_type']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/{id}/collaborators', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StoryCollaborator', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def story_id_collaborators_userid_delete(self, id, story_collaborator_userid, **kwargs): # noqa: E501
"""story_id_collaborators_userid_delete # noqa: E501
Remove a collaborator from this story # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_collaborators_userid_delete(id, story_collaborator_userid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param str story_collaborator_userid: The presalytics userid (NOT the Id of the story_collaborator object) (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.story_id_collaborators_userid_delete_with_http_info(id, story_collaborator_userid, **kwargs) # noqa: E501
def story_id_collaborators_userid_delete_with_http_info(self, id, story_collaborator_userid, **kwargs): # noqa: E501
"""story_id_collaborators_userid_delete # noqa: E501
Remove a collaborator from this story # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_collaborators_userid_delete_with_http_info(id, story_collaborator_userid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param str story_collaborator_userid: The presalytics userid (NOT the Id of the story_collaborator object) (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id', 'story_collaborator_userid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method story_id_collaborators_userid_delete" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `story_id_collaborators_userid_delete`") # noqa: E501
# verify the required parameter 'story_collaborator_userid' is set
if ('story_collaborator_userid' not in local_var_params or
local_var_params['story_collaborator_userid'] is None):
raise ApiValueError("Missing the required parameter `story_collaborator_userid` when calling `story_id_collaborators_userid_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
if 'story_collaborator_userid' in local_var_params:
path_params['story_collaborator_userid'] = local_var_params['story_collaborator_userid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/{id}/collaborators/{story_collaborator_userid}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def story_id_collaborators_userid_get(self, id, story_collaborator_userid, **kwargs): # noqa: E501
"""story_id_collaborators_userid_get # noqa: E501
Get a collaborator's permissions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_collaborators_userid_get(id, story_collaborator_userid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param str story_collaborator_userid: The presalytics userid (NOT the Id of the story_collaborator object) (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: StoryCollaborator
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.story_id_collaborators_userid_get_with_http_info(id, story_collaborator_userid, **kwargs) # noqa: E501
def story_id_collaborators_userid_get_with_http_info(self, id, story_collaborator_userid, **kwargs): # noqa: E501
"""story_id_collaborators_userid_get # noqa: E501
Get a collaborator's permissions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_collaborators_userid_get_with_http_info(id, story_collaborator_userid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param str story_collaborator_userid: The presalytics userid (NOT the Id of the story_collaborator object) (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(StoryCollaborator, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id', 'story_collaborator_userid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method story_id_collaborators_userid_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `story_id_collaborators_userid_get`") # noqa: E501
# verify the required parameter 'story_collaborator_userid' is set
if ('story_collaborator_userid' not in local_var_params or
local_var_params['story_collaborator_userid'] is None):
raise ApiValueError("Missing the required parameter `story_collaborator_userid` when calling `story_id_collaborators_userid_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
if 'story_collaborator_userid' in local_var_params:
path_params['story_collaborator_userid'] = local_var_params['story_collaborator_userid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/{id}/collaborators/{story_collaborator_userid}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StoryCollaborator', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def story_id_collaborators_userid_permissiontype_get(self, id, story_collaborator_userid, permissiontype, **kwargs): # noqa: E501
"""story_id_collaborators_userid_permissiontype_get # noqa: E501
Returns a status code indicating whether a user has permission: 204 = Granted, 403 = forbidden # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_collaborators_userid_permissiontype_get(id, story_collaborator_userid, permissiontype, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param str story_collaborator_userid: The presalytics userid (NOT the Id of the story_collaborator object) (required)
:param str permissiontype: the type of permission requested. can be a permission_type object name (e.g., owner, editor, create, viewer, admin) or a permission type field (e.g., can_edit, can_view, can_add_collaborators, can_delete) (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.story_id_collaborators_userid_permissiontype_get_with_http_info(id, story_collaborator_userid, permissiontype, **kwargs) # noqa: E501
def story_id_collaborators_userid_permissiontype_get_with_http_info(self, id, story_collaborator_userid, permissiontype, **kwargs): # noqa: E501
"""story_id_collaborators_userid_permissiontype_get # noqa: E501
Returns a status code indicating whether a user has permission: 204 = Granted, 403 = forbidden # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_collaborators_userid_permissiontype_get_with_http_info(id, story_collaborator_userid, permissiontype, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param str story_collaborator_userid: The presalytics userid (NOT the Id of the story_collaborator object) (required)
:param str permissiontype: the type of permission requested. can be a permission_type object name (e.g., owner, editor, create, viewer, admin) or a permission type field (e.g., can_edit, can_view, can_add_collaborators, can_delete) (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id', 'story_collaborator_userid', 'permissiontype'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method story_id_collaborators_userid_permissiontype_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `story_id_collaborators_userid_permissiontype_get`") # noqa: E501
# verify the required parameter 'story_collaborator_userid' is set
if ('story_collaborator_userid' not in local_var_params or
local_var_params['story_collaborator_userid'] is None):
raise ApiValueError("Missing the required parameter `story_collaborator_userid` when calling `story_id_collaborators_userid_permissiontype_get`") # noqa: E501
# verify the required parameter 'permissiontype' is set
if ('permissiontype' not in local_var_params or
local_var_params['permissiontype'] is None):
raise ApiValueError("Missing the required parameter `permissiontype` when calling `story_id_collaborators_userid_permissiontype_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
if 'story_collaborator_userid' in local_var_params:
path_params['story_collaborator_userid'] = local_var_params['story_collaborator_userid'] # noqa: E501
if 'permissiontype' in local_var_params:
path_params['permissiontype'] = local_var_params['permissiontype'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/{id}/collaborators/authorize/{story_collaborator_userid}/{permissiontype}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def story_id_collaborators_userid_put(self, id, story_collaborator_userid, story_collaborator, **kwargs): # noqa: E501
"""story_id_collaborators_userid_put # noqa: E501
Modify a collaborator's permissions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_collaborators_userid_put(id, story_collaborator_userid, story_collaborator, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param str story_collaborator_userid: The presalytics userid (NOT the Id of the story_collaborator object) (required)
:param StoryCollaborator story_collaborator: Collaborator user id (presalytics userid) and permission type (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: StoryCollaborator
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.story_id_collaborators_userid_put_with_http_info(id, story_collaborator_userid, story_collaborator, **kwargs) # noqa: E501
def story_id_collaborators_userid_put_with_http_info(self, id, story_collaborator_userid, story_collaborator, **kwargs): # noqa: E501
"""story_id_collaborators_userid_put # noqa: E501
Modify a collaborator's permissions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_collaborators_userid_put_with_http_info(id, story_collaborator_userid, story_collaborator, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param str story_collaborator_userid: The presalytics userid (NOT the Id of the story_collaborator object) (required)
:param StoryCollaborator story_collaborator: Collaborator user id (presalytics userid) and permission type (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(StoryCollaborator, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id', 'story_collaborator_userid', 'story_collaborator'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method story_id_collaborators_userid_put" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `story_id_collaborators_userid_put`") # noqa: E501
# verify the required parameter 'story_collaborator_userid' is set
if ('story_collaborator_userid' not in local_var_params or
local_var_params['story_collaborator_userid'] is None):
raise ApiValueError("Missing the required parameter `story_collaborator_userid` when calling `story_id_collaborators_userid_put`") # noqa: E501
# verify the required parameter 'story_collaborator' is set
if ('story_collaborator' not in local_var_params or
local_var_params['story_collaborator'] is None):
raise ApiValueError("Missing the required parameter `story_collaborator` when calling `story_id_collaborators_userid_put`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
if 'story_collaborator_userid' in local_var_params:
path_params['story_collaborator_userid'] = local_var_params['story_collaborator_userid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'story_collaborator' in local_var_params:
body_params = local_var_params['story_collaborator']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/{id}/collaborators/{story_collaborator_userid}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StoryCollaborator', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def story_id_delete(self, id, **kwargs): # noqa: E501
"""Remove story # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_delete(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.story_id_delete_with_http_info(id, **kwargs) # noqa: E501
def story_id_delete_with_http_info(self, id, **kwargs): # noqa: E501
"""Remove story # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_delete_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method story_id_delete" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `story_id_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def story_id_file_ooxmlautomationid_get(self, id, ooxml_automation_id, **kwargs): # noqa: E501
"""story_id_file_ooxmlautomationid_get # noqa: E501
Get updated story as open office xml file (e.g., .pptx, .docx, .xlsx) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_file_ooxmlautomationid_get(id, ooxml_automation_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param str ooxml_automation_id: the id of the ooxml_automation object (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: file
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.story_id_file_ooxmlautomationid_get_with_http_info(id, ooxml_automation_id, **kwargs) # noqa: E501
def story_id_file_ooxmlautomationid_get_with_http_info(self, id, ooxml_automation_id, **kwargs): # noqa: E501
"""story_id_file_ooxmlautomationid_get # noqa: E501
Get updated story as open office xml file (e.g., .pptx, .docx, .xlsx) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_file_ooxmlautomationid_get_with_http_info(id, ooxml_automation_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param str ooxml_automation_id: the id of the ooxml_automation object (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(file, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id', 'ooxml_automation_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method story_id_file_ooxmlautomationid_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `story_id_file_ooxmlautomationid_get`") # noqa: E501
# verify the required parameter 'ooxml_automation_id' is set
if ('ooxml_automation_id' not in local_var_params or
local_var_params['ooxml_automation_id'] is None):
raise ApiValueError("Missing the required parameter `ooxml_automation_id` when calling `story_id_file_ooxmlautomationid_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
if 'ooxml_automation_id' in local_var_params:
path_params['ooxml_automation_id'] = local_var_params['ooxml_automation_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/vnd.openxmlformats-officedocument.presentationml.presentation', 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/{id}/file/{ooxml_automation_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='file', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def story_id_get(self, id, **kwargs): # noqa: E501
"""Returns story metadata, inlcuding json object with story outline # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_get(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Story
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.story_id_get_with_http_info(id, **kwargs) # noqa: E501
def story_id_get_with_http_info(self, id, **kwargs): # noqa: E501
"""Returns story metadata, inlcuding json object with story outline # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_get_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(Story, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method story_id_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `story_id_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Story', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def story_id_put(self, id, story, **kwargs): # noqa: E501
"""Update story metadata, including story outline # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_put(id, story, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param Story story: The updated story object (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Story
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.story_id_put_with_http_info(id, story, **kwargs) # noqa: E501
def story_id_put_with_http_info(self, id, story, **kwargs): # noqa: E501
"""Update story metadata, including story outline # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_id_put_with_http_info(id, story, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param Story story: The updated story object (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(Story, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id', 'story'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method story_id_put" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `story_id_put`") # noqa: E501
# verify the required parameter 'story' is set
if ('story' not in local_var_params or
local_var_params['story'] is None):
raise ApiValueError("Missing the required parameter `story` when calling `story_id_put`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'story' in local_var_params:
body_params = local_var_params['story']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Story', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def story_permission_types_get(self, **kwargs): # noqa: E501
"""story_permission_types_get # noqa: E501
Returns a list of possible user permission types # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_permission_types_get(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[PermissionType]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.story_permission_types_get_with_http_info(**kwargs) # noqa: E501
def story_permission_types_get_with_http_info(self, **kwargs): # noqa: E501
"""story_permission_types_get # noqa: E501
Returns a list of possible user permission types # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_permission_types_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(list[PermissionType], status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method story_permission_types_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/permission_types', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[PermissionType]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def story_post(self, outline, **kwargs): # noqa: E501
"""Upload new story to presalytics api # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_post(outline, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param Outline outline: A story outline json object (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Story
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.story_post_with_http_info(outline, **kwargs) # noqa: E501
def story_post_with_http_info(self, outline, **kwargs): # noqa: E501
"""Upload new story to presalytics api # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_post_with_http_info(outline, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param Outline outline: A story outline json object (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(Story, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['outline'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method story_post" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'outline' is set
if ('outline' not in local_var_params or
local_var_params['outline'] is None):
raise ApiValueError("Missing the required parameter `outline` when calling `story_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'outline' in local_var_params:
body_params = local_var_params['outline']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Story', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def story_post_file(self, **kwargs): # noqa: E501
"""Upload new story to presalytics api via an Open Office Xml file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_post_file(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param list[file] file:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Story
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.story_post_file_with_http_info(**kwargs) # noqa: E501
def story_post_file_with_http_info(self, **kwargs): # noqa: E501
"""Upload new story to presalytics api via an Open Office Xml file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_post_file_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param list[file] file:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(Story, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['file'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method story_post_file" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'file' in local_var_params:
local_var_files['file'] = local_var_params['file'] # noqa: E501
collection_formats['file'] = 'csv' # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/file', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Story', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def story_teller_id_get(self, id, **kwargs): # noqa: E501
"""story_teller_id_get # noqa: E501
Render story as reveal.js web document # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_teller_id_get(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.story_teller_id_get_with_http_info(id, **kwargs) # noqa: E501
def story_teller_id_get_with_http_info(self, id, **kwargs): # noqa: E501
"""story_teller_id_get # noqa: E501
Render story as reveal.js web document # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.story_teller_id_get_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: the id from the story object (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(str, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method story_teller_id_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `story_teller_id_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/html', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/teller/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
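# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the generated client: the docstrings
# above describe both calling styles.  The class name ``StoryApi``, the default
# ApiClient configuration and the story id below are assumptions, and running
# this block performs real HTTP requests.
if __name__ == "__main__":
    api = StoryApi()  # assumption: the default ApiClient configuration suffices
    story_id = "00000000-0000-0000-0000-000000000000"  # placeholder story id
    story = api.story_id_get(story_id)  # synchronous call, returns a Story
    thread = api.story_id_get(story_id, async_req=True)  # asynchronous call
    story = thread.get()  # blocks until the request thread finishes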
| [
"[email protected]"
] | |
53efc7d0e74f2edd93a30082008734d0a0524e74 | 72c6e91223602b29ae34499f2813d5197dcf5f00 | /p15_three_sum.py | 59dc6ae80fd912eef3f7c89fe42a18ff5dacc7e2 | [] | no_license | koyo922/leetcode | d730d6aab6ee368b75ca59bce8492c548e7e2d6d | e8e561adea5e92cd00e374b613ea52a64be4e766 | refs/heads/master | 2020-08-02T17:32:59.512106 | 2019-12-02T11:04:53 | 2019-12-02T11:04:53 | 211,447,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,694 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 expandtab number
"""
Given an array nums of n integers, determine whether there exist three elements a, b, c in nums such that a + b + c = 0, and find all unique triplets that satisfy the condition.
Note: the answer must not contain duplicate triplets.
For example, given the array nums = [-1, 0, 1, 2, -1, -4],
the set of triplets satisfying the requirement is:
[
[-1, 0, 1],
[-1, -1, 2]
]
Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/3sum
Copyright belongs to LeetCode (领扣网络); commercial reprints require official authorization, non-commercial reprints must credit the source.
Authors: qianweishuo<[email protected]>
Date: 2019/7/4 8:46 AM
"""
class Solution(object):
def bruteforce(self, nums):
res = set()
        nums.sort()  # sorting helps reduce duplicates and eases deduplication
for i, x in enumerate(nums):
            for j, y in enumerate(nums[i + 1:], start=i + 1):  # note the start offset
for z in nums[j + 1:]:
if x + y + z == 0:
res.add((x, y, z))
return list(res)
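    # Note: the brute force above is O(n^3); the two methods below cut this to
    # roughly O(n^2) by fixing one anchor element and reducing it to two-sum.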
def using_set(self, nums):
        if len(nums) < 3:  # boundary condition
return []
res = set()
        nums.sort()  # sort first, which eases deduplication
        for i, v in enumerate(nums[:-2]):
            if i - 1 >= 0 and nums[i - 1] == v:  # skip anchors equal to the value on their left, avoiding duplicate solutions
                continue
            trap = set()  # like two-sum, but values (not indices) are returned, so a set can replace a dict
for x in nums[i + 1:]:
if x in trap:
res.add((v, -v - x, x))
else:
trap.add(-v - x)
return res
def using_pinch(self, nums):
res = set()
nums.sort()
for i, v in enumerate(nums[:-2]):
            if i - 1 >= 0 and nums[i - 1] == v:  # skip duplicate anchor values
                continue
            l, r = i + 1, len(nums) - 1  # pinch inwards from both ends in place, avoiding extra space
while l < r:
s = v + nums[l] + nums[r]
if s < 0:
l += 1
elif s > 0:
r -= 1
else:
res.add((v, nums[l], nums[r]))
                    # if res were a list, manual dedup would be needed here: while l<r and nums[l+1]==nums[l]: l+=1
l += 1
r -= 1
return res
def threeSum(self, nums):
# return self.bruteforce(nums)
# return self.using_set(nums)
return self.using_pinch(nums)
if __name__ == '__main__':
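    # Quick randomized cross-check of two of the strategies (illustrative);
    # results are compared as sets of tuples since only their ordering differs.
    import random
    _nums = [random.randint(-5, 5) for _ in range(12)]
    assert set(Solution().bruteforce(_nums)) == set(Solution().using_pinch(_nums))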
print(Solution().threeSum([-1, 0, 1, 2, -1, -4]))
| [
"[email protected]"
] | |
f26e81267ca6aa0a3f6dce527dfefd1185dacee0 | 3da6b8a0c049a403374e787149d9523012a1f0fc | /Coder_Old/pycharm_daima/爬虫大师班/10-关系型数据库/数据库操作.py | 8d6f74f0337cf0e51be57211571c88b065331126 | [] | no_license | AndersonHJB/PyCharm_Coder | d65250d943e84b523f022f65ef74b13e7c5bc348 | 32f2866f68cc3a391795247d6aba69a7156e6196 | refs/heads/master | 2022-07-25T11:43:58.057376 | 2021-08-03T02:50:01 | 2021-08-03T02:50:01 | 348,922,058 | 3 | 3 | null | 2021-09-05T02:20:10 | 2021-03-18T02:57:16 | Python | UTF-8 | Python | false | false | 7,139 | py | # !/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author: AI悦创  @DateTime: 2020/2/16 16:02  @Function: database operations demo  Development_tool: PyCharm
# code is far away from bugs with the god animal protecting
# I love animals. They taste delicious.
from sqlalchemy import create_engine
from sqlalchemy import Table,Column,String,Integer,MetaData
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()  # create the declarative base class
engine = create_engine(
"mysql+pymysql://root:[email protected]:3306/test",
    max_overflow = 5,  # maximum number of extra connections allowed beyond the pool size
    pool_size = 10,  # connection pool size
    echo = True,  # emit SQL / debug logging
)
metadata = MetaData()
class Host(Base):
    # table name is "host"
    __tablename__ = 'host'
    # table structure
    # primary_key marks the primary key
    # unique: values must be unique
    # nullable: whether the column may be NULL
id = Column(Integer, primary_key=True, autoincrement=True)
hostname = Column(String(64), unique=True, nullable=False)
ip_addr = Column(String(128),unique=True, nullable=False)
port = Column(Integer, default=22)
# @classmethod
# def filter(cls, param):
# pass
# Base.metadata.create_all(engine)  # create the tables
# res = sess.query(Host).filter_by(id=1).all()
if __name__ == '__main__':
Session = sessionmaker(bind = engine)
sess = Session()
res = sess.query(Host).filter(Host.id==1)
for r in res:
print(r)
print(r.hostname)
print(r.ip_addr)
sess.commit()
h = Host(hostname='test1', ip_addr='127.0.0.1')
h2 = Host(hostname='test2', ip_addr='192.168.0.1', port=8080)
h3 = Host(hostname='test3', ip_addr='192.170.1.0', port=3030)
# sess.query(Host).filter(Host.id==1).update({'port':9999})
# sess.commit()
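    # Note: h, h2 and h3 above are built but never persisted; adding them to the
    # session and committing would write them to MySQL, e.g.
    #   sess.add_all([h, h2, h3])
    #   sess.commit()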
    # add multiple rows in a loop (see the commented-out variant below)
# if __name__ == '__main__':
# Session = sessionmaker(bind = engine)
# sess = Session()
# data_list = ['AI悦创', 'aiyc', 12, 1314.520, '黄']
# for index, data in enumerate(data_list):
# h = Host(hostname=data,ip_addr=index)
# # h = Host(hostname='{}'.format(data), ip_addr='{}'.format(index))
# sess.add(h)
# sess.commit()
# user = Table('mybank', metadata,
# Column('id', Integer, primary_key=True, autoincrement=True),
# Column('name', String(10))
# )
# connect = engine.connect()
# connect.execute(user.delete().where(user.c.id==1))
# connect.close()
# # res = connect.execute(select([user.c.name,]))
# # # res = connect.execute(select([user.c.id==1,user.c.id==2]))
# # print(res.fetchall())
# # connect.close()
# # metadata.create_all(engine)
# # conn = engine.connect()
# # conn.execute(user.update().where(user.c.id==1).values(name='Python'))
# # conn.close()
# # res = engine.execute('select * from user2020')
# # print(res)
# # for i in res:
# # print(i)
# # print(i[0])
# # print(i[1])
# # print(type(i))
# # engine.execute("insert into user2020 (City, name) values ('AIYC', 'huang')")
# # insert several values at once; the column order need not follow the table definition
#
# # engine.execute("update user2020 set id=5, name='Python' where id=1")
# # engine.execute("update user2020 set name='Python',id=10 where id=5")
#
#
#
# # engine.execute('update <table> set <assignments> where <row-selection condition>')
#
# # engine.execute("insert into user2020 (name) values ('AIYC')")
#
# # metadata = MetaData() # obtain the metadata object describing the database
# # data = Table('user', metadata,
# # Column('id', Integer, primary_key = True, autoincrement=True),
# # Column('name', String(10)),
# # Column('City', String(255))
# # )
# # # metadata.create_all(engine)
# # connect = engine.connect()
# # # connect.execute(data.update(data.c.id==1).values(City="Beijing",name="AI悦创"))
# # # connect.execute(select([]))
# # connect.close()
# from sqlalchemy import create_engine,MetaData,Table,engine
# from sqlalchemy import Column,String,Integer
#
#
# engine = create_engine(
# "mysql+pymysql://root:[email protected]:3306/test",# (里面的 root 要填写你的密码),注意:mysql+pymysql 之间不要加空格
# # "mysql + pymysql://root:root@localhost/test",
# max_overflow = 5, # 超过连接池大小之后,外最多可以创建的链接
# pool_size = 10, # 连接池大小
# echo = True, # 调试信息展示
# )
#
# metadata = MetaData() # 获得元数据,介绍数据库
#
# # define the table
# user_table = Table('user_table', metadata,
# Column("id", Integer, primary_key=True,autoincrement=True),
# Column("教学表",String(10)))
# metadata.create_all(engine)  # create the table
#
# # modify data in the table
# conn = engine.connect()  # obtain a connection
# # insert data
# conn.execute(user_table.insert(),{"教学表":"hjb_two"})
# # # update data - update every row
# conn.execute(user_table.update(),{"教学表":"AI悦创"})
# # # update specific rows
# conn.execute(user_table.update().where(user_table.c.id==1).values(id=1000))
# conn.execute(user_table.update().where(user_table.c.id==2).values(教学表='AIYC'))
# # # where(user.c.id==2) selects which rows are looked up, i.e. which rows get modified
# conn.close()
#
# from sqlalchemy import create_engine,MetaData,Table,engine
# from sqlalchemy import Column,String,Integer
#
#
# engine = create_engine(
# "mysql+pymysql://root:[email protected]:3306/test",
#     max_overflow = 5,  # maximum number of extra connections allowed beyond the pool size
#     pool_size = 10,  # connection pool size
#     echo = True,  # emit SQL / debug logging
# )
#
# metadata = MetaData()  # obtain the metadata object describing the database
#
# # define the table
# user = Table('mybank', metadata,
# Column("id", Integer, primary_key=True,autoincrement=True),
# Column("教学表",String(10)))
# metadata.create_all(engine)  # create the table
# # ---------------------------- insert data ----------------------------------
# # modify data in the table
# conn = engine.connect()  # obtain a connection
# # insert data
# conn.execute(user.insert(),{"教学表":"hjb_two"})
# conn.close()
# # ---------------------------- update data ----------------------------------
# # # update data - update every row
# conn.execute(user.update(),{"教学表":"AI悦创"})
# # # update specific rows
# conn.execute(user.update().where(user.c.id==1).values(id=1000))
# conn.execute(user.update().where(user.c.id==2).values(教学表='AIYC'))
# # update several columns in one statement
# conn.execute(user.update().where(user.c.id==1).values(City="Beijing",name="AI悦创"))
# # # where(user.c.id==2) selects which rows are looked up, i.e. which rows get modified
# conn.close()
# # ---------------------------- query data ----------------------------------
# # requires importing select
# from sqlalchemy import select
# conn = engine.connect()
# res = conn.execute(select([user.c.name,]))
# # res = conn.execute(select([user.c.id==1, user.c.id==2]))
# print(res.fetchall())
# conn.close()
# # ---------------------------- delete data ----------------------------------
# conn = engine.connect()
# conn.execute(user.delete().where(user.c.id==1))
# conn.close() | [
"[email protected]"
] | |
5fa70f6b2467740079445fbd4bd24d17e263db56 | e7bb432a333c74f90f7bd95f6cd6c3647552d8ff | /uniq.py | 1b6a247631f40f107df89ecf6b7f86667c2e3097 | [] | no_license | ponyatov/world | 6e44dcd7f0aa3821232eaf5c00f543f2430d80eb | 7705422a444419d04a9a6c13826f4fda09dcfc37 | refs/heads/master | 2021-01-13T06:55:54.261074 | 2017-02-08T15:13:23 | 2017-02-08T15:13:23 | 81,317,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | registry = {}
class Data(object):
    def __repr__(self): return str(self.val)
    def __new__(cls, V):
        # look up an existing (interned) object for this value first
        if V in registry:
            return registry[V]
        obj = object.__new__(cls)
        obj.val = V
        registry[V] = obj  # register the newly created object
        return obj
print registry,Data(0)
print registry,Data(0)
print registry,Data(1)
print registry,Data(1)
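# with interning in place, equal values share a single instance
assert Data(0) is Data(0)
assert Data(0) is not Data(1)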
| [
"[email protected]"
] | |
493df5864714a188c03125eef52ecfc3145f1d99 | 0fb6f0faa7e6d9b557ec43201f0edb2056ac08d6 | /python/baekJoon/arithOper/theFourFundamentalArithOper.py | 52544072db4d8ed931a9f2381ec886fe55a3c231 | [] | no_license | macoto35/Algorithms_Kata | a0bb167668aa4e8b678f9c5f47fc9142ade67553 | f92aec5674dc67be024f4ad04d40a85d09ef7b1e | refs/heads/master | 2021-07-07T13:21:16.805125 | 2020-07-21T01:25:34 | 2020-07-21T01:25:34 | 138,005,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | a, b = map(int, input().split()); print(a+b, a-b, a*b, a//b, a%b, sep='\n')
| [
"[email protected]"
] | |
7dd104b89946246aa48bfd8fc5c1cdb54c5b4ff1 | ee441564d68e45fa8df6828d6fc724dce4216028 | /test_R.py | da5c16d5ead939b36d0bbd69e14ae17ca4beae1f | [] | no_license | abandonsea/Revisiting-Feature-Fusion-for-RGB-T-Salient-Object-Detection | 3bfe78cbb181d17e177404c30d65f15b0d675098 | 79f6c2234c87b8a6a0237a8d98aeb4f2be1fc0fe | refs/heads/master | 2023-08-27T08:33:06.882790 | 2021-10-21T15:41:34 | 2021-10-21T15:41:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,341 | py | import cv2
import numpy as np
import T_train
import os
import sys
import tensorflow as tf
import time
import vgg16
import math
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
#img_t_mean=[101.515, 78.315, 140.606]
#img_t_mean=[85.971, 56.608, 151.944]
#img_t_mean=[127.493, 126.314, 127.453] #small
def load_img_list(dataset):
if dataset == 'MSRA-B':
path = 'dataset/MSRA-B/image'
elif dataset == 'DUT-OMRON':
path = 'dataset/DUT-OMRON/DUT-OMRON-image'
imgs = os.listdir(path)
return path, imgs
def image_entropy(input):
tmp = []
for i in range(256):
tmp.append(0)
val = 0
k = 0
res = 0
#image = input.convert('L')
img = np.array(input)
for i in range(len(img)):
for j in range(len(img[i])):
val = img[i][j]
tmp[val] = float(tmp[val] + 1)
k = float(k + 1)
for i in range(len(tmp)):
tmp[i] = float(tmp[i] / k)
for i in range(len(tmp)):
if(tmp[i] == 0):
res = res
else:
res = float(res - tmp[i] * (math.log(tmp[i]) / math.log(2.0)))
res_ = res / 8.0
return res_
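# Note (illustrative): image_entropy computes the Shannon entropy of a
# single-channel 8-bit image and divides it by 8, so the result lies in [0, 1]:
# a constant image gives 0.0, while an image whose 256 grey levels all occur
# equally often gives 1.0, e.g. (hypothetical usage)
#   image_entropy(np.zeros((16, 16), dtype=np.uint8))               # -> 0.0
#   image_entropy(np.arange(256, dtype=np.uint8).reshape(16, 16))   # -> 1.0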
if __name__ == "__main__":
model = T_train.Model()
model.build_model()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
img_size = T_train.img_size
label_size = T_train.label_size
ckpt = tf.train.get_checkpoint_state('Model-thermal/')
saver = tf.train.Saver()
saver.restore(sess, 'Model-thermal/model.ckpt-14')
datasets = ['MSRA-B', 'DUT-OMRON']
if not os.path.exists('Result'):
os.mkdir('Result')
#for dataset in datasets:
#path, imgs = load_img_list(dataset)
#save_dir = 'Result/' + dataset
#if not os.path.exists(save_dir):
#os.mkdir(save_dir)
#save_dir = 'Result/' + dataset + '/NLDF_'
#if not os.path.exists(save_dir):
#os.mkdir(save_dir)
imgs_r = os.listdir('DATA/thermal-test')
for f_img_r in imgs_r:
img_r = cv2.imread(os.path.join('DATA/thermal-test', f_img_r))
img_name, ext = os.path.splitext(f_img_r)
if img_r is not None:
#ori_img = img.copy()
img_shape = img_r.shape
img_r = cv2.resize(img_r, (img_size, img_size)) #- R_train.img_r_mean
img_r = img_r.astype(np.float32) / 255.
img_r = img_r.reshape((1, img_size, img_size, 3))
start_time = time.time()
result = sess.run(model.Prob,
feed_dict={model.input_holder_t: img_r})
print("--- %s seconds ---" % (time.time() - start_time))
result = np.reshape(result, (label_size, label_size, 2))
result = result[:, :, 0]
result = cv2.resize(np.squeeze(result), (img_shape[1], img_shape[0]))
save_name = os.path.join('Result', img_name+'.png')
cv2.imwrite(save_name, (result*255).astype(np.uint8))
sess.close()
| [
"[email protected]"
] | |
87fa53c091769800a069ce7ec1fbf0d80f7b3b39 | 0a83fcb80e0eaa4ff44bb8d9d14668d04a10164c | /collective/easyform/browser/actions.py | 99af8b22ba6a1aa6466a99d204ca42c662983210 | [] | no_license | quintagroup/collective.easyform | 97b268ca16512094979bcb88bdbbe053c521c908 | e2ef62451eae750310656725787068425ba6b3f4 | refs/heads/master | 2023-01-10T14:14:15.020818 | 2016-06-09T19:23:45 | 2016-06-09T19:23:45 | 17,138,883 | 2 | 15 | null | 2022-12-27T14:53:37 | 2014-02-24T14:36:28 | Python | UTF-8 | Python | false | false | 11,034 | py | # -*- coding: utf-8 -*-
from Acquisition import aq_inner
from Acquisition import aq_parent
from Products.Five import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from ZPublisher.BaseRequest import DefaultPublishTraverse
from collective.easyform import easyformMessageFactory as _
from collective.easyform.api import get_actions
from collective.easyform.api import get_context
from collective.easyform.api import get_fields
from collective.easyform.browser.fields import AjaxSaveHandler
from collective.easyform.interfaces import IActionEditForm
from collective.easyform.interfaces import IActionFactory
from collective.easyform.interfaces import IEasyFormActionContext
from collective.easyform.interfaces import IEasyFormActionsContext
from collective.easyform.interfaces import IEasyFormActionsEditorExtender
from collective.easyform.interfaces import IExtraData
from collective.easyform.interfaces import INewAction
from collective.easyform.interfaces import ISaveData
from collective.easyform.interfaces import ISavedDataFormWrapper
from plone.autoform.form import AutoExtensibleForm
from plone.memoize.instance import memoize
from plone.schemaeditor.browser.field.traversal import FieldContext
from plone.schemaeditor.browser.schema.add_field import FieldAddForm
from plone.schemaeditor.browser.schema.listing import SchemaListing
from plone.schemaeditor.browser.schema.listing import SchemaListingPage
from plone.schemaeditor.browser.schema.traversal import SchemaContext
from plone.schemaeditor.interfaces import IFieldEditFormSchema
from plone.schemaeditor.utils import SchemaModifiedEvent
from plone.z3cform import layout
from plone.z3cform.crud import crud
from plone.z3cform.interfaces import IDeferSecurityCheck
from plone.z3cform.traversal import WrapperWidgetTraversal
from z3c.form import button
from z3c.form import field
from z3c.form import form
from zope.cachedescriptors.property import Lazy as lazy_property
from zope.component import adapts
from zope.component import getAdapters
from zope.component import queryUtility
from zope.event import notify
from zope.i18nmessageid import MessageFactory
from zope.interface import alsoProvides
from zope.interface import implements
from zope.interface import noLongerProvides
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.schema import getFieldsInOrder
try:
import plone.resourceeditor
plone.resourceeditor # avoid PEP 8 warning
HAVE_RESOURCE_EDITOR = True
except ImportError:
HAVE_RESOURCE_EDITOR = False
PMF = MessageFactory('plone')
class SavedDataTraversal(WrapperWidgetTraversal):
adapts(ISavedDataFormWrapper, IBrowserRequest)
def traverse(self, name, ignored):
form = self._prepareForm()
alsoProvides(self.request, IDeferSecurityCheck)
form.update()
noLongerProvides(self.request, IDeferSecurityCheck)
for subform in form.subforms:
if not hasattr(subform, 'subforms'):
continue
for subsubform in subform.subforms:
if not name.startswith(subsubform.prefix):
continue
for id_ in subsubform.widgets:
if subsubform.prefix + subsubform.widgets.prefix + id_ == name:
target = self._form_traverse(subsubform, id_)
target.__parent__ = aq_inner(self.context)
return target
return super(SavedDataTraversal, self).traverse(name, ignored)
class SavedDataView(BrowserView):
def items(self):
return [
(name, action.__doc__)
for name, action in getFieldsInOrder(get_actions(self.context))
if ISaveData.providedBy(action)
]
class DataWrapper(dict):
    """Dict wrapper around one saved-data record, keeping its storage id and parent context."""
    def __init__(self, sid, data, parent):
        self.__sid__ = sid
        self.update(data)
        self.__parent__ = parent
class SavedDataForm(crud.CrudForm):
template = ViewPageTemplateFile('saveddata_form.pt')
addform_factory = crud.NullForm
@property
def field(self):
return self.context.field
@property
def name(self):
return self.field.__name__
@property
def get_fields(self):
return get_fields(get_context(self.field))
def description(self):
return _(u"${items} input(s) saved", mapping={'items': self.field.itemsSaved()})
@property
def update_schema(self):
fields = field.Fields(self.get_fields)
showFields = getattr(self.field, 'showFields', [])
if showFields:
fields = fields.select(*showFields)
return fields
@property
def view_schema(self):
ExtraData = self.field.ExtraData
if ExtraData:
return field.Fields(IExtraData).select(*ExtraData)
def get_items(self):
return [
(key, DataWrapper(key, value, self.context))
for key, value in self.field.getSavedFormInputItems()
]
# def add(self, data):
# storage = self.context._inputStorage
def before_update(self, item, data):
id_ = item.__sid__
item.update(data)
self.field.setDataRow(id_, item.copy())
def remove(self, (id, item)):
self.field.delDataRow(id)
@button.buttonAndHandler(PMF(u'Download'), name='download')
def handleDownload(self, action):
self.field.download(self.request.response)
@button.buttonAndHandler(_(u'Clear all'), name='clearall')
def handleClearAll(self, action):
self.field.clearSavedFormInput()
class SavedDataFormWrapper(layout.FormWrapper):
implements(ISavedDataFormWrapper)
ActionSavedDataView = layout.wrap_form(
SavedDataForm, __wrapper_class=SavedDataFormWrapper)
class EasyFormActionContext(FieldContext):
""" wrapper for published zope 3 schema fields
"""
implements(IEasyFormActionContext)
def publishTraverse(self, request, name):
""" It's not valid to traverse to anything below a field context.
"""
# hack to make inline validation work
# (plone.app.z3cform doesn't know the form is the default view)
if name == self.__name__:
return ActionEditView(self, request).__of__(self)
return DefaultPublishTraverse(self, request).publishTraverse(request, name)
class EasyFormActionsView(SchemaContext):
implements(IEasyFormActionsContext)
schema = None
def __init__(self, context, request):
self.schema = get_actions(context)
super(EasyFormActionsView, self).__init__(
self.schema,
request,
name='actions'
)
def publishTraverse(self, request, name):
""" Look up the field whose name matches the next URL path element, and wrap it.
"""
try:
return EasyFormActionContext(self.schema[name], self.request).__of__(self)
except KeyError:
return DefaultPublishTraverse(self, request).publishTraverse(request, name)
def browserDefault(self, request):
""" If not traversing through the schema to a field, show the SchemaListingPage.
"""
return self, ('@@listing',)
class EasyFormActionsListing(SchemaListing):
template = ViewPageTemplateFile('actions_listing.pt')
@memoize
def _field_factory(self, field):
field_identifier = u'{0}.{1}'.format(
field.__module__, field.__class__.__name__)
return queryUtility(IActionFactory, name=field_identifier)
@button.buttonAndHandler(PMF(u'Save'))
def handleSaveDefaults(self, action):
data, errors = self.extractData()
if errors:
self.status = self.formErrorsMessage
return
for fname, value in data.items():
self.context.schema[fname].required = value
notify(SchemaModifiedEvent(self.context))
# update widgets to take the new defaults into account
self.updateWidgets()
self.request.response.redirect(self.context.absolute_url())
def handleModelEdit(self, action):
self.request.response.redirect('@@modeleditor')
class EasyFormActionsListingPage(SchemaListingPage):
""" Form wrapper so we can get a form with layout.
We define an explicit subclass rather than using the wrap_form method
from plone.z3cform.layout so that we can inject the schema name into
the form label.
"""
form = EasyFormActionsListing
index = ViewPageTemplateFile('model_listing.pt')
class ActionAddForm(FieldAddForm):
fields = field.Fields(INewAction)
label = _('Add new action')
ActionAddFormPage = layout.wrap_form(ActionAddForm)
class ActionEditForm(AutoExtensibleForm, form.EditForm):
implements(IActionEditForm)
def __init__(self, context, request):
super(form.EditForm, self).__init__(context, request)
self.field = context.field
def getContent(self):
return self.field
@lazy_property
def schema(self):
return IFieldEditFormSchema(self.field)
@lazy_property
def additionalSchemata(self):
schema_context = self.context.aq_parent
return [v for k, v in getAdapters((schema_context, self.field), IEasyFormActionsEditorExtender)]
@button.buttonAndHandler(PMF(u'Save'), name='save')
def handleSave(self, action):
data, errors = self.extractData()
if errors:
self.status = self.formErrorsMessage
return
changes = self.applyChanges(data)
if changes:
self.status = self.successMessage
else:
self.status = self.noChangesMessage
notify(SchemaModifiedEvent(self.context.aq_parent))
self.redirectToParent()
@button.buttonAndHandler(PMF(u'Cancel'), name='cancel')
def handleCancel(self, action):
self.redirectToParent()
def redirectToParent(self):
parent = aq_parent(aq_inner(self.context))
url = parent.absolute_url()
self.request.response.redirect(url)
class ActionEditView(layout.FormWrapper):
form = ActionEditForm
def __init__(self, context, request):
super(ActionEditView, self).__init__(context, request)
self.field = context.field
@lazy_property
def label(self):
return _(u"Edit Action '${fieldname}'", mapping={'fieldname': self.field.__name__})
if HAVE_RESOURCE_EDITOR:
but = button.Button("modeleditor", title=_(u'Edit XML Actions Model'))
EasyFormActionsListing.buttons += button.Buttons(but)
handler = button.Handler(but, EasyFormActionsListing.handleModelEdit)
EasyFormActionsListing.handlers.addHandler(but, handler)
class ModelEditorView(BrowserView):
""" editor view """
title = _(u'Edit XML Actions Model')
def modelSource(self):
return self.context.aq_parent.actions_model
class AjaxSaveHandler(AjaxSaveHandler):
""" handle AJAX save posts """
def save(self, source):
self.context.aq_parent.actions_model = source
| [
"[email protected]"
] | |
a63262e1c0f892e91fd74142a07a6b0b86a245a3 | 524591f2c4f760bc01c12fea3061833847a4ff9a | /arm/usr/lib/python2.7/dist-packages/rosdep2/main.py | 86d5d764afde106c322a2bd226ff82c6f7fead2e | [
"Python-2.0",
"BSD-3-Clause"
] | permissive | Roboy/roboy_plexus | 6f78d45c52055d97159fd4d0ca8e0f32f1fbd07e | 1f3039edd24c059459563cb81d194326fe824905 | refs/heads/roboy3 | 2023-03-10T15:01:34.703853 | 2021-08-16T13:42:54 | 2021-08-16T13:42:54 | 101,666,005 | 2 | 4 | BSD-3-Clause | 2022-10-22T13:43:45 | 2017-08-28T16:53:52 | C++ | UTF-8 | Python | false | false | 34,688 | py | #!/usr/bin/env python
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote/[email protected]
"""
Command-line interface to rosdep library
"""
from __future__ import print_function
import os
import sys
import traceback
try:
from urllib.error import URLError
from urllib.request import build_opener
from urllib.request import HTTPBasicAuthHandler
from urllib.request import HTTPHandler
from urllib.request import install_opener
from urllib.request import ProxyHandler
except ImportError:
from urllib2 import build_opener
from urllib2 import HTTPBasicAuthHandler
from urllib2 import HTTPHandler
from urllib2 import install_opener
from urllib2 import ProxyHandler
from urllib2 import URLError
import warnings
from optparse import OptionParser
import rospkg
from . import create_default_installer_context, get_default_installer
from . import __version__
from .core import RosdepInternalError, InstallFailed, UnsupportedOs, InvalidData, CachePermissionError
from .installers import RosdepInstaller
from .lookup import RosdepLookup, ResolutionError
from .rospkg_loader import DEFAULT_VIEW_KEY
from .sources_list import update_sources_list, get_sources_cache_dir,\
download_default_sources_list, SourcesListLoader,CACHE_INDEX,\
get_sources_list_dir, get_default_sources_list_file,\
DEFAULT_SOURCES_LIST_URL
from .rosdistrohelper import PreRep137Warning
from .catkin_packages import find_catkin_packages_in
from .catkin_packages import set_workspace_packages
from .catkin_packages import get_workspace_packages
class UsageError(Exception):
pass
_usage = """usage: rosdep [options] <command> <args>
Commands:
rosdep check <stacks-and-packages>...
check if the dependencies of package(s) have been met.
rosdep install <stacks-and-packages>...
  generate an installation script and then execute it.
rosdep db
generate the dependency database and print it to the console.
rosdep init
initialize rosdep sources in /etc/ros/rosdep. May require sudo.
rosdep keys <stacks-and-packages>...
list the rosdep keys that the packages depend on.
rosdep resolve <rosdeps>
resolve <rosdeps> to system dependencies
rosdep update
update the local rosdep database based on the rosdep sources.
rosdep what-needs <rosdeps>...
print a list of packages that declare a rosdep on (at least
one of) <rosdeps>
rosdep where-defined <rosdeps>...
print a list of yaml files that declare a rosdep on (at least
one of) <rosdeps>
rosdep fix-permissions
Recursively change the permissions of the user's ros home directory.
May require sudo. Can be useful to fix permissions after calling
"rosdep update" with sudo accidentally.
"""
def _get_default_RosdepLookup(options):
"""
Helper routine for converting command-line options into
appropriate RosdepLookup instance.
"""
os_override = convert_os_override_option(options.os_override)
sources_loader = SourcesListLoader.create_default(sources_cache_dir=options.sources_cache_dir,
os_override=os_override,
verbose=options.verbose)
lookup = RosdepLookup.create_from_rospkg(sources_loader=sources_loader)
lookup.verbose = options.verbose
return lookup
def rosdep_main(args=None):
if args is None:
args = sys.argv[1:]
try:
exit_code = _rosdep_main(args)
if exit_code not in [0, None]:
sys.exit(exit_code)
except rospkg.ResourceNotFound as e:
print("""
ERROR: Rosdep cannot find all required resources to answer your query
%s
"""%(error_to_human_readable(e)), file=sys.stderr)
sys.exit(1)
except UsageError as e:
print(_usage, file=sys.stderr)
print("ERROR: %s"%(str(e)), file=sys.stderr)
sys.exit(os.EX_USAGE)
except RosdepInternalError as e:
print("""
ERROR: Rosdep experienced an internal error.
Please go to the rosdep page [1] and file a bug report with the message below.
[1] : http://www.ros.org/wiki/rosdep
rosdep version: %s
%s
"""%(__version__, e.message), file=sys.stderr)
sys.exit(1)
except ResolutionError as e:
print("""
ERROR: %s
%s
"""%(e.args[0], e), file=sys.stderr)
sys.exit(1)
except CachePermissionError as e:
print(str(e))
print("Try running 'sudo rosdep fix-permissions'")
sys.exit(1)
except UnsupportedOs as e:
print("Unsupported OS: %s\nSupported OSes are [%s]"%(e.args[0], ', '.join(e.args[1])), file=sys.stderr)
sys.exit(1)
except Exception as e:
print("""
ERROR: Rosdep experienced an error: %s
Please go to the rosdep page [1] and file a bug report with the stack trace below.
[1] : http://www.ros.org/wiki/rosdep
rosdep version: %s
%s
"""%(e, __version__, traceback.format_exc()), file=sys.stderr)
sys.exit(1)
def check_for_sources_list_init(sources_cache_dir):
"""
Check to see if sources list and cache are present.
*sources_cache_dir* alone is enough to pass as the user has the
option of passing in a cache dir.
If check fails, tell user how to resolve and sys exit.
"""
commands = []
filename = os.path.join(sources_cache_dir, CACHE_INDEX)
if os.path.exists(filename):
return
else:
commands.append('rosdep update')
sources_list_dir = get_sources_list_dir()
if not os.path.exists(sources_list_dir):
commands.insert(0, 'sudo rosdep init')
else:
filelist = [f for f in os.listdir(sources_list_dir) if f.endswith('.list')]
if not filelist:
commands.insert(0, 'sudo rosdep init')
if commands:
commands = '\n'.join([" %s"%c for c in commands])
print("""
ERROR: your rosdep installation has not been initialized yet. Please run:
%s
"""%(commands), file=sys.stderr)
sys.exit(1)
else:
return True
def key_list_to_dict(key_list):
"""
Convert a list of strings of the form 'foo:bar' to a dictionary.
Splits strings of the form 'foo:bar quux:quax' into separate entries.
"""
try:
key_list = [key for s in key_list for key in s.split(' ')]
return dict(map(lambda s: [t.strip() for t in s.split(':')], key_list))
except ValueError as e:
raise UsageError("Invalid 'key:value' list: '%s'" % ' '.join(key_list))
def str_to_bool(s):
"""Maps a string to bool. Supports true/false, and yes/no, and is case-insensitive"""
s = s.lower()
if s in ['yes', 'true']:
return True
elif s in ['no', 'false']:
return False
else:
raise UsageError("Cannot parse '%s' as boolean" % s)
def setup_proxy_opener():
# check for http[s]?_proxy user
for scheme in ['http', 'https']:
key = scheme + '_proxy'
if key in os.environ:
proxy = ProxyHandler({scheme: os.environ[key]})
auth = HTTPBasicAuthHandler()
opener = build_opener(proxy, auth, HTTPHandler)
install_opener(opener)
def _rosdep_main(args):
# sources cache dir is our local database.
default_sources_cache = get_sources_cache_dir()
parser = OptionParser(usage=_usage, prog='rosdep')
parser.add_option("--os", dest="os_override", default=None,
metavar="OS_NAME:OS_VERSION", help="Override OS name and version (colon-separated), e.g. ubuntu:lucid")
parser.add_option("-c", "--sources-cache-dir", dest="sources_cache_dir", default=default_sources_cache,
metavar='SOURCES_CACHE_DIR', help="Override %s"%(default_sources_cache))
parser.add_option("--verbose", "-v", dest="verbose", default=False,
action="store_true", help="verbose display")
parser.add_option("--version", dest="print_version", default=False,
action="store_true", help="print version and exit")
parser.add_option("--reinstall", dest="reinstall", default=False,
action="store_true", help="(re)install all dependencies, even if already installed")
parser.add_option("--default-yes", "-y", dest="default_yes", default=False,
action="store_true", help="Tell the package manager to default to y or fail when installing")
parser.add_option("--simulate", "-s", dest="simulate", default=False,
action="store_true", help="Simulate install")
parser.add_option("-r", dest="robust", default=False,
action="store_true", help="Continue installing despite errors.")
parser.add_option("-q", dest="quiet", default=False,
action="store_true", help="Quiet. Suppress output except for errors.")
parser.add_option("-a", "--all", dest="rosdep_all", default=False,
action="store_true", help="select all packages")
parser.add_option("-n", dest="recursive", default=True,
action="store_false", help="Do not consider implicit/recursive dependencies. Only valid with 'keys', 'check', and 'install' commands.")
parser.add_option("--ignore-packages-from-source", "--ignore-src", "-i",
dest='ignore_src', default=False, action="store_true",
help="Affects the 'check' and 'install' verbs. If "
"specified then rosdep will not install keys "
"that are found to be catkin packages anywhere in "
"the ROS_PACKAGE_PATH or in any of the directories "
"given by the --from-paths option.")
parser.add_option("--skip-keys",
dest='skip_keys', action="append", default=[],
help="Affects the 'check' and 'install' verbs. The "
"specified rosdep keys will be ignored, i.e. not "
"resolved and not installed. The option can be supplied multiple "
"times. A space separated list of rosdep keys can also "
"be passed as a string. A more permanent solution to "
"locally ignore a rosdep key is creating a local rosdep rule "
"with an empty list of packages (include it in "
"/etc/ros/rosdep/sources.list.d/ before the defaults).")
parser.add_option("--filter-for-installers",
action="append", default=[],
help="Affects the 'db' verb. If supplied, the output of the 'db' "
"command is filtered to only list packages whose installer "
"is in the provided list. The option can be supplied "
"multiple times. A space separated list of installers can also "
"be passed as a string. Example: `--filter-for-installers \"apt pip\"`")
parser.add_option("--from-paths", dest='from_paths',
default=False, action="store_true",
help="Affects the 'check', 'keys', and 'install' verbs. "
"If specified the arugments to those verbs will be "
"considered paths to be searched, acting on all "
"catkin packages found there in.")
parser.add_option("--rosdistro", dest='ros_distro', default=None,
help="Explicitly sets the ROS distro to use, overriding "
"the normal method of detecting the ROS distro "
"using the ROS_DISTRO environment variable.")
parser.add_option("--as-root", default=[], action='append',
metavar="INSTALLER_KEY:<bool>", help="Override "
"whether sudo is used for a specific installer, "
"e.g. '--as-root pip:false' or '--as-root \"pip:no homebrew:yes\"'. "
"Can be specified multiple times.")
options, args = parser.parse_args(args)
if options.print_version:
print(__version__)
sys.exit(0)
# flatten list of skipped keys and filter-for-installers
options.skip_keys = [key for s in options.skip_keys for key in s.split(' ')]
options.filter_for_installers = [inst for s in options.filter_for_installers for inst in s.split(' ')]
if len(args) == 0:
parser.error("Please enter a command")
command = args[0]
if not command in _commands:
parser.error("Unsupported command %s."%command)
args = args[1:]
if options.ros_distro:
os.environ['ROS_DISTRO'] = options.ros_distro
# Convert list of keys to dictionary
options.as_root = dict((k, str_to_bool(v)) for k, v in key_list_to_dict(options.as_root).items())
if not command in ['init', 'update', 'fix-permissions']:
check_for_sources_list_init(options.sources_cache_dir)
elif not command in ['fix-permissions']:
setup_proxy_opener()
if command in _command_rosdep_args:
return _rosdep_args_handler(command, parser, options, args)
elif command in _command_no_args:
return _no_args_handler(command, parser, options, args)
else:
return _package_args_handler(command, parser, options, args)
def _no_args_handler(command, parser, options, args):
if args:
parser.error("command [%s] takes no arguments"%(command))
else:
return command_handlers[command](options)
def _rosdep_args_handler(command, parser, options, args):
# rosdep keys as args
if options.rosdep_all:
parser.error("-a, --all is not a valid option for this command")
elif len(args) < 1:
parser.error("Please enter arguments for '%s'"%command)
else:
return command_handlers[command](args, options)
def _package_args_handler(command, parser, options, args):
if options.rosdep_all:
if args:
parser.error("cannot specify additional arguments with -a")
else:
# let the loader filter the -a. This will take out some
# packages that are catkinized (for now).
lookup = _get_default_RosdepLookup(options)
loader = lookup.get_loader()
args = loader.get_loadable_resources()
not_found = []
elif not args:
parser.error("no packages or stacks specified")
# package or stack names as args. have to convert stack names to packages.
# - overrides to enable testing
packages = []
not_found = []
if options.from_paths:
for path in args:
if options.verbose:
print("Using argument '{0}' as a path to search.".format(path))
if not os.path.exists(path):
print("given path '{0}' does not exist".format(path))
return 1
path = os.path.abspath(path)
if 'ROS_PACKAGE_PATH' not in os.environ:
os.environ['ROS_PACKAGE_PATH'] = '{0}'.format(path)
else:
os.environ['ROS_PACKAGE_PATH'] = '{0}{1}{2}'.format(
path,
os.pathsep,
os.environ['ROS_PACKAGE_PATH']
)
pkgs = find_catkin_packages_in(path, options.verbose)
packages.extend(pkgs)
# Make packages list unique
packages = list(set(packages))
else:
rospack = rospkg.RosPack()
rosstack = rospkg.RosStack()
val = rospkg.expand_to_packages(args, rospack, rosstack)
packages = val[0]
not_found = val[1]
if not_found:
raise rospkg.ResourceNotFound(not_found[0], rospack.get_ros_paths())
# Handle the --ignore-src option
if command in ['install', 'check'] and options.ignore_src:
if options.verbose:
print("Searching ROS_PACKAGE_PATH for "
"sources: " + str(os.environ['ROS_PACKAGE_PATH'].split(':')))
ws_pkgs = get_workspace_packages()
for path in os.environ['ROS_PACKAGE_PATH'].split(':'):
path = os.path.abspath(path.strip())
if os.path.exists(path):
pkgs = find_catkin_packages_in(path, options.verbose)
ws_pkgs.extend(pkgs)
elif options.verbose:
print("Skipping non-existent path " + path)
set_workspace_packages(ws_pkgs)
lookup = _get_default_RosdepLookup(options)
# Handle the --skip-keys option by pretending that they are packages in the catkin workspace
if command in ['install', 'check'] and options.skip_keys:
if options.verbose:
print("Skipping the specified rosdep keys:\n- " + '\n- '.join(options.skip_keys))
lookup.skipped_keys = options.skip_keys
if 0 and not packages: # disable, let individual handlers specify behavior
# possible with empty stacks
print("No packages in arguments, aborting")
return
return command_handlers[command](lookup, packages, options)
def convert_os_override_option(options_os_override):
"""
Convert os_override option flag to ``(os_name, os_version)`` tuple, or
``None`` if not set
:returns: ``(os_name, os_version)`` tuple if option is set, ``None`` otherwise
:raises: :exc:`UsageError` if option is not set properly
"""
if not options_os_override:
return None
val = options_os_override
if not ':' in val:
raise UsageError("OS override must be colon-separated OS_NAME:OS_VERSION, e.g. ubuntu:maverick")
os_name = val[:val.find(':')]
os_version = val[val.find(':')+1:]
return os_name, os_version
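# Illustrative examples (not part of the original module): the override string
# is split on its first colon into an (os_name, os_version) tuple.
def _example_os_override():
    assert convert_os_override_option(None) is None
    assert convert_os_override_option('ubuntu:xenial') == ('ubuntu', 'xenial')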
def configure_installer_context(installer_context, options):
"""
Configure the *installer_context* from *options*.
- Override the OS detector in *installer_context* if necessary.
- Set *as_root* for installers if specified.
:raises: :exc:`UsageError` If user input options incorrectly
"""
os_override = convert_os_override_option(options.os_override)
if os_override is not None:
installer_context.set_os_override(*os_override)
for k,v in options.as_root.items():
try:
installer_context.get_installer(k).as_root = v
except KeyError:
raise UsageError("Installer '%s' not defined." % k)
def command_init(options):
try:
data = download_default_sources_list()
except URLError as e:
print("ERROR: cannot download default sources list from:\n%s\nWebsite may be down."%(DEFAULT_SOURCES_LIST_URL))
return 4
# reuse path variable for error message
path = get_sources_list_dir()
old_umask = os.umask(0o022)
try:
if not os.path.exists(path):
os.makedirs(path)
path = get_default_sources_list_file()
if os.path.exists(path):
print("ERROR: default sources list file already exists:\n\t%s\nPlease delete if you wish to re-initialize"%(path))
return 1
with open(path, 'w') as f:
f.write(data)
print("Wrote %s"%(path))
print("Recommended: please run\n\n\trosdep update\n")
except IOError as e:
print("ERROR: cannot create %s:\n\t%s"%(path, e), file=sys.stderr)
return 2
except OSError as e:
print("ERROR: cannot create %s:\n\t%s\nPerhaps you need to run 'sudo rosdep init' instead"%(path, e), file=sys.stderr)
return 3
finally:
os.umask(old_umask)
def command_update(options):
error_occured = []
def update_success_handler(data_source):
print("Hit %s"%(data_source.url))
def update_error_handler(data_source, exc):
error_string = "ERROR: unable to process source [%s]:\n\t%s"%(data_source.url, exc)
print(error_string, file=sys.stderr)
error_occured.append(error_string)
sources_list_dir = get_sources_list_dir()
# disable deprecation warnings when using the command-line tool
warnings.filterwarnings("ignore", category=PreRep137Warning)
if not os.path.exists(sources_list_dir):
print("ERROR: no sources directory exists on the system meaning rosdep has not yet been initialized.\n\nPlease initialize your rosdep with\n\n\tsudo rosdep init\n")
return 1
filelist = [f for f in os.listdir(sources_list_dir) if f.endswith('.list')]
if not filelist:
print("ERROR: no data sources in %s\n\nPlease initialize your rosdep with\n\n\tsudo rosdep init\n"%sources_list_dir, file=sys.stderr)
return 1
try:
print("reading in sources list data from %s"%(sources_list_dir))
sources_cache_dir = get_sources_cache_dir()
try:
if os.geteuid() == 0:
print("Warning: running 'rosdep update' as root is not recommended.", file=sys.stderr)
print(" You should run 'sudo rosdep fix-permissions' and invoke 'rosdep update' again without sudo.", file=sys.stderr)
except AttributeError:
# nothing we wanna do under Windows
pass
update_sources_list(success_handler=update_success_handler,
error_handler=update_error_handler)
print("updated cache in %s"%(sources_cache_dir))
except InvalidData as e:
print("ERROR: invalid sources list file:\n\t%s"%(e), file=sys.stderr)
return 1
except IOError as e:
print("ERROR: error loading sources list:\n\t%s"%(e), file=sys.stderr)
return 1
if error_occured:
print ("ERROR: Not all sources were able to be updated.\n[[[")
for e in error_occured:
print (e)
print("]]]")
return 1
def command_keys(lookup, packages, options):
lookup = _get_default_RosdepLookup(options)
rosdep_keys = get_keys(lookup, packages, options.recursive)
_print_lookup_errors(lookup)
print('\n'.join(rosdep_keys))
def get_keys(lookup, packages, recursive):
rosdep_keys = []
for package_name in packages:
deps = lookup.get_rosdeps(package_name, implicit=recursive)
rosdep_keys.extend(deps)
return set(rosdep_keys)
def command_check(lookup, packages, options):
verbose = options.verbose
installer_context = create_default_installer_context(verbose=verbose)
configure_installer_context(installer_context, options)
installer = RosdepInstaller(installer_context, lookup)
uninstalled, errors = installer.get_uninstalled(packages, implicit=options.recursive, verbose=verbose)
# pretty print the result
if [v for k, v in uninstalled if v]:
print("System dependencies have not been satisified:")
for installer_key, resolved in uninstalled:
if resolved:
for r in resolved:
print("%s\t%s"%(installer_key, r))
else:
print("All system dependencies have been satisified")
if errors:
for package_name, ex in errors.items():
if isinstance(ex, rospkg.ResourceNotFound):
print("ERROR[%s]: resource not found [%s]"%(package_name, ex.args[0]), file=sys.stderr)
else:
print("ERROR[%s]: %s"%(package_name, ex), file=sys.stderr)
if uninstalled:
return 1
else:
return 0
def error_to_human_readable(error):
if isinstance(error, rospkg.ResourceNotFound):
return "Missing resource %s"%(error,)
elif isinstance(error, ResolutionError):
return "%s"%(error.args[0],)
else:
return "%s"%(error,)
def command_install(lookup, packages, options):
# map options
install_options = dict(interactive=not options.default_yes, verbose=options.verbose,
reinstall=options.reinstall,
continue_on_error=options.robust, simulate=options.simulate, quiet=options.quiet)
# setup installer
installer_context = create_default_installer_context(verbose=options.verbose)
configure_installer_context(installer_context, options)
installer = RosdepInstaller(installer_context, lookup)
if options.reinstall:
if options.verbose:
print("reinstall is true, resolving all dependencies")
try:
uninstalled, errors = lookup.resolve_all(packages, installer_context, implicit=options.recursive)
except InvalidData as e:
print("ERROR: unable to process all dependencies:\n\t%s"%(e), file=sys.stderr)
return 1
else:
uninstalled, errors = installer.get_uninstalled(packages, implicit=options.recursive, verbose=options.verbose)
if options.verbose:
print("uninstalled dependencies are: [%s]"%(', '.join([', '.join(pkg) for pkg in [v for k,v in uninstalled]])))
if errors:
err_msg = ("ERROR: the following packages/stacks could not have their "
"rosdep keys resolved\nto system dependencies")
if rospkg.distro.current_distro_codename() is None:
err_msg += (
" (ROS distro is not set. "
"Make sure `ROS_DISTRO` environment variable is set, or use "
"`--rosdistro` option to specify the distro, "
"e.g. `--rosdistro indigo`)"
)
print(err_msg + ":", file=sys.stderr)
for rosdep_key, error in errors.items():
print("%s: %s"%(rosdep_key, error_to_human_readable(error)), file=sys.stderr)
if options.robust:
print("Continuing to install resolvable dependencies...")
else:
return 1
try:
installer.install(uninstalled, **install_options)
if not options.simulate:
print("#All required rosdeps installed successfully")
return 0
except KeyError as e:
raise RosdepInternalError(e)
except InstallFailed as e:
print("ERROR: the following rosdeps failed to install", file=sys.stderr)
print('\n'.join([" %s: %s"%(k, m) for k,m in e.failures]), file=sys.stderr)
return 1
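# NOTE (added for clarity): the helper below appears to be legacy code; it
# references `stacks`, `_detect_os` and `resolve_definition`, which are not
# defined in this module, and it is not called by any command handler here.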
def _compute_depdb_output(lookup, packages, options):
installer_context = create_default_installer_context(verbose=options.verbose)
os_name, os_version = _detect_os(installer_context, options)
output = "Rosdep dependencies for operating system %s version %s "%(os_name, os_version)
for stack_name in stacks:
output += "\nSTACK: %s\n"%(stack_name)
view = lookup.get_stack_rosdep_view(stack_name)
for rosdep in view.keys():
definition = view.lookup(rosdep)
resolved = resolve_definition(definition, os_name, os_version)
output = output + "<<<< %s -> %s >>>>\n"%(rosdep, resolved)
return output
def command_db(options):
# exact same setup logic as command_resolve, should possibly combine
lookup = _get_default_RosdepLookup(options)
installer_context = create_default_installer_context(verbose=options.verbose)
configure_installer_context(installer_context, options)
os_name, os_version = installer_context.get_os_name_and_version()
try:
installer_keys = installer_context.get_os_installer_keys(os_name)
default_key = installer_context.get_default_os_installer_key(os_name)
except KeyError:
raise UnsupportedOs(os_name, installer_context.get_os_keys())
installer = installer_context.get_installer(default_key)
print("OS NAME: %s"%os_name)
print("OS VERSION: %s"%os_version)
errors = []
print("DB [key -> resolution]")
# db does not leverage the resource-based API
view = lookup.get_rosdep_view(DEFAULT_VIEW_KEY, verbose=options.verbose)
for rosdep_name in view.keys():
try:
d = view.lookup(rosdep_name)
inst_key, rule = d.get_rule_for_platform(os_name, os_version, installer_keys, default_key)
if options.filter_for_installers and inst_key not in options.filter_for_installers:
continue
resolved = installer.resolve(rule)
resolved_str = " ".join(resolved)
print ("%s -> %s"%(rosdep_name, resolved_str))
except ResolutionError as e:
errors.append(e)
#TODO: add command-line option for users to be able to see this.
#This is useful for platform bringup, but useless for most users
#as the rosdep db contains numerous, platform-specific keys.
if 0:
for error in errors:
print("WARNING: %s"%(error_to_human_readable(error)), file=sys.stderr)
def _print_lookup_errors(lookup):
for error in lookup.get_errors():
if isinstance(error, rospkg.ResourceNotFound):
print("WARNING: unable to locate resource %s"%(str(error.args[0])), file=sys.stderr)
else:
print("WARNING: %s"%(str(error)), file=sys.stderr)
def command_what_needs(args, options):
lookup = _get_default_RosdepLookup(options)
packages = []
for rosdep_name in args:
packages.extend(lookup.get_resources_that_need(rosdep_name))
_print_lookup_errors(lookup)
print('\n'.join(set(packages)))
def command_where_defined(args, options):
lookup = _get_default_RosdepLookup(options)
locations = []
for rosdep_name in args:
locations.extend(lookup.get_views_that_define(rosdep_name))
_print_lookup_errors(lookup)
if locations:
for location in locations:
origin = location[1]
print(origin)
else:
print("ERROR: cannot find definition(s) for [%s]"%(', '.join(args)), file=sys.stderr)
return 1
def command_resolve(args, options):
lookup = _get_default_RosdepLookup(options)
installer_context = create_default_installer_context(verbose=options.verbose)
configure_installer_context(installer_context, options)
installer, installer_keys, default_key, \
os_name, os_version = get_default_installer(installer_context=installer_context,
verbose=options.verbose)
invalid_key_errors = []
for rosdep_name in args:
if len(args) > 1:
print("#ROSDEP[%s]"%rosdep_name)
view = lookup.get_rosdep_view(DEFAULT_VIEW_KEY, verbose=options.verbose)
try:
d = view.lookup(rosdep_name)
except KeyError as e:
invalid_key_errors.append(e)
continue
rule_installer, rule = d.get_rule_for_platform(os_name, os_version, installer_keys, default_key)
installer = installer_context.get_installer(rule_installer)
resolved = installer.resolve(rule)
print("#%s"%(rule_installer))
print (" ".join([str(r) for r in resolved]))
for error in invalid_key_errors:
print("ERROR: no rosdep rule for %s"%(error), file=sys.stderr)
for error in lookup.get_errors():
print("WARNING: %s"%(error_to_human_readable(error)), file=sys.stderr)
if invalid_key_errors:
return 1 # error exit code
def command_fix_permissions(options):
import os
import pwd
import grp
stat_info = os.stat(os.path.expanduser('~'))
uid = stat_info.st_uid
gid = stat_info.st_gid
user_name = pwd.getpwuid(uid).pw_name
try:
group_name = grp.getgrgid(gid).gr_name
except KeyError as e:
group_name = gid
ros_home = rospkg.get_ros_home()
print("Recursively changing ownership of ros home directory '{0}' "
"to '{1}:{2}' (current user)...".format(ros_home, user_name, group_name))
failed = []
try:
for dirpath, dirnames, filenames in os.walk(ros_home):
try:
os.lchown(dirpath, uid, gid)
except Exception as e:
failed.append((dirpath, str(e)))
for f in filenames:
try:
path = os.path.join(dirpath, f)
os.lchown(path, uid, gid)
except Exception as e:
failed.append((path, str(e)))
except Exception:
import traceback
traceback.print_exc()
print("Failed to walk directory. Try with sudo?")
else:
if failed:
print("Failed to change ownership for:")
for p, e in failed:
print("{0} --> {1}".format(p, e))
print("Try with sudo?")
else:
print("Done.")
command_handlers = {
'db': command_db,
'check': command_check,
'keys': command_keys,
'install': command_install,
'what-needs': command_what_needs,
'where-defined': command_where_defined,
'resolve': command_resolve,
'init': command_init,
'update': command_update,
'fix-permissions': command_fix_permissions,
# backwards compat
'what_needs': command_what_needs,
'where_defined': command_where_defined,
'depdb': command_db,
}
# commands that accept rosdep names as args
_command_rosdep_args = ['what-needs', 'what_needs', 'where-defined', 'where_defined', 'resolve']
# commands that take no args
_command_no_args = ['update', 'init', 'db', 'fix-permissions']
_commands = command_handlers.keys()
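# Illustrative sketch (not part of the original module): rosdep_main dispatches
# the first positional argument through the command_handlers table above, so a
# programmatic call like the one below is equivalent to running
# `rosdep resolve boost` on the command line ("boost" is just an example key).
def _example_programmatic_call():
    rosdep_main(['resolve', 'boost'])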
| [
"[email protected]"
] | |
9eb9671c3137c3bce0ad495c24dce9f83e498907 | 270f1b82d494c474df0bf02f60d106f30e6d3fa3 | /src/drivers/driver.py | 08d668314e1d6ade5820de9c60810ecb3994b599 | [
"MIT"
] | permissive | KDahlgren/nyctea | cbb0f7c1023d13f600e90c864c070592c3240d74 | 725940d46a63ca4189283bcc716ad0c96aab48ec | refs/heads/master | 2021-05-11T02:28:24.086022 | 2018-02-03T07:42:03 | 2018-02-03T07:42:03 | 118,362,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | #!/usr/bin/env python
'''
driver.py
'''
# **************************************** #
#############
# IMPORTS #
#############
# standard python packages
import inspect, itertools, logging, os, sqlite3, string, sys, time
# ------------------------------------------------------ #
# import sibling packages HERE!!!
if not os.path.abspath( __file__ + "/../../../lib/iapyx/src" ) in sys.path :
sys.path.append( os.path.abspath( __file__ + "/../../../lib/iapyx/src" ) )
from dedt import dedt, dedalusParser
from utils import parseCommandLineInput, tools
from evaluators import c4_evaluator
# **************************************** #
####################
# CLASS #
####################
| [
"[email protected]"
] | |
3fcc00faa020fb3330f1af7af5b154afb0be26ce | 51d0377511a5da902033fb9d80184db0e096fe2c | /31-customer-analytics-and-ab-testing-in-python/4-analyzing-ab-testing-results/05-understanding-confidence-intervals.py | 7cdaf7e13d6506a7916e804755c8cd0d7ad97aa8 | [] | no_license | sashakrasnov/datacamp | c28c6bda178163337baed646220b2f7dcc36047d | 759f4cec297883907e21118f24a3449d84c80761 | refs/heads/master | 2021-12-07T02:54:51.190672 | 2021-09-17T21:05:29 | 2021-09-17T21:05:29 | 157,093,632 | 6 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,767 | py | '''
Understanding confidence intervals
In this exercise, you'll develop your intuition for how various parameter values impact confidence intervals. Specifically, you will use the get_ci() function to explore how changing its parameters widens or tightens the confidence interval. The function is defined below, where cl is the confidence level and sd is the standard deviation.
'''
import pandas as pd
import scipy.stats as sci
def get_ci(value, cl, sd):
loc = sci.norm.ppf(1 - cl/2)
rng_val = sci.norm.cdf(loc - value/sd)
lwr_bnd = value - rng_val
upr_bnd = value + rng_val
return_val = (lwr_bnd, upr_bnd)
return(return_val)
'''
INSTRUCTIONS 1/3
* Find the confidence interval with a value of 1, a confidence level of 0.975 and a standard deviation of 0.5.
'''
# Compute and print the confidence interval
confidence_interval = get_ci(1, 0.975, 0.5)
print(confidence_interval)
'''
INSTRUCTIONS 2/3
* Repeat the calculation, updating the confidence level to 0.95 and the standard deviation to 2. Leave the value as 1
'''
# Compute and print the confidence interval
confidence_interval = get_ci(1, 0.95, 2)
print(confidence_interval)
'''
INSTRUCTIONS 3/3
* Finally, update your code such that the standard deviation is 0.001 while leaving the confidence level and value the same as the previous exercise part. Compare the three confidence intervals outputted. How do they seem to relate to the parameters used?
'''
# Compute and print the confidence interval
confidence_interval = get_ci(1, 0.95, 0.001)
print(confidence_interval)
'''
(0.9755040421682947, 1.0244959578317054)
(0.6690506448818785, 1.3309493551181215)
(1.0, 1.0)
As our standard deviation decreases so too does the width of our confidence interval. Great work!
''' | [
"[email protected]"
] | |
2b42981cb3c3c7962f733ce338ebbd731734a2f4 | f7499803d5b95b3fc076a8873100317ce6bc639d | /purity_fb/purity_fb_1dot4/apis/file_systems_api.py | 27b518256c0e69cd7f534c279390e3daf28ac800 | [
"Apache-2.0"
] | permissive | yongjianmu/purity_fb_python_client | 23ca2cf6880be836bfd411502678e9647e438637 | 9dd1afd5cd5c63324f8c0e0f8f31912667886091 | refs/heads/master | 2020-03-28T15:04:16.396302 | 2018-10-01T23:10:35 | 2018-10-01T23:14:14 | 148,552,355 | 0 | 0 | null | 2018-09-12T23:01:22 | 2018-09-12T23:01:22 | null | UTF-8 | Python | false | false | 21,116 | py | # coding: utf-8
"""
Purity//FB REST Client
    Client for Purity//FB REST API, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentation can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.4
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class FileSystemsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_file_systems(self, file_system, **kwargs):
"""
Create a new file system
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_file_systems(file_system, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param FileSystem file_system: the attribute map used to create the file system (required)
:return: FileSystemResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_file_systems_with_http_info(file_system, **kwargs)
else:
(data) = self.create_file_systems_with_http_info(file_system, **kwargs)
return data
def create_file_systems_with_http_info(self, file_system, **kwargs):
"""
Create a new file system
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_file_systems_with_http_info(file_system, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param FileSystem file_system: the attribute map used to create the file system (required)
:return: FileSystemResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['file_system']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_file_systems" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'file_system' is set
if ('file_system' not in params) or (params['file_system'] is None):
raise ValueError("Missing the required parameter `file_system` when calling `create_file_systems`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'file_system' in params:
body_params = params['file_system']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.4/file-systems', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileSystemResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_file_systems(self, name, **kwargs):
"""
Delete a file system by name
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_file_systems(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the file system to be deleted (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_file_systems_with_http_info(name, **kwargs)
else:
(data) = self.delete_file_systems_with_http_info(name, **kwargs)
return data
def delete_file_systems_with_http_info(self, name, **kwargs):
"""
Delete a file system by name
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_file_systems_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the file system to be deleted (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_file_systems" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_file_systems`")
collection_formats = {}
path_params = {}
query_params = []
if 'name' in params:
query_params.append(('name', params['name']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.4/file-systems', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_file_systems(self, **kwargs):
"""
List file systems
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_file_systems(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A list of names.
:param str filter: The filter to be used for query.
:param str sort: The way to order the results.
:param int start: start
:param int limit: limit, should be >= 0
:param str token: token
:param bool total: return a total object in addition to the other results
:param bool total_only: return only the total object
:return: FileSystemResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_file_systems_with_http_info(**kwargs)
else:
(data) = self.list_file_systems_with_http_info(**kwargs)
return data
def list_file_systems_with_http_info(self, **kwargs):
"""
List file systems
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_file_systems_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A list of names.
:param str filter: The filter to be used for query.
:param str sort: The way to order the results.
:param int start: start
:param int limit: limit, should be >= 0
:param str token: token
:param bool total: return a total object in addition to the other results
:param bool total_only: return only the total object
:return: FileSystemResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'filter', 'sort', 'start', 'limit', 'token', 'total', 'total_only']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_file_systems" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'start' in params:
query_params.append(('start', params['start']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'token' in params:
query_params.append(('token', params['token']))
if 'total' in params:
query_params.append(('total', params['total']))
if 'total_only' in params:
query_params.append(('total_only', params['total_only']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.4/file-systems', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileSystemResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_file_systems(self, name, attributes, **kwargs):
"""
Update an existing file system
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_file_systems(name, attributes, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: the name of the file system to be updated (required)
:param FileSystem attributes: the new attributes, only modifiable fields could be used. (required)
:param bool ignore_usage: Allow update operations that lead to a hard_limit_enabled file system with usage over its provisioned size. The update can be either setting hard_limit_enabled when usage is higher than provisioned size, or resize provisioned size to a value under usage when hard_limit_enabled is True.
:return: FileSystemResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_file_systems_with_http_info(name, attributes, **kwargs)
else:
(data) = self.update_file_systems_with_http_info(name, attributes, **kwargs)
return data
def update_file_systems_with_http_info(self, name, attributes, **kwargs):
"""
Update an existing file system
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_file_systems_with_http_info(name, attributes, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: the name of the file system to be updated (required)
:param FileSystem attributes: the new attributes, only modifiable fields could be used. (required)
:param bool ignore_usage: Allow update operations that lead to a hard_limit_enabled file system with usage over its provisioned size. The update can be either setting hard_limit_enabled when usage is higher than provisioned size, or resize provisioned size to a value under usage when hard_limit_enabled is True.
:return: FileSystemResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'attributes', 'ignore_usage']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_file_systems" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `update_file_systems`")
# verify the required parameter 'attributes' is set
if ('attributes' not in params) or (params['attributes'] is None):
raise ValueError("Missing the required parameter `attributes` when calling `update_file_systems`")
collection_formats = {}
path_params = {}
query_params = []
if 'name' in params:
query_params.append(('name', params['name']))
if 'ignore_usage' in params:
query_params.append(('ignore_usage', params['ignore_usage']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'attributes' in params:
body_params = params['attributes']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.4/file-systems', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileSystemResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
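# Illustrative usage sketch (not part of the generated client). It assumes an
# ApiClient that was configured elsewhere with the array address and the
# auth token used by the `AuthTokenHeader` setting above.
def _example_list_file_systems(api_client):
    api = FileSystemsApi(api_client)
    # Return up to five file systems, ordered by name.
    return api.list_file_systems(limit=5, sort="name")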
| [
"[email protected]"
] | |
b29bffcc2e7d2e3616ff332e06f3397623fdd0ed | 132787c692753ce56cc87abce863af61367e4c41 | /tests/test_flexmaxpool.py | 8c82d49dc0c9325e94e1e659e7ba73da72d85bae | [
"Apache-2.0"
] | permissive | LMescheder/torch_flexconvs | 4cff0b2195e9c0db4bdfbfe4b59d6bf6fdddebae | 72a6aa4eb7dd029b6c446def6031ce56b9fb8bfd | refs/heads/master | 2022-11-16T04:32:53.550675 | 2019-02-21T16:08:21 | 2019-02-21T16:08:21 | 173,277,013 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | import torch
from torch.autograd import gradcheck
from scipy.spatial import cKDTree
from torch_flexconv import FlexMaxPool, flex_maxpool
def test_flexmaxpool():
B = 16
p = torch.rand(B, 3, 1000)
p_np = p.squeeze().numpy()
idx_nn = []
for i in range(B):
idx = cKDTree(p_np[i].T).query(p_np[i].T, k=12)[1]
idx = torch.IntTensor(idx.T)
idx_nn.append(idx)
idx_nn = torch.stack(idx_nn, dim=0)
net = torch.rand(B, 32, 1000)
model = FlexMaxPool()
out1 = model(net, idx_nn)
net = net.cuda()
idx_nn = idx_nn.cuda()
model = model.cuda()
out2 = model(net, idx_nn).cpu()
print(((out1 - out2).abs()/out1.abs()).mean())
def test_flexmaxpool_grads():
B = 16
in_channels = 8
n_points = 20
p = torch.rand(B, 3, n_points)
p_np = p.squeeze().numpy()
idx_nn = []
for i in range(B):
idx = cKDTree(p_np[i].T).query(p_np[i].T, k=3)[1]
idx = torch.IntTensor(idx.T)
idx_nn.append(idx)
idx_nn = torch.stack(idx_nn, dim=0)
feat = torch.rand(B, in_channels, n_points)
# idx_nn = idx_nn.cuda()
# feat = feat.cuda()
feat = feat.to(torch.float64)
p = p.to(torch.float64)
feat.requires_grad_()
gradcheck(
flex_maxpool,
[feat, idx_nn])
test_flexmaxpool()
test_flexmaxpool_grads()
| [
"[email protected]"
] | |
1fc85c384cbc924fac454a4c31e5b8c74a901880 | 600a398c5bfebd8bb7aa8dd8349c710bee719d3a | /PRL/genK.py | e89375f819ca451210803b2a60591e3f7a5f8941 | [
"MIT"
] | permissive | balasbk/game-theory | 00f57f206736953a44a7f5a23edc2a82a29474c0 | 958e093e64799e2dd445d18bd9966251270f81e7 | refs/heads/master | 2022-04-26T01:01:26.363820 | 2020-04-28T16:11:48 | 2020-04-28T16:11:48 | 259,680,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,193 | py | import numpy as np
import random
import math
def apply2prefs(k_fun, p1, p2):
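    """Lift a base kernel k_fun onto preference pairs.

    Each preference is a pair ((x_pos, y_pos), (x_neg, y_neg)) of
    (example, label) tuples. k_fun evaluations between the two preferences
    are added when the corresponding labels agree (positive with positive,
    negative with negative) and subtracted when a positive label of one
    preference matches the negative label of the other.
    """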
(x1p, y1p), (x1n, y1n) = p1
(x2p, y2p), (x2n, y2n) = p2
res = 0.
if y1p == y2p:
res += k_fun(x1p, x2p)
if y1n == y2n:
res += k_fun(x1n, x2n)
if y1p == y2n:
res -= k_fun(x1p, x2n)
if y1n == y2p:
res -= k_fun(x1n, x2p)
return res
class GenK:
def __init__(self):
pass
def get_random_kernel(self):
pass
def get_pref_kernel_function(self):
pass
def get_kernel_function(self):
pass
class GenKList(GenK):
def __init__(self, k_list):
self.kernel_list = k_list
def __repr__(self):
return "GenKList(n_kernel=%d)" %len(self.kernel_list)
def get_random_kernel(self):
return random.randint(0, len(self.kernel_list)-1)
def get_pref_kernel_function(self, d):
return lambda p1, p2: apply2prefs(self.get_kernel_function(d), p1, p2)
def get_kernel_function(self, d):
return self.kernel_list[d]
class GenHPK(GenK):
def __init__(self, min_deg=2, max_deg=2):
self.min_deg = min_deg
self.max_deg = max(max_deg, min_deg)
def __repr__(self):
return "GenHPK(dmin=%d, dmax=%d)" %(self.min_deg, self.max_deg)
def get_random_kernel(self):
return random.randint(self.min_deg, self.max_deg)
def get_pref_kernel_function(self, degree):
return lambda p1, p2: apply2prefs(self.get_kernel_function(degree), p1, p2)
def get_kernel_function(self, degree):
return lambda x,z: np.dot(x,z)**degree
class GenRBFK(GenKList):
def __init__(self, gamma_range):
self.gamma_range = gamma_range
def __repr__(self):
return "GenRBFK(gamma_range=%s)" %(self.gamma_range)
def get_random_kernel(self):
return random.choice(self.gamma_range)
def get_pref_kernel_function(self, gamma):
return lambda p1, p2: apply2prefs(self.get_kernel_function(gamma), p1, p2)
def get_kernel_function(self, gamma):
return lambda x,z: math.exp(-gamma * np.linalg.norm(x-z)**2)
#return lambda x,z: math.exp(-gamma * np.sum((x-z)**2))
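# Illustrative sketch (not part of the original module): draw a random
# polynomial degree and evaluate the induced preference kernel on a toy
# preference built from (example, label) tuples.
def _example_pref_kernel():
    gen = GenHPK(min_deg=1, max_deg=3)
    deg = gen.get_random_kernel()
    pref_k = gen.get_pref_kernel_function(deg)
    x_pos, x_neg = np.ones(4), np.zeros(4)
    p = ((x_pos, 1), (x_neg, 0))  # x_pos (label 1) is preferred over x_neg (label 0)
    # For identical preferences this equals dot(x_pos, x_pos)**deg + dot(x_neg, x_neg)**deg.
    return pref_k(p, p)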
| [
"[email protected]"
] | |
049abc3f07bd8f1d11c09f59c92e3042d20a79cb | 4b6d5a775f9c1a0da28136dc10652390d0c5a6ba | /build/lib/xbos_services_getter/xbos_services_getter.py | 727822388f3d36beda9fd2a4a5e250de42ef0df8 | [] | no_license | daniellengyel/xbos_services_getter | c78aa6c66ec3a9be93551b62ae3d44b1e63b9492 | f7ca68b6c17bf72a79b9cd3e7f4eeb9d76631592 | refs/heads/master | 2023-07-22T14:19:53.916522 | 2019-08-19T18:16:37 | 2019-08-19T18:16:37 | 174,210,512 | 0 | 5 | null | 2023-07-05T20:53:11 | 2019-03-06T19:50:35 | Python | UTF-8 | Python | false | false | 47,124 | py | import grpc
from xbos_services_getter.lib import building_zone_names_pb2
from xbos_services_getter.lib import building_zone_names_pb2_grpc
from xbos_services_getter.lib import hvac_consumption_pb2
from xbos_services_getter.lib import hvac_consumption_pb2_grpc
from xbos_services_getter.lib import indoor_data_historical_pb2
from xbos_services_getter.lib import indoor_data_historical_pb2_grpc
from xbos_services_getter.lib import indoor_temperature_prediction_pb2
from xbos_services_getter.lib import indoor_temperature_prediction_pb2_grpc
from xbos_services_getter.lib import meter_data_historical_pb2
from xbos_services_getter.lib import meter_data_historical_pb2_grpc
from xbos_services_getter.lib import occupancy_pb2
from xbos_services_getter.lib import occupancy_pb2_grpc
from xbos_services_getter.lib import optimizer_pb2
from xbos_services_getter.lib import optimizer_pb2_grpc
from xbos_services_getter.lib import outdoor_temperature_historical_pb2
from xbos_services_getter.lib import outdoor_temperature_historical_pb2_grpc
from xbos_services_getter.lib import outdoor_temperature_prediction_pb2
from xbos_services_getter.lib import outdoor_temperature_prediction_pb2_grpc
from xbos_services_getter.lib import price_pb2
from xbos_services_getter.lib import price_pb2_grpc
from xbos_services_getter.lib import temperature_bands_pb2
from xbos_services_getter.lib import temperature_bands_pb2_grpc
from xbos_services_getter.lib import baseline_optimizer_pb2
from xbos_services_getter.lib import baseline_optimizer_pb2_grpc
import datetime
import pytz
import pandas as pd
import os
'''
Utility constants
'''
NO_ACTION = 0
HEATING_ACTION = 1
COOLING_ACTION = 2
FAN = 3
TWO_STAGE_HEATING_ACTION = 4
TWO_STAGE_COOLING_ACTION = 5
def get_window_in_sec(s):
"""Returns number of seconds in a given duration or zero if it fails.
Supported durations are seconds (s), minutes (m), hours (h), and days(d)."""
seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400}
try:
return int(float(s[:-1])) * seconds_per_unit[s[-1]]
except:
return 0
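# Illustrative examples (not part of the original module): a window string is a
# number followed by a unit character; anything unparsable maps to 0.
def _example_window_in_sec():
    # "30s" -> 30, "5m" -> 300, "1h" -> 3600, "bad" -> 0
    return [get_window_in_sec(w) for w in ("30s", "5m", "1h", "bad")]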
# Building and Zone names
def get_building_zone_names_stub(BUILDING_ZONE_NAMES_HOST_ADDRESS=None,secure=True):
"""Get the stub to interact with the building_zone_address service.
:param BUILDING_ZONE_NAMES_HOST_ADDRESS: Optional argument to supply host address for given service. Otherwise,
set as environment variable.
:return: grpc Stub object.
"""
if BUILDING_ZONE_NAMES_HOST_ADDRESS is None:
BUILDING_ZONE_NAMES_HOST_ADDRESS = os.environ["BUILDING_ZONE_NAMES_HOST_ADDRESS"]
if not secure:
channel = grpc.insecure_channel(BUILDING_ZONE_NAMES_HOST_ADDRESS)
else:
credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(BUILDING_ZONE_NAMES_HOST_ADDRESS, credentials)
return building_zone_names_pb2_grpc.BuildingZoneNamesStub(channel)
def get_buildings(building_zone_names_stub):
"""Gets all the building names supported by the services.
:param building_zone_names_stub: grpc stub for building_zone_names service.
:return: list (string) building names.
"""
building_names = building_zone_names_stub.GetBuildings(building_zone_names_pb2.BuildingRequest())
return [bldg.name for bldg in building_names.names]
def get_zones(building_zone_names_stub, building):
"""Gets all zone names for the given building which are supported by the services.
:param building_zone_names_stub: grpc stub for building_zone_names service.
:param building: (string) building name. Needs to be in the list returned by get_buildings.
:return: list (string) zone names.
"""
zones = building_zone_names_stub.GetZones(building_zone_names_pb2.ZoneRequest(building=building))
return [zone.name for zone in zones.names]
def get_all_buildings_zones(building_zone_names_stub):
"""Gets all building and corresponding zones in a dictionary.
:param building_zone_names_stub: grpc stub for building_zone_names service.
:return: dictionary <building name, list<zone names>> (strings)
"""
buildings = get_buildings(building_zone_names_stub)
zones = {}
for bldg in buildings:
zones[bldg] = get_zones(building_zone_names_stub, bldg)
return zones
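# Illustrative sketch (not part of the original module): assumes the
# BUILDING_ZONE_NAMES_HOST_ADDRESS environment variable points at a reachable
# instance of the building_zone_names service.
def _example_building_zone_lookup():
    stub = get_building_zone_names_stub()
    return get_all_buildings_zones(stub)  # {building name: [zone names]}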
# Temperature band functions
def get_temperature_band_stub(TEMPERATURE_BANDS_HOST_ADDRESS=None,secure=True):
"""Get the stub to interact with the temperature_band service.
:param TEMPERATURE_BANDS_HOST_ADDRESS: Optional argument to supply host address for given service. Otherwise,
set as environment variable.
:return: grpc Stub object.
"""
if TEMPERATURE_BANDS_HOST_ADDRESS is None:
TEMPERATURE_BANDS_HOST_ADDRESS = os.environ["TEMPERATURE_BANDS_HOST_ADDRESS"]
if not secure:
channel = grpc.insecure_channel(TEMPERATURE_BANDS_HOST_ADDRESS)
else:
credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(TEMPERATURE_BANDS_HOST_ADDRESS, credentials)
return temperature_bands_pb2_grpc.SchedulesStub(channel)
def get_comfortband(temperature_band_stub, building, zone, start, end, window):
"""Gets comfortband as pd.df.
:param temperature_band_stub: grpc stub for temperature_band microservice
:param building: (str) building name
:param zone: (str) zone name
:param start: (datetime timezone aware) start of comfortband
:param end: (datetime timezone aware) end of comfortband
:param window: (str) the interval in which to split the comfortband.
    :return: pd.df columns=["t_low", "t_high"], values=float, index=time
"""
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
start_unix = start.timestamp() * 1e9
end_unix = end.timestamp() * 1e9
window_seconds = get_window_in_sec(window)
# call service
comfortband_response = temperature_band_stub.GetComfortband(
temperature_bands_pb2.ScheduleRequest(building=building, zone=zone, start=int(start_unix), end=int(end_unix), window=window,
unit="F"))
# process data
comfortband_list = []
for msg in comfortband_response:
item = {
"datetime" : datetime.datetime.utcfromtimestamp(msg.time / 1e9).replace(tzinfo=pytz.utc).astimezone(tz=start.tzinfo),
"t_high" : msg.temperature_high,
"t_low" : msg.temperature_low,
"unit" : msg.unit
}
comfortband_list.append(item)
df = pd.DataFrame(comfortband_list)
df.set_index("datetime",inplace=True)
return df
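# Example call (building/zone names are placeholders; requires a reachable
# temperature_bands service and TEMPERATURE_BANDS_HOST_ADDRESS in the environment):
#
#   import datetime, pytz
#   tz = pytz.timezone("US/Pacific")
#   end = datetime.datetime.now(tz)
#   start = end - datetime.timedelta(hours=4)
#   band_stub = get_temperature_band_stub()
#   comfortband = get_comfortband(band_stub, "bldg-a", "zone-1", start, end, "15m")
#   # comfortband["t_low"] / comfortband["t_high"] bound the allowed temperature per interval.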
def get_do_not_exceed(temperature_band_stub, building, zone, start, end, window):
"""Gets do_not_exceed as pd.df.
:param temperature_band_stub: grpc stub for temperature_band microservice
:param building: (str) building name
:param zone: (str) zone name
:param start: (datetime timezone aware) start of do_not_exceed
:param end: (datetime timezone aware) end of do_not_exceed
:param window: (str) the interval in which to split the do_not_exceed.
    :return: pd.df columns=["t_low", "t_high"], values=float, index=time
"""
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
start_unix = start.timestamp() * 1e9
end_unix = end.timestamp() * 1e9
window_seconds = get_window_in_sec(window)
# call service
do_not_exceed_response = temperature_band_stub.GetDoNotExceed(
temperature_bands_pb2.ScheduleRequest(building=building, zone=zone, start=int(start_unix), end=int(end_unix), window=window,
unit="F"))
# process data
do_not_exceed_list = []
for msg in do_not_exceed_response:
item = {
"datetime" : datetime.datetime.utcfromtimestamp(msg.time / 1e9).replace(tzinfo=pytz.utc).astimezone(tz=start.tzinfo),
"t_high" : msg.temperature_high,
"t_low" : msg.temperature_low,
"unit" : msg.unit
}
do_not_exceed_list.append(item)
df = pd.DataFrame(do_not_exceed_list)
df.set_index("datetime",inplace=True)
return df
# occupancy functions
def get_occupancy_stub(OCCUPANCY_HOST_ADDRESS=None,secure=True):
"""Get the stub to interact with the occupancy service.
:param OCCUPANCY_HOST_ADDRESS: Optional argument to supply host address for given service. Otherwise,
set as environment variable.
:return: grpc Stub object.
"""
if OCCUPANCY_HOST_ADDRESS is None:
OCCUPANCY_HOST_ADDRESS = os.environ["OCCUPANCY_HOST_ADDRESS"]
if not secure:
channel = grpc.insecure_channel(OCCUPANCY_HOST_ADDRESS)
else:
credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(OCCUPANCY_HOST_ADDRESS, credentials)
return occupancy_pb2_grpc.OccupancyStub(channel)
def get_occupancy(occupancy_stub, building, zone, start, end, window):
"""Gets occupancy as pd.series.
:param occupancy_stub: grpc stub for occupancy microservice
:param building: (str) building name
:param zone: (str) zone name
:param start: (datetime timezone aware)
:param end: (datetime timezone aware)
:param window: (str) the interval in which to split the data.
    :return: pd.df columns=["occupancy"], values=float, index=time
"""
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
start_unix = start.timestamp() * 1e9
end_unix = end.timestamp() * 1e9
window_seconds = get_window_in_sec(window)
# call service
occupancy_response = occupancy_stub.GetOccupancy(
occupancy_pb2.Request(building=building, zone=zone, start=int(start_unix), end=int(end_unix), window=window))
# process data
occupancy_list = []
for msg in occupancy_response:
item = {
"datetime" : datetime.datetime.utcfromtimestamp(msg.time / 1e9).replace(tzinfo=pytz.utc).astimezone(tz=start.tzinfo),
"occupancy" : msg.occupancy
}
occupancy_list.append(item)
df = pd.DataFrame(occupancy_list)
df.set_index("datetime",inplace=True)
return df
# price functions
def get_price_stub(PRICE_HOST_ADDRESS=None,secure=True):
"""Get the stub to interact with the price service.
    :param PRICE_HOST_ADDRESS: Optional argument to supply host address for given service. Otherwise,
set as environment variable.
:return: grpc Stub object.
"""
if PRICE_HOST_ADDRESS is None:
PRICE_HOST_ADDRESS = os.environ["PRICE_HOST_ADDRESS"]
if not secure:
channel = grpc.insecure_channel(PRICE_HOST_ADDRESS)
else:
credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(PRICE_HOST_ADDRESS, credentials)
return price_pb2_grpc.PriceStub(channel)
def get_all_tariffs(price_stub):
"""Gets all available tariffs and utilities as a list of dictionaries.
:param price_stub: grpc stub for price microservice
:return: list of (dictionary) keys=["tariff", "utility"]
"""
all_tariffs_utilities = price_stub.GetAllTariffsAndUtilities(price_pb2.Empty()).tariffs_utilities
all_tariffs_utilities_list = []
for tariff_and_utility in all_tariffs_utilities:
all_tariffs_utilities_list.append({"tariff": tariff_and_utility.tariff, "utility": tariff_and_utility.utility})
return all_tariffs_utilities_list
def get_tariff_and_utility(price_stub, building):
"""Gets the tariff and utility for the given building as a dictionary.
:param price_stub: grpc stub for price microservice
:param building: (str) building name
:return: (dictionary) keys=["tariff", "utility"]
"""
tariff_and_utility = price_stub.GetTariffAndUtility(price_pb2.BuildingRequest(building=building))
return {"tariff": tariff_and_utility.tariff, "utility": tariff_and_utility.utility}
def get_price_utility_tariff(price_stub,utility,tariff,price_type, start, end, window):
"""Gets the price as a pandas dataframe.
:param price_stub: grpc stub for price microservice
    :param utility: (str) utility name
    :param tariff: (str) tariff name
:param price_type: (str) "ENERGY" or "DEMAND"
:param start: (datetime timezone aware)
:param end: (datetime timezone aware)
:param window: (str) the interval in which to split the data.
:return: pd.DataFrame columns=["price" (float), "unit" string] index=start to end with window intervals.
"""
if price_type not in ["ENERGY", "DEMAND"]:
raise AttributeError("Given price type is invalid. Use ENERGY or DEMAND.")
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
start_unix = int(start.timestamp() * 1e9)
end_unix = int(end.timestamp() * 1e9)
window_seconds = get_window_in_sec(window)
# call service
price_response = price_stub.GetPrice(price_pb2.PriceRequest(utility=utility,
tariff=tariff,
price_type=price_type,
start=start_unix,
end=end_unix,
window=window))
# process data
utility_tariff_list = []
for msg in price_response:
item = {
"datetime" : datetime.datetime.utcfromtimestamp(msg.time / 1e9).replace(tzinfo=pytz.utc).astimezone(tz=start.tzinfo),
"price" : msg.price,
"unit" : msg.unit,
"window" : msg.window
}
utility_tariff_list.append(item)
df = pd.DataFrame(utility_tariff_list)
df.set_index("datetime",inplace=True)
return df
def get_price(price_stub, building, price_type, start, end, window):
"""Gets the price as a pandas dataframe.
:param price_stub: grpc stub for price microservice
:param building: (str) building name
:param price_type: (str) "ENERGY" or "DEMAND"
:param start: (datetime timezone aware)
:param end: (datetime timezone aware)
:param window: (str) the interval in which to split the data.
:return: pd.DataFrame columns=["price", "unit"], types=[float, string] index=start to end with window intervals.
"""
# call service
tariff_and_utility = get_tariff_and_utility(price_stub, building)
return get_price_utility_tariff(price_stub,tariff_and_utility["utility"],tariff_and_utility["tariff"],price_type, start, end, window)
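# Example (hypothetical building; hourly ENERGY prices over the same start/end as in the
# comfortband sketch above):
#
#   price_stub = get_price_stub()
#   energy_price = get_price(price_stub, "bldg-a", "ENERGY", start, end, "1h")
#   # energy_price["price"] is indexed by datetime; "unit" names the currency/energy unit.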
def get_demand_response_forecast_utility(price_stub, utility,timezone=pytz.timezone('US/Pacific')):
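    """Gets forecasted demand-response event statuses for the given utility ("PGE" or "SCE")
    as a pd.df indexed by datetime (may be empty if no events are forecast)."""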
if utility.upper() not in ["PGE", "SCE"]:
raise AttributeError("Given utility type is invalid. Use PGE or SCE.")
# call service
demand_response = price_stub.GetDemandResponseForecast(price_pb2.DemandResponseRequest(utility=utility)).statuses
# process data
utility_tariff_list = []
for msg in demand_response:
item = {
"datetime" : datetime.datetime.utcfromtimestamp(msg.time / 1e9).replace(tzinfo=pytz.utc).astimezone(tz=timezone),
"status" : msg.status
}
utility_tariff_list.append(item)
df = pd.DataFrame(utility_tariff_list)
if len(utility_tariff_list)!= 0:
df.set_index("datetime",inplace=True)
return df
def get_demand_response_confirmed_utility(price_stub, utility,timezone=pytz.timezone('US/Pacific')):
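    """Gets confirmed demand-response event statuses for the given utility ("PGE" or "SCE")
    as a pd.df indexed by datetime (may be empty if no events are confirmed)."""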
if utility.upper() not in ["PGE", "SCE"]:
raise AttributeError("Given utility type is invalid. Use PGE or SCE.")
# call service
demand_response = price_stub.GetDemandResponseConfirmed(price_pb2.DemandResponseRequest(utility=utility)).statuses
# process data
utility_tariff_list = []
for msg in demand_response:
item = {
"datetime" : datetime.datetime.utcfromtimestamp(msg.time / 1e9).replace(tzinfo=pytz.utc).astimezone(tz=timezone),
"status" : msg.status
}
utility_tariff_list.append(item)
df = pd.DataFrame(utility_tariff_list)
if len(utility_tariff_list)!= 0:
df.set_index("datetime",inplace=True)
return df
def get_demand_response_forecast(price_stub, building,timezone=pytz.timezone('US/Pacific')):
tariff_and_utility = get_tariff_and_utility(price_stub, building)
return get_demand_response_forecast_utility(price_stub,tariff_and_utility["utility"])
def get_demand_response_confirmed(price_stub, building,timezone=pytz.timezone('US/Pacific')):
tariff_and_utility = get_tariff_and_utility(price_stub, building)
return get_demand_response_confirmed_utility(price_stub,tariff_and_utility["utility"])
# indoor historic functions
def get_indoor_historic_stub(INDOOR_DATA_HISTORICAL_HOST_ADDRESS=None,secure=True):
"""Get the stub to interact with the indoor_data_historical service.
:param INDOOR_DATA_HISTORICAL_HOST_ADDRESS: Optional argument to supply host address for given service. Otherwise,
set as environment variable.
:return: grpc Stub object.
"""
if INDOOR_DATA_HISTORICAL_HOST_ADDRESS is None:
INDOOR_DATA_HISTORICAL_HOST_ADDRESS = os.environ["INDOOR_DATA_HISTORICAL_HOST_ADDRESS"]
if not secure:
channel = grpc.insecure_channel(INDOOR_DATA_HISTORICAL_HOST_ADDRESS)
else:
credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(INDOOR_DATA_HISTORICAL_HOST_ADDRESS, credentials)
return indoor_data_historical_pb2_grpc.IndoorDataHistoricalStub(channel)
def get_indoor_temperature_historic(indoor_historic_stub, building, zone, start, end, window, agg="MEAN"):
"""Gets historic indoor temperature as pd.series.
:param indoor_historic_stub: grpc stub for historic indoor temperature microservice
:param building: (str) building name
:param zone: (str) zone name
:param start: (datetime timezone aware)
:param end: (datetime timezone aware)
:param window: (str) the interval in which to split the data.
:return: pd.df columns=["temperature", "unit"] values=[float, string], index=time
"""
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
start_unix = int(start.timestamp() * 1e9)
end_unix = int(end.timestamp() * 1e9)
window_seconds = get_window_in_sec(window)
# call service
historic_temperature_response = indoor_historic_stub.GetRawTemperatures(
indoor_data_historical_pb2.Request(building=building, zone=zone, start=start_unix, end=end_unix,
window=window,aggregation=agg))
# process data
temperature_list = []
for msg in historic_temperature_response:
item = {
"datetime" : datetime.datetime.utcfromtimestamp(msg.time / 1e9).replace(tzinfo=pytz.utc).astimezone(tz=start.tzinfo),
"temperature" : msg.temperature,
"unit" : msg.unit
}
temperature_list.append(item)
df = pd.DataFrame(temperature_list)
df.set_index("datetime",inplace=True)
return df
def get_indoor_actions_historic(indoor_historic_stub, building, zone, start, end, window, agg="MAX"):
"""Gets historic indoor temperature as pd.series.
:param indoor_historic_stub: grpc stub for historic indoor temperature microservice
:param building: (str) building name
:param zone: (str) zone name
:param start: (datetime timezone aware)
:param end: (datetime timezone aware)
:param window: (str) the interval in which to split the data.
:return: pd.df columns["action"], types=["float"], index=time
"""
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
start_unix = int(start.timestamp() * 1e9)
end_unix = int(end.timestamp() * 1e9)
window_seconds = get_window_in_sec(window)
# call service
historic_action_response = indoor_historic_stub.GetRawActions(
indoor_data_historical_pb2.Request(building=building, zone=zone, start=start_unix, end=end_unix,
window=window,aggregation=agg))
# process data
action_list = []
for msg in historic_action_response:
item = {
"datetime" : datetime.datetime.utcfromtimestamp(msg.time / 1e9).replace(tzinfo=pytz.utc).astimezone(tz=start.tzinfo),
"action" : msg.action
}
action_list.append(item)
df = pd.DataFrame(action_list)
df.set_index("datetime",inplace=True)
return df
def get_indoor_modes_historic(indoor_historic_stub, building, zone, start, end, window, agg="MAX"):
"""Gets historic indoor temperature as pd.series.
:param indoor_historic_stub: grpc stub for historic indoor temperature microservice
:param building: (str) building name
:param zone: (str) zone name
:param start: (datetime timezone aware)
:param end: (datetime timezone aware)
:param window: (str) the interval in which to split the data.
:return: pd.df columns["mode"], types=["float"], index=time
"""
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
start_unix = int(start.timestamp() * 1e9)
end_unix = int(end.timestamp() * 1e9)
window_seconds = get_window_in_sec(window)
# call service
historic_mode_response = indoor_historic_stub.GetRawModes(
indoor_data_historical_pb2.Request(building=building, zone=zone, start=start_unix, end=end_unix,
window=window,aggregation=agg))
# process data
mode_list = []
for msg in historic_mode_response:
item = {
"datetime" : datetime.datetime.utcfromtimestamp(msg.time / 1e9).replace(tzinfo=pytz.utc).astimezone(tz=start.tzinfo),
"mode" : msg.mode
}
mode_list.append(item)
df = pd.DataFrame(mode_list)
df.set_index("datetime",inplace=True)
return df
def get_indoor_setpoints_historic(indoor_historic_stub, building, zone, start, end, window,agg="MIN"):
"""Gets historic setpoints temperature as pd.df.
:param indoor_historic_stub: grpc stub for historic indoor temperature microservice
:param building: (str) building name
:param zone: (str) zone name
:param start: (datetime timezone aware)
:param end: (datetime timezone aware)
:param window: (str) the interval in which to split the data.
:return: pd.df columns=["t_low", "t_high"] valus=float, index=time
"""
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
start_unix = int(start.timestamp() * 1e9)
end_unix = int(end.timestamp() * 1e9)
window_seconds = get_window_in_sec(window)
# call service
historic_setpoints_response = indoor_historic_stub.GetRawTemperatureBands(
indoor_data_historical_pb2.Request(building=building, zone=zone, start=start_unix, end=end_unix,
window=window,aggregation=agg))
# process data
setpoints_list = []
for msg in historic_setpoints_response:
item = {
"datetime" : datetime.datetime.utcfromtimestamp(msg.time / 1e9).replace(tzinfo=pytz.utc).astimezone(tz=start.tzinfo),
"t_high" : msg.temperature_high,
"t_low" : msg.temperature_low,
"unit" : msg.unit
}
setpoints_list.append(item)
df = pd.DataFrame(setpoints_list)
df.set_index("datetime",inplace=True)
return df
# indoor prediction functions
def get_indoor_temperature_prediction_stub(INDOOR_TEMPERATURE_PREDICTION_HOST_ADDRESS=None,secure=True):
"""Get the stub to interact with the indoor_temperature_prediction service.
:param INDOOR_TEMPERATURE_PREDICTION_HOST_ADDRESS: Optional argument to supply host address for given service. Otherwise,
set as environment variable.
:return: grpc Stub object.
"""
if INDOOR_TEMPERATURE_PREDICTION_HOST_ADDRESS is None:
INDOOR_TEMPERATURE_PREDICTION_HOST_ADDRESS = os.environ["INDOOR_TEMPERATURE_PREDICTION_HOST_ADDRESS"]
if not secure:
channel = grpc.insecure_channel(INDOOR_TEMPERATURE_PREDICTION_HOST_ADDRESS)
else:
credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(INDOOR_TEMPERATURE_PREDICTION_HOST_ADDRESS, credentials)
return indoor_temperature_prediction_pb2_grpc.IndoorTemperaturePredictionStub(channel)
def get_indoor_temperature_prediction(indoor_temperature_prediction_stub, building, zone, current_time, action, t_in, t_out, t_prev,
other_zone_temperatures):
"""Gets prediction of indoor temperature.
:param indoor_temperature_prediction_stub: grpc stub for prediction of indoor temperature microservice
:param building: (str) building name
:param zone: (str) zone name
:param current_time: (datetime timezone aware)
:param action: (int) Action as given in utils file.
:param t_in: (float) current temperature inside of zone.
    :param t_out: (float) current outdoor temperature.
:param t_prev: (float) the temperature 5 min ago.
:param other_zone_temperatures: {zone_i: indoor temperature of zone_i}
    :return: (float, datetime, str) predicted temperature roughly 5 minutes after current_time, the prediction time, and the temperature unit.
"""
current_time = current_time.replace(microsecond=0)
current_time_unix = int(current_time.timestamp() * 1e9)
# call service
indoor_prediction_response = indoor_temperature_prediction_stub.GetSecondOrderPrediction(
indoor_temperature_prediction_pb2.SecondOrderPredictionRequest(building=building, zone=zone, current_time=current_time_unix,
action=action,
indoor_temperature=t_in, previous_indoor_temperature=t_prev,
outside_temperature=t_out,
other_zone_temperatures=other_zone_temperatures,
temperature_unit="F"))
return indoor_prediction_response.temperature,datetime.datetime.utcfromtimestamp(indoor_prediction_response.time / 1e9).replace(tzinfo=pytz.utc).astimezone(tz=current_time.tzinfo),indoor_prediction_response.unit
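# Example call (every literal below is a placeholder; the action constants are defined at
# the top of this module):
#
#   pred_stub = get_indoor_temperature_prediction_stub()
#   t_next, t_at, unit = get_indoor_temperature_prediction(
#       pred_stub, "bldg-a", "zone-1",
#       current_time=datetime.datetime.now(pytz.utc),
#       action=HEATING_ACTION, t_in=70.0, t_out=55.0, t_prev=69.5,
#       other_zone_temperatures={"zone-2": 71.0})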
def get_indoor_temperature_prediction_error(indoor_temperature_prediction_stub, building, zone, action, start=None, end=None,
temperature_unit="F"):
"""Gets mean and var of the error of indoor temperature predictions.
:param indoor_temperature_prediction_stub: grpc stub for prediction of indoor temperature microservice
:param building: (str) building name
:param zone: (str) zone name
:param action: (int) Action as given in utils file. Specifies for which action to get the error. -1 gets the error
on the whole dataset, regardless of action.
:param start: (datetime timezone aware). If None, get the training error.
:param end: (datetime timezone aware). If None, get the training error.
:param temperature_unit: temperature unit
    :return: mean error (float), variance of error (float), unit of the error (string).
"""
if (start is None) or (end is None):
end = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
start = end - datetime.timedelta(hours=24)
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
start_unix = int(start.timestamp() * 1e9)
end_unix = int(end.timestamp() * 1e9)
# call service
error_response = indoor_temperature_prediction_stub.GetSecondOrderError(
indoor_temperature_prediction_pb2.ErrorRequest(building=building, zone=zone, action=action,
start=start_unix,
end=end_unix,
unit=temperature_unit))
return error_response.mean, error_response.var, error_response.unit
# HVAC Consumption functions
def get_hvac_consumption_stub(HVAC_CONSUMPTION_HOST_ADDRESS=None,secure=True):
"""Get the stub to interact with the hvac_consumption service.
:param HVAC_CONSUMPTION_HOST_ADDRESS: Optional argument to supply host address for given service. Otherwise,
set as environment variable.
:return: grpc Stub object.
"""
if HVAC_CONSUMPTION_HOST_ADDRESS is None:
HVAC_CONSUMPTION_HOST_ADDRESS = os.environ["HVAC_CONSUMPTION_HOST_ADDRESS"]
if not secure:
channel = grpc.insecure_channel(HVAC_CONSUMPTION_HOST_ADDRESS)
else:
credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(HVAC_CONSUMPTION_HOST_ADDRESS, credentials)
return hvac_consumption_pb2_grpc.ConsumptionHVACStub(channel)
def get_hvac_consumption(hvac_consumption_stub, building, zone):
hvac_consumption_response = hvac_consumption_stub.GetConsumption(
hvac_consumption_pb2.Request(building=building, zone=zone))
hvac_consumption_final = {NO_ACTION: 0,
HEATING_ACTION: hvac_consumption_response.heating_consumption,
COOLING_ACTION: hvac_consumption_response.cooling_consumption,
FAN: hvac_consumption_response.ventilation_consumption,
TWO_STAGE_HEATING_ACTION: hvac_consumption_response.heating_consumption_stage_two,
TWO_STAGE_COOLING_ACTION: hvac_consumption_response.cooling_consumption_stage_two,
"UNIT": hvac_consumption_response.unit}
return hvac_consumption_final
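# Example (placeholders): the returned dict maps the action constants above to power
# draw, so the expected consumption of a planned action can be looked up directly:
#
#   consumption = get_hvac_consumption(get_hvac_consumption_stub(), "bldg-a", "zone-1")
#   heating_power = consumption[HEATING_ACTION]   # expressed in consumption["UNIT"]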
# Historic outdoor temperature functions
def get_outdoor_temperature_historic_stub(OUTDOOR_TEMPERATURE_HISTORICAL_HOST_ADDRESS=None,secure=True):
"""Get the stub to interact with the outdoor_temperature_historical service.
:param OUTDOOR_TEMPERATURE_HISTORICAL_HOST_ADDRESS: Optional argument to supply host address for given service.
Otherwise, set as environment variable.
:return: grpc Stub object.
"""
if OUTDOOR_TEMPERATURE_HISTORICAL_HOST_ADDRESS is None:
OUTDOOR_TEMPERATURE_HISTORICAL_HOST_ADDRESS = os.environ["OUTDOOR_TEMPERATURE_HISTORICAL_HOST_ADDRESS"]
if not secure:
channel = grpc.insecure_channel(OUTDOOR_TEMPERATURE_HISTORICAL_HOST_ADDRESS)
else:
credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(OUTDOOR_TEMPERATURE_HISTORICAL_HOST_ADDRESS, credentials)
return outdoor_temperature_historical_pb2_grpc.OutdoorTemperatureStub(channel)
def get_raw_outdoor_temperature_historic(outdoor_historic_stub, building, start, end, window, aggregate="MEAN"):
"""Gets historic outdoor temperature as pd.series.
:param indoor_historic_stub: grpc stub for historic outdoor temperature microservice
:param building: (str) building name
:param zone: (str) zone name
:param start: (datetime timezone aware)
:param end: (datetime timezone aware)
:param window: (str) the interval in which to split the data.
:return: pd.series valus=float, index=time
"""
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
start_unix = int(start.timestamp() * 1e9)
end_unix = int(end.timestamp() * 1e9)
window_seconds = get_window_in_sec(window)
# call service
historic_outdoor_response = outdoor_historic_stub.GetRawTemperature(
outdoor_temperature_historical_pb2.TemperatureRequest(
building=building, start=int(start_unix), end=int(end_unix), window=window, aggregate=aggregate))
# process data
temperature_list = []
for msg in historic_outdoor_response:
item = {
"datetime" : datetime.datetime.utcfromtimestamp(msg.time / 1e9).replace(tzinfo=pytz.utc).astimezone(tz=start.tzinfo),
"temperature" : msg.temperature,
"unit" : msg.unit
}
temperature_list.append(item)
df = pd.DataFrame(temperature_list)
df.set_index("datetime",inplace=True)
return df
def get_preprocessed_outdoor_temperature(outdoor_historic_stub, building, start, end, window):
"""Gets historic outdoor temperature as pd.series.
:param indoor_historic_stub: grpc stub for historic outdoor temperature microservice
:param building: (str) building name
:param zone: (str) zone name
:param start: (datetime timezone aware)
:param end: (datetime timezone aware)
:param window: (str) the interval in which to split the data.
:return: pd.series valus=float, index=time
"""
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
start_unix = int(start.timestamp() * 1e9)
end_unix = int(end.timestamp() * 1e9)
window_seconds = get_window_in_sec(window)
# call service
historic_outdoor_response = outdoor_historic_stub.GetPreprocessedTemperature(
outdoor_temperature_historical_pb2.TemperatureRequest(
building=building, start=int(start_unix), end=int(end_unix), window=window))
# process data
temperature_list = []
for msg in historic_outdoor_response:
item = {
"datetime" : datetime.datetime.utcfromtimestamp(msg.time / 1e9).replace(tzinfo=pytz.utc).astimezone(tz=start.tzinfo),
"temperature" : msg.temperature,
"unit" : msg.unit
}
temperature_list.append(item)
df = pd.DataFrame(temperature_list)
df.set_index("datetime",inplace=True)
return df
# Outdoor temperature prediction functions
def get_outdoor_temperature_prediction_stub(OUTDOOR_TEMPERATURE_PREDICTION_HOST_ADDRESS=None,secure=True):
"""Get the stub to interact with the outdoor_temperature_prediction service.
:param OUTDOOR_TEMPERATURE_PREDICTION_HOST_ADDRESS: Optional argument to supply host address for given service.
Otherwise, set as environment variable.
:return: grpc Stub object.
"""
if OUTDOOR_TEMPERATURE_PREDICTION_HOST_ADDRESS is None:
OUTDOOR_TEMPERATURE_PREDICTION_HOST_ADDRESS = os.environ["OUTDOOR_TEMPERATURE_PREDICTION_HOST_ADDRESS"]
if not secure:
channel = grpc.insecure_channel(OUTDOOR_TEMPERATURE_PREDICTION_HOST_ADDRESS)
else:
credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(OUTDOOR_TEMPERATURE_PREDICTION_HOST_ADDRESS, credentials)
return outdoor_temperature_prediction_pb2_grpc.OutdoorTemperatureStub(channel)
def get_outdoor_temperature_prediction(outdoor_prediction_stub, building, start, end, window):
"""Gets prediction outdoor temperature as pd.series.
:param outdoor_prediction_stub: grpc stub for outdoor temperature prediction microservice
:param building: (str) building name
:param zone: (str) zone name
:param start: (datetime timezone aware)
:param end: (datetime timezone aware)
:param window: (str) the interval in which to split the data.
:return: pd.series valus=float, index=time
"""
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
start_unix = int(start.timestamp() * 1e9)
end_unix = int(end.timestamp() * 1e9)
window_seconds = get_window_in_sec(window)
# call service
prediction_outdoor_response = outdoor_prediction_stub.GetTemperature(
outdoor_temperature_prediction_pb2.TemperatureRequest(
building=building, start=int(start_unix), end=int(end_unix), window=window))
# process data
prediction_list = []
for msg in prediction_outdoor_response.temperatures:
item = {
"datetime" : datetime.datetime.utcfromtimestamp(msg.time / 1e9).replace(tzinfo=pytz.utc).astimezone(tz=start.tzinfo),
"temperature" : msg.temperature,
"unit" : msg.unit
}
prediction_list.append(item)
df = pd.DataFrame(prediction_list)
df.set_index("datetime",inplace=True)
return df
def get_meter_data_historical_stub(METER_DATA_HISTORICAL_HOST_ADDRESS=None,secure=True):
""" Get stub to interact with meter data service.
:param METER_DATA_HISTORICAL_HOST_ADDRESS: Optional argument to supply host address for given service. Otherwise,
set as environment variable.
:return: grpc Stub object.
"""
if METER_DATA_HISTORICAL_HOST_ADDRESS is None:
METER_DATA_HISTORICAL_HOST_ADDRESS = os.environ["METER_DATA_HISTORICAL_HOST_ADDRESS"]
if not secure:
channel = grpc.insecure_channel(METER_DATA_HISTORICAL_HOST_ADDRESS)
else:
credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(METER_DATA_HISTORICAL_HOST_ADDRESS,credentials)
return meter_data_historical_pb2_grpc.MeterDataHistoricalStub(channel)
def get_meter_data_historical(meter_data_stub, bldg, start, end, point_type, aggregate, window):
""" Get meter data as a dataframe.
:param meter_data_stub: grpc stub for meter data service.
:param bldg: list(str) - list of buildings.
:param start: datetime (timezone aware)
:param end: datetime (timezone aware)
:param point_type: (str) Building_Electric_Meter or Green_Button_Meter
:param aggregate: (str) Values include MEAN, MAX, MIN, COUNT, SUM and RAW (the temporal window parameter is ignored)
:param window: (str) Size of the moving window.
    :return: pd.DataFrame with a "power" column (float), indexed by datetime.
"""
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
start_unix = int(start.timestamp() * 1e9)
end_unix = int(end.timestamp() * 1e9)
window_seconds = get_window_in_sec(window)
# Create gRPC request object
request = meter_data_historical_pb2.Request(
building=bldg,
start=int(start_unix),
end=int(end_unix),
point_type=point_type,
aggregate=aggregate,
window=window
)
historic_meter_data_response = meter_data_stub.GetMeterDataHistorical(request)
# process data
meter_list = []
for msg in historic_meter_data_response:
item = {
"datetime" : datetime.datetime.utcfromtimestamp(msg.time / 1e9).replace(tzinfo=pytz.utc).astimezone(tz=start.tzinfo),
"power" : msg.power
}
meter_list.append(item)
df = pd.DataFrame(meter_list)
df.set_index("datetime",inplace=True)
return df
def get_optimizer_stub(OPTIMIZER_HOST_ADDRESS=None,secure=True):
""" Get stub to interact with optimizer service.
:param OPTIMIZER_HOST_ADDRESS: Optional argument to supply host address for given service. Otherwise,
set as environment variable.
:return: grpc Stub object.
"""
if OPTIMIZER_HOST_ADDRESS is None:
OPTIMIZER_HOST_ADDRESS = os.environ["OPTIMIZER_HOST_ADDRESS"]
if not secure:
channel = grpc.insecure_channel(OPTIMIZER_HOST_ADDRESS)
else:
credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(OPTIMIZER_HOST_ADDRESS,credentials)
return optimizer_pb2_grpc.OptimizerStub(channel)
def get_mpc_optimization(optimizer_stub, building, zones, start, end, window, lambda_val, starting_temperatures,
unit="F"):
"""Get the optimal actions according to MPC optimization.
:param optimizer_stub: grpc stub for optimizer service
:param building: (str) building name
:param zones: (list str) zones names
:param start: datetime (timezone aware)
:param end: datetime (timezone aware)
:param window: (str) the intervals in which to optimize
:param lambda_val: (float) between 0 and 1. The lambda value to balance cost and discomfort.
:param starting_temperatures: (dict) {str zone: float temperature} the starting temperatures of all zones in
given building.
:param unit: (string) the unit of the temperature.
    :return: (dict) {(str) zone: (int) action} the optimal actions to take
"""
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
start_unix = int(start.timestamp() * 1e9)
end_unix = int(end.timestamp() * 1e9)
# call service
optimizer_response = optimizer_stub.GetMPCOptimization(
optimizer_pb2.MPCOptimizationRequest(
building=building,
zones=zones,
start=int(start_unix),
end=int(end_unix),
window=window,
lambda_val=lambda_val,
starting_temperatures=starting_temperatures,
unit=unit))
return {iter_zone: optimizer_response.actions[iter_zone] for iter_zone in zones}
def get_mpc_simulation(optimizer_stub, building, zones, start, end, window,
forecasting_horizon, lambda_val, starting_temperatures,
unit="F", num_runs=1):
"""Get the simulation results according to MPC optimization. Stops get_mpc_simulation
when param:end is reached.
:param optimizer_stub: grpc stub for optimizer service
:param building: (str) building name
:param zones: (list str) zones names
:param start: datetime (timezone aware)
:param end: datetime (timezone aware)
:param window: (str) the intervals in which to optimize
:param forecasting_horizon: (str) the timeframe for which to simulate at every step.
:param lambda_val: (float) between 0 and 1. The lambda value to balance cost and discomfort.
:param starting_temperatures: (dict) {str zone: float temperature} the starting temperatures of all zones in
given building.
:param unit: (string) the unit of the temperature.
:param num_runs: (int) the number of runs of simulation to get a better idea
of the variance of the simulation.
:returns:
        actions: {iter_zone: [actions]} actions that were executed for
every step.
temperatures: ({iter_zone: [temperatures]) temperature seen
at every step.
len(actions[zone]) = (end - start)/window
        len(temperatures[zone]) = (end - start)/window + 1
"""
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
start_unix = int(start.timestamp() * 1e9)
end_unix = int(end.timestamp() * 1e9)
# call service
simulation_response = optimizer_stub.GetMPCSimulation(
optimizer_pb2.SimulationRequest(
building=building,
zones=zones,
start=int(start_unix),
end=int(end_unix),
window=window,
forecasting_horizon=forecasting_horizon,
lambda_val=lambda_val,
starting_temperatures=starting_temperatures,
unit=unit,
num_runs=num_runs))
actions = []
temperatures = []
for simulation_result in simulation_response.simulation_results:
actions.append({iter_zone: simulation_result.actions[iter_zone] for iter_zone in zones})
temperatures.append({iter_zone: simulation_result.temperatures[iter_zone] for iter_zone in zones})
return actions, temperatures
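# Example (placeholders throughout; a single simulated run with a 4 hour horizon):
#
#   opt_stub = get_optimizer_stub()
#   actions, temperatures = get_mpc_simulation(
#       opt_stub, "bldg-a", ["zone-1"], start, end, "15m",
#       forecasting_horizon="4h", lambda_val=0.5,
#       starting_temperatures={"zone-1": 70.0}, num_runs=1)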
def check_data(data, start, end, window, check_nan=False):
"""Checks if data has right times and optionally checks for nan.
    This includes checking that the daterange [param:start (inclusive) - param:end (exclusive)) is included in the data.
And that the time-difference between datapoints equals to param:window.
:param data: pd.df or pd.series
:param start: datetime (timezone aware)
:param end: datetime (timezone aware)
:param window: (string)
:param check_nan: If False (default) will not return an error if a datapoint is Nan. If True, will error on nan
data points.
:return: str err message. If no error, returns None."""
if not isinstance(data, pd.DataFrame) and not isinstance(data, pd.Series):
return "Is not a pd.DataFrame/pd.Series"
window = get_window_in_sec(window)
time_diffs = data.index.to_series(keep_tz=True).diff()
if (time_diffs.shape[0] > 1) and ((time_diffs.min() != time_diffs.max()) or (time_diffs.min().seconds != window)):
return "Missing rows or/and bad time frequency."
if (start not in data.index) or ((end - datetime.timedelta(seconds=window)) not in data.index):
return "Does not have valid start or/and end time."
if check_nan and (data.isna().values.any()):
return "Nan values in data."
return None
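# Example (validating a DataFrame returned by one of the getters above):
#
#   err = check_data(comfortband, start, end, "15m", check_nan=True)
#   if err is not None:
#       raise ValueError("comfortband failed validation: " + err)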
def get_baseline_optimizer_stub(BASELINE_OPTIMIZER_HOST_ADDRESS=None,secure=True):
""" Get stub to interact with optimizer service.
:param BASELINE_OPTIMIZER_HOST_ADDRESS: Optional argument to supply host address for given service. Otherwise,
set as environment variable.
:return: grpc Stub object.
"""
if BASELINE_OPTIMIZER_HOST_ADDRESS is None:
BASELINE_OPTIMIZER_HOST_ADDRESS = os.environ["BASELINE_OPTIMIZER_HOST_ADDRESS"]
if not secure:
channel = grpc.insecure_channel(BASELINE_OPTIMIZER_HOST_ADDRESS)
else:
credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(BASELINE_OPTIMIZER_HOST_ADDRESS,credentials)
return baseline_optimizer_pb2_grpc.BaselineOptimizerStub(channel)
def get_normal_schedule_action(baseline_optimizer_stub,building,zones,start,end,window,starting_temperatures,unit,occupancy,do_not_exceed):
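    """Gets the baseline action (follow the normal thermostat schedule) for each zone;
    returns the service's zone->action map."""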
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
start_unix = int(start.timestamp() * 1e9)
end_unix = int(end.timestamp() * 1e9)
baseline_optimizer_response = baseline_optimizer_stub.GetNormalScheduleAction(baseline_optimizer_pb2.NormalScheduleRequest(building=building,zones=zones,start=start_unix,end=end_unix,window=window,starting_temperatures=starting_temperatures,unit=unit,occupancy=occupancy,do_not_exceed=do_not_exceed))
return baseline_optimizer_response.actions
def get_setpoint_expansion_action(baseline_optimizer_stub,building,zones,start,end,window,starting_temperatures,unit,occupancy,do_not_exceed,expansion_degrees):
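    """Gets the baseline action with comfortband setpoints expanded by expansion_degrees;
    returns the service's zone->action map."""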
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
start_unix = int(start.timestamp() * 1e9)
end_unix = int(end.timestamp() * 1e9)
baseline_optimizer_response = baseline_optimizer_stub.GetSetpointExpansionAction(baseline_optimizer_pb2.SetpointExpansionRequest(building=building,zones=zones,start=start_unix,end=end_unix,window=window,starting_temperatures=starting_temperatures,unit=unit,occupancy=occupancy,do_not_exceed=do_not_exceed,expansion_degrees=expansion_degrees))
return baseline_optimizer_response.actions
def get_demand_charge_action(baseline_optimizer_stub,building,zones,start,end,window,starting_temperatures,unit,occupancy,do_not_exceed,max_zones,include_all_zones):
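    """Gets the demand-charge baseline action; max_zones and include_all_zones control which
    zones are considered. Returns the service's zone->action map."""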
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
start_unix = int(start.timestamp() * 1e9)
end_unix = int(end.timestamp() * 1e9)
baseline_optimizer_response = baseline_optimizer_stub.GetDemandChargeAction(baseline_optimizer_pb2.DemandChargeRequest(building=building,zones=zones,start=start_unix,end=end_unix,window=window,starting_temperatures=starting_temperatures,unit=unit,occupancy=occupancy,do_not_exceed=do_not_exceed,max_zones=max_zones,include_all_zones=include_all_zones))
return baseline_optimizer_response.actions
| [
"[email protected]"
] | |
59df0dab074bd06ac5b02c4d0e64f12409454b00 | 5ac30246e65c6640ef71c737cdb5514b90ce5818 | /audit_shell.py | 878a782b17de9d90a895f913e99c7feb4ab59c99 | [] | no_license | yanlingsishao/lijump | bb03357797a5784849b35a7ab85f84a645d9bae7 | c31e10d6b113e04fa5da77aac7a68a76cd2e34a1 | refs/heads/master | 2020-03-27T04:17:09.894332 | 2018-08-24T02:28:11 | 2018-08-24T02:28:11 | 145,927,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018-6-14 17:02
# @Author : Jerry Wang
# @Site :
# @File : audit_shell.py
# @Software: PyCharm
import sys,os
if __name__ == '__main__':
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "LuffyAudit.settings")
import django
django.setup() # 手动注册django所有的APP
from audit.backend import user_interactive
obj = user_interactive.UserShell(sys.argv)
obj.start()
| [
"[email protected]"
] | |
d95f09504e067973a8c951b1f9de6a4dfbc7f241 | 9dcd4f0b95f084e7a1c44d74ce812f5f9464a9d6 | /backend/manage.py | 5093d9c48a4bc5eaa2d3e5f3d7fd853cea0a9db5 | [] | no_license | crowdbotics-apps/plan-20540 | 42337154cad80e9f4256cded340bf1036189e285 | 8c5f6b8bca65be82e216ae2c8b256a64e2ab9f42 | refs/heads/master | 2022-12-18T04:06:37.124514 | 2020-09-21T23:15:19 | 2020-09-21T23:15:19 | 297,482,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'plan_20540.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
e530aaa767d12ea93c377b257ea84936bbd7066a | 7cbc0963d88a5fb4eb241e2a55fd791098dd5f16 | /capp/admin.py | 5a5a374ca6b9cac630382226b588b284e51c8475 | [] | no_license | Kipngetich33/test-repo | a7a4ebeb4eff117db5cff90f40a2b34ed3a3fa66 | e08e1a8a12196dcf806e2270f8efe930405a5379 | refs/heads/master | 2022-12-12T15:54:56.401732 | 2018-05-27T08:54:13 | 2018-05-27T08:54:13 | 134,891,667 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | from django.contrib import admin
from .models import Profile, Question, Comment, Session, Record, Doctor, Inpatient
# Register your models here.
admin.site.register(Inpatient)
admin.site.register(Profile)
admin.site.register(Comment)
admin.site.register(Question)
admin.site.register(Session)
admin.site.register(Record)
admin.site.register(Doctor)
| [
"[email protected]"
] | |
7cf617b5c56a3f2ea11220ca71f5a197cb36b863 | 050fc5ca698dfd7612dee42aa980fc7b5eee40a2 | /skywalking/agent/protocol/interceptors_aio.py | 1ade5fb7ffa878a86eac085cdaa32514df038639 | [
"Apache-2.0"
] | permissive | apache/skywalking-python | 8ac6ce06630c519f9984a45e74c1fcc88cf5b9d6 | 1a360228c63cd246dd4c5dd8e1f09bdd5556ad7d | refs/heads/master | 2023-09-05T02:45:56.225937 | 2023-08-28T22:19:24 | 2023-08-28T22:19:24 | 261,456,329 | 178 | 122 | Apache-2.0 | 2023-08-28T22:19:26 | 2020-05-05T12:13:49 | Python | UTF-8 | Python | false | false | 3,420 | py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
import grpc
class _ClientInterceptorAsync(
grpc.aio.UnaryUnaryClientInterceptor,
grpc.aio.UnaryStreamClientInterceptor,
grpc.aio.StreamUnaryClientInterceptor,
grpc.aio.StreamStreamClientInterceptor
):
def __init__(self, interceptor_async_function):
self._fn = interceptor_async_function
async def intercept_unary_unary(self, continuation, client_call_details, request):
new_details, new_request_iterator, postprocess = await \
self._fn(client_call_details, iter((request,)), False, False)
response = await continuation(new_details, next(new_request_iterator))
return (await postprocess(response)) if postprocess else response
async def intercept_unary_stream(self, continuation, client_call_details, request):
new_details, new_request_iterator, postprocess = await \
self._fn(client_call_details, iter((request,)), False, True)
response_it = await continuation(new_details, next(new_request_iterator))
return (await postprocess(response_it)) if postprocess else response_it
async def intercept_stream_unary(self, continuation, client_call_details, request_iterator):
new_details, new_request_iterator, postprocess = await \
self._fn(client_call_details, request_iterator, True, False)
response = await continuation(new_details, new_request_iterator)
return (await postprocess(response)) if postprocess else response
async def intercept_stream_stream(self, continuation, client_call_details, request_iterator):
new_details, new_request_iterator, postprocess = await \
self._fn(client_call_details, request_iterator, True, True)
response_it = await continuation(new_details, new_request_iterator)
return (await postprocess(response_it)) if postprocess else response_it
def create(intercept_async_call):
return _ClientInterceptorAsync(intercept_async_call)
ClientCallDetails = namedtuple('ClientCallDetails', ('method', 'timeout', 'metadata', 'credentials'))
def header_adder_interceptor_async(header, value):
async def intercept_async_call(client_call_details, request_iterator, request_streaming, response_streaming):
metadata = list(client_call_details.metadata or ())
metadata.append((header, value))
client_call_details = ClientCallDetails(
client_call_details.method, client_call_details.timeout, metadata, client_call_details.credentials,
)
return client_call_details, request_iterator, None
return create(intercept_async_call)
| [
"[email protected]"
] | |
ae7d7e80d8d08994ebaba9368bfbeb41259cdf19 | 0a2cc497665f2a14460577f129405f6e4f793791 | /sdk/reservations/azure-mgmt-reservations/azure/mgmt/reservations/aio/operations/_exchange_operations.py | 00a323e5f5ee98c8fe3be36a18fe084e24a20aee | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | hivyas/azure-sdk-for-python | 112158aa9e1dd6e30cf6b3dde19f5db6ea2a577b | 8b3258fa45f5dc25236c22ad950e48aa4e1c181c | refs/heads/master | 2023-06-17T12:01:26.392186 | 2021-05-18T19:56:01 | 2021-05-18T19:56:01 | 313,761,277 | 1 | 1 | MIT | 2020-12-02T17:48:22 | 2020-11-17T22:42:00 | Python | UTF-8 | Python | false | false | 7,503 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExchangeOperations:
"""ExchangeOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.reservations.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _post_initial(
self,
body: "_models.ExchangeRequest",
**kwargs
) -> Optional["_models.ExchangeOperationResultResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExchangeOperationResultResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._post_initial.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'ExchangeRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExchangeOperationResultResponse', pipeline_response)
if response.status_code == 202:
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_post_initial.metadata = {'url': '/providers/Microsoft.Capacity/exchange'} # type: ignore
async def begin_post(
self,
body: "_models.ExchangeRequest",
**kwargs
) -> AsyncLROPoller["_models.ExchangeOperationResultResponse"]:
"""Exchange Reservation(s).
Returns one or more ``Reservations`` in exchange for one or more ``Reservation`` purchases.
:param body: Request containing the refunds and purchases that need to be executed.
:type body: ~azure.mgmt.reservations.models.ExchangeRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExchangeOperationResultResponse or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.reservations.models.ExchangeOperationResultResponse]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExchangeOperationResultResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._post_initial(
body=body,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExchangeOperationResultResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_post.metadata = {'url': '/providers/Microsoft.Capacity/exchange'} # type: ignore
| [
"[email protected]"
] | |
3b9cdeea125b0d4b04ff8151a9af1f622e9f31b8 | 0388e6159a676944a26ffd10e413980120e2c338 | /extra_foam/gui/ctrl_widgets/scan_button_set.py | 70da587b8f8e5c72b49bde8e857ce1c7d6f2127a | [
"BSD-3-Clause"
] | permissive | scottwedge/EXtra-foam | 0bb3689ec11df7253ce407b9c5c53f68a405200f | 578c6035af023575a5c026b0391d15884ca1df60 | refs/heads/master | 2021-02-07T20:06:18.266595 | 2020-02-24T14:08:50 | 2020-02-24T14:08:50 | 244,071,447 | 0 | 0 | BSD-3-Clause | 2020-03-01T02:05:39 | 2020-03-01T02:05:38 | null | UTF-8 | Python | false | false | 1,489 | py | """
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Author: Jun Zhu <[email protected]>
Copyright (C) European X-Ray Free-Electron Laser Facility GmbH.
All rights reserved.
"""
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton
class ScanButtonSet(QFrame):
scan_toggled_sgn = pyqtSignal(bool)
reset_sgn = pyqtSignal()
def __init__(self, parent=None):
super().__init__(parent=parent)
self._scan_btn = QPushButton("Scan")
self._pause_btn = QPushButton("Pause")
self._pause_btn.setEnabled(False)
self._reset_btn = QPushButton("Reset")
self.initUI()
self.initConnections()
def initUI(self):
layout = QHBoxLayout()
layout.addWidget(self._scan_btn)
layout.addWidget(self._pause_btn)
layout.addWidget(self._reset_btn)
self.setLayout(layout)
def initConnections(self):
self._scan_btn.clicked.connect(self._onStartScan)
self._pause_btn.clicked.connect(self._onStopScan)
self._reset_btn.clicked.connect(self.reset_sgn)
def _onStartScan(self):
self._scan_btn.setEnabled(False)
self._pause_btn.setEnabled(True)
self.scan_toggled_sgn.emit(True)
def _onStopScan(self):
self._pause_btn.setEnabled(False)
self._scan_btn.setEnabled(True)
self.scan_toggled_sgn.emit(False)
| [
"[email protected]"
] | |
a40d7e98cb98dcaf66ed024dcd8b9b94752de86f | e8f99a162207cba82d4e0f969d7bcdb2b9d8b522 | /imooc/celery_learning/celery_app/celeryconfig.py | bfd26a86904e2b5b0b64ae6339b240819533c0c4 | [] | no_license | TesterCC/Python3Scripts | edb5446278ebf13edb64336001081941ca27d67d | 58be67e1ffc74ef50289a885aa4ad05f58e2c383 | refs/heads/master | 2023-08-30T21:16:38.328045 | 2023-08-17T11:23:08 | 2023-08-17T11:23:08 | 93,401,996 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,536 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'MFC'
__time__ = '18/12/4 09:14'
from datetime import timedelta
from celery.schedules import crontab
# broker_url消息中间件
BROKER_URL = 'amqp://guest:guestpwd@localhost:port/vhost_name' # with passwd, e.g. guestpwd is your password
# BROKER_URL = 'redis://127.0.0.1:6379/1' # without passwd
# BROKER_URL = 'redis://:[email protected]:6379/1' # with redis auth, e.g. xxx is your password
# backend_url主要用于存储任务执行结果
CELERY_RESULT_BACKEND = 'redis://:[email protected]:6379/4'
# localhost redis-server /usr/local/redis-4.0.1/etc/redis.conf
CELERY_TIMEZONE = 'Asia/Shanghai'
# 忽略返回任务结果
CELERY_IGNORE_RESULT = False
# UTC
# 导入指定的任务模块
CELERY_IMPORTS = (
'celery_app.task1',
'celery_app.task2',
'celery_app.task_send_email',
)
CELERYD_MAX_TASKS_PER_CHILD = 40
# 设置定时任务 task1 每10s执行一次, task2 每天17:30执行
CELERYBEAT_SCHEDULE = {
'task1': {
'task': 'celery_app.task1.add',
'schedule': timedelta(seconds=7),
'args': (2, 8)
},
'task2': {
'task': 'celery_app.task2.multiply',
# 'schedule': crontab(hour=17, minute=21),
'schedule': timedelta(seconds=3),
'args': (4, 5)
},
'task3': {
'task': 'celery_app.task_send_email.send_email',
# 'schedule': crontab(hour=17, minute=21),
'schedule': timedelta(seconds=5),
},
}
## Run in terminal: celery beat -A celery_app -l INFO
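## A worker process must also run to execute the scheduled tasks, e.g. (Celery 4.x
## syntax, assuming the same app module): celery worker -A celery_app -l INFO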
| [
"[email protected]"
] | |
93e02e28357482f45b11b6504bb548bf8ffb0bd3 | 7759122052337252217fff9d51ec6d125ef370e0 | /iq/engine/gtk/gtkbox_manager.py | 1428ab5c17caecb85a7e8a492c21f26fc90f3528 | [] | no_license | XHermitOne/iq_framework | 3325670c74233d99e599921fad4bd41e5d8104f3 | 7550e242746cb2fb1219474463f8db21f8e3e114 | refs/heads/master | 2023-09-03T21:07:58.107750 | 2023-09-01T07:30:13 | 2023-09-01T07:30:13 | 195,210,479 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
GtkBox manager.
"""
import gi
gi.require_version('Gtk', '3.0')
import gi.repository.Gtk
from ...util import log_func
# from ...util import spc_func
# from ...util import id_func
from . import base_manager
__version__ = (0, 0, 0, 1)
class iqGtkBoxManager(base_manager.iqBaseManager):
"""
GtkBox manager.
"""
def clearGtkBox(self, box=None):
"""
Clear GtkBox.
:param box: GtkBox object.
:return: True/False.
"""
assert issubclass(box.__class__, gi.repository.Gtk.Box), u'GtkBox manager type error'
try:
for child in box.get_children():
box.remove(child)
child.destroy()
return True
except:
log_func.fatal(u'Error clear box <%s>' % box.get_name())
return False
| [
"[email protected]"
] | |
41f45ca9f01ea667729fbcb4f7f1ad0903e8186f | 9f9c0861a392d26c1ec0c317b2cba85515ddc627 | /torch/nn/quantized/modules/rnn.py | 7e523ba830d22bd85be26b8bb793722a0e45d7f2 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | zhuhaozhe/pytorch | be09e6aed6b2b229f4d1126fc170542e2aa02016 | ba556961a7de900c0ad6f10ceba094b9f5a2a61e | refs/heads/master | 2023-08-10T10:35:28.573545 | 2023-08-01T07:05:50 | 2023-08-01T07:05:50 | 205,321,942 | 0 | 1 | NOASSERTION | 2022-09-08T08:00:41 | 2019-08-30T06:46:46 | C++ | UTF-8 | Python | false | false | 1,719 | py | import torch
class LSTM(torch.nn.quantizable.LSTM):
r"""A quantized long short-term memory (LSTM).
For the description and the argument types, please, refer to :class:`~torch.nn.LSTM`
Attributes:
layers : instances of the `_LSTMLayer`
.. note::
To access the weights and biases, you need to access them per layer.
See examples in :class:`~torch.nn.quantizable.LSTM`
Examples::
>>> custom_module_config = {
... 'float_to_observed_custom_module_class': {
... nn.LSTM: nn.quantizable.LSTM,
... },
... 'observed_to_quantized_custom_module_class': {
... nn.quantizable.LSTM: nn.quantized.LSTM,
... }
... }
>>> tq.prepare(model, prepare_custom_module_class=custom_module_config)
>>> tq.convert(model, convert_custom_module_class=custom_module_config)
"""
_FLOAT_MODULE = torch.nn.quantizable.LSTM
def _get_name(self):
return 'QuantizedLSTM'
@classmethod
def from_float(cls, *args, **kwargs):
# The whole flow is float -> observed -> quantized
# This class does observed -> quantized only
raise NotImplementedError("It looks like you are trying to convert a "
"non-observed LSTM module. Please, see "
"the examples on quantizable LSTMs.")
@classmethod
def from_observed(cls, other):
assert type(other) == cls._FLOAT_MODULE
converted = torch.ao.quantization.convert(other, inplace=False,
remove_qconfig=True)
converted.__class__ = cls
return converted
| [
"[email protected]"
] | |
911c9e93d8d43ad832b767d37f5c312e13acad79 | 55628a9a08a6b6646b4a8aa74bedbf2e3fd7d850 | /.history/master_20200119185018.py | 7381a67978262b1e6f18702e2d351e7181371d37 | [] | no_license | StRobertCHSCS/final-project-team | c115dc11b318f7ac782c94860a8801bb558bd107 | 48907e72813c4dd3b48ff36f794f6fce04533219 | refs/heads/master | 2020-12-03T22:35:37.833893 | 2020-01-31T04:05:38 | 2020-01-31T04:05:38 | 231,506,873 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,440 | py | '''
-**make snake longer when eaten
- FIGURE OUT HOW TO KNOW WHERE TO ADD THE NEXT BLOCK (MOVE LAST LOCATION TO BACK)
-fix player_location lists, so that the list only has the location of the current snake location, not infinite list (done)
- fix apple so it disappears when you go over it (done)
'''
import arcade
import random
# Set how many rows and columns we will have
ROW_COUNT = 29
COLUMN_COUNT = 51
# This sets the WIDTH and HEIGHT of each grid location
WIDTH = 20
HEIGHT = 20
# This sets the margin between each cell
# and on the edges of the screen.
MARGIN = 5
# Do the math to figure out our screen dimensions
SCREEN_WIDTH = (WIDTH + MARGIN) * COLUMN_COUNT + MARGIN
SCREEN_HEIGHT = (HEIGHT + MARGIN) * ROW_COUNT + MARGIN
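# Worked example with the values above: each cell occupies WIDTH + MARGIN = 25 px, so
# SCREEN_WIDTH = 25 * 51 + 5 = 1280 px and SCREEN_HEIGHT = 25 * 29 + 5 = 730 px
# (the extra MARGIN closes the grid along the last edge).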
up = False
down = False
left = False
right = False
player_x_column = 5
player_y_row = 5
snake_body = []
body = 1
apple_x = random.randint(0, COLUMN_COUNT - 1)
apple_y = random.randint(0, ROW_COUNT - 1)
apple_display = True
grid_texture = arcade.load_texture("29x51_grid.jpg")
def on_update(delta_time):
snake_move()
def on_draw():
arcade.start_render()
grid_background()
snake()
apple()
def grid_background():
arcade.draw_texture_rectangle(SCREEN_WIDTH//2, SCREEN_HEIGHT//2, grid_texture.width, grid_texture.height, grid_texture, 0)
def snake_move():
global player_x, player_y, player_x_column, player_y_row
if (0 <= player_x_column < COLUMN_COUNT) and (0 <= player_y_row < ROW_COUNT):
if up:
player_y_row += 1
elif down:
player_y_row -= 1
elif right:
player_x_column += 1
elif left:
player_x_column -= 1
# for i in range (1):
# player_loaction_x = player_loaction_x(player_x_column)
# player_loaction_y.append(player_y_row)
else:
restart()
# Player coordinates
player_x = (MARGIN + WIDTH) * player_x_column + MARGIN + WIDTH // 2
player_y = (MARGIN + HEIGHT) * player_y_row + MARGIN + HEIGHT // 2
def restart():
global player_x_column, player_y_row
global up, down, left, right
global snake_body, body
player_x_column = 5
player_y_row = 5
snake_body = []
body = 1
up = False
down = False
left = False
right = False
print ("You died")
def snake():
global player_x_column, player_y_row, apple_x, apple_y, snake_len, snake_body, snake_head
arcade.draw_rectangle_filled(player_x , player_y, WIDTH, HEIGHT, arcade.color.BLUE)
snake_head = [player_x_column, player_y_row]
# head first, then any body segments collected so far
snake_len = [snake_head] + snake_body
# if (len(snake_body) > 1):
# for num in range (1, len(snake_body):
# snake_len[i]= snake_len[i-1]
print("body", body)
# for index in range (body - 1, 0, -1):
# player_x_column = snake_len[index - 1][0]
# player_y_row = snake_len[index - 1][1]
# snake_len[index]
for i in range(len(snake_len)):
arcade.draw_rectangle_filled(
(MARGIN + WIDTH) * snake_len[i][0] + MARGIN + WIDTH // 2,
(MARGIN + HEIGHT) * snake_len[i][1] + MARGIN + HEIGHT // 2 ,
WIDTH, HEIGHT, arcade.color.BLUE)
def apple():
global apple_x, apple_y, apple_x_coordinate, apple_y_coordinate, snake_body, snake_len, body
global SPEED
apple_x_coordinate = (MARGIN + WIDTH) * apple_x + MARGIN + WIDTH // 2
apple_y_coordinate = (MARGIN + HEIGHT) * apple_y + MARGIN + HEIGHT // 2
if (player_x_column == apple_x) and (player_y_row == apple_y):
apple_display = False
# grow the snake by one segment at the head's current cell
# (segments are not yet moved every frame -- see the TODO list at the top)
body += 1
snake_body.append([player_x_column, player_y_row])
print ("hit")
else:
apple_display = True
print (snake_len)
if apple_display is True:
arcade.draw_rectangle_filled(apple_x_coordinate, apple_y_coordinate, WIDTH, HEIGHT, arcade.color.RED)
elif apple_display is False:
apple_x = random.randint(0, COLUMN_COUNT - 1)
apple_y = random.randint(0, ROW_COUNT - 1)
apple_x_coordinate = (MARGIN + WIDTH) * apple_x + MARGIN + WIDTH // 2
apple_y_coordinate = (MARGIN + HEIGHT) * apple_y + MARGIN + HEIGHT // 2
apple_display = True
def on_key_press(key, modifiers):
global up, down, left, right
if key == arcade.key.W:
up = True
down = False
right = False
left = False
elif key == arcade.key.S:
down = True
up = False
right = False
left = False
elif key == arcade.key.A:
left = True
up = False
down = False
right = False
elif key == arcade.key.D:
right = True
up = False
down = False
left = False
def on_key_release(key, modifiers):
pass
def on_mouse_press(x, y, button, modifiers):
pass
def setup():
global grid
# global player_x_column, apple_x, player_y_row, apple_y, SPEED
# SPEED = 10
# if (player_x_column == apple_x) and (player_y_row == apple_y):
# SPEED += 5
arcade.open_window(SCREEN_WIDTH, SCREEN_HEIGHT, "snake")
arcade.set_background_color(arcade.color.BLACK)
arcade.schedule(on_update, 1/10)
# Override arcade window methods
window = arcade.get_window()
window.on_draw = on_draw
window.on_key_press = on_key_press
window.on_key_release = on_key_release
window.on_mouse_press = on_mouse_press
arcade.run()
if __name__ == '__main__':
setup() | [
"[email protected]"
] | |
7abb383871da68acca8803c033a6cda606057eae | 4ecb332ba2edd08d4a0a0021db675b41c3790dbd | /bc19-scaffold/bots/34.TacticsImprovedBot/pilgrims.py | f180cd4981648d302407dcd2442b86e3dd0796e0 | [] | no_license | Nischay-Pro/BattleCode2019 | fdffdd235e8db60189e90e48c3f47f23bb32b30f | 92193daf631687acca00176c1fa6a9255d7d4381 | refs/heads/master | 2020-04-16T10:15:39.875870 | 2019-01-27T21:08:52 | 2019-01-27T21:08:52 | 165,497,691 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,853 | py | import constants
import utility
import pilgrims_utility
import movement
import check
def pilgrim(robot):
if robot.pilgrim_mine_ownership == None:
robot.steps_to_mine += 1
# communications.self_communicate_loop(robot)
# robot.log("Pilgrims current move destination is " + robot.current_move_destination)
carry_karb = robot.me.karbonite
carry_fuel = robot.me.fuel
# The pilgrim is on a mine and wants to deposit resources
if carry_fuel > 80 or carry_karb > 18:
# robot.log("Nearing capacity")
return pilgrim_full(robot)
# The pilgrim checks if it has a mine on its current position
pilgrim_is_mining = pilgrim_mine(robot)
if pilgrim_is_mining !=0 and robot.fuel > 1 and robot.actual_round_number != None:
if robot.actual_round_number >= 6:
if robot.piligrim_did_i_shout_my_x_cord == False:
robot.castle_talk(robot.me.x + 64)
robot.piligrim_did_i_shout_my_x_cord = True
else:
if robot.piligrim_did_i_shout_my_y_cord == False:
robot.castle_talk(robot.me.y + 64)
robot.piligrim_did_i_shout_my_y_cord = True
return pilgrim_is_mining
# Receive signal from castle on which mine to go to
if robot.step == 0:
pilgrims_utility.receive_initial_signal(robot)
# Move Section
pilgrim_is_moving = pilgrim_move(robot)
if pilgrim_is_moving !=0 and robot.fuel > 30:
# robot.log(pilgrim_is_moving)
return pilgrim_is_moving
def pilgrim_move(robot):
# Emergency case, allows pilgrims to mine
if robot.fuel <= 2:
return 0
pos_x = robot.me.x
pos_y = robot.me.y
passable_map, occupied_map, karb_map, fuel_map = utility.get_all_maps(robot)
random_directions = utility.random_cells_around()
# May change for impossible resources
# pilgrims_utility.did_pilgrim_burn_out(robot)
# Capture and start mining any resource if more than 50 turns since creation and no mine
# TODO - Improve this code snippet to mine, if in visible region and empty
# if robot.me.turn > constants.pilgrim_will_scavenge_closeby_mines_after_turns: #and robot.me.turn < constants.pilgrim_will_scavenge_closeby_mines_before_turns:
# for direction in random_directions:
# if (not utility.is_cell_occupied(occupied_map, pos_x + direction[1], pos_y + direction[0])) and utility.is_cell_resourceful(karb_map, fuel_map, pos_x + direction[1], pos_y + direction[0]):
# robot.current_move_destination = None
# utility.default_movement_variables(robot)
# return robot.move(direction[1], direction[0])
# TODO - Make into scout if too old, which will scout enemy bases
# If the mine is already occupied
# pilgrims_utility.is_pilgrim_scavenging(robot)
# Just move
if not movement.is_completely_surrounded(robot):
if robot.current_move_destination == None and robot.pilgrim_mine_ownership != None:
robot.current_move_destination = robot.pilgrim_mine_ownership
move_command = movement.move_to_destination(robot)
if move_command != None:
return move_command
# Random Movement when not enough time
# for direction in random_directions:
# if not utility.is_cell_occupied(occupied_map, pos_x + direction[1], pos_y + direction[0]) and passable_map[pos_y + direction[0]][pos_x + direction[1]] == 1:
# robot.mov_path_between_location_and_destination = None
# return robot.move(direction[1], direction[0])
return 0
def pilgrim_mine(robot):
pos_x = robot.me.x
pos_y = robot.me.y
karb_map = robot.get_karbonite_map()
fuel_map = robot.get_fuel_map()
if utility.is_cell_resourceful(karb_map, fuel_map, pos_x, pos_y):
robot.signal(0, 0)
if utility.is_cell_fuel(fuel_map, pos_x, pos_y):
robot.karb_miner = False
robot.fuel_miner = True
robot.castle_talk(6)
elif utility.is_cell_karbonite(karb_map, pos_x, pos_y):
robot.karb_miner = True
robot.fuel_miner = False
robot.castle_talk(7)
# TRAVIS CHECK MINE 1
robot.pilgrim_mine_ownership = (pos_x, pos_y)
return check.mine_check(robot, 1)
else:
return 0
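# Minimal sketch of the helper semantics assumed above (the real implementations live in
# utility.py, which is not shown here): the karbonite/fuel maps are 2D grids indexed [y][x],
# and a cell counts as resourceful when either map is truthy at that position:
#
#   def is_cell_resourceful(karb_map, fuel_map, x, y):
#       return bool(karb_map[y][x]) or bool(fuel_map[y][x])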
def pilgrim_full(robot):
# If we have adjacent castle/church or haven't reached the convoy age end
pilgrim_give_or_convoy = pilgrims_utility.give_or_mine(robot)
if pilgrim_give_or_convoy != 0 and robot.fuel > 4:
return pilgrim_give_or_convoy
# FIXME - Make churches not be built if castle/other church is in reasonable travel range
if robot.karbonite > 50 and robot.fuel > 200:
return pilgrims_utility.make_church(robot)
return None
| [
"[email protected]"
] | |
ab9d0e899156ce165308ad2bee71b1107e9548ab | ea819acfd7486c0cb613acc89dba6be48502e65c | /test/test_bson.py | 5f068916fc9eee41fe9459e82d842934ada12313 | [
"Apache-2.0"
] | permissive | drg/mongo-python-driver | 5138b8ea0133b3a4a950a9312db42dc0ab81535c | f0382a3ef003b0c97fba0808bc5d22249085304e | refs/heads/master | 2021-01-24T04:21:03.739445 | 2009-09-23T15:26:53 | 2009-09-23T19:53:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,487 | py | # Copyright 2009 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the bson module."""
import unittest
import datetime
import re
import glob
import sys
import types
try:
import uuid
should_test_uuid = True
except ImportError:
should_test_uuid = False
from nose.plugins.skip import SkipTest
sys.path[0:0] = [""]
import qcheck
from pymongo.binary import Binary
from pymongo.code import Code
from pymongo.objectid import ObjectId
from pymongo.dbref import DBRef
from pymongo.son import SON
from pymongo.bson import BSON, is_valid, _to_dicts
from pymongo.errors import UnsupportedTag, InvalidDocument, InvalidStringData
class TestBSON(unittest.TestCase):
def setUp(self):
pass
def test_basic_validation(self):
self.assertRaises(TypeError, is_valid, 100)
self.assertRaises(TypeError, is_valid, u"test")
self.assertRaises(TypeError, is_valid, 10.4)
self.failIf(is_valid("test"))
# the simplest valid BSON document
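# (4-byte little-endian int32 total length = 5, an empty element list, then the
# mandatory trailing 0x00 terminator)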
self.assert_(is_valid("\x05\x00\x00\x00\x00"))
self.assert_(is_valid(BSON("\x05\x00\x00\x00\x00")))
self.failIf(is_valid("\x04\x00\x00\x00\x00"))
self.failIf(is_valid("\x05\x00\x00\x00\x01"))
self.failIf(is_valid("\x05\x00\x00\x00"))
self.failIf(is_valid("\x05\x00\x00\x00\x00\x00"))
def test_random_data_is_not_bson(self):
qcheck.check_unittest(self, qcheck.isnt(is_valid),
qcheck.gen_string(qcheck.gen_range(0, 40)))
def test_basic_to_dict(self):
self.assertEqual({"test": u"hello world"},
BSON("\x1B\x00\x00\x00\x0E\x74\x65\x73\x74\x00\x0C"
"\x00\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F"
"\x72\x6C\x64\x00\x00").to_dict())
self.assertEqual([{"test": u"hello world"}, {}],
_to_dicts("\x1B\x00\x00\x00\x0E\x74\x65\x73\x74\x00"
"\x0C\x00\x00\x00\x68\x65\x6C\x6C\x6F\x20"
"\x77\x6F\x72\x6C\x64\x00\x00\x05\x00\x00"
"\x00\x00"))
def test_data_timestamp(self):
self.assertEqual({"test": (4, 20)},
BSON("\x13\x00\x00\x00\x11\x74\x65\x73\x74\x00\x04"
"\x00\x00\x00\x14\x00\x00\x00\x00").to_dict())
def test_basic_from_dict(self):
self.assertRaises(TypeError, BSON.from_dict, 100)
self.assertRaises(TypeError, BSON.from_dict, "hello")
self.assertRaises(TypeError, BSON.from_dict, None)
self.assertRaises(TypeError, BSON.from_dict, [])
self.assertEqual(BSON.from_dict({}), BSON("\x05\x00\x00\x00\x00"))
self.assertEqual(BSON.from_dict({"test": u"hello world"}),
"\x1B\x00\x00\x00\x02\x74\x65\x73\x74\x00\x0C\x00\x00"
"\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F\x72\x6C\x64\x00"
"\x00")
self.assertEqual(BSON.from_dict({u"mike": 100}),
"\x0F\x00\x00\x00\x10\x6D\x69\x6B\x65\x00\x64\x00\x00"
"\x00\x00")
self.assertEqual(BSON.from_dict({"hello": 1.5}),
"\x14\x00\x00\x00\x01\x68\x65\x6C\x6C\x6F\x00\x00\x00"
"\x00\x00\x00\x00\xF8\x3F\x00")
self.assertEqual(BSON.from_dict({"true": True}),
"\x0C\x00\x00\x00\x08\x74\x72\x75\x65\x00\x01\x00")
self.assertEqual(BSON.from_dict({"false": False}),
"\x0D\x00\x00\x00\x08\x66\x61\x6C\x73\x65\x00\x00"
"\x00")
self.assertEqual(BSON.from_dict({"empty": []}),
"\x11\x00\x00\x00\x04\x65\x6D\x70\x74\x79\x00\x05\x00"
"\x00\x00\x00\x00")
self.assertEqual(BSON.from_dict({"none": {}}),
"\x10\x00\x00\x00\x03\x6E\x6F\x6E\x65\x00\x05\x00\x00"
"\x00\x00\x00")
self.assertEqual(BSON.from_dict({"test": Binary("test")}),
"\x18\x00\x00\x00\x05\x74\x65\x73\x74\x00\x08\x00\x00"
"\x00\x02\x04\x00\x00\x00\x74\x65\x73\x74\x00")
self.assertEqual(BSON.from_dict({"test": Binary("test", 128)}),
"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00\x00"
"\x00\x80\x74\x65\x73\x74\x00")
self.assertEqual(BSON.from_dict({"test": None}),
"\x0B\x00\x00\x00\x0A\x74\x65\x73\x74\x00\x00")
self.assertEqual(BSON.from_dict({"date": datetime.datetime(2007, 1, 8,
0, 30,
11)}),
"\x13\x00\x00\x00\x09\x64\x61\x74\x65\x00\x38\xBE\x1C"
"\xFF\x0F\x01\x00\x00\x00")
self.assertEqual(BSON.from_dict({"regex": re.compile("a*b",
re.IGNORECASE)}),
"\x12\x00\x00\x00\x0B\x72\x65\x67\x65\x78\x00\x61\x2A"
"\x62\x00\x69\x00\x00")
self.assertEqual(BSON.from_dict({"$where": Code("test")}),
"\x1F\x00\x00\x00\x0F\x24\x77\x68\x65\x72\x65\x00\x12"
"\x00\x00\x00\x05\x00\x00\x00\x74\x65\x73\x74\x00\x05"
"\x00\x00\x00\x00\x00")
a = ObjectId("\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B")
self.assertEqual(BSON.from_dict({"oid": a}),
"\x16\x00\x00\x00\x07\x6F\x69\x64\x00\x00\x01\x02\x03"
"\x04\x05\x06\x07\x08\x09\x0A\x0B\x00")
self.assertEqual(BSON.from_dict({"ref": DBRef("coll", a)}),
"\x2F\x00\x00\x00\x03ref\x00\x25\x00\x00\x00\x02$ref"
"\x00\x05\x00\x00\x00coll\x00\x07$id\x00\x00\x01\x02"
"\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00\x00")
def test_null_character_encoding(self):
self.assertRaises(InvalidStringData, BSON.from_dict, {"with zero": "hello\x00world"})
self.assertRaises(InvalidStringData, BSON.from_dict, {"with zero": u"hello\x00world"})
def test_from_then_to_dict(self):
def helper(dict):
self.assertEqual(dict, (BSON.from_dict(dict)).to_dict())
helper({})
helper({"test": u"hello"})
self.assert_(isinstance(BSON.from_dict({"hello": "world"})
.to_dict()["hello"],
types.UnicodeType))
helper({"mike": -10120})
helper({"long": long(10)})
helper({"really big long": 2147483648})
helper({u"hello": 0.0013109})
helper({"something": True})
helper({"false": False})
helper({"an array": [1, True, 3.8, u"world"]})
helper({"an object": {"test": u"something"}})
helper({"a binary": Binary("test", 100)})
helper({"a binary": Binary("test", 128)})
helper({"a binary": Binary("test", 254)})
helper({"another binary": Binary("test")})
helper(SON([(u'test dst', datetime.datetime(1993, 4, 4, 2))]))
helper({"big float": float(10000000000)})
def from_then_to_dict(dict):
return dict == (BSON.from_dict(dict)).to_dict()
qcheck.check_unittest(self, from_then_to_dict,
qcheck.gen_mongo_dict(3))
def test_data_files(self):
# TODO don't hardcode this, actually clone the repo
data_files = "../mongo-qa/modules/bson_tests/tests/*/*.xson"
generate = True
for file_name in glob.glob(data_files):
f = open(file_name, "r")
xml = f.read()
f.close()
try:
doc = SON.from_xml(xml)
bson = BSON.from_dict(doc)
except UnsupportedTag:
print "skipped file %s: %s" % (file_name, sys.exc_info()[1])
continue
try:
f = open(file_name.replace(".xson", ".bson"), "rb")
expected = f.read()
f.close()
self.assertEqual(bson, expected, file_name)
self.assertEqual(doc, bson.to_dict(), file_name)
except IOError:
if generate:
print "generating .bson for %s" % file_name
f = open(file_name.replace(".xson", ".bson"), "w")
f.write(bson)
f.close()
def test_bad_encode(self):
self.assertRaises(InvalidStringData, BSON.from_dict,
{"lalala": '\xf4\xe0\xf0\xe1\xc0 Color Touch'})
def test_overflow(self):
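# BSON integers are signed 64-bit, so the representable range is -2**63 .. 2**63 - 1
# (-9223372036854775808 .. 9223372036854775807); one past either bound must overflow.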
self.assert_(BSON.from_dict({"x": 9223372036854775807L}))
self.assertRaises(OverflowError, BSON.from_dict, {"x": 9223372036854775808L})
self.assert_(BSON.from_dict({"x": -9223372036854775808L}))
self.assertRaises(OverflowError, BSON.from_dict, {"x": -9223372036854775809L})
def test_tuple(self):
self.assertEqual({"tuple": [1, 2]},
BSON.from_dict({"tuple": (1, 2)}).to_dict())
def test_uuid(self):
if not should_test_uuid:
raise SkipTest()
id = uuid.uuid4()
transformed_id = (BSON.from_dict({"id": id})).to_dict()["id"]
self.assert_(isinstance(transformed_id, uuid.UUID))
self.assertEqual(id, transformed_id)
self.assertNotEqual(uuid.uuid4(), transformed_id)
# TODO this test doesn't pass w/ C extension
#
# timegm doesn't handle years < 1900 (negative), at least on OS X
# we probably need to use our own version of timegm
# def test_date_before_epoch(self):
# doc = {"date": datetime.datetime(1600, 5, 5)}
# self.assertEqual(doc, BSON.from_dict(doc).to_dict())
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
0c722d822e98d521f2af77ed2e6f29002c65e0a8 | 93ba28a7808ed5a406753748fedbdbaea5f3c8b2 | /KSTest.py | 92e5fb7c859a739fb20a3a6166aaad182051e025 | [] | no_license | zaixingmao/samples-plots | 7a55005abab1e7644296d1eb2e76f603d160a37b | bb2371c7f664a84c454189ec648bb55630cb7565 | refs/heads/master | 2020-05-21T23:27:34.390427 | 2017-07-14T14:59:52 | 2017-07-14T14:59:52 | 24,139,867 | 0 | 0 | null | 2015-10-23T16:00:22 | 2014-09-17T10:17:08 | Python | UTF-8 | Python | false | false | 4,000 | py | #!/usr/bin/env python
import ROOT as r
import optparse
import tool
r.gROOT.SetBatch(True) # to suppress canvas pop-outs
def KSTest(ifile, ofile, name):
f = r.TFile(ifile)
testTree = f.Get("TestTree")
trainTree = f.Get("TrainTree")
nBins = 20
nBins2 = 100000
xMin = -1.0
xMax = 1.0
BDT_Sig_Train = r.TH1F('BDT_Sig_Train', 'BDT_Sig_Train', nBins, xMin, xMax)
BDT_Sig_Test = r.TH1F('BDT_Sig_Test', 'Overtraining Check (%s)' %name[:name.find('_')], nBins, xMin, xMax)
BDT_Bkg_Train = r.TH1F('BDT_Bkg_Train', 'BDT_Bkg_Train', nBins, xMin, xMax)
BDT_Bkg_Test = r.TH1F('BDT_Bkg_Test', 'BDT_Bkg_Test', nBins, xMin, xMax)
BDT_Sig_Train_4KS = r.TH1F('BDT_Sig_Train_4KS', 'BDT_Sig_Train_4KS', nBins2, xMin, xMax)
BDT_Sig_Test_4KS = r.TH1F('BDT_Sig_Test_4KS', 'BDT_Sig_Test_4KS', nBins2, xMin, xMax)
BDT_Bkg_Train_4KS = r.TH1F('BDT_Bkg_Train_4KS', 'BDT_Bkg_Train_4KS', nBins2, xMin, xMax)
BDT_Bkg_Test_4KS = r.TH1F('BDT_Bkg_Test_4KS', 'BDT_Bkg_Test_4KS', nBins2, xMin, xMax)
totalTest = testTree.GetEntries()
for i in range(totalTest):
testTree.GetEntry(i)
if testTree.className == "Signal":
BDT_Sig_Test.Fill(testTree.BDT, testTree.weight)
BDT_Sig_Test_4KS.Fill(testTree.BDT, testTree.weight)
else:
BDT_Bkg_Test.Fill(testTree.BDT, testTree.weight)
BDT_Bkg_Test_4KS.Fill(testTree.BDT, testTree.weight)
totalTrain = trainTree.GetEntries()
for i in range(totalTrain):
trainTree.GetEntry(i)
if trainTree.className == "Signal":
BDT_Sig_Train.Fill(trainTree.BDT, trainTree.weight)
BDT_Sig_Train_4KS.Fill(trainTree.BDT, trainTree.weight)
else:
BDT_Bkg_Train.Fill(trainTree.BDT, trainTree.weight)
BDT_Bkg_Train_4KS.Fill(trainTree.BDT, trainTree.weight)
BDT_Bkg_Train.Sumw2()
BDT_Sig_Train.Sumw2()
sigKS = BDT_Sig_Test_4KS.KolmogorovTest(BDT_Sig_Train_4KS)
bkgKS = BDT_Bkg_Test_4KS.KolmogorovTest(BDT_Bkg_Train_4KS)
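# TH1::KolmogorovTest returns the Kolmogorov probability that the two histograms come from
# the same parent distribution: values near 1 mean the train/test BDT shapes agree (little
# sign of overtraining), values near 0 flag a significant difference.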
print 'signal: %.4f' %sigKS
print 'background: %.4f' %bkgKS
BDT_Bkg_Train.Scale(1/BDT_Bkg_Train.Integral())
BDT_Bkg_Train.SetMarkerColor(r.kRed)
BDT_Bkg_Train.SetMarkerStyle(21)
BDT_Bkg_Test.SetLineColor(r.kRed)
BDT_Bkg_Test.SetFillColor(r.kRed)
BDT_Bkg_Test.SetFillStyle(3354)
BDT_Bkg_Test.Scale(1/BDT_Bkg_Test.Integral())
BDT_Sig_Train.Scale(1/BDT_Sig_Train.Integral())
BDT_Sig_Train.SetMarkerColor(r.kBlue)
BDT_Sig_Train.SetMarkerStyle(21)
BDT_Sig_Test.SetLineColor(r.kBlue)
BDT_Sig_Test.SetFillColor(r.kBlue)
BDT_Sig_Test.SetFillStyle(3001)
BDT_Sig_Test.Scale(1/BDT_Sig_Test.Integral())
legendHistos1 = []
legendHistos1.append((BDT_Bkg_Test, 'bkg test'))
legendHistos1.append((BDT_Bkg_Train, 'bkg train'))
# legendHistos1.append((BDT_Bkg_Train, 'KS: %0.3f' %bkgKS))
legendHistos2 = []
legendHistos2.append((BDT_Sig_Test, 'sig test'))
legendHistos2.append((BDT_Sig_Train, 'sig train'))
# legendHistos2.append((BDT_Sig_Train, 'KS: %0.3f' %sigKS))
l1 = tool.setMyLegend(lPosition=(0.2, 0.67, 0.5, 0.82), lHistList=legendHistos1)
l2 = tool.setMyLegend(lPosition=(0.6, 0.67, 0.9, 0.82), lHistList=legendHistos2)
r.gStyle.SetOptStat(0)
c = r.TCanvas("c","Test", 800, 600)
BDT_Sig_Test.Draw()
BDT_Sig_Test.GetXaxis().SetTitle("BDT")
BDT_Sig_Test.SetMaximum(0.5)
BDT_Sig_Train.Draw('sameE1P')
BDT_Bkg_Test.Draw('same')
BDT_Bkg_Train.Draw('sameE1P')
l1.Draw('same')
l2.Draw('same')
c.Print('%s.pdf' %ofile)
massPoints = ['260','270','280','290','300','310','320','330','340','350']
# massPoints = ['260','300','350']
nTreesList = ['150']
for nTrees in nTreesList:
for iMass in massPoints:
postFix = '_7_n%s_mJJ' %nTrees
KSTest('/nfs_scratch/zmao/TMVA/TMVA%s%s.root' %(iMass,postFix), '/nfs_scratch/zmao/TMVA/pdf/TMVA%s%s' %(iMass,postFix), 'H2hh%s_n%s' %(iMass, nTrees))
| [
"[email protected]"
] | |
bb7445c4b8823d18bbfa187d22777592411f70d1 | 0805420ce1890c36aa9e0cc1a782945464433ef6 | /client/seasons/client/challengetaskentry.py | 40bd3616f7d69a72a50f4485d485bf15326d58d2 | [] | no_license | cnrat/dec-eve-serenity | 4ebc3b2ab8faa6e6714dbb72b7ebcf92c4b2d75c | 37519e66a5fbb0d7c417d5cf9778636991efbed8 | refs/heads/master | 2021-01-21T03:39:48.969227 | 2016-08-10T05:25:07 | 2016-08-10T05:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,563 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\seasons\client\challengetaskentry.py
import carbonui.const as uiconst
from carbonui.primitives.container import Container
from carbonui.primitives.containerAutoSize import ContainerAutoSize
from carbonui.primitives.fill import Fill
from carbonui.primitives.frame import Frame
from eve.client.script.ui.control.eveLabel import EveLabelLargeBold, Label, EveLabelSmall, EveLabelMedium
from eve.client.script.ui.eveFontConst import EVE_LARGE_FONTSIZE
from seasons.client.challengeexpirationtimer import ChallengeExpirationTimer, CHALLENGE_EXPIRATION_CLOCK_SIZE
from seasons.client.challengetaskprogressbar import ChallengeTaskProgressBar
from seasons.client.const import DEFAULT_ANIMATE_PROGRESS
from seasons.client.seasonpoints import SeasonPoints
from seasons.client.uiutils import get_agent_icon, SEASON_THEME_TEXT_COLOR_REGULAR, SEASON_THEME_TEXT_COLOR_HIGHLIGHTED
import uix
CHALLENGE_MINIMUM_HEIGHT = 120
CHALLENGE_HEIGHT_PADDING = 20
TOP_CONTAINER_MINIMUM_HEIGHT = 64
TOP_CONTAINER_PAD_TOP = 6
TOP_CONTAINER_PAD_LEFT = 10
TOP_CONTAINER_PAD_RIGHT = 10
DESCRIPTION_CONTAINER_PAD_TOP = 5
DESCRIPTION_FONTSIZE = 10
CHALLENGE_PROGRESS_HEIGHT = 20
CHALLENGE_PROGRESS_PAD_TOP = 4
CHALLENGE_PROGRESS_PAD_BOTTOM = 6
CHALLENGE_POINTS_HEIGHT = CHALLENGE_PROGRESS_HEIGHT - CHALLENGE_PROGRESS_PAD_TOP - CHALLENGE_PROGRESS_PAD_BOTTOM
CHALLENGE_POINTS_LEFT = 15
AGENT_IMAGE_SIZE = TOP_CONTAINER_MINIMUM_HEIGHT
TITLE_CONTAINER_HEIGHT = 15
TITLE_LABEL_WIDTH_OFFSET = AGENT_IMAGE_SIZE + CHALLENGE_EXPIRATION_CLOCK_SIZE + 20
CHALLENGE_EXPIRATION_CLOCK_PAD_RIGHT = 20
BACKGROUND_FILL_COLOR = (0.5, 0.5, 0.5, 0.1)
AGENT_FRAME_COLOR = (1.0, 1.0, 1.0)
AGENT_FRAME_OPACITY = 0.3
def calculate_challenges_height(challenges, challenge_text_width, are_challenges_in_two_columns):
if are_challenges_in_two_columns:
return calculate_challenges_height_in_two_columns(challenges, challenge_text_width)
return calculate_challenges_height_in_one_column(challenges, challenge_text_width)
def calculate_challenges_height_in_one_column(challenges, challenge_text_width):
challenges_height = 0
challenge_entries_height = dict()
for challenge in challenges.itervalues():
total_challenge_height, title_text_height, description_text_height = calculate_challenge_height(challenge, challenge_text_width)
challenge_entries_height[challenge.challenge_id] = [total_challenge_height, title_text_height, description_text_height]
challenges_height += total_challenge_height
return (challenges_height, challenge_entries_height)
def calculate_challenges_height_in_two_columns(challenges, challenge_text_width):
challenges_height = 0
challenge_entries_height = dict()
challenges_in_column_one = []
challenges_in_column_two = []
challenge_count = 1
for challenge in challenges.itervalues():
if challenge_count % 2:
challenges_in_column_one.append(challenge)
else:
challenges_in_column_two.append(challenge)
challenge_count += 1
number_of_rows = challenge_count / 2
for row in xrange(0, number_of_rows):
challenge_one = challenges_in_column_one[row]
challenge_one_height, challenge_one_title_text_height, challenge_one_description_text_height = calculate_challenge_height(challenge_one, challenge_text_width)
challenge_height = challenge_one_height
is_there_column_two = row < len(challenges_in_column_two)
if is_there_column_two:
challenge_two = challenges_in_column_two[row]
challenge_two_height, challenge_two_title_text_height, challenge_two_description_text_height = calculate_challenge_height(challenge_two, challenge_text_width)
challenge_height = max(challenge_one_height, challenge_two_height)
challenge_entries_height[challenge_two.challenge_id] = [challenge_height, challenge_two_title_text_height, challenge_two_description_text_height]
challenge_entries_height[challenge_one.challenge_id] = [challenge_height, challenge_one_title_text_height, challenge_one_description_text_height]
challenges_height += challenge_height
return (challenges_height, challenge_entries_height)
def calculate_challenge_height(challenge, text_width):
text_width = text_width - 2 * TOP_CONTAINER_PAD_LEFT - TOP_CONTAINER_PAD_RIGHT
title_text_height = uix.GetTextHeight(strng=challenge.name, width=text_width, font=EVE_LARGE_FONTSIZE)
title_text_height = max(TITLE_CONTAINER_HEIGHT, title_text_height + TOP_CONTAINER_PAD_TOP)
description_text_height = uix.GetTextHeight(strng=challenge.message_text, width=text_width, font=DESCRIPTION_FONTSIZE) + DESCRIPTION_CONTAINER_PAD_TOP
description_text_height += DESCRIPTION_CONTAINER_PAD_TOP
top_height = max(TOP_CONTAINER_MINIMUM_HEIGHT, title_text_height + description_text_height + DESCRIPTION_CONTAINER_PAD_TOP)
progress_height = CHALLENGE_PROGRESS_HEIGHT + CHALLENGE_PROGRESS_PAD_TOP + CHALLENGE_PROGRESS_PAD_BOTTOM
total_challenge_height = max(CHALLENGE_MINIMUM_HEIGHT, top_height + progress_height) + CHALLENGE_HEIGHT_PADDING
return (total_challenge_height, title_text_height, description_text_height)
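# Worked example (illustrative numbers): with a one-line title (title_text_height clamped
# to 21) and a short description (description_text_height ~ 25), top_height =
# max(64, 21 + 25 + 5) = 64, the progress strip takes 20 + 4 + 6 = 30, so the entry is
# max(120, 64 + 30) + 20 = 140 px tall.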
class ChallengeTaskEntry(Container):
def ApplyAttributes(self, attributes):
Container.ApplyAttributes(self, attributes)
self.challenge = attributes.challenge
self.challenge_title_height = attributes.challenge_title_height
self.challenge_description_height = attributes.challenge_description_height
self.challenge_height = self._get_challenge_height()
self.progress_frame_width = attributes.progress_frame_width
self.progress_frame_width_offset = attributes.progress_frame_width_reduction
self.animate_progress = attributes.Get('animate_progress', DEFAULT_ANIMATE_PROGRESS)
Fill(bgParent=self, color=BACKGROUND_FILL_COLOR)
self._construct_header()
self._construct_rewards()
self._construct_progress()
def _get_challenge_height(self):
return max(TOP_CONTAINER_MINIMUM_HEIGHT, self.challenge_title_height + self.challenge_description_height)
def _construct_header(self):
self.top_container = Container(name='top_container', parent=self, align=uiconst.TOTOP, height=self.challenge_height, padTop=TOP_CONTAINER_PAD_TOP, padLeft=TOP_CONTAINER_PAD_LEFT, padRight=TOP_CONTAINER_PAD_RIGHT)
self._construct_agent()
self._construct_description()
def _construct_agent(self):
agent_container = Container(name='agent_container', parent=self.top_container, align=uiconst.TOLEFT, width=AGENT_IMAGE_SIZE, height=AGENT_IMAGE_SIZE)
frame_container = Container(name='frame_container', parent=agent_container, align=uiconst.TOPLEFT, width=AGENT_IMAGE_SIZE, height=AGENT_IMAGE_SIZE)
Frame(name='agent_icon_frame', parent=frame_container, frameConst=uiconst.FRAME_BORDER1_CORNER0, opacity=AGENT_FRAME_OPACITY, color=AGENT_FRAME_COLOR)
get_agent_icon(name='agent_icon', parent=agent_container, align=uiconst.CENTERTOP, size=AGENT_IMAGE_SIZE, agent_id=self.challenge.agent_id)
def _construct_description(self):
self.title_container = Container(name='title_container', parent=self.top_container, align=uiconst.TOTOP, height=max(TITLE_CONTAINER_HEIGHT, self.challenge_title_height), padLeft=TOP_CONTAINER_PAD_LEFT)
title_label_container_width = self.width - AGENT_IMAGE_SIZE - CHALLENGE_EXPIRATION_CLOCK_PAD_RIGHT - CHALLENGE_EXPIRATION_CLOCK_SIZE - TOP_CONTAINER_PAD_LEFT - TOP_CONTAINER_PAD_RIGHT
self.title_label_container = Container(name='title_label_container', parent=self.title_container, align=uiconst.TOLEFT, width=title_label_container_width, clipChildren=True)
title_label = EveLabelLargeBold(name='title_label', parent=self.title_label_container, align=uiconst.CENTERLEFT, text=self.challenge.name)
title_label.color = SEASON_THEME_TEXT_COLOR_HIGHLIGHTED
expiration_timer_container = Container(name='expiration_timer_container', parent=self.title_container, align=uiconst.TORIGHT, width=CHALLENGE_EXPIRATION_CLOCK_SIZE)
ChallengeExpirationTimer(name='expiration_timer', parent=expiration_timer_container, align=uiconst.CENTER, height=CHALLENGE_EXPIRATION_CLOCK_SIZE, width=CHALLENGE_EXPIRATION_CLOCK_SIZE, expiration_date=self.challenge.expiration_date)
description_container = Container(name='description_container', parent=self.top_container, align=uiconst.TOTOP, height=self.challenge_description_height, padLeft=TOP_CONTAINER_PAD_LEFT, padTop=DESCRIPTION_CONTAINER_PAD_TOP)
description_label = Label(name='description_label', parent=description_container, align=uiconst.TOTOP, text=self.challenge.message_text, fontsize=DESCRIPTION_FONTSIZE)
description_label.color = SEASON_THEME_TEXT_COLOR_REGULAR
def _construct_rewards(self):
reward_wrapper_container = ContainerAutoSize(name='reward_wrapper_container', parent=self, align=uiconst.TOBOTTOM_NOPUSH, height=CHALLENGE_PROGRESS_HEIGHT, padTop=CHALLENGE_PROGRESS_PAD_TOP, padBottom=CHALLENGE_PROGRESS_PAD_BOTTOM, padLeft=TOP_CONTAINER_PAD_LEFT, padRight=TOP_CONTAINER_PAD_RIGHT)
SeasonPoints(name='reward_container', parent=reward_wrapper_container, points=self.challenge.points_awarded, season_points_size=CHALLENGE_PROGRESS_HEIGHT, reward_label_class=EveLabelMedium, align=uiconst.TORIGHT, height=CHALLENGE_POINTS_HEIGHT, left=CHALLENGE_POINTS_LEFT)
def _construct_progress(self):
self.progress_container = ChallengeTaskProgressBar(name='progress_container', parent=self, align=uiconst.TOBOTTOM, challenge=self.challenge, progress_frame_width=self.progress_frame_width, progress_frame_width_offset=self.progress_frame_width_offset, animate_progress=self.animate_progress, height=CHALLENGE_PROGRESS_HEIGHT, padTop=CHALLENGE_PROGRESS_PAD_TOP, padBottom=CHALLENGE_PROGRESS_PAD_BOTTOM, padLeft=TOP_CONTAINER_PAD_LEFT, padRight=TOP_CONTAINER_PAD_RIGHT, label_type_function=EveLabelSmall, adapt_text_color_to_progress=True)
def update_challenge_progress(self, new_progress):
self.progress_container.update_challenge(new_progress)
def complete_challenge(self):
self.progress_container.update_challenge(self.challenge.max_progress) | [
"[email protected]"
] | |
640bb5c14c09f8ec02c9dacad2871708e2458f5d | 36da783434c28e605fa8ad71764e5173568f6d9a | /include/TestHydrusServer.py | b7ecf6d0c846e5c78b065841b29f8eeafe997f4f | [
"WTFPL"
] | permissive | wlerin/hydrus | 238550af37815a8eac727870f36b2ea3f616b817 | dab4e0131aa6d8e79da5753dd14c8c1b6cd9c169 | refs/heads/master | 2021-01-22T17:28:01.583906 | 2016-05-18T20:07:14 | 2016-05-18T20:07:14 | 59,448,048 | 0 | 0 | null | 2016-05-23T03:00:37 | 2016-05-23T03:00:37 | null | UTF-8 | Python | false | false | 28,930 | py | import ClientConstants as CC
import ClientData
import ClientFiles
import ClientLocalServer
import ClientMedia
import hashlib
import httplib
import HydrusConstants as HC
import HydrusServer
import HydrusServerResources
import HydrusSerialisable
import itertools
import os
import ServerFiles
import ServerServer
import shutil
import stat
import TestConstants
import time
import threading
import unittest
from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint, connectProtocol
from twisted.internet.defer import deferredGenerator, waitForDeferred
import HydrusData
import HydrusGlobals
class TestServer( unittest.TestCase ):
@classmethod
def setUpClass( self ):
services = []
self._file_service = ClientData.GenerateService( HydrusData.GenerateKey(), HC.FILE_REPOSITORY, 'file repo', {} )
self._tag_service = ClientData.GenerateService( HydrusData.GenerateKey(), HC.TAG_REPOSITORY, 'tag repo', {} )
self._admin_service = ClientData.GenerateService( HydrusData.GenerateKey(), HC.SERVER_ADMIN, 'server admin', {} )
services_manager = HydrusGlobals.test_controller.GetServicesManager()
services_manager._keys_to_services[ self._file_service.GetServiceKey() ] = self._file_service
services_manager._keys_to_services[ self._tag_service.GetServiceKey() ] = self._tag_service
services_manager._keys_to_services[ self._admin_service.GetServiceKey() ] = self._admin_service
os.makedirs( ServerFiles.GetExpectedUpdateDir( self._file_service.GetServiceKey() ) )
os.makedirs( ServerFiles.GetExpectedUpdateDir( self._tag_service.GetServiceKey() ) )
permissions = [ HC.GET_DATA, HC.POST_DATA, HC.POST_PETITIONS, HC.RESOLVE_PETITIONS, HC.MANAGE_USERS, HC.GENERAL_ADMIN, HC.EDIT_SERVICES ]
account_key = HydrusData.GenerateKey()
account_type = HydrusData.AccountType( 'account', permissions, ( None, None ) )
created = HydrusData.GetNow() - 100000
expires = None
used_bytes = 0
used_requests = 0
self._account = HydrusData.Account( account_key, account_type, created, expires, used_bytes, used_requests )
self._access_key = HydrusData.GenerateKey()
self._file_hash = HydrusData.GenerateKey()
def TWISTEDSetup():
reactor.listenTCP( HC.DEFAULT_SERVER_ADMIN_PORT, ServerServer.HydrusServiceAdmin( self._admin_service.GetServiceKey(), HC.SERVER_ADMIN, 'hello' ) )
reactor.listenTCP( HC.DEFAULT_LOCAL_FILE_PORT, ClientLocalServer.HydrusServiceLocal( CC.LOCAL_FILE_SERVICE_KEY, HC.LOCAL_FILE, 'hello' ) )
reactor.listenTCP( HC.DEFAULT_LOCAL_BOORU_PORT, ClientLocalServer.HydrusServiceBooru( CC.LOCAL_BOORU_SERVICE_KEY, HC.LOCAL_BOORU, 'hello' ) )
reactor.listenTCP( HC.DEFAULT_SERVICE_PORT, ServerServer.HydrusServiceRepositoryFile( self._file_service.GetServiceKey(), HC.FILE_REPOSITORY, 'hello' ) )
reactor.listenTCP( HC.DEFAULT_SERVICE_PORT + 1, ServerServer.HydrusServiceRepositoryTag( self._tag_service.GetServiceKey(), HC.TAG_REPOSITORY, 'hello' ) )
reactor.callFromThread( TWISTEDSetup )
time.sleep( 1 )
@classmethod
def tearDownClass( self ):
shutil.rmtree( ServerFiles.GetExpectedUpdateDir( self._file_service.GetServiceKey() ) )
shutil.rmtree( ServerFiles.GetExpectedUpdateDir( self._tag_service.GetServiceKey() ) )
def _test_basics( self, host, port ):
connection = httplib.HTTPConnection( host, port, timeout = 10 )
#
connection.request( 'GET', '/' )
response = connection.getresponse()
data = response.read()
p1 = data == HydrusServerResources.CLIENT_ROOT_MESSAGE
p2 = data == HydrusServerResources.ROOT_MESSAGE_BEGIN + 'hello' + HydrusServerResources.ROOT_MESSAGE_END
self.assertTrue( p1 or p2 )
#
with open( os.path.join( HC.STATIC_DIR, 'hydrus.ico' ), 'rb' ) as f: favicon = f.read()
connection.request( 'GET', '/favicon.ico' )
response = connection.getresponse()
data = response.read()
self.assertEqual( data, favicon )
def _test_local_file( self, host, port ):
connection = httplib.HTTPConnection( host, port, timeout = 10 )
#
path = ClientFiles.GetExpectedFilePath( HC.CLIENT_FILES_DIR, self._file_hash, HC.IMAGE_JPEG )
with open( path, 'wb' ) as f: f.write( 'file' )
connection.request( 'GET', '/file?hash=' + self._file_hash.encode( 'hex' ) )
response = connection.getresponse()
data = response.read()
self.assertEqual( data, 'file' )
try: os.remove( path )
except: pass
#
path = ClientFiles.GetExpectedThumbnailPath( self._file_hash )
with open( path, 'wb' ) as f: f.write( 'thumb' )
connection.request( 'GET', '/thumbnail?hash=' + self._file_hash.encode( 'hex' ) )
response = connection.getresponse()
data = response.read()
self.assertEqual( data, 'thumb' )
try: os.remove( path )
except: pass
def _test_file_repo( self, service, host, port ):
info = service.GetInfo()
info[ 'access_key' ] = self._access_key
# file
path = ServerFiles.GetExpectedPath( 'file', self._file_hash )
with open( path, 'wb' ) as f: f.write( 'file' )
response = service.Request( HC.GET, 'file', { 'hash' : self._file_hash.encode( 'hex' ) } )
self.assertEqual( response, 'file' )
try: os.remove( path )
except: pass
path = os.path.join( HC.STATIC_DIR, 'hydrus.png' )
with open( path, 'rb' ) as f: file = f.read()
service.Request( HC.POST, 'file', { 'file' : file } )
written = HydrusGlobals.test_controller.GetWrite( 'file' )
[ ( args, kwargs ) ] = written
( written_service_key, written_account, written_file_dict ) = args
self.assertEqual( written_file_dict[ 'hash' ], '\xadm5\x99\xa6\xc4\x89\xa5u\xeb\x19\xc0&\xfa\xce\x97\xa9\xcdey\xe7G(\xb0\xce\x94\xa6\x01\xd22\xf3\xc3' )
self.assertEqual( written_file_dict[ 'ip' ], '127.0.0.1' )
self.assertEqual( written_file_dict[ 'height' ], 200 )
self.assertEqual( written_file_dict[ 'width' ], 200 )
self.assertEqual( written_file_dict[ 'mime' ], 2 )
self.assertEqual( written_file_dict[ 'size' ], 5270 )
# ip
( ip, timestamp ) = ( '94.45.87.123', HydrusData.GetNow() - 100000 )
HydrusGlobals.test_controller.SetRead( 'ip', ( ip, timestamp ) )
response = service.Request( HC.GET, 'ip', { 'hash' : self._file_hash.encode( 'hex' ) } )
self.assertEqual( response[ 'ip' ], ip )
self.assertEqual( response[ 'timestamp' ], timestamp )
# thumbnail
path = ServerFiles.GetExpectedPath( 'thumbnail', self._file_hash )
with open( path, 'wb' ) as f: f.write( 'thumb' )
response = service.Request( HC.GET, 'thumbnail', { 'hash' : self._file_hash.encode( 'hex' ) } )
self.assertEqual( response, 'thumb' )
try: os.remove( path )
except: pass
def _test_local_booru( self, host, port ):
#
connection = httplib.HTTPConnection( host, port, timeout = 10 )
#
with open( os.path.join( HC.STATIC_DIR, 'local_booru_style.css' ), 'rb' ) as f:
css = f.read()
connection.request( 'GET', '/style.css' )
response = connection.getresponse()
data = response.read()
self.assertEqual( data, css )
#
share_key = HydrusData.GenerateKey()
hashes = [ HydrusData.GenerateKey() for i in range( 5 ) ]
with open( ClientFiles.GetExpectedFilePath( HC.CLIENT_FILES_DIR, hashes[0], HC.IMAGE_JPEG ), 'wb' ) as f: f.write( 'file' )
with open( ClientFiles.GetExpectedThumbnailPath( hashes[0], False ), 'wb' ) as f: f.write( 'thumbnail' )
local_booru_manager = HydrusGlobals.test_controller.GetManager( 'local_booru' )
#
self._test_local_booru_requests( connection, share_key, hashes[0], 404 )
#
info = {}
info[ 'name' ] = 'name'
info[ 'text' ] = 'text'
info[ 'timeout' ] = 0
info[ 'hashes' ] = hashes
# hash, inbox, size, mime, width, height, duration, num_frames, num_words, tags_manager, locations_manager, local_ratings, remote_ratings
media_results = [ ClientMedia.MediaResult( ( hash, True, 500, HC.IMAGE_JPEG, 640, 480, None, None, None, None, None, None, None ) ) for hash in hashes ]
HydrusGlobals.test_controller.SetRead( 'local_booru_share_keys', [ share_key ] )
HydrusGlobals.test_controller.SetRead( 'local_booru_share', info )
HydrusGlobals.test_controller.SetRead( 'media_results', media_results )
local_booru_manager.RefreshShares()
#
self._test_local_booru_requests( connection, share_key, hashes[0], 403 )
#
info[ 'timeout' ] = None
HydrusGlobals.test_controller.SetRead( 'local_booru_share', info )
local_booru_manager.RefreshShares()
#
self._test_local_booru_requests( connection, share_key, hashes[0], 200 )
#
HydrusGlobals.test_controller.SetRead( 'local_booru_share_keys', [] )
local_booru_manager.RefreshShares()
#
self._test_local_booru_requests( connection, share_key, hashes[0], 404 )
def _test_local_booru_requests( self, connection, share_key, hash, expected_result ):
requests = []
requests.append( '/gallery?share_key=' + share_key.encode( 'hex' ) )
requests.append( '/page?share_key=' + share_key.encode( 'hex' ) + '&hash=' + hash.encode( 'hex' ) )
requests.append( '/file?share_key=' + share_key.encode( 'hex' ) + '&hash=' + hash.encode( 'hex' ) )
requests.append( '/thumbnail?share_key=' + share_key.encode( 'hex' ) + '&hash=' + hash.encode( 'hex' ) )
for request in requests:
connection.request( 'GET', request )
response = connection.getresponse()
data = response.read()
self.assertEqual( response.status, expected_result )
def _test_repo( self, service, host, port ):
service_key = service.GetServiceKey()
# news
news = 'this is the news'
service.Request( HC.POST, 'news', { 'news' : news } )
written = HydrusGlobals.test_controller.GetWrite( 'news' )
[ ( args, kwargs ) ] = written
( written_service_key, written_news ) = args
self.assertEqual( news, written_news )
# num_petitions
num_petitions = 23
HydrusGlobals.test_controller.SetRead( 'num_petitions', num_petitions )
response = service.Request( HC.GET, 'num_petitions' )
self.assertEqual( response[ 'num_petitions' ], num_petitions )
# petition
action = HC.CONTENT_UPDATE_PETITION
account_identifier = HydrusData.AccountIdentifier( account_key = HydrusData.GenerateKey() )
reason = 'it sucks'
contents = [ HydrusData.Content( HC.CONTENT_TYPE_FILES, [ HydrusData.GenerateKey() for i in range( 10 ) ] ) ]
petition = HydrusData.ServerToClientPetition( action = action, petitioner_account_identifier = account_identifier, reason = reason, contents = contents )
HydrusGlobals.test_controller.SetRead( 'petition', petition )
response = service.Request( HC.GET, 'petition' )
self.assertEqual( type( response ), HydrusData.ServerToClientPetition )
# update
begin = 100
subindex_count = 5
update = HydrusData.ServerToClientServiceUpdatePackage()
update.SetBeginEnd( begin, begin + HC.UPDATE_DURATION - 1 )
update.SetSubindexCount( subindex_count )
path = ServerFiles.GetExpectedServiceUpdatePackagePath( service_key, begin )
with open( path, 'wb' ) as f: f.write( update.DumpToNetworkString() )
response = service.Request( HC.GET, 'service_update_package', { 'begin' : begin } )
self.assertEqual( response.GetBegin(), update.GetBegin() )
try: os.remove( path )
except: pass
subindex = 2
num_hashes = 12
tag = 'series:blah'
hash_ids_to_hashes = { i : HydrusData.GenerateKey() for i in range( 12 ) }
rows = [ ( tag, [ i for i in range( num_hashes ) ] ) ]
update = HydrusData.ServerToClientContentUpdatePackage()
update.AddContentData( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_ADD, rows, hash_ids_to_hashes )
path = ServerFiles.GetExpectedContentUpdatePackagePath( service_key, begin, subindex )
with open( path, 'wb' ) as f: f.write( update.DumpToNetworkString() )
response = service.Request( HC.GET, 'content_update_package', { 'begin' : begin, 'subindex' : subindex } )
self.assertEqual( response.GetNumContentUpdates(), update.GetNumContentUpdates() )
try: os.remove( path )
except: pass
update = HydrusData.ClientToServerContentUpdatePackage( {}, hash_ids_to_hashes )
service.Request( HC.POST, 'content_update_package', { 'update' : update } )
written = HydrusGlobals.test_controller.GetWrite( 'update' )
[ ( args, kwargs ) ] = written
( written_service_key, written_account, written_update ) = args
self.assertEqual( update.GetHashes(), written_update.GetHashes() )
def _test_restricted( self, service, host, port ):
# access_key
registration_key = HydrusData.GenerateKey()
HydrusGlobals.test_controller.SetRead( 'access_key', self._access_key )
request_headers = {}
request_headers[ 'Hydrus-Key' ] = registration_key.encode( 'hex' )
response = service.Request( HC.GET, 'access_key', request_headers = request_headers )
self.assertEqual( response[ 'access_key' ], self._access_key )
info = service.GetInfo()
info[ 'access_key' ] = self._access_key
# set up session
last_error = 0
account = self._account
HydrusGlobals.test_controller.SetRead( 'service', service )
HydrusGlobals.test_controller.SetRead( 'account_key_from_access_key', HydrusData.GenerateKey() )
HydrusGlobals.test_controller.SetRead( 'account', self._account )
# account
response = service.Request( HC.GET, 'account' )
self.assertEqual( repr( response[ 'account' ] ), repr( self._account ) )
# account_info
account_info = { 'message' : 'hello' }
HydrusGlobals.test_controller.SetRead( 'account_info', account_info )
HydrusGlobals.test_controller.SetRead( 'account_key_from_identifier', HydrusData.GenerateKey() )
response = service.Request( HC.GET, 'account_info', { 'subject_account_key' : HydrusData.GenerateKey().encode( 'hex' ) } )
self.assertEqual( response[ 'account_info' ], account_info )
response = service.Request( HC.GET, 'account_info', { 'subject_hash' : HydrusData.GenerateKey().encode( 'hex' ) } )
self.assertEqual( response[ 'account_info' ], account_info )
response = service.Request( HC.GET, 'account_info', { 'subject_hash' : HydrusData.GenerateKey().encode( 'hex' ), 'subject_tag' : 'hello'.encode( 'hex' ) } )
self.assertEqual( response[ 'account_info' ], account_info )
# account_types
account_types = { 'message' : 'hello' }
HydrusGlobals.test_controller.SetRead( 'account_types', account_types )
response = service.Request( HC.GET, 'account_types' )
self.assertEqual( response[ 'account_types' ], account_types )
edit_log = 'blah'
service.Request( HC.POST, 'account_types', { 'edit_log' : edit_log } )
written = HydrusGlobals.test_controller.GetWrite( 'account_types' )
[ ( args, kwargs ) ] = written
( written_service_key, written_edit_log ) = args
self.assertEqual( edit_log, written_edit_log )
# registration_keys
registration_key = HydrusData.GenerateKey()
HydrusGlobals.test_controller.SetRead( 'registration_keys', [ registration_key ] )
response = service.Request( HC.GET, 'registration_keys', { 'num' : 1, 'title' : 'blah' } )
self.assertEqual( response[ 'registration_keys' ], [ registration_key ] )
response = service.Request( HC.GET, 'registration_keys', { 'num' : 1, 'title' : 'blah', 'lifetime' : 100 } )
self.assertEqual( response[ 'registration_keys' ], [ registration_key ] )
# stats
stats = { 'message' : 'hello' }
HydrusGlobals.test_controller.SetRead( 'stats', stats )
response = service.Request( HC.GET, 'stats' )
self.assertEqual( response[ 'stats' ], stats )
def _test_server_admin( self, service, host, port ):
info = service.GetInfo()
info[ 'host' ] = host
info[ 'port' ] = port
# init
access_key = HydrusData.GenerateKey()
HydrusGlobals.test_controller.SetRead( 'init', access_key )
response = service.Request( HC.GET, 'init' )
self.assertEqual( response[ 'access_key' ], access_key )
#
info[ 'access_key' ] = self._access_key
# backup
response = service.Request( HC.POST, 'backup' )
# services
services_info = { 'message' : 'hello' }
HydrusGlobals.test_controller.SetRead( 'services_info', services_info )
response = service.Request( HC.GET, 'services_info' )
self.assertEqual( response[ 'services_info' ], services_info )
edit_log = 'blah'
registration_keys = service.Request( HC.POST, 'services', { 'edit_log' : edit_log } )
written = HydrusGlobals.test_controller.GetWrite( 'services' )
[ ( args, kwargs ) ] = written
( written_service_key, written_edit_log ) = args
self.assertEqual( edit_log, written_edit_log )
def _test_tag_repo( self, service, host, port ):
pass
def test_local_service( self ):
host = '127.0.0.1'
port = HC.DEFAULT_LOCAL_FILE_PORT
self._test_basics( host, port )
self._test_local_file( host, port )
def test_repository_file( self ):
host = '127.0.0.1'
port = HC.DEFAULT_SERVICE_PORT
info = self._file_service.GetInfo()
info[ 'host' ] = host
info[ 'port' ] = port
self._test_basics( host, port )
self._test_restricted( self._file_service, host, port )
self._test_repo( self._file_service, host, port )
self._test_file_repo( self._file_service, host, port )
def test_repository_tag( self ):
host = '127.0.0.1'
port = HC.DEFAULT_SERVICE_PORT + 1
info = self._tag_service.GetInfo()
info[ 'host' ] = host
info[ 'port' ] = port
self._test_basics( host, port )
self._test_restricted( self._tag_service, host, port )
self._test_repo( self._tag_service, host, port )
self._test_tag_repo( self._tag_service, host, port )
def test_server_admin( self ):
host = '127.0.0.1'
port = HC.DEFAULT_SERVER_ADMIN_PORT
info = self._admin_service.GetInfo()
info[ 'host' ] = host
info[ 'port' ] = port
self._test_basics( host, port )
self._test_restricted( self._admin_service, host, port )
self._test_server_admin( self._admin_service, host, port )
def test_local_booru( self ):
host = '127.0.0.1'
port = HC.DEFAULT_LOCAL_BOORU_PORT
self._test_basics( host, port )
self._test_local_booru( host, port )
'''
class TestAMP( unittest.TestCase ):
@classmethod
def setUpClass( self ):
self._alice = HydrusData.GenerateKey()
self._bob = HydrusData.GenerateKey()
self._server_port = HC.DEFAULT_SERVICE_PORT + 10
self._service_key = HydrusData.GenerateKey()
def TWISTEDSetup():
self._factory = HydrusServer.MessagingServiceFactory( self._service_key )
reactor.listenTCP( self._server_port, self._factory )
reactor.callFromThread( TWISTEDSetup )
time.sleep( 1 )
def _get_deferred_result( self, deferred ):
def err( failure ):
failure.trap( Exception )
return failure.type( failure.value )
deferred.addErrback( err )
before = time.time()
while not deferred.called:
time.sleep( 0.1 )
if time.time() - before > 10: raise Exception( 'Trying to get deferred timed out!' )
result = deferred.result
if issubclass( type( result ), Exception ): raise result
return result
def _get_client_protocol( self ):
point = TCP4ClientEndpoint( reactor, '127.0.0.1', self._server_port )
deferred = connectProtocol( point, HydrusServerAMP.MessagingClientProtocol() )
protocol = self._get_deferred_result( deferred )
return protocol
def _make_persistent_connection( self, protocol, access_key, name ):
identifier = hashlib.sha256( access_key ).digest()
HC.app.SetRead( 'im_identifier', identifier )
permissions = [ HC.GET_DATA, HC.POST_DATA, HC.POST_PETITIONS, HC.RESOLVE_PETITIONS, HC.MANAGE_USERS, HC.GENERAL_ADMIN, HC.EDIT_SERVICES ]
account_key = HydrusData.GenerateKey()
account_type = HC.AccountType( 'account', permissions, ( None, None ) )
created = HC.GetNow() - 100000
expires = None
used_bytes = 0
used_requests = 0
account = HC.Account( account_key, account_type, created, expires, used_bytes, used_requests )
HC.app.SetRead( 'account_key_from_access_key', HydrusData.GenerateKey() )
HC.app.SetRead( 'account', account )
deferred = protocol.callRemote( HydrusServerAMP.IMSessionKey, access_key = access_key, name = name )
result = self._get_deferred_result( deferred )
session_key = result[ 'session_key' ]
deferred = protocol.callRemote( HydrusServerAMP.IMLoginPersistent, network_version = HC.NETWORK_VERSION, session_key = session_key )
result = self._get_deferred_result( deferred )
self.assertEqual( result, {} )
def _make_temporary_connection( self, protocol, identifier, name ):
deferred = protocol.callRemote( HydrusServerAMP.IMLoginTemporary, network_version = HC.NETWORK_VERSION, identifier = identifier, name = name )
result = self._get_deferred_result( deferred )
self.assertEqual( result, {} )
def test_connections( self ):
persistent_protocol = self._get_client_protocol()
persistent_access_key = HydrusData.GenerateKey()
persistent_identifier = hashlib.sha256( persistent_access_key ).digest()
persistent_name = 'persistent'
self._make_persistent_connection( persistent_protocol, persistent_access_key, persistent_name )
self.assertIn( persistent_identifier, self._factory._persistent_connections )
self.assertIn( persistent_name, self._factory._persistent_connections[ persistent_identifier ] )
temp_protocol_1 = self._get_client_protocol()
temp_protocol_2 = self._get_client_protocol()
temp_name_1 = 'temp_1'
temp_identifier = HydrusData.GenerateKey()
temp_name_2 = 'temp_2'
self._make_temporary_connection( temp_protocol_1, temp_identifier, temp_name_1 )
self._make_temporary_connection( temp_protocol_2, temp_identifier, temp_name_2 )
self.assertIn( temp_identifier, self._factory._temporary_connections )
self.assertIn( temp_name_1, self._factory._temporary_connections[ temp_identifier ] )
self.assertIn( temp_name_2, self._factory._temporary_connections[ temp_identifier ] )
def test_status( self ):
# some of this is UDP, so get that working!
# add two bobs
# ask for status of the bobs
# test that we get both, online
# now disconnect a bob
# ask for bob status
# test that we only have one bob
# now disconnect other bob
# repeat for nothing
pass
def test_message( self ):
persistent_protocol = self._get_client_protocol()
persistent_access_key = HydrusData.GenerateKey()
persistent_identifier = hashlib.sha256( persistent_access_key ).digest()
persistent_name = 'persistent'
self._make_persistent_connection( persistent_protocol, persistent_access_key, persistent_name )
temp_protocol = self._get_client_protocol()
temp_identifier = HydrusData.GenerateKey()
temp_name = 'temp'
self._make_temporary_connection( temp_protocol, temp_identifier, temp_name )
#
HC.pubsub.ClearPubSubs()
message = 'hello temp'
deferred = persistent_protocol.callRemote( HydrusServerAMP.IMMessageServer, identifier_to = temp_identifier, name_to = temp_name, message = message )
result = self._get_deferred_result( deferred )
self.assertEqual( result, {} )
result = HC.pubsub.GetPubSubs( 'im_message_received' )
[ ( args, kwargs ) ] = result
self.assertEqual( args, ( persistent_identifier, persistent_name, temp_identifier, temp_name, message ) )
#
HC.pubsub.ClearPubSubs()
message = 'hello persistent'
deferred = temp_protocol.callRemote( HydrusServerAMP.IMMessageServer, identifier_to = persistent_identifier, name_to = persistent_name, message = message )
result = self._get_deferred_result( deferred )
self.assertEqual( result, {} )
result = HC.pubsub.GetPubSubs( 'im_message_received' )
[ ( args, kwargs ) ] = result
self.assertEqual( args, ( temp_identifier, temp_name, persistent_identifier, persistent_name, message ) )
''' | [
"[email protected]"
] | |
b5559016125f4ba359d3ccc395429165da594707 | 4ba32be96850894f8c94597899a401b3b19f216e | /uotp/packet/time.py | b4e31801d2f99580fc5de05750169d19a2106202 | [
"Unlicense"
] | permissive | dlunch/uotp | 2e204cf036f5a735d1f8fe3149d0dd08d96cbedf | bfa52a5aae4c7b40c10aebaaa4667c26d40b5ff7 | refs/heads/master | 2021-08-31T23:15:52.285195 | 2017-12-23T11:21:42 | 2017-12-23T11:21:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | from struct import unpack
from .base import Packet, Opcode
class TimeRequest(Packet):
OPCODE = Opcode.Time
SIMPLE = True
@classmethod
def _encode_payload(cls, data: dict) -> bytes:
return b''
@classmethod
def _decode_payload(cls, payload: bytes) -> dict:
time, = unpack("!I", payload)
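# "!I" = network (big-endian) byte order, unsigned 32-bit int;
# e.g. unpack("!I", b'\x00\x00\x00\x10') -> (16,)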
return {
'time': time
}
| [
"[email protected]"
] | |
d2c98cb9d7f276f7a2cc3774ca7207c9c874da3a | dbce70b3685e04fe7b52687bfc4bc9d1c3325486 | /src/filingcabinet/migrations/0019_auto_20210323_1404.py | 30f74a4c0f93ad21c2ea0d031a18c8e6eefb9001 | [] | no_license | okfde/django-filingcabinet | d0fd8ea1deb7e990dcfe510df548bd497e96fe5e | 5d5ff8f9f6573614d61def654b3e22805bf84934 | refs/heads/main | 2023-09-02T10:53:28.789501 | 2023-08-02T15:34:57 | 2023-08-02T15:34:57 | 144,304,373 | 7 | 4 | null | 2023-09-14T16:21:49 | 2018-08-10T15:37:53 | Python | UTF-8 | Python | false | false | 1,210 | py | # Generated by Django 3.1.6 on 2021-03-23 13:04
import uuid
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.FILINGCABINET_DOCUMENT_MODEL),
("filingcabinet", "0018_auto_20200622_1302"),
]
operations = [
migrations.AddField(
model_name="document",
name="listed",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="documentcollection",
name="listed",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="documentcollection",
name="uid",
field=models.UUIDField(default=uuid.uuid4, editable=False),
),
migrations.AlterField(
model_name="page",
name="document",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="pages",
to=settings.FILINGCABINET_DOCUMENT_MODEL,
),
),
]
| [
"[email protected]"
] | |
49aeb6dcb89830cd7a6f3a7ffabb101c58b2a116 | 77ada1a21fd1086b00fe5e0f2a7e568bca8562c5 | /visualization_data.py | 9f0c9c5000ea270576f852d41355864e40b59c74 | [] | no_license | Tulin2010/LSTM_GoogleClusterTraceData | 584f8d38395ffd159f30496487ad5c8161b4c331 | a999b3a609bb1907b6fbe85c5783b0365078f53e | refs/heads/master | 2023-03-17T13:06:17.392797 | 2017-11-21T15:18:02 | 2017-11-21T15:18:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | import pandas as pd
import matplotlib.pyplot as plt
from pandas import read_csv
dataset = read_csv('/home/nguyen/learnRNNs/international-airline-passengers.csv', usecols=[1], engine='python', skipfooter=3)
plt.plot(dataset)
plt.show()
# print a | [
"[email protected]"
] | |
ed419545953e68ef8416ac79e6c96d621fb18c94 | 4dc5aa4f1a99b5a8ca20413640094149e025b49e | /project-addons/l10n_es_facturae_ph/models/__init__.py | ad2302ae21a5303ac4d03e0bea984332125d7098 | [] | no_license | digitalsatori/PXGO_00064_2014_PHA | 469dd86e595a125a5ca1f24c51756182638a0847 | fe27d2f456deb750f9fba528feaa075dcf4a1b02 | refs/heads/master | 2023-07-19T18:32:17.178115 | 2023-07-15T13:20:05 | 2023-07-15T13:20:05 | 62,711,911 | 0 | 0 | null | 2023-08-28T21:36:39 | 2016-07-06T10:14:56 | Python | UTF-8 | Python | false | false | 168 | py | # -*- coding: utf-8 -*-
# © 2022 Pharmadus Botanicals
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import account_invoice, facturae_invoice
| [
"[email protected]"
] | |
91b7938f4809073bba962f3a9dfd05892a57cfa0 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/application_gateway_url_path_map_py3.py | cc013f10ac410d7745b713e261396fb447cd6378 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 3,457 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class ApplicationGatewayUrlPathMap(SubResource):
"""UrlPathMaps give a url path to the backend mapping information for
PathBasedRouting.
:param id: Resource ID.
:type id: str
:param default_backend_address_pool: Default backend address pool resource
of URL path map.
:type default_backend_address_pool:
~azure.mgmt.network.v2017_06_01.models.SubResource
:param default_backend_http_settings: Default backend http settings
resource of URL path map.
:type default_backend_http_settings:
~azure.mgmt.network.v2017_06_01.models.SubResource
:param default_redirect_configuration: Default redirect configuration
resource of URL path map.
:type default_redirect_configuration:
~azure.mgmt.network.v2017_06_01.models.SubResource
:param path_rules: Path rule of URL path map resource.
:type path_rules:
list[~azure.mgmt.network.v2017_06_01.models.ApplicationGatewayPathRule]
:param provisioning_state: Provisioning state of the backend http settings
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param type: Type of the resource.
:type type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'default_backend_address_pool': {'key': 'properties.defaultBackendAddressPool', 'type': 'SubResource'},
'default_backend_http_settings': {'key': 'properties.defaultBackendHttpSettings', 'type': 'SubResource'},
'default_redirect_configuration': {'key': 'properties.defaultRedirectConfiguration', 'type': 'SubResource'},
'path_rules': {'key': 'properties.pathRules', 'type': '[ApplicationGatewayPathRule]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, id: str=None, default_backend_address_pool=None, default_backend_http_settings=None, default_redirect_configuration=None, path_rules=None, provisioning_state: str=None, name: str=None, etag: str=None, type: str=None, **kwargs) -> None:
super(ApplicationGatewayUrlPathMap, self).__init__(id=id, **kwargs)
self.default_backend_address_pool = default_backend_address_pool
self.default_backend_http_settings = default_backend_http_settings
self.default_redirect_configuration = default_redirect_configuration
self.path_rules = path_rules
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
self.type = type
| [
"[email protected]"
] | |
c961cee021b74de7528f7c6bf4dd84a756d25224 | 7b5c2eeae723cfda08a29d60845ba45c0eda54d7 | /demo/condition.py | 84be2d6db93b226c145988ec653f9abd28ff3f9e | [] | no_license | sankarmanoj/CTE-Python | 90b0cadd84442bf4678648536f29aadb66b86449 | 127f7fe3123c58975c1ed06e5e74fb0da4517b77 | refs/heads/master | 2020-01-27T09:58:42.168546 | 2016-11-11T13:38:06 | 2016-11-11T13:38:06 | 67,203,144 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | for x in range(100):
if x%3==0:
print x
| [
"[email protected]"
] | |
fdc9507613f0c24c6bf4372dcdc2f935907300f4 | f3693916a8b118bf139364604dac3f51235ed613 | /functional/Components/Clients/Clients_POST/test_TC_43120_Clients_POST_Height_Gt.py | c91858daec38cff468e6ba12818cf956996a8beb | [] | no_license | muktabehera/QE | e7d62284889d8241d22506f6ee20547f1cfe6db1 | 3fedde591568e35f7b80c5bf6cd6732f8eeab4f8 | refs/heads/master | 2021-03-31T02:19:15.369562 | 2018-03-13T02:45:10 | 2018-03-13T02:45:10 | 124,984,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,324 | py | # -*- coding: UTF-8 -*-
"""PFE Component Tests - Clients.
* TC-43120 - Clients POST:
Verify that user is able to add source constraint rule with specific rule for parameter 'Height>GT(Greater Than) using request POST '/clients/'.
Equivalent test CURL command:
curl -H "Host: <client_host>" -H "Authorization: Bearer <valid_token>"
-X POST -d @<JSON_data_file> -H "Content-Type: application/json"
"<PF_host>://<client_host>/clients"
Same, with test data:
curl -H "Host: <client_host>" -H "Authorization: Bearer <valid_token>"
-X POST -d @<JSON_data_file> -H "Content-Type: application/json"
"<PF_host>://<client_host>/clients"
JSON data sent to PathFinder in this test:
{'id': 'sourceRuleHeightGT',
'matchingRule': {'groups': [], 'operator': 'ALL', 'rules': []},
'name': 'POST: Client with Source Rule Height GT',
'sourceSelectionRule': [{'groups': [],
'operator': 'ALL',
'rules': [{'contextField': 'heightPx',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': 1000,
'operator': 'GT'}]}]}
"""
import pytest
from qe_common import *
logger = init_logger()
@pytest.mark.components
@pytest.allure.story('Clients')
@pytest.allure.feature('POST')
class Test_PFE_Components(object):
"""PFE Clients test cases."""
@pytest.allure.link('https://jira.qumu.com/browse/TC-43120')
@pytest.mark.Clients
@pytest.mark.POST
def test_TC_43120_POST_Clients_Height_Gt(self, context):
"""TC-43120 - Clients-POST
Verify that user is able to add source constraint rule with specific rule for parameter 'Height>GT(Greater Than) using request POST '/clients/'."""
# Define a test step
with pytest.allure.step("""Verify that user is able to add source constraint rule with specific rule for parameter 'Height>GT(Greater Than) using request POST '/clients/'."""):
# Test case configuration
clientDetails = context.sc.ClientDetails(
id='sourceRuleHeightGT',
matchingRule={'operator': 'ALL',
'rules': [],
'groups': []},
name='POST: Client with Source Rule Height GT',
sourceSelectionRule=[{
'operator':
'ALL',
'rules': [{
'expressionType': 'Single',
'contextField': 'heightPx',
'operator': 'GT',
'contextFieldType': 'String',
'matchValue': 1000,
'contextFieldKey': None
}],
'groups': []
}])
# createEntity the Clients.
# The `check` call validates return code
# and some of the swagger schema.
# Most schema checks are disabled.
response = check(
context.cl.Clients.createEntity(
body=clientDetails
)
)
| [
"[email protected]"
] | |
1ab116b1625ef9856f2198a6637bb253a87c266b | 25f4e894beced05eb15708ac2314d72e679fa069 | /google/google/spiders/GoogleSpider.py | 7358c7cbfd2aefd2703b5e751cd026ebcac532c8 | [] | no_license | pyscrape/web-scraping-projects | 4e6059daaa2f1092e2c8f1ee4a4ad1e422b21096 | f1e77b151de256e1a5e83099859635edc2f5826a | refs/heads/master | 2021-06-01T15:02:42.548264 | 2016-08-24T07:27:11 | 2016-08-24T07:27:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,883 | py | import scrapy
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.lxmlhtml import LxmlLinkExtractor
from google.items import GoogleItem
from scrapy.conf import settings
def parsing_rating(line):
if len(line)>0:
return line[0][line[0].find("alt"):]
else:
return ""
class GoogleSpider(CrawlSpider):
name = 'google'
allowed_domains = ['google.com']
# rules = (Rule(LxmlLinkExtractor(allow=(r'\/([A-Z])([A-Z0-9]{9})'),deny=('')),callback='parse_item'),Rule(LxmlLinkExtractor(allow=(''))),),)
# rules = (Rule(LxmlLinkExtractor(allow=(r'https://www.tripadvisor.com/Attraction_Review.*')),callback='parse_trip', process_links='process_links'),)
    rules = (Rule(LxmlLinkExtractor(allow=(r'https://www.google.com/.*')), callback='parse_trip', follow=False), Rule(LxmlLinkExtractor(allow=(''))),)
def __init__(self,*args, **kwargs):
        super(GoogleSpider, self).__init__(*args, **kwargs)
start_url='https://www.tripadvisor.com/Attractions-g187337-Activities-Frankfurt_Hesse.html'
# start_url='https://www.tripadvisor.com/'
self.start_urls = [start_url]
def parse_trip(self,response):
item = GoogleItem()
print "\n\n---------------------START-----------------------"
print response.url
# print response.xpath('//a/@href').extract()
# try:
item['name'] = response.xpath('//*[@id="HEADING"]/text()').extract()[0].encode('ascii','ignore')
# item['rating'] = parsing_rating(response.xpath('//*[@id="HEADING_GROUP"]/div/div[2]/div[1]/div/span/img').extract())
# item['neighborhood'] = response.xpath('//*[@id="MAP_AND_LISTING"]/div[2]/div/div[2]/div/div[1]/div/address/span/span').extract()
# item['classification'] = response.xpath('//*[@id="HEADING_GROUP"]/div/div[3]/div[2]/div').extract()
item['url'] = response.url
# item['price'] = response.xpath('//*[@id="ABOVE_THE_FOLD"]/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]/div[1]/text()').extract()
# item['hours'] = response.xpath('//*[@id="MAP_AND_LISTING"]/div[2]/div/div[2]/div/div[4]/div/div[2]/div').extract()
# item['desc'] = response.xpath('//*[@id="OVERLAY_CONTENTS"]/div/p/text()').extract()
# item['desc'] = [desc.encode('ascii','ignore') for desc in response.xpath('//*[@id="feature-bullets"]/ul/li/span/text()').extract() ]
# usernames = response.xpath('//*[@class="username mo"]').extract()
# reviews = response.xpath('//*[@class="partial_entry"]/text()').extract()
# item['reviews'] = zip(usernames,reviews)
print "\n\n---------------------------------------------------"
print(item)
# except:
# print('Not a product!')
# item = None
yield item
def process_links(self,links):
print "\n LINKS"
links_list = []
for i in links:
if "https://www.tripadvisor.com/Attraction_Review" in i.url:
links_list.append(i)
print i.url
return links_list
def dummy(self,response):
print(str(response.url)) | [
"[email protected]"
] | |
b6332d0f705af38d75004144447d9437b6cc3a1a | c6b5725f1317a5b16d8025365826acb3560cb9a6 | /HLTrigger/Configuration/python/HLTrigger_Datasets_PRef_cff.py | 8c499ed574c5d0d92b7ecb9aeb2ff4a542243aa5 | [
"Apache-2.0"
] | permissive | cms-patatrack/cmssw | d9f090b937225545911aa8d4ae8495ca230b50ee | 0a92cc7e09ca63a2cf505e99d6d890703ffe75dc | refs/heads/CMSSW_11_3_X_Patatrack | 2023-08-17T02:22:06.066802 | 2021-04-06T10:59:15 | 2021-04-06T10:59:15 | 113,325,755 | 4 | 7 | Apache-2.0 | 2021-09-01T21:23:46 | 2017-12-06T14:20:07 | C++ | UTF-8 | Python | false | false | 8,283 | py | # /dev/CMSSW_11_3_0/PRef
import FWCore.ParameterSet.Config as cms
# stream PhysicsCommissioning
from HLTrigger.HLTfilters.triggerResultsFilter_cfi import triggerResultsFilter as streamPhysicsCommissioning_datasetHLTPhysics_selector
streamPhysicsCommissioning_datasetHLTPhysics_selector.hltResults = cms.InputTag('TriggerResults', '', 'HLT')
streamPhysicsCommissioning_datasetHLTPhysics_selector.l1tResults = cms.InputTag('')
streamPhysicsCommissioning_datasetHLTPhysics_selector.throw = cms.bool(False)
streamPhysicsCommissioning_datasetHLTPhysics_selector.triggerConditions = cms.vstring('HLT_Physics_v7')
from HLTrigger.HLTfilters.triggerResultsFilter_cfi import triggerResultsFilter as streamPhysicsCommissioning_datasetZeroBias_selector
streamPhysicsCommissioning_datasetZeroBias_selector.hltResults = cms.InputTag('TriggerResults', '', 'HLT')
streamPhysicsCommissioning_datasetZeroBias_selector.l1tResults = cms.InputTag('')
streamPhysicsCommissioning_datasetZeroBias_selector.throw = cms.bool(False)
streamPhysicsCommissioning_datasetZeroBias_selector.triggerConditions = cms.vstring(
'HLT_Random_v3',
'HLT_ZeroBias_FirstCollisionAfterAbortGap_v5',
'HLT_ZeroBias_v6'
)
# stream PhysicsEndOfFill
from HLTrigger.HLTfilters.triggerResultsFilter_cfi import triggerResultsFilter as streamPhysicsEndOfFill_datasetEmptyBX_selector
streamPhysicsEndOfFill_datasetEmptyBX_selector.hltResults = cms.InputTag('TriggerResults', '', 'HLT')
streamPhysicsEndOfFill_datasetEmptyBX_selector.l1tResults = cms.InputTag('')
streamPhysicsEndOfFill_datasetEmptyBX_selector.throw = cms.bool(False)
streamPhysicsEndOfFill_datasetEmptyBX_selector.triggerConditions = cms.vstring(
'HLT_HIL1NotBptxORForPPRef_v2',
'HLT_HIL1UnpairedBunchBptxMinusForPPRef_v2',
'HLT_HIL1UnpairedBunchBptxPlusForPPRef_v2'
)
# stream PhysicsHIZeroBias1
from HLTrigger.HLTfilters.triggerResultsFilter_cfi import triggerResultsFilter as streamPhysicsHIZeroBias1_datasetHIZeroBias1_selector
streamPhysicsHIZeroBias1_datasetHIZeroBias1_selector.hltResults = cms.InputTag('TriggerResults', '', 'HLT')
streamPhysicsHIZeroBias1_datasetHIZeroBias1_selector.l1tResults = cms.InputTag('')
streamPhysicsHIZeroBias1_datasetHIZeroBias1_selector.throw = cms.bool(False)
streamPhysicsHIZeroBias1_datasetHIZeroBias1_selector.triggerConditions = cms.vstring('HLT_HIZeroBias_part0_v6')
from HLTrigger.HLTfilters.triggerResultsFilter_cfi import triggerResultsFilter as streamPhysicsHIZeroBias1_datasetHIZeroBias2_selector
streamPhysicsHIZeroBias1_datasetHIZeroBias2_selector.hltResults = cms.InputTag('TriggerResults', '', 'HLT')
streamPhysicsHIZeroBias1_datasetHIZeroBias2_selector.l1tResults = cms.InputTag('')
streamPhysicsHIZeroBias1_datasetHIZeroBias2_selector.throw = cms.bool(False)
streamPhysicsHIZeroBias1_datasetHIZeroBias2_selector.triggerConditions = cms.vstring('HLT_HIZeroBias_part1_v6')
# stream PhysicsHIZeroBias2
from HLTrigger.HLTfilters.triggerResultsFilter_cfi import triggerResultsFilter as streamPhysicsHIZeroBias2_datasetHIZeroBias3_selector
streamPhysicsHIZeroBias2_datasetHIZeroBias3_selector.hltResults = cms.InputTag('TriggerResults', '', 'HLT')
streamPhysicsHIZeroBias2_datasetHIZeroBias3_selector.l1tResults = cms.InputTag('')
streamPhysicsHIZeroBias2_datasetHIZeroBias3_selector.throw = cms.bool(False)
streamPhysicsHIZeroBias2_datasetHIZeroBias3_selector.triggerConditions = cms.vstring('HLT_HIZeroBias_part2_v6')
from HLTrigger.HLTfilters.triggerResultsFilter_cfi import triggerResultsFilter as streamPhysicsHIZeroBias2_datasetHIZeroBias4_selector
streamPhysicsHIZeroBias2_datasetHIZeroBias4_selector.hltResults = cms.InputTag('TriggerResults', '', 'HLT')
streamPhysicsHIZeroBias2_datasetHIZeroBias4_selector.l1tResults = cms.InputTag('')
streamPhysicsHIZeroBias2_datasetHIZeroBias4_selector.throw = cms.bool(False)
streamPhysicsHIZeroBias2_datasetHIZeroBias4_selector.triggerConditions = cms.vstring('HLT_HIZeroBias_part3_v6')
# stream PhysicsHIZeroBias3
from HLTrigger.HLTfilters.triggerResultsFilter_cfi import triggerResultsFilter as streamPhysicsHIZeroBias3_datasetHIZeroBias5_selector
streamPhysicsHIZeroBias3_datasetHIZeroBias5_selector.hltResults = cms.InputTag('TriggerResults', '', 'HLT')
streamPhysicsHIZeroBias3_datasetHIZeroBias5_selector.l1tResults = cms.InputTag('')
streamPhysicsHIZeroBias3_datasetHIZeroBias5_selector.throw = cms.bool(False)
streamPhysicsHIZeroBias3_datasetHIZeroBias5_selector.triggerConditions = cms.vstring('HLT_HIZeroBias_part4_v6')
from HLTrigger.HLTfilters.triggerResultsFilter_cfi import triggerResultsFilter as streamPhysicsHIZeroBias3_datasetHIZeroBias6_selector
streamPhysicsHIZeroBias3_datasetHIZeroBias6_selector.hltResults = cms.InputTag('TriggerResults', '', 'HLT')
streamPhysicsHIZeroBias3_datasetHIZeroBias6_selector.l1tResults = cms.InputTag('')
streamPhysicsHIZeroBias3_datasetHIZeroBias6_selector.throw = cms.bool(False)
streamPhysicsHIZeroBias3_datasetHIZeroBias6_selector.triggerConditions = cms.vstring('HLT_HIZeroBias_part5_v6')
# stream PhysicsHIZeroBias4
from HLTrigger.HLTfilters.triggerResultsFilter_cfi import triggerResultsFilter as streamPhysicsHIZeroBias4_datasetHIZeroBias7_selector
streamPhysicsHIZeroBias4_datasetHIZeroBias7_selector.hltResults = cms.InputTag('TriggerResults', '', 'HLT')
streamPhysicsHIZeroBias4_datasetHIZeroBias7_selector.l1tResults = cms.InputTag('')
streamPhysicsHIZeroBias4_datasetHIZeroBias7_selector.throw = cms.bool(False)
streamPhysicsHIZeroBias4_datasetHIZeroBias7_selector.triggerConditions = cms.vstring('HLT_HIZeroBias_part6_v6')
from HLTrigger.HLTfilters.triggerResultsFilter_cfi import triggerResultsFilter as streamPhysicsHIZeroBias4_datasetHIZeroBias8_selector
streamPhysicsHIZeroBias4_datasetHIZeroBias8_selector.hltResults = cms.InputTag('TriggerResults', '', 'HLT')
streamPhysicsHIZeroBias4_datasetHIZeroBias8_selector.l1tResults = cms.InputTag('')
streamPhysicsHIZeroBias4_datasetHIZeroBias8_selector.throw = cms.bool(False)
streamPhysicsHIZeroBias4_datasetHIZeroBias8_selector.triggerConditions = cms.vstring('HLT_HIZeroBias_part7_v6')
# stream PhysicsHIZeroBias5
from HLTrigger.HLTfilters.triggerResultsFilter_cfi import triggerResultsFilter as streamPhysicsHIZeroBias5_datasetHIZeroBias10_selector
streamPhysicsHIZeroBias5_datasetHIZeroBias10_selector.hltResults = cms.InputTag('TriggerResults', '', 'HLT')
streamPhysicsHIZeroBias5_datasetHIZeroBias10_selector.l1tResults = cms.InputTag('')
streamPhysicsHIZeroBias5_datasetHIZeroBias10_selector.throw = cms.bool(False)
streamPhysicsHIZeroBias5_datasetHIZeroBias10_selector.triggerConditions = cms.vstring('HLT_HIZeroBias_part9_v6')
from HLTrigger.HLTfilters.triggerResultsFilter_cfi import triggerResultsFilter as streamPhysicsHIZeroBias5_datasetHIZeroBias9_selector
streamPhysicsHIZeroBias5_datasetHIZeroBias9_selector.hltResults = cms.InputTag('TriggerResults', '', 'HLT')
streamPhysicsHIZeroBias5_datasetHIZeroBias9_selector.l1tResults = cms.InputTag('')
streamPhysicsHIZeroBias5_datasetHIZeroBias9_selector.throw = cms.bool(False)
streamPhysicsHIZeroBias5_datasetHIZeroBias9_selector.triggerConditions = cms.vstring('HLT_HIZeroBias_part8_v6')
# stream PhysicsHIZeroBias6
from HLTrigger.HLTfilters.triggerResultsFilter_cfi import triggerResultsFilter as streamPhysicsHIZeroBias6_datasetHIZeroBias11_selector
streamPhysicsHIZeroBias6_datasetHIZeroBias11_selector.hltResults = cms.InputTag('TriggerResults', '', 'HLT')
streamPhysicsHIZeroBias6_datasetHIZeroBias11_selector.l1tResults = cms.InputTag('')
streamPhysicsHIZeroBias6_datasetHIZeroBias11_selector.throw = cms.bool(False)
streamPhysicsHIZeroBias6_datasetHIZeroBias11_selector.triggerConditions = cms.vstring('HLT_HIZeroBias_part10_v6')
from HLTrigger.HLTfilters.triggerResultsFilter_cfi import triggerResultsFilter as streamPhysicsHIZeroBias6_datasetHIZeroBias12_selector
streamPhysicsHIZeroBias6_datasetHIZeroBias12_selector.hltResults = cms.InputTag('TriggerResults', '', 'HLT')
streamPhysicsHIZeroBias6_datasetHIZeroBias12_selector.l1tResults = cms.InputTag('')
streamPhysicsHIZeroBias6_datasetHIZeroBias12_selector.throw = cms.bool(False)
streamPhysicsHIZeroBias6_datasetHIZeroBias12_selector.triggerConditions = cms.vstring('HLT_HIZeroBias_part11_v6')
| [
"[email protected]"
] | |
9e15da608c57204097c2be6bfac95cf84b72f297 | 8b71fdd80be5f22659cfb135019f5fd968a3cb77 | /supervised_learning/0x01-classification/19-deep_neural_network.py | 5e9cf4bd693450e09c970d9f96ab70baf7c24890 | [] | no_license | KamalTaleb/holbertonschool-machine_learning | 357e1c6bfcffa6672e12a3d518846b2a96747148 | 242b449b3a7a4051270ca32a22866a884754d141 | refs/heads/master | 2023-03-29T07:24:44.113412 | 2021-04-08T15:45:16 | 2021-04-08T15:45:16 | 320,596,666 | 0 | 0 | null | 2020-12-11T14:41:48 | 2020-12-11T14:29:03 | Python | UTF-8 | Python | false | false | 3,177 | py | #!/usr/bin/env python3
"""Contains the DeepNeuralNetwork class"""
import numpy as np
class DeepNeuralNetwork:
"""
DeepNeuralNetwork class
defines a deep neural network
performing binary classification:
"""
def __init__(self, nx, layers):
"""
Class constructor
:param nx: the number of input features
:param layers: list representing the number of nodes
in each layer of the network
"""
if not isinstance(nx, int):
raise TypeError("nx must be an integer")
if nx < 1:
raise ValueError("nx must be a positive integer")
if not isinstance(layers, list):
raise TypeError("layers must be a list of positive integers")
if len(layers) == 0:
raise TypeError('layers must be a list of positive integers')
self.nx = nx
self.layers = layers
self.__L = len(layers)
self.__cache = {}
self.__weights = {}
for i in range(self.L):
if not isinstance(layers[i], int) or layers[i] < 1:
raise TypeError("layers must be a list of positive integers")
W_key = "W{}".format(i + 1)
b_key = "b{}".format(i + 1)
self.weights[b_key] = np.zeros((layers[i], 1))
if i == 0:
f = np.sqrt(2 / nx)
self.__weights['W1'] = np.random.randn(layers[i], nx) * f
else:
f = np.sqrt(2 / layers[i - 1])
h = np.random.randn(layers[i], layers[i - 1]) * f
self.__weights[W_key] = h
@property
def L(self):
"""property to retrieve L"""
return self.__L
@property
def cache(self):
"""property to retrieve b1"""
return self.__cache
@property
def weights(self):
"""property to retrieve A1"""
return self.__weights
def forward_prop(self, X):
"""
Calculates the forward propagation of the neural network
:param X: a numpy.ndarray with shape (nx, m)
that contains the input data
:return: the output of the neural network and the cache,
respectively
"""
self.__cache['A0'] = X
for l in range(self.__L):
W_key = "W{}".format(l + 1)
b_key = "b{}".format(l + 1)
A_key_prev = "A{}".format(l)
A_key_forw = "A{}".format(l + 1)
Z = np.matmul(self.__weights[W_key], self.__cache[A_key_prev]) \
+ self.__weights[b_key]
self.__cache[A_key_forw] = 1 / (1 + np.exp(-Z))
return self.__cache[A_key_forw], self.__cache
def cost(self, Y, A):
"""
Calculates the cost of the model using logistic regression
:param Y: numpy.ndarray with shape (1, m)
that contains the correct labels for the input data
:param A: numpy.ndarray with shape (1, m)
containing the activated output of the neuron for each example
:return: the cost
"""
cost = -np.sum((Y * np.log(A)) +
((1 - Y) * np.log(1.0000001 - A))) / Y.shape[1]
return cost
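# Illustrative usage sketch (shapes follow the docstrings above; X and Y are assumed inputs):
# dnn = DeepNeuralNetwork(nx=784, layers=[5, 3, 1])
# A, cache = dnn.forward_prop(X)   # X: numpy.ndarray of shape (nx, m)
# cost = dnn.cost(Y, A)            # Y: numpy.ndarray of shape (1, m)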
| [
"[email protected]"
] | |
b94058827cb1372d534468bbbd322a04a99f959b | ebc7607785e8bcd6825df9e8daccd38adc26ba7b | /python/leetcode/dfs/dfs.py | 73df417ba9abc9d12885a4e0fbd4b12e54d6ba30 | [] | no_license | galid1/Algorithm | 18d1b72b0d5225f99b193e8892d8b513a853d53a | 5bd69e73332f4dd61656ccdecd59c40a2fedb4b2 | refs/heads/master | 2022-02-12T07:38:14.032073 | 2022-02-05T08:34:46 | 2022-02-05T08:34:46 | 179,923,655 | 3 | 0 | null | 2019-06-14T07:18:14 | 2019-04-07T05:49:06 | Python | UTF-8 | Python | false | false | 920 | py | import sys
# Iterative implementation using a stack (commented out)
# def dfs(g, start):
# for key in g.keys():
# g[key] = sorted(g[key], reverse=True)
# stack = []
# stack.append(start)
#
# visited = set()
# while stack:
# cur = stack.pop()
# if cur in visited:
# continue
#         # mark as visited and print
# print(cur)
# visited.add(cur)
#
# for link in g[cur]:
# stack.append(link)
def dfs(g, vertex, visited):
print(vertex, end=' ')
visited.add(vertex)
for next_vertex in g[vertex]:
if next_vertex not in visited:
dfs(g, next_vertex, visited)
n, m, s = map(int, sys.stdin.readline().strip().split(" "))
g = {i: [] for i in range(1, n+1)}
for _ in range(m):
k, v = map(int, sys.stdin.readline().strip().split(" "))
g[k].append(v)
g[v].append(k)
for key in g.keys():
g[key].sort()
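# Illustrative run (input format assumed from the reads above: "n m s", then m edges):
# stdin "4 3 1\n1 2\n1 3\n2 4" builds g = {1: [2, 3], 2: [1, 4], 3: [1], 4: [2]}
# and the call below prints: 1 2 4 3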
dfs(g, s, set()) | [
"[email protected]"
] | |
d508c9134e55dfefde90ff6147e63c22f1ef4da6 | 817c58b0d73d20638ea410512aa61b6b8837cf62 | /backend/backend/api_urls.py | 9a86a93b978ca7dfe66526f69f4c88b62bccf19d | [
"MIT"
] | permissive | ProjetoALES/ales-website | d64eaef437ba6da1df7d810b8f495ad141d41464 | 9dc5b460f5e780a1221d0ed5071043f088082395 | refs/heads/master | 2022-01-25T04:15:59.302899 | 2020-02-25T05:15:10 | 2020-02-25T05:15:10 | 242,913,529 | 0 | 0 | MIT | 2022-01-06T22:42:46 | 2020-02-25T04:57:07 | Vue | UTF-8 | Python | false | false | 403 | py | from django.urls import include, path
from .views import CurrentUserViewset
from student import api_urls as student_urls
from .router import base_router
base_router.register("me", CurrentUserViewset, basename="me")
app_name = "api"
urlpatterns = [
path(
"",
include((student_urls.urlpatterns, student_urls.app_name), namespace="student"),
)
]
urlpatterns += base_router.urls
| [
"[email protected]"
] | |
95293ca379d2cc6ee06bbdf758e30368f3d7f4e6 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /zDei9LFWkX9d7wXyb_17.py | 4d91f8876a0ba580b6c2e2347c216c95b10964bd | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py |
def malthusian(food_growth, pop_mult):
year=0
pop=100
food_prod = 100
while True:
food_prod+=food_growth
pop*=pop_mult
year+=1
if pop>food_prod:
break
return year
| [
"[email protected]"
] | |
15dc7407da6affbb37c0dd12b9349f1ca91bba81 | b2edef9270dfe69986c1f268d4bad7c4b1a54315 | /329.longest-increasing-path-in-a-matrix.py | cf326f9a2ae3e6f0ab5d2ed9fd1dbb47b1428f78 | [] | no_license | mrgrant/LeetCode | 9167f29462a072df4932201834073043cba99366 | 82132065ae1b4964a1e0ef913912f382471f4eb5 | refs/heads/master | 2021-12-03T00:16:42.070167 | 2021-11-30T15:02:29 | 2021-11-30T15:02:29 | 143,361,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,240 | py | #
# @lc app=leetcode id=329 lang=python
#
# [329] Longest Increasing Path in a Matrix
#
# @lc code=start
import collections
class Solution(object):
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
# dfs solution
# m = len(matrix)
# n = len(matrix[0])
# dp = [[0 for _ in range(n)] for _ in range(m)]
# def dfs(i, j):
# if not dp[i][j]:
# val = matrix[i][j]
# dp[i][j] = 1 + max(
# dfs(i-1,j) if i > 0 and matrix[i-1][j] < val else 0,
# dfs(i+1,j) if i < m - 1 and matrix[i+1][j] < val else 0,
# dfs(i,j-1) if j > 0 and matrix[i][j-1] < val else 0,
# dfs(i,j+1) if j < n - 1 and matrix[i][j+1] < val else 0,
# )
# return dp[i][j]
# for i in range(m):
# for j in range(n):
# dfs(i, j)
# return max(dp[i][j] for i in range(m) for j in range(n))
# bfs with topological sort solution
m = len(matrix)
n = len(matrix[0])
dir = [(1, 0), (-1, 0), (0, 1), (0, -1)]
indeg = {}
q = collections.deque()
for i in range(m):
for j in range(n):
cnt = 0
for dx, dy in dir:
mx = i + dx
my = j + dy
if 0 <= mx < m and 0 <= my < n and matrix[mx][my] < matrix[i][j]:
cnt += 1
indeg[(i, j)] = cnt
if cnt == 0:
q.append((i, j))
step = 0
while q:
size = len(q)
for _ in range(size):
x, y = q.popleft()
for dx, dy in dir:
mx = x + dx
my = y + dy
if 0 <= mx < m and 0 <= my < n and matrix[mx][my] > matrix[x][y] and (mx, my) in indeg:
indeg[(mx, my)] -= 1
if indeg[(mx, my)] == 0:
q.append((mx, my))
step += 1
return step
# @lc code=end
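# Example from the problem statement: [[9,9,4],[6,6,8],[2,1,1]] -> 4 (path 1 -> 2 -> 6 -> 9)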
| [
"[email protected]"
] | |
fc65a1a0c364aa804a4aa0ff883d6b6e9a1b0133 | 2279568acd5c7182ea4d287d20cd208b10c945a2 | /django/django_intro/form_submission/POST_form_submission/views.py | 6e7bdc0b8c7af63776f3aafbe8e7e54a0eb7dcd8 | [] | no_license | quangnguyen17/Python | fbc5cec0eb51e48c964022e1bd45fb585d2b60ec | 1920f757c5381480fc42f90946651aa0363fcaff | refs/heads/master | 2020-12-27T08:58:51.058504 | 2020-02-10T21:42:45 | 2020-02-10T21:43:12 | 237,815,684 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | from django.shortcuts import render, redirect
# Create your views here.
def index(request):
return render(request, "index.html")
def create_user(request):
request.session['name'] = request.POST['name']
request.session['email'] = request.POST['email']
return redirect("/success")
def success(request):
return render(request, "success.html")
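# Note: create_user reads POST fields 'name' and 'email', stores them in the
# session, and redirects to /success, which renders success.html via success().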
| [
"[email protected]"
] | |
3fa906d66b83757fe642d3e423f6b479a7ee5ff3 | 796344a0ecccb0c979348baef8b80a5146ba5ddd | /mysite/settings.py | bf17080ab7a8e8652cc8a877fd0148fa785c140d | [] | no_license | emantovanelli/my-first-blog | d5f2d2af6373196172acbefbbf090f80296b5d99 | 6c87da8db54576111976dc57109ca096fd694363 | refs/heads/master | 2021-01-10T12:36:48.237270 | 2016-01-04T13:52:34 | 2016-01-04T13:52:34 | 48,194,425 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,711 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u_&c4%-u=vm)^hsyx-30-hf9z#oa_@db=s89od@1_t^5c=x)1e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"[email protected]"
] | |
3eaa3f58fe481870dbf16431a9e71d5aa1952112 | fafb89a3552e4dbb47d134966462ef5f3f37f576 | /KEMP/v0.2/fdtd3d/util/common.py | 0cdb6d40a03475f495280bd9770af503cbc10707 | [] | no_license | EMinsight/fdtd_accelerate | 78fa1546df5264550d12fba3cf964838b560711d | a566c60753932eeb646c4a3dea7ed25c7b059256 | refs/heads/master | 2021-12-14T03:26:52.070069 | 2012-07-25T08:25:21 | 2012-07-25T08:25:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,030 | py | import doctest
import unittest
def check_type(arg_name, arg, arg_type, element_type=None):
"""
Check the type of the argument
If the type is mismatch, the TypeError exception is raised.
When the 'arg's type is a list or a tuple,
each element's type is also checked.
>>> check_type('arg_name', 2, int)
>>> check_type('arg_name', 3.4, (int, float))
>>> check_type('arg_name', 'xy', str)
>>> check_type('arg_name', (1.2, 2.3), tuple, float)
>>> check_type('arg_name', ['a', 'b'], (list, tuple), str)
"""
if not isinstance(arg, arg_type):
raise TypeError("argument '%s' type must be a %s : %s is given" % \
(arg_name, repr(arg_type), type(arg)) )
if isinstance(arg, (list, tuple)):
if element_type == None:
raise TypeError( \
"\n\tWhen the 'arg's type is a list or a tuple, \
\n\targumnet 'element_type' must be specified." )
for element in arg:
if not isinstance(element, element_type):
raise TypeError("argument '%s's element type must be a %s : %s is given" % \
(arg_name, repr(element_type), type(element)) )
def check_value(arg_name, arg, value):
"""
Check if the argument is one of the values
If the value is mismatch, the ValueError exception is raised.
>>> check_value('arg_name', 'a', ('a', 'b', 'ab'))
"""
if not arg in convert_to_tuple(value):
repr_val = repr(value)
if isinstance(value, (list, tuple)) and len(repr_val) > 40:
repr_val = str(value[:2] + ['...'] + value[-2:]).replace("'", '')
raise ValueError("argument '%s' value must be one of %s : %s is given" % \
(arg_name, repr_val, repr(arg)) )
def binary_prefix_nbytes(nbytes):
"""
Return a (converted nbytes, binary prefix) pair for the nbytes
>>> binary_prefix_nbytes(2e9)
(1.862645149230957, 'GiB')
>>> binary_prefix_nbytes(2e6)
(1.9073486328125, 'MiB')
>>> binary_prefix_nbytes(2e3)
(1.953125, 'KiB')
>>> binary_prefix_nbytes(2)
(2, 'Bytes')
"""
check_type('nbytes', nbytes, (int, float))
if nbytes >= 1024**3:
value = float(nbytes)/(1024**3)
prefix_str = 'GiB'
elif nbytes >= 1024**2:
value = float(nbytes)/(1024**2)
prefix_str = 'MiB'
elif nbytes >= 1024:
value = float(nbytes)/1024
prefix_str = 'KiB'
else:
value = nbytes
prefix_str = 'Bytes'
return value, prefix_str
def replace_template_code(code, old_list, new_list):
"""
Replace the macros in the template code
>>> code = '''AA, BB'''
>>> replace_template_code(code, ['AA', 'BB'], ['aa', str(22)])
'aa, 22'
"""
check_type('code', code, str)
check_type('old_list', old_list, (list, tuple), str)
check_type('new_list', new_list, (list, tuple), str)
assert len(old_list) == len(new_list), \
"arguments 'old_list' and 'new_list' do not have same length"
for old, new in zip(old_list, new_list):
code = code.replace(old, new)
return code
def slice_index_two_points(pt0, pt1):
"""
Return the tuple of slice indices from two points
>>> slice_index_two_points((0, 0, 0), (10, 11, 12))
(slice(0, 11, None), slice(0, 12, None), slice(0, 13, None))
>>> slice_index_two_points((0, 0, 0), (10, 0, 12))
(slice(0, 11, None), 0, slice(0, 13, None))
>>> slice_index_two_points((1, 2, 3), (1, 2, 3))
(1, 2, 3)
"""
check_type('pt0', pt0, (list, tuple), int)
check_type('pt1', pt1, (list, tuple), int)
slidx = []
for p0, p1 in zip(pt0, pt1):
if p0 == p1:
slidx.append(p0)
else:
slidx.append(slice(p0, p1+1))
return tuple(slidx)
def shape_two_points(pt0, pt1, mul_x=1, is_dummy=False):
"""
Return the shape from two points
>>> shape_two_points((0, 0, 0), (10, 11, 12))
(11, 12, 13)
>>> shape_two_points((0, 0, 0), (10, 0, 12))
(11, 13)
>>> shape_two_points((0, 0, 0), (0, 0, 12))
(13,)
>>> shape_two_points((1, 2, 3), (1, 2, 3))
(1,)
>>> shape_two_points((0, 0, 0), (10, 11, 12), 2)
(22, 12, 13)
>>> shape_two_points((0, 0, 0), (10, 0, 12), 3)
(33, 13)
>>> shape_two_points((0, 0, 0), (0, 0, 12), 4)
(52,)
>>> shape_two_points((1, 2, 3), (1, 2, 3), 2)
(2,)
>>> shape_two_points((0, 0, 0), (0, 0, 12), is_dummy=True)
(1, 1, 13)
>>> shape_two_points((1, 2, 3), (1, 2, 3), is_dummy=True)
(1, 1, 1)
"""
check_type('pt0', pt0, (list, tuple), int)
check_type('pt1', pt1, (list, tuple), int)
check_type('mul_x', mul_x, int)
shape = []
for p0, p1 in zip(pt0, pt1):
value = abs(p1 - p0) + 1
if value == 1:
if is_dummy:
shape.append(value)
else:
shape.append(value)
if shape == []:
return (mul_x,)
else:
shape[0] *= mul_x
return tuple(shape)
def convert_to_tuple(arg):
"""
Return the tuple which is converted from the arbitrary argument
>>> convert_to_tuple(3)
(3,)
>>> convert_to_tuple(['a', 'b'])
('a', 'b')
"""
if isinstance(arg, (list, tuple)):
return tuple(arg)
else:
return (arg,)
def intersection_two_slices(ns, slices0, slices1):
"""
Return the slice which is overlapped slices
>>> ns = (10, 20, 30)
>>> slices0 = (slice(-2,None), slice(None,None), slice(None,None))
>>> slices1 = (slice(None,None), slice(None,None), slice(None,None))
>>> intersection_two_slices(ns, slices0, slices1)
(slice(8, 10, None), slice(0, 20, None), slice(0, 30, None))
>>> slices1 = (slice(-4,-2), slice(None,None), slice(None,None))
>>> intersection_two_slices(ns, slices0, slices1)
(slice(0, 0, None), slice(0, 20, None), slice(0, 30, None))
>>> slices0 = (slice(5, 6), slice(7, 12), slice(12, 17))
>>> slices1 = (slice(5, 6), slice(7, 12), slice(12, 17))
>>> intersection_two_slices(ns, slices0, slices1)
(slice(5, 6, None), slice(7, 12, None), slice(12, 17, None))
"""
check_type('ns', ns, (list, tuple), int)
check_type('slices0', slices0, (list, tuple), slice)
check_type('slices1', slices1, (list, tuple), slice)
assert len(ns) == len(slices0) == len(slices1), \
'The argument lists must have same length. %s, %s, %s' % \
(len(ns), len(slices0), len(slices1))
slices = []
for n, sl0, sl1 in zip(ns, slices0, slices1):
set0 = set( range(*sl0.indices(n)) )
set1 = set( range(*sl1.indices(n)) )
overlap = sorted( list( set0.intersection(set1) ) )
if len(overlap) > 0:
slices.append( slice(overlap[0], overlap[-1]+1) )
else:
slices.append( slice(0, 0) )
return tuple(slices)
def intersection_two_lines(line0, line1):
"""
Return the two lines which is overlapped
>>> intersection_two_lines((8, 9), (0, 9))
(8, 9)
>>> intersection_two_lines((0, 1), (1, 2))
(1, 1)
>>> intersection_two_lines((0, 1), (2, 3))
"""
check_type('line0', line0, (list, tuple), int)
check_type('line1', line1, (list, tuple), int)
x0, x1 = line0
x2, x3 = line1
set0 = set( range(x0, x1+1) )
set1 = set( range(x2, x3+1) )
overlap = sorted( list( set0.intersection(set1) ) )
if len(overlap) > 0:
return (overlap[0], overlap[-1])
else:
return None
def intersection_two_regions(pt0, pt1, pt2, pt3):
"""
Return the two points(tuple) which is overlapped regions
>>> intersection_two_regions((8,0,0), (9,19,29), (0,0,0), (9,19,29))
((8, 0, 0), (9, 19, 29))
>>> intersection_two_regions((0,0,0), (1,19,29), (1,0,0), (2,19,29))
((1, 0, 0), (1, 19, 29))
>>> intersection_two_regions((0,0,0), (1,19,29), (2,0,0), (3,19,29))
"""
check_type('pt0', pt0, (list, tuple), int)
check_type('pt1', pt1, (list, tuple), int)
check_type('pt2', pt2, (list, tuple), int)
check_type('pt3', pt3, (list, tuple), int)
assert len(pt0) == len(pt1) == len(pt2) == len(pt3), \
'The points must have same length.'
pt4, pt5 = [], []
for p0, p1, p2, p3 in zip(pt0, pt1, pt2, pt3):
overlap = intersection_two_lines((p0, p1), (p2, p3))
if overlap != None:
pt4.append(overlap[0])
pt5.append(overlap[-1])
if len(pt4) == len(pt5) == len(pt0):
return (tuple(pt4), tuple(pt5))
else:
return None
def append_instance(instance_list, instance):
priority_dict = { \
'core':0, 'current':1, \
'pml':2, 'incident':3, \
'pbc':4, 'mpi':5}
new = priority_dict[instance.priority_type]
index = len(instance_list)
for i, inst in enumerate(instance_list):
old = priority_dict[inst.priority_type]
if new < old:
index = i
break
instance_list.insert(index, instance)
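# Illustrative ordering (assumes each instance exposes a .priority_type attribute):
# with instance_list = [core, pml, mpi] (by priority_type), appending an instance
# whose priority_type is 'incident' inserts it between 'pml' and 'mpi'.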
class TestFunctions(unittest.TestCase):
def test_doctest(self):
doctest.testmod()
def test_check_type(self):
self.assertRaises(TypeError, check_type, '', 2, float)
self.assertRaises(TypeError, check_type, '', 3.2, str)
self.assertRaises(TypeError, check_type, '', 3.4, (int, str))
self.assertRaises(TypeError, check_type, '', [1, 2], list)
self.assertRaises(TypeError, check_type, '', (1.2, 2.3), tuple, int)
self.assertRaises(TypeError, check_type, '', ['a', 'b'], tuple, int)
self.assertRaises(TypeError, check_type, '', ['a', 'b', {'c':3}], list, str)
def test_check_value(self):
self.assertRaises(ValueError, check_value, '', 'a', ('b', 'c'))
def test_replace_template_code(self):
self.assertRaises(AssertionError, replace_template_code, \
'AA, BB', ['AA', 'BB'], ['a', 'b', 'c'])
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
e21df57f20a42deffed822307c1bf7b5614cf75f | 0274f2c465f110598456624581f569331221068b | /impl/gps/gps_operations.py | 64a7a924c36f2bd4394f6ff191375c071b57d962 | [] | no_license | bluecube/thesis | 63e745076c86a3122e9c3d7ff42ff22e32921860 | 588db206e64de9b681372fea9a70d3fa2aa598df | refs/heads/master | 2016-09-06T00:01:03.840006 | 2013-05-27T09:36:51 | 2013-05-27T09:36:51 | 1,376,241 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,060 | py | from __future__ import unicode_literals
import collections
import logging
from . import sirf
from . import sirf_messages
if bytes == str:
# This branch is here for python 2.x and to avoid
# the cost of calls to sirf.bytes_to_message_id
# This whole business is a little ugly :-)
_message_id_filter = chr
else:
    _message_id_filter = lambda x: x
assert _message_id_filter(97) == b'a'[0]
class GpsOperations(collections.Iterator):
"""
Operations common to both real gps and recording.
"""
def __init__(self):
self._logger = logging.getLogger('localization.gps')
def _read_binary_sirf_msg(self):
"""
Return bytes with a single valid message read from the port
(the message payload).
"""
raise NotImplemented()
def set_message_rate(self, msg_type, rate):
"""
Set how often a message gets sent by the SIRF chip.
Rate is integer, meaning number of seconds, 0 means disabled.
This is a no-op unless we are on a real gps.
"""
pass
def try_read_message(self):
"""
Try to read one SIRF message from the gps.
Raises UnrecognizedMessageException.
"""
return sirf.from_bytes(self._read_binary_sirf_msg())
def read_message(self):
"""
Read one recognized SIRF message from the gps.
"""
while True:
try:
return sirf.from_bytes(self._read_binary_sirf_msg())
except sirf.UnrecognizedMessageException:
pass
def read_specific_message(self, msg_type):
"""
Discards messages until one of given type is received.
May block for a long time, careful with this.
"""
if not issubclass(msg_type, sirf_messages._SirfReceivedMessageBase):
raise TypeError("msg_type must be a message type.")
msg = None
while not isinstance(msg, msg_type):
msg = self.read_message()
#print(msg)
return msg
def filtered_messages(self, msg_type_set):
"""
Returns iterator of messages of types in msg_type_set.
Faster than filtering using isinstance.
"""
ids = {msg_type.get_message_id() for msg_type in msg_type_set}
while True:
data = self._read_binary_sirf_msg()
if sirf.bytes_to_message_id(data) in ids:
yield sirf.from_bytes(data)
def split_to_cycles(self, msg_type_filter = None, separation = 0.5):
"""
Returns iterator of messages grouped by the measurement cycles
and optionally filtered only to message types contained in msg_type_filter.
"""
        ids = {msg_type.get_message_id() for msg_type in (msg_type_filter or ())}
if not len(ids):
class _Everything:
def __contains__(self, x):
return True
ids = _Everything()
out = []
last_msg_time = float("nan")
while True:
data = self._read_binary_sirf_msg()
if sirf.bytes_to_message_id(data) in ids:
out.append(sirf.from_bytes(data))
if self.last_msg_time - last_msg_time > separation:
yield out
out = []
last_msg_time = self.last_msg_time
def loop(self, observers, cycle_end_callback = None, cycle_end_threshold = 0.3, log_status = 600):
"""
Read messages in infinite loop and notify observers.
observers:
Iterable of observers that will be notified as messages
are received
cycle_end_callback:
Callable that will be called after the measurement cycle ends, or None.
Block end callback will only be called when a message arrives with time distance larger
than block end threshold, not immediately after the time runs out!
cycle_end_threshold:
How long a pause between two messages must be to be taken as a start of new measurement cycle.
log_status:
After how many cycles should the status be logged.
If this is false, then no logging is performed.
"""
observers = list(observers)
message_ids = {}
for observer in observers:
for message_type in observer.observed_message_types():
filtered_id = _message_id_filter(message_type.get_message_id())
message_ids.setdefault(filtered_id, []).append(observer)
if log_status:
status_id = _message_id_filter(sirf_messages.GeodeticNavigationData.get_message_id())
else:
status_id = None
status_remaining = 0
last_msg_time = float("nan")
while True:
try:
binary = self._read_binary_sirf_msg()
except StopIteration:
return
if cycle_end_callback is not None and self.last_msg_time - last_msg_time > cycle_end_threshold:
cycle_end_callback()
last_msg_time = self.last_msg_time
message_id = binary[0]
if status_remaining <= 0 and message_id == status_id:
message = sirf.from_bytes(binary)
self._logger.info(message.status_line())
status_remaining = log_status
if message_id not in message_ids:
continue
else:
if message_id == status_id:
status_remaining -= 1
if message_id not in message_ids:
continue
else:
message = sirf.from_bytes(binary)
for observer in message_ids[message_id]:
observer(message)
def __next__(self):
"""
We want to support iterator protocol.
"""
return self.read_message()
def next(self):
"""
Iterator protocol for python 2.x
"""
return self.read_message()
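# Illustrative usage sketch (observer objects are assumed; see loop() above):
# gps.loop(
#     observers=[nav_observer],          # each exposes observed_message_types() and is callable
#     cycle_end_callback=end_of_cycle,   # invoked when a measurement cycle ends
#     log_status=600)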
| [
"[email protected]"
] | |
7c38e135fbe87c2b8a76e963211c110dcae4f12e | 9bac4cd580ecd3152b828d3bb421e648f2156361 | /_admin_panel/apuzzles/forms.py | c6fc0066f8a3ab00f8123e3fdbbcb435b61c62ae | [] | no_license | sharingsimplethoughts/mygame2 | 2d3b5febfc950faeec535347fbdaff39191a4805 | d0432bdbf74b03fb7244ff8911f04b485aff016f | refs/heads/master | 2023-05-29T04:01:04.104641 | 2020-07-07T14:38:07 | 2020-07-07T14:38:07 | 276,071,526 | 0 | 0 | null | 2021-06-11T18:12:15 | 2020-06-30T10:45:33 | HTML | UTF-8 | Python | false | false | 2,626 | py | from django import forms
from puzzles.models import *
class PuzzlesAddEditForm(forms.Form):
def clean(self):
les_name = self.data['les_name']
les_cat = self.data['les_cat']
les_desc = self.data['les_desc']
les_hint = self.data['les_hint']
les_exp = self.data['les_exp']
les_learn = self.data['les_learn']
if not les_name or les_name=="":
raise forms.ValidationError('Please provide puzzle name')
if not les_cat or les_cat=="":
raise forms.ValidationError('Please choose puzzle category')
if les_cat:
obj = PuzzleCategory.objects.filter(id=les_cat).first()
if not obj:
raise forms.ValidationError('Please choose valid puzzle category')
if not les_desc or les_desc=="":
raise forms.ValidationError('Please provide puzzle description')
if not les_hint or les_hint=="":
raise forms.ValidationError('Please provide puzzle hint')
if not les_exp or les_exp=="":
raise forms.ValidationError('Please provide puzzle explanation')
if not les_learn or les_learn=="":
raise forms.ValidationError('Please provide puzzle learning text')
class PuzzleCategoriesAddForm(forms.Form):
def clean(self):
cr_name=self.data['cr_name']
cr_list=self.data['cr_list']
if not cr_name or cr_name=="":
raise forms.ValidationError('Please provide category name')
# if not cr_list or cr_list=="," or cr_list=="":
# raise forms.ValidationError('Please select puzzles')
lcr = PuzzleCategory.objects.filter(name=cr_name).first()
if lcr:
raise forms.ValidationError('This category name already exists')
class PuzzleCategoriesEditForm(forms.Form):
# def __init__(self,*args, **kwargs):
# self.cr_id=kwargs.pop('cr_id',None)
# super(PuzzleCategoriesEditForm,self).__init__(*args,**kwargs)
def clean(self):
cr_id=self.data['cr_id']
cr_name=self.data['cr_name']
cr_list=self.data['cr_list']
if not cr_name or cr_name=="":
raise forms.ValidationError('Please provide category name')
# if not cr_list or cr_list=="," or cr_list=="":
# raise forms.ValidationError('Please select puzzles')
lcobj = PuzzleCategory.objects.filter(id=cr_id).first()
if lcobj.name!=cr_name:
lcr = PuzzleCategory.objects.filter(name=cr_name).first()
if lcr:
                raise forms.ValidationError('This category name is already taken')
| [
"[email protected]"
] | |
d2139d040c78dcd1ac92ed08ba7de06fe9427ce8 | 60962534e8f0fbbe87732ff38f613a3f5fc5342f | /largestRectangleHistogram/main.py | 8977284fc74c5d2d742e622f35fdafa1e1048086 | [] | no_license | publicbull/leetcode | 4ebde395814e8ed9ce8bc8576d3c15d224ee3722 | 73fb5c1d77002cc24a2ea2db58e679cf2bd1c767 | refs/heads/master | 2020-12-24T15:22:12.457530 | 2013-07-03T22:03:15 | 2013-07-03T22:03:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | '''
Created on May 15, 2013
@author: Administrator
'''
def largestRectangle(data):
def _largestRectangle(left, right, maxRect):
if left > right:
return
currMin = data[left]
currMinPos = left
for i in range(left, right + 1):
if currMin > data[i]:
currMin = data[i]
currMinPos = i
maxRect[0] = currMin * (right - left + 1)
maxRect[1], maxRect[2] = left, right
max1 = [-1, 0, 0]
_largestRectangle(left, currMinPos - 1, max1)
if maxRect[0] < max1[0]:
maxRect[0], maxRect[1], maxRect[2] = max1[0], max1[1], max1[2]
max2 = [-1, 0, 0]
_largestRectangle(currMinPos + 1, right, max2)
if maxRect[0] < max2[0]:
maxRect[0], maxRect[1], maxRect[2] = max2[0], max2[1], max2[2]
size = len(data)
maxRect = [-1, 0, 0]
_largestRectangle(0, size - 1, maxRect)
return maxRect
if __name__ == '__main__':
data = [2, 1, 5, 6, 2, 3]
print(largestRectangle(data))
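    # expected output: [10, 2, 3] -- max area 10 from the bars of height 5 and 6 at indices 2..3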
| [
"[email protected]"
] | |
c884e21d21ea76207943ee8ca33e776e8a9ee7e0 | 2d58c1351ab970eb55f4832b09582592e96468d5 | /p31.py | 4a9857e0b2d677a5a819dd859be186c88da7b955 | [] | no_license | 0x0400/LeetCode | 832bc971c2cae9eecb55f5b14e8c34eaec0d9e26 | 94bb9fedc908490cc52d87def317c057fadaeceb | refs/heads/master | 2023-02-24T20:13:11.345873 | 2023-02-10T16:46:31 | 2023-02-10T16:46:31 | 84,653,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | # https://leetcode.com/problems/next-permutation/
class Solution(object):
def nextPermutation(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
idx = len(nums) - 2
while idx >= 0:
if nums[idx] >= nums[idx+1]:
idx -= 1
continue
minIdx = len(nums) -1
while minIdx > idx:
if nums[minIdx] <= nums[idx]:
minIdx -= 1
continue
break
nums[idx], nums[minIdx] = nums[minIdx], nums[idx]
break
idx += 1
lastIdx = len(nums) - 1
i = 0
while idx + i < lastIdx - i:
nums[idx+i], nums[lastIdx-i] = nums[lastIdx-i], nums[idx+i]
i += 1
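# Worked examples: [1, 2, 3] -> [1, 3, 2]; [3, 2, 1] -> [1, 2, 3]; [1, 1, 5] -> [1, 5, 1]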
| [
"[email protected]"
] | |
25510828bc0661930551ad3acd71ac4f3ed9447b | 5eca88bd5e2d9f5bb92d0a5cdeb39032015c4b92 | /python/batch_uninstall.py | 448a61a5d18cfc0162b9ddc2f57929bf58a6b571 | [] | no_license | cet4meiguo/AndroidTestPyScripts | 09e105cc40389ec530af99aa3ce9be43378ea756 | ae6864a3cca2f8c1486e67faf069c9c137deedb4 | refs/heads/master | 2021-06-18T03:24:24.750998 | 2017-03-04T14:16:27 | 2017-03-04T14:16:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
# Batch-uninstall all third-party apps installed on the connected device
def uninstall():
os.popen("adb wait-for-device")
print "start uninstall..."
for packages in os.popen("adb shell pm list packages -3").readlines():
packageName = packages.split(":")[-1].splitlines()[0]
os.popen("adb uninstall %s" %packageName)
        print "removed %s successfully." % packageName
if __name__ == "__main__":
uninstall()
print " "
    print "All third-party applications were uninstalled successfully."
| [
"[email protected]"
] | |
28373c3f1d49ddd45b27342982bd5c545849ae0f | d9fa8fee100b4a944009826f181896ba35985892 | /venv/lib/python3.7/site-packages/django/core/management/__init__.py | 503cd67ca8c35035b13d5770f988f988ac6c20cf | [] | no_license | Garima2505/swe1_app | 753fe53599e6535955f47305dc3336b48d97c047 | 56060792675af8946f4fb31d4ddeacb47586185f | refs/heads/master | 2022-12-23T03:55:10.667290 | 2019-10-16T20:58:07 | 2019-10-16T20:58:07 | 212,366,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,152 | py | import functools
import os
import pkgutil
import sys
from collections import OrderedDict, defaultdict
from difflib import get_close_matches
from importlib import import_module
import django
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import (
BaseCommand,
CommandError,
CommandParser,
handle_default_options,
)
from django.core.management.color import color_style
from django.utils import autoreload
def find_commands(management_dir):
"""
Given a path to a management directory, return a list of all the command
names that are available.
"""
command_dir = os.path.join(management_dir, "commands")
return [
name
for _, name, is_pkg in pkgutil.iter_modules([command_dir])
if not is_pkg and not name.startswith("_")
]
def load_command_class(app_name, name):
"""
Given a command name and an application name, return the Command
class instance. Allow all errors raised by the import process
(ImportError, AttributeError) to propagate.
"""
module = import_module("%s.management.commands.%s" % (app_name, name))
return module.Command()
@functools.lru_cache(maxsize=None)
def get_commands():
"""
Return a dictionary mapping command names to their callback applications.
Look for a management.commands package in django.core, and in each
installed application -- if a commands package exists, register all
commands in that package.
Core commands are always included. If a settings module has been
specified, also include user-defined commands.
The dictionary is in the format {command_name: app_name}. Key-value
pairs from this dictionary can then be used in calls to
load_command_class(app_name, command_name)
If a specific version of a command must be loaded (e.g., with the
startapp command), the instantiated module can be placed in the
dictionary in place of the application name.
The dictionary is cached on the first call and reused on subsequent
calls.
"""
commands = {name: "django.core" for name in find_commands(__path__[0])}
if not settings.configured:
return commands
for app_config in reversed(list(apps.get_app_configs())):
path = os.path.join(app_config.path, "management")
commands.update({name: app_config.name for name in find_commands(path)})
return commands
def call_command(command_name, *args, **options):
"""
Call the given command, with the given options and args/kwargs.
This is the primary API you should use for calling specific commands.
`command_name` may be a string or a command object. Using a string is
preferred unless the command object is required for further processing or
testing.
Some examples:
call_command('migrate')
call_command('shell', plain=True)
call_command('sqlmigrate', 'myapp')
from django.core.management.commands import flush
cmd = flush.Command()
call_command(cmd, verbosity=0, interactive=False)
# Do something with cmd ...
"""
if isinstance(command_name, BaseCommand):
# Command object passed in.
command = command_name
command_name = command.__class__.__module__.split(".")[-1]
else:
# Load the command object by name.
try:
app_name = get_commands()[command_name]
except KeyError:
raise CommandError("Unknown command: %r" % command_name)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
command = app_name
else:
command = load_command_class(app_name, command_name)
# Simulate argument parsing to get the option defaults (see #10080 for details).
parser = command.create_parser("", command_name)
# Use the `dest` option name from the parser option
opt_mapping = {
min(s_opt.option_strings).lstrip("-").replace("-", "_"): s_opt.dest
for s_opt in parser._actions
if s_opt.option_strings
}
arg_options = {opt_mapping.get(key, key): value for key, value in options.items()}
parse_args = [str(a) for a in args]
# Any required arguments which are passed in via **options must be passed
# to parse_args().
parse_args += [
"{}={}".format(min(opt.option_strings), arg_options[opt.dest])
for opt in parser._actions
if opt.required and opt.dest in options
]
defaults = parser.parse_args(args=parse_args)
defaults = dict(defaults._get_kwargs(), **arg_options)
# Raise an error if any unknown options were passed.
stealth_options = set(command.base_stealth_options + command.stealth_options)
dest_parameters = {action.dest for action in parser._actions}
valid_options = (dest_parameters | stealth_options).union(opt_mapping)
unknown_options = set(options) - valid_options
if unknown_options:
raise TypeError(
"Unknown option(s) for %s command: %s. "
"Valid options are: %s."
% (
command_name,
", ".join(sorted(unknown_options)),
", ".join(sorted(valid_options)),
)
)
# Move positional args out of options to mimic legacy optparse
args = defaults.pop("args", ())
if "skip_checks" not in options:
defaults["skip_checks"] = True
return command.execute(*args, **defaults)
class ManagementUtility:
"""
Encapsulate the logic of the django-admin and manage.py utilities.
"""
def __init__(self, argv=None):
self.argv = argv or sys.argv[:]
self.prog_name = os.path.basename(self.argv[0])
if self.prog_name == "__main__.py":
self.prog_name = "python -m django"
self.settings_exception = None
def main_help_text(self, commands_only=False):
"""Return the script's main help text, as a string."""
if commands_only:
usage = sorted(get_commands())
else:
usage = [
"",
"Type '%s help <subcommand>' for help on a specific subcommand."
% self.prog_name,
"",
"Available subcommands:",
]
commands_dict = defaultdict(lambda: [])
for name, app in get_commands().items():
if app == "django.core":
app = "django"
else:
app = app.rpartition(".")[-1]
commands_dict[app].append(name)
style = color_style()
for app in sorted(commands_dict):
usage.append("")
usage.append(style.NOTICE("[%s]" % app))
for name in sorted(commands_dict[app]):
usage.append(" %s" % name)
# Output an extra note if settings are not properly configured
if self.settings_exception is not None:
usage.append(
style.NOTICE(
"Note that only Django core commands are listed "
"as settings are not properly configured (error: %s)."
% self.settings_exception
)
)
return "\n".join(usage)
def fetch_command(self, subcommand):
"""
Try to fetch the given subcommand, printing a message with the
appropriate command called from the command line (usually
"django-admin" or "manage.py") if it can't be found.
"""
# Get commands outside of try block to prevent swallowing exceptions
commands = get_commands()
try:
app_name = commands[subcommand]
except KeyError:
if os.environ.get("DJANGO_SETTINGS_MODULE"):
# If `subcommand` is missing due to misconfigured settings, the
# following line will retrigger an ImproperlyConfigured exception
# (get_commands() swallows the original one) so the user is
# informed about it.
settings.INSTALLED_APPS
else:
sys.stderr.write("No Django settings specified.\n")
possible_matches = get_close_matches(subcommand, commands)
sys.stderr.write("Unknown command: %r" % subcommand)
if possible_matches:
sys.stderr.write(". Did you mean %s?" % possible_matches[0])
sys.stderr.write("\nType '%s help' for usage.\n" % self.prog_name)
sys.exit(1)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, subcommand)
return klass
def autocomplete(self):
"""
Output completion suggestions for BASH.
The output of this function is passed to BASH's `COMREPLY` variable and
treated as completion suggestions. `COMREPLY` expects a space
separated string as the result.
The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
to get information about the cli input. Please refer to the BASH
man-page for more information about this variables.
Subcommand options are saved as pairs. A pair consists of
the long option string (e.g. '--exclude') and a boolean
value indicating if the option requires arguments. When printing to
stdout, an equal sign is appended to options which require arguments.
Note: If debugging this function, it is recommended to write the debug
output in a separate file. Otherwise the debug output will be treated
and formatted as potential completion suggestions.
"""
# Don't complete if user hasn't sourced bash_completion file.
if "DJANGO_AUTO_COMPLETE" not in os.environ:
return
cwords = os.environ["COMP_WORDS"].split()[1:]
cword = int(os.environ["COMP_CWORD"])
try:
curr = cwords[cword - 1]
except IndexError:
curr = ""
subcommands = [*get_commands(), "help"]
options = [("--help", False)]
# subcommand
if cword == 1:
print(" ".join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
# subcommand options
# special case: the 'help' subcommand has no options
elif cwords[0] in subcommands and cwords[0] != "help":
subcommand_cls = self.fetch_command(cwords[0])
# special case: add the names of installed apps to options
if cwords[0] in ("dumpdata", "sqlmigrate", "sqlsequencereset", "test"):
try:
app_configs = apps.get_app_configs()
# Get the last part of the dotted path as the app name.
options.extend((app_config.label, 0) for app_config in app_configs)
except ImportError:
# Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
# user will find out once they execute the command.
pass
parser = subcommand_cls.create_parser("", cwords[0])
options.extend(
(min(s_opt.option_strings), s_opt.nargs != 0)
for s_opt in parser._actions
if s_opt.option_strings
)
# filter out previously specified options from available options
prev_opts = {x.split("=")[0] for x in cwords[1 : cword - 1]}
options = (opt for opt in options if opt[0] not in prev_opts)
# filter options by current input
options = sorted((k, v) for k, v in options if k.startswith(curr))
for opt_label, require_arg in options:
# append '=' to options which require args
if require_arg:
opt_label += "="
print(opt_label)
# Exit code of the bash completion function is never passed back to
# the user, so it's safe to always exit with 0.
# For more details see #25420.
sys.exit(0)
def execute(self):
"""
Given the command-line arguments, figure out which subcommand is being
run, create a parser appropriate to that command, and run it.
"""
try:
subcommand = self.argv[1]
except IndexError:
subcommand = "help" # Display help if no arguments were given.
# Preprocess options to extract --settings and --pythonpath.
# These options could affect the commands that are available, so they
# must be processed early.
parser = CommandParser(
usage="%(prog)s subcommand [options] [args]",
add_help=False,
allow_abbrev=False,
)
parser.add_argument("--settings")
parser.add_argument("--pythonpath")
parser.add_argument("args", nargs="*") # catch-all
try:
options, args = parser.parse_known_args(self.argv[2:])
handle_default_options(options)
except CommandError:
pass # Ignore any option errors at this point.
try:
settings.INSTALLED_APPS
except ImproperlyConfigured as exc:
self.settings_exception = exc
except ImportError as exc:
self.settings_exception = exc
if settings.configured:
# Start the auto-reloading dev server even if the code is broken.
# The hardcoded condition is a code smell but we can't rely on a
# flag on the command class because we haven't located it yet.
if subcommand == "runserver" and "--noreload" not in self.argv:
try:
autoreload.check_errors(django.setup)()
except Exception:
# The exception will be raised later in the child process
# started by the autoreloader. Pretend it didn't happen by
# loading an empty list of applications.
apps.all_models = defaultdict(OrderedDict)
apps.app_configs = OrderedDict()
apps.apps_ready = apps.models_ready = apps.ready = True
# Remove options not compatible with the built-in runserver
# (e.g. options for the contrib.staticfiles' runserver).
# Changes here require manually testing as described in
# #27522.
_parser = self.fetch_command("runserver").create_parser(
"django", "runserver"
)
_options, _args = _parser.parse_known_args(self.argv[2:])
for _arg in _args:
self.argv.remove(_arg)
# In all other cases, django.setup() is required to succeed.
else:
django.setup()
self.autocomplete()
if subcommand == "help":
if "--commands" in args:
sys.stdout.write(self.main_help_text(commands_only=True) + "\n")
elif not options.args:
sys.stdout.write(self.main_help_text() + "\n")
else:
self.fetch_command(options.args[0]).print_help(
self.prog_name, options.args[0]
)
# Special-cases: We want 'django-admin --version' and
# 'django-admin --help' to work, for backwards compatibility.
elif subcommand == "version" or self.argv[1:] == ["--version"]:
sys.stdout.write(django.get_version() + "\n")
elif self.argv[1:] in (["--help"], ["-h"]):
sys.stdout.write(self.main_help_text() + "\n")
else:
self.fetch_command(subcommand).run_from_argv(self.argv)
def execute_from_command_line(argv=None):
"""Run a ManagementUtility."""
utility = ManagementUtility(argv)
utility.execute()
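# Illustrative sketch (assumptions, not part of Django's source): a project's
# manage.py typically delegates to execute_from_command_line above; the settings
# module name "myproject.settings" is a placeholder.
#
#     import os
#     import sys
#     from django.core.management import execute_from_command_line
#
#     if __name__ == "__main__":
#         os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings")
#         execute_from_command_line(sys.argv)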
| [
"[email protected]"
] | |
7b1f943d3c5619b79aee733252222c093017888e | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_31938.py | 017da36e216e36f1227fb158380e0eaca4d4cd1f | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,844 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((418.732, 454.505, 614.234), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((473.199, 482.589, 581.337), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((541.317, 512.112, 547.473), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((546.679, 429.394, 658.17), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((672.968, 591.826, 429.626), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((453.092, 479.276, 595.335), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((451.571, 479.158, 596.217), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((436.205, 500.95, 605.693), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((418.426, 519.362, 617.452), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((400.108, 533.306, 633.721), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((409.906, 543.034, 658.407), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((438.28, 546.232, 659.481), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((429.229, 464.153, 591.494), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((450.154, 632.588, 720.504), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((572.876, 670.705, 564.494), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((572.876, 670.705, 564.494), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((553.792, 648.536, 561.418), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((535.169, 626.223, 560.035), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((516.818, 604.112, 560.172), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((499.415, 581.514, 561.968), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((486.961, 556.053, 565.906), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((478.335, 529.748, 572.412), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((581.79, 721.756, 711.949), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((376.908, 338.914, 429.795), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((499.313, 529.393, 530.218), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((499.313, 529.393, 530.218), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((515.142, 505.55, 527.498), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((531.756, 483.355, 536.218), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((547.732, 481.311, 560.58), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((462.745, 418.564, 627.696), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((638.585, 543.856, 502.247), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((477.426, 458.376, 595.056), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((477.545, 458.089, 595.249), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((501.658, 466.606, 606.946), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((510.655, 493.181, 609.148), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((497.285, 517.676, 612.649), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((476.234, 535.44, 618.462), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((451.266, 547.328, 624.161), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((425.241, 556.149, 631.183), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((422.302, 530.06, 550.41), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((426.992, 577.55, 716.821), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((443.594, 498.592, 524.165), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((465.134, 490.91, 536.638), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((511.273, 472.277, 565.371), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((557.71, 453.909, 593.879), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((513.255, 386.416, 604.221), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((655.007, 478.186, 623.173), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((490.682, 424.811, 622.543), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((502.609, 433.326, 598.443), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((513.841, 436.883, 572.742), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((525.359, 444.647, 548.082), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((535.5, 452.38, 522.77), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((542.341, 472.748, 504.186), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((475.986, 470.452, 550.635), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((608.616, 476.644, 456.665), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
eb86ed1da548c5ada25cc1aa23969fe16b9b6d66 | cc7ad1a2aa5d691c15ff7838d1e5126ab2c2bee0 | /basic_ranking/urls.py | 7691f1ea08646018399e25f6c2c1ded7e23ba8f9 | [] | no_license | demirantay/lingooapp | 9632be8a7d3dd00e7a4ac13618f32975da389729 | c842bb032668ef1bd5e7f4282acd4990843c8640 | refs/heads/master | 2023-03-14T08:00:37.681334 | 2021-01-09T09:36:48 | 2021-01-09T09:36:48 | 285,181,982 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | from django.urls import path
from . import views
urlpatterns = [
# Ranking Overview
path(
"ranking/overview/<int:page>/",
views.ranking_overview,
name="ranking_overview"
),
# Category Ranking Page
path(
"ranking/<str:language>/<int:page>/",
views.category_ranking,
name="category_ranking"
),
]
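# Illustrative sketch (assumptions, not part of the original module): the named
# routes above can be reversed elsewhere in the project. The argument values are
# placeholders, and the resulting paths assume this urlconf is included at the
# project root.
#
#     from django.urls import reverse
#
#     reverse("ranking_overview", kwargs={"page": 1})
#     # -> "/ranking/overview/1/"
#     reverse("category_ranking", kwargs={"language": "english", "page": 2})
#     # -> "/ranking/english/2/"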
| [
"[email protected]"
] | |
43edad709ef0ed6de4ca2dcc842527c0a49c651b | db4c1703bee4e79e0a275434b7491c6dfe7a8602 | /backend/rush_19600/wsgi.py | 3761f5a1382397ef0ff7f2d1d384d21c0f1a9ca3 | [] | no_license | crowdbotics-apps/rush-19600 | 6baf41e83f15b7d48d3a04d2b83a29c8a4cfa781 | 8ecff341418f153c1a82b6492eaef662253b3254 | refs/heads/master | 2022-12-01T09:00:15.372702 | 2020-08-18T05:48:32 | 2020-08-18T05:48:32 | 288,006,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for rush_19600 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rush_19600.settings")
application = get_wsgi_application()
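# Illustrative usage (assumption, not part of the original module): the
# module-level "application" object above is what a WSGI server imports in
# production, for example with gunicorn:
#
#     gunicorn rush_19600.wsgi:application --bind 0.0.0.0:8000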
| [
"[email protected]"
] | |
ade89183eb307e372486d8f1a1c6c63400af1e0f | c3e75ab16954f7dffdf68983237df98fae832b43 | /conf_matrix.py | adf29a6d56121b97b080b9403749eec2825a1afa | [] | no_license | satojkovic/ml-algorithms-simple | 713c6f57c23030a79db7500dff32f30858ebeee6 | a2bd09544b82a96dcf41b5a650fee4fe21b99934 | refs/heads/master | 2021-06-04T15:22:06.421941 | 2021-05-11T12:50:51 | 2021-05-11T12:50:51 | 3,835,836 | 13 | 12 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
def main():
digits = load_digits()
X_train, X_test, y_train, y_test = train_test_split(digits.data,
digits.target)
estimator = SVC(C=1.0, kernel='rbf', gamma=0.01)
clf = OneVsRestClassifier(estimator)
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
cm = confusion_matrix(y_test, pred)
print cm
print accuracy_score(y_test, pred)
if __name__ == '__main__':
main()
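# Illustrative extension (assumption, not part of the original script): the raw
# confusion matrix can be normalised per true class so that each row reads as
# per-digit recall; this reuses the `cm` variable from main() above.
#
#     import numpy as np
#     cm_normalised = cm.astype(float) / cm.sum(axis=1, keepdims=True)
#
# Note: on scikit-learn >= 0.20, train_test_split must be imported from
# sklearn.model_selection; sklearn.cross_validation (used above) was removed.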
| [
"[email protected]"
] | |
9a79938a1d99004ccdaf6accc35ec65df0ca6e7c | abbc11abfabb0d3976789a9ec073b28892c78778 | /machine_program/data_ready.py | c1616d0e234e88c9fe2e2d22875178636457ef3f | [] | no_license | sunxhap/machine_learning | b06b28b3aba5b39704d8a3ae282f366dad6af406 | ef1d80a16fd35f03e428ac27b9b0f771f6f1edbb | refs/heads/master | 2022-05-01T15:22:07.314221 | 2017-11-12T09:12:30 | 2017-11-12T09:12:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,721 | py | # -*- coding: utf-8 -*-
"""
@Time: 2017/11/8 11:24
@Author: sunxiang
"""
import numpy as np
filename = "data.csv"
def data_ready():
"""
    Read in the sample data from the space-delimited "data.txt"; each row already carries a class label.
"""
data = []
labels = []
with open("data.txt") as ifile:
for line in ifile:
try:
tokens = line.strip().split(' ')
data.append([float(tk) for tk in tokens[:-1]])
labels.append(tokens[-1])
except:
print line
x = np.array(data)
labels = np.array(labels)
y = np.zeros(labels.shape)
    # Convert the string labels to 0/1 (label 'A' -> 1, all others -> 0)
y[labels == 'A'] = 1
return x, y
def data_ready_notype():
"""
    Read in the labelled data and return the raw rows as a plain list.
"""
data = []
labels = []
with open(filename) as ifile:
for line in ifile:
try:
tokens = line.strip().split(' ')
data.append([tk for tk in tokens])
labels.append(tokens[-1])
except:
print line
# x = np.array(data)
# labels = np.array(labels)
# y = np.zeros(labels.shape)
#
    # # Convert labels to 0/1
# y[labels == 'A'] = 1
return data
# def createDataSet():
# """
#     Create the dataset
# :return:
# """
# return data_ready_notype()
# # dataSet = [[1, 1, 'yes'],
# # [1, 1, 'yes'],
# # [1, 0, 'no'],
# # [0, 1, 'no'],
# # [0, 1, 'no']]
# # labels = ['no surfacing', 'flippers']
# # return dataSet, labels
def createDataSet():
"""
    Create the dataset by loading the comma-delimited "data.csv" as rows of strings.
:return:
"""
return np.loadtxt(filename, dtype=str, delimiter=",").tolist()
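# Illustrative usage (assumptions, not part of the original module): a minimal
# sketch of how these loaders are typically called. The assumed file layouts
# ("data.txt" space-delimited with a trailing 'A'/'B'-style label, "data.csv"
# comma-delimited) are inferred from the parsing code above.
#
#     if __name__ == "__main__":
#         x, y = data_ready()      # numeric features + 0/1 labels from data.txt
#         rows = createDataSet()   # raw string rows from data.csv
#         print x.shape, y.sum()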
| [
"[email protected]"
] | |
a3d79b4cb2607b2a2b3e3d33d8952b975e6821a2 | 943449a56a53edce7a873b4023df9859bd931363 | /sdk/lusid/models/allocation_request.py | 73c628c34b434313ef323a824d1996daa6b674f4 | [
"MIT"
] | permissive | entityoneuk/lusid-sdk-python-preview | 47e1e6c38ba443ea5ce03b5e50bd89e70cc4b9ee | 4a78765bb1a55cd9a8fc258a2072b7a978e2a250 | refs/heads/master | 2022-04-23T04:31:00.775056 | 2020-04-24T11:15:32 | 2020-04-24T11:15:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,852 | py | # coding: utf-8
"""
LUSID API
# Introduction This page documents the [LUSID APIs](https://api.lusid.com/swagger), which allows authorised clients to query and update their data within the LUSID platform. SDKs to interact with the LUSID APIs are available in the following languages : * [C#](https://github.com/finbourne/lusid-sdk-csharp) * [Java](https://github.com/finbourne/lusid-sdk-java) * [JavaScript](https://github.com/finbourne/lusid-sdk-js) * [Python](https://github.com/finbourne/lusid-sdk-python) # Data Model The LUSID API has a relatively lightweight but extremely powerful data model. One of the goals of LUSID was not to enforce on clients a single rigid data model but rather to provide a flexible foundation onto which clients can map their own data models. The core entities in LUSID provide a minimal structure and set of relationships, and the data model can be extended using Properties. The LUSID data model is exposed through the LUSID APIs. The APIs provide access to both business objects and the meta data used to configure the systems behaviours. The key business entities are: - * **Portfolios** A portfolio is a container for transactions and holdings (a **Transaction Portfolio**) or constituents (a **Reference Portfolio**). * **Derived Portfolios**. Derived Portfolios allow Portfolios to be created based on other Portfolios, by overriding or adding specific items. * **Holdings** A Holding is a quantity of an Instrument or a balance of cash within a Portfolio. Holdings can only be adjusted via Transactions. * **Transactions** A Transaction is an economic event that occurs in a Portfolio, causing its holdings to change. * **Corporate Actions** A corporate action is a market event which occurs to an Instrument and thus applies to all portfolios which holding the instrument. Examples are stock splits or mergers. * **Constituents** A constituent is a record in a Reference Portfolio containing an Instrument and an associated weight. * **Instruments** An instrument represents a currency, tradable instrument or OTC contract that is attached to a transaction and a holding. * **Properties** All major entities allow additional user defined properties to be associated with them. For example, a Portfolio manager may be associated with a portfolio. Meta data includes: - * **Transaction Types** Transactions are booked with a specific transaction type. The types are client defined and are used to map the Transaction to a series of movements which update the portfolio holdings. * **Properties Types** Types of user defined properties used within the system. ## Scope All data in LUSID is segregated at the client level. Entities in LUSID are identifiable by a unique code. Every entity lives within a logical data partition known as a Scope. Scope is an identity namespace allowing two entities with the same unique code to co-exist within individual address spaces. For example, prices for equities from different vendors may be uploaded into different scopes such as `client/vendor1` and `client/vendor2`. A portfolio may then be valued using either of the price sources by referencing the appropriate scope. LUSID Clients cannot access scopes of other clients. ## Instruments LUSID has its own built-in instrument master which you can use to master your own instrument universe. Every instrument must be created with one or more unique market identifiers, such as [FIGI](https://openfigi.com/). For any non-listed instruments (eg OTCs), you can upload an instrument against a custom ID of your choosing. 
In addition, LUSID will allocate each instrument a unique 'LUSID instrument identifier'. The LUSID instrument identifier is what is used when uploading transactions, holdings, prices, etc. The API exposes an `instrument/lookup` endpoint which can be used to lookup these LUSID identifiers using their market identifiers. Cash can be referenced using the ISO currency code prefixed with \"`CCY_`\" e.g. `CCY_GBP` ## Instrument Data Instrument data can be uploaded to the system using the [Instrument Properties](#tag/InstrumentProperties) endpoint. | Field|Type|Description | | ---|---|--- | | Key|propertykey|The key of the property. This takes the format {domain}/{scope}/{code} e.g. 'Instrument/system/Name' or 'Transaction/strategy/quantsignal'. | | Value|string|The value of the property. | | EffectiveFrom|datetimeoffset|The effective datetime from which the property is valid. | ## Transaction Portfolios Portfolios are the top-level entity containers within LUSID, containing transactions, corporate actions and holdings. The transactions build up the portfolio holdings on which valuations, analytics profit & loss and risk can be calculated. Properties can be associated with Portfolios to add in additional data. Portfolio properties can be changed over time, for example to allow a Portfolio Manager to be linked with a Portfolio. Additionally, portfolios can be securitised and held by other portfolios, allowing LUSID to perform \"drill-through\" into underlying fund holdings ### Derived Portfolios LUSID also allows for a portfolio to be composed of another portfolio via derived portfolios. A derived portfolio can contain its own transactions and also inherits any transactions from its parent portfolio. Any changes made to the parent portfolio are automatically reflected in derived portfolio. Derived portfolios in conjunction with scopes are a powerful construct. For example, to do pre-trade what-if analysis, a derived portfolio could be created a new namespace linked to the underlying live (parent) portfolio. Analysis can then be undertaken on the derived portfolio without affecting the live portfolio. ### Transactions A transaction represents an economic activity against a Portfolio. Transactions are processed according to a configuration. This will tell the LUSID engine how to interpret the transaction and correctly update the holdings. LUSID comes with a set of transaction types you can use out of the box, or you can configure your own set(s) of transactions. For more details see the [LUSID Getting Started Guide for transaction configuration.](https://support.lusid.com/configuring-transaction-types) | Field|Type|Description | | ---|---|--- | | TransactionId|string|The unique identifier for the transaction. | | Type|string|The type of the transaction e.g. 'Buy', 'Sell'. The transaction type should have been pre-configured via the System Configuration API endpoint. If it hasn't been pre-configured the transaction will still be updated or inserted however you will be unable to generate the resultant holdings for the portfolio that contains this transaction as LUSID does not know how to process it. | | InstrumentIdentifiers|map|A set of instrument identifiers to use to resolve the transaction to a unique instrument. | | TransactionDate|dateorcutlabel|The date of the transaction. | | SettlementDate|dateorcutlabel|The settlement date of the transaction. | | Units|decimal|The number of units transacted in the associated instrument. 
| | TransactionPrice|transactionprice|The price for each unit of the transacted instrument in the transaction currency. | | TotalConsideration|currencyandamount|The total value of the transaction in the settlement currency. | | ExchangeRate|decimal|The exchange rate between the transaction and settlement currency. For example if the transaction currency is in USD and the settlement currency is in GBP this this the USD/GBP rate. | | TransactionCurrency|currency|The transaction currency. | | Properties|map|Set of unique transaction properties and associated values to store with the transaction. Each property must be from the 'Transaction' domain. | | CounterpartyId|string|The identifier for the counterparty of the transaction. | | Source|string|The source of the transaction. This is used to look up the appropriate transaction group set in the transaction type configuration. | From these fields, the following values can be calculated * **Transaction value in Transaction currency**: TotalConsideration / ExchangeRate * **Transaction value in Portfolio currency**: Transaction value in Transaction currency * TradeToPortfolioRate #### Example Transactions ##### A Common Purchase Example Three example transactions are shown in the table below. They represent a purchase of USD denominated IBM shares within a Sterling denominated portfolio. * The first two transactions are for separate buy and fx trades * Buying 500 IBM shares for $71,480.00 * A spot foreign exchange conversion to fund the IBM purchase. (Buy $71,480.00 for £54,846.60) * The third transaction is an alternate version of the above trades. Buying 500 IBM shares and settling directly in Sterling. | Column | Buy Trade | Fx Trade | Buy Trade with foreign Settlement | | ----- | ----- | ----- | ----- | | TransactionId | FBN00001 | FBN00002 | FBN00003 | | Type | Buy | FxBuy | Buy | | InstrumentIdentifiers | { \"figi\", \"BBG000BLNNH6\" } | { \"CCY\", \"CCY_USD\" } | { \"figi\", \"BBG000BLNNH6\" } | | TransactionDate | 2018-08-02 | 2018-08-02 | 2018-08-02 | | SettlementDate | 2018-08-06 | 2018-08-06 | 2018-08-06 | | Units | 500 | 71480 | 500 | | TransactionPrice | 142.96 | 1 | 142.96 | | TradeCurrency | USD | USD | USD | | ExchangeRate | 1 | 0.7673 | 0.7673 | | TotalConsideration.Amount | 71480.00 | 54846.60 | 54846.60 | | TotalConsideration.Currency | USD | GBP | GBP | | Trade/default/TradeToPortfolioRate* | 0.7673 | 0.7673 | 0.7673 | [* This is a property field] ##### A Forward FX Example LUSID has a flexible transaction modelling system, meaning there are a number of different ways of modelling forward fx trades. The default LUSID transaction types are FwdFxBuy and FwdFxSell. Using these transaction types, LUSID will generate two holdings for each Forward FX trade, one for each currency in the trade. 
An example Forward Fx trade to sell GBP for USD in a JPY-denominated portfolio is shown below: | Column | Forward 'Sell' Trade | Notes | | ----- | ----- | ---- | | TransactionId | FBN00004 | | | Type | FwdFxSell | | | InstrumentIdentifiers | { \"Instrument/default/Currency\", \"GBP\" } | | | TransactionDate | 2018-08-02 | | | SettlementDate | 2019-02-06 | Six month forward | | Units | 10000.00 | Units of GBP | | TransactionPrice | 1 | | | TradeCurrency | GBP | Currency being sold | | ExchangeRate | 1.3142 | Agreed rate between GBP and USD | | TotalConsideration.Amount | 13142.00 | Amount in the settlement currency, USD | | TotalConsideration.Currency | USD | Settlement currency | | Trade/default/TradeToPortfolioRate | 142.88 | Rate between trade currency, GBP and portfolio base currency, JPY | Please note that exactly the same economic behaviour could be modelled using the FwdFxBuy Transaction Type with the amounts and rates reversed. ### Holdings A holding represents a position in an instrument or cash on a given date. | Field|Type|Description | | ---|---|--- | | InstrumentUid|string|The unqiue Lusid Instrument Id (LUID) of the instrument that the holding is in. | | SubHoldingKeys|map|The sub-holding properties which identify the holding. Each property will be from the 'Transaction' domain. These are configured when a transaction portfolio is created. | | Properties|map|The properties which have been requested to be decorated onto the holding. These will be from the 'Instrument' or 'Holding' domain. | | HoldingType|string|The type of the holding e.g. Position, Balance, CashCommitment, Receivable, ForwardFX etc. | | Units|decimal|The total number of units of the holding. | | SettledUnits|decimal|The total number of settled units of the holding. | | Cost|currencyandamount|The total cost of the holding in the transaction currency. | | CostPortfolioCcy|currencyandamount|The total cost of the holding in the portfolio currency. | | Transaction|transaction|The transaction associated with an unsettled holding. | ## Corporate Actions Corporate actions are represented within LUSID in terms of a set of instrument-specific 'transitions'. These transitions are used to specify the participants of the corporate action, and the effect that the corporate action will have on holdings in those participants. ### Corporate Action | Field|Type|Description | | ---|---|--- | | CorporateActionCode|code|The unique identifier of this corporate action | | Description|string| | | AnnouncementDate|datetimeoffset|The announcement date of the corporate action | | ExDate|datetimeoffset|The ex date of the corporate action | | RecordDate|datetimeoffset|The record date of the corporate action | | PaymentDate|datetimeoffset|The payment date of the corporate action | | Transitions|corporateactiontransition[]|The transitions that result from this corporate action | ### Transition | Field|Type|Description | | ---|---|--- | | InputTransition|corporateactiontransitioncomponent|Indicating the basis of the corporate action - which security and how many units | | OutputTransitions|corporateactiontransitioncomponent[]|What will be generated relative to the input transition | ### Example Corporate Action Transitions #### A Dividend Action Transition In this example, for each share of IBM, 0.20 units (or 20 pence) of GBP are generated. 
| Column | Input Transition | Output Transition | | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"ccy\" : \"CCY_GBP\" } | | Units Factor | 1 | 0.20 | | Cost Factor | 1 | 0 | #### A Split Action Transition In this example, for each share of IBM, we end up with 2 units (2 shares) of IBM, with total value unchanged. | Column | Input Transition | Output Transition | | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000BLNNH6\" } | | Units Factor | 1 | 2 | | Cost Factor | 1 | 1 | #### A Spinoff Action Transition In this example, for each share of IBM, we end up with 1 unit (1 share) of IBM and 3 units (3 shares) of Celestica, with 85% of the value remaining on the IBM share, and 5% in each Celestica share (15% total). | Column | Input Transition | Output Transition 1 | Output Transition 2 | | ----- | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000HBGRF3\" } | | Units Factor | 1 | 1 | 3 | | Cost Factor | 1 | 0.85 | 0.15 | ## Reference Portfolios Reference portfolios are portfolios that contain constituents with weights. They are designed to represent entities such as indices and benchmarks. ### Constituents | Field|Type|Description | | ---|---|--- | | InstrumentIdentifiers|map|Unique instrument identifiers | | InstrumentUid|string|LUSID's internal unique instrument identifier, resolved from the instrument identifiers | | Currency|decimal| | | Weight|decimal| | | FloatingWeight|decimal| | ## Portfolio Groups Portfolio groups allow the construction of a hierarchy from portfolios and groups. Portfolio operations on the group are executed on an aggregated set of portfolios in the hierarchy. For example: * Global Portfolios _(group)_ * APAC _(group)_ * Hong Kong _(portfolio)_ * Japan _(portfolio)_ * Europe _(group)_ * France _(portfolio)_ * Germany _(portfolio)_ * UK _(portfolio)_ In this example **Global Portfolios** is a group that consists of an aggregate of **Hong Kong**, **Japan**, **France**, **Germany** and **UK** portfolios. ## Properties Properties are key-value pairs that can be applied to any entity within a domain (where a domain is `trade`, `portfolio`, `security` etc). Properties must be defined before use with a `PropertyDefinition` and can then subsequently be added to entities. ## Schema A detailed description of the entities used by the API and parameters for endpoints which take a JSON document can be retrieved via the `schema` endpoint. ## Meta data The following headers are returned on all responses from LUSID | Name | Purpose | | --- | --- | | lusid-meta-duration | Duration of the request | | lusid-meta-success | Whether or not LUSID considered the request to be successful | | lusid-meta-requestId | The unique identifier for the request | | lusid-schema-url | Url of the schema for the data being returned | | lusid-property-schema-url | Url of the schema for any properties | # Error Codes | Code|Name|Description | | ---|---|--- | | <a name=\"-10\">-10</a>|Server Configuration Error| | | <a name=\"-1\">-1</a>|Unknown error|An unexpected error was encountered on our side. 
| | <a name=\"102\">102</a>|Version Not Found| | | <a name=\"103\">103</a>|Api Rate Limit Violation| | | <a name=\"104\">104</a>|Instrument Not Found| | | <a name=\"105\">105</a>|Property Not Found| | | <a name=\"106\">106</a>|Portfolio Recursion Depth| | | <a name=\"108\">108</a>|Group Not Found| | | <a name=\"109\">109</a>|Portfolio Not Found| | | <a name=\"110\">110</a>|Property Schema Not Found| | | <a name=\"111\">111</a>|Portfolio Ancestry Not Found| | | <a name=\"112\">112</a>|Portfolio With Id Already Exists| | | <a name=\"113\">113</a>|Orphaned Portfolio| | | <a name=\"119\">119</a>|Missing Base Claims| | | <a name=\"121\">121</a>|Property Not Defined| | | <a name=\"122\">122</a>|Cannot Delete System Property| | | <a name=\"123\">123</a>|Cannot Modify Immutable Property Field| | | <a name=\"124\">124</a>|Property Already Exists| | | <a name=\"125\">125</a>|Invalid Property Life Time| | | <a name=\"126\">126</a>|Property Constraint Style Excludes Properties| | | <a name=\"127\">127</a>|Cannot Modify Default Data Type| | | <a name=\"128\">128</a>|Group Already Exists| | | <a name=\"129\">129</a>|No Such Data Type| | | <a name=\"130\">130</a>|Undefined Value For Data Type| | | <a name=\"131\">131</a>|Unsupported Value Type Defined On Data Type| | | <a name=\"132\">132</a>|Validation Error| | | <a name=\"133\">133</a>|Loop Detected In Group Hierarchy| | | <a name=\"134\">134</a>|Undefined Acceptable Values| | | <a name=\"135\">135</a>|Sub Group Already Exists| | | <a name=\"138\">138</a>|Price Source Not Found| | | <a name=\"139\">139</a>|Analytic Store Not Found| | | <a name=\"141\">141</a>|Analytic Store Already Exists| | | <a name=\"143\">143</a>|Client Instrument Already Exists| | | <a name=\"144\">144</a>|Duplicate In Parameter Set| | | <a name=\"147\">147</a>|Results Not Found| | | <a name=\"148\">148</a>|Order Field Not In Result Set| | | <a name=\"149\">149</a>|Operation Failed| | | <a name=\"150\">150</a>|Elastic Search Error| | | <a name=\"151\">151</a>|Invalid Parameter Value| | | <a name=\"153\">153</a>|Command Processing Failure| | | <a name=\"154\">154</a>|Entity State Construction Failure| | | <a name=\"155\">155</a>|Entity Timeline Does Not Exist| | | <a name=\"156\">156</a>|Event Publish Failure| | | <a name=\"157\">157</a>|Invalid Request| | | <a name=\"158\">158</a>|Event Publish Unknown| | | <a name=\"159\">159</a>|Event Query Failure| | | <a name=\"160\">160</a>|Blob Did Not Exist| | | <a name=\"162\">162</a>|Sub System Request Failure| | | <a name=\"163\">163</a>|Sub System Configuration Failure| | | <a name=\"165\">165</a>|Failed To Delete| | | <a name=\"166\">166</a>|Upsert Client Instrument Failure| | | <a name=\"167\">167</a>|Illegal As At Interval| | | <a name=\"168\">168</a>|Illegal Bitemporal Query| | | <a name=\"169\">169</a>|Invalid Alternate Id| | | <a name=\"170\">170</a>|Cannot Add Source Portfolio Property Explicitly| | | <a name=\"171\">171</a>|Entity Already Exists In Group| | | <a name=\"173\">173</a>|Entity With Id Already Exists| | | <a name=\"174\">174</a>|Derived Portfolio Details Do Not Exist| | | <a name=\"176\">176</a>|Portfolio With Name Already Exists| | | <a name=\"177\">177</a>|Invalid Transactions| | | <a name=\"178\">178</a>|Reference Portfolio Not Found| | | <a name=\"179\">179</a>|Duplicate Id| | | <a name=\"180\">180</a>|Command Retrieval Failure| | | <a name=\"181\">181</a>|Data Filter Application Failure| | | <a name=\"182\">182</a>|Search Failed| | | <a name=\"183\">183</a>|Movements Engine Configuration Key Failure| | | <a 
name=\"184\">184</a>|Fx Rate Source Not Found| | | <a name=\"185\">185</a>|Accrual Source Not Found| | | <a name=\"186\">186</a>|Access Denied| | | <a name=\"187\">187</a>|Invalid Identity Token| | | <a name=\"188\">188</a>|Invalid Request Headers| | | <a name=\"189\">189</a>|Price Not Found| | | <a name=\"190\">190</a>|Invalid Sub Holding Keys Provided| | | <a name=\"191\">191</a>|Duplicate Sub Holding Keys Provided| | | <a name=\"192\">192</a>|Cut Definition Not Found| | | <a name=\"193\">193</a>|Cut Definition Invalid| | | <a name=\"194\">194</a>|Time Variant Property Deletion Date Unspecified| | | <a name=\"195\">195</a>|Perpetual Property Deletion Date Specified| | | <a name=\"196\">196</a>|Time Variant Property Upsert Date Unspecified| | | <a name=\"197\">197</a>|Perpetual Property Upsert Date Specified| | | <a name=\"200\">200</a>|Invalid Unit For Data Type| | | <a name=\"201\">201</a>|Invalid Type For Data Type| | | <a name=\"202\">202</a>|Invalid Value For Data Type| | | <a name=\"203\">203</a>|Unit Not Defined For Data Type| | | <a name=\"204\">204</a>|Units Not Supported On Data Type| | | <a name=\"205\">205</a>|Cannot Specify Units On Data Type| | | <a name=\"206\">206</a>|Unit Schema Inconsistent With Data Type| | | <a name=\"207\">207</a>|Unit Definition Not Specified| | | <a name=\"208\">208</a>|Duplicate Unit Definitions Specified| | | <a name=\"209\">209</a>|Invalid Units Definition| | | <a name=\"210\">210</a>|Invalid Instrument Identifier Unit| | | <a name=\"211\">211</a>|Holdings Adjustment Does Not Exist| | | <a name=\"212\">212</a>|Could Not Build Excel Url| | | <a name=\"213\">213</a>|Could Not Get Excel Version| | | <a name=\"214\">214</a>|Instrument By Code Not Found| | | <a name=\"215\">215</a>|Entity Schema Does Not Exist| | | <a name=\"216\">216</a>|Feature Not Supported On Portfolio Type| | | <a name=\"217\">217</a>|Quote Not Found| | | <a name=\"218\">218</a>|Invalid Quote Identifier| | | <a name=\"219\">219</a>|Invalid Metric For Data Type| | | <a name=\"220\">220</a>|Invalid Instrument Definition| | | <a name=\"221\">221</a>|Instrument Upsert Failure| | | <a name=\"222\">222</a>|Reference Portfolio Request Not Supported| | | <a name=\"223\">223</a>|Transaction Portfolio Request Not Supported| | | <a name=\"224\">224</a>|Invalid Property Value Assignment| | | <a name=\"230\">230</a>|Transaction Type Not Found| | | <a name=\"231\">231</a>|Transaction Type Duplication| | | <a name=\"232\">232</a>|Portfolio Does Not Exist At Given Date| | | <a name=\"233\">233</a>|Query Parser Failure| | | <a name=\"234\">234</a>|Duplicate Constituent| | | <a name=\"235\">235</a>|Unresolved Instrument Constituent| | | <a name=\"236\">236</a>|Unresolved Instrument In Transition| | | <a name=\"237\">237</a>|Missing Side Definitions| | | <a name=\"300\">300</a>|Missing Recipe| | | <a name=\"301\">301</a>|Dependencies| | | <a name=\"304\">304</a>|Portfolio Preprocess Failure| | | <a name=\"310\">310</a>|Valuation Engine Failure| | | <a name=\"311\">311</a>|Task Factory Failure| | | <a name=\"312\">312</a>|Task Evaluation Failure| | | <a name=\"313\">313</a>|Task Generation Failure| | | <a name=\"314\">314</a>|Engine Configuration Failure| | | <a name=\"315\">315</a>|Model Specification Failure| | | <a name=\"320\">320</a>|Market Data Key Failure| | | <a name=\"321\">321</a>|Market Resolver Failure| | | <a name=\"322\">322</a>|Market Data Failure| | | <a name=\"330\">330</a>|Curve Failure| | | <a name=\"331\">331</a>|Volatility Surface Failure| | | <a name=\"332\">332</a>|Volatility 
Cube Failure| | | <a name=\"350\">350</a>|Instrument Failure| | | <a name=\"351\">351</a>|Cash Flows Failure| | | <a name=\"352\">352</a>|Reference Data Failure| | | <a name=\"360\">360</a>|Aggregation Failure| | | <a name=\"361\">361</a>|Aggregation Measure Failure| | | <a name=\"370\">370</a>|Result Retrieval Failure| | | <a name=\"371\">371</a>|Result Processing Failure| | | <a name=\"372\">372</a>|Vendor Result Processing Failure| | | <a name=\"373\">373</a>|Vendor Result Mapping Failure| | | <a name=\"374\">374</a>|Vendor Library Unauthorised| | | <a name=\"375\">375</a>|Vendor Connectivity Error| | | <a name=\"376\">376</a>|Vendor Interface Error| | | <a name=\"377\">377</a>|Vendor Pricing Failure| | | <a name=\"378\">378</a>|Vendor Translation Failure| | | <a name=\"379\">379</a>|Vendor Key Mapping Failure| | | <a name=\"380\">380</a>|Vendor Reflection Failure| | | <a name=\"390\">390</a>|Attempt To Upsert Duplicate Quotes| | | <a name=\"391\">391</a>|Corporate Action Source Does Not Exist| | | <a name=\"392\">392</a>|Corporate Action Source Already Exists| | | <a name=\"393\">393</a>|Instrument Identifier Already In Use| | | <a name=\"394\">394</a>|Properties Not Found| | | <a name=\"395\">395</a>|Batch Operation Aborted| | | <a name=\"400\">400</a>|Invalid Iso4217 Currency Code| | | <a name=\"401\">401</a>|Cannot Assign Instrument Identifier To Currency| | | <a name=\"402\">402</a>|Cannot Assign Currency Identifier To Non Currency| | | <a name=\"403\">403</a>|Currency Instrument Cannot Be Deleted| | | <a name=\"404\">404</a>|Currency Instrument Cannot Have Economic Definition| | | <a name=\"405\">405</a>|Currency Instrument Cannot Have Lookthrough Portfolio| | | <a name=\"406\">406</a>|Cannot Create Currency Instrument With Multiple Identifiers| | | <a name=\"407\">407</a>|Specified Currency Is Undefined| | | <a name=\"410\">410</a>|Index Does Not Exist| | | <a name=\"411\">411</a>|Sort Field Does Not Exist| | | <a name=\"413\">413</a>|Negative Pagination Parameters| | | <a name=\"414\">414</a>|Invalid Search Syntax| | | <a name=\"420\">420</a>|Side Definition Inconsistent| | | <a name=\"450\">450</a>|Invalid Quote Access Metadata Rule| | | <a name=\"451\">451</a>|Access Metadata Not Found| | | <a name=\"452\">452</a>|Invalid Access Metadata Identifier| | | <a name=\"460\">460</a>|Standard Resource Not Found| | | <a name=\"461\">461</a>|Standard Resource Conflict| | | <a name=\"601\">601</a>|Person Identifier Already In Use| | | <a name=\"602\">602</a>|Person Not Found| | | <a name=\"603\">603</a>|Cannot Set Identifier| | | <a name=\"617\">617</a>|Invalid Recipe Specification In Request| | | <a name=\"618\">618</a>|Inline Recipe Deserialisation Failure| | | <a name=\"619\">619</a>|Identifier Types Not Set For Entity Object| | | <a name=\"620\">620</a>|Cannot Delete All Client Defined Identifiers| | | <a name=\"650\">650</a>|The Order requested was not found.| | | <a name=\"654\">654</a>|The Allocation requested was not found.| | | <a name=\"655\">655</a>|Cannot build the fx forward target with the given holdings.| | | <a name=\"656\">656</a>|Group does not contain expected entities.| | # noqa: E501
The version of the OpenAPI document: 0.10.1327
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class AllocationRequest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'properties': 'dict(str, PerpetualProperty)',
'instrument_identifiers': 'dict(str, str)',
'quantity': 'int',
'portfolio_id': 'ResourceId',
'allocated_order_id': 'ResourceId',
'id': 'ResourceId'
}
attribute_map = {
'properties': 'properties',
'instrument_identifiers': 'instrumentIdentifiers',
'quantity': 'quantity',
'portfolio_id': 'portfolioId',
'allocated_order_id': 'allocatedOrderId',
'id': 'id'
}
required_map = {
'properties': 'optional',
'instrument_identifiers': 'required',
'quantity': 'required',
'portfolio_id': 'required',
'allocated_order_id': 'required',
'id': 'required'
}
def __init__(self, properties=None, instrument_identifiers=None, quantity=None, portfolio_id=None, allocated_order_id=None, id=None): # noqa: E501
"""
AllocationRequest - a model defined in OpenAPI
:param properties: Client-defined properties associated with this allocation.
:type properties: dict[str, lusid.PerpetualProperty]
:param instrument_identifiers: The instrument allocated. (required)
:type instrument_identifiers: dict(str, str)
:param quantity: The quantity of given instrument allocated. (required)
:type quantity: int
:param portfolio_id: (required)
:type portfolio_id: lusid.ResourceId
:param allocated_order_id: (required)
:type allocated_order_id: lusid.ResourceId
:param id: (required)
:type id: lusid.ResourceId
""" # noqa: E501
self._properties = None
self._instrument_identifiers = None
self._quantity = None
self._portfolio_id = None
self._allocated_order_id = None
self._id = None
self.discriminator = None
if properties is not None:
self.properties = properties
self.instrument_identifiers = instrument_identifiers
self.quantity = quantity
self.portfolio_id = portfolio_id
self.allocated_order_id = allocated_order_id
self.id = id
@property
def properties(self):
"""Gets the properties of this AllocationRequest. # noqa: E501
Client-defined properties associated with this allocation. # noqa: E501
:return: The properties of this AllocationRequest. # noqa: E501
:rtype: dict(str, PerpetualProperty)
"""
return self._properties
@properties.setter
def properties(self, properties):
"""Sets the properties of this AllocationRequest.
Client-defined properties associated with this allocation. # noqa: E501
:param properties: The properties of this AllocationRequest. # noqa: E501
:type: dict(str, PerpetualProperty)
"""
self._properties = properties
@property
def instrument_identifiers(self):
"""Gets the instrument_identifiers of this AllocationRequest. # noqa: E501
The instrument allocated. # noqa: E501
:return: The instrument_identifiers of this AllocationRequest. # noqa: E501
:rtype: dict(str, str)
"""
return self._instrument_identifiers
@instrument_identifiers.setter
def instrument_identifiers(self, instrument_identifiers):
"""Sets the instrument_identifiers of this AllocationRequest.
The instrument allocated. # noqa: E501
:param instrument_identifiers: The instrument_identifiers of this AllocationRequest. # noqa: E501
:type: dict(str, str)
"""
if instrument_identifiers is None:
raise ValueError("Invalid value for `instrument_identifiers`, must not be `None`") # noqa: E501
self._instrument_identifiers = instrument_identifiers
@property
def quantity(self):
"""Gets the quantity of this AllocationRequest. # noqa: E501
The quantity of given instrument allocated. # noqa: E501
:return: The quantity of this AllocationRequest. # noqa: E501
:rtype: int
"""
return self._quantity
@quantity.setter
def quantity(self, quantity):
"""Sets the quantity of this AllocationRequest.
The quantity of given instrument allocated. # noqa: E501
:param quantity: The quantity of this AllocationRequest. # noqa: E501
:type: int
"""
if quantity is None:
raise ValueError("Invalid value for `quantity`, must not be `None`") # noqa: E501
self._quantity = quantity
@property
def portfolio_id(self):
"""Gets the portfolio_id of this AllocationRequest. # noqa: E501
:return: The portfolio_id of this AllocationRequest. # noqa: E501
:rtype: ResourceId
"""
return self._portfolio_id
@portfolio_id.setter
def portfolio_id(self, portfolio_id):
"""Sets the portfolio_id of this AllocationRequest.
:param portfolio_id: The portfolio_id of this AllocationRequest. # noqa: E501
:type: ResourceId
"""
if portfolio_id is None:
raise ValueError("Invalid value for `portfolio_id`, must not be `None`") # noqa: E501
self._portfolio_id = portfolio_id
@property
def allocated_order_id(self):
"""Gets the allocated_order_id of this AllocationRequest. # noqa: E501
:return: The allocated_order_id of this AllocationRequest. # noqa: E501
:rtype: ResourceId
"""
return self._allocated_order_id
@allocated_order_id.setter
def allocated_order_id(self, allocated_order_id):
"""Sets the allocated_order_id of this AllocationRequest.
:param allocated_order_id: The allocated_order_id of this AllocationRequest. # noqa: E501
:type: ResourceId
"""
if allocated_order_id is None:
raise ValueError("Invalid value for `allocated_order_id`, must not be `None`") # noqa: E501
self._allocated_order_id = allocated_order_id
@property
def id(self):
"""Gets the id of this AllocationRequest. # noqa: E501
:return: The id of this AllocationRequest. # noqa: E501
:rtype: ResourceId
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this AllocationRequest.
:param id: The id of this AllocationRequest. # noqa: E501
:type: ResourceId
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AllocationRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
bf5774cfced892e0f81372fb0b659c3deb8a2cc0 | 97c5fe6a54636de9b056719ea62ac1de4e76ebdc | /src/matches/utils.py | 75c3ec2bd03ff40320434f900d8ef59465c9cf50 | [
"MIT"
] | permissive | EdwardBetts/matchmaker | 937ece7acbfd1fcb57ab59cd13b16c3cd67d54f3 | ec56d18c6af8ca904325deca3be56484d3415c70 | refs/heads/master | 2020-12-11T01:50:10.773983 | 2016-01-26T16:53:29 | 2016-01-26T16:53:29 | 56,478,725 | 0 | 0 | null | 2016-04-18T05:11:12 | 2016-04-18T05:11:12 | null | UTF-8 | Python | false | false | 1,382 | py |
from decimal import Decimal
def get_match(user_a, user_b):
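    # Score how well two users match on the questions they have both answered.
    # a_decimal is the fraction of the points user_b attached to their desired
    # answers that user_a's own answers earn, and b_decimal is the reverse;
    # the match percentage combines the two as
    # (a_decimal * b_decimal) ** (1 / number_of_common_questions).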
a = user_a.useranswer_set.all().values_list("question")
b = user_b.useranswer_set.all().values_list("question")
matches_b = user_b.useranswer_set.filter(question=a).order_by("question")
matches_a = user_a.useranswer_set.filter(question=b).order_by("question")
questions_match_num = matches_b.count()
if questions_match_num:
a_points = 0
b_points = 0
a_total_points = 0
b_total_points = 0
for question_a, question_b in zip(matches_a, matches_b):
if question_b.their_answer == question_a.my_answer:
a_points += question_b.their_points
a_total_points += question_b.their_points
if question_a.their_answer == question_b.my_answer:
b_points += question_a.their_points
b_total_points += question_a.their_points
if a_total_points == 0:
a_decimal = 0.000001
else:
a_decimal = a_points / Decimal(a_total_points)
if b_total_points == 0:
b_decimal = 0.000001
else:
b_decimal = b_points / Decimal(b_total_points)
match_percentage = (Decimal(a_decimal) * Decimal(b_decimal)) ** (1/Decimal(questions_match_num))
return match_percentage, questions_match_num
else:
return None, False
| [
"[email protected]"
] | |
6f759c661b592a52caaf1452caae33339900454b | 5f364b328d0e7df6f292dbbec266995f495b2ed4 | /src/python/txtai/pipeline/translation.py | b93b6d7c6fbacb0d01bcd3ab7561c52c463746d7 | [
"Apache-2.0"
] | permissive | binglinchengxiash/txtai | a17553f57ddd857ff39a7d0b38e24930f5c71596 | 1513eb8390f01848742e67690b6e4bc6452101ee | refs/heads/master | 2023-04-03T18:59:35.845281 | 2021-04-05T22:05:15 | 2021-04-05T22:05:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,895 | py | """
Translation module
"""
import fasttext
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer, MarianMTModel, MarianTokenizer
from transformers.file_utils import cached_path
from transformers.hf_api import HfApi
from .hfmodel import HFModel
class Translation(HFModel):
"""
Translates text from source language into target language.
"""
# Default language detection model
DEFAULT_LANG_DETECT = "https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.ftz"
def __init__(self, path="facebook/m2m100_418M", quantize=False, gpu=True, batch=64, langdetect=DEFAULT_LANG_DETECT):
"""
Constructs a new language translation pipeline.
Args:
path: optional path to model, accepts Hugging Face model hub id or local path,
uses default model for task if not provided
quantize: if model should be quantized, defaults to False
gpu: True/False if GPU should be enabled, also supports a GPU device id
batch: batch size used to incrementally process content
langdetect: path to language detection model, uses a default path if not provided
"""
# Call parent constructor
super().__init__(path, quantize, gpu, batch)
# Language detection
self.detector = None
self.langdetect = langdetect
# Language models
self.models = {}
self.ids = self.available()
def __call__(self, texts, target="en", source=None):
"""
Translates text from source language into target language.
This method supports texts as a string or a list. If the input is a string,
the return type is string. If text is a list, the return type is a list.
Args:
texts: text|list
target: target language code, defaults to "en"
source: source language code, detects language if not provided
Returns:
list of translated text
"""
values = [texts] if not isinstance(texts, list) else texts
# Detect source languages
languages = self.detect(values) if not source else [source] * len(values)
unique = set(languages)
# Build list of (index, language, text)
values = [(x, lang, values[x]) for x, lang in enumerate(languages)]
results = {}
for language in unique:
# Get all text values for language
inputs = [(x, text) for x, lang, text in values if lang == language]
# Translate text in batches
outputs = []
for chunk in self.batch([text for _, text in inputs], self.batchsize):
outputs.extend(self.translate(chunk, language, target))
# Store output value
for y, (x, _) in enumerate(inputs):
results[x] = outputs[y]
# Return results in same order as input
results = [results[x] for x in sorted(results)]
return results[0] if isinstance(texts, str) else results
def available(self):
"""
Runs a query to get a list of available language models from the Hugging Face API.
Returns:
list of available language name ids
"""
return set(x.modelId for x in HfApi().model_list() if x.modelId.startswith("Helsinki-NLP"))
def detect(self, texts):
"""
Detects the language for each element in texts.
Args:
texts: list of text
Returns:
list of languages
"""
if not self.detector:
# Suppress unnecessary warning
fasttext.FastText.eprint = lambda x: None
# Load language detection model
path = cached_path(self.langdetect)
self.detector = fasttext.load_model(path)
# Transform texts to format expected by language detection model
texts = [x.lower() for x in texts]
return [x[0].split("__")[-1] for x in self.detector.predict(texts)[0]]
def translate(self, texts, source, target):
"""
Translates text from source to target language.
Args:
texts: list of text
source: source language code
target: target language code
Returns:
list of translated text
"""
# Return original if already in target language
if source == target:
return texts
# Load model and tokenizer
model, tokenizer = self.lookup(source, target)
model.to(self.device)
indices = None
with self.context():
if isinstance(model, M2M100ForConditionalGeneration):
source = self.langid(tokenizer.lang_code_to_id, source)
target = self.langid(tokenizer.lang_code_to_id, target)
tokenizer.src_lang = source
tokens, indices = self.tokenize(tokenizer, texts)
translated = model.generate(**tokens, forced_bos_token_id=tokenizer.lang_code_to_id[target])
else:
tokens, indices = self.tokenize(tokenizer, texts)
translated = model.generate(**tokens)
# Decode translations
translated = tokenizer.batch_decode(translated, skip_special_tokens=True)
# Combine translations - handle splits on large text from tokenizer
results, last = [], -1
for x, i in enumerate(indices):
if i == last:
results[-1] += translated[x]
else:
results.append(translated[x])
last = i
return results
def lookup(self, source, target):
"""
Retrieves a translation model for source->target language. This method caches each model loaded.
Args:
source: source language code
target: target language code
Returns:
(model, tokenizer)
"""
# Determine best translation model to use, load if necessary and return
path = self.modelpath(source, target)
if path not in self.models:
self.models[path] = self.load(path)
return self.models[path]
def modelpath(self, source, target):
"""
Derives a translation model path given source and target languages.
Args:
source: source language code
target: target language code
Returns:
model path
"""
# First try direct model
template = "Helsinki-NLP/opus-mt-%s-%s"
path = template % (source, target)
if path in self.ids:
return path
# Use multi-language - english model
if target == "en":
return template % ("mul", target)
# Default model if no suitable model found
return self.path
def load(self, path):
"""
Loads a model specified by path.
Args:
path: model path
Returns:
(model, tokenizer)
"""
if path.startswith("Helsinki-NLP"):
model = MarianMTModel.from_pretrained(path)
tokenizer = MarianTokenizer.from_pretrained(path)
else:
model = M2M100ForConditionalGeneration.from_pretrained(path)
tokenizer = M2M100Tokenizer.from_pretrained(path)
# Apply model initialization routines
model = self.prepare(model)
return (model, tokenizer)
def langid(self, languages, target):
"""
Searches a list of languages for a prefix match on target.
Args:
languages: list of languages
target: target language code
Returns:
best match or None if no match found
"""
for lang in languages:
if lang.startswith(target):
return lang
return None
| [
"[email protected]"
] | |
1aeafea4c287d40f71a3a955dbca59e856894d98 | ed0e1f62c637cee6c120f77ffc0d8db4a0b218c2 | /test8.py | c9834f9ec424f9870f678bc7c5c69763054d726a | [] | no_license | bcrafton/weight-mirror | 1d386fe68f88eea3b67ddcef70450a37331871d7 | d2e35e7378fc261de397c54a6db76b3f8a1e0281 | refs/heads/master | 2020-05-15T18:45:57.488229 | 2019-04-23T20:44:17 | 2019-04-23T20:44:17 | 182,438,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,428 | py |
import numpy as np
import argparse
import keras
import matplotlib.pyplot as plt
from whiten import whiten
#######################################
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--lr', type=float, default=1e-2)
parser.add_argument('--l2', type=float, default=1e-3)
args = parser.parse_args()
LAYER1 = 1024 * 3
LAYER2 = 1000
LAYER3 = 1000
LAYER4 = 10
TRAIN_EXAMPLES = 50000
TEST_EXAMPLES = 10000
#######################################
def softmax(x):
e_x = np.exp(x - np.max(x, axis=1, keepdims=True))
return e_x / np.sum(e_x, axis=1, keepdims=True)
def sigmoid(x):
return 1. / (1. + np.exp(-x))
def dsigmoid(x):
return x * (1. - x)
def relu(x):
return x * (x > 0)
def drelu(x):
# USE A NOT Z
return 1.0 * (x > 0)
def tanh(x):
return np.tanh(x)
def dtanh(x):
# USE A NOT Z
return (1. - (x ** 2))
#######################################
def unit_vector(vector):
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
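    # Angle between two vectors in radians; the dot product is clipped to
    # [-1, 1] so floating point error cannot push arccos out of range.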
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def zca_approx(data, ksize, ssize):
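    # Approximate ZCA whitening of an image batch by whitening fixed-size
    # (KX, KY, KZ) patches independently; ssize controls the offsets at which
    # the non-overlapping tiling is repeated.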
N, H, W, C = np.shape(data)
KX, KY, KZ = ksize
SX, SY, SZ = ssize
for sx in range(0, KX, SX):
for sy in range(0, KY, SY):
for sz in range(0, KZ, SZ):
for x in range(sx, H+sx, KX):
for y in range(sy, W+sy, KY):
for z in range(sz, C+sz, KZ):
x1 = x
x2 = x + KX
y1 = y
y2 = y + KY
z1 = z
z2 = z + KZ
if (x2 > H or y2 > W or z2 > C):
continue
print (x, y, z)
white = whiten(X=data[:, x1:x2, y1:y2, z1:z2], method='zca')
white = np.reshape(white, (N, x2-x1, y2-y1, z2-z1))
data[:, x1:x2, y1:y2, z1:z2] = white
return data
#######################################
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
assert(np.shape(x_train) == (50000, 32, 32, 3))
assert(np.shape(x_test) == (10000, 32, 32, 3))
y_train = keras.utils.to_categorical(y_train, 10)
x_train = x_train.astype('float32')
y_test = keras.utils.to_categorical(y_test, 10)
x_test = x_test.astype('float32')
mean = np.mean(x_train, axis=0, keepdims=True)
std = np.std(x_train, axis=0, ddof=1, keepdims=True)
scale = std
x_train = x_train - mean
x_train = x_train / scale
x_train = x_train.reshape(TRAIN_EXAMPLES, 32, 32, 3)
# x_train = whiten(x_train)
x_train = zca_approx(x_train, (8, 8, 3), (8, 8, 3))
x_train = x_train.reshape(TRAIN_EXAMPLES, 1024 * 3)
#######################################
high = 1. / np.sqrt(LAYER1)
weights1 = np.random.uniform(low=-high, high=high, size=(LAYER1, LAYER2))
bias1 = np.zeros(shape=LAYER2)
high = 1. / np.sqrt(LAYER2)
weights2 = np.random.uniform(low=-high, high=high, size=(LAYER2, LAYER3))
bias2 = np.zeros(shape=LAYER3)
high = 1. / np.sqrt(LAYER3)
weights3 = np.random.uniform(low=-high, high=high, size=(LAYER3, LAYER4))
bias3 = np.zeros(shape=LAYER4)
high = 1. / np.sqrt(LAYER2)
# b2 = np.random.uniform(low=-high, high=high, size=(LAYER2, LAYER3))
b2 = np.zeros(shape=(LAYER2, LAYER3))
########
xx1 = 0.
xx2 = 0.
batch_size = 100
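# Accumulate the correlation between layer-1 activations and layer-2 outputs
# (x.T @ y) over many batches: once driven by the whitened CIFAR images (xx1)
# and once by uniform random inputs (xx2). The losses and angles printed below
# measure how well each accumulated correlation recovers weights2.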
for idx in range(0, 10000, batch_size):
print (idx)
start = idx
end = idx + batch_size
x1 = np.reshape(x_train[start:end], (batch_size, LAYER1)) @ weights1
x2 = np.random.uniform(low=-1., high=1., size=(batch_size, LAYER1)) @ weights1
y1 = x1 @ weights2
y2 = x2 @ weights2
xx1 += x1.T @ y1
xx2 += x2.T @ y2
xx1 = xx1 / np.max(xx1)
xx2 = xx2 / np.max(xx2)
weights2 = weights2 / np.max(weights2)
print (np.shape(xx1), np.shape(xx2), np.shape(weights2))
loss1 = np.sum((weights2 - xx1) ** 2)
loss2 = np.sum((weights2 - xx2) ** 2)
angle1 = angle_between(np.reshape(xx1, -1), np.reshape(weights2, -1)) * (180.0 / 3.14)
angle2 = angle_between(np.reshape(xx2, -1), np.reshape(weights2, -1)) * (180.0 / 3.14)
print (loss1, loss2, loss1 / loss2)
print (angle1, angle2)
| [
"[email protected]"
] | |
8f633e9e029f949e14bea3ec4d3701110965256d | 21ae28849f391b58cbc5a6d3d586af68e20e3954 | /bin/update_node_allowed_ips.py | add0aec3c64275b2c578baca75cfc829f189d714 | [
"MIT"
] | permissive | OriHoch/knesset-data-k8s | f6fa12abdde9d7f929769938a3e82ea8fa364e3d | 0a9de5ecd1fc50f3607936500833e15de2ae8f80 | refs/heads/master | 2023-08-17T12:28:05.208947 | 2023-08-14T18:04:57 | 2023-08-14T18:04:57 | 116,827,286 | 0 | 0 | MIT | 2018-03-28T10:58:49 | 2018-01-09T14:35:09 | Shell | UTF-8 | Python | false | false | 838 | py | #!/usr/bin/env python3
import json
import subprocess
ALLOWED_IPS="""
212.80.204.81
5.100.254.253
194.36.91.251
212.199.115.150
212.80.204.206
194.36.91.165
5.100.248.220
195.28.181.207
212.115.111.44
212.115.111.199
83.229.74.79
83.229.74.80
194.36.90.155
"""
def main():
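    # Map each node's external IP (taken from the rke.cattle.io/external-ip
    # annotation) to its node name, then label every node whose IP appears in
    # ALLOWED_IPS with oknesset-allowed-ip=true.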
allowed_ips = [ip.strip() for ip in ALLOWED_IPS.split() if ip.strip()]
node_ip_names = {}
for node in json.loads(subprocess.check_output(['kubectl', 'get', 'node', '-o', 'json']))['items']:
node_ip_names[node['metadata']['annotations']['rke.cattle.io/external-ip']] = node['metadata']['name']
for ip in allowed_ips:
node_name = node_ip_names.get(ip)
if node_name:
subprocess.check_call(['kubectl', 'label', 'node', node_name, 'oknesset-allowed-ip=true', '--overwrite'])
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
ef51eee9d65771338c539f401afc3b2486178d7d | eb676eb94cd3d21d684108d25249f1d1437ccf50 | /tools/data/dota/split/img_split.py | f535652f073e4a6216d3b60327800a4f0005e600 | [
"Apache-2.0"
] | permissive | Ixiaohuihuihui/AO2-DETR | c11d5f2115180712907976b6a76d1526ba02f134 | bb9ae812c804260900b101f417e58d247eaa8ad7 | refs/heads/master | 2023-05-23T21:36:04.978670 | 2023-03-16T03:25:03 | 2023-03-16T03:25:03 | 496,444,703 | 62 | 9 | null | null | null | null | UTF-8 | Python | false | false | 19,534 | py | # Copyright (c) OpenMMLab. All rights reserved.
# Written by jbwang1997
# Reference: https://github.com/jbwang1997/BboxToolkit
import argparse
import codecs
import datetime
import itertools
import json
import logging
import os
import os.path as osp
import time
from functools import partial, reduce
from math import ceil
from multiprocessing import Manager, Pool
import cv2
import numpy as np
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
try:
import shapely.geometry as shgeo
except ImportError:
shgeo = None
def add_parser(parser):
"""Add arguments."""
parser.add_argument(
'--base_json',
type=str,
default=None,
help='json config file for split images')
parser.add_argument(
        '--nproc', type=int, default=10, help='the number of worker processes')
# argument for loading data
parser.add_argument(
'--load_type', type=str, default=None, help='loading function type')
parser.add_argument(
'--img_dirs',
nargs='+',
type=str,
default=None,
help='images dirs, must give a value')
parser.add_argument(
'--ann_dirs',
nargs='+',
type=str,
default=None,
help='annotations dirs, optional')
parser.add_argument(
'--classes',
nargs='+',
type=str,
default=None,
help='the classes and order for loading data')
# argument for splitting image
parser.add_argument(
'--sizes',
nargs='+',
type=int,
default=[1024],
help='the sizes of sliding windows')
parser.add_argument(
'--gaps',
nargs='+',
type=int,
default=[512],
        help='the steps of sliding windows')
parser.add_argument(
'--rates',
nargs='+',
type=float,
default=[1.],
help='same as DOTA devkit rate, but only change windows size')
parser.add_argument(
'--img_rate_thr',
type=float,
default=0.6,
        help='the minimal ratio of image area inside a window to the window area')
parser.add_argument(
'--iof_thr',
type=float,
default=0.7,
        help='the minimal iof between an object and a window')
parser.add_argument(
'--no_padding',
action='store_true',
help='not padding patches in regular size')
parser.add_argument(
'--padding_value',
nargs='+',
type=int,
default=[0],
help='padding value, 1 or channel number')
# argument for saving
parser.add_argument(
'--save_dir',
type=str,
default='.',
help='to save pkl and split images')
parser.add_argument(
'--save_ext',
type=str,
default='.png',
help='the extension of saving images')
def parse_args():
"""Parse arguments."""
parser = argparse.ArgumentParser(description='Splitting images')
add_parser(parser)
args = parser.parse_args()
if args.base_json is not None:
with open(args.base_json, 'r') as f:
prior_config = json.load(f)
for action in parser._actions:
if action.dest not in prior_config or \
not hasattr(action, 'default'):
continue
action.default = prior_config[action.dest]
args = parser.parse_args()
# assert arguments
assert args.load_type is not None, "argument load_type can't be None"
assert args.img_dirs is not None, "argument img_dirs can't be None"
assert args.ann_dirs is None or len(args.ann_dirs) == len(args.img_dirs)
assert len(args.sizes) == len(args.gaps)
assert len(args.sizes) == 1 or len(args.rates) == 1
    assert args.save_ext in ['.png', '.jpg', '.bmp', '.tif']
    assert args.img_rate_thr >= 0 and args.img_rate_thr < 1
    assert args.iof_thr >= 0 and args.iof_thr <= 1
assert not osp.exists(args.save_dir), \
f'{osp.join(args.save_dir)} already exists'
return args
def get_sliding_window(info, sizes, gaps, img_rate_thr):
"""Get sliding windows.
Args:
info (dict): Dict of image's width and height.
sizes (list): List of window's sizes.
gaps (list): List of window's gaps.
        img_rate_thr (float): Threshold of image area inside window divided by window area.
Returns:
list[np.array]: Information of valid windows.
"""
eps = 0.01
windows = []
width, height = info['width'], info['height']
for size, gap in zip(sizes, gaps):
        assert size > gap, f'invalid size gap pair [{size} {gap}]'
step = size - gap
x_num = 1 if width <= size else ceil((width - size) / step + 1)
x_start = [step * i for i in range(x_num)]
if len(x_start) > 1 and x_start[-1] + size > width:
x_start[-1] = width - size
y_num = 1 if height <= size else ceil((height - size) / step + 1)
y_start = [step * i for i in range(y_num)]
if len(y_start) > 1 and y_start[-1] + size > height:
y_start[-1] = height - size
start = np.array(
list(itertools.product(x_start, y_start)), dtype=np.int64)
stop = start + size
windows.append(np.concatenate([start, stop], axis=1))
windows = np.concatenate(windows, axis=0)
img_in_wins = windows.copy()
img_in_wins[:, 0::2] = np.clip(img_in_wins[:, 0::2], 0, width)
img_in_wins[:, 1::2] = np.clip(img_in_wins[:, 1::2], 0, height)
img_areas = (img_in_wins[:, 2] - img_in_wins[:, 0]) * \
(img_in_wins[:, 3] - img_in_wins[:, 1])
win_areas = (windows[:, 2] - windows[:, 0]) * \
(windows[:, 3] - windows[:, 1])
img_rates = img_areas / win_areas
if not (img_rates > img_rate_thr).any():
max_rate = img_rates.max()
img_rates[abs(img_rates - max_rate) < eps] = 1
return windows[img_rates > img_rate_thr]
def poly2hbb(polys):
"""Convert polygons to horizontal bboxes.
Args:
polys (np.array): Polygons with shape (N, 8)
Returns:
np.array: Horizontal bboxes.
"""
shape = polys.shape
polys = polys.reshape(*shape[:-1], shape[-1] // 2, 2)
lt_point = np.min(polys, axis=-2)
rb_point = np.max(polys, axis=-2)
return np.concatenate([lt_point, rb_point], axis=-1)
def bbox_overlaps_iof(bboxes1, bboxes2, eps=1e-6):
"""Compute bbox overlaps (iof).
Args:
bboxes1 (np.array): Horizontal bboxes1.
bboxes2 (np.array): Horizontal bboxes2.
eps (float, optional): Defaults to 1e-6.
Returns:
np.array: Overlaps.
"""
rows = bboxes1.shape[0]
cols = bboxes2.shape[0]
if rows * cols == 0:
return np.zeros((rows, cols), dtype=np.float32)
hbboxes1 = poly2hbb(bboxes1)
hbboxes2 = bboxes2
hbboxes1 = hbboxes1[:, None, :]
lt = np.maximum(hbboxes1[..., :2], hbboxes2[..., :2])
rb = np.minimum(hbboxes1[..., 2:], hbboxes2[..., 2:])
wh = np.clip(rb - lt, 0, np.inf)
h_overlaps = wh[..., 0] * wh[..., 1]
l, t, r, b = [bboxes2[..., i] for i in range(4)]
polys2 = np.stack([l, t, r, t, r, b, l, b], axis=-1)
if shgeo is None:
raise ImportError('Please run "pip install shapely" '
'to install shapely first.')
sg_polys1 = [shgeo.Polygon(p) for p in bboxes1.reshape(rows, -1, 2)]
sg_polys2 = [shgeo.Polygon(p) for p in polys2.reshape(cols, -1, 2)]
overlaps = np.zeros(h_overlaps.shape)
for p in zip(*np.nonzero(h_overlaps)):
overlaps[p] = sg_polys1[p[0]].intersection(sg_polys2[p[-1]]).area
unions = np.array([p.area for p in sg_polys1], dtype=np.float32)
unions = unions[..., None]
unions = np.clip(unions, eps, np.inf)
outputs = overlaps / unions
if outputs.ndim == 1:
outputs = outputs[..., None]
return outputs
def get_window_obj(info, windows, iof_thr):
"""
Args:
info (dict): Dict of bbox annotations.
windows (np.array): information of sliding windows.
iof_thr (float): Threshold of overlaps between bbox and window.
Returns:
list[dict]: List of bbox annotations of every window.
"""
bboxes = info['ann']['bboxes']
iofs = bbox_overlaps_iof(bboxes, windows)
window_anns = []
for i in range(windows.shape[0]):
win_iofs = iofs[:, i]
pos_inds = np.nonzero(win_iofs >= iof_thr)[0].tolist()
win_ann = dict()
for k, v in info['ann'].items():
try:
win_ann[k] = v[pos_inds]
except TypeError:
win_ann[k] = [v[i] for i in pos_inds]
win_ann['trunc'] = win_iofs[pos_inds] < 1
window_anns.append(win_ann)
return window_anns
def crop_and_save_img(info, windows, window_anns, img_dir, no_padding,
padding_value, save_dir, anno_dir, img_ext):
"""
Args:
info (dict): Image's information.
windows (np.array): information of sliding windows.
window_anns (list[dict]): List of bbox annotations of every window.
img_dir (str): Path of images.
no_padding (bool): If True, no padding.
padding_value (tuple[int|float]): Padding value.
save_dir (str): Save filename.
anno_dir (str): Annotation filename.
img_ext (str): Picture suffix.
Returns:
list[dict]: Information of paths.
"""
img = cv2.imread(osp.join(img_dir, info['filename']))
patch_infos = []
for i in range(windows.shape[0]):
patch_info = dict()
for k, v in info.items():
            if k not in ['id', 'filename', 'width', 'height', 'ann']:
patch_info[k] = v
window = windows[i]
x_start, y_start, x_stop, y_stop = window.tolist()
patch_info['x_start'] = x_start
patch_info['y_start'] = y_start
patch_info['id'] = \
info['id'] + '__' + str(x_stop - x_start) + \
'__' + str(x_start) + '___' + str(y_start)
patch_info['ori_id'] = info['id']
ann = window_anns[i]
ann['bboxes'] = translate(ann['bboxes'], -x_start, -y_start)
patch_info['ann'] = ann
patch = img[y_start:y_stop, x_start:x_stop]
if not no_padding:
height = y_stop - y_start
width = x_stop - x_start
if height > patch.shape[0] or width > patch.shape[1]:
padding_patch = np.empty((height, width, patch.shape[-1]),
dtype=np.uint8)
if not isinstance(padding_value, (int, float)):
assert len(padding_value) == patch.shape[-1]
padding_patch[...] = padding_value
padding_patch[:patch.shape[0], :patch.shape[1], ...] = patch
patch = padding_patch
patch_info['height'] = patch.shape[0]
patch_info['width'] = patch.shape[1]
cv2.imwrite(osp.join(save_dir, patch_info['id'] + img_ext), patch)
patch_info['filename'] = patch_info['id'] + img_ext
patch_infos.append(patch_info)
bboxes_num = patch_info['ann']['bboxes'].shape[0]
outdir = os.path.join(anno_dir, patch_info['id'] + '.txt')
with codecs.open(outdir, 'w', 'utf-8') as f_out:
if bboxes_num == 0:
pass
else:
for idx in range(bboxes_num):
obj = patch_info['ann']
outline = ' '.join(list(map(str, obj['bboxes'][idx])))
diffs = str(
obj['diffs'][idx]) if not obj['trunc'][idx] else '2'
outline = outline + ' ' + obj['labels'][idx] + ' ' + diffs
f_out.write(outline + '\n')
return patch_infos
def single_split(arguments, sizes, gaps, img_rate_thr, iof_thr, no_padding,
padding_value, save_dir, anno_dir, img_ext, lock, prog, total,
logger):
"""
Args:
arguments (object): Parameters.
sizes (list): List of window's sizes.
gaps (list): List of window's gaps.
        img_rate_thr (float): Threshold of image area inside window divided by window area.
iof_thr (float): Threshold of overlaps between bbox and window.
no_padding (bool): If True, no padding.
padding_value (tuple[int|float]): Padding value.
save_dir (str): Save filename.
anno_dir (str): Annotation filename.
img_ext (str): Picture suffix.
lock (object): Lock of Manager.
prog (object): Progress of Manager.
total (object): Length of infos.
logger (object): Logger.
Returns:
list[dict]: Information of paths.
"""
info, img_dir = arguments
windows = get_sliding_window(info, sizes, gaps, img_rate_thr)
window_anns = get_window_obj(info, windows, iof_thr)
patch_infos = crop_and_save_img(info, windows, window_anns, img_dir,
no_padding, padding_value, save_dir,
anno_dir, img_ext)
assert patch_infos
lock.acquire()
prog.value += 1
msg = f'({prog.value / total:3.1%} {prog.value}:{total})'
msg += ' - ' + f"Filename: {info['filename']}"
msg += ' - ' + f"width: {info['width']:<5d}"
msg += ' - ' + f"height: {info['height']:<5d}"
msg += ' - ' + f"Objects: {len(info['ann']['bboxes']):<5d}"
msg += ' - ' + f'Patches: {len(patch_infos)}'
logger.info(msg)
lock.release()
return patch_infos
def setup_logger(log_path):
"""Setup logger.
Args:
log_path (str): Path of log.
Returns:
object: Logger.
"""
logger = logging.getLogger('img split')
formatter = logging.Formatter('%(asctime)s - %(message)s')
now = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
log_path = osp.join(log_path, now + '.log')
handlers = [logging.StreamHandler(), logging.FileHandler(log_path, 'w')]
for handler in handlers:
handler.setFormatter(formatter)
handler.setLevel(logging.INFO)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return logger
def translate(bboxes, x, y):
"""Map bboxes from window coordinate back to original coordinate.
Args:
bboxes (np.array): bboxes with window coordinate.
x (float): Deviation value of x-axis.
y (float): Deviation value of y-axis
Returns:
np.array: bboxes with original coordinate.
"""
dim = bboxes.shape[-1]
translated = bboxes + np.array([x, y] * int(dim / 2), dtype=np.float32)
return translated
def load_dota(img_dir, ann_dir=None, nproc=10):
"""Load DOTA dataset.
Args:
img_dir (str): Path of images.
ann_dir (str): Path of annotations.
nproc (int): number of processes.
Returns:
list: Dataset's contents.
"""
assert osp.isdir(img_dir), f'The {img_dir} is not an existing dir!'
assert ann_dir is None or osp.isdir(
ann_dir), f'The {ann_dir} is not an existing dir!'
    print('Starting to load DOTA dataset information.')
start_time = time.time()
_load_func = partial(_load_dota_single, img_dir=img_dir, ann_dir=ann_dir)
if nproc > 1:
pool = Pool(nproc)
contents = pool.map(_load_func, os.listdir(img_dir))
pool.close()
else:
contents = list(map(_load_func, os.listdir(img_dir)))
contents = [c for c in contents if c is not None]
end_time = time.time()
    print(f'Finished loading DOTA, got {len(contents)} images,',
f'using {end_time - start_time:.3f}s.')
return contents
def _load_dota_single(imgfile, img_dir, ann_dir):
"""Load DOTA's single image.
Args:
imgfile (str): Filename of single image.
img_dir (str): Path of images.
ann_dir (str): Path of annotations.
Returns:
dict: Content of single image.
"""
img_id, ext = osp.splitext(imgfile)
if ext not in ['.jpg', '.JPG', '.png', '.tif', '.bmp']:
return None
imgpath = osp.join(img_dir, imgfile)
size = Image.open(imgpath).size
txtfile = None if ann_dir is None else osp.join(ann_dir, img_id + '.txt')
content = _load_dota_txt(txtfile)
content.update(
dict(width=size[0], height=size[1], filename=imgfile, id=img_id))
return content
def _load_dota_txt(txtfile):
"""Load DOTA's txt annotation.
Args:
txtfile (str): Filename of single txt annotation.
Returns:
dict: Annotation of single image.
"""
gsd, bboxes, labels, diffs = None, [], [], []
if txtfile is None:
pass
elif not osp.isfile(txtfile):
print(f"Can't find {txtfile}, treated as empty txtfile")
else:
with open(txtfile, 'r') as f:
for line in f:
if line.startswith('gsd'):
num = line.split(':')[-1]
try:
gsd = float(num)
except ValueError:
gsd = None
continue
items = line.split(' ')
if len(items) >= 9:
bboxes.append([float(i) for i in items[:8]])
labels.append(items[8])
diffs.append(int(items[9]) if len(items) == 10 else 0)
bboxes = np.array(bboxes, dtype=np.float32) if bboxes else \
np.zeros((0, 8), dtype=np.float32)
diffs = np.array(diffs, dtype=np.int64) if diffs else \
np.zeros((0,), dtype=np.int64)
ann = dict(bboxes=bboxes, labels=labels, diffs=diffs)
return dict(gsd=gsd, ann=ann)
def main():
"""Main function of image split."""
args = parse_args()
if args.ann_dirs is None:
args.ann_dirs = [None for _ in range(len(args.img_dirs))]
padding_value = args.padding_value[0] \
if len(args.padding_value) == 1 else args.padding_value
sizes, gaps = [], []
for rate in args.rates:
sizes += [int(size / rate) for size in args.sizes]
gaps += [int(gap / rate) for gap in args.gaps]
save_imgs = osp.join(args.save_dir, 'images')
save_files = osp.join(args.save_dir, 'annfiles')
os.makedirs(save_imgs)
os.makedirs(save_files)
logger = setup_logger(args.save_dir)
print('Loading original data!!!')
infos, img_dirs = [], []
for img_dir, ann_dir in zip(args.img_dirs, args.ann_dirs):
_infos = load_dota(img_dir=img_dir, ann_dir=ann_dir, nproc=args.nproc)
_img_dirs = [img_dir for _ in range(len(_infos))]
infos.extend(_infos)
img_dirs.extend(_img_dirs)
print('Start splitting images!!!')
start = time.time()
manager = Manager()
worker = partial(
single_split,
sizes=sizes,
gaps=gaps,
img_rate_thr=args.img_rate_thr,
iof_thr=args.iof_thr,
no_padding=args.no_padding,
padding_value=padding_value,
save_dir=save_imgs,
anno_dir=save_files,
img_ext=args.save_ext,
lock=manager.Lock(),
prog=manager.Value('i', 0),
total=len(infos),
logger=logger)
if args.nproc > 1:
pool = Pool(args.nproc)
patch_infos = pool.map(worker, zip(infos, img_dirs))
pool.close()
else:
patch_infos = list(map(worker, zip(infos, img_dirs)))
patch_infos = reduce(lambda x, y: x + y, patch_infos)
stop = time.time()
    print(f'Finished splitting images in {int(stop - start)} seconds!!!')
print(f'Total images number: {len(patch_infos)}')
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
b02560b0defdd18eb23f539beec3bddce578c929 | 4382c60f18aba351a2e7cdab7ce2793c2d27717c | /Algorithm 190902/N-Queen.py | 8cb26ea9eb6c4917e91ef390623b09c6de2f3091 | [] | no_license | vxda7/pycharm | e550b1db4cabe1a0fa03e140f33b028ef08bd4cb | ce29f682a923875b62a8c7c0102790eef11ab156 | refs/heads/master | 2020-07-03T11:27:27.807096 | 2019-11-15T08:50:32 | 2019-11-15T08:50:32 | 201,891,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | def find(N):
    # Count the valid N-Queens placements with a depth-first search.
    # queens[r] holds the column of the queen already placed in row r.
    def place(row, queens):
        if row == N:
            return 1
        count = 0
        for col in range(N):
            # The new queen is safe if it shares no column and no diagonal
            # with any queen placed in the rows above it.
            if all(c != col and abs(c - col) != row - r
                   for r, c in enumerate(queens)):
                count += place(row + 1, queens + [col])
        return count
    return place(0, [])
t = int(input())
for tc in range(1, t+1):
N = int(input())
print("#{} {}".format(tc, find(N))) | [
"[email protected]"
] | |
45febe6de65dd3e8d1ad838f075d14c9a9588b72 | baa6ba7246fb214c32451126d521919d5f9f40c5 | /pbrx/cmd/main.py | 22a43cea35cd05e4957e80f794fb70c5a554052d | [
"Apache-2.0"
] | permissive | emonty/shiny-octo-computing-machine | 52838f025fb60c69df78e8f6165d76780ef3c676 | 7fa0dab928196e4f9ef0a5110459e350059e2493 | refs/heads/master | 2020-03-08T12:21:51.197849 | 2018-04-04T19:52:12 | 2018-04-27T21:17:33 | 128,124,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,385 | py | # Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import json
import logging
import logging.config
import os
import sys
try:
import yaml
except ImportError:
yaml = None
from pbrx import containers
from pbrx import siblings
import pbr.version
log = logging.getLogger("pbrx")
def _read_logging_config_file(filename):
if not os.path.exists(filename):
raise ValueError("Unable to read logging config file at %s", filename)
ext = os.path.splitext(filename)[1]
if ext in (".yml", ".yaml"):
if not yaml:
raise ValueError(
"PyYAML not installed but a yaml logging config was provided."
" Install PyYAML, or convert the config to JSON."
)
return yaml.safe_load(open(filename, "r"))
elif ext == ".json":
return json.load(open(filename, "r"))
return filename
def setup_logging(log_config, debug):
if log_config:
config = _read_logging_config_file(log_config)
if isinstance(config, dict):
logging.config.dictConfig(config)
else:
logging.config.fileConfig(config)
else:
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG if debug else logging.INFO)
def main():
parser = argparse.ArgumentParser(
description="pbrx: Utilities for projects using pbr"
)
parser.add_argument(
"--version",
action="version",
version=str(pbr.version.VersionInfo("pbrx")),
)
parser.add_argument(
"--debug", help="Emit debug output", action="store_true"
)
parser.add_argument(
"--log-config",
help="Path to a logging config file. Takes precedence over --debug",
)
subparsers = parser.add_subparsers(
title="commands", description="valid commands", help="additional help"
)
cmd_siblings = subparsers.add_parser(
"install-siblings", help="install sibling packages"
)
cmd_siblings.set_defaults(func=siblings.main)
cmd_siblings.add_argument(
"-c,--constraints",
dest="constraints",
help="Path to constraints file",
required=False,
)
cmd_siblings.add_argument(
"projects", nargs="*", help="List of project src dirs to process"
)
cmd_containers = subparsers.add_parser(
"build-containers", help="build per-process container images"
)
cmd_containers.set_defaults(func=containers.build)
cmd_containers.add_argument(
"--prefix", help="Organization prefix containers will be published to"
)
args = parser.parse_args()
setup_logging(args.log_config, args.debug)
try:
return args.func(args)
except Exception as e:
log.exception(str(e))
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
| [
"[email protected]"
] | |
74bdee5b7b4f80809a44c39f4990a698827c4318 | 85d380bc1fa9b5d091caab98951fec2bf7ae0407 | /hog_von_mises.py | ead3541b58e1645770d071fb461c792f1a2fddc4 | [] | no_license | bbbales2/faehrmann_hogs | 6b0469351d6a1e749fe00399474ee0466ab94fcb | 622ec582501041ff5e6dd50526d0e3d91de29f53 | refs/heads/master | 2020-02-26T13:39:05.916838 | 2016-06-23T23:20:30 | 2016-06-23T23:20:30 | 61,756,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,279 | py | #%%
import pystan
import matplotlib.pyplot as plt
import numpy
import os
import itertools
import math
import scipy.integrate
import mahotas
import collections
import skimage.measure, skimage.io, skimage.feature, skimage.util, skimage.filters, skimage.transform
import seaborn
import random
# Alternative input images:
# '/home/bbales2/microhog/rafting_rotated_2d/2na/9/signalx.png'
# '/home/bbales2/microhog/rafting_rotated_2d/ah/9/signalx.png'
# '/home/bbales2/web/hog/static/images/renen5strain02.png'
# '/home/bbales2/web/hog/static/images/renen5strain22.png'
ims2 = []
for path in ['/home/bbales2/web/hog/static/images/molybdenum0.png',
'/home/bbales2/web/hog/static/images/molybdenum1.png']:
im = skimage.io.imread(path, as_grey = True).astype('float')
im = skimage.transform.rescale(im, 0.25)
im -= im.mean()
im /= im.std()
stats = []
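    # Per-pixel image gradients via central differences (one-sided at the
    # borders); keep the orientation shifted into [0, 2*pi) and the magnitude.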
for i in range(im.shape[0]):
for j in range(im.shape[1]):
if i == 0:
dy = im[i + 1, j] - im[i, j]
elif i == im.shape[0] - 1:
dy = im[i, j] - im[i - 1, j]
else:
dy = (im[i + 1, j] - im[i - 1, j]) / 2.0
if j == 0:
dx = im[i, j + 1] - im[i, j]
elif j == im.shape[1] - 1:
dx = im[i, j] - im[i, j - 1]
else:
dx = (im[i, j + 1] - im[i, j - 1]) / 2.0
angle = (numpy.arctan2(dy, dx) + numpy.pi)# / (2.0 * numpy.pi)
mag = numpy.sqrt(dy**2 + dx**2)
stats.append((angle, mag))
stats = numpy.array(stats)
plt.imshow(im)
plt.show()
1/0
hog = microstructure.features.hog2(im, bins = 20, stride = 1, sigma = 1.0)
#%%
stats[:, 0] = stats[:, 0] - stats[:, 0].min()
#%%
idxs = range(len(stats))
random.shuffle(idxs)
seaborn.distplot(stats[idxs[:10000], 0])
plt.show()
seaborn.distplot(stats[idxs[:10000], 1])
plt.show()
idxs = numpy.argsort(stats[:, 1])[-2000:]
seaborn.distplot(stats[idxs, 0])
plt.show()
#%%
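# Stan model: gradient orientations are modelled as a mixture of K von Mises
# components plus a uniform background on [0, 2*pi); theta are the mixture
# weights and the generated quantities block draws posterior predictive angles.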
model_code = """
data {
int<lower=1> K; //Number of Von Mises distributions to fit
int<lower=1> N;
real<lower=0.0, upper=2.0 * pi()> y[N];
}
parameters {
real<lower=0.0, upper=2.0 * pi()> mu[K];
simplex[K + 1] theta;
real<lower=0.0> kappa[K];
}
model {
real ps[K + 1];
for (k in 1:K) {
kappa[k] ~ normal(5.0, 10.0);
}
for (n in 1:N) {
for (k in 1:K) {
ps[k] <- log(theta[k]) + von_mises_log(y[n], mu[k], kappa[k]);
}
ps[K + 1] <- log(theta[K + 1]) + uniform_log(y[n], 0.0, 2.0 * pi());
increment_log_prob(log_sum_exp(ps));
}
}
generated quantities {
real out;
{
int k;
k <- categorical_rng(theta);
if (k <= K)
{
out <- von_mises_rng(mu[k], kappa[k]);
}
else
{
out <- uniform_rng(0.0, 2.0 * pi());
}
}
}
"""
sm = pystan.StanModel(model_code = model_code)
#%%
N = 100
idxs2 = range(len(idxs))
random.shuffle(idxs2)
seaborn.distplot(stats[idxs[idxs2[:N]], 0], bins = 20)
plt.show()
samples = stats[idxs[idxs2[:N]], 0]
#%%
fit = sm.sampling(data = {
'K' : 4,
'N' : N,
'y' : samples
})
#%%
print fit
plt.hist(fit.extract()['out'][2000:] % (2.0 * numpy.pi), normed = True, alpha = 0.5)
plt.hist(samples, normed = True, alpha = 0.5)
plt.show()
| [
"[email protected]"
] | |
b90fc66f5687bd6a65b348920111d97be910d581 | 0a2cc497665f2a14460577f129405f6e4f793791 | /sdk/sql/azure-mgmt-sql/azure/mgmt/sql/aio/operations/_managed_instance_long_term_retention_policies_operations.py | 7360ed2e31356432ffc3fcbe52b5dc8c530e3b68 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | hivyas/azure-sdk-for-python | 112158aa9e1dd6e30cf6b3dde19f5db6ea2a577b | 8b3258fa45f5dc25236c22ad950e48aa4e1c181c | refs/heads/master | 2023-06-17T12:01:26.392186 | 2021-05-18T19:56:01 | 2021-05-18T19:56:01 | 313,761,277 | 1 | 1 | MIT | 2020-12-02T17:48:22 | 2020-11-17T22:42:00 | Python | UTF-8 | Python | false | false | 17,947 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagedInstanceLongTermRetentionPoliciesOperations:
"""ManagedInstanceLongTermRetentionPoliciesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.sql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
managed_instance_name: str,
database_name: str,
policy_name: Union[str, "_models.ManagedInstanceLongTermRetentionPolicyName"],
**kwargs
) -> "_models.ManagedInstanceLongTermRetentionPolicy":
"""Gets a managed database's long term retention policy.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance.
:type managed_instance_name: str
:param database_name: The name of the database.
:type database_name: str
:param policy_name: The policy name. Should always be Default.
:type policy_name: str or ~azure.mgmt.sql.models.ManagedInstanceLongTermRetentionPolicyName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedInstanceLongTermRetentionPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.ManagedInstanceLongTermRetentionPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedInstanceLongTermRetentionPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'managedInstanceName': self._serialize.url("managed_instance_name", managed_instance_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedInstanceLongTermRetentionPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/backupLongTermRetentionPolicies/{policyName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
managed_instance_name: str,
database_name: str,
policy_name: Union[str, "_models.ManagedInstanceLongTermRetentionPolicyName"],
parameters: "_models.ManagedInstanceLongTermRetentionPolicy",
**kwargs
) -> Optional["_models.ManagedInstanceLongTermRetentionPolicy"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ManagedInstanceLongTermRetentionPolicy"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'managedInstanceName': self._serialize.url("managed_instance_name", managed_instance_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ManagedInstanceLongTermRetentionPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ManagedInstanceLongTermRetentionPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/backupLongTermRetentionPolicies/{policyName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
managed_instance_name: str,
database_name: str,
policy_name: Union[str, "_models.ManagedInstanceLongTermRetentionPolicyName"],
parameters: "_models.ManagedInstanceLongTermRetentionPolicy",
**kwargs
) -> AsyncLROPoller["_models.ManagedInstanceLongTermRetentionPolicy"]:
"""Sets a managed database's long term retention policy.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance.
:type managed_instance_name: str
:param database_name: The name of the database.
:type database_name: str
:param policy_name: The policy name. Should always be Default.
:type policy_name: str or ~azure.mgmt.sql.models.ManagedInstanceLongTermRetentionPolicyName
:param parameters: The long term retention policy info.
:type parameters: ~azure.mgmt.sql.models.ManagedInstanceLongTermRetentionPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ManagedInstanceLongTermRetentionPolicy or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.sql.models.ManagedInstanceLongTermRetentionPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedInstanceLongTermRetentionPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
database_name=database_name,
policy_name=policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ManagedInstanceLongTermRetentionPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'managedInstanceName': self._serialize.url("managed_instance_name", managed_instance_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/backupLongTermRetentionPolicies/{policyName}'} # type: ignore
def list_by_database(
self,
resource_group_name: str,
managed_instance_name: str,
database_name: str,
**kwargs
) -> AsyncIterable["_models.ManagedInstanceLongTermRetentionPolicyListResult"]:
"""Gets a database's long term retention policy.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance.
:type managed_instance_name: str
:param database_name: The name of the database.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedInstanceLongTermRetentionPolicyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sql.models.ManagedInstanceLongTermRetentionPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedInstanceLongTermRetentionPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_database.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'managedInstanceName': self._serialize.url("managed_instance_name", managed_instance_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ManagedInstanceLongTermRetentionPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_database.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/backupLongTermRetentionPolicies'} # type: ignore
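    # [Added illustration - not part of the generated client code]
    # A minimal, hedged sketch of consuming the async pager returned by
    # list_by_database above, assuming `ops` is an instance of this operations class:
    #
    #     async for policy in ops.list_by_database(
    #         resource_group_name="my-rg",
    #         managed_instance_name="my-instance",
    #         database_name="my-db",
    #     ):
    #         print(policy.name)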
| [
"[email protected]"
] | |
c22f182daa3f1e38aa9cc338dd18a375ab6e398c | 13d0ad57a2f5deb83593e73843be7cbeeaad8d3d | /medium/longest_palindromic_substring.py | 50d56789db9b059cf53561ba450b02bb4226062c | [] | no_license | mwong33/leet-code-practice | b21f277d73b30df9e681499733baad07979480a1 | 9c0e6294bf3b3614b185f0760906abad60f8d9b6 | refs/heads/main | 2023-03-29T20:35:43.841662 | 2021-03-31T22:05:44 | 2021-03-31T22:05:44 | 317,382,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | class Solution:
# O(n^2) time O(1) space
def longestPalindrome(self, s: str) -> str:
if len(s) <= 1:
            return s  # also covers the empty-string case; identical for length-1 input
start = 0
end = 0
for i in range(len(s)):
length_1 = self.middleOut(s, i, i)
length_2 = self.middleOut(s, i, i+1)
max_length = max(length_1, length_2)
if max_length > (end - start + 1):
start = i - ((max_length-1)//2)
end = i + (max_length//2)
return s[start:end+1]
def middleOut(self, s, start, end):
while start >= 0 and end < len(s) and s[start] == s[end]:
start -= 1
end += 1
return end - start - 1
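# A minimal usage sketch (added for illustration; the inputs are arbitrary examples):
if __name__ == "__main__":
    solution = Solution()
    print(solution.longestPalindrome("babad"))  # -> "bab" ("aba" would also be a valid answer)
    print(solution.longestPalindrome("cbbd"))   # -> "bb"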
| [
"[email protected]"
] | |
1ad003672fb2b8b55a75c3dcdb098c88cb9ebd98 | 4ace4d5a94ab0db79562f1b23edd6011a89148c6 | /src/airflow-stubs/contrib/task_runner/cgroup_task_runner.pyi | 4414ee2cae524c1466ecef790cd589d60586a9be | [
"MIT"
] | permissive | viewthespace/mypy-stubs | 9abebc2eab2b46b2230842f06114673e1a4de052 | 182fa275c4a7011eb5345694b88229adbddcc999 | refs/heads/master | 2023-06-07T18:52:46.739560 | 2023-06-01T22:05:27 | 2023-06-01T22:05:45 | 236,780,299 | 0 | 0 | MIT | 2022-01-11T20:53:55 | 2020-01-28T16:23:07 | Python | UTF-8 | Python | false | false | 551 | pyi | from airflow.task.task_runner.base_task_runner import BaseTaskRunner as BaseTaskRunner
from airflow.utils.helpers import reap_process_group as reap_process_group
from airflow.utils.operator_resources import Resources as Resources
from typing import Any
class CgroupTaskRunner(BaseTaskRunner):
process: Any
def __init__(self, local_task_job) -> None: ...
mem_cgroup_name: Any
cpu_cgroup_name: Any
def start(self) -> None: ...
def return_code(self): ...
def terminate(self) -> None: ...
def on_finish(self) -> None: ...
| [
"[email protected]"
] | |
507ee908ce5b75fda95f6fe95550269860a2ecbb | 1ce26dbce1da7dabb87e275ff9f49a6988a34b0b | /shops/models.py | 8789cd51b1e8525aad8d6eec6c3013595214bf11 | [] | no_license | eyobofficial/clothing-shop-app | 38d48ae12fb24265aac7ecbad650c41c785daf1c | 5bfc35f765b4ebf6916c306341597d217be60a1d | refs/heads/master | 2020-03-18T01:15:46.482548 | 2018-05-20T09:02:14 | 2018-05-20T09:02:14 | 134,135,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,624 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.contrib.auth import get_user_model
from django.conf import settings
from django.urls import reverse
def get_deleted_user():
return get_user_model().objects.get_or_create(username='Deleted')[0]
class CustomUser(AbstractUser):
pass
class Base(models.Model):
"""
Base abstract model for all other models to inherit from
"""
created_at = models.DateTimeField(
'Created date',
auto_now_add=True,
help_text='Record created date and time.'
)
updated_at = models.DateTimeField(
'Modified date',
auto_now=True,
help_text='Record last modified date and time'
)
class Meta:
abstract = True
class Shop(Base):
"""
Models a virtual shop
"""
name = models.CharField(max_length=100)
slug = models.SlugField()
logo = models.ImageField(
upload_to='shops/logo/',
null=True, blank=True
)
description = models.TextField('Short description', blank=True)
class Meta:
get_latest_by = ['updated_at', ]
def __str__(self):
return self.name
def get_absolute_url(self, *args, **kwargs):
return reverse('shops:shop-detail', args=[str(self.pk)])
class Catagory(Base):
"""
Models Product Catagory
Example: Dress, Shoes, Leather etc...
"""
name = models.CharField(max_length=100)
slug = models.SlugField()
description = models.TextField('Short description', blank=True)
class Meta:
get_latest_by = ['-updated_at', ]
def __str__(self):
        return self.name
def get_absolute_url(self, *args, **kwargs):
return reverse('shops:catagory-detail', args=[str(self.pk)])
class Tag(Base):
"""
Models a product Tag
Example: leather, oldies, modern, jano etc...
"""
name = models.CharField(max_length=30)
def __str__(self):
return self.name
class Product(Base):
"""
Models a Product
"""
GENDER_OPTIONS = (
('M', 'Male'),
('F', 'Female'),
)
AGE_OPTIONS = (
('A', 'Adults'),
('K', 'Kids'),
)
shop = models.ForeignKey(
Shop,
related_name='products',
on_delete=models.CASCADE,
)
catagory = models.ForeignKey(
Catagory,
related_name='products',
on_delete=models.CASCADE,
)
name = models.CharField(max_length=100)
slug = models.SlugField()
description = models.TextField(blank=True)
tags = models.ManyToManyField(
Tag,
blank=True, related_name='products',
)
gender = models.CharField(max_length=1, choices=GENDER_OPTIONS)
age = models.CharField(max_length=1, choices=AGE_OPTIONS)
price = models.DecimalField(max_digits=10, decimal_places=2)
is_on_sale = models.BooleanField('On sale', default=False)
is_featured = models.BooleanField('Featured', default=False)
thumbnail = models.ImageField(upload_to='shops/products/')
publish = models.BooleanField(
default=True,
help_text='Publish product to the public'
)
class Meta:
order_with_respect_to = 'shop'
get_latest_by = ['-updated_at', ]
def __str__(self):
return self.name
def get_absolute_url(self, *args, **kwargs):
return reverse('shops:product-detail', args=[str(self.pk)])
class ProductPicture(Base):
product = models.ForeignKey(
Product,
related_name='pictures',
on_delete=models.CASCADE
)
picture = models.ImageField(
upload_to='shops/products/pictures',
blank=True, null=True,
)
class Meta:
get_latest_by = ['-updated_at', ]
order_with_respect_to = 'product'
def __str__(self):
return '{} product pic #{}'.format(self.product, self.pk)
class Inventory(Base):
"""
Models the product inventory data
"""
product = models.ForeignKey(
Product,
related_name='inventory',
on_delete=models.CASCADE
)
color = models.CharField(max_length=100, blank=True)
size = models.CharField(max_length=100, blank=True)
stock = models.PositiveIntegerField('Available in stock')
class Meta:
order_with_respect_to = 'product'
def __str__(self):
return 'Inventory for {}'.format(self.product)
def get_absolute_url(self, *args, **kwargs):
return reverse('shops:inventory-detail', args=[str(self.pk)])
class DeliveryMethod(Base):
"""
Models a product delivery method type
"""
name = models.CharField(max_length=100)
icon = models.CharField(max_length=100, blank=True)
description = models.TextField(blank=True)
price = models.DecimalField(max_digits=6, decimal_places=2)
class Meta:
get_latest_by = ['updated_at', ]
def __str__(self):
return self.name
class PaymentMethod(Base):
"""
Models a payment method type
"""
name = models.CharField(max_length=100)
icon = models.CharField(max_length=100, blank=True)
description = models.TextField(blank=True)
class Meta:
get_latest_by = ['updated_at', ]
def __str__(self):
return self.name
class Order(Base):
cutomer = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name='orders',
on_delete=models.SET(get_deleted_user)
)
delivery_method = models.ForeignKey(
DeliveryMethod,
related_name='orders',
null=True, on_delete=models.SET_NULL,
)
order_date = models.DateTimeField()
is_delivered = models.BooleanField('Delivery status', default=False)
class Meta:
get_latest_by = ['-order_date', ]
ordering = ['-order_date', 'is_delivered', ]
def __str__(self):
return 'Order No. {}'.format(self.pk)
def get_absolute_url(self, *args, **kwargs):
return reverse('shops:order-detail', args=[str(self.pk)])
class OrderList(Base):
"""
Models the an ordered product with respect to an Order object
"""
order = models.ForeignKey(
Order,
related_name='ordered_lists',
on_delete=models.CASCADE
)
product = models.ForeignKey(
Product,
related_name='ordered_lists',
null=True, on_delete=models.SET_NULL
)
qty = models.PositiveIntegerField('Quantity', default=1)
amount = models.DecimalField(max_digits=8, decimal_places=2)
class Meta:
order_with_respect_to = 'order'
get_latest_by = ['-updated_at', ]
def __str__(self):
return '{} of {}'.format(self.product, self.order)
class Payment(Base):
"""
Models a payment made for an order
"""
order = models.ForeignKey(
Order,
related_name='payments',
null=True, on_delete=models.SET_NULL,
)
payment_method = models.ForeignKey(
PaymentMethod,
related_name='payments',
null=True, on_delete=models.SET_NULL
)
subtotal = models.DecimalField(max_digits=8, decimal_places=2)
delivery_amount = models.DecimalField(max_digits=6, decimal_places=2)
tax = models.DecimalField(max_digits=6, decimal_places=2)
payment_date = models.DateTimeField()
is_payment_completed = models.BooleanField(default=False)
class Meta:
get_latest_by = ['-updated_at', ]
ordering = ['is_payment_completed', '-updated_at']
def __str__(self):
return 'Payment for {}'.format(self.order)
def get_absolute_url(self, *args, **kwargs):
return reverse('shops:payment-detail', args=[str(self.pk)])
| [
"[email protected]"
] | |
91231887766eacf8e685b57bd6b7f460b361dead | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_155/798.py | 443a65047771e9eece0a19b643b46c7730fc197c | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | import sys
def read_case(l):
au = l.strip().split()[1]
return [int(x) for x in au]
def cant(c):
inv = [i - sum(c[:i]) for i in range(len(c))]
return max(0, max(inv))
def rint():
return int(rline())
def rline():
global linenr
linenr += 1
return stdin[linenr - 1]
global stdin
global linenr
stdin = sys.stdin.readlines()
linenr = 0
cases = rint()
case = 1
while linenr < len(stdin):
c = read_case(rline())
print 'Case #{0}: {1}'.format(case, cant(c))
case += 1
| [
"[email protected]"
] | |
e745c490084e729003b83b40f597a6e997348317 | bc2df7e370e70aa3ccdab80bcecd5379d8ca222c | /bin/base/stuydy_recover.py | 257f038f1c7b201773df4497d9b1f00ca1704412 | [] | no_license | windyStreet/el-OAMP | 092fe39e938ff2bf499ea5790e3914e359ec2069 | 9a986629daab6b24722a7e18ea0e6593a77d451d | refs/heads/master | 2023-05-05T06:27:49.595082 | 2019-10-15T03:46:53 | 2019-10-15T03:46:53 | 371,572,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,468 | py | #!/usr/bin/env python
# !-*- coding:utf-8 -*-
from bin.base.tool import RabbitMQ
from pymongo import MongoClient
from bin import init
if __name__ == '__main__':
conn = MongoClient('211.154.149.99', 27017)
db = conn.BBT_TrainRecord
record_set = db.processRecord
ds = {
"toolUser": "longrise",
# "toolPassword": "longrise",
# "toolHost": "192.168.7.219",
"toolPort": 5672,
"toolName": "",
'toolHost': '211.154.149.99',
'toolPassword': 'uCSXBwi7KJfKIx4x',
}
init.CONF_INFO = {
'study_recover': ds
}
RM = RabbitMQ.getInstance(ds='study_recover')
start = 0
end = 5000000
limit = 1000
n = 0
while (True):
offset = start + limit * n
if offset >= end:
break
for re_dict in record_set.find({"studyend": {"$gt": "2019-06-13 10:00:00.000", '$lt': "2019-06-14 10:00:00"}, "isvideopass": "1"}).sort('createtime').skip(offset).limit(limit):
re = {
'id': re_dict.get("recordid"),
'cwid': re_dict.get('cwid'),
'cardno': re_dict.get('cardno'),
'studentno': re_dict.get('studentno'),
'effecttime': '1',
'stuclientip': re_dict.get('ip'),
'stuclientmacinfo': "BBAPP_MQ_" + re_dict.get('comfrom2'),
}
RM.sendMsg(queue='APP_onlineUpdate', msg=re)
print(offset)
n = n + 1
| [
"[email protected]"
] | |
1c4dc3f363cf67370ac7a4c684739dccada90fdc | f6db8d85a3b41eed543959314d65927353a8229c | /W5/geolocation/migrations/0005_auto_20201207_1442.py | 687464ec4b4eab5b7af85f1f1b57254ccd0985cc | [] | no_license | NFEL/DjangoPaeez99 | d573cc8e36500f08bc104d76f7a2628062d86c2f | 621636bfb47d71f2a4f45037b7264dd5ebc7cdd7 | refs/heads/main | 2023-01-27T22:05:57.788049 | 2020-12-08T10:08:28 | 2020-12-08T10:08:28 | 304,553,353 | 1 | 2 | null | 2020-10-16T07:33:04 | 2020-10-16T07:33:03 | null | UTF-8 | Python | false | false | 599 | py | # Generated by Django 3.1.2 on 2020-12-07 14:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('geolocation', '0004_address_lo'),
]
operations = [
migrations.RenameField(
model_name='address',
old_name='lo',
new_name='service_area',
),
migrations.AddField(
model_name='address',
name='service_radius',
field=models.DecimalField(decimal_places=3, max_digits=5, null=True, verbose_name='شعاع فعالیت'),
),
]
| [
"[email protected]"
] | |
f86ee34060570d42f42a0bd9f70543ce71859a55 | 5fd658211a0951e287742973618012b7d9f89e43 | /tennisblock/webapp/management/commands/local_host_entries.py | e9a7e7ac422add274661f5b80c3b0b1c2b7e0986 | [] | no_license | sharpertool/tennisblock | 6d14000d3d709ec339124e893ffc8a7cdfe73d8d | 82bec4179a5c487a588ff10d910c6c7a9c1014d6 | refs/heads/master | 2022-03-05T13:03:43.728596 | 2021-02-15T06:37:53 | 2021-02-15T06:37:53 | 148,848,726 | 1 | 0 | null | 2022-02-10T14:00:37 | 2018-09-14T22:43:16 | JavaScript | UTF-8 | Python | false | false | 1,340 | py | from os.path import exists
import re
from shutil import copy
from django.core.management.base import BaseCommand
from wagtail.core.models import Site
class Command(BaseCommand):
help = 'Generate output to be added to /etc/hosts file'
def add_arguments(self, parser):
parser.add_argument('--env', help='''
Point this to your .env.local file and it will update the allowed host entries
''')
def handle(self, *args, **options):
allowed_hosts = list(
Site.objects.values_list('hostname', flat=True).all())
etc_hosts = [f"127.0.0.1 {x}" for x in allowed_hosts]
hosts_lines = "\n".join(etc_hosts)
etc_data = "# tennisblock.local begin\n" + hosts_lines + "\n# tennisblock.local end\n"
print("Insert the following lines into your /etc/hosts file:")
print(etc_data)
envfile = options.get('env', None)
if envfile and exists(envfile):
copy(envfile, envfile + '.bak')
with open(envfile, 'r') as fp:
original = fp.readlines()
with open(envfile, 'w') as fp:
for line in original:
if re.search(r'^DJANGO_ALLOWED_HOSTS', line):
line = f"DJANGO_ALLOWED_HOSTS={','.join(allowed_hosts)}"
fp.write(line)
| [
"[email protected]"
] | |
b5478f5ad21fcd76d7703d0c8a466721f17e07d0 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/stringReduction_20200722190200.py | ef338e3e8ae2885556338eaa1ee2321358b4f04a | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | def string(str):
st = list(str)
count = 0
while count < 20:
for i in range(0,len(st)-1,2):
pair = st[i] + st[i+1]
if pair == 'ab' or pair == 'ba':
st.pop(i)
st.pop(i)
st.insert(i,'c')
break
if pair == 'bc' or pair == 'cb':
st.pop(i)
st.pop(i)
st.insert(i,'a')
break
if pair == 'ac' or pair == 'ca':
st.pop(i)
st.pop(i)
st.insert(i,'b')
break
print(st)
count +=1
print(len(st) // 2 )
string("abcabc") | [
"[email protected]"
] | |
f55287dd1de797ed3a0460fd54cdcb062360a5f4 | 99a4817f852115f2f14d50cc6a99abbdb62c4218 | /Modulos/Modulo 02 - Deep Learning Frameworks/03 - Passoa a Passo/tensorcode08.py | 7071ad1ac1236324ed48246ed099bb9c60c6f37b | [] | no_license | eduardodimperio/formacao-inteligencia-artificial | cac290e1385a9770a7b492ef3e695124b0ac5499 | c5b50bad4908e8423fe384d90929a772f01787c3 | refs/heads/master | 2023-07-03T21:02:17.107564 | 2021-08-07T13:33:34 | 2021-08-07T13:33:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | # Criando Tensores
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
print("\n")
# Creating a tensor filled with zeros.
tensor = tf.zeros(shape = [3, 4], dtype = tf.int32)
print(('Tensor preenchido com zeros como int32, 3 linhas e 4 colunas:\n{0}').format(tensor.numpy()))
print("\n")
# Creating a tensor filled with the value 1 and data type float32.
tensor = tf.ones(shape = [5, 3], dtype = tf.float32)
print(('\nTensor preenchido com valor 1 e float32, 5 linhas e 3 colunas:\n{0}').format(tensor.numpy()))
print("\n")
# Creating a tensor filled with the value 100 and data type float64.
tensor = tf.constant(100, shape = [4, 4], dtype = tf.float64)
print(('\nTensor preenchido com valor 100 e float64, 4 linhas e 4 colunas:\n{0}').format(tensor.numpy()))
print("\n")
# Creating a rank-2 tensor filled with zeros
tensor = tf.Variable(tf.zeros([1, 2]))
print(tensor)
print("\n")
# Assigning values to the tensor created in the previous step
tensor.assign_add([[100, 200]])
print(tensor)
print("\n") | [
"[email protected]"
] | |
07177574b87754a20802a44993ef7080f3706469 | c058f51b99f91faebf27183b2b579e9f96e0d8f5 | /test/optim/utils/test_acquisition_utils.py | f494ef4e7ec3e22cb71b51768ac05bb8d26606d3 | [
"MIT"
] | permissive | pytorch/botorch | 255d62f698cc615c750e9343c278a63c7e96a586 | 4cc5ed59b2e8a9c780f786830c548e05cc74d53c | refs/heads/main | 2023-08-22T15:23:51.071048 | 2023-08-22T05:30:38 | 2023-08-22T05:30:38 | 142,940,093 | 2,891 | 373 | MIT | 2023-09-13T00:16:13 | 2018-07-30T23:59:57 | Jupyter Notebook | UTF-8 | Python | false | false | 10,735 | py | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import warnings
import torch
from botorch import settings
from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction
from botorch.acquisition.monte_carlo import (
qExpectedImprovement,
qNoisyExpectedImprovement,
)
from botorch.acquisition.multi_objective.max_value_entropy_search import (
qMultiObjectiveMaxValueEntropy,
)
from botorch.acquisition.multi_objective.monte_carlo import (
qExpectedHypervolumeImprovement,
qNoisyExpectedHypervolumeImprovement,
)
from botorch.exceptions import BotorchError
from botorch.exceptions.warnings import BotorchWarning
from botorch.models import ModelListGP, SingleTaskGP
from botorch.models.transforms.input import Warp
from botorch.optim.utils import columnwise_clamp, fix_features, get_X_baseline
from botorch.sampling.normal import IIDNormalSampler
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
FastNondominatedPartitioning,
)
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
class TestColumnWiseClamp(BotorchTestCase):
def setUp(self):
super().setUp()
self.X = torch.tensor([[-2, 1], [0.5, -0.5]], device=self.device)
self.X_expected = torch.tensor([[-1, 0.5], [0.5, -0.5]], device=self.device)
def test_column_wise_clamp_scalars(self):
X, X_expected = self.X, self.X_expected
with self.assertRaises(ValueError):
X_clmp = columnwise_clamp(X, 1, -1)
X_clmp = columnwise_clamp(X, -1, 0.5)
self.assertTrue(torch.equal(X_clmp, X_expected))
X_clmp = columnwise_clamp(X, -3, 3)
self.assertTrue(torch.equal(X_clmp, X))
def test_column_wise_clamp_scalar_tensors(self):
X, X_expected = self.X, self.X_expected
with self.assertRaises(ValueError):
X_clmp = columnwise_clamp(X, torch.tensor(1), torch.tensor(-1))
X_clmp = columnwise_clamp(X, torch.tensor(-1), torch.tensor(0.5))
self.assertTrue(torch.equal(X_clmp, X_expected))
X_clmp = columnwise_clamp(X, torch.tensor(-3), torch.tensor(3))
self.assertTrue(torch.equal(X_clmp, X))
def test_column_wise_clamp_tensors(self):
X, X_expected = self.X, self.X_expected
with self.assertRaises(ValueError):
X_clmp = columnwise_clamp(X, torch.ones(2), torch.zeros(2))
with self.assertRaises(RuntimeError):
X_clmp = columnwise_clamp(X, torch.zeros(3), torch.ones(3))
X_clmp = columnwise_clamp(X, torch.tensor([-1, -1]), torch.tensor([0.5, 0.5]))
self.assertTrue(torch.equal(X_clmp, X_expected))
X_clmp = columnwise_clamp(X, torch.tensor([-3, -3]), torch.tensor([3, 3]))
self.assertTrue(torch.equal(X_clmp, X))
def test_column_wise_clamp_full_dim_tensors(self):
X = torch.tensor([[[-1, 2, 0.5], [0.5, 3, 1.5]], [[0.5, 1, 0], [2, -2, 3]]])
lower = torch.tensor([[[0, 0.5, 1], [0, 2, 2]], [[0, 2, 0], [1, -1, 0]]])
upper = torch.tensor([[[1, 1.5, 1], [1, 4, 3]], [[1, 3, 0.5], [3, 1, 2.5]]])
X_expected = torch.tensor(
[[[0, 1.5, 1], [0.5, 3, 2]], [[0.5, 2, 0], [2, -1, 2.5]]]
)
X_clmp = columnwise_clamp(X, lower, upper)
self.assertTrue(torch.equal(X_clmp, X_expected))
X_clmp = columnwise_clamp(X, lower - 5, upper + 5)
self.assertTrue(torch.equal(X_clmp, X))
with self.assertRaises(ValueError):
X_clmp = columnwise_clamp(X, torch.ones_like(X), torch.zeros_like(X))
with self.assertRaises(RuntimeError):
X_clmp = columnwise_clamp(X, lower.unsqueeze(-3), upper.unsqueeze(-3))
def test_column_wise_clamp_raise_on_violation(self):
X = self.X
with self.assertRaises(BotorchError):
X_clmp = columnwise_clamp(
X, torch.zeros(2), torch.ones(2), raise_on_violation=True
)
X_clmp = columnwise_clamp(
X, torch.tensor([-3, -3]), torch.tensor([3, 3]), raise_on_violation=True
)
self.assertTrue(torch.equal(X_clmp, X))
class TestFixFeatures(BotorchTestCase):
def _getTensors(self):
X = torch.tensor([[-2, 1, 3], [0.5, -0.5, 1.0]], device=self.device)
X_null_two = torch.tensor([[-2, 1, 3], [0.5, -0.5, 1.0]], device=self.device)
X_expected = torch.tensor([[-1, 1, -2], [-1, -0.5, -2]], device=self.device)
X_expected_null_two = torch.tensor(
[[-1, 1, 3], [-1, -0.5, 1.0]], device=self.device
)
return X, X_null_two, X_expected, X_expected_null_two
def test_fix_features(self):
X, X_null_two, X_expected, X_expected_null_two = self._getTensors()
X.requires_grad_(True)
X_null_two.requires_grad_(True)
X_fix = fix_features(X, {0: -1, 2: -2})
X_fix_null_two = fix_features(X_null_two, {0: -1, 2: None})
self.assertTrue(torch.equal(X_fix, X_expected))
self.assertTrue(torch.equal(X_fix_null_two, X_expected_null_two))
def f(X):
return X.sum()
f(X).backward()
self.assertTrue(torch.equal(X.grad, torch.ones_like(X)))
X.grad.zero_()
f(X_fix).backward()
self.assertTrue(
torch.equal(
X.grad,
torch.tensor([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]], device=self.device),
)
)
f(X_null_two).backward()
self.assertTrue(torch.equal(X_null_two.grad, torch.ones_like(X)))
X_null_two.grad.zero_()
f(X_fix_null_two).backward()
self.assertTrue(
torch.equal(
X_null_two.grad,
torch.tensor([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]], device=self.device),
)
)
class TestGetXBaseline(BotorchTestCase):
def test_get_X_baseline(self):
tkwargs = {"device": self.device}
for dtype in (torch.float, torch.double):
tkwargs["dtype"] = dtype
X_train = torch.rand(20, 2, **tkwargs)
model = MockModel(
MockPosterior(mean=(2 * X_train + 1).sum(dim=-1, keepdim=True))
)
# test NEI with X_baseline
acqf = qNoisyExpectedImprovement(
model, X_baseline=X_train[:2], prune_baseline=False, cache_root=False
)
X = get_X_baseline(acq_function=acqf)
self.assertTrue(torch.equal(X, acqf.X_baseline))
# test EI without X_baseline
acqf = qExpectedImprovement(model, best_f=0.0)
with warnings.catch_warnings(record=True) as w, settings.debug(True):
X_rnd = get_X_baseline(
acq_function=acqf,
)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, BotorchWarning))
self.assertIsNone(X_rnd)
# set train inputs
model.train_inputs = (X_train,)
X = get_X_baseline(
acq_function=acqf,
)
self.assertTrue(torch.equal(X, X_train))
# test that we fail back to train_inputs if X_baseline is an empty tensor
acqf.register_buffer("X_baseline", X_train[:0])
X = get_X_baseline(
acq_function=acqf,
)
self.assertTrue(torch.equal(X, X_train))
# test acquisition function without X_baseline or model
acqf = FixedFeatureAcquisitionFunction(acqf, d=2, columns=[0], values=[0])
with warnings.catch_warnings(record=True) as w, settings.debug(True):
X_rnd = get_X_baseline(
acq_function=acqf,
)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, BotorchWarning))
self.assertIsNone(X_rnd)
Y_train = 2 * X_train[:2] + 1
moo_model = MockModel(MockPosterior(mean=Y_train, samples=Y_train))
ref_point = torch.zeros(2, **tkwargs)
# test NEHVI with X_baseline
acqf = qNoisyExpectedHypervolumeImprovement(
moo_model,
ref_point=ref_point,
X_baseline=X_train[:2],
sampler=IIDNormalSampler(sample_shape=torch.Size([2])),
cache_root=False,
)
X = get_X_baseline(
acq_function=acqf,
)
self.assertTrue(torch.equal(X, acqf.X_baseline))
# test qEHVI without train_inputs
acqf = qExpectedHypervolumeImprovement(
moo_model,
ref_point=ref_point,
partitioning=FastNondominatedPartitioning(
ref_point=ref_point,
Y=Y_train,
),
)
# test extracting train_inputs from model list GP
model_list = ModelListGP(
SingleTaskGP(X_train, Y_train[:, :1]),
SingleTaskGP(X_train, Y_train[:, 1:]),
)
acqf = qExpectedHypervolumeImprovement(
model_list,
ref_point=ref_point,
partitioning=FastNondominatedPartitioning(
ref_point=ref_point,
Y=Y_train,
),
)
X = get_X_baseline(
acq_function=acqf,
)
self.assertTrue(torch.equal(X, X_train))
# test MESMO for which we need to use
# `acqf.mo_model`
batched_mo_model = SingleTaskGP(X_train, Y_train)
acqf = qMultiObjectiveMaxValueEntropy(
batched_mo_model,
sample_pareto_frontiers=lambda model: torch.rand(10, 2, **tkwargs),
)
X = get_X_baseline(
acq_function=acqf,
)
self.assertTrue(torch.equal(X, X_train))
# test that if there is an input transform that is applied
# to the train_inputs when the model is in eval mode, we
# extract the untransformed train_inputs
model = SingleTaskGP(
X_train, Y_train[:, :1], input_transform=Warp(indices=[0, 1])
)
model.eval()
self.assertFalse(torch.equal(model.train_inputs[0], X_train))
acqf = qExpectedImprovement(model, best_f=0.0)
X = get_X_baseline(
acq_function=acqf,
)
self.assertTrue(torch.equal(X, X_train))
| [
"[email protected]"
] | |
97aba7f61ee1a790a8f6bc2b427fe23430b50d04 | f40cc44ebfc337326577c91cd88d0c1dd845b098 | /LuminarPythonPrograms/PythonToDatabase/SelectFromDBS.py | abfd684fd180b6d5bd3d4b9d24f0cf1e4182d452 | [] | no_license | Aswin2289/LuminarPython | 6e07d6f9bf6c8727b59f38f97f5779a33b2fab0d | ba633a276dd79bbf214cfceac2413c894eaa1875 | refs/heads/master | 2023-01-01T07:52:41.598110 | 2020-10-13T04:34:49 | 2020-10-13T04:34:49 | 290,109,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | import mysql.connector
db=mysql.connector.connect(
host="localhost",
port=3307,
user="root",
password="Password@123",
database="luminarpython" ,
auth_plugin='mysql_native_password'
)
cursor=db.cursor()
try:
sql="SELECT * FROM EMPLOYEE"
cursor.execute(sql)
myresult=cursor.fetchall()
for x in myresult:
print(x)
except Exception as e:
db.rollback()
print(e.args)
finally:
db.close() | [
"[email protected]"
] | |
439738cd92bfa58d3bde1f4adeada16f33045fbc | 41c824ce983c2a400ca6484b365d6f7ee077c8a3 | /tools/office_db/declarant_group_stat_data.py | 320cd51f82a57beffb7bd779eae0f925dfc285af | [] | no_license | TI-Russia/smart_parser | 2c84c12906e308229037c2bc75299a4b227e795e | 7428904975b2cf88cb329b8da11017cdebe8fa03 | refs/heads/master | 2022-12-10T06:40:43.852974 | 2022-08-05T11:06:18 | 2022-08-05T11:06:18 | 129,266,366 | 16 | 4 | null | 2022-12-08T11:18:29 | 2018-04-12T14:44:23 | HTML | UTF-8 | Python | false | false | 9,600 | py | from office_db.year_income import TYearIncome
from office_db.rubrics import get_russian_rubric_str
import json
import os
from collections import defaultdict
class TGroupYearSnapshot:
def __init__(self, median_income=None, incomes_count=None, declarants_count=None):
self.median_year_income = median_income
self.incomes_count = incomes_count
#declarants_count must be greater than incomes_count, declarants_count contains sections with less than MROT income
self.declarants_count = declarants_count
@staticmethod
def from_json(j):
d = TGroupYearSnapshot()
d.incomes_count = j.get('incomes_count')
d.median_year_income = j.get('median_year_income')
d.declarants_count = j.get('declarants_count')
return d
def to_json(self):
return {
'incomes_count': self.incomes_count,
'median_year_income': self.median_year_income,
'declarants_count': self.declarants_count
}
class TGroupStatData:
def __init__(self):
self.year_snapshots = defaultdict(TGroupYearSnapshot)
self.child_office_examples = list()
self.child_offices_count = None
self.source_document_count = None
self.section_count = None
self.v2 = None
self.v2_size = None
def get_year_snapshot(self, year) -> TGroupYearSnapshot:
return self.year_snapshots.get(year)
def get_or_create_year_snapshot(self, year) -> TGroupYearSnapshot:
return self.year_snapshots[year]
def is_empty(self):
return len(self.year_snapshots) == 0
@staticmethod
def from_json(j):
d = TGroupStatData()
d.year_snapshots = defaultdict(TGroupYearSnapshot)
for k, v in j['year_snapshots'].items():
d.year_snapshots[int(k)] = TGroupYearSnapshot.from_json(v)
d.v2 = j.get('V2')
d.v2_size = j.get('V2_size')
d.child_office_examples = j.get('child_office_examples')
d.child_offices_count = j.get('child_offices_count')
d.source_document_count = j.get('source_document_count')
d.section_count = j.get('section_count')
return d
def to_json(self):
return {
'year_snapshots': dict((k, v.to_json()) for k, v in self.year_snapshots.items()),
'V2': self.v2,
'V2_size': self.v2_size,
'child_office_examples': self.child_office_examples,
'child_offices_count': self.child_offices_count,
'source_document_count': self.source_document_count,
'section_count': self.section_count
}
def add_snapshot(self, year: int, snapshot: TGroupYearSnapshot):
self.year_snapshots[year] = snapshot
def get_median_income(self, year: int):
a = self.year_snapshots.get(year)
if a is None:
return None
return a.median_year_income
class TGroupStatDataList:
office_group = 1
rubric_group = 2
def __init__(self, directory, group_type=None, start_year=None, last_year=None):
self.declarant_groups = defaultdict(TGroupStatData)
self.group_type = group_type
self.start_year = start_year
self.last_year = last_year
if self.group_type is None:
self.group_type = TGroupStatDataList.office_group
if self.group_type == TGroupStatDataList.office_group:
self.file_path = os.path.join(directory, "office_stat_data.txt")
else:
self.file_path = os.path.join(directory, "rubric_stat_data.txt")
def get_csv_path(self):
return self.file_path[:-len('.txt')] + ".csv"
def write_csv_file(self, russia, filepath=None):
if filepath is None:
filepath = self.get_csv_path()
with open(filepath, "w") as outp:
outp.write("\t".join(self.get_table_headers()) + "\n")
for r in self.get_all_office_report_rows(russia):
outp.write("\t".join(map(str, r)) + "\n")
def load_from_disk(self):
self.declarant_groups = defaultdict(TGroupStatData)
with open(self.file_path) as inp:
j = json.load(inp)
for k, v in j['groups'].items():
if k == "null":
k = None
else:
k = int(k)
self.declarant_groups[k] = TGroupStatData.from_json(v)
self.start_year = j['start_year']
self.last_year = j['last_year']
def save_to_disk(self, postfix=""):
with open(self.file_path + postfix, "w") as outp:
d = {
"groups": dict((k, v.to_json()) for k, v in self.declarant_groups.items()),
'start_year': self.start_year,
'last_year': self.last_year
}
json.dump(d, outp, indent=4, ensure_ascii=False)
def add_group(self, group_id: int, group: TGroupStatData):
self.declarant_groups[group_id] = group
def get_group_data(self, group_id: int) -> TGroupStatData:
return self.declarant_groups.get(group_id)
def get_or_create_group_data(self, group_id: int) -> TGroupStatData:
return self.declarant_groups[group_id]
def get_table_headers(self):
l = ['Id', 'Name']
for year in range(self.start_year, self.last_year + 1):
l.append(str(year))
l.append('|{}|'.format(year))
l.append('Q1')
l.append('PI')
l.append('D1')
l.append('V2')
l.append('|V2|')
return l
def get_table_column_description(self):
l = ['Идентификатор',
'Название']
for year in range(self.start_year, self.last_year + 1):
l.append("Медианный доход за {} год".format(year))
l.append('Количество учтенных деклараций за {} год'.format(year))
l.append('Во сколько раз сотрудники ведомства получают больше населения (посл. учтенный год)')
l.append('Рост медианной зарплаты всего населения в процентах в пределах учтенного интервала')
l.append('Рост медианного дохода декларантов в процентах в пределах учтенного интервала')
l.append('Усредненный индивидуальный рост декларантов в пределах учтенного интервала, поделенный на средний рост зарплаты населения')
l.append('Количество элементов, учтенных в V2')
return l
def get_office_report_table_row(self, russia, group_id, max_cell_width=None):
if self.group_type == TGroupStatDataList.office_group:
name = russia.get_office(group_id).name
if max_cell_width is not None:
if len(name) > max_cell_width - 3:
name = name[:max_cell_width - 3] + "..."
output_row = [group_id, name]
else:
if group_id is None:
rubric_name = "остальное"
else:
rubric_name = get_russian_rubric_str(group_id)
output_row = [group_id, rubric_name]
office_info: TGroupStatData
office_info = self.declarant_groups.get(group_id)
if office_info is None:
return None
declarant_count = 0
year_count = 0
valid_incomes = list()
for year in range(self.start_year, self.last_year + 1):
d = office_info.get_year_snapshot(year)
if d is not None and d.incomes_count is not None and d.incomes_count > 5:
declarant_count += d.incomes_count
year_count += 1
output_row.append(d.median_year_income)
output_row.append(d.incomes_count)
valid_incomes.append(TYearIncome(year, d.median_year_income))
else:
output_row.append(-1)
output_row.append(0)
if declarant_count <= 10 or year_count < 2:
# office is too small
return None
cmp_result = russia.get_average_nominal_incomes(valid_incomes)
if cmp_result is None:
params = [-1] * 4
else:
Q1 = russia.compare_to_all_russia_average_month_income(
valid_incomes[-1].year,
valid_incomes[-1].income/12.0)
Q1_str = str(Q1).replace(".", ",")
PI = cmp_result.population_income_growth
D1 = cmp_result.declarant_income_growth
V2_str = str(office_info.v2).replace(".", ",")
params = [Q1_str, PI, D1, V2_str, office_info.v2_size]
output_row.extend(params)
return output_row
def get_all_office_report_rows(self, russia):
for group_id in self.declarant_groups.keys():
r = self.get_office_report_table_row(russia, group_id, max_cell_width=120)
if r is not None:
yield r
class TOfficeRubricCalculatedData:
def __init__(self, directory):
self.directory = directory
self.office_stats = TGroupStatDataList(directory, TGroupStatDataList.office_group)
self.office_stats.load_from_disk()
self.rubric_stats = TGroupStatDataList(directory, TGroupStatDataList.rubric_group)
self.rubric_stats.load_from_disk() | [
"[email protected]"
] | |
2f1128b354fee139b1e3aef954db438043fa9ef2 | 6c00681f86e22e137c9f0f5381f2f78552fb6d1e | /preprocess/build_graph.py | f8dec70676ce945112981f2a2b1a31ddf9de11db | [] | no_license | FoxerLee/TGCN | f967344ae79e5bc7924b2854465d86393d89406a | ffb48c154aaeaebaca5ad618aff9731cb7117409 | refs/heads/master | 2023-01-31T04:33:31.088815 | 2020-12-16T05:31:54 | 2020-12-16T05:31:54 | 312,063,455 | 5 | 5 | null | null | null | null | UTF-8 | Python | false | false | 12,963 | py | import os
import random
import numpy as np
import pickle as pkl
# import networkx as nx
import scipy.sparse as sp
from math import log
from sklearn import svm
from nltk.corpus import wordnet as wn
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy.spatial.distance import cosine
import sys
sys.path.append('../')
from utils.utils import loadWord2Vec, clean_str
if len(sys.argv) != 2:
sys.exit("Use: python build_graph.py <dataset>")
dataset = sys.argv[1]
word_embeddings_dim = 300
word_vector_map = {}
# shulffing
doc_name_list = []
doc_train_list = []
doc_test_list = []
with open('../cleaned_data/' + dataset + '/' + dataset + '.txt', 'r') as f:
lines = f.readlines()
for line in lines:
doc_name_list.append(line.strip())
temp = line.split("\t")
if temp[1].find('test') != -1:
doc_test_list.append(line.strip())
elif temp[1].find('train') != -1:
doc_train_list.append(line.strip())
# print(doc_train_list)
# print(doc_test_list)
doc_content_list = []
with open('../cleaned_data/' + dataset + '/' + dataset + '_clean.txt', 'r') as f:
lines = f.readlines()
for line in lines:
doc_content_list.append(line.strip())
# print(doc_content_list)
train_ids = []
for train_name in doc_train_list:
train_id = doc_name_list.index(train_name)
train_ids.append(train_id)
# print(train_ids)
random.shuffle(train_ids)
# partial labeled data
# train_ids = train_ids[:int(0.2 * len(train_ids))]
train_ids_str = '\n'.join(str(index) for index in train_ids)
with open('../cleaned_data/' + dataset + '/graph/' + dataset + '.train.index', 'w') as f:
f.write(train_ids_str)
test_ids = []
for test_name in doc_test_list:
test_id = doc_name_list.index(test_name)
test_ids.append(test_id)
# print(test_ids)
random.shuffle(test_ids)
test_ids_str = '\n'.join(str(index) for index in test_ids)
with open('../cleaned_data/' + dataset + '/graph/' + dataset + '.test.index', 'w') as f:
f.write(test_ids_str)
ids = train_ids + test_ids
# print(ids)
print("data size {}".format(len(ids)))
print("adding...")
shuffle_doc_name_list = []
shuffle_doc_words_list = []
for id in ids:
shuffle_doc_name_list.append(doc_name_list[int(id)])
shuffle_doc_words_list.append(doc_content_list[int(id)])
shuffle_doc_name_str = '\n'.join(shuffle_doc_name_list)
shuffle_doc_words_str = '\n'.join(shuffle_doc_words_list)
with open('../cleaned_data/' + dataset + '/' + dataset + '_shuffle.txt', 'w') as f:
f.write(shuffle_doc_name_str)
with open('../cleaned_data/' + dataset + '/corpus/' + dataset + '_shuffle.txt', 'w') as f:
f.write(shuffle_doc_words_str)
# build vocab
print("building...")
word_freq = {}
word_set = set()
for doc_words in shuffle_doc_words_list:
words = doc_words.split()
for word in words:
word_set.add(word)
if word in word_freq:
word_freq[word] += 1
else:
word_freq[word] = 1
vocab = list(word_set)
vocab_size = len(vocab)
word_doc_list = {}
for i in range(len(shuffle_doc_words_list)):
doc_words = shuffle_doc_words_list[i]
words = doc_words.split()
appeared = set()
for word in words:
if word in appeared:
continue
if word in word_doc_list:
doc_list = word_doc_list[word]
doc_list.append(i)
word_doc_list[word] = doc_list
else:
word_doc_list[word] = [i]
appeared.add(word)
word_doc_freq = {}
for word, doc_list in word_doc_list.items():
word_doc_freq[word] = len(doc_list)
word_id_map = {}
for i in range(vocab_size):
word_id_map[vocab[i]] = i
vocab_str = '\n'.join(vocab)
with open('../cleaned_data/' + dataset + '/corpus/' + dataset + '_vocab.txt', 'w') as f:
f.write(vocab_str)
# label list
label_set = set()
for doc_meta in shuffle_doc_name_list:
temp = doc_meta.split('\t')
label_set.add(temp[2])
label_list = list(label_set)
label_list_str = '\n'.join(label_list)
with open('../cleaned_data/' + dataset + '/corpus/' + dataset + '_labels.txt', 'w') as f:
f.write(label_list_str)
# x: feature vectors of training docs, no initial features
# slect 90% training set
train_size = len(train_ids)
val_size = int(0.1 * train_size)
real_train_size = train_size - val_size # - int(0.5 * train_size)
real_train_doc_names = shuffle_doc_name_list[:real_train_size]
real_train_doc_names_str = '\n'.join(real_train_doc_names)
with open('../cleaned_data/' + dataset + '/graph/' + dataset + '.real_train.name', 'w') as f:
f.write(real_train_doc_names_str)
row_x = []
col_x = []
data_x = []
for i in range(real_train_size):
doc_vec = np.array([0.0 for k in range(word_embeddings_dim)])
doc_words = shuffle_doc_words_list[i]
words = doc_words.split()
doc_len = len(words)
for word in words:
if word in word_vector_map:
word_vector = word_vector_map[word]
# print(doc_vec)
# print(np.array(word_vector))
doc_vec = doc_vec + np.array(word_vector)
for j in range(word_embeddings_dim):
row_x.append(i)
col_x.append(j)
# np.random.uniform(-0.25, 0.25)
data_x.append(doc_vec[j] / doc_len) # doc_vec[j]/ doc_len
# x = sp.csr_matrix((real_train_size, word_embeddings_dim), dtype=np.float32)
x = sp.csr_matrix((data_x, (row_x, col_x)), shape=(
real_train_size, word_embeddings_dim))
y = []
for i in range(real_train_size):
doc_meta = shuffle_doc_name_list[i]
temp = doc_meta.split('\t')
label = temp[2]
one_hot = [0 for l in range(len(label_list))]
label_index = label_list.index(label)
one_hot[label_index] = 1
y.append(one_hot)
y = np.array(y)
# print(y)
# tx: feature vectors of test docs, no initial features
test_size = len(test_ids)
row_tx = []
col_tx = []
data_tx = []
for i in range(test_size):
doc_vec = np.array([0.0 for k in range(word_embeddings_dim)])
doc_words = shuffle_doc_words_list[i + train_size]
words = doc_words.split()
doc_len = len(words)
for word in words:
if word in word_vector_map:
word_vector = word_vector_map[word]
doc_vec = doc_vec + np.array(word_vector)
for j in range(word_embeddings_dim):
row_tx.append(i)
col_tx.append(j)
# np.random.uniform(-0.25, 0.25)
data_tx.append(doc_vec[j] / doc_len) # doc_vec[j] / doc_len
# tx = sp.csr_matrix((test_size, word_embeddings_dim), dtype=np.float32)
tx = sp.csr_matrix((data_tx, (row_tx, col_tx)),
shape=(test_size, word_embeddings_dim))
ty = []
for i in range(test_size):
doc_meta = shuffle_doc_name_list[i + train_size]
temp = doc_meta.split('\t')
label = temp[2]
one_hot = [0 for l in range(len(label_list))]
label_index = label_list.index(label)
one_hot[label_index] = 1
ty.append(one_hot)
ty = np.array(ty)
# print(ty)
# allx: the the feature vectors of both labeled and unlabeled training instances
# (a superset of x)
# unlabeled training instances -> words
word_vectors = np.random.uniform(-0.01, 0.01,
(vocab_size, word_embeddings_dim))
for i in range(len(vocab)):
word = vocab[i]
if word in word_vector_map:
vector = word_vector_map[word]
word_vectors[i] = vector
row_allx = []
col_allx = []
data_allx = []
for i in range(train_size):
doc_vec = np.array([0.0 for k in range(word_embeddings_dim)])
doc_words = shuffle_doc_words_list[i]
words = doc_words.split()
doc_len = len(words)
for word in words:
if word in word_vector_map:
word_vector = word_vector_map[word]
doc_vec = doc_vec + np.array(word_vector)
for j in range(word_embeddings_dim):
row_allx.append(int(i))
col_allx.append(j)
# np.random.uniform(-0.25, 0.25)
data_allx.append(doc_vec[j] / doc_len) # doc_vec[j]/doc_len
for i in range(vocab_size):
for j in range(word_embeddings_dim):
row_allx.append(int(i + train_size))
col_allx.append(j)
data_allx.append(word_vectors.item((i, j)))
row_allx = np.array(row_allx)
col_allx = np.array(col_allx)
data_allx = np.array(data_allx)
allx = sp.csr_matrix(
(data_allx, (row_allx, col_allx)), shape=(train_size + vocab_size, word_embeddings_dim))
ally = []
for i in range(train_size):
doc_meta = shuffle_doc_name_list[i]
temp = doc_meta.split('\t')
label = temp[2]
one_hot = [0 for l in range(len(label_list))]
label_index = label_list.index(label)
one_hot[label_index] = 1
ally.append(one_hot)
for i in range(vocab_size):
one_hot = [0 for l in range(len(label_list))]
ally.append(one_hot)
ally = np.array(ally)
print(x.shape, y.shape, tx.shape, ty.shape, allx.shape, ally.shape)
'''
Doc word heterogeneous graph
'''
# word co-occurence with context windows
window_size = 20
windows = []
for doc_words in shuffle_doc_words_list:
words = doc_words.split()
length = len(words)
if length <= window_size:
windows.append(words)
else:
# print(length, length - window_size + 1)
for j in range(length - window_size + 1):
window = words[j: j + window_size]
windows.append(window)
# print(window)
word_window_freq = {}
for window in windows:
appeared = set()
for i in range(len(window)):
if window[i] in appeared:
continue
if window[i] in word_window_freq:
word_window_freq[window[i]] += 1
else:
word_window_freq[window[i]] = 1
appeared.add(window[i])
word_pair_count = {}
for window in windows:
for i in range(1, len(window)):
for j in range(0, i):
word_i = window[i]
word_i_id = word_id_map[word_i]
word_j = window[j]
word_j_id = word_id_map[word_j]
if word_i_id == word_j_id:
continue
word_pair_str = str(word_i_id) + ',' + str(word_j_id)
if word_pair_str in word_pair_count:
word_pair_count[word_pair_str] += 1
else:
word_pair_count[word_pair_str] = 1
# two orders
word_pair_str = str(word_j_id) + ',' + str(word_i_id)
if word_pair_str in word_pair_count:
word_pair_count[word_pair_str] += 1
else:
word_pair_count[word_pair_str] = 1
row = []
col = []
weight = []
# pmi as weights
num_window = len(windows)
for key in word_pair_count:
temp = key.split(',')
i = int(temp[0])
j = int(temp[1])
count = word_pair_count[key]
word_freq_i = word_window_freq[vocab[i]]
word_freq_j = word_window_freq[vocab[j]]
pmi = log((1.0 * count / num_window) /
(1.0 * word_freq_i * word_freq_j/(num_window * num_window)))
if pmi <= 0:
continue
row.append(train_size + i)
col.append(train_size + j)
weight.append(pmi)
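# Note (added): the weight above is the (positive) pointwise mutual information of the
# word pair, PMI(i, j) = log( p(i, j) / (p(i) * p(j)) ), with probabilities estimated
# from sliding windows. For example, with 1000 windows, if word i appears in 100 windows,
# word j in 50, and they co-occur in 20, PMI = log((20/1000) / (0.1 * 0.05)) = log(4) ≈ 1.39.
# Only pairs with positive PMI are kept as word-word edges.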
# doc word frequency
doc_word_freq = {}
for doc_id in range(len(shuffle_doc_words_list)):
doc_words = shuffle_doc_words_list[doc_id]
words = doc_words.split()
for word in words:
word_id = word_id_map[word]
doc_word_str = str(doc_id) + ',' + str(word_id)
if doc_word_str in doc_word_freq:
doc_word_freq[doc_word_str] += 1
else:
doc_word_freq[doc_word_str] = 1
for i in range(len(shuffle_doc_words_list)):
doc_words = shuffle_doc_words_list[i]
words = doc_words.split()
doc_word_set = set()
for word in words:
if word in doc_word_set:
continue
j = word_id_map[word]
key = str(i) + ',' + str(j)
freq = doc_word_freq[key]
if i < train_size:
row.append(i)
else:
row.append(i + vocab_size)
col.append(train_size + j)
idf = log(1.0 * len(shuffle_doc_words_list) /
word_doc_freq[vocab[j]])
weight.append(freq * idf)
doc_word_set.add(word)
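# Note (added): document-word edges are weighted by TF-IDF: the in-document term
# frequency `freq` times log(N / df), where N is the number of documents and df is the
# number of documents containing the word.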
node_size = train_size + vocab_size + test_size
adj = sp.csr_matrix(
(weight, (row, col)), shape=(node_size, node_size))
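# Note (added): node indices in the adjacency matrix are laid out as
# [0, train_size) training documents, [train_size, train_size + vocab_size) vocabulary
# words, and [train_size + vocab_size, node_size) test documents.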
# dump objects
with open('../cleaned_data/' + dataset + '/graph/ind.' + dataset + '.x', 'wb') as f:
pkl.dump(x, f)
with open('../cleaned_data/' + dataset + '/graph/ind.' + dataset + '.y', 'wb') as f:
pkl.dump(y, f)
with open('../cleaned_data/' + dataset + '/graph/ind.' + dataset + '.tx', 'wb') as f:
pkl.dump(tx, f)
with open('../cleaned_data/' + dataset + '/graph/ind.' + dataset + '.ty', 'wb') as f:
pkl.dump(ty, f)
with open('../cleaned_data/' + dataset + '/graph/ind.' + dataset + '.allx', 'wb') as f:
pkl.dump(allx, f)
with open('../cleaned_data/' + dataset + '/graph/ind.' + dataset + '.ally', 'wb') as f:
pkl.dump(ally, f)
with open('../cleaned_data/' + dataset + '/graph/ind.' + dataset + '.adj', 'wb') as f:
pkl.dump(adj, f)
| [
"[email protected]"
] | |
78f8b5c22b9ff3f3ef52fe996a14a3184da876c5 | bd1b1fda138e6687dadc57317c3e312bc8872600 | /mycode/lintcode/Binary Tree & Divide Conquer/69 Binary Tree Level Order Traversal.py | 773f44f7eaff2c8f8e55118bc84c3419688ff279 | [] | no_license | dundunmao/lint_leet | fc185038f57e0c5cbb82a74cebd4fe00422416cb | 5788bd7b154649d2f787bbc4feb717ff2f4b4c59 | refs/heads/master | 2020-11-30T04:56:25.553327 | 2017-10-22T07:11:01 | 2017-10-22T07:11:01 | 96,705,212 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,696 | py | # -*- encoding: utf-8 -*-
# 给出一棵二叉树,返回其节点值的层次遍历(逐层从左往右访问)
#
# 您在真实的面试中是否遇到过这个题? Yes
# 样例
# 给一棵二叉树 {3,9,20,#,#,15,7} :
#
# 3
# / \
# 9 20
# / \
# 15 7
# 返回他的分层遍历结果:
#
# [
# [3],
# [9,20],
# [15,7]
# ]
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
class Solution:
"""
@param root: The root of binary tree.
@return: Level order in a list of lists of integers
"""
def levelOrder(self, root):
# write your code here
# write your code here
if root is None:
return []
result = []
stack = []
stack.append(root)
while stack != []:
l = len(stack)
list_level = []
for i in range(l):
if stack[0].left:
stack.append(stack[0].left)
if stack[0].right:
stack.append(stack[0].right)
top = stack.pop(0)
list_level.append(top.val)
result.append(list_level)
return result
from Queue import Queue
class Solution2:
"""
@param root: The root of binary tree.
@return: Level order in a list of lists of integers
"""
def levelOrder(self, root):
# write your code here
# write your code here
if root is None:
return []
result = []
queue = Queue()
queue.put(root)
        while queue.qsize() > 0:  # note: cannot be written as "while queue:", a Queue object is always truthy
l = queue.qsize()
list_level = []
for i in range(l):
q = queue.get()
if q.left:
queue.put(q.left)
if q.right:
queue.put(q.right)
list_level.append(q.val)
result.append(list_level)
return result
if __name__ == '__main__':
# TREE 1
# Construct the following tree
# 1
# / \
# 2 3
# / \
# 4 5
# / \
# 6 7
# \
# 8
P = TreeNode(1)
P.left = TreeNode(2)
P.left.left = TreeNode(4)
P.left.right = TreeNode(5)
P.left.right.left = TreeNode(6)
P.left.right.right = TreeNode(7)
P.left.right.right.right = TreeNode(8)
P.right = TreeNode(3)
#
#
# Q = Node(26)
# Q.left = Node(10)
# Q.left.left = Node(4)
# Q.left.right = Node(6)
# Q.right = Node(3)
# # Q.right.right = Node(3)
s = Solution2()
print s.levelOrder(P) | [
"[email protected]"
] | |
e0d1bccb62af7b5c0d1f98c92f261a00027045ca | 4095ac8b38a295ecdb12ab7c61c5a347f48ceac0 | /src/LightweightGAN.py | 13790b8f8f24cb98bb23972e9e13d109f4632411 | [
"MIT"
] | permissive | inoue0406/adversarial-nowcasting | 51b04cb4b3111bd8491c7d33c269eeee04e6a63e | 431f6bc4b7d731e85ca52f1bf81638b31c4be17e | refs/heads/main | 2023-02-08T20:11:41.930204 | 2021-01-05T09:43:57 | 2021-01-05T09:43:57 | 319,322,983 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,412 | py | import math
from math import log2, floor
import torch
from torch.optim import Adam
from torch import nn, einsum
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.autograd import grad as torch_grad
from einops import rearrange
from kornia import filter2D
from gsa_pytorch import GSA
# helper functions
def exists(val):
return val is not None
def is_power_of_two(val):
return log2(val).is_integer()
def default(val, d):
return val if exists(val) else d
def upsample(scale_factor = 2):
return nn.Upsample(scale_factor = scale_factor)
def set_requires_grad(model, bool):
for p in model.parameters():
p.requires_grad = bool
# augmentations
def random_hflip(tensor, prob):
if prob > random():
return tensor
return torch.flip(tensor, dims=(3,))
class AugWrapper(nn.Module):
def __init__(self, D, image_size):
super().__init__()
self.D = D
def forward(self, images, prob = 0., types = [], detach = False, **kwargs):
if random() < prob:
images = random_hflip(images, prob=0.5)
images = DiffAugment(images, types=types)
if detach:
images = images.detach()
return self.D(images, **kwargs)
# helper classes
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if not exists(old):
return new
return old * self.beta + (1 - self.beta) * new
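# A tiny sketch (added for illustration) of how the EMA helper above maintains a running
# average; the beta value is an arbitrary example:
def _example_ema_update():
    ema = EMA(beta=0.995)
    avg = ema.update_average(None, 1.0)   # no running value yet -> adopts 1.0
    avg = ema.update_average(avg, 0.0)    # 0.995 * 1.0 + 0.005 * 0.0
    return avg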
class Rezero(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
self.g = nn.Parameter(torch.tensor(1e-3))
def forward(self, x):
return self.g * self.fn(x)
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
class SumBranches(nn.Module):
def __init__(self, branches):
super().__init__()
self.branches = nn.ModuleList(branches)
def forward(self, x):
return sum(map(lambda fn: fn(x), self.branches))
class Blur(nn.Module):
def __init__(self):
super().__init__()
f = torch.Tensor([1, 2, 1])
self.register_buffer('f', f)
def forward(self, x):
f = self.f
f = f[None, None, :] * f [None, :, None]
return filter2D(x, f, normalized=True)
# Overwrite with identity
Blur = nn.Identity
norm_class = nn.BatchNorm2d
# Class
class SLE(nn.Module):
def __init__(
self,
*,
chan_in,
chan_out
):
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool2d((4, 4))
self.max_pool = nn.AdaptiveMaxPool2d((4, 4))
chan_intermediate = chan_in // 2
self.net = nn.Sequential(
nn.Conv2d(chan_in * 2, chan_intermediate, 4),
nn.LeakyReLU(0.1),
nn.Conv2d(chan_intermediate, chan_out, 1),
nn.Sigmoid()
)
def forward(self, x):
pooled_avg = self.avg_pool(x)
pooled_max = self.max_pool(x)
return self.net(torch.cat((pooled_max, pooled_avg), dim = 1))
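# Sketch (added for illustration) of how the SLE block above produces per-channel gates
# from a low-resolution feature map that then modulate a higher-resolution one; the
# channel counts and spatial sizes here are arbitrary assumptions:
def _example_sle_gate():
    sle = SLE(chan_in=512, chan_out=64)
    low_res = torch.randn(1, 512, 8, 8)
    gate = sle(low_res)                      # (1, 64, 1, 1), values in (0, 1)
    high_res = torch.randn(1, 64, 128, 128)
    return high_res * gate                   # broadcast channel-wise excitation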
class SpatialSLE(nn.Module):
def __init__(self, upsample_times, num_groups = 2):
super().__init__()
self.num_groups = num_groups
chan = num_groups * 2
self.net = nn.Sequential(
nn.Conv2d(chan, chan, 3, padding = 1),
upsample(2 ** upsample_times),
nn.Conv2d(chan, chan, 3, padding = 1),
nn.LeakyReLU(0.1),
nn.Conv2d(chan, 1, 3, padding = 1),
nn.Sigmoid()
)
def forward(self, x):
b, c, h, w = x.shape
num_groups = self.num_groups
mult = math.ceil(c / num_groups)
padding = (mult - c % mult) // 2
x_padded = F.pad(x, (0, 0, 0, 0, padding, padding))
x = rearrange(x_padded, 'b (g c) h w -> b g c h w', g = num_groups)
pooled_avg = x.mean(dim = 2)
pooled_max, _ = x.max(dim = 2)
pooled = torch.cat((pooled_avg, pooled_max), dim = 1)
return self.net(pooled)
class Generator(nn.Module):
def __init__(
self,
*,
image_size,
latent_dim = 256,
fmap_max = 512,
fmap_inverse_coef = 12,
transparent = False,
attn_res_layers = [],
sle_spatial = False
):
super().__init__()
resolution = log2(image_size)
assert is_power_of_two(image_size), 'image size must be a power of 2'
init_channel = 4 if transparent else 3
fmap_max = default(fmap_max, latent_dim)
self.initial_conv = nn.Sequential(
nn.ConvTranspose2d(latent_dim, latent_dim * 2, 4),
norm_class(latent_dim * 2),
nn.GLU(dim = 1)
)
num_layers = int(resolution) - 2
features = list(map(lambda n: (n, 2 ** (fmap_inverse_coef - n)), range(2, num_layers + 2)))
features = list(map(lambda n: (n[0], min(n[1], fmap_max)), features))
features = list(map(lambda n: 3 if n[0] >= 8 else n[1], features))
features = [latent_dim, *features]
in_out_features = list(zip(features[:-1], features[1:]))
self.res_layers = range(2, num_layers + 2)
self.layers = nn.ModuleList([])
self.res_to_feature_map = dict(zip(self.res_layers, in_out_features))
self.sle_map = ((3, 7), (4, 8), (5, 9), (6, 10))
self.sle_map = list(filter(lambda t: t[0] <= resolution and t[1] <= resolution, self.sle_map))
self.sle_map = dict(self.sle_map)
self.num_layers_spatial_res = 1
for (res, (chan_in, chan_out)) in zip(self.res_layers, in_out_features):
image_width = 2 ** res
attn = None
if image_width in attn_res_layers:
attn = Rezero(GSA(dim = chan_in, norm_queries = True))
sle = None
if res in self.sle_map:
residual_layer = self.sle_map[res]
sle_chan_out = self.res_to_feature_map[residual_layer - 1][-1]
sle = SLE(
chan_in = chan_out,
chan_out = sle_chan_out
)
sle_spatial = None
if res <= (resolution - self.num_layers_spatial_res):
sle_spatial = SpatialSLE(
upsample_times = self.num_layers_spatial_res,
num_groups = 2 if res < 8 else 1
)
layer = nn.ModuleList([
nn.Sequential(
upsample(),
Blur(),
nn.Conv2d(chan_in, chan_out * 2, 3, padding = 1),
norm_class(chan_out * 2),
nn.GLU(dim = 1)
),
sle,
sle_spatial,
attn
])
self.layers.append(layer)
self.out_conv = nn.Conv2d(features[-1], init_channel, 3, padding = 1)
def forward(self, x):
x = rearrange(x, 'b c -> b c () ()')
x = self.initial_conv(x)
x = F.normalize(x, dim = -1)
residuals = dict()
spatial_residuals = dict()
for (res, (up, sle, sle_spatial, attn)) in zip(self.res_layers, self.layers):
if exists(sle_spatial):
spatial_res = sle_spatial(x)
spatial_residuals[res + self.num_layers_spatial_res] = spatial_res
if exists(attn):
x = attn(x) + x
x = up(x)
if exists(sle):
out_res = self.sle_map[res]
residual = sle(x)
residuals[out_res] = residual
next_res = res + 1
if next_res in residuals:
x = x * residuals[next_res]
if next_res in spatial_residuals:
x = x * spatial_residuals[next_res]
return self.out_conv(x)
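# Minimal usage sketch (added for illustration) of the Generator above; image_size,
# latent_dim and the batch size are arbitrary assumptions:
def _example_generator_forward():
    g = Generator(image_size=256, latent_dim=256)
    z = torch.randn(2, 256)
    images = g(z)            # expected shape: (2, 3, 256, 256)
    return images.shape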
class SimpleDecoder(nn.Module):
def __init__(
self,
*,
chan_in,
chan_out = 3,
num_upsamples = 4,
):
super().__init__()
self.layers = nn.ModuleList([])
final_chan = chan_out
chans = chan_in
for ind in range(num_upsamples):
last_layer = ind == (num_upsamples - 1)
chan_out = chans if not last_layer else final_chan * 2
layer = nn.Sequential(
upsample(),
nn.Conv2d(chans, chan_out, 3, padding = 1),
nn.GLU(dim = 1)
)
self.layers.append(layer)
chans //= 2
def forward(self, x):
for layer in self.layers:
x = layer(x)
return x
class Discriminator(nn.Module):
def __init__(
self,
*,
image_size,
fmap_max = 512,
fmap_inverse_coef = 12,
transparent = False,
disc_output_size = 5,
attn_res_layers = []
):
super().__init__()
resolution = log2(image_size)
assert is_power_of_two(image_size), 'image size must be a power of 2'
assert disc_output_size in {1, 5}, 'discriminator output dimensions can only be 5x5 or 1x1'
resolution = int(resolution)
init_channel = 4 if transparent else 3
num_non_residual_layers = max(0, int(resolution) - 8)
num_residual_layers = 8 - 3
non_residual_resolutions = range(min(8, resolution), 2, -1)
features = list(map(lambda n: (n, 2 ** (fmap_inverse_coef - n)), non_residual_resolutions))
features = list(map(lambda n: (n[0], min(n[1], fmap_max)), features))
if num_non_residual_layers == 0:
res, _ = features[0]
features[0] = (res, init_channel)
chan_in_out = list(zip(features[:-1], features[1:]))
self.non_residual_layers = nn.ModuleList([])
for ind in range(num_non_residual_layers):
first_layer = ind == 0
last_layer = ind == (num_non_residual_layers - 1)
chan_out = features[0][-1] if last_layer else init_channel
self.non_residual_layers.append(nn.Sequential(
Blur(),
nn.Conv2d(init_channel, chan_out, 4, stride = 2, padding = 1),
nn.LeakyReLU(0.1)
))
self.residual_layers = nn.ModuleList([])
for (res, ((_, chan_in), (_, chan_out))) in zip(non_residual_resolutions, chan_in_out):
            image_width = 2 ** res
attn = None
if image_width in attn_res_layers:
attn = Rezero(GSA(dim = chan_in, batch_norm = False, norm_queries = True))
self.residual_layers.append(nn.ModuleList([
SumBranches([
nn.Sequential(
Blur(),
nn.Conv2d(chan_in, chan_out, 4, stride = 2, padding = 1),
nn.LeakyReLU(0.1),
nn.Conv2d(chan_out, chan_out, 3, padding = 1),
nn.LeakyReLU(0.1)
),
nn.Sequential(
Blur(),
nn.AvgPool2d(2),
nn.Conv2d(chan_in, chan_out, 1),
nn.LeakyReLU(0.1),
)
]),
attn
]))
last_chan = features[-1][-1]
if disc_output_size == 5:
self.to_logits = nn.Sequential(
nn.Conv2d(last_chan, last_chan, 1),
nn.LeakyReLU(0.1),
nn.Conv2d(last_chan, 1, 4)
)
elif disc_output_size == 1:
self.to_logits = nn.Sequential(
Blur(),
nn.Conv2d(last_chan, last_chan, 3, stride = 2, padding = 1),
nn.LeakyReLU(0.1),
nn.Conv2d(last_chan, 1, 4)
)
self.to_shape_disc_out = nn.Sequential(
nn.Conv2d(init_channel, 64, 3, padding = 1),
Residual(Rezero(GSA(dim = 64, norm_queries = True, batch_norm = False))),
SumBranches([
nn.Sequential(
Blur(),
nn.Conv2d(64, 32, 4, stride = 2, padding = 1),
nn.LeakyReLU(0.1),
nn.Conv2d(32, 32, 3, padding = 1),
nn.LeakyReLU(0.1)
),
nn.Sequential(
Blur(),
nn.AvgPool2d(2),
nn.Conv2d(64, 32, 1),
nn.LeakyReLU(0.1),
)
]),
Residual(Rezero(GSA(dim = 32, norm_queries = True, batch_norm = False))),
nn.AdaptiveAvgPool2d((4, 4)),
nn.Conv2d(32, 1, 4)
)
self.decoder1 = SimpleDecoder(chan_in = last_chan, chan_out = init_channel)
self.decoder2 = SimpleDecoder(chan_in = features[-2][-1], chan_out = init_channel) if resolution >= 9 else None
def forward(self, x, calc_aux_loss = False):
orig_img = x
for layer in self.non_residual_layers:
x = layer(x)
layer_outputs = []
for (net, attn) in self.residual_layers:
if exists(attn):
x = attn(x) + x
x = net(x)
layer_outputs.append(x)
out = self.to_logits(x).flatten(1)
img_32x32 = F.interpolate(orig_img, size = (32, 32))
out_32x32 = self.to_shape_disc_out(img_32x32)
if not calc_aux_loss:
return out, out_32x32, None
# self-supervised auto-encoding loss
layer_8x8 = layer_outputs[-1]
layer_16x16 = layer_outputs[-2]
recon_img_8x8 = self.decoder1(layer_8x8)
aux_loss = F.mse_loss(
recon_img_8x8,
F.interpolate(orig_img, size = recon_img_8x8.shape[2:])
)
if exists(self.decoder2):
select_random_quadrant = lambda rand_quadrant, img: rearrange(img, 'b c (m h) (n w) -> (m n) b c h w', m = 2, n = 2)[rand_quadrant]
crop_image_fn = partial(select_random_quadrant, floor(random() * 4))
img_part, layer_16x16_part = map(crop_image_fn, (orig_img, layer_16x16))
recon_img_16x16 = self.decoder2(layer_16x16_part)
aux_loss_16x16 = F.mse_loss(
recon_img_16x16,
F.interpolate(img_part, size = recon_img_16x16.shape[2:])
)
aux_loss = aux_loss + aux_loss_16x16
return out, out_32x32, aux_loss
class LightweightGAN(nn.Module):
def __init__(
self,
*,
latent_dim,
image_size,
optimizer = "adam",
fmap_max = 512,
fmap_inverse_coef = 12,
transparent = False,
disc_output_size = 5,
attn_res_layers = [],
sle_spatial = False,
ttur_mult = 1.,
lr = 2e-4,
rank = 0,
ddp = False
):
super().__init__()
self.latent_dim = latent_dim
self.image_size = image_size
G_kwargs = dict(
image_size = image_size,
latent_dim = latent_dim,
fmap_max = fmap_max,
fmap_inverse_coef = fmap_inverse_coef,
transparent = transparent,
attn_res_layers = attn_res_layers,
sle_spatial = sle_spatial
)
self.G = Generator(**G_kwargs)
self.D = Discriminator(
image_size = image_size,
fmap_max = fmap_max,
fmap_inverse_coef = fmap_inverse_coef,
transparent = transparent,
attn_res_layers = attn_res_layers,
disc_output_size = disc_output_size
)
self.ema_updater = EMA(0.995)
self.GE = Generator(**G_kwargs)
set_requires_grad(self.GE, False)
if optimizer == "adam":
self.G_opt = Adam(self.G.parameters(), lr = lr, betas=(0.5, 0.9))
self.D_opt = Adam(self.D.parameters(), lr = lr * ttur_mult, betas=(0.5, 0.9))
elif optimizer == "adabelief":
self.G_opt = AdaBelief(self.G.parameters(), lr = lr, betas=(0.5, 0.9))
self.D_opt = AdaBelief(self.D.parameters(), lr = lr * ttur_mult, betas=(0.5, 0.9))
else:
assert False, "No valid optimizer is given"
self.apply(self._init_weights)
self.reset_parameter_averaging()
self.cuda(rank)
self.D_aug = AugWrapper(self.D, image_size)
def _init_weights(self, m):
if type(m) in {nn.Conv2d, nn.Linear}:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in', nonlinearity='leaky_relu')
def EMA(self):
def update_moving_average(ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = self.ema_updater.update_average(old_weight, up_weight)
for current_buffer, ma_buffer in zip(current_model.buffers(), ma_model.buffers()):
new_buffer_value = self.ema_updater.update_average(ma_buffer, current_buffer)
ma_buffer.copy_(new_buffer_value)
update_moving_average(self.GE, self.G)
def reset_parameter_averaging(self):
self.GE.load_state_dict(self.G.state_dict())
def forward(self, x):
        raise NotImplementedError
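
# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original file). Assumes a CUDA
# device is available, since LightweightGAN.__init__ calls self.cuda(rank);
# every hyperparameter value below is illustrative only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import torch

    gan = LightweightGAN(
        latent_dim = 256,
        image_size = 256,          # must be a power of 2
        attn_res_layers = [32],    # apply GSA attention on 32x32 feature maps
    )

    latents = torch.randn(4, 256).cuda()
    fake_images = gan.G(latents)                  # -> (4, 3, 256, 256)
    logits, logits_32x32, _ = gan.D(fake_images)  # discriminator outputs
    gan.EMA()                                     # update the EMA copy of G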
| [
"[email protected]"
] | |
ce047380b982dcab9cf772b0ee014ca21ac67c17 | 774353c913eb170ec15ca881cd2bae43121b99e1 | /58135918-give-grayscale-image-color/give_grayscale_image_color.py | d81a90bd165f1d11361546301466835bf984d0f2 | [
"MIT"
] | permissive | nathancy/stackoverflow | b83bdca4f44fd523259b551301a7371e03fb8493 | ed5a00319ad3a2c7631825e17963c392aee5a103 | refs/heads/master | 2022-05-19T15:09:37.883623 | 2022-05-14T20:21:19 | 2022-05-14T20:21:19 | 175,527,064 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | import cv2
import numpy as np
before = cv2.imread('2.png')
b, g, r = cv2.split(before)
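# Scale each channel in place to tint the image: boost blue and red, damp
# green. casting="unsafe" lets the float results be written back into the
# uint8 arrays, where overflowing values wrap around (modulo 256) rather
# than saturate.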
np.multiply(b, 1.5, out=b, casting="unsafe")
np.multiply(g, .75, out=g, casting="unsafe")
np.multiply(r, 1.25, out=r, casting="unsafe")
after = cv2.merge([b, g, r])
cv2.imshow('before', before)
cv2.imshow('after', after)
cv2.waitKey()
| [
"[email protected]"
] | |
026fccd199ba8df764511f787fe16bf0d38b5c75 | 9068f861ce5ee8908866b0da94dc375fbec1bfa3 | /manage.py | 93e1e652793e954f073805e16f5b57d0a01cd0e8 | [] | no_license | nanfengpo/flask_flasky7 | b1e421e4a64284aabf42a1f6c559863068a13e45 | 170a19b3bd0bdb59a7ddaee62f49a74fa19c1657 | refs/heads/master | 2021-01-02T08:11:27.315798 | 2017-08-03T09:13:35 | 2017-08-03T09:13:35 | 98,955,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | #!/usr/bin/env python
from app import create_app, db
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
app = create_app('default')
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run() | [
"[email protected]"
] | |
5aa485fdb364c75c6362321c665b13828904d5fc | 328afd873e3e4fe213c0fb4ce6621cb1a450f33d | /GeeksforGeeks/insertionsort.py | 0bbde62db8c7da07f2e9b7c847cde885a6d3d3e5 | [] | no_license | TorpidCoder/Python | 810371d1bf33c137c025344b8d736044bea0e9f5 | 9c46e1de1a2926e872eee570e6d49f07dd533956 | refs/heads/master | 2021-07-04T08:21:43.950665 | 2020-08-19T18:14:09 | 2020-08-19T18:14:09 | 148,430,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | def insertion(arr):
for vals in arr:
values = arr.index(vals)
while(values>0):
if(arr[values-1]>arr[values]):
arr[values-1] , arr[values] = arr[values] , arr[values-1]
else:
break
values-=1
return arr
arr = [1,12,3,14,6,7]
print(insertion(arr))
| [
"[email protected]"
] | |
9214fcf06416f698b932ed0b9d0c76ea4f1e7d85 | 64643d3f814c2eb30dd2f86850980f48ac1486ba | /spektral/layers/convolutional/gin.py | 79a988972dc60a5c0d95f8e8b989a4d19b6a29c7 | [
"MIT"
] | permissive | Prashant118/spektral | 275e550baf08a2bd5354e8fefdf60a6a686d0af0 | dbf769b0ad47318f354a2de40a87ed8893d9b2fe | refs/heads/master | 2022-04-23T10:37:58.084038 | 2020-04-21T10:05:53 | 2020-04-21T10:05:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,466 | py | import tensorflow as tf
from tensorflow.keras import activations, backend as K
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from spektral.layers import ops
from spektral.layers.convolutional.gcn import GraphConv
class GINConv(GraphConv):
r"""
A Graph Isomorphism Network (GIN) as presented by
[Xu et al. (2018)](https://arxiv.org/abs/1810.00826).
**Mode**: single.
**This layer expects sparse inputs.**
This layer computes for each node \(i\):
$$
\Z_i = \textrm{MLP}\big( (1 + \epsilon) \cdot \X_i + \sum\limits_{j \in \mathcal{N}(i)} \X_j \big)
$$
where \(\textrm{MLP}\) is a multi-layer perceptron.
**Input**
- Node features of shape `([batch], N, F)`;
- Binary adjacency matrix of shape `([batch], N, N)`.
**Output**
- Node features with the same shape of the input, but the last dimension
changed to `channels`.
**Arguments**
- `channels`: integer, number of output channels;
    - `epsilon`: unnamed parameter, see
    [Xu et al. (2018)](https://arxiv.org/abs/1810.00826), and the equation above.
    If `None` (the default), epsilon is a trainable weight; if set to a constant
    value, it is kept fixed. In practice, it is safe to leave it at 0.
- `mlp_hidden`: list of integers, number of hidden units for each hidden
layer in the MLP (if None, the MLP has only the output layer);
- `mlp_activation`: activation for the MLP layers;
- `activation`: activation function to use;
- `use_bias`: whether to add a bias to the linear transformation;
- `kernel_initializer`: initializer for the kernel matrix;
- `bias_initializer`: initializer for the bias vector;
- `kernel_regularizer`: regularization applied to the kernel matrix;
- `bias_regularizer`: regularization applied to the bias vector;
- `activity_regularizer`: regularization applied to the output;
- `kernel_constraint`: constraint applied to the kernel matrix;
- `bias_constraint`: constraint applied to the bias vector.
"""
def __init__(self,
channels,
epsilon=None,
mlp_hidden=None,
mlp_activation='relu',
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super().__init__(channels,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
self.epsilon = epsilon
self.mlp_hidden = mlp_hidden if mlp_hidden else []
self.mlp_activation = activations.get(mlp_activation)
def build(self, input_shape):
assert len(input_shape) >= 2
layer_kwargs = dict(
kernel_initializer=self.kernel_initializer,
bias_initializer=self.bias_initializer,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
kernel_constraint=self.kernel_constraint,
bias_constraint=self.bias_constraint
)
mlp_layers = []
for i, channels in enumerate(self.mlp_hidden):
mlp_layers.append(Dense(channels, self.mlp_activation, **layer_kwargs))
mlp_layers.append(
Dense(self.channels, self.activation, **layer_kwargs)
)
self.mlp = Sequential(mlp_layers)
# Parameter for propagating features
if self.epsilon is None:
self.eps = self.add_weight(shape=(1,),
initializer=self.bias_initializer,
name='eps')
else:
# If epsilon is given, keep it constant
self.eps = K.constant(self.epsilon)
self.built = True
def call(self, inputs):
features = inputs[0]
fltr = inputs[1]
# Enforce sparse representation
if not K.is_sparse(fltr):
fltr = ops.dense_to_sparse(fltr)
# Propagation
targets = fltr.indices[:, -2]
sources = fltr.indices[:, -1]
messages = tf.gather(features, sources)
aggregated = ops.scatter_sum(targets, messages, N=tf.shape(features)[0])
hidden = (1.0 + self.eps) * features + aggregated
# MLP
output = self.mlp(hidden)
return output
def get_config(self):
config = {
'epsilon': self.epsilon,
'mlp_hidden': self.mlp_hidden,
'mlp_activation': self.mlp_activation
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@staticmethod
def preprocess(A):
return A
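
# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original file). Single mode, as
# described in the docstring: node features of shape (N, F) plus a binary,
# sparse adjacency matrix of shape (N, N). All sizes below are illustrative.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from tensorflow.keras.layers import Input
    from tensorflow.keras.models import Model

    N, F = 100, 16                            # nodes, input features
    X_in = Input(shape=(F,))                  # node features
    A_in = Input(shape=(N,), sparse=True)     # binary adjacency (sparse)

    # Two stacked GIN layers; preprocess() leaves A unchanged, so the raw
    # binary adjacency matrix is passed in directly.
    X_1 = GINConv(32, mlp_hidden=[64], activation='relu')([X_in, A_in])
    X_2 = GINConv(32, mlp_hidden=[64], activation='relu')([X_1, A_in])
    model = Model(inputs=[X_in, A_in], outputs=X_2)
    model.summary()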
| [
"[email protected]"
] | |
16b09f8ce664b418c75168ef19854b8ba981583b | 6ef3fc3ffa5f33e6403cb7cb0c30a35623a52d0d | /samples/snippets/product_search/import_product_sets.py | e2937509d632f68848329098d09d96a678ee704e | [
"Apache-2.0"
] | permissive | vam-google/python-vision | 61405506e3992ab89e6a454e4dda9b05fe2571f2 | 09e969fa30514d8a6bb95b576c1a2ae2c1e11d54 | refs/heads/master | 2022-08-15T08:40:35.999002 | 2022-07-18T16:04:35 | 2022-07-18T16:04:35 | 254,789,106 | 0 | 0 | Apache-2.0 | 2020-04-11T03:59:02 | 2020-04-11T03:59:01 | null | UTF-8 | Python | false | false | 3,472 | py | #!/usr/bin/env python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates how to perform the import product sets
operation on a product set in Cloud Vision Product Search.
For more information, see the tutorial page at
https://cloud.google.com/vision/product-search/docs/
"""
import argparse
# [START vision_product_search_tutorial_import]
from google.cloud import vision
# [END vision_product_search_tutorial_import]
# [START vision_product_search_import_product_images]
def import_product_sets(project_id, location, gcs_uri):
"""Import images of different products in the product set.
Args:
project_id: Id of the project.
location: A compute region name.
gcs_uri: Google Cloud Storage URI.
Target files must be in Product Search CSV format.
"""
client = vision.ProductSearchClient()
# A resource that represents Google Cloud Platform location.
location_path = f"projects/{project_id}/locations/{location}"
# Set the input configuration along with Google Cloud Storage URI
gcs_source = vision.ImportProductSetsGcsSource(
csv_file_uri=gcs_uri)
input_config = vision.ImportProductSetsInputConfig(
gcs_source=gcs_source)
# Import the product sets from the input URI.
response = client.import_product_sets(
parent=location_path, input_config=input_config)
print('Processing operation name: {}'.format(response.operation.name))
# synchronous check of operation status
result = response.result()
print('Processing done.')
for i, status in enumerate(result.statuses):
print('Status of processing line {} of the csv: {}'.format(
i, status))
# Check the status of reference image
# `0` is the code for OK in google.rpc.Code.
if status.code == 0:
reference_image = result.reference_images[i]
print(reference_image)
else:
print('Status code not OK: {}'.format(status.message))
# [END vision_product_search_import_product_images]
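# Hypothetical direct invocation (the values below are placeholders, not
# taken from this sample):
#
#   import_product_sets(
#       project_id='my-project-id',
#       location='us-west1',
#       gcs_uri='gs://my-bucket/product_sets.csv')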
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers(dest='command')
parser.add_argument(
'--project_id',
help='Project id. Required',
required=True)
parser.add_argument(
'--location',
help='Compute region name',
default='us-west1')
import_product_sets_parser = subparsers.add_parser(
'import_product_sets', help=import_product_sets.__doc__)
import_product_sets_parser.add_argument('gcs_uri')
args = parser.parse_args()
if args.command == 'import_product_sets':
import_product_sets(args.project_id, args.location, args.gcs_uri)
| [
"[email protected]"
] | |
84f9a4867be76726bae44972eac88cfb0c3d2da4 | e61e0558b459b9880b3bc103a0c4035c1fc52be5 | /azure-cognitiveservices-language-textanalytics/azure/cognitiveservices/language/textanalytics/models/error_response.py | a522682d06949f555050322c4624ccb2bc321960 | [
"MIT"
] | permissive | OnlyAGhost/azure-sdk-for-python | 67f713702fe573d14dde3590ca634a4a36130721 | 6bbab4181bbabf5db1c278dda870598acc9f0021 | refs/heads/master | 2021-05-13T13:55:53.118773 | 2018-01-05T02:17:19 | 2018-01-05T02:17:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,703 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class ErrorResponse(Model):
"""ErrorResponse.
:param code:
:type code: str
:param message:
:type message: str
:param target:
:type target: str
:param inner_error:
:type inner_error:
~azure.cognitiveservices.language.textanalytics.models.InternalError
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'inner_error': {'key': 'innerError', 'type': 'InternalError'},
}
def __init__(self, code=None, message=None, target=None, inner_error=None):
self.code = code
self.message = message
self.target = target
self.inner_error = inner_error
class ErrorResponseException(HttpOperationError):
"""Server responsed with exception of type: 'ErrorResponse'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
| [
"[email protected]"
] | |
cc6e729c10f0317cca13583dd97f7ab225bbe522 | c6760258b3ad3dd912f0842b8ae03cbea188a8c4 | /fsleyes/gl/gl21/glmip_funcs.py | eec134d2181a654a6a1d2bbcdeaf389801c27998 | [
"BSD-3-Clause",
"CC-BY-3.0",
"Apache-2.0"
] | permissive | sanjayankur31/fsleyes | aa822f627cde38ec766180fb591c9af7d18d2126 | 46ccb4fe2b2346eb57576247f49714032b61307a | refs/heads/master | 2020-04-09T08:41:18.380424 | 2018-12-03T11:44:51 | 2018-12-03T11:44:51 | 160,204,259 | 1 | 0 | null | 2018-12-03T14:31:31 | 2018-12-03T14:31:31 | null | UTF-8 | Python | false | false | 3,501 | py | #!/usr/bin/env python
#
# glmip_funcs.py - Functions used by GLMIP for rendering in an OpenGL 2.1
# environment.
#
# Author: Paul McCarthy <[email protected]>
#
"""This module contains functions used by the :class:`.GLMIP` class for
rendering in an OpenGL 2.1 environment.
"""
import numpy as np
import fsl.utils.transform as transform
import fsleyes.gl.shaders as shaders
from . import glvolume_funcs
def init(self):
"""Initialise the shader programs. """
self.shader = None
compileShaders( self)
updateShaderState(self)
def destroy(self):
"""Destroy the shader programs. """
self.shader.destroy()
self.shader = None
def compileShaders(self):
"""Compiles vertex and fragment shaders. """
if self.shader is not None:
self.shader.destroy()
vertSrc = shaders.getVertexShader( 'glvolume')
fragSrc = shaders.getFragmentShader('glmip')
self.shader = shaders.GLSLShader(vertSrc, fragSrc)
def updateShaderState(self):
"""Updates the vertex/fragment shader state based on the current
state of the :class:`.MIPOpts` instance.
"""
if not self.ready():
return
opts = self.opts
shader = self.shader
vmin, vmax = self.overlay.dataRange
# Convert clipping values from voxel value
# range totexture value range (0.0 - 1.0).
imgXform = self.imageTexture.invVoxValXform
clipLow = opts.clippingRange[0] * imgXform[0, 0] + imgXform[0, 3]
clipHigh = opts.clippingRange[1] * imgXform[0, 0] + imgXform[0, 3]
textureMin = vmin * imgXform[0, 0] + imgXform[0, 3]
textureMax = vmax * imgXform[0, 0] + imgXform[0, 3]
imageShape = self.image.shape[:3]
# Create a single transformation matrix
# which transforms from image texture values
# to voxel values, and scales said voxel
# values to colour map texture coordinates.
img2CmapXform = transform.concat(
self.cmapTexture.getCoordinateTransform(),
self.imageTexture.voxValXform)
# sqrt(3) so the window is 100%
# along the diagonal of a cube
window = np.sqrt(3) * opts.window / 100.0
shader.load()
changed = False
changed |= shader.set('imageTexture', 0)
changed |= shader.set('cmapTexture', 1)
changed |= shader.set('textureMin', textureMin)
changed |= shader.set('textureMax', textureMax)
changed |= shader.set('img2CmapXform', img2CmapXform)
changed |= shader.set('imageShape', imageShape)
changed |= shader.set('useSpline', opts.interpolation == 'spline')
changed |= shader.set('clipLow', clipLow)
changed |= shader.set('clipHigh', clipHigh)
changed |= shader.set('invertClip', opts.invertClipping)
changed |= shader.set('window', window)
changed |= shader.set('useMinimum', opts.minimum)
changed |= shader.set('useAbsolute', opts.absolute)
shader.unload()
return changed
def draw2D(self, zpos, axes, xform=None, bbox=None):
"""Draws a 2D slice at the given ``zpos``. Uses the
:func:`.glvolume_funcs.draw2D` function.
"""
self.shader.load()
viewmat = self.canvas.viewMatrix
cdir, rayStep = self.opts.calculateRayCastSettings(viewmat)
self.shader.set('cameraDir', cdir)
self.shader.set('rayStep', rayStep)
glvolume_funcs.draw2D(self, zpos, axes, xform, bbox)
self.shader.unloadAtts()
self.shader.unload()
| [
"[email protected]"
] |