Dataset columns (one row per Python source file):

| column | type | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |

Each data row below lists these fields in order, separated by `|`, with the multi-line `content` field shown inline.
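For orientation, a minimal sketch of how rows with this schema could be inspected, assuming a shard of the split has been exported to a local Parquet file (the file path and the 0.5 threshold are illustrative assumptions, not part of the dataset):

```python
import pandas as pd

# Hypothetical local shard; the real file name depends on how the split was exported.
df = pd.read_parquet("data/train-00000-of-00001.parquet")

# Columns follow the schema above: repo metadata, the raw `content` of each
# Python file, and per-file statistics such as `score_documentation`.
well_documented = df[df["score_documentation"] > 0.5]
print(well_documented[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())
```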
e048929c57d8279d48bbfdb7b6430abd2459ceab | 243 | py | Python | Others/code_festival/code-festival-2015-final-open/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 2 | 2020-06-12T09:54:23.000Z | 2021-05-04T01:34:07.000Z | Others/code_festival/code-festival-2015-final-open/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 961 | 2020-06-23T07:26:22.000Z | 2022-03-31T21:34:52.000Z | Others/code_festival/code-festival-2015-final-open/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
def main():
    s, t, u = map(str, input().split())

    if len(s) == 5 and len(t) == 7 and len(u) == 5:
        print('valid')
    else:
        print('invalid')


if __name__ == '__main__':
    main()
| 16.2 | 52 | 0.440329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.205761 |
e048b527992db2f1543fe57b684fc1f640173519 | 328 | py | Python | python_Project/Day_16-20/test_2.py | Zzz-ww/Python-prac | c97f2c16b74a2c1df117f377a072811cc596f98b | [
"MIT"
] | null | null | null | python_Project/Day_16-20/test_2.py | Zzz-ww/Python-prac | c97f2c16b74a2c1df117f377a072811cc596f98b | [
"MIT"
] | null | null | null | python_Project/Day_16-20/test_2.py | Zzz-ww/Python-prac | c97f2c16b74a2c1df117f377a072811cc596f98b | [
"MIT"
] | null | null | null | """
The pitfall of nested lists
"""
names = ['关羽', '张飞', '赵云', '马超', '黄忠']
courses = ['语文', '数学', '英语']
# Record the scores of three courses for each of five students
scores = [[None] * len(courses) for _ in range(len(names))]
for row, name in enumerate(names):
    for col, course in enumerate(courses):
        scores[row][col] = float(input(f'请输入{name}的{course}的成绩:'))
print(scores) | 25.230769 | 66 | 0.591463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 173 | 0.417874 |
e04a69c5ceb81801a0a97e45ded8c53330ccbc76 | 18,672 | py | Python | asr/dataloaders/am_dataloader.py | Z-yq/audioSamples.github.io | 53c474288f0db1a3acfe40ba57a4cd5f2aecbcd3 | [
"Apache-2.0"
] | 1 | 2022-03-03T02:51:55.000Z | 2022-03-03T02:51:55.000Z | asr/dataloaders/am_dataloader.py | Z-yq/audioSamples.github.io | 53c474288f0db1a3acfe40ba57a4cd5f2aecbcd3 | [
"Apache-2.0"
] | null | null | null | asr/dataloaders/am_dataloader.py | Z-yq/audioSamples.github.io | 53c474288f0db1a3acfe40ba57a4cd5f2aecbcd3 | [
"Apache-2.0"
] | null | null | null | import logging
import random
import numpy as np
import pypinyin
import tensorflow as tf
from augmentations.augments import Augmentation
from utils.speech_featurizers import SpeechFeaturizer
from utils.text_featurizers import TextFeaturizer
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
import time
class AM_DataLoader():
def __init__(self, config_dict, training=True):
self.speech_config = config_dict['speech_config']
self.phone_config = config_dict['inp_config']
self.text_config = config_dict['tar_config']
self.running_config=config_dict['running_config']
self.augment_config = config_dict['augments_config']
self.streaming = self.speech_config['streaming']
self.chunk = self.speech_config['sample_rate'] * self.speech_config['streaming_bucket']
self.batch = config_dict['running_config']['batch_size']
self.speech_featurizer = SpeechFeaturizer(self.speech_config)
self.phone_featurizer = TextFeaturizer(self.phone_config)
self.text_featurizer = TextFeaturizer(self.text_config)
self.make_file_list( training)
self.augment = Augmentation(self.augment_config)
self.init_text_to_vocab()
self.epochs = 1
self.steps = 0
def return_data_types(self):
return (tf.float32, tf.int32, tf.int32, tf.int32,tf.int32)
def return_data_shape(self):
return (
tf.TensorShape([self.batch, None, 1]),
tf.TensorShape([self.batch, ]),
tf.TensorShape([self.batch, None]),
tf.TensorShape([self.batch, ]),
tf.TensorShape([self.batch, None]),
)
def get_per_epoch_steps(self):
return len(self.train_list) // self.batch
def eval_per_epoch_steps(self):
return len(self.test_list) // self.batch
def init_text_to_vocab(self):
pypinyin.load_phrases_dict({'调大': [['tiáo'], ['dà']],
'调小': [['tiáo'], ['xiǎo']],
'调亮': [['tiáo'], ['liàng']],
'调暗': [['tiáo'], ['àn']],
'肖': [['xiāo']],
'英雄传': [['yīng'], ['xióng'], ['zhuàn']],
'新传': [['xīn'], ['zhuàn']],
'外传': [['wài'], ['zhuàn']],
'正传': [['zhèng'], ['zhuàn']], '水浒传': [['shuǐ'], ['hǔ'], ['zhuàn']]
})
def text_to_vocab_func(txt):
pins = pypinyin.pinyin(txt)
pins = [i[0] for i in pins]
phones = []
for pin in pins:
if pin in self.phone_featurizer.vocab_array:
phones += [pin]
else:
phones += list(pin)
# print(phones)
return phones
self.text_to_vocab = text_to_vocab_func
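    # Illustrative example (an assumption for clarity, not part of the original source):
    # with the phrase overrides registered above, text_to_vocab_func('调大音量') would
    # yield pinyin tokens along the lines of ['tiáo', 'dà', 'yīn', 'liàng'], falling back
    # to per-character symbols for any pinyin missing from the phone vocabulary.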
def make_file_list(self, training=True):
train_list=self.speech_config['train_list']
test_list=self.speech_config['eval_list']
if training:
with open(train_list, encoding='utf-8') as f:
train_list = f.readlines()
train_list = [i.strip() for i in train_list if i != '']
self.train_list = train_list
np.random.shuffle(self.train_list)
with open(test_list, encoding='utf-8') as f:
data = f.readlines()
data = [i.strip() for i in data if i != '']
self.test_list = data
self.train_offset = 0
self.test_offset = 0
logging.info('load train list {} test list {}'.format(len(self.train_list), len(self.test_list)))
else:
with open(test_list, encoding='utf-8') as f:
data = f.readlines()
data = [i.strip() for i in data if i != '']
self.test_list = data
self.test_offset = 0
def only_chinese(self, word):
txt = ''
for ch in word:
if '\u4e00' <= ch <= '\u9fff':
txt += ch
else:
continue
return txt
def eval_data_generator(self):
sample = []
speech_features = []
input_length = []
phones = []
phones_length = []
txts = []
max_input = 0
batch = self.batch
for i in range(batch * 10):
line = self.test_list[self.test_offset]
self.test_offset += 1
if self.test_offset > len(self.test_list) - 1:
self.test_offset = 0
wp, txt = line.strip().split('\t')
try:
data = self.speech_featurizer.load_wav(wp)
except:
logging.info('{} load data failed,skip'.format(wp))
continue
if len(data) < 400:
continue
elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']:
logging.info(
'{} duration out of wav_max_duration({}),skip'.format(wp, self.speech_config['wav_max_duration']))
continue
if self.speech_config['only_chinese']:
txt = self.only_chinese(txt)
if not self.streaming:
speech_feature = data / np.abs(data).max()
speech_feature = np.expand_dims(speech_feature, -1)
in_len = len(speech_feature) // (
self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) *
self.speech_config['stride_ms'])
else:
speech_feature = data
speech_feature = np.expand_dims(speech_feature, -1)
reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
self.speech_config['stride_ms']
in_len = len(speech_feature) // self.chunk
if len(speech_feature) % self.chunk != 0:
in_len += 1
chunk_times = self.chunk // reduce
if self.chunk % reduce != 0:
chunk_times += 1
in_len *= chunk_times
py = self.text_to_vocab(txt)
if self.check_valid(py, self.phone_featurizer.vocab_array) is not True:
logging.info(' {} txt phone {} not all in tokens,continue'.format(txt, self.check_valid(py,
self.phone_featurizer.vocab_array)))
continue
if self.check_valid(txt, self.text_featurizer.vocab_array) is not True:
logging.info(' {} txt phone {} not all in tokens,continue'.format(txt, self.check_valid(py,
self.text_featurizer.vocab_array)))
continue
txt = list(txt)
phone_feature = self.phone_featurizer.extract(py)
text_feature = self.text_featurizer.extract(txt)+[self.text_featurizer.endid()]
if in_len < len(phone_feature):
logging.info('{} feature length < phone length,continue'.format(wp))
continue
max_input = max(max_input, len(speech_feature))
speech_features.append(speech_feature)
input_length.append(in_len)
phones.append(np.array(phone_feature))
txts.append(np.array(text_feature))
phones_length.append(len(phone_feature))
sample.append(line)
if len(sample) == batch:
break
if self.streaming:
max_input = max_input // self.chunk * self.chunk + self.chunk
speech_features = self.speech_featurizer.pad_signal(speech_features, max_input)
if self.streaming:
reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
self.speech_config['stride_ms']
max_input = max_input // self.chunk * self.chunk + self.chunk
max_in_len = max_input // self.chunk
chunk_times = self.chunk // reduce
if self.chunk % reduce != 0:
chunk_times += 1
max_in_len *= chunk_times
input_length = np.clip(input_length, 0, max_in_len)
speech_features = self.speech_featurizer.pad_signal(speech_features, max_input)
phones = tf.keras.preprocessing.sequence.pad_sequences(phones, maxlen=max([len(i) for i in phones]),
padding='post', value=self.phone_featurizer.pad)
txts = tf.keras.preprocessing.sequence.pad_sequences(txts, maxlen=max([len(i) for i in txts]), padding='post',
value=self.text_featurizer.pad)
x = np.array(speech_features, 'float32')
phones = np.array(phones, 'int32')
txts = np.array(txts, 'int32')
input_length = np.array(input_length, 'int32')
phones_length = np.array(phones_length, 'int32')
return x, input_length, phones, phones_length, txts
def check_valid(self, txt, vocab_list):
if len(txt) == 0:
return False
for n in txt:
if n in vocab_list:
pass
else:
return n
return True
def generate(self, train=True):
sample = []
speech_features = []
input_length = []
phones = []
phones_length = []
txts = []
max_input = 0
if train:
batch = self.batch * 3 // 4 if self.augment.available() else self.batch
else:
batch = self.batch
for i in range(batch * 10):
if train:
line = self.train_list[self.train_offset]
self.train_offset += 1
if self.train_offset > len(self.train_list) - 1:
self.train_offset = 0
np.random.shuffle(self.train_list)
self.epochs += 1
else:
line = self.test_list[self.test_offset]
self.test_offset += 1
if self.test_offset > len(self.test_list) - 1:
self.test_offset = 0
wp, txt = line.strip().split('\t')
try:
data = self.speech_featurizer.load_wav(wp)
except:
logging.info('{} load data failed,skip'.format(wp))
continue
if len(data) < 400:
continue
elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']:
logging.info(
'{} duration out of wav_max_duration({}),skip'.format(wp, self.speech_config['wav_max_duration']))
continue
if self.speech_config['only_chinese']:
txt = self.only_chinese(txt)
if not self.streaming:
speech_feature = data / np.abs(data).max()
speech_feature = np.expand_dims(speech_feature, -1)
in_len = len(speech_feature) // (
self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) *
self.speech_config['stride_ms'])
else:
speech_feature = data
speech_feature = np.expand_dims(speech_feature, -1)
reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
self.speech_config['stride_ms']
in_len = len(speech_feature) // self.chunk
if len(speech_feature) % self.chunk != 0:
in_len += 1
chunk_times = self.chunk // reduce
if self.chunk % reduce != 0:
chunk_times += 1
in_len *= chunk_times
py = self.text_to_vocab(txt)
if self.check_valid(py, self.phone_featurizer.vocab_array) is not True:
logging.info(' {} txt phone {} not all in tokens,continue'.format(txt, self.check_valid(py,
self.phone_featurizer.vocab_array)))
continue
if self.check_valid(txt, self.text_featurizer.vocab_array) is not True:
logging.info(' {} txt {} not all in tokens,continue'.format(txt, self.check_valid(txt,
self.text_featurizer.vocab_array)))
continue
txt = list(txt)
phone_feature = self.phone_featurizer.extract(py)
text_feature = self.text_featurizer.extract(txt)+[self.text_featurizer.endid()]
if in_len < len(phone_feature):
logging.info('{} feature length < phone length,continue'.format(wp))
continue
max_input = max(max_input, len(speech_feature))
speech_features.append(speech_feature)
input_length.append(in_len)
phones.append(np.array(phone_feature))
txts.append(np.array(text_feature))
phones_length.append(len(phone_feature))
sample.append(line)
if len(sample) == batch:
break
if train and self.augment.available():
sample = random.sample(sample, self.batch // 4)
for i in sample:
wp, txt = i.strip().split('\t')
try:
data = self.speech_featurizer.load_wav(wp)
except:
continue
if len(data) < 400:
logging.info('{} wav too short < 25ms,skip'.format(wp))
continue
elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']:
continue
data = self.augment.process(data)
if self.speech_config['only_chinese']:
txt = self.only_chinese(txt)
if not self.streaming:
speech_feature = data / np.abs(data).max()
speech_feature = np.expand_dims(speech_feature, -1)
in_len = len(speech_feature) // (
self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) *
self.speech_config['stride_ms'])
else:
speech_feature = data
speech_feature = np.expand_dims(speech_feature, -1)
reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
self.speech_config['stride_ms']
in_len = len(speech_feature) // self.chunk
if len(speech_feature) % self.chunk != 0:
in_len += 1
chunk_times = self.chunk // reduce
if self.chunk % reduce != 0:
chunk_times += 1
in_len *= chunk_times
py = self.text_to_vocab(txt)
if self.check_valid(py, self.phone_featurizer.vocab_array) is not True:
logging.info(' {} txt phone {} not all in tokens,continue'.format(txt, self.check_valid(py,
self.phone_featurizer.vocab_array)))
continue
if self.check_valid(txt, self.text_featurizer.vocab_array) is not True:
logging.info(' {} txt {} not all in tokens,continue'.format(txt, self.check_valid(txt,
self.text_featurizer.vocab_array)))
continue
txt = list(txt)
phone_feature = self.phone_featurizer.extract(py)
text_feature = self.text_featurizer.extract(txt)+[self.text_featurizer.endid()]
if in_len < len(phone_feature):
logging.info('{} feature length < phone length,continue'.format(wp))
continue
max_input = max(max_input, len(speech_feature))
speech_features.append(speech_feature)
input_length.append(in_len)
phones.append(np.array(phone_feature))
txts.append(np.array(text_feature))
phones_length.append(len(phone_feature))
if self.streaming:
reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
self.speech_config['stride_ms']
max_input = max_input // self.chunk * self.chunk + self.chunk
max_in_len = max_input // self.chunk
chunk_times = self.chunk // reduce
if self.chunk % reduce != 0:
chunk_times += 1
max_in_len *= chunk_times
input_length = np.clip(input_length, 0, max_in_len)
speech_features = self.speech_featurizer.pad_signal(speech_features, max_input)
phones=tf.keras.preprocessing.sequence.pad_sequences(phones,maxlen=max([len(i) for i in phones]),padding='post',value=self.phone_featurizer.pad)
txts=tf.keras.preprocessing.sequence.pad_sequences(txts,maxlen=max([len(i) for i in txts]),padding='post',value=self.text_featurizer.pad)
x = np.array(speech_features, 'float32')
phones = np.array(phones, 'int32')
txts = np.array(txts, 'int32')
input_length = np.array(input_length, 'int32')
phones_length = np.array(phones_length, 'int32')
return x, input_length, phones, phones_length,txts
def generator(self, train=True):
while 1:
s=time.time()
x, input_length, phones, phones_length,txts = self.generate(train)
e=time.time()
logging.info('load data cost time: {}'.format(e-s))
if x.shape[0] == 0:
logging.info('load data length zero,continue')
continue
yield x, input_length, phones, phones_length,txts
| 43.322506 | 152 | 0.530366 | 18,375 | 0.980785 | 428 | 0.022845 | 0 | 0 | 0 | 0 | 1,636 | 0.087323 |
e04c1f351c8c0376a0ea90e165d6e346051fee43 | 7,612 | py | Python | migrations/versions/2018_04_20_data_src_refactor.py | AlexKouzy/ethnicity-facts-and-figures-publisher | 18ab2495a8633f585e18e607c7f75daa564a053d | [
"MIT"
] | null | null | null | migrations/versions/2018_04_20_data_src_refactor.py | AlexKouzy/ethnicity-facts-and-figures-publisher | 18ab2495a8633f585e18e607c7f75daa564a053d | [
"MIT"
] | null | null | null | migrations/versions/2018_04_20_data_src_refactor.py | AlexKouzy/ethnicity-facts-and-figures-publisher | 18ab2495a8633f585e18e607c7f75daa564a053d | [
"MIT"
] | null | null | null | """empty message
Revision ID: 2018_04_20_data_src_refactor
Revises: 2018_04_11_add_sandbox_topic
Create Date: 2018-04-20 13:03:32.478880
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
from sqlalchemy.dialects.postgresql import ARRAY
revision = '2018_04_20_data_src_refactor'
down_revision = '2018_04_11_add_sandbox_topic'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
type_of_data_types = sa.Enum('ADMINISTRATIVE', 'SURVEY', name='type_of_data_types')
op.add_column('page', sa.Column('secondary_source_1_type_of_data', ARRAY(type_of_data_types), nullable=True))
op.add_column('page', sa.Column('suppression_and_disclosure', sa.TEXT(), nullable=True))
op.add_column('page', sa.Column('note_on_corrections_or_updates', sa.TEXT(), nullable=True))
op.add_column('page', sa.Column('secondary_source_1_note_on_corrections_or_updates', sa.TEXT(), nullable=True))
op.add_column('page', sa.Column('secondary_source_1_data_source_purpose', sa.TEXT(), nullable=True))
op.get_bind()
op.execute('''
UPDATE page SET suppression_and_disclosure = suppression_rules
WHERE disclosure_control is null;
''')
op.execute('''
UPDATE page SET suppression_and_disclosure = disclosure_control
WHERE suppression_rules is null;
''')
op.execute('''
UPDATE page SET suppression_and_disclosure = trim(suppression_rules || ' ' || disclosure_control)
WHERE suppression_rules is not null
AND disclosure_control is not null;
''')
op.drop_constraint('organisation_secondary_source_2_fkey', 'page', type_='foreignkey')
op.drop_constraint('frequency_secondary_source_2_fkey', 'page', type_='foreignkey')
op.drop_constraint('secondary_source_2_type_of_statistic_fkey', 'page', type_='foreignkey')
op.drop_column('page', 'secondary_source_1_date_next_update')
op.drop_column('page', 'secondary_source_1_date_updated')
op.drop_column('page', 'secondary_source_1_suppression_rules')
op.drop_column('page', 'secondary_source_1_disclosure_control')
op.drop_column('page', 'secondary_source_2_frequency')
op.drop_column('page', 'secondary_source_2_contact_2_name')
op.drop_column('page', 'secondary_source_2_contact_2_phone')
op.drop_column('page', 'secondary_source_2_url')
op.drop_column('page', 'secondary_source_2_date_next_update')
op.drop_column('page', 'secondary_source_2_contact_1_name')
op.drop_column('page', 'last_update_date')
op.drop_column('page', 'secondary_source_2_contact_1_phone')
op.drop_column('page', 'secondary_source_2_publisher_text')
op.drop_column('page', 'secondary_source_2_disclosure_control')
op.drop_column('page', 'secondary_source_2_type_of_statistic_id')
op.drop_column('page', 'secondary_source_2_suppression_rules')
op.drop_column('page', 'secondary_source_2_frequency_other')
op.drop_column('page', 'secondary_source_2_publisher_id')
op.drop_column('page', 'secondary_source_2_title')
op.drop_column('page', 'secondary_source_2_date')
op.drop_column('page', 'next_update_date')
op.drop_column('page', 'secondary_source_2_date_updated')
op.drop_column('page', 'secondary_source_2_statistic_type')
op.drop_column('page', 'secondary_source_2_frequency_id')
op.drop_column('page', 'secondary_source_2_contact_2_email')
op.drop_column('page', 'secondary_source_2_contact_1_email')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('page', sa.Column('secondary_source_2_contact_1_email', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_contact_2_email', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_frequency_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_statistic_type', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_date_updated', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_date', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_title', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_publisher_id', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_frequency_other', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_suppression_rules', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_type_of_statistic_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_disclosure_control', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_publisher_text', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_contact_1_phone', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_contact_1_name', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_date_next_update', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_url', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_contact_2_phone', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_contact_2_name', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_frequency', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('last_update_date', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('next_update_date', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_1_date_next_update', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_1_date_updated', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_1_disclosure_control', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_1_suppression_rules', sa.TEXT(), autoincrement=False, nullable=True))
op.create_foreign_key('secondary_source_2_type_of_statistic_fkey', 'page', 'type_of_statistic', ['secondary_source_2_type_of_statistic_id'], ['id'])
op.create_foreign_key('frequency_secondary_source_2_fkey', 'page', 'frequency_of_release', ['secondary_source_2_frequency_id'], ['id'])
op.create_foreign_key('organisation_secondary_source_2_fkey', 'page', 'organisation', ['secondary_source_2_publisher_id'], ['id'])
op.drop_column('page', 'secondary_source_1_type_of_data')
op.drop_column('page', 'suppression_and_disclosure')
op.drop_column('page', 'note_on_corrections_or_updates')
op.drop_column('page', 'secondary_source_1_note_on_corrections_or_updates')
op.drop_column('page', 'secondary_source_1_data_source_purpose')
# ### end Alembic commands ###
| 62.909091 | 152 | 0.759721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,878 | 0.509459 |
e04ce14b43e2b6f0784e3b17efec18f6e25f76d2 | 1,897 | py | Python | lib/core/parse/cmdline.py | vikas-kundu/phonedict | 6795cab0024e792340c43d95552162a985b891f6 | [
"MIT"
] | null | null | null | lib/core/parse/cmdline.py | vikas-kundu/phonedict | 6795cab0024e792340c43d95552162a985b891f6 | [
"MIT"
] | null | null | null | lib/core/parse/cmdline.py | vikas-kundu/phonedict | 6795cab0024e792340c43d95552162a985b891f6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# coded by Vikas Kundu https://github.com/vikas-kundu
# -------------------------------------------
import sys
import getopt
import time
import config
from lib.core.parse import banner
from lib.core import util
from lib.core import installer
def options():
argv = sys.argv[1:]
try:
opts, args = getopt.getopt(argv, 'm:t:c:o:n:whi', ['mode','task','country','output','number','wizard','help','install'])
if((len(sys.argv)==9) or (len(sys.argv)==2)):
pass
else:
print("Error! Some parameter is missing please check!")
time.sleep(2)
banner.usage()
sys.exit()
except getopt.GetoptError as err:
print(err)
banner.usage()
sys.exit(2)
for (o, a) in opts:
if(o in('-i','--install')):
if(util.packages_check()==False):
installer.start_install()
else:
print("Packages already installed!")
sys.exit()
elif (o in ('-w', '--wizard')):
config.wizard=True
elif o in ('-h','--help'):
banner.usage()
sys.exit()
elif o in ('-m','--mode'):
config.str_mode=str(a)
elif o in ('-t','--task'):
config.str_task=str(a)
elif o in ('-c','--country'):
config.str_country=str(a.lower().strip('"\''))
elif o in ('-o','--output'):
config.str_output=str(a.strip('"\''))
elif o in ('-n','--number'):
config.str_number=str(a.strip('"\''))
else:
print("Something went wrong with argument parsing!")
time.sleep(2)
banner.usage()
sys.exit()
| 28.313433 | 129 | 0.461255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 466 | 0.245651 |
e04d583757322341dcf56eb5852389f9fd5b2748 | 1,634 | py | Python | mistral/tests/unit/utils/test_utils.py | shubhamdang/mistral | 3c83837f6ce1e4ab74fb519a63e82eaae70f9d2d | [
"Apache-2.0"
] | 205 | 2015-06-21T11:51:47.000Z | 2022-03-05T04:00:04.000Z | mistral/tests/unit/utils/test_utils.py | shubhamdang/mistral | 3c83837f6ce1e4ab74fb519a63e82eaae70f9d2d | [
"Apache-2.0"
] | 8 | 2015-06-23T14:47:58.000Z | 2021-01-28T06:06:44.000Z | mistral/tests/unit/utils/test_utils.py | shubhamdang/mistral | 3c83837f6ce1e4ab74fb519a63e82eaae70f9d2d | [
"Apache-2.0"
] | 110 | 2015-06-14T03:34:38.000Z | 2021-11-11T12:12:56.000Z | # Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2015 - Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mistral import exceptions as exc
from mistral.tests.unit import base
from mistral.utils import ssh_utils
from mistral_lib import utils
class UtilsTest(base.BaseTest):
def test_itersubclasses(self):
class A(object):
pass
class B(A):
pass
class C(A):
pass
class D(C):
pass
self.assertEqual([B, C, D], list(utils.iter_subclasses(A)))
def test_paramiko_to_private_key(self):
self.assertRaises(
exc.DataAccessException,
ssh_utils._to_paramiko_private_key,
"../dir"
)
self.assertRaises(
exc.DataAccessException,
ssh_utils._to_paramiko_private_key,
"..\\dir"
)
self.assertIsNone(
ssh_utils._to_paramiko_private_key(private_key_filename=None,
password='pass')
)
| 29.178571 | 77 | 0.632191 | 800 | 0.489596 | 0 | 0 | 0 | 0 | 0 | 0 | 698 | 0.427173 |
e04da5eb604fc61099ea52110ba3398380247444 | 2,660 | py | Python | shoutcast_api/shoutcast_request.py | scls19fr/shoutcast_api | 89a9e826b82411ae5f24ea28e1b1cb22eaaa0890 | [
"MIT"
] | 6 | 2020-03-03T06:07:31.000Z | 2021-11-24T19:20:12.000Z | shoutcast_api/shoutcast_request.py | scls19fr/shoutcast_api | 89a9e826b82411ae5f24ea28e1b1cb22eaaa0890 | [
"MIT"
] | 6 | 2020-11-17T20:30:30.000Z | 2020-11-22T04:09:36.000Z | shoutcast_api/shoutcast_request.py | scls19fr/shoutcast_api | 89a9e826b82411ae5f24ea28e1b1cb22eaaa0890 | [
"MIT"
] | 1 | 2020-11-17T20:11:38.000Z | 2020-11-17T20:11:38.000Z | import xmltodict
import json
from .models import Tunein
from .utils import _init_session
from .Exceptions import APIException
base_url = 'http://api.shoutcast.com'
tunein_url = 'http://yp.shoutcast.com/{base}?id={id}'
tuneins = [Tunein('/sbin/tunein-station.pls'), Tunein('/sbin/tunein-station.m3u'), Tunein('/sbin/tunein-station.xspf')]
def call_api_xml(endpoint, params=None, session=None):
session = _init_session(session)
request_url = "{}{}".format(base_url, endpoint)
response = session.get(request_url, params=params)
if response.status_code == 200:
response_as_dict = xmltodict.parse(response.content)
api_response = response_as_dict.get('response')
if api_response:
api_status_code = int(api_response.get('statusCode'))
message = "statusText:{}, statusDetailText:{}".format(
api_response.get('statusText'), api_response.get('statusDetailText')
)
raise APIException(message, code=api_status_code)
return response_as_dict
raise APIException(response.content, code=response.status_code)
def call_api_json(endpoint, params=None, session=None):
session = _init_session(session)
request_url = "{}{}".format(base_url, endpoint)
response = session.get(request_url, params=params)
if response.status_code == 200:
json_response = json.loads(response.content.decode('utf-8'))
api_response = json_response.get('response')
api_status_code = int(api_response.get('statusCode'))
if api_status_code != 200:
message = "statusText:{}, statusDetailText:{}".format(
api_response.get('statusText'), api_response.get('statusDetailText', '')
)
raise APIException(message, code=api_status_code)
return json_response.get('response')['data']
raise APIException(response.reason, code=response.status_code)
def call_api_tunein(station_id: int, session=None):
session = _init_session(session)
url = tunein_url.format(base=tuneins[2], id=station_id)
response = session.get(url)
if response.status_code == 200:
api_response = xmltodict.parse(response.content.decode('utf-8'))
return api_response
raise APIException(response.reason, code=response.status_code)
def call_api_tunein_any(base: Tunein, station_id: int, session=None):
session = _init_session(session)
url = tunein_url.format(base=base, id=station_id)
response = session.get(url)
if response.status_code == 200:
return response.content.decode('utf-8')
raise APIException(response.reason, code=response.status_code)
| 38.550725 | 119 | 0.697368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 372 | 0.13985 |
e04ec585b764ff6cb1ec40221ed614d384e735f8 | 581 | py | Python | django_app_permissions/management/commands/resolve_app_groups.py | amp89/django-app-permissions | 11f576d2118f5b73fdbefa0675acc3374a5a9749 | [
"MIT"
] | 2 | 2020-09-04T04:12:30.000Z | 2020-10-20T00:12:01.000Z | django_app_permissions/management/commands/resolve_app_groups.py | amp89/django-app-permissions | 11f576d2118f5b73fdbefa0675acc3374a5a9749 | [
"MIT"
] | 4 | 2020-09-06T22:29:18.000Z | 2020-09-11T01:19:50.000Z | django_app_permissions/management/commands/resolve_app_groups.py | amp89/django-app-permissions | 11f576d2118f5b73fdbefa0675acc3374a5a9749 | [
"MIT"
] | null | null | null | from django.core.management.base import BaseCommand, no_translations
from django.contrib.auth.models import Group
from django.conf import settings
import sys
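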
class Command(BaseCommand):
    def handle(self, *args, **options):
        sys.stdout.write("\nResolving app groups")
        app_list = [app_name.lower() for app_name in settings.ACCESS_CONTROLLED_INSTALLED_APPS]
        for app_name in app_list:
            # get_or_create returns a (group, created) tuple; unpack it so the log reports the flag.
            group, created = Group.objects.get_or_create(name=app_name)
            sys.stdout.write(f"\n{app_name}, new={created}")
        sys.stdout.write("\n") | 32.277778 | 95 | 0.693632 | 419 | 0.72117 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.099828 |
e04f5b24d6bd2e775a7ec943b8b4d08de4e402bf | 34,343 | py | Python | swift/common/db.py | sunzz679/swift-2.4.0--source-read | 64355268da5265440f5f7e8d280dd8cd4c2cf2a2 | [
"Apache-2.0"
] | null | null | null | swift/common/db.py | sunzz679/swift-2.4.0--source-read | 64355268da5265440f5f7e8d280dd8cd4c2cf2a2 | [
"Apache-2.0"
] | null | null | null | swift/common/db.py | sunzz679/swift-2.4.0--source-read | 64355268da5265440f5f7e8d280dd8cd4c2cf2a2 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Database code for Swift """
from contextlib import contextmanager, closing
import hashlib
import logging
import os
from uuid import uuid4
import sys
import time
import errno
import six.moves.cPickle as pickle
from swift import gettext_ as _
from tempfile import mkstemp
from eventlet import sleep, Timeout
import sqlite3
from swift.common.constraints import MAX_META_COUNT, MAX_META_OVERALL_SIZE
from swift.common.utils import json, Timestamp, renamer, \
mkdirs, lock_parent_directory, fallocate
from swift.common.exceptions import LockTimeout
from swift.common.swob import HTTPBadRequest
#: Whether calls will be made to preallocate disk space for database files.
DB_PREALLOCATION = False
#: Timeout for trying to connect to a DB
BROKER_TIMEOUT = 25
#: Pickle protocol to use
PICKLE_PROTOCOL = 2
#: Max number of pending entries
PENDING_CAP = 131072
def utf8encode(*args):
return [(s.encode('utf8') if isinstance(s, unicode) else s) for s in args]
def utf8encodekeys(metadata):
uni_keys = [k for k in metadata if isinstance(k, unicode)]
for k in uni_keys:
sv = metadata[k]
del metadata[k]
metadata[k.encode('utf-8')] = sv
def _db_timeout(timeout, db_file, call):
with LockTimeout(timeout, db_file):
retry_wait = 0.001
while True:
try:
return call()
except sqlite3.OperationalError as e:
if 'locked' not in str(e):
raise
sleep(retry_wait)
retry_wait = min(retry_wait * 2, 0.05)
class DatabaseConnectionError(sqlite3.DatabaseError):
"""More friendly error messages for DB Errors."""
def __init__(self, path, msg, timeout=0):
self.path = path
self.timeout = timeout
self.msg = msg
def __str__(self):
return 'DB connection error (%s, %s):\n%s' % (
self.path, self.timeout, self.msg)
class DatabaseAlreadyExists(sqlite3.DatabaseError):
"""More friendly error messages for DB Errors."""
def __init__(self, path):
self.path = path
def __str__(self):
return 'DB %s already exists' % self.path
class GreenDBConnection(sqlite3.Connection):
"""SQLite DB Connection handler that plays well with eventlet."""
def __init__(self, database, timeout=None, *args, **kwargs):
if timeout is None:
timeout = BROKER_TIMEOUT
self.timeout = timeout
self.db_file = database
super(GreenDBConnection, self).__init__(database, 0, *args, **kwargs)
def cursor(self, cls=None):
if cls is None:
cls = GreenDBCursor
return sqlite3.Connection.cursor(self, cls)
def commit(self):
return _db_timeout(
self.timeout, self.db_file,
lambda: sqlite3.Connection.commit(self))
class GreenDBCursor(sqlite3.Cursor):
"""SQLite Cursor handler that plays well with eventlet."""
def __init__(self, *args, **kwargs):
self.timeout = args[0].timeout
self.db_file = args[0].db_file
super(GreenDBCursor, self).__init__(*args, **kwargs)
def execute(self, *args, **kwargs):
return _db_timeout(
self.timeout, self.db_file, lambda: sqlite3.Cursor.execute(
self, *args, **kwargs))
def dict_factory(crs, row):
"""
This should only be used when you need a real dict,
i.e. when you're going to serialize the results.
"""
return dict(
((col[0], row[idx]) for idx, col in enumerate(crs.description)))
def chexor(old, name, timestamp):
"""
Each entry in the account and container databases is XORed by the 128-bit
hash on insert or delete. This serves as a rolling, order-independent hash
of the contents. (check + XOR)
:param old: hex representation of the current DB hash
:param name: name of the object or container being inserted
:param timestamp: internalized timestamp of the new record
:returns: a hex representation of the new hash value
"""
if name is None:
raise Exception('name is None!')
new = hashlib.md5(('%s-%s' % (name, timestamp)).encode('utf8')).hexdigest()
return '%032x' % (int(old, 16) ^ int(new, 16))
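# Illustrative sketch (an assumption added for clarity, not part of the original module):
# because the accumulator is a plain XOR, replaying the same records in any order yields
# the same hash, and XORing the same (name, timestamp) pair a second time cancels it out:
#
#   h = '0' * 32
#   h = chexor(h, 'obj', '1400000000.00000')
#   assert chexor(h, 'obj', '1400000000.00000') == '0' * 32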
def get_db_connection(path, timeout=30, okay_to_create=False):
"""
Returns a properly configured SQLite database connection.
:param path: path to DB
:param timeout: timeout for connection
:param okay_to_create: if True, create the DB if it doesn't exist
:returns: DB connection object
"""
try:
connect_time = time.time()
conn = sqlite3.connect(path, check_same_thread=False,
factory=GreenDBConnection, timeout=timeout)
if path != ':memory:' and not okay_to_create:
# attempt to detect and fail when connect creates the db file
stat = os.stat(path)
if stat.st_size == 0 and stat.st_ctime >= connect_time:
os.unlink(path)
raise DatabaseConnectionError(path,
'DB file created by connect?')
conn.row_factory = sqlite3.Row
conn.text_factory = str
with closing(conn.cursor()) as cur:
cur.execute('PRAGMA synchronous = NORMAL')
cur.execute('PRAGMA count_changes = OFF')
cur.execute('PRAGMA temp_store = MEMORY')
cur.execute('PRAGMA journal_mode = DELETE')
conn.create_function('chexor', 3, chexor)
except sqlite3.DatabaseError:
import traceback
raise DatabaseConnectionError(path, traceback.format_exc(),
timeout=timeout)
return conn
class DatabaseBroker(object):
"""Encapsulates working with a database."""
def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None,
account=None, container=None, pending_timeout=None,
stale_reads_ok=False):
"""Encapsulates working with a database."""
self.conn = None
self.db_file = db_file
self.pending_file = self.db_file + '.pending'
self.pending_timeout = pending_timeout or 10
self.stale_reads_ok = stale_reads_ok
self.db_dir = os.path.dirname(db_file)
self.timeout = timeout
self.logger = logger or logging.getLogger()
self.account = account
self.container = container
self._db_version = -1
def __str__(self):
"""
Returns a string identifying the entity under broker to a human.
The baseline implementation returns a full pathname to a database.
This is vital for useful diagnostics.
"""
return self.db_file
def initialize(self, put_timestamp=None, storage_policy_index=None):
"""
Create the DB
The storage_policy_index is passed through to the subclass's
``_initialize`` method. It is ignored by ``AccountBroker``.
:param put_timestamp: internalized timestamp of initial PUT request
:param storage_policy_index: only required for containers
"""
if self.db_file == ':memory:':
tmp_db_file = None
conn = get_db_connection(self.db_file, self.timeout)
else:
mkdirs(self.db_dir)
fd, tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir)
os.close(fd)
conn = sqlite3.connect(tmp_db_file, check_same_thread=False,
factory=GreenDBConnection, timeout=0)
# creating dbs implicitly does a lot of transactions, so we
# pick fast, unsafe options here and do a big fsync at the end.
with closing(conn.cursor()) as cur:
cur.execute('PRAGMA synchronous = OFF')
cur.execute('PRAGMA temp_store = MEMORY')
cur.execute('PRAGMA journal_mode = MEMORY')
conn.create_function('chexor', 3, chexor)
conn.row_factory = sqlite3.Row
conn.text_factory = str
conn.executescript("""
CREATE TABLE outgoing_sync (
remote_id TEXT UNIQUE,
sync_point INTEGER,
updated_at TEXT DEFAULT 0
);
CREATE TABLE incoming_sync (
remote_id TEXT UNIQUE,
sync_point INTEGER,
updated_at TEXT DEFAULT 0
);
CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync
BEGIN
UPDATE outgoing_sync
SET updated_at = STRFTIME('%s', 'NOW')
WHERE ROWID = new.ROWID;
END;
CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync
BEGIN
UPDATE outgoing_sync
SET updated_at = STRFTIME('%s', 'NOW')
WHERE ROWID = new.ROWID;
END;
CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync
BEGIN
UPDATE incoming_sync
SET updated_at = STRFTIME('%s', 'NOW')
WHERE ROWID = new.ROWID;
END;
CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync
BEGIN
UPDATE incoming_sync
SET updated_at = STRFTIME('%s', 'NOW')
WHERE ROWID = new.ROWID;
END;
""")
if not put_timestamp:
put_timestamp = Timestamp(0).internal
self._initialize(conn, put_timestamp,
storage_policy_index=storage_policy_index)
conn.commit()
if tmp_db_file:
conn.close()
with open(tmp_db_file, 'r+b') as fp:
os.fsync(fp.fileno())
with lock_parent_directory(self.db_file, self.pending_timeout):
if os.path.exists(self.db_file):
# It's as if there was a "condition" where different parts
# of the system were "racing" each other.
raise DatabaseAlreadyExists(self.db_file)
renamer(tmp_db_file, self.db_file)
self.conn = get_db_connection(self.db_file, self.timeout)
else:
self.conn = conn
def delete_db(self, timestamp):
"""
Mark the DB as deleted
:param timestamp: internalized delete timestamp
"""
# first, clear the metadata
cleared_meta = {}
for k in self.metadata:
cleared_meta[k] = ('', timestamp)
self.update_metadata(cleared_meta)
# then mark the db as deleted
with self.get() as conn:
self._delete_db(conn, timestamp)
conn.commit()
def possibly_quarantine(self, exc_type, exc_value, exc_traceback):
"""
Checks the exception info to see if it indicates a quarantine situation
(malformed or corrupted database). If not, the original exception will
be reraised. If so, the database will be quarantined and a new
sqlite3.DatabaseError will be raised indicating the action taken.
"""
if 'database disk image is malformed' in str(exc_value):
exc_hint = 'malformed'
elif 'file is encrypted or is not a database' in str(exc_value):
exc_hint = 'corrupted'
elif 'disk I/O error' in str(exc_value):
exc_hint = 'disk error while accessing'
else:
raise exc_type, exc_value, exc_traceback
prefix_path = os.path.dirname(self.db_dir)
partition_path = os.path.dirname(prefix_path)
dbs_path = os.path.dirname(partition_path)
device_path = os.path.dirname(dbs_path)
quar_path = os.path.join(device_path, 'quarantined',
self.db_type + 's',
os.path.basename(self.db_dir))
try:
renamer(self.db_dir, quar_path, fsync=False)
except OSError as e:
if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
raise
quar_path = "%s-%s" % (quar_path, uuid4().hex)
renamer(self.db_dir, quar_path, fsync=False)
detail = _('Quarantined %s to %s due to %s database') % \
(self.db_dir, quar_path, exc_hint)
self.logger.error(detail)
raise sqlite3.DatabaseError(detail)
@contextmanager
def get(self):
"""Use with the "with" statement; returns a database connection."""
if not self.conn:
if self.db_file != ':memory:' and os.path.exists(self.db_file):
try:
self.conn = get_db_connection(self.db_file, self.timeout)
except (sqlite3.DatabaseError, DatabaseConnectionError):
self.possibly_quarantine(*sys.exc_info())
else:
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
conn = self.conn
self.conn = None
try:
yield conn
conn.rollback()
self.conn = conn
except sqlite3.DatabaseError:
try:
conn.close()
except Exception:
pass
self.possibly_quarantine(*sys.exc_info())
except (Exception, Timeout):
conn.close()
raise
@contextmanager
def lock(self):
"""Use with the "with" statement; locks a database."""
if not self.conn:
if self.db_file != ':memory:' and os.path.exists(self.db_file):
self.conn = get_db_connection(self.db_file, self.timeout)
else:
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
conn = self.conn
self.conn = None
orig_isolation_level = conn.isolation_level
conn.isolation_level = None
conn.execute('BEGIN IMMEDIATE')
try:
yield True
except (Exception, Timeout):
pass
try:
conn.execute('ROLLBACK')
conn.isolation_level = orig_isolation_level
self.conn = conn
except (Exception, Timeout):
logging.exception(
_('Broker error trying to rollback locked connection'))
conn.close()
def newid(self, remote_id):
"""
Re-id the database. This should be called after an rsync.
:param remote_id: the ID of the remote database being rsynced in
"""
with self.get() as conn:
row = conn.execute('''
UPDATE %s_stat SET id=?
''' % self.db_type, (str(uuid4()),))
row = conn.execute('''
SELECT ROWID FROM %s ORDER BY ROWID DESC LIMIT 1
''' % self.db_contains_type).fetchone()
sync_point = row['ROWID'] if row else -1
conn.execute('''
INSERT OR REPLACE INTO incoming_sync (sync_point, remote_id)
VALUES (?, ?)
''', (sync_point, remote_id))
self._newid(conn)
conn.commit()
def _newid(self, conn):
# Override for additional work when receiving an rsynced db.
pass
def _is_deleted(self, conn):
"""
Check if the database is considered deleted
:param conn: database conn
:returns: True if the DB is considered to be deleted, False otherwise
"""
raise NotImplementedError()
def is_deleted(self):
"""
Check if the DB is considered to be deleted.
:returns: True if the DB is considered to be deleted, False otherwise
"""
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
return True
self._commit_puts_stale_ok()
with self.get() as conn:
return self._is_deleted(conn)
def merge_timestamps(self, created_at, put_timestamp, delete_timestamp):
"""
Used in replication to handle updating timestamps.
:param created_at: create timestamp
:param put_timestamp: put timestamp
:param delete_timestamp: delete timestamp
"""
with self.get() as conn:
old_status = self._is_deleted(conn)
conn.execute('''
UPDATE %s_stat SET created_at=MIN(?, created_at),
put_timestamp=MAX(?, put_timestamp),
delete_timestamp=MAX(?, delete_timestamp)
''' % self.db_type, (created_at, put_timestamp, delete_timestamp))
if old_status != self._is_deleted(conn):
timestamp = Timestamp(time.time())
self._update_status_changed_at(conn, timestamp.internal)
conn.commit()
def get_items_since(self, start, count):
"""
Get a list of objects in the database between start and end.
:param start: start ROWID
:param count: number to get
:returns: list of objects between start and end
"""
self._commit_puts_stale_ok()
with self.get() as conn:
curs = conn.execute('''
SELECT * FROM %s WHERE ROWID > ? ORDER BY ROWID ASC LIMIT ?
''' % self.db_contains_type, (start, count))
curs.row_factory = dict_factory
return [r for r in curs]
def get_sync(self, id, incoming=True):
"""
Gets the most recent sync point for a server from the sync table.
:param id: remote ID to get the sync_point for
:param incoming: if True, get the last incoming sync, otherwise get
the last outgoing sync
:returns: the sync point, or -1 if the id doesn't exist.
"""
with self.get() as conn:
row = conn.execute(
"SELECT sync_point FROM %s_sync WHERE remote_id=?"
% ('incoming' if incoming else 'outgoing'), (id,)).fetchone()
if not row:
return -1
return row['sync_point']
def get_syncs(self, incoming=True):
"""
Get a serialized copy of the sync table.
:param incoming: if True, get the last incoming sync, otherwise get
the last outgoing sync
:returns: list of {'remote_id', 'sync_point'}
"""
with self.get() as conn:
curs = conn.execute('''
SELECT remote_id, sync_point FROM %s_sync
''' % ('incoming' if incoming else 'outgoing'))
result = []
for row in curs:
result.append({'remote_id': row[0], 'sync_point': row[1]})
return result
def get_max_row(self):
query = '''
SELECT SQLITE_SEQUENCE.seq
FROM SQLITE_SEQUENCE
WHERE SQLITE_SEQUENCE.name == '%s'
LIMIT 1
''' % (self.db_contains_type)
with self.get() as conn:
row = conn.execute(query).fetchone()
return row[0] if row else -1
def get_replication_info(self):
"""
Get information about the DB required for replication.
:returns: dict containing keys from get_info plus max_row and metadata
Note:: get_info's <db_contains_type>_count is translated to just
"count" and metadata is the raw string.
"""
info = self.get_info()
info['count'] = info.pop('%s_count' % self.db_contains_type)
info['metadata'] = self.get_raw_metadata()
info['max_row'] = self.get_max_row()
return info
def get_info(self):
self._commit_puts_stale_ok()
with self.get() as conn:
curs = conn.execute('SELECT * from %s_stat' % self.db_type)
curs.row_factory = dict_factory
return curs.fetchone()
    # Add a single record to the database
def put_record(self, record):
if self.db_file == ':memory:':
self.merge_items([record])
return
if not os.path.exists(self.db_file):
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        # Take a lock on the database's parent directory
with lock_parent_directory(self.pending_file, self.pending_timeout):
pending_size = 0
try:
pending_size = os.path.getsize(self.pending_file)
except OSError as err:
if err.errno != errno.ENOENT:
raise
if pending_size > PENDING_CAP:
self._commit_puts([record])
else:
                # Write the object record into the database's pending file
with open(self.pending_file, 'a+b') as fp:
# Colons aren't used in base64 encoding; so they are our
# delimiter
fp.write(':')
fp.write(pickle.dumps(
self.make_tuple_for_pickle(record),
protocol=PICKLE_PROTOCOL).encode('base64'))
fp.flush()
def _commit_puts(self, item_list=None):
"""
Scan for .pending files and commit the found records by feeding them
to merge_items(). Assume that lock_parent_directory has already been
called.
:param item_list: A list of items to commit in addition to .pending
"""
if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
return
if item_list is None:
item_list = []
self._preallocate()
if not os.path.getsize(self.pending_file):
if item_list:
self.merge_items(item_list)
return
with open(self.pending_file, 'r+b') as fp:
for entry in fp.read().split(':'):
if entry:
try:
self._commit_puts_load(item_list, entry)
except Exception:
self.logger.exception(
_('Invalid pending entry %(file)s: %(entry)s'),
{'file': self.pending_file, 'entry': entry})
if item_list:
self.merge_items(item_list)
try:
os.ftruncate(fp.fileno(), 0)
except OSError as err:
if err.errno != errno.ENOENT:
raise
def _commit_puts_stale_ok(self):
"""
Catch failures of _commit_puts() if broker is intended for
reading of stats, and thus does not care for pending updates.
"""
if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
return
try:
with lock_parent_directory(self.pending_file,
self.pending_timeout):
self._commit_puts()
except LockTimeout:
if not self.stale_reads_ok:
raise
def _commit_puts_load(self, item_list, entry):
"""
Unmarshall the :param:entry and append it to :param:item_list.
This is implemented by a particular broker to be compatible
with its :func:`merge_items`.
"""
raise NotImplementedError
def make_tuple_for_pickle(self, record):
"""
Turn this db record dict into the format this service uses for
pending pickles.
"""
raise NotImplementedError
def merge_syncs(self, sync_points, incoming=True):
"""
Merge a list of sync points with the incoming sync table.
:param sync_points: list of sync points where a sync point is a dict of
{'sync_point', 'remote_id'}
:param incoming: if True, get the last incoming sync, otherwise get
the last outgoing sync
"""
with self.get() as conn:
for rec in sync_points:
try:
conn.execute('''
INSERT INTO %s_sync (sync_point, remote_id)
VALUES (?, ?)
''' % ('incoming' if incoming else 'outgoing'),
(rec['sync_point'], rec['remote_id']))
except sqlite3.IntegrityError:
conn.execute('''
UPDATE %s_sync SET sync_point=max(?, sync_point)
WHERE remote_id=?
''' % ('incoming' if incoming else 'outgoing'),
(rec['sync_point'], rec['remote_id']))
conn.commit()
def _preallocate(self):
"""
The idea is to allocate space in front of an expanding db. If it gets
within 512k of a boundary, it allocates to the next boundary.
Boundaries are 2m, 5m, 10m, 25m, 50m, then every 50m after.
"""
if not DB_PREALLOCATION or self.db_file == ':memory:':
return
MB = (1024 * 1024)
def prealloc_points():
for pm in (1, 2, 5, 10, 25, 50):
yield pm * MB
while True:
pm += 50
yield pm * MB
stat = os.stat(self.db_file)
file_size = stat.st_size
allocated_size = stat.st_blocks * 512
for point in prealloc_points():
if file_size <= point - MB / 2:
prealloc_size = point
break
if allocated_size < prealloc_size:
with open(self.db_file, 'rb+') as fp:
fallocate(fp.fileno(), int(prealloc_size))
def get_raw_metadata(self):
with self.get() as conn:
try:
metadata = conn.execute('SELECT metadata FROM %s_stat' %
self.db_type).fetchone()[0]
except sqlite3.OperationalError as err:
if 'no such column: metadata' not in str(err):
raise
metadata = ''
return metadata
@property
def metadata(self):
"""
Returns the metadata dict for the database. The metadata dict values
are tuples of (value, timestamp) where the timestamp indicates when
that key was set to that value.
"""
metadata = self.get_raw_metadata()
if metadata:
metadata = json.loads(metadata)
utf8encodekeys(metadata)
else:
metadata = {}
return metadata
@staticmethod
def validate_metadata(metadata):
"""
Validates that metadata_falls within acceptable limits.
:param metadata: to be validated
:raises: HTTPBadRequest if MAX_META_COUNT or MAX_META_OVERALL_SIZE
is exceeded
"""
meta_count = 0
meta_size = 0
for key, (value, timestamp) in metadata.items():
key = key.lower()
if value != '' and (key.startswith('x-account-meta') or
key.startswith('x-container-meta')):
prefix = 'x-account-meta-'
if key.startswith('x-container-meta-'):
prefix = 'x-container-meta-'
key = key[len(prefix):]
meta_count = meta_count + 1
meta_size = meta_size + len(key) + len(value)
if meta_count > MAX_META_COUNT:
raise HTTPBadRequest('Too many metadata items; max %d'
% MAX_META_COUNT)
if meta_size > MAX_META_OVERALL_SIZE:
raise HTTPBadRequest('Total metadata too large; max %d'
% MAX_META_OVERALL_SIZE)
def update_metadata(self, metadata_updates, validate_metadata=False):
"""
Updates the metadata dict for the database. The metadata dict values
are tuples of (value, timestamp) where the timestamp indicates when
that key was set to that value. Key/values will only be overwritten if
the timestamp is newer. To delete a key, set its value to ('',
timestamp). These empty keys will eventually be removed by
:func:`reclaim`
"""
        # Read the existing metadata from the database as a dict and keep it in old_metadata
old_metadata = self.metadata
        # If the incoming metadata keys are a subset of the existing metadata keys
if set(metadata_updates).issubset(set(old_metadata)):
            # Check the timestamps; the network can reorder requests, so only the most recently issued update for a key wins
for key, (value, timestamp) in metadata_updates.items():
if timestamp > old_metadata[key][1]:
break
else:
                # Every incoming item is stale, so there is nothing to do
return
        # Reaching this point means some metadata needs to be updated
with self.get() as conn:
try:
md = conn.execute('SELECT metadata FROM %s_stat' %
self.db_type).fetchone()[0]
md = json.loads(md) if md else {}
utf8encodekeys(md)
except sqlite3.OperationalError as err:
if 'no such column: metadata' not in str(err):
raise
conn.execute("""
ALTER TABLE %s_stat
ADD COLUMN metadata TEXT DEFAULT '' """ % self.db_type)
md = {}
            # Walk through all pending metadata updates, applying only keys that are new or carry a newer timestamp
for key, value_timestamp in metadata_updates.items():
value, timestamp = value_timestamp
if key not in md or timestamp > md[key][1]:
md[key] = value_timestamp
if validate_metadata:
DatabaseBroker.validate_metadata(md)
conn.execute('UPDATE %s_stat SET metadata = ?' % self.db_type,
(json.dumps(md),))
conn.commit()
def reclaim(self, age_timestamp, sync_timestamp):
"""
Delete rows from the db_contains_type table that are marked deleted
and whose created_at timestamp is < age_timestamp. Also deletes rows
from incoming_sync and outgoing_sync where the updated_at timestamp is
< sync_timestamp.
In addition, this calls the DatabaseBroker's :func:`_reclaim` method.
:param age_timestamp: max created_at timestamp of object rows to delete
:param sync_timestamp: max update_at timestamp of sync rows to delete
"""
if self.db_file != ':memory:' and os.path.exists(self.pending_file):
with lock_parent_directory(self.pending_file,
self.pending_timeout):
self._commit_puts()
with self.get() as conn:
conn.execute('''
DELETE FROM %s WHERE deleted = 1 AND %s < ?
''' % (self.db_contains_type, self.db_reclaim_timestamp),
(age_timestamp,))
try:
conn.execute('''
DELETE FROM outgoing_sync WHERE updated_at < ?
''', (sync_timestamp,))
conn.execute('''
DELETE FROM incoming_sync WHERE updated_at < ?
''', (sync_timestamp,))
except sqlite3.OperationalError as err:
# Old dbs didn't have updated_at in the _sync tables.
if 'no such column: updated_at' not in str(err):
raise
DatabaseBroker._reclaim(self, conn, age_timestamp)
conn.commit()
def _reclaim(self, conn, timestamp):
"""
Removes any empty metadata values older than the timestamp using the
given database connection. This function will not call commit on the
conn, but will instead return True if the database needs committing.
This function was created as a worker to limit transactions and commits
from other related functions.
:param conn: Database connection to reclaim metadata within.
:param timestamp: Empty metadata items last updated before this
timestamp will be removed.
:returns: True if conn.commit() should be called
"""
try:
md = conn.execute('SELECT metadata FROM %s_stat' %
self.db_type).fetchone()[0]
if md:
md = json.loads(md)
keys_to_delete = []
for key, (value, value_timestamp) in md.items():
if value == '' and value_timestamp < timestamp:
keys_to_delete.append(key)
if keys_to_delete:
for key in keys_to_delete:
del md[key]
conn.execute('UPDATE %s_stat SET metadata = ?' %
self.db_type, (json.dumps(md),))
return True
except sqlite3.OperationalError as err:
if 'no such column: metadata' not in str(err):
raise
return False
def update_put_timestamp(self, timestamp):
"""
Update the put_timestamp. Only modifies it if it is greater than
the current timestamp.
:param timestamp: internalized put timestamp
"""
with self.get() as conn:
conn.execute(
'UPDATE %s_stat SET put_timestamp = ?'
' WHERE put_timestamp < ?' % self.db_type,
(timestamp, timestamp))
conn.commit()
def update_status_changed_at(self, timestamp):
"""
Update the status_changed_at field in the stat table. Only
modifies status_changed_at if the timestamp is greater than the
current status_changed_at timestamp.
:param timestamp: internalized timestamp
"""
with self.get() as conn:
self._update_status_changed_at(conn, timestamp)
conn.commit()
def _update_status_changed_at(self, conn, timestamp):
conn.execute(
'UPDATE %s_stat SET status_changed_at = ?'
' WHERE status_changed_at < ?' % self.db_type,
(timestamp, timestamp))
| 38.032115 | 79 | 0.573945 | 30,121 | 0.868466 | 2,828 | 0.081539 | 3,532 | 0.101837 | 0 | 0 | 14,034 | 0.404636 |
e0530a4b979886c9eec477ba716b7cb1d54f44a5 | 12,101 | py | Python | xdl/utils/prop_limits.py | mcrav/xdl | c120a1cf50a9b668a79b118700930eb3d60a9298 | [
"MIT"
] | null | null | null | xdl/utils/prop_limits.py | mcrav/xdl | c120a1cf50a9b668a79b118700930eb3d60a9298 | [
"MIT"
] | null | null | null | xdl/utils/prop_limits.py | mcrav/xdl | c120a1cf50a9b668a79b118700930eb3d60a9298 | [
"MIT"
] | null | null | null | """Prop limits are used to validate the input given to xdl elements. For
example, a volume property should be a positive number, optionally followed by
volume units. The prop limit is used to check that input supplied is valid for
that property.
"""
import re
from typing import List, Optional
class PropLimit(object):
"""Convenience class for storing prop limit. A prop limit is essentially a
regex for validating the input to a given prop. For example, checking
appropriate units are used or a value is within a certain range.
Either ``regex`` or ``enum`` must be given when instantiating. If ``enum``
is given it will override whatever is given for ``regex`` and ``hint``.
``hint`` and ``default`` are both optional, but recommended, at least when
using ``regex`` not ``enum``.
Arguments:
regex (str): Regex pattern that should match with valid values and not
match with invalid values.
hint (str): Useful hint for what valid value should look like, e.g.
"Volume should be a number followed by volume units, e.g. '5 mL'."
default (str): Default valid value. Should use standard units of the
quantity involved, e.g. for volume, '0 mL'.
enum (List[str]): List of values that the prop can take. This is used
to automatically generate a regex from the list of allowed values.
"""
def __init__(
self,
regex: Optional[str] = None,
hint: Optional[str] = '',
default: Optional[str] = '',
enum: Optional[List[str]] = [],
):
if not regex and not enum:
raise ValueError(
'Either `regex` or `enum` argument must be given.')
self.default = default
# If enum given generate regex from this
self.enum = enum
if enum:
if not regex:
self.regex = self.generate_enum_regex()
else:
self.regex = regex
if not hint:
self.hint = self.generate_enum_hint()
else:
self.hint = hint
# Otherwise just set regex as attribute
else:
self.regex = regex
self.hint = hint
def validate(self, value: str) -> bool:
"""Validate given value against prop limit regex.
Args:
value (str): Value to validate against prop limit.
Returns:
bool: True if the value matches the prop limit, otherwise False.
"""
return re.match(self.regex, value) is not None
def generate_enum_regex(self) -> str:
"""Generate regex from :py:attr:`enum`. Regex will match any of the
items in :py:attr:`enum`.
Returns:
str: Regex that will match any of the strings in the :py:attr:`enum`
list.
"""
regex = r'('
for item in self.enum:
regex += item + r'|'
regex = regex[:-1] + r')'
return regex
def generate_enum_hint(self) -> str:
"""Generate hint from :py:attr:`enum`. Hint will list all items in
:py:attr:`enum`.
Returns:
str: Hint listing all items in :py:attr:`enum`.
"""
s = 'Expecting one of '
for item in self.enum[:-1]:
s += f'"{item}", '
s = s[:-2] + f' or "{self.enum[-1]}".'
return s
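# The following helper is an illustrative addition (not part of the original
# module): it sketches how ``PropLimit`` validates values when constructed from
# an enum versus an explicit regex. The limits defined inside are hypothetical
# examples and are not used elsewhere in XDL.
def _example_prop_limit_usage():
    # Enum-based limit: the regex and hint are generated from the allowed values.
    phase_limit = PropLimit(enum=['top', 'bottom'])
    assert phase_limit.validate('top')
    assert not phase_limit.validate('middle')
    # Regex-based limit: hint and default are supplied explicitly.
    volume_limit = PropLimit(
        regex=r'^[0-9]+(?:[.][0-9]+)?[ ]?mL$',
        hint='Expecting a volume in millilitres, e.g. "5 mL"',
        default='0 mL',
    )
    assert volume_limit.validate('5 mL')
    assert not volume_limit.validate('five mL')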
##################
# Regex patterns #
##################
#: Pattern to match a positive or negative float,
#: e.g. '0', '-1', '1', '-10.3', '10.3', '0.0' would all be matched by this
#: pattern.
FLOAT_PATTERN: str = r'([-]?[0-9]+(?:[.][0-9]+)?)'
#: Pattern to match a positive float,
#: e.g. '0', 1', '10.3', '0.0' would all be matched by this pattern, but not
#: '-10.3' or '-1'.
POSITIVE_FLOAT_PATTERN: str = r'([0-9]+(?:[.][0-9]+)?)'
#: Pattern to match boolean strings, specifically matching 'true' and 'false'
#: case insensitvely.
BOOL_PATTERN: str = r'(false|False|true|True)'
#: Pattern to match all accepted volumes units case insensitvely, or empty string.
VOLUME_UNITS_PATTERN: str = r'(l|L|litre|litres|liter|liters|ml|mL|cm3|cc|millilitre|millilitres|milliliter|milliliters|cl|cL|centilitre|centilitres|centiliter|centiliters|dl|dL|decilitre|decilitres|deciliter|deciliters|ul|uL|μl|μL|microlitre|microlitres|microliter|microliters)?'
#: Pattern to match all accepted mass units, or empty string.
MASS_UNITS_PATTERN: str = r'(g|gram|grams|kg|kilogram|kilograms|mg|milligram|milligrams|ug|μg|microgram|micrograms)?'
#: Pattern to match all accepted temperature units, or empty string.
TEMP_UNITS_PATTERN: str = r'(°C|K|F)?'
#: Pattern to match all accepted time units, or empty string.
TIME_UNITS_PATTERN = r'(days|day|h|hr|hrs|hour|hours|m|min|mins|minute|minutes|s|sec|secs|second|seconds)?'
#: Pattern to match all accepted pressure units, or empty string.
PRESSURE_UNITS_PATTERN = r'(mbar|bar|torr|Torr|mmhg|mmHg|atm|Pa|pa)?'
#: Pattern to match all accepted rotation speed units, or empty string.
ROTATION_SPEED_UNITS_PATTERN = r'(rpm|RPM)?'
#: Pattern to match all accepted length units, or empty string.
DISTANCE_UNITS_PATTERN = r'(nm|µm|mm|cm|m|km)?'
#: Pattern to match all accepted mol units, or empty string.
MOL_UNITS_PATTERN = r'(mmol|mol)?'
###############
# Prop limits #
###############
def generate_quantity_units_pattern(
quantity_pattern: str,
units_pattern: str,
hint: Optional[str] = '',
default: Optional[str] = ''
) -> PropLimit:
"""
Convenience function to generate PropLimit object for different quantity
types, i.e. for variations on the number followed by unit pattern.
Args:
quantity_pattern (str): Pattern to match the number expected. This will
typically be ``POSITIVE_FLOAT_PATTERN`` or ``FLOAT_PATTERN``.
units_pattern (str): Pattern to match the units expected or empty
string. Empty string is matched as not including units is allowed
as in this case standard units are used.
hint (str): Hint for the prop limit to tell the user what correct input
should look like in the case of an errror.
default (str): Default value for the prop limit, should use standard
units for the prop involved.
"""
return PropLimit(
regex=r'^((' + quantity_pattern + r'[ ]?'\
+ units_pattern + r'$)|(^' + quantity_pattern + r'))$',
hint=hint,
default=default
)
# NOTE: It is important here that defaults use the standard unit for that
# quantity type as XDL app uses this to add in default units.
#: Prop limit for volume props.
VOLUME_PROP_LIMIT: PropLimit = PropLimit(
regex=r'^(all|(' + POSITIVE_FLOAT_PATTERN + r'[ ]?'\
+ VOLUME_UNITS_PATTERN + r')|(' + POSITIVE_FLOAT_PATTERN + r'))$',
hint='Expecting number followed by standard volume units, e.g. "5.5 mL"',
default='0 mL',
)
#: Prop limit for mass props.
MASS_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
POSITIVE_FLOAT_PATTERN,
MASS_UNITS_PATTERN,
hint='Expecting number followed by standard mass units, e.g. "2.3 g"',
default='0 g'
)
#: Prop limit for mol props.
MOL_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
POSITIVE_FLOAT_PATTERN,
MOL_UNITS_PATTERN,
hint='Expecting number followed by mol or mmol, e.g. "2.3 mol".',
default='0 mol',
)
#: Prop limit for temp props.
TEMP_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
FLOAT_PATTERN,
TEMP_UNITS_PATTERN,
hint='Expecting number in degrees celsius or number followed by standard temperature units, e.g. "25", "25°C", "298 K".',
default='25°C',
)
#: Prop limit for time props.
TIME_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
POSITIVE_FLOAT_PATTERN,
TIME_UNITS_PATTERN,
hint='Expecting number followed by standard time units, e.g. "15 mins", "3 hrs".',
default='0 secs'
)
#: Prop limit for pressure props.
PRESSURE_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
POSITIVE_FLOAT_PATTERN,
PRESSURE_UNITS_PATTERN,
hint='Expecting number followed by standard pressure units, e.g. "50 mbar", "1 atm".',
default='1013.25 mbar'
)
#: Prop limit for rotation speed props.
ROTATION_SPEED_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
POSITIVE_FLOAT_PATTERN,
ROTATION_SPEED_UNITS_PATTERN,
hint='Expecting RPM value, e.g. "400 RPM".',
default='400 RPM',
)
#: Prop limit for wavelength props.
WAVELENGTH_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
POSITIVE_FLOAT_PATTERN,
DISTANCE_UNITS_PATTERN,
hint='Expecting wavelength, e.g. "400 nm".',
default='400 nm'
)
#: Prop limit for any props requiring a positive integer such as ``repeats``.
#: Used if no explicit property is given and prop type is ``int``.
POSITIVE_INT_PROP_LIMIT: PropLimit = PropLimit(
r'[0-9]+',
hint='Expecting positive integer value, e.g. "3"',
default='1',
)
#: Prop limit for any props requiring a positive float. Used if no explicit
#: prop type is given and prop type is ``float``.
POSITIVE_FLOAT_PROP_LIMIT: PropLimit = PropLimit(
regex=POSITIVE_FLOAT_PATTERN,
hint='Expecting positive float value, e.g. "3", "3.5"',
default='0',
)
#: Prop limit for any props requiring a boolean value. Used if no explicit prop
#: type is given and prop type is ``bool``.
BOOL_PROP_LIMIT: PropLimit = PropLimit(
BOOL_PATTERN,
hint='Expecting one of "false" or "true".',
default='false',
)
#: Prop limit for ``WashSolid`` ``stir`` prop. This is a special case as the
#: value can be ``True``, ``False`` or ``'solvent'``.
WASH_SOLID_STIR_PROP_LIMIT: PropLimit = PropLimit(
r'(' + BOOL_PATTERN + r'|solvent)',
enum=['true', 'solvent', 'false'],
hint='Expecting one of "true", "false" or "solvent".',
default='True'
)
#: Prop limit for ``Separate`` ``purpose`` prop. One of 'extract' or 'wash'.
SEPARATION_PURPOSE_PROP_LIMIT: PropLimit = PropLimit(enum=['extract', 'wash'])
#: Prop limit for ``Separate`` ``product_phase`` prop. One of 'top' or 'bottom'.
SEPARATION_PRODUCT_PHASE_PROP_LIMIT: PropLimit = PropLimit(enum=['top', 'bottom'])
#: Prop limit for ``Add`` ``purpose`` prop. One of 'neutralize', 'precipitate',
#: 'dissolve', 'basify', 'acidify' or 'dilute'.
ADD_PURPOSE_PROP_LIMIT = PropLimit(
enum=[
'neutralize',
'precipitate',
'dissolve',
'basify',
'acidify',
'dilute',
]
)
#: Prop limit for ``HeatChill`` ``purpose`` prop. One of 'control-exotherm',
#: 'reaction' or 'unstable-reagent'.
HEATCHILL_PURPOSE_PROP_LIMIT = PropLimit(
enum=['control-exotherm', 'reaction', 'unstable-reagent']
)
#: Prop limit for ``Stir`` ``purpose`` prop. 'dissolve' is only option.
STIR_PURPOSE_PROP_LIMIT = PropLimit(
enum=['dissolve']
)
#: Prop limit for ``Reagent`` ``role`` prop. One of 'solvent', 'reagent',
#: 'catalyst', 'substrate', 'acid', 'base' or 'activating-agent'.
REAGENT_ROLE_PROP_LIMIT = PropLimit(
enum=[
'solvent',
'reagent',
'catalyst',
'substrate',
'acid',
'base',
'activating-agent'
]
)
#: Prop limit for ``Component`` ``component_type`` prop. One of 'reactor',
#: 'filter', 'separator', 'rotavap' or 'flask'.
COMPONENT_TYPE_PROP_LIMIT: PropLimit = PropLimit(
enum=['reactor', 'filter', 'separator', 'rotavap', 'flask']
)
#: Pattern matching a float of value 100, e.g. '100', '100.0', '100.000' would
#: all be matched.
_hundred_float: str = r'(100(?:[.][0]+)?)'
#: Pattern matching any float between 10.000 and 99.999.
_ten_to_ninety_nine_float: str = r'([0-9][0-9](?:[.][0-9]+)?)'
#: Pattern matching any float between 0 and 9.999.
_zero_to_ten_float: str = r'([0-9](?:[.][0-9]+)?)'
#: Pattern matching float between 0 and 100. Used for percentages.
PERCENT_RANGE_PROP_LIMIT: PropLimit = PropLimit(
r'^(' + _hundred_float + '|'\
+ _ten_to_ninety_nine_float + '|' + _zero_to_ten_float + ')$',
hint='Expecting number from 0-100 representing a percentage, e.g. "50", "8.5".',
default='0',
)
| 35.591176 | 277 | 0.650029 | 3,099 | 0.255946 | 0 | 0 | 0 | 0 | 0 | 0 | 7,793 | 0.643624 |
e0531fdc3eeb8a1247c13837ac5c2a532816fd2e | 3,884 | py | Python | dit/utils/bindargs.py | leoalfonso/dit | e7d5f680b3f170091bb1e488303f4255eeb11ef4 | [
"BSD-3-Clause"
] | 1 | 2021-03-15T08:51:42.000Z | 2021-03-15T08:51:42.000Z | dit/utils/bindargs.py | leoalfonso/dit | e7d5f680b3f170091bb1e488303f4255eeb11ef4 | [
"BSD-3-Clause"
] | null | null | null | dit/utils/bindargs.py | leoalfonso/dit | e7d5f680b3f170091bb1e488303f4255eeb11ef4 | [
"BSD-3-Clause"
] | null | null | null | """
Provides usable args and kwargs from inspect.getcallargs.
For Python 3.3 and above, this module is unnecessary and can be achieved using
features from PEP 362:
http://www.python.org/dev/peps/pep-0362/
For example, to override a parameter of some function:
>>> import inspect
>>> def func(a, b=1, c=2, d=3):
... return a, b, c, d
...
>>> def override_c(*args, **kwargs):
...     sig = inspect.signature(func)
...     ba = sig.bind(*args, **kwargs)
...     ba.arguments['c'] = 10
...     return func(*ba.args, **ba.kwargs)
...
>>> override_c(0, c=3)
(0, 1, 10, 3)
Also useful:
http://www.python.org/dev/peps/pep-3102/
"""
import sys
import inspect
from inspect import getcallargs
try:
from inspect import getfullargspec
except ImportError:
# Python 2.X
from collections import namedtuple
from inspect import getargspec
FullArgSpec = namedtuple('FullArgSpec',
'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')
def getfullargspec(f):
args, varargs, varkw, defaults = getargspec(f)
kwonlyargs = []
kwonlydefaults = None
annotations = getattr(f, '__annotations__', {})
return FullArgSpec(args, varargs, varkw, defaults,
kwonlyargs, kwonlydefaults, annotations)
def bindcallargs_leq32(_fUnCtIoN_, *args, **kwargs):
"""Binds arguments and keyword arguments to a function or method.
Returns a tuple (bargs, bkwargs) suitable for manipulation and passing
to the specified function.
`bargs` consists of the bound args, varargs, and kwonlyargs from
getfullargspec. `bkwargs` consists of the bound varkw from getfullargspec.
Both can be used in a call to the specified function. Any default
parameter values are included in the output.
Examples
--------
>>> def func(a, b=3, *args, **kwargs):
... pass
>>> bindcallargs(func, 5)
((5, 3), {})
>>> bindcallargs(func, 5, 4, 3, 2, 1, hello='there')
((5, 4, 3, 2, 1), {'hello': 'there'})
>>> args, kwargs = bindcallargs(func, 5)
>>> kwargs['b'] = 5 # overwrite default value for b
>>> func(*args, **kwargs)
"""
# It is necessary to choose an unlikely variable name for the function.
# The reason is that any kwarg by the same name will cause a TypeError
# due to multiple values being passed for that argument name.
func = _fUnCtIoN_
callargs = getcallargs(func, *args, **kwargs)
spec = getfullargspec(func)
# Construct all args and varargs and use them in bargs
bargs = [callargs[arg] for arg in spec.args]
if spec.varargs is not None:
bargs.extend(callargs[spec.varargs])
bargs = tuple(bargs)
# Start with kwonlyargs.
bkwargs = {kwonlyarg: callargs[kwonlyarg] for kwonlyarg in spec.kwonlyargs}
# Add in kwonlydefaults for unspecified kwonlyargs only.
    # Since keyword-only arguments aren't allowed in python2, and we
# don't support python 3.0, 3.1, 3.2, this should never be executed:
if spec.kwonlydefaults is not None: # pragma: no cover
bkwargs.update({k: v for k, v in spec.kwonlydefaults.items()
if k not in bkwargs})
# Add in varkw.
if spec.varkw is not None:
bkwargs.update(callargs[spec.varkw])
return bargs, bkwargs
def bindcallargs_geq33(_fUnCtIoN_, *args, **kwargs):
    # Should match functionality of bindcallargs_leq32 for Python >= 3.3.
    sig = inspect.signature(_fUnCtIoN_)
    ba = sig.bind(*args, **kwargs)
    # Add in all default values. Unbound *args/**kwargs have no real default
    # (their "default" is Parameter.empty), so skip them and let
    # BoundArguments treat them as empty.
    for param in sig.parameters.values():
        if param.name not in ba.arguments:
            if param.kind in (inspect.Parameter.VAR_POSITIONAL,
                              inspect.Parameter.VAR_KEYWORD):
                continue
            ba.arguments[param.name] = param.default
    return ba.args, ba.kwargs
if sys.version_info[0:2] < (3,3):
bindcallargs = bindcallargs_leq32
else:
bindcallargs = bindcallargs_geq33
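# Illustrative sketch (not part of the original module): overriding one bound
# argument before calling a function, using whichever ``bindcallargs`` variant
# was selected above. The function and values below are hypothetical.
def _example_override_height():
    def area(width, height=1, scale=1.0):
        return width * height * scale
    # Defaults are filled in: bargs == (4, 1, 2.0); bkwargs == {} here because
    # ``area`` declares no **kwargs.
    bargs, bkwargs = bindcallargs(area, 4, scale=2.0)
    bargs = (bargs[0], 5) + bargs[2:]  # override ``height`` before the call
    return area(*bargs, **bkwargs)  # -> 40.0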
| 31.072 | 79 | 0.65036 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,203 | 0.567199 |
e053d13d8a4cd7c86d2670f87f97133354905c98 | 36,370 | py | Python | tests/python/gaia-ui-tests/gaiatest/gaia_test.py | AmyYLee/gaia | a5dbae8235163d7f985bdeb7d649268f02749a8b | [
"Apache-2.0"
] | 1 | 2020-04-06T13:02:09.000Z | 2020-04-06T13:02:09.000Z | tests/python/gaia-ui-tests/gaiatest/gaia_test.py | AmyYLee/gaia | a5dbae8235163d7f985bdeb7d649268f02749a8b | [
"Apache-2.0"
] | null | null | null | tests/python/gaia-ui-tests/gaiatest/gaia_test.py | AmyYLee/gaia | a5dbae8235163d7f985bdeb7d649268f02749a8b | [
"Apache-2.0"
] | null | null | null | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
import sys
import time
from marionette import MarionetteTestCase
from marionette.by import By
from marionette.errors import NoSuchElementException
from marionette.errors import ElementNotVisibleException
from marionette.errors import TimeoutException
from marionette.errors import StaleElementException
from marionette.errors import InvalidResponseException
import mozdevice
class LockScreen(object):
def __init__(self, marionette):
self.marionette = marionette
js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_lock_screen.js"))
self.marionette.import_script(js)
@property
def is_locked(self):
self.marionette.switch_to_frame()
return self.marionette.execute_script('window.wrappedJSObject.LockScreen.locked')
def lock(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script('GaiaLockScreen.lock()')
assert result, 'Unable to lock screen'
def unlock(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script('GaiaLockScreen.unlock()')
assert result, 'Unable to unlock screen'
class GaiaApp(object):
def __init__(self, origin=None, name=None, frame=None, src=None):
self.frame = frame
self.frame_id = frame
self.src = src
self.name = name
self.origin = origin
def __eq__(self, other):
return self.__dict__ == other.__dict__
class GaiaApps(object):
def __init__(self, marionette):
self.marionette = marionette
js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_apps.js"))
self.marionette.import_script(js)
def get_permission(self, app_name, permission_name):
return self.marionette.execute_async_script("return GaiaApps.getPermission('%s', '%s')" % (app_name, permission_name))
def set_permission(self, app_name, permission_name, value):
return self.marionette.execute_async_script("return GaiaApps.setPermission('%s', '%s', '%s')" %
(app_name, permission_name, value))
def launch(self, name, switch_to_frame=True, url=None, launch_timeout=None):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("GaiaApps.launchWithName('%s')" % name, script_timeout=launch_timeout)
assert result, "Failed to launch app with name '%s'" % name
app = GaiaApp(frame=result.get('frame'),
src=result.get('src'),
name=result.get('name'),
origin=result.get('origin'))
if app.frame_id is None:
raise Exception("App failed to launch; there is no app frame")
if switch_to_frame:
self.switch_to_frame(app.frame_id, url)
return app
@property
def displayed_app(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script('return GaiaApps.displayedApp();')
return GaiaApp(frame=result.get('frame'),
src=result.get('src'),
name=result.get('name'),
origin=result.get('origin'))
def switch_to_displayed_app(self):
self.marionette.switch_to_default_content()
self.marionette.switch_to_frame(self.displayed_app.frame)
def is_app_installed(self, app_name):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script("GaiaApps.locateWithName('%s')" % app_name)
def uninstall(self, name):
self.marionette.switch_to_frame()
self.marionette.execute_async_script("GaiaApps.uninstallWithName('%s')" % name)
def kill(self, app):
self.marionette.switch_to_frame()
js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_apps.js"))
self.marionette.import_script(js)
result = self.marionette.execute_async_script("GaiaApps.kill('%s');" % app.origin)
assert result, "Failed to kill app with name '%s'" % app.name
def kill_all(self):
self.marionette.switch_to_frame()
js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_apps.js"))
self.marionette.import_script(js)
self.marionette.execute_async_script("GaiaApps.killAll()")
def runningApps(self):
return self.marionette.execute_script("return GaiaApps.getRunningApps()")
def switch_to_frame(self, app_frame, url=None, timeout=30):
self.marionette.switch_to_frame(app_frame)
start = time.time()
if not url:
def check(now):
return "about:blank" not in now
else:
def check(now):
return url in now
while (time.time() - start < timeout):
if check(self.marionette.get_url()):
return
time.sleep(2)
raise TimeoutException('Could not switch to app frame %s in time' % app_frame)
class GaiaData(object):
def __init__(self, marionette, testvars=None):
self.marionette = marionette
self.testvars = testvars or {}
js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_data_layer.js"))
self.marionette.import_script(js)
self.marionette.set_search_timeout(10000)
def set_time(self, date_number):
self.marionette.set_context(self.marionette.CONTEXT_CHROME)
self.marionette.execute_script("window.navigator.mozTime.set(%s);" % date_number)
self.marionette.set_context(self.marionette.CONTEXT_CONTENT)
@property
def all_contacts(self):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script('return GaiaDataLayer.getAllContacts();', special_powers=True)
@property
def sim_contacts(self):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script('return GaiaDataLayer.getSIMContacts();', special_powers=True)
def insert_contact(self, contact):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script('return GaiaDataLayer.insertContact(%s);' % json.dumps(contact), special_powers=True)
assert result, 'Unable to insert contact %s' % contact
def remove_all_contacts(self, default_script_timeout=60000):
self.marionette.switch_to_frame()
self.marionette.set_script_timeout(max(default_script_timeout, 1000 * len(self.all_contacts)))
result = self.marionette.execute_async_script('return GaiaDataLayer.removeAllContacts();', special_powers=True)
assert result, 'Unable to remove all contacts'
self.marionette.set_script_timeout(default_script_timeout)
def get_setting(self, name):
return self.marionette.execute_async_script('return GaiaDataLayer.getSetting("%s")' % name, special_powers=True)
@property
def all_settings(self):
return self.get_setting('*')
def set_setting(self, name, value):
import json
value = json.dumps(value)
result = self.marionette.execute_async_script('return GaiaDataLayer.setSetting("%s", %s)' % (name, value), special_powers=True)
assert result, "Unable to change setting with name '%s' to '%s'" % (name, value)
def _get_pref(self, datatype, name):
return self.marionette.execute_script("return SpecialPowers.get%sPref('%s');" % (datatype, name), special_powers=True)
def _set_pref(self, datatype, name, value):
value = json.dumps(value)
self.marionette.execute_script("SpecialPowers.set%sPref('%s', %s);" % (datatype, name, value), special_powers=True)
def get_bool_pref(self, name):
"""Returns the value of a Gecko boolean pref, which is different from a Gaia setting."""
return self._get_pref('Bool', name)
def set_bool_pref(self, name, value):
"""Sets the value of a Gecko boolean pref, which is different from a Gaia setting."""
return self._set_pref('Bool', name, value)
def get_int_pref(self, name):
"""Returns the value of a Gecko integer pref, which is different from a Gaia setting."""
return self._get_pref('Int', name)
def set_int_pref(self, name, value):
"""Sets the value of a Gecko integer pref, which is different from a Gaia setting."""
return self._set_pref('Int', name, value)
def get_char_pref(self, name):
"""Returns the value of a Gecko string pref, which is different from a Gaia setting."""
return self._get_pref('Char', name)
def set_char_pref(self, name, value):
"""Sets the value of a Gecko string pref, which is different from a Gaia setting."""
return self._set_pref('Char', name, value)
def set_volume(self, value):
channels = ['alarm', 'content', 'notification']
for channel in channels:
self.set_setting('audio.volume.%s' % channel, value)
def bluetooth_enable(self):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script("return GaiaDataLayer.enableBluetooth()")
def bluetooth_disable(self):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script("return GaiaDataLayer.disableBluetooth()")
def bluetooth_pair_device(self, device_name):
return self.marionette.execute_async_script('return GaiaDataLayer.pairBluetoothDevice("%s")' % device_name)
def bluetooth_unpair_all_devices(self):
self.marionette.switch_to_frame()
self.marionette.execute_async_script('return GaiaDataLayer.unpairAllBluetoothDevices()')
def bluetooth_set_device_name(self, device_name):
result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceName(%s);' % device_name)
assert result, "Unable to set device's bluetooth name to %s" % device_name
def bluetooth_set_device_discoverable_mode(self, discoverable):
if (discoverable):
result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(true);')
else:
result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(false);')
assert result, 'Able to set the device bluetooth discoverable mode'
@property
def bluetooth_is_enabled(self):
return self.marionette.execute_script("return window.navigator.mozBluetooth.enabled")
@property
def is_cell_data_enabled(self):
return self.get_setting('ril.data.enabled')
def connect_to_cell_data(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.connectToCellData()", special_powers=True)
assert result, 'Unable to connect to cell data'
def disable_cell_data(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.disableCellData()", special_powers=True)
assert result, 'Unable to disable cell data'
@property
def is_cell_data_connected(self):
# XXX: check bug-926169
# this is used to keep all tests passing while introducing multi-sim APIs
return self.marionette.execute_script('var mobileConnection = window.navigator.mozMobileConnection || ' +
'window.navigator.mozMobileConnections && ' +
'window.navigator.mozMobileConnections[0]; ' +
'return mobileConnection.data.connected;')
def enable_cell_roaming(self):
self.set_setting('ril.data.roaming_enabled', True)
def disable_cell_roaming(self):
self.set_setting('ril.data.roaming_enabled', False)
@property
def is_wifi_enabled(self):
return self.marionette.execute_script("return window.navigator.mozWifiManager.enabled;")
def enable_wifi(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.enableWiFi()", special_powers=True)
assert result, 'Unable to enable WiFi'
def disable_wifi(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.disableWiFi()", special_powers=True)
assert result, 'Unable to disable WiFi'
def connect_to_wifi(self, network=None):
network = network or self.testvars.get('wifi')
assert network, 'No WiFi network provided'
self.enable_wifi()
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.connectToWiFi(%s)" % json.dumps(network))
assert result, 'Unable to connect to WiFi network'
def forget_all_networks(self):
self.marionette.switch_to_frame()
self.marionette.execute_async_script('return GaiaDataLayer.forgetAllNetworks()')
def is_wifi_connected(self, network=None):
network = network or self.testvars.get('wifi')
assert network, 'No WiFi network provided'
self.marionette.switch_to_frame()
return self.marionette.execute_script("return GaiaDataLayer.isWiFiConnected(%s)" % json.dumps(network))
@property
def known_networks(self):
return self.marionette.execute_async_script('return GaiaDataLayer.getKnownNetworks()')
@property
def active_telephony_state(self):
# Returns the state of only the currently active call or None if no active call
return self.marionette.execute_script("return GaiaDataLayer.getMozTelephonyState()")
@property
def is_antenna_available(self):
return self.marionette.execute_script('return window.navigator.mozFMRadio.antennaAvailable')
@property
def is_fm_radio_enabled(self):
return self.marionette.execute_script('return window.navigator.mozFMRadio.enabled')
@property
def fm_radio_frequency(self):
return self.marionette.execute_script('return window.navigator.mozFMRadio.frequency')
@property
def media_files(self):
result = []
result.extend(self.music_files)
result.extend(self.picture_files)
result.extend(self.video_files)
return result
def delete_all_sms(self):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script("return GaiaDataLayer.deleteAllSms();", special_powers=True)
def delete_all_call_log_entries(self):
"""The call log needs to be open and focused in order for this to work."""
self.marionette.execute_script('window.wrappedJSObject.RecentsDBManager.deleteAll();')
def kill_active_call(self):
self.marionette.execute_script("var telephony = window.navigator.mozTelephony; " +
"if(telephony.active) telephony.active.hangUp();")
@property
def music_files(self):
return self.marionette.execute_async_script(
'return GaiaDataLayer.getAllMusic();')
@property
def picture_files(self):
return self.marionette.execute_async_script(
'return GaiaDataLayer.getAllPictures();')
@property
def video_files(self):
return self.marionette.execute_async_script(
'return GaiaDataLayer.getAllVideos();')
def sdcard_files(self, extension=''):
files = self.marionette.execute_async_script(
'return GaiaDataLayer.getAllSDCardFiles();')
if len(extension):
return [filename for filename in files if filename.endswith(extension)]
return files
def send_sms(self, number, message):
import json
number = json.dumps(number)
message = json.dumps(message)
result = self.marionette.execute_async_script('return GaiaDataLayer.sendSMS(%s, %s)' % (number, message), special_powers=True)
assert result, 'Unable to send SMS to recipient %s with text %s' % (number, message)
class GaiaDevice(object):
def __init__(self, marionette, testvars=None):
self.marionette = marionette
self.testvars = testvars or {}
@property
def manager(self):
if hasattr(self, '_manager') and self._manager:
return self._manager
if not self.is_android_build:
raise Exception('Device manager is only available for devices.')
dm_type = os.environ.get('DM_TRANS', 'adb')
if dm_type == 'adb':
self._manager = mozdevice.DeviceManagerADB()
elif dm_type == 'sut':
host = os.environ.get('TEST_DEVICE')
if not host:
raise Exception('Must specify host with SUT!')
self._manager = mozdevice.DeviceManagerSUT(host=host)
else:
raise Exception('Unknown device manager type: %s' % dm_type)
return self._manager
@property
def is_android_build(self):
if self.testvars.get('is_android_build') is None:
self.testvars['is_android_build'] = 'Android' in self.marionette.session_capabilities['platform']
return self.testvars['is_android_build']
@property
def is_online(self):
# Returns true if the device has a network connection established (cell data, wifi, etc)
return self.marionette.execute_script('return window.navigator.onLine;')
@property
def has_mobile_connection(self):
# XXX: check bug-926169
# this is used to keep all tests passing while introducing multi-sim APIs
return self.marionette.execute_script('var mobileConnection = window.navigator.mozMobileConnection || ' +
'window.navigator.mozMobileConnections && ' +
'window.navigator.mozMobileConnections[0]; ' +
'return mobileConnection !== undefined')
@property
def has_wifi(self):
if not hasattr(self, '_has_wifi'):
self._has_wifi = self.marionette.execute_script('return window.navigator.mozWifiManager !== undefined')
return self._has_wifi
def push_file(self, source, count=1, destination='', progress=None):
if not destination.count('.') > 0:
destination = '/'.join([destination, source.rpartition(os.path.sep)[-1]])
self.manager.mkDirs(destination)
self.manager.pushFile(source, destination)
if count > 1:
for i in range(1, count + 1):
remote_copy = '_%s.'.join(iter(destination.split('.'))) % i
self.manager._checkCmd(['shell', 'dd', 'if=%s' % destination, 'of=%s' % remote_copy])
if progress:
progress.update(i)
self.manager.removeFile(destination)
def restart_b2g(self):
self.stop_b2g()
time.sleep(2)
self.start_b2g()
def start_b2g(self):
if self.marionette.instance:
# launch the gecko instance attached to marionette
self.marionette.instance.start()
elif self.is_android_build:
self.manager.shellCheckOutput(['start', 'b2g'])
else:
raise Exception('Unable to start B2G')
self.marionette.wait_for_port()
self.marionette.start_session()
if self.is_android_build:
self.marionette.execute_async_script("""
window.addEventListener('mozbrowserloadend', function loaded(aEvent) {
if (aEvent.target.src.indexOf('ftu') != -1 || aEvent.target.src.indexOf('homescreen') != -1) {
window.removeEventListener('mozbrowserloadend', loaded);
marionetteScriptFinished();
}
});""", script_timeout=60000)
# TODO: Remove this sleep when Bug 924912 is addressed
time.sleep(5)
def stop_b2g(self):
if self.marionette.instance:
# close the gecko instance attached to marionette
self.marionette.instance.close()
elif self.is_android_build:
self.manager.shellCheckOutput(['stop', 'b2g'])
else:
raise Exception('Unable to stop B2G')
self.marionette.client.close()
self.marionette.session = None
self.marionette.window = None
class GaiaTestCase(MarionetteTestCase):
_script_timeout = 60000
_search_timeout = 10000
    # default timeout in seconds for the wait_for methods
_default_timeout = 30
def __init__(self, *args, **kwargs):
self.restart = kwargs.pop('restart', False)
kwargs.pop('iterations', None)
kwargs.pop('checkpoint_interval', None)
MarionetteTestCase.__init__(self, *args, **kwargs)
def setUp(self):
try:
MarionetteTestCase.setUp(self)
except InvalidResponseException:
if self.restart:
pass
self.device = GaiaDevice(self.marionette, self.testvars)
if self.restart and (self.device.is_android_build or self.marionette.instance):
self.device.stop_b2g()
if self.device.is_android_build:
# revert device to a clean state
self.device.manager.removeDir('/data/local/storage/persistent')
self.device.manager.removeDir('/data/b2g/mozilla')
self.device.start_b2g()
# the emulator can be really slow!
self.marionette.set_script_timeout(self._script_timeout)
self.marionette.set_search_timeout(self._search_timeout)
self.lockscreen = LockScreen(self.marionette)
self.apps = GaiaApps(self.marionette)
self.data_layer = GaiaData(self.marionette, self.testvars)
from gaiatest.apps.keyboard.app import Keyboard
self.keyboard = Keyboard(self.marionette)
self.cleanUp()
def cleanUp(self):
# remove media
if self.device.is_android_build:
for filename in self.data_layer.media_files:
# filename is a fully qualified path
self.device.manager.removeFile(filename)
# Switch off keyboard FTU screen
self.data_layer.set_setting("keyboard.ftu.enabled", False)
# restore settings from testvars
[self.data_layer.set_setting(name, value) for name, value in self.testvars.get('settings', {}).items()]
# unlock
self.lockscreen.unlock()
# If we are restarting all of these values are reset to default earlier in the setUp
if not self.restart:
# disable passcode before restore settings from testvars
self.data_layer.set_setting('lockscreen.passcode-lock.code', '1111')
self.data_layer.set_setting('lockscreen.passcode-lock.enabled', False)
# Change language back to English
self.data_layer.set_setting("language.current", "en-US")
# Switch off spanish keyboard before test
self.data_layer.set_setting("keyboard.layouts.spanish", False)
# Set do not track pref back to the default
self.data_layer.set_setting('privacy.donottrackheader.value', '-1')
if self.data_layer.get_setting('ril.radio.disabled'):
# enable the device radio, disable Airplane mode
self.data_layer.set_setting('ril.radio.disabled', False)
# Re-set edge gestures pref to False
self.data_layer.set_setting('edgesgesture.enabled', False)
# disable carrier data connection
if self.device.has_mobile_connection:
self.data_layer.disable_cell_data()
self.data_layer.disable_cell_roaming()
if self.device.has_wifi:
self.data_layer.enable_wifi()
self.data_layer.forget_all_networks()
self.data_layer.disable_wifi()
# remove data
self.data_layer.remove_all_contacts(self._script_timeout)
# reset to home screen
self.marionette.execute_script("window.wrappedJSObject.dispatchEvent(new Event('home'));")
# kill any open apps
self.apps.kill_all()
# disable sound completely
self.data_layer.set_volume(0)
def install_marketplace(self):
_yes_button_locator = (By.ID, 'app-install-install-button')
mk = {"name": "Marketplace Dev",
"manifest": "https://marketplace-dev.allizom.org/manifest.webapp ",
}
if not self.apps.is_app_installed(mk['name']):
# install the marketplace dev app
self.marionette.execute_script('navigator.mozApps.install("%s")' % mk['manifest'])
# TODO add this to the system app object when we have one
self.wait_for_element_displayed(*_yes_button_locator)
self.marionette.find_element(*_yes_button_locator).tap()
self.wait_for_element_not_displayed(*_yes_button_locator)
def connect_to_network(self):
if not self.device.is_online:
try:
self.connect_to_local_area_network()
except:
if self.device.has_mobile_connection:
self.data_layer.connect_to_cell_data()
else:
raise Exception('Unable to connect to network')
assert self.device.is_online
def connect_to_local_area_network(self):
if not self.device.is_online:
if self.testvars.get('wifi') and self.device.has_wifi:
self.data_layer.connect_to_wifi()
assert self.device.is_online
else:
raise Exception('Unable to connect to local area network')
def push_resource(self, filename, count=1, destination=''):
self.device.push_file(self.resource(filename), count, '/'.join(['sdcard', destination]))
def resource(self, filename):
return os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources', filename))
def change_orientation(self, orientation):
""" There are 4 orientation states which the phone can be passed in:
        portrait-primary (which is the default orientation), landscape-primary, portrait-secondary and landscape-secondary
"""
self.marionette.execute_async_script("""
if (arguments[0] === arguments[1]) {
marionetteScriptFinished();
}
else {
var expected = arguments[1];
window.screen.onmozorientationchange = function(e) {
console.log("Received 'onmozorientationchange' event.");
waitFor(
function() {
window.screen.onmozorientationchange = null;
marionetteScriptFinished();
},
function() {
return window.screen.mozOrientation === expected;
}
);
};
console.log("Changing orientation to '" + arguments[1] + "'.");
window.screen.mozLockOrientation(arguments[1]);
};""", script_args=[self.screen_orientation, orientation])
@property
def screen_width(self):
return self.marionette.execute_script('return window.screen.width')
@property
def screen_orientation(self):
return self.marionette.execute_script('return window.screen.mozOrientation')
def wait_for_element_present(self, by, locator, timeout=_default_timeout):
timeout = float(timeout) + time.time()
while time.time() < timeout:
time.sleep(0.5)
try:
return self.marionette.find_element(by, locator)
except NoSuchElementException:
pass
else:
raise TimeoutException(
'Element %s not present before timeout' % locator)
def wait_for_element_not_present(self, by, locator, timeout=_default_timeout):
timeout = float(timeout) + time.time()
while time.time() < timeout:
time.sleep(0.5)
try:
self.marionette.find_element(by, locator)
except NoSuchElementException:
break
else:
raise TimeoutException(
'Element %s still present after timeout' % locator)
def wait_for_element_displayed(self, by, locator, timeout=_default_timeout):
timeout = float(timeout) + time.time()
e = None
while time.time() < timeout:
time.sleep(0.5)
try:
if self.marionette.find_element(by, locator).is_displayed():
break
except (NoSuchElementException, StaleElementException) as e:
pass
else:
# This is an effortless way to give extra debugging information
if isinstance(e, NoSuchElementException):
raise TimeoutException('Element %s not present before timeout' % locator)
else:
raise TimeoutException('Element %s present but not displayed before timeout' % locator)
def wait_for_element_not_displayed(self, by, locator, timeout=_default_timeout):
timeout = float(timeout) + time.time()
while time.time() < timeout:
time.sleep(0.5)
try:
if not self.marionette.find_element(by, locator).is_displayed():
break
except StaleElementException:
pass
except NoSuchElementException:
break
else:
raise TimeoutException(
'Element %s still visible after timeout' % locator)
def wait_for_condition(self, method, timeout=_default_timeout,
message="Condition timed out"):
"""Calls the method provided with the driver as an argument until the \
return value is not False."""
end_time = time.time() + timeout
while time.time() < end_time:
try:
value = method(self.marionette)
if value:
return value
except (NoSuchElementException, StaleElementException):
pass
time.sleep(0.5)
else:
raise TimeoutException(message)
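    # Illustrative usage (not part of the original suite): wait_for_condition is
    # typically driven with a lambda over the marionette client, e.g. (the
    # locator below is hypothetical):
    #
    #     self.wait_for_condition(
    #         lambda m: m.find_element(*_progress_locator).is_displayed(),
    #         timeout=10, message='Progress indicator never appeared')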
def is_element_present(self, by, locator):
try:
self.marionette.find_element(by, locator)
return True
except:
return False
def is_element_displayed(self, by, locator):
try:
return self.marionette.find_element(by, locator).is_displayed()
except (NoSuchElementException, ElementNotVisibleException):
return False
def tearDown(self):
self.lockscreen = None
self.apps = None
self.data_layer = None
MarionetteTestCase.tearDown(self)
class GaiaEnduranceTestCase(GaiaTestCase):
def __init__(self, *args, **kwargs):
self.iterations = kwargs.pop('iterations') or 1
self.checkpoint_interval = kwargs.pop('checkpoint_interval') or self.iterations
GaiaTestCase.__init__(self, *args, **kwargs)
def drive(self, test, app):
self.test_method = test
self.app_under_test = app
# Now drive the actual test case iterations
for count in range(1, self.iterations + 1):
self.iteration = count
self.marionette.log("%s iteration %d of %d" % (self.test_method.__name__, count, self.iterations))
# Print to console so can see what iteration we're on while test is running
if self.iteration == 1:
print "\n"
print "Iteration %d of %d..." % (count, self.iterations)
sys.stdout.flush()
self.test_method()
# Checkpoint time?
if ((count % self.checkpoint_interval) == 0) or count == self.iterations:
self.checkpoint()
# Finished, now process checkpoint data into .json output
self.process_checkpoint_data()
def checkpoint(self):
# Console output so know what's happening if watching console
print "Checkpoint..."
sys.stdout.flush()
# Sleep to give device idle time (for gc)
idle_time = 30
self.marionette.log("sleeping %d seconds to give the device some idle time" % idle_time)
time.sleep(idle_time)
# Dump out some memory status info
self.marionette.log("checkpoint")
self.cur_time = time.strftime("%Y%m%d%H%M%S", time.localtime())
# If first checkpoint, create the file if it doesn't exist already
if self.iteration in (0, self.checkpoint_interval):
self.checkpoint_path = "checkpoints"
if not os.path.exists(self.checkpoint_path):
os.makedirs(self.checkpoint_path, 0755)
self.log_name = "%s/checkpoint_%s_%s.log" % (self.checkpoint_path, self.test_method.__name__, self.cur_time)
with open(self.log_name, 'a') as log_file:
log_file.write('%s Gaia Endurance Test: %s\n' % (self.cur_time, self.test_method.__name__))
output_str = self.device.manager.shellCheckOutput(["b2g-ps"])
with open(self.log_name, 'a') as log_file:
log_file.write('%s Checkpoint after iteration %d of %d:\n' % (self.cur_time, self.iteration, self.iterations))
log_file.write('%s\n' % output_str)
def close_app(self):
# Close the current app (self.app) by using the home button
self.marionette.switch_to_frame()
self.marionette.execute_script("window.wrappedJSObject.dispatchEvent(new Event('home'));")
# Bring up the cards view
_cards_view_locator = ('id', 'cards-view')
self.marionette.execute_script("window.wrappedJSObject.dispatchEvent(new Event('holdhome'));")
self.wait_for_element_displayed(*_cards_view_locator)
# Sleep a bit
time.sleep(5)
# Tap the close icon for the current app
locator_part_two = '#cards-view li.card[data-origin*="%s"] .close-card' % self.app_under_test.lower()
_close_button_locator = ('css selector', locator_part_two)
close_card_app_button = self.marionette.find_element(*_close_button_locator)
close_card_app_button.tap()
def process_checkpoint_data(self):
# Process checkpoint data into .json
self.marionette.log("processing checkpoint data from %s" % self.log_name)
# Open the checkpoint file
checkpoint_file = open(self.log_name, 'r')
# Grab every b2g rss reading for each checkpoint
b2g_rss_list = []
for next_line in checkpoint_file:
if next_line.startswith("b2g"):
b2g_rss_list.append(next_line.split()[5])
# Close the checkpoint file
checkpoint_file.close()
# Calculate the average b2g_rss
total = 0
for b2g_mem_value in b2g_rss_list:
total += int(b2g_mem_value)
avg_rss = total / len(b2g_rss_list)
# Create a summary text file
summary_name = self.log_name.replace('.log', '_summary.log')
summary_file = open(summary_name, 'w')
# Write the summarized checkpoint data
summary_file.write('test_name: %s\n' % self.test_method.__name__)
summary_file.write('completed: %s\n' % self.cur_time)
summary_file.write('app_under_test: %s\n' % self.app_under_test.lower())
summary_file.write('total_iterations: %d\n' % self.iterations)
summary_file.write('checkpoint_interval: %d\n' % self.checkpoint_interval)
summary_file.write('b2g_rss: ')
summary_file.write(', '.join(b2g_rss_list))
summary_file.write('\navg_rss: %d\n\n' % avg_rss)
# Close the summary file
summary_file.close()
# Write to suite summary file
suite_summary_file_name = '%s/avg_b2g_rss_suite_summary.log' % self.checkpoint_path
suite_summary_file = open(suite_summary_file_name, 'a')
suite_summary_file.write('%s: %s\n' % (self.test_method.__name__, avg_rss))
suite_summary_file.close()
| 41.376564 | 139 | 0.648474 | 35,751 | 0.98298 | 0 | 0 | 5,527 | 0.151966 | 0 | 0 | 10,015 | 0.275364 |
e053d242f75ab9ddd50217184c0c2cd558a9aad9 | 5,591 | py | Python | library/__mozilla__/pyjamas/DOM.py | certik/pyjamas | 5bb72e63e50f09743ac986f4c9690ba50c499ba9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | library/__mozilla__/pyjamas/DOM.py | certik/pyjamas | 5bb72e63e50f09743ac986f4c9690ba50c499ba9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | library/__mozilla__/pyjamas/DOM.py | certik/pyjamas | 5bb72e63e50f09743ac986f4c9690ba50c499ba9 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-08-13T20:32:25.000Z | 2019-08-13T20:32:25.000Z | def buttonClick(button):
JS("""
var doc = button.ownerDocument;
if (doc != null) {
var evt = doc.createEvent('MouseEvents');
evt.initMouseEvent('click', true, true, null, 0, 0,
0, 0, 0, false, false, false, false, 0, null);
button.dispatchEvent(evt);
}
""")
def compare(elem1, elem2):
JS("""
if (!elem1 && !elem2) {
return true;
} else if (!elem1 || !elem2) {
return false;
}
if (!elem1.isSameNode) {
return (elem1 == elem2);
}
return (elem1.isSameNode(elem2));
""")
def eventGetButton(evt):
JS("""
var button = evt.which;
if(button == 2) {
return 4;
} else if (button == 3) {
return 2;
} else {
return button || 0;
}
""")
# This is what is in GWT 1.5 for getAbsoluteLeft. err...
#"""
# // We cannot use DOMImpl here because offsetLeft/Top return erroneous
# // values when overflow is not visible. We have to difference screenX
# // here due to a change in getBoxObjectFor which causes inconsistencies
# // on whether the calculations are inside or outside of the element's
# // border.
# try {
# return $doc.getBoxObjectFor(elem).screenX
# - $doc.getBoxObjectFor($doc.documentElement).screenX;
# } catch (e) {
# // This works around a bug in the FF3 betas. The bug
# // should be fixed before they release, so this can
# // be removed at a later date.
# // https://bugzilla.mozilla.org/show_bug.cgi?id=409111
# // DOMException.WRONG_DOCUMENT_ERR == 4
# if (e.code == 4) {
# return 0;
# }
# throw e;
# }
#"""
def getAbsoluteLeft(elem):
JS("""
// Firefox 3 expects getBoundingClientRect
// getBoundingClientRect can be float: 73.1 instead of 74, see
// gwt's workaround at user/src/com/google/gwt/dom/client/DOMImplMozilla.java:47
// Please note, their implementation has 1px offset.
if ( typeof elem.getBoundingClientRect == 'function' ) {
var left = Math.ceil(elem.getBoundingClientRect().left);
return left + $doc.body.scrollLeft + $doc.documentElement.scrollLeft;
}
// Older Firefox can use getBoxObjectFor
else {
var left = $doc.getBoxObjectFor(elem).x;
var parent = elem.parentNode;
while (parent) {
if (parent.scrollLeft > 0) {
left = left - parent.scrollLeft;
}
parent = parent.parentNode;
}
return left + $doc.body.scrollLeft + $doc.documentElement.scrollLeft;
}
""")
# This is what is in GWT 1.5 for getAbsoluteTop. err...
#"""
# // We cannot use DOMImpl here because offsetLeft/Top return erroneous
# // values when overflow is not visible. We have to difference screenY
# // here due to a change in getBoxObjectFor which causes inconsistencies
# // on whether the calculations are inside or outside of the element's
# // border.
# try {
# return $doc.getBoxObjectFor(elem).screenY
# - $doc.getBoxObjectFor($doc.documentElement).screenY;
# } catch (e) {
# // This works around a bug in the FF3 betas. The bug
# // should be fixed before they release, so this can
# // be removed at a later date.
# // https://bugzilla.mozilla.org/show_bug.cgi?id=409111
# // DOMException.WRONG_DOCUMENT_ERR == 4
# if (e.code == 4) {
# return 0;
# }
# throw e;
# }
#"""
def getAbsoluteTop(elem):
JS("""
// Firefox 3 expects getBoundingClientRect
if ( typeof elem.getBoundingClientRect == 'function' ) {
var top = Math.ceil(elem.getBoundingClientRect().top);
return top + $doc.body.scrollTop + $doc.documentElement.scrollTop;
}
// Older Firefox can use getBoxObjectFor
else {
var top = $doc.getBoxObjectFor(elem).y;
var parent = elem.parentNode;
while (parent) {
if (parent.scrollTop > 0) {
top -= parent.scrollTop;
}
parent = parent.parentNode;
}
return top + $doc.body.scrollTop + $doc.documentElement.scrollTop;
}
""")
def getChildIndex(parent, child):
JS("""
var count = 0, current = parent.firstChild;
while (current) {
if (! current.isSameNode) {
if (current == child) {
return count;
}
}
else if (current.isSameNode(child)) {
return count;
}
if (current.nodeType == 1) {
++count;
}
current = current.nextSibling;
}
return -1;
""")
def isOrHasChild(parent, child):
JS("""
while (child) {
if ((!parent.isSameNode)) {
if (parent == child) {
return true;
}
}
else if (parent.isSameNode(child)) {
return true;
}
try {
child = child.parentNode;
} catch(e) {
// Give up on 'Permission denied to get property
// HTMLDivElement.parentNode'
// See https://bugzilla.mozilla.org/show_bug.cgi?id=208427
return false;
}
if (child && (child.nodeType != 1)) {
child = null;
}
}
return false;
""")
def releaseCapture(elem):
JS("""
if ((DOM.sCaptureElem != null) && DOM.compare(elem, DOM.sCaptureElem))
DOM.sCaptureElem = null;
if (!elem.isSameNode) {
if (elem == $wnd.__captureElem) {
$wnd.__captureElem = null;
}
}
else if (elem.isSameNode($wnd.__captureElem)) {
$wnd.__captureElem = null;
}
""")
| 29.119792 | 84 | 0.571275 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,242 | 0.937578 |
e05432743bd72af1411301793f19ae278f8a6b5a | 485 | py | Python | apps/vendors/migrations/0090_auto_20160610_2125.py | ExpoAshique/ProveBanking__s | f0b45fffea74d00d14014be27aa50fe5f42f6903 | [
"MIT"
] | null | null | null | apps/vendors/migrations/0090_auto_20160610_2125.py | ExpoAshique/ProveBanking__s | f0b45fffea74d00d14014be27aa50fe5f42f6903 | [
"MIT"
] | null | null | null | apps/vendors/migrations/0090_auto_20160610_2125.py | ExpoAshique/ProveBanking__s | f0b45fffea74d00d14014be27aa50fe5f42f6903 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-10 21:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vendors', '0089_auto_20160602_2123'),
]
operations = [
migrations.AlterField(
model_name='vendor',
name='email',
field=models.EmailField(blank=True, max_length=254, verbose_name='Email'),
),
]
| 23.095238 | 86 | 0.626804 | 328 | 0.676289 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.259794 |
e0543e59c4fcb122d63759114f58b779ede6cdce | 540 | py | Python | graph/articulation_points.py | fujihiraryo/library | cdb01e710219d7111f890d09f89531916dd03533 | [
"MIT"
] | null | null | null | graph/articulation_points.py | fujihiraryo/library | cdb01e710219d7111f890d09f89531916dd03533 | [
"MIT"
] | 4 | 2020-12-16T10:00:00.000Z | 2021-02-12T12:51:50.000Z | graph/articulation_points.py | fujihiraryo/python-kyopro-library | cdb01e710219d7111f890d09f89531916dd03533 | [
"MIT"
] | null | null | null | from depth_first_search import DFS
def articulation_points(graph):
n = len(graph)
dfs = DFS(graph)
order = [None] * n
for i, x in enumerate(dfs.preorder):
order[x] = i
lower = order[:]
for x in dfs.preorder[::-1]:
for y in graph[x]:
if y == dfs.parent[x]:
continue
lower[x] = min(lower[x], lower[y])
if len(dfs.children[0]) > 1:
yield 0
for x in range(1, n):
if any(order[x] <= lower[y] for y in dfs.children[x]):
yield x
| 25.714286 | 62 | 0.522222 | 0 | 0 | 502 | 0.92963 | 0 | 0 | 0 | 0 | 0 | 0 |
e055245acd2ad8d01c1ab4aacd02a9a0e3b9e3b6 | 1,558 | py | Python | database.py | AndreAngelucci/popcorn_time_bot | 710b77b59d6c62569c1bf6984c7cf9adac8ea840 | [
"MIT"
] | null | null | null | database.py | AndreAngelucci/popcorn_time_bot | 710b77b59d6c62569c1bf6984c7cf9adac8ea840 | [
"MIT"
] | 1 | 2021-06-02T00:39:42.000Z | 2021-06-02T00:39:42.000Z | database.py | AndreAngelucci/popcorn_time_bot | 710b77b59d6c62569c1bf6984c7cf9adac8ea840 | [
"MIT"
] | null | null | null | import pymongo
from conf import Configuracoes
class Mongo_Database:
""" Singleton com a conexao com o MongoDB """
_instancia = None
def __new__(cls, *args, **kwargs):
if not(cls._instancia):
cls._instancia = super(Mongo_Database, cls).__new__(cls, *args, **kwargs)
return cls._instancia
def __init__(self,):
        # fetch the connection string from the configuration file
string_conexao = Configuracoes().get_config("database", "string_connection")
assert (string_conexao != ""), "String de conexao indefinida"
try:
self.mongo_client = pymongo.MongoClient(string_conexao)
self.collection_filmes = self.mongo_client["popcorn_time"]["filmes"]
self.collection_tweets = self.mongo_client["twitter_log"]["tweets"]
        except Exception:
            raise Exception("Could not connect to the database.")
        print("Connected to", string_conexao)
def grava_filmes(self, lista_filmes):
        # check whether each movie already exists;
        # if it does not, store it and add it to the list of new movies
novos = []
try:
for filme in lista_filmes:
if (self.collection_filmes.count_documents({"_id": filme["_id"]}) == 0):
self.collection_filmes.insert_one(filme)
novos.append(filme)
finally:
return novos
def grava_tweet(self, tweet_info):
        # store the result returned for the tweet
self.collection_tweets.insert_one(tweet_info)
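# Illustrative usage sketch (kept commented out): assumes a valid "string_connection"
# entry served by ``conf.Configuracoes`` and a reachable MongoDB instance.
# db = Mongo_Database()                      # every call returns the same singleton
# new_movies = db.grava_filmes([{"_id": 1, "title": "Example"}])  # inserts only unseen ids
# db.grava_tweet({"movie_id": 1, "status": "tweeted"})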
| 39.948718 | 88 | 0.617458 | 1,502 | 0.964056 | 0 | 0 | 0 | 0 | 0 | 0 | 376 | 0.241335 |
e0553357877f320fcfcfc9bb4fdd3aa6b5cc2f78 | 2,947 | py | Python | sensor_core/sleep.py | JorisHerbots/niip_iot_zombie_apocalypse | 3ff848f3dab1dde9d2417d0a2c56a76a85e18920 | [
"MIT"
] | null | null | null | sensor_core/sleep.py | JorisHerbots/niip_iot_zombie_apocalypse | 3ff848f3dab1dde9d2417d0a2c56a76a85e18920 | [
"MIT"
] | null | null | null | sensor_core/sleep.py | JorisHerbots/niip_iot_zombie_apocalypse | 3ff848f3dab1dde9d2417d0a2c56a76a85e18920 | [
"MIT"
] | null | null | null | import machine
import pycom
import utime
from exceptions import Exceptions
class Sleep:
@property
def wakeReason(self):
return machine.wake_reason()[0]
@property
def wakePins(self):
return machine.wake_reason()[1]
@property
def powerOnWake(self):
return self.wakeReason == machine.PWRON_WAKE
@property
def pinWake(self):
return self.wakeReason == machine.PIN_WAKE
@property
def RTCWake(self):
return self.wakeReason == machine.RTC_WAKE
@property
def ULPWake(self):
return self.wakeReason == machine.ULP_WAKE
@property
def isSleepWake(self):
return self.pinWake or self.RTCWake or self.ULPWake
@property
def activeTime(self):
return self.__activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart)
@property
def inactiveTime(self):
return self.__inactiveTime
ACTIVE_TIME_KEY = 'activeTime'
INACTIVE_TIME_KEY = 'inactiveTime'
SLEEP_TIME_KEY = 'sleepTime'
def __init__(self):
self.__activityStart = utime.ticks_ms()
self.__initPersistentVariable(Sleep.ACTIVE_TIME_KEY)
self.__initPersistentVariable(Sleep.INACTIVE_TIME_KEY)
if not self.powerOnWake:
sleptTime = pycom.nvs_get(Sleep.SLEEP_TIME_KEY) - machine.remaining_sleep_time()
pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) + sleptTime)
self.__activeTime = pycom.nvs_get(Sleep.ACTIVE_TIME_KEY)
self.__inactiveTime = pycom.nvs_get(Sleep.INACTIVE_TIME_KEY)
self.__wakeUpPins = []
def __initPersistentVariable(self, key, value=0):
if (pycom.nvs_get(key) == None):
pycom.nvs_set(key, value)
def addWakeUpPin(self, pin):
# P2, P3, P4, P6, P8 to P10 and P13 to P23
if isinstance(pin, list):
self.__wakeUpPins.extend(pin)
else:
self.__wakeUpPins.append(pin)
try:
machine.pin_sleep_wakeup(self.__wakeUpPins, mode=machine.WAKEUP_ANY_HIGH, enable_pull=True)
except Exception as e:
Exceptions.error(Exception('Sleep not available: ' + str(e)))
def resetTimers(self):
pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, 0)
pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, 0)
def sleep(self, milliseconds=0):
if milliseconds == 0:
milliseconds = 604800000 # 1 week
pycom.nvs_set(Sleep.SLEEP_TIME_KEY, milliseconds)
pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, self.activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart))
try:
machine.deepsleep(milliseconds)
except Exception as e:
Exceptions.error(Exception('Deepsleep not available: ' + str(e)))
def delay(self, milliseconds):
utime.sleep_ms(milliseconds)
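# Illustrative usage sketch (kept commented out): assumes a Pycom board where the
# ``machine`` and ``pycom`` modules imported above are available.
# sleep_manager = Sleep()
# sleep_manager.addWakeUpPin('P13')            # wake on a high level on pin P13
# if sleep_manager.isSleepWake:
#     print('woke up after {} ms of sleep'.format(sleep_manager.inactiveTime))
# sleep_manager.sleep(60 * 1000)               # deep sleep for one minute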
| 32.032609 | 121 | 0.646759 | 2,862 | 0.971157 | 0 | 0 | 799 | 0.271123 | 0 | 0 | 139 | 0.047167 |
e0554c3395746111d418fbf380163f0e080e4265 | 1,260 | py | Python | pytorch_gleam/search/rerank_format.py | Supermaxman/pytorch-gleam | 8b0d8dddc812e8ae120c9760fd44fe93da3f902d | [
"Apache-2.0"
] | null | null | null | pytorch_gleam/search/rerank_format.py | Supermaxman/pytorch-gleam | 8b0d8dddc812e8ae120c9760fd44fe93da3f902d | [
"Apache-2.0"
] | null | null | null | pytorch_gleam/search/rerank_format.py | Supermaxman/pytorch-gleam | 8b0d8dddc812e8ae120c9760fd44fe93da3f902d | [
"Apache-2.0"
] | null | null | null |
import torch
import argparse
from collections import defaultdict
import os
import json
def load_predictions(input_path):
pred_list = []
for file_name in os.listdir(input_path):
if file_name.endswith('.pt'):
preds = torch.load(os.path.join(input_path, file_name))
pred_list.extend(preds)
question_scores = defaultdict(lambda: defaultdict(dict))
p_count = 0
u_count = 0
for prediction in pred_list:
doc_pass_id = prediction['id']
q_p_id = prediction['question_id']
# score = prediction['pos_score']
score = prediction['pos_score'] - prediction['neg_score']
if doc_pass_id not in question_scores or q_p_id not in question_scores[doc_pass_id]:
p_count += 1
u_count += 1
question_scores[doc_pass_id][q_p_id] = score
print(f'{p_count} unique predictions')
print(f'{u_count} total predictions')
return question_scores
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_path', required=True)
parser.add_argument('-o', '--output_path', required=True)
args = parser.parse_args()
input_path = args.input_path
output_path = args.output_path
question_scores = load_predictions(input_path)
with open(output_path, 'w') as f:
json.dump(question_scores, f)
if __name__ == '__main__':
main()
| 25.714286 | 86 | 0.743651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 188 | 0.149206 |
e055f89145eb203a0a63bfdad54931948d02ec37 | 388 | py | Python | des036.py | LeonardoPereirajr/Curso_em_video_Python | 9d8a97ba3389c8e86b37dfd089fab5d04adc146d | [
"MIT"
] | null | null | null | des036.py | LeonardoPereirajr/Curso_em_video_Python | 9d8a97ba3389c8e86b37dfd089fab5d04adc146d | [
"MIT"
] | null | null | null | des036.py | LeonardoPereirajr/Curso_em_video_Python | 9d8a97ba3389c8e86b37dfd089fab5d04adc146d | [
"MIT"
] | null | null | null | casa = int(input('Qual o valor da casa? '))
sal = int(input('Qual seu salario? '))
prazo = int(input('Quantos meses deseja pagar ? '))
parcela = casa / prazo
margem = sal * (30 / 100)
if parcela > margem:
print('Este negocio não foi aprovado, aumente o prazo .')
else:
print("Negocio aprovado pois a parcela é de R$ {} e voce pode pagar R$ {} mensais".format(parcela,margem))
| 38.8 | 111 | 0.664948 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 203 | 0.520513 |
e05606e62a7f260ca58d2f3413562fa3ee898b64 | 1,000 | py | Python | HackBitApp/migrations/0003_roadmap.py | SukhadaM/HackBit-Interview-Preparation-Portal | f4c6b0d7168a4ea4ffcf1569183b1614752d9946 | [
"MIT"
] | null | null | null | HackBitApp/migrations/0003_roadmap.py | SukhadaM/HackBit-Interview-Preparation-Portal | f4c6b0d7168a4ea4ffcf1569183b1614752d9946 | [
"MIT"
] | null | null | null | HackBitApp/migrations/0003_roadmap.py | SukhadaM/HackBit-Interview-Preparation-Portal | f4c6b0d7168a4ea4ffcf1569183b1614752d9946 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-03-27 18:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('HackBitApp', '0002_company_photo'),
]
operations = [
migrations.CreateModel(
name='Roadmap',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('company_name', models.CharField(db_index=True, max_length=200, unique=True)),
('photo1', models.ImageField(upload_to='photos/company/roadmap')),
('photo2', models.ImageField(blank=True, upload_to='photos/company/roadmap')),
('photo3', models.ImageField(blank=True, upload_to='photos/company/roadmap')),
],
options={
'verbose_name': 'roadmap',
'verbose_name_plural': 'roadmaps',
'ordering': ('company_name',),
},
),
]
| 34.482759 | 114 | 0.571 | 907 | 0.907 | 0 | 0 | 0 | 0 | 0 | 0 | 284 | 0.284 |
e0573523b4d451bef7e8afb67ef1d49c8d3db2d3 | 1,051 | py | Python | Other_Python/Kernel_Methods/matrix_operations.py | Romit-Maulik/Tutorials-Demos-Practice | a58ddc819f24a16f7059e63d7f201fc2cd23e03a | [
"MIT"
] | null | null | null | Other_Python/Kernel_Methods/matrix_operations.py | Romit-Maulik/Tutorials-Demos-Practice | a58ddc819f24a16f7059e63d7f201fc2cd23e03a | [
"MIT"
] | null | null | null | Other_Python/Kernel_Methods/matrix_operations.py | Romit-Maulik/Tutorials-Demos-Practice | a58ddc819f24a16f7059e63d7f201fc2cd23e03a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 22 14:36:48 2020
@author: matth
"""
import autograd.numpy as np
#%% Kernel operations
# Returns the matrix of pairwise squared Euclidean distances between rows
def norm_matrix(matrix_1, matrix_2):
norm_square_1 = np.sum(np.square(matrix_1), axis = 1)
norm_square_1 = np.reshape(norm_square_1, (-1,1))
norm_square_2 = np.sum(np.square(matrix_2), axis = 1)
norm_square_2 = np.reshape(norm_square_2, (-1,1))
d1=matrix_1.shape
d2=matrix_2.shape
if d1[1]!=d2[1]:
matrix_1=np.transpose(matrix_1)
inner_matrix = np.matmul(matrix_1, np.transpose(matrix_2))
norm_diff = -2 * inner_matrix + norm_square_1 + np.transpose(norm_square_2)
return norm_diff
# Returns the pairwise inner product
def inner_matrix(matrix_1, matrix_2):
d1=matrix_1.shape
d2=matrix_2.shape
if d1[1]!=d2[1]:
matrix_1=np.transpose(matrix_1)
return np.matmul(matrix_1, np.transpose(matrix_2))
if __name__ == '__main__':
print('This is the matrix operations file') | 25.02381 | 79 | 0.676499 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 230 | 0.218839 |
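# Illustrative check (kept commented out): every entry of norm_matrix is the squared
# Euclidean distance ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b between a row of the first
# matrix and a row of the second, e.g.:
#     a = np.array([[0.0, 0.0], [1.0, 1.0]])
#     b = np.array([[3.0, 4.0]])
#     norm_matrix(a, b)    # -> [[25.], [13.]]
#     inner_matrix(a, b)   # -> [[0.], [7.]]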
e0576a003dfb918c45d8ae2afa80c98a64287387 | 2,371 | py | Python | cors/resources/cors-makeheader.py | meyerweb/wpt | f04261533819893c71289614c03434c06856c13e | [
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | cors/resources/cors-makeheader.py | meyerweb/wpt | f04261533819893c71289614c03434c06856c13e | [
"BSD-3-Clause"
] | 7,642 | 2018-05-28T09:38:03.000Z | 2022-03-31T20:55:48.000Z | cors/resources/cors-makeheader.py | meyerweb/wpt | f04261533819893c71289614c03434c06856c13e | [
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | import json
from wptserve.utils import isomorphic_decode
def main(request, response):
origin = request.GET.first(b"origin", request.headers.get(b'origin') or b'none')
if b"check" in request.GET:
token = request.GET.first(b"token")
value = request.server.stash.take(token)
if value is not None:
if request.GET.first(b"check", None) == b"keep":
request.server.stash.put(token, value)
body = u"1"
else:
body = u"0"
return [(b"Content-Type", b"text/plain")], body
if origin != b'none':
response.headers.set(b"Access-Control-Allow-Origin", origin)
if b'origin2' in request.GET:
response.headers.append(b"Access-Control-Allow-Origin", request.GET.first(b'origin2'))
#Preflight
if b'headers' in request.GET:
response.headers.set(b"Access-Control-Allow-Headers", request.GET.first(b'headers'))
if b'credentials' in request.GET:
response.headers.set(b"Access-Control-Allow-Credentials", request.GET.first(b'credentials'))
if b'methods' in request.GET:
response.headers.set(b"Access-Control-Allow-Methods", request.GET.first(b'methods'))
code_raw = request.GET.first(b'code', None)
if code_raw:
code = int(code_raw)
else:
code = None
if request.method == u'OPTIONS':
#Override the response code if we're in a preflight and it's asked
if b'preflight' in request.GET:
code = int(request.GET.first(b'preflight'))
#Log that the preflight actually happened if we have an ident
if b'token' in request.GET:
request.server.stash.put(request.GET[b'token'], True)
if b'location' in request.GET:
if code is None:
code = 302
if code >= 300 and code < 400:
response.headers.set(b"Location", request.GET.first(b'location'))
headers = {}
for name, values in request.headers.items():
if len(values) == 1:
headers[isomorphic_decode(name)] = isomorphic_decode(values[0])
else:
#I have no idea, really
headers[name] = values
headers[u'get_value'] = isomorphic_decode(request.GET.first(b'get_value', b''))
body = json.dumps(headers)
if code:
return (code, b"StatusText"), [], body
else:
return body
| 33.871429 | 100 | 0.619148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 634 | 0.267398 |
e057de6d96dbc248f4a0c02caf3e3c52ad4ff136 | 1,053 | py | Python | device_osc_grid.py | wlfyit/PiLightsLib | 98e39af45f05d0ee44e2f166de5b654d58df33ae | [
"MIT"
] | null | null | null | device_osc_grid.py | wlfyit/PiLightsLib | 98e39af45f05d0ee44e2f166de5b654d58df33ae | [
"MIT"
] | null | null | null | device_osc_grid.py | wlfyit/PiLightsLib | 98e39af45f05d0ee44e2f166de5b654d58df33ae | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from pythonosc import osc_bundle_builder
from pythonosc import osc_message_builder
from pythonosc import udp_client
from .device import DeviceObj
# OSC Grid Object
class OSCGrid(DeviceObj):
def __init__(self, name, width, height, ip, port, bri=1):
DeviceObj.__init__(self, name, "osc_grid", width, height)
self.buffer = []
self.brightness = bri
self.osc = udp_client.SimpleUDPClient(ip, port)
def set(self, r, g, b, x=0, y=0):
DeviceObj.set(self, r, g, b, x, y)
# Set Pixel
builder = osc_message_builder.OscMessageBuilder(address="/light/{0}/{1}/color".format(x, y))
builder.add_arg(r)
builder.add_arg(g)
builder.add_arg(b)
self.buffer.append(builder.build())
def show(self):
DeviceObj.show(self)
# Update Display
bundle = osc_bundle_builder.OscBundleBuilder(0)
for m in self.buffer:
bundle.add_content(m)
self.osc.send(bundle.build())
self.buffer.clear()
| 24.488372 | 100 | 0.636277 | 862 | 0.818613 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.093067 |
e0581bc2242266c4f411267aa587a7bfd0afc840 | 965 | py | Python | main/models.py | StevenSume/EasyCMDB | c2c44c9efe2de2729659d81ef886abff242ac1c5 | [
"Apache-2.0"
] | 2 | 2019-08-23T06:04:12.000Z | 2019-09-16T07:27:16.000Z | main/models.py | StevenSume/EasyCMDB | c2c44c9efe2de2729659d81ef886abff242ac1c5 | [
"Apache-2.0"
] | null | null | null | main/models.py | StevenSume/EasyCMDB | c2c44c9efe2de2729659d81ef886abff242ac1c5 | [
"Apache-2.0"
] | null | null | null | from .app import db
class Project(db.Model):
__tablename__ = 'projects'
id = db.Column(db.Integer,primary_key=True,autoincrement=True)
project_name = db.Column(db.String(64),unique=True,index=True)
def to_dict(self):
mydict = {
'id': self.id,
'project_name': self.project_name
}
return mydict
def __repr__(self):
return '<Project %r>' % self.__name__
class Item(db.Model):
__tablename__ = 'Items'
id = db.Column(db.Integer, primary_key=True,autoincrement=True)
project_id = db.Column(db.Integer)
key = db.Column(db.String(64),nullable=False)
value = db.Column(db.String(64),nullable=False)
def to_dict(self):
mydict = {
'id': self.id,
'project_id': self.project_id,
'key': self.key,
'value': self.value
}
return mydict
def __repr__(self):
return '<Item %r>' % self.__name__
| 26.805556 | 67 | 0.592746 | 941 | 0.97513 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.091192 |
e05894d94e1647d1250203e64a76b21248195718 | 1,274 | py | Python | test.py | iron-io/iron_cache_python | f68f5a5e216e3189397ffd7d243de0d53bf7c764 | [
"BSD-2-Clause"
] | 3 | 2015-08-01T13:30:16.000Z | 2021-03-22T10:25:57.000Z | test.py | iron-io/iron_cache_python | f68f5a5e216e3189397ffd7d243de0d53bf7c764 | [
"BSD-2-Clause"
] | 1 | 2015-06-02T08:53:44.000Z | 2015-06-02T09:59:17.000Z | test.py | iron-io/iron_cache_python | f68f5a5e216e3189397ffd7d243de0d53bf7c764 | [
"BSD-2-Clause"
] | 3 | 2015-05-12T18:13:52.000Z | 2016-09-08T20:43:40.000Z | from iron_cache import *
import unittest
import requests
class TestIronCache(unittest.TestCase):
def setUp(self):
self.cache = IronCache("test_cache")
def test_get(self):
self.cache.put("test_item", "testing")
item = self.cache.get("test_item")
self.assertEqual(item.value, "testing")
def test_delete(self):
self.cache.put("test_item", "will be deleted")
self.cache.delete("test_item")
self.assertRaises(requests.exceptions.HTTPError,
self.cache.get, "test_item")
def test_increment(self):
self.cache.put("test_item", 2)
self.cache.increment("test_item")
item = self.cache.get("test_item")
self.assertEqual(item.value, 3)
self.cache.increment("test_item", amount=42)
item = self.cache.get("test_item")
self.assertEqual(item.value, 45)
def test_decrement(self):
self.cache.put("test_item", 100)
self.cache.decrement("test_item")
item = self.cache.get("test_item")
self.assertEqual(item.value, 99)
self.cache.decrement("test_item", amount=98)
item = self.cache.get("test_item")
self.assertEqual(item.value, 1)
if __name__ == '__main__':
unittest.main()
| 31.073171 | 56 | 0.631868 | 1,167 | 0.916013 | 0 | 0 | 0 | 0 | 0 | 0 | 222 | 0.174254 |
e059b01690fb071d4b03811c7664f63e0007961b | 3,914 | py | Python | lib_exec/StereoPipeline/libexec/asp_image_utils.py | sebasmurphy/iarpa | aca39cc5390a153a9779a636ab2523e65cb6d3b0 | [
"MIT"
] | 20 | 2017-02-01T14:54:57.000Z | 2022-01-25T06:34:35.000Z | lib_exec/StereoPipeline/libexec/asp_image_utils.py | sebasmurphy/iarpa | aca39cc5390a153a9779a636ab2523e65cb6d3b0 | [
"MIT"
] | 3 | 2020-04-21T12:11:26.000Z | 2021-01-10T07:00:51.000Z | lib_exec/StereoPipeline/libexec/asp_image_utils.py | sebasmurphy/iarpa | aca39cc5390a153a9779a636ab2523e65cb6d3b0 | [
"MIT"
] | 10 | 2017-12-18T18:45:25.000Z | 2021-11-22T02:43:03.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
"""
Basic functions for working with images on disk.
"""
import sys, os, re, subprocess, string, time, errno
import asp_string_utils
def stripRgbImageAlphaChannel(inputPath, outputPath):
"""Makes an RGB copy of an RBGA image"""
cmd = 'gdal_translate ' + inputPath + ' ' + outputPath + ' -b 1 -b 2 -b 3 -co "COMPRESS=LZW" -co "TILED=YES" -co "BLOCKXSIZE=256" -co "BLOCKYSIZE=256"'
print cmd
os.system(cmd)
def getImageSize(imagePath):
"""Returns the size [samples, lines] in an image"""
# Make sure the input file exists
if not os.path.exists(imagePath):
raise Exception('Image file ' + imagePath + ' not found!')
# Use subprocess to suppress the command output
cmd = ['gdalinfo', imagePath]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
textOutput, err = p.communicate()
# Extract the size from the text
sizePos = textOutput.find('Size is')
endPos = textOutput.find('\n', sizePos+7)
sizeStr = textOutput[sizePos+7:endPos]
sizeStrs = sizeStr.strip().split(',')
numSamples = int(sizeStrs[0])
numLines = int(sizeStrs[1])
size = [numSamples, numLines]
return size
def isIsisFile(filePath):
"""Returns True if the file is an ISIS file, False otherwise."""
# Currently we treat all files with .cub extension as ISIS files
extension = os.path.splitext(filePath)[1]
return (extension == '.cub')
def getImageStats(imagePath):
"""Obtains some image statistics from gdalinfo"""
if not os.path.exists(imagePath):
raise Exception('Image file ' + imagePath + ' not found!')
# Call command line tool silently
cmd = ['gdalinfo', imagePath, '-stats']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
textOutput, err = p.communicate()
# Statistics are computed seperately for each band
bandStats = []
band = 0
while (True): # Loop until we run out of bands
# Look for the stats line for this band
bandString = 'Band ' + str(band+1) + ' Block='
bandLoc = textOutput.find(bandString)
if bandLoc < 0:
return bandStats # Quit if we did not find it
# Now parse out the statistics for this band
bandMaxStart = textOutput.find('STATISTICS_MAXIMUM=', bandLoc)
bandMeanStart = textOutput.find('STATISTICS_MEAN=', bandLoc)
bandMinStart = textOutput.find('STATISTICS_MINIMUM=', bandLoc)
bandStdStart = textOutput.find('STATISTICS_STDDEV=', bandLoc)
bandMax = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMaxStart)
bandMean = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMeanStart)
bandMin = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMinStart)
bandStd = asp_string_utils.getNumberAfterEqualSign(textOutput, bandStdStart)
# Add results to the output list
bandStats.append( (bandMin, bandMax, bandMean, bandStd) )
band = band + 1 # Move to the next band
| 34.946429 | 155 | 0.67348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,834 | 0.468574 |
e05b4851d3707561c8c65e7a4b20ce903889be85 | 1,550 | py | Python | src/sv-pipeline/04_variant_resolution/scripts/merge_RdTest_genotypes.py | leipzig/gatk-sv | 96566cbbaf0f8f9c8452517b38eea1e5dd6ed33a | [
"BSD-3-Clause"
] | 76 | 2020-06-18T21:31:43.000Z | 2022-03-02T18:42:58.000Z | src/sv-pipeline/04_variant_resolution/scripts/merge_RdTest_genotypes.py | iamh2o/gatk-sv | bf3704bd1d705339577530e267cd4d1b2f77a17f | [
"BSD-3-Clause"
] | 195 | 2020-06-22T15:12:28.000Z | 2022-03-28T18:06:46.000Z | src/sv-pipeline/04_variant_resolution/scripts/merge_RdTest_genotypes.py | iamh2o/gatk-sv | bf3704bd1d705339577530e267cd4d1b2f77a17f | [
"BSD-3-Clause"
] | 39 | 2020-07-03T06:47:18.000Z | 2022-03-03T03:47:25.000Z | #!/usr/bin/env python
import argparse
DELIMITER = "\t"
def merge(genotypes_filename, gq_filename, merged_filename):
with open(genotypes_filename, "r") as genotypes, open(gq_filename, "r") as gq, open(merged_filename, "w") as merged:
# Integrity check: do the files have same columns?
genotypes_header = genotypes.readline().rstrip().split(DELIMITER)
gq_header = gq.readline().rstrip().split(DELIMITER)
if not genotypes_header == gq_header:
raise ValueError("The files do not have same number/order of columns")
n_cols = len(gq_header)
for genotypes_line, gq_line in zip(genotypes, gq):
x = genotypes_line.rstrip().split(DELIMITER)
y = gq_line.rstrip().split(DELIMITER)
# Check if lines in the files are in the correct order.
if not x[0:4] == y[0:4]:
raise ValueError(f"The lines in the files are not in the same order; "
f"expected the following lines to match.\n{x[0:4]}\n{y[0:4]}")
h = DELIMITER.join(x[0:4])
for i in range(4, n_cols):
merged.write(DELIMITER.join([h, gq_header[i], x[i], y[i]]) + "\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('genotypes')
parser.add_argument('GQ')
parser.add_argument('fout')
args = parser.parse_args()
merge(args.genotypes, args.GQ, args.fout)
| 36.046512 | 120 | 0.627742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 340 | 0.219355 |
e05c65974024f19246bfde72289d00cbac7e1014 | 32 | py | Python | esperanto_analyzer/web/__init__.py | fidelisrafael/esperanto-analyzer | af1e8609ec0696e3d1975aa0ba0c88e5f04f8468 | [
"BSD-2-Clause"
] | 18 | 2018-09-05T00:46:47.000Z | 2021-12-08T08:54:35.000Z | esperanto_analyzer/web/__init__.py | fidelisrafael/esperanto-analyzer | af1e8609ec0696e3d1975aa0ba0c88e5f04f8468 | [
"BSD-2-Clause"
] | null | null | null | esperanto_analyzer/web/__init__.py | fidelisrafael/esperanto-analyzer | af1e8609ec0696e3d1975aa0ba0c88e5f04f8468 | [
"BSD-2-Clause"
] | 3 | 2019-03-12T17:54:18.000Z | 2020-01-11T13:05:03.000Z | from .api.server import run_app
| 16 | 31 | 0.8125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e05cac875b2516b4ba7c777d72d8ac768173cf38 | 3,091 | py | Python | crawling/sns/main.py | CSID-DGU/2021-2-OSSP2-TwoRolless-2 | e9381418e3899d8e1e78415e9ab23b73b4f30a95 | [
"MIT"
] | null | null | null | crawling/sns/main.py | CSID-DGU/2021-2-OSSP2-TwoRolless-2 | e9381418e3899d8e1e78415e9ab23b73b4f30a95 | [
"MIT"
] | null | null | null | crawling/sns/main.py | CSID-DGU/2021-2-OSSP2-TwoRolless-2 | e9381418e3899d8e1e78415e9ab23b73b4f30a95 | [
"MIT"
] | 1 | 2021-10-15T05:19:20.000Z | 2021-10-15T05:19:20.000Z | import tweepy
import traceback
import time
import pymongo
from tweepy import OAuthHandler
from pymongo import MongoClient
from pymongo.cursor import CursorType
twitter_consumer_key = ""
twitter_consumer_secret = ""
twitter_access_token = ""
twitter_access_secret = ""
auth = OAuthHandler(twitter_consumer_key, twitter_consumer_secret)
auth.set_access_token(twitter_access_token, twitter_access_secret)
api = tweepy.API(auth)
def crawllTwit(snsname, findtag):
account = snsname
tweets = api.user_timeline(screen_name=account, count=100, include_rts=False, exclude_replies=True, tweet_mode='extended')
snsList = []
snsTime = []
url = []
pic = []
i = 0
for tweet in tweets:
flag = tweet.full_text.find(findtag)
if flag >= 0:
ttp = tweet.full_text.split("https://")
gong = ""
count = 0
for slist in ttp:
if count == (len(ttp) - 1):
break
gong = gong + slist
count += 1
snsList.append(gong)
snsTime.append(tweet.created_at)
tmp = f"https://twitter.com/{tweet.user.screen_name}/status/{tweet.id}"
url.append(tmp)
i += 1
media = tweet.entities.get('media', [])
if (len(media) > 0):
pic.append(media[0]['media_url'])
else:
pic.append("")
j = 0
while j < len(snsList):
if j == 10:
break
snsList[j] = snsList[j].replace('<', '<')
snsList[j] = snsList[j].replace('>', '>')
snsList[j] = snsList[j].replace('▶️', ' ⇒ ')
j += 1
mydb = my_client['TwoRolless']
mycol = mydb['sns']
for k in range(0, len(snsList)):
if k == 15:
break
x = mycol.insert_one(
{
"tag": findtag,
"time": snsTime[k],
"text": snsList[k],
"img": pic[k],
"url": url[k]
}
)
conn_str = ""
my_client = pymongo.MongoClient(conn_str)
if __name__ == '__main__':
while True:
print("cycles start")
mydb = my_client['TwoRolless']
mycol = mydb['sns']
mycol.remove({})
crawllTwit("@m_thelastman", "더라스트맨")
crawllTwit("@Musical_NarGold", "나르치스와_골드문트")
crawllTwit("@rndworks", "더데빌")
crawllTwit("@ninestory9", "엘리펀트송")
crawllTwit("@companyrang", "쿠로이저택엔누가살고있을까")
crawllTwit("@companyrang", "난쟁이들")
crawllTwit("@page1company", "곤투모로우")
crawllTwit("@HONGcompany", "더모먼트")
crawllTwit("@orchardmusical", "칠칠")
crawllTwit("@livecorp2011", "팬레터")
crawllTwit("@shownote", "젠틀맨스가이드")
crawllTwit("@od_musical", "지킬앤하이드")
crawllTwit("@kontentz", "엔딩노트")
crawllTwit("@i_seensee", "빌리")
crawllTwit("@doublek_ent", "은하철도의")
crawllTwit("@Insight_Since96", "뱀파이어아더")
print("cycle end")
print("sleep 30 seconds")
time.sleep(30)
print("sleep end")
| 29.438095 | 126 | 0.547072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 782 | 0.239657 |
e05cbd467aaeb3118a784785e85a274a27c23842 | 698 | py | Python | demos/interactive-classifier/config.py | jepabe/Demo_earth2 | ab20c3a9114904219688b16f8a1273e68927e6f9 | [
"Apache-2.0"
] | 1,909 | 2015-04-22T20:18:22.000Z | 2022-03-31T13:42:03.000Z | demos/interactive-classifier/config.py | jepabe/Demo_earth2 | ab20c3a9114904219688b16f8a1273e68927e6f9 | [
"Apache-2.0"
] | 171 | 2015-09-24T05:49:49.000Z | 2022-03-14T00:54:50.000Z | demos/interactive-classifier/config.py | jepabe/Demo_earth2 | ab20c3a9114904219688b16f8a1273e68927e6f9 | [
"Apache-2.0"
] | 924 | 2015-04-23T05:43:18.000Z | 2022-03-28T12:11:31.000Z | #!/usr/bin/env python
"""Handles Earth Engine service account configuration."""
import ee
# The service account email address authorized by your Google contact.
# Set up a service account as described in the README.
EE_ACCOUNT = '[email protected]'
# The private key associated with your service account in Privacy Enhanced
# Email format (.pem suffix). To convert a private key from the RSA format
# (.p12 suffix) to .pem, run the openssl command like this:
# openssl pkcs12 -in downloaded-privatekey.p12 -nodes -nocerts > privatekey.pem
EE_PRIVATE_KEY_FILE = 'privatekey.pem'
EE_CREDENTIALS = ee.ServiceAccountCredentials(EE_ACCOUNT, EE_PRIVATE_KEY_FILE)
| 41.058824 | 79 | 0.787966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 560 | 0.802292 |
e05d022e20ec708234ba466419ce63a57d30ac77 | 2,716 | py | Python | PythonScripting/NumbersInPython.py | Neo-sunny/pythonProgs | a9d2359d8a09d005d0ba6f94d7d256bf91499793 | [
"MIT"
] | null | null | null | PythonScripting/NumbersInPython.py | Neo-sunny/pythonProgs | a9d2359d8a09d005d0ba6f94d7d256bf91499793 | [
"MIT"
] | null | null | null | PythonScripting/NumbersInPython.py | Neo-sunny/pythonProgs | a9d2359d8a09d005d0ba6f94d7d256bf91499793 | [
"MIT"
] | null | null | null | """
Demonstration of numbers in Python
"""
# Python has an integer type called int
print("int")
print("---")
print(0)
print(1)
print(-3)
print(70383028364830)
print("")
# Python has a real number type called float
print("float")
print("-----")
print(0.0)
print(7.35)
print(-43.2)
print("")
# Limited precision
print("Precision")
print("---------")
print(4.56372883832331773)
print(1.23456789012345678)
print("")
# Scientific/exponential notation
print("Scientific notation")
print("-------------------")
print(5e32)
print(999999999999999999999999999999999999999.9)
print("")
# Infinity
print("Infinity")
print("--------")
print(1e500)
print(-1e500)
print("")
# Conversions
print("Conversions between numeric types")
print("---------------------------------")
print(float(3))
print(float(99999999999999999999999999999999999999))
print(int(3.0))
print(int(3.7))
print(int(-3.7))
"""
Demonstration of simple arithmetic expressions in Python
"""
# Unary + and -
print("Unary operators")
print(+3)
print(-5)
print(+7.86)
print(-3348.63)
print("")
# Simple arithmetic
print("Addition and Subtraction")
print(1 + 2)
print(48 - 89)
print(3.45 + 2.7)
print(87.3384 - 12.35)
print(3 + 6.7)
print(9.8 - 4)
print("")
print("Multiplication")
print(3 * 2)
print(7.8 * 27.54)
print(7 * 8.2)
print("")
print("Division")
print(8 / 2)
print(3 / 2)
print(7.538 / 14.3)
print(8 // 2)
print(3 // 2)
print(7.538 // 14.3)
print("")
print("Exponentiation")
print(3 ** 2)
print(5 ** 4)
print(32.6 ** 7)
print(9 ** 0.5)
"""
Demonstration of compound arithmetic expressions in Python
"""
# Expressions can include multiple operations
print("Compound expressions")
print(3 + 5 + 7 + 27)
#Operator with same precedence are evaluated from left to right
print(18 - 6 + 4)
print("")
# Operator precedence defines how expressions are evaluated
print("Operator precedence")
print(7 + 3 * 5)
print(5.5 * 6 // 2 + 8)
print(-3 ** 2)
print("")
# Use parentheses to change evaluation order
print("Grouping with parentheses")
print((7 + 3) * 5)
print(5.5 * ((6 // 2) + 8))
print((-3) ** 2)
"""
Demonstration of the use of variables and how to assign values to
them.
"""
# The = operator can be used to assign values to variables
bakers_dozen = 12 + 1
temperature = 93
# Variables can be used as values and in expressions
print(temperature, bakers_dozen)
print("celsius:", (temperature - 32) * 5 / 9)
print("fahrenheit:", float(temperature))
# You can assign a different value to an existing variable
temperature = 26
print("new value:", temperature)
# Multiple variables can be used in arbitrary expressions
offset = 32
multiplier = 5.0 / 9.0
celsius = (temperature - offset) * multiplier
print("celsius value:", celsius)
| 17.522581 | 65 | 0.674521 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,285 | 0.473122 |
e05e6c4440c357c867a4c38e37f726c4d615e768 | 1,676 | py | Python | 3DBeam/source/solving_strategies/strategies/linear_solver.py | JoZimmer/Beam-Models | e701c0bae6e3035e7a07cc590da4a132b133dcff | [
"BSD-3-Clause"
] | null | null | null | 3DBeam/source/solving_strategies/strategies/linear_solver.py | JoZimmer/Beam-Models | e701c0bae6e3035e7a07cc590da4a132b133dcff | [
"BSD-3-Clause"
] | null | null | null | 3DBeam/source/solving_strategies/strategies/linear_solver.py | JoZimmer/Beam-Models | e701c0bae6e3035e7a07cc590da4a132b133dcff | [
"BSD-3-Clause"
] | 1 | 2022-01-05T17:32:32.000Z | 2022-01-05T17:32:32.000Z | from source.solving_strategies.strategies.solver import Solver
class LinearSolver(Solver):
def __init__(self,
array_time, time_integration_scheme, dt,
comp_model,
initial_conditions,
force,
structure_model):
super().__init__(array_time, time_integration_scheme, dt,
comp_model, initial_conditions, force, structure_model)
def _print_solver_info(self):
print("Linear Solver")
def solve(self):
# time loop
for i in range(0, len(self.array_time)):
self.step = i
current_time = self.array_time[i]
#print("time: {0:.2f}".format(current_time))
self.scheme.solve_single_step(self.force[:, i])
# appending results to the list
self.displacement[:, i] = self.scheme.get_displacement()
self.velocity[:, i] = self.scheme.get_velocity()
self.acceleration[:, i] = self.scheme.get_acceleration()
# TODO: only calculate reaction when user wants it
# if self.structure_model is not None:
# self.dynamic_reaction[:, i] = self._compute_reaction()
# reaction computed in dynamic analysis
# TODO: only calculate reaction when user wants it
# moved reaction computation to dynamic analysis level
# AK . this doesnt considers the support reaction check
#if self.structure_model is not None:
# self.dynamic_reaction[:, i] = self._compute_reaction()
# update results
self.scheme.update()
| 38.976744 | 80 | 0.590095 | 1,610 | 0.960621 | 0 | 0 | 0 | 0 | 0 | 0 | 563 | 0.335919 |
e05ea195ece947573587efca60ad05b204af43f6 | 1,095 | py | Python | payment/migrations/0002_auto_20171125_0022.py | Littledelma/mofadog | 5a7c6672da248e400a8a5746506a6e7b273c9510 | [
"MIT"
] | null | null | null | payment/migrations/0002_auto_20171125_0022.py | Littledelma/mofadog | 5a7c6672da248e400a8a5746506a6e7b273c9510 | [
"MIT"
] | 1 | 2021-06-08T03:28:08.000Z | 2021-06-08T03:28:08.000Z | payment/migrations/0002_auto_20171125_0022.py | Littledelma/mofadog | 5a7c6672da248e400a8a5746506a6e7b273c9510 | [
"MIT"
] | 1 | 2021-06-08T03:23:34.000Z | 2021-06-08T03:23:34.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-24 16:22
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('payment', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='history_order',
name='dead_date',
field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719840, tzinfo=utc), verbose_name='daed_date'),
),
migrations.AlterField(
model_name='history_order',
name='order_date',
field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719662, tzinfo=utc), verbose_name='order date'),
),
migrations.AlterField(
model_name='history_order',
name='valid_date',
field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719758, tzinfo=utc), verbose_name='valid_date'),
),
]
| 33.181818 | 138 | 0.63379 | 883 | 0.806393 | 0 | 0 | 0 | 0 | 0 | 0 | 209 | 0.190868 |
e05fe1dabdb8d88cb6b7077a77b9ecb4a63a39fd | 841 | py | Python | src/sqlfluff/rules/L024.py | NathanHowell/sqlfluff | 9eb30226d77727cd613947e144a0abe483151f18 | [
"MIT"
] | 3,024 | 2020-10-01T11:03:51.000Z | 2022-03-31T16:42:00.000Z | src/sqlfluff/rules/L024.py | NathanHowell/sqlfluff | 9eb30226d77727cd613947e144a0abe483151f18 | [
"MIT"
] | 2,395 | 2020-09-30T12:59:21.000Z | 2022-03-31T22:05:29.000Z | src/sqlfluff/rules/L024.py | NathanHowell/sqlfluff | 9eb30226d77727cd613947e144a0abe483151f18 | [
"MIT"
] | 246 | 2020-10-02T17:08:03.000Z | 2022-03-30T17:43:51.000Z | """Implementation of Rule L024."""
from sqlfluff.core.rules.doc_decorators import document_fix_compatible
from sqlfluff.rules.L023 import Rule_L023
@document_fix_compatible
class Rule_L024(Rule_L023):
"""Single whitespace expected after USING in JOIN clause.
| **Anti-pattern**
.. code-block:: sql
SELECT b
FROM foo
LEFT JOIN zoo USING(a)
| **Best practice**
| The • character represents a space.
| Add a space after USING, to avoid confusing it
| for a function.
.. code-block:: sql
:force:
SELECT b
FROM foo
LEFT JOIN zoo USING•(a)
"""
expected_mother_segment_type = "join_clause"
pre_segment_identifier = ("name", "using")
post_segment_identifier = ("type", "bracketed")
expand_children = None
allow_newline = True
| 21.564103 | 70 | 0.652794 | 667 | 0.789349 | 0 | 0 | 692 | 0.818935 | 0 | 0 | 511 | 0.604734 |
e061aa108e5ec8060888f9dff1215ff5763d024a | 2,847 | py | Python | projects/scocen/cmd_components_simple.py | mikeireland/chronostar | fcf37614e1d145f3a5e265e54512bf8cd98051a0 | [
"MIT"
] | 4 | 2018-05-28T11:05:42.000Z | 2021-05-14T01:13:11.000Z | projects/scocen/cmd_components_simple.py | mikeireland/chronostar | fcf37614e1d145f3a5e265e54512bf8cd98051a0 | [
"MIT"
] | 13 | 2019-08-14T07:30:24.000Z | 2021-11-08T23:44:29.000Z | projects/scocen/cmd_components_simple.py | mikeireland/chronostar | fcf37614e1d145f3a5e265e54512bf8cd98051a0 | [
"MIT"
] | 4 | 2016-04-21T08:25:26.000Z | 2021-02-25T06:53:52.000Z | """
Plot CMDs for each component.
"""
import numpy as np
from astropy.table import Table
import matplotlib.pyplot as plt
import matplotlib.cm as cm
plt.ion()
# Pretty plots
from fig_settings import *
############################################
# Some things are the same for all the plotting scripts and we put
# this into a single library to avoid confusion.
import scocenlib as lib
data_filename = lib.data_filename
comps_filename = lib.comps_filename
compnames = lib.compnames
colors = lib.colors
############################################
# Minimal probability required for membership
pmin_membership = 0.5
############################################
# how to split subplots
grid = [5, 5]
# CMD limits
xlim = [-1, 5]
ylim = [17, -3]
############################################
# Read data
try:
tab = tab0
comps = comps0
except:
tab0 = Table.read(data_filename)
Gmag = tab0['phot_g_mean_mag'] - 5 * np.log10(1.0 / (tab0['parallax'] * 1e-3) / 10) # tab['parallax'] in micro arcsec
tab0['Gmag'] = Gmag
comps0 = Table.read(comps_filename)
tab = tab0
comps = comps0
# Main sequence parametrization
# fitpar for pmag, rpmag
fitpar = [0.17954163, -2.48748376, 12.9279348, -31.35434182, 38.31330583, -12.25864507]
poly = np.poly1d(fitpar)
x = np.linspace(1, 4, 100)
y = poly(x)
m = y > 4
yms = y[m]
xms = x[m]
def plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim):
ax.plot(xms, yms, c='brown', label='Median main sequence', linewidth=1)
ax.plot(xms, yms - 1, c='brown', label='1 mag above the median', linewidth=1, linestyle='--')
ax.plot(xms, yms - 1.5, c='brown', label='1.5 mag above the median', linewidth=1, linestyle='--')
ax.axvline(x=0.369, linewidth=0.5, color='k') # F
ax.axvline(x=0.767, linewidth=0.5, color='k') # G
ax.axvline(x=0.979, linewidth=0.5, color='k') # K
ax.axvline(x=1.848, linewidth=0.5, color='k') # M
ax.set_xlim(xlim[0], xlim[1])
ax.set_ylim(ylim[0], ylim[1])
return ax
print('Plotting %d components.'%len(comps))
fig=plt.figure()
for i, c in enumerate(comps):
ax = fig.add_subplot(grid[0], grid[1], i+1) # TODO: adjust this if needed
comp_ID = c['comp_ID']
col=tab['membership%s'%comp_ID]
mask = col > pmin_membership
t=tab[mask]
if len(t)>100:
alpha=0.5
else:
alpha=1
t.sort('membership%s'%comp_ID)
#~ t.reverse()
#~ ax.scatter(t['bp_rp'], t['Gmag'], s=1, c='k', alpha=alpha)
ax.scatter(t['bp_rp'], t['Gmag'], s=1, c=t['membership%s'%comp_ID], alpha=1, vmin=0.5, vmax=1, cmap=cm.jet)
ax=plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim)
age=c['Age']
ax.set_title('%s (%.2f$\pm$%.2f Myr %s) %d'%(comp_ID, age, c['Crossing_time'], c['Age_reliable'], len(t)))
#~ plt.tight_layout()
plt.show()
| 26.858491 | 122 | 0.601686 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 964 | 0.338602 |
e061c15bed338723a46d4f04e8c849cc852fe7c0 | 5,326 | py | Python | test/test_cursor_binding.py | rhlahuja/snowflake-connector-python | 6abc56c970cdb698a833b7f6ac9cbe7dfa667abd | [
"Apache-2.0"
] | null | null | null | test/test_cursor_binding.py | rhlahuja/snowflake-connector-python | 6abc56c970cdb698a833b7f6ac9cbe7dfa667abd | [
"Apache-2.0"
] | null | null | null | test/test_cursor_binding.py | rhlahuja/snowflake-connector-python | 6abc56c970cdb698a833b7f6ac9cbe7dfa667abd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2018 Snowflake Computing Inc. All right reserved.
#
import pytest
from snowflake.connector.errors import (ProgrammingError)
def test_binding_security(conn_cnx, db_parameters):
"""
SQL Injection Tests
"""
try:
with conn_cnx() as cnx:
cnx.cursor().execute(
"CREATE OR REPLACE TABLE {name} "
"(aa INT, bb STRING)".format(
name=db_parameters['name']))
cnx.cursor().execute(
"INSERT INTO {name} VALUES(%s, %s)".format(
name=db_parameters['name']),
(1, 'test1'))
cnx.cursor().execute(
"INSERT INTO {name} VALUES(%(aa)s, %(bb)s)".format(
name=db_parameters['name']),
{'aa': 2, 'bb': 'test2'})
for rec in cnx.cursor().execute(
"SELECT * FROM {name} ORDER BY 1 DESC".format(
name=db_parameters['name'])):
break
assert rec[0] == 2, 'First column'
assert rec[1] == 'test2', 'Second column'
for rec in cnx.cursor().execute(
"SELECT * FROM {name} WHERE aa=%s".format(
name=db_parameters['name']), (1,)):
break
assert rec[0] == 1, 'First column'
assert rec[1] == 'test1', 'Second column'
# SQL injection safe test
# Good Example
with pytest.raises(ProgrammingError):
cnx.cursor().execute(
"SELECT * FROM {name} WHERE aa=%s".format(
name=db_parameters['name']),
("1 or aa>0",))
with pytest.raises(ProgrammingError):
cnx.cursor().execute(
"SELECT * FROM {name} WHERE aa=%(aa)s".format(
name=db_parameters['name']),
{"aa": "1 or aa>0"})
# Bad Example in application. DON'T DO THIS
c = cnx.cursor()
c.execute("SELECT * FROM {name} WHERE aa=%s".format(
name=db_parameters['name']) % ("1 or aa>0",))
rec = c.fetchall()
assert len(rec) == 2, "not raising error unlike the previous one."
finally:
with conn_cnx() as cnx:
cnx.cursor().execute(
"drop table if exists {name}".format(
name=db_parameters['name']))
def test_binding_list(conn_cnx, db_parameters):
"""
SQL binding list type for IN
"""
try:
with conn_cnx() as cnx:
cnx.cursor().execute(
"CREATE OR REPLACE TABLE {name} "
"(aa INT, bb STRING)".format(
name=db_parameters['name']))
cnx.cursor().execute(
"INSERT INTO {name} VALUES(%s, %s)".format(
name=db_parameters['name']),
(1, 'test1'))
cnx.cursor().execute(
"INSERT INTO {name} VALUES(%(aa)s, %(bb)s)".format(
name=db_parameters['name']),
{'aa': 2, 'bb': 'test2'})
cnx.cursor().execute(
"INSERT INTO {name} VALUES(3, 'test3')".format(
name=db_parameters['name']))
for rec in cnx.cursor().execute("""
SELECT * FROM {name} WHERE aa IN (%s) ORDER BY 1 DESC
""".format(name=db_parameters['name']), ([1, 3],)):
break
assert rec[0] == 3, 'First column'
assert rec[1] == 'test3', 'Second column'
for rec in cnx.cursor().execute(
"SELECT * FROM {name} WHERE aa=%s".format(
name=db_parameters['name']), (1,)):
break
assert rec[0] == 1, 'First column'
assert rec[1] == 'test1', 'Second column'
rec = cnx.cursor().execute("""
SELECT * FROM {name} WHERE aa IN (%s) ORDER BY 1 DESC
""".format(name=db_parameters['name']), ((1,),))
finally:
with conn_cnx() as cnx:
cnx.cursor().execute(
"drop table if exists {name}".format(
name=db_parameters['name']))
def test_unsupported_binding(conn_cnx, db_parameters):
"""
Unsupported data binding
"""
try:
with conn_cnx() as cnx:
cnx.cursor().execute(
"CREATE OR REPLACE TABLE {name} "
"(aa INT, bb STRING)".format(
name=db_parameters['name']))
cnx.cursor().execute(
"INSERT INTO {name} VALUES(%s, %s)".format(
name=db_parameters['name']),
(1, 'test1'))
sql = 'select count(*) from {name} where aa=%s'.format(
name=db_parameters['name'])
with cnx.cursor() as cur:
rec = cur.execute(sql, (1,)).fetchone()
assert rec[0] is not None, 'no value is returned'
# dict
with pytest.raises(ProgrammingError):
cnx.cursor().execute(sql, ({'value': 1},))
finally:
with conn_cnx() as cnx:
cnx.cursor().execute(
"drop table if exists {name}".format(
name=db_parameters['name']))
| 36.731034 | 78 | 0.480473 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,608 | 0.301915 |
e061da410580634f463731e3265faeb51909f55c | 1,071 | py | Python | taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/model/__init__.py | hectormartinez/rougexstem | 32da9eab253cb88fc1882e59026e8b5b40900a25 | [
"Apache-2.0"
] | null | null | null | taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/model/__init__.py | hectormartinez/rougexstem | 32da9eab253cb88fc1882e59026e8b5b40900a25 | [
"Apache-2.0"
] | null | null | null | taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/model/__init__.py | hectormartinez/rougexstem | 32da9eab253cb88fc1882e59026e8b5b40900a25 | [
"Apache-2.0"
] | null | null | null | # Natural Language Toolkit: Language Models
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Steven Bird <[email protected]>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
class ModelI(object):
"""
A processing interface for assigning a probability to the next word.
"""
def __init__(self):
'''Create a new language model.'''
raise NotImplementedError()
def train(self, text):
'''Train the model on the text.'''
raise NotImplementedError()
def probability(self, word, context):
'''Evaluate the probability of this word in this context.'''
raise NotImplementedError()
def choose_random_word(self, context):
'''Randomly select a word that is likely to appear in this context.'''
raise NotImplementedError()
def entropy(self, text):
'''Evaluate the total entropy of a message with respect to the model.
This is the sum of the log probability of each word in the message.'''
raise NotImplementedError()
| 31.5 | 78 | 0.6676 | 851 | 0.794585 | 0 | 0 | 0 | 0 | 0 | 0 | 641 | 0.598506 |
e06275178027bd16b4be36faab1b32af531b42cb | 1,047 | py | Python | flask-graphene-sqlalchemy/models.py | JovaniPink/flask-apps | de887f15261c286986cf38d234d49f7e4eb79c1a | [
"MIT"
] | null | null | null | flask-graphene-sqlalchemy/models.py | JovaniPink/flask-apps | de887f15261c286986cf38d234d49f7e4eb79c1a | [
"MIT"
] | null | null | null | flask-graphene-sqlalchemy/models.py | JovaniPink/flask-apps | de887f15261c286986cf38d234d49f7e4eb79c1a | [
"MIT"
] | null | null | null | import os
from graphene_sqlalchemy import SQLAlchemyObjectType
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
POSTGRES_CONNECTION_STRING = (
os.environ.get("POSTGRES_CONNECTION_STRING")
or "postgres://postgres:password@localhost:6432/postgres"
)
engine = create_engine(POSTGRES_CONNECTION_STRING, convert_unicode=True)
db_session = scoped_session(
sessionmaker(autocommit=False, autoflush=False, bind=engine)
)
Base = declarative_base()
Base.query = db_session.query_property()
class UserModel(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
name = Column(String)
balance = Column(Integer)
class MinAmountModel(Base):
__tablename__ = "min_amount"
amount = Column(Integer, primary_key=True)
class User(SQLAlchemyObjectType):
class Meta:
model = UserModel
class MinAmount(SQLAlchemyObjectType):
class Meta:
model = MinAmountModel
| 26.175 | 72 | 0.770774 | 416 | 0.397326 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.096466 |
e06418cb46f2f01ccc35fc22e565190b30c821ed | 16,478 | py | Python | curlypiv/synthetics/microsig.py | sean-mackenzie/curlypiv | 21c96c1bb1ba2548c4d5bebb389eb66ff58f851d | [
"MIT"
] | null | null | null | curlypiv/synthetics/microsig.py | sean-mackenzie/curlypiv | 21c96c1bb1ba2548c4d5bebb389eb66ff58f851d | [
"MIT"
] | 1 | 2021-06-14T17:24:43.000Z | 2021-06-14T17:24:43.000Z | curlypiv/synthetics/microsig.py | sean-mackenzie/curlypiv | 21c96c1bb1ba2548c4d5bebb389eb66ff58f851d | [
"MIT"
] | null | null | null | # microsig
"""
Author: Maximilliano Rossi
More detail about the MicroSIG can be found at:
Website:
https://gitlab.com/defocustracking/microsig-python
Publication:
Rossi M, Synthetic image generator for defocusing and astigmatic PIV/PTV, Meas. Sci. Technol., 31, 017003 (2020)
DOI:10.1088/1361-6501/ab42bb.
"""
import numpy as np
import imageio
import tkinter as tk
import os
from os import listdir
from os.path import isfile, basename, join, isdir
import sys
import glob
# import time as tm
from tkinter import filedialog
# ----- code adapted by Sean MacKenzie ------
# 2.0 define class
class CurlypivMicrosigCollection(object):
def __init__(self, testSetup, synCol, use_gui=False,
use_internal_setting=False, setting_file=None,
use_internal_data=False, data_files=None,
to_internal_sequence=False, destination_folder=None,
output_dtype='np.uint16'):
if not isinstance(testSetup, object):
raise ValueError("{} must be a CurlypivTestSetup class object".format(testSetup))
if not isinstance(synCol, object):
raise ValueError("{} must be a CurlypivSyntheticCollection class object".format(synCol))
valid_output_dtype = ['np.uint16', 'np.uint8']
if output_dtype not in valid_output_dtype:
raise ValueError("{} must be one of {}".format(output_dtype, valid_output_dtype))
self.testSetup = testSetup
self.synCol = synCol
self.use_gui = use_gui
self.output_dtype = output_dtype
if self.use_gui:
run()
else:
if use_internal_setting:
self.setting_file = self.synCol.microsigSetup
else:
if not isinstance(setting_file, str):
raise ValueError("{} must be a filepath to microsig settings text file".format(setting_file))
self.setting_file = os.path.abspath(setting_file)
if use_internal_data:
raise ValueError("script to use internal data still in development")
else:
if not isinstance(data_files, str):
raise ValueError("{} must be a filepath to particle location text files".format(data_files))
all_files = glob.glob(data_files + '/*.txt')
save_files = []
for ff in [f for f in all_files if f.endswith('.txt')]:
save_files.append(ff)
save_files.sort()
self.data_files = save_files
if to_internal_sequence:
raise ValueError("script to use internal data still in development")
else:
if not isinstance(destination_folder, str):
raise ValueError("{} must be a filepath to write output images".format(destination_folder))
self.destination_folder = os.path.abspath(destination_folder)
self.generate()
def generate(self):
# %%
mic = {}
f = open(self.setting_file)
for x in f:
words = x.split()
mic[words[0]] = float(words[2])
mic['pixel_dim_x'] = int(mic['pixel_dim_x'])
mic['pixel_dim_y'] = int(mic['pixel_dim_y'])
mic['n_rays'] = int(mic['n_rays'])
# %%
ii = 0;
ii_tot = len(self.data_files)
for data in self.data_files:
ii = ii + 1
print('creating image {0} of {1} ...'.format(ii, ii_tot))
P = np.genfromtxt(data)
if len(P.shape) == 1:
P = np.array([P])
head, tail = os.path.split(data)
I = take_image(mic, P)
if self.output_dtype == 'np.uint16':
imageio.imwrite(os.path.join(self.destination_folder, (tail[:-3] + 'tif')),
np.uint16(I))
elif self.output_dtype == 'np.uint8':
imageio.imwrite(os.path.join(self.destination_folder, (tail[:-3] + 'tif')),
np.uint8(I))
print('done!')
# %%
def sorter(f):
sorting = int(f[:-4])
return sorting
def run():
# %%
root = tk.Tk()
root.attributes('-topmost', True)
root.withdraw()
setting_file = filedialog.askopenfilenames(
title="Select settings file", parent=root,
filetypes=(("txt files", "*.txt"), ("all files", "*.*")))
if not setting_file:
sys.exit('input file not valid')
data_files = filedialog.askopenfilenames(
title="Select data file(s)", parent=root,
filetypes=(("txt files", "*.txt"), ("all files", "*.*")))
if not setting_file:
sys.exit('input file not valid')
destination_folder = filedialog.askdirectory(
title="Select destination file", parent=root)
if not setting_file:
sys.exit('input file not valid')
# %%
mic = {}
f = open(setting_file[0])
for x in f:
words = x.split()
mic[words[0]] = float(words[2])
mic['pixel_dim_x'] = int(mic['pixel_dim_x'])
mic['pixel_dim_y'] = int(mic['pixel_dim_y'])
mic['n_rays'] = int(mic['n_rays'])
# %%
ii = 0;
ii_tot = len(data_files)
for data in data_files:
ii = ii + 1
print('creating image {0} of {1} ...'.format(ii, ii_tot))
P = np.genfromtxt(data)
if len(P.shape) == 1:
P = np.array([P])
head, tail = os.path.split(data)
I = take_image(mic, P)
print('done!')
# %%
def take_image(mic, P):
# NOTE: x and xp represent here light fields and should not be confused$
# with particle image coordinates which are represented by P
I = np.zeros((mic['pixel_dim_y'], mic['pixel_dim_x']));
dp_s = np.unique(P[:, 3])
if P.shape[1] == 5 or P.shape[1] == 8:
k_id = P[:, -1]
else:
k_id = np.ones(P.shape[0])
if P.shape[1] <= 5 and dp_s.size == 1:
n_points = int(np.round(mic['points_per_pixel'] * 2 * np.pi *
(dp_s * mic['magnification'] / mic['pixel_size']) ** 2))
xp = create_particle(dp_s, n_points, mic['n_rays'])
for ii in range(0, P.shape[0]):
Id = image_spherical(mic, xp, P[ii, 0:3])
I = I + Id * k_id[ii]
elif P.shape[1] <= 5 and dp_s.size != 1:
for ii in range(0, P.shape[0]):
n_points = int(np.round(mic['points_per_pixel'] * 2 * np.pi *
(P[ii, 3] * mic['magnification'] / mic['pixel_size']) ** 2))
xp = create_particle(P[ii, 3], n_points, mic['n_rays'])
Id = image_spherical(mic, xp, P[ii, 0:3])
I = I + Id * k_id[ii]
elif P.shape[1] >= 7:
for ii in range(0, P.shape[0]):
n_points = int(np.round(mic['points_per_pixel'] * 2 * np.pi *
(P[ii, 3] * mic['magnification'] / mic['pixel_size']) ** 2))
ecc = P[ii, 4]
if ecc > 1:
# area elipsoid/area sphere
fact = 1 / 2 * (1 + ecc / np.sqrt(1 - 1 / ecc ** 2)
* np.arcsin(np.sqrt(1 - 1 / ecc ** 2)))
n_points = int(np.round(fact * n_points))
elif ecc < 1:
# area elipsoid/area sphere
fact = 1 / 2 * (1 + ecc ** 2 / np.sqrt(1 - ecc ** 2)
* np.arctan(np.sqrt(1 - ecc ** 2)))
n_points = int(np.round(fact * n_points))
xp = create_ellipsoid(P[ii, 3:7], n_points, mic['n_rays'])
Id = image_spherical(mic, xp, P[ii, 0:3]);
I = I + Id * k_id[ii]
I = I * mic['gain']
if mic['background_mean'] != 0:
I = I + mic['background_mean']
if mic['background_noise'] != 0:
Irand = np.random.normal(0, mic['background_noise'],
(mic['pixel_dim_y'], mic['pixel_dim_x']))
I = I + np.round(Irand)
# I = np.round(I+random('norm',0,mic.background_noise,...
# mic.pixel_dim_y,mic.pixel_dim_x));
return I
# %%
def image_spherical(mic, xp, P1):
# take image of a particle with a spherical lens
# NOTE: x and xp represent here light fields and should not be confused$
# with particle image coordinates which are represented by P1
lens_radius = (np.tan(np.arcsin(mic['numerical_aperture']))
* (1 + 1 / mic['magnification']) * mic['focal_length'])
# distance lens-ccd
dCCD = -mic['focal_length'] * (mic['magnification'] + 1);
# distance particle-lens
dPART = P1[2] + mic['focal_length'] * (1 / mic['magnification'] + 1);
# linear transformation from the object plane to the lens plane
T2 = np.array([[1, 0, dPART, 0],
[0, 1, 0, dPART],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# light field right before the lens
x = np.linalg.inv(T2) @ xp
# remove rays outside of the lens aperture
ind = x[0, :] ** 2 + x[1, :] ** 2 <= lens_radius ** 2
x = x[:, ind]
# transformation of the light field with spherical lens
a = x[0, :];
b = x[1, :]
c = x[2, :];
d = x[3, :]
# radius of curvature of the lens
rk = mic['focal_length'] * (mic['ri_lens'] / mic['ri_medium'] - 1) * 2
dum = a * 0
# refraction medium-lens
# ray-vector befor lens
Vr = np.vstack((1 + dum, c, d))
Vr = (Vr / np.tile(np.sqrt(sum(Vr ** 2)), (3, 1)))
# normal-vector to the lens surface
Vl = np.vstack((rk + dum, a, b))
Vl = (Vl / np.tile(np.sqrt(sum(Vl ** 2)), (3, 1)))
# tangent-vector to the lens surface
Vrot = np.cross(Vr, Vl, axisa=0, axisb=0)
Vrot = np.cross(Vrot, Vl, axisa=1, axisb=0).transpose()
Vrot = Vrot / np.tile(np.sqrt(sum(Vrot ** 2)), (3, 1))
# angle after snell-law correction
vx = np.sum(Vr * Vl, axis=0) # dot product!
vy = np.sum(Vr * Vrot, axis=0) # dot product!
th11 = np.arcsin(mic['ri_medium'] / mic['ri_lens'] *
np.sin(np.arctan(vy / vx)))
# new ray-vector inside the lens
Vr11 = (Vl * np.tile(np.cos(th11), (3, 1)) +
Vrot * np.tile(np.sin(th11), (3, 1)))
Vr = Vr11 / np.tile(Vr11[0, :], (3, 1))
# refraction lens-medium
# normal-vector to the lens surface
Vl2 = np.vstack((Vl[0, :], -Vl[1:, :]))
# tangent-vector to the lens surface
Vrot = np.cross(Vr, Vl2, axisa=0, axisb=0)
Vrot = np.cross(Vrot, Vl2, axisa=1, axisb=0).transpose()
Vrot = Vrot / np.tile(np.sqrt(sum(Vrot ** 2)), (3, 1))
# angle after snell-law correction
vx = np.sum(Vr * Vl2, axis=0) # dot product!
vy = np.sum(Vr * Vrot, axis=0) # dot product!
th11 = np.arcsin(mic['ri_lens'] / mic['ri_medium'] *
np.sin(np.arctan(vy / vx)))
# new ray-vector outside the lens
Vr11 = (Vl2 * np.tile(np.cos(th11), (3, 1)) +
Vrot * np.tile(np.sin(th11), (3, 1)))
Vr = Vr11 / np.tile(Vr11[0, :], (3, 1))
# light field after the spherical lens
x[2, :] = Vr[1, :]
x[3, :] = Vr[2, :]
if mic['cyl_focal_length'] == 0:
# linear transformation from the lens plane to the ccd plane
T1 = np.array([[1, 0, -dCCD, 0],
[0, 1, 0, -dCCD],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# light field at the ccd plane
xs = np.linalg.inv(T1) @ x
else:
# # linear transformation from the lens plane to the cyl_lens plane
T1c = np.array([[1, 0, -dCCD * 1 / 3, 0],
[0, 1, 0, -dCCD * 1 / 3],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# # light field at the cylindrical lens plane
xc = np.linalg.inv(T1c) @ x
# # light field after the cylindrical lens plane
Tc = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[-1 / mic['cyl_focal_length'], 0, 1, 0],
[0, 0, 0, 1]])
xc_a = np.linalg.inv(Tc) @ xc
# # light field at the ccd plane
T1 = np.array([[1, 0, -dCCD * 2 / 3, 0],
[0, 1, 0, -dCCD * 2 / 3],
[0, 0, 1, 0],
[0, 0, 0, 1]]);
# # light field at the ccd plane
xs = np.linalg.inv(T1) @ xc_a
# transform the position in pixel units
X = np.round(xs[0, :] / mic['pixel_size'] + P1[0])
Y = np.round(xs[1, :] / mic['pixel_size'] + P1[1])
# remove rays outside the CCD
ind = np.all([X > 0, X <= mic['pixel_dim_x'], Y > 0, Y <= mic['pixel_dim_y'],
X.imag == 0, Y.imag == 0], axis=0)
# count number of rays in each pixel
countXY = np.sort(Y[ind] + (X[ind] - 1) * mic['pixel_dim_y'])
indi, ia = np.unique(countXY, return_index=True)
nCounts = np.hstack((ia[1:], countXY.size + 1)) - ia
# prepare image
I = np.zeros((mic['pixel_dim_y'], mic['pixel_dim_x']))
Ifr = I.flatten('F')
Ifr[indi.astype(int) - 1] = nCounts
I = Ifr.reshape(mic['pixel_dim_y'], mic['pixel_dim_x'], order='F')
return I
# %%
def create_particle(D, Ns, Nr):
R = D / 2
V = spiral_sphere(Ns)
V[0:2, V[0, :] > 0] = -V[0:2, V[0, :] > 0]
x = R * V[0, :]
y = R * V[1, :]
z = R * V[2, :]
V0 = spiral_sphere(Nr + 2)
V0 = V0[:, 1:-1]
u = np.tile(x, (Nr, 1))
v = np.tile(y, (Nr, 1))
s = u * 0
t = u * 0
phs = np.random.uniform(-np.pi, np.pi, z.size)
cs = np.cos(phs)
sn = np.sin(phs)
for k in range(0, Ns):
Rot = np.array([[cs[k], -sn[k], 0],
[sn[k], cs[k], 0], [0, 0, 1]])
Vr = Rot @ V0
Vr[0, :] = -abs(Vr[0, :])
s[:, k] = Vr[1, :] / Vr[0, :]
t[:, k] = Vr[2, :] / Vr[0, :]
u[:, k] = y[k] - s[:, k] * x[k]
v[:, k] = z[k] - t[:, k] * x[k]
xp = np.vstack((u.flatten('F'), v.flatten('F'),
s.flatten('F'), t.flatten('F')))
return xp
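# Hedged usage sketch (values are illustrative, not from the original code):
#   xp = create_particle(D=10.0, Ns=500, Nr=100)
# samples 500 points on one hemisphere of a sphere of diameter 10 (caller's
# units) and launches 100 randomly rotated rays from each point, returning a
# 4 x (Ns*Nr) light-field array of (u, v, s, t) rays that image_spherical()
# can propagate onto the CCD.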
# %%
def create_ellipsoid(Deab, Ns, Nr):
    D = Deab[0]
    ecc = Deab[1]
    alpha = Deab[2]
    beta = Deab[3]
R = D / 2
V = spiral_sphere(Ns)
V = R * V
V[2, :] = V[2, :] * ecc
R_beta = np.array([[np.cos(beta), 0, np.sin(beta)],
[0, 1, 0],
[-np.sin(beta), 0, np.cos(beta)]])
R_alpha = np.array([[np.cos(alpha), -np.sin(alpha), 0],
[np.sin(alpha), np.cos(alpha), 0],
[0, 0, 1]])
Vf = R_alpha @ (R_beta @ V)
ii1 = (Vf[1, :] == np.min(Vf[1, :])).nonzero()[0][0]
ii2 = (Vf[1, :] == np.max(Vf[1, :])).nonzero()[0][0]
ii3 = (Vf[2, :] == np.min(Vf[2, :])).nonzero()[0][0]
ii4 = (Vf[2, :] == np.max(Vf[2, :])).nonzero()[0][0]
Vdum = Vf[:, [ii1, ii2, ii3, ii4]]
A = np.c_[Vdum[1, :], Vdum[2, :], np.ones(Vdum.shape[1])]
C, _, _, _ = np.linalg.lstsq(A, Vdum[0, :], rcond=None)
V1dum = C[0] * Vf[1, :] + C[1] * Vf[2, :] + C[2]
ind = (Vf[0, :] - V1dum) < 0
x = Vf[0, ind]
y = Vf[1, ind]
z = Vf[2, ind]
Ns = z.size
V0 = spiral_sphere(Nr + 2)
V0 = V0[:, 1:-1]
u = np.tile(x, (Nr, 1))
v = np.tile(y, (Nr, 1))
s = u * 0
t = u * 0
phs = np.random.uniform(-np.pi, np.pi, z.size)
cs = np.cos(phs)
sn = np.sin(phs)
for k in range(0, Ns):
Rot = np.array([[cs[k], -sn[k], 0],
[sn[k], cs[k], 0], [0, 0, 1]])
Vr = Rot @ V0
Vr[0, :] = -abs(Vr[0, :])
s[:, k] = Vr[1, :] / Vr[0, :]
t[:, k] = Vr[2, :] / Vr[0, :]
u[:, k] = y[k] - s[:, k] * x[k]
v[:, k] = z[k] - t[:, k] * x[k]
xp = np.vstack((u.flatten('F'), v.flatten('F'),
s.flatten('F'), t.flatten('F')))
return xp
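# Hedged note: Deab packs the ellipsoid parameters in the order unpacked above,
# i.e. (diameter D, eccentricity scaling of the z-axis, alpha and beta
# orientation angles in radians), so an illustrative call could be
#   xp = create_ellipsoid((10.0, 0.5, np.pi / 6, 0.0), Ns=500, Nr=100)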
# %%
def spiral_sphere(N):
gr = (1 + np.sqrt(5)) / 2 # golden ratio
ga = 2 * np.pi * (1 - 1 / gr) # golden angle
ind_p = np.arange(0, N) # particle (i.e., point sample) index
lat = np.arccos(1 - 2 * ind_p / (
N - 1)) # latitude is defined so that particle index is proportional to surface area between 0 and lat
lon = ind_p * ga # position particles at even intervals along longitude
# Convert from spherical to Cartesian co-ordinates
x = np.sin(lat) * np.cos(lon)
y = np.sin(lat) * np.sin(lon)
z = np.cos(lat)
V = np.vstack((x, y, z))
return V
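# Hedged note: spiral_sphere(N) returns a 3 x N array of unit vectors laid out
# along a golden-angle spiral, giving an approximately uniform sampling of the
# sphere with no clustering at the poles, e.g. V = spiral_sphere(1000).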
# %%
if __name__ == '__main__':
run()
| 32.956 | 119 | 0.506615 | 3,499 | 0.212344 | 0 | 0 | 0 | 0 | 0 | 0 | 3,915 | 0.23759 |
e0651470e7323b974d75e2d23e40d53bc5af99ea | 4,146 | py | Python | planning/scenario_planning/lane_driving/motion_planning/obstacle_avoidance_planner/scripts/trajectory_visualizer.py | kmiya/AutowareArchitectureProposal.iv | 386b52c9cc90f4535ad833014f2f9500f0e64ccf | [
"Apache-2.0"
] | null | null | null | planning/scenario_planning/lane_driving/motion_planning/obstacle_avoidance_planner/scripts/trajectory_visualizer.py | kmiya/AutowareArchitectureProposal.iv | 386b52c9cc90f4535ad833014f2f9500f0e64ccf | [
"Apache-2.0"
] | null | null | null | planning/scenario_planning/lane_driving/motion_planning/obstacle_avoidance_planner/scripts/trajectory_visualizer.py | kmiya/AutowareArchitectureProposal.iv | 386b52c9cc90f4535ad833014f2f9500f0e64ccf | [
"Apache-2.0"
] | 1 | 2021-07-20T09:38:30.000Z | 2021-07-20T09:38:30.000Z | # Copyright 2020 Tier IV, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# TODO(kosuke murakami): write ros2 visualizer
# import rospy
# from autoware_planning_msgs.msg import Trajectory
# from autoware_planning_msgs.msg import TrajectoryPoint
# import matplotlib.pyplot as plt
# import numpy as np
# import tf
# from geometry_msgs.msg import Vector3
# def quaternion_to_euler(quaternion):
# """Convert Quaternion to Euler Angles
# quaternion: geometry_msgs/Quaternion
# euler: geometry_msgs/Vector3
# """
# e = tf.transformations.euler_from_quaternion(
# (quaternion.x, quaternion.y, quaternion.z, quaternion.w))
# return Vector3(x=e[0], y=e[1], z=e[2])
# class TrajectoryVisualizer():
# def __init__(self):
# self.in_trajectory = Trajectory()
# self.debug_trajectory = Trajectory()
# self.debug_fixed_trajectory = Trajectory()
# self.plot_done1 = True
# self.plot_done2 = True
# self.plot_done3 = True
# self.length = 50
# self.substatus1 = rospy.Subscriber(
# "/planning/scenario_planning/lane_driving/motion_planning/obstacle_avoidance_planner/trajectory",
# Trajectory, self.CallBackTraj, queue_size=1, tcp_nodelay=True)
# rospy.Timer(rospy.Duration(0.3), self.timerCallback)
# def CallBackTraj(self, cmd):
# if (self.plot_done1):
# self.in_trajectory = cmd
# self.plot_done1 = False
# def CallBackDebugTraj(self, cmd):
# if (self.plot_done2):
# self.debug_trajectory = cmd
# self.plot_done2 = False
# def CallBackDebugFixedTraj(self, cmd):
# if (self.plot_done3):
# self.debug_fixed_trajectory = cmd
# self.plot_done3 = False
# def timerCallback(self, event):
# self.plotTrajectory()
# self.plot_done1 = True
# self.plot_done2 = True
# self.plot_done3 = True
# def CalcArcLength(self, traj):
# s_arr = []
# ds = 0.0
# s_sum = 0.0
# if len(traj.points) > 0:
# s_arr.append(s_sum)
# for i in range(1, len(traj.points)):
# p0 = traj.points[i-1]
# p1 = traj.points[i]
# dx = p1.pose.position.x - p0.pose.position.x
# dy = p1.pose.position.y - p0.pose.position.y
# ds = np.sqrt(dx**2 + dy**2)
# s_sum += ds
# if(s_sum > self.length):
# break
# s_arr.append(s_sum)
# return s_arr
# def CalcX(self, traj):
# v_list = []
# for p in traj.points:
# v_list.append(p.pose.position.x)
# return v_list
# def CalcY(self, traj):
# v_list = []
# for p in traj.points:
# v_list.append(p.pose.position.y)
# return v_list
# def CalcYaw(self, traj, s_arr):
# v_list = []
# for p in traj.points:
# v_list.append(quaternion_to_euler(p.pose.orientation).z)
# return v_list[0: len(s_arr)]
# def plotTrajectory(self):
# plt.clf()
# ax3 = plt.subplot(1, 1, 1)
# x = self.CalcArcLength(self.in_trajectory)
# y = self.CalcYaw(self.in_trajectory, x)
# if len(x) == len(y):
# ax3.plot(x, y, label="final", marker="*")
# ax3.set_xlabel("arclength [m]")
# ax3.set_ylabel("yaw")
# plt.pause(0.01)
# def main():
# rospy.init_node("trajectory_visualizer")
# TrajectoryVisualizer()
# rospy.spin()
# if __name__ == "__main__":
# main()
| 30.485294 | 109 | 0.600338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,011 | 0.967438 |
e067ec51a3742d007fe87145e158ef565747647f | 2,642 | py | Python | main/forms.py | agokhale11/test2 | deddf17e7bb67777251cf73cbdb5f6970c16050a | [
"MIT"
] | null | null | null | main/forms.py | agokhale11/test2 | deddf17e7bb67777251cf73cbdb5f6970c16050a | [
"MIT"
] | 7 | 2020-06-05T18:32:16.000Z | 2022-03-11T23:24:17.000Z | main/forms.py | agokhale11/test2 | deddf17e7bb67777251cf73cbdb5f6970c16050a | [
"MIT"
] | null | null | null | from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth.models import User
from django import forms
class UploadFileForm(forms.Form):
title = forms.CharField(max_length=50)
file = forms.FileField()
# Override the default widgets to add the 'form-control' class; if you don't do this you cannot style the forms with Bootstrap CSS
class LoginForm(AuthenticationForm):
username = forms.CharField(label="Username", max_length=16,
widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'username'}))
password = forms.CharField(label="Password", max_length=16,
widget=forms.PasswordInput(attrs={'class': 'form-control', 'name': 'password'}))
class SignUpForm(UserCreationForm):
full_name = forms.CharField(label="Full Name", max_length=50,
widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'full_name'}))
email = forms.EmailField(label = "Email", max_length =50, widget=forms.EmailInput(attrs={'class': 'form-control', 'name': 'email'}))
class Meta:
model = User
fields = ("email", "full_name", "username", "password1", "password2")
def save(self, commit=True):
user = super(SignUpForm, self).save(commit=False)
user.full_name = self.cleaned_data["full_name"]
user.email = self.cleaned_data["email"]
if commit:
user.save()
return user
class EmailSignupForm(UserCreationForm):
full_name = forms.CharField(label="Full Name", max_length=50,
widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'full_name'}))
class Meta:
model = User
fields = ("full_name", "username", "password1", "password2")
def save(self, commit=True):
user = super(EmailSignupForm, self).save(commit=False)
user.full_name = self.cleaned_data["full_name"]
if commit:
user.save()
return user
class ChangePasswordForm(forms.Form):
security_code = forms.CharField(label="Security Code", max_length=50,
widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'security_code'}))
password1 = forms.CharField(label="New Password", max_length=16,
widget=forms.PasswordInput(attrs={'class': 'form-control', 'name': 'password1'}))
password2 = forms.CharField(label="Re-enter New Password", max_length=16,
widget=forms.PasswordInput(attrs={'class': 'form-control', 'name': 'password2'}))
class Meta:
fields = ("security_code", "password1", "password2") | 41.28125 | 136 | 0.62869 | 2,433 | 0.920893 | 0 | 0 | 0 | 0 | 0 | 0 | 613 | 0.232021 |
e068d2bbe0be95225acd32e5324a05a51bc85276 | 5,641 | py | Python | pandas 9 - Statistics Information on data sets.py | PythonProgramming/Pandas-Basics-with-2.7 | a6ecd5ac7c25dba83e934549903f229de89290d3 | [
"MIT"
] | 10 | 2015-07-16T05:46:10.000Z | 2020-10-28T10:35:50.000Z | pandas 9 - Statistics Information on data sets.py | PythonProgramming/Pandas-Basics-with-2.7 | a6ecd5ac7c25dba83e934549903f229de89290d3 | [
"MIT"
] | null | null | null | pandas 9 - Statistics Information on data sets.py | PythonProgramming/Pandas-Basics-with-2.7 | a6ecd5ac7c25dba83e934549903f229de89290d3 | [
"MIT"
] | 9 | 2017-01-31T18:57:25.000Z | 2019-09-10T08:52:57.000Z | import pandas as pd
from pandas import DataFrame
df = pd.read_csv('sp500_ohlc.csv', index_col = 'Date', parse_dates=True)
df['H-L'] = df.High - df.Low
# Giving us count (rows), mean (avg), std (standard deviation for the entire
# set), minimum for the set, maximum for the set, and some %s in that range.
print( df.describe())
x = input('enter to cont')
# gives us correlation data. Remember the 3d chart we plotted?
# Now you can check whether the H-L range and Volume are also correlated
# with price swings. Correlations for your correlations.
print( df.corr())
x = input('enter to cont')
# covariance... now plenty of people know what correlation is, but what in the
# heck is covariance?
# Let's define the two.
# Covariance is a measure of how two variables change together.
# Correlation is a measure of how two variables move in relation to each other.
# So covariance is the more direct, unscaled assessment of the relationship between
# two variables, and correlation is that same relationship normalized to a -1..1 scale.
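# A quick sanity check of that relationship (hedged example on this dataframe):
# correlation is just covariance rescaled by the two standard deviations, so
# df['H-L'].cov(df['Volume']) / (df['H-L'].std() * df['Volume'].std())
# should reproduce df['H-L'].corr(df['Volume']).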
print( df.cov())
x = input('enter to cont')
print( df[['Volume','H-L']].corr())
x = input('enter to cont')
# see how it makes a table?
# so now, we can actually perform a service that some people actually pay for
# I once had a short freelance gig doing this
# so a popular form of analysis, especially within forex, is to compare correlations between
# the currencies. The idea here is that you pair one currency with another.
#
import datetime
import pandas.io.data
C = pd.io.data.get_data_yahoo('C',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
AAPL = pd.io.data.get_data_yahoo('AAPL',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
MSFT = pd.io.data.get_data_yahoo('MSFT',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
TSLA = pd.io.data.get_data_yahoo('TSLA',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
print( C.head())
x = input('enter to cont')
del C['Open']
# , 'high', 'low', 'close', 'volume'
del C['High']
del C['Low']
del C['Close']
del C['Volume']
corComp = C
corComp.rename(columns={'Adj Close': 'C'}, inplace=True)
corComp['AAPL'] = AAPL['Adj Close']
corComp['MSFT'] = MSFT['Adj Close']
corComp['TSLA'] = TSLA['Adj Close']
print( corComp.head())
x = input('enter to cont')
print( corComp.corr())
x = input('enter to cont')
C = pd.io.data.get_data_yahoo('C',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
AAPL = pd.io.data.get_data_yahoo('AAPL',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
MSFT = pd.io.data.get_data_yahoo('MSFT',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
TSLA = pd.io.data.get_data_yahoo('TSLA',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
BAC = pd.io.data.get_data_yahoo('BAC',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
BBRY = pd.io.data.get_data_yahoo('BBRY',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
CMG = pd.io.data.get_data_yahoo('CMG',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
EBAY = pd.io.data.get_data_yahoo('EBAY',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
JPM = pd.io.data.get_data_yahoo('JPM',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
SBUX = pd.io.data.get_data_yahoo('SBUX',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
TGT = pd.io.data.get_data_yahoo('TGT',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
WFC = pd.io.data.get_data_yahoo('WFC',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
x = input('enter to cont')
print( C.head())
del C['Open']
# , 'high', 'low', 'close', 'volume'
del C['High']
del C['Low']
del C['Close']
del C['Volume']
corComp = C
corComp.rename(columns={'Adj Close': 'C'}, inplace=True)
corComp['BAC'] = BAC['Adj Close']
corComp['MSFT'] = MSFT['Adj Close']
corComp['TSLA'] = TSLA['Adj Close']
corComp['AAPL'] = AAPL['Adj Close']
corComp['BBRY'] = BBRY['Adj Close']
corComp['CMG'] = CMG['Adj Close']
corComp['EBAY'] = EBAY['Adj Close']
corComp['JPM'] = JPM['Adj Close']
corComp['SBUX'] = SBUX['Adj Close']
corComp['TGT'] = TGT['Adj Close']
corComp['WFC'] = WFC['Adj Close']
print( corComp.head())
x = input('enter to cont')
print( corComp.corr())
x = input('enter to cont')
fancy = corComp.corr()
fancy.to_csv('bigmoney.csv')
| 32.606936 | 96 | 0.565148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,791 | 0.317497 |
e06b5b33923a9795875422db89edadd2030423bd | 292 | py | Python | working/tkinter_widget/test.py | songdaegeun/school-zone-enforcement-system | b5680909fd5a348575563534428d2117f8dc2e3f | [
"MIT"
] | null | null | null | working/tkinter_widget/test.py | songdaegeun/school-zone-enforcement-system | b5680909fd5a348575563534428d2117f8dc2e3f | [
"MIT"
] | null | null | null | working/tkinter_widget/test.py | songdaegeun/school-zone-enforcement-system | b5680909fd5a348575563534428d2117f8dc2e3f | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import threading
def test():
    while True:
        img1 = cv2.imread('captured car1.jpg')
        print("{}".format(img1.shape))
        print("{}".format(img1))
        cv2.imshow('asd', img1)
        cv2.waitKey(1)
t1 = threading.Thread(target=test)
t1.start()
| 18.25 | 44 | 0.60274 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.109589 |
e06beb7e97ea00b98e3ff8423b4c33335a68172e | 7,856 | py | Python | ceilometer/compute/virt/hyperv/utilsv2.py | aristanetworks/ceilometer | 8776b137f82f71eef1241bcb1600de10c1f77394 | [
"Apache-2.0"
] | 2 | 2015-09-07T09:15:26.000Z | 2015-09-30T02:13:23.000Z | ceilometer/compute/virt/hyperv/utilsv2.py | aristanetworks/ceilometer | 8776b137f82f71eef1241bcb1600de10c1f77394 | [
"Apache-2.0"
] | null | null | null | ceilometer/compute/virt/hyperv/utilsv2.py | aristanetworks/ceilometer | 8776b137f82f71eef1241bcb1600de10c1f77394 | [
"Apache-2.0"
] | 1 | 2019-09-16T02:11:41.000Z | 2019-09-16T02:11:41.000Z | # Copyright 2013 Cloudbase Solutions Srl
#
# Author: Claudiu Belu <[email protected]>
# Alessandro Pilotti <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VM related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import sys
if sys.platform == 'win32':
import wmi
from oslo.config import cfg
from ceilometer.compute.virt import inspector
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class HyperVException(inspector.InspectorException):
pass
class UtilsV2(object):
_VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
_PROC_SETTING = 'Msvm_ProcessorSettingData'
_SYNTH_ETH_PORT = 'Msvm_SyntheticEthernetPortSettingData'
_ETH_PORT_ALLOC = 'Msvm_EthernetPortAllocationSettingData'
_PORT_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData'
_STORAGE_ALLOC = 'Msvm_StorageAllocationSettingData'
_VS_SETTING_DATA = 'Msvm_VirtualSystemSettingData'
_METRICS_ME = 'Msvm_MetricForME'
_BASE_METRICS_VALUE = 'Msvm_BaseMetricValue'
_CPU_METRIC_NAME = 'Aggregated Average CPU Utilization'
_NET_IN_METRIC_NAME = 'Filtered Incoming Network Traffic'
_NET_OUT_METRIC_NAME = 'Filtered Outgoing Network Traffic'
# Disk metrics are supported from Hyper-V 2012 R2
_DISK_RD_METRIC_NAME = 'Disk Data Read'
_DISK_WR_METRIC_NAME = 'Disk Data Written'
def __init__(self, host='.'):
if sys.platform == 'win32':
self._init_hyperv_wmi_conn(host)
self._init_cimv2_wmi_conn(host)
self._host_cpu_info = None
def _init_hyperv_wmi_conn(self, host):
self._conn = wmi.WMI(moniker='//%s/root/virtualization/v2' % host)
def _init_cimv2_wmi_conn(self, host):
self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
def get_host_cpu_info(self):
if not self._host_cpu_info:
host_cpus = self._conn_cimv2.Win32_Processor()
self._host_cpu_info = (host_cpus[0].MaxClockSpeed, len(host_cpus))
return self._host_cpu_info
def get_all_vms(self):
vms = [(v.ElementName, v.Name) for v in
self._conn.Msvm_ComputerSystem(['ElementName', 'Name'],
Caption="Virtual Machine")]
return vms
def get_cpu_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
cpu_sd = self._get_vm_resources(vm, self._PROC_SETTING)[0]
cpu_metrics_def = self._get_metric_def(self._CPU_METRIC_NAME)
cpu_metric_aggr = self._get_metrics(vm, cpu_metrics_def)
cpu_used = 0
if cpu_metric_aggr:
cpu_used = long(cpu_metric_aggr[0].MetricValue)
return (cpu_used,
int(cpu_sd.VirtualQuantity),
long(vm.OnTimeInMilliseconds))
def get_vnic_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
ports = self._get_vm_resources(vm, self._ETH_PORT_ALLOC)
vnics = self._get_vm_resources(vm, self._SYNTH_ETH_PORT)
metric_def_in = self._get_metric_def(self._NET_IN_METRIC_NAME)
metric_def_out = self._get_metric_def(self._NET_OUT_METRIC_NAME)
for port in ports:
vnic = [v for v in vnics if port.Parent == v.path_()][0]
metric_value_instances = self._get_metric_value_instances(
port.associators(wmi_result_class=self._PORT_ACL_SET_DATA),
self._BASE_METRICS_VALUE)
metric_values = self._sum_metric_values_by_defs(
metric_value_instances, [metric_def_in, metric_def_out])
yield {
'rx_mb': metric_values[0],
'tx_mb': metric_values[1],
'element_name': vnic.ElementName,
'address': vnic.Address
}
def get_disk_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
metric_def_r = self._get_metric_def(self._DISK_RD_METRIC_NAME)
metric_def_w = self._get_metric_def(self._DISK_WR_METRIC_NAME)
disks = self._get_vm_resources(vm, self._STORAGE_ALLOC)
for disk in disks:
metric_values = self._get_metric_values(
disk, [metric_def_r, metric_def_w])
            # This is e.g. the VHD file location
if disk.HostResource:
host_resource = disk.HostResource[0]
yield {
# Values are in megabytes
'read_mb': metric_values[0],
'write_mb': metric_values[1],
'instance_id': disk.InstanceID,
'host_resource': host_resource
}
def _sum_metric_values(self, metrics):
tot_metric_val = 0
for metric in metrics:
tot_metric_val += long(metric.MetricValue)
return tot_metric_val
def _sum_metric_values_by_defs(self, element_metrics, metric_defs):
metric_values = []
for metric_def in metric_defs:
if metric_def:
metrics = self._filter_metrics(element_metrics, metric_def)
metric_values.append(self._sum_metric_values(metrics))
else:
# In case the metric is not defined on this host
metric_values.append(0)
return metric_values
def _get_metric_value_instances(self, elements, result_class):
instances = []
for el in elements:
associators = el.associators(wmi_result_class=result_class)
if associators:
instances.append(associators[0])
return instances
def _get_metric_values(self, element, metric_defs):
element_metrics = element.associators(
wmi_association_class=self._METRICS_ME)
return self._sum_metric_values_by_defs(element_metrics, metric_defs)
def _lookup_vm(self, vm_name):
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if n == 0:
raise inspector.InstanceNotFoundException(
_('VM %s not found on Hyper-V') % vm_name)
elif n > 1:
raise HyperVException(_('Duplicate VM name found: %s') % vm_name)
else:
return vms[0]
def _get_metrics(self, element, metric_def):
return self._filter_metrics(
element.associators(
wmi_association_class=self._METRICS_ME), metric_def)
def _filter_metrics(self, all_metrics, metric_def):
return [v for v in all_metrics if
v.MetricDefinitionId == metric_def.Id]
def _get_metric_def(self, metric_def):
metric = self._conn.CIM_BaseMetricDefinition(ElementName=metric_def)
if metric:
return metric[0]
def _get_vm_setting_data(self, vm):
vm_settings = vm.associators(
wmi_result_class=self._VS_SETTING_DATA)
# Avoid snapshots
return [s for s in vm_settings if
s.VirtualSystemType == self._VIRTUAL_SYSTEM_TYPE_REALIZED][0]
def _get_vm_resources(self, vm, resource_class):
setting_data = self._get_vm_setting_data(vm)
return setting_data.associators(wmi_result_class=resource_class)
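# Hedged usage sketch (not part of the original module): the Hyper-V inspector
# typically drives this class roughly like
#   utils = UtilsV2()
#   for vm_name, vm_id in utils.get_all_vms():
#       cpu_used, cpu_count, uptime_ms = utils.get_cpu_metrics(vm_name)
# which relies on WMI resource metering being enabled for the VMs on the host.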
| 37.056604 | 78 | 0.66777 | 6,691 | 0.851706 | 1,794 | 0.22836 | 0 | 0 | 0 | 0 | 1,691 | 0.215249 |
e06fed7cfa54e3e815b314104d5c76b1f273336e | 1,126 | py | Python | src/cli.py | cajones314/avocd2019 | 268e03c5d1bb5b3e14459b831916bb7846f40def | [
"MIT"
] | null | null | null | src/cli.py | cajones314/avocd2019 | 268e03c5d1bb5b3e14459b831916bb7846f40def | [
"MIT"
] | null | null | null | src/cli.py | cajones314/avocd2019 | 268e03c5d1bb5b3e14459b831916bb7846f40def | [
"MIT"
] | null | null | null | # system
from io import IOBase, StringIO
import os
# 3rd party
import click
# internal
from days import DayFactory
# import logging
# logger = logging.getLogger(__name__)
# logger.setLevel(logging.DEBUG)
# ch = logging.StreamHandler()
# logger.addHandler(ch)
@click.group(invoke_without_command=True)
@click.option('-d', '--day', required=True, type=click.IntRange(1, 31), metavar="<1..31>", help="Day you want to select.")
@click.option('-p', '--puzzle', required=True, type=click.IntRange(1, 2), metavar="<1|2>", help="Puzzle you want to run.")
@click.option('-i', '--input', required=True, type=click.Path(exists=True), help="Path to puzzle data.")
def cli(day: int, puzzle: int, input: str):
filename = os.path.join(input, f"{day:02}_puzzle_{puzzle}.txt")
if os.path.exists(filename):
input_stream = open(filename, "r")
else:
input_stream = StringIO('')
avocd = DayFactory(day, input_stream)
try:
print(avocd.run(puzzle))
except NotImplementedError:
print(f"Puzzle {puzzle} for day {day} not implemented.")
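# Hedged example (file layout assumed, not taken from the repo): with puzzle
# data stored as data/03_puzzle_1.txt, an invocation like
#   python cli.py -d 3 -p 1 -i data
# builds that filename from the --day/--puzzle options and prints the result
# returned by that day's solver.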
if __name__ == "__main__":
# pylint: disable=no-value-for-parameter
cli()
| 28.15 | 122 | 0.69627 | 0 | 0 | 0 | 0 | 780 | 0.692718 | 0 | 0 | 429 | 0.380995 |
e0702674ad6cf140cbb31a3b885b600b8569c9c4 | 17,033 | py | Python | option_c.py | wrosecrans/colormap | 0b6a3b7e4caa5df72e7bad8ba196acfbbe5e5946 | [
"CC0-1.0"
] | 231 | 2015-06-03T01:28:13.000Z | 2022-03-27T02:02:42.000Z | option_c.py | CatarinaL/colormap | bc549477db0c12b54a5928087552ad2cf274980f | [
"CC0-1.0"
] | 10 | 2015-06-06T23:06:06.000Z | 2019-10-25T20:10:48.000Z | option_c.py | CatarinaL/colormap | bc549477db0c12b54a5928087552ad2cf274980f | [
"CC0-1.0"
] | 97 | 2015-06-04T00:46:34.000Z | 2022-01-23T17:37:24.000Z |
from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
# Used to reconstruct the colormap in viscm
parameters = {'xp': [-5.4895292543686764, 14.790571669586654, 82.5546687431056, 29.15531114139253, -4.1316769886951761, -13.002076438907238],
'yp': [-35.948168839230306, -42.273376159885785, -28.845467523197698, 52.03426124197, 36.832712600868973, 40.792291220556734],
'min_JK': 16.8314150305,
'max_JK': 95}
cm_data = [[ 5.03832136e-02, 2.98028976e-02, 5.27974883e-01],
[ 6.35363639e-02, 2.84259729e-02, 5.33123681e-01],
[ 7.53531234e-02, 2.72063728e-02, 5.38007001e-01],
[ 8.62217979e-02, 2.61253206e-02, 5.42657691e-01],
[ 9.63786097e-02, 2.51650976e-02, 5.47103487e-01],
[ 1.05979704e-01, 2.43092436e-02, 5.51367851e-01],
[ 1.15123641e-01, 2.35562500e-02, 5.55467728e-01],
[ 1.23902903e-01, 2.28781011e-02, 5.59423480e-01],
[ 1.32380720e-01, 2.22583774e-02, 5.63250116e-01],
[ 1.40603076e-01, 2.16866674e-02, 5.66959485e-01],
[ 1.48606527e-01, 2.11535876e-02, 5.70561711e-01],
[ 1.56420649e-01, 2.06507174e-02, 5.74065446e-01],
[ 1.64069722e-01, 2.01705326e-02, 5.77478074e-01],
[ 1.71573925e-01, 1.97063415e-02, 5.80805890e-01],
[ 1.78950212e-01, 1.92522243e-02, 5.84054243e-01],
[ 1.86212958e-01, 1.88029767e-02, 5.87227661e-01],
[ 1.93374449e-01, 1.83540593e-02, 5.90329954e-01],
[ 2.00445260e-01, 1.79015512e-02, 5.93364304e-01],
[ 2.07434551e-01, 1.74421086e-02, 5.96333341e-01],
[ 2.14350298e-01, 1.69729276e-02, 5.99239207e-01],
[ 2.21196750e-01, 1.64970484e-02, 6.02083323e-01],
[ 2.27982971e-01, 1.60071509e-02, 6.04867403e-01],
[ 2.34714537e-01, 1.55015065e-02, 6.07592438e-01],
[ 2.41396253e-01, 1.49791041e-02, 6.10259089e-01],
[ 2.48032377e-01, 1.44393586e-02, 6.12867743e-01],
[ 2.54626690e-01, 1.38820918e-02, 6.15418537e-01],
[ 2.61182562e-01, 1.33075156e-02, 6.17911385e-01],
[ 2.67702993e-01, 1.27162163e-02, 6.20345997e-01],
[ 2.74190665e-01, 1.21091423e-02, 6.22721903e-01],
[ 2.80647969e-01, 1.14875915e-02, 6.25038468e-01],
[ 2.87076059e-01, 1.08554862e-02, 6.27294975e-01],
[ 2.93477695e-01, 1.02128849e-02, 6.29490490e-01],
[ 2.99855122e-01, 9.56079551e-03, 6.31623923e-01],
[ 3.06209825e-01, 8.90185346e-03, 6.33694102e-01],
[ 3.12543124e-01, 8.23900704e-03, 6.35699759e-01],
[ 3.18856183e-01, 7.57551051e-03, 6.37639537e-01],
[ 3.25150025e-01, 6.91491734e-03, 6.39512001e-01],
[ 3.31425547e-01, 6.26107379e-03, 6.41315649e-01],
[ 3.37683446e-01, 5.61830889e-03, 6.43048936e-01],
[ 3.43924591e-01, 4.99053080e-03, 6.44710195e-01],
[ 3.50149699e-01, 4.38202557e-03, 6.46297711e-01],
[ 3.56359209e-01, 3.79781761e-03, 6.47809772e-01],
[ 3.62553473e-01, 3.24319591e-03, 6.49244641e-01],
[ 3.68732762e-01, 2.72370721e-03, 6.50600561e-01],
[ 3.74897270e-01, 2.24514897e-03, 6.51875762e-01],
[ 3.81047116e-01, 1.81356205e-03, 6.53068467e-01],
[ 3.87182639e-01, 1.43446923e-03, 6.54176761e-01],
[ 3.93304010e-01, 1.11388259e-03, 6.55198755e-01],
[ 3.99410821e-01, 8.59420809e-04, 6.56132835e-01],
[ 4.05502914e-01, 6.78091517e-04, 6.56977276e-01],
[ 4.11580082e-01, 5.77101735e-04, 6.57730380e-01],
[ 4.17642063e-01, 5.63847476e-04, 6.58390492e-01],
[ 4.23688549e-01, 6.45902780e-04, 6.58956004e-01],
[ 4.29719186e-01, 8.31008207e-04, 6.59425363e-01],
[ 4.35733575e-01, 1.12705875e-03, 6.59797077e-01],
[ 4.41732123e-01, 1.53984779e-03, 6.60069009e-01],
[ 4.47713600e-01, 2.07954744e-03, 6.60240367e-01],
[ 4.53677394e-01, 2.75470302e-03, 6.60309966e-01],
[ 4.59622938e-01, 3.57374415e-03, 6.60276655e-01],
[ 4.65549631e-01, 4.54518084e-03, 6.60139383e-01],
[ 4.71456847e-01, 5.67758762e-03, 6.59897210e-01],
[ 4.77343929e-01, 6.97958743e-03, 6.59549311e-01],
[ 4.83210198e-01, 8.45983494e-03, 6.59094989e-01],
[ 4.89054951e-01, 1.01269996e-02, 6.58533677e-01],
[ 4.94877466e-01, 1.19897486e-02, 6.57864946e-01],
[ 5.00677687e-01, 1.40550640e-02, 6.57087561e-01],
[ 5.06454143e-01, 1.63333443e-02, 6.56202294e-01],
[ 5.12206035e-01, 1.88332232e-02, 6.55209222e-01],
[ 5.17932580e-01, 2.15631918e-02, 6.54108545e-01],
[ 5.23632990e-01, 2.45316468e-02, 6.52900629e-01],
[ 5.29306474e-01, 2.77468735e-02, 6.51586010e-01],
[ 5.34952244e-01, 3.12170300e-02, 6.50165396e-01],
[ 5.40569510e-01, 3.49501310e-02, 6.48639668e-01],
[ 5.46157494e-01, 3.89540334e-02, 6.47009884e-01],
[ 5.51715423e-01, 4.31364795e-02, 6.45277275e-01],
[ 5.57242538e-01, 4.73307585e-02, 6.43443250e-01],
[ 5.62738096e-01, 5.15448092e-02, 6.41509389e-01],
[ 5.68201372e-01, 5.57776706e-02, 6.39477440e-01],
[ 5.73631859e-01, 6.00281369e-02, 6.37348841e-01],
[ 5.79028682e-01, 6.42955547e-02, 6.35126108e-01],
[ 5.84391137e-01, 6.85790261e-02, 6.32811608e-01],
[ 5.89718606e-01, 7.28775875e-02, 6.30407727e-01],
[ 5.95010505e-01, 7.71902878e-02, 6.27916992e-01],
[ 6.00266283e-01, 8.15161895e-02, 6.25342058e-01],
[ 6.05485428e-01, 8.58543713e-02, 6.22685703e-01],
[ 6.10667469e-01, 9.02039303e-02, 6.19950811e-01],
[ 6.15811974e-01, 9.45639838e-02, 6.17140367e-01],
[ 6.20918555e-01, 9.89336721e-02, 6.14257440e-01],
[ 6.25986869e-01, 1.03312160e-01, 6.11305174e-01],
[ 6.31016615e-01, 1.07698641e-01, 6.08286774e-01],
[ 6.36007543e-01, 1.12092335e-01, 6.05205491e-01],
[ 6.40959444e-01, 1.16492495e-01, 6.02064611e-01],
[ 6.45872158e-01, 1.20898405e-01, 5.98867442e-01],
[ 6.50745571e-01, 1.25309384e-01, 5.95617300e-01],
[ 6.55579615e-01, 1.29724785e-01, 5.92317494e-01],
[ 6.60374266e-01, 1.34143997e-01, 5.88971318e-01],
[ 6.65129493e-01, 1.38566428e-01, 5.85582301e-01],
[ 6.69845385e-01, 1.42991540e-01, 5.82153572e-01],
[ 6.74522060e-01, 1.47418835e-01, 5.78688247e-01],
[ 6.79159664e-01, 1.51847851e-01, 5.75189431e-01],
[ 6.83758384e-01, 1.56278163e-01, 5.71660158e-01],
[ 6.88318440e-01, 1.60709387e-01, 5.68103380e-01],
[ 6.92840088e-01, 1.65141174e-01, 5.64521958e-01],
[ 6.97323615e-01, 1.69573215e-01, 5.60918659e-01],
[ 7.01769334e-01, 1.74005236e-01, 5.57296144e-01],
[ 7.06177590e-01, 1.78437000e-01, 5.53656970e-01],
[ 7.10548747e-01, 1.82868306e-01, 5.50003579e-01],
[ 7.14883195e-01, 1.87298986e-01, 5.46338299e-01],
[ 7.19181339e-01, 1.91728906e-01, 5.42663338e-01],
[ 7.23443604e-01, 1.96157962e-01, 5.38980786e-01],
[ 7.27670428e-01, 2.00586086e-01, 5.35292612e-01],
[ 7.31862231e-01, 2.05013174e-01, 5.31600995e-01],
[ 7.36019424e-01, 2.09439071e-01, 5.27908434e-01],
[ 7.40142557e-01, 2.13863965e-01, 5.24215533e-01],
[ 7.44232102e-01, 2.18287899e-01, 5.20523766e-01],
[ 7.48288533e-01, 2.22710942e-01, 5.16834495e-01],
[ 7.52312321e-01, 2.27133187e-01, 5.13148963e-01],
[ 7.56303937e-01, 2.31554749e-01, 5.09468305e-01],
[ 7.60263849e-01, 2.35975765e-01, 5.05793543e-01],
[ 7.64192516e-01, 2.40396394e-01, 5.02125599e-01],
[ 7.68090391e-01, 2.44816813e-01, 4.98465290e-01],
[ 7.71957916e-01, 2.49237220e-01, 4.94813338e-01],
[ 7.75795522e-01, 2.53657797e-01, 4.91170517e-01],
[ 7.79603614e-01, 2.58078397e-01, 4.87539124e-01],
[ 7.83382636e-01, 2.62499662e-01, 4.83917732e-01],
[ 7.87132978e-01, 2.66921859e-01, 4.80306702e-01],
[ 7.90855015e-01, 2.71345267e-01, 4.76706319e-01],
[ 7.94549101e-01, 2.75770179e-01, 4.73116798e-01],
[ 7.98215577e-01, 2.80196901e-01, 4.69538286e-01],
[ 8.01854758e-01, 2.84625750e-01, 4.65970871e-01],
[ 8.05466945e-01, 2.89057057e-01, 4.62414580e-01],
[ 8.09052419e-01, 2.93491117e-01, 4.58869577e-01],
[ 8.12611506e-01, 2.97927865e-01, 4.55337565e-01],
[ 8.16144382e-01, 3.02368130e-01, 4.51816385e-01],
[ 8.19651255e-01, 3.06812282e-01, 4.48305861e-01],
[ 8.23132309e-01, 3.11260703e-01, 4.44805781e-01],
[ 8.26587706e-01, 3.15713782e-01, 4.41315901e-01],
[ 8.30017584e-01, 3.20171913e-01, 4.37835947e-01],
[ 8.33422053e-01, 3.24635499e-01, 4.34365616e-01],
[ 8.36801237e-01, 3.29104836e-01, 4.30905052e-01],
[ 8.40155276e-01, 3.33580106e-01, 4.27454836e-01],
[ 8.43484103e-01, 3.38062109e-01, 4.24013059e-01],
[ 8.46787726e-01, 3.42551272e-01, 4.20579333e-01],
[ 8.50066132e-01, 3.47048028e-01, 4.17153264e-01],
[ 8.53319279e-01, 3.51552815e-01, 4.13734445e-01],
[ 8.56547103e-01, 3.56066072e-01, 4.10322469e-01],
[ 8.59749520e-01, 3.60588229e-01, 4.06916975e-01],
[ 8.62926559e-01, 3.65119408e-01, 4.03518809e-01],
[ 8.66077920e-01, 3.69660446e-01, 4.00126027e-01],
[ 8.69203436e-01, 3.74211795e-01, 3.96738211e-01],
[ 8.72302917e-01, 3.78773910e-01, 3.93354947e-01],
[ 8.75376149e-01, 3.83347243e-01, 3.89975832e-01],
[ 8.78422895e-01, 3.87932249e-01, 3.86600468e-01],
[ 8.81442916e-01, 3.92529339e-01, 3.83228622e-01],
[ 8.84435982e-01, 3.97138877e-01, 3.79860246e-01],
[ 8.87401682e-01, 4.01761511e-01, 3.76494232e-01],
[ 8.90339687e-01, 4.06397694e-01, 3.73130228e-01],
[ 8.93249647e-01, 4.11047871e-01, 3.69767893e-01],
[ 8.96131191e-01, 4.15712489e-01, 3.66406907e-01],
[ 8.98983931e-01, 4.20391986e-01, 3.63046965e-01],
[ 9.01807455e-01, 4.25086807e-01, 3.59687758e-01],
[ 9.04601295e-01, 4.29797442e-01, 3.56328796e-01],
[ 9.07364995e-01, 4.34524335e-01, 3.52969777e-01],
[ 9.10098088e-01, 4.39267908e-01, 3.49610469e-01],
[ 9.12800095e-01, 4.44028574e-01, 3.46250656e-01],
[ 9.15470518e-01, 4.48806744e-01, 3.42890148e-01],
[ 9.18108848e-01, 4.53602818e-01, 3.39528771e-01],
[ 9.20714383e-01, 4.58417420e-01, 3.36165582e-01],
[ 9.23286660e-01, 4.63250828e-01, 3.32800827e-01],
[ 9.25825146e-01, 4.68103387e-01, 3.29434512e-01],
[ 9.28329275e-01, 4.72975465e-01, 3.26066550e-01],
[ 9.30798469e-01, 4.77867420e-01, 3.22696876e-01],
[ 9.33232140e-01, 4.82779603e-01, 3.19325444e-01],
[ 9.35629684e-01, 4.87712357e-01, 3.15952211e-01],
[ 9.37990034e-01, 4.92666544e-01, 3.12575440e-01],
[ 9.40312939e-01, 4.97642038e-01, 3.09196628e-01],
[ 9.42597771e-01, 5.02639147e-01, 3.05815824e-01],
[ 9.44843893e-01, 5.07658169e-01, 3.02433101e-01],
[ 9.47050662e-01, 5.12699390e-01, 2.99048555e-01],
[ 9.49217427e-01, 5.17763087e-01, 2.95662308e-01],
[ 9.51343530e-01, 5.22849522e-01, 2.92274506e-01],
[ 9.53427725e-01, 5.27959550e-01, 2.88883445e-01],
[ 9.55469640e-01, 5.33093083e-01, 2.85490391e-01],
[ 9.57468770e-01, 5.38250172e-01, 2.82096149e-01],
[ 9.59424430e-01, 5.43431038e-01, 2.78700990e-01],
[ 9.61335930e-01, 5.48635890e-01, 2.75305214e-01],
[ 9.63202573e-01, 5.53864931e-01, 2.71909159e-01],
[ 9.65023656e-01, 5.59118349e-01, 2.68513200e-01],
[ 9.66798470e-01, 5.64396327e-01, 2.65117752e-01],
[ 9.68525639e-01, 5.69699633e-01, 2.61721488e-01],
[ 9.70204593e-01, 5.75028270e-01, 2.58325424e-01],
[ 9.71835007e-01, 5.80382015e-01, 2.54931256e-01],
[ 9.73416145e-01, 5.85761012e-01, 2.51539615e-01],
[ 9.74947262e-01, 5.91165394e-01, 2.48151200e-01],
[ 9.76427606e-01, 5.96595287e-01, 2.44766775e-01],
[ 9.77856416e-01, 6.02050811e-01, 2.41387186e-01],
[ 9.79232922e-01, 6.07532077e-01, 2.38013359e-01],
[ 9.80556344e-01, 6.13039190e-01, 2.34646316e-01],
[ 9.81825890e-01, 6.18572250e-01, 2.31287178e-01],
[ 9.83040742e-01, 6.24131362e-01, 2.27937141e-01],
[ 9.84198924e-01, 6.29717516e-01, 2.24595006e-01],
[ 9.85300760e-01, 6.35329876e-01, 2.21264889e-01],
[ 9.86345421e-01, 6.40968508e-01, 2.17948456e-01],
[ 9.87332067e-01, 6.46633475e-01, 2.14647532e-01],
[ 9.88259846e-01, 6.52324832e-01, 2.11364122e-01],
[ 9.89127893e-01, 6.58042630e-01, 2.08100426e-01],
[ 9.89935328e-01, 6.63786914e-01, 2.04858855e-01],
[ 9.90681261e-01, 6.69557720e-01, 2.01642049e-01],
[ 9.91364787e-01, 6.75355082e-01, 1.98452900e-01],
[ 9.91984990e-01, 6.81179025e-01, 1.95294567e-01],
[ 9.92540939e-01, 6.87029567e-01, 1.92170500e-01],
[ 9.93031693e-01, 6.92906719e-01, 1.89084459e-01],
[ 9.93456302e-01, 6.98810484e-01, 1.86040537e-01],
[ 9.93813802e-01, 7.04740854e-01, 1.83043180e-01],
[ 9.94103226e-01, 7.10697814e-01, 1.80097207e-01],
[ 9.94323596e-01, 7.16681336e-01, 1.77207826e-01],
[ 9.94473934e-01, 7.22691379e-01, 1.74380656e-01],
[ 9.94553260e-01, 7.28727890e-01, 1.71621733e-01],
[ 9.94560594e-01, 7.34790799e-01, 1.68937522e-01],
[ 9.94494964e-01, 7.40880020e-01, 1.66334918e-01],
[ 9.94355411e-01, 7.46995448e-01, 1.63821243e-01],
[ 9.94140989e-01, 7.53136955e-01, 1.61404226e-01],
[ 9.93850778e-01, 7.59304390e-01, 1.59091984e-01],
[ 9.93482190e-01, 7.65498551e-01, 1.56890625e-01],
[ 9.93033251e-01, 7.71719833e-01, 1.54807583e-01],
[ 9.92505214e-01, 7.77966775e-01, 1.52854862e-01],
[ 9.91897270e-01, 7.84239120e-01, 1.51041581e-01],
[ 9.91208680e-01, 7.90536569e-01, 1.49376885e-01],
[ 9.90438793e-01, 7.96858775e-01, 1.47869810e-01],
[ 9.89587065e-01, 8.03205337e-01, 1.46529128e-01],
[ 9.88647741e-01, 8.09578605e-01, 1.45357284e-01],
[ 9.87620557e-01, 8.15977942e-01, 1.44362644e-01],
[ 9.86509366e-01, 8.22400620e-01, 1.43556679e-01],
[ 9.85314198e-01, 8.28845980e-01, 1.42945116e-01],
[ 9.84031139e-01, 8.35315360e-01, 1.42528388e-01],
[ 9.82652820e-01, 8.41811730e-01, 1.42302653e-01],
[ 9.81190389e-01, 8.48328902e-01, 1.42278607e-01],
[ 9.79643637e-01, 8.54866468e-01, 1.42453425e-01],
[ 9.77994918e-01, 8.61432314e-01, 1.42808191e-01],
[ 9.76264977e-01, 8.68015998e-01, 1.43350944e-01],
[ 9.74443038e-01, 8.74622194e-01, 1.44061156e-01],
[ 9.72530009e-01, 8.81250063e-01, 1.44922913e-01],
[ 9.70532932e-01, 8.87896125e-01, 1.45918663e-01],
[ 9.68443477e-01, 8.94563989e-01, 1.47014438e-01],
[ 9.66271225e-01, 9.01249365e-01, 1.48179639e-01],
[ 9.64021057e-01, 9.07950379e-01, 1.49370428e-01],
[ 9.61681481e-01, 9.14672479e-01, 1.50520343e-01],
[ 9.59275646e-01, 9.21406537e-01, 1.51566019e-01],
[ 9.56808068e-01, 9.28152065e-01, 1.52409489e-01],
[ 9.54286813e-01, 9.34907730e-01, 1.52921158e-01],
[ 9.51726083e-01, 9.41670605e-01, 1.52925363e-01],
[ 9.49150533e-01, 9.48434900e-01, 1.52177604e-01],
[ 9.46602270e-01, 9.55189860e-01, 1.50327944e-01],
[ 9.44151742e-01, 9.61916487e-01, 1.46860789e-01],
[ 9.41896120e-01, 9.68589814e-01, 1.40955606e-01],
[ 9.40015097e-01, 9.75158357e-01, 1.31325517e-01]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(test_cm)
except ImportError:
print("viscm not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| 60.187279 | 141 | 0.577291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.00775 |
e0709fa966341538c2d49529de984d39878ed846 | 3,885 | py | Python | RPI/yolov5/algorithm/planner/algorithms/hybrid_astar/draw/draw.py | Aditya239233/MDP | 87491e1d67e547c11f4bdd5d784d120473429eae | [
"MIT"
] | 4 | 2022-01-14T15:06:43.000Z | 2022-01-18T14:45:04.000Z | RPI/yolov5/algorithm/planner/algorithms/hybrid_astar/draw/draw.py | Aditya239233/MDP | 87491e1d67e547c11f4bdd5d784d120473429eae | [
"MIT"
] | null | null | null | RPI/yolov5/algorithm/planner/algorithms/hybrid_astar/draw/draw.py | Aditya239233/MDP | 87491e1d67e547c11f4bdd5d784d120473429eae | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import math
from algorithm.planner.utils.car_utils import Car_C
PI = np.pi
class Arrow:
def __init__(self, x, y, theta, L, c):
angle = np.deg2rad(30)
d = 0.3 * L
w = 2
x_start = x
y_start = y
x_end = x + L * np.cos(theta)
y_end = y + L * np.sin(theta)
theta_hat_L = theta + PI - angle
theta_hat_R = theta + PI + angle
x_hat_start = x_end
x_hat_end_L = x_hat_start + d * np.cos(theta_hat_L)
x_hat_end_R = x_hat_start + d * np.cos(theta_hat_R)
y_hat_start = y_end
y_hat_end_L = y_hat_start + d * np.sin(theta_hat_L)
y_hat_end_R = y_hat_start + d * np.sin(theta_hat_R)
plt.plot([x_start, x_end], [y_start, y_end], color=c, linewidth=w)
plt.plot([x_hat_start, x_hat_end_L],
[y_hat_start, y_hat_end_L], color=c, linewidth=w)
plt.plot([x_hat_start, x_hat_end_R],
[y_hat_start, y_hat_end_R], color=c, linewidth=w)
class Car:
def __init__(self, x, y, yaw, w, L):
theta_B = PI + yaw
xB = x + L / 4 * np.cos(theta_B)
yB = y + L / 4 * np.sin(theta_B)
theta_BL = theta_B + PI / 2
theta_BR = theta_B - PI / 2
x_BL = xB + w / 2 * np.cos(theta_BL) # Bottom-Left vertex
y_BL = yB + w / 2 * np.sin(theta_BL)
x_BR = xB + w / 2 * np.cos(theta_BR) # Bottom-Right vertex
y_BR = yB + w / 2 * np.sin(theta_BR)
x_FL = x_BL + L * np.cos(yaw) # Front-Left vertex
y_FL = y_BL + L * np.sin(yaw)
x_FR = x_BR + L * np.cos(yaw) # Front-Right vertex
y_FR = y_BR + L * np.sin(yaw)
plt.plot([x_BL, x_BR, x_FR, x_FL, x_BL],
[y_BL, y_BR, y_FR, y_FL, y_BL],
linewidth=1, color='black')
Arrow(x, y, yaw, L / 2, 'black')
def draw_car(x, y, yaw, steer, color='black', extended_car=True):
if extended_car:
car = np.array([[-Car_C.RB, -Car_C.RB, Car_C.RF, Car_C.RF, -Car_C.RB, Car_C.ACTUAL_RF, Car_C.ACTUAL_RF, -Car_C.ACTUAL_RB, -Car_C.ACTUAL_RB],
[Car_C.W / 2, -Car_C.W / 2, -Car_C.W / 2, Car_C.W / 2, Car_C.W / 2, Car_C.W/2, -Car_C.W/2, -Car_C.W/2, Car_C.W/2]])
else:
car = np.array([[-Car_C.RB, -Car_C.RB, Car_C.RF, Car_C.RF, -Car_C.RB],
[Car_C.W / 2, -Car_C.W / 2, -Car_C.W / 2, Car_C.W / 2, Car_C.W / 2]])
wheel = np.array([[-Car_C.TR, -Car_C.TR, Car_C.TR, Car_C.TR, -Car_C.TR],
[Car_C.TW / 4, -Car_C.TW / 4, -Car_C.TW / 4, Car_C.TW / 4, Car_C.TW / 4]])
rlWheel = wheel.copy()
rrWheel = wheel.copy()
frWheel = wheel.copy()
flWheel = wheel.copy()
Rot1 = np.array([[math.cos(yaw), -math.sin(yaw)],
[math.sin(yaw), math.cos(yaw)]])
Rot2 = np.array([[math.cos(steer), math.sin(steer)],
[-math.sin(steer), math.cos(steer)]])
frWheel = np.dot(Rot2, frWheel)
flWheel = np.dot(Rot2, flWheel)
frWheel += np.array([[Car_C.WB], [-Car_C.WD / 2]])
flWheel += np.array([[Car_C.WB], [Car_C.WD / 2]])
rrWheel[1, :] -= Car_C.WD / 2
rlWheel[1, :] += Car_C.WD / 2
frWheel = np.dot(Rot1, frWheel)
flWheel = np.dot(Rot1, flWheel)
rrWheel = np.dot(Rot1, rrWheel)
rlWheel = np.dot(Rot1, rlWheel)
car = np.dot(Rot1, car)
frWheel += np.array([[x], [y]])
flWheel += np.array([[x], [y]])
rrWheel += np.array([[x], [y]])
rlWheel += np.array([[x], [y]])
car += np.array([[x], [y]])
plt.plot(car[0, :], car[1, :], color)
plt.plot(frWheel[0, :], frWheel[1, :], color)
plt.plot(rrWheel[0, :], rrWheel[1, :], color)
plt.plot(flWheel[0, :], flWheel[1, :], color)
plt.plot(rlWheel[0, :], rlWheel[1, :], color)
Arrow(x, y, yaw, Car_C.WB * 0.8, color)
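# Hedged usage sketch (not part of the original module; assumes the Car_C
# constants import above resolves): draw a single pose to eyeball the geometry.
# yaw rotates the whole car, steer only rotates the front wheels.
if __name__ == '__main__':
    plt.figure()
    draw_car(x=5.0, y=2.0, yaw=np.deg2rad(30), steer=np.deg2rad(10))
    plt.axis('equal')
    plt.show()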
| 33.491379 | 148 | 0.53565 | 1,798 | 0.462806 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.025997 |
e0712713ade0b6560e6616c234015a83c6ef39c9 | 696 | py | Python | models/database_models/comment_model.py | RuiCoreSci/Flask-Restful | 03f98a17487d407b69b853a9bf0ed20d2c5b003b | [
"MIT"
] | 7 | 2020-05-24T02:15:46.000Z | 2020-11-26T07:14:44.000Z | models/database_models/comment_model.py | RuiCoreSci/Flask-Restful | 03f98a17487d407b69b853a9bf0ed20d2c5b003b | [
"MIT"
] | 12 | 2020-05-17T10:46:29.000Z | 2021-05-06T20:08:37.000Z | models/database_models/comment_model.py | RuiCoreSci/Flask-Restful | 03f98a17487d407b69b853a9bf0ed20d2c5b003b | [
"MIT"
] | 4 | 2020-05-09T07:26:09.000Z | 2021-10-31T07:09:10.000Z | from sqlalchemy import Integer, Text, DateTime, func, Boolean, text
from models.database_models import Base, Column
class Comment(Base):
__tablename__ = "comment"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, nullable=False, comment="ID of the user who made the comment")
    post_id = Column(Integer, nullable=False, comment="ID of the post being commented on")
    content = Column(Text, nullable=False, comment="The user's comment")
    create_time = Column(DateTime, server_default=func.now(), comment="Creation time")
    update_time = Column(DateTime, server_default=func.now(), onupdate=func.now(), comment="Update time")
    deleted = Column(Boolean, default=False, server_default=text('0'), nullable=False, comment="Whether this record has been deleted")
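# Hedged usage sketch (assumes the project's SQLAlchemy session setup elsewhere):
#   session.add(Comment(user_id=1, post_id=2, content="Nice post"))
#   session.commit()
# create_time, update_time and deleted are then filled in by the defaults above.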
| 40.941176 | 106 | 0.728448 | 634 | 0.840849 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.161804 |
e072653c74adbd64f985b81e9b674ad50e5a700a | 27,779 | py | Python | aws_deploy/ecs/helper.py | jmsantorum/aws-deploy | f117cff3a5440ee42470feaa2a83263c3212cf10 | [
"BSD-3-Clause"
] | null | null | null | aws_deploy/ecs/helper.py | jmsantorum/aws-deploy | f117cff3a5440ee42470feaa2a83263c3212cf10 | [
"BSD-3-Clause"
] | null | null | null | aws_deploy/ecs/helper.py | jmsantorum/aws-deploy | f117cff3a5440ee42470feaa2a83263c3212cf10 | [
"BSD-3-Clause"
] | 1 | 2021-08-05T12:07:11.000Z | 2021-08-05T12:07:11.000Z | import json
import re
from datetime import datetime
from json.decoder import JSONDecodeError
import click
from boto3.session import Session
from boto3_type_annotations.ecs import Client
from botocore.exceptions import ClientError, NoCredentialsError
from dateutil.tz.tz import tzlocal
from dictdiffer import diff
JSON_LIST_REGEX = re.compile(r'^\[.*\]$')
LAUNCH_TYPE_EC2 = 'EC2'
LAUNCH_TYPE_FARGATE = 'FARGATE'
def read_env_file(container_name, file):
env_vars = []
try:
with open(file) as f:
for line in f:
if line.startswith('#') or not line.strip() or '=' not in line:
continue
key, value = line.strip().split('=', 1)
env_vars.append((container_name, key, value))
except Exception as e:
raise EcsTaskDefinitionCommandError(str(e))
return tuple(env_vars)
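# Hedged example: for an env file 'prod.env' (name is illustrative) containing
#   DB_HOST=db.example.com
#   DB_PORT=5432
# (comments, blank lines and lines without '=' are skipped),
# read_env_file('web', 'prod.env') returns
#   (('web', 'DB_HOST', 'db.example.com'), ('web', 'DB_PORT', '5432'))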
class EcsClient(object):
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None, region_name=None,
profile_name=None):
session = Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
region_name=region_name,
profile_name=profile_name
)
self.boto: Client = session.client('ecs')
self.events = session.client('events')
def describe_services(self, cluster_name, service_name):
return self.boto.describe_services(
cluster=cluster_name,
services=[service_name]
)
def describe_task_definition(self, task_definition_arn):
try:
return self.boto.describe_task_definition(
taskDefinition=task_definition_arn,
include=[
'TAGS',
]
)
except ClientError:
raise UnknownTaskDefinitionError(
u'Unknown task definition arn: %s' % task_definition_arn
)
def list_tasks(self, cluster_name, service_name):
return self.boto.list_tasks(
cluster=cluster_name,
serviceName=service_name
)
def describe_tasks(self, cluster_name, task_arns):
return self.boto.describe_tasks(cluster=cluster_name, tasks=task_arns)
def register_task_definition(self, family, containers, volumes, role_arn,
execution_role_arn, tags, additional_properties):
if tags:
additional_properties['tags'] = tags
return self.boto.register_task_definition(
family=family,
containerDefinitions=containers,
volumes=volumes,
taskRoleArn=role_arn,
executionRoleArn=execution_role_arn,
**additional_properties
)
def deregister_task_definition(self, task_definition_arn):
return self.boto.deregister_task_definition(
taskDefinition=task_definition_arn
)
def update_service(self, cluster, service, desired_count, task_definition):
if desired_count is None:
return self.boto.update_service(
cluster=cluster,
service=service,
taskDefinition=task_definition
)
return self.boto.update_service(
cluster=cluster,
service=service,
desiredCount=desired_count,
taskDefinition=task_definition
)
def run_task(self, cluster, task_definition, count, started_by, overrides,
launchtype='EC2', subnets=(), security_groups=(),
public_ip=False, platform_version=None):
if launchtype == LAUNCH_TYPE_FARGATE:
if not subnets or not security_groups:
msg = 'At least one subnet (--subnet) and one security ' \
'group (--securitygroup) definition are required ' \
'for launch type FARGATE'
raise TaskPlacementError(msg)
network_configuration = {
"awsvpcConfiguration": {
"subnets": subnets,
"securityGroups": security_groups,
"assignPublicIp": "ENABLED" if public_ip else "DISABLED"
}
}
if platform_version is None:
platform_version = 'LATEST'
return self.boto.run_task(
cluster=cluster,
taskDefinition=task_definition,
count=count,
startedBy=started_by,
overrides=overrides,
launchType=launchtype,
networkConfiguration=network_configuration,
platformVersion=platform_version,
)
return self.boto.run_task(
cluster=cluster,
taskDefinition=task_definition,
count=count,
startedBy=started_by,
overrides=overrides
)
def update_rule(self, cluster, rule, task_definition):
target = self.events.list_targets_by_rule(Rule=rule)['Targets'][0]
target['Arn'] = task_definition.arn.partition('task-definition')[0] + 'cluster/' + cluster
target['EcsParameters']['TaskDefinitionArn'] = task_definition.arn
self.events.put_targets(Rule=rule, Targets=[target])
return target['Id']
class EcsService(dict):
def __init__(self, cluster, service_definition=None, **kwargs):
self._cluster = cluster
super(EcsService, self).__init__(service_definition, **kwargs)
def set_task_definition(self, task_definition):
self[u'taskDefinition'] = task_definition.arn
@property
def cluster(self):
return self._cluster
@property
def name(self):
return self.get(u'serviceName')
@property
def task_definition(self):
return self.get(u'taskDefinition')
@property
def desired_count(self):
return self.get(u'desiredCount')
@property
def deployment_created_at(self):
for deployment in self.get(u'deployments'):
if deployment.get(u'status') == u'PRIMARY':
return deployment.get(u'createdAt')
return datetime.now()
@property
def deployment_updated_at(self):
for deployment in self.get(u'deployments'):
if deployment.get(u'status') == u'PRIMARY':
return deployment.get(u'updatedAt')
return datetime.now()
@property
def errors(self):
return self.get_warnings(
since=self.deployment_updated_at
)
@property
def older_errors(self):
return self.get_warnings(
since=self.deployment_created_at,
until=self.deployment_updated_at
)
def get_warnings(self, since=None, until=None):
since = since or self.deployment_created_at
until = until or datetime.now(tz=tzlocal())
errors = {}
for event in self.get(u'events'):
if u'unable' not in event[u'message']:
continue
if since < event[u'createdAt'] < until:
errors[event[u'createdAt']] = event[u'message']
return errors
class EcsTaskDefinition(object):
def __init__(self, containerDefinitions, volumes, family, revision, status, taskDefinitionArn,
requiresAttributes=None, taskRoleArn=None, executionRoleArn=None, compatibilities=None, tags=None,
**kwargs):
self.containers = containerDefinitions
self.volumes = volumes
self.family = family
self.revision = revision
self.status = status
self.arn = taskDefinitionArn
self.requires_attributes = requiresAttributes or {}
self.role_arn = taskRoleArn or ''
self.execution_role_arn = executionRoleArn or ''
self.tags = tags
self.additional_properties = kwargs
self._diff = []
        # the compatibilities parameter is returned from the ECS API when
        # describing a task, but may not be included when registering a new
        # task definition. Just storing it for now.
self.compatibilities = compatibilities
@property
def container_names(self):
for container in self.containers:
yield container['name']
@property
def images(self):
for container in self.containers:
yield container['name'], container['image']
@property
def family_revision(self):
return f'{self.family}:{self.revision}'
@property
def updated(self) -> bool:
return self._diff != []
@property
def diff(self):
return self._diff
def show_diff(self, show_diff: bool = False):
if show_diff:
click.secho('Task definition modified:')
for d in self._diff:
click.secho(f' {str(d)}', fg='blue')
click.secho('')
def diff_raw(self, task_b):
containers_a = {c['name']: c for c in self.containers}
containers_b = {c['name']: c for c in task_b.containers}
requirements_a = sorted([r['name'] for r in self.requires_attributes])
requirements_b = sorted([r['name'] for r in task_b.requires_attributes])
for container in containers_a:
containers_a[container]['environment'] = {e['name']: e['value'] for e in
containers_a[container].get('environment', {})}
for container in containers_b:
containers_b[container]['environment'] = {e['name']: e['value'] for e in
containers_b[container].get('environment', {})}
for container in containers_a:
containers_a[container]['secrets'] = {e['name']: e['valueFrom'] for e in
containers_a[container].get('secrets', {})}
for container in containers_b:
containers_b[container]['secrets'] = {e['name']: e['valueFrom'] for e in
containers_b[container].get('secrets', {})}
composite_a = {
'containers': containers_a,
'volumes': self.volumes,
'requires_attributes': requirements_a,
'role_arn': self.role_arn,
'execution_role_arn': self.execution_role_arn,
'compatibilities': self.compatibilities,
'additional_properties': self.additional_properties,
}
composite_b = {
'containers': containers_b,
'volumes': task_b.volumes,
'requires_attributes': requirements_b,
'role_arn': task_b.role_arn,
'execution_role_arn': task_b.execution_role_arn,
'compatibilities': task_b.compatibilities,
'additional_properties': task_b.additional_properties,
}
return list(diff(composite_a, composite_b))
def get_overrides(self):
override = dict()
overrides = []
for diff in self.diff:
if override.get('name') != diff.container:
override = dict(name=diff.container)
overrides.append(override)
if diff.field == 'command':
override['command'] = self.get_overrides_command(diff.value)
elif diff.field == 'environment':
override['environment'] = self.get_overrides_env(diff.value)
elif diff.field == 'secrets':
override['secrets'] = self.get_overrides_secrets(diff.value)
return overrides
@staticmethod
def parse_command(command):
if re.match(JSON_LIST_REGEX, command):
try:
return json.loads(command)
except JSONDecodeError as e:
raise EcsTaskDefinitionCommandError(
f"command should be valid JSON list. Got following command: {command} resulting in error: {str(e)}"
)
return command.split()
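    # Hedged example: parse_command('["nginx", "-g", "daemon off;"]') keeps the
    # JSON list intact (spaces inside an argument survive), while a plain string
    # like parse_command('nginx -g daemon') falls back to a whitespace split,
    # giving ['nginx', '-g', 'daemon'].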
@staticmethod
def get_overrides_command(command):
return EcsTaskDefinition.parse_command(command)
@staticmethod
def get_overrides_env(env):
return [{"name": e, "value": env[e]} for e in env]
@staticmethod
def get_overrides_secrets(secrets):
return [{"name": s, "valueFrom": secrets[s]} for s in secrets]
def get_tag(self, key):
for tag in self.tags:
if tag['key'] == key:
return tag['value']
return None
def set_tag(self, key: str, value: str):
if key and value:
done = False
for tag in self.tags:
if tag['key'] == key:
if tag['value'] != value:
diff = EcsTaskDefinitionDiff(
container=None,
field=f"tags['{key}']",
value=value,
old_value=tag['value']
)
self._diff.append(diff)
tag['value'] = value
done = True
break
if not done:
diff = EcsTaskDefinitionDiff(container=None, field=f"tags['{key}']", value=value, old_value=None)
self._diff.append(diff)
self.tags.append({'key': key, 'value': value})
def set_images(self, tag=None, **images):
self.validate_container_options(**images)
for container in self.containers:
if container['name'] in images:
new_image = images[container['name']]
diff = EcsTaskDefinitionDiff(
container=container['name'],
field='image',
value=new_image,
old_value=container['image']
)
self._diff.append(diff)
container['image'] = new_image
elif tag:
image_definition = container['image'].rsplit(':', 1)
new_image = f'{image_definition[0]}:{tag.strip()}'
# check if tag changes
if new_image != container['image']:
diff = EcsTaskDefinitionDiff(
container=container['name'],
field='image',
value=new_image,
old_value=container['image']
)
self._diff.append(diff)
container['image'] = new_image
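    # Hedged example: task_def.set_images('v1.2.3') re-tags every container's
    # current image (e.g. repo/web:old -> repo/web:v1.2.3), while
    # task_def.set_images(web='repo/web:v1.2.3') replaces only the image of the
    # container named "web"; unknown container names raise UnknownContainerError.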
def set_commands(self, **commands):
self.validate_container_options(**commands)
for container in self.containers:
if container['name'] in commands:
new_command = commands[container['name']]
diff = EcsTaskDefinitionDiff(
container=container['name'],
field='command',
value=new_command,
old_value=container.get('command')
)
self._diff.append(diff)
container['command'] = self.parse_command(new_command)
def set_environment(self, environment_list, exclusive=False, env_file=((None, None),)):
environment = {}
if None not in env_file[0]:
for env in env_file:
line = read_env_file(env[0], env[1])
environment_list = line + environment_list
for env in environment_list:
environment.setdefault(env[0], {})
environment[env[0]][env[1]] = env[2]
self.validate_container_options(**environment)
for container in self.containers:
if container['name'] in environment:
self.apply_container_environment(
container=container,
new_environment=environment[container['name']],
exclusive=exclusive,
)
elif exclusive is True:
self.apply_container_environment(
container=container,
new_environment={},
exclusive=exclusive,
)
def apply_container_environment(self, container, new_environment, exclusive=False):
environment = container.get('environment', {})
old_environment = {env['name']: env['value'] for env in environment}
if exclusive is True:
merged = new_environment
else:
merged = old_environment.copy()
merged.update(new_environment)
if old_environment == merged:
return
diff = EcsTaskDefinitionDiff(
container=container['name'],
field='environment',
value=merged,
old_value=old_environment
)
self._diff.append(diff)
container['environment'] = [
{"name": e, "value": merged[e]} for e in merged
]
def set_secrets(self, secrets_list, exclusive=False):
secrets = {}
for secret in secrets_list:
secrets.setdefault(secret[0], {})
secrets[secret[0]][secret[1]] = secret[2]
self.validate_container_options(**secrets)
for container in self.containers:
if container['name'] in secrets:
self.apply_container_secrets(
container=container,
new_secrets=secrets[container['name']],
exclusive=exclusive,
)
elif exclusive is True:
self.apply_container_secrets(
container=container,
new_secrets={},
exclusive=exclusive,
)
def apply_container_secrets(self, container, new_secrets, exclusive=False):
secrets = container.get('secrets', {})
old_secrets = {secret['name']: secret['valueFrom'] for secret in secrets}
if exclusive is True:
merged = new_secrets
else:
merged = old_secrets.copy()
merged.update(new_secrets)
if old_secrets == merged:
return
diff = EcsTaskDefinitionDiff(
container=container['name'],
field='secrets',
value=merged,
old_value=old_secrets
)
self._diff.append(diff)
container['secrets'] = [
{"name": s, "valueFrom": merged[s]} for s in merged
]
def validate_container_options(self, **container_options):
for container_name in container_options:
if container_name not in self.container_names:
raise UnknownContainerError(f'Unknown container: {container_name}')
def set_role_arn(self, role_arn):
if role_arn:
diff = EcsTaskDefinitionDiff(
container=None,
field='role_arn',
value=role_arn,
old_value=self.role_arn
)
self.role_arn = role_arn
self._diff.append(diff)
def set_execution_role_arn(self, execution_role_arn):
if execution_role_arn:
diff = EcsTaskDefinitionDiff(
container=None,
field='execution_role_arn',
value=execution_role_arn,
old_value=self.execution_role_arn
)
self.execution_role_arn = execution_role_arn
self._diff.append(diff)
class EcsTaskDefinitionDiff(object):
def __init__(self, container, field, value, old_value):
self.container = container
self.field = field
self.value = value
self.old_value = old_value
def __repr__(self):
if self.field == 'environment':
return '\n'.join(self._get_environment_diffs(
self.container,
self.value,
self.old_value,
))
elif self.field == 'secrets':
return '\n'.join(self._get_secrets_diffs(
self.container,
self.value,
self.old_value,
))
elif self.container:
return f'Changed {self.field} of container "{self.container}" to: "{self.value}" (was: "{self.old_value}")'
else:
return f'Changed {self.field} to: "{self.value}" (was: "{self.old_value}")'
@staticmethod
def _get_environment_diffs(container, env, old_env):
diffs = []
for name, value in env.items():
old_value = old_env.get(name)
if value != old_value or value and not old_value:
message = f'Changed environment "{name}" of container "{container}" to: "{value}"'
diffs.append(message)
for old_name in old_env.keys():
if old_name not in env.keys():
message = f'Removed environment "{old_name}" of container "{container}"'
diffs.append(message)
return diffs
@staticmethod
def _get_secrets_diffs(container, secrets, old_secrets):
diffs = []
for name, value in secrets.items():
old_value = old_secrets.get(name)
if value != old_value or not old_value:
message = f'Changed secret "{name}" of container "{container}" to: "{value}"'
diffs.append(message)
for old_name in old_secrets.keys():
if old_name not in secrets.keys():
message = f'Removed secret "{old_name}" of container "{container}"'
diffs.append(message)
return diffs
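# Illustrative example (not part of the original module): how a single diff is
# rendered by __repr__; the container and image names are made up.
#
#   diff = EcsTaskDefinitionDiff(container='webserver', field='image',
#                                value='repo/web:2.0', old_value='repo/web:1.9')
#   print(diff)
#   # Changed image of container "webserver" to: "repo/web:2.0" (was: "repo/web:1.9")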
class EcsAction(object):
def __init__(self, client: EcsClient, cluster_name: str, service_name: str):
self._client = client
self._cluster_name = cluster_name
self._service_name = service_name
try:
if service_name:
self._service = self.get_service()
except IndexError:
raise EcsConnectionError(
u'An error occurred when calling the DescribeServices '
u'operation: Service not found.'
)
except ClientError as e:
raise EcsConnectionError(str(e))
except NoCredentialsError:
raise EcsConnectionError(
u'Unable to locate credentials. Configure credentials '
u'by running "aws configure".'
)
def get_service(self):
services_definition = self._client.describe_services(
cluster_name=self._cluster_name,
service_name=self._service_name
)
return EcsService(
cluster=self._cluster_name,
service_definition=services_definition[u'services'][0]
)
def get_current_task_definition(self, service):
return self.get_task_definition(service.task_definition)
def get_task_definition(self, task_definition):
task_definition_payload = self._client.describe_task_definition(
task_definition_arn=task_definition
)
task_definition = EcsTaskDefinition(
tags=task_definition_payload.get('tags', None),
**task_definition_payload[u'taskDefinition']
)
return task_definition
def update_task_definition(self, task_definition):
response = self._client.register_task_definition(
family=task_definition.family,
containers=task_definition.containers,
volumes=task_definition.volumes,
role_arn=task_definition.role_arn,
execution_role_arn=task_definition.execution_role_arn,
tags=task_definition.tags,
additional_properties=task_definition.additional_properties
)
new_task_definition = EcsTaskDefinition(**response[u'taskDefinition'])
return new_task_definition
def deregister_task_definition(self, task_definition):
self._client.deregister_task_definition(task_definition.arn)
def update_service(self, service, desired_count=None):
response = self._client.update_service(
cluster=service.cluster,
service=service.name,
desired_count=desired_count,
task_definition=service.task_definition
)
return EcsService(self._cluster_name, response[u'service'])
def is_deployed(self, service):
if len(service[u'deployments']) != 1:
return False
running_tasks = self._client.list_tasks(
cluster_name=service.cluster,
service_name=service.name
)
if not running_tasks[u'taskArns']:
return service.desired_count == 0
running_count = self.get_running_tasks_count(
service=service,
task_arns=running_tasks[u'taskArns']
)
return service.desired_count == running_count
def get_running_tasks_count(self, service, task_arns):
running_count = 0
tasks_details = self._client.describe_tasks(
cluster_name=self._cluster_name,
task_arns=task_arns
)
for task in tasks_details[u'tasks']:
arn = task[u'taskDefinitionArn']
status = task[u'lastStatus']
if arn == service.task_definition and status == u'RUNNING':
running_count += 1
return running_count
@property
def client(self):
return self._client
@property
def service(self):
return self._service
@property
def cluster_name(self):
return self._cluster_name
@property
def service_name(self):
return self._service_name
class DeployAction(EcsAction):
def deploy(self, task_definition):
try:
self._service.set_task_definition(task_definition)
return self.update_service(self._service)
except ClientError as e:
raise EcsError(str(e))
class ScaleAction(EcsAction):
def scale(self, desired_count):
try:
return self.update_service(self._service, desired_count)
except ClientError as e:
raise EcsError(str(e))
class RunAction(EcsAction):
def __init__(self, client, cluster_name):
super(RunAction, self).__init__(client, cluster_name, None)
self._client = client
self._cluster_name = cluster_name
self.started_tasks = []
def run(self, task_definition, count, started_by, launchtype, subnets,
security_groups, public_ip, platform_version):
try:
result = self._client.run_task(
cluster=self._cluster_name,
task_definition=task_definition.family_revision,
count=count,
started_by=started_by,
overrides=dict(containerOverrides=task_definition.get_overrides()),
launchtype=launchtype,
subnets=subnets,
security_groups=security_groups,
public_ip=public_ip,
platform_version=platform_version,
)
self.started_tasks = result['tasks']
return True
except ClientError as e:
raise EcsError(str(e))
class UpdateAction(EcsAction):
def __init__(self, client):
super(UpdateAction, self).__init__(client, None, None)
class DiffAction(EcsAction):
def __init__(self, client):
super(DiffAction, self).__init__(client, None, None)
class EcsError(Exception):
pass
class EcsConnectionError(EcsError):
pass
class UnknownContainerError(EcsError):
pass
class TaskPlacementError(EcsError):
pass
class UnknownTaskDefinitionError(EcsError):
pass
class EcsTaskDefinitionCommandError(EcsError):
pass
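# Minimal end-to-end sketch (illustrative only, not part of the original module).
# It assumes an EcsClient instance has already been constructed with valid AWS
# credentials; the cluster, service and tag names below are made up.
#
#   client = EcsClient(...)  # constructor arguments omitted; see EcsClient earlier in this module
#   deploy = DeployAction(client, 'my-cluster', 'my-service')
#   task_def = deploy.get_current_task_definition(deploy.service)
#   task_def.set_images(tag='v42')
#   new_task_def = deploy.update_task_definition(task_def)
#   deploy.deploy(new_task_def)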
# --- sbm.py | emmaling27/networks-research | MIT ---
import networkx as nx
from scipy.special import comb
import attr
@attr.s
class Count(object):
"""Count class with monochromatic and bichromatic counts"""
n = attr.ib()
monochromatic = attr.ib(default=0)
bichromatic = attr.ib(default=0)
def count_edge(self, u, v):
if (u < self.n / 2) != (v < self.n / 2):
self.bichromatic += 1
else:
self.monochromatic += 1
class SBM():
"""SBM class with predicted numbers of wedges and local bridges and actual counts"""
def __init__(self, n, p, q, seed=0):
self.n = n
self.p = p
self.q = q
self.g = nx.generators.community.stochastic_block_model(
[int(self.n / 2), int(self.n / 2)],
[[p, q], [q, p]],
seed=seed)
def is_bichromatic(self, u, v):
return (u < self.n / 2) != (v < self.n / 2)
def get_bichromatic_fraction(self):
bichromatic = 0
for (x, y) in self.g.edges():
if self.is_bichromatic(x, y):
bichromatic += 1
return bichromatic / len(self.g.edges())
def is_local_bridge(self, u, v):
return not set(self.g.neighbors(u)).intersection(set(self.g.neighbors(v)))
def count_local_bridges(self):
monochromatic, bichromatic = 0, 0
for (u, v) in self.g.edges():
if self.is_local_bridge(u, v):
if self.is_bichromatic(u, v):
bichromatic += 1
else:
monochromatic += 1
return monochromatic, bichromatic
def _count_possible_edges(self, local_bridge):
count = Count(self.n)
for u in range(self.n):
for v in range(u+1, self.n):
if not self.g.has_edge(u, v) and \
(self.is_local_bridge(u, v) == local_bridge):
count.count_edge(u, v)
return count
def count_possible_local_bridges(self):
return self._count_possible_edges(local_bridge=True)
def count_possible_closures(self):
return self._count_possible_edges(local_bridge=False)
def count_wedges(self):
count = Count(self.n)
for v in self.g.nodes():
sorted_neighbors = sorted(self.g.neighbors(v))
for i in range(len(sorted_neighbors)):
for j in range(i + 1, len(sorted_neighbors)):
if not self.g.has_edge(sorted_neighbors[i], sorted_neighbors[j]):
count.count_edge(sorted_neighbors[i], sorted_neighbors[j])
return count
def predicted_wedges(self):
return Count(
self.n,
monochromatic=3 * 2 * comb(self.n/2, 3) * self.p**2 * (1-self.p) \
+ self.n * comb(self.n/2, 2) * self.q**2 * (1-self.p),
bichromatic=2 * self.n * comb(self.n/2, 2) * self.p * self.q * (1-self.q)
)
def predicted_local_bridges(self):
return Count(
self.n,
monochromatic=2 * (1-self.p) * comb(self.n/2, 2) * (1-self.p**2)**(self.n/2-2) * (1-self.q**2)**(self.n/2),
bichromatic=(1-self.q) * (self.n/2) ** 2 * (1-self.p*self.q)**(self.n-2)
)
def predicted_possible_closures(self):
return Count(
self.n,
monochromatic=2 * (1-self.p) * comb(self.n/2, 2) * (1 - (1-self.p**2)**(self.n/2-2) * (1-self.q**2)**(self.n/2)),
bichromatic=(1-self.q) * (self.n/2) ** 2 * (1 - (1-self.p*self.q)**(self.n-2))
)
def predicted_possible_edges(self):
return Count(
self.n,
monochromatic=2 * (1-self.p) * comb(self.n/2, 2),
bichromatic=(1-self.q) * (self.n/2) ** 2
        )
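# Illustrative usage sketch (not part of the original file); the parameter
# values are arbitrary examples.
if __name__ == "__main__":
    demo = SBM(n=100, p=0.1, q=0.02, seed=1)
    print("bichromatic edge fraction:", demo.get_bichromatic_fraction())
    print("observed wedges:", demo.count_wedges())
    print("predicted wedges:", demo.predicted_wedges())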
# --- src/data/graph/ops/anagram_transform_op.py | PhilHarnish/forge | MIT ---
from typing import Callable, Collection, Iterable, List, Union
from data.anagram import anagram_iter
from data.graph import _op_mixin, bloom_mask, bloom_node, bloom_node_reducer
Transformer = Callable[['bloom_node.BloomNode'], 'bloom_node.BloomNode']
_SPACE_MASK = bloom_mask.for_alpha(' ')
def merge_fn(
host: 'bloom_node.BloomNode',
sources: List['bloom_node.BloomNode'],
extra: list,
whitelist: Collection = None,
blacklist: Collection = None,
**kwargs) -> None:
del kwargs
assert len(sources) == 1
exit_node = sources[0]
assert len(extra) == 1
state = _normalize_state(exit_node, extra[0])
children = list(state)
# TODO: Need a cleaner way to inject and rerun these nodes.
if len(children) == 1:
host.op = _op_mixin.Op(_op_mixin.OP_IDENTITY, children)
else:
host.op = _op_mixin.Op(_op_mixin.OP_ADD, children)
# HACK: This duplicates BloomNode._expand, essentially.
for key, reduced in bloom_node_reducer.reduce(
host, whitelist=whitelist, blacklist=blacklist):
host.link(key, reduced)
class _AnagramTransformIndex(object):
"""Singleton object used during anagram traversal."""
def __init__(
self,
exit_node: 'bloom_node.BloomNode',
root: anagram_iter.AnagramIter) -> None:
self._exit_node = exit_node
reference = bloom_node.BloomNode()
reference.distance(0)
reference.weight(1, True)
reference_choice_paths = {}
for choice, _ in root.available():
reference_choice_paths[choice] = choice(reference)
self._reference_choice_paths = reference_choice_paths
self._child_cache = {}
def iter(
self,
anagrams: anagram_iter.AnagramIter,
) -> Iterable['bloom_node.BloomNode']:
for child_choice, child_anagrams in anagrams.items():
key = (child_choice, child_anagrams)
if key not in self._child_cache:
self._child_cache[key] = self._make_child(child_choice, child_anagrams)
yield self._child_cache[key]
def _make_child(
self,
choice: Transformer,
anagrams: anagram_iter.AnagramIter) -> 'bloom_node.BloomNode':
children = list(anagrams.available())
if not children:
return choice(self._exit_node)
elif len(children) == 1:
child_choice, child_duplicates = children[0]
node = self._exit_node
while child_duplicates:
node = child_choice(node)
child_duplicates -= 1
return choice(node)
# Compute requirements from exits.
node = self._exit_node // _AnagramState(self, anagrams)
node.provide_mask = self._exit_node.provide_mask
node.require_mask = self._exit_node.require_mask
node.lengths_mask = self._exit_node.lengths_mask
node.annotate({'anagrams': anagrams})
node.max_weight = self._exit_node.max_weight
nodes_with_spaces = []
for child_choice, child_duplicates in children:
path = self._reference_choice_paths[child_choice]
if path.require_mask and path.require_mask & _SPACE_MASK:
nodes_with_spaces.append(path)
node.provide_mask |= path.provide_mask
node.require_mask |= path.require_mask
node.lengths_mask = bloom_mask.lengths_product(
node.lengths_mask, path.lengths_mask, duplicates=child_duplicates)
if nodes_with_spaces:
# Distance and provide masks should be correct. Reset required values.
# Any route to any of the spaces is now okay but 1+ must be taken.
node.require_mask = bloom_mask.REQUIRE_NOTHING
for node_with_spaces in nodes_with_spaces:
# Only require what all node_with_spaces require.
node.require_mask &= node_with_spaces.require_mask
return choice(node)
class _AnagramState(object):
def __init__(
self,
index: _AnagramTransformIndex,
anagrams: anagram_iter.AnagramIter):
self._index = index
self._anagrams = anagrams
def __iter__(self) -> Iterable['bloom_node.BloomNode']:
yield from self._index.iter(self._anagrams)
def __repr__(self) -> str:
return '_AnagramState(%s)' % self._anagrams
__str__ = __repr__
def _normalize_state(
exit_node: 'bloom_node.BloomNode',
index: Union[Iterable, anagram_iter.AnagramIter]) -> _AnagramState:
if isinstance(index, _AnagramState):
return index
# `index` is an iterable list of ???, one-by-one these will be taken as a
# route to the `exit_node`.
initial_anagrams = anagram_iter.from_choices(index)
index = _AnagramTransformIndex(exit_node, initial_anagrams)
return _AnagramState(index, initial_anagrams)
# --- gogapi/api.py | tikki/pygogapi | MIT ---
import json
import re
import logging
import html.parser
import zlib
import requests
from gogapi import urls
from gogapi.base import NotAuthorizedError, logger
from gogapi.product import Product, Series
from gogapi.search import SearchResult
DEBUG_JSON = False
GOGDATA_RE = re.compile(r"gogData\.?(.*?) = (.+);")
CLIENT_VERSION = "1.2.17.9" # Just for their statistics
USER_AGENT = "GOGGalaxyClient/{} pygogapi/0.1".format(CLIENT_VERSION)
REQUEST_RETRIES = 3
PRODUCT_EXPANDABLE = [
"downloads", "expanded_dlcs", "description", "screenshots", "videos",
"related_products", "changelog"
]
USER_EXPANDABLE = ["friendStatus", "wishlistStatus", "blockedStatus"]
LOCALE_CODES = ["de-DE", "en-US", "fr-FR", "pt-BR", "pl-PL", "ru-RU", "zh-Hans"]
CURRENCY_CODES = [
"USD", "EUR", "GBP", "AUD", "RUB", "PLN", "CAD", "CHF", "NOK", "SEK", "DKK"
]
def find_scripts(site):
parser = ScriptParser()
parser.feed(site)
return parser.scripts
class ScriptParser(html.parser.HTMLParser):
def __init__(self):
super().__init__()
self.last_tag = None
self.scripts = []
def handle_starttag(self, tag, attrs):
self.last_tag = tag
def handle_data(self, data):
if self.last_tag == "script":
self.scripts.append(data)
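# Quick illustration (not part of the original module):
#   find_scripts('<html><script>var x = 1;</script></html>')  ->  ['var x = 1;']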
class GogApi:
def __init__(self, token=None):
self.token = token
self.locale = (None, None, None) # TODO: replace tuple
self.session = requests.Session()
self.session.headers["User-Agent"] = USER_AGENT
self.force_authorize = False
# Helpers
def request(self, method, url, authorized=True, allow_redirects=False,
**kwargs):
"""
Wrapper around requests.request that also handles authorization,
retries and logging
"""
if authorized or self.force_authorize:
if self.token is None:
raise NotAuthorizedError()
if self.token.expired():
self.token.refresh()
self.session.headers["Authorization"] = \
"Bearer " + self.token.access_token
else:
self.session.headers.pop("Authorization", None)
# Retries
retries = REQUEST_RETRIES
while retries > 0:
resp = self.session.request(
method, url, allow_redirects=allow_redirects, **kwargs)
if resp.status_code < 400:
return resp
elif 400 <= resp.status_code < 500:
break
else:
retries -= 1
resp.raise_for_status()
def get(self, *args, **kwargs):
"""
Wrapper around requests.get
"""
return self.request("GET", *args, **kwargs)
def post(self, *args, **kwargs):
"""
Wrapper around requests.post
"""
return self.request("POST", *args, **kwargs)
def request_json(self, *args, compressed=False, **kwargs):
"""
Wrapper around GogApi.request that automatically parses the
JSON response. Also does zlib decompression because GOG decided
to reinvent the wheel instead of using HTTP gzip encoding for
their content system V2.
"""
resp = self.request(*args, **kwargs)
if not compressed:
if DEBUG_JSON:
print(resp.text)
return resp.json()
else:
json_comp = resp.content
json_text = zlib.decompress(json_comp, 15).decode("utf-8")
if DEBUG_JSON:
print(json_text)
return json.loads(json_text)
def get_json(self, *args, **kwargs):
"""
Wrapper around GogApi.get with JSON parsing
"""
return self.request_json("GET", *args, **kwargs)
def get_gogdata(self, url, *args, **kwargs):
"""
Downloads a page and returns the embedded JavaScript gogData
variable.
"""
resp = self.get(url, *args, **kwargs)
gogdata = {}
for script in find_scripts(resp.text):
matches = GOGDATA_RE.finditer(resp.text)
for match in matches:
subkey = match.group(1)
value = match.group(2)
value_parsed = json.loads(value)
if subkey:
data = {subkey: value_parsed}
else:
data = value_parsed
gogdata.update(data)
return gogdata
def set_locale(self, country, currency, locale):
"""
country: ISO 3166 Alpha-2
currency: ISO 4217
locale: ISO 639 + ISO 3166 like language[_territory]
"""
if len(country) != 2:
return AttributeError("Invalid country code {}".format(country))
elif currency not in CURRENCY_CODES:
return AttributeError("Invalid currency code {}".format(locale))
elif locale not in LOCALE_CODES:
return AttributeError("Invalid locale code {}".format(locale))
self.locale = (country, currency, locale)
self.session.cookies["gog_lc"] = "_".join(self.locale)
# Web APIs
def web_game_gogdata(self, slug):
return self.get_gogdata(urls.web("game", slug), authorized=False)
def web_games_gogdata(self):
return self.get_gogdata(urls.web("account.games"))
def web_movies_gogdata(self):
return self.get_gogdata(urls.web("account.movies"))
def web_wishlist_gogdata(self):
return self.get_gogdata(urls.web("account.wishlist"))
def web_friends_gogdata(self):
return self.get_gogdata(urls.web("account.friends"))
def web_chat_gogdata(self):
return self.get_gogdata(urls.web("account.chat"))
def web_wallet_gogdata(self):
return self.get_gogdata(urls.web("wallet"))
def web_orders_gogdata(self):
return self.get_gogdata(urls.web("settings.orders"))
def web_account_gamedetails(self, game_id):
return self.get_json(urls.web("account.gamedetails", game_id))
def web_account_search(self, **query):
"""
Allowed query keys:
category: Genre
feature: Feature
hiddenFlag: Show hidden games
language: Language
mediaType: Game or movie
page: Page number
search: Search string
sortBy: Sort order
system: OS
tags: Tags
totalPages: Total Pages
"""
return self.get_json(urls.web("account.get_filtered"), params=query)
def web_search(self, **query):
"""
Allowed query keys:
category: Genre
devpub: Developer or Published
feature: Features
language: Language
mediaType: Game or movie
page: Page number
price: Price range
release: Release timeframe
search: Search string
sort: Sort order
system: OS
limit: Max results
"""
return self.get_json(
urls.web("search.filtering"), params=query, authorized=False)
def web_user_data(self):
return self.get_json(urls.web("user.data"))
def web_user_games(self):
return self.get_json(urls.web("user.games"))
def web_user_wishlist(self):
return self.get_json(urls.web("user.wishlist"))
def web_user_wishlist_add(self, game_id):
"""Returns new wishlist"""
return self.get_json(urls.web("user.wishlist.add", game_id))
def web_user_wishlist_remove(self, game_id):
"""Returns new wishlist"""
return self.get_json(urls.web("user.wishlist.remove", game_id))
def web_user_ratings(self):
return self.get_json(urls.web("user.ratings"))
def web_user_review_votes(self):
return self.get_json(urls.web("user.review_votes"))
def web_user_change_currency(self, currency):
return self.get_json(urls.web("user.change_currency", currency))
def web_user_change_language(self, lang):
return self.get_json(urls.web("user.change_language", lang))
def web_user_set_redirect_url(self, url):
"""Set redirect url after login. Only know valid url: checkout"""
return self.get(urls.web("user.set_redirect_url", params={"url": url}))
def web_user_review_guidelines(self):
return self.get_json(urls.web("user.review_guidelines"))
def web_user_public_info(self, user_id, expand=None):
if not expand:
params = None
elif expand == True:
params = {"expand": ",".join(USER_EXPANDABLE)}
else:
params = {"expand": ",".join(expand)}
return self.get_json(
urls.web("user.public.info", user_id, params=params))
def web_user_public_block(self, user_id):
return self.get_json(urls.web("user.public.block", user_id))
def web_user_public_unblock(self, user_id):
return self.get_json(urls.web("user.public.unblock", user_id))
def web_friends_remove(self, user_id):
return self.get_json(urls.web("friends.remove", user_id))
def web_friends_invite(self, user_id):
return self.get_json(urls.web("friends.invite", user_id))
def web_friends_accept(self, user_id):
return self.get_json(urls.web("friends.accept", user_id))
def web_friends_decline(self, user_id):
return self.get_json(urls.web("friends.decline", user_id))
def web_cart_get(self):
return self.get_json(urls.web("cart.get"))
def web_cart_add(self, game_id):
return self.get_json(urls.web("cart.add", game_id))
def web_cart_add_series(self, series_id):
return self.get_json(urls.web("cart.add_series", series_id))
def web_cart_remove(self, game_id):
return self.get_json(urls.web("cart.remove", game_id))
def web_reviews_search(self, game_id):
return self.get_json(urls.web("reviews.search", game_id))
def web_reviews_vote(self, game_id):
return self.get_json(urls.web("reviews.vote", game_id))
def web_reviews_report(self, game_id):
return self.get_json(urls.web("reviews.report", game_id))
def web_reviews_rate(self, game_id):
return self.get_json(urls.web("reviews.rate", game_id))
def web_reviews_add(self, game_id):
return self.get_json(urls.web("reviews.add", game_id))
def web_order_change_currency(self, order_id, currency):
return self.get_json(
urls.web("order.change_currency", order_id, currency))
def web_order_add(self, order_id, game_id):
return self.get_json(urls.web("order.add", order_id, game_id))
def web_order_remove(self, order_id, game_id):
return self.get_json(urls.web("order.remove", order_id, game_id))
def web_order_enable_store_credit(self, order_id):
return self.get_json(urls.web("order.enable_store_credit", order_id))
def web_order_disable_store_credit(self, order_id):
return self.get_json(urls.web("order.disable_store_credit", order_id))
def web_order_set_as_gift(self, order_id):
return self.get_json(urls.web("order.set_as_gift", order_id))
def web_order_set_as_not_gift(self, order_id):
return self.get_json(urls.web("order.set_as_non_gift", order_id))
def web_order_process_order(self, order_id):
return self.get_json(urls.web("order.process_order", order_id))
def web_order_payment_status(self, order_id):
return self.get_json(urls.web("order.payment_status", order_id))
def web_order_check_status(self, order_id):
return self.get_json(urls.web("order.check_status", order_id))
def web_checkout(self, order_id=None):
if order_id is None:
return self.get_json(urls.web("checkout"))
else:
return self.get_json(urls.web("checkout_id", order_id))
def web_checkout_manual(self, order_id):
return self.get_json(urls.web("checkout_manual", order_id))
# Galaxy APIs
def galaxy_file(self, game_id, dl_url):
dl_url = dl_url.lstrip("/")
return self.get_json(urls.galaxy("file", game_id, dl_url))
def galaxy_user(self, user_id=None):
if user_id is None:
user_id = self.token.user_id
return self.get_json(urls.galaxy("user", user_id))
def galaxy_friends(self, user_id=None):
if user_id is None:
user_id = self.token.user_id
return self.get_json(urls.galaxy("friends", user_id))
def galaxy_invitations(self, user_id=None):
if user_id is None:
user_id = self.token.user_id
return self.get_json(urls.galaxy("invitations", user_id))
def galaxy_status(self, user_id=None):
if user_id is None:
user_id = self.token.user_id
reqdata = {"version": CLIENT_VERSION}
self.post(urls.galaxy("status", user_id), data=reqdata)
def galaxy_statuses(self, user_ids):
user_ids_str = ",".join(user_ids)
params = {"user_id": user_ids_str}
#self.request("OPTIONS", urls.galaxy("statuses"), params=params)
return self.get_json(urls.galaxy("statuses"), params=params)
def galaxy_achievements(self, game_id, user_id=None):
if user_id is None:
user_id = self.token.user_id
return self.get_json(urls.galaxy("achievements", game_id, user_id))
def galaxy_sessions(self, game_id, user_id=None):
if user_id is None:
user_id = self.token.user_id
return self.get_json(urls.galaxy("sessions", game_id, user_id))
def galaxy_friends_achievements(self, game_id, user_id=None):
if user_id is None:
user_id = self.token.user_id
return self.get_json(
urls.galaxy("friends.achievements", game_id, user_id))
def galaxy_friends_sessions(self, game_id, user_id=None):
if user_id is None:
user_id = self.token.user_id
return self.get_json(urls.galaxy("friends.sessions", game_id, user_id))
def galaxy_product(self, game_id, expand=None):
if not expand:
params = {}
elif expand is True:
params = {"expand": ",".join(PRODUCT_EXPANDABLE)}
else:
params = {"expand": ",".join(expand)}
if self.locale[2]:
params["locale"] = self.locale[2]
return self.get_json(
urls.galaxy("product", game_id), params=params,
authorized=False)
def galaxy_products(self, game_ids, expand=None):
if not expand:
params = {}
elif expand is True:
params = {"expand": ",".join(PRODUCT_EXPANDABLE)}
else:
params = {"expand": ",".join(expand)}
if self.locale[2]:
params["locale"] = self.locale[2]
ids_string = ",".join(str(game_id) for game_id in game_ids)
params["ids"] = ids_string
return self.get_json(
urls.galaxy("products"), params=params, authorized=False)
def galaxy_secure_link(self, game_id, path, generation):
return self.get_json(
urls.galaxy("cs.securelink", game_id),
params={"path": path, "generation": generation})
def galaxy_builds(self, game_id, system):
return self.get_json(
urls.galaxy("cs.builds", game_id, system), authorized=False)
def galaxy_cs_meta(self, meta_id):
return self.get_json(
urls.galaxy("cs.meta", meta_id[0:2], meta_id[2:4], meta_id),
compressed=True,
authorized=False)
    def galaxy_client_config(self):
return self.get_json(urls.galaxy("client-config"), authorized=False)
def product(self, product_id, slug=None):
return Product(self, product_id, slug)
def search(self, **query):
search_data = self.web_search(**query)
return SearchResult(self, query, search_data)
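# Minimal usage sketch (illustrative only, not part of the original module).
# The endpoints used below do not require authentication; the product id is a
# made-up example value.
#
#   api = GogApi()                      # anonymous session (no token)
#   api.set_locale('US', 'USD', 'en-US')
#   product_data = api.galaxy_product(1207658924, expand=True)
#   results = api.web_search(mediaType='game', search='witcher')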
# --- setup.py | gibsonMatt/stacks-pairwise | MIT ---
import pathlib
import os
from setuptools import setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# specify requirements of your package here
REQUIREMENTS = ['biopython', 'numpy', 'pandas']
setup(name='stacksPairwise',
version='0.0.0',
description='Calculate pairwise divergence (pairwise pi) from Stacks `samples.fa` output fle',
long_description=README,
long_description_content_type="text/markdown",
url='https://github.com/gibsonmatt/stacks-pairwise',
author='Matt Gibson',
author_email='[email protected]',
license='MIT',
packages=['stacksPairwise'],
install_requires=REQUIREMENTS,
entry_points={
"console_scripts": [
"stacksPairwise=stacksPairwise.__main__:main"
]
},
keywords='genetics genotyping sequencing Stacks'
)
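# Typical ways to consume this setup script (illustrative):
#   pip install .             # install stacksPairwise locally
#   python setup.py sdist     # build a source distribution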
# --- csv_experiment.py | komax/spanningtree-crossingnumber | MIT ---
#! /usr/bin/env python
import os
import sys
args = sys.argv[1:]
os.system('python -O -m spanningtree.csv_experiment_statistics ' +
' '.join(args))
# --- projects/tutorials/object_nav_ithor_dagger_then_ppo_one_object.py | klemenkotar/dcrl | MIT ---
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import (
Builder,
PipelineStage,
TrainingPipeline,
LinearDecay,
)
from projects.tutorials.object_nav_ithor_ppo_one_object import (
ObjectNavThorPPOExperimentConfig,
)
class ObjectNavThorDaggerThenPPOExperimentConfig(ObjectNavThorPPOExperimentConfig):
"""A simple object navigation experiment in THOR.
Training with DAgger and then PPO.
"""
@classmethod
def tag(cls):
return "ObjectNavThorDaggerThenPPO"
@classmethod
def training_pipeline(cls, **kwargs):
dagger_steos = int(1e4)
ppo_steps = int(1e6)
lr = 2.5e-4
num_mini_batch = 2 if not torch.cuda.is_available() else 6
update_repeats = 4
num_steps = 128
metric_accumulate_interval = cls.MAX_STEPS * 10 # Log every 10 max length tasks
save_interval = 10000
gamma = 0.99
use_gae = True
gae_lambda = 1.0
max_grad_norm = 0.5
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=metric_accumulate_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={
"ppo_loss": PPO(clip_decay=LinearDecay(ppo_steps), **PPOConfig),
"imitation_loss": Imitation(), # We add an imitation loss.
},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
teacher_forcing=LinearDecay(
startp=1.0, endp=0.0, steps=dagger_steos,
),
max_stage_steps=dagger_steos,
),
PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps,),
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
),
)
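# Illustrative sketch (not part of the original file): the config is normally
# picked up by the allenact launcher, but the pipeline can also be built directly.
#
#   config = ObjectNavThorDaggerThenPPOExperimentConfig()
#   pipeline = config.training_pipeline()
#   print(config.tag())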
# --- BioCAT/src/Calculating_scores.py | DanilKrivonos/BioCAT-nrp-BIOsynthesis-Caluster-Analyzing-Tool | MIT ---
from numpy import array
from pickle import load
from pandas import read_csv
import os
from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper
# Importing random forest model
modelpath = os.path.dirname(os.path.abspath(__file__)) + '/RFC.dump'
Rf = load(open(modelpath, 'rb'))
# The function generates a list of shuffled matrices
def make_shuffle_matrix(matrix, cpu, iterat):
"""
    The function generates a collection of shuffled matrices.
Parameters
----------
matrix : pandas DataFrame
PSSM profile.
cpu : int
        Number of threads used.
iterat : int
Number of iterations of shuffling.
Returns
-------
module_shuffling_matrix : list
List of matrix, shuffled by module.
substrate_shuffling_matrix : list
List of matrix, shuffled by substrate.
"""
module_shuffling_matrix = multi_thread_shuffling(matrix, ShufflingType='module', iterations=iterat, threads=cpu)
substrate_shuffling_matrix = multi_thread_shuffling(matrix, ShufflingType='substrate', iterations=iterat, threads=cpu)
return module_shuffling_matrix, substrate_shuffling_matrix
# The function finds the sequence with the maximum possible value resulting from the alignment
def get_MaxSeq(matrix, variant_seq):
"""
    The function finds the substrate sequence with the maximum possible alignment score.
Parameters
----------
matrix : pandas DataFrame
PSSM profile.
variant_seq : list
Variant of core peptide chain.
Returns
-------
    MaxSeq_full : list
        Best-scoring substrate for every module.
    MaxSeq_nan : list
        The same sequence with 'nan' in the positions where the query sequence has 'nan'.
"""
MaxSeq = []
subs = matrix.keys()[1: ]
    # Find the sequence which has the maximum alignment score
for idx in matrix.index:
MAX_value = max(list(matrix.iloc[idx][1:]))
for key in subs:
if matrix[key][idx] == MAX_value:
MaxSeq.append(key) # If two smonomer have same value
break
# Making two variants of MaxSeq
MaxSeq_full = MaxSeq.copy()
MaxSeq_nan = MaxSeq.copy()
for max_sub_idx in range(len(MaxSeq)):
if variant_seq[max_sub_idx] == 'nan':
MaxSeq_nan[max_sub_idx] = 'nan' # Adding nan to MaxSeq
return MaxSeq_full, MaxSeq_nan
# The function gives an information about clusters
def get_cluster_info(table, BGC_ID, target_file):
"""
    The function returns information about a cluster.
Parameters
----------
table : pandas DataFrame
Table with meta inforamtion about NRPS clusters.
BGC_ID : str
PSSM cluster ID.
target_file : pandas DataFrame
PSSM profile.
Returns
-------
Name : str
Cluster ID.
Coord_cluster : str
Coordinate of cluster.
strand : str
Strand of cluster.
"""
for ind in table[table['ID'].str.contains(BGC_ID)].index:
Name = table[table['ID'].str.contains(target_file.split('.')[0].split('_A_')[1])]['Name'][ind]
Coord_cluster = table['Coordinates of cluster'][ind]
strand = table['Gen strand'][ind]
break
return Name, Coord_cluster, strand
# Calculate scores
def calculate_scores(variant_seq, matrix, substrate_shuffling_matrix, module_shuffling_matrix, cpu, iterat):
"""
Calculating scores.
Parameters
----------
variant_seq : list
Variant of core peptide chain.
matrix : pandas DataFrame
PSSM profile.
substrate_shuffling_matrix : list
List of matrix, shuffled by substrate.
module_shuffling_matrix : list
List of matrix, shuffled by module.
cpu : int
Number of threads used.
iterat : int
Number of iterations of shuffling.
Returns
-------
Sln_score : float
Mln_score : float
Slt_score : float
Mlt_score : float
Sdn_score : float
Mdn_score : float
Sdt_score : float
Mdt_score : float
Scores, which calculated with shuffling matrix by different variants.
M - module shuffling S - substrate shuffling
l - logarithmic transformation of score d - raw score
n - MaxSeq with nan replacement t - MaxSeq without nan replacement
Relative_score : float
Relative score (Probability of target class)
Binary : float
Binary score of cluster matching.
"""
# Finding suquence with maximum possible value, results from alignment
MaxSeq_full, MaxSeq_nan = get_MaxSeq(matrix, variant_seq)
# Calculating shuffled scores
Sln_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, substrate_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu))
Mln_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, module_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu))
Slt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, substrate_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu))
Mlt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, module_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu))
Sdn_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, substrate_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu))
Mdn_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, module_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu))
Sdt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, substrate_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu))
Mdt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, module_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu))
# Calculating scores for target sequence
log_target_score = get_score(variant_seq, matrix, type_value='log')
non_log_target_score = get_score(variant_seq, matrix, type_value=None)
# Calculating features scores
Sln_score = len(Sln_shuffled_score[Sln_shuffled_score < log_target_score])/len(Sln_shuffled_score)
Mln_score = len(Mln_shuffled_score[Mln_shuffled_score < log_target_score])/len(Mln_shuffled_score)
Slt_score = len(Slt_shuffled_score[Slt_shuffled_score < log_target_score])/len(Slt_shuffled_score)
Mlt_score = len(Mlt_shuffled_score[Mlt_shuffled_score < log_target_score])/len(Mlt_shuffled_score)
Sdn_score = len(Sdn_shuffled_score[Sdn_shuffled_score < non_log_target_score])/len(Sdn_shuffled_score)
Mdn_score = len(Mdn_shuffled_score[Mdn_shuffled_score < non_log_target_score])/len(Mdn_shuffled_score)
Sdt_score = len(Sdt_shuffled_score[Sdt_shuffled_score < non_log_target_score])/len(Sdt_shuffled_score)
Mdt_score = len(Mdt_shuffled_score[Mdt_shuffled_score < non_log_target_score])/len(Mdt_shuffled_score)
# Calculating Relative score
Relative_score = round(Rf.predict_proba([[Sln_score, Mln_score,
Sdn_score, Mdn_score,
Sdt_score, Mdt_score,
Slt_score, Mlt_score
]])[0][1], 3)
Binary = Rf.predict([[Sln_score, Mln_score,
Sdn_score, Mdn_score,
Sdt_score, Mdt_score,
Slt_score, Mlt_score
]])[0]
return Sln_score, Mln_score, Slt_score, Mlt_score, Sdn_score, Mdn_score, Sdt_score, Mdt_score, Relative_score, Binary
def give_results(tsv_out, folder, files, table, ID, PeptideSeq, skip, cpu, iterat):
"""
    The function runs the scoring for every PSSM cluster and records the results.
Parameters
----------
tsv_out : dict
Empty dictionary for adding results.
folder : str
Path to PSSMs.
files : list
List of PSSMs.
table : pandas DataFrame
Table with meta inforamtion about NRPS clusters.
ID : str
Name of substance.
PeptideSeq : dict
Core peptide chains for different biosynthesis types (e.g. A, B, or C).
    skip : int
Number of presumptive skip.
cpu : int
Number of threads used.
iterat : int
Number of iterations of shuffling.
Returns
-------
tsv_out : dict
Full dictionary for adding results.
"""
for target_file in files:
try:
BGC_ID = target_file.split('.')[0].split('_A_')[1]
except:
continue
if '_A_' not in target_file:
continue
Name, Coord_cluster, strand = get_cluster_info(table, BGC_ID, target_file) # Getting information about cluster
BGC = read_csv(folder + target_file, sep='\t')
# Skipping mode
if skip == 0:
BGC = [BGC]
else:
            BGC = skipper(BGC, skip)
for matrix in BGC:
# Check quality of matrix
if len(matrix) == 1:
continue
check = 0
values = matrix.drop(matrix.columns[0], axis=1).values
for i in values:
                if not any(i):  # the whole row is zero
check += 1
            if check == len(values): # If this condition is true, the matrix contains only unrecognized monomers
continue
# Generating shuffling matrix
module_shuffling_matrix, substrate_shuffling_matrix = make_shuffle_matrix(matrix, cpu, iterat)
for BS_type in PeptideSeq:# For every biosynthesis profile pathways
if PeptideSeq[BS_type] == None: # If in sequence only nan monomers
continue
if len(PeptideSeq[BS_type]) == 0: # If have not the variant
continue
# Check correctness of PeptideSeq
length_max= get_max_aminochain(PeptideSeq[BS_type])
EPs = make_combine(PeptideSeq[BS_type], length_max, matrix, delta=3)
                if EPs is None: # If the length of the sequence can't be scaled to the cluster size
continue
for variant_seq in EPs:
Sln_score, Mln_score, Slt_score, Mlt_score, Sdn_score, Mdn_score, Sdt_score, Mdt_score, Relative_score, Binary = calculate_scores(variant_seq, matrix, substrate_shuffling_matrix, module_shuffling_matrix, cpu, iterat)
                    # Recording results into the output dictionary
tsv_out['Chromosome ID'].append(Name)
tsv_out['Coordinates of cluster'].append(Coord_cluster)
tsv_out['Strand'].append(strand)
tsv_out['Substance'].append(ID)
tsv_out['BGC ID'].append(BGC_ID)
tsv_out['Putative linearized NRP sequence'].append('--'.join(variant_seq))
tsv_out['Biosynthesis profile'].append('Type {}'.format(BS_type))
                    tsv_out['Sln score'].append(Sln_score) # substrate-shuffled matrix, log score, nan kept in the best possible sequence
                    tsv_out['Mln score'].append(Mln_score) # module-shuffled matrix, log score, nan kept in the best possible sequence
                    tsv_out['Sdn score'].append(Sdn_score) # substrate-shuffled matrix, raw score, nan kept in the best possible sequence
                    tsv_out['Mdn score'].append(Mdn_score) # module-shuffled matrix, raw score, nan kept in the best possible sequence
                    tsv_out['Sdt score'].append(Sdt_score) # substrate-shuffled matrix, raw score, full best possible sequence
                    tsv_out['Mdt score'].append(Mdt_score) # module-shuffled matrix, raw score, full best possible sequence
                    tsv_out['Slt score'].append(Slt_score) # substrate-shuffled matrix, log score, full best possible sequence
                    tsv_out['Mlt score'].append(Mlt_score) # module-shuffled matrix, log score, full best possible sequence
tsv_out['Relative score'].append(Relative_score) #Final score
tsv_out['Binary'].append(Binary) #Binary value
return tsv_out
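# Minimal usage sketch (illustrative only; the folder, table and sequences below
# are placeholders, not real data):
#
#   columns = ['Chromosome ID', 'Coordinates of cluster', 'Strand', 'Substance',
#              'BGC ID', 'Putative linearized NRP sequence', 'Biosynthesis profile',
#              'Sln score', 'Mln score', 'Sdn score', 'Mdn score', 'Sdt score',
#              'Mdt score', 'Slt score', 'Mlt score', 'Relative score', 'Binary']
#   tsv_out = {col: [] for col in columns}
#   tsv_out = give_results(tsv_out, './pssm/', os.listdir('./pssm/'), table,
#                          'my_substance', PeptideSeq, skip=0, cpu=4, iterat=100)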
# --- deal/linter/_extractors/returns.py | m4ta1l/deal | MIT ---
# built-in
from typing import Optional
# app
from .common import TOKENS, Extractor, Token, traverse
from .value import UNKNOWN, get_value
get_returns = Extractor()
inner_extractor = Extractor()
def has_returns(body: list) -> bool:
for expr in traverse(body=body):
if isinstance(expr, TOKENS.RETURN + TOKENS.YIELD):
return True
return False
@get_returns.register(*TOKENS.RETURN)
def handle_return(expr) -> Optional[Token]:
value = get_value(expr=expr.value)
if value is UNKNOWN:
return None
return Token(value=value, line=expr.lineno, col=expr.value.col_offset)
@get_returns.register(*TOKENS.YIELD)
def handle_yield(expr) -> Optional[Token]:
value = get_value(expr=expr.value)
if value is UNKNOWN:
return None
return Token(value=value, line=expr.lineno, col=expr.value.col_offset)
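# Quick illustration (not part of the original module; assumes `traverse` yields
# the statements of the body it is given):
#
#   import ast
#   fn = ast.parse("def f():\n    return 1").body[0]
#   has_returns(fn.body)  # -> True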
# --- qubiter/device_specific/chip_couplings_ibm.py | yourball/qubiter | Apache-2.0 ---
def aaa():
# trick sphinx to build link in doc
pass
# retired
ibmqx2_c_to_tars =\
{
0: [1, 2],
1: [2],
2: [],
3: [2, 4],
4: [2]
} # 6 edges
# retired
ibmqx4_c_to_tars =\
{
0: [],
1: [0],
2: [0, 1, 4],
3: [2, 4],
4: []
} # 6 edges
# retired
ibmq16Rus_c_to_tars = \
{
0: [],
1: [0, 2],
2: [3],
3: [4, 14],
4: [],
5: [4],
6: [5, 7, 11],
7: [10],
8: [7],
9: [8, 10],
10: [],
11: [10],
12: [5, 11, 13],
13: [4, 14],
14: [],
15: [0, 2, 14]
} # 22 edges
ibm20AustinTokyo_c_to_tars = \
{
0: [1, 5],
1: [0, 2, 6, 7],
2: [1, 3, 6, 7],
3: [2, 4, 8, 9],
4: [3, 8, 9],
5: [0, 6, 10, 11],
6: [1, 2, 5, 7, 10, 11],
7: [1, 2, 6, 8, 12, 13],
8: [3, 4, 7, 9, 12, 13],
9: [3, 4, 8, 14],
10: [5, 6, 11, 15],
11: [5, 6, 10, 12, 16, 17],
12: [7, 8, 11, 13, 16, 17],
13: [7, 8, 12, 14, 18, 19],
14: [9, 13, 18, 19],
15: [10, 16],
16: [11, 12, 15, 17],
17: [11, 12, 16, 18],
18: [13, 14, 17, 19],
19: [13, 14, 18]
} # 86 edges
ibmq5YorktownTenerife_c_to_tars = \
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 4],
3: [2, 4],
4: [2, 3]
} # 12 edges
ibmq14Melb_c_to_tars = \
{
0: [1],
1: [0, 2, 13],
2: [1, 3, 12],
3: [2, 4, 11],
4: [3, 5, 10],
5: [4, 6, 9],
6: [5, 8],
7: [8],
8: [6, 7, 9],
9: [5, 8, 10],
10: [4, 9, 11],
11: [3, 10, 12],
12: [2, 11, 13],
13: [1, 12]
} # 36 edges
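# Quick sanity check (illustrative, not part of the original module): the edge
# counts noted in the comments can be recomputed from the adjacency dicts, e.g.
#
#   sum(len(targets) for targets in ibmq14Melb_c_to_tars.values())  # -> 36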
# --- Template.py | rainshen49/citadel-trading-comp | MIT ---
import signal
import requests
import time
from math import floor
shutdown = False
MAIN_TAKER = 0.0065
MAIN_MAKER = 0.002
ALT_TAKER = 0.005
ALT_MAKER = 0.0035
TAKER = (MAIN_TAKER + ALT_TAKER)*2
MAKER = MAIN_MAKER + ALT_MAKER
TAKEMAIN = MAIN_TAKER - ALT_MAKER
TAKEALT = ALT_TAKER - MAIN_MAKER
BUFFER = 0.01
NaN = float('nan')
class ApiException(Exception):
pass
class Book(object):
def __init__(self, sym, json):
global NaN
self.sym = sym
self.json = json
# could be cached
self.bids = self.json['bids']
self.asks = self.json['asks']
self.ask_price = 1
self.asks_quantity_left = 0
self.bid_price = 1
self.bids_quantity_left = 0
if self.bids:
self.bid_price = self.bids[0]['price']
if self.asks:
self.ask_price = self.asks[0]['price']
def bids_room(self):
if self.bids:
quantity = sum([b['quantity']
for b in self.bids if b['price'] == self.bid_price])
filled = sum([b['quantity_filled']
for b in self.bids if b['price'] == self.bid_price])
return quantity - filled
else:
return 0
def asks_room(self):
if self.asks:
quantity = sum([b['quantity']
for b in self.asks if b['price'] == self.ask_price])
filled = sum([b['quantity_filled']
for b in self.asks if b['price'] == self.ask_price])
return quantity - filled
else:
return 0
class Limits(dict):
def __init__(self, json):
self.update(json)
self.gross_limit = int(json['gross_limit'])
self.net_limit = int(json['net_limit'])
self.gross = int(json['gross'])
self.net = int(json['net'])
class OHLC(dict):
def __init__(self, sym, json):
self.sym = sym
self.update(json)
self.tick = json['tick']
self.open = json['open']
self.high = json['high']
self.low = json['low']
self.close = json['close']
class Shock(dict):
def __init__(self, news, currtick):
self.ticker = news['ticker']
self.elapsed = currtick - news['tick']
headline = news['headline']
try:
self.amount = float(headline[-6:].replace('$', ''))
except:
self.amount = 0
class Session(object):
def __init__(self, url, key):
self.url = url
self.key = key
self.tick = -1
def __enter__(self):
self.session = requests.Session()
self.session.headers.update({'X-API-Key': self.key})
return self
def __exit__(self, type, value, traceback):
self.session.close()
def get_tick(self):
while True:
resp = self.session.get(self.url + '/v1/case', params=None)
if not resp.ok:
raise ApiException('could not get tick: ' + str(resp))
json = resp.json()
if json['status'] == 'STOPPED' or shutdown:
return False
if json['tick'] != self.tick:
self.tick = json['tick']
print('.', self.tick)
return True
# this timer is unnecessary, network latency should be enough
time.sleep(0.1)
def get_book(self, sym):
resp = self.session.get(
self.url + '/v1/securities/book', params={'ticker': sym})
if not resp.ok:
raise ApiException('could not get book: ' + str(resp))
return Book(sym, resp.json())
def send_order(self, sym, side, price, size):
resp = self.session.post(self.url + '/v1/orders', params={
'ticker': sym, 'type': 'LIMIT', 'action': side, 'quantity': size, 'price': price})
if resp.ok:
print('sent order', side, sym, size, '@', price)
else:
print('failed to send order', side, sym,
size, '@', price, ':', resp.text)
def getLimit(self):
resp = self.session.get(self.url+'/v1/limits')
if not resp.ok:
raise ApiException('could not get limit: '+str(resp))
return Limits(resp.json()[0])
def getSecurities(self, sym=None):
if sym is None:
resp = self.session.get(self.url+'/v1/securities')
else:
resp = self.session.get(
self.url+'/v1/securities', params={'ticker': sym})
if not resp.ok:
raise ApiException('could not get position: '+str(resp))
json = resp.json()
return {sec['ticker']: {k: sec[k] for k in [
"position",
"vwap",
"nlv",
"last",
"bid",
"bid_size",
"ask",
"ask_size",
"unrealized",
"realized"
]} for sec in json}
def get_OHLC(self, sym, ticks=50):
resp = self.session.get(
self.url + '/v1/securities/history', params={'ticker': sym,'limit':ticks})
if not resp.ok:
raise ApiException('could not get OHLC: ' + str(resp))
return [OHLC(sym, ohlc) for ohlc in resp.json()]
def buy(self, sym, price, size):
self.send_order(sym, 'BUY', price, size)
def sell(self, sym, price, size):
self.send_order(sym, 'SELL', price, size)
def send_market(self, sym, side, size):
resp = self.session.post(self.url + '/v1/orders', params={
'ticker': sym, 'type': 'MARKET', 'action': side, 'quantity': size})
if resp.ok:
json = resp.json()
print('market order', side, sym, size, '@', json['vwap'])
return json['vwap']
else:
print('failed to send order', side, sym,
size, '@Market:', resp.text)
return 0
def buyM(self, sym, size):
return self.send_market(sym, 'BUY', size)
def sellM(self, sym, size):
return self.send_market(sym, 'SELL', size)
def getNews(self):
resp = self.session.get(self.url + '/v1/news', params={'limit': 10})
if not resp.ok:
raise ApiException('failed to get news', resp.text)
else:
json = resp.json()
# only care about recent news
return [Shock(news, self.tick) for news in json if news['tick'] > self.tick-4]
def getTrader(self):
resp = self.session.get(self.url + '/v1/trader')
if not resp.ok:
raise ApiException('failed to get trader info', resp.text)
else:
json = resp.json()
return json
def main():
# price does change in every tick
# check position
# plain arbitradge
# index arbitrage
# shock handling
# wave riding
# pairTickers = [('WMT-M', 'WMT-A'), ('CAT-M', 'CAT-A'), ('MMM-M', 'MMM-A')]
with Session('http://localhost:9998', 'VHK3DEDE') as session:
while session.get_tick():
try:
shock_runner(session)
exchange_arbitrage(session, "WMT-M", "WMT-A")
exchange_arbitrage(session, "CAT-M", "CAT-A")
exchange_arbitrage(session, "MMM-M", "MMM-A")
index_arbitrage(session, ['WMT', 'MMM', 'CAT'])
except Exception as ex:
print("error", str(ex))
# trader = session.getTrader()
# print(trader['nlv'])
# TODO: position cleaner: try to reduce gross position loss-free
# TODO: implement range runner for the last x ticks
def avg(arr):
return sum(arr)/float(len(arr))
def window_trend(left,right):
leftavg = avg(left)
rightavg = avg(right)
if rightavg > leftavg:
return 1
elif rightavg < leftavg:
return -1
else:
return 0
def splitarr(arr):
n = len(arr)
left = arr[:n//2]
right = arr[n//2:]
return left,right
def wwindow_trend(prices):
left, right = splitarr(prices)
trend = window_trend(left,right)
lleft, lright = splitarr(left)
rleft, rright = splitarr(right)
trendl = window_trend(lleft,lright)
trendr = window_trend(rleft,rright)
return trend + trendl + trendr
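# Illustrative example (not part of the original strategy): a strictly rising
# series trends up in the full window and in both half windows, so the score is 3.
#
#   wwindow_trend([1, 2, 3, 4, 5, 6, 7, 8])  # -> 3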
def trend_runner(session, ticker):
if session.tick<20:
return
# short term trend
prices = session.get_OHLC(ticker, 20)
highs = [price.high for price in prices]
lows = [price.low for price in prices]
highTrend = wwindow_trend(highs)
lowTrend = wwindow_trend(lows)
if highTrend+lowTrend < -4:
# volatile, but no trend
session.buyM(ticker,1000)
if highTrend+lowTrend > 4:
session.sellM(ticker,1000)
print(ticker,"short hightrend",highTrend,"lowtrend",lowTrend)
if session.tick<100:
return
prices = session.get_OHLC(ticker, 100)
highs = [price.high for price in prices]
lows = [price.low for price in prices]
highTrend = wwindow_trend(highs)
lowTrend = wwindow_trend(lows)
# grown too much
if highTrend+lowTrend < -4:
# volatile, but no trend
session.sellM(ticker,1000)
# dropped too much
if highTrend+lowTrend > 4:
session.buyM(ticker,1000)
print(ticker,"long hightrend",highTrend,"lowtrend",lowTrend)
def shock_runner(session):
shocks = session.getNews()
quantity = 50000
for shock in sorted(shocks, key=lambda s: s.elapsed):
Mticker = shock.ticker+"-M"
Aticker = shock.ticker+"-A"
if shock.elapsed < 2:
if shock.amount > MAIN_TAKER + BUFFER*2:
session.buyM(Mticker, quantity)
session.buyM(Aticker, quantity)
elif - shock.amount > MAIN_TAKER + BUFFER*2:
session.sellM(Mticker, quantity)
session.sellM(Aticker, quantity)
print('shock', shock.ticker, shock.amount)
if shock.elapsed == 2:
if shock.amount > MAIN_TAKER + BUFFER*2:
session.sellM(Mticker, quantity)
session.sellM(Aticker, quantity)
elif - shock.amount > MAIN_TAKER + BUFFER*2:
session.buyM(Mticker, quantity)
session.buyM(Aticker, quantity)
print('post shock', shock.ticker, shock.amount)
TAKER4 = MAIN_TAKER * 5
def index_arbitrage(session, tickers):
secs = session.getSecurities()
ETF = secs['ETF']
etfBid = ETF['bid']
etfAsk = ETF['ask']
bestBids = {}
bestBidsQ = {}
bestAsks = {}
bestAsksQ = {}
for ticker in tickers:
tickerM = ticker+"-M"
tickerA = ticker+"-A"
Mticker = secs[tickerM]
Aticker = secs[tickerA]
Mbid = Mticker['bid']
Abid = Aticker['bid']
Mask = Mticker['ask']
Aask = Aticker['ask']
if Mbid >= Abid:
bestBids[tickerM] = Mbid
bestBidsQ[tickerM] = Mticker['bid_size']
else:
bestBids[tickerA] = Abid
bestBidsQ[tickerA] = Aticker['bid_size']
if Mask <= Aask:
bestAsks[tickerM] = Mask
bestAsksQ[tickerM] = Mticker['ask_size']
else:
bestAsks[tickerA] = Aask
bestAsksQ[tickerA] = Aticker['ask_size']
compositBid = sum(bestBids.values())
compositBidQ = min(bestBidsQ.values())
compositAsk = sum(bestAsks.values())
compositAskQ = min(bestAsksQ.values())
boughtprice = 0
soldprice = 0
if etfBid - compositAsk > TAKER4+BUFFER:
quantity = ETF['bid_size'] if ETF['bid_size'] < compositAskQ else compositAskQ
if quantity == 0:
return
quantity = min([quantity, 50000])
soldprice = session.sellM('ETF', quantity)
for ticker in bestAsks:
boughtprice += session.buyM(ticker, quantity)
print('Plan ETF', etfBid, 'Stocks', compositAsk)
print('Actual ETF', soldprice, 'Stocks', boughtprice)
elif compositBid - etfAsk > TAKER4+BUFFER:
quantity = ETF['ask_size'] if ETF['ask_size'] < compositBidQ else compositBidQ
if quantity == 0:
return
quantity = min([quantity, 50000])
for ticker in bestBids:
soldprice += session.sellM(ticker, quantity)
boughtprice = session.buyM('ETF', quantity)
print('Plan Stocks', compositBid, 'ETF', etfAsk)
print('Actual Stocks', soldprice, 'ETF', boughtprice)
# TODO: send limit orders and use market to cover unfilled ones after
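# Illustrative numbers: if the ETF bid is 100.00 while the cheapest underlying asks
# sum to 99.50, and the combined taker fees plus buffer (TAKER4+BUFFER) come to 0.40,
# selling the ETF against buying the basket locks in roughly 0.10 per share.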
def exchange_arbitrage(session, mticker, aticker):
    global NaN
    mbook = session.get_book(mticker)
    masks_room = mbook.asks_room()
    mbids_room = mbook.bids_room()
    abook = session.get_book(aticker)
    aasks_room = abook.asks_room()
    abids_room = abook.bids_room()
    # a lot of room, make market orders
    if mbook.bid_price - abook.ask_price > TAKER+BUFFER*2:
        # selling on M and buying on A, so size against M's bids and A's asks
        quantity = aasks_room if aasks_room < mbids_room else mbids_room
        quantity = min([quantity, 50000])
        session.sellM(mbook.sym, quantity)
        session.buyM(abook.sym, quantity)
    elif abook.bid_price - mbook.ask_price > TAKER+BUFFER*2:
        # selling on A and buying on M, so size against A's bids and M's asks
        quantity = abids_room if abids_room < masks_room else masks_room
        quantity = min([quantity, 50000])
        session.sellM(abook.sym, quantity)
        session.buyM(mbook.sym, quantity)
    # only a little room, make limit orders
    if mbook.bid_price - abook.ask_price > BUFFER:
        quantity = aasks_room if aasks_room < mbids_room else mbids_room
        quantity = min([quantity, 50000])
        session.sell(mbook.sym, mbook.bid_price, quantity)
        session.buy(abook.sym, abook.ask_price, quantity)
    elif abook.bid_price - mbook.ask_price > BUFFER:
        quantity = abids_room if abids_room < masks_room else masks_room
        quantity = min([quantity, 50000])
        session.sell(abook.sym, abook.bid_price, quantity)
        session.buy(mbook.sym, mbook.ask_price, quantity)
def sigint(signum, frame):
global shutdown
signal.signal(signal.SIGINT, signal.SIG_DFL)
shutdown = True
if __name__ == '__main__':
signal.signal(signal.SIGINT, sigint)
main()
| 33.407323 | 116 | 0.558052 | 6,541 | 0.448044 | 0 | 0 | 0 | 0 | 0 | 0 | 2,029 | 0.138982 |
e078ffec67d1b2046e248c3ee5d65b353731cbf4 | 1,479 | py | Python | examples/basic/wire_feedthrough.py | souviksaha97/spydrnet-physical | b07bcc152737158ea7cbebf0ef844abe49d29c5e | [
"BSD-3-Clause"
] | null | null | null | examples/basic/wire_feedthrough.py | souviksaha97/spydrnet-physical | b07bcc152737158ea7cbebf0ef844abe49d29c5e | [
"BSD-3-Clause"
] | null | null | null | examples/basic/wire_feedthrough.py | souviksaha97/spydrnet-physical | b07bcc152737158ea7cbebf0ef844abe49d29c5e | [
"BSD-3-Clause"
] | null | null | null | """
==============================================
Generating feedthrough from a single instance
==============================================
This example demonstrates how to generate a feedthrough wire connection for
a given scalar or vector wire.
**Initial Design**
.. hdl-diagram:: ../../../examples/basic/_initial_design.v
:type: netlistsvg
:align: center
:module: top
**Output1** ``wire0`` feedthrough from ``inst_2_1``
.. hdl-diagram:: ../../../examples/basic/_output_wire.v
:type: netlistsvg
:align: center
:module: top
**Output2** ``bus_in`` feedthrough from ``inst_1_0``
.. hdl-diagram:: ../../../examples/basic/_output_bus.v
:type: netlistsvg
:align: center
:module: top
"""
from os import path
import spydrnet as sdn
import spydrnet_physical as sdnphy
netlist = sdnphy.load_netlist_by_name('basic_hierarchy')
top = netlist.top_instance.reference
cable0 = next(top.get_cables("wire0"))
inst2 = next(top.get_instances("inst_2_0"))
sdn.compose(netlist, '_initial_design.v', skip_constraints=True)
top.create_feedthrough(inst2, cable0)
top.create_unconn_wires()
sdn.compose(netlist, '_output_wire.v', skip_constraints=True)
netlist = sdnphy.load_netlist_by_name('basic_hierarchy')
top = netlist.top_instance.reference
bus_in = next(top.get_cables("bus_in"))
inst1 = next(top.get_instances("inst_1_0"))
cables = top.create_feedthrough(inst1, bus_in)
top.create_unconn_wires()
sdn.compose(netlist, '_output_bus.v', skip_constraints=True)
| 24.65 | 74 | 0.699797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 829 | 0.560514 |
e079004173a435849592703f1baaf8e8d87ed079 | 9,131 | py | Python | workflows/workflow.py | sunnyfloyd/panderyx | 82f03625159833930ff044a43a6619ab710ff159 | [
"MIT"
] | null | null | null | workflows/workflow.py | sunnyfloyd/panderyx | 82f03625159833930ff044a43a6619ab710ff159 | [
"MIT"
] | null | null | null | workflows/workflow.py | sunnyfloyd/panderyx | 82f03625159833930ff044a43a6619ab710ff159 | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import Optional, Union
from tools import tools
from exceptions import workflow_exceptions
class Workflow:
"""A class to represent a workflow.
    The Workflow class provides a set of methods to manage the state of the workflow.
    It allows for tool insertions, removals and modifications.
    When the workflow is run, the data flow is built and each tool linked to the
    workflow instance is executed in a determined order. Tool outputs are then
    consolidated in JSON format.
"""
TOOL_CHOICES = {
"generic": tools.GenericTool,
"large_generic": tools.LargeGenericTool,
"input": tools.InputTool,
}
def __init__(self) -> None:
"""Initializes Workflow class with root tool.
Workflow class is initialized with root tool with tool ID `0`. `_root`
points to root tool directly.
"""
self._root = tools.RootTool(id=0)
self._tools = {0: self._root}
self._used_ids = {0}
def insert_tool(
self,
tool_choice: str,
input_ids: Optional[Union[list[int], int]] = None,
output_ids: Optional[Union[list[int], int]] = None,
coordinates: Optional[tuple[int, int]] = None,
) -> tools.Tool:
"""Inserts a new tool to the current workflow.
Args:
tool_choice (str): determines what tool is created (based on the
available choices defined within the Workflow class).
            input_ids (list[int], int): starting input or inputs for the tool
identified by their IDs. Defaults to None.
output_ids (list[int], int): starting output or outputs for the tool
identified by their IDs. Defaults to None.
coordinates (tuple[int, int]): coordinates for the tool on canvas.
Defaults to None.
Raises:
workflow_exceptions.ToolNotAvailable: indicates that provided string
does not refer to an available tool from the Workflow class.
Returns:
tools.Tool: instance of a Tool's class.
"""
try:
tool_class = self.TOOL_CHOICES[tool_choice]
except KeyError:
raise workflow_exceptions.ToolNotAvailable
next_id = self._get_next_tool_id()
tool = tool_class(id=next_id)
self._tools[next_id] = tool
self._add_tool_id(next_id)
if input_ids is not None:
self.add_tool_input(tool_id=tool.id, input_ids=input_ids)
if output_ids is not None:
output_ids = self._clean_tool_ids(output_ids)
for output_id in output_ids:
self.add_tool_input(tool_id=output_id, input_ids=tool.id)
if coordinates is not None:
self.set_tool_coordinates(tool_id=tool.id, coordinates=coordinates)
return tool
def remove_tool(self, tool_ids: Union[list[int], int]) -> None:
"""Removes existing tool from the current workflow.
Removes the tool from the workflow and updates inputs and outputs of the
linked tool instances.
Args:
tool_ids (list[int], int): tool ID or IDs that ought to be removed.
Raises:
workflow_exceptions.RootCannotBeDeleted: indicates that selected
tool for removal is a root which cannot be deleted.
"""
tool_ids = self._clean_tool_ids(tool_ids)
for tool_id in tool_ids:
tool = self._get_tool_by_id(tool_id)
if tool.is_root:
raise workflow_exceptions.RootCannotBeDeleted
# remove tool from linked tools' inputs
tool_outputs = tool.outputs
for output_id in tool_outputs:
self.remove_tool_input(tool_id=output_id, input_ids=tool.id)
# remove tool from linked tools' outputs
tool_inputs = tool.inputs
for input_id in tool_inputs:
self.remove_tool_input(tool_id=tool.id, input_ids=input_id)
del self._tools[tool_id]
def add_tool_input(
self, tool_id: int, input_ids: Union[list[int], int]
) -> tools.Tool:
"""Adds new input(s) for the tool existing in the current workflow.
Args:
tool_id (int): tool ID to which input(s) should be added.
            input_ids (list[int], int): input(s) to be added to the tool
identified by their IDs.
Returns:
tools.Tool: instance of a Tool's class.
"""
tool = self._get_tool_by_id(tool_id)
input_ids = self._clean_tool_ids(input_ids)
for input_id in input_ids:
tool.add_input(input_id)
self._tools[input_id].add_output(tool_id)
return tool
def remove_tool_input(
self, tool_id: int, input_ids: Union[list[int], int]
) -> tools.Tool:
"""Removes input(s) from the tool existing in the current workflow.
Args:
tool_id (int): tool ID from which input(s) should be removed.
            input_ids (list[int], int): input(s) to be removed from the tool
identified by their IDs.
Returns:
tools.Tool: instance of a Tool's class.
"""
tool = self._get_tool_by_id(tool_id)
input_ids = self._clean_tool_ids(input_ids)
for input_id in input_ids:
tool.remove_input(input_id)
self._tools[input_id].remove_output(tool_id)
return tool
def set_tool_config(self, tool_id: int, data: dict) -> tools.Tool:
"""Sets tool's config to passed data dict.
Args:
tool_id (int): tool ID for which config should be set.
data (dict): dict of parameters for given tool.
Returns:
tools.Tool: instance of a Tool's class.
"""
tool = self._get_tool_by_id(tool_id)
tool.config = data
return tool
def set_tool_coordinates(
self, tool_id: int, coordinates: Optional[tuple[int, int]] = None
) -> tools.Tool:
"""Sets (x, y) coordinates for the tool existing in the current workflow.
If no coordinates are passed to this method, default coordinates will be
calculated using `_get_default_coordinates()` internal method.
Args:
tool_id (int): tool ID for which coordinates are to be set.
coordinates (tuple[int, int]): tuple of (x, y) coordinates.
Defaults to None.
Returns:
tools.Tool: instance of a Tool's class.
"""
# I need to decide where to put a check if coordinates will fit a canvas
tool = self._get_tool_by_id(tool_id)
coordinates = (
coordinates if coordinates is not None else self._get_default_coordinates()
)
tool.coordinates = coordinates
return tool
def _get_default_coordinates(self) -> tuple[int, int]:
# might require more sophisticated logic in the future
return (0, 0)
def _get_tool_by_id(self, tool_id: int) -> tools.Tool:
"""Returns an instance of a Tool class selected by its ID.
Args:
tool_id (int): tool ID.
Raises:
workflow_exceptions.ToolDoesNotExist: indicates that for provided ID
there is no tool in this workflow.
Returns:
tools.Tool: instance of a Tool's class.
"""
try:
tool = self._tools[tool_id]
except KeyError:
raise workflow_exceptions.ToolDoesNotExist
return tool
def _clean_tool_ids(self, tool_ids: Union[list[int], int]) -> list[int]:
"""Returns a validated list of tool ID(s).
Checks whether passed tool ID(s) exist in the current workflow
and returns the list of tool IDs. If at least one of the provided tool
IDs is not found, it raises an exception.
Args:
tool_ids (list[int], int): tool ID(s) to be cleaned.
Raises:
workflow_exceptions.ToolDoesNotExist: indicates that at least one of
the provided tool IDs is not present in the current workflow.
Returns:
list[int]: list of checked tool IDs.
"""
cleaned_tool_ids = (
list(set(tool_ids)) if isinstance(tool_ids, list) else [tool_ids]
)
if any(tool_id not in self._tools for tool_id in cleaned_tool_ids):
raise workflow_exceptions.ToolDoesNotExist
return cleaned_tool_ids
def _add_tool_id(self, tool_id: int) -> None:
"""Adds an ID to the used ID pool.
Args:
tool_id (int): ID to be added to the used ID pool.
"""
self._used_ids.add(tool_id)
def _get_next_tool_id(self) -> int:
"""Returns a next available ID to be used for a tool instance.
Returns:
int: next available tool ID.
"""
return max(self._used_ids) + 1
def _build_flow(self) -> None:
        raise NotImplementedError
def __len__(self) -> int:
return len(self._tools) - 1
| 33.818519 | 87 | 0.61461 | 8,990 | 0.984558 | 0 | 0 | 0 | 0 | 0 | 0 | 4,716 | 0.516482 |
e07a13e1121d2676a50044d556f0800f60bfd2f7 | 2,849 | py | Python | team_fundraising/text.py | namtel-hp/fundraising-website | 30cb0cd2bd4505454295d11715e70712525234a3 | [
"MIT"
] | 5 | 2019-10-26T12:41:31.000Z | 2022-03-13T08:30:29.000Z | team_fundraising/text.py | Maalik1/fundraising-website | a5fcd7e8a5966f299f57c22af8c739a3d6cd501a | [
"MIT"
] | 9 | 2021-03-18T21:27:36.000Z | 2022-03-11T23:42:46.000Z | team_fundraising/text.py | Maalik1/fundraising-website | a5fcd7e8a5966f299f57c22af8c739a3d6cd501a | [
"MIT"
] | 2 | 2021-01-11T14:19:01.000Z | 2022-02-18T19:18:38.000Z |
class Donation_text:
# Shown as a message across the top of the page on return from a donation
# used in views.py:new_donation()
thank_you = (
"Thank you for your donation. "
"You may need to refresh this page to see the donation."
)
confirmation_email_subject = (
'Thank you for donating to the Triple Crown for Heart! '
)
# Start of the email sent confirming the paypal payment has gone through
# used in paypal.py:process_paypal()
confirmation_email_opening = (
'Thank you for your donation of '
)
# Closing of the email sent confirming the paypal payment has gone through
# used in paypal.py:process_paypal()
confirmation_email_closing = (
'.\n\nFor all donations over $20, you will receive a tax receipt for '
'the 2019 tax year.'
'\nYour PayPal receipt should arrive in a separate email.\n'
)
notification_email_subject = (
"You got a donation!"
)
notification_email_opening = (
"Great news! You've just received a donation of "
)
notification_email_closing = (
"\n\nAwesome work! They would probably appreciate "
"a quick thank you email.\n\n"
"-- Triple Crown for Heart\n"
)
class Fundraiser_text:
# Subject of the email sent on signup
signup_email_subject = (
"Welcome to fundraising for the Triple Crown for Heart!"
)
# Start of the email sent when someone signs up
# used in views.py:signup()
signup_email_opening = (
"Thanks for signing up to fundraise with us!\n"
"Your fundraising page can be found at:\n"
)
# Closing of the email sent when someone signs up
# used in views.py:signup()
signup_email_closing = (
'\n\nYou can change your information by using the "Login" link at the '
'top of that page.'
'\n\nThe easiest way to start fundraising is to post the above link '
'on social media or write a short email to your friends telling them '
'about your ride.'
'\nDon\'t forget to include the link to your page!\n'
)
# Message show at the top of the fundraiser page after signing up
# used in views.py:signup()
signup_return_message = (
"Thank you for signing up. Sharing your fundraiser page on social "
"media or over email is the best way to get donations."
)
signup_wrong_password_existing_user = (
"The username already exists, but the password entered is incorrect. "
"If you were already a fundraiser for a previous campaign, please "
"enter your previous password or use "
"<a href='/team_fundraising/accounts/password_reset/'>"
"Forgot your password</a>. If this is your first campaign, "
"please choose a different username."
)
| 33.916667 | 79 | 0.65251 | 2,844 | 0.998245 | 0 | 0 | 0 | 0 | 0 | 0 | 2,003 | 0.703054 |
0eb2577f85f04e68e802521ef8915750223e0174 | 624 | py | Python | tests/wagtail_live/test_apps.py | wagtail/wagtail-live | dd769be089d457cf36db2506520028bc5f506ac3 | [
"BSD-3-Clause"
] | 22 | 2021-06-07T20:36:18.000Z | 2022-03-29T01:48:58.000Z | tests/wagtail_live/test_apps.py | wagtail/wagtail-live | dd769be089d457cf36db2506520028bc5f506ac3 | [
"BSD-3-Clause"
] | 73 | 2021-05-21T16:08:44.000Z | 2022-03-20T23:59:59.000Z | tests/wagtail_live/test_apps.py | wagtail/wagtail-live | dd769be089d457cf36db2506520028bc5f506ac3 | [
"BSD-3-Clause"
] | 11 | 2021-06-10T10:05:13.000Z | 2022-02-12T13:31:34.000Z | from django.apps import apps
from django.test import override_settings
from wagtail_live.signals import live_page_update
def test_live_page_update_signal_receivers():
assert len(live_page_update.receivers) == 0
@override_settings(
WAGTAIL_LIVE_PUBLISHER="tests.testapp.publishers.DummyWebsocketPublisher"
)
def test_live_page_update_signal_receivers_websocket():
app_config = apps.get_app_config("wagtail_live")
app_config.ready()
try:
# Receiver should be connected, no IndexError
receiver = live_page_update.receivers[0]
finally:
live_page_update.disconnect(receiver)
| 27.130435 | 77 | 0.780449 | 0 | 0 | 0 | 0 | 403 | 0.645833 | 0 | 0 | 109 | 0.174679 |
0eb2fde0bae97bffa51893b405703a8d74ef6c29 | 14,826 | py | Python | PLM/options.py | vtta2008/pipelineTool | 2431d2fc987e3b31f2a6a63427fee456fa0765a0 | [
"Apache-2.0"
] | 7 | 2017-12-22T02:49:58.000Z | 2018-05-09T05:29:06.000Z | PLM/options.py | vtta2008/pipelineTool | 2431d2fc987e3b31f2a6a63427fee456fa0765a0 | [
"Apache-2.0"
] | null | null | null | PLM/options.py | vtta2008/pipelineTool | 2431d2fc987e3b31f2a6a63427fee456fa0765a0 | [
"Apache-2.0"
] | 3 | 2019-03-11T21:54:52.000Z | 2019-11-25T11:23:17.000Z | # -*- coding: utf-8 -*-
"""
Script Name:
Author: Do Trinh/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
""" Import """
import os
from PySide2.QtWidgets import (QFrame, QStyle, QAbstractItemView, QSizePolicy, QLineEdit, QPlainTextEdit,
QGraphicsItem, QGraphicsView, QGraphicsScene, QRubberBand, QCalendarWidget, )
from PySide2.QtCore import QEvent, QSettings, QSize, Qt, QDateTime
from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor
SingleSelection = QCalendarWidget.SingleSelection
NoSelection = QCalendarWidget.NoSelection
SingleLetterDay = QCalendarWidget.SingleLetterDayNames
ShortDay = QCalendarWidget.ShortDayNames
LongDay = QCalendarWidget.LongDayNames
NoHoriHeader = QCalendarWidget.NoHorizontalHeader
NoVertHeader = QCalendarWidget.NoVerticalHeader
IsoWeekNum = QCalendarWidget.ISOWeekNumbers
SelectMode = QCalendarWidget.SelectionMode
HoriHeaderFm = QCalendarWidget.HorizontalHeaderFormat
VertHeaderFm = QCalendarWidget.VerticalHeaderFormat
DayOfWeek = Qt.DayOfWeek
Sunday = Qt.Sunday
Monday = Qt.Monday
Tuesday = Qt.Tuesday
Wednesday = Qt.Wednesday
Thursday = Qt.Thursday
Friday = Qt.Friday
Saturday = Qt.Saturday
ICONSIZE = 32
ICONBUFFER = -1
BTNTAGSIZE = QSize(87, 20)
TAGBTNSIZE = QSize(87-1, 20-1)
BTNICONSIZE = QSize(ICONSIZE, ICONSIZE)
ICONBTNSIZE = QSize(ICONSIZE+ICONBUFFER, ICONSIZE+ICONBUFFER)
DAMG_LOGO_COLOR = QColor(0, 114, 188, 255)
# Basic color
GlobalColor = Qt.GlobalColor
WHITE = QColor(Qt.white)
LIGHTGRAY = QColor(Qt.lightGray)
GRAY = QColor(Qt.gray)
DARKGRAY = QColor(Qt.darkGray)
BLACK = QColor(Qt.black)
RED = QColor(Qt.red)
GREEN = QColor(Qt.green)
BLUE = QColor(Qt.blue)
DARKRED = QColor(Qt.darkRed)
DARKGREEN = QColor(Qt.darkGreen)
DARKBLUE = QColor(Qt.darkBlue)
CYAN = QColor(Qt.cyan)
MAGENTA = QColor(Qt.magenta)
YELLOW = QColor(Qt.yellow)
DARKCYAN = QColor(Qt.darkCyan)
DARKMAGENTA = QColor(Qt.darkMagenta)
DARKYELLOW = QColor(Qt.darkYellow)
# Dark Palette color
Color_BACKGROUND_LIGHT = QColor('#505F69')
COLOR_BACKGROUND_NORMAL = QColor('#32414B')
COLOR_BACKGROUND_DARK = QColor('#19232D')
COLOR_FOREGROUND_LIGHT = QColor('#F0F0F0')
COLOR_FOREGROUND_NORMAL = QColor('#AAAAAA')
COLOR_FOREGROUND_DARK = QColor('#787878')
COLOR_SELECTION_LIGHT = QColor('#148CD2')
COLOR_SELECTION_NORMAL = QColor('#1464A0')
COLOR_SELECTION_DARK = QColor('#14506E')
# Nice color
blush = QColor(246, 202, 203, 255)
petal = QColor(247, 170, 189, 255)
petunia = QColor(231, 62, 151, 255)
deep_pink = QColor(229, 2, 120, 255)
melon = QColor(241, 118, 110, 255)
pomegranate = QColor(178, 27, 32, 255)
poppy_red = QColor(236, 51, 39, 255)
orange_red = QColor(240, 101, 53, 255)
olive = QColor(174, 188, 43, 255)
spring = QColor(227, 229, 121, 255)
yellow = QColor(255, 240, 29, 255)
mango = QColor(254, 209, 26, 255)
cantaloupe = QColor(250, 176, 98, 255)
tangelo = QColor(247, 151, 47, 255)
burnt_orange = QColor(236, 137, 36, 255)
bright_orange = QColor(242, 124, 53, 255)
moss = QColor(176, 186, 39, 255)
sage = QColor(212, 219, 145, 255)
apple = QColor(178, 215, 140, 255)
grass = QColor(111, 178, 68, 255)
forest = QColor(69, 149, 62, 255)
peacock = QColor(21, 140, 167, 255)
teal = QColor(24, 157, 193, 255)
aqua = QColor(153, 214, 218, 255)
violet = QColor(55, 52, 144, 255)
deep_blue = QColor(15, 86, 163, 255)
hydrangea = QColor(150, 191, 229, 255)
sky = QColor(139, 210, 244, 255)
dusk = QColor(16, 102, 162, 255)
midnight = QColor(14, 90, 131, 255)
seaside = QColor(87, 154, 188, 255)
poolside = QColor(137, 203, 225, 255)
eggplant = QColor(86, 5, 79, 255)
lilac = QColor(222, 192, 219, 255)
chocolate = QColor(87, 43, 3, 255)
blackout = QColor(19, 17, 15, 255)
stone = QColor(125, 127, 130, 255)
gravel = QColor(181, 182, 185, 255)
pebble = QColor(217, 212, 206, 255)
sand = QColor(185, 172, 151, 255)
ignoreARM = Qt.IgnoreAspectRatio
scrollAsNeed = Qt.ScrollBarAsNeeded
scrollOff = Qt.ScrollBarAlwaysOff
scrollOn = Qt.ScrollBarAlwaysOn
SiPoMin = QSizePolicy.Minimum # Size policy
SiPoMax = QSizePolicy.Maximum
SiPoExp = QSizePolicy.Expanding
SiPoPre = QSizePolicy.Preferred
SiPoIgn = QSizePolicy.Ignored
frameStyle = QFrame.Sunken | QFrame.Panel
center = Qt.AlignCenter # Alignment
right = Qt.AlignRight
left = Qt.AlignLeft
top = Qt.AlignTop
bottom = Qt.AlignBottom
hori = Qt.Horizontal
vert = Qt.Vertical
dockL = Qt.LeftDockWidgetArea # Docking area
dockR = Qt.RightDockWidgetArea
dockT = Qt.TopDockWidgetArea
dockB = Qt.BottomDockWidgetArea
dockAll = Qt.AllDockWidgetAreas
datetTimeStamp = QDateTime.currentDateTime().toString("hh:mm - dd MMMM yy") # datestamp
PRS = dict(password = QLineEdit.Password, center = center , left = left , right = right,
spmax = SiPoMax , sppre = SiPoPre, spexp = SiPoExp, spign = SiPoIgn,
expanding = QSizePolicy.Expanding, spmin = SiPoMin,)
# -------------------------------------------------------------------------------------------------------------
""" Event """
NO_WRAP = QPlainTextEdit.NoWrap
NO_FRAME = QPlainTextEdit.NoFrame
ELIDE_RIGHT = Qt.ElideRight
ELIDE_NONE = Qt.ElideNone
# -------------------------------------------------------------------------------------------------------------
""" Window state """
StateNormal = Qt.WindowNoState
StateMax = Qt.WindowMaximized
StateMin = Qt.WindowMinimized
State_Selected = QStyle.State_Selected
# -------------------------------------------------------------------------------------------------------------
""" Nodegraph setting variables """
ASPEC_RATIO = Qt.KeepAspectRatio
SMOOTH_TRANS = Qt.SmoothTransformation
SCROLLBAROFF = Qt.ScrollBarAlwaysOff # Scrollbar
SCROLLBARON = Qt.ScrollBarAlwaysOn
SCROLLBARNEED = Qt.ScrollBarAsNeeded
WORD_WRAP = Qt.TextWordWrap
INTERSECT_ITEM_SHAPE = Qt.IntersectsItemShape
CONTAIN_ITEM_SHAPE = Qt.ContainsItemShape
MATCH_EXACTLY = Qt.MatchExactly
DRAG_ONLY = QAbstractItemView.DragOnly
# -------------------------------------------------------------------------------------------------------------
""" UI flags """
ITEMENABLE = Qt.ItemIsEnabled
ITEMMOVEABLE = QGraphicsItem.ItemIsMovable
ITEMSENDGEOCHANGE = QGraphicsItem.ItemSendsGeometryChanges
ITEMSCALECHANGE = QGraphicsItem.ItemScaleChange
ITEMPOSCHANGE = QGraphicsItem.ItemPositionChange
DEVICECACHE = QGraphicsItem.DeviceCoordinateCache
SELECTABLE = QGraphicsItem.ItemIsSelectable
MOVEABLE = QGraphicsItem.ItemIsMovable
FOCUSABLE = QGraphicsItem.ItemIsFocusable
PANEL = QGraphicsItem.ItemIsPanel
NOINDEX = QGraphicsScene.NoIndex # Scene
RUBBER_DRAG = QGraphicsView.RubberBandDrag # Viewer
RUBBER_REC = QRubberBand.Rectangle
POS_CHANGE = QGraphicsItem.ItemPositionChange
NODRAG = QGraphicsView.NoDrag
NOFRAME = QGraphicsView.NoFrame
ANCHOR_NO = QGraphicsView.NoAnchor
ANCHOR_UNDERMICE = QGraphicsView.AnchorUnderMouse
ANCHOR_CENTER = QGraphicsView.AnchorViewCenter
CACHE_BG = QGraphicsView.CacheBackground
UPDATE_VIEWRECT = QGraphicsView.BoundingRectViewportUpdate
UPDATE_FULLVIEW = QGraphicsView.FullViewportUpdate
UPDATE_SMARTVIEW = QGraphicsView.SmartViewportUpdate
UPDATE_BOUNDINGVIEW = QGraphicsView.BoundingRectViewportUpdate
UPDATE_MINIMALVIEW = QGraphicsView.MinimalViewportUpdate
STAY_ON_TOP = Qt.WindowStaysOnTopHint
STRONG_FOCUS = Qt.StrongFocus
SPLASHSCREEN = Qt.SplashScreen
FRAMELESS = Qt.FramelessWindowHint
CUSTOMIZE = Qt.CustomizeWindowHint
CLOSEBTN = Qt.WindowCloseButtonHint
MINIMIZEBTN = Qt.WindowMinimizeButtonHint
AUTO_COLOR = Qt.AutoColor
# -------------------------------------------------------------------------------------------------------------
""" Drawing """
ANTIALIAS = QPainter.Antialiasing # Painter
ANTIALIAS_TEXT = QPainter.TextAntialiasing
ANTIALIAS_HIGH_QUALITY = QPainter.HighQualityAntialiasing
SMOOTH_PIXMAP_TRANSFORM = QPainter.SmoothPixmapTransform
NON_COSMETIC_PEN = QPainter.NonCosmeticDefaultPen
NO_BRUSH = Qt.NoBrush # Brush
NO_PEN = Qt.NoPen # Pen
ROUND_CAP = Qt.RoundCap
ROUND_JOIN = Qt.RoundJoin
PATTERN_SOLID = Qt.SolidPattern # Pattern
LINE_SOLID = Qt.SolidLine # Line
LINE_DASH = Qt.DashLine
LINE_DOT = Qt.DotLine
LINE_DASH_DOT = Qt.DashDotDotLine
TRANSPARENT = Qt.transparent
TRANSPARENT_MODE = Qt.TransparentMode
# -------------------------------------------------------------------------------------------------------------
""" Meta Object """
QUEUEDCONNECTION = Qt.QueuedConnection
# -------------------------------------------------------------------------------------------------------------
""" Keyboard and cursor """
TEXT_BOLD = QFont.Bold
TEXT_NORMAL = QFont.Normal
MONO_SPACE = QFont.Monospace
TEXT_MENEOMIC = Qt.TextShowMnemonic
KEY_PRESS = QEvent.KeyPress
KEY_RELEASE = QEvent.KeyRelease
KEY_ALT = Qt.Key_Alt
KEY_DEL = Qt.Key_Delete
KEY_TAB = Qt.Key_Tab
KEY_SHIFT = Qt.Key_Shift
KEY_CTRL = Qt.Key_Control
KEY_BACKSPACE = Qt.Key_Backspace
KEY_ENTER = Qt.Key_Enter
KEY_RETURN = Qt.Key_Return
KEY_F = Qt.Key_F
KEY_S = Qt.Key_S
ALT_MODIFIER = Qt.AltModifier
CTRL_MODIFIER = Qt.ControlModifier
SHIFT_MODIFIER = Qt.ShiftModifier
NO_MODIFIER = Qt.NoModifier
CLOSE_HAND_CUSOR = Qt.ClosedHandCursor
SIZEF_CURSOR = Qt.SizeFDiagCursor
windows = os.name = 'nt'
DMK = Qt.AltModifier if windows else CTRL_MODIFIER
MOUSE_LEFT = Qt.LeftButton
MOUSE_RIGHT = Qt.RightButton
MOUSE_MIDDLE = Qt.MiddleButton
NO_BUTTON = Qt.NoButton
ARROW_NONE = Qt.NoArrow # Cursor
CURSOR_ARROW = Qt.ArrowCursor
CURSOR_SIZEALL = Qt.SizeAllCursor
MOVE_OPERATION = QTextCursor.MoveOperation
MOVE_ANCHOR = QTextCursor.MoveMode.MoveAnchor
KEEP_ANCHOR = QTextCursor.MoveMode.KeepAnchor
ACTION_MOVE = Qt.MoveAction # Action
ignoreARM = Qt.IgnoreAspectRatio
# -------------------------------------------------------------------------------------------------------------
""" Set number """
RELATIVE_SIZE = Qt.RelativeSize # Size
INI = QSettings.IniFormat
NATIVE = QSettings.NativeFormat
INVALID = QSettings.InvalidFormat
SYS_SCOPE = QSettings.SystemScope
USER_SCOPE = QSettings.UserScope
# -------------------------------------------------------------------------------------------------------------
# Created by Trinh Do on 5/6/2020 - 3:13 AM
# © 2017 - 2020 DAMGteam. All rights reserved | 43.994065 | 114 | 0.475651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,756 | 0.118433 |
0eb3ad476194898d48e135372f34d1ee69bc79d8 | 2,509 | py | Python | Crawling/ssafyCrawling.py | Nyapy/FMTG | dcf0a35dbbcd50d5bc861b04ac0db41d27e57b6e | [
"MIT"
] | null | null | null | Crawling/ssafyCrawling.py | Nyapy/FMTG | dcf0a35dbbcd50d5bc861b04ac0db41d27e57b6e | [
"MIT"
] | null | null | null | Crawling/ssafyCrawling.py | Nyapy/FMTG | dcf0a35dbbcd50d5bc861b04ac0db41d27e57b6e | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import sys
import time
import urllib.request
import os
sys.stdin = open('idpwd.txt')
site = input()
id = input()
pwd = input()
# absolute path of the web driver used by selenium
chromedriver = r'C:\Webdriver\chromedriver.exe'
# hook the previously installed chromedriver into selenium's webdriver
driver = webdriver.Chrome(chromedriver)
# crawl the target page with the driver
driver.get(site)
driver.find_element_by_name('userId').send_keys(id)
driver.find_element_by_name('userPwd').send_keys(pwd)
driver.find_element_by_class_name('form-btn').click()
driver.set_window_size(1600, 800)
driver.find_element_by_xpath("//a[@href='/edu/lectureroom/openlearning/openLearningList.do']/span").click()
# driver.find_element_by_id('searchContNm').send_keys('aps')
#
# driver.find_element_by_xpath("//button[@onclick='fnSearch();']").click()
driver.find_elements_by_xpath("//*[contains(text(), '5기_B반_Java(1)')]")[0].click()
driver.find_element_by_xpath("//span[@class='file-name']").click()
driver.switch_to.window(driver.window_handles[1])
print(driver.find_elements_by_xpath("//button[@title='다음 페이지']")[0].get_attribute('disabled'))
# driver.find_elements_by_xpath("//button[@title='마지막 페이지']")[0].click()
# print(driver.find_elements_by_xpath("//button[@title='다음 페이지']")[0].get_attribute('disabled'))
# grab the url + practice with the find function
# pre = driver.current_url
# find = pre.find('/index.html')
# url = pre[:find]
# src = driver.find_element_by_class_name("background").get_attribute('src')
# print(src)
## paging to the next page
# for i in driver.find_elements_by_xpath("//button[@title='다음 페이지']"):
# print(i)
cnt = 1
# url = driver.find_elements_by_class_name("background")[-1].get_attribute('src')
# print(url)
# urllib.request.urlretrieve(url, '123.jpg')
# os.system("curl " + url + " > test.jpg")
time.sleep(2)
driver.get_screenshot_as_file("hi.png")
# for i in driver.find_elements_by_class_name("background"):
# time.sleep(2)
# print(i.get_attribute('style'))
# i.screenshot(str(cnt)+'.png')
# cnt += 1
while 1:
time.sleep(0.4)
driver.save_screenshot('APS/C/'+str(cnt)+'.png')
# print(driver.find_element_by_class_name("background").get_attribute('src'))
# driver.find_element_by_class_name("background").screenshot(str(cnt)+'.png')
driver.find_elements_by_xpath("//button[@title='다음 페이지']")[0].click()
cnt += 1
if driver.find_elements_by_xpath("//button[@title='다음 페이지']")[0].get_attribute('disabled') == 'disabled':
break
| 32.166667 | 109 | 0.719012 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,705 | 0.634537 |
0eb4432b0091105498b6cde85c1c9de8fc2676cc | 1,433 | py | Python | 100days/day95/StringIO_demo.py | chainren/python-learn | 5e48e96c4bb212806b9ae0954fdb368abdcf9ba3 | [
"Apache-2.0"
] | null | null | null | 100days/day95/StringIO_demo.py | chainren/python-learn | 5e48e96c4bb212806b9ae0954fdb368abdcf9ba3 | [
"Apache-2.0"
] | 16 | 2020-02-12T03:09:30.000Z | 2022-03-12T00:08:59.000Z | 100days/day95/StringIO_demo.py | chainren/python-learn | 5e48e96c4bb212806b9ae0954fdb368abdcf9ba3 | [
"Apache-2.0"
] | null | null | null |
from io import StringIO
# Create a StringIO object, write to it, then read its in-memory contents back
f = StringIO()
f.write('Python-100')
str = f.getvalue() # read back what was written
print('String written to memory: %s' % str)
f.write('\n') # append more content
f.write('Keep it up for 100 days')
f.close() # close the buffer
f1 = StringIO('Python-100' + '\n' + 'Keep it up for 100 days')
# read the contents
print(f1.read())
f1.close()
# hypothetical crawler-data output function outputData()
def outputData():
    dataOne = 'I am crawler data No.1\n'
    dataTwo = 'I am crawler data No.2\n'
    dataThree = 'I am crawler data No.3'
    data = dataOne + dataTwo + dataThree
    return data
# dataStr is the crawler data string
dataStr = outputData()
# 1. write the content returned by outputData() into memory
dataIO = StringIO(dataStr)
# hypothetical crawler-data output function outputData()
def outputData():
    dataOne = 'I am crawler data No.1\n'
    dataTwo = 'I am crawler data No.2\n'
    dataThree = 'I am crawler data No.3'
    data = dataOne + dataTwo + dataThree
    return data
# dataStr is the crawler data string
dataStr = outputData()
# 1. write the content returned by outputData() into memory
dataIO = StringIO(dataStr)
# 1.1 print the data StringIO has written into memory
print('1.1 Data written into memory:\n%s' % dataIO.getvalue())
# 1.2 print the written data line by line, approach one
print('1.2 Printing the written data line by line, approach one:')
for data in dataIO.readlines():
    print(data.strip('\n')) # strip the trailing newline of each line
# 1.2 print the written data line by line, approach one
print('1.2 Printing the written data line by line, approach one:')
for data in dataIO.readlines():
    print(data.strip('\n')) # strip the trailing newline of each line
# 1.3 print the written data line by line, approach two
# after the previous step the file pointer sits at the end of the data (32); it must be moved back to the start
print('After the previous output the file pointer is at position: %d' % dataIO.tell())
# move the file pointer back to the start for the demo below
dataIO.seek(0)
print('1.3 Printing the written data line by line, approach two:')
for data in dataIO:
print(data.strip('\n')) | 18.61039 | 47 | 0.673412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,331 | 0.624589 |
0eb4945ca1e15b4e7d0b451aa87077b0cebf76c6 | 10,595 | py | Python | src/hub/dataload/sources/drugcentral/drugcentral_upload.py | veleritas/mychem.info | bb22357d4cbbc3c4865da224bf998f2cbc59f8f2 | [
"Apache-2.0"
] | 1 | 2021-05-09T04:51:28.000Z | 2021-05-09T04:51:28.000Z | src/hub/dataload/sources/drugcentral/drugcentral_upload.py | veleritas/mychem.info | bb22357d4cbbc3c4865da224bf998f2cbc59f8f2 | [
"Apache-2.0"
] | null | null | null | src/hub/dataload/sources/drugcentral/drugcentral_upload.py | veleritas/mychem.info | bb22357d4cbbc3c4865da224bf998f2cbc59f8f2 | [
"Apache-2.0"
] | null | null | null | import biothings.hub.dataload.uploader as uploader
class DrugCentralUploader(uploader.DummySourceUploader):
name = "drugcentral"
__metadata__ = {
"src_meta" : {
"url" : "http://drugcentral.org/",
"license_url" : "http://drugcentral.org/privacy",
"license_url_short" : "https://goo.gl/QDNyNe",
"license" : "CC BY-SA 4.0",
}
}
@classmethod
def get_mapping(klass):
mapping = {
"drugcentral": {
"properties": {
"approval": {
"properties": {
"applicant": {
"type": "string"
},
"date": {
"analyzer": "string_lowercase",
"type": "string"
},
"type": {
"type": "string"
}
}
},
"bioactivity": {
"properties": {
"act_comment": {
"type": "string"
},
"act_source": {
"type": "string"
},
"act_type": {
"analyzer": "string_lowercase",
"type": "string"
},
"act_value": {
"analyzer": "string_lowercase",
"type": "string"
},
"action_type": {
"type": "string"
},
"gene_name": {
"type": "string"
},
"moa": {
"analyzer": "string_lowercase",
"type": "string"
},
"moa_source": {
"type": "string"
},
"swissprot": {
"analyzer": "string_lowercase",
"type": "string"
},
"target": {
"type": "string"
},
"target_class": {
"type": "string"
},
"uniprot_id": {
"analyzer": "string_lowercase",
"type": "string"
}
}
},
"drug_dosage": {
"properties": {
"atc_code": {
"analyzer": "string_lowercase",
"type": "string"
},
"dose": {
"analyzer": "string_lowercase",
"type": "string"
},
"route": {
"analyzer": "string_lowercase",
"type": "string"
},
"unit": {
"analyzer": "string_lowercase",
"type": "string"
}
}
},
"drug_use": {
"properties": {
"relation": {
"type": "string"
},
"snomed_id": {
"analyzer": "string_lowercase",
"type": "string"
},
"snomed_name": {
"type": "string"
}
}
},
"pharmacology_action": {
"properties": {
"class_code": {
"analyzer": "string_lowercase",
"type": "string"
},
"name": {
"type": "string"
},
"source": {
"analyzer": "string_lowercase",
"type": "string"
},
"type": {
"type": "string"
}
}
},
"struct_id": {
"analyzer": "string_lowercase",
"type": "string"
},
"structures": {
"properties": {
"cas_rn": {
"analyzer": "string_lowercase",
"type": "string"
},
"inchi": {
"analyzer": "string_lowercase",
"type": "string"
},
"inchikey": {
"analyzer": "string_lowercase",
"type": "string"
},
"inn": {
"analyzer": "string_lowercase",
"type": "string"
},
"smiles": {
"analyzer": "string_lowercase",
"type": "string"
}
}
},
"synonyms": {
"type": "string"
},
"xref": {
"properties": {
"chebi": {
"analyzer": "string_lowercase",
"type": "string"
},
"chembl_id": {
"analyzer": "string_lowercase",
"type": "string"
},
"drugbank_id": {
"analyzer": "string_lowercase",
"type": "string"
},
"inn_id": {
"analyzer": "string_lowercase",
"type": "string"
},
"iuphar_ligand_id": {
"analyzer": "string_lowercase",
"type": "string"
},
"kegg_drug": {
"analyzer": "string_lowercase",
"type": "string"
},
"mesh_descriptor_ui": {
"analyzer": "string_lowercase",
"type": "string"
},
"mesh_supplemental_record_ui": {
"analyzer": "string_lowercase",
"type": "string"
},
"mmsl": {
"analyzer": "string_lowercase",
"type": "string"
},
"nddf": {
"analyzer": "string_lowercase",
"type": "string"
},
"ndfrt": {
"analyzer": "string_lowercase",
"type": "string"
},
"nui": {
"analyzer": "string_lowercase",
"type": "string"
},
"pdb_chem_id": {
"analyzer": "string_lowercase",
"type": "string"
},
"pubchem_cid": {
"analyzer": "string_lowercase",
"type": "string"
},
"rxnorm": {
"analyzer": "string_lowercase",
"type": "string"
},
"secondary_cas_rn": {
"analyzer": "string_lowercase",
"type": "string"
},
"snomedct_us": {
"analyzer": "string_lowercase",
"type": "string"
},
"umlscui": {
"analyzer": "string_lowercase",
"type": "string"
},
"unii": {
"analyzer": "string_lowercase",
"type": "string"
},
"vandf": {
"analyzer": "string_lowercase",
"type": "string"
},
"vuid": {
"analyzer": "string_lowercase",
"type": "string"
}
}
}
}
}
}
return mapping
| 41.54902 | 65 | 0.224823 | 10,542 | 0.994998 | 0 | 0 | 10,150 | 0.957999 | 0 | 0 | 2,776 | 0.26201 |
0eb4f1bf9aa917694ffc04ea836799d3bd9b4710 | 2,751 | py | Python | tests/test_cli.py | Nate1729/FinPack | d76fd5e6538298d5596d5b0f7d3be2bc6520c431 | [
"Apache-2.0"
] | 1 | 2022-01-28T20:05:22.000Z | 2022-01-28T20:05:22.000Z | tests/test_cli.py | Nate1729/FinPack | d76fd5e6538298d5596d5b0f7d3be2bc6520c431 | [
"Apache-2.0"
] | 30 | 2021-11-22T19:07:54.000Z | 2021-12-18T03:00:47.000Z | tests/test_cli.py | Nate1729/FinPack | d76fd5e6538298d5596d5b0f7d3be2bc6520c431 | [
"Apache-2.0"
] | 2 | 2021-12-13T20:27:52.000Z | 2021-12-17T18:39:40.000Z | """Contains tests for finpack/core/cli.py
"""
__copyright__ = "Copyright (C) 2021 Matt Ferreira"
import os
import unittest
from importlib import metadata
from docopt import docopt
from finpack.core import cli
class TestCli(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.DATA_DIR = "temp"
os.mkdir(cls.DATA_DIR)
@classmethod
def tearDownClass(cls):
os.rmdir(cls.DATA_DIR)
def test_version_option(self):
argv = ["--version"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["--version"])
def test_init_no_options(self):
argv = ["init"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["init"])
def test_init_with_filepath_option(self):
argv = ["init", "--filepath=temp/data.csv"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["init"])
self.assertEqual(args["--filepath"], "temp/data.csv")
def test_init_with_sample_dataset_option(self):
argv = ["init", "--sample-dataset"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["init"])
self.assertTrue(args["--sample-dataset"])
def test_init_with_overwrite_option(self):
argv = ["init", "--overwrite"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["init"])
self.assertTrue(args["--overwrite"])
def test_balsheet_no_option(self):
argv = ["balsheet"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["balsheet"])
def test_balsheet_with_filepath_option(self):
argv = ["balsheet", "--filepath=temp/data2.csv"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["balsheet"])
self.assertEqual(args["--filepath"], "temp/data2.csv")
def test_balsheet_with_levels_default(self):
argv = ["balsheet"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["balsheet"])
self.assertEqual(args["--levels"], "3")
def test_balsheet_with_levels_option(self):
argv = ["balsheet", "--levels=2"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["balsheet"])
self.assertEqual(args["--levels"], "2")
def test_balsheet_with_date_default(self):
argv = ["balsheet"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["balsheet"])
self.assertEqual(args["--date"], "today")
def test_balsheet_with_date_option(self):
argv = ["balsheet", "--date=2021-12-01"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["balsheet"])
self.assertEqual(args["--date"], "2021-12-01")
| 25.238532 | 62 | 0.623773 | 2,536 | 0.921847 | 0 | 0 | 170 | 0.061796 | 0 | 0 | 537 | 0.195202 |
0eb6190157c1946b37b5fd1be18f551d0e559832 | 612 | py | Python | python/Patterns/inheritance/main.py | zinderud/ysa | e34d3f4c7afab3976d86f5d27edfcd273414e496 | [
"Apache-2.0"
] | null | null | null | python/Patterns/inheritance/main.py | zinderud/ysa | e34d3f4c7afab3976d86f5d27edfcd273414e496 | [
"Apache-2.0"
] | 1 | 2017-12-27T10:09:22.000Z | 2017-12-27T10:22:47.000Z | python/Patterns/inheritance/main.py | zinderud/ysa | e34d3f4c7afab3976d86f5d27edfcd273414e496 | [
"Apache-2.0"
] | null | null | null | class Yaratik(object):
def move_left(self):
print('Moving left...')
def move_right(self):
print('Moving left...')
class Ejderha(Yaratik):
def Ates_puskurtme(self):
print('ates puskurtum!')
class Zombie(Yaratik):
def Isirmak(self):
print('Isirdim simdi!')
enemy = Yaratik()
enemy.move_left()
# ejderha also includes all functions from parent class (yaratik)
ejderha = Ejderha()
ejderha.move_left()
ejderha.Ates_puskurtme()
# Zombie is called the (child class), inherits from Yaratik (parent class)
zombie = Zombie()
zombie.move_right()
zombie.Isirmak()
| 18 | 74 | 0.679739 | 304 | 0.496732 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.333333 |
0eb71b68b065b14b8eebff52fa3bbffc15201b7a | 1,527 | py | Python | clustering/graph_utils.py | perathambkk/ml-techniques | 5d6fd122322342c0b47dc65d09c4425fd73f2ea9 | [
"MIT"
] | null | null | null | clustering/graph_utils.py | perathambkk/ml-techniques | 5d6fd122322342c0b47dc65d09c4425fd73f2ea9 | [
"MIT"
] | null | null | null | clustering/graph_utils.py | perathambkk/ml-techniques | 5d6fd122322342c0b47dc65d09c4425fd73f2ea9 | [
"MIT"
] | null | null | null | """
Author: Peratham Wiriyathammabhum
"""
import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors
def affinity_graph(X):
'''
This function returns a numpy array.
'''
ni, nd = X.shape
A = np.zeros((ni, ni))
for i in range(ni):
for j in range(i+1, ni):
dist = ((X[i] - X[j])**2).sum() # compute L2 distance
A[i][j] = dist
A[j][i] = dist # by symmetry
return A
def knn_graph(X, knn=4):
    '''
    This function returns a numpy array.
    '''
    ni, nd = X.shape
    nbrs = NearestNeighbors(n_neighbors=(knn+1), algorithm='ball_tree').fit(X)
    distances, indices = nbrs.kneighbors(X)
    A = np.zeros((ni, ni))
    for dist, ind in zip(distances, indices):
        i0 = ind[0]
        for i in range(1, knn+1):
            d = dist[i]
            j = ind[i]  # index of the i-th nearest neighbor, not the loop counter
            A[i0, j] = d
            A[j, i0] = d  # by symmetry
    return A
def sparse_affinity_graph(X):
'''
TODO: This function returns a numpy sparse matrix.
'''
ni, nd = X.shape
A = np.zeros((ni, ni))
for i in range(ni):
for j in range(i+1, ni):
dist = ((X[i] - X[j])**2).sum() # compute L2 distance
A[i][j] = dist
A[j][i] = dist # by symmetry
return A
def laplacian_graph(X, mode='affinity', knn=3, eta=0.01, sigma=2.5):
    '''
    The unnormalized graph Laplacian, L = D - W.
    '''
    if mode == 'affinity':
        W = affinity_graph(X)
        W[abs(W) > eta] = 0
    elif mode == 'nearestneighbor':
        W = knn_graph(X, knn=knn)
    elif mode == 'gaussian':
        W = affinity_graph(X)
        bandwidth = 2.0*(sigma**2)
        # Gaussian (RBF) affinity over the squared distances: exp(-d^2 / (2*sigma^2))
        W = np.exp(-W / bandwidth)
    else:
        pass
    D = np.diag(W.sum(axis=1))
    L = D - W
    return L
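# Illustrative usage (a sketch, assuming a small 2-D point set):
#
#   X = np.random.rand(20, 2)
#   L = laplacian_graph(X, mode='gaussian', sigma=1.5)
#   eigvals, eigvecs = np.linalg.eigh(L)  # smallest eigenvectors give a spectral embedding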
| 21.814286 | 75 | 0.614276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 390 | 0.255069 |
0eb8ddc2c0219670903c4425de4ca4b63a33f316 | 10,124 | py | Python | recipe_engine/internal/commands/__init__.py | Acidburn0zzz/luci | d8993f4684839b58f5f966dd6273d1d8fd001eae | [
"Apache-2.0"
] | 1 | 2021-04-24T04:03:01.000Z | 2021-04-24T04:03:01.000Z | recipe_engine/internal/commands/__init__.py | Acidburn0zzz/luci | d8993f4684839b58f5f966dd6273d1d8fd001eae | [
"Apache-2.0"
] | null | null | null | recipe_engine/internal/commands/__init__.py | Acidburn0zzz/luci | d8993f4684839b58f5f966dd6273d1d8fd001eae | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""This package houses all subcommands for the recipe engine.
See implementation_details.md for the expectations of the modules in this
directory.
"""
import argparse
import errno
import logging
import os
import pkgutil
import sys
if sys.version_info >= (3, 5): # we're running python > 3.5
OS_WALK = os.walk
else:
# From vpython
from scandir import walk as OS_WALK
# pylint: disable=wrong-import-position
from .. import simple_cfg
from ..recipe_deps import RecipeDeps
from ..recipe_module_importer import RecipeModuleImporter
LOG = logging.getLogger(__name__)
# This incantation finds all loadable submodules of ourself. The
# `prefix=__name__` bit is so that these modules get loaded with the correct
# import names, i.e.
#
# recipe_engine.internal.commands.<submodule>
#
# If omitted, then these submodules can get double loaded as both:
#
# <submodule> AND
# recipe_engine.internal.commands.<submodule>
#
# Which can both interfere with the global python module namespace, and lead to
# strange errors when doing type assertions (since all data in these modules
# will be loaded under two different names; classes will fail isinstance checks
# even though they are "the same").
_COMMANDS = [
loader.find_module(module_name).load_module(module_name)
for (loader, module_name, _) in pkgutil.walk_packages(
__path__, prefix=__name__+'.')
if '.' not in module_name[len(__name__)+1:]
]
# Order all commands by an optional __cmd_priority__ field, and then by module
# name.
_COMMANDS.sort(
key=lambda mod: (
not hasattr(mod, '__cmd_priority__'), # modules defining priority first
getattr(mod, '__cmd_priority__', None), # actual priority
mod.__name__ # name
))
# Now actually set these commands on ourself so that 'mock' works correctly.
#
# This is needed to allow some tests (though it may be worth adjusting these
# tests later to not need this. Just delete this function and see which tests
# fail to find the dependencies on this behavior).
def _patch_our_attrs():
self = sys.modules[__name__]
self.__all__ = [mod.__name__[len(__name__)+1:] for mod in _COMMANDS]
for modname, mod in zip(self.__all__, _COMMANDS):
setattr(self, modname, mod)
_patch_our_attrs()
def _check_recipes_cfg_consistency(recipe_deps):
"""Checks all recipe.cfg files for the loaded recipe_deps and logs
inconsistent dependencies.
Args:
recipe_deps (RecipeDeps) - The loaded+fetched recipe deps
for the current run.
"""
actual = recipe_deps.main_repo.simple_cfg.deps
# For every repo we loaded
for repo_name in actual:
required_deps = recipe_deps.repos[repo_name].simple_cfg.deps
for req_repo_name, req_spec in required_deps.iteritems():
# If this depends on something we didn't load, log an error.
if req_repo_name not in actual:
LOG.error(
'%r depends on %r, but your recipes.cfg is missing an '
'entry for this.', repo_name, req_repo_name)
continue
actual_spec = actual[req_repo_name]
if req_spec.revision == actual_spec.revision:
# They match, it's all good.
continue
LOG.warn(
'recipes.cfg depends on %r @ %s, but %r depends on version %s.',
req_repo_name, actual_spec.revision, repo_name, req_spec.revision)
def _cleanup_pyc(recipe_deps):
"""Removes any .pyc files from the recipes/recipe_module directories.
Args:
* recipe_deps (RecipeDeps) - The loaded recipe dependencies.
"""
for repo in recipe_deps.repos.itervalues():
for to_walk in (repo.recipes_dir, repo.modules_dir):
for root, _dirs, files in OS_WALK(to_walk):
for fname in files:
if not fname.endswith('.pyc'):
continue
try:
to_clean = os.path.join(root, fname)
LOG.info('cleaning %r', to_clean)
os.unlink(to_clean)
except OSError as ex:
# If multiple things are cleaning pyc's at the same time this can
# race. Fortunately we only care that SOMETHING deleted the pyc :)
if ex.errno != errno.ENOENT:
raise
def _common_post_process(args):
# TODO(iannucci): We should always do logging.basicConfig() (probably with
# logging.WARNING), even if no verbose is passed. However we need to be
# careful as this could cause issues with spurious/unexpected output.
# Once the recipe engine is on native build.proto, this should be safe to
# do.
if args.verbose > 0:
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
if args.verbose > 1:
logging.getLogger().setLevel(logging.DEBUG)
else:
# Prevent spurious "No handlers could be found for ..." stderr messages.
# Once we always set a basicConfig (per TODO above), this can go away as
# well.
logging.root.manager.emittedNoHandlerWarning = True
if args.pid_file:
try:
with open(args.pid_file, 'w') as pid_file:
pid_file.write('%d\n' % os.getpid())
except Exception:
logging.exception("unable to write pidfile")
args.recipe_deps = RecipeDeps.create(
args.main_repo_path,
args.repo_override,
args.proto_override,
)
_check_recipes_cfg_consistency(args.recipe_deps)
# Allows:
# import RECIPE_MODULES.repo_name.module_name.submodule
sys.meta_path = [RecipeModuleImporter(args.recipe_deps)] + sys.meta_path
_cleanup_pyc(args.recipe_deps)
# Remove flags that subcommands shouldn't use; everything from this point on
# should ONLY use args.recipe_deps.
del args.main_repo_path
del args.verbose
del args.repo_override
def _add_common_args(parser):
class _RepoOverrideAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
tokens = values.split('=', 2)
if len(tokens) != 2:
raise ValueError('Override must have the form: repo=path')
repo_name, path = tokens
override_dict = getattr(namespace, self.dest)
if repo_name in override_dict:
raise ValueError('An override is already defined for [%s] (%s)' % (
repo_name, override_dict[repo_name]))
path = os.path.abspath(os.path.expanduser(path))
if not os.path.isdir(path):
raise ValueError('Override path [%s] is not a directory' % (path,))
override_dict[repo_name] = path
def _package_to_main_repo(value):
try:
value = os.path.abspath(value)
except Exception as ex: # pylint: disable=broad-except
parser.error(
'--package %r could not be converted to absolute path: %r' % (
value, ex,))
recipes_cfg_rel = simple_cfg.RECIPES_CFG_LOCATION_REL
if not value.endswith(recipes_cfg_rel):
parser.error('--package must end with %r.' % (recipes_cfg_rel,))
# We know the arg ends with 'infra/config/recipes.cfg', so chop those
# elements off the path to get the path to the recipe repo root.
for _ in simple_cfg.RECIPES_CFG_LOCATION_TOKS:
value = os.path.dirname(value)
return value
# TODO(iannucci): change --package to --repo-path and avoid having recipes.py
# pass the path to the recipes.cfg. This is preferable because the location of
# recipes.cfg MUST be discovered for recipe dependencies; the RepoSpec
# protobuf doesn't specify where the recipes.cfg is in the dependency repos
# (nor can it, even if it was dynamic; this would be a nightmare to maintain,
# and the autoroller would need to discover it automatically ANYWAY. If we
# allow it to be relocatable, the engine needs to be able to discover it, in
# which case the minimal information is still 'repo root').
parser.add_argument(
'--package',
dest='main_repo_path', type=_package_to_main_repo, required=True,
help='Path to recipes.cfg of the recipe repo to operate on.')
parser.add_argument(
'--verbose', '-v', action='count',
      help='Increase logging verbosity')
parser.add_argument('-O', '--repo-override', metavar='ID=PATH',
action=_RepoOverrideAction, default={},
                      help="Override a repo's repository path with a local one.")
parser.add_argument('--pid-file', metavar='PATH',
help=(
'Absolute path to a file where the engine should write its pid. '
'Path must be absolute and not exist.'))
def _proto_override_abspath(value):
try:
value = os.path.abspath(value)
except Exception as ex: # pylint: disable=broad-except
parser.error(
'--proto-override %r could not be converted to absolute path: %r' % (
value, ex,))
return value
# Override the location of the folder containing the `PB` module. This should
# only be used for recipe bundles, so we don't bother giving it a shortform
# option, and suppress the option's help to avoid confusing users.
parser.add_argument(
'--proto-override', type=_proto_override_abspath, help=argparse.SUPPRESS)
parser.set_defaults(
postprocess_func=lambda error, args: None,
)
def parse_and_run():
"""Parses the command line and runs the chosen subcommand.
Returns the command's return value (either int or None, suitable as input to
`os._exit`).
"""
parser = argparse.ArgumentParser(
description='Interact with the recipe system.')
_add_common_args(parser)
subp = parser.add_subparsers(dest='command')
for module in _COMMANDS:
description = module.__doc__
helplines = []
for line in description.splitlines():
line = line.strip()
if not line:
break
helplines.append(line)
module.add_arguments(subp.add_parser(
module.__name__.split('.')[-1], # use module's short name
formatter_class=argparse.RawDescriptionHelpFormatter,
help=' '.join(helplines),
description=description,
))
args = parser.parse_args()
_common_post_process(args)
args.postprocess_func(parser.error, args)
return args.func(args)
| 35.152778 | 80 | 0.697452 | 708 | 0.069933 | 0 | 0 | 0 | 0 | 0 | 0 | 4,743 | 0.468491 |
0eb8efd29824103fb230c6103a6e3a8b1b30a534 | 7,295 | py | Python | openfl/pipelines/stc_pipeline.py | sarthakpati/openfl | 8edebfd565d94f709a7d7f06d9ee38a7975c066e | [
"Apache-2.0"
] | null | null | null | openfl/pipelines/stc_pipeline.py | sarthakpati/openfl | 8edebfd565d94f709a7d7f06d9ee38a7975c066e | [
"Apache-2.0"
] | null | null | null | openfl/pipelines/stc_pipeline.py | sarthakpati/openfl | 8edebfd565d94f709a7d7f06d9ee38a7975c066e | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""STCPipelinemodule."""
import numpy as np
import gzip as gz
from .pipeline import TransformationPipeline, Transformer
class SparsityTransformer(Transformer):
"""A transformer class to sparsify input data."""
def __init__(self, p=0.01):
"""Initialize.
Args:
p (float): sparsity ratio (Default=0.01)
"""
self.lossy = True
self.p = p
def forward(self, data, **kwargs):
"""Sparsify data and pass over only non-sparsified elements by reducing the array size.
Args:
data: an numpy array from the model tensor_dict
Returns:
condensed_data: an numpy array being sparsified.
metadata: dictionary to store a list of meta information.
"""
metadata = {'int_list': list(data.shape)}
# sparsification
data = data.astype(np.float32)
flatten_data = data.flatten()
n_elements = flatten_data.shape[0]
k_op = int(np.ceil(n_elements * self.p))
topk, topk_indices = self._topk_func(flatten_data, k_op)
#
condensed_data = topk
sparse_data = np.zeros(flatten_data.shape)
sparse_data[topk_indices] = topk
nonzero_element_bool_indices = sparse_data != 0.0
metadata['bool_list'] = list(nonzero_element_bool_indices)
return condensed_data, metadata
# return sparse_data, metadata
def backward(self, data, metadata, **kwargs):
"""Recover data array with the right shape and numerical type.
Args:
            data: a numpy array with non-zero values.
metadata: dictionary to contain information for recovering back to original data array.
Returns:
            recovered_data: a numpy array with the original shape.
"""
data = data.astype(np.float32)
data_shape = metadata['int_list']
nonzero_element_bool_indices = list(metadata['bool_list'])
recovered_data = np.zeros(data_shape).reshape(-1).astype(np.float32)
recovered_data[nonzero_element_bool_indices] = data
recovered_data = recovered_data.reshape(data_shape)
return recovered_data
@staticmethod
def _topk_func(x, k):
"""Select top k values.
Args:
            x: a numpy array to be searched for its top-k components.
            k: number of largest-magnitude values to keep.
Returns:
topk_mag: components with top-k values.
indices: indices of the top-k components.
"""
# quick sort as default on magnitude
idx = np.argsort(np.abs(x))
# sorted order, the right most is the largest magnitude
length = x.shape[0]
start_idx = length - k
# get the top k magnitude
topk_mag = np.asarray(x[idx[start_idx:]])
indices = np.asarray(idx[start_idx:])
if min(topk_mag) - 0 < 10e-8: # avoid zeros
topk_mag = topk_mag + 10e-8
return topk_mag, indices
class TernaryTransformer(Transformer):
"""A transformer class to ternerize input data."""
def __init__(self):
"""Initialize."""
self.lossy = True
def forward(self, data, **kwargs):
"""Ternerize data into positive mean value, negative mean value and zero value.
Args:
data: an flattened numpy array
Returns:
int_data: an numpy array being terneraized.
metadata: dictionary to store a list of meta information.
"""
# ternarization, data is sparse and flattened
mean_topk = np.mean(np.abs(data))
out_ = np.where(data > 0.0, mean_topk, 0.0)
out = np.where(data < 0.0, -mean_topk, out_)
int_array, int2float_map = self._float_to_int(out)
metadata = {'int_to_float': int2float_map}
return int_array, metadata
def backward(self, data, metadata, **kwargs):
"""Recover data array back to the original numerical type.
Args:
data: an numpy array with non-zero values.
Returns:
metadata: dictionary to contain information for recovering back to original data array.
data (return): an numpy array with original numerical type.
"""
# TODO
import copy
data = copy.deepcopy(data)
int2float_map = metadata['int_to_float']
for key in int2float_map:
indices = data == key
data[indices] = int2float_map[key]
return data
@staticmethod
def _float_to_int(np_array):
"""Create look-up table for conversion between floating and integer types.
Args:
np_array:
Returns:
int_array:
int_to_float_map:
"""
flatten_array = np_array.reshape(-1)
unique_value_array = np.unique(flatten_array)
        int_array = np.zeros(flatten_array.shape, dtype=int)  # use the builtin int; numpy no longer provides np.int
int_to_float_map = {}
float_to_int_map = {}
# create table
for idx, u_value in enumerate(unique_value_array):
int_to_float_map.update({idx: u_value})
float_to_int_map.update({u_value: idx})
# assign to the integer array
indices = np.where(flatten_array == u_value)
int_array[indices] = idx
int_array = int_array.reshape(np_array.shape)
return int_array, int_to_float_map
class GZIPTransformer(Transformer):
"""A transformer class to losslessly compress data."""
def __init__(self):
"""Initialize."""
self.lossy = False
def forward(self, data, **kwargs):
"""Compress data into numpy of float32.
Args:
data: an numpy array with non-zero values
Returns:
compressed_bytes :
metadata: dictionary to contain information for recovering back to original data array
"""
bytes_ = data.astype(np.float32).tobytes()
compressed_bytes = gz.compress(bytes_)
metadata = {}
return compressed_bytes, metadata
def backward(self, data, metadata, **kwargs):
"""Decompress data into numpy of float32.
Args:
data: an numpy array with non-zero values
metadata: dictionary to contain information for recovering back to original data array
Returns:
data:
"""
decompressed_bytes_ = gz.decompress(data)
data = np.frombuffer(decompressed_bytes_, dtype=np.float32)
return data
class STCPipeline(TransformationPipeline):
"""A pipeline class to compress data lossly using sparsity and ternerization methods."""
def __init__(self, p_sparsity=0.01, n_clusters=6, **kwargs):
"""Initialize a pipeline of transformers.
Args:
p_sparsity (float): Sparsity factor (Default=0.01)
            n_clusters (int): Number of K-Means clusters (Default=6); accepted for API compatibility but not used by this pipeline
Returns:
Data compression transformer pipeline object
"""
# instantiate each transformer
self.p = p_sparsity
transformers = [SparsityTransformer(self.p), TernaryTransformer(), GZIPTransformer()]
super(STCPipeline, self).__init__(transformers=transformers, **kwargs)
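# ---------------------------------------------------------------------------
# Illustrative round-trip sketch (not part of the original module). It drives
# the three transformers directly on a small random weight array; the array
# shape and the sparsity ratio below are arbitrary choices for demonstration.
if __name__ == '__main__':
    weights = np.random.randn(4, 8).astype(np.float32)
    sparsifier = SparsityTransformer(p=0.1)
    ternarizer = TernaryTransformer()
    gzipper = GZIPTransformer()
    # forward path: sparsify -> ternarize -> gzip
    sparse, sparse_meta = sparsifier.forward(weights)
    ternary, ternary_meta = ternarizer.forward(sparse)
    compressed, gzip_meta = gzipper.forward(ternary)
    # backward path: gunzip -> de-ternarize -> densify
    restored = sparsifier.backward(
        ternarizer.backward(gzipper.backward(compressed, gzip_meta), ternary_meta),
        sparse_meta)
    print('original shape :', weights.shape)
    print('recovered shape:', restored.shape)
    print('compressed size:', len(compressed), 'bytes')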
| 33.159091 | 99 | 0.622207 | 7,078 | 0.970254 | 0 | 0 | 1,681 | 0.230432 | 0 | 0 | 3,391 | 0.464839 |
0eb9c920aa1f94bcf5b75523167a5791a71d6de8 | 1,150 | py | Python | modle/__init__.py | Rex0519/NessusToReport | 047dd4a2f749addab3991b0ebc8ab609140c32a7 | [
"Apache-2.0"
] | 244 | 2020-06-27T12:07:52.000Z | 2022-03-30T02:36:27.000Z | modle/__init__.py | Rex0519/NessusToReport | 047dd4a2f749addab3991b0ebc8ab609140c32a7 | [
"Apache-2.0"
] | 23 | 2021-05-20T07:38:55.000Z | 2022-03-13T14:13:01.000Z | modle/__init__.py | Rex0519/NessusToReport | 047dd4a2f749addab3991b0ebc8ab609140c32a7 | [
"Apache-2.0"
] | 74 | 2020-06-27T12:07:53.000Z | 2022-03-11T19:07:45.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# ------------------------------------------------------------
# File: __init__.py.py
# Created Date: 2020/6/24
# Created Time: 0:12
# Author: Hypdncy
# Author Mail: [email protected]
# Copyright (c) 2020 Hypdncy
# ------------------------------------------------------------
# .::::.
# .::::::::.
# :::::::::::
# ..:::::::::::'
# '::::::::::::'
# .::::::::::
# '::::::::::::::..
# ..::::::::::::.
# ``::::::::::::::::
# ::::``:::::::::' .:::.
# ::::' ':::::' .::::::::.
# .::::' :::: .:::::::'::::.
# .:::' ::::: .:::::::::' ':::::.
# .::' :::::.:::::::::' ':::::.
# .::' ::::::::::::::' ``::::.
# ...::: ::::::::::::' ``::.
# ````':. ':::::::::' ::::..
# '.:::::' ':'````..
# ------------------------------------------------------------ | 39.655172 | 62 | 0.117391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,122 | 0.975652 |
0ebc4327dea5e082563be3e589c1e4f6b395a97a | 7,146 | py | Python | tests/component/test_grid_mixin.py | csdms/pymt | 188222d7858cd3e8eb15564e56d9b7f0cb43cae5 | [
"MIT"
] | 38 | 2017-06-30T17:10:53.000Z | 2022-01-05T07:38:03.000Z | tests/component/test_grid_mixin.py | csdms/pymt | 188222d7858cd3e8eb15564e56d9b7f0cb43cae5 | [
"MIT"
] | 96 | 2017-04-04T18:52:41.000Z | 2021-11-01T21:30:48.000Z | tests/component/test_grid_mixin.py | csdms/pymt | 188222d7858cd3e8eb15564e56d9b7f0cb43cae5 | [
"MIT"
] | 15 | 2017-05-23T15:40:16.000Z | 2021-06-14T21:30:28.000Z | import numpy as np
import pytest
from pytest import approx
from pymt.component.grid import GridMixIn
class Port:
def __init__(self, name, uses=None, provides=None):
self._name = name
self._uses = uses or []
self._provides = provides or []
def get_component_name(self):
return self._name
def get_input_item_count(self):
return len(self._uses)
def get_input_item_list(self):
return self._uses
def get_output_item_count(self):
return len(self._provides)
def get_output_item_list(self):
return self._provides
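# Minimal stand-in for the port object that ``GridMixIn`` reads through
# ``self._port``; the per-test subclasses below add whichever ``get_grid_*``
# methods the grid type being exercised requires.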
def test_exchange_items():
class Component(GridMixIn):
def __init__(self):
self._port = Port("test", uses=["invar"], provides=["outvar"])
super().__init__()
c = Component()
assert c.input_items == ["invar"]
assert c.output_items == ["outvar"]
def test_no_exchange_items():
class Component(GridMixIn):
def __init__(self):
self._port = Port("test")
super().__init__()
c = Component()
assert c.input_items == []
assert c.output_items == []
def test_raster_1d():
class RasterPort(Port):
def get_grid_shape(self, grid_id):
return (3,)
def get_grid_spacing(self, grid_id):
return (2.0,)
def get_grid_origin(self, grid_id):
return (3.0,)
class Component(GridMixIn):
def __init__(self):
self._port = RasterPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_x("invar") == approx(np.array([3.0, 5.0, 7.0]))
def test_raster_2d():
class RasterPort(Port):
def get_grid_shape(self, grid_id):
return (2, 3)
def get_grid_spacing(self, grid_id):
return (2.0, 1.0)
def get_grid_origin(self, grid_id):
return (0.0, 0.0)
class Component(GridMixIn):
def __init__(self):
self._port = RasterPort("test-2d", uses=["invar"], provides=["outvar"])
super().__init__()
c = Component()
assert c.name == "test-2d"
assert c.get_grid_type(0) == "RASTER"
assert c.get_x(0) == approx(np.array([[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]]))
assert c.get_y(0) == approx(np.array([[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]]))
assert np.all(c.get_connectivity(0) == np.array([0, 1, 4, 3, 1, 2, 5, 4]))
assert np.all(c.get_offset(0) == np.array([4, 8]))
def test_raster_3d():
class RasterPort(Port):
def get_grid_shape(self, grid_id):
return (2, 2, 3)
def get_grid_spacing(self, grid_id):
return (1.0, 2.0, 1.0)
def get_grid_origin(self, grid_id):
return (0.0, 0.0, 0.0)
class Component(GridMixIn):
def __init__(self):
self._port = RasterPort("test-3d", uses=["invar"])
super().__init__()
c = Component()
assert c.get_x(0) == approx(
np.array(
[[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]], [[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]]]
)
)
assert c.get_y(0) == approx(
np.array(
[[[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], [[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]]]
)
)
assert c.get_z(0) == approx(
np.array(
[[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]
)
)
def test_rectilinear():
class RectilinearPort(Port):
def get_grid_shape(self, grid_id):
return (2, 3)
def get_grid_x(self, grid_id):
return (0.0, 3.0, 4)
def get_grid_y(self, grid_id):
return (2.0, 7.0)
class Component(GridMixIn):
def __init__(self):
self._port = RectilinearPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "RECTILINEAR"
assert c.get_x(0) == approx(np.array([[0.0, 3.0, 4.0], [0.0, 3.0, 4.0]]))
assert c.get_y(0) == approx(np.array([[2.0, 2.0, 2.0], [7.0, 7.0, 7.0]]))
def test_structured():
class StructuredPort(Port):
def get_grid_shape(self, grid_id):
return (2, 3)
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 2.0, 0.0, 1.0, 2.0])
def get_grid_y(self, grid_id):
return np.array([0.0, 1.0, 2.0, 1.0, 2.0, 3.0])
class Component(GridMixIn):
def __init__(self):
self._port = StructuredPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "STRUCTURED"
assert c.get_x(0) == approx(np.array([0.0, 1.0, 2.0, 0.0, 1.0, 2.0]))
assert c.get_y(0) == approx(np.array([0.0, 1.0, 2.0, 1.0, 2.0, 3.0]))
def test_unstructured():
class UnstructuredPort(Port):
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 0.0, 1.0, 2.0])
def get_grid_y(self, grid_id):
return np.array([0.0, 0.0, 1.0, 1.0, 0.0])
def get_grid_connectivity(self, grid_id):
return np.array([0, 1, 3, 2, 4, 3, 1])
def get_grid_offset(self, grid_id):
return np.array([4, 7])
class Component(GridMixIn):
def __init__(self):
self._port = UnstructuredPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "UNSTRUCTURED"
assert c.get_x(0) == approx(np.array([0.0, 1.0, 0.0, 1.0, 2.0]))
assert c.get_y(0) == approx(np.array([0.0, 0.0, 1.0, 1.0, 0.0]))
def test_get_grid_shape_is_none():
class UnstructuredPort(Port):
def get_grid_shape(self, grid_id):
return None
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 2.0])
class Component(GridMixIn):
def __init__(self):
self._port = UnstructuredPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "UNSTRUCTURED"
def test_get_grid_shape_raises():
class UnstructuredPort(Port):
def get_grid_shape(self, grid_id):
raise NotImplementedError("get_grid_shape")
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 2.0])
class Component(GridMixIn):
def __init__(self):
self._port = UnstructuredPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "UNSTRUCTURED"
def test_structured_1d():
class RectilinearPort(Port):
def get_grid_shape(self, grid_id):
return (2, 3)
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 2.0])
def get_grid_y(self, grid_id):
raise NotImplementedError("get_grid_y")
def get_grid_z(self, grid_id):
raise NotImplementedError("get_grid_z")
class Component(GridMixIn):
def __init__(self):
self._port = RectilinearPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "RECTILINEAR"
with pytest.raises(IndexError):
c.get_z(0)
| 27.805447 | 84 | 0.558354 | 4,583 | 0.641338 | 0 | 0 | 0 | 0 | 0 | 0 | 317 | 0.04436 |
0ebe32fa6550f0c6be308f3edf45681f0583afc5 | 730 | py | Python | scripts/compare.py | SnoozeTime/nes | 4d60562c59e175485eb3dff043c0c78473034cdb | [
"Unlicense"
] | 1 | 2022-01-07T02:00:36.000Z | 2022-01-07T02:00:36.000Z | scripts/compare.py | SnoozeTime/nes | 4d60562c59e175485eb3dff043c0c78473034cdb | [
"Unlicense"
] | 6 | 2020-12-12T03:21:55.000Z | 2022-02-18T11:22:28.000Z | scripts/compare.py | SnoozeTime/nes | 4d60562c59e175485eb3dff043c0c78473034cdb | [
"Unlicense"
] | 1 | 2018-12-02T20:42:10.000Z | 2018-12-02T20:42:10.000Z | import sys
def load_log_sp(filename):
data = []
with open(filename) as f:
for line in f.readlines():
tokens = line.split(" ")
spidx = line.find("SP:")
endidx = line.find(' ', spidx)
data.append((line[0:4], line[spidx+3:endidx]))
return data
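# Illustrative note (assumed log format): each trace line is expected to start
# with a four-character program counter and to carry an "SP:xx" field, e.g.
#   C000  4C F5 C5  JMP $C5F5  A:00 X:00 Y:00 P:24 SP:FD CYC:  0
# for which load_log_sp() would collect the tuple ("C000", "FD").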
if __name__ == "__main__":
mylog = sys.argv[1]
correctlog = sys.argv[2]
mylog_sp = load_log_sp(mylog)
correctlog_sp = load_log_sp(correctlog)
for (i, ((nb1, sp1), (nb2, sp2))) in enumerate(zip(mylog_sp, correctlog_sp)):
print('{} {} - {} vs {}'.format(
nb1, nb2, sp1, sp2))
if sp1.lower() != sp2.lower() or int(nb1.lower(),16) != int(nb2.lower(), 16):
break
| 30.416667 | 85 | 0.545205 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.053425 |
0ebf6e6f4a1667f2d0b5238c117fa44dfca6f7c4 | 10,203 | py | Python | tercer_modelo.py | nahuelalmeira/deepLearning | f1fcd06f5735c8be9272b0c8392b1ae467c08582 | [
"MIT"
] | null | null | null | tercer_modelo.py | nahuelalmeira/deepLearning | f1fcd06f5735c8be9272b0c8392b1ae467c08582 | [
"MIT"
] | null | null | null | tercer_modelo.py | nahuelalmeira/deepLearning | f1fcd06f5735c8be9272b0c8392b1ae467c08582 | [
"MIT"
] | null | null | null | """Exercise 1
Usage:
$ CUDA_VISIBLE_DEVICES=2 python practico_1_train_petfinder.py --dataset_dir ../ --epochs 30 --dropout 0.1 0.1 --hidden_layer_sizes 200 100
To know which GPU to use, you can check it with the command
$ nvidia-smi
"""
import argparse
import os
import mlflow
import pickle
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers, models
import warnings
warnings.filterwarnings("ignore")
from auxiliary import process_features, load_dataset, build_columns, log_dir_name
TARGET_COL = 'AdoptionSpeed'
def read_args():
parser = argparse.ArgumentParser(
description='Training a MLP on the petfinder dataset')
# Here you have some examples of classifier parameters. You can add
# more arguments or change these if you need to.
parser.add_argument('--experiment_name', type=str, default='Base model',
help='Name of the experiment, used in mlflow.')
parser.add_argument('--dataset_dir', default='../petfinder_dataset', type=str,
help='Directory with the training and test files.')
parser.add_argument('--hidden_layer_sizes', nargs='+', default=[100], type=int,
help='Number of hidden units of each hidden layer.')
parser.add_argument('--epochs', default=50, type=int,
help='Number of epochs to train.')
parser.add_argument('--dropout', nargs='+', default=[0.5], type=float,
help='Dropout ratio for every layer.')
parser.add_argument('--batch_size', type=int, default=32,
help='Number of instances in each batch.')
parser.add_argument('--learning_rate', default=1e-3, type=float,
help='Learning rate.')
args = parser.parse_args()
assert len(args.hidden_layer_sizes) == len(args.dropout)
return args
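# Illustrative: invoking the script with ``--hidden_layer_sizes 200 100 --dropout 0.1 0.1``
# makes argparse yield ``args.hidden_layer_sizes == [200, 100]`` and ``args.dropout == [0.1, 0.1]``,
# which is why the assert above demands one dropout ratio per hidden layer.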
def print_args(args):
print('-------------------------------------------')
print('PARAMS ------------------------------------')
print('-------------------------------------------')
print('--experiment_name ', args.experiment_name)
print('--dataset_dir ', args.dataset_dir)
print('--epochs ', args.epochs)
print('--hidden_layer_sizes', args.hidden_layer_sizes)
print('--dropout ', args.dropout)
print('--batch_size ', args.batch_size)
print('--learning_rate ', args.learning_rate)
print('-------------------------------------------')
def main():
args = read_args()
print_args(args)
experiment_name = args.experiment_name
batch_size = args.batch_size
learning_rate = args.learning_rate
hidden_layer_sizes = args.hidden_layer_sizes
dropout = args.dropout
epochs = args.epochs
### Output directory
dir_name = log_dir_name(args)
print()
print(dir_name)
print()
output_dir = os.path.join('experiments', experiment_name, dir_name)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
dataset, dev_dataset, test_dataset = load_dataset(args.dataset_dir)
nlabels = dataset[TARGET_COL].unique().shape[0]
columns = [
'Gender', 'Color1', 'Vaccinated', 'Dewormed',
'Breed1',
'Age', 'Fee', 'Quantity']
one_hot_columns, embedded_columns, numeric_columns = build_columns(dataset, columns)
# TODO (optional) put these three types of columns in the same dictionary with "column types"
X_train, y_train = process_features(dataset, one_hot_columns, numeric_columns, embedded_columns)
direct_features_input_shape = (X_train['direct_features'].shape[1],)
X_dev, y_dev = process_features(dev_dataset, one_hot_columns, numeric_columns, embedded_columns)
###########################################################################################################
### TODO: Shuffle train dataset - Done
###########################################################################################################
shuffle_len = X_train['direct_features'].shape[0]
train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train)).shuffle(shuffle_len).batch(batch_size)
###########################################################################################################
dev_ds = tf.data.Dataset.from_tensor_slices((X_dev, y_dev)).batch(batch_size)
test_ds = tf.data.Dataset.from_tensor_slices(process_features(
test_dataset, one_hot_columns, numeric_columns, embedded_columns, test=True)[0]).batch(batch_size)
###########################################################################################################
### TODO: Build the Keras model - Done
###########################################################################################################
tf.keras.backend.clear_session()
# Add one input and one embedding for each embedded column
embedding_layers = []
inputs = []
for embedded_col, max_value in embedded_columns.items():
input_layer = layers.Input(shape=(1,), name=embedded_col)
inputs.append(input_layer)
# Define the embedding layer
embedding_size = int(max_value / 4)
embedding_layers.append(
tf.squeeze(layers.Embedding(input_dim=max_value, output_dim=embedding_size)(input_layer), axis=-2))
print('Adding embedding of size {} for layer {}'.format(embedding_size, embedded_col))
# Add the direct features already calculated
direct_features_input = layers.Input(shape=direct_features_input_shape, name='direct_features')
inputs.append(direct_features_input)
# Concatenate everything together
features = layers.concatenate(embedding_layers + [direct_features_input])
    # Stack the hidden layers, applying the per-layer dropout ratio parsed from --dropout.
    hidden = features
    for hidden_layer_size, dropout_ratio in zip(hidden_layer_sizes, dropout):
        hidden = layers.Dense(hidden_layer_size, activation='relu')(hidden)
        hidden = layers.Dropout(dropout_ratio)(hidden)
    # The softmax head is attached to the last hidden layer of the stack.
    output_layer = layers.Dense(nlabels, activation='softmax')(hidden)
model = models.Model(inputs=inputs, outputs=output_layer)
###########################################################################################################
###########################################################################################################
### TODO: Fit the model - Done
###########################################################################################################
mlflow.set_experiment(experiment_name)
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=optimizer,
metrics=['accuracy'])
logdir = "logs/scalars/" + dir_name
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
with mlflow.start_run(nested=True):
# Log model hiperparameters first
mlflow.log_param('hidden_layer_size', hidden_layer_sizes)
mlflow.log_param('dropout', dropout)
mlflow.log_param('embedded_columns', embedded_columns)
mlflow.log_param('one_hot_columns', one_hot_columns)
mlflow.log_param('numeric_columns', numeric_columns) # Not using these yet
mlflow.log_param('epochs', epochs)
mlflow.log_param('batch_size', batch_size)
mlflow.log_param('learning_rate', learning_rate)
# Train
history = model.fit(train_ds, epochs=epochs,
validation_data=dev_ds,
callbacks=[tensorboard_callback])
#######################################################################################################
### TODO: analyze history to see if model converges/overfits
#######################################################################################################
output_csv = os.path.join(output_dir, 'history.pickle')
with open(output_csv, 'bw') as f:
pickle.dump(history.history, f)
#######################################################################################################
#######################################################################################################
### TODO: Evaluate the model, calculating the metrics. - Done
#######################################################################################################
loss, accuracy = model.evaluate(dev_ds)
print("*** Dev loss: {} - accuracy: {}".format(loss, accuracy))
mlflow.log_metric('loss', loss)
mlflow.log_metric('accuracy', accuracy)
predictions = model.predict(test_ds)
#######################################################################################################
#######################################################################################################
### TODO: Convert predictions to classes - Done
#######################################################################################################
prediction_classes = np.argmax(predictions, axis=1)
#######################################################################################################
#######################################################################################################
### TODO: Save the results for submission - Done
#######################################################################################################
output_csv = os.path.join(output_dir, 'submit.csv')
submissions = pd.DataFrame(prediction_classes, columns=[TARGET_COL], index=test_dataset.PID)
submissions.to_csv(output_csv)
#######################################################################################################
###########################################################################################################
print('All operations completed')
if __name__ == '__main__':
main()
| 45.346667 | 138 | 0.51181 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,479 | 0.438989 |
0ebfda6d11cf85e7a67d60d7c46e294592497198 | 7,576 | py | Python | catpy/applications/export.py | catmaid/catpy | 481d87591a6dfaedef2767dcddcbed7185ecc8b8 | [
"MIT"
] | 5 | 2018-04-24T15:45:31.000Z | 2021-06-18T17:38:07.000Z | catpy/applications/export.py | catmaid/catpy | 481d87591a6dfaedef2767dcddcbed7185ecc8b8 | [
"MIT"
] | 35 | 2017-05-12T21:49:54.000Z | 2022-03-12T00:47:09.000Z | catpy/applications/export.py | catmaid/catpy | 481d87591a6dfaedef2767dcddcbed7185ecc8b8 | [
"MIT"
] | 4 | 2017-08-24T12:15:41.000Z | 2019-10-13T01:05:34.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from pkg_resources import parse_version
from warnings import warn
from copy import deepcopy
import networkx as nx
from networkx.readwrite import json_graph
from catpy.applications.base import CatmaidClientApplication
NX_VERSION_INFO = parse_version(nx.__version__)._key[1]
err_msg = (
"Tried to treat the edge's source/target fields as indices into the list of nodes, but failed. "
"See issue #26 [1]. "
"Has CATMAID upgraded to networkx 2.x? [2]\n\n"
"[1]: https://github.com/catmaid/catpy/issues/26\n"
"[2]: https://github.com/catmaid/CATMAID/blob/master/django/requirements.txt"
)
def convert_nodelink_data(jso):
"""NetworkX serialises graphs differently in v1.x and v2.x.
This converts v1-style data (as emitted by CATMAID) to v2-style data.
See issue #26 https://github.com/catmaid/catpy/issues/26
Parameters
----------
jso : dict
Returns
-------
dict
"""
if NX_VERSION_INFO < (2, 0):
warn(
"You are converting networkx v1-style JSON (emitted by CATMAID) to v2-style JSON,"
" but you are using networkx v1"
)
out = deepcopy(jso)
for edge in out["links"]:
for label in ["source", "target"]:
try:
edge[label] = out["nodes"][edge[label]]["id"]
except (KeyError, IndexError):
raise RuntimeError(err_msg)
return out
class ExportWidget(CatmaidClientApplication):
def get_swc(self, skeleton_id, linearize_ids=False):
"""
Get a single skeleton in SWC format.
Parameters
----------
skeleton_id : int or str
linearize_ids : bool
Returns
-------
str
"""
return self.get(
(self.project_id, "skeleton", skeleton_id, "swc"),
{"linearize_ids": "true" if linearize_ids else "false"},
)
def get_connector_archive(self, *args, **kwargs):
"""Not implemented: requires an async job"""
raise NotImplementedError("Requires an async job")
def get_treenode_archive(self, *args, **kwargs):
"""Not implemented: requires an async job"""
raise NotImplementedError("Requires an async job")
def get_networkx_dict(self, *skeleton_ids):
"""
Get the data for a networkx graph of the given skeletons in node-link format.
In networkx 1.x, as used by CATMAID and therefore returned by this method,
"source" and "target" in the dicts in "links" refer to nodes by their indices in the "nodes" array.
See ``convert_nodelink_data`` function to convert into networkx 2.x-compatible format.
https://networkx.readthedocs.io/en/networkx-1.11/reference/generated/networkx.readwrite.json_graph.node_link_data.html
Parameters
----------
skeleton_ids : array-like of (int or str)
Returns
-------
dict
"""
return self.post(
(self.project_id, "graphexport", "json"),
data={"skeleton_list": list(skeleton_ids)},
)
def get_networkx(self, *skeleton_ids):
"""
Get a networkx MultiDiGraph of the given skeletons.
Parameters
----------
skeleton_ids : array-like of (int or str)
Returns
-------
networkx.MultiDiGraph
"""
data = self.get_networkx_dict(*skeleton_ids)
if NX_VERSION_INFO >= (2, 0):
data = convert_nodelink_data(data)
return json_graph.node_link_graph(data, directed=True)
def get_neuroml(self, skeleton_ids, skeleton_inputs=tuple()):
"""
Get NeuroML v1.8.1 (level 3, NetworkML) for the given skeletons, possibly with their input synapses
constrained to another set of skeletons.
N.B. If len(skeleton_ids) > 1, skeleton_inputs will be ignored and only synapses within the first skeleton
set will be used in the model.
Parameters
----------
skeleton_ids : array-like
Skeletons whose NeuroML to return
skeleton_inputs : array-like, optional
If specified, only input synapses from these skeletons will be added to the NeuroML
Returns
-------
str
NeuroML output string
"""
data = {"skids": list(skeleton_ids)}
if skeleton_inputs:
if len(skeleton_ids) > 1:
warn(
"More than one skeleton ID was selected: ignoring skeleton input constraints"
)
else:
data["inputs"] = list(skeleton_inputs)
return self.post((self.project_id, "neuroml", "neuroml_level3_v181"), data=data)
def get_treenode_and_connector_geometry(self, *skeleton_ids):
"""
Get the treenode and connector information for the given skeletons. The returned dictionary will be of the form
{
"skeletons": {
skeleton_id1: {
"treenodes": {
treenode_id1: {
"location": [x, y, z],
"parent_id": id_of_parent_treenode
},
treenode_id2: ...
},
"connectors": {
connector_id1: {
"location": [x, y, z],
"presynaptic_to": [list, of, treenode, ids],
"postsynaptic_to": [list, of, treenode, ids]
},
connector_id2: ...
}
},
skeleton_id2: ...
}
}
Parameters
----------
skeleton_ids : array-like of (int or str)
Returns
-------
dict
"""
# todo: factor API call into MorphologyFetcher
skeletons = dict()
warnings = set()
        relation_names = {0: "presynaptic_to", 1: "postsynaptic_to"}
for skeleton_id in skeleton_ids:
data = self.get(
"{}/{}/1/0/compact-skeleton".format(self.project_id, skeleton_id)
)
skeleton = {"treenodes": dict(), "connectors": dict()}
for treenode in data[0]:
skeleton["treenodes"][int(treenode[0])] = {
"location": treenode[3:6],
"parent_id": None if treenode[1] is None else int(treenode[1]),
}
for connector in data[1]:
# NOT the database relation ID
# {pre: 0, post: 1, gj: 2}
relation_number = connector[2]
if relation_number not in relation_names:
continue
conn_id = int(connector[1])
if conn_id not in skeleton["connectors"]:
skeleton["connectors"][conn_id] = {
rn: [] for rn in relation_names.values()
}
skeleton["connectors"][conn_id]["location"] = connector[3:6]
skeleton["connectors"][conn_id][relation_names[relation_number]].append(
connector[0]
)
skeletons[int(skeleton_id)] = skeleton
        if warnings:
            warn(
                "Skeleton representations contained some unknown treenode->connector relation IDs:\n\t"
                + "\n\t".join(sorted(warnings))
            )
        return {"skeletons": skeletons}
| 31.566667 | 126 | 0.551082 | 6,100 | 0.805174 | 0 | 0 | 0 | 0 | 0 | 0 | 4,277 | 0.564546 |
0ec1afd2facbda8f3febe8ca1dc7c71fb6558f04 | 1,993 | py | Python | packages/watchmen-data-kernel/src/watchmen_data_kernel/meta/external_writer_service.py | Indexical-Metrics-Measure-Advisory/watchmen | c54ec54d9f91034a38e51fd339ba66453d2c7a6d | [
"MIT"
] | null | null | null | packages/watchmen-data-kernel/src/watchmen_data_kernel/meta/external_writer_service.py | Indexical-Metrics-Measure-Advisory/watchmen | c54ec54d9f91034a38e51fd339ba66453d2c7a6d | [
"MIT"
] | null | null | null | packages/watchmen-data-kernel/src/watchmen_data_kernel/meta/external_writer_service.py | Indexical-Metrics-Measure-Advisory/watchmen | c54ec54d9f91034a38e51fd339ba66453d2c7a6d | [
"MIT"
] | null | null | null | from typing import Optional
from watchmen_auth import PrincipalService
from watchmen_data_kernel.cache import CacheService
from watchmen_data_kernel.common import DataKernelException
from watchmen_data_kernel.external_writer import find_external_writer_create, register_external_writer_creator
from watchmen_meta.common import ask_meta_storage, ask_snowflake_generator
from watchmen_meta.system import ExternalWriterService as ExternalWriterStorageService
from watchmen_model.common import ExternalWriterId
from watchmen_model.system import ExternalWriter
def register_external_writer(external_writer: ExternalWriter) -> None:
create = find_external_writer_create(external_writer.type)
if create is None:
raise DataKernelException(f'Creator not found for external writer[{external_writer.dict()}].')
register_external_writer_creator(external_writer.writerCode, create())
class ExternalWriterService:
def __init__(self, principal_service: PrincipalService):
self.principalService = principal_service
def find_by_id(self, writer_id: ExternalWriterId) -> Optional[ExternalWriter]:
external_writer = CacheService.external_writer().get(writer_id)
if external_writer is not None:
if external_writer.tenantId != self.principalService.get_tenant_id():
raise DataKernelException(
f'External writer[id={writer_id}] not belongs to '
f'current tenant[id={self.principalService.get_tenant_id()}].')
register_external_writer(external_writer)
return external_writer
storage_service = ExternalWriterStorageService(
ask_meta_storage(), ask_snowflake_generator(), self.principalService)
storage_service.begin_transaction()
try:
# noinspection PyTypeChecker
external_writer: ExternalWriter = storage_service.find_by_id(writer_id)
if external_writer is None:
return None
CacheService.external_writer().put(external_writer)
register_external_writer(external_writer)
return external_writer
finally:
storage_service.close_transaction()
| 41.520833 | 110 | 0.831912 | 1,111 | 0.557451 | 0 | 0 | 0 | 0 | 0 | 0 | 207 | 0.103864 |
0ec2342f96bb22e61801a222fde8647beb3203c5 | 304 | py | Python | udemy-python/mediaponderada.py | AlbertoAlfredo/exercicios-cursos | 792096ad1f853188adec8fc3e5c629742c8dd7ab | [
"MIT"
] | 1 | 2017-08-27T00:57:20.000Z | 2017-08-27T00:57:20.000Z | udemy-python/mediaponderada.py | AlbertoAlfredo/exercicios-cursos | 792096ad1f853188adec8fc3e5c629742c8dd7ab | [
"MIT"
] | 2 | 2020-09-09T04:22:06.000Z | 2020-12-24T16:25:36.000Z | udemy-python/mediaponderada.py | AlbertoAlfredo/exercicios-cursos | 792096ad1f853188adec8fc3e5c629742c8dd7ab | [
"MIT"
] | null | null | null | nota1 = float(input('Digite a nota da primeira nota '))
peso1 = float(input('Digite o peso da primeira nota '))
nota2 = float(input('Digite a nota da seugundo nota '))
peso2 = float(input('Digite o peso da segundo nota '))
media = (nota1/peso1+nota2/peso2)/2
print('A média das duas notas é:', media)
| 30.4 | 55 | 0.703947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.522876 |
0ec2983c9be55e068e1ac3a8da9a2e78b097ece9 | 882 | py | Python | scrywarden/module.py | chasebrewsky/scrywarden | c6a5a81d14016ca58625df68594ef52dd328a0dd | [
"MIT"
] | 1 | 2020-12-13T00:49:51.000Z | 2020-12-13T00:49:51.000Z | scrywarden/module.py | chasebrewsky/scrywarden | c6a5a81d14016ca58625df68594ef52dd328a0dd | [
"MIT"
] | null | null | null | scrywarden/module.py | chasebrewsky/scrywarden | c6a5a81d14016ca58625df68594ef52dd328a0dd | [
"MIT"
] | null | null | null | from importlib import import_module
from typing import Any
def import_string(path: str) -> Any:
"""Imports a dotted path name and returns the class/attribute.
Parameters
----------
path: str
Dotted module path to retrieve.
Returns
-------
Class/attribute at the given import path.
Raises
------
ImportError
If the path does not exist.
"""
try:
module_path, class_name = path.rsplit('.', 1)
except ValueError as error:
raise ImportError(
f"{path} does not look like a module path",
) from error
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError as error:
raise ImportError(
f"Module '{module_path}' does not define a '{class_name}' "
"attribute/class",
) from error
| 24.5 | 71 | 0.603175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 422 | 0.478458 |
0ec3610585ba69b4e61119ece94d8d89e44e43cc | 27,448 | py | Python | examples/oldexamples/sample_program.py | learningequality/klorimin | c569cd4048ac670bc55a83f4fdda0b818c7f626e | [
"MIT"
] | null | null | null | examples/oldexamples/sample_program.py | learningequality/klorimin | c569cd4048ac670bc55a83f4fdda0b818c7f626e | [
"MIT"
] | null | null | null | examples/oldexamples/sample_program.py | learningequality/klorimin | c569cd4048ac670bc55a83f4fdda0b818c7f626e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import json
import os
import re
from enum import Enum
from os.path import join
from le_utils.constants import content_kinds
from le_utils.constants import exercises
from le_utils.constants import file_formats
from le_utils.constants import licenses
from ricecooker.chefs import SushiChef
from ricecooker.classes import files
from ricecooker.classes import nodes
from ricecooker.classes import questions
from ricecooker.classes.licenses import get_license
from ricecooker.exceptions import InvalidFormatException
from ricecooker.exceptions import raise_for_invalid_channel
from ricecooker.exceptions import UnknownContentKindError
from ricecooker.exceptions import UnknownFileTypeError
from ricecooker.exceptions import UnknownQuestionTypeError
# CHANNEL SETTINGS
SOURCE_DOMAIN = "<yourdomain.org>" # content provider's domain
SOURCE_ID = "<yourid>" # an alphanumeric channel ID
CHANNEL_TITLE = "Testing Ricecooker Channel" # a humand-readbale title
CHANNEL_LANGUAGE = "en" # language code of channel
# LOCAL DIRS
EXAMPLES_DIR = os.path.dirname(os.path.realpath(__file__))
DATA_DIR = os.path.join(EXAMPLES_DIR, "data")
CONTENT_DIR = os.path.join(EXAMPLES_DIR, "content")
#
# A utility function to manage absolute paths that allows us to refer to files
# in the CONTENT_DIR (subdirectory `content/' in current directory) using content://
def get_abspath(path, content_dir=CONTENT_DIR):
"""
Replaces `content://` with absolute path of `content_dir`.
By default looks for content in subdirectory `content` in current directory.
"""
if path:
file = re.search("content://(.+)", path)
if file:
return os.path.join(content_dir, file.group(1))
return path
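# Illustrative check: only content:// paths are rewritten, everything else is
# returned unchanged (the second path below is made up).
assert get_abspath("content://htmltest.zip") == os.path.join(CONTENT_DIR, "htmltest.zip")
assert get_abspath("/tmp/other.pdf") == "/tmp/other.pdf"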
class FileTypes(Enum):
"""Enum containing all file types Ricecooker can have
    Members:
AUDIO_FILE: mp3 files
THUMBNAIL: png, jpg, or jpeg files
DOCUMENT_FILE: pdf files
"""
AUDIO_FILE = 0
THUMBNAIL = 1
DOCUMENT_FILE = 2
VIDEO_FILE = 3
YOUTUBE_VIDEO_FILE = 4
VECTORIZED_VIDEO_FILE = 5
VIDEO_THUMBNAIL = 6
YOUTUBE_VIDEO_THUMBNAIL_FILE = 7
HTML_ZIP_FILE = 8
SUBTITLE_FILE = 9
TILED_THUMBNAIL_FILE = 10
UNIVERSAL_SUBS_SUBTITLE_FILE = 11
BASE64_FILE = 12
WEB_VIDEO_FILE = 13
H5P_FILE = 14
FILE_TYPE_MAPPING = {
content_kinds.AUDIO: {
file_formats.MP3: FileTypes.AUDIO_FILE,
file_formats.PNG: FileTypes.THUMBNAIL,
file_formats.JPG: FileTypes.THUMBNAIL,
file_formats.JPEG: FileTypes.THUMBNAIL,
},
content_kinds.DOCUMENT: {
file_formats.PDF: FileTypes.DOCUMENT_FILE,
file_formats.PNG: FileTypes.THUMBNAIL,
file_formats.JPG: FileTypes.THUMBNAIL,
file_formats.JPEG: FileTypes.THUMBNAIL,
},
content_kinds.HTML5: {
file_formats.HTML5: FileTypes.HTML_ZIP_FILE,
file_formats.PNG: FileTypes.THUMBNAIL,
file_formats.JPG: FileTypes.THUMBNAIL,
file_formats.JPEG: FileTypes.THUMBNAIL,
},
content_kinds.H5P: {
file_formats.H5P: FileTypes.H5P_FILE,
file_formats.PNG: FileTypes.THUMBNAIL,
file_formats.JPG: FileTypes.THUMBNAIL,
file_formats.JPEG: FileTypes.THUMBNAIL,
},
content_kinds.VIDEO: {
file_formats.MP4: FileTypes.VIDEO_FILE,
file_formats.VTT: FileTypes.SUBTITLE_FILE,
file_formats.PNG: FileTypes.THUMBNAIL,
file_formats.JPG: FileTypes.THUMBNAIL,
file_formats.JPEG: FileTypes.THUMBNAIL,
},
content_kinds.EXERCISE: {
file_formats.PNG: FileTypes.THUMBNAIL,
file_formats.JPG: FileTypes.THUMBNAIL,
file_formats.JPEG: FileTypes.THUMBNAIL,
},
}
def guess_file_type(kind, filepath=None, youtube_id=None, web_url=None, encoding=None):
"""guess_file_class: determines what file the content is
Args:
filepath (str): filepath of file to check
Returns: string indicating file's class
"""
if youtube_id:
return FileTypes.YOUTUBE_VIDEO_FILE
elif web_url:
return FileTypes.WEB_VIDEO_FILE
elif encoding:
return FileTypes.BASE64_FILE
else:
ext = os.path.splitext(filepath)[1][1:].lower()
if kind in FILE_TYPE_MAPPING and ext in FILE_TYPE_MAPPING[kind]:
return FILE_TYPE_MAPPING[kind][ext]
return None
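# Illustrative checks (the id and paths are made up; the mp4 case assumes
# le_utils maps that extension to a video format): explicit youtube/web/base64
# hints win, then the file extension decides.
assert guess_file_type(content_kinds.VIDEO, youtube_id="abc123") == FileTypes.YOUTUBE_VIDEO_FILE
assert guess_file_type(content_kinds.VIDEO, filepath="lesson.mp4") == FileTypes.VIDEO_FILE
assert guess_file_type(content_kinds.VIDEO, filepath="notes.txt") is None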
def guess_content_kind(path=None, web_video_data=None, questions=None):
"""guess_content_kind: determines what kind the content is
Args:
        path (str): path of the main file associated with the content, if any
        web_video_data: youtube id or web url, if the content is a web video
        questions (list): assessment questions, if the content is an exercise
    Returns: string indicating the node's kind
"""
# If there are any questions, return exercise
if questions and len(questions) > 0:
return content_kinds.EXERCISE
# See if any files match a content kind
if path:
ext = os.path.splitext(path)[1][1:].lower()
if ext in content_kinds.MAPPING:
return content_kinds.MAPPING[ext]
raise InvalidFormatException(
"Invalid file type: Allowed formats are {0}".format(
[key for key, value in content_kinds.MAPPING.items()]
)
)
elif web_video_data:
return content_kinds.VIDEO
else:
return content_kinds.TOPIC
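# Illustrative checks (the path and question id are made up; the mp4 case
# assumes le_utils maps that extension to the video kind).
assert guess_content_kind(questions=[{"id": "q1"}]) == content_kinds.EXERCISE
assert guess_content_kind(path="lecture.mp4") == content_kinds.VIDEO
assert guess_content_kind() == content_kinds.TOPIC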
# LOAD sample_tree.json (as dict)
with open(join(DATA_DIR, "sample_tree.json"), "r") as json_file:
SAMPLE_TREE = json.load(json_file)
# LOAD JSON DATA (as string) FOR PERSEUS QUESTIONS
SAMPLE_PERSEUS_1_JSON = open(join(DATA_DIR, "sample_perseus01.json"), "r").read()
# SAMPLE_PERSEUS_2_JSON = open(join(DATA_DIR,'sample_perseus02.json'),'r').read()
# ADD EXERCISES
EXERCISES_NODES = [
{
"title": "Rice Cookers",
"id": "d98752",
"description": "Start cooking rice today!",
"children": [
{
"title": "Rice Chef",
"id": "6cafe2",
"author": "Revision 3",
"description": "Become a master rice cooker",
"file": "https://ia600209.us.archive.org/27/items/RiceChef/Rice Chef.mp4",
"license": licenses.CC_BY_NC_SA,
"copyright_holder": "Learning Equality",
"files": [
{
"path": "https://ia600209.us.archive.org/27/items/RiceChef/Rice Chef.mp4"
},
{
"encoding": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAmFQTFRF////wN/2I0FiNFFuAAAAxdvsN1RxV3KMnrPFFi9PAB1CVG+KXHaQI0NjttLrEjVchIF4AyNGZXB5V087UUw/EzBMpqWeb2thbmpgpqOceXVsERgfTWeADg8QCAEApKGZBAYIop+XCQkIhZ+2T2mEg5mtnK/AobPDkKO2YXqTAAAAJkBetMraZH2VprjIz9zm4enw7/T47fP3wc7ae5GnAAAAN1BsSmSApLfI1ODq2OHp5Orv8PL09vb38fb5wM/bbISbrL/PfZSpxNPgzdnj2+Pr5evw6+/z6e3w3ePp2OPsma2/ABM5Q197ABk4jKG1yNfjytfh1uDo3eXs4unv1t/nztrjqbzMTmmEXneRES1Ji6CzxtXixdPfztrk1N/n1+Dp1d/oz9vlxdPeq73NVG+KYnyUAAAddIuhwtPhvMzaxtTgytfiy9jjwtHewtHenbDCHT1fS2eCRV52qr7PvM3cucrYv87cv8/cvMzavc3bucvacoyl////ByE8WnKKscXWv9Hguszbu8zbvc7dtcnaiJqrcHZ4f4SHEh0nEitFTWZ+hJqumrDDm7HDj6W5dI2lYGJfmZeQl5SNAAAADRciAAATHjdSOVNsPlhyLklmKCYjW1lUlpOLlZKLFSAqWXSOBQAADA0NAAAAHh0bWlhSk5CIk5CIBAYJDRQbERcdDBAUBgkMAAAEDg4NAAAAHBsZWFZQkY6GAAAAAAAABQUEHBsZAAAAGxoYVlROko+GBAQDZ2RdAAAAGhkYcW9oAgICAAAAExMSDQwLjouDjYuDioiAiIV9hoN7VlRO////Z2DcYwAAAMR0Uk5TAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACRKrJyrZlBQECaNXCsKaqypMGAUDcu7Gpn5mf03gDo8+4saiipKq3xRMBH83Eu7OsqbG61DkDMdbFvrizsbK3wNs9Ax/VysS/vLq/zNwfArDhxMfExMXE3pMCMe7byMjIzd33ZgYGQtnz6+zooeJXBQMFD1yHejZ1+l8FBgEELlOR+GgFCQ0SGxoBGFKg+m0BBwEMR6v+hAEDM6nRASWURVuYQQ4AAAABYktHRACIBR1IAAAACXBIWXMAAAjLAAAIywGEuOmJAAABCklEQVQY02NgUGZUUVVT19DUYtBmYmZhYdBh1dXTNzA0MjYxZTFjAwqwm1tYWlnb2NrZO3A4cgIFGJycXVzd3D08vbx9uHyBAn7+AYFBwSEhoWHhEdyRQIGo6JjYuPiExKTklFSeNKBAekZmVnZObk5efkEhbxFQgK+4pLSsvKKyqrqGoZZfgIVBsK6+obGpuaW1rV2oQ1hEgKFTtKu7p7evf8LEI5PEJotLMEyZyjJt+oyZsxhmzzk6V3KeFIO01vwFMrJyCxctXrL02DL55QwsClorVq5avWbtuvUbNh7fpMjAwsKyWWvLFJatStu279h5YhdIAAJ2s+zZu+/kfoQAy4HNLAcPHQYA5YtSi+k2/WkAAAAldEVYdGRhdGU6Y3JlYXRlADIwMTMtMTAtMDRUMTk6Mzk6MjEtMDQ6MDAwU1uYAAAAJXRFWHRkYXRlOm1vZGlmeQAyMDEzLTEwLTA0VDE5OjM5OjIxLTA0OjAwQQ7jJAAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAAASUVORK5CYII="
},
],
},
{
"title": "Rice Exercise",
"id": "6cafe3",
"description": "Test how well you know your rice",
"license": licenses.CC_BY_NC_SA,
"copyright_holder": "Learning Equality",
"mastery_model": exercises.DO_ALL,
"files": [
{
"path": "http://www.publicdomainpictures.net/pictures/110000/nahled/bowl-of-rice.jpg"
}
],
"questions": [
{
"id": "eeeee",
"question": "Which rice is your favorite? \\_\\_\\_ ",
"type": exercises.MULTIPLE_SELECTION,
"correct_answers": [
"White rice",
"Brown rice",
"Sushi rice <p>abc</p>",
],
"all_answers": ["White rice", "Quinoa", "Brown rice", "<"],
},
{
"id": "bbbbb",
"question": "Which rice is the crunchiest?",
"type": exercises.SINGLE_SELECTION,
"correct_answer": "Rice Krispies \n",
"all_answers": [
"White rice",
"Brown rice \n",
"Rice Krispies \n",
],
"hints": "It's delicious",
},
{
"id": "aaaaa",
"question": "How many minutes does it take to cook rice? <img src='https://upload.wikimedia.org/wikipedia/commons/5/5e/Jeera-rice.JPG'>",
"type": exercises.INPUT_QUESTION,
"answers": ["20", "25", "15"],
"hints": [
"Takes roughly same amount of time to install kolibri on Windows machine",
"Does this help?\n",
],
},
{
"id": "ddddd",
"type": exercises.PERSEUS_QUESTION,
"item_data": SAMPLE_PERSEUS_1_JSON,
},
],
},
{
"title": "Rice Exercise 2",
"id": "6cafe4",
"description": "Test how well you know your rice",
"license": licenses.CC_BY_NC_SA,
"copyright_holder": "Learning Equality",
"mastery_model": exercises.M_OF_N,
"files": [
{
"path": "https://c1.staticflickr.com/5/4021/4302326650_b11f0f0aaf_b.jpg"
}
],
"questions": [
{
"id": "11111",
"question": "<h3 id=\"rainbow\" style=\"font-weight:bold\">RICE COOKING!!!</h3><script type='text/javascript'><!-- setInterval(function() {$('#rainbow').css('color', '#'+((1<<24)*Math.random()|0).toString(16));}, 300); --></script>",
"type": exercises.SINGLE_SELECTION,
"all_answers": ["Answer"],
"correct_answer": "Answer",
},
{
"id": "121212",
"question": "<math> <mrow> <msup><mi> a </mi><mn>2</mn></msup> <mo> + </mo> <msup><mi> b </mi><mn>2</mn></msup> <mo> = </mo> <msup><mi> c </mi><mn>2</mn></msup> </mrow> </math>",
"type": exercises.SINGLE_SELECTION,
"all_answers": ["Answer"],
"correct_answer": "Answer",
},
],
},
{
"title": "HTML Sample",
"id": "abcdef",
"description": "An example of how html can be imported from the ricecooker",
"license": licenses.PUBLIC_DOMAIN,
"files": [{"path": "content://htmltest.zip"}],
},
{
"title": "Rice Exercise 3",
"id": "6cafe5",
"description": "Test how well you know your rice",
"license": licenses.CC_BY_NC_SA,
"copyright_holder": "Learning Equality",
"mastery_model": exercises.M_OF_N,
"files": [
{
"path": "https://upload.wikimedia.org/wikipedia/commons/b/b7/Rice_p1160004.jpg"
}
],
"questions": [
{
"id": "123456",
"question": "Solve: $$(111^{x+1}\\times111^\\frac14)\\div111^\\frac12=111^3$$",
"type": exercises.SINGLE_SELECTION,
"all_answers": ["Yes", "No", "Rice!"],
"correct_answer": "Rice!",
}
],
},
],
}
]
SAMPLE_TREE.extend(EXERCISES_NODES)
class SampleChef(SushiChef):
"""
The chef class that takes care of uploading channel to the content curation server.
We'll call its `main()` method from the command line script.
"""
channel_info = { #
"CHANNEL_SOURCE_DOMAIN": SOURCE_DOMAIN, # who is providing the content (e.g. learningequality.org)
"CHANNEL_SOURCE_ID": SOURCE_ID, # channel's unique id
"CHANNEL_TITLE": CHANNEL_TITLE,
"CHANNEL_LANGUAGE": CHANNEL_LANGUAGE,
# (optional) local path or url to image file
"CHANNEL_THUMBNAIL": "https://upload.wikimedia.org/wikipedia/commons/thumb/5/50/Banaue_Philippines_Banaue-Rice-Terraces-01.jpg/640px-Banaue_Philippines_Banaue-Rice-Terraces-01.jpg",
"CHANNEL_DESCRIPTION": "A sample sushi chef to demo content types.", # (optional) description of the channel (optional)
}
def construct_channel(self, *args, **kwargs):
"""
Create ChannelNode and build topic tree.
"""
channel = self.get_channel(
*args, **kwargs
) # creates ChannelNode from data in self.channel_info
_build_tree(channel, SAMPLE_TREE)
raise_for_invalid_channel(channel)
return channel
def _build_tree(node, sourcetree):
"""
Parse nodes given in `sourcetree` and add as children of `node`.
"""
for child_source_node in sourcetree:
try:
main_file = (
child_source_node["files"][0] if "files" in child_source_node else {}
)
kind = guess_content_kind(
path=main_file.get("path"),
web_video_data=main_file.get("youtube_id") or main_file.get("web_url"),
questions=child_source_node.get("questions"),
)
except UnknownContentKindError:
continue
if kind == content_kinds.TOPIC:
child_node = nodes.TopicNode(
source_id=child_source_node["id"],
title=child_source_node["title"],
author=child_source_node.get("author"),
description=child_source_node.get("description"),
thumbnail=child_source_node.get("thumbnail"),
)
node.add_child(child_node)
source_tree_children = child_source_node.get("children", [])
_build_tree(child_node, source_tree_children)
elif kind == content_kinds.VIDEO:
child_node = nodes.VideoNode(
source_id=child_source_node["id"],
title=child_source_node["title"],
license=get_license(
child_source_node.get("license"),
description="Description of license",
copyright_holder=child_source_node.get("copyright_holder"),
),
author=child_source_node.get("author"),
description=child_source_node.get("description"),
derive_thumbnail=True, # video-specific data
thumbnail=child_source_node.get("thumbnail"),
)
add_files(child_node, child_source_node.get("files") or [])
node.add_child(child_node)
elif kind == content_kinds.AUDIO:
child_node = nodes.AudioNode(
source_id=child_source_node["id"],
title=child_source_node["title"],
license=child_source_node.get("license"),
author=child_source_node.get("author"),
description=child_source_node.get("description"),
thumbnail=child_source_node.get("thumbnail"),
copyright_holder=child_source_node.get("copyright_holder"),
)
add_files(child_node, child_source_node.get("files") or [])
node.add_child(child_node)
elif kind == content_kinds.DOCUMENT:
child_node = nodes.DocumentNode(
source_id=child_source_node["id"],
title=child_source_node["title"],
license=child_source_node.get("license"),
author=child_source_node.get("author"),
description=child_source_node.get("description"),
thumbnail=child_source_node.get("thumbnail"),
copyright_holder=child_source_node.get("copyright_holder"),
)
add_files(child_node, child_source_node.get("files") or [])
node.add_child(child_node)
elif kind == content_kinds.EXERCISE:
mastery_model = (
child_source_node.get("mastery_model")
and {"mastery_model": child_source_node["mastery_model"]}
) or {}
child_node = nodes.ExerciseNode(
source_id=child_source_node["id"],
title=child_source_node["title"],
license=child_source_node.get("license"),
author=child_source_node.get("author"),
description=child_source_node.get("description"),
exercise_data=mastery_model,
thumbnail=child_source_node.get("thumbnail"),
copyright_holder=child_source_node.get("copyright_holder"),
)
add_files(child_node, child_source_node.get("files") or [])
for q in child_source_node.get("questions"):
question = create_question(q)
child_node.add_question(question)
node.add_child(child_node)
elif kind == content_kinds.HTML5:
child_node = nodes.HTML5AppNode(
source_id=child_source_node["id"],
title=child_source_node["title"],
license=child_source_node.get("license"),
author=child_source_node.get("author"),
description=child_source_node.get("description"),
thumbnail=child_source_node.get("thumbnail"),
copyright_holder=child_source_node.get("copyright_holder"),
)
add_files(child_node, child_source_node.get("files") or [])
node.add_child(child_node)
elif kind == content_kinds.H5P:
child_node = nodes.H5PAppNode(
source_id=child_source_node["id"],
title=child_source_node["title"],
license=child_source_node.get("license"),
author=child_source_node.get("author"),
description=child_source_node.get("description"),
thumbnail=child_source_node.get("thumbnail"),
copyright_holder=child_source_node.get("copyright_holder"),
)
add_files(child_node, child_source_node.get("files") or [])
node.add_child(child_node)
else: # unknown content file format
continue
return node
def add_files(node, file_list):
for f in file_list:
path = f.get("path")
if path is not None:
abspath = get_abspath(
path
) # NEW: expand content:// --> ./content/ in file paths
else:
abspath = None
file_type = guess_file_type(
node.kind,
filepath=abspath,
youtube_id=f.get("youtube_id"),
web_url=f.get("web_url"),
encoding=f.get("encoding"),
)
if file_type == FileTypes.AUDIO_FILE:
node.add_file(files.AudioFile(path=abspath, language=f.get("language")))
elif file_type == FileTypes.THUMBNAIL:
node.add_file(files.ThumbnailFile(path=abspath))
elif file_type == FileTypes.DOCUMENT_FILE:
node.add_file(files.DocumentFile(path=abspath, language=f.get("language")))
elif file_type == FileTypes.HTML_ZIP_FILE:
node.add_file(files.HTMLZipFile(path=abspath, language=f.get("language")))
elif file_type == FileTypes.H5P_FILE:
node.add_file(files.H5PFile(path=abspath, language=f.get("language")))
elif file_type == FileTypes.VIDEO_FILE:
node.add_file(
files.VideoFile(
path=abspath,
language=f.get("language"),
ffmpeg_settings=f.get("ffmpeg_settings"),
)
)
elif file_type == FileTypes.SUBTITLE_FILE:
node.add_file(files.SubtitleFile(path=abspath, language=f["language"]))
elif file_type == FileTypes.BASE64_FILE:
node.add_file(files.Base64ImageFile(encoding=f["encoding"]))
elif file_type == FileTypes.WEB_VIDEO_FILE:
node.add_file(
files.WebVideoFile(
web_url=f["web_url"], high_resolution=f.get("high_resolution")
)
)
elif file_type == FileTypes.YOUTUBE_VIDEO_FILE:
node.add_file(
files.YouTubeVideoFile(
youtube_id=f["youtube_id"], high_resolution=f.get("high_resolution")
)
)
node.add_file(
files.YouTubeSubtitleFile(youtube_id=f["youtube_id"], language="en")
)
else:
raise UnknownFileTypeError("Unrecognized file type '{0}'".format(f["path"]))
def create_question(raw_question):
question = parse_images(raw_question.get("question"))
hints = raw_question.get("hints")
hints = (
parse_images(hints)
if isinstance(hints, str)
else [parse_images(hint) for hint in hints or []]
)
if raw_question["type"] == exercises.MULTIPLE_SELECTION:
return questions.MultipleSelectQuestion(
id=raw_question["id"],
question=question,
correct_answers=[
parse_images(answer) for answer in raw_question["correct_answers"]
],
all_answers=[
parse_images(answer) for answer in raw_question["all_answers"]
],
hints=hints,
)
if raw_question["type"] == exercises.SINGLE_SELECTION:
return questions.SingleSelectQuestion(
id=raw_question["id"],
question=question,
correct_answer=parse_images(raw_question["correct_answer"]),
all_answers=[
parse_images(answer) for answer in raw_question["all_answers"]
],
hints=hints,
)
if raw_question["type"] == exercises.INPUT_QUESTION:
return questions.InputQuestion(
id=raw_question["id"],
question=question,
answers=[parse_images(answer) for answer in raw_question["answers"]],
hints=hints,
)
if raw_question["type"] == exercises.PERSEUS_QUESTION:
return questions.PerseusQuestion(
id=raw_question["id"],
raw_data=parse_images(raw_question.get("item_data")),
source_url="https://www.google.com/",
)
else:
raise UnknownQuestionTypeError(
"Unrecognized question type '{0}': accepted types are {1}".format(
raw_question["type"], [key for key, value in exercises.question_choices]
)
)
def parse_images(content):
if content:
reg = re.compile(questions.MARKDOWN_IMAGE_REGEX, flags=re.IGNORECASE)
matches = reg.findall(content)
for match in matches:
path = match[1]
graphie = re.search(questions.WEB_GRAPHIE_URL_REGEX, path)
if graphie:
path = graphie.group(1)
content = content.replace(path, get_abspath(path).replace("\\", "\\\\"))
return content
if __name__ == "__main__":
"""
This code will run when the sushi chef is called from the command line.
"""
chef = SampleChef()
chef.main()
| 45.594684 | 1,965 | 0.620847 | 1,797 | 0.065469 | 0 | 0 | 0 | 0 | 0 | 0 | 10,242 | 0.373142 |
0ec3a322173dd7c7c650f060b94c615e6cceb769 | 19,118 | py | Python | release/scripts/modules/bl_i18n_utils/utils_spell_check.py | dvgd/blender | 4eb2807db1c1bd2514847d182fbb7a3f7773da96 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | release/scripts/modules/bl_i18n_utils/utils_spell_check.py | dvgd/blender | 4eb2807db1c1bd2514847d182fbb7a3f7773da96 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | release/scripts/modules/bl_i18n_utils/utils_spell_check.py | dvgd/blender | 4eb2807db1c1bd2514847d182fbb7a3f7773da96 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2020-12-02T20:05:42.000Z | 2020-12-02T20:05:42.000Z | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import enchant
import os
import pickle
import re
class SpellChecker:
"""
A basic spell checker.
"""
# These must be all lower case for comparisons
uimsgs = {
# OK words
"adaptively", "adaptivity",
"aren", # aren't
"betweens", # yuck! in-betweens!
"boolean", "booleans",
"chamfer",
"couldn", # couldn't
"decrement",
"derivate",
"deterministically",
"doesn", # doesn't
"duplications",
"effector",
"equi", # equi-angular, etc.
"fader",
"globbing",
"hasn", # hasn't
"hetero",
"hoc", # ad-hoc
"incompressible",
"indices",
"instantiation",
"iridas",
"isn", # isn't
"iterable",
"kyrgyz",
"latin",
"merchantability",
"mplayer",
"ons", # add-ons
"pong", # ping pong
"scalable",
"shadeless",
"shouldn", # shouldn't
"smoothen",
"spacings",
"teleport", "teleporting",
"vertices",
"wasn", # wasn't
# Merged words
"antialiasing", "antialias",
"arcsine", "arccosine", "arctangent",
"autoclip",
"autocomplete",
"autoexec",
"autoexecution",
"autogenerated",
"autolock",
"automasking",
"autoname",
"autopack",
"autosave",
"autoscale",
"autosmooth",
"autosplit",
"backface", "backfacing",
"backimage",
"backscattered",
"bandnoise",
"bindcode",
"bitdepth",
"bitflag", "bitflags",
"bitrate",
"blackbody",
"blendfile",
"blendin",
"bonesize",
"boundbox",
"boxpack",
"buffersize",
"builtin", "builtins",
"bytecode",
"chunksize",
"customdata",
"dataset", "datasets",
"de",
"deadzone",
"deconstruct",
"defocus",
"denoise", "denoised", "denoising", "denoiser",
"deselect", "deselecting", "deselection",
"despill", "despilling",
"dirtree",
"editcurve",
"editmesh",
"filebrowser",
"filelist",
"filename", "filenames",
"filepath", "filepaths",
"forcefield", "forcefields",
"fulldome", "fulldomes",
"fullscreen",
"gridline",
"hardlight",
"hemi",
"hostname",
"inbetween",
"inscatter", "inscattering",
"libdata",
"lightprobe", "lightprobes",
"lightless",
"lineset",
"linestyle", "linestyles",
"localview",
"lookup", "lookups",
"mathutils",
"micropolygon",
"midlevel",
"midground",
"mixdown",
"multi",
"multifractal",
"multiframe",
"multilayer",
"multipaint",
"multires", "multiresolution",
"multisampling",
"multiscatter",
"multitexture",
"multithreaded",
"multiuser",
"multiview",
"namespace",
"nodetree", "nodetrees",
"keyconfig",
"offscreen",
"online",
"playhead",
"popup", "popups",
"pre",
"precache", "precaching",
"precalculate",
"precomputing",
"prefetch",
"premultiply", "premultiplied",
"prepass",
"prepend",
"preprocess", "preprocessing",
"preseek",
"promillage",
"pushdown",
"raytree",
"readonly",
"realtime",
"reinject", "reinjected",
"rekey",
"remesh",
"reprojection", "reproject", "reprojecting",
"resize",
"restpose",
"retarget", "retargets", "retargeting", "retargeted",
"retiming",
"rigidbody",
"ringnoise",
"rolloff",
"runtime",
"scanline",
"screenshot", "screenshots",
"seekability",
"selfcollision",
"shadowbuffer", "shadowbuffers",
"singletexture",
"spellcheck", "spellchecking",
"startup",
"stateful",
"starfield",
"studiolight",
"subflare", "subflares",
"subframe", "subframes",
"subclass", "subclasses", "subclassing",
"subdirectory", "subdirectories", "subdir", "subdirs",
"subitem",
"submode",
"submodule", "submodules",
"subpath",
"subsize",
"substep", "substeps",
"targetless",
"textbox", "textboxes",
"tilemode",
"timestamp", "timestamps",
"timestep", "timesteps",
"todo",
"tradeoff",
"un",
"unassociate", "unassociated",
"unbake",
"unclosed",
"uncomment",
"unculled",
"undeformed",
"undistort", "undistorted", "undistortion",
"ungroup", "ungrouped",
"unhide",
"unindent",
"unkeyed",
"unlink", "unlinked",
"unmute",
"unphysical",
"unpremultiply",
"unprojected",
"unprotect",
"unreacted",
"unreferenced",
"unregister",
"unselect", "unselected", "unselectable",
"unsets",
"unshadowed",
"unspill",
"unstitchable", "unstitch",
"unsubdivided", "unsubdivide",
"untrusted",
"vectorscope",
"whitespace", "whitespaces",
"worldspace",
"workflow",
"workspace", "workspaces",
# Neologisms, slangs
"affectable",
"animatable",
"automagic", "automagically",
"blobby",
"blockiness", "blocky",
"collider", "colliders",
"deformer", "deformers",
"determinator",
"editability",
"effectors",
"expander",
"instancer",
"keyer",
"lacunarity",
"linkable",
"numerics",
"occluder", "occluders",
"overridable",
"passepartout",
"perspectively",
"pixelate",
"pointiness",
"polycount",
"polygonization", "polygonalization", # yuck!
"scalings",
"selectable", "selectability",
"shaper",
"smoothen", "smoothening",
"spherize", "spherized",
"stitchable",
"symmetrize",
"trackability",
"transmissivity",
"rasterized", "rasterization", "rasterizer",
"renderer", "renderers", "renderable", "renderability",
# Really bad!!!
"convertor",
"fullscr",
# Abbreviations
"aero",
"amb",
"anim",
"aov",
"app",
"bbox", "bboxes",
"bksp", # Backspace
"bool",
"calc",
"cfl",
"config", "configs",
"const",
"coord", "coords",
"degr",
"diff",
"dof",
"dupli", "duplis",
"eg",
"esc",
"expr",
"fac",
"fra",
"fract",
"frs",
"grless",
"http",
"init",
"irr", # Irradiance
"kbit", "kb",
"lang", "langs",
"lclick", "rclick",
"lensdist",
"loc", "rot", "pos",
"lorem",
"luma",
"mbs", # mouse button 'select'.
"mem",
"multicam",
"num",
"ok",
"orco",
"ortho",
"pano",
"persp",
"pref", "prefs",
"prev",
"param",
"premul",
"quad", "quads",
"quat", "quats",
"recalc", "recalcs",
"refl",
"sce",
"sel",
"spec",
"struct", "structs",
"subdiv",
"sys",
"tex",
"texcoord",
"tmr", # timer
"tri", "tris",
"udim", "udims",
"upres", # Upresolution
"usd",
"uv", "uvs", "uvw", "uw", "uvmap",
"ve",
"vec",
"vel", # velocity!
"vert", "verts",
"vis",
"vram",
"xor",
"xyz", "xzy", "yxz", "yzx", "zxy", "zyx",
"xy", "xz", "yx", "yz", "zx", "zy",
# General computer/science terms
"affine",
"albedo",
"anamorphic",
"anisotropic", "anisotropy",
"bitangent",
"boid", "boids",
"ceil",
"compressibility",
"curvilinear",
"equiangular",
"equisolid",
"euler", "eulers",
"fribidi",
"gettext",
"hashable",
"hotspot",
"interocular",
"intrinsics",
"irradiance",
"isosurface",
"jitter", "jittering", "jittered",
"keymap", "keymaps",
"lambertian",
"laplacian",
"metadata",
"msgfmt",
"nand", "xnor",
"normals",
"numpad",
"octahedral",
"octree",
"omnidirectional",
"opengl",
"openmp",
"parametrization",
"photoreceptor",
"poly",
"polyline", "polylines",
"probabilistically",
"pulldown", "pulldowns",
"quantized",
"quartic",
"quaternion", "quaternions",
"quintic",
"samplerate",
"sawtooth",
"scrollback",
"scrollbar",
"scroller",
"searchable",
"spacebar",
"subtractive",
"superellipse",
"tooltip", "tooltips",
"trackpad",
"tuple",
"unicode",
"viewport", "viewports",
"viscoelastic",
"vorticity",
"waveform", "waveforms",
"wildcard", "wildcards",
"wintab", # Some Windows tablet API
# General computer graphics terms
"anaglyph",
"bezier", "beziers",
"bicubic",
"bilinear",
"bindpose",
"binormal",
"blackpoint", "whitepoint",
"blinn",
"bokeh",
"catadioptric",
"centroid",
"chroma",
"chrominance",
"clearcoat",
"codec", "codecs",
"collada",
"compositing",
"crossfade",
"cubemap", "cubemaps",
"cuda",
"deinterlace",
"dropoff",
"duotone",
"dv",
"eigenvectors",
"emissive",
"equirectangular",
"fisheye",
"framerate",
"gimbal",
"grayscale",
"icosphere",
"inpaint",
"kerning",
"lightmap",
"linearlight",
"lossless", "lossy",
"luminance",
"mantaflow",
"matcap",
"midtones",
"mipmap", "mipmaps", "mip",
"ngon", "ngons",
"ntsc",
"nurb", "nurbs",
"perlin",
"phong",
"pinlight",
"qi",
"radiosity",
"raycasting",
"raytrace", "raytracing", "raytraced",
"refractions",
"remesher", "remeshing", "remesh",
"renderfarm",
"scanfill",
"shader", "shaders",
"shadowmap", "shadowmaps",
"softlight",
"specular", "specularity",
"spillmap",
"sobel",
"stereoscopy",
"texel",
"timecode",
"tonemap",
"toon",
"transmissive",
"vividlight",
"volumetrics",
"voronoi",
"voxel", "voxels",
"vsync",
"wireframe",
"zmask",
"ztransp",
# Blender terms
"audaspace",
"azone", # action zone
"backwire",
"bbone",
"bendy", # bones
"bmesh",
"breakdowner",
"bspline",
"bweight",
"colorband",
"datablock", "datablocks",
"despeckle",
"depsgraph",
"dopesheet",
"dupliface", "duplifaces",
"dupliframe", "dupliframes",
"dupliobject", "dupliob",
"dupligroup",
"duplivert",
"dyntopo",
"editbone",
"editmode",
"eevee",
"fcurve", "fcurves",
"fedge", "fedges",
"filmic",
"fluidsim",
"freestyle",
"enum", "enums",
"gizmogroup",
"gons", # N-Gons
"gpencil",
"idcol",
"keyframe", "keyframes", "keyframing", "keyframed",
"lookdev",
"luminocity",
"mathvis",
"metaball", "metaballs", "mball",
"metaelement", "metaelements",
"metastrip", "metastrips",
"movieclip",
"mpoly",
"mtex",
"nabla",
"navmesh",
"outliner",
"overscan",
"paintmap", "paintmaps",
"polygroup", "polygroups",
"poselib",
"pushpull",
"pyconstraint", "pyconstraints",
"qe", # keys...
"shaderfx", "shaderfxs",
"shapekey", "shapekeys",
"shrinkfatten",
"shrinkwrap",
"softbody",
"stucci",
"subdiv",
"subtype",
"sunsky",
"tessface", "tessfaces",
"texface",
"timeline", "timelines",
"tosphere",
"uilist",
"userpref",
"vcol", "vcols",
"vgroup", "vgroups",
"vinterlace",
"vse",
"wasd", "wasdqe", # keys...
"wetmap", "wetmaps",
"wpaint",
"uvwarp",
# UOC (Ugly Operator Categories)
"cachefile",
"paintcurve",
"ptcache",
"dpaint",
# Algorithm/library names
"ashikhmin", # Ashikhmin-Shirley
"arsloe", # Texel-Marsen-Arsloe
"beckmann",
"blackman", # Blackman-Harris
"blosc",
"burley", # Christensen-Burley
"catmull",
"catrom",
"chebychev",
"courant",
"cryptomatte", "crypto",
"embree",
"hosek",
"kutta",
"lennard",
"marsen", # Texel-Marsen-Arsloe
"mikktspace",
"minkowski",
"minnaert",
"moskowitz", # Pierson-Moskowitz
"musgrave",
"nayar",
"netravali",
"nishita",
"ogawa",
"oren",
"peucker", # Ramer-Douglas-Peucker
"pierson", # Pierson-Moskowitz
"preetham",
"prewitt",
"ramer", # Ramer-Douglas-Peucker
"runge",
"sobol",
"verlet",
"wilkie",
"worley",
# Acronyms
"aa", "msaa",
"ao",
"api",
"asc", "cdl",
"ascii",
"atrac",
"avx",
"bsdf",
"bssrdf",
"bw",
"ccd",
"cmd",
"cmos",
"cpus",
"ctrl",
"cw", "ccw",
"dev",
"djv",
"dpi",
"dvar",
"dx",
"eo",
"fh",
"fk",
"fov",
"fft",
"futura",
"fx",
"gfx",
"ggx",
"gl",
"glsl",
"gpl",
"gpu", "gpus",
"hc",
"hdc",
"hdr", "hdri", "hdris",
"hh", "mm", "ss", "ff", # hh:mm:ss:ff timecode
"hsv", "hsva", "hsl",
"id",
"ies",
"ior",
"itu",
"jonswap",
"lhs",
"lmb", "mmb", "rmb",
"kb",
"mocap",
"msgid", "msgids",
"mux",
"ndof",
"ppc",
"precisa",
"px",
"qmc",
"rdp",
"rgb", "rgba",
"rhs",
"rv",
"sdl",
"sl",
"smpte",
"ssao",
"ssr",
"svn",
"tma",
"ui",
"unix",
"vbo", "vbos",
"vr",
"wxyz",
"xr",
"ycc", "ycca",
"yrgb",
"yuv", "yuva",
# Blender acronyms
"bli",
"bpy",
"bvh",
"dbvt",
"dop", # BLI K-Dop BVH
"ik",
"nla",
"py",
"qbvh",
"rna",
"rvo",
"simd",
"sph",
"svbvh",
# Files types/formats
"avi",
"attrac",
"autocad",
"autodesk",
"bmp",
"btx",
"cineon",
"dpx",
"dwaa",
"dwab",
"dxf",
"eps",
"exr",
"fbx",
"fbxnode",
"ffmpeg",
"flac",
"gltf",
"gzip",
"ico",
"jpg", "jpeg", "jpegs",
"json",
"matroska",
"mdd",
"mkv",
"mpeg", "mjpeg",
"mtl",
"ogg",
"openjpeg",
"osl",
"oso",
"piz",
"png", "pngs",
"po",
"quicktime",
"rle",
"sgi",
"stl",
"svg",
"targa", "tga",
"tiff",
"theora",
"vorbis",
"vp9",
"wav",
"webm",
"xiph",
"xml",
"xna",
"xvid",
}
_valid_before = "(?<=[\\s*'\"`])|(?<=[a-zA-Z][/-])|(?<=^)"
_valid_after = "(?=[\\s'\"`.!?,;:])|(?=[/-]\\s*[a-zA-Z])|(?=$)"
_valid_words = "(?:{})(?:(?:[A-Z]+[a-z]*)|[A-Z]*|[a-z]*)(?:{})".format(_valid_before, _valid_after)
_split_words = re.compile(_valid_words).findall
@classmethod
def split_words(cls, text):
return [w for w in cls._split_words(text) if w]
def __init__(self, settings, lang="en_US"):
self.settings = settings
self.dict_spelling = enchant.Dict(lang)
self.cache = set(self.uimsgs)
cache = self.settings.SPELL_CACHE
if cache and os.path.exists(cache):
with open(cache, 'rb') as f:
self.cache |= set(pickle.load(f))
def __del__(self):
cache = self.settings.SPELL_CACHE
if cache and os.path.exists(cache):
with open(cache, 'wb') as f:
pickle.dump(self.cache, f)
def check(self, txt):
ret = []
if txt in self.cache:
return ret
for w in self.split_words(txt):
w_lower = w.lower()
if w_lower in self.cache:
continue
if not self.dict_spelling.check(w):
ret.append((w, self.dict_spelling.suggest(w)))
else:
self.cache.add(w_lower)
if not ret:
self.cache.add(txt)
return ret
| 23.145278 | 103 | 0.437703 | 18,251 | 0.95465 | 0 | 0 | 100 | 0.005231 | 0 | 0 | 9,953 | 0.520609 |
0ec3b7be918911b5b776d40be78266905df319e1 | 7,175 | py | Python | naslib/predictors/mlp.py | gmeyerlee/NASLib | 21dbceda04cc1faf3d8b6dd391412a459218ef2b | [
"Apache-2.0"
] | null | null | null | naslib/predictors/mlp.py | gmeyerlee/NASLib | 21dbceda04cc1faf3d8b6dd391412a459218ef2b | [
"Apache-2.0"
] | null | null | null | naslib/predictors/mlp.py | gmeyerlee/NASLib | 21dbceda04cc1faf3d8b6dd391412a459218ef2b | [
"Apache-2.0"
] | null | null | null | import numpy as np
import os
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from naslib.utils.utils import AverageMeterGroup
from naslib.predictors.utils.encodings import encode
from naslib.predictors import Predictor
# NOTE: faster on CPU
device = torch.device("cpu")
print("device:", device)
def accuracy_mse(prediction, target, scale=100.0):
prediction = prediction.detach() * scale
target = (target) * scale
return F.mse_loss(prediction, target)
class FeedforwardNet(nn.Module):
def __init__(
self,
input_dims: int = 5,
num_layers: int = 3,
layer_width: list = [10, 10, 10],
output_dims: int = 1,
activation="relu",
):
super(FeedforwardNet, self).__init__()
assert (
len(layer_width) == num_layers
), "number of widths should be \
equal to the number of layers"
self.activation = eval("F." + activation)
all_units = [input_dims] + layer_width
self.layers = nn.ModuleList(
[nn.Linear(all_units[i], all_units[i + 1]) for i in range(num_layers)]
)
self.out = nn.Linear(all_units[-1], 1)
# make the init similar to the tf.keras version
for l in self.layers:
torch.nn.init.xavier_uniform_(l.weight)
torch.nn.init.zeros_(l.bias)
torch.nn.init.xavier_uniform_(self.out.weight)
torch.nn.init.zeros_(self.out.bias)
def forward(self, x):
for layer in self.layers:
x = self.activation(layer(x))
return self.out(x)
def basis_funcs(self, x):
for layer in self.layers:
x = self.activation(layer(x))
return x
class MLPPredictor(Predictor):
def __init__(
self,
encoding_type="adjacency_one_hot",
ss_type="nasbench201",
hpo_wrapper=False,
hparams_from_file=False
):
self.encoding_type = encoding_type
self.ss_type = ss_type
self.hpo_wrapper = hpo_wrapper
self.default_hyperparams = {
"num_layers": 20,
"layer_width": 20,
"batch_size": 32,
"lr": 0.001,
"regularization": 0.2,
}
self.hyperparams = None
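        # stays None until fit() copies the defaults, loads them from
        # hparams_from_file, or set_random_hyperparams() fills them in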
self.hparams_from_file = hparams_from_file
def get_model(self, **kwargs):
predictor = FeedforwardNet(**kwargs)
return predictor
def fit(self, xtrain, ytrain, train_info=None, epochs=500, loss="mae", verbose=0):
if self.hparams_from_file and self.hparams_from_file not in ['False', 'None'] \
and os.path.exists(self.hparams_from_file):
self.hyperparams = json.load(open(self.hparams_from_file, 'rb'))['mlp']
print('loaded hyperparams from', self.hparams_from_file)
elif self.hyperparams is None:
self.hyperparams = self.default_hyperparams.copy()
num_layers = self.hyperparams["num_layers"]
layer_width = self.hyperparams["layer_width"]
batch_size = self.hyperparams["batch_size"]
lr = self.hyperparams["lr"]
regularization = self.hyperparams["regularization"]
self.mean = np.mean(ytrain)
self.std = np.std(ytrain)
if self.encoding_type is not None:
_xtrain = np.array(
[
encode(arch, encoding_type=self.encoding_type, ss_type=self.ss_type)
for arch in xtrain
]
)
else:
_xtrain = xtrain
_ytrain = np.array(ytrain)
X_tensor = torch.FloatTensor(_xtrain).to(device)
y_tensor = torch.FloatTensor(_ytrain).to(device)
train_data = TensorDataset(X_tensor, y_tensor)
data_loader = DataLoader(
train_data,
batch_size=batch_size,
shuffle=True,
drop_last=False,
pin_memory=False,
)
self.model = self.get_model(
input_dims=_xtrain.shape[1],
num_layers=num_layers,
layer_width=num_layers * [layer_width],
)
self.model.to(device)
optimizer = optim.Adam(self.model.parameters(), lr=lr, betas=(0.9, 0.99))
if loss == "mse":
criterion = nn.MSELoss().to(device)
elif loss == "mae":
criterion = nn.L1Loss().to(device)
self.model.train()
for e in range(epochs):
meters = AverageMeterGroup()
for b, batch in enumerate(data_loader):
optimizer.zero_grad()
input = batch[0].to(device)
target = batch[1].to(device)
prediction = self.model(input).view(-1)
loss_fn = criterion(prediction, target)
# add L1 regularization
params = torch.cat(
[
x[1].view(-1)
for x in self.model.named_parameters()
if x[0] == "out.weight"
]
)
loss_fn += regularization * torch.norm(params, 1)
loss_fn.backward()
optimizer.step()
mse = accuracy_mse(prediction, target)
meters.update(
{"loss": loss_fn.item(), "mse": mse.item()}, n=target.size(0)
)
if verbose and e % 100 == 0:
print("Epoch {}, {}, {}".format(e, meters["loss"], meters["mse"]))
train_pred = np.squeeze(self.query(xtrain))
train_error = np.mean(abs(train_pred - ytrain))
return train_error
def query(self, xtest, info=None, eval_batch_size=None):
if self.encoding_type is not None:
xtest = np.array(
[
encode(arch, encoding_type=self.encoding_type, ss_type=self.ss_type)
for arch in xtest
]
)
X_tensor = torch.FloatTensor(xtest).to(device)
test_data = TensorDataset(X_tensor)
eval_batch_size = len(xtest) if eval_batch_size is None else eval_batch_size
test_data_loader = DataLoader(
test_data, batch_size=eval_batch_size, pin_memory=False
)
self.model.eval()
pred = []
with torch.no_grad():
for _, batch in enumerate(test_data_loader):
prediction = self.model(batch[0].to(device)).view(-1)
pred.append(prediction.cpu().numpy())
pred = np.concatenate(pred)
return np.squeeze(pred)
def set_random_hyperparams(self):
if self.hyperparams is None:
params = self.default_hyperparams.copy()
else:
params = {
"num_layers": int(np.random.choice(range(5, 25))),
"layer_width": int(np.random.choice(range(5, 25))),
"batch_size": 32,
"lr": np.random.choice([0.1, 0.01, 0.005, 0.001, 0.0001]),
"regularization": 0.2,
}
self.hyperparams = params
return params
| 32.466063 | 88 | 0.564739 | 6,588 | 0.918188 | 0 | 0 | 0 | 0 | 0 | 0 | 500 | 0.069686 |
0ec3f2a1fe20def9bc91ffbd4b3742d74abb33b3 | 1,301 | py | Python | pythonforandroid/recipes/libx264/__init__.py | Joreshic/python-for-android | c60e02d2e32e31a3a754838c51e9242cbadcd9e8 | [
"MIT"
] | 1 | 2019-09-03T13:44:06.000Z | 2019-09-03T13:44:06.000Z | pythonforandroid/recipes/libx264/__init__.py | Joreshic/python-for-android | c60e02d2e32e31a3a754838c51e9242cbadcd9e8 | [
"MIT"
] | null | null | null | pythonforandroid/recipes/libx264/__init__.py | Joreshic/python-for-android | c60e02d2e32e31a3a754838c51e9242cbadcd9e8 | [
"MIT"
] | 1 | 2018-11-15T07:58:30.000Z | 2018-11-15T07:58:30.000Z | from pythonforandroid.toolchain import Recipe, shprint, current_directory, ArchARM
from os.path import exists, join, realpath
from os import uname
import glob
import sh
class LibX264Recipe(Recipe):
version = 'x264-snapshot-20170608-2245-stable' # using mirror url since can't use ftp
url = 'http://mirror.yandex.ru/mirrors/ftp.videolan.org/x264/snapshots/{version}.tar.bz2'
md5sum = 'adf3b87f759b5cc9f100f8cf99276f77'
def should_build(self, arch):
build_dir = self.get_build_dir(arch.arch)
return not exists(join(build_dir, 'lib', 'libx264.a'))
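    # build_arch() cross-compiles x264 as a static, PIC-enabled library with asm
    # and the CLI disabled, installing into the recipe's own build directory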
def build_arch(self, arch):
with current_directory(self.get_build_dir(arch.arch)):
env = self.get_recipe_env(arch)
configure = sh.Command('./configure')
shprint(configure,
'--cross-prefix=arm-linux-androideabi-',
'--host=arm-linux',
'--disable-asm',
'--disable-cli',
'--enable-pic',
'--disable-shared',
'--enable-static',
'--prefix={}'.format(realpath('.')),
_env=env)
shprint(sh.make, '-j4', _env=env)
shprint(sh.make, 'install', _env=env)
recipe = LibX264Recipe()
| 37.171429 | 93 | 0.583397 | 1,103 | 0.847809 | 0 | 0 | 0 | 0 | 0 | 0 | 386 | 0.296695 |
0ec3f460313d8f825c0daad58ff5e76ef71c5401 | 1,704 | py | Python | Win/reg.py | QGB/QPSU | 7bc214676d797f42d2d7189dc67c9377bccdf25d | [
"MIT"
] | 6 | 2018-03-25T20:05:21.000Z | 2022-03-13T17:23:05.000Z | Win/reg.py | pen9un/QPSU | 76e1a3f6f6f6f78452e02f407870a5a32177b667 | [
"MIT"
] | 15 | 2018-05-14T03:30:21.000Z | 2022-03-03T15:33:25.000Z | Win/reg.py | pen9un/QPSU | 76e1a3f6f6f6f78452e02f407870a5a32177b667 | [
"MIT"
] | 1 | 2021-07-15T06:23:45.000Z | 2021-07-15T06:23:45.000Z | #coding=utf-8
try:
if __name__.startswith('qgb.Win'):
from .. import py
else:
import py
except Exception as ei:
raise ei
raise EnvironmentError(__name__)
if py.is2():
import _winreg as winreg
from _winreg import *
else:
import winreg
from winreg import *
def get(skey,name,root=HKEY_CURRENT_USER,returnType=True):
''' from qgb.Win import reg
reg.get(r'Software\Microsoft\Windows\CurrentVersion\Internet Settings','ProxyEnable')
reg.get(r'HKLM\SYSTEM\CurrentControlSet\Services\LanmanServer\Parameters\Size' )
There are seven predefined root keys, traditionally named according to their constant handles defined in the Win32 API
	skey must not contain name, otherwise FileNotFoundError: [WinError 2] The system cannot find the file specified.
'''
r = OpenKey(root,skey)
r = QueryValueEx(r,name)
if returnType:return r[0],'{} : {}'.format(REG_TYPE[r[1]],r[1])
else :return r[0]
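# set() auto-detects the winreg value type when 'type' is not given as an int,
# writes the value, then reads it back via get() to confirm the write succeeded,
# e.g. reg.set(r'Software\Microsoft\Windows\CurrentVersion\Internet Settings','ProxyEnable',1)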
def set(skey,name,value,root=HKEY_CURRENT_USER,type='auto,or REG_TYPE int',returnType=True):
r = OpenKey(root,skey,0,KEY_SET_VALUE)
if not py.isint(type):
if py.isint(value):type=4
if py.istr(value):type=1
if py.isbyte(value):type=3 #TODO test,and add more rule
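		# winreg type codes: 1=REG_SZ, 3=REG_BINARY, 4=REG_DWORD (see REG_TYPE below)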
	SetValueEx(r,name,0,type,value)
if get(skey,name,root=root,returnType=False)==value:
		return 'reg.set [{}] {}={} success!'.format(skey[-55:],name,value)
else:
return 'reg.set [{}] {}={} Failed !'.format(skey,name,value)
REG_TYPE={ 0 : 'REG_NONE',
1 : 'REG_SZ',
2 : 'REG_EXPAND_SZ',
3 : 'REG_BINARY',
4 : 'REG_DWORD',
5 : 'REG_DWORD_BIG_ENDIAN',
6 : 'REG_LINK',
7 : 'REG_MULTI_SZ',
8 : 'REG_RESOURCE_LIST',
9 : 'REG_FULL_RESOURCE_DESCRIPTOR',
10: 'REG_RESOURCE_REQUIREMENTS_LIST',
11: 'REG_QWORD'}
| 29.894737 | 119 | 0.693662 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 766 | 0.44023 |
0ec42be3581d1c9d5c9ab4c954473cd6061146b5 | 3,555 | py | Python | tests/test_handler.py | CJSoldier/webssh | b3c33ff6bd76f4f5df40cc1fe9a138cf0cecd08c | [
"MIT"
] | 13 | 2018-09-16T15:51:38.000Z | 2019-10-16T09:13:18.000Z | tests/test_handler.py | CJSoldier/webssh | b3c33ff6bd76f4f5df40cc1fe9a138cf0cecd08c | [
"MIT"
] | null | null | null | tests/test_handler.py | CJSoldier/webssh | b3c33ff6bd76f4f5df40cc1fe9a138cf0cecd08c | [
"MIT"
] | null | null | null | import unittest
import paramiko
from tornado.httputil import HTTPServerRequest
from tests.utils import read_file, make_tests_data_path
from webssh.handler import MixinHandler, IndexHandler, InvalidValueError
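# These tests cover MixinHandler's proxy-header client address parsing and
# IndexHandler's private key loading and validation helpers.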
class TestMixinHandler(unittest.TestCase):
def test_get_real_client_addr(self):
handler = MixinHandler()
handler.request = HTTPServerRequest(uri='/')
self.assertIsNone(handler.get_real_client_addr())
ip = '127.0.0.1'
handler.request.headers.add('X-Real-Ip', ip)
self.assertEqual(handler.get_real_client_addr(), False)
handler.request.headers.add('X-Real-Port', '12345x')
self.assertEqual(handler.get_real_client_addr(), False)
handler.request.headers.update({'X-Real-Port': '12345'})
self.assertEqual(handler.get_real_client_addr(), (ip, 12345))
handler.request.headers.update({'X-Real-ip': None})
self.assertEqual(handler.get_real_client_addr(), False)
handler.request.headers.update({'X-Real-Port': '12345x'})
self.assertEqual(handler.get_real_client_addr(), False)
class TestIndexHandler(unittest.TestCase):
def test_get_specific_pkey_with_plain_key(self):
fname = 'test_rsa.key'
cls = paramiko.RSAKey
key = read_file(make_tests_data_path(fname))
pkey = IndexHandler.get_specific_pkey(cls, key, None)
self.assertIsInstance(pkey, cls)
        pkey = IndexHandler.get_specific_pkey(cls, key, 'ignored')
self.assertIsInstance(pkey, cls)
pkey = IndexHandler.get_specific_pkey(cls, 'x'+key, None)
self.assertIsNone(pkey)
def test_get_specific_pkey_with_encrypted_key(self):
fname = 'test_rsa_password.key'
cls = paramiko.RSAKey
password = 'television'
key = read_file(make_tests_data_path(fname))
pkey = IndexHandler.get_specific_pkey(cls, key, password)
self.assertIsInstance(pkey, cls)
pkey = IndexHandler.get_specific_pkey(cls, 'x'+key, None)
self.assertIsNone(pkey)
with self.assertRaises(paramiko.PasswordRequiredException):
pkey = IndexHandler.get_specific_pkey(cls, key, None)
def test_get_pkey_obj_with_plain_key(self):
fname = 'test_ed25519.key'
cls = paramiko.Ed25519Key
key = read_file(make_tests_data_path(fname))
pkey = IndexHandler.get_pkey_obj(key, None, fname)
self.assertIsInstance(pkey, cls)
        pkey = IndexHandler.get_pkey_obj(key, 'ignored', fname)
self.assertIsInstance(pkey, cls)
with self.assertRaises(InvalidValueError) as exc:
pkey = IndexHandler.get_pkey_obj('x'+key, None, fname)
self.assertIn('Invalid private key', str(exc))
def test_get_pkey_obj_with_encrypted_key(self):
fname = 'test_ed25519_password.key'
password = 'abc123'
cls = paramiko.Ed25519Key
key = read_file(make_tests_data_path(fname))
pkey = IndexHandler.get_pkey_obj(key, password, fname)
self.assertIsInstance(pkey, cls)
with self.assertRaises(InvalidValueError) as exc:
pkey = IndexHandler.get_pkey_obj(key, 'wrongpass', fname)
self.assertIn('Wrong password', str(exc))
with self.assertRaises(InvalidValueError) as exc:
pkey = IndexHandler.get_pkey_obj('x'+key, password, fname)
self.assertIn('Invalid private key', str(exc))
with self.assertRaises(paramiko.PasswordRequiredException):
pkey = IndexHandler.get_pkey_obj(key, '', fname)
| 40.397727 | 72 | 0.686639 | 3,340 | 0.939522 | 0 | 0 | 0 | 0 | 0 | 0 | 303 | 0.085232 |
0ec517fad6215e10cf8fdc40288d6f1a4376050d | 17,499 | py | Python | apps/notifications/tests/test_views.py | SCiO-systems/qcat | 8c2b8e07650bc2049420fa6de758fba7e50c2f28 | [
"Apache-2.0"
] | null | null | null | apps/notifications/tests/test_views.py | SCiO-systems/qcat | 8c2b8e07650bc2049420fa6de758fba7e50c2f28 | [
"Apache-2.0"
] | null | null | null | apps/notifications/tests/test_views.py | SCiO-systems/qcat | 8c2b8e07650bc2049420fa6de758fba7e50c2f28 | [
"Apache-2.0"
] | null | null | null | import logging
from unittest import mock
from unittest.mock import call
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.signing import Signer
from django.urls import reverse
from django.http import Http404
from django.test import RequestFactory
from braces.views import LoginRequiredMixin
from django.test import override_settings
from model_mommy import mommy
from apps.notifications.models import Log, StatusUpdate, MemberUpdate, ReadLog, \
ActionContextQuerySet
from apps.notifications.views import LogListView, LogCountView, ReadLogUpdateView, \
LogQuestionnairesListView, LogInformationUpdateCreateView, \
LogSubscriptionPreferencesView, SignedLogSubscriptionPreferencesView
from apps.qcat.tests import TestCase
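# The tests below build Log/StatusUpdate/MemberUpdate fixtures with model_mommy
# and exercise the notification list, read-state, count and preference views.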
class LogListViewTest(TestCase):
def setUp(self):
self.view = LogListView()
self.url_path = reverse('notification_partial_list')
self.request = RequestFactory().get(self.url_path)
self.user = {}
self.request.user = self.user
self.view_instance = self.setup_view(
view=self.view, request=self.request
)
member_add_log = mommy.make(
_model=Log,
id=8,
action=settings.NOTIFICATIONS_ADD_MEMBER
)
self.change_log = mommy.make(
_model=Log,
id=42,
action=settings.NOTIFICATIONS_CHANGE_STATUS
)
mommy.make(_model=StatusUpdate, log=self.change_log)
mommy.make(_model=MemberUpdate, log=member_add_log)
def get_view_with_get_querystring(self, param):
request = RequestFactory().get(
'{url}?{param}'.format(url=self.url_path, param=param)
)
request.user = self.user
return self.setup_view(view=self.view, request=request)
def test_force_login(self):
self.assertIsInstance(self.view_instance, LoginRequiredMixin)
def test_queryset_method(self):
self.assertEqual(
self.view_instance.queryset_method,
'user_log_list'
)
def test_queryset_method_pending(self):
self.assertEqual(
self.get_view_with_get_querystring('is_pending').queryset_method,
'user_pending_list'
)
def test_get_paginate_by(self):
self.assertEqual(
self.view_instance.get_paginate_by(None),
settings.NOTIFICATIONS_LIST_PAGINATE_BY
)
def test_get_paginate_by_teaser(self):
self.assertEqual(
self.get_view_with_get_querystring('is_teaser').get_paginate_by(None),
settings.NOTIFICATIONS_TEASER_PAGINATE_BY
)
@mock.patch('apps.notifications.views.Log.actions.user_log_list')
def test_get_queryset(self, mock_actions):
self.view_instance.get_queryset()
mock_actions.assert_called_once_with(user={})
@mock.patch('apps.notifications.views.Log.actions.user_pending_list')
def test_get_queryset_pending(self, mock_actions):
self.get_view_with_get_querystring('is_pending').get_queryset()
mock_actions.assert_called_once_with(user={})
@mock.patch.object(LogListView, 'add_user_aware_data')
def test_get_context_data_logs(self, mock_add_user_aware_data):
self.view_instance.object_list = 'foo'
self.view_instance.get_context_data()
mock_add_user_aware_data.assert_called_once_with('foo')
def _test_add_user_aware_data(self):
# for faster tests, mock all the elements. elements are created here
# as this makes the tests more readable.
pth = 'apps.notifications.views.Log.actions'
with mock.patch('{}.read_id_list'.format(pth)) as read_id_list:
read_id_list.return_value = [42]
with mock.patch('{}.user_pending_list'.format(pth)) as pending:
pending.values_list.return_value = [8, 42]
logs = Log.objects.all()
return list(self.view_instance.add_user_aware_data(logs))
def test_add_user_aware_data_keys(self):
data_keys = self._test_add_user_aware_data()[0].keys()
for key in ['id', 'created', 'text', 'is_read', 'is_todo', 'edit_url']:
self.assertTrue(key in data_keys)
def test_add_user_aware_data_is_read(self):
data = self._test_add_user_aware_data()
# logs are ordered by creation date - 42 is the newer one
self.assertTrue(data[0]['is_read'])
def test_add_user_aware_data_is_not_read(self):
data = self._test_add_user_aware_data()
self.assertFalse(data[1]['is_read'])
#def test_add_user_aware_data_is_todo(self):
# data = self._test_add_user_aware_data()
# self.assertTrue(data[1]['is_todo'])
def test_add_user_aware_data_is_not_todo(self):
data = self._test_add_user_aware_data()
self.assertFalse(data[0]['is_todo'])
@override_settings(NOTIFICATIONS_ACTIONS={'foo': 'bar', 'result': '42'})
def test_statuses_in_context(self):
self.view_instance.object_list = []
context = self.view_instance.get_context_data()
self.assertDictEqual(
context['statuses'],
{'foo': 'bar', 'result': '42'}
)
@mock.patch('apps.notifications.views.Log.actions.user_log_list')
def test_status_filter_queryset(self, mock_user_log_list):
mock_user_log_list.return_value = []
self.assertEqual(
[], self.view_instance.get_queryset()
)
@mock.patch('apps.notifications.views.Log.actions.user_log_list')
def test_status_filter_queryset_for_status(self, mock_user_log_list):
mock_user_log_list.return_value = Log.objects.filter()
view = self.view
view.get_statuses = mock.MagicMock(return_value=[3])
view_instance = self.setup_view(
view=view, request=self.request
)
self.assertQuerysetEqual(
view_instance.get_queryset(),
[self.change_log.id],
transform=lambda item: item.id
)
def test_get_status_invalid(self):
request = RequestFactory().get('{}?statuses=foo'.format(self.url_path))
view = self.setup_view(self.view, request)
self.assertEqual(view.get_statuses(), [])
@override_settings(NOTIFICATIONS_ACTIONS={'2': 'bar'})
def test_get_status_invalid_config(self):
request = RequestFactory().get('{}?statuses=1'.format(self.url_path))
view = self.setup_view(self.view, request)
self.assertEqual(view.get_statuses(), [])
def test_get_status_valid(self):
request = RequestFactory().get('{}?statuses=1,2,3'.format(self.url_path))
view = self.setup_view(self.view, request)
self.assertEqual(view.get_statuses(), [1, 2, 3])
class ReadLogUpdateViewTest(TestCase):
def setUp(self):
self.view = ReadLogUpdateView()
self.request = RequestFactory().post(
reverse('notification_read'),
data={'user': 123, 'log': 'log', 'checked': 'true'}
)
self.user = mock.MagicMock(id=123)
self.request.user = self.user
self.view_instance = self.setup_view(view=self.view, request=self.request)
def test_validate_data_all_keys(self):
self.assertFalse(
self.view_instance.validate_data()
)
def test_validate_data_id_type(self):
self.assertFalse(
self.view_instance.validate_data(checked='1', log='1', user='foo')
)
def test_validate_data_invalid_user(self):
self.assertFalse(
self.view_instance.validate_data(checked='456', log='1', user='456')
)
def test_validate_data_valid(self):
self.assertTrue(
self.view_instance.validate_data(checked='1', log='1', user='123')
)
@mock.patch('apps.notifications.views.ReadLog.objects.update_or_create')
def test_post_valid_checked(self, mock_get_or_create):
self.view_instance.post(request=self.request)
mock_get_or_create.assert_called_once_with(
user_id='123', log_id='log', defaults={'is_read': True}
)
@mock.patch('apps.notifications.views.ReadLog.objects.update_or_create')
def test_post_valid_unchecked(self, mock_get_or_create):
request = RequestFactory().post(
reverse('notification_read'),
data={'user': 123, 'log': 'log', 'checked': 'false'}
)
self.view_instance.post(request=request)
mock_get_or_create.assert_called_once_with(
user_id='123', log_id='log', defaults={'is_read': False}
)
@mock.patch.object(ReadLogUpdateView, 'validate_data')
def test_post_invalid(self, mock_validate_data):
logging.disable(logging.CRITICAL)
mock_validate_data.return_value = False
with self.assertRaises(Http404):
self.view_instance.post(request=self.request)
class LogCountViewTest(TestCase):
def setUp(self):
super().setUp()
self.request = RequestFactory().get(reverse('notification_new_count'))
self.request.user = mommy.make(_model=get_user_model())
self.view = self.setup_view(view=LogCountView(), request=self.request)
mommy.make(
_model=Log,
catalyst=self.request.user,
action=settings.NOTIFICATIONS_CHANGE_STATUS,
_quantity=4
)
mommy.make(
_model=Log,
catalyst=self.request.user,
action=settings.NOTIFICATIONS_EDIT_CONTENT,
_quantity=2
)
@mock.patch('apps.notifications.views.Log.actions.only_unread_logs')
def test_get_unread_only(self, mock_only_unread_logs):
self.view.get(request=self.request)
mock_only_unread_logs.assert_called_once_with(
user=self.request.user
)
def test_log_count(self):
response = self.view.get(request=self.request)
self.assertEqual(response.content, b'4')
def test_log_count_one_read(self):
mommy.make(
_model=ReadLog,
log=Log.objects.filter(action=settings.NOTIFICATIONS_CHANGE_STATUS).first(),
user=self.request.user,
is_read=True
)
response = self.view.get(request=self.request)
self.assertEqual(response.content, b'3')
class LogQuestionnairesListViewTest(TestCase):
def setUp(self):
super().setUp()
self.request = RequestFactory().get(reverse('notification_questionnaire_logs'))
self.request.user = 'foo'
self.view = self.setup_view(view=LogQuestionnairesListView(), request=self.request)
@mock.patch.object(ActionContextQuerySet, 'user_log_list')
def test_get_questionnaire_logs(self, mock_user_log_list):
self.view.get_questionnaire_logs('foo')
mock_user_log_list.assert_called_once_with(user='foo')
@mock.patch.object(LogQuestionnairesListView, 'get_questionnaire_logs')
def test_get(self, mock_get_questionnaire_logs):
mock_get_questionnaire_logs.return_value = ['foo_1', 'foo_2', 'bar_3']
response = self.view.get(self.request)
self.assertEqual(
response.content, b'{"questionnaires": ["bar_3", "foo_1", "foo_2"]}'
)
class LogInformationUpdateCreateViewTest(TestCase):
def setUp(self):
super().setUp()
self.url = reverse('notification_inform_compiler')
self.view = LogInformationUpdateCreateView()
self.request = RequestFactory().get(self.url)
self.request.user = 'foo'
self.view = self.setup_view(view=self.view, request=self.request)
def test_get_compiler_query(self):
questionnaire = mock.MagicMock()
self.view.get_compiler(questionnaire)
self.assertEqual(
questionnaire.method_calls[0],
call.questionnairemembership_set.get(role='compiler')
)
def test_get_compiler(self):
sentinel = mock.sentinel
questionnaire = mock.MagicMock()
questionnaire.questionnairemembership_set.get.return_value = sentinel
self.assertEqual(
self.view.get_compiler(questionnaire),
sentinel.user
)
@mock.patch('apps.notifications.views.query_questionnaire')
def test_get_questionnaire(self, mock_query_questionnaire):
one_questionnaire = mock.MagicMock()
one_questionnaire.first = lambda : 'foo'
mock_query_questionnaire.return_value = one_questionnaire
self.assertEqual(
self.view.get_questionnaire('foo'), 'foo'
)
@mock.patch('apps.notifications.views.query_questionnaire')
def test_get_questionnaire_raises(self, mock_query_questionnaire):
not_exists = mock.MagicMock()
not_exists.exists = lambda : False
mock_query_questionnaire.return_value = not_exists
with self.assertRaises(Http404):
self.view.get_questionnaire('foo')
@mock.patch('apps.notifications.views.query_questionnaire')
def test_get_questionnaire_calls_filter(self, mock_query_questionnaire):
self.view.get_questionnaire('foo')
mock_query_questionnaire.assert_called_once_with(
identifier='foo', request=self.request
)
@override_settings(NOTIFICATIONS_FINISH_EDITING='setting')
@mock.patch.object(LogInformationUpdateCreateView, 'get_questionnaire')
@mock.patch.object(LogInformationUpdateCreateView, 'get_compiler')
def test_post(self, mock_get_compiler, mock_get_questionnaire):
compiler = mock.MagicMock()
mock_get_questionnaire.return_value = mock.sentinel.questionnaire
mock_get_compiler.return_value = compiler
request = RequestFactory().post(self.url, data={
'identifier': 'foo',
'message': 'bar'
})
with mock.patch('apps.notifications.views.InformationLog') as mock_create:
self.setup_view(view=self.view, request=self.request).post(request)
mock_create.assert_called_once_with(
action='setting',
questionnaire=mock.sentinel.questionnaire,
receiver=compiler,
sender='foo'
)
class LogSubscriptionPreferencesMixinTest(TestCase):
def setUp(self):
self.url = reverse('notification_preferences')
self.view = LogSubscriptionPreferencesView()
self.request = RequestFactory().get(self.url)
self.user = mommy.make(_model=get_user_model())
self.obj = self.user.mailpreferences
self.request.user = self.user
self.request._messages = mock.MagicMock()
self.view = self.setup_view(view=self.view, request=self.request)
self.view.object = self.obj
def test_get_initial(self):
self.obj.wanted_actions = 'some,thing,yay'
self.assertEqual(
['some', 'thing', 'yay'],
self.view.get_initial()['wanted_actions']
)
def test_get_form_valid_changed_language(self):
self.view.object = mock.MagicMock()
self.view.object.has_changed_language = False
form = mock.MagicMock()
form.changed_data = ['language']
self.view.form_valid(form)
self.assertTrue(self.view.object.has_changed_language)
def test_get_form_valid_message(self):
self.view.form_valid(mock.MagicMock())
self.assertTrue(self.request._messages.method_calls)
class SignedLogSubscriptionPreferencesViewTest(TestCase):
def setUp(self):
self.user = mommy.make(_model=get_user_model())
self.obj = self.user.mailpreferences
self.view = SignedLogSubscriptionPreferencesView()
self.request = RequestFactory().get(str(self.obj.get_signed_url()))
self.request._messages = mock.MagicMock()
self.view = self.setup_view(view=self.view, request=self.request)
self.view.object = self.obj
def test_get_success_url_signed(self):
mock_user = mock.MagicMock(return_value=self.user)
mock_user.is_authenticated = False
mock_user.id = self.user.id
self.request.user = mock_user
self.assertEqual(
self.view.get_success_url(),
self.obj.get_signed_url()
)
def test_get_success_url_user(self):
self.request.user = self.user
self.assertEqual(
self.view.get_success_url(),
reverse('notification_preferences')
)
def test_get_object_user(self):
self.request.user = self.user
self.assertEqual(
self.view.get_object(),
self.obj
)
def test_get_signed_object(self):
mock_user = mock.MagicMock(return_value=self.user)
mock_user.is_authenticated = False
mock_user.id=self.user.id
self.request.user = mock_user
self.view.kwargs['token'] = mock.MagicMock()
with mock.patch.object(Signer, 'unsign') as mock_unsign:
mock_unsign.return_value = self.obj.id
self.assertEqual(
self.view.get_object(), self.obj
)
mock_unsign.assert_called_with(self.view.kwargs['token'])
def test_get_signed_object_404(self):
mock_user = mock.MagicMock(return_value=self.user)
mock_user.is_authenticated = False
mock_user.id = self.user.id
self.request.user = mock_user
self.view.kwargs['token'] = mock.MagicMock()
with self.assertRaises(Http404):
self.view.get_object()
| 37.958785 | 91 | 0.66821 | 16,698 | 0.954226 | 0 | 0 | 6,076 | 0.34722 | 0 | 0 | 1,946 | 0.111206 |
0ec65d0e2393fe675648f46032adc3e480a8ef52 | 1,032 | py | Python | examples/resources.py | willvousden/clint | 6dc7ab1a6a162750e968463b43994447bca32544 | [
"0BSD"
] | 1,230 | 2015-01-03T05:39:25.000Z | 2020-02-18T12:36:03.000Z | examples/resources.py | willvousden/clint | 6dc7ab1a6a162750e968463b43994447bca32544 | [
"0BSD"
] | 50 | 2015-01-06T17:58:20.000Z | 2018-03-19T13:25:22.000Z | examples/resources.py | willvousden/clint | 6dc7ab1a6a162750e968463b43994447bca32544 | [
"0BSD"
] | 153 | 2015-01-03T03:56:25.000Z | 2020-02-13T20:59:03.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
from clint import resources
resources.init('kennethreitz', 'clint')
lorem = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
print('%s created.' % resources.user.path)
resources.user.write('lorem.txt', lorem)
print('lorem.txt created')
assert resources.user.read('lorem.txt') == lorem
print('lorem.txt has correct contents')
resources.user.delete('lorem.txt')
print('lorem.txt deleted')
assert resources.user.read('lorem.txt') == None
print('lorem.txt deletion confirmed')
| 33.290323 | 456 | 0.767442 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 674 | 0.653101 |
0ec667f34cc8524a0bd9453e82114220e88aef5a | 813 | py | Python | photos/urls.py | charlesmugambi/Instagram | 3a9dfc32c45bf9f221b22b7075ce31b1a16dcba7 | [
"MIT"
] | null | null | null | photos/urls.py | charlesmugambi/Instagram | 3a9dfc32c45bf9f221b22b7075ce31b1a16dcba7 | [
"MIT"
] | null | null | null | photos/urls.py | charlesmugambi/Instagram | 3a9dfc32c45bf9f221b22b7075ce31b1a16dcba7 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^image/$', views.add_image, name='upload_image'),
url(r'^profile/$', views.profile_info, name='profile'),
url(r'^update/$', views.profile_update, name='update'),
url(r'^comment/(?P<image_id>\d+)', views.comment, name='comment'),
url(r'^search/', views.search_results, name = 'search_results'),
url(r'^follow/(?P<user_id>\d+)', views.follow, name = 'follow'),
url(r'^unfollow/(?P<user_id>\d+)', views.unfollow, name='unfollow'),
url(r'^likes/(\d+)/$', views.like_images,name='likes')
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 42.789474 | 80 | 0.675277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 242 | 0.297663 |
0ec7068e816bc6b2d31f51831d9d75f6ffc1151c | 11,247 | py | Python | bread.py | vgfang/breadbot | e58807431945e6d4de8dfc6c4dc4c90caebf88ca | [
"MIT"
] | null | null | null | bread.py | vgfang/breadbot | e58807431945e6d4de8dfc6c4dc4c90caebf88ca | [
"MIT"
] | null | null | null | bread.py | vgfang/breadbot | e58807431945e6d4de8dfc6c4dc4c90caebf88ca | [
"MIT"
] | null | null | null | import random
import math
from fractions import Fraction
from datetime import datetime
from jinja2 import Template
# empty class for passing to template engine
class Recipe:
def __init__(self):
return
# returns flour percent using flour type
def get_special_flour_percent(flourType: str, breadFlourPercent:int) -> int:
if flourType == 'Hard Red Whole Wheat' or flourType == 'Hard White Wheat':
percentages = [0,25,30,35,40,45,50]
percentages = list(filter(lambda x: 100-breadFlourPercent >= x, percentages))
return random.choice(percentages)
elif flourType == 'Rye' and breadFlourPercent >= 75:
percentages = [0,10,15,20]
percentages = list(filter(lambda x: 100-breadFlourPercent >= x, percentages))
return random.choice(percentages)
else:
		percentages = [0,10,15,20,25,30]
percentages = list(filter(lambda x: 100-breadFlourPercent >= x, percentages))
return random.choice(percentages)
# returns multiplied spoon units from teaspoon fraction input, 3 tsp = 1 tbsp
def spoon_mult(tsp: Fraction, multiplier: float) -> str:
tsp *= Fraction(multiplier)
spoonString = ""
if tsp >= 3: # use tablespoons
tablespoons = int(tsp // 3)
remainder = (tsp % 3) / 3
if tablespoons != 0:
spoonString += f"{tablespoons} "
if remainder.numerator != 0:
spoonString += f"{remainder.numerator}/{remainder.denominator} "
return f"{spoonString}tbsp"
else:
teaspoons = int(tsp // 1)
remainder = tsp % 1
if teaspoons != 0:
spoonString += f"{teaspoons} "
if remainder.numerator != 0:
spoonString += f"{remainder.numerator}/{remainder.denominator} "
return f"{spoonString}tsp"
# returns amount given the type of flavoring(spices)
def get_flavor_amount(flavor: str, flourAmount: int) -> str:
colorsDict = {}
scale = 4 # floors to the 500g/scale for clean fractional multiplication
multiplier = math.floor(flourAmount/500*scale) / scale
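	# e.g. flourAmount=750 gives multiplier=1.5; the base amounts below are all
	# specified for 500 g of flour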
# flavors in category
red = ('Cardamom', 'Nutmeg','Hazelnut','Almond','Lemon Extract','Peppermint')
blue = ('Cinnamon', 'Allspice')
green = ('Vanilla', 'Instant Coffee')
purple = ('Orange Zest', 'Lime Zest', 'Lemon Zest', 'Ginger')
orange = ('Lavender', 'Hojicha', 'Matcha', 'Earl Grey', 'Oolong')
# default possible teaspoon values list for flour = 500, 3 tsp = 1 tbsp
redAmt = list(map(Fraction, [1/4, 1/2]))
blueAmt = list(map(Fraction, [1/4, 1/2, 1]))
greenAmt = list(map(Fraction, [1/2, 1, 3/2]))
purpleAmt = list(map(Fraction, [2, 3, 9/2]))
orangeAmt = list(map(Fraction, [9]))
# random tablespoons
colorsDict[red] = list(map(lambda x: spoon_mult(x, multiplier), redAmt))
colorsDict[blue] = list(map(lambda x: spoon_mult(x, multiplier), blueAmt))
colorsDict[green] = list(map(lambda x: spoon_mult(x, multiplier), greenAmt))
colorsDict[purple] = list(map(lambda x: spoon_mult(x, multiplier), purpleAmt))
colorsDict[orange] = list(map(lambda x: spoon_mult(x, multiplier), orangeAmt))
for color in colorsDict.keys():
if flavor in color:
return random.choice(colorsDict[color])
# print("Error in Flavor Input: " + flavor)
return "get_flavor_amount wrong input"
# returns list of spices using number of spices
def get_spices(spicesNum: int) -> [str]:
spicesList = ['Cinnamon', 'Allspice', 'Cardamom', 'Nutmeg']
if spicesNum > len(spicesList):
print("WARNING: spicesNum exceeds spices of num")
return spicesList
if spicesNum == 1:
return random.sample(['Cinnamon', 'Cardamom'], 1)
return random.sample(spicesList, spicesNum)
# check if extract is nut
def is_nut(extract: str) -> bool:
nuts = ['Hazelnut','Almond']
return extract in nuts
# checks if extract1 and extract2 are both allowed based on zest/extract same flavor
def zest_extract_same_flavor(extract1: str, extract2: str) -> bool:
if extract1 == extract2:
return False
e1 = extract1.split(" ") # may need to change if new types are added
e2 = extract2.split(" ")
if len(e1) != 2 or len(e2) != 2:
return False
if e1[0]==e2[0] and 'Zest' in [e1[1],e2[1]] and 'Extract' in [e1[1],e2[1]]:
return True
return False
# return list of extracts using number of extracts
def get_extracts(extractsNum: int) -> [str]:
if extractsNum == 0:
return []
allowedExtracts = ['Vanilla', 'Hazelnut', 'Almond', 'Lemon Extract', 'Peppermint',
'Orange Zest', 'Lime Zest', 'Lemon Zest', 'Ginger']
# if more than one, vanilla must be included
currentExtracts = ['Vanilla']
allowedExtracts.remove('Vanilla')
extractsLeft = extractsNum-1
while extractsLeft > 0:
if len(allowedExtracts) <= 0:
print("Incorrecnt number of extracts")
return "Incorrecnt number of extracts"
newExtract = random.choice(allowedExtracts)
# one nut at a time
if True in map(is_nut, currentExtracts) and is_nut(newExtract):
allowedExtracts.remove(newExtract)
continue # skips decrement, try again
		# no zest + extract combination of the same flavor
for currentExtract in currentExtracts:
exit = False
if zest_extract_same_flavor(currentExtract, newExtract):
allowedExtracts.remove(newExtract)
exit = True # skips decrement, try again
if exit:
continue
# passed restraints, remove it from allowed
currentExtracts.append(newExtract)
if newExtract in allowedExtracts:
allowedExtracts.remove(newExtract)
extractsLeft -= 1
return currentExtracts
# return percentage of enrichment
def get_enrichment_percent(enrichment: str) -> int:
if enrichment == 'Cream Cheese':
return 10
return 5
# return liquid percent from liquid tpye
def get_liquid_percent(liquidType: str) -> int:
if liquidType in ['Heavy Cream', 'Coconut Milk']:
return 13
elif liquidType in ['Cow Milk']:
return 63
# print("Error in liquidType input.")
return -1
# return fruit puree fruit choice(s), omitted fruit chance weighting for now
def get_fruit_purees() -> [str]:
fruitPureesNum = random.randint(1,2)
fruitPureesChoices = ['Banana','Apple','Cherry','Strawberry','Fig','Mango']
return random.sample(fruitPureesChoices, fruitPureesNum)
# return fruit puree percent from 0-2 fruitPurees using random generation
def get_fruit_purees_percent(fruitPurees) -> [float]:
totalFruitPureePercent = random.choice([25,30,35,40,45,50])
fruitPureeNum = len(fruitPurees)
if fruitPureeNum == 1:
return [totalFruitPureePercent]
elif fruitPureeNum == 2:
firstPercent = random.randint(0,totalFruitPureePercent)
return [firstPercent, totalFruitPureePercent - firstPercent]
return [0]
# returns rounded gram conversion from percent, used in template
def to_g(flourMl, percent) -> int:
return round(flourMl * percent/100)
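# e.g. to_g(500, 63) -> 315, i.e. 315 g of liquid for 500 g of flour at 63% hydration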
# takes filename and writes an html recipe file
def generate_recipe(breadname: str, filename: str, flourGramInput: int) -> str:
# ALL NUMBERICAL VALUES REPRESENT PERCENTAGES
r = Recipe()
r.breadname = breadname
r.totalFlourGrams = flourGramInput
r.totalLiquidPercent = 63
r.preferment = random.choice(['Poolish', 'None'])
r.breadFlourPercent = random.choice([75, 50])
# FLOUR STYLE
r.breadShape = random.choice(['Pullman', 'Regular'])
# FLOUR TYPES
r.specialFlour = random.choice([
'Einkorn',
'Khorasan',
'Spelt',
'Emmer',
'Semolina (Durum)',
'Hard Red Whole Wheat',
'Regular Whole Wheat',
'Hard White Wheat',
'Rye'
])
r.specialFlourPercent = get_special_flour_percent(r.specialFlour, r.breadFlourPercent)
r.whiteFlourPercent = 100 - r.breadFlourPercent - r.specialFlourPercent
# SPICES/FLAVORING
spicesNum = random.randint(0,4)
r.spices = get_spices(spicesNum)
extractsNum = random.randint(0,3)
r.extracts = get_extracts(extractsNum)
teaList = ['Lavender', 'Hojicha', 'Matcha', 'Earl Grey', 'Oolong', 'Instant Coffee']
r.tea = random.choice(teaList)
# illegal with fruit purees and all extracts but ginger, almond, and hazelnut
# BASIC INGREDIENTS
r.sugar = random.choice(['Brown Sugar','White Sugar','Honey','Molasses'])
r.sugarPercent = random.choice([5,10,15])
r.salt = 'Table Salt'
r.saltPercent = random.choice([1,1.5,2])
r.yeast = random.choice(['Instant Yeast','Active Yeast'])
r.yeastPercent = 0.62
	# ENRICHMENTS – all 5%, only one chosen
enrichmentList = ['Olive Oil','Butter','Cream Cheese','Coconut oil']
if r.tea == 'Instant Coffee':
enrichmentList.remove('Olive Oil')
r.enrichment = random.choice(enrichmentList)
r.enrichmentPercent = get_enrichment_percent(r.enrichment)
if r.enrichment == 'Cream Cheese':
r.totalLiquidPercent -= 5
# LIQUIDS
# cap total liquid at 60% when these sugars are used
if r.sugar in ['Honey', 'Molasses']:
r.totalLiquidPercent = 60
# cow milk only if there is no preferemnt
viableLiquids = ['Heavy Cream', 'Coconut Milk', 'Cow Milk']
if r.preferment != 'None':
viableLiquids.remove('Cow Milk')
r.liquid = random.choice(viableLiquids)
r.liquidPercent = get_liquid_percent(r.liquid)
## LIQUIDS - FRUIT PUREE
r.fruitPurees = []
r.fruitPureesPercent = []
if r.preferment != 'Poolish':
# 50 percent chance to include
# sugar reduction by 5 percent
r.sugarPercent -= 5
r.fruitPurees = get_fruit_purees()
r.fruitPureesPercent = get_fruit_purees_percent(r.fruitPurees)
# account for cow milk
r.liquidPercent = min(r.liquidPercent, r.totalLiquidPercent - sum(r.fruitPureesPercent))
r.waterPercent = max(0, r.totalLiquidPercent - sum(r.fruitPureesPercent) - r.liquidPercent)
# BICOLOR ROLL
r.isBicolorRoll = False
if len(r.fruitPureesPercent) > 0 or r.tea in ['Lavender', 'Hojicha', 'Matcha', 'Earl Grey', 'Oolong']:
r.isBicolorRoll = random.choice([True,False])
# COCOA POWDER
r.cocoaPowderPercent = 0
cocoaPowderAllowedExtracts = ['Ginger', 'Almond', 'Hazelnut']
if r.fruitPurees == [] and any(not x in cocoaPowderAllowedExtracts for x in r.extracts): # allowed
if random.randint(0,2) == 0:
r.tea = '' # removes tea
r.cocoaPowderPercent = round(random.choice([5,10])/100 * r.whiteFlourPercent,1)
r.whiteFlourPercent = round(r.whiteFlourPercent - r.cocoaPowderPercent,1)
# WRITE FORMAT
time = datetime.now()
r.datetime = time.strftime('%A, %b %d %Y')
templateFile = open("./template.html")
templateString = templateFile.read()
	## Conversion to grams for percentages
r.totalLiquidGrams = to_g(r.totalFlourGrams, r.totalLiquidPercent)
r.breadFlourGrams = to_g(r.totalFlourGrams, r.breadFlourPercent)
r.specialFlourGrams = to_g(r.totalFlourGrams, r.specialFlourPercent)
r.whiteFlourGrams = to_g(r.totalFlourGrams, r.whiteFlourPercent)
r.sugarGrams = to_g(r.totalFlourGrams, r.sugarPercent)
r.saltGrams = to_g(r.totalFlourGrams, r.saltPercent)
r.yeastGrams = to_g(r.totalFlourGrams, r.yeastPercent)
r.spicesAmt = list(map(lambda x: get_flavor_amount(x, r.totalFlourGrams), r.spices))
r.extractsAmt = list(map(lambda x: get_flavor_amount(x, r.totalFlourGrams), r.extracts))
r.teaAmt = get_flavor_amount(r.tea, r.totalFlourGrams)
r.enrichmentGrams = to_g(r.totalFlourGrams, r.enrichmentPercent)
r.waterGrams = to_g(r.totalFlourGrams, r.waterPercent)
r.liquidGrams = to_g(r.totalFlourGrams, r.liquidPercent)
r.fruitPureesGrams = list(map(lambda x: to_g(r.totalFlourGrams,x), r.fruitPureesPercent))
r.cocoaPowderGrams = round(r.cocoaPowderPercent/100 * r.totalFlourGrams)
template = Template(templateString)
htmlString = template.render(r = r)
outfile = open(f'{filename}', 'w')
outfile.write(htmlString)
outfile.close()
templateFile.close()
return htmlString | 36.996711 | 103 | 0.727927 | 43 | 0.003823 | 0 | 0 | 0 | 0 | 0 | 0 | 3,258 | 0.289626 |
0ec7b7a0dee386c8044a5e357cb59fce0132a0cf | 19,177 | py | Python | posthog/api/test/test_organization_domain.py | msnitish/posthog | cb86113f568e72eedcb64b5fd00c313d21e72f90 | [
"MIT"
] | null | null | null | posthog/api/test/test_organization_domain.py | msnitish/posthog | cb86113f568e72eedcb64b5fd00c313d21e72f90 | [
"MIT"
] | null | null | null | posthog/api/test/test_organization_domain.py | msnitish/posthog | cb86113f568e72eedcb64b5fd00c313d21e72f90 | [
"MIT"
] | null | null | null | import datetime
from unittest.mock import patch
import dns.resolver
import dns.rrset
import pytest
import pytz
from django.utils import timezone
from freezegun import freeze_time
from rest_framework import status
from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team
from posthog.test.base import APIBaseTest, BaseTest
class FakeAnswer(object):
def __init__(self, answer):
self.answer = answer
class FakeDNSResponse(object):
def __init__(self, answer):
self.response = FakeAnswer(answer)
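# Minimal stand-ins for dnspython's resolver response, used as the return value
# when dns.resolver.resolve is mocked in the verification tests below.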
class TestOrganizationDomains(BaseTest):
def test_continuous_verification_task(self):
"""
Tests the task that re-verifies domains to ensure ownership is maintained.
"""
pass
class TestOrganizationDomainsAPI(APIBaseTest):
domain: OrganizationDomain = None # type: ignore
another_domain: OrganizationDomain = None # type: ignore
another_org: Organization = None # type: ignore
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.domain = OrganizationDomain.objects.create(organization=cls.organization, domain="myposthog.com")
cls.another_org = Organization.objects.create(name="Another Org")
Team.objects.create(organization=cls.another_org)
cls.another_domain = OrganizationDomain.objects.create(organization=cls.another_org, domain="org.posthog.net")
# List & retrieve domains
def test_can_list_and_retrieve_domains(self):
response = self.client.get("/api/organizations/@current/domains")
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
self.assertEqual(response_data["count"], 1)
item = response_data["results"][0]
self.assertEqual(item["domain"], "myposthog.com")
self.assertEqual(item["verified_at"], None)
self.assertEqual(item["is_verified"], False)
self.assertEqual(item["jit_provisioning_enabled"], False)
self.assertEqual(item["sso_enforcement"], "")
self.assertRegex(item["verification_challenge"], r"[0-9A-Za-z_-]{32}")
retrieve_response = self.client.get(f"/api/organizations/{self.organization.id}/domains/{self.domain.id}")
self.assertEqual(retrieve_response.status_code, status.HTTP_200_OK)
self.assertEqual(retrieve_response.json(), response_data["results"][0])
def test_cannot_list_or_retrieve_domains_for_other_org(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
response = self.client.get(f"/api/organizations/@current/domains/{self.another_domain.id}")
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.json(), self.not_found_response())
response = self.client.get(f"/api/organizations/{self.another_org.id}/domains/{self.another_domain.id}")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.json(), self.permission_denied_response())
# Create domains
def test_create_domain(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
with self.settings(MULTI_TENANCY=True):
response = self.client.post(
"/api/organizations/@current/domains/",
{
"domain": "the.posthog.com",
"verified_at": "2022-01-01T14:25:25.000Z", # ignore me
"verification_challenge": "123", # ignore me
"jit_provisioning_enabled": True, # ignore me
"sso_enforcement": "saml", # ignore me
},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response_data = response.json()
self.assertEqual(response_data["domain"], "the.posthog.com")
self.assertEqual(response_data["verified_at"], None)
self.assertEqual(response_data["jit_provisioning_enabled"], False)
self.assertRegex(response_data["verification_challenge"], r"[0-9A-Za-z_-]{32}")
instance = OrganizationDomain.objects.get(id=response_data["id"])
self.assertEqual(instance.domain, "the.posthog.com")
self.assertEqual(instance.verified_at, None)
self.assertEqual(instance.last_verification_retry, None)
self.assertEqual(instance.sso_enforcement, "")
@pytest.mark.skip_on_multitenancy
def test_creating_domain_on_self_hosted_is_automatically_verified(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
with freeze_time("2021-08-08T20:20:08Z"):
response = self.client.post(
"/api/organizations/@current/domains/",
{
"domain": "the.posthog.com",
"verified_at": "2022-01-01T14:25:25.000Z", # ignore me
"verification_challenge": "123", # ignore me
"jit_provisioning_enabled": True, # ignore me
"sso_enforcement": "saml", # ignore me
},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response_data = response.json()
self.assertEqual(response_data["domain"], "the.posthog.com")
self.assertEqual(
response_data["verified_at"], "2021-08-08T20:20:08Z",
)
self.assertEqual(response_data["jit_provisioning_enabled"], False)
self.assertRegex(response_data["verification_challenge"], r"[0-9A-Za-z_-]{32}")
instance = OrganizationDomain.objects.get(id=response_data["id"])
self.assertEqual(instance.domain, "the.posthog.com")
self.assertEqual(
instance.verified_at, datetime.datetime(2021, 8, 8, 20, 20, 8, tzinfo=pytz.UTC),
)
self.assertEqual(instance.last_verification_retry, None)
self.assertEqual(instance.sso_enforcement, "")
def test_cannot_create_duplicate_domain(self):
OrganizationDomain.objects.create(domain="i-registered-first.com", organization=self.another_org)
count = OrganizationDomain.objects.count()
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
response = self.client.post("/api/organizations/@current/domains/", {"domain": "i-registered-first.com"},)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "unique",
"detail": "domain with this domain already exists.",
"attr": "domain",
},
)
self.assertEqual(OrganizationDomain.objects.count(), count)
def test_cannot_create_invalid_domain(self):
count = OrganizationDomain.objects.count()
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
invalid_domains = ["[email protected]", "🦔🦔🦔.com", "one.two.c", "--alpha.com", "javascript: alert(1)"]
for _domain in invalid_domains:
response = self.client.post("/api/organizations/@current/domains/", {"domain": _domain,},)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "invalid_input",
"detail": "Please enter a valid domain or subdomain name.",
"attr": "domain",
},
)
self.assertEqual(OrganizationDomain.objects.count(), count)
@patch("posthog.models.organization_domain.dns.resolver.resolve")
def test_can_request_verification_for_unverified_domains(self, mock_dns_query):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
mock_dns_query.return_value = FakeDNSResponse(
[
dns.rrset.from_text(
"_posthog-challenge.myposthog.com.", 3600, "IN", "TXT", self.domain.verification_challenge,
)
],
)
with freeze_time("2021-08-08T20:20:08Z"):
response = self.client.post(f"/api/organizations/@current/domains/{self.domain.id}/verify")
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
self.domain.refresh_from_db()
self.assertEqual(response_data["domain"], "myposthog.com")
self.assertEqual(
response_data["verified_at"], self.domain.verified_at.strftime("%Y-%m-%dT%H:%M:%SZ"),
)
self.assertEqual(response_data["is_verified"], True)
self.assertEqual(
self.domain.verified_at, datetime.datetime(2021, 8, 8, 20, 20, 8, tzinfo=pytz.UTC),
)
self.assertEqual(self.domain.is_verified, True)
@patch("posthog.models.organization_domain.dns.resolver.resolve")
def test_domain_is_not_verified_with_missing_challenge(self, mock_dns_query):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
mock_dns_query.side_effect = dns.resolver.NoAnswer()
with freeze_time("2021-10-10T10:10:10Z"):
with self.settings(MULTI_TENANCY=True):
response = self.client.post(f"/api/organizations/@current/domains/{self.domain.id}/verify")
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
self.domain.refresh_from_db()
self.assertEqual(response_data["domain"], "myposthog.com")
self.assertEqual(response_data["verified_at"], None)
self.assertEqual(self.domain.verified_at, None)
self.assertEqual(
self.domain.last_verification_retry, datetime.datetime(2021, 10, 10, 10, 10, 10, tzinfo=pytz.UTC),
)
@patch("posthog.models.organization_domain.dns.resolver.resolve")
def test_domain_is_not_verified_with_incorrect_challenge(self, mock_dns_query):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
mock_dns_query.return_value = FakeDNSResponse(
[dns.rrset.from_text("_posthog-challenge.myposthog.com.", 3600, "IN", "TXT", "incorrect_challenge",)],
)
with freeze_time("2021-10-10T10:10:10Z"):
with self.settings(MULTI_TENANCY=True):
response = self.client.post(f"/api/organizations/@current/domains/{self.domain.id}/verify")
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
self.domain.refresh_from_db()
self.assertEqual(response_data["domain"], "myposthog.com")
self.assertEqual(response_data["verified_at"], None)
self.assertEqual(self.domain.verified_at, None)
self.assertEqual(
self.domain.last_verification_retry, datetime.datetime(2021, 10, 10, 10, 10, 10, tzinfo=pytz.UTC),
)
def test_cannot_request_verification_for_verified_domains(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
self.domain.verified_at = timezone.now()
self.domain.save()
response = self.client.post(f"/api/organizations/@current/domains/{self.domain.id}/verify")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "already_verified",
"detail": "This domain has already been verified.",
"attr": None,
},
)
def test_only_admin_can_create_verified_domains(self):
count = OrganizationDomain.objects.count()
response = self.client.post("/api/organizations/@current/domains/", {"domain": "evil.posthog.com"})
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(
response.json(), self.permission_denied_response("Your organization access level is insufficient."),
)
self.assertEqual(OrganizationDomain.objects.count(), count)
def test_only_admin_can_request_verification(self):
response = self.client.post(f"/api/organizations/@current/domains/{self.domain.id}/verify")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(
response.json(), self.permission_denied_response("Your organization access level is insufficient."),
)
self.domain.refresh_from_db()
self.assertEqual(self.domain.verified_at, None)
# Update domains
def test_can_update_jit_provisioning_and_sso_enforcement(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
self.domain.verified_at = timezone.now()
self.domain.save()
response = self.client.patch(
f"/api/organizations/@current/domains/{self.domain.id}/",
{"sso_enforcement": "google-oauth2", "jit_provisioning_enabled": True},
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["sso_enforcement"], "google-oauth2")
self.assertEqual(response.json()["jit_provisioning_enabled"], True)
self.domain.refresh_from_db()
self.assertEqual(self.domain.sso_enforcement, "google-oauth2")
self.assertEqual(self.domain.jit_provisioning_enabled, True)
def test_cannot_enforce_sso_or_enable_jit_provisioning_on_unverified_domain(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
# SSO Enforcement
response = self.client.patch(
f"/api/organizations/@current/domains/{self.domain.id}/", {"sso_enforcement": "google-oauth2"},
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "verification_required",
"detail": "This attribute cannot be updated until the domain is verified.",
"attr": "sso_enforcement",
},
)
self.domain.refresh_from_db()
self.assertEqual(self.domain.sso_enforcement, "")
# JIT Provisioning
response = self.client.patch(
f"/api/organizations/@current/domains/{self.domain.id}/", {"jit_provisioning_enabled": True},
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "verification_required",
"detail": "This attribute cannot be updated until the domain is verified.",
"attr": "jit_provisioning_enabled",
},
)
self.domain.refresh_from_db()
self.assertEqual(self.domain.jit_provisioning_enabled, False)
def test_only_allowed_parameters_can_be_updated(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
response = self.client.patch(
f"/api/organizations/@current/domains/{self.domain.id}/",
{"verified_at": "2020-01-01T12:12:12Z", "verification_challenge": "123"},
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["verified_at"], None)
self.assertRegex(response.json()["verification_challenge"], r"[0-9A-Za-z_-]{32}")
def test_only_admin_can_update_domain(self):
self.domain.verified_at = timezone.now()
self.domain.save()
response = self.client.patch(
f"/api/organizations/{self.organization.id}/domains/{self.domain.id}/",
{"sso_enforcement": "google-oauth2", "jit_provisioning_enabled": True},
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(
response.json(), self.permission_denied_response("Your organization access level is insufficient."),
)
self.domain.refresh_from_db()
self.assertEqual(self.domain.jit_provisioning_enabled, False)
self.assertEqual(self.domain.sso_enforcement, "")
def test_cannot_update_domain_for_another_org(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
self.another_domain.verified_at = timezone.now()
self.another_domain.save()
response = self.client.patch(
f"/api/organizations/{self.another_org.id}/domains/{self.another_domain.id}/",
{"sso_enforcement": "google-oauth2", "jit_provisioning_enabled": True},
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.json(), self.permission_denied_response())
self.another_domain.refresh_from_db()
self.assertEqual(self.another_domain.jit_provisioning_enabled, False)
self.assertEqual(self.another_domain.sso_enforcement, "")
# Delete domains
def test_admin_can_delete_domain(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
response = self.client.delete(f"/api/organizations/@current/domains/{self.domain.id}")
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(response.content, b"")
self.assertFalse(OrganizationDomain.objects.filter(id=self.domain.id).exists())
def test_only_admin_can_delete_domain(self):
response = self.client.delete(f"/api/organizations/@current/domains/{self.domain.id}")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(
response.json(), self.permission_denied_response("Your organization access level is insufficient."),
)
self.domain.refresh_from_db()
def test_cannot_delete_domain_for_another_org(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
response = self.client.delete(f"/api/organizations/{self.another_org.id}/domains/{self.another_domain.id}")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.json(), self.permission_denied_response())
self.another_domain.refresh_from_db()
| 45.228774 | 118 | 0.671012 | 18,817 | 0.980767 | 0 | 0 | 5,464 | 0.284791 | 0 | 0 | 4,349 | 0.226676 |
0ec7d9e291a15b37ad7f7b106420f6f50a25a3a0 | 1,248 | py | Python | tutorial/test input.py | nataliapryakhina/FA_group3 | 3200464bc20d38a85af9ad3583a360db4ffb7f8d | [
"MIT"
] | null | null | null | tutorial/test input.py | nataliapryakhina/FA_group3 | 3200464bc20d38a85af9ad3583a360db4ffb7f8d | [
"MIT"
] | null | null | null | tutorial/test input.py | nataliapryakhina/FA_group3 | 3200464bc20d38a85af9ad3583a360db4ffb7f8d | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
from os import listdir
from tensorflow.keras.callbacks import ModelCheckpoint
dataDir = "./data/trainSmallFA/"
files = listdir(dataDir)
files.sort()
totalLength = len(files)
inputs = np.empty((len(files), 3, 64, 64))
targets = np.empty((len(files), 3, 64, 64))
for i, file in enumerate(files):
npfile = np.load(dataDir + file)
d = npfile['a']
inputs[i] = d[0:3] # inx, iny, mask
targets[i] = d[3:6] # p, velx, vely
# print("inputs shape = ", inputs.shape)
print(np.shape(targets[:, 1, :, :].flatten()))
maxvel = np.amax(np.sqrt(targets[:, 1, :, :]* targets[:, 1, :, :]
+ targets[:, 2, :, :]* targets[:, 2, :, :]))
print(maxvel)
targets[:, 1:3, :, :] /= maxvel
targets[:, 0, :, :] /= np.amax(targets[:, 0, :, :])
for input in inputs:
plt.figure(num=None, figsize=(20, 10), dpi=80, facecolor='w', edgecolor='k')
# predicted data
plt.subplot(331)
plt.title('x vel')
plt.imshow(input[0, :, :], cmap='jet') # vmin=-100,vmax=100, cmap='jet')
plt.colorbar()
plt.subplot(332)
plt.title('y vel')
plt.imshow(input[1, :, :], cmap='jet')
plt.colorbar()
plt.show() | 30.439024 | 80 | 0.600962 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 175 | 0.140224 |
0ec8d0b22163c94b04ce1660f7662d06d776efe5 | 2,781 | py | Python | pepper/responder/brain.py | cltl/pepper | 5d34fc5074473163aa9273016d89e5e2b8edffa9 | [
"MIT"
] | 29 | 2018-01-20T08:51:42.000Z | 2022-01-25T11:59:28.000Z | pepper/responder/brain.py | cltl/pepper | 5d34fc5074473163aa9273016d89e5e2b8edffa9 | [
"MIT"
] | 32 | 2018-09-20T13:09:34.000Z | 2021-06-04T15:23:45.000Z | pepper/responder/brain.py | cltl/pepper | 5d34fc5074473163aa9273016d89e5e2b8edffa9 | [
"MIT"
] | 10 | 2018-10-25T02:45:21.000Z | 2020-10-03T12:59:10.000Z | from pepper.framework import *
from pepper import logger
from pepper.language import Utterance
from pepper.language.generation.thoughts_phrasing import phrase_thoughts
from pepper.language.generation.reply import reply_to_question
from .responder import Responder, ResponderType
from pepper.language import UtteranceType
from pepper.knowledge import sentences, animations
from random import choice
import re
from typing import Optional, Union, Tuple, Callable
class BrainResponder(Responder):
def __init__(self):
self._log = logger.getChild(self.__class__.__name__)
@property
def type(self):
return ResponderType.Brain
@property
def requirements(self):
return [TextToSpeechComponent, BrainComponent]
def respond(self, utterance, app):
# type: (Utterance, Union[TextToSpeechComponent, BrainComponent]) -> Optional[Tuple[float, Callable]]
try:
utterance.analyze()
self._log.debug("TRIPLE: {}".format(utterance.triple))
if utterance.triple is not None:
brain_response_statement = []
brain_response_question = []
if utterance.type == UtteranceType.QUESTION:
brain_response_question = app.brain.query_brain(utterance)
reply = reply_to_question(brain_response_question)
self._log.info("REPLY to question: {}".format(reply))
else:
brain_response_statement = app.brain.update(utterance, reason_types=True) # Searches for types in dbpedia
reply = phrase_thoughts(brain_response_statement, True, True, True)
self._log.info("REPLY to statement: {}".format(reply))
if (isinstance(reply, str) or isinstance(reply, unicode)) and reply != "":
# Return Score and Response
# Make sure to not execute the response here, but just to return the response function
return 1.0, lambda: app.say(re.sub(r"[\s+_]", " ", reply))
elif brain_response_statement:
# Thank Human for the Data!
return 1.0, lambda: app.say("{} {}".format(choice([choice(sentences.THANK), choice(sentences.HAPPY)]),
choice(sentences.PARSED_KNOWLEDGE)), animations.HAPPY)
elif brain_response_question:
# Apologize to human for not knowing
return 1.0, lambda: app.say("{} {}".format(choice(sentences.SORRY),
choice(sentences.NO_ANSWER)), animations.ASHAMED)
except Exception as e:
self._log.error(e)
| 41.507463 | 126 | 0.612729 | 2,313 | 0.831715 | 0 | 0 | 156 | 0.056095 | 0 | 0 | 395 | 0.142035 |
0ec932467a0e10a4a3b540d34642f573915937be | 7,076 | py | Python | fedora_college/modules/content/views.py | fedora-infra/fedora-college | cf310dab2e4fea02b9ac5e7f57dc53aafb4834d8 | [
"BSD-3-Clause"
] | 2 | 2015-05-16T09:54:17.000Z | 2017-01-11T17:58:31.000Z | fedora_college/modules/content/views.py | fedora-infra/fedora-college | cf310dab2e4fea02b9ac5e7f57dc53aafb4834d8 | [
"BSD-3-Clause"
] | null | null | null | fedora_college/modules/content/views.py | fedora-infra/fedora-college | cf310dab2e4fea02b9ac5e7f57dc53aafb4834d8 | [
"BSD-3-Clause"
] | 1 | 2020-12-07T22:14:01.000Z | 2020-12-07T22:14:01.000Z | # -*- coding: utf-8 -*-
import re
from unicodedata import normalize
from flask import Blueprint, render_template, current_app
from flask import redirect, url_for, g, abort
from sqlalchemy import desc
from fedora_college.core.database import db
from fedora_college.modules.content.forms import * # noqa
from fedora_college.core.models import * # noqa
from fedora_college.fedmsgshim import publish
from flask_fas_openid import fas_login_required
bundle = Blueprint('content', __name__, template_folder='templates')
from fedora_college.modules.content.media import * # noqa
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
# Verify if user is authenticated
def authenticated():
return hasattr(g, 'fas_user') and g.fas_user
# generate url slug
def slugify(text, delim=u'-'):
"""Generates an slightly worse ASCII-only slug."""
result = []
for word in _punct_re.split(text.lower()):
word = normalize('NFKD', word).encode('ascii', 'ignore')
if word:
result.append(word)
return unicode(delim.join(result))
# attach tags to a content entry
def attach_tags(tags, content):
rem = TagsMap.query.filter_by(content_id=content.content_id).all()
for r in rem:
db.session.delete(r)
db.session.commit()
for tag in tags:
tag_db = Tags.query.filter_by(tag_text=tag).first()
if tag_db is None:
tag_db = Tags(tag)
db.session.add(tag_db)
db.session.commit()
Map = TagsMap(tag_db.tag_id, content.content_id)
db.session.add(Map)
db.session.commit()
# delete content
@bundle.route('/content/delete/<posturl>', methods=['GET', 'POST'])
@bundle.route('/content/delete/<posturl>/', methods=['GET', 'POST'])
@fas_login_required
def delete_content(posturl=None):
if posturl is not None:
db.session.rollback()
content = Content.query.filter_by(slug=posturl).first_or_404()
rem = TagsMap.query.filter_by(
content_id=content.content_id).all()
'''delete mapped tags'''
for r in rem:
db.session.delete(r)
comments = Comments.query.filter_by(
content_id=content.content_id).all()
        '''delete comments with foreign keys'''
for r in comments:
db.session.delete(r)
db.session.delete(content)
db.session.commit()
return redirect(url_for('profile.user',
nickname=g.fas_user['username']))
abort(404)
# add / edit more content
@bundle.route('/content/add/', methods=['GET', 'POST'])
@bundle.route('/content/add', methods=['GET', 'POST'])
@bundle.route('/content/edit/<posturl>/', methods=['GET', 'POST'])
@bundle.route('/content/edit/<posturl>', methods=['GET', 'POST'])
@fas_login_required
def addcontent(posturl=None):
if authenticated():
form = CreateContent()
form_action = url_for('content.addcontent')
media = Media.query.order_by(desc(Media.timestamp)).limit(10).all()
if posturl is not None:
content = Content.query.filter_by(slug=posturl).first_or_404()
form = CreateContent(obj=content)
if form.validate_on_submit():
form.populate_obj(content)
tags = str(form.tags.data).split(',')
attach_tags(tags, content)
content.rehtml()
db.session.commit()
'''Publish the message'''
msg = content.getdata()
msg['title'] = content.title
msg['link'] = current_app.config[
'EXTERNAL_URL'] + content.slug
publish(
topic=current_app.config['CONTENT_EDIT_TOPIC'],
msg=msg
)
if content.type_content == "blog":
print url_for('content.blog', slug=posturl)
return redirect(url_for('content.blog', slug=posturl))
return redirect(url_for('home.content', slug=posturl))
else:
if form.validate_on_submit():
url_name = slugify(form.title.data)
content = Content(form.title.data,
url_name,
form.description.data,
form.active.data,
form.tags.data,
g.fas_user['username'],
form.type_content.data
)
tags = str(form.tags.data).split(',')
try:
db.session.add(content)
db.session.commit()
attach_tags(tags, content)
'''Publish the message'''
msg = content.getdata()
msg['title'] = content.title
msg['link'] = current_app.config[
'EXTERNAL_URL'] + url_name
publish(
topic=current_app.config['CONTENT_CREATE_TOPIC'],
msg=msg
)
if content.type_content == "blog":
return redirect(url_for('content.blog', slug=posturl))
return redirect(url_for('home.content', slug=url_name))
# Duplicate entry
except Exception as e:
return str(e)
db.session.rollback()
pass
tags = Tags.query.all()
return render_template('content/edit_content.html', form=form,
form_action=form_action, title="Create Content",
media=media[0:5], tags=tags)
abort(404)
# View Blog post
@bundle.route('/blog', methods=['GET', 'POST'])
@bundle.route('/blog/', methods=['GET', 'POST'])
@bundle.route('/blog/<slug>/', methods=['GET', 'POST'])
@bundle.route('/blog/<slug>', methods=['GET', 'POST'])
@bundle.route('/blog/page/<id>', methods=['GET', 'POST'])
@bundle.route('/blog/page/<id>', methods=['GET', 'POST'])
def blog(slug=None, id=0):
id = int(id)
screen = Content.query. \
filter_by(
type_content="lecture",
active=True
).limit(10).all()
if slug is not None:
try:
posts = Content.query. \
filter_by(slug=slug).all()
except:
posts = "No such posts in database."
else:
try:
posts = Content.query. \
filter_by(type_content="blog").all()
if id > 0:
posts = posts[id - 1:id + 5]
else:
posts = posts[0:5]
except:
posts = []
return render_template('blog/index.html',
title='Blog',
content=posts,
screen=screen,
id=id,
slug=slug
)
| 34.517073 | 79 | 0.53307 | 0 | 0 | 0 | 0 | 5,401 | 0.763284 | 0 | 0 | 1,151 | 0.162663 |
0ecb2c7a8dccded4280171cf1a9314223cfca421 | 3,611 | py | Python | tests/components/airthings/test_config_flow.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/airthings/test_config_flow.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | tests/components/airthings/test_config_flow.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Test the Airthings config flow."""
from unittest.mock import patch
import airthings
from homeassistant import config_entries
from homeassistant.components.airthings.const import CONF_ID, CONF_SECRET, DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM
from tests.common import MockConfigEntry
TEST_DATA = {
CONF_ID: "client_id",
CONF_SECRET: "secret",
}
async def test_form(hass: HomeAssistant) -> None:
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
with patch("airthings.get_token", return_value="test_token",), patch(
"homeassistant.components.airthings.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_DATA,
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == "Airthings"
assert result2["data"] == TEST_DATA
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass: HomeAssistant) -> None:
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"airthings.get_token",
side_effect=airthings.AirthingsAuthError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_DATA,
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass: HomeAssistant) -> None:
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"airthings.get_token",
side_effect=airthings.AirthingsConnectionError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_DATA,
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_error(hass: HomeAssistant) -> None:
"""Test we handle unknown error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"airthings.get_token",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_DATA,
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "unknown"}
async def test_flow_entry_already_exists(hass: HomeAssistant) -> None:
"""Test user input for config_entry that already exists."""
first_entry = MockConfigEntry(
domain="airthings",
data=TEST_DATA,
unique_id=TEST_DATA[CONF_ID],
)
first_entry.add_to_hass(hass)
with patch("airthings.get_token", return_value="token"):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=TEST_DATA
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
| 30.601695 | 84 | 0.675159 | 0 | 0 | 0 | 0 | 0 | 0 | 3,142 | 0.870119 | 702 | 0.194406 |
0ecb9ff079e3fe67fcf620b3218ea8892b9b9c1c | 1,726 | py | Python | utils/utils.py | scomup/StereoNet-ActiveStereoNet | 05994cf1eec4a109e095732fe01ecb5558880ba5 | [
"MIT"
] | null | null | null | utils/utils.py | scomup/StereoNet-ActiveStereoNet | 05994cf1eec4a109e095732fe01ecb5558880ba5 | [
"MIT"
] | null | null | null | utils/utils.py | scomup/StereoNet-ActiveStereoNet | 05994cf1eec4a109e095732fe01ecb5558880ba5 | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------
# Copyright (c) NKU
# Licensed under the MIT License.
# Written by Xuanyi Li ([email protected])
# ------------------------------------------------------------------------------
import os
import torch
import torch.nn.functional as F
#import cv2 as cv
import numpy as np
def GERF_loss(GT, pred, args):
# mask = (GT < args.maxdisp) & (GT >= 0)
mask = GT > 0
mask.detach_()
# print(mask.size(), GT.size(), pred.size())
count = len(torch.nonzero(mask))
# print(count)
if count == 0:
count = 1
return torch.sum(torch.sqrt(torch.pow(GT[mask] - pred[mask], 2) + 4) /2 - 1) / count
def smooth_L1_loss(GT, pred, args):
mask = GT < args.maxdisp
mask.detach_()
# loss = F.smooth_l1_loss(pred[mask], GT[mask], size_average=True)
loss = (pred[mask] - GT[mask]).abs().mean()
return loss
if __name__ == '__main__':
pass
# import matplotlib.pyplot as plt
# image = cv.imread('/media/lxy/sdd1/ActiveStereoNet/StereoNet_pytorch/results/forvideo/iter-122.jpg')
#im_gray = cv.imread('/media/lxy/sdd1/ActiveStereoNet/StereoNet_pytorch/results/forvideo/iter-133.jpg', cv.IMREAD_GRAYSCALE)
# print(im_gray.shape)
#im_color = cv.applyColorMap(im_gray*2, cv.COLORMAP_JET)
# cv.imshow('test', im_color)
# cv.waitKey(0)
#cv.imwrite('test.png',im_color)
# print(image.shape)
# plt.figure('Image')
# sc =plt.imshow(image)
# sc.set_cmap('hsv')
# plt.colorbar()
# plt.axis('off')
# plt.show()
# print('end')
# image[:,:,0].save('/media/lxy/sdd1/ActiveStereoNet/StereoNet_pytorch/results/pretrained_StereoNet_single/it1er-151.jpg')
| 32.566038 | 128 | 0.589803 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,128 | 0.653534 |
0ecc375d6cf3b58f62ba3d07d23244af90a9b759 | 1,036 | py | Python | worker/main.py | Devalent/facial-recognition-service | 342e31fa7d016992d938b0121b03f0e8fe776ea8 | [
"MIT"
] | null | null | null | worker/main.py | Devalent/facial-recognition-service | 342e31fa7d016992d938b0121b03f0e8fe776ea8 | [
"MIT"
] | null | null | null | worker/main.py | Devalent/facial-recognition-service | 342e31fa7d016992d938b0121b03f0e8fe776ea8 | [
"MIT"
] | null | null | null | from aiohttp import web
import base64
import io
import face_recognition
async def encode(request):
request_data = await request.json()
# Read base64 encoded image
url = request_data['image'].split(',')[1]
image = io.BytesIO(base64.b64decode(url))
# Load image data
np_array = face_recognition.load_image_file(image)
# Find face locations
locations = face_recognition.face_locations(np_array)
# Create face encodings
encodings = face_recognition.face_encodings(np_array, locations)
results = []
for i in range(len(locations)):
top, right, bottom, left = locations[i]
result = {
'x': left,
'y': top,
'width': right - left,
'height': bottom - top,
'encodings': encodings[i].tolist()
}
results.append(result)
return web.json_response(results)
def main():
app = web.Application()
app.router.add_post('/encode', encode)
web.run_app(app, host='0.0.0.0', port='3000')
main()
| 22.521739 | 68 | 0.625483 | 0 | 0 | 0 | 0 | 0 | 0 | 819 | 0.790541 | 154 | 0.148649 |
0ecd026a7b7cddee19fb7d65983aadf807f4917d | 657 | py | Python | rblod/setup.py | TiKeil/Two-scale-RBLOD | 23f17a3e4edf63ea5f208eca50ca90c19bf511a9 | [
"BSD-2-Clause"
] | null | null | null | rblod/setup.py | TiKeil/Two-scale-RBLOD | 23f17a3e4edf63ea5f208eca50ca90c19bf511a9 | [
"BSD-2-Clause"
] | null | null | null | rblod/setup.py | TiKeil/Two-scale-RBLOD | 23f17a3e4edf63ea5f208eca50ca90c19bf511a9 | [
"BSD-2-Clause"
] | null | null | null | # ~~~
# This file is part of the paper:
#
# " An Online Efficient Two-Scale Reduced Basis Approach
# for the Localized Orthogonal Decomposition "
#
# https://github.com/TiKeil/Two-scale-RBLOD.git
#
# Copyright 2019-2021 all developers. All rights reserved.
# License: Licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
# Authors:
# Stephan Rave
# Tim Keil
# ~~~
from setuptools import setup
setup(name='rblod',
version='2021.1',
description='Pymor support for RBLOD',
author='Tim Keil',
author_email='[email protected]',
license='MIT',
packages=['rblod'])
| 26.28 | 89 | 0.648402 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 492 | 0.748858 |
0ecdf401d5b3926e749aa892bfa6a87de7f72b30 | 8,060 | py | Python | bin/euclid_fine_plot_job_array.py | ndeporzio/cosmicfish | f68f779d73f039512a958d110bb44194d0daceec | [
"MIT"
] | null | null | null | bin/euclid_fine_plot_job_array.py | ndeporzio/cosmicfish | f68f779d73f039512a958d110bb44194d0daceec | [
"MIT"
] | null | null | null | bin/euclid_fine_plot_job_array.py | ndeporzio/cosmicfish | f68f779d73f039512a958d110bb44194d0daceec | [
"MIT"
] | null | null | null | import os
import shutil
import numpy as np
import pandas as pd
import seaborn as sns
import cosmicfish as cf
import matplotlib.pyplot as plt
import dill
# Instruct pyplot to use seaborn
sns.set()
# Set project, data, CLASS directories
projectdir = os.environ['STORAGE_DIR']
datastore = os.environ['DATASTORE_DIR']
classpath = os.environ['CLASS_DIR']
fidx = int(os.environ['FORECAST_INDEX'])
# Generate output paths
fp_resultsdir = projectdir
cf.makedirectory(fp_resultsdir)
# Specify resolution of numerical integrals
derivative_step = 0.008 # How much to vary parameter to calculate numerical derivative
g_derivative_step = 0.1
mu_integral_step = 0.05 # For calculating numerical integral wrt mu between -1 and 1
# Linda Fiducial Cosmology
fp_fid = {
"A_s" : 2.2321e-9,
"n_s" : 0.967,
"omega_b" : 0.02226,
"omega_cdm" : 0.1127,
"tau_reio" : 0.0598,
"h" : 0.701,
"T_cmb" : 2.726, # Units [K]
"N_ncdm" : 4.,
"deg_ncdm" : 1.0,
"T_ncdm" : (0.79/2.726), # Units [T_cmb].
"m_ncdm" : 0.01, # Units [eV]
"b0" : 1.0,
"beta0" : 1.7,
"beta1" : 1.0,
"alphak2" : 1.0,
"sigma_fog_0" : 250000, #Units [m s^-2]
"N_eff" : 0.0064, #We allow relativistic neutrinos in addition to our DM relic
"relic_vary" : "N_ncdm", # Fix T_ncdm or m_ncdm
"m_nu" : 0.02
}
# EUCLID values
z_table = np.array([0.65, 0.75, 0.85, 0.95, 1.05, 1.15, 1.25, 1.35, 1.45, 1.55, 1.65, 1.75, 1.85, 1.95])
dNdz = np.array([2434.280, 4364.812, 4728.559, 4825.798, 4728.797, 4507.625, 4269.851, 3720.657, 3104.309,
2308.975, 1514.831, 1474.707, 893.716, 497.613])
skycover = 0.3636
# Run Fisher Forecast
full_masses = np.geomspace(0.01, 10., 21)
full_temps = np.array([0.79, 0.91, 0.94, 1.08])
mass_index=(fidx % 21)
temp_index=(fidx // 21)
masses = np.array([full_masses[mass_index]])
temps = np.array([full_temps[temp_index]])
omegacdm_set = np.array([
fp_fid['omega_cdm']
- ((masses/cf.NEUTRINO_SCALE_FACTOR)* np.power(tval / 1.95, 3.))
for tidx, tval in enumerate(temps)])
fp_fiducialset = [[
dict(fp_fid, **{
'm_ncdm' : masses[midx],
'omega_cdm' : omegacdm_set[tidx, midx],
'T_ncdm' : temps[tidx]/2.726})
for midx, mval in enumerate(masses)]
for tidx, tval in enumerate(temps)]
fp_forecastset = [[cf.forecast(
classpath,
datastore,
'2relic',
fidval,
z_table,
"EUCLID",
dNdz,
fsky=skycover,
dstep=derivative_step,
gstep=g_derivative_step,
RSD=True,
FOG=True,
AP=True,
COV=True)
for fididx, fidval in enumerate(fidrowvals)]
for fidrowidx, fidrowvals in enumerate(fp_fiducialset)]
#dill.load_session('')
for frowidx, frowval in enumerate(fp_forecastset):
for fidx, fcst in enumerate(frowval):
if type(fcst.fisher)==type(None):
fcst.gen_pm()
fcst.gen_fisher(
fisher_order=[
'omega_b',
'omega_cdm',
'n_s',
'A_s',
'tau_reio',
'h',
'N_ncdm',
'M_ncdm',
'sigma_fog',
'beta0',
'beta1',
'alpha_k2'],
mu_step=mu_integral_step,
skipgen=False)
print("Relic Forecast ", fidx, " complete...")
dill.dump_session(os.path.join(fp_resultsdir, 'fp_'+str(temp_index)+'_'+str(mass_index)+'.db'))
else:
print('Fisher matrix already generated!')
| 65.528455 | 116 | 0.262655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,490 | 0.184864 |
0ece61d6db781e687c9a0cc4ff7c881e2a9a0b06 | 346 | py | Python | project4/test/test_arm.py | XDZhelheim/CS205_C_CPP_Lab | f585fd685a51e19fddc9c582846547d34442c6ef | [
"MIT"
] | 3 | 2022-01-11T08:12:40.000Z | 2022-03-27T08:15:45.000Z | project4/test/test_arm.py | XDZhelheim/CS205_C_CPP_Lab | f585fd685a51e19fddc9c582846547d34442c6ef | [
"MIT"
] | null | null | null | project4/test/test_arm.py | XDZhelheim/CS205_C_CPP_Lab | f585fd685a51e19fddc9c582846547d34442c6ef | [
"MIT"
] | 2 | 2022-03-03T03:01:20.000Z | 2022-03-27T08:16:02.000Z | import os
if __name__ == "__main__":
dims = ["32", "64", "128", "256", "512", "1024", "2048"]
for dim in dims:
os.system(
f"perf stat -e r11 -x, -r 10 ../matmul.out ../data/mat-A-{dim}.txt ../data/mat-B-{dim}.txt ./out/out-{dim}.txt 2>>res_arm.csv"
)
print(f"Finished {dim}")
print("Finished.") | 26.615385 | 138 | 0.514451 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 199 | 0.575145 |