seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt | api
---|---|---|---|---|---|---|---|---|---|---|---|---|---
string | string | string | string | string | string | int64 | string | string | string | int64 | string | string | list
19670935871
|
from konlpy.tag import Kkma, Okt
from pandas import DataFrame as df
from gensim.models.word2vec import Word2Vec
import pandas as pd
import logging
import time
import re
import os
import matplotlib as mpl
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
start = time.time()
logging.basicConfig(
format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.INFO)
kkma = Kkma()
mc = Okt()
def word2vec():
word_list = []
path_dir = "C:\\Users\\gusals\\Desktop\\현민\\딥러닝 특론\\리포트 2013~2015 pkl"
file_list = os.listdir(path_dir)
file_list.sort()
print(file_list)
for i in file_list:
df = pd.read_pickle("C:\\Users\\gusals\\Desktop\\현민\\딥러닝 특론\\리포트 2013~2015 pkl\\%s" % i)
for j in df['sentences']:
if len(j) > 1:
#print(j)
word_list.append(j)
print(len(word_list))
#print(word_list)
embedding_model = Word2Vec(word_list, size=200, window=10, min_count=5, iter=500, sg=1, sample=1e-3, hs=0)  # gensim 3.x API; in gensim 4+ these parameters are vector_size= and epochs=
# embedding_model2 = Word2Vec.load('C:\\Users\\gusals\\Desktop\\현민\\딥러닝 특론\\word2vec_model\\stock_summary_model_01.model')
# embedding_model2.wv.save_word2vec_format("C:\\Users\\gusals\\Desktop\\현민\\딥러닝 특론\\word2vec_model\\word_vector_sample.bin", binary=True)
# model2 = Word2Vec.load('C:\\Users\\gusals\\Desktop\\현민\\딥러닝 특론\\word2vec_model\\2013~2015_report_size20_win10_min5_iter500_hs0_intersect_ko2')
# model2.wv.save_word2vec_format("C:\\Users\\gusals\\Desktop\\현민\\딥러닝 특론\\word2vec_model\\ko\\2013~2015_report_size20_win10_min5_iter500_hs0_intersect_ko2.bin", binary=True)
#
# prev_model = 'C:\\Users\\gusals\\Desktop\\현민\\딥러닝 특론\\word2vec_model\\ko\\2013~2015_report_size20_win10_min5_iter500_hs0_intersect_ko2.bin'
# embedding_model.intersect_word2vec_format(fname=prev_model, lockf=1.0, binary=True)
model_name = "2013~2015_report_size200_win10_min5_iter500_hs0"
embedding_model.save('C:\\Users\\gusals\\Desktop\\현민\\딥러닝 특론\\word2vec_model\\%s' % model_name)
word_vector = embedding_model.wv
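# Hedged usage sketch (not part of the original script): once training finishes,
# nearest neighbours can be queried from the saved vectors, e.g.
#   word_vector.most_similar('기업', topn=10)  # '기업' is a hypothetical query term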
def tsne_plot(model):
labels = []
tokens = []
mpl.rcParams['axes.unicode_minus'] = False
plt.rc('font', family='NanumGothic')
for word in model.wv.vocab:
tokens.append(model[word])
labels.append(word)
print(labels)
print(len(labels))
tsne_model = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=23)
new_values = tsne_model.fit_transform(tokens)
x = []
y = []
for value in new_values[:300]:
x.append(value[0])
y.append(value[1])
plt.figure(figsize=(16, 16))
for i in range(len(x)):
plt.scatter(x[i], y[i])
plt.annotate(labels[i],
xy=(x[i], y[i]),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.show()
def cluster(model, file, model_name):
result = model.wv  # feature vectors of the vocabulary
topic = pd.read_pickle('C:\\Users\\gusals\\Desktop\\현민\\딥러닝 특론\\토픽 모델링 결과\\%s' % file)
#print(result.vocab.keys())
#vocabs = result.vocab.keys()
vocabs = []
for i in topic['sentences']:
for j in i:
vocabs.append(j)
print(len(vocabs))
# clean_file = open('C:\\Users\\gusals\\Desktop\\현민\\딥러닝 특론\\클러스터전처리.txt', 'r')
# lines = clean_file.readlines()
# clean_file.close()
# remove_list = lines[0].split(', ')
remove_list = []
word_vectors = []
clean_vocabs = []
for i in vocabs:
for remove in remove_list:
i = re.sub(remove, '', i)
if len(i) > 1:
clean_vocabs.append(i)
for v in list(clean_vocabs):  # iterate over a copy: removing while iterating would skip items
try:
word_vectors.append(result[v])
except KeyError:  # the word is not in the embedding's vocabulary
print(v)
clean_vocabs.remove(v)
num_clusters = 50 # int(len(clean_vocabs) / 5) # int(word_vectors.shape[0]/50) # 1/5 of the vocabulary size, or about 5 words per cluster on average
print(num_clusters)
num_clusters = int(num_clusters)
kmeans_clustering = KMeans(n_clusters=num_clusters)
idx = kmeans_clustering.fit_predict(word_vectors)
#idx = DBSCAN(eps=1000, min_samples=2).fit(word_vectors)
print(idx)
idx = list(idx)
print(len(vocabs))
print(len(idx))
names = clean_vocabs
print(names)
word_centroid_map = {names[i]: idx[i] for i in range(len(idx))}
dfIndustry = pd.DataFrame(columns=["cluster", "keyword"])
for c in range(num_clusters):
# Print the cluster number
print("\ncluster {}".format(c))
words = []
cluster_values = list(word_centroid_map.values())
for i in range(len(cluster_values)):
if (cluster_values[i] == c):
words.append(list(word_centroid_map.keys())[i])
if len(words) == 1:
print(words)
rowIndustry = [c, words]
dfIndustry.loc[len(dfIndustry)] = rowIndustry
print(dfIndustry)
clean_file = open('C:\\Users\\gusals\\Desktop\\현민\\딥러닝 특론\\클러스터전처리.txt', 'r')
lines = clean_file.readlines()
clean_file.close()
remove_list = lines[0].split(', ')
count = 0
for i in dfIndustry['keyword']:
clean_v = []
for j in i:
print(j)
for remove in remove_list:
j = re.sub(remove, '', j)
if len(j) > 1:
clean_v.append(j)
dfIndustry.at[count, 'keyword'] = clean_v  # .at avoids pandas chained-assignment pitfalls
count += 1
print(dfIndustry)
print("time: ", time.time() - start)
dfIndustry.to_pickle("C:\\Users\\gusals\\Desktop\\현민\\딥러닝 특론\\클러스터링최종\\군집_%s.pkl" % (model_name))
word2vec()
#tsne_plot(model)
# path_dir = "C:\\Users\\gusals\\Desktop\\현민\\딥러닝 특론\\토픽 모델링 결과"
# file_list = os.listdir(path_dir)
# file_list.sort()
# print(file_list)
#
# for file in file_list:
# cluster(model, file)
# model_name = '2013~2015_report_size20_win20_min5_iter1000_hs0'
# model = Word2Vec.load('C:\\Users\\gusals\\Desktop\\현민\\딥러닝 특론\\word2vec_model\\%s' % model_name)
# file = '3년.pkl'
#
# cluster(model, file, model_name)
#sim(['기계', '펄프'], model)
|
gusals6804/TopicModelling
|
Word2Vec.py
|
Word2Vec.py
|
py
| 6,481 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "time.time",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "konlpy.tag.Kkma",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "konlpy.tag.Okt",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "pandas.read_pickle",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "gensim.models.word2vec.Word2Vec",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "sklearn.manifold.TSNE",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.annotate",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "pandas.read_pickle",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 180,
"usage_type": "call"
}
] |
16132746633
|
'''
mobile monkey
'''
import time
from typing import List
from threading import Thread
import config_reader as config
import emulator_manager
import api_commands
from telnet_connector import TelnetAdb
from telnet_connector import GsmProfile
from telnet_connector import NetworkDelay
from telnet_connector import NetworkStatus
from emulator import Emulator
from fuzz_context import Fuzzer
from adb_settings import Airplane, KeyboardEvent, UserRotation
# import adb_settings as AdbSettings
import util
from adb_monkey import AdbMonkey
from apk import Apk
from adb_logcat import Logcat, TestType, FatalWatcher
from log_analyzer import Analyzer
PRINT_FLAG = True
TIME_PRINT_FLAG = True
emulator_model = config.EMULATOR_NAME
emulator_port = config.EMULATOR_PORT
contextual_events = 0
WILL_MONKEY = True
def start_emulator() -> bool:
'''
starts emulator
'''
global emulator_model
if emulator_manager.adb_instances_manager():
util.debug_print('emulators are already running.', flag=PRINT_FLAG)
return True
else:
util.debug_print(
str.format("No emulator instance running. starting {} at port {}",
emulator_model, emulator_port), flag=PRINT_FLAG)
api_commands.adb_start_server_safe()
emulator_manager.emulator_start_avd(emulator_port, emulator_model)
# subprocess.Popen([command,
# '-port', str(emulator_port), '-avd',
# emulator_name, '-use-system-libs'],
# stdout=subprocess.PIPE)
emulator_manager.check_avd_booted_completely(emulator_port)
return True
def threads_to_run(emulator: Emulator, apk: Apk, fuzz: Fuzzer,
will_monkey: bool) -> List:
'''
runs the threads after checking permissions.
'''
threads = []
global contextual_events
util.debug_print(apk.permissions, flag=PRINT_FLAG)
emulator_name = 'emulator-' + str(emulator.port)
if "android.permission.INTERNET" in apk.permissions or \
"android.permission.ACCESS_NETWORK_STATE" in apk.permissions:
util.debug_print("Internet permission detected", flag=PRINT_FLAG)
network_delay_interval_events = fuzz.generate_step_interval_event(
NetworkDelay)
# print(network_delay_interval_events)
contextual_events += len(network_delay_interval_events)
threads.append(Thread(target=fuzz.random_network_delay, args=(
config.LOCALHOST, emulator, network_delay_interval_events)))
network_speed_interval_event = fuzz.generate_step_interval_event(
NetworkStatus)
# print(network_speed_interval_event)
contextual_events += len(network_speed_interval_event)
threads.append(Thread(target=fuzz.random_network_speed, args=(
config.LOCALHOST, emulator, network_speed_interval_event)))
airplane_mode_interval_events = fuzz.generate_step_interval_event(
Airplane)
# print(airplane_mode_interval_events)
contextual_events += len(airplane_mode_interval_events)
threads.append(Thread(
target=fuzz.random_airplane_mode_call,
args=(emulator_name, airplane_mode_interval_events)))
if "android.permission.ACCESS_NETWORK_STATE" in apk.permissions:
util.debug_print("access_network_state detected", flag=PRINT_FLAG)
gsm_profile_interval_events = fuzz.generate_step_uniforminterval_event(
GsmProfile)
contextual_events += len(gsm_profile_interval_events)
threads.append(Thread(target=fuzz.random_gsm_profile, args=(
config.LOCALHOST, emulator,
config.UNIFORM_INTERVAL, gsm_profile_interval_events)))
user_rotation_interval_events = fuzz.generate_step_interval_event(
UserRotation)
contextual_events += len(user_rotation_interval_events)
threads.append(Thread(
target=fuzz.random_rotation, args=((emulator_name,
user_rotation_interval_events))))
key_event_interval_events = fuzz.generate_step_interval_event(
KeyboardEvent)
contextual_events += len(key_event_interval_events)
threads.append(Thread(
target=fuzz.random_key_event, args=((emulator_name,
key_event_interval_events))))
if will_monkey:
monkey = AdbMonkey(emulator, apk,
config.SEED, config.DURATION)
thread_monkey = Thread(target=monkey.start_monkey)
threads.append(thread_monkey)
return threads
def run(apk: Apk, emulator_name: str, emulator_port: int):
'''
runs the full fuzzing session on the given apk
'''
to_kill = False
to_test = True
to_full_run = True
wipe_after_finish = False
# test_time_seconds = 30
if not start_emulator():
return
emulator = emulator_manager.get_adb_instance_from_emulators(emulator_name)
# emulator_name = 'emulator-' + emulator.port
telnet_connector = TelnetAdb(config.LOCALHOST, emulator.port)
# apk = Apk(config.APK_FULL_PATH)
# api_commands.adb_uninstall_apk(emulator, apk)
# api_commands.adb_install_apk(emulator, apk)
# api_commands.adb_start_launcher_of_apk(emulator, apk)
log = Logcat(emulator, apk, TestType.MobileMonkey)
# api_commands.adb_pidof_app(emulator, apk)
if to_kill:
telnet_connector.kill_avd()
quit()
if not to_test:
return
log.start_logcat()
fuzz = Fuzzer(config.MINIMUM_INTERVAL,
config.MAXIMUM_INTERVAL, config.SEED,
config.DURATION, FatalWatcher(log.file_address))
# log.experimental_start_logcat(fuzz)
# fuzz.print_intervals_events()
threads = threads_to_run(emulator, apk, fuzz, WILL_MONKEY)
# log_thread = Thread(target=log.start, args=(fuzz,))
global contextual_events
print("Total contextual events: " + str(contextual_events))
# print(threads)
# return
# device = AdbSettings.AdbSettings('emulator-' + adb_instance.port)
# triggers = [fuzz.set_continue_network_speed,
# fuzz.set_continue_gsm_profile,
# fuzz.set_continue_network_delay]
# thread_test = Thread(target=time_to_test, args=[
# test_time_seconds, triggers, ])
# thread_fuzz_delay = Thread(target=fuzz.random_network_delay, args=(
# config.LOCALHOST, emulator.port,))
# thread_fuzz_profile = Thread(target=fuzz.random_gsm_profile, args=(
# config.LOCALHOST, emulator.port, 12,))
# thread_fuzz_speed = Thread(target=fuzz.random_network_speed, args=(
# config.LOCALHOST, emulator.port,))
# thread_fuzz_rotation = Thread(
# target=fuzz.random_rotation, args=((emulator_name,)))
# thread_fuzz_airplane = Thread(
# target=fuzz.random_airplane_mode_call, args=(emulator_name,))
# monkey = AdbMonkey(emulator, config.APP_PACKAGE_NAME,
# config.SEED, config.DURATION)
# thread_monkey = Thread(target=monkey.start_monkey)
if to_full_run:
util.debug_print(
"started testing at {}".format(time.ctime()), flag=TIME_PRINT_FLAG)
[thread.start() for thread in threads]
# log_thread.start()
[thread.join() for thread in threads]
# log.log_process.kill()
# log.stop_logcat()
# log_thread.join()
# thread_monkey.start()
# thread_fuzz_rotation.start()
# thread_fuzz_delay.start()
# thread_fuzz_profile.start()
# thread_fuzz_speed.start()
# thread_fuzz_airplane.start()
# thread_test.start()
# thread_test.join()
# thread_fuzz_delay.join()
# thread_fuzz_profile.join()
# thread_fuzz_speed.join()
# thread_fuzz_rotation.join()
# thread_fuzz_airplane.join()
# thread_monkey.join()
# telnet_connector.kill_avd()
api_commands.adb_stop_activity_of_apk(emulator, apk)
log.stop_logcat()
api_commands.adb_uninstall_apk(emulator, apk)
util.debug_print(
'Finished testing and uninstalling app at {}'.format(time.ctime()),
flag=TIME_PRINT_FLAG)
print(Analyzer(log.file_address))
if wipe_after_finish:
print("successfully completed testing app. Closing emulator")
telnet_connector.kill_avd()
emulator_manager.emulator_wipe_data(emulator)
if __name__ == '__main__':
import os
base_dir = os.path.dirname(__file__)  # renamed to avoid shadowing the built-in dir()
StopFlagWatcher = os.path.join(base_dir, 'test/StopFlagWatcher')
# opening in 'w' mode already truncates the stop-flag file
with open(StopFlagWatcher, 'w'):
pass
run(Apk(config.APK_FULL_PATH), config.EMULATOR_NAME, config.EMULATOR_PORT)
|
LordAmit/mobile-monkey
|
mobile_monkey.py
|
mobile_monkey.py
|
py
| 8,654 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "config_reader.EMULATOR_NAME",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "config_reader.EMULATOR_PORT",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "emulator_manager.adb_instances_manager",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "util.debug_print",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "util.debug_print",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "api_commands.adb_start_server_safe",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "emulator_manager.emulator_start_avd",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "emulator_manager.check_avd_booted_completely",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "emulator.Emulator",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "apk.Apk",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "fuzz_context.Fuzzer",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "util.debug_print",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "apk.permissions",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "emulator.port",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "apk.permissions",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "apk.permissions",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "util.debug_print",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "telnet_connector.NetworkDelay",
"line_number": 67,
"usage_type": "argument"
},
{
"api_name": "threading.Thread",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "config_reader.LOCALHOST",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "telnet_connector.NetworkStatus",
"line_number": 73,
"usage_type": "argument"
},
{
"api_name": "threading.Thread",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "config_reader.LOCALHOST",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "adb_settings.Airplane",
"line_number": 80,
"usage_type": "argument"
},
{
"api_name": "threading.Thread",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "apk.permissions",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "util.debug_print",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "telnet_connector.GsmProfile",
"line_number": 90,
"usage_type": "argument"
},
{
"api_name": "threading.Thread",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "config_reader.LOCALHOST",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "config_reader.UNIFORM_INTERVAL",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "adb_settings.UserRotation",
"line_number": 97,
"usage_type": "argument"
},
{
"api_name": "threading.Thread",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "adb_settings.KeyboardEvent",
"line_number": 104,
"usage_type": "argument"
},
{
"api_name": "threading.Thread",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "adb_monkey.AdbMonkey",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "config_reader.SEED",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "config_reader.DURATION",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "apk.Apk",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "emulator_manager.get_adb_instance_from_emulators",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "telnet_connector.TelnetAdb",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "config_reader.LOCALHOST",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "emulator.port",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "adb_logcat.Logcat",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "adb_logcat.TestType.MobileMonkey",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "adb_logcat.TestType",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "telnet_connector.kill_avd",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "fuzz_context.Fuzzer",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "config_reader.MINIMUM_INTERVAL",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "config_reader.MAXIMUM_INTERVAL",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "config_reader.SEED",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "config_reader.DURATION",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "adb_logcat.FatalWatcher",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "util.debug_print",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "time.ctime",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "api_commands.adb_stop_activity_of_apk",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "api_commands.adb_uninstall_apk",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "util.debug_print",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "time.ctime",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "log_analyzer.Analyzer",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "telnet_connector.kill_avd",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "emulator_manager.emulator_wipe_data",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "apk.Apk",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "config_reader.APK_FULL_PATH",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "config_reader.EMULATOR_NAME",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "config_reader.EMULATOR_PORT",
"line_number": 231,
"usage_type": "attribute"
}
] |
21884337737
|
"""
Series of galactic operations (doesn't that sound cool?!).
...as in converting coordinates, calculating DM etc.
"""
from datetime import timedelta
import ctypes as C
import math
import numpy as np
import os
import pandas as pd
import random
from frbpoppy.paths import paths
# Import fortran libraries
uni_mods = os.path.join(paths.models(), 'universe/')
dm_mods = os.path.join(paths.models(), 'ne2001/')
loc = os.path.join(dm_mods, 'libne2001.so')
ne2001lib = C.CDLL(loc)
ne2001lib.dm_.restype = C.c_float
def frac_deg(ra, dec):
"""Convert coordinates expressed in hh:mm:ss to fractional degrees."""
# Inspired by Joe Filippazzo calculator
rh, rm, rs = [float(r) for r in ra.split(':')]
ra = rh*15 + rm/4 + rs/240
dd, dm, ds = [float(d) for d in dec.split(':')]
if dd < 0:
sign = -1
else:
sign = 1
dec = dd + sign*dm/60 + sign*ds/3600
return ra, dec
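# Worked example (values follow from the arithmetic above, not from the source):
#   frac_deg('19:06:53', '-40:37:14') -> (286.7208..., -40.6205...)
# since 19*15 + 6/4 + 53/240 = 286.72083 and -(40 + 37/60 + 14/3600) = -40.62056.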
def lb_to_xyz(gl, gb, dist):
"""
Convert galactic coordinates to galactic XYZ.
Args:
l (float): Galactic longitude [fractional degrees]
b (float): Galactic latitude [fractional degrees]
dist (float): Distance to source [Gpc]
Returns:
gx, gy, gz: Galactic XYZ [Gpc]
"""
rsun = 8.5e-6 # Gpc
L = np.radians(gl)
B = np.radians(gb)
gx = dist * np.cos(B) * np.sin(L)
gy = rsun - dist * np.cos(B) * np.cos(L)
gz = dist * np.sin(B)
return gx, gy, gz
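# Sanity check (illustrative): with rsun as above, looking towards the Galactic
# Centre (gl=0, gb=0) at dist=8.5e-6 Gpc lands exactly on the origin:
#   lb_to_xyz(0, 0, 8.5e-6) -> (0.0, 0.0, 0.0)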
def lb_to_radec(l, b):
"""
Convert galactic coordinates to RA, Dec.
Formulas from 'An Introduction to Modern Astrophysics (2nd Edition)' by
Bradley W. Carroll, Dale A. Ostlie (Eq. 24.19 onwards).
NOTE: This function is not as accurate as the astropy conversion, nor as
the Javascript calculators found online. However, as using astropy was
prohibitively slow while running over large populations, frbpoppy uses this
function. While this function is not as accurate, the under/over
estimations of the coordinates are equally distributed meaning the errors
cancel each other in the limit of large populations.
Args:
l (float): Galactic longitude [fractional degrees]
b (float): Galactic latitude [fractional degrees]
Returns:
ra, dec (float): Right ascension and declination [fractional degrees]
"""
gl = np.radians(l)
gb = np.radians(b)
# Coordinates of the galactic north pole (J2000)
a_ngp = np.radians(12.9406333 * 15.)
d_ngp = np.radians(27.1282500)
l_ngp = np.radians(123.9320000)
sd_ngp = np.sin(d_ngp)
cd_ngp = np.cos(d_ngp)
sb = np.sin(gb)
cb = np.cos(gb)
# Calculate right ascension
y = cb*np.sin(l_ngp - gl)
x = cd_ngp*sb - sd_ngp*cb*np.cos(l_ngp - gl)
ra = np.arctan2(y, x) + a_ngp
ra = np.degrees(ra) % 360
# Calculate declination
dec = np.arcsin(sd_ngp*sb + cd_ngp*cb*np.cos(l_ngp - gl))
dec = np.degrees(dec) % 360.
dec[dec > 270] = -(360 - dec[dec > 270])
return ra, dec
def radec_to_lb(ra, dec, frac=False):
"""
Convert from ra, dec to galactic coordinates.
Formulas from 'An Introduction to Modern Astrophysics (2nd Edition)' by
Bradley W. Carroll, Dale A. Ostlie (Eq. 24.16 onwards).
NOTE: This function is not as accurate as the astropy conversion, nor as
the Javascript calculators found online. However, as using astropy was
prohibitively slow while running over large populations, we use this
function. While this function is not as accurate, the under/over
estimations of the coordinates are equally distributed meaning the errors
cancel each other in the limit of large populations.
Args:
ra (string): Right ascension given in the form '19:06:53'
dec (string): Declination given in the form '-40:37:14'
frac (bool): Denote whether coordinates are already fractional or not
Returns:
gl, gb (float): Galactic longitude and latitude [fractional degrees]
"""
if not frac:
ra, dec = frac_deg(ra, dec)
a = np.radians(ra)
d = np.radians(dec)
# Coordinates of the galactic north pole (J2000)
a_ngp = np.radians(12.9406333 * 15.)
d_ngp = np.radians(27.1282500)
l_ngp = np.radians(123.9320000)
sd_ngp = np.sin(d_ngp)
cd_ngp = np.cos(d_ngp)
sd = np.sin(d)
cd = np.cos(d)
# Calculate galactic longitude
y = cd*np.sin(a - a_ngp)
x = cd_ngp*sd - sd_ngp*cd*np.cos(a - a_ngp)
gl = - np.arctan2(y, x) + l_ngp
gl = np.degrees(gl) % 360
# Shift so in range -180 to 180
if isinstance(gl, np.ndarray):
gl[gl > 180] = -(360 - gl[gl > 180])
else:
if gl > 180:
gl = -(360 - gl)
# Calculate galactic latitude
gb = np.arcsin(sd_ngp*sd + cd_ngp*cd*np.cos(a - a_ngp))
gb = np.degrees(gb) % 360
if isinstance(gb, np.ndarray):
gb[gb > 270] = -(360 - gb[gb > 270])
else:
if gb > 270:
gb = -(360 - gb)
return gl, gb
def separation(ra_1, dec_1, ra_2, dec_2):
"""Separation between points on sky [degrees].
Using a special case of the Vincenty formula for an ellipsoid with equal
major and minor axes.
See https://en.wikipedia.org/wiki/Great-circle_distance for more info.
"""
# Convert to radians
ra_1 = np.deg2rad(ra_1)
dec_1 = np.deg2rad(dec_1)
ra_2 = np.deg2rad(ra_2)
dec_2 = np.deg2rad(dec_2)
# Shortcuts
sdr = np.sin(ra_2 - ra_1)
cdr = np.cos(ra_2 - ra_1)
cd1 = np.cos(dec_1)
cd2 = np.cos(dec_2)
sd1 = np.sin(dec_1)
sd2 = np.sin(dec_2)
# Calculation
upper = np.sqrt((cd2*sdr)**2 + (cd1*sd2 - sd1*cd2*cdr)**2)
lower = sd1*sd2 + cd1*cd2*cdr
sep = np.arctan2(upper, lower)
return np.rad2deg(sep)
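# Quick check (illustrative): two points on the equator, 90 degrees apart in RA:
#   separation(0, 0, 90, 0) -> 90.0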
def ne2001_dist_to_dm(dist, gl, gb):
"""
Convert position to a dispersion measure using NE2001.
Args:
dist (float): Distance to source [Gpc]. Distance will be cut at 100kpc,
as NE2001 can not cope with larger distances. This value
should be more than enough to clear the Milky Way.
gl (float): Galactic longitude [fractional degrees]
gb (float): Galactic latitude [fractional degrees]
Returns:
dm (float): Dispersion measure [pc*cm^-3]
"""
dist *= 1e6 # Convert from Gpc to kpc
# NE2001 gives errors if distance input is too large! 100 kpc ought to be
# enough to clear the galaxy.
if dist > 100:
dist = 100
dist = C.c_float(dist)
gl = C.c_float(gl)
gb = C.c_float(gb)
inpath = C.create_string_buffer(dm_mods.encode())
linpath = C.c_int(len(dm_mods))
dm = ne2001lib.dm_(C.byref(dist),
C.byref(gl),
C.byref(gb),
C.byref(C.c_int(4)),
C.byref(C.c_float(0.0)),
C.byref(inpath),
C.byref(linpath)
)
return dm
def ne2001_get_smtau(dist, gl, gb):
"""
Use the NE2001 model to calculate scattering measure.
Calculations based on work presented in Cordes & Lazio
(1991, DOI: 10.1086/170261)
Args:
dist (array): Distance to source [kpc]. Distance will be cut at 100 kpc
as NE2001 can not cope with larger distances. Therefore
the calculated scattering will only be that from the
Milky Way.
gl (array): Galactic longitude [fractional degrees]
gb (array): Galactic latitude [fractional degrees]
Returns:
sm (array): Scattering measure
smtau (array): Scattering measure, but unsure why different to sm
"""
# NE2001 gives errors if distance input is too large! 100 kpc ought to be
# enough to clear the galaxy.
dist[dist > 100] = 100
sms = np.ones_like(dist)
smtaus = np.ones_like(dist)
for i, d in enumerate(dist):
disti = C.c_float(d)
# Note the galactic coordinates need to be given in radians
gli = C.c_float(math.radians(gl[i]))
gbi = C.c_float(math.radians(gb[i]))
ndir = C.c_int(-1)
sm = C.c_float(0.)
smtau = C.c_float(0.)
inpath = C.create_string_buffer(dm_mods.encode())
linpath = C.c_int(len(dm_mods))
ne2001lib.dmdsm_(C.byref(gli),
C.byref(gbi),
C.byref(ndir),
C.byref(C.c_float(0.0)),
C.byref(disti),
C.byref(C.create_string_buffer(' '.encode())),
C.byref(sm),
C.byref(smtau),
C.byref(C.c_float(0.0)),
C.byref(C.c_float(0.0)),
C.byref(inpath),
C.byref(linpath)
)
sms[i], smtaus[i] = sm.value, smtau.value
return sms, smtaus
def ne2001_scint_time_bw(dist, gl, gb, freq):
"""
Use the NE2001 model to get the diffractive scintillation timescale.
Args:
dist (array): Distance to source [Gpc]. Distance will be cut at 100 kpc
as NE2001 can not cope with larger distances. Therefore
the calculated scintillation timescale will only be that
from the Milky Way.
gl (array): Galactic longitude [fractional degrees]
gb (array): Galactic latitude [fractional degrees]
freq (float): Observing frequency [MHz]
Returns:
scint_time (float): Diffractive scintillation timescale [s]
scint_bw (float): Scintillation bandwidth [Hz]
"""
dist *= 1e6 # Convert from Gpc to kpc
sm, smtau = ne2001_get_smtau(dist, gl, gb)
scint_time = np.ones_like(dist)
scint_time[smtau <= 0.] = float('NaN')
# Eq. 46 of Cordes & Lazio 1991, ApJ, 376, 123 uses coefficient 3.3
# instead of 2.3. They do this in the code and mention it explicitly,
# so I trust it! <- From psrpoppy
scint_time[smtau > 0.] = 3.3 * (freq/1e3)**1.2 * smtau[smtau > 0.]**(-0.6)
scint_bw = np.ones_like(dist)
scint_bw[sm <= 0.] = float('NaN')
# (eq. 48)
scint_bw[sm > 0.] = 223. * (freq/1e3)**4.4 * sm[sm > 0.]**(-1.2) / dist[sm > 0.]
return scint_time, scint_bw
def scatter_bhat(dm, offset=-6.46, scindex=-3.86, freq=1400.0):
"""
Calculate the scattering timescale.
Values default to those from Bhat et al. (2004, DOI: 10.1086/382680); to
simulate the scatter around this relationship, a value is drawn from a
Gaussian centred on it.
Args:
dm (array): Dispersion measure [pc*cm^-3]
offset (float): Offset of scattering relationship. Defaults to -6.46
scindex (float): Scattering index. Defaults to -3.86
freq (float): Frequency at which to evaluate scattering time [MHz].
Defaults to 1400 MHz
Returns:
array: Scattering timescale [ms]
"""
log_t = offset + 0.154*np.log10(dm) + 1.07*np.log10(dm)**2
log_t += scindex*np.log10(freq/1e3)
# Width of Gaussian distribution based on values given Lorimer et al (2008)
t_scat = 10**np.random.normal(log_t, 0.8)
return t_scat
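# Magnitude check (illustrative, mean of the relation before the Gaussian draw):
# for dm=100 and the defaults, log_t = -6.46 + 0.154*2 + 1.07*4 - 3.86*log10(1.4)
# ≈ -2.44, i.e. a mean scattering time of roughly 3.7e-3 ms at 1400 MHz.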
def load_T_sky():
"""
Read the Haslam sky temperature map into a list.
... from which temperatures can
be retrieved. The temperature sky map is given in the weird units of
HealPix, and despite looking up info on this coordinate system, I don't
have the foggiest idea of how to transform these to galactic coordinates. I
have therefore directly copied the following code from psrpoppy in the
assumption Sam Bates managed to figure it out.
Returns:
t_sky_list (list): List of sky temperatures in HealPix? coordinates?
"""
model = os.path.join(os.path.dirname(__file__), '../data/models/tsky/')
path = os.path.join(model, 'haslam_2014.dat')
t_sky_list = []
with open(path) as f:
for line in f:
str_idx = 0
while str_idx < len(line):
# each temperature occupies space of 5 chars
temp_string = line[str_idx:str_idx+5]
try:
t_sky_list.append(float(temp_string))
except ValueError:
pass
str_idx += 5
return t_sky_list
class Redshift:
"""Class for converting redshift to other distance measures."""
def __init__(self, z, H_0=67.74, W_m=0.3089, W_v=0.6911):
"""
Convert redshift to various distance measures.
Based on James Schombert's python implementation of Edward L. Wright's
cosmology calculator.
Args:
z (array): Redshift
H_0 (float, optional): Hubble parameter.
W_m (float, optional): Omega matter.
W_v (float, optional): Omega vacuum.
Returns:
array: One of the distance measures [Gpc], or comoving volume from
Earth to the source [Gpc^3]
"""
self.z = z
self.H_0 = H_0
self.W_m = W_m
self.W_v = W_v
# Initialize constants
self.W_r = 0.4165/(self.H_0*self.H_0) # Omega radiation
self.W_k = 1.0 - self.W_m - self.W_r - self.W_v # Omega curvature
self.c = 299792.458 # Velocity of light [km/sec]
self.dcmr = 0.
self.az = 1/(1+self.z)
# Distance measures
self.dc_mpc = None
self.dl_mpc = None
def dist_co(self):
"""Calculate the corresponding comoving distance [Gpc]."""
n = 1000
for i in range(n):
a = self.az+(1-self.az)*(i+0.5)/n
s = sum([self.W_k, self.W_m/a, self.W_r/(a*a), self.W_v*a*a])
adot = np.sqrt(s)
self.dcmr += 1/(a*adot)
self.dcmr = (1.-self.az)*self.dcmr/n
self.dc_mpc = (self.c/self.H_0)*self.dcmr # Comoving distance [Mpc]
return self.dc_mpc*1e-3 # Convert to Gpc
def dist_lum(self):
"""Calculate the corresponding luminosity distance [Gpc]."""
if self.dc_mpc is None:
self.dist_co()
# Calculate luminosity distance
ratio = np.ones_like(self.dcmr)
x = np.sqrt(abs(self.W_k))*self.dcmr
mask = (x > 0.1)
if self.W_k > 0:
ratio[mask] = 0.5*(np.exp(x[mask])-np.exp(-x[mask]))/x[mask]
else:
ratio[mask] = np.sin(x[mask])/x[mask]
y = x*x
if self.W_k < 0:
y = -y
ratio[~mask] = 1. + y[~mask]/6. + y[~mask]*y[~mask]/120.
dcmt = ratio*self.dcmr
da = self.az*dcmt
dl = da/(self.az*self.az)
self.dl_mpc = (self.c/self.H_0)*dl # Luminosity distance [Mpc]
return self.dl_mpc*1e-3  # Convert to Gpc
def vol_co(self):
"""Calculate the corresponding comoving volume [Gpc^3]."""
if self.dl_mpc is None:
self.dist_lum()
ratio = np.ones_like(self.dcmr)
x = math.sqrt(abs(self.W_k))*self.dcmr
mask = (x > 0.1)
if self.W_k > 0:
n = (0.125*(np.exp(2.*x[mask])-np.exp(-2.*x[mask]))-x[mask]/2.)
ratio[mask] = n/(x[mask]**3/3)
else:
ratio[mask] = (x[mask]/2. - np.sin(2.*x[mask])/4.)/(x[mask]**3/3)
y = x*x
if self.W_k < 0:
y = -y
ratio[~mask] = 1. + y[~mask]/5. + (2./105.)*y[~mask]*y[~mask]
v_cm = ratio*self.dcmr**3/3
self.v_gpc = 4.*math.pi*((1e-3*self.c/self.H_0)**3)*v_cm
return self.v_gpc
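# Hedged usage sketch (not in the original): the class expects array input, e.g.
#   r = Redshift(np.array([0.5, 1.0]))
#   d_l = r.dist_lum()   # luminosity distances [Gpc]
#   v_co = r.vol_co()    # comoving volumes [Gpc^3]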
def z_to_d_approx(z, H_0=67.74):
"""
Calculate distance in Gpc from a redshift.
Only holds for z <= 2. Formulas from 'An Introduction to Modern
Astrophysics (2nd Edition)' by Bradley W. Carroll, Dale A. Ostlie.
(Eq. 27.7)
Args:
z (float): Redshift
H_0 (float, optional): Hubble parameter. Defaults to 67.74
Returns:
dist (float): Associated distance [Gpc]
"""
c = 299792.458 # Velocity of light [km/sec]
zsq = (z+1)**2
dist = c/H_0 * (zsq - 1)/(zsq + 1)
dist /= 1e3 # Mpc -> Gpc
return dist
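# Worked example (from the formula above): z=1 gives zsq=4, so
# dist = (299792.458/67.74) * (3/5) / 1e3 ≈ 2.66 Gpc.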
def dist_to_z(dist, H_0=67.74):
"""
Calculate redshift from a distance in Gpc.
Only holds for z <= 2. Formulas from 'An Introduction to Modern
Astrophysics (2nd Edition)' by Bradley W. Carroll, Dale A. Ostlie.
(Eq. 27.7)
Args:
dist (float): Distance [Gpc].
H_0 (float, optional): Hubble parameter. Defaults to 67.74
Returns:
z (float): Associated redshift
"""
c = 299792.458 # Velocity of light [km/sec]
dist *= 1e3 # Gpc -> Mpc
dhc = dist*H_0/c
det = math.sqrt(1 - dhc**2)
z = -(det + dhc - 1)/(dhc - 1)
return z
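# Consistency check (follows from the algebra above): this inverts
# z_to_d_approx; e.g. dist ≈ 2.6556 Gpc gives dhc = 0.6, det = 0.8 and
# z = -(0.8 + 0.6 - 1)/(0.6 - 1) = 1.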
def datetime_to_julian(date):
"""Convert a datetime object into julian float.
See https://aa.usno.navy.mil/faq/docs/JD_Formula.php for more info.
Args:
date (datetime-object): Date in question
Returns:
float: Julian calculated datetime.
"""
# Add support for numpy arrays of datetime64
if np.issubdtype(date.dtype, np.datetime64):
date = pd.to_datetime(date)
# Define terms
y = date.year
m = date.month
d = date.day
h = date.hour
minute = date.minute  # avoid shadowing the built-in min()
sec = date.second
# Calculate julian day number
jdn = 367*y - ((7*(y + ((m+9)/12).astype(int)))/4).astype(int)
jdn += ((275*m)/9).astype(int) + d + 1721013.5
# Add fractional day
jd = jdn + h/24 + minute/1440 + sec/86400
# Convert to a numpy array
if isinstance(jd, pd.Float64Index):
jd = jd.values
return jd
def datetime_to_gmst(date):
"""Calculate Greenwich Mean Sidereal Time.
See https://aa.usno.navy.mil/faq/docs/GAST.php for more info.
"""
jd = datetime_to_julian(date)
return ((18.697374558 + 24.06570982441908*(jd - 2451545))*15) % 360
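# Spot check (illustrative): at the J2000.0 epoch, jd = 2451545, so
# GMST = (18.697374558*15) % 360 ≈ 280.46 degrees.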
def random_date(start, end):
"""Generate a random datetime between two datetime objects."""
delta = end - start
int_delta = (delta.days * 24 * 60 * 60) + delta.seconds
random_second = random.randrange(int_delta)
return start + timedelta(seconds=random_second)
def coord_to_offset(xref, yref, x, y):
"""
Convert point (x, y) to projected offset from reference (xref, yref).
Makes use of a gnomonic projection: see both
https://github.com/LSSTDESC/Coord/blob/master/coord/celestial.py
http://mathworld.wolfram.com/GnomonicProjection.html
Args:
xref (array): Reference RA or Az [rad]
yref (array): Reference Dec or Alt [rad]
x (array): Target RA or Az [rad]
y (array): Target Dec or Alt [rad]
Returns:
array, array: x and y offset [rad]
"""
# Define convenience numbers
sinxref = np.sin(xref)
sinx = np.sin(x)
cosxref = np.cos(xref)
cosx = np.cos(x)
sinyref = np.sin(yref)
siny = np.sin(y)
cosyref = np.cos(yref)
cosy = np.cos(y)
# Sine and cosine of shift in x
cosdx = (cosxref * cosx) + (sinxref * sinx)
sindx = (cosxref * sinx) - (sinxref * cosx)
# Projection effect cosine
cosc = sinyref * siny + cosyref * cosy * cosdx
# Projected offsets
dx = (cosy * sindx) / cosc
dy = (cosyref * siny - sinyref * cosy * cosdx) / cosc
if isinstance(cosc, np.ndarray):
dx[cosc < 0] = np.nan
dy[cosc < 0] = np.nan
elif cosc < 0:
dx, dy = np.nan, np.nan
return dx, dy
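# Small-angle check (illustrative): near the reference point the projection is
# close to the identity, e.g. coord_to_offset(0, 0, 0.01, 0) -> (≈0.01, ≈0.0).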
def hadec_to_azalt(ha, dec, lat):
"""
Convert hour angle and declination to azimuth and altitude.
Args:
ha (array): Hour angle [rad]
dec (array): Declination [rad]
lat (float): Latitude [rad]
Returns:
array, array: az, alt [rad]
"""
# Ha and dec should be same type
assert type(ha) == type(dec)
# Altitude
sinalt = np.sin(dec) * np.sin(lat) + np.cos(dec) * np.cos(lat) * np.cos(ha)
alt = np.arcsin(sinalt)
# Azimuth (note this uses altitude)
cosaz = (np.sin(dec)-np.sin(alt)*np.sin(lat)) / (np.cos(alt)*np.cos(lat))
convert_to_float = False
if isinstance(cosaz, float):
cosaz = np.array([cosaz])
convert_to_float = True
# Numerical instability can cause cosaz > 1
cosaz[cosaz > 1] = 1
cosaz[cosaz < -1] = -1
az = np.arccos(cosaz)
# Sign of azimuth is lost, but can be recovered using the input hour angle
mask = np.sin(ha) > 0
az[mask] = 2*np.pi - az[mask]
# Convert back to float if input was float
if convert_to_float:
az = float(az)
return az, alt
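# Worked example (illustrative): an object on the celestial equator crossing the
# meridian, seen from latitude 45 deg N (all angles in radians):
#   hadec_to_azalt(0.0, 0.0, np.pi/4) -> (az ≈ pi, alt ≈ pi/4), i.e. due south
# at 45 degrees altitude.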
def in_region(ra, dec, gl, gb,
ra_min=0, ra_max=360,
dec_min=-90, dec_max=90,
gl_min=-180, gl_max=180,
gb_min=-90, gb_max=90):
"""
Check if the given frbs are within the survey region.
Args:
ra, dec, gl, gb (float): Coordinates to check whether in region
Returns:
array: Boolean mask denoting whether frbs are within survey region
"""
# Create mask, all True initially
mask = np.ones_like(ra, dtype=bool)
# Ensure in correct format
gl[gl > 180.] -= 360.
# Create region masks
gl_limits = (gl > gl_max) | (gl < gl_min)
gb_limits = (gb > gb_max) | (gb < gb_min)
ra_limits = (ra > ra_max) | (ra < ra_min)
dec_limits = (dec > dec_max) | (dec < dec_min)
mask[gl_limits] = False
mask[gb_limits] = False
mask[ra_limits] = False
mask[dec_limits] = False
return mask
def calc_sky_radius(area):
"""Determine the radius [deg] along the sky of an area [sq. degrees]."""
# Check whether the full sky
if np.allclose(area, 4*np.pi*(180/np.pi)**2):
return 180
else:
cos_r = (1 - (area*np.pi)/(2*180**2))
# Suppressing warnings when cos_r is invalid (will nan anyway)
with np.errstate(invalid='ignore'):
return np.rad2deg(np.arccos(cos_r))
def calc_sky_area(radius):
"""Determine the area [sq. degree] of a radius [deg] along the sky."""
return (1 - np.cos(np.deg2rad(radius)))*(2*180**2)/np.pi
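# Round-trip check (illustrative): calc_sky_area(180) = 129600/pi ≈ 41252.96
# sq. deg, exactly the full sky, which calc_sky_radius maps back to 180 deg.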
|
TRASAL/frbpoppy
|
frbpoppy/galacticops.py
|
galacticops.py
|
py
| 22,052 |
python
|
en
|
code
| 26 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "frbpoppy.paths.paths.models",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "frbpoppy.paths.paths",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "frbpoppy.paths.paths.models",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "frbpoppy.paths.paths",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "ctypes.CDLL",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "ctypes.c_float",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "numpy.radians",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.arctan2",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.degrees",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.arcsin",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.degrees",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "numpy.arctan2",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "numpy.degrees",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "numpy.arcsin",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "numpy.degrees",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "numpy.deg2rad",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "numpy.deg2rad",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "numpy.deg2rad",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "numpy.deg2rad",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "numpy.arctan2",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "numpy.rad2deg",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "ctypes.c_float",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "ctypes.c_float",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "ctypes.c_float",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "ctypes.create_string_buffer",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "ctypes.c_int",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "ctypes.c_int",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "ctypes.c_float",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "ctypes.c_float",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "ctypes.c_float",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "math.radians",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "ctypes.c_float",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "math.radians",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "ctypes.c_int",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "ctypes.c_float",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "ctypes.c_float",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "ctypes.create_string_buffer",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "ctypes.c_int",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "ctypes.c_float",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "ctypes.create_string_buffer",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "ctypes.c_float",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "ctypes.c_float",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 360,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 380,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 381,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 459,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 486,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 491,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 502,
"usage_type": "attribute"
},
{
"api_name": "math.sqrt",
"line_number": 545,
"usage_type": "call"
},
{
"api_name": "numpy.issubdtype",
"line_number": 563,
"usage_type": "call"
},
{
"api_name": "numpy.datetime64",
"line_number": 563,
"usage_type": "attribute"
},
{
"api_name": "pandas.to_datetime",
"line_number": 564,
"usage_type": "call"
},
{
"api_name": "pandas.Float64Index",
"line_number": 581,
"usage_type": "attribute"
},
{
"api_name": "random.randrange",
"line_number": 600,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 601,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 623,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 624,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 625,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 626,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 628,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 629,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 630,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 631,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 644,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 645,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 646,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 648,
"usage_type": "attribute"
},
{
"api_name": "numpy.sin",
"line_number": 670,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 670,
"usage_type": "call"
},
{
"api_name": "numpy.arcsin",
"line_number": 671,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 674,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 674,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 678,
"usage_type": "call"
},
{
"api_name": "numpy.arccos",
"line_number": 684,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 687,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 688,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones_like",
"line_number": 713,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 735,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 735,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 738,
"usage_type": "attribute"
},
{
"api_name": "numpy.errstate",
"line_number": 740,
"usage_type": "call"
},
{
"api_name": "numpy.rad2deg",
"line_number": 741,
"usage_type": "call"
},
{
"api_name": "numpy.arccos",
"line_number": 741,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 746,
"usage_type": "call"
},
{
"api_name": "numpy.deg2rad",
"line_number": 746,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 746,
"usage_type": "attribute"
}
] |
33371549713
|
#---!/usr/bin/env python
#--- -*- coding: utf-8 -*-
# Libraries
import re
import sys
import json
import string
import random
import operator
import unicodedata
sys.stdout.encoding
'UTF-8'
# NLTK library
import nltk
nltk.download('punkt')
nltk.download('stopwords')
from nltk.tokenize.toktok import ToktokTokenizer
from nltk.metrics.distance import edit_distance
from nltk.stem.snowball import SnowballStemmer
from nltk.corpus import stopwords
# Libraries for the graph
import matplotlib.pyplot as plt
import networkx as nx
# Variables
miniparams = []
parametros = []
nombreArchivo = "wikipedia_es_abstracts.txt" # "OWesA.txt"
# Spanish stemmer
stemmer = SnowballStemmer("spanish")
# Iterate over the command-line parameters
for i in range(len(sys.argv)):
# Skip the script name (argv[0])
if 0 < i:
# Retrieve the parameter
parm = str(sys.argv[i].strip().lower())
# Avoid duplicates
if parm not in parametros:
# Store the parameter and its stem
parametros.append(parm)
miniparams.append(stemmer.stem(parm))
# Initialize the tokenizer
toktok = ToktokTokenizer()
# Create the sentence tokenizer
esTokenizarOraciones = nltk.data.load("tokenizers/punkt/spanish.pickle")
# Build the list of function words (stopwords)
listPalabrasfuns = stopwords.words("spanish")
listPuntuaciones = list(string.punctuation)
listPuntuaciones.append("¿")
listPuntuaciones.append("¡")
listPuntuaciones.append("ja")
listPuntuaciones.append("yme")
listPuntuaciones.append("yczna")
listPuntuaciones.append("así")
# Function to read and filter the corpus file
def leer_archivo (archivo, params):
# Variables
documento = {}
# Read the document
with open(archivo, 'r', encoding="utf-8") as myFile: # open(archivo, encoding="latin-1")
# Read all the lines of the text
lins_arch = myFile.readlines()
list_temp_parr = []
contador = 0
# Iterate over the file's paragraphs
for parrafo in lins_arch:
# Split on \t
list_segmentos = parrafo.split("\t")
# Check the number of segments
if 2 <= len(list_segmentos):
# Variable
textoObjetivo = ""
# Title, split on ':'
list_titulo = list_segmentos[0].split(":")
titulo = ""
# Validate the title
if 2 <= len(list_titulo):
titulo = list_titulo[1].lower()
# Paragraph, split on '\n'
list_parrafo = list_segmentos[1].split("\n")
# Validate the paragraph
textoObjetivo = ""
if 1 <= len(list_parrafo):
textoObjetivo = list_parrafo[0].lower()
# Check whether the paragraph mentions any of the parameters
bandera = False
for prm in params:
if textoObjetivo.find(prm) >= 0:
bandera = True
# Keep the paragraph if it matched and is not a duplicate
if bandera and textoObjetivo not in list_temp_parr:
# Add the paragraph to the documento dict
documento[contador] = { "T": titulo, "P": textoObjetivo}
list_temp_parr.append(textoObjetivo)
contador = contador + 1
# Close the file (redundant: the with-block already closes it)
myFile.close()
# Return the dict of documents
return documento
# Read the file
listParrafos = leer_archivo(nombreArchivo, miniparams)
# Function to find target words that match a pattern
def buscar_coincidencias(list_pals_funs, list_punts, texto, one_pos, final_star_pos, expresion, dicc_de_rel = None):
# Make sure the dictionary exists
if dicc_de_rel is None:
dicc_de_rel = {}
# Compile the pattern
patron_exp = re.compile(expresion)
# Find the pattern's matches in the text
list_matches = patron_exp.findall(texto)
# Iterate over the matches
for mi_match in list_matches:
# Temporary list of words of interest
list_of_temp_words = []
# Iterate over the words of the match
for temp_i in range(len(mi_match)):
# Keep only the capture groups of interest
if (temp_i == one_pos) or (temp_i >= final_star_pos):
# Retrieve the related word
temp_word = mi_match[temp_i]
temp_word = re.sub(', ', '', temp_word)
temp_word = re.sub('y ', '', temp_word)
temp_word = temp_word.strip()
# Decide whether to keep it
if temp_word != '' and temp_word not in list_pals_funs and temp_word not in list_punts and temp_word not in list_of_temp_words:
# Keep the related word
list_of_temp_words.append(temp_word)
if len(list_of_temp_words) > 1:
# Go through the temporary words
my_temp_w = list_of_temp_words[0]
# Make sure the key exists in the dictionary
if my_temp_w not in dicc_de_rel:
# Temporary list of words of interest
dicc_de_rel[my_temp_w] = []
# Sub-iteration over the temporary words
for m_sbtmp_w in list_of_temp_words:
# Avoid duplicates in the dictionary
if m_sbtmp_w not in dicc_de_rel[my_temp_w]:
# Store the temporary word
dicc_de_rel[my_temp_w].append(m_sbtmp_w)
# Return the results
return dicc_de_rel
# Relations dictionary
diccDeRel = {}
# Iterate over the paragraphs in the dictionary
for key,value in listParrafos.items():
# Apply the patterns and update the dictionary
diccDeRel = buscar_coincidencias(listPalabrasfuns, listPuntuaciones, value["P"], 1, 3, '(las|la|los|el)*(\s*\w+) (son un|son una|es un|es una|fueron un|fueron una|fue un|fue una){1} (\w+)', diccDeRel)
diccDeRel = buscar_coincidencias(listPalabrasfuns, listPuntuaciones, value["P"], 0, 2, '(\w+) (tal como|así tambien|así como|como por|por ejemplo|tambien conocida como|tambien conocido como|tal como:|como:|como){1} (\w+)*(,\s*\w+)*(\s*y\s*\w+)*', diccDeRel)
diccDeRel = buscar_coincidencias(listPalabrasfuns, listPuntuaciones, value["P"], 0, 4, '(\w+) (es|forma|forman|son){1} (parte){1} (del|de las|de los|de el|de una|de un|de){1} (\w+)', diccDeRel)
diccDeRel = buscar_coincidencias(listPalabrasfuns, listPuntuaciones, value["P"], 0, 5, '(\w+)(\s*le|\s*es|\s*son)* (perteneciente(s)*|pertenecen|pertenece|a){1} (la|al|a)* (\w+)', diccDeRel)
diccDeRel = buscar_coincidencias(listPalabrasfuns, listPuntuaciones, value["P"], 0, 1, '(\w+)(,\s*\w+)*(\s*y\s*\w+)', diccDeRel)
# Variables
list_nodos = []
list_aristas = []
# Iterate over the relations dictionary
for key,arry in diccDeRel.items():
# Check whether to add the node
if key not in list_nodos:
# Add node
list_nodos.append(key)
# Iterate over the key's relations
for value in arry:
# Build the edge tuple
tempTupla = (key, value)
# Check whether to add the tuple
if tempTupla not in list_aristas:
# Add edge
list_aristas.append(tempTupla)
# Check whether to add the node
if value not in list_nodos:
# Add node
list_nodos.append(value)
# Create the graph
Grafico = nx.DiGraph()
# Vertices
Grafico.add_nodes_from(list_nodos)
# Edges
Grafico.add_edges_from(list_aristas)
posicion = nx.spring_layout(Grafico)
nx.draw_networkx_labels(Grafico, posicion, labels=dict([(nodo, nodo) for nodo in list_nodos]))
# Draw the graph
nx.draw(Grafico, posicion)
# Display the drawing on screen
plt.show()
|
SoraGefroren/Practicas_relacionadas_al_NLP_utilizando_Python
|
Práctica_04-Wiki/relaciones.py
|
relaciones.py
|
py
| 6,622 |
python
|
es
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.stdout",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "nltk.download",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "nltk.stem.snowball.SnowballStemmer",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "nltk.tokenize.toktok.ToktokTokenizer",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "nltk.data.load",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "nltk.data",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "string.punctuation",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "networkx.DiGraph",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "networkx.spring_layout",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "networkx.draw_networkx_labels",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "networkx.draw",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 208,
"usage_type": "name"
}
] |
5035337247
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('authentication_module', '0004_auto_20160801_2318'),
]
operations = [
migrations.AddField(
model_name='customuser',
name='tipo_cuenta',
field=models.CharField(default=b'C', max_length=1, choices=[(b'C', b'COMERCIANTE'), (b'M', b'MUNICIPIO')]),
),
]
|
DirectorioTurismoComercio/BackEnd
|
authentication_module/migrations/0005_customuser_tipo_cuenta.py
|
0005_customuser_tipo_cuenta.py
|
py
| 492 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 17,
"usage_type": "name"
}
] |
2732586952
|
# -*- coding: utf-8 -*-
from signature import settings
from control.middleware.config import RET_DATA, apple_url
from control.middleware.common import get_random_s
import re
import json
import logging
import datetime
import requests
import random
import time
import jwt
logger = logging.getLogger('django')
class AppStoreConnectApi(object):
'''
Apple App Store Connect (developer) API
'''
def __init__(self, account, p8, iss, kid):
'''
Initialize the individual developer account info
'''
self.__account = account
self.__p8 = p8
self.__iss = iss
self.__kid = kid
self.__ret_data = RET_DATA.copy()
self.__timeout = 15
self.__verify = False
self.__token = self._get_token()
def _get_token(self):
'''
Obtain an API token via JWT
'''
# Apple uses ES256, and the key must be line-segmented (\n); the "-----BEGIN PRIVATE KEY-----" header/footer are also required. I had been pasting the private key in as plain text, which worked under HS256, but under ES256 it raised ValueError: Could not deserialize key data.
private_key = "-----BEGIN PRIVATE KEY-----" + self.__p8.replace("-----BEGIN PRIVATE KEY-----", "").replace("-----END PRIVATE KEY-----", "").replace(" ", "\n") + "-----END PRIVATE KEY-----"
# payload
token_dict = {
"exp": time.time() + 20*60, # 时间戳, token 有效时间 20分钟
"iss": self.__iss,
"aud": "appstoreconnect-v1"
}
# headers
headers = {
"alg": "ES256", # 声明所使用的算法。
"kid": self.__kid,
"typ": "JWT",
}
try:
# Use JWT to obtain the Apple developer API token
jwt_token = jwt.encode(token_dict, private_key, algorithm="ES256", headers=headers)
token = str(jwt_token, encoding='utf-8')
logger.info(f"{self.__account} : {token}")
return token
except Exception as e:
logger.error(f"获取苹果开发者 {self.__account} 接口token 错误: {str(e)}")
return None
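# Illustrative note (a reading of the replace() calls above, with hypothetical key material):
# a stored p8 body like "-----BEGIN PRIVATE KEY----- MIGT AgEA -----END PRIVATE KEY-----"
# becomes "-----BEGIN PRIVATE KEY-----\nMIGT\nAgEA\n-----END PRIVATE KEY-----",
# i.e. the spaces are turned back into the line breaks that ES256 key parsing requires.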
def create_profile(self, bundleIds, cer_id, device_id):
'''
Create a profile
'''
# Initialize request parameters
self.__content = "create profile"
self.__method = "POST"
self.__url = f"{apple_url}/profiles"
self.__data = {
"data": {
"type": "profiles",
"attributes": {
"name": get_random_s(16),
"profileType": "IOS_APP_ADHOC"
},
"relationships": {
"bundleId": {
"data": {
"id": bundleIds,
"type": "bundleIds"
}
},
"certificates": {
"data": [{
"id": cer_id,
"type": "certificates"
}]
},
"devices": {
"data": [{
"id": device_id,
"type": "devices"
}]
}
}
}
}
self.__headers = {"Content-Type": "application/json"}
# Get the API result
return self._send_req()
def create_devices(self, udid):
'''
Create a device
'''
# Initialize request parameters
self.__content = "create device"
self.__method = "POST"
self.__url = f"{apple_url}/devices"
self.__data = {
"data": {
"type": "devices",
"attributes": {
"udid": udid,
"name": udid,
"platform": "IOS",
}
}
}
self.__headers = {"Content-Type": "application/json"}
# Get the API result
return self._send_req()
def create_bundleIds(self, bundleId):
'''
Create a bundleId
'''
# Initialize request parameters
self.__content = "create bundleId"
self.__method = "POST"
self.__url = f"{apple_url}/bundleIds"
self.__data = {
"data": {
"type": "bundleIds",
"attributes": {
"identifier": bundleId,
"name": "AppBundleId",
"platform": "IOS",
}
}
}
self.__headers = {"Content-Type": "application/json"}
# Get the API result
return self._send_req()
def delete_bundleIds(self, bundleIds):
'''
Delete a bundleId
'''
# Initialize request parameters
self.__content = "delete bundleId"
self.__method = "DELETE"
self.__url = f"{apple_url}/bundleIds/{bundleIds}"
self.__data = {}
self.__headers = {"Content-Type": "application/json"}
# Get the API result
return self._send_req()
def get_bundleIds(self):
'''
Get bundleIds
'''
# Initialize request parameters
self.__content = "get bundleIds"
self.__method = "GET"
self.__url = f"{apple_url}/bundleIds?limit=200"
self.__data = {
"platform": "IOS"
}
self.__headers = {"Content-Type": "application/json"}
# Get the API result
return self._send_req()
def create_cer(self, csr):
'''
Create a certificate:
{
"certificateType": "IOS_DISTRIBUTION"
}
'''
# Initialize request parameters
self.__content = "create certificate"
self.__method = "POST"
self.__url = f"{apple_url}/certificates"
self.__data = {
"data": {
"type": "certificates",
"attributes": {
"csrContent": csr,
"certificateType": "IOS_DISTRIBUTION"
}
}
}
self.__headers = {"Content-Type": "application/json"}
# Get the API result
return self._send_req()
def delete_cer(self, cer_id):
'''
Delete a certificate:
{
"certificateType": "IOS_DISTRIBUTION"
}
'''
# Initialize request parameters
self.__content = "delete certificate"
self.__method = "DELETE"
self.__url = f"{apple_url}/certificates/{cer_id}"
self.__data = {}
self.__headers = {"Content-Type": "application/json"}
# Get the API result
return self._send_req()
def get_cer(self):
'''
Get certificates:
{
"certificateType": "IOS_DISTRIBUTION"
}
'''
# Initialize request parameters
self.__content = "get certificates"
self.__method = "GET"
self.__url = f"{apple_url}/certificates?limit=200"
self.__data = {
"certificateType": "IOS_DISTRIBUTION" # 这个筛选参数是不生效的,filter[certificateType] 这个不清楚怎么加入到参数里进行请求
}
self.__headers = {"Content-Type": "application/json"}
# Get the API result
return self._send_req()
def get_devices(self):
'''
Get the devices registered on the developer account
GET https://api.appstoreconnect.apple.com/v1/devices
'''
# Initialize request parameters
self.__content = "get registered device info"
self.__method = "GET"
self.__url = f"{apple_url}/devices?limit=200"
self.__data = {}
self.__headers = {"Content-Type": "application/json"}
# Get the API result
return self._send_req()
def _send_req(self):
'''
Send the request via requests
'''
token = self.__token
if not token: # failed to obtain the token
self.__ret_data['msg'] = "failed to obtain the Apple account token"
self.__ret_data['code'] = 500
return self.__ret_data
self.__headers["Authorization"] = f"Bearer {token}"
self.__req_id = ''.join(str(random.choice(range(10))) for _ in range(10)) # assign a random 10-digit id to every request
logger.info(f"""{self.__content}: # log the request parameters
req_id: {self.__req_id}
method: {self.__method}
url: {self.__url}
data: {self.__data}
headers: {self.__headers}
""")
s = requests.Session()
req = requests.Request(self.__method, self.__url,
data=json.dumps(self.__data),
headers=self.__headers
)
prepped = s.prepare_request(req)
try:
ret = s.send(prepped, verify=self.__verify, timeout=self.__timeout) # send the request
self.__ret_data['code'] = 0
if ret.status_code == 204: # status code 204 returns an empty body, e.g. for DELETE-certificate requests
self.__ret_data['data'] = f"{self.__account}: {self.__content} succeeded"
logger.info(f"req_id: {self.__req_id} {self.__ret_data['data']}")
else:
app_ret = ret.json()
self.__ret_data['data'] = app_ret
self.__ret_data['msg'] = f"{self.__account}: {self.__content} succeeded"
if "errors" in app_ret.keys():
self.__ret_data['msg'] = f"{self.__account}: {self.__content} failed"
self.__ret_data['code'] = 500
logger.error(f"req_id: {self.__req_id} {self.__ret_data['msg']}: {self.__url} :{str(app_ret)}")
else:
logger.info(f"req_id: {self.__req_id} {self.__ret_data['msg']}: {self.__url} :{str(app_ret)}")
except Exception as e:
err_text = ret.text if 'ret' in locals() else '' # ret is unbound when send() itself raises
self.__ret_data['msg'] = f"{self.__account}: {self.__content} failed: {err_text}"
self.__ret_data['code'] = 500
logger.error(f"req_id: {self.__req_id} {self.__account}: {self.__content} failed: {self.__url} : {str(e)}. Response error: {err_text}")
return self.__ret_data
def test_connect(self):
'''
Test that the account can connect properly through the Apple API
'''
# Initialize request parameters
self.__content = "test connection"
self.__method = "GET"
self.__url = f"{apple_url}/apps"
self.__data = {}
self.__headers = {"Content-Type": "application/json"}
# Get the API result
return self._send_req()
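# Usage sketch (hypothetical credentials, not from the source):
# api = AppStoreConnectApi('<apple-id>', '<p8 key body>', '<issuer-id>', '<key-id>')
# ret = api.test_connect() # expect ret['code'] == 0 when the account authenticates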
|
lessknownisland/signature
|
apple/middleware/api.py
|
api.py
|
py
| 10,781 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "control.middleware.config.RET_DATA.copy",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "control.middleware.config.RET_DATA",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "jwt.encode",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "control.middleware.config.apple_url",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "control.middleware.common.get_random_s",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "control.middleware.config.apple_url",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "control.middleware.config.apple_url",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "control.middleware.config.apple_url",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "control.middleware.config.apple_url",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "control.middleware.config.apple_url",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "control.middleware.config.apple_url",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "control.middleware.config.apple_url",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "control.middleware.config.apple_url",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "requests.Session",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "requests.Request",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "control.middleware.config.apple_url",
"line_number": 321,
"usage_type": "name"
}
] |
35912386575
|
#coding=utf-8
"""
imgcv
"""
from setuptools import setup
from setuptools import find_packages
install_requires = [
]
setup(
name = "imgcv",
version = "1.0.0",
description = 'image computer vision',
author='Hyxbiao',
author_email="[email protected]",
packages = find_packages(),
entry_points={
'console_scripts': [
'imgcv = imgcv.tools.imgcv:main',
'imgbrowser = imgcv.tools.imgbrowser:main',
]
},
install_requires = install_requires,
zip_safe = False,
)
|
hyxbiao/imgcv
|
setup.py
|
setup.py
|
py
| 537 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "setuptools.setup",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 18,
"usage_type": "call"
}
] |
16701006334
|
import os
import sys
from xml.etree import ElementTree
def isPlaylistUpdated(cmusPlaylistFile, jellyfinMusicPathArray) :
cmusMusicPathArray = open(cmusPlaylistFile, 'r').read().splitlines()
if len(cmusMusicPathArray) != len(jellyfinMusicPathArray) :
return True
length = len(cmusMusicPathArray)
for i in range(0, length) :
if cmusMusicPathArray[i] != jellyfinMusicPathArray[i].text :
return True
return False
def updateFile(cmusPlaylistFile, musicPathArray) :
print('updating or creating ' + cmusPlaylistFile)
string = ''
for path in musicPathArray :
string += path.text + '\n'
with open(cmusPlaylistFile, 'w') as sw :
sw.write(string)
JELLYFIN_PLAYLIST_PATH = sys.argv[1]
CMUS_PLAYLIST_PATH = sys.argv[2]
for playlist in os.listdir(JELLYFIN_PLAYLIST_PATH) :
playlistFile = os.path.join(JELLYFIN_PLAYLIST_PATH, playlist)
playlistFile = os.path.join(playlistFile, 'playlist.xml')
if os.path.isfile(playlistFile) :
dom = ElementTree.parse(playlistFile)
paths = dom.findall('PlaylistItems/PlaylistItem/Path')
cmusPlaylistFile = os.path.join(CMUS_PLAYLIST_PATH, playlist)
if (not os.path.isfile(cmusPlaylistFile)) or isPlaylistUpdated(cmusPlaylistFile, paths) :
updateFile(cmusPlaylistFile, paths)
# isPlaylistUpdated('/home/nate/.config/cmus/playlists/test', None)
|
nate-1/playlist-jellyfin-cmus-interface
|
main.py
|
main.py
|
py
| 1,427 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
}
] |
28377193501
|
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.datasets import mnist
from keras.utils import to_categorical
import matplotlib.pyplot as plt
# Load the MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Normalize pixel values to the 0-1 range
x_train, x_test = x_train / 255.0, x_test / 255.0
# One-hot encode the labels
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
# Show the first image
plt.imshow(x_train[0], cmap='gray')
plt.title(f"Label: {np.argmax(y_train[0])}") # show the label
plt.show()
# Build the model
model = Sequential()
model.add(Flatten(input_shape=(28, 28))) # flatten the 28x28 image into a 1-D vector
model.add(Dense(128, activation='relu')) # hidden layer with 128 neurons and ReLU activation
model.add(Dense(10, activation='softmax')) # output layer with 10 neurons (one per class) and softmax activation
# Compile the model
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
# Train the model
model.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test))
# Evaluate the model on the test set
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f'Test accuracy: {test_acc}')
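# Usage sketch (assumes the model trained above): classify the first test image.
# pred = model.predict(x_test[:1])
# print(np.argmax(pred, axis=1)) # predicted digit for the first test sample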
|
Ldh88/112-LiDingHui-ShangHai
|
112-李鼎辉-上海/第八周作业/cv_tensorflow_keras.py
|
cv_tensorflow_keras.py
|
py
| 1,333 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "keras.datasets.mnist.load_data",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "keras.datasets.mnist",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "keras.utils.to_categorical",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "keras.utils.to_categorical",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "numpy.argmax",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "keras.models.Sequential",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "keras.layers.Flatten",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 26,
"usage_type": "call"
}
] |
43344834943
|
import unittest
import mock
import time
from copy import deepcopy
from gorynych.common.domain import events
from gorynych.common.exceptions import DomainError
from gorynych.info.domain.test.helpers import create_contest
from gorynych.info.domain import contest, person, race
from gorynych.common.domain.types import Address, Name, Country
from gorynych.info.domain.ids import PersonID, RaceID, TrackerID, TransportID
class MockedPersonRepository(mock.Mock):
'''
Necessary only for tracker assignment.
'''
def get_by_id(self, key):
person = mock.Mock()
person.name = Name('name', 'surname')
person.country = Country('RU')
if key == 'person1':
person.tracker = 'tracker1'
elif key == 'person2':
person.tracker = 'tracker2'
elif key == 'person3':
person.tracker = None
return person
class ContestFactoryTest(unittest.TestCase):
def test_contestid_successfull_contest_creation(self):
cont = create_contest(1, 2)
self.assertIsInstance(cont.address, Address)
self.assertEqual(cont.title, 'Hello world')
self.assertEqual(cont.country, 'RU')
self.assertEqual(cont.timezone, 'Europe/Moscow')
self.assertEqual(cont.place, 'Yrupinsk')
self.assertEquals((cont.start_time, cont.end_time), (1, 2))
self.assertIsInstance(cont.id, contest.ContestID)
self.assertIsNone(cont._id)
cont2 = create_contest(3, 4)
self.assertNotEqual(cont.id, cont2.id,
"Contest with the same id has been created.")
def test_str_successfull_contest_creation(self):
cont = create_contest(1, 3, id='cnts-130422-12345')
self.assertEqual(cont.end_time, 3)
self.assertEqual(cont.start_time, 1)
self.assertEqual(cont.id, 'cnts-130422-12345')
def test_unsuccessfull_contest_creation(self):
self.assertRaises(ValueError, create_contest, 3, 1,
"Contest can be created with wrong times.")
class EventsApplyingTest(unittest.TestCase):
def test_ContestRaceCreated(self):
cont = create_contest(1, 2)
rid = RaceID()
ev = events.ContestRaceCreated(cont.id, rid)
self.assertRaises(AssertionError, cont.apply, ev)
cont.apply([ev])
self.assertEqual(len(cont.race_ids), 1)
cont.apply([ev])
self.assertEqual(len(cont.race_ids), 1)
rid = RaceID()
ev = events.ContestRaceCreated(cont.id, rid)
cont.apply([ev])
self.assertEqual(len(cont.race_ids), 2)
class ContestTest(unittest.TestCase):
@mock.patch('gorynych.common.infrastructure.persistence.event_store')
def test_register_paraglider(self, patched):
event_store = mock.Mock()
patched.return_value = event_store
cont = create_contest(1, 2)
p1 = person.PersonID()
c = cont.register_paraglider(p1, 'mantrA 9', '747')
self.assertIsInstance(c, contest.Contest)
self.assertEqual(len(cont._participants), 1)
self.assertEqual(len(cont.paragliders), 1)
self.assertIsInstance(cont.paragliders, dict, "It must be dict.")
self.assertEqual(cont._participants[p1]['role'], 'paraglider')
self.assertEqual(cont._participants[p1]['glider'], 'mantra')
self.assertEqual(cont._participants[p1]['contest_number'], 747)
p2 = person.PersonID()
cont.register_paraglider(p2, 'mantrA 9', '757')
self.assertEqual(len(cont._participants), 2)
self.assertEqual(cont._participants[p2]['role'], 'paraglider')
self.assertEqual(cont._participants[p2]['glider'], 'mantra')
self.assertEqual(cont._participants[p2]['contest_number'], 757)
# Check contest numbers uniqueness.
self.assertRaises(ValueError, cont.register_paraglider, 'person3',
'mantrA 9', '757')
mock_calls = event_store.mock_calls
self.assertEqual(len(mock_calls), 2)
self.assertEqual(mock_calls[-1], mock.call.persist(
events.ParagliderRegisteredOnContest(p2, cont.id)))
self.assertEqual(mock_calls[-2], mock.call.persist(
events.ParagliderRegisteredOnContest(p1, cont.id)))
def test_times_changing(self):
cont = create_contest(1, '15')
cont.start_time = '2'
self.assertEqual(cont.start_time, 2)
cont.end_time = '8'
self.assertEqual(cont.end_time, 8)
self.assertRaises(ValueError, setattr, cont, 'start_time', 8)
self.assertRaises(ValueError, setattr, cont, 'start_time', 9)
self.assertRaises(ValueError, setattr, cont, 'end_time', 2)
self.assertRaises(ValueError, setattr, cont, 'end_time', 1)
cont.change_times('10', '16')
self.assertEqual((cont.start_time, cont.end_time), (10, 16))
self.assertRaises(ValueError, cont.change_times, '10', '8')
def test_change_title(self):
cont = create_contest(1, '15')
cont.title = ' hello moOn '
self.assertEqual(cont.title, 'hello moOn')
def test_change_address(self):
cont = create_contest(1, '15')
cont.place = 'Severodvinsk'
self.assertEqual(cont.place, 'Severodvinsk')
cont.country = 'tw'
self.assertEqual(cont.country, 'TW')
cont.hq_coords = (15, 0)
self.assertEqual(cont.hq_coords, (15, 0))
class ContestTestWithRegisteredParagliders(unittest.TestCase):
def setUp(self):
self.p1_id = person.PersonID()
self.p2_id = person.PersonID()
self.p3_id = person.PersonID()
@mock.patch('gorynych.common.infrastructure.persistence.event_store')
def fixture(patched):
patched.return_value = mock.Mock()
cont = create_contest(1, 15)
cont.register_paraglider(self.p2_id, 'mantrA 9', '757')
cont.register_paraglider(self.p1_id, 'gIn 9', '747')
person1 = cont._participants[self.p1_id]
person2 = cont._participants[self.p2_id]
return cont, person1, person2
try:
self.cont, self.person1, self.person2 = fixture()
except Exception:
raise unittest.SkipTest("ERROR: need contest with paragliders "
"for test.")
def tearDown(self):
del self.cont
del self.person1
del self.person2
def test_correct_change_participant_data(self):
self.cont.change_participant_data(self.p1_id, glider='ajAx ',
contest_number='0')
self.assertEqual(self.person1['glider'], 'ajax')
self.assertEqual(self.person1['contest_number'], 0)
def test_no_data(self):
self.assertRaises(ValueError, self.cont.change_participant_data,
'person2')
def test_wrong_parameters(self):
self.assertRaises(ValueError, self.cont.change_participant_data,
'person3', contest_number=9, glider='ajax')
self.cont.change_participant_data(self.p1_id, cotest_number=9,
glider='aJax')
self.assertEqual(self.person1['contest_number'], 747)
self.assertEqual(self.person1['glider'], 'ajax')
def test_violate_invariants(self):
self.assertRaises(ValueError, self.cont.change_participant_data,
'person1', contest_number='757')
class ParagliderTest(unittest.TestCase):
def test_success_creation(self):
p_id = PersonID()
t_id = TrackerID(TrackerID.device_types[0], '123456789012345')
p = race.Paraglider(p_id, Name('Vasya', 'Pupkin'),
Country('RU'), 'Mantra 9', 15, t_id)
self.assertEqual(p.person_id, p_id)
self.assertEqual(p.glider, 'mantra')
self.assertEqual(p.contest_number, 15)
self.assertEqual(p.tracker_id, t_id)
@mock.patch('gorynych.common.infrastructure.persistence.event_store')
class ContestServiceTest(unittest.TestCase):
def setUp(self):
self.cont = create_contest(time.time(), time.time() + 3600)
def test_register_paraglider(self, patched):
event_store = mock.Mock()
patched.return_value = event_store
alone_cont = deepcopy(self.cont)
pid = PersonID()
populated_cont = self.cont.register_paraglider(pid,
'glider',
11)
self.assertFalse(alone_cont.paragliders)
self.assertTrue(populated_cont.paragliders)
pgl = populated_cont.paragliders
self.assertEquals(pgl.keys()[0], pid)
self.assertEquals(pgl[pid]['role'], 'paraglider')
self.assertEquals(pgl[pid]['glider'], 'glider')
self.assertEquals(pgl[pid]['contest_number'], 11)
def test_add_transport(self, patched):
event_store = mock.Mock()
patched.return_value = event_store
alone_cont = deepcopy(self.cont)
tid = TransportID()
populated_cont = self.cont.add_transport(tid)
self.assertFalse(alone_cont.transport)
self.assertIn(tid, populated_cont.transport)
def test_change_paraglider(self, patched):
event_store = mock.Mock()
patched.return_value = event_store
pid = PersonID()
cont = self.cont.register_paraglider(pid,
'glider',
11)
changed_cont = contest.change_participant(cont, dict(glider='noglider',
contest_number=21,
person_id=pid))
pgl = changed_cont.paragliders
self.assertEquals(pgl.keys()[0], pid)
self.assertEquals(pgl[pid]['glider'], 'noglider')
self.assertEquals(pgl[pid]['contest_number'], 21)
def test_add_winddummy(self, patched):
event_store = mock.Mock()
patched.return_value = event_store
pid = PersonID()
cont = self.cont.add_winddummy(pid)
wdms = cont.winddummies
self.assertEquals(wdms, [pid])
def test_get_winddummy(self, patched):
event_store = mock.Mock()
patched.return_value = event_store
pid = PersonID()
cont = self.cont.add_winddummy(pid)
self.assertEquals(cont.get_winddummy(pid), pid)
pid2 = PersonID()
self.assertRaises(DomainError, cont.get_winddummy, pid2)
|
DmitryLoki/gorynych
|
gorynych/info/domain/test/test_contest.py
|
test_contest.py
|
py
| 10,466 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "mock.Mock",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "gorynych.info.domain.person",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "mock.Mock",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.person.name",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "gorynych.info.domain.person",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "gorynych.common.domain.types.Name",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.person.country",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "gorynych.info.domain.person",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "gorynych.common.domain.types.Country",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.person.tracker",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "gorynych.info.domain.person",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "gorynych.info.domain.person.tracker",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "gorynych.info.domain.person",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "gorynych.info.domain.person.tracker",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "gorynych.info.domain.person",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "gorynych.info.domain.person",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "unittest.TestCase",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "gorynych.info.domain.test.helpers.create_contest",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "gorynych.common.domain.types.Address",
"line_number": 36,
"usage_type": "argument"
},
{
"api_name": "gorynych.info.domain.contest.ContestID",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "gorynych.info.domain.contest",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "gorynych.info.domain.test.helpers.create_contest",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.test.helpers.create_contest",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.test.helpers.create_contest",
"line_number": 56,
"usage_type": "argument"
},
{
"api_name": "unittest.TestCase",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "gorynych.info.domain.test.helpers.create_contest",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.ids.RaceID",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "gorynych.common.domain.events.ContestRaceCreated",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "gorynych.common.domain.events",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "gorynych.info.domain.ids.RaceID",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "gorynych.common.domain.events.ContestRaceCreated",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "gorynych.common.domain.events",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "unittest.TestCase",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "mock.Mock",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.test.helpers.create_contest",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.person.PersonID",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.person",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "gorynych.info.domain.contest.Contest",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "gorynych.info.domain.contest",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "gorynych.info.domain.person.PersonID",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.person",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "mock.call.persist",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "mock.call",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "gorynych.common.domain.events.ParagliderRegisteredOnContest",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "gorynych.common.domain.events",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "mock.call.persist",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "mock.call",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "gorynych.common.domain.events.ParagliderRegisteredOnContest",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "gorynych.common.domain.events",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "mock.patch",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.test.helpers.create_contest",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.test.helpers.create_contest",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.test.helpers.create_contest",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "gorynych.info.domain.person.PersonID",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.person",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "gorynych.info.domain.person.PersonID",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.person",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "gorynych.info.domain.person.PersonID",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.person",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "mock.Mock",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.test.helpers.create_contest",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "unittest.SkipTest",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "gorynych.info.domain.ids.PersonID",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.ids.TrackerID",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.ids.TrackerID.device_types",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "gorynych.info.domain.race.Paraglider",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.race",
"line_number": 195,
"usage_type": "name"
},
{
"api_name": "gorynych.common.domain.types.Name",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "gorynych.common.domain.types.Country",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "gorynych.info.domain.test.helpers.create_contest",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.ids.PersonID",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.ids.TransportID",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.ids.PersonID",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.contest.change_participant",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.contest",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "mock.Mock",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.ids.PersonID",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.ids.PersonID",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "gorynych.info.domain.ids.PersonID",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "gorynych.common.exceptions.DomainError",
"line_number": 273,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 203,
"usage_type": "call"
}
] |
72739113148
|
from reportlab.lib import colors
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4
from reportlab.platypus import Paragraph
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.enums import TA_JUSTIFY
def generate_prescription(patient_name, doctor_name, medicine_list, logo_path, rx_path, signature_path, prescription_no, consultation_no, doctor_email, medconnect_id, reg_no, doctor_location, patient_id, patient_location, date, remarks, doctor_title):
# Create a file named after the patient
filename = f"{patient_name}_prescription.pdf"
c = canvas.Canvas(filename, pagesize=A4, bottomup=1)
# MedConnect Logo
c.setFillColor(colors.white)
c.drawImage(logo_path, 12, 759 - 16, width=169, height=83.12)
# Rx logo
c.drawImage(rx_path, 595-60-12, 759 - 4, width=60, height=60)
# Document Nos.
c.setFillColor(colors.black)
c.setFont("Courier", 13)
c.drawRightString(525, 735 , "Prescription No:")
c.setFont("Courier", 13)
c.drawRightString(570, 735, f"#{prescription_no}")
c.drawRightString(525, 720 , "Consultation No:")
c.setFont("Courier", 13)
c.drawRightString(570, 720, f"#{consultation_no}")
#Doctor Details
c.setFillColor(colors.black)
c.setFont("Courier", 13)
c.drawRightString(570, 680 , "+917738118110")
c.drawRightString(570, 665 , f"{doctor_email}")
c.drawRightString(450, 650 , "MedConnect Id.: ")
c.setFont("Courier", 13)
c.drawRightString(570, 650 , f"{medconnect_id}")
c.drawRightString(450, 635 , "Reg. No.: ")
c.setFont("Courier", 13)
c.drawRightString(570, 635 , f"{reg_no}")
#Doctor Headers
c.setFont("Courier-Bold", 21)
c.drawString(12, 680, f"Dr. {doctor_name}")
c.setFont("Courier", 13)
c.drawString(12, 665 , f"{doctor_location}")
# Patient Details
c.setFont("Courier", 13)
c.drawString(12, 620 , "Patient Id:")
c.drawString(100, 620 , f"# {patient_id}")
c.drawString(12, 605 , "Patient:")
c.setFont("Courier-Bold", 13)
c.drawString(100, 605 , f"{patient_name}")
c.setFont("Courier", 13)
c.drawString(100, 590 , f"{patient_location}")
c.drawString(12, 575 , "Date:")
c.drawString(100, 575 , f"{date}")
c.setFont("Courier-Bold", 18)
c.drawString(12, 530 , "Treatment Advised")
# Add a table with the medicine list
c.setFont("Courier-Bold", 13)
c.drawString(12, 500, "Type")
c.drawString(97, 500, "Medicine")
c.drawString(242, 500, "Power")
c.drawString(346, 500, "Frequency")
c.drawString(440, 500, "Remarks")
c.setStrokeColor(colors.grey)
c.line(12, 490, 570, 490)
c.setFont("Courier", 13)
for i, medicine in enumerate(medicine_list):
c.drawString(12, 475 - i * 25, medicine[0])
c.drawString(97, 475 - i * 25, medicine[1])
c.drawString(242, 475 - i * 25, medicine[2])
c.drawString(346, 475 - i * 25, medicine[3])
c.drawString(440, 475 - i * 25, medicine[4])
c.setFont("Courier-Bold", 18)
c.drawString(12, 280 , "Next Investigation / Other Remarks")
style = getSampleStyleSheet()["Normal"]
style.fontName = "Courier"
style.fontSize = 12
style.alignment = TA_JUSTIFY
p = Paragraph(remarks, style)
p.wrapOn(c, 558, 100)
p.drawOn(c, 12, 140)
c.drawImage(signature_path, 456, 66, width=86, height=39)
c.setFont("Courier-Bold", 13)
c.drawRightString(570, 47 , f"{doctor_name}")
c.setFont("Courier", 10)
c.drawRightString(570, 33 , f"{doctor_title}")
c.line(12, 18, 570, 18)
c.setFont("Courier", 10)
c.drawString(12, 5, "Thank you for choosing MedConnect. Have a Healthy Day!")
# Save the PDF
c.save()
medicine_list = [
("Tablet","Paracetamol", "500 mg", "1-0-1", "-"),
("Tablet","Dolo", "10 mg", "0-1-0", "-"),
]
remarks = "Take rest. Do not work more."
generate_prescription("Prem Kothawle", "Dr. Shubham Saroj", medicine_list, "medconnect_logo.jpg", "rx_logo.jpg", "Shubham_Sign.jpeg", "1258", "1279", "[email protected]", "86438648464", "123454321234", "Thane, India", "2547", "Nere, India", "10 February 2023", remarks, "M.B.B.S")
# patient_name, doctor_name, medicine_list, logo_path, rx_path, signature_path, prescription_no, consultation_no, doctor_email, medconnect_id, reg_no, doctor_location, patient_id, patient_location, date, remarks, doctor_title
|
kothawleprem/MedConnect
|
templates/main.py
|
main.py
|
py
| 4,423 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "reportlab.pdfgen.canvas.Canvas",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "reportlab.pdfgen.canvas",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "reportlab.lib.pagesizes.A4",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "reportlab.lib.colors.white",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "reportlab.lib.colors",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "reportlab.lib.colors.black",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "reportlab.lib.colors",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "reportlab.lib.colors.black",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "reportlab.lib.colors",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "reportlab.lib.colors.grey",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "reportlab.lib.colors",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "reportlab.lib.styles.getSampleStyleSheet",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "reportlab.lib.enums.TA_JUSTIFY",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "reportlab.platypus.Paragraph",
"line_number": 100,
"usage_type": "call"
}
] |
27264200550
|
"""
Plot.py
Created 21/12/2021
"""
from juzzyPython.generic.Tuple import Tuple
from juzzyPython.generalType2zSlices.sets.GenT2MF_Interface import GenT2MF_Interface
from juzzyPython.type1.sets.T1MF_Interface import T1MF_Interface
from juzzyPython.generalType2zSlices.sets.GenT2MF_Triangular import GenT2MF_Triangular
from juzzyPython.intervalType2.sets.IntervalT2MF_Interface import IntervalT2MF_Interface
from juzzyPython.generalType2zSlices.sets.GenT2MF_Trapezoidal import GenT2MF_Trapezoidal
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from typing import List
plt.rcParams.update({'figure.max_open_warning': 0})
class Plot:
"""
Class Plot:
Uses matplotlib to plot various graphs
Parameters: None
Functions:
plotControlSurface
show
figure
title
legend
discretize
plotMF
"""
def __init__(self) -> None:
self.colorList = ['tab:blue','tab:orange','tab:green','tab:red','tab:purple','tab:brown','tab:pink','tab:gray','tab:olive','tab:cyan']
def show(self):
"""Show all the figures created"""
plt.show()
def figure(self):
"""Create a new plot to draw upon"""
self.fig = plt.figure()
def figure3d(self):
"""Create a new 3d plot to draw upon"""
self.fig, self.ax = plt.subplots(subplot_kw={"projection": "3d"})
def title(self,title: str):
"""Set the title of the current figure"""
plt.title(title)
def legend(self):
"""Add legend to the current figure"""
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
def plotControlSurface(self,x: List[float],y: List[float],z: List[List[float]],xLabel: str,yLabel: str,zLabel: str) -> None:
"""Plot a 3D surface showcasing the relationship between input (x,y) and output z"""
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
x,y = np.meshgrid(x,y)
ax.plot_surface(np.asarray(x), np.asarray(y),np.asarray(z))
ax.set_xlabel(xLabel)
ax.set_ylabel(yLabel)
ax.set_zlabel(zLabel)
plt.title("Control Surface")
def plotMF2(self,xaxis: str,name: str,sets: IntervalT2MF_Interface,xDisc: int,addExtraEndPoints: bool) -> None:
x = self.discretize(sets.getSupport(),xDisc)
y1 = [0] * xDisc
y2 = [0] * xDisc
for i in range(xDisc):
temp = sets.getFS(x[i])
y1[i] = temp.getRight()
y2[i] = temp.getLeft()
if addExtraEndPoints:
x2 = [0.0] * (len(x)+2)
y1b = [0.0] * (len(y1)+2)
y2b = [0.0] * (len(y2)+2)
x2[0] = sets.getSupport().getLeft()
x2[-1] = sets.getSupport().getRight()
y1b[0] = 0.0
y1b[len(y1)-1] = 0.0
y2b[0] = 0.0
y2b[len(y2)-1] = 0.0
for i in range(len(x)):
x2[i+1] = x[i]
y1b[i+1] = y1[i]
y2b[i+1] = y2[i]
x = x2
y1 = y1b
y2 = y2b
ax = plt.gca()
color = next(ax._get_lines.prop_cycler)['color']
plt.plot(x,y1,label=name+"_upper", color = color)
plt.plot(x,y2,label=name+"_lower", color = color, alpha=0.5)
#plt.xlim(xAxisRange.getLeft(),xAxisRange.getRight())
#plt.ylim(yAxisRange.getLeft(),yAxisRange.getRight())
plt.ylabel("μ")
plt.xlabel(xaxis)
def plotMF(self,xaxis: str,name: str,sets: T1MF_Interface,xDisc: int,xAxisRange: Tuple,yAxisRange: Tuple,addExtraEndPoints: bool) -> None:
"""Plot a membership function on the current figure"""
x = self.discretize(sets.getSupport(),xDisc)
y = [0] * xDisc
for i in range(xDisc):
y[i] = sets.getFS(x[i])
if addExtraEndPoints:
x2 = [0.0] * (len(x)+2)
y2 = [0.0] * (len(y)+2)
x2[0] = sets.getSupport().getLeft()
x2[-1] = sets.getSupport().getRight()
for i in range(len(x)):
x2[i+1] = x[i]
y2[i+1] = y[i]
x = x2
y = y2
plt.plot(x,y,label=name)
plt.xlim(xAxisRange.getLeft(),xAxisRange.getRight())
plt.ylim(yAxisRange.getLeft(),yAxisRange.getRight())
plt.ylabel("μ")
plt.xlabel(xaxis)
def plotMFasLines(self,sets: GenT2MF_Interface,xDisc: int) -> None:
self.ax.set_xlabel("x")
self.ax.set_ylabel("y")
self.ax.set_zlabel("z")
x = self.discretize(sets.getSupport(),xDisc)
y1 = [[0 for c in range(xDisc)] for r in range(sets.getNumberOfSlices())]
y2 = [[0 for c in range(xDisc)] for r in range(sets.getNumberOfSlices())]
z1 = [[0 for c in range(xDisc)] for r in range(sets.getNumberOfSlices())]
z2 = [[0 for c in range(xDisc)] for r in range(sets.getNumberOfSlices())]
for zLevel in range(sets.getNumberOfSlices()):
for i in range(xDisc):
temp = sets.getZSlice(zLevel).getFS(x[i])
y1[zLevel][i] = temp.getRight()
y2[zLevel][i] = temp.getLeft()
if zLevel==0:
z1[zLevel][i] = 0.0
else:
z1[zLevel][i] = sets.getZValue(zLevel-1)
z2[zLevel][i] = sets.getZValue(zLevel)
for zLevel in range(sets.getNumberOfSlices()):
self.ax.plot3D(x,y1[zLevel],z1[zLevel],label=sets.getName()+"_upper",color= self.colorList[zLevel%10])
self.ax.plot3D(x,y2[zLevel],z1[zLevel],label=sets.getName()+"_lower",color= self.colorList[zLevel%10])
self.ax.plot3D(x,y1[zLevel],z2[zLevel],label=sets.getName()+"_upper",color= self.colorList[zLevel%10])
self.ax.plot3D(x,y2[zLevel],z2[zLevel],label=sets.getName()+"_lower",color= self.colorList[zLevel%10])
def turnOnInteraction(self):
plt.ion()
def closeAllFigures(self):
plt.close('all')
def plotMFasSurface(self,plotName: str,sets: GenT2MF_Interface,xAxisRange: Tuple,xDisc: int,addExtraPoints: bool):
self.ax.set_xlabel("X-Axis")
self.ax.set_ylabel("Z-Axis")
self.ax.set_zlabel("Y-Axis")
if isinstance(sets,GenT2MF_Triangular):
for zLevel in range(sets.getNumberOfSlices()):
xUpper = [sets.getZSlice(zLevel).getUMF().getStart(), sets.getZSlice(zLevel).getUMF().getPeak(),sets.getZSlice(zLevel).getUMF().getEnd()]
zUpper = None
yUpper = [[0 for i in range(3)] for j in range(2)]
if zLevel == 0:
zUpper = [0.0,sets.getZValue(zLevel)]
else:
zUpper = [sets.getZValue(zLevel-1),sets.getZValue(zLevel)]
for xD in range(3):
yUpper[0][xD] = sets.getZSlice(zLevel).getFS(xUpper[xD]).getRight()
yUpper[1][xD] = yUpper[0][xD]
xLower = [sets.getZSlice(zLevel).getLMF().getStart(), sets.getZSlice(zLevel).getLMF().getPeak(),sets.getZSlice(zLevel).getLMF().getEnd()]
zLower = None
yLower = [[0 for i in range(3)] for j in range(2)]
if zLevel == 0:
zLower = [0.0,sets.getZValue(zLevel)]
else:
zLower = [sets.getZValue(zLevel-1),sets.getZValue(zLevel)]
for xD in range(3):
yLower[0][xD] = sets.getZSlice(zLevel).getFS(xLower[xD]).getLeft()
yLower[1][xD] = yLower[0][xD]
x,y = np.meshgrid(xUpper,zUpper)
self.ax.plot_surface(np.asarray(x), np.asarray(y),np.asarray(yUpper),alpha = 0.5,color=self.colorList[zLevel%10])
x,y = np.meshgrid(xLower,zLower)
self.ax.plot_surface(np.asarray(x), np.asarray(y),np.asarray(yLower),alpha = 0.5,color=self.colorList[zLevel%10])
elif isinstance(sets,GenT2MF_Trapezoidal):
for zLevel in range(sets.getNumberOfSlices()):
xUpper = [sets.getZSlice(zLevel).getUMF().getA(), sets.getZSlice(zLevel).getUMF().getB(),sets.getZSlice(zLevel).getUMF().getC(),sets.getZSlice(zLevel).getUMF().getD()]
zUpper = None
yUpper = [[0 for i in range(4)] for j in range(2)]
if zLevel == 0:
zUpper = [0.0,sets.getZValue(zLevel)]
else:
zUpper = [sets.getZValue(zLevel-1),sets.getZValue(zLevel)]
for xD in range(4):
yUpper[0][xD] = sets.getZSlice(zLevel).getFS(xUpper[xD]).getRight()
yUpper[1][xD] = yUpper[0][xD]
xLower = [sets.getZSlice(zLevel).getLMF().getA(), sets.getZSlice(zLevel).getLMF().getB(),sets.getZSlice(zLevel).getLMF().getC(),sets.getZSlice(zLevel).getLMF().getD()]
zLower = None
yLower = [[0 for i in range(4)] for j in range(2)]
if zLevel == 0:
zLower = [0.0,sets.getZValue(zLevel)]
else:
zLower = [sets.getZValue(zLevel-1),sets.getZValue(zLevel)]
for xD in range(4):
yLower[0][xD] = sets.getZSlice(zLevel).getFS(xLower[xD]).getLeft()
yLower[1][xD] = yLower[0][xD]
x,y = np.meshgrid(xUpper,zUpper)
self.ax.plot_surface(np.asarray(x), np.asarray(y),np.asarray(yUpper),alpha = 0.5,color=self.colorList[zLevel%10])
x,y = np.meshgrid(xLower,zLower)
self.ax.plot_surface(np.asarray(x), np.asarray(y),np.asarray(yLower),alpha = 0.5,color=self.colorList[zLevel%10])
else:
for zLevel in range(sets.getNumberOfSlices()):
xUpper = self.discretize(xAxisRange,xDisc)
zUpper = None
yUpper = [[0 for i in range(xDisc)] for j in range(2)]
if zLevel == 0:
zUpper = [0.0,sets.getZValue(zLevel)]
else:
zUpper = [sets.getZValue(zLevel-1),sets.getZValue(zLevel)]
for xD in range(xDisc):
yUpper[0][xD] = sets.getZSlice(zLevel).getFS(xUpper[xD]).getRight()
yUpper[1][xD] = yUpper[0][xD]
xLower = self.discretize(xAxisRange,xDisc)
zLower = None
yLower = [[0 for i in range(xDisc)] for j in range(2)]
if zLevel == 0:
zLower = [0.0,sets.getZValue(zLevel)]
else:
zLower = [sets.getZValue(zLevel-1),sets.getZValue(zLevel)]
for xD in range(xDisc):
yLower[0][xD] = sets.getZSlice(zLevel).getFS(xLower[xD]).getLeft()
yLower[1][xD] = yLower[0][xD]
if addExtraPoints:
x_upper2 = [0.0] * (len(xUpper) + 2)
y_upper2 = [[0.0 for i in range(len(yUpper[0]) + 2)] for j in range(2)]
x_Lower2 = [0.0] * (len(xLower) + 2)
y_Lower2 = [[0.0 for i in range(len(yLower[0]) + 2)] for j in range(2)]
x_upper2[0] = sets.getSupport().getLeft()
x_upper2[-1] = sets.getSupport().getRight()
x_Lower2[0] = x_upper2[0]
x_Lower2[-1] = x_upper2[-1]
for i in range(len(xUpper)):
x_upper2[i + 1] = xUpper[i]
x_Lower2[i + 1] = xLower[i]
y_upper2[0][i + 1] = yUpper[0][i]
y_Lower2[0][i + 1] = yLower[0][i]
y_upper2[1][i + 1] = yUpper[1][i]
y_Lower2[1][i + 1] = yLower[1][i]
xUpper = x_upper2
xLower = x_Lower2
yUpper = y_upper2
yLower = y_Lower2
x,y = np.meshgrid(xUpper,zUpper)
self.ax.plot_surface(np.asarray(x), np.asarray(y),np.asarray(yUpper),alpha = 0.5,color=self.colorList[zLevel%10])
x,y = np.meshgrid(xLower,zLower)
self.ax.plot_surface(np.asarray(x), np.asarray(y),np.asarray(yLower),alpha = 0.5,color=self.colorList[zLevel%10])
def discretize(self,support: Tuple,discLevel: int) -> List[float]:
"""Discretize the support values"""
d = [0] * discLevel
stepSize = (support.getSize())/(discLevel-1.0)
d[0] = support.getLeft()
d[-1] = support.getRight()
for i in range(1,discLevel-1):
d[i] = support.getLeft()+i*stepSize
return d
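# Worked example (sketch, assuming a support Tuple spanning 0..1): discretize(support, 5)
# returns [0.0, 0.25, 0.5, 0.75, 1.0] - both endpoints are pinned exactly and the
# interior points are spaced by support.getSize() / (discLevel - 1).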
|
LUCIDresearch/JuzzyPython
|
juzzyPython/generic/Plot.py
|
Plot.py
|
py
| 12,826 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.rcParams.update",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "numpy.meshgrid",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "juzzyPython.intervalType2.sets.IntervalT2MF_Interface.IntervalT2MF_Interface",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "juzzyPython.type1.sets.T1MF_Interface.T1MF_Interface",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "juzzyPython.generic.Tuple.Tuple",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "juzzyPython.generalType2zSlices.sets.GenT2MF_Interface.GenT2MF_Interface",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "juzzyPython.generalType2zSlices.sets.GenT2MF_Interface.GenT2MF_Interface",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "juzzyPython.generic.Tuple.Tuple",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "juzzyPython.generalType2zSlices.sets.GenT2MF_Triangular.GenT2MF_Triangular",
"line_number": 171,
"usage_type": "argument"
},
{
"api_name": "numpy.meshgrid",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "juzzyPython.generalType2zSlices.sets.GenT2MF_Trapezoidal.GenT2MF_Trapezoidal",
"line_number": 200,
"usage_type": "argument"
},
{
"api_name": "numpy.meshgrid",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "juzzyPython.generic.Tuple.Tuple",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 284,
"usage_type": "name"
}
] |
14810439425
|
import os, sys
from os.path import join as ospj
import torch
import numpy as np
from PIL import Image
import torch.utils.data as data
import kornia
import argparse
from logger import Logger
class PairedImageDataset(data.Dataset):
def __init__(self, lr_img_path, lr_filelist_path, hr_img_path, hr_filelist_path, args):
self.args=args
self.lr_img_path = lr_img_path
self.hr_img_path = hr_img_path
self.lr_filelist_path = lr_filelist_path
self.hr_filelist_path = hr_filelist_path
self.lr_img_list = [x.strip() for x in open(self.lr_filelist_path).readlines()]
self.hr_img_list = [x.strip() for x in open(self.hr_filelist_path).readlines()]
# -85.61112_30.197733_28cm.tif -> -85.61112_30.197733_50cm.png
self.paired_lr_img_list = [x.replace("28cm.tif", "50cm.png") for x in self.hr_img_list]
def __getitem__(self, item):
lr_img_name = self.paired_lr_img_list[item]
hr_img_name = self.hr_img_list[item]
lr_img = Image.open(ospj(self.lr_img_path, lr_img_name)).convert('RGB')
hr_img = Image.open(ospj(self.hr_img_path, hr_img_name)).convert('RGB')
lr_img = np.asarray(lr_img) / 255.0
hr_img = np.asarray(hr_img) / 255.0
lr_img = kornia.image_to_tensor(lr_img).squeeze()
hr_img = kornia.image_to_tensor(hr_img).squeeze()
return lr_img, hr_img
def __len__(self):
return len(self.hr_img_list)
class TVDenoise(torch.nn.Module):
def __init__(self, args):
super(TVDenoise, self).__init__()
self.l2_term = torch.nn.MSELoss(reduction='mean')
self.l1_term = torch.nn.L1Loss(reduction='mean')
self.psnr = kornia.losses.PSNRLoss(max_val=1.0)
self.ssim=kornia.losses.SSIM(5, reduction='mean')
self.regularization_term = kornia.losses.TotalVariation()
self.args=args
self.xyxy = torch.nn.Parameter(data=torch.tensor([[0.], [0.], [713], [713]]), requires_grad=True)
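# mem expands (x1, y1, x2, y2) into the four corners (x1,y1),(x2,y1),(x2,y2),(x1,y2) that kornia.crop_and_resize expects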
self.mem = torch.nn.Parameter(data=torch.tensor(
[[1., 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, 0],
[0, 0, 0, 1]]), requires_grad=False)
def forward(self, src_img, dst_img):
new_image = self.get_new_image(src_img, dst_img)
return new_image
def get_new_image(self, src_img, dst_img):
self.boxes=torch.matmul(self.mem, self.xyxy).reshape(1, 4, 2)
return kornia.crop_and_resize(src_img, self.boxes, dst_img.shape[-2:])
def train(epoch_i, data_loader, network, optimizer, args):
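# one epoch: crop/resize the HR image with the current learnable box and regress it onto the LR image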
num_iters = len(data_loader)
loss_list=[]
l1loss_list=[]
l2loss_list=[]
for i, input_tuple in enumerate(data_loader):
optimizer.zero_grad()
lr_img, hr_img = input_tuple
resized_img = network(hr_img, lr_img)
l1loss = network.l1_term(resized_img, lr_img)
l2loss = network.l2_term(resized_img, lr_img)
if args.use_l2_loss:
loss = l2loss
else:
loss = l1loss
loss.backward()
optimizer.step()
loss_list.append(loss.detach().numpy())
l1loss_list.append(l1loss.item())
l2loss_list.append(l2loss.item())
if i % 20 == 0:
print("[{:2d}] [{:3d}/{:3d}]: loss {:.5f} l1:{:.5f} l2:{:.5f}".
format(epoch_i, i, num_iters, loss.item(), l1loss.item(), l2loss.item()),
"crop", network.xyxy.detach().numpy().flatten())
print("Averge loss: %.5f\tl1: %.5f\tl2: %.5f"%(np.mean(loss_list), np.mean(l1loss_list), np.mean(l2loss_list)))
def main():
parser = argparse.ArgumentParser(description="Learnable Cropping Images")
parser.add_argument('--use_l2_loss', action='store_true')
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--shuffle', action='store_true')
parser.add_argument('--workers', type=int, default=2)
parser.add_argument('--not_pin_memory', action='store_true')
parser.add_argument('--lr_img_path', type=str, default="../../dataset/satellite_images/")
parser.add_argument('--lr_filelist_path', type=str, default="data/satellite_images_filelist.txt")
parser.add_argument('--hr_img_path', type=str, default="../../dataset/aerial_images/")
parser.add_argument('--hr_filelist_path', type=str, default="data/aerial_images_filelist.txt")
parser.add_argument('--num_epochs', type=int, default=25)
parser.add_argument('--learning_rate', type=float, default=100.0)
parser.add_argument('--exp_name', type=str, default="learncrop")
args = parser.parse_args()
logger = Logger()
exp_dir = ospj("exps", args.exp_name+logger._timestr)
os.makedirs(exp_dir, exist_ok=True)
logger.create_log(exp_dir)
sys.stdout = logger
if args.use_l2_loss:
print("use l2 loss")
else:
print("use l1 loss")
dataset = PairedImageDataset(args.lr_img_path, args.lr_filelist_path, args.hr_img_path, args.hr_filelist_path, args)
data_loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, shuffle=args.shuffle,
num_workers=args.workers, pin_memory=not args.not_pin_memory, sampler=None,
drop_last=False)
network = TVDenoise(args)
optimizer = torch.optim.SGD(network.parameters(), lr=args.learning_rate, momentum=0.9)
for epoch_i in range(args.num_epochs):
train(epoch_i, data_loader, network, optimizer, args)
if __name__ == "__main__":
main()
|
mengyuest/satellite2aerial
|
learn_crop.py
|
learn_crop.py
|
py
| 5,572 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.utils.data.Dataset",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "kornia.image_to_tensor",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "kornia.image_to_tensor",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.L1Loss",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "kornia.losses.PSNRLoss",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "kornia.losses",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "kornia.losses.SSIM",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "kornia.losses",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "kornia.losses.TotalVariation",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "kornia.losses",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "kornia.crop_and_resize",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "logger.Logger",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "logger._timestr",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "logger.create_log",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.SGD",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 133,
"usage_type": "attribute"
}
] |
27146623453
|
from UserSimulator.User import User
from UserSimulator.user_behavior import calculate_session_length, Devices, load_spotify, playback_decision
from datetime import datetime
from datetime import timedelta
import random
import time
from json import load
import pickle
import json
import csv
import requests
from DataPreprocessor.build_user_track_dict import TrainTripletParser
from DataPreprocessor.normalize_play_counts import SongRatingNormalizer
from DataPreprocessor.create_blc_input import SparseMatGenerator
from DataPreprocessor.filter_sparse_songs import SparseSongFilter
from DataPreprocessor.build_song_dict import SongDictBuilder
from DataPreprocessor.get_top_user_artists import TopUserBuilder
from ResultProcessor.process_P import PProcessor
from ResultProcessor.build_nym_ratings import NymRatingBuilder
from ResultProcessor.get_top_nym_songs import SongListBuilder
from ResultProcessor.get_unique_nym_artists import UniqueNymArtistFilter
from ResultProcessor.get_nym_artist_variance import ArtistVarianceCalculator
from ResultProcessor.NymRatingFormatter import NymRatingFormatter
from spotify import SpotifyWrapper
from os import path
REQ = "https://ec2-52-50-185-176.eu-west-1.compute.amazonaws.com:4400/ratings/update"
RATINGS_REQ = "http://localhost:4000/ratings/{}/spotify.com"
# pre-selected users just for convenience
# contains tuples in the form (nym, user_number)
USER_LIST = [
(0,234384),
(0,234384),
(0,402687),
(0,462404),
(0,669980),
(0,991089),
(1,679065),
(1,723268),
(1,889236),
(1,954125),
(1,964856),
(10,12383),
(10,222379),
(10,241854),
(10,332593),
(10,436898),
(12,179532),
(12,351979),
(12,473021),
(12,811920),
(12,94387),
(13,190513),
(13,272213),
(13,372156),
(13,745999),
(13,752718),
(14,152776),
(14,291748),
(14,555065),
(14,880214),
(14,948598),
(2,202368),
(2,8028),
(2,869250),
(2,957121),
(2,975702),
(3,329210),
(3,491540),
(3,622692),
(3,819217),
(3,835998),
(4,143096),
(4,411888),
(4,470913),
(4,669115),
(4,792160),
(5,169059),
(5,472503),
(5,502502),
(5,599726),
(5,883355),
(6,151851),
(6,269475),
(6,427642),
(6,483795),
(6,864712),
(7,117436),
(7,471509),
(7,542147),
(7,605562),
(7,66213),
(8,355770),
(8,400013),
(8,689580),
(8,74987),
(8,824276),
(9,189979),
(9,396445),
(9,513441),
(9,543235),
(9,753614)
]
config = load(open('config.json'))
def make_rating(nym_id, domain, item, score, num_v):
return {
"nymRating" : {
"numVotes" : num_v,
"score": score
},
"domain": domain,
"item": item,
"nym_id": nym_id
}
def manual_update(details):
_, nym, domain, item, rating, num_votes = details
new_rating = make_rating(nym, domain, item, rating, num_votes+1)
headers = { "content-type": "application/json"}
resp = requests.put(REQ, data=json.dumps({'rating' : new_rating}), headers=headers, verify=False)
return resp
def load_user_nym_pairs():
nym_users_dict = {}
user_nym_pairs = []
path_to_P_with_ids = path.join(config["nym_data"]["base"], config["nym_data"]["P_with_ids"])
with open(path_to_P_with_ids) as input_file:
for line in input_file:
user_nym_pairs.append(map(int, line.split(",")))
# Convert list to dict
for user, nym in user_nym_pairs:
if nym not in nym_users_dict:
nym_users_dict[nym] = []
nym_users_dict[nym].append(user)
return nym_users_dict
def load_user_song_map():
with open(path.join(config["user_data"]["base"], config["user_data"]["user_songs_map"]), 'rb') as input_pickle:
return pickle.load(input_pickle)
print("Done")
def havent_played_song(user,song_id):
song = user.song_to_id_dict[song_id]
user_songs_map = load_user_song_map()
nym_users_dict = load_user_nym_pairs()
result = []
for nym, users in nym_users_dict.items():
# print("Building ratings for nym {}".format(nym))
# Iterate through each user in a Nym
for user in sorted(users):
# For each user get every song they listened to and their play counts
found = False
for user_song, _ in user_songs_map[user]:
if user_song == song:
found = True
break
if not found:
result.append((nym, user))
return sorted(result, key=lambda x: x[0])
def update_data():
# Normalize play counts
print("Normalizing play counts")
song_rating_normalizer = SongRatingNormalizer(config)
song_rating_normalizer.load_user_songs_dict()
song_rating_normalizer.normalize_data()
song_rating_normalizer.write_data_to_disk()
print("Done")
del song_rating_normalizer
# Generate sparse matrix for BLC
print("Generating Sparse Matrix")
sparse_mat_generator = SparseMatGenerator(config, num_users=40000)
sparse_mat_generator.load_data()
sparse_mat_generator.generate_sparse_mat()
sparse_mat_generator.write_user_data()
print("Done")
del sparse_mat_generator
# Filter sparse songs from matrix
print("Filtering Sparse Songs from matrix")
sparse_song_filter = SparseSongFilter(config)
sparse_song_filter.parse_sparse_mat_files()
sparse_song_filter.filter_sparse_songs()
sparse_song_filter.write_filtered_matrix()
print("Done")
del sparse_song_filter
# Build dict of song IDs to artist-song tuples
print("Building dict of songs")
song_dict_builder = SongDictBuilder(config)
song_dict_builder.load_track_list()
song_dict_builder.write_song_details_to_file()
print("Done")
del song_dict_builder
# Build the top users for dataset
print("Outputting top users")
top_user_builder = TopUserBuilder(config)
top_user_builder.load_data()
top_user_builder.get_top_songs()
top_user_builder.dump_top_users()
del top_user_builder
def gen_db_data():
# Map row numbers to users in raw P file
print("Processing P")
p_processor = PProcessor(config)
p_processor.generate_row_user_map()
p_processor.map_rows_to_users()
del p_processor
# Build ratings for nym and write out to nym_ratings directory
print("Generating Nym Ratings")
nym_rating_builder = NymRatingBuilder(config)
nym_rating_builder.load_data()
nym_rating_builder.delete_old_ratings()
nym_rating_builder.build_ratings()
nym_rating_builder.dump_nym_users_map()
del nym_rating_builder
# Get Top Nym songs based on ratings
print("Generating Song Lists")
song_list_builder = SongListBuilder(config)
song_list_builder.load_data()
song_list_builder.load_ratings()
song_list_builder.delete_old_songs()
song_list_builder.build_song_lists()
del song_list_builder
# Get artists unique to each nym
print("Generating artists unique to each nym")
unique_nym_artist_filter = UniqueNymArtistFilter(config)
unique_nym_artist_filter.load_songs()
unique_nym_artist_filter.delete_old_artists()
unique_nym_artist_filter.build_top_nym_artists()
unique_nym_artist_filter.filter_unique_artists()
del unique_nym_artist_filter
print("Calculating Artist Variances")
artist_variance_calculator = ArtistVarianceCalculator(config)
artist_variance_calculator.load_data()
artist_variance_calculator.calculate_variance()
del artist_variance_calculator
print("Generating ratings for db")
nym_rating_formatter = NymRatingFormatter(config)
nym_rating_formatter.load_data()
nym_rating_formatter.parse_song_rankings()
nym_rating_formatter.generate_db_input()
del nym_rating_formatter
def load_previous_ratings(nym):
result = {}
with open('Data/DB_Data/ratings-1.csv', 'r') as input_file:
ratings = csv.reader(input_file, delimiter=',')
for _,nym_r,domain, item,rating,num_v in ratings:
if nym_r != "nym" and int(nym_r) == nym:
result[item] = [domain,rating,num_v]
# sort by item
return result
def load_new_ratings(nym):
result = {}
with open('Data/DB_Data/ratings.csv', 'r') as input_file:
ratings = csv.reader(input_file, delimiter=',')
for _,nym_r,domain, item,rating,num_v in ratings:
if nym_r != "nym" and int(nym_r) == nym:
result[item] = [domain,rating,num_v]
# sort by item
return result
# Send new ratings to the server
def update_server(nym):
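# diff the previous and current ratings CSVs for this nym and PUT only the changed entries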
#ratings_resp = requests.get(RATINGS_REQ.format(nym), verify=False)
#current_ratings = ratings_resp.content[:len(ratings_resp.content) - int(ratings_resp.headers["padding-len"])]
old_ratings = load_previous_ratings(nym)
new_ratings = load_new_ratings(nym)
resp = None
for k, v in new_ratings.items():
if (not k in old_ratings) or old_ratings[k] != v:
domain, rating, num_v = v
print("item:{} , rating:{}, num votes:{}".format(k, rating, num_v))
new_rating = {
"nymRating" : {
"numVotes" : int(num_v),
"score": float(rating)
},
"domain": domain,
"item": k,
"nym_id": nym
}
headers = { "content-type": "application/json"}
resp = requests.put(REQ, data=json.dumps({'rating' : new_rating}), headers=headers, verify=False)
return resp
def listen_to_playlist(nym, user_num):
prev_sess = None
user = User(nym, user_num, config)
start = datetime.now()
sess_length = float(calculate_session_length(start, Devices.Mobile))
end = timedelta(seconds=(sess_length * 60))
spotify_obj = load_spotify()
decision = 'appload'
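# play recommendations until the sampled session length elapses; 'trackdone' pushes a rating update, 'clickrow' jumps to a random recommendation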
while sess_length > 0:
while datetime.now() < start + end:
print("Got here")
try:
id, nym, domain, uri, rating, num_votes = user.get_next_recommendation()
resp = None
decision = playback_decision(spotify_obj, uri, decision)
if decision == 'trackdone':
print("Updating")
resp = manual_update([id, nym, domain, uri, rating, int(num_votes)])
elif decision == "clickrow":
user.set_recommendation(random.randint(0, len(user.recommendations)))
if resp:
to_be_added = False
print(resp.status_code)
print(resp.headers["padding-len"])
while resp.status_code != 200 and not to_be_added:
rating = resp.content[:len(resp.content) - int(resp.headers["padding-len"])].decode('utf8')
rating = json.loads(rating)  # the response body is a JSON string, so use loads rather than load (which expects a file object)
if int(rating["nymRating"]["numVotes"]) == 0:
to_be_added = True
else:
score = float(rating["nymRating"]["score"])
num_votes = int(rating["nymRating"]["numVotes"])
resp = manual_update([id, nym, domain, uri, score, num_votes])
except Exception as e:
print(e)
prev_sess = sess_length
sess_length = calculate_session_length(start, Devices.Mobile, prev_session=prev_sess)
print("Session length is {}".format(sess_length))
end = timedelta(seconds=(int(sess_length) * 60))
start = datetime.now()
if __name__ == "__main__":
start = datetime.now()
period = timedelta(hours=3)
for _ in range(1):
index = random.randint(0, len(USER_LIST) - 1)
nym, user_num = USER_LIST[index]
print("nym:{}, user:{}".format(0, user_num))
current_hour = datetime.now().hour
played = False
pick_time = random.uniform(current_hour, current_hour + 1) % 24
print("Picked time is {}".format(pick_time))
while datetime.now() < start + period:
if not played and datetime.now().minute >= (pick_time % 1) * 60:
listen_to_playlist(nym, user_num)
print("finished iteration")
played = True
if current_hour < datetime.now().hour:
current_hour = datetime.now().hour
played = False
pick_time = random.uniform(current_hour, current_hour + 1) % 24
print("Picked time is {}".format(pick_time))
|
dyllew3/Timing-Attacks-Against-Opennym
|
MillionSongDataset/simulate_user.py
|
simulate_user.py
|
py
| 12,732 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.load",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "requests.put",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "pickle.load",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "DataPreprocessor.normalize_play_counts.SongRatingNormalizer",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "DataPreprocessor.create_blc_input.SparseMatGenerator",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "DataPreprocessor.filter_sparse_songs.SparseSongFilter",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "DataPreprocessor.build_song_dict.SongDictBuilder",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "DataPreprocessor.get_top_user_artists.TopUserBuilder",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "ResultProcessor.process_P.PProcessor",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "ResultProcessor.build_nym_ratings.NymRatingBuilder",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "ResultProcessor.get_top_nym_songs.SongListBuilder",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "ResultProcessor.get_unique_nym_artists.UniqueNymArtistFilter",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "ResultProcessor.get_nym_artist_variance.ArtistVarianceCalculator",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "ResultProcessor.NymRatingFormatter.NymRatingFormatter",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "requests.put",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "UserSimulator.User.User",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 313,
"usage_type": "name"
},
{
"api_name": "UserSimulator.user_behavior.calculate_session_length",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "UserSimulator.user_behavior.Devices.Mobile",
"line_number": 314,
"usage_type": "attribute"
},
{
"api_name": "UserSimulator.user_behavior.Devices",
"line_number": 314,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "UserSimulator.user_behavior.load_spotify",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 319,
"usage_type": "name"
},
{
"api_name": "UserSimulator.user_behavior.playback_decision",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "UserSimulator.user_behavior.calculate_session_length",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "UserSimulator.user_behavior.Devices.Mobile",
"line_number": 347,
"usage_type": "attribute"
},
{
"api_name": "UserSimulator.user_behavior.Devices",
"line_number": 347,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 350,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 354,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 360,
"usage_type": "name"
},
{
"api_name": "random.uniform",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 364,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 365,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 369,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 370,
"usage_type": "name"
},
{
"api_name": "random.uniform",
"line_number": 372,
"usage_type": "call"
}
] |
7811594670
|
from qiskit import *
from qiskit.visualization import plot_histogram
from qiskit.visualization import plot_state_city, plot_bloch_multivector
from qiskit.visualization import plot_state_paulivec, plot_state_hinton
from qiskit.visualization import plot_state_qsphere
# quantum circuit to make a Bell state
bell = QuantumCircuit(2, 2)
bell.h(0)
bell.cx(0, 1)
meas = QuantumCircuit(2, 2)
meas.measure([0,1], [0,1])
# execute the quantum circuit
backend = BasicAer.get_backend('qasm_simulator') # the device to run on
circ = bell.compose(meas)
result = backend.run(transpile(circ, backend), shots=1000).result()
counts = result.get_counts(circ)
print(counts)
# Histogram
plot_histogram(counts)
# Histogram plot options
# Execute 2-qubit Bell state again
second_result = backend.run(transpile(circ, backend), shots=1000).result()  # transpile converts the circuit (circ) into code for the backend
second_counts = second_result.get_counts(circ)
# Plot results with legend
legend = ['First execution', 'Second execution']  # labels for the histogram
plot_histogram([counts, second_counts], legend=legend, figsize=(15,12), color=['red', 'blue'], bar_labels=False)  # figsize: sets the plot size
# "city" plot that renders the state like buildings
backend = BasicAer.get_backend('statevector_simulator') # the device to run on
result = backend.run(transpile(bell, backend)).result()
psi = result.get_statevector(bell)
plot_state_city(psi)
# Hinton plot
plot_state_hinton(psi)
# qsphere: the amplitudes and phases of the state vector drawn on a sphere
plot_state_qsphere(psi)
# Bloch sphere
plot_bloch_multivector(psi)
|
xhaeng06x/quantum_computing
|
codingproject/whatsyoureta/ETA_3Qiskit 시각화하기/ETA-3 여러가지 시각화 도구main.py
|
ETA-3 여러가지 시각화 도구main.py
|
py
| 1,705 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "qiskit.visualization.plot_histogram",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "qiskit.visualization.plot_histogram",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "qiskit.visualization.plot_state_city",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "qiskit.visualization.plot_state_hinton",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "qiskit.visualization.plot_state_qsphere",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "qiskit.visualization.plot_bloch_multivector",
"line_number": 48,
"usage_type": "call"
}
] |
2028366431
|
#Aiswarya Sankar
#8/5/2015
import webapp2
import jinja2
import os
import logging
import hashlib
import hmac
import re
import string
import random
import time
import math
import urllib2
import json
from google.appengine.ext import db
from google.appengine.api import urlfetch
from google.appengine.api import memcache
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape=True)
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
PASS_RE = re.compile(r"^.{3,20}$")
DAYS_OF_WEEK = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
EVENT_TYPE = ['Competition', 'Meeting notice', 'Resource', 'Reminder', 'Survey']
ALL_INTERESTS = {
'Mathematics':'http://www.todayifoundout.com/wp-content/uploads/2014/11/mathematics-symbols.jpg',
'Biology': 'http://www.ccny.cuny.edu/biology/images/biologybanner.jpg',
'Chemistry': 'http://www.learnin4livin.com/wp-content/uploads/2014/07/acl2.jpg',
'Physics': 'http://callidolearning.com/wp-content/uploads/2015/07/physics.jpeg',
'Earth Science': 'http://science.nasa.gov/media/medialibrary/2011/03/01/small_earth.jpg',
'History': 'http://xemlasuong.org/wp-content/uploads/2015/01/ebola-virus-history.jpg',
'Computer Science Theory': 'https://upload.wikimedia.org/wikipedia/en/6/64/Theoretical_computer_science.svg',
'Computer Programming' : 'http://static.topyaps.com/wp-content/uploads/2012/12/computer-programming.jpg',
'Law' :'http://nagps.org/wordpress/wp-content/uploads/2014/02/law.jpg',
'Business' : 'http://globe-views.com/dcim/dreams/business/business-01.jpg',
'Economics' : 'http://www.stlawu.edu/sites/default/files/page-images/1economics_1.jpg',
'Finance' : 'http://intraweb.stockton.edu/eyos/hshs/content/images/2013%20Pics/finance.jpg',
'Marketing' : 'http://2z15ag3nu0eh3p41p2jsw3l1.wpengine.netdna-cdn.com/wp-content/uploads/2015/06/Marketing1.jpg',
'Arts' : 'http://bcaarts.org/images/Paint.jpg',
'Medicine' : 'http://ufatum.com/data_images/medicine/medicine5.jpg',
'Theater' : 'http://princetonfestival.org/wp-content/uploads/2015/03/LectureConvo.jpg',
'Dance' : 'http://static.wixstatic.com/media/11c679_bd0d108824a847729f998a7d4cd903de.gif',
'Health' : 'http://www.pacific.edu/Images/administration/finance/hr/healthy-heart.jpg',
'Food' : 'http://www.changefood.org/wp-content/uploads/2013/09/feel-healthier-bodymind-fresh-food-better-than-canned_32.jpg',
'Foreign Language' : 'http://www.scps.nyu.edu/content/scps/academics/departments/foreign-languages/_jcr_content/main_content/component_carousel/image_with_overlay_1.img.jpg/1406040703759.jpg',
'Literature' : 'http://c.tadst.com/gfx/600x400/galician-literature-day-spain.jpg?1',
'Design' : 'http://www.fotosefotos.com/admin/foto_img/foto_big/vetor_em_alta_qualidade_ee11960a4ece46ad67babac86517de82_vetor%20em%20alta%20qualidade.jpg',
'Service' : 'http://www.ycdsb.ca/assets/images/christian-community-service.jpg',
'Engineering' : 'http://cdn1.tnwcdn.com/wp-content/blogs.dir/1/files/2014/03/engineering-blueprint.jpg',
'Environmental Science' : 'http://www.ccny.cuny.edu/enveng/images/essbanner.jpg',
'Speech' : 'http://trullsenglish.weebly.com/uploads/2/5/1/9/25194894/1190544_orig.jpg'
}
urlfetch.set_default_fetch_deadline(240)
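# HMAC key used to sign the user and club cookie values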
secret = 'changetheworld'
def render_str(template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def valid_username(username):
return username and USER_RE.match(username)
def valid_password(password):
return password and PASS_RE.match(password)
def memcacheClub(a):
# cache the club list; `a` is the club entity to append (assumed to be supplied by the caller)
x = memcache.get('clubs')
if x is None:
clubQuery = Club.all()
if clubQuery is not None:
x = clubQuery
else:
x = []
x.append(a)
memcache.set('clubs', x)
def memcacheClublist(n):
# cache the list of club names; `n` is the name to append (assumed to be supplied by the caller)
y = memcache.get('CLUB_LIST')
if y is None:
clubNameQuery = db.GqlQuery('Select name from Club')
if clubNameQuery is not None:
y = clubNameQuery
else:
y = []
y.append(n)
memcache.set('CLUB_LIST', y)
#password salting functions
def make_salt():
return ''.join(random.choice(string.letters) for x in xrange(5))
def create_salt_pass(name, password, salt=''):
if salt == '':
salt = make_salt()
h = str(hashlib.sha256(name+password+salt).hexdigest())
return '%s,%s' %(salt, h)
def check_salt_pass(name, password, h):
salt = h.split(',')[0]
if h == create_salt_pass(name, password, salt):
return True
#cookie hashing functions
def create_cookie_hash(val):
return '%s|%s' %(val, hmac.new(secret, val).hexdigest())
def check_cookie_hash(h):
val = h.split('|')[0]
if h == create_cookie_hash(val):
return val
# def topics():
# x = urllib2.urlopen('https://api.coursera.org/api/catalog.v1/categories').read()
# j = json.loads(x)
# topics = []
# for x in range(0, len(j['elements'])):
# topics.append(j['elements'][x]['name'])
# memcache.set('topics', topics)
# def urls():
# start = 'http://ajax.googleapis.com/ajax/services/search/images?v=1.0&q='
# urlQueries = []
# temp = []
# topics = memcache.get('topics')
# logging.info(topics)
# for a in topics:
# m = a.split(' ')
# urlQueries.append('%s%s' % (start, '%20'.join(m)))
# for url in urlQueries:
# x = urllib2.urlopen(url).read()
# j = json.loads(x)
# logging.info(j['responseData']['results'][0]['url'])
# temp.append( j['responseData']['results'][0]['url'])
# memcache.set('urls', temp)
########
# 4 entity kinds here User, Club, Interest and Post
########
class User(db.Model):
name = db.StringProperty(required=True)
username = db.StringProperty(required=True)
idNum = db.StringProperty(required=True)
password = db.StringProperty(required=True)
interests = db.StringListProperty()
class Club(db.Model):
name = db.StringProperty(required=True)
officers = db.StringListProperty()
interests = db.StringListProperty()
location = db.StringProperty()
days = db.StringListProperty()
time = db.StringProperty() #brunch, lunch, after school
adviser = db.StringProperty()
picUrl = db.StringProperty()
def render_new_post(self):
global EVENT_TYPE
return render_str('newPost.html', eventType = EVENT_TYPE)
class Post(db.Model):
title = db.StringProperty()
content = db.TextProperty()
created_time = db.DateTimeProperty(auto_now_add = True)
interest = db.StringListProperty()
inputter = db.StringProperty()
picUrl = db.StringProperty()
eventType = db.StringProperty()
def render_post(self):
return render_str('post.html', p = self)
class Interest(db.Model):
name = db.StringProperty()
picUrl = db.StringProperty()
# def members (self):
# return Interest.gql("where user = :n", n=self.key())
# def render(self, num=0, int_list=[]):
# return render_str("interestTable.html", int_list=int_list, num= num)
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.write(*a, **kw)
def render_str(self, template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def login(self, u):
self.set_cookie(val=u.idNum)
#cookie functions
def set_club_cookie(self, name='', val=''):
cookie_hash = str(create_cookie_hash(val))
self.response.headers['Content-Type'] = 'text/plain'
self.response.headers.add_header('set-cookie','club_id=%s;Path=/' % cookie_hash)
self.response.headers['Content-Type'] = 'text/html'
def set_cookie(self, name='', val=''):
cookie_hash = str(create_cookie_hash(val))
self.response.headers['Content-Type'] = 'text/plain'
self.response.headers.add_header('set-cookie','user_id=%s;Path=/' % cookie_hash)
def get_cookie(self, name=''):
cookie = self.request.cookies.get(name)
if cookie:
return check_cookie_hash(cookie)
def initialize(self, *a, **kw):
webapp2.RequestHandler.initialize(self, *a, **kw)
idNum = self.get_cookie('user_id')
clubNum = self.get_cookie('club_id')
if idNum:
self.user = User.get_by_key_name(idNum)
else:
self.user=None
if clubNum:
self.club = Club.get_by_id(int(clubNum))
else:
self.club=None
class LoginHandler(Handler):
def get(self):
self.render('login.html')
def post(self):
username= self.request.get('username')
password = self.request.get('password')
u = User.gql('where username = :n', n=username).get()
if u and check_salt_pass(username, password, u.password):
self.login(u)
self.redirect('/home')
else:
err1 = 'Please check your username.'
self.render('login.html', err1=err1)
class EditClubHandler(Handler):
def get(self):
# top = memcache.get('topics')
# if top is None:
# topics()
# top = memcache.get('topics')
top=ALL_INTERESTS.keys()
club_id = self.get_cookie('club_id')
user_id = self.get_cookie('user_id')
if club_id and user_id:
cl = Club.get_by_id(int(club_id))
self.render('createClub.html', week=DAYS_OF_WEEK, topic_list= top,
name=cl.name, location=cl.location, time=cl.time, days=cl.days,
interests=cl.interests, officers=cl.officers, picUrl=cl.picUrl,
adviser=cl.adviser)
def post(self):
n = self.request.get('name')
a = self.request.get('adviser')
l = self.request.get('location')
t = self.request.get('time')
d = self.request.get_all('days')
i = self.request.get_all('interests')
o = self.request.get_all('officers')
picUrl = self.request.get('picUrl')
if self.club:
self.club.name=n
self.club.adviser=a
self.club.location=l
self.club.time=t
self.club.days=d
for x in self.request.get_all('interests'):
if x not in self.club.interests:
self.club.interests.append(x)
self.club.officers=o
self.club.picUrl=picUrl
self.club.put()
if self.get_cookie('user_id'):
self.redirect('/clubHome/%s' % self.get_cookie('club_id'))
class ClubHandler(Handler):
def get(self):
# top = memcache.get('topics')
# if top is None:
# topics()
# top = memcache.get('topics')
top=ALL_INTERESTS.keys()
club_id = self.get_cookie('club_id')
user_id = self.get_cookie('user_id')
# if club_id and user_id:
# cl = Club.get_by_id(int(club_id))
# self.render('createClub.html', week=DAYS_OF_WEEK, topic_list= top,
# name=cl.name, location=cl.location, time=cl.time, days=cl.days,
# interests=cl.interests, officers=cl.officers, picUrl=cl.picUrl,
# adviser=cl.adviser)
# else:
self.render('createClub.html', week=DAYS_OF_WEEK, topic_list= top)
def post(self):
n = self.request.get('name')
a = self.request.get('adviser')
l = self.request.get('location')
t = self.request.get('time')
d = self.request.get_all('days')
i = self.request.get_all('interests')
o = self.request.get_all('officers')
picUrl = self.request.get('picUrl')
# if self.club:
# self.club.name=n
# self.club.adviser=a
# self.club.location=l
# self.club.time=t
# self.club.days=d
# for x in self.request.get_all('interests'):
# if x not in self.club.interests:
# self.club.interests.append(x)
# self.club.officers=o
# self.club.picUrl=picUrl
# self.club.put()
# logging.info(self.club.location)
# else:
a = Club(name=n, location=l, time=t, days=d, interests=i, officers=o, picUrl=picUrl, adviser=a)
a.put()
if self.get_cookie('user_id'):
self.redirect('/clubHome/%s' % self.get_cookie('club_id'))
elif 'Club' in n or 'club' in n:
self.render('extra.html', name=n, x=True, thanks=True)
else:
self.render('extra.html', name=n, x=False, thanks=True)
class SignUpHandler(Handler):
def register(self, u, p, n, i):
m = User.gql('where idNum= :n', n=i).get()
s = User.gql('where username = :n', n = u).get()
if m:
self.render('signup.html', err_user = "Student id %s already has an account" %i)
elif s:
self.render('signup.html', err_user = "That username already exists. Please choose another.")
else:
password=str(create_salt_pass(u, p))
a = User(key_name= i, username=u, password=password, name=n, idNum=i)
a.put()
self.set_cookie(name='user_id', val = i)
self.redirect('/interest')
def get(self):
self.render('signup.html')
def post(self):
logging.info('in post')
have_error=False
username= self.request.get('username')
password = self.request.get('password')
name = self.request.get('name')
idNum = self.request.get('idNum')
params = dict(username = username)
if not valid_username(username):
params['err_user'] = "That's not a valid username."
have_error = True
if not valid_password(password):
params['err_pass'] = "That's not a valid password."
have_error = True
if not name:
params['err_name'] = "Please enter your name."
have_error=True
if not idNum:
params['err_id'] = "Please enter your id Number."
have_error=True
if have_error:
self.render('signup.html', **params)
else:
self.register(u=username, p=password, n=name, i=idNum)
class InterestHandler(Handler):
def get(self):
if self.user:
global ALL_INTERESTS
# vtop = memcache.get('topics')
# vurls = memcache.get('urls')
# if vtop or vurls is None:
# topics()
# urls()
# vtop = memcache.get('topics')
# vurls = memcache.get('urls')
# int_list = memcache.get('int_list')
# l = []
# if int_list is None:
# for x in range(0, len(vtop)):
# a = Interest(name=vtop[x], picUrl=vurls[x])
# a.put()
# l.append(a)
# memcache.set('int_list', l)
# int_list = memcache.get('int_list')
# length = len(int_list)
# self.render('interest.html', int_list = int_list, length=length)
self.render('interest.html', ALL_INTERESTS = ALL_INTERESTS)
else:
self.redirect('/logout')
def post(self):
for x in self.request.get_all('interests'):
if x not in self.user.interests:
self.user.interests.append(x)
else:
logging.info(x)
self.user.put()
self.redirect('/home')
class HomeHandler(Handler):
def render_page(self, user):
m = []
posts = []
postIds = []
CLUB_LIST= []
clubs = Club.all()
for x in clubs:
CLUB_LIST.append(x.name)
clubIds = []
if clubs:
for x in clubs:
clubIds.append(str(x.key().id()))
length = len(clubIds)
for a in user.interests:
m.append(a)
w = Post.gql("where interest = :c order by created_time desc", c = a)
for e in w:
if e.key().id() not in postIds:
posts.append(e)
postIds.append(e.key().id())
self.render('userHome.html', account=True, isClub=False, length = length, clubIds = clubIds, clubs=CLUB_LIST, user=user, posts=posts, intList=m)
def get(self):
if self.user:
self.render_page(self.user)
else:
self.redirect('/logout')
def post(self):
clubName = self.request.get('club')
clu = Club.gql('where name = :n', n=clubName).get()
if clu:
idNum = clu.key().id()
logging.info('idNum = %s' %idNum)
self.redirect('/clubHome/%s' %idNum)
class ClubHomeHandler(Handler):
def checkOfficers(self, club):
vari = self.get_cookie(name='user_id')
if vari in club.officers:
return True
def render_page(self, post_id):
userId = self.get_cookie('user_id')
if userId:
account = True
else:
account = False
CLUB_LIST= []
clubs = Club.all()
for x in clubs:
CLUB_LIST.append(x.name)
club = Club.get_by_id(int(post_id))
clubIds = []
if clubs:
for x in clubs:
clubIds.append(str(x.key().id()))
if club:
isOfficer = self.checkOfficers(club)
posts = Post.gql("where inputter = :c order by created_time desc", c = post_id)
offNames = []
for x in club.officers:
if x != '' and User.get_by_key_name(x):
offNames.append(User.get_by_key_name(x).name)
self.render('clubHome.html', account = account, isClub=True, length=len(clubIds), clubIds = clubIds, clubs=CLUB_LIST, offNames = offNames, club=club, isOfficer=isOfficer, posts=posts)
else:
self.render('extra.html', thanks=False)
def get(self, post_id):
#if self.user:
self.set_club_cookie(name='club_id', val=post_id)
self.render_page(post_id=post_id)
#else:
# self.redirect('/')
def post(self, post_id):
if self.request.get('form_name') == 'search':
clubName = self.request.get('club')
clu = Club.gql('where name = :n', n=clubName).get()
if clu:
idNum = clu.key().id()
logging.info('idNum = %s' %idNum)
self.redirect('/clubHome/%s' %idNum)
else:
club = Club.get_by_id(int(post_id))
content = self.request.get("content")
eventType =self.request.get("eventType")
interest = club.interests
title = "%s posted a %s" % (club.name, eventType)
picUrl = club.picUrl
inputter = post_id
p = Post(eventType=eventType, picUrl = picUrl, title=title, content=content, interest=interest, inputter=inputter)
p.put()
time.sleep(0.5)
self.render_page(post_id=post_id)
class LogoutHandler(Handler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
var = ''
self.response.headers.add_header('set-cookie', 'user_id=%s;Path=/' % var)
self.response.headers.add_header('set-cookie', 'club_id=%s;Path=/' % var)
self.redirect('/')
def post(self):
pass
class AllClubsHandler(Handler):
def get(self):
#clubs = memcache.get('clubs')
clubs = Club.all()
clubIds=[]
if clubs:
for x in clubs:
clubIds.append(str(x.key().id()))
if clubs:
length = len(clubIds)
self.render('allClubs.html', clubIds=clubIds, clubs= clubs, length=length)
else:
self.response.write("No clubs have been added yet")
app = webapp2.WSGIApplication([
('/login', LoginHandler),
('/createClub', ClubHandler),
('/', SignUpHandler),
('/allClubs', AllClubsHandler),
('/interest', InterestHandler),
('/home', HomeHandler),
('/clubHome/(\w+)', ClubHomeHandler),
('/editClub', EditClubHandler),
('/logout', LogoutHandler)
], debug=True)
|
aiswaryasankar/mock2
|
main.py
|
main.py
|
py
| 17,647 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "jinja2.Environment",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.urlfetch.set_default_fetch_deadline",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.urlfetch",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "google.appengine.api.memcache.set",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.memcache",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "google.appengine.api.memcache.get",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.memcache",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.GqlQuery",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "google.appengine.api.memcache.set",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.memcache",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "string.letters",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "hashlib.sha256",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "hmac.new",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db.Model",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringProperty",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringProperty",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringProperty",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringProperty",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringListProperty",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.Model",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringProperty",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringListProperty",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringListProperty",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringProperty",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringListProperty",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringProperty",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringProperty",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringProperty",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.Model",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringProperty",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.TextProperty",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.DateTimeProperty",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringListProperty",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringProperty",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringProperty",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringProperty",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.Model",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringProperty",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringProperty",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "webapp2.RequestHandler",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "webapp2.RequestHandler.initialize",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "webapp2.RequestHandler",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 510,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 522,
"usage_type": "call"
},
{
"api_name": "webapp2.WSGIApplication",
"line_number": 550,
"usage_type": "call"
}
] |
35396164763
|
import codecs
import unittest
from asyncssh.asn1 import der_encode, der_decode
from asyncssh.asn1 import ASN1EncodeError, ASN1DecodeError
from asyncssh.asn1 import BitString, IA5String, ObjectIdentifier
from asyncssh.asn1 import RawDERObject, TaggedDERObject, PRIVATE
class _TestASN1(unittest.TestCase):
"""Unit tests for ASN.1 module"""
tests = [
(None, '0500'),
(False, '010100'),
(True, '0101ff'),
(0, '020100'),
(127, '02017f'),
(128, '02020080'),
(256, '02020100'),
(-128, '020180'),
(-129, '0202ff7f'),
(-256, '0202ff00'),
(b'', '0400'),
(b'\0', '040100'),
(b'abc', '0403616263'),
(127*b'\0', '047f' + 127*'00'),
(128*b'\0', '048180' + 128*'00'),
('', '0c00'),
('\0', '0c0100'),
('abc', '0c03616263'),
((), '3000'),
((1,), '3003020101'),
((1, 2), '3006020101020102'),
(frozenset(), '3100'),
(frozenset({1}), '3103020101'),
(frozenset({1, 2}), '3106020101020102'),
(frozenset({-128, 127}), '310602017f020180'),
(BitString(b''), '030100'),
(BitString(b'\0', 7), '03020700'),
(BitString(b'\x80', 7), '03020780'),
(BitString(b'\x80', named=True), '03020780'),
(BitString(b'\x81', named=True), '03020081'),
(BitString(b'\x81\x00', named=True), '03020081'),
(BitString(b'\x80', 6), '03020680'),
(BitString(b'\x80'), '03020080'),
(BitString(b'\x80\x00', 7), '0303078000'),
(BitString(''), '030100'),
(BitString('0'), '03020700'),
(BitString('1'), '03020780'),
(BitString('10'), '03020680'),
(BitString('10000000'), '03020080'),
(BitString('10000001'), '03020081'),
(BitString('100000000'), '0303078000'),
(IA5String(b''), '1600'),
(IA5String(b'\0'), '160100'),
(IA5String(b'abc'), '1603616263'),
(ObjectIdentifier('0.0'), '060100'),
(ObjectIdentifier('1.2'), '06012a'),
(ObjectIdentifier('1.2.840'), '06032a8648'),
(ObjectIdentifier('2.5'), '060155'),
(ObjectIdentifier('2.40'), '060178'),
(TaggedDERObject(0, None), 'a0020500'),
(TaggedDERObject(1, None), 'a1020500'),
(TaggedDERObject(32, None), 'bf20020500'),
(TaggedDERObject(128, None), 'bf8100020500'),
(TaggedDERObject(0, None, PRIVATE), 'e0020500'),
(RawDERObject(0, b'', PRIVATE), 'c000')
]
encode_errors = [
(range, [1]), # Unsupported type
(BitString, [b'', 1]), # Bit count with empty value
(BitString, [b'', -1]), # Invalid unused bit count
(BitString, [b'', 8]), # Invalid unused bit count
(BitString, [b'0c0', 7]), # Unused bits not zero
(BitString, ['', 1]), # Unused bits with string
(BitString, [0]), # Invalid type
(ObjectIdentifier, ['']), # Too few components
(ObjectIdentifier, ['1']), # Too few components
(ObjectIdentifier, ['-1.1']), # First component out of range
(ObjectIdentifier, ['3.1']), # First component out of range
(ObjectIdentifier, ['0.-1']), # Second component out of range
(ObjectIdentifier, ['0.40']), # Second component out of range
(ObjectIdentifier, ['1.-1']), # Second component out of range
(ObjectIdentifier, ['1.40']), # Second component out of range
(ObjectIdentifier, ['1.1.-1']), # Later component out of range
(TaggedDERObject, [0, None, 99]), # Invalid ASN.1 class
(RawDERObject, [0, None, 99]), # Invalid ASN.1 class
]
decode_errors = [
'', # Incomplete data
'01', # Incomplete data
'0101', # Incomplete data
'1f00', # Incomplete data
'1f8000', # Incomplete data
'1f0001', # Incomplete data
'1f80', # Incomplete tag
'0180', # Indefinite length
'050001', # Unexpected bytes at end
'2500', # Constructed null
'050100', # Null with content
'2100', # Constructed boolean
'010102', # Boolean value not 0x00/0xff
'2200', # Constructed integer
'2400', # Constructed octet string
'2c00', # Constructed UTF-8 string
'1000', # Non-constructed sequence
'1100', # Non-constructed set
'2300', # Constructed bit string
'03020800', # Invalid unused bit count
'3600', # Constructed IA5 string
'2600', # Constructed object identifier
'0600', # Empty object identifier
'06020080', # Invalid component
'06020081' # Incomplete component
]
def test_asn1(self):
"""Unit test ASN.1 module"""
for value, data in self.tests:
data = codecs.decode(data, 'hex')
with self.subTest(msg='encode', value=value):
self.assertEqual(der_encode(value), data)
with self.subTest(msg='decode', data=data):
decoded_value = der_decode(data)
self.assertEqual(decoded_value, value)
self.assertEqual(hash(decoded_value), hash(value))
self.assertEqual(repr(decoded_value), repr(value))
self.assertEqual(str(decoded_value), str(value))
for cls, args in self.encode_errors:
with self.subTest(msg='encode error', cls=cls.__name__, args=args):
with self.assertRaises(ASN1EncodeError):
der_encode(cls(*args))
for data in self.decode_errors:
with self.subTest(msg='decode error', data=data):
with self.assertRaises(ASN1DecodeError):
der_decode(codecs.decode(data, 'hex'))
|
ronf/asyncssh
|
tests/test_asn1.py
|
test_asn1.py
|
py
| 7,788 |
python
|
en
|
code
| 1,408 |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.IA5String",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.IA5String",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.IA5String",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.ObjectIdentifier",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.ObjectIdentifier",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.ObjectIdentifier",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.ObjectIdentifier",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.ObjectIdentifier",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.TaggedDERObject",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.TaggedDERObject",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.TaggedDERObject",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.TaggedDERObject",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.TaggedDERObject",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.PRIVATE",
"line_number": 76,
"usage_type": "argument"
},
{
"api_name": "asyncssh.asn1.RawDERObject",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.PRIVATE",
"line_number": 78,
"usage_type": "argument"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "asyncssh.asn1.BitString",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "asyncssh.asn1.ObjectIdentifier",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "asyncssh.asn1.ObjectIdentifier",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "asyncssh.asn1.ObjectIdentifier",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "asyncssh.asn1.ObjectIdentifier",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "asyncssh.asn1.ObjectIdentifier",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "asyncssh.asn1.ObjectIdentifier",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "asyncssh.asn1.ObjectIdentifier",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "asyncssh.asn1.ObjectIdentifier",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "asyncssh.asn1.ObjectIdentifier",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "asyncssh.asn1.TaggedDERObject",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "asyncssh.asn1.RawDERObject",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "codecs.decode",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.der_encode",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.der_decode",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.ASN1EncodeError",
"line_number": 165,
"usage_type": "argument"
},
{
"api_name": "asyncssh.asn1.der_encode",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "asyncssh.asn1.ASN1DecodeError",
"line_number": 170,
"usage_type": "argument"
},
{
"api_name": "asyncssh.asn1.der_decode",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "codecs.decode",
"line_number": 171,
"usage_type": "call"
}
] |
20634168426
|
from .utils import get_colors
def show_help_mess(error: bool = False) -> None:
"""Usage: pytrash <param> [param[, param ...]]
{0}-h, --help{1}
Print this help message and exit.
{0}-d, --del <path> [path[ path ...]]{1}
Move files/dirs to trash (~/.local/share/Trash/).
{0}-f, --find <pattern>{1}
Search for files and directories in the trash.
{0}-r, --restore [pattern]{1}
Print list of files/dirs on trash with the possibility
of their recovery. If the pattern is specified, then
only matches with this pattern are displayed.
{0}-c, --clear{1}
Clear trash.
{0}-s, --size{1}
Show the size of the trash.
"""
colors = get_colors()
if error:
print(('{0}Wrong parameters.{1} '
'\'pytrash --help\'{2} for help').format(colors['red'],
colors['cyan'],
colors['reset']))
raise SystemExit
# show usage
print(str(show_help_mess.__doc__).format(colors['cyan'], colors['reset']))
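A stand-alone illustration of the docstring-as-usage-text pattern above (the color values below are stand-ins for what get_colors() returns):
def demo() -> None:
    """Usage: demo {0}--flag{1}"""
    colors = {'cyan': '\033[36m', 'reset': '\033[0m'}
    # the function's own docstring doubles as the help template,
    # with terminal color codes injected via str.format
    print(str(demo.__doc__).format(colors['cyan'], colors['reset']))
demo()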
|
MyRequiem/pytrash
|
src/helpmess.py
|
helpmess.py
|
py
| 1,128 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "utils.get_colors",
"line_number": 28,
"usage_type": "call"
}
] |
33042404005
|
"""Helpers for tests."""
import json
import pytest
from .common import MQTTMessage
from tests.async_mock import patch
from tests.common import load_fixture
@pytest.fixture(name="generic_data", scope="session")
def generic_data_fixture():
"""Load generic MQTT data and return it."""
return load_fixture("ozw/generic_network_dump.csv")
@pytest.fixture(name="light_data", scope="session")
def light_data_fixture():
"""Load light dimmer MQTT data and return it."""
return load_fixture("ozw/light_network_dump.csv")
@pytest.fixture(name="sent_messages")
def sent_messages_fixture():
"""Fixture to capture sent messages."""
sent_messages = []
with patch(
"homeassistant.components.mqtt.async_publish",
side_effect=lambda hass, topic, payload: sent_messages.append(
{"topic": topic, "payload": json.loads(payload)}
),
):
yield sent_messages
@pytest.fixture(name="light_msg")
async def light_msg_fixture(hass):
"""Return a mock MQTT msg with a light actuator message."""
light_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/light.json")
)
message = MQTTMessage(topic=light_json["topic"], payload=light_json["payload"])
message.encode()
return message
@pytest.fixture(name="switch_msg")
async def switch_msg_fixture(hass):
"""Return a mock MQTT msg with a switch actuator message."""
switch_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/switch.json")
)
message = MQTTMessage(topic=switch_json["topic"], payload=switch_json["payload"])
message.encode()
return message
@pytest.fixture(name="sensor_msg")
async def sensor_msg_fixture(hass):
"""Return a mock MQTT msg with a sensor change message."""
sensor_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/sensor.json")
)
message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"])
message.encode()
return message
@pytest.fixture(name="binary_sensor_msg")
async def binary_sensor_msg_fixture(hass):
"""Return a mock MQTT msg with a binary_sensor change message."""
sensor_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/binary_sensor.json")
)
message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"])
message.encode()
return message
@pytest.fixture(name="binary_sensor_alt_msg")
async def binary_sensor_alt_msg_fixture(hass):
"""Return a mock MQTT msg with a binary_sensor change message."""
sensor_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/binary_sensor_alt.json")
)
message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"])
message.encode()
return message
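A hypothetical test sketch showing how these fixtures compose (assuming an asyncio-enabled pytest setup as in the Home Assistant test suite; the test name and assertions are illustrative):
async def test_light_msg_shape(light_msg, sent_messages):
    # light_msg is an encoded MQTTMessage built from ozw/light.json;
    # sent_messages starts empty and records anything published via
    # homeassistant.components.mqtt.async_publish during the test
    assert light_msg.topic
    assert sent_messages == []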
|
84KaliPleXon3/home-assistant-core
|
tests/components/ozw/conftest.py
|
conftest.py
|
py
| 2,850 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "tests.common.load_fixture",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tests.common.load_fixture",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tests.async_mock.patch",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "tests.common.load_fixture",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "common.MQTTMessage",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "tests.common.load_fixture",
"line_number": 53,
"usage_type": "argument"
},
{
"api_name": "common.MQTTMessage",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "tests.common.load_fixture",
"line_number": 64,
"usage_type": "argument"
},
{
"api_name": "common.MQTTMessage",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "tests.common.load_fixture",
"line_number": 75,
"usage_type": "argument"
},
{
"api_name": "common.MQTTMessage",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "tests.common.load_fixture",
"line_number": 86,
"usage_type": "argument"
},
{
"api_name": "common.MQTTMessage",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 82,
"usage_type": "call"
}
] |
3919536002
|
# standard python libs
import os
import re
import html
import json
import random
import hashlib
import lxml.html
import lxml.etree
import unicodedata
import urllib.request
from datetime import datetime
from urllib.parse import urlparse
from urllib.parse import urlsplit
# non-standard libs which must be installed
from textstat.textstat import textstat
import lxml.html
# custom webxray classes
from webxray.ParseURL import ParseURL
from webxray.Utilities import Utilities
class OutputStore:
"""
This class receives data from the browser, processes it, and stores it in the db
"""
def __init__(self, db_name, db_engine):
self.db_name = db_name
self.utilities = Utilities()
self.url_parser = ParseURL()
self.debug = False
if db_engine == 'sqlite':
from webxray.SQLiteDriver import SQLiteDriver
self.sql_driver = SQLiteDriver(self.db_name)
elif db_engine == 'postgres':
from webxray.PostgreSQLDriver import PostgreSQLDriver
self.sql_driver = PostgreSQLDriver(self.db_name)
else:
print('INVALID DB ENGINE FOR %s, QUITTING!' % db_engine)
quit()
self.config = self.sql_driver.get_config()
# __init__
def close(self):
"""
Just to make sure we close the db connection.
"""
self.sql_driver.close()
# close
def store_scan(self, params):
"""
This function pre-processes data from the browser, inserts it into
database, and handles linking various entries across tables.
"""
# unpack params
browser_output = params['browser_output']
client_id = params['client_id']
crawl_id = params['crawl_id']
crawl_timestamp = params['crawl_timestamp']
crawl_sequence = params['crawl_sequence']
# client_ip is optional
if 'client_ip' in params:
client_ip = params['client_ip']
else:
client_ip = None
if self.debug: print('going to store scan %s' % browser_output['start_url'])
# keep track of domains
page_3p_cookie_domains = set()
page_3p_dom_storage_domains = set()
page_3p_request_domains = set()
page_3p_response_domains = set()
page_3p_websocket_domains = set()
# convert from timestamp to datetime object that will go to the db
accessed = datetime.fromtimestamp(browser_output['accessed'])
# first make sure we don't have it already
if self.sql_driver.page_exists(browser_output['start_url'],accessed):
return {'success': False, 'result': 'exists in db already'}
# if we have no responses the page didn't load at all and we skip
# unless we are using basic driver and then it's ok
if len(browser_output['responses']) == 0 and browser_output['browser_type'] != 'basic':
return {'success': False, 'result': 'no responses received'}
# ignore any malformed unicode characters
page_source = browser_output['page_source'].encode('utf-8', 'ignore').decode()
# store source
if self.config['store_source']:
if self.debug: print('going to store source %s' % browser_output['start_url'])
page_source_md5 = self.store_file(page_source, False, 'page_source')
else:
page_source_md5 = None
# store readability_html
if self.config['store_page_text'] and browser_output['page_text']:
if self.debug: print('going to store readability_html')
# ignore any malformed unicode characters
readability_html = browser_output['readability_html'].encode('utf-8', 'ignore').decode().strip()
readability_source_md5 = self.store_file(readability_html, False, 'readability_html')
# store_page_text handles some additional operations
if self.debug: print('going to store page_text')
page_text_id = self.store_page_text(readability_html,readability_source_md5)
else:
page_text_id = None
# process info on the start_url domain
if self.debug: print('going to parse start/final_url %s' % browser_output['start_url'])
start_url = browser_output['start_url']
start_url_domain_info = self.url_parser.get_parsed_domain_info(start_url)
if start_url_domain_info['success'] == False:
err_msg = 'unable to parse start_url_domain_info info for %s with error %s' % (browser_output['start_url'], start_url_domain_info['result'])
if self.debug: print(err_msg)
self.sql_driver.log_error({
'client_id' : client_id,
'target' : start_url,
'task' : 'output_store',
'msg' : err_msg
})
return {'success': False, 'result': 'could not parse start_url'}
else:
# needed for comparisons later on
start_url_domain = start_url_domain_info['result']['domain']
# add start_url domain and get id
start_url_domain_id = self.sql_driver.add_domain(start_url_domain_info['result'])
# process info on the final_url domain
# note: we use the final_url domain as the benchmark for determining 1p/3p
final_url = browser_output['final_url']
final_url_domain_info = self.url_parser.get_parsed_domain_info(final_url)
if final_url_domain_info['success'] == False:
err_msg = 'unable to parse final_url_domain_info info for %s with error %s' % (browser_output['final_url'], final_url_domain_info['result'])
if self.debug: print(err_msg)
self.sql_driver.log_error({
'client_id' : client_id,
'target' : start_url,
'task' : 'output_store',
'msg' : err_msg
})
return {'success': False, 'result': 'could not parse final_url'}
else:
final_url_domain = final_url_domain_info['result']['domain']
# self.sql_driver.add_domain both stores the new domain and returns its db row id
# if it is already in db just return the existing id
final_url_domain_id = self.sql_driver.add_domain(final_url_domain_info['result'])
# check if the page has redirected to a new domain
if start_url_domain != final_url_domain:
page_domain_redirect = True
else:
page_domain_redirect = False
# this is semi-redundant but ensures that any config changes made while
# a result is queued are followed
if self.config['client_reject_redirects'] and page_domain_redirect:
return {'success': False, 'result': 'rejecting redirect'}
# if the final page is https (often after a redirect), mark it appropriately
if browser_output['final_url'][:5] == 'https':
page_is_ssl = True
else:
page_is_ssl = False
# (optionally) process and store links, this allows us to go back later and do deeper scans
# as well as do more with policies
# links starts as empty list
links = []
# keep track of link counts as helpful for filtering pages
link_count_internal = 0
link_count_external = 0
if self.config['store_links']:
if self.debug: print('going to process links %s' % browser_output['start_url'])
# we use the list of policy_link_terms to flag that a link *might*
# be for a policy, we check if it actually is policy in PolicyCollector.py
policy_link_terms = self.utilities.get_policy_link_terms()
# process links, duplicates get ignored by db
for link in browser_output['all_links']:
# skip if href not valid
if not self.utilities.is_url_valid(link['href']): continue
# unpack values and catch any unicode errors
link_text = link['text'].encode('utf-8', 'ignore').decode()
link_url = link['href'].encode('utf-8', 'ignore').decode()
# get rid of trailing # and /
if link_url.strip()[-1:] == '#': link_url = link_url.strip()[:-1]
if link_url.strip()[-1:] == '/': link_url = link_url.strip()[:-1]
# sometimes the text will be a dict (very rarely)
# so we convert to string
link_text = str(link_text).strip()
# clean up white space and remove line breaks
link_text = re.sub('\n|\r|\t|\s+',' ',link_text.strip())
link_url = re.sub('\n|\r|\t|\s+',' ',link_url.strip())
# catch nulls
link_text = link_text.replace('\x00','NULL_REPLACED_FOR_PSQL')
link_url = link_url.replace('\x00','NULL_REPLACED_FOR_PSQL')
# update counts
if link['internal']:
link_count_internal += 1
else:
link_count_external += 1
# flag links that could be policies, default False
link_is_policy = False
# determine if a policy term appears in the link
for policy_term in policy_link_terms:
if policy_term in link_text.lower():
link_is_policy = True
break
link_domain_info = self.url_parser.get_parsed_domain_info(link_url)
if link_domain_info['success'] == False:
# don't bother with storing errors
link_domain_id = None
else:
# self.sql_driver.add_domain both stores the new domain and returns its db row id
# if it is already in db just return the existing id
link_domain_id = self.sql_driver.add_domain(link_domain_info['result'])
links.append({
'url' : link_url,
'text' : link_text,
'is_internal' : link['internal'],
'is_policy' : link_is_policy,
'domain_id' : link_domain_id
})
# if we got the screen shot we get the hash and store it to the file table
screen_shot_md5 = None
if browser_output['screen_shot'] and self.config['store_screen_shot']:
if self.debug: print('going to store screen shot %s' % browser_output['start_url'])
# store file to get md5
screen_shot_md5 = self.store_file(browser_output['screen_shot'],True,'screen_shot')
# if we have timestamp it is also an 'accessed' field from
# a page load so we convert that as well
if crawl_timestamp:
crawl_timestamp = datetime.fromtimestamp(crawl_timestamp)
# ignore any malformed unicode characters
if browser_output['title']:
browser_output['title'] = browser_output['title'].encode('utf-8', 'ignore').decode()
if browser_output['meta_desc']:
browser_output['meta_desc'] = browser_output['meta_desc'].encode('utf-8', 'ignore').decode()
if browser_output['lang']:
browser_output['lang'] = browser_output['lang'].encode('utf-8', 'ignore').decode()
# now we know link counts we can store the page
if self.debug: print('going to store page %s' % browser_output['start_url'])
page_id = self.sql_driver.add_page({
'accessed' : accessed,
'browser_type' : browser_output['browser_type'],
'browser_version' : browser_output['browser_version'],
'browser_prewait' : browser_output['prewait'],
'browser_no_event_wait' : browser_output['no_event_wait'],
'browser_max_wait' : browser_output['max_wait'],
'page_load_strategy' : browser_output['page_load_strategy'],
'title' : browser_output['title'],
'meta_desc' : browser_output['meta_desc'],
'lang' : browser_output['lang'],
'start_url' : browser_output['start_url'],
'final_url' : browser_output['final_url'],
'is_ssl' : page_is_ssl,
'page_domain_redirect' : page_domain_redirect,
'link_count_internal' : link_count_internal,
'link_count_external' : link_count_external,
'load_time' : browser_output['load_time'],
'start_url_domain_id' : start_url_domain_id,
'final_url_domain_id' : final_url_domain_id,
'client_id' : client_id,
'client_timezone' : browser_output['client_timezone'],
'client_ip' : client_ip,
'page_text_id' : page_text_id,
'screen_shot_md5' : screen_shot_md5,
'page_source_md5' : page_source_md5,
'crawl_id' : crawl_id,
'crawl_timestamp' : crawl_timestamp,
'crawl_sequence' : crawl_sequence
})
# STORE LINKS
if self.config['store_links']:
if self.debug: print('going to store links %s' % browser_output['start_url'])
for link in links:
link_id = self.sql_driver.add_link(link)
if link_id: self.sql_driver.join_link_to_page(page_id,link_id)
# PROCESS DOM_STORAGE
if self.config['store_dom_storage']:
if self.debug: print('going to process dom storage %s' % browser_output['start_url'])
for dom_storage in browser_output['dom_storage']:
# parse domain from the security_origin, which is equivalent to a url
domain_info = self.url_parser.get_parsed_domain_info(dom_storage['security_origin'])
if domain_info['success'] == False:
err_msg = 'unable to parse domain info for %s with error %s' % (dom_storage['security_origin'], domain_info['result'])
if self.debug: print(err_msg)
self.sql_driver.log_error({
'client_id' : client_id,
'target' : start_url,
'task' : 'output_store',
'msg' : err_msg
})
continue
else:
# self.sql_driver.add_domain both stores the new domain and returns its db row id
# if it is already in db just return the existing id
dom_storage['domain_id'] = self.sql_driver.add_domain(domain_info['result'])
# mark if third-party storage
if final_url_domain != domain_info['result']['domain']:
dom_storage['is_3p'] = True
else:
dom_storage['is_3p'] = False
# key to page
dom_storage['page_id'] = page_id
# replace null b/c postgres will die otherwise
dom_storage['key'] = dom_storage['key'].replace('\x00','NULL_REPLACED_FOR_PSQL')
dom_storage['value'] = dom_storage['value'].replace('\x00','NULL_REPLACED_FOR_PSQL')
# there are types of illegal utf-8 characters that psql doesn't like, e.g. trying to store
# '\uded5' gives this error when storing in psql:
# 'UnicodeEncodeError: 'utf-8' codec can't encode character '\uded5' in position 0: surrogates not allowed'
#
# to overcome the above, we use python's backslashreplace to keep the original data in
# a way that won't cause our queries to die
# see https://docs.python.org/3/library/codecs.html#error-handlers
dom_storage['key'] = dom_storage['key'].encode('utf-8','backslashreplace')
dom_storage['value'] = dom_storage['value'].encode('utf-8','backslashreplace')
# now that we've encoded with backslashes we decode to get the semi-original data
dom_storage['key'] = dom_storage['key'].decode('utf-8')
dom_storage['value'] = dom_storage['value'].decode('utf-8')
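# e.g. the lone surrogate '\uded5' becomes the literal text '\\uded5', which
# psql accepts: '\uded5'.encode('utf-8','backslashreplace').decode('utf-8') == '\\uded5'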
# all done with this item
self.sql_driver.add_dom_storage(dom_storage)
# update domains
if dom_storage['is_3p']:
page_3p_dom_storage_domains.add((domain_info['result']['domain'],domain_info['result']['domain_owner_id']))
# PROCESS LOAD FINISH
if self.debug: print('going to process load finish data %s' % browser_output['start_url'])
load_finish_data = {}
for load_finish_event in browser_output['load_finish_events']:
load_finish_data[load_finish_event['request_id']] = load_finish_event['encoded_data_length']
# RESPONSE EXTRA HEADERS
if self.debug: print('going to process response extra header data %s' % browser_output['start_url'])
http_cookies = []
internal_id_to_resp_ex_headers = {}
for response_extra_header in browser_output['response_extra_headers']:
response_extra_header['page_id'] = page_id
response_extra_header['cookies_set'] = None
# to check for domain leakage in headers we make a big string keyed to the internal id
if response_extra_header['request_id'] not in internal_id_to_resp_ex_headers:
internal_id_to_resp_ex_headers[response_extra_header['request_id']] = str(response_extra_header['headers'])
else:
internal_id_to_resp_ex_headers[response_extra_header['request_id']] += str(response_extra_header['headers'])
for item in response_extra_header['headers']:
if item.lower() == 'set-cookie':
response_extra_header['cookies_set'] = response_extra_header['headers'][item]
# when we add cookies later on we mark those that came from response headers,
# note we try/pass on this in case we can't parse
for cookie in response_extra_header['cookies_set'].split('\n'):
if 'domain' in cookie.lower():
try:
name = re.match('^(.+?)=',cookie)[0][:-1]
domain = re.match('^.+domain=(.+?)(;|$)',cookie.lower())[1]
if domain[0] == '.': domain = domain[1:]
http_cookies.append((domain,name))
except:
pass
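# e.g. the header 'id=abc123; Domain=.example.com; Path=/' yields the
# tuple ('example.com', 'id') in http_cookies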
if self.config['store_response_xtra_headers']:
self.sql_driver.add_response_extra_header(response_extra_header)
# PROCESS RESPONSES
response_received_req_ids = []
if self.debug: print('going to process response data %s' % browser_output['start_url'])
for response in browser_output['responses']:
# default values that may get overwritten
response['file_md5'] = None
response['is_data'] = False
response['is_3p'] = None
response['is_ssl'] = None
response['page_domain_in_headers'] = False
# first handle non-http urls and optionally store content
if re.match('^(data|about|chrome|blob|javascript).+', response['url']):
if 'base64' in response['url'].lower() or 'image' in response['type'].lower():
is_base64 = True
else:
is_base64 = False
# store_file follows the config as far as actually storing the file goes
# and will either return the md5 or None
# make sure we're following our configuration
if self.config['store_files'] and (self.config['store_base64'] or is_base64 == False):
response['file_md5'] = self.store_file(response['url'],is_base64,response['type'])
else:
response['file_md5'] = None
response['url'] = None
response['is_data'] = True
response['domain_id'] = None
else:
# parse, store, and get id of domain; if fails skip
domain_info = self.url_parser.get_parsed_domain_info(response['url'])
if domain_info['success'] == False:
err_msg = 'unable to parse domain info for %s with error %s' % (response['url'], domain_info['result'])
if self.debug: print(err_msg)
self.sql_driver.log_error({
'client_id' : client_id,
'target' : start_url,
'task' : 'output_store',
'msg' : err_msg
})
continue
else:
response_domain = domain_info['result']['domain']
response['domain_id'] = self.sql_driver.add_domain(domain_info['result'])
# now add ip
if response['remote_ip_address']:
self.sql_driver.add_domain_ip_addr(response['domain_id'],response['remote_ip_address'])
# mark third-party responses based on final_url domain
if response_domain != final_url_domain:
response['is_3p'] = True
else:
response['is_3p'] = False
# determine if encrypted
if response['url'][:5] == 'https' or response['url'][:3] == 'wss':
response['is_ssl'] = True
else:
response['is_ssl'] = False
# keep track of the request ids of each response to mark as received
response_received_req_ids.append(response['request_id'])
# we do no more processing at this point
if not self.config['store_responses']:
continue
# lower case the type, simplifies db queries
response['type'] = response['type'].lower()
# store the security details if they exist
if response['security_details'] and self.config['store_security_details']:
response['security_details_id'] = self.sql_driver.add_security_details(response['security_details'])
else:
response['security_details_id'] = None
# store the size of the request
if response['request_id'] in load_finish_data:
response['final_data_length'] = load_finish_data[response['request_id']]
else:
response['final_data_length'] = None
# parse off args/etc
# consider anything before the "?" to be the element_url
try:
response['base_url'] = re.search('^(.+?)\?.+$', response['url']).group(1)
except:
response['base_url'] = response['url']
# attempt to parse off the extension
try:
response['extension'] = re.search('\.([0-9A-Za-z]+)$', response['base_url']).group(1).lower()
except:
response['extension'] = None
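# e.g. 'https://site.com/app.js?v=2' gives base_url 'https://site.com/app.js'
# and extension 'js'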
# First see if this request_id is present in response_bodies, and if
# the entry is not None, then we store it to the db if config says to.
if response['request_id'] in browser_output['response_bodies']:
if browser_output['response_bodies'][response['request_id']]:
# make sure we're following our configuration
is_base64 = browser_output['response_bodies'][response['request_id']]['is_base64']
if self.config['store_files'] and (self.config['store_base64'] or is_base64 == False):
response['file_md5'] = self.store_file(
browser_output['response_bodies'][response['request_id']]['body'],
is_base64,
response['type']
)
else:
response['file_md5'] = None
# link to page
response['page_id'] = page_id
# parse data headers, accounts for upper/lower case variations (eg 'set-cookie', 'Set-Cookie')
response['content_type'] = None
response['cookies_set'] = None
for item in response['response_headers']:
if item.lower() == 'content-type':
response['content_type'] = response['response_headers'][item]
if item.lower() == 'set-cookie':
response['cookies_set'] = response['response_headers'][item]
# if we have request_headers look for cookies sent
response['cookies_sent'] = None
if response['request_headers']:
for item in response['request_headers']:
if item.lower() == 'cookie':
response['cookies_sent'] = response['request_headers'][item]
# parse referer header
response['referer'] = None
for item in response['response_headers']:
if item.lower() == 'referer':
response['referer'] = response['response_headers'][item]
# check if domain leaked in referer
if response['request_id'] in internal_id_to_resp_ex_headers:
if final_url_domain in internal_id_to_resp_ex_headers[response['request_id']]:
response['page_domain_in_headers'] = True
# convert from timestamp to datetime object that will go to the db
response['timestamp'] = datetime.fromtimestamp(response['timestamp'])
# store
self.sql_driver.add_response(response)
# update domains
if response['is_3p']:
page_3p_response_domains.add((domain_info['result']['domain'],domain_info['result']['domain_owner_id']))
# REQUEST EXTRA HEADERS
if self.debug: print('going to process request extra headers data %s' % browser_output['start_url'])
internal_id_to_req_ex_headers = {}
for request_extra_header in browser_output['request_extra_headers']:
request_extra_header['page_id'] = page_id
request_extra_header['cookies_sent'] = None
# to check for domain leakage in headers we make a big string keyed to the internal id
if request_extra_header['request_id'] not in internal_id_to_req_ex_headers:
internal_id_to_req_ex_headers[request_extra_header['request_id']] = str(request_extra_header['headers'])
else:
internal_id_to_req_ex_headers[request_extra_header['request_id']] += str(request_extra_header['headers'])
for item in request_extra_header['headers']:
if item.lower() == 'cookie':
request_extra_header['cookies_sent'] = request_extra_header['headers'][item]
if self.config['store_request_xtra_headers']:
self.sql_driver.add_request_extra_header(request_extra_header)
# PROCESS REQUESTS
if self.config['store_requests']:
if self.debug: print('going to process request data %s' % browser_output['start_url'])
for request in browser_output['requests']:
# default values that may get overwritten
request['file_md5'] = None
request['is_data'] = False
request['is_3p'] = None
request['is_ssl'] = None
request['page_domain_in_headers'] = False
# first handle non-http urls and optionally store content
if re.match('^(data|about|chrome|blob|javascript).+', request['url']):
if 'base64' in request['url'].lower() or 'image' in request['url'].lower():
is_base64 = True
else:
is_base64 = False
# store_file follows the config as far as actually storing the file goes
# and will either return the md5 or None
# make sure we're following our configuration
if self.config['store_files'] and (self.config['store_base64'] or is_base64 == False):
request['file_md5'] = self.store_file(request['url'],is_base64,request['type'])
else:
request['file_md5'] = None
request['url'] = None
request['is_data'] = True
request['domain_id'] = None
else:
# parse, store, and get id of domain; if fails skip
domain_info = self.url_parser.get_parsed_domain_info(request['url'])
if domain_info['success'] == False:
err_msg = 'unable to parse domain info for %s with error %s' % (request['url'], domain_info['result'])
if self.debug: print(err_msg)
self.sql_driver.log_error({
'client_id' : client_id,
'target' : start_url,
'task' : 'output_store',
'msg' : err_msg
})
continue
else:
request_domain = domain_info['result']['domain']
request['domain_id'] = self.sql_driver.add_domain(domain_info['result'])
# mark third-party requests based on final_url domain
if request_domain != final_url_domain:
request['is_3p'] = True
else:
request['is_3p'] = False
# determine if encrypted
if request['url'][:5] == 'https' or request['url'][:3] == 'wss':
request['is_ssl'] = True
else:
request['is_ssl'] = False
# replace null b/c postgres will die otherwise
if request['post_data']:
request['post_data'] = request['post_data'].replace('\x00','NULL_REPLACED_FOR_PSQL')
# consider anything after the "?" to be the GET data
try:
get_string = re.search('^.+\?(.+)$', request['url']).group(1)
get_string = get_string.replace('\x00','NULL_REPLACED_FOR_PSQL')
get_data = {}
for key_val in get_string.split('&'):
get_data[key_val.split('=')[0]] = key_val.split('=')[1]
request['get_data'] = json.dumps(get_data)
except:
request['get_data'] = None
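# e.g. 'https://site.com/a?b=1&c=2' gives get_data '{"b": "1", "c": "2"}'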
# mark if response received
if request['request_id'] in response_received_req_ids:
request['response_received'] = True
else:
request['response_received'] = None
# mark if the loading finished
if request['request_id'] in load_finish_data:
request['load_finished'] = True
else:
request['load_finished'] = None
# lower case the type, simplifies db queries
if request['type']: request['type'] = request['type'].lower()
# parse off args/etc
# consider anything before the "?" to be the element_url
try:
request['base_url'] = re.search('^(.+?)\?.+$', request['url']).group(1)
except:
request['base_url'] = request['url']
# attempt to parse off the extension
try:
request['extension'] = re.search('\.([0-9A-Za-z]+)$', request['base_url']).group(1).lower()
except:
request['extension'] = None
# link to page
request['page_id'] = page_id
# parse referer header
request['referer'] = None
for item in request['headers']:
if item.lower() == 'referer':
request['referer'] = request['headers'][item]
# check if domain leaked in headers
if request['request_id'] in internal_id_to_req_ex_headers:
if final_url_domain in internal_id_to_req_ex_headers[request['request_id']]:
request['page_domain_in_headers'] = True
# convert from timestamp to datetime object that will go to the db
request['timestamp'] = datetime.fromtimestamp(request['timestamp'])
# all done
self.sql_driver.add_request(request)
# update domains
if request['is_3p']:
page_3p_request_domains.add((domain_info['result']['domain'],domain_info['result']['domain_owner_id']))
# PROCESS WEBSOCKETS
if self.config['store_websockets']:
if self.debug: print('going to process websocket data %s' % browser_output['start_url'])
ws_id_map = {}
for websocket in browser_output['websockets']:
domain_info = self.url_parser.get_parsed_domain_info(websocket['url'])
if domain_info['success'] == False:
err_msg = 'unable to parse domain info for %s with error %s' % (websocket['url'], domain_info['result'])
if self.debug: print(err_msg)
self.sql_driver.log_error({
'client_id' : client_id,
'target' : start_url,
'task' : 'output_store',
'msg' : err_msg
})
continue
else:
# self.sql_driver.add_domain both stores the new domain and returns its db row id
# if it is already in db just return the existing id
websocket['domain_id'] = self.sql_driver.add_domain(domain_info['result'])
# mark if third-party connection
if final_url_domain != domain_info['result']['domain']:
websocket['is_3p'] = True
else:
websocket['is_3p'] = False
websocket['page_id'] = page_id
this_websocket_id = self.sql_driver.add_websocket(websocket)
# update domains
if websocket['is_3p']:
page_3p_websocket_domains.add((domain_info['result']['domain'],domain_info['result']['domain_owner_id']))
if websocket['request_id'] not in ws_id_map:
ws_id_map[websocket['request_id']] = this_websocket_id
else:
print('ERROR WS_REQ_ID ALREADY IN MAP')
# PROCESS WEBSOCKET EVENTS
if self.config['store_websockets'] and self.config['store_websocket_events']:
for websocket_event in browser_output['websocket_events']:
websocket_event['page_id'] = page_id
if websocket_event['request_id'] in ws_id_map:
websocket_event['websocket_id'] = ws_id_map[websocket_event['request_id']]
else:
websocket_event['websocket_id'] = None
# convert from timestamp to datetime object that will go to the db
websocket_event['timestamp'] = datetime.fromtimestamp(websocket_event['timestamp'])
self.sql_driver.add_websocket_event(websocket_event)
# PROCESS EVENT SOURCE MSGS
if self.config['store_event_source_msgs']:
if self.debug: print('going to process event source data %s' % browser_output['start_url'])
for event_source_msg in browser_output['event_source_msgs']:
event_source_msg['page_id'] = page_id
# convert from timestamp to datetime object that will go to the db
event_source_msg['timestamp'] = datetime.fromtimestamp(event_source_msg['timestamp'])
self.sql_driver.add_event_source_msg(event_source_msg)
# PROCESS COOKIES
if self.config['store_cookies']:
if self.debug: print('going to process cookies %s' % browser_output['start_url'])
for cookie in browser_output['cookies']:
# get the ip, fqdn, domain, pubsuffix, and tld
# we need the domain to figure out if cookies/elements are third-party
# note:
# url_parser fails on non-http, we should fix this, right now a lame hack is to prepend http://
# parse domain from the security_origin, which is equivalent to a url
domain_info = self.url_parser.get_parsed_domain_info('http://'+cookie['domain'])
if domain_info['success'] == False:
err_msg = 'unable to parse domain info for %s with error %s' % (cookie['domain'], domain_info['result'])
if self.debug: print(err_msg)
self.sql_driver.log_error({
'client_id' : client_id,
'target' : start_url,
'task' : 'output_store',
'msg' : err_msg
})
continue
else:
# self.sql_driver.add_domain both stores the new domain and returns its db row id
# if it is already in db just return the existing id
cookie['domain_id'] = self.sql_driver.add_domain(domain_info['result'])
# mark if third-party cookie
if final_url_domain != domain_info['result']['domain']:
cookie['is_3p'] = True
else:
cookie['is_3p'] = False
# key to page
cookie['page_id'] = page_id
# fix var names
cookie['http_only'] = cookie['httpOnly']
# attempt to convert cookie expiry from timestamp to datetime object, note we
# need try/except as python datetime object cannot have year > 9999 and some
# cookies do that
cookie['expires_timestamp'] = None
if cookie['expires']:
try:
cookie['expires_timestamp'] = datetime.fromtimestamp(cookie['expires'])
except:
pass
# sameSite is optional, so fall back to None if absent
if 'sameSite' in cookie:
cookie['same_site'] = cookie['sameSite']
else:
cookie['same_site'] = None
# see if this cookie was set via http response
if cookie['domain'][0] == '.':
cookie_tuple = (cookie['domain'][1:],cookie['name'])
else:
cookie_tuple = (cookie['domain'],cookie['name'])
if cookie_tuple in http_cookies:
cookie['is_set_by_response'] = True
else:
cookie['is_set_by_response'] = False
# all done with this cookie
self.sql_driver.add_cookie(cookie)
# update domains
if cookie['is_3p']:
page_3p_cookie_domains.add((domain_info['result']['domain'],domain_info['result']['domain_owner_id']))
if self.debug: print('done storing scan %s' % browser_output['start_url'])
return {
'success' : True,
'page_id' : page_id,
'page_3p_request_domains' : page_3p_request_domains,
'page_3p_response_domains' : page_3p_response_domains,
'page_3p_websocket_domains' : page_3p_websocket_domains,
'page_3p_dom_storage_domains' : page_3p_dom_storage_domains,
'page_3p_cookie_domains' : page_3p_cookie_domains
}
# store_scan
def store_file(self,body,is_base64,type):
"""
Hashes and stores file, returns file_md5.
"""
# in theory we shouldn't get here if it is base64, so this is a fail-safe check
if not self.config['store_base64']:
if is_base64 or type.lower()=='image':
return None
# note hash is on original data, which we modify to remove \x00 before we store
file_md5 = hashlib.md5(body.encode()).hexdigest()
# store to db, note query will be ignored on conflict
# but since we calculate the md5 as above that is fine
self.sql_driver.add_file({
'md5' : file_md5,
'body' : body.replace('\x00','NULL_REPLACED_FOR_PSQL'),
'type' : type.lower(),
'is_base64' : is_base64
})
return file_md5
# store_file
def store_policy(self, browser_output, client_id, client_ip=None):
"""
We attempt to figure out if the text provided is a policy, if so
we store it to the database.
"""
# keep values in a dict here
policy = {}
# the attempt to get_policy was a success, so extract data from the
# dict; since postgres cannot handle '\x00' we convert several
# fields to strings and use .replace('\x00',' ') to clean the input
policy['client_id'] = client_id
policy['client_ip'] = client_ip
policy['browser_type'] = browser_output['browser_type']
policy['browser_version'] = browser_output['browser_version']
policy['browser_prewait'] = browser_output['prewait']
policy['start_url'] = browser_output['start_url']
policy['final_url'] = browser_output['final_url']
policy['title'] = browser_output['title']
policy['meta_desc'] = browser_output['meta_desc']
policy['lang'] = browser_output['lang']
policy['fk_score'] = None
policy['fre_score'] = None
policy['word_count'] = None
policy['type'] = None
policy['match_term'] = None
policy['match_text'] = None
policy['match_text_type'] = None
policy['confidence'] = None
policy['page_text_id'] = None
policy['page_source_md5'] = None
# if readability failed we bail
if not browser_output['readability_html'] or not browser_output['page_text']:
self.sql_driver.close()
return {
'success' : False,
'result' : 'No readability result'
}
# ignore any malformed unicode characters
readability_html = browser_output['readability_html'].encode('utf-8', 'ignore').decode().strip()
page_text = browser_output['page_text'].encode('utf-8', 'ignore').decode().strip()
page_source = browser_output['page_source'].encode('utf-8', 'ignore').decode()
# bail on empty text
if len(page_text) == 0:
self.sql_driver.close()
return {
'success' : False,
'result' : 'Empty page text'
}
# load the source into lxml so we can do additional processing,
# if we fail we bail
try:
lxml_doc = lxml.html.fromstring(readability_html)
except:
return ({
'success': False,
'result': 'Could not parse readability_html with lxml'
})
# if the text is less than 500 words we ignore it
if len(page_text.split(' ')) < 500:
self.sql_driver.close()
return {
'success' : False,
'result' : 'Page text < 500 words'
}
# once we have the text we figure out if it is
# a policy, start false, override on match
is_policy = False
# first look for matches on page title
# we give this confidence of 100 as it is
# definitely a match
if policy['title']:
policy_type_result = self.determine_policy_type_from_text(policy['title'])
if policy_type_result['success'] == True:
is_policy = True
policy['type'] = policy_type_result['result']['policy_type']
policy['match_term'] = policy_type_result['result']['match_term']
policy['match_text'] = policy_type_result['result']['match_text']
policy['match_text_type'] = 'title'
policy['confidence'] = 100
# deep checks may generate false positives so
# they have confidence of 0 until they can
# be verified, note we may do this here
# or later on
deep_checks = True
if deep_checks:
policy['confidence'] = 0
# convert the url path to a sentence by replacing
# common delimiters with spaces and attempt matches
if self.debug: print('going to do checks on url path')
if not is_policy:
url_path_string = re.sub('[-|_|/|\.]',' ',urlsplit(policy['start_url']).path)
if len(url_path_string) > 0:
policy_type_result = self.determine_policy_type_from_text(url_path_string)
if policy_type_result['success'] == True:
is_policy = True
policy['type'] = policy_type_result['result']['policy_type']
policy['match_term'] = policy_type_result['result']['match_term']
policy['match_text'] = policy_type_result['result']['match_text']
policy['match_text_type'] = 'url_path'
if self.debug: print('going to do checks on meta desc')
if not is_policy and policy['meta_desc']:
policy_type_result = self.determine_policy_type_from_text(policy['meta_desc'])
if policy_type_result['success'] == True:
is_policy = True
policy['type'] = policy_type_result['result']['policy_type']
policy['match_term'] = policy_type_result['result']['match_term']
policy['match_text'] = policy_type_result['result']['match_text']
policy['match_text_type'] = 'meta_desc'
# iterate over all types of heading tags to extract text
# and check for policy matches. note we go in order of
# importance (eg h1->h7->span,etc)
if self.debug: print('going to do checks on heading tags')
if not is_policy:
for tag_type in ['h1','h2','h3','h4','h5','h6','h7','span','strong','em']:
if is_policy: break
tags = lxml_doc.cssselect(tag_type)
if len(tags) > 0:
for tag in tags:
tag_text = tag.text_content()
# if it is > 15 words it is likely not a heading
if len(tag_text.split(' ')) > 15: break
policy_type_result = self.determine_policy_type_from_text(tag_text)
if policy_type_result['success'] == True:
is_policy = True
policy['type'] = policy_type_result['result']['policy_type']
policy['match_term'] = policy_type_result['result']['match_term']
policy['match_text'] = policy_type_result['result']['match_text']
policy['match_text_type'] = tag_type
# if it is a policy we do additional processing
# before storing in db, otherwise we fail
# gracefully
if is_policy:
if self.debug: print('going to store readability_html')
readability_source_md5 = self.store_file(readability_html, False, 'readability_html')
# store_page_text handles some additional operations
if self.debug: print('going to store page_text')
policy['page_text_id'] = self.store_page_text(readability_html, readability_source_md5)
if self.debug: print(f"page_text_id is {policy['page_text_id']}")
if self.debug: print('going to store page_source')
policy['page_source_md5'] = self.store_file(page_source, False, 'page_source')
if self.debug: print('going to do reading ease scores')
# get readability scores, scores below zero are
# invalid so we null them
policy['fre_score'] = textstat.flesch_reading_ease(page_text)
if policy['fre_score'] <= 0:
policy['fre_score'] = None
policy['fk_score'] = textstat.flesch_kincaid_grade(page_text)
if policy['fk_score'] <= 0:
policy['fk_score'] = None
if self.debug: print('going to store policy')
# add to db and get id for this policy
policy_id = self.sql_driver.add_policy(policy)
if self.debug: print('going to link policy to pages')
# attach policy to all pages with links to this url; note we can
# filter to only do internal links
for page_id, crawl_id in self.sql_driver.get_page_ids_from_link_url(policy['start_url'],internal_links_only=True):
self.sql_driver.attach_policy_to_page(policy_id,page_id)
self.sql_driver.attach_policy_to_crawl(policy_id,crawl_id)
if self.debug:
print(f'\t👍 Success: {policy["start_url"]}')
self.sql_driver.close()
return {'success': True}
else:
if self.debug:
print(f'\t👎 Fail: {policy["start_url"]}')
self.sql_driver.close()
return {
'success': False,
'result': 'Not policy'
}
# store_policy
def determine_policy_type_from_text(self, text):
"""
Determine if a given text fragment indicates
a given type of policy.
Returns dict.
"""
# clear whitespace
text = re.sub('\s+',' ',text)
# retrieve values from policy_terms.json
policy_verification_terms = self.utilities.get_policy_verification_terms()
policy_type_keys = []
for key in policy_verification_terms:
policy_type_keys.append(key)
# randomize the order we do our checks
random.shuffle(policy_type_keys)
# look for matches against verification terms
for policy_type in policy_type_keys:
for term in policy_verification_terms[policy_type]:
if term in text.lower():
return({
'success': True,
'result' :{
'policy_type': policy_type,
'match_term': term,
'match_text': text
}
})
# no match
return ({'success': False})
# determine_policy_type_from_text
def store_page_text(self,readability_html,readability_source_md5):
# the actual 'page_text' output from readability doesn't properly separate words
# that rely on markup for spacing, e.g. '<h3>this</h3><p>that</p>' becomes 'thisthat'
# whereas 'this that' is what a user would see in the browser
# to overcome the above issue we have to manually strip out html and do some
# cleaning of our own.
page_text = re.sub('<!--.+-->',' ', readability_html)
page_text = re.sub('<svg.+</svg>',' ', page_text)
page_text = re.sub('<.+?>', ' ', page_text)
page_text = re.sub('[\n|\r]', ' ', page_text)
page_text = re.sub('\s+', ' ', page_text)
page_text = unicodedata.normalize('NFKD',html.unescape(page_text.strip()))
# postgres can't handle nulls
page_text = page_text.replace('\x00','NULL_REPLACED_FOR_PSQL')
# return the id
return self.sql_driver.add_page_text({
'text' : page_text.replace('\x00',' '),
'word_count' : len(page_text.split()),
'readability_source_md5' : readability_source_md5
})
# store_page_text
# OutputStore
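A rough usage sketch of the class above (the db name is hypothetical, and browser_output stands for a full capture dict as produced by the webxray browser drivers):
store = OutputStore('wbxr_demo', 'sqlite')
result = store.store_scan({
    'browser_output' : browser_output,  # capture dict from a browser driver
    'client_id'      : 'client_1',
    'crawl_id'       : 1,
    'crawl_timestamp': None,
    'crawl_sequence' : 0,
})
if result['success']:
    print('stored page with id', result['page_id'])
store.close()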
|
thezedwards/webXray
|
webxray/OutputStore.py
|
OutputStore.py
|
py
| 43,016 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "webxray.Utilities.Utilities",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "webxray.ParseURL.ParseURL",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "webxray.SQLiteDriver.SQLiteDriver",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "webxray.PostgreSQLDriver.PostgreSQLDriver",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 259,
"usage_type": "name"
},
{
"api_name": "re.match",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 503,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 555,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 555,
"usage_type": "name"
},
{
"api_name": "re.match",
"line_number": 596,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 648,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 653,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 676,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 682,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 701,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 701,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 759,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 759,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 770,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 770,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 819,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 819,
"usage_type": "name"
},
{
"api_name": "hashlib.md5",
"line_number": 870,
"usage_type": "call"
},
{
"api_name": "lxml.html.html.fromstring",
"line_number": 942,
"usage_type": "call"
},
{
"api_name": "lxml.html.html",
"line_number": 942,
"usage_type": "attribute"
},
{
"api_name": "lxml.html",
"line_number": 942,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 985,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urlsplit",
"line_number": 985,
"usage_type": "call"
},
{
"api_name": "textstat.textstat.textstat.flesch_reading_ease",
"line_number": 1047,
"usage_type": "call"
},
{
"api_name": "textstat.textstat.textstat",
"line_number": 1047,
"usage_type": "name"
},
{
"api_name": "textstat.textstat.textstat.flesch_kincaid_grade",
"line_number": 1051,
"usage_type": "call"
},
{
"api_name": "textstat.textstat.textstat",
"line_number": 1051,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 1090,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 1100,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 1125,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 1126,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 1127,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 1128,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 1129,
"usage_type": "call"
},
{
"api_name": "unicodedata.normalize",
"line_number": 1130,
"usage_type": "call"
},
{
"api_name": "html.unescape",
"line_number": 1130,
"usage_type": "call"
}
] |
26625288006
|
from decimal import Decimal
from django import template
from livesettings import config_value
from product.utils import calc_discounted_by_percentage, find_best_auto_discount
from tax.templatetags import satchmo_tax
register = template.Library()
def sale_price(product):
"""Returns the sale price, including tax if that is the default."""
if config_value('TAX', 'DEFAULT_VIEW_TAX'):
return taxed_sale_price(product)
else:
return untaxed_sale_price(product)
register.filter('sale_price', sale_price)
def untaxed_sale_price(product):
"""Returns the product unit price with the best auto discount applied."""
discount = find_best_auto_discount(product)
price = product.unit_price
if discount and discount.valid_for_product(product):
price = calc_discounted_by_percentage(price, discount.percentage)
return price
register.filter('untaxed_sale_price', untaxed_sale_price)
def taxed_sale_price(product):
"""Returns the product unit price with the best auto discount applied and taxes included."""
taxer = satchmo_tax._get_taxprocessor()
price = untaxed_sale_price(product)
price = price + taxer.by_price(product.taxClass, price)
return price
register.filter('taxed_sale_price', taxed_sale_price)
def discount_cart_total(cart, discount):
"""Returns the discounted total for this cart, with tax if that is the default."""
if config_value('TAX', 'DEFAULT_VIEW_TAX'):
return taxed_discount_cart_total(cart, discount)
else:
return untaxed_discount_cart_total(cart, discount)
register.filter('discount_cart_total', discount_cart_total)
def untaxed_discount_cart_total(cart, discount):
"""Returns the discounted total for this cart"""
total = Decimal('0.00')
for item in cart:
total += untaxed_discount_line_total(item, discount)
return total
register.filter('untaxed_discount_cart_total', untaxed_discount_cart_total)
def taxed_discount_cart_total(cart, discount):
"""Returns the discounted total for this cart with taxes included"""
total = Decimal('0.00')
for item in cart:
total += taxed_discount_line_total(item, discount)
return total
register.filter('taxed_discount_cart_total', taxed_discount_cart_total)
def discount_line_total(cartitem, discount):
"""Returns the discounted line total for this cart item, including tax if that is the default."""
if config_value('TAX', 'DEFAULT_VIEW_TAX'):
return taxed_discount_line_total(cartitem, discount)
else:
return untaxed_discount_line_total(cartitem, discount)
register.filter('discount_line_total', discount_line_total)
def untaxed_discount_line_total(cartitem, discount):
"""Returns the discounted line total for this cart item"""
price = cartitem.line_total
if discount and discount.valid_for_product(cartitem.product):
price = calc_discounted_by_percentage(price, discount.percentage)
return price
register.filter('untaxed_discount_line_total', untaxed_discount_line_total)
def taxed_discount_line_total(cartitem, discount):
"""Returns the discounted line total for this cart item with taxes included."""
price = untaxed_discount_line_total(cartitem, discount)
taxer = satchmo_tax._get_taxprocessor()
price = price + taxer.by_price(cartitem.product.taxClass, price)
return price
register.filter('taxed_discount_line_total', taxed_discount_line_total)
def discount_price(product, discount):
"""Returns the product price with the discount applied, including tax if that is the default.
Ex: product|discount_price:sale
"""
if config_value('TAX', 'DEFAULT_VIEW_TAX'):
return taxed_discount_price(product, discount)
else:
return untaxed_discount_price(product, discount)
register.filter('discount_price', discount_price)
def untaxed_discount_price(product, discount):
"""Returns the product price with the discount applied.
Ex: product|discount_price:sale
"""
up = product.unit_price
if discount and discount.valid_for_product(product):
pcnt = calc_discounted_by_percentage(up, discount.percentage)
return pcnt
else:
return up
register.filter('untaxed_discount_price', untaxed_discount_price)
def taxed_discount_price(product, discount):
"""Returns the product price with the discount applied, and taxes included.
Ex: product|discount_price:sale
"""
price = untaxed_discount_price(product, discount)
taxer = satchmo_tax._get_taxprocessor()
return price + taxer.by_price(product.taxClass, price)
register.filter('taxed_discount_price', taxed_discount_price)
def discount_ratio(discount):
"""Returns the discount as a ratio, making sure that the percent is under 1"""
pcnt = discount.percentage
if pcnt > 1:
pcnt = pcnt/100
return 1-pcnt
register.filter('discount_ratio', discount_ratio)
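# Worked example (hypothetical values): a discount.percentage of 25 is normalized
# to 0.25, so discount_ratio returns 0.75 -- the fraction of the price still paid.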
def discount_saved(product, discount):
"""Returns the amount saved by the discount, including tax if that is the default."""
if config_value('TAX', 'DEFAULT_VIEW_TAX'):
return taxed_discount_saved(product, discount)
else:
return untaxed_discount_saved(product, discount)
register.filter('discount_saved', discount_saved)
def untaxed_discount_saved(product, discount):
"""Returns the amount saved by the discount"""
if discount and discount.valid_for_product(product):
price = product.unit_price
discounted = untaxed_discount_price(product, discount)
saved = price - discounted
cents = Decimal("0.01")
return saved.quantize(cents)
else:
return Decimal('0.00')
register.filter('untaxed_discount_saved', untaxed_discount_saved)
def taxed_discount_saved(product, discount):
"""Returns the amount saved by the discount, after applying taxes."""
if discount and discount.valid_for_product(product):
price = product.unit_price
discounted = taxed_discount_price(product, discount)
saved = price - discounted
cents = Decimal("0.01")
return saved.quantize(cents)
else:
return Decimal('0.00')
register.filter('taxed_discount_saved', taxed_discount_saved)
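# Sketch of template usage for these filters (assumes a `sale` Discount object in
# the template context; the variable names are illustrative, not from this file):
#   {{ product|discount_price:sale }}   -> discounted price, taxed per DEFAULT_VIEW_TAX
#   {{ product|discount_saved:sale }}   -> amount saved, quantized to cents
#   {{ cart|discount_cart_total:sale }} -> discounted total for the whole cart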
|
dokterbob/satchmo
|
satchmo/apps/product/templatetags/satchmo_discounts.py
|
satchmo_discounts.py
|
py
| 6,222 |
python
|
en
|
code
| 30 |
github-code
|
6
|
[
{
"api_name": "django.template.Library",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "livesettings.config_value",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "product.utils",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "product.utils",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "product.utils.find_best_auto_discount",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "product.utils",
"line_number": 20,
"usage_type": "argument"
},
{
"api_name": "product.utils.unit_price",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "product.utils",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "product.utils",
"line_number": 23,
"usage_type": "argument"
},
{
"api_name": "product.utils.calc_discounted_by_percentage",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tax.templatetags.satchmo_tax._get_taxprocessor",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tax.templatetags.satchmo_tax",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "product.utils",
"line_number": 33,
"usage_type": "argument"
},
{
"api_name": "product.utils.taxClass",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "product.utils",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "livesettings.config_value",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "livesettings.config_value",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "product.utils.calc_discounted_by_percentage",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "tax.templatetags.satchmo_tax._get_taxprocessor",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "tax.templatetags.satchmo_tax",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "livesettings.config_value",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "product.utils",
"line_number": 105,
"usage_type": "argument"
},
{
"api_name": "product.utils",
"line_number": 107,
"usage_type": "argument"
},
{
"api_name": "product.utils.unit_price",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "product.utils",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "product.utils",
"line_number": 118,
"usage_type": "argument"
},
{
"api_name": "product.utils.calc_discounted_by_percentage",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "product.utils",
"line_number": 131,
"usage_type": "argument"
},
{
"api_name": "tax.templatetags.satchmo_tax._get_taxprocessor",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "tax.templatetags.satchmo_tax",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "product.utils.taxClass",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "product.utils",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "livesettings.config_value",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "product.utils",
"line_number": 150,
"usage_type": "argument"
},
{
"api_name": "product.utils",
"line_number": 152,
"usage_type": "argument"
},
{
"api_name": "product.utils",
"line_number": 160,
"usage_type": "argument"
},
{
"api_name": "product.utils.unit_price",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "product.utils",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "product.utils",
"line_number": 162,
"usage_type": "argument"
},
{
"api_name": "decimal.Decimal",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "product.utils",
"line_number": 174,
"usage_type": "argument"
},
{
"api_name": "product.utils.unit_price",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "product.utils",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "product.utils",
"line_number": 176,
"usage_type": "argument"
},
{
"api_name": "decimal.Decimal",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 181,
"usage_type": "call"
}
] |
45636612723
|
import pytest
from page_objects.sign_in_page_object import SignInPage
from utils.read_excel import ExcelReader
@pytest.mark.usefixtures("setup")
class TestRegistration():
@pytest.mark.parametrize("reg_data", ExcelReader.get_reg_data())
def test_registration_initial_form(self, reg_data):
sign_in_page = SignInPage(self.driver)
sign_in_page.open_sign_in_page()
sign_in_page.open_registration_form(reg_data.email)
assert sign_in_page.is_register_button()
# def test_registration_main_form(self):
|
mcwilk/Selenium_automation
|
tests/registration_test.py
|
registration_test.py
|
py
| 544 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "page_objects.sign_in_page_object.SignInPage",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "utils.read_excel.ExcelReader.get_reg_data",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "utils.read_excel.ExcelReader",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pytest.mark.usefixtures",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 8,
"usage_type": "attribute"
}
] |
16294942824
|
import functools
import os
import re
import requests
import csv
import sys
from datetime import time, timedelta
import argparse
#print(response.json())
class event_type:
GOAL = 0
PENALTY = 1
ASSIST = 2
class game_event:
def toPeriod(self, int_period):
int_period = int(int_period)
if int_period == 1:
return str(int_period) + 'st'
elif int_period == 2:
return str(int_period) + 'nd'
elif int_period == 3:
return str(int_period) + 'rd'
else:
return str(int_period) + 'th'
def __init__(self, id, name, start_time, end_time, period, participant, partNumber, event_type, penalty_duration = 0, event_subtype = ''):
self.id = id
self.name = name
self.participant = participant
self.partNumber = partNumber
self.start_time = start_time
self.end_time = end_time
self.period = self.toPeriod(period)
self.event_type = event_type
self.event_subtype = event_subtype
self.penalty_duration = penalty_duration
def score_sort(item1, item2):
if item1.period < item2.period or (item1.period == item2.period and item1.start_time >= item2.start_time):
return -1
else:
return 1
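# Note on score_sort: periods are ordinal strings ('1st' < '2nd' < '3rd'), which
# orders regulation periods correctly, and the game clock counts down, so a larger
# start_time is the earlier event within a period.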
class score_track:
def __init__(self):
self.scores_ = []
def add_score(self, score):
self.scores_.append(score)
self.scores_ = sorted(self.scores_, key=functools.cmp_to_key(score_sort))
def score_str(self, period, time, team1, team2):
tally = {team1: 0, team2: 0}
for score in filter(lambda x: x.event_type == event_type.GOAL, self.scores_):
if score.period > period or (score.period == period and score.start_time < time):
continue
tally[score.name] += 1
vals = list(tally.values())
return str(vals[0]) + ' -- ' + str(vals[1])
def collectGameTime(dateObj):
return timedelta(minutes=int(dateObj['minutes']), seconds=int(dateObj['seconds']))
def computePenalty(start, dur):
if start.seconds <= dur.seconds:
return timedelta(minutes=0, seconds=0)
return start - dur
def obtainGoalCode(dict):
if dict['isPowerplay']:
return 'PPG'
elif dict['isShorthanded']:
return 'SHG'
elif dict['isEmptyNet']:
return 'ENG'
elif dict['isPenaltyShot']:
return 'PSG'
else:
return 'REG'
def writeGameToFile(hockey_csv, response, date):
rj = response.json()
idTeamName = {}
out_writer = csv.writer(hockey_csv)
for team in rj['teams']:
idTeamName[team['id']] = team['name']
teamNames = list(idTeamName.values())
scores = score_track()
for goal in rj['goals']:
scores.add_score(game_event(goal['teamId'], idTeamName[goal['teamId']], collectGameTime(goal['gameTime']), collectGameTime(goal['gameTime']), goal['gameTime']['period'], goal['participant']['fullName'], goal['participant']['number'], event_type.GOAL, 0, obtainGoalCode(goal)))
for assist in goal['assists']:
scores.add_score(game_event(goal['teamId'], idTeamName[goal['teamId']], collectGameTime(goal['gameTime']), collectGameTime(goal['gameTime']), goal['gameTime']['period'], assist['fullName'], assist['number'], event_type.ASSIST, 0, obtainGoalCode(goal)))
for pen in rj['penalties']:
pen_period = int(pen['gameTime']['period'])
pen_starttime = collectGameTime(pen['gameTime'])
pen_endtime = pen_starttime
pen_duration = 0
if 'description' in pen['duration']:
pen_duration = int(re.findall(r"\d+", pen['duration']['description'])[0])
pen_endtime = computePenalty(collectGameTime(pen['gameTime']), timedelta(minutes=pen_duration))
scores.add_score(game_event(pen['teamId'], idTeamName[pen['teamId']], pen_starttime, pen_endtime , pen_period, pen['participant']['fullName'], pen['participant']['number'], event_type.PENALTY, pen_duration, pen['infraction']))
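# If the penalty runs past the end of the period (the clock counts down to 0:00),
# carry the remaining minutes into the next period starting from the 20:00 mark.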
if pen_starttime.total_seconds() < pen_duration * 60 and pen_period < 3:
carryover_start = timedelta(minutes=20, seconds=0)
carryover_duration = timedelta(minutes=pen_duration) - pen_starttime
carryover_endtime = carryover_start - carryover_duration
scores.add_score(game_event(pen['teamId'], idTeamName[pen['teamId']], carryover_start, carryover_endtime, pen_period + 1, pen['participant']['fullName'], pen['participant']['number'], event_type.PENALTY, pen_duration, pen['infraction']))
for score in scores.scores_:
if score.event_type == event_type.GOAL:
out_writer.writerow([teamNames[0], teamNames[1], date, 'GOAL', score.event_subtype, score.participant, score.partNumber, score.name, score.start_time, score.end_time, score.period, 0, scores.score_str(score.period, score.start_time, teamNames[0], teamNames[1])])
if score.event_type == event_type.ASSIST:
out_writer.writerow([teamNames[0], teamNames[1], date, 'ASSIST', score.event_subtype, score.participant, score.partNumber, score.name, score.start_time, score.end_time, score.period, 0, scores.score_str(score.period, score.start_time, teamNames[0], teamNames[1])])
if score.event_type == event_type.PENALTY:
out_writer.writerow([teamNames[0], teamNames[1], date, 'PENALTY', score.event_subtype, score.participant, score.partNumber, score.name, score.start_time, score.end_time, score.period, score.penalty_duration, scores.score_str(score.period, score.start_time, teamNames[0], teamNames[1])])
def main():
parser = argparse.ArgumentParser('Collect data from VIAHA webpage and dump to csv spreadsheets.')
parser.add_argument('-s','--separate', dest='separate', action='store_const', const=True, default=False, help='If enabled, games will be split into separate files.')
parser.add_argument('scheduleId', type=int, nargs='?', help='Provide the ID of the schedule for the games you want to collect.')
parser.add_argument('teamId', type=int, nargs='?', help='Provide the team you are interested in from the provided schedule')
args=parser.parse_args()
if args.scheduleId is None or args.teamId is None:
raise Exception('Cannot run script without a schedule and team ID')
scheduleUrl = 'https://api.hisports.app/api/games'
paramStr = '?filter={{"where":{{"and":[{{"scheduleId":{}}},{{"or":[{{"homeTeamId":{}}},{{"awayTeamId":{}}}]}}]}},"include":["arena","schedule","group","teamStats"],"order":["startTime ASC","id DESC"],"limit":null,"skip":null}}'.format(args.scheduleId, args.teamId, args.teamId)
headers = {'authorization' : 'API-Key f75fa549e81421f19dc929bc91f88820b6d09421'}
sess = requests.Session()
req = requests.Request('GET', scheduleUrl, headers=headers)
prep = req.prepare()
prep.url += paramStr
resp = sess.send(prep)
collectfilename = 'games-season-{}-{}-{}.csv'.format(resp.json()[0]['seasonId'], args.scheduleId, args.teamId)
if not args.separate:
if os.path.isfile(collectfilename):
os.remove(collectfilename)
with open(collectfilename, 'a') as file:
out_writer = csv.writer(file)
out_writer.writerow(['Home Team', 'Away Team', 'Date', 'Event', 'Event Type', 'Player Name', 'Player Number', 'Player Team', 'Start Time', 'End Time', 'Period', 'Penalty Mins', 'Score'])
for game in resp.json():
gameUrl = 'https://api.hisports.app/api/games/{}/boxScore'.format(game['id'])
req = requests.Request('GET', gameUrl, headers=headers)
resp = sess.send(req.prepare())
if args.separate:
with open('game-{}-{}-{}.csv'.format(game['date'], args.scheduleId, args.teamId), 'w+') as file:
out_writer = csv.writer(file)
out_writer.writerow(['Home Team', 'Away Team', 'Date', 'Event', 'Event Type', 'Player Name', 'Player Number', 'Player Team', 'Start Time', 'End Time', 'Period', 'Penalty Mins', 'Score'])
writeGameToFile(file, resp, game['date'])
else:
with open(collectfilename, 'a') as file:
writeGameToFile(file, resp, game['date'])
if __name__ == '__main__':
main()
|
SolidSnackDrive/hockepy_viaha
|
hockey.py
|
hockey.py
|
py
| 8,307 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "functools.cmp_to_key",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "datetime.time",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "requests.Session",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "requests.Request",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "requests.Request",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 159,
"usage_type": "call"
}
] |
20859673703
|
import pymunk
import pymunk.pygame_util
import pygame
from classes.ammo.ammo_box import AmmoBox
from classes.coin.coin import Coin
import os
import random
import math
from functions.math import get_xys, get_distance
class Enemy:
def __init__(self, game, space, radius, pos):
self.game = game
self.body = pymunk.Body()
self.body.position = pos
self.radius = radius
self.image = pygame.transform.scale(self.original_image, (radius * 2, radius * 2))
self.rect = self.image.get_rect()
self.shape = pymunk.Circle(self.body, radius)
self.shape.collision_type = game.collision_types["ENEMY"]
self.shape.elasticity = 0.8
self.shape.friction = 1
self.shape.mass = radius / 10
self.shape.color = self.color
space.add(self.body, self.shape)
self.health_bar = pygame.surface.Surface((120, 30))
self.health_bar_size = (120, 30)
self.s = 3
# IMPLEMENTING PATH FINDING
# self.graph = graph
self.path = []
self.create_path()
# print(len(self.path))
def create_path(self):
routes = self.game.graph.A_star(
self.game.graph.map[round(self.body.position[0] // self.game.TILE_SIZE)][
round(self.body.position[1] // self.game.TILE_SIZE)],
self.game.graph.map[round(self.game.player.body.position[0] // self.game.TILE_SIZE)][
round(self.game.player.body.position[1] // self.game.TILE_SIZE)]
)
node = routes[self.game.graph.map[round(self.game.player.body.position[0] // self.game.TILE_SIZE)][
round(self.game.player.body.position[1] // self.game.TILE_SIZE)]]
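# walk the predecessor map back from the player's tile to the enemy's own tile,
# then drop the last node (the enemy's current tile) so path[-1] is the next waypoint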
while node is not None:
self.path.append(node)
node = routes[node]
self.path.pop()
def move(self):
# for item in self.path:
# x,y = self.game.get_position_by_player((item.x * self.game.TILE_SIZE,
# item.y * self.game.TILE_SIZE))
# pygame.draw.rect(self.game.window, (0, 255, 0),
# (round(x), round(y), self.game.TILE_SIZE, self.game.TILE_SIZE))
self.create_path()
if len(self.path) <= 0:
return
xy = (self.path[-1].x * self.game.TILE_SIZE + self.game.TILE_SIZE / 2, self.path[-1].y * self.game.TILE_SIZE + self.game.TILE_SIZE / 2)
xs, ys = get_xys(self.body.position, xy)
self.body.position = (self.body.position.x + xs * self.s, self.body.position.y + ys * self.s)
if get_distance(self.body.position, xy) < self.radius:
self.path.pop()
def update(self, game):
self.rect.center = game.get_position_by_player(self.body.position)
new_rect = self.image.get_rect(center=self.rect.center)
game.window.blit(self.image, new_rect)
def show_hp(self, game):
pygame.draw.rect(self.health_bar, (0, 0, 0), (0, 0, self.health_bar_size[0], self.health_bar_size[1]))
pygame.draw.rect(self.health_bar, (35, 189, 26), (2, 2, (self.health_bar_size[0] - 4) * (self.hp / self.max_hp),
self.health_bar_size[1] - 4))
x, y = game.get_position_by_player(self.body.position)
game.window.blit(self.health_bar, (x - 60, y - self.radius * 2 - 10))
# Call on delete
def __del__(self):
if random.random() > 0.6:
ammo_type = ["light", "medium"][random.randrange(2)]
self.game.ground_items.append(AmmoBox(self.game, self.body.position, ammo_type, random.randrange(1, 6)))
for i in range(2):
self.game.coins.append(Coin(self.game,
(self.body.position.x + 100 * (random.random() - 0.5),
self.body.position.y + (100 * random.random() * 0.5))))
self.game.space.remove(self.body, self.shape)
def special_attack(self):
pass
class BasicEnemy(Enemy):
color = (255, 250, 0, 100)
original_image = pygame.image.load(os.path.join("imgs", "basic.png"))
collision_damage = 5
spawn_cost = 2
def __init__(self, game, space, radius, pos):
super().__init__(game, space, radius, pos)
self.max_hp = 15
self.hp = 15
|
matej-kotrba/python-survival-game
|
classes/enemies/basic.py
|
basic.py
|
py
| 5,383 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "pymunk.Body",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.transform.scale",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pymunk.Circle",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.surface.Surface",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pygame.surface",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "functions.math.get_xys",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "functions.math.get_distance",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "random.random",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "classes.ammo.ammo_box.AmmoBox",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "classes.coin.coin.Coin",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "pygame.image.load",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 109,
"usage_type": "attribute"
}
] |
35987738740
|
import torch
from tsf_baselines.modeling import build_network
ALGORITHMS = [
'BasicTransformerEncDec'
]
def get_algorithm_class(algorithm_name):
"""Return the algorithm class with the given name."""
if algorithm_name not in globals():
raise NotImplementedError("Algorithm not found: {}".format(algorithm_name))
print('algorithm_name = {}'.format(algorithm_name))
return globals()[algorithm_name]
def build_algorithm(cfg):
algorithm = get_algorithm_class(cfg.ALGORITHM.NAME)(cfg)
return algorithm
class Algorithm(torch.nn.Module):
"""
A subclass of Algorithm implements a time series forecasting algorithm.
Subclasses should implement the following:
- update()
- predict()
"""
def __init__(self, cfg):
super(Algorithm, self).__init__()
self.cfg = cfg
self.device = self._acquire_device()
def _acquire_device(self):
# print('self.cfg = {}'.format(self.cfg))
if self.cfg.MODEL.USE_GPU:
# os.environ["CUDA_VISIBLE_DEVICES"] = str(self.cfg.MODEL.DEVICE) if not self.args.use_multi_gpu else self.args.devices
device = torch.device('cuda:{}'.format(self.cfg.MODEL.DEVICE))
print('Use GPU: cuda:{}'.format(self.cfg.MODEL.DEVICE))
else:
device = torch.device('cpu')
print('Use CPU')
return device
def update(self, minibatches):
"""
Perform one update step, given a list of (x, y) tuples for all
environments.
"""
raise NotImplementedError
def predict(self, x):
raise NotImplementedError
class BasicTransformerEncDec(Algorithm):
def __init__(self, cfg):
super(BasicTransformerEncDec, self).__init__(cfg)
self.cfg = cfg
# Backbone
self.model = build_network(cfg)
# Loss function
self.loss_mse = torch.nn.MSELoss()
# Optimizer
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.cfg.SOLVER.BASE_LR)
# other declarations
pass
def _process_one_batch(self, dataset_object, batch_x, batch_y, batch_x_mark, batch_y_mark):
batch_x = batch_x.float().to(self.device)
batch_y = batch_y.float()
batch_x_mark = batch_x_mark.float().to(self.device)
batch_y_mark = batch_y_mark.float().to(self.device)
# decoder input
if self.cfg.DATASETS.PADDING == 0:
dec_inp = torch.zeros([batch_y.shape[0], self.cfg.MODEL.PRED_LEN, batch_y.shape[-1]]).float()
elif self.cfg.DATASETS.PADDING == 1:
dec_inp = torch.ones([batch_y.shape[0], self.cfg.MODEL.PRED_LEN, batch_y.shape[-1]]).float()
dec_inp = torch.cat([batch_y[:, :self.cfg.MODEL.LABEL_LEN, :], dec_inp], dim=1).float().to(self.device)
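# dec_inp is the decoder input: the known LABEL_LEN segment of the target followed
# by PRED_LEN zero/one placeholder steps that the model must fill in.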
# encoder - decoder
if self.cfg.MODEL.OUTPUT_ATTENTION:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
else:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
if self.cfg.DATASETS.INVERSE:
outputs = dataset_object.inverse_transform(outputs)
f_dim = -1 if self.cfg.DATASETS.FEATURES == 'MS' else 0
batch_y = batch_y[:, -self.cfg.MODEL.PRED_LEN:, f_dim:].to(self.device)
return outputs, batch_y
def update(self, dataset_object, batch_x, batch_y, batch_x_mark, batch_y_mark):
outputs, batch_y = self._process_one_batch(dataset_object, batch_x, batch_y, batch_x_mark, batch_y_mark)
loss = self.loss_mse(outputs, batch_y)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return {'loss': loss.item()}
def predict(self, dataset_object, batch_x, batch_y, batch_x_mark, batch_y_mark):
outputs, batch_y = self._process_one_batch(dataset_object, batch_x, batch_y, batch_x_mark, batch_y_mark)
return outputs, batch_y
|
zhaoyang10/time-series-forecasting-baselines
|
tsf_baselines/algorithm/build.py
|
build.py
|
py
| 3,942 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "torch.nn",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "tsf_baselines.modeling.build_network",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.Adam",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "torch.zeros",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 82,
"usage_type": "call"
}
] |
69894678589
|
import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col, monotonically_increasing_id
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format
from pyspark.sql import types as t
# reading in the AWS config information from the dl.cfg file
config = configparser.ConfigParser()
config.read('dl.cfg')
os.environ['AWS_ACCESS_KEY_ID'] = config['AWS']['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] = config['AWS']['AWS_SECRET_ACCESS_KEY']
#def create_spark_session():
# spark = SparkSession \
# .builder \
# .config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
# .getOrCreate()
# print("spark session created")
# return spark
def create_spark_session():
"""
This creates a Spark session, specifying the hadoop package to use, the S3 buckets and reads in the AWS ID and key as
environment variables
Parameters:
None
Returns:
Spark session object
"""
spark = SparkSession \
.builder \
.config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
.config("spark.hadoop.fs.s3a.impl","org.apache.hadoop.fs.s3a.S3AFileSystem") \
.config("spark.hadoop.fs.s3a.awsAccessKeyId", os.environ['AWS_ACCESS_KEY_ID']) \
.config("spark.hadoop.fs.s3a.awsSecretAccessKey", os.environ['AWS_SECRET_ACCESS_KEY']) \
.getOrCreate()
print("spark session created")
return spark
def process_song_data(spark, input_data, output_data):
"""
This reads in song and artist data as csv files from the udacity s3 bucket as a spark dataframe
and then uses spark_sql to insert select columns into a parquet file format back into the user-generated S3 bucket
Parameters:
spark: A spark session object
input_data: A string representing the udacity-generated s3 bucket root
output_data: A string representing the user-generated s3 bucket root
Output:
No output returned: but two parquet files written to user-generated s3 bucket
"""
# get filepath to song data file
song_data = input_data +"song_data/*/*/*/*.json"
print(song_data)
# read song data file
print("reading in song data")
df = spark.read.json(song_data)
# extract columns to create songs table
df.createOrReplaceTempView("songs_table_df")
songs_table = spark.sql("""
SELECT song_id, title, artist_id,year, duration
FROM songs_table_df
ORDER by song_id
""")
# write songs table to parquet files partitioned by year and artist
songs_table_path = output_data + "songs_table.parquet"
print("read to songs table to parquet format")
songs_table.write.mode("overwrite").partitionBy("year","artist_id").parquet(songs_table_path)
# extract columns to create artists table
df.createOrReplaceTempView("artist_table_df")
artists_table = spark.sql( """
SELECT artist_id AS artist_id,
artist_name AS name,
artist_location AS location,
artist_latitude AS latitude,
artist_longitude AS longitude
FROM artist_table_df
""")
# write artists table to parquet files
artists_table_path = output_data + "artists_table.parquet"
print("write to artist table")
artists_table.write.mode("overwrite").parquet(artist_table_path)
def process_log_data(spark, input_data, output_data):
"""
This reads in log_data from the udacity-generated s3 bucket, where the data relates to songs played, and this is written to a parquet file.
It then takes a subset of the log_data, creates time- and date- stamps by using a udf with lambdas, and this is written to a parquet file.
Song data is then read into a data frame and joined with log data to create a joined table, which is written to a parquet file.
Parameters:
spark: A spark session object
input_data: A string representing the udacity-generated s3 bucket root
output_data: A string representing the user-generated s3 bucket root
Returns:
users_table: A spark dataframe holding user information
time_table: A spark dataframe holding time information
songplays_table: A spark dataframe holding songplay information
"""
# get filepath to log data file
log_data = input_data + "log_data/*.json"
print("reading in log data")
# read log data file
df_log = spark.read.json(log_data)
# filter by actions for song plays
df_log = df_log.filter(df_log.page == 'NextSong')
# extract columns for users table
df_log.createOrReplaceTempView("users_table_df")
users_table = spark.sql("""
SELECT DISTINCT userId AS userid,
firstName AS first_name,
lastName AS last_name,
gender,
level
FROM users_table_df
ORDER BY last_name
""")
print("writing to parquet format")
# write users table to parquet files
users_table_path = output_data + "users_table.parquet"
users_table.write.mode("overwrite").parquet(users_table_path)
# create timestamp column from the original epoch-millisecond ts column
get_timestamp = udf(lambda x: datetime.fromtimestamp(x / 1000.0), t.TimestampType())
df_log = df_log.withColumn("timestamp", get_timestamp("ts"))
# create datetime string column from the original ts column
get_datetime = udf(lambda x: datetime.fromtimestamp(x / 1000.0).strftime('%Y-%m-%d %H:%M:%S'))
df_log = df_log.withColumn("datetime", get_datetime("ts"))
# extract columns to create time table
df_log.createOrReplaceTempView("time_table_df")
time_table = spark.sql("""SELECT DISTINCT
datetime as start_time,
hour(timestamp) as hour,
day(timestamp) as day,
weekofyear(timestamp) as week,
month(timestamp) as month,
year(timestamp) as year,
dayofweek(timestamp) as weekday
FROM time_table_df
ORDER BY start_time
""")
# write time table to parquet files partitioned by year and month
time_table_path = output_data + "time_table.parquet"
time_table.write.mode("overwrite").partitionBy("year","month").parquet(time_table_path)
# read in song data to use for songplays table
song_data = input_data + "song_data/*/*/*/*.json"
song_df = spark.read.json(song_data)
# join log and song dataframes on artist name and song title
df_log_song_df_joined = df_log.join(song_df, (df_log.artist == song_df.artist_name) & (df_log.song == song_df.title))
# extract columns from joined song and log datasets to create songplays table
df_log_song_df_joined.createOrReplaceTempView("songplays_table_df")
songplays_table = spark.sql("""
SELECT songplay_id AS songplay_id,
timestamp AS start_time,
userId AS user_id,
level AS level,
song_id AS song_id,
artist_id AS artist_id,
sessionId AS session_id,
location AS location,
userAgent AS user_agent
FROM songplays_table_DF
ORDER BY (user_id, session_id)
""")
# write songplays table to parquet files partitioned by year and month
songplays_table_path = output_data + "songplays_table.parquet"
songplays_table.write.mode("overwrite").partitionBy("year","month").parquet(songplays_table_path)
return users_table, time_table, songplays_table
def main():
"""
Main function for the code.
It creates a spark session, defines the paths of the input and output buckets, and calls the two functions
process_song_data and process_log_data
"""
spark = create_spark_session()
input_data = "s3a://udacity-dend/"
output_data = "s3a://udacity-lake/output_data/"
process_song_data(spark, input_data, output_data)
process_log_data(spark, input_data, output_data)
if __name__ == "__main__":
main()
|
greggwilliams58/data-lake
|
etl.py
|
etl.py
|
py
| 8,843 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "configparser.ConfigParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession.builder.config",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.functions.udf",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.udf",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 155,
"usage_type": "name"
}
] |
44137255205
|
#pip3 install wikipedia-api
#pip3 install elasticsearch
#pip3 install nltk
#pip3 install gensim
#pip3 install pandas
#pip3 install tabulate
import pickle
import wikipediaapi
from model.WikiPage import WikiPage
from elasticsearch import Elasticsearch
import json
from time import sleep
import gensim
from gensim import corpora, models
import nltk
from nltk.stem import WordNetLemmatizer, SnowballStemmer
import pandas as pd
from tabulate import tabulate
nltk.download('stopwords')
nltk.download('wordnet')
wiki = wikipediaapi.Wikipedia('en', extract_format=wikipediaapi.ExtractFormat.WIKI)
index = "ir_project"
es = Elasticsearch()
################################################# PAGES DOWNLOAD AND INDEXING ##########################################
def getPagesfromCategory(category, limit):
pages = []
count = 0
for el in wiki.page(category).categorymembers.values():
if el.namespace == wikipediaapi.Namespace.MAIN:
pages.append(WikiPage(el))
count += 1
print("{}) {} ".format(count, el))
if count >= limit:
break
print(category + ": download DONE")
return pages
def setNormalizedCitations(pages):
numbers = []
for page in pages:
numbers.append(page.citations)
maximum = max(numbers)
for page in pages:
page.setCitationsNorm( round((page.citations - 0) / (maximum - 0), 4) )
return pages
def getAllPages(limit):
actors = getPagesfromCategory("Category:Golden Globe Award-winning producers", limit)
guitar_companies = getPagesfromCategory("Category:Guitar manufacturing companies of the United States", limit)
bands = getPagesfromCategory("Category:Grammy Lifetime Achievement Award winners", limit)
pages = setNormalizedCitations(actors + guitar_companies + bands)
# write collection to files
pages_json = []
for page in pages:
pages_json.append(dict(page))
with open('pages.json', 'w') as f:
json.dump(pages_json, f, indent=4)
def createIndex(data):
# read index config from json file
with open('index-config.json') as f:
client_body = json.load(f)
# wipe and create index
if es.indices.exists(index):
es.indices.delete(index=index)
es.indices.create(index=index, ignore=400, body=client_body)
for page in data:
es.index(index=index, id=page["url"].replace(" ", "_"), body=page)
############################################## TOPIC MODELING ##########################################################
def lemmatize_stemming(text):
stemmer = SnowballStemmer('english')
return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))
def preprocess(text):
result = []
for token in gensim.utils.simple_preprocess(text):
if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3:
result.append(lemmatize_stemming(token))
return result
def getTopics(recalculate):
with open("pages.json", "r") as read_file:
data = json.load(read_file)
corpus = [] # list of strings: list of docs
for page in data:
corpus.append(page["abstract"])
processed_docs = [] # list of lists: list of tokenized docs
for doc in corpus:
processed_docs.append(preprocess(doc))
dictionary = gensim.corpora.Dictionary(processed_docs)
dictionary.filter_extremes(no_below=5, keep_n=100000)
if recalculate:
print("Recalculating topics...")
bow_corpus = [dictionary.doc2bow(doc) for doc in processed_docs]
lda_model = gensim.models.LdaMulticore(bow_corpus, num_topics=3, id2word=dictionary, passes=2, workers=2)
with open("lda_model.pk", 'wb') as pickle_file:
pickle.dump(lda_model, pickle_file)
else:
with open("lda_model.pk", 'rb') as pickle_file:
lda_model = pickle.load(pickle_file)
# calculates topic for each document
final_docs = []
for page in data:
document = dictionary.doc2bow(preprocess(page["abstract"]))
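# sort this document's (topic, score) pairs by score, descending, and keep the best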
topic_idx, score = sorted(lda_model[document], key=lambda tup: -1 * tup[1])[0]
page["topic"] = topic_idx
final_docs.append(page)
return(lda_model, final_docs)
################################################## SEARCH ################################################################
def print_results(results):
df = pd.DataFrame(columns=['score', 'title', "citations", "citations_norm", "topic", "url"])
for hit in results['hits']['hits']:
df.loc[len(df)] =[hit['_score'], hit['_source']['title'], hit['_source']['citations'],hit['_source']['citations_norm'], hit['_source']['topic'], hit['_source']['url']]
print(tabulate(df, headers='keys', tablefmt='psql', showindex=False))
def search(query=None):
results = es.search(index=index, body={ "from" : 0, "size" : 12, "query": {"match": query}})
print_results(results)
def search_phrase(query=None):
results = es.search(index=index, body={"query": {"match_phrase": query}})
print_results(results)
def search_fuzzy(query):
results = es.search(index=index, body={"query": {"fuzzy": query}})
print_results(results)
def search_boolean(query):
results = es.search(index=index, body={"query": {"bool": query}})
print_results(results)
def search_with_topic(query, topic):
results = es.search(index=index, body={"query": {"bool": {"must": { "match": query }, "filter": {"term": {"topic": topic}}}}})
print_results(results)
def queries_samples():
print("\nquery: {query: {match: {abstract:is an american pianist}}}")
print("Notes: it returns both alive and dead pianists (is/was) due to the analyzer")
search(query={"abstract":"is an american pianist"})
print("\n\nquery: {query: {match_phrase: {text:was an american pianist}}}")
print("Notes: it returns only dead pianist")
search_phrase(query={"text":"was an american pianist"})
print("\n\nquery: {query: {match_phrase: {text:is an american pianist}}}")
print("Notes: it returns only alive pianist")
search_phrase(query={"text":"is an american pianist"})
print("\n\nquery: {query: {fuzzy: {title: {value: batles}}}}")
print("Notes: it returns \"The Beatles\" despite the misspelling ")
search_fuzzy(query={"title": {"value": "batles"}})
print("\n\nquery: {query: {bool: {must: {match: {abstract: guitarist}},must_not: [{match: {abstract: company}}, {match: {abstract: manufacturer}}],must: {range: {citations_norm: {gt: 0.500}}}}}}")
print("Notes: it return only guitarists that have a lot of citations in wikiepdia")
search_boolean(query={"must": {"match": {"abstract": "guitarist"}},
"must_not": [{"match": {"abstract": "company"}}, {"match": {"abstract": "manufacturer"}}],
"must": {"range": {"citations_norm": {"gt": "0.500"}}}
}
)
print("\n\nquery: {query: {bool: {must: {match: {abstract: guitarist}},must: {match: {text: drugs}}}}}")
print("Notes: it returns all the guitarists that have a relation with drugs")
search_boolean(query={"must": {"match": {"abstract": "guitarist"}},
"must": {"match": {"text": "drugs"}}
}
)
print("\n\nquery: { query: {match: {abstract: philanthropist}}}")
print("Notes: it returns all the philantropist from the corpus. They are all producers")
search(query={"abstract": "philanthropist"})
print("\n\nquery: {query: {match_phrase: {text: philanthropist}}}")
print("Notes: Since i intentionally declare \"philanthropist\" as synonym of \"rock\" in the text_analyzer filter, this query returns rock stars ")
search_phrase(query={"text": "philanthropist"})
print("\n\n")
def menu():
while True:
for idx, topic in model.print_topics(-1):
print('Topic {}: {}'.format(idx, topic))
print("\ninsert a keyword")
q = input()
print("insert topic id")
topic = input()
search_with_topic(query={"abstract": q}, topic=topic)
print("\n\n")
if __name__ == '__main__':
# getAllPages(100)
print("please wait for indexing...")
model, docs = getTopics(False)
createIndex(docs)
sleep(5)
queries_samples()
menu()
|
rAlvaPrincipe/wikipedia-search-engine
|
Wiki.py
|
Wiki.py
|
py
| 8,334 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "nltk.download",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "wikipediaapi.Wikipedia",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "wikipediaapi.ExtractFormat",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "wikipediaapi.Namespace",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "model.WikiPage.WikiPage",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "nltk.stem.SnowballStemmer",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "nltk.stem.WordNetLemmatizer",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "gensim.utils.simple_preprocess",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "gensim.utils",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "gensim.parsing",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "gensim.corpora.Dictionary",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "gensim.corpora",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "gensim.models.LdaMulticore",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "gensim.models",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "pickle.dump",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "tabulate.tabulate",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "model.WikiPage.print_topics",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "model.WikiPage",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "model.WikiPage",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 231,
"usage_type": "call"
}
] |
38520392642
|
#!/usr/bin/python
"""
This is the code to accompany the Lesson 1 (Naive Bayes) mini-project.
Use a Naive Bayes Classifier to identify emails by their authors
authors and labels:
Sara has label 0
Chris has label 1
"""
import sys
import time
sys.path.append("../tools/")
from email_preprocess import preprocess
#imports
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
#########################################################
### your code goes here ###
sample_size_list = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
##Loop to change sample Size
for j in sample_size_list:
features_train, features_test, labels_train, labels_test = preprocess("../tools/word_data_unix.pkl","../tools/email_authors.pkl",j)
loop = [0,1,2,3,4,5,6,7,8,9]
print("Test sample Size:",features_train.size)
##Loop to change the var_smoothing
for i in loop:
num=1/(10)**i
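# num sweeps var_smoothing over 1e0, 1e-1, ..., 1e-9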
gnb = GaussianNB(var_smoothing=num)
time0=time.time()
pred = gnb.fit(features_train, labels_train).predict(features_test)
time1=time.time()
acc=accuracy_score(labels_test,pred)
print("Test sample_Size: ",j," Accuracy for ",num,": ", acc,"Ellapsed time: ",time1-time0)
i=i+1
j=1+1
#########################################################
|
Vkadel/machineLearningNB
|
nb_author_id.py
|
nb_author_id.py
|
py
| 1,524 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "email_preprocess.preprocess",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sklearn.naive_bayes.GaussianNB",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 44,
"usage_type": "call"
}
] |
18132721237
|
from flask import Flask, redirect, render_template, request, url_for, session, flash
from datetime import timedelta
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.secret_key = "hello"
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///users.sqlite3' # Things you have to set up before creating a database
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.permanent_session_lifetime = timedelta(minutes=5) # Deciding the session time
db = SQLAlchemy(app) # creating a database
class users(db.Model):
_id = db.Column("id", db.Integer, primary_key=True)
name = db.Column(db.String(100))
email = db.Column(db.String(100))
def __init__(self, name, email):
self.name = name
self.email = email
@app.route('/')
def home():
return render_template("Home.html")
@app.route("/login", methods=["POST",'GET'])
def login():
if request.method == "POST":
session.permanent = True
user = request.form['nm']
session['user'] = user
# look the user up by name first
found_user = users.query.filter_by(name=user).first()
# if the user exists, store their email in the session; otherwise insert a new record into the database
if found_user:
flash(f"Welcome back {user}!")
session['email'] = found_user.email
else:
flash(f"Hello {user}!, Nice to meet you!!")
usr = users(user ,"")
db.session.add(usr)
db.session.commit()
return redirect(url_for('user_page'))
else:
if "user" in session:
flash("Already Logged in!")
return redirect(url_for('user_page'))
return render_template("login.html")
@app.route('/logout')
def logout():
if 'user' in session:
user = session['user']
flash(f"You have been logged out, {user}!", "info")
session.pop('user', None)
session.pop('email', None)
return redirect(url_for('login'))
@app.route('/user', methods=["POST",'GET'])
def user_page():
email = None
if 'user' in session:
user = session['user']
if request.method == "POST":
email = request.form['email'] # email entered by the user
session['email'] = email # keep it in the session as well
found_user = users.query.filter_by(name=user).first() # look up the user's record
found_user.email = email # update it with the newly entered email
db.session.commit() # commit after every change so it is persisted
flash("Email was saved!!")
else:
if "email" in session:
email = session['email']
return render_template("user_page.html",content=user, email=email)
else:
flash("You are not logged in! ")
return redirect(url_for('login'))
@app.route("/view")
def view():
return render_template("view.html", values=users.query.all())
if __name__ == '__main__':
with app.app_context():
db.create_all()
app.run(debug=True)
|
JayChen1060920909/Projects
|
Login-Logout.py
|
Login-Logout.py
|
py
| 3,056 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "flask.session.permanent",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "flask.session",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "flask.session.pop",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "flask.session.pop",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "flask.flash",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 90,
"usage_type": "call"
}
] |
42940796937
|
from typing import List, Tuple
import networkx as nx
import numpy as np
from matplotlib import pyplot as plt
import time
import copy
from node import Node
def get_graph(node: Node) -> Tuple[nx.Graph, List, List]:
board_size = node.state.shape[0]
G = nx.grid_2d_graph(board_size, board_size)
diagonals = []
for x,y in G:
x2 = x-1
y2 = y+1
if y2 >= board_size or x2 < 0:
continue
edge = ((x, y), (x2,y2))
diagonals.append(edge)
G.add_edges_from(diagonals)
pos = {}
colour_map = []
theta = -(1/4) * np.pi
costheta = np.cos(theta)
sintheta = np.sin(theta)
rotation_matrix = np.array([
[costheta, -sintheta],
[sintheta, costheta]
])
for x,y in G:
coords = (x,y)
pos[coords] = np.dot(rotation_matrix, (y,-x))
if node.state[coords] == 1:
colour_map.append("red")
elif node.state[coords] == -1:
colour_map.append("blue")
else:
colour_map.append("grey")
return G, pos, colour_map
def visualize_hex_node_state(node: Node, done: bool=False) -> None:
G, pos, colour_map = get_graph(node)
nx.draw(G, pos=pos,
node_color=colour_map,
with_labels=True,
node_size=600)
plt.draw()
plt.pause(0.001)
if done:
plt.close()
if __name__ == "__main__":
plt.figure(figsize=(5,5))
plt.ion()
plt.show()
test_state = np.zeros(shape=(7,7))
test_state[0, 1] = 1
test_node = Node(state=test_state)
visualize_hex_node_state(test_node)
new_node = copy.copy(test_node)
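# note: copy.copy is shallow, so new_node shares the same state array as test_node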
new_node.state[0,2] = -1
visualize_hex_node_state(new_node)
|
Mathipe98/IT3105-Projects
|
Project 2/visualizer.py
|
visualizer.py
|
py
| 1,707 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "node.Node",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "node.state",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "networkx.grid_2d_graph",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "node.state",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "node.state",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "networkx.Graph",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "node.Node",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "networkx.draw",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.draw",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pause",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "node.Node",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 63,
"usage_type": "call"
}
] |
70835728188
|
import json
fs = open("G:\python\Analysis"+"\\"+'score.json', encoding='utf-8')
ft = open("G:\python\Analysis"+"\\"+'template.json', encoding='utf-8')
res1 = fs.read()
data = json.loads(res1)
res2 = ft.read()
template = json.loads(res2)
scoreKey = []
templateKey = template.keys()
goal = {}
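# goal maps each user_id to a 0/1 vector: 1 where the user scored 100 on that template case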
for key in data:
user_id = str(key)
cases = data[key]['cases']
cid = []
res = []
for case in cases:
if case["score"] == 100:
cid.append(case['case_id'])
for i in templateKey:
if i in cid:
res.append(1)
else:
res.append(0)
goal[user_id] = res
json_str = json.dumps(goal, indent=4, ensure_ascii=False)
with open("G:\python\Analysis"+"\\"+"flag.json", 'w', encoding='utf-8') as json_file:
json_file.write(json_str)
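To make the expected file shapes concrete, a minimal self-contained sketch with toy data (the user id, case ids and scores below are made up): score.json maps each user to scored cases, template.json fixes the case ordering, and the output is one 0/1 pass vector per user.
import json
data = {"u1": {"cases": [{"case_id": "c1", "score": 100}, {"case_id": "c2", "score": 60}]}}
template = {"c1": None, "c2": None}  # only the key order matters here
goal = {}
for key in data:
    passed = [c["case_id"] for c in data[key]["cases"] if c["score"] == 100]
    goal[str(key)] = [1 if cid in passed else 0 for cid in template.keys()]
print(json.dumps(goal, indent=4, ensure_ascii=False))  # {"u1": [1, 0]}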
|
nju161250023/Analysis
|
createFlag.py
|
createFlag.py
|
py
| 803 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.loads",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 33,
"usage_type": "call"
}
] |
6056862050
|
from datetime import datetime
import csv
def logReceivedGossip(file,gossipID,spreader,audience,awardedSP,targetCitizensSP,receivingAudienceKnownRumours,citizen_list,rumourTarget,sentiment):
now = datetime.now()
date_time = now.strftime("%m/%d/%Y %H:%M:%S:%f")
# get total rumour count
    kt = sum(len(x['knownRumours']) for x in citizen_list.values() if x)  # total rumour count across all citizens
#'time,key,id,spreader,audience,sp,originalsp,audienceKnownRumours,totalRumours,'
with open(file, 'a', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow([str(date_time),str(gossipID),spreader,audience,str(awardedSP),str(targetCitizensSP),str(len(receivingAudienceKnownRumours)),rumourTarget,sentiment,kt])
def logUpdateMessage(message,file,action='a'):
f = open(file, action)
f.write(message)
f.close()
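A minimal usage sketch for logReceivedGossip, with an assumed citizen_list shape inferred from the sum above (all names, ids and numbers here are illustrative):
citizen_list = {
    "alice": {"knownRumours": ["r1", "r2"]},
    "bob": {"knownRumours": ["r1"]},
}
logReceivedGossip(
    file="gossip_log.csv", gossipID=7, spreader="alice", audience="bob",
    awardedSP=3, targetCitizensSP=10, receivingAudienceKnownRumours=["r1"],
    citizen_list=citizen_list, rumourTarget="carol", sentiment="negative")
# appends one CSV row: timestamp, 7, alice, bob, 3, 10, 1, carol, negative, 3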
|
murchie85/gossipSimulator
|
game/functions/logging.py
|
logging.py
|
py
| 822 |
python
|
en
|
code
| 25 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "csv.writer",
"line_number": 15,
"usage_type": "call"
}
] |
33062234730
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*
import requests
class MetrikaAPI(object):
def __init__(self, counter_id, token, host='https://api-metrika.yandex.ru'):
self.counter_id = counter_id
self.token = token
self.host = host
def _get_url(self, url='/stat/v1/data', params=None, data=None, method='GET'):
req = requests.request(
method=method,
url=self.host + url,
params=params,
data=data,
headers={'Authorization': 'OAuth ' + self.token},
)
try:
req.raise_for_status()
except requests.exceptions.HTTPError:
print(req.content)
raise
except Exception:
print("Unexpected exception")
raise
return req
def get_sources_visits(self):
req = self._get_url(params=dict(
metrics='ym:s:visits',
id=self.counter_id,
))
return req.json()
def get_sources_users(self):
req = self._get_url(params=dict(
metrics='ym:s:users',
id=self.counter_id,
))
return req.json()
def get_sources_pageviews(self):
req = self._get_url(params=dict(
metrics='ym:s:pageviews',
id=self.counter_id,
))
return req.json()
def main():
    d = MetrikaAPI(44138734, 'a token would go here')
vis = d.get_sources_visits()
us = d.get_sources_users()
view = d.get_sources_pageviews()
    print('Total visits: {}'.format(vis['data'][0]['metrics']))
    print('Total users: {}'.format(us['data'][0]['metrics']))
    print('Total pageviews: {}'.format(view['data'][0]['metrics']))
if __name__ == '__main__':
main()
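For reference, each method above reduces to a single authorized GET against the Reporting API; a minimal sketch of the equivalent raw requests call (the counter id is taken from main above, the token is a placeholder):
import requests
resp = requests.get(
    "https://api-metrika.yandex.ru/stat/v1/data",
    params={"metrics": "ym:s:visits", "id": 44138734},
    headers={"Authorization": "OAuth <token>"})
resp.raise_for_status()
print(resp.json()["data"][0]["metrics"])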
|
swetlanka/py3
|
3-5/3-5.py
|
3-5.py
|
py
| 1,824 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.request",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 24,
"usage_type": "attribute"
}
] |
17953524197
|
# -*- coding: utf-8 -*-
import numpy as np
import torch
import torch.nn as nn
from lichee import plugin
from lichee import config
@plugin.register_plugin(plugin.PluginType.MODULE_LOSS, 'mse_loss')
class MSELoss:
@classmethod
def build(cls, cfg):
return nn.MSELoss()
@plugin.register_plugin(plugin.PluginType.MODULE_LOSS, 'cross_entropy')
class CrossEntropyLoss:
@classmethod
def build(cls, cfg):
return nn.CrossEntropyLoss()
@plugin.register_plugin(plugin.PluginType.MODULE_LOSS, 'neg_log_likelihood')
class NLLLoss:
@classmethod
def build(cls, cfg):
return nn.NLLLoss()
@plugin.register_plugin(plugin.PluginType.MODULE_LOSS, 'binary_cross_entropy')
class BinaryCrossEntropyLoss:
@classmethod
def build(cls, cfg):
return nn.BCEWithLogitsLoss()
@plugin.register_plugin(plugin.PluginType.MODULE_LOSS, 'binary_focal_loss')
class BinaryFocalLoss(nn.Module):
"""
    This is an implementation of Focal Loss with smooth-label cross entropy support, as proposed in
    'Focal Loss for Dense Object Detection' (https://arxiv.org/abs/1708.02002):
    Focal_Loss = -1 * alpha * (1 - pt)**gamma * log(pt)
:param num_class:
:param alpha: (tensor) 3D or 4D the scalar factor for this criterion
:param gamma: (float,double) gamma > 0 reduces the relative loss for well-classified examples (p>0.5) putting more
focus on hard misclassified example
:param reduction: `none`|`mean`|`sum`
:param **kwargs
balance_index: (int) balance class index, should be specific when alpha is float
"""
def __init__(self, alpha=[1.0, 1.0], gamma=2, ignore_index=None, reduction='mean'):
super(BinaryFocalLoss, self).__init__()
if alpha is None:
alpha = [0.25, 0.75]
self.alpha = alpha
self.gamma = gamma
self.smooth = 1e-6
self.ignore_index = ignore_index
self.reduction = reduction
assert self.reduction in ['none', 'mean', 'sum']
if self.alpha is None:
self.alpha = torch.ones(2)
elif isinstance(self.alpha, (list, np.ndarray)):
self.alpha = np.asarray(self.alpha)
self.alpha = np.reshape(self.alpha, (2))
assert self.alpha.shape[0] == 2, \
'the `alpha` shape is not match the number of class'
elif isinstance(self.alpha, (float, int)):
            self.alpha = np.asarray([self.alpha, 1.0 - self.alpha], dtype=np.float32).reshape(2)  # two-element class-weight vector
else:
raise TypeError('{} not supported'.format(type(self.alpha)))
self.one_hot_eye = None
@classmethod
def set_config_default(cls, cfg):
d_c = {'loss_alpha': [1.0, 1.0],
'loss_gamma': 2,
'loss_ignore_index': None,
'loss_reduction': 'mean'}
for key, value in d_c.items():
if key not in cfg.PARAM:
cfg.PARAM[key] = value
@classmethod
def build(cls, cfg):
cls.set_config_default(cfg)
return cls(alpha=cfg.PARAM["loss_alpha"],
gamma=cfg.PARAM["loss_gamma"],
ignore_index=cfg.PARAM["loss_ignore_index"],
reduction=cfg.PARAM["loss_reduction"])
def forward(self, output, target):
prob = torch.sigmoid(output)
prob = torch.clamp(prob, self.smooth, 1.0 - self.smooth)
        if self.one_hot_eye is None:
self.one_hot_eye = torch.eye(2).cuda(target.device.index)
target = self.one_hot_eye[target]
pos_mask = (target == 1).float()
neg_mask = (target == 0).float()
pos_loss = -self.alpha[0] * torch.pow(torch.sub(1.0, prob), self.gamma) * torch.log(prob) * pos_mask
neg_loss = -self.alpha[1] * torch.pow(prob, self.gamma) * \
torch.log(torch.sub(1.0, prob)) * neg_mask
neg_loss = neg_loss.sum()
pos_loss = pos_loss.sum()
num_pos = pos_mask.view(pos_mask.size(0), -1).sum()
num_neg = neg_mask.view(neg_mask.size(0), -1).sum()
if num_pos == 0:
loss = neg_loss
else:
loss = pos_loss / num_pos + neg_loss / num_neg
return loss
@plugin.register_plugin(plugin.PluginType.MODULE_LOSS, 'focal_loss')
class FocalLoss(nn.Module):
"""
    This is an implementation of Focal Loss with smooth-label cross entropy support, as proposed in
    'Focal Loss for Dense Object Detection' (https://arxiv.org/abs/1708.02002):
    Focal_Loss = -1 * alpha * (1 - pt)**gamma * log(pt)
:param num_class:
:param alpha: (tensor) 3D or 4D the scalar factor for this criterion
:param gamma: (float,double) gamma > 0 reduces the relative loss for well-classified examples (p>0.5) putting more
focus on hard misclassified example
:param smooth: (float,double) smooth value when cross entropy
:param size_average: (bool, optional) By default, the losses are averaged over each loss element in the batch.
"""
def __init__(self, num_class, alpha=[0.25, 0.75], gamma=2, balance_index=-1, size_average=True):
super(FocalLoss, self).__init__()
self.num_class = num_class
self.alpha = alpha
self.gamma = gamma
self.size_average = size_average
self.eps = 1e-6
if isinstance(self.alpha, (list, tuple)):
assert len(self.alpha) == self.num_class
self.alpha = torch.Tensor(list(self.alpha))
elif isinstance(self.alpha, (float, int)):
assert 0 < self.alpha < 1.0, 'alpha should be in `(0,1)`)'
assert balance_index > -1
alpha = torch.ones((self.num_class))
alpha *= 1 - self.alpha
alpha[balance_index] = self.alpha
self.alpha = alpha
elif isinstance(self.alpha, torch.Tensor):
self.alpha = self.alpha
else:
raise TypeError('Not support alpha type, expect `int|float|list|tuple|torch.Tensor`')
@classmethod
def set_config_default(cls, cfg):
d_c = {'loss_alpha': [0.25, 0.75],
'loss_gamma': 2,
'loss_balance_index': -1,
'loss_size_average': True}
for key, value in d_c.items():
if key not in cfg.PARAM:
cfg.PARAM[key] = value
@classmethod
def build(cls, cfg):
cls.set_config_default(cfg)
return cls(num_class=config.get_cfg().DATASET.CONFIG.NUM_CLASS,
alpha=cfg.PARAM["loss_alpha"],
gamma=cfg.PARAM["loss_gamma"],
balance_index=cfg.PARAM["loss_balance_index"],
size_average=cfg.PARAM["loss_size_average"])
def forward(self, logit, target):
if logit.dim() > 2:
# N,C,d1,d2 -> N,C,m (m=d1*d2*...)
logit = logit.view(logit.size(0), logit.size(1), -1)
logit = logit.transpose(1, 2).contiguous() # [N,C,d1*d2..] -> [N,d1*d2..,C]
logit = logit.view(-1, logit.size(-1)) # [N,d1*d2..,C]-> [N*d1*d2..,C]
target = target.view(-1, 1) # [N,d1,d2,...]->[N*d1*d2*...,1]
# -----------legacy way------------
# idx = target.cpu().long()
# one_hot_key = torch.FloatTensor(target.size(0), self.num_class).zero_()
# one_hot_key = one_hot_key.scatter_(1, idx, 1)
# if one_hot_key.device != logit.device:
# one_hot_key = one_hot_key.to(logit.device)
# pt = (one_hot_key * logit).sum(1) + epsilon
# ----------memory saving way--------
pt = logit.gather(1, target).view(-1) + self.eps # avoid apply
logpt = pt.log()
        # .to() is a no-op when the devices already match, and this always binds `alpha`
        alpha = self.alpha.to(logpt.device)
alpha_class = alpha.gather(0, target.view(-1))
logpt = alpha_class * logpt
loss = -1 * torch.pow(torch.sub(1.0, pt), self.gamma) * logpt
if self.size_average:
loss = loss.mean()
else:
loss = loss.sum()
return loss
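As a sanity check on the focal-loss formula implemented above, a minimal sketch in plain torch with toy logits and labels: with gamma = 0 and uniform alpha the focal term (1-pt)**gamma collapses to 1, so the loss must equal ordinary cross entropy.
import torch
import torch.nn.functional as F
logits = torch.tensor([[2.0, 0.5], [0.1, 1.5]])
labels = torch.tensor([0, 1])
pt = F.softmax(logits, dim=1).gather(1, labels.view(-1, 1)).view(-1)
focal = (-torch.pow(1.0 - pt, 0.0) * pt.log()).mean()  # gamma = 0
assert torch.allclose(focal, F.cross_entropy(logits, labels))
print(float(focal))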
|
Tencent/Lichee
|
lichee/module/torch/loss/loss.py
|
loss.py
|
py
| 8,023 |
python
|
en
|
code
| 295 |
github-code
|
6
|
[
{
"api_name": "torch.nn.MSELoss",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "lichee.plugin.register_plugin",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "lichee.plugin",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "lichee.plugin.PluginType",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "lichee.plugin.register_plugin",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "lichee.plugin",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "lichee.plugin.PluginType",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.NLLLoss",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "lichee.plugin.register_plugin",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "lichee.plugin",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "lichee.plugin.PluginType",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.BCEWithLogitsLoss",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "lichee.plugin.register_plugin",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "lichee.plugin",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "lichee.plugin.PluginType",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "torch.ones",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "torch.sigmoid",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "torch.eye",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "torch.pow",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "torch.sub",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "torch.log",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "torch.pow",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.log",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "torch.sub",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "lichee.plugin.register_plugin",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "lichee.plugin",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "lichee.plugin.PluginType",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "lichee.config.get_cfg",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "lichee.config",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "torch.pow",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "torch.sub",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "lichee.plugin.register_plugin",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "lichee.plugin",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "lichee.plugin.PluginType",
"line_number": 129,
"usage_type": "attribute"
}
] |
28400031595
|
import os
import time
from datetime import datetime
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
from torch.autograd import Variable
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import random
import numpy as np
import utils.config as config
import matplotlib.pyplot as plt
import os, psutil
import functools
from skimage.measure import label as sk_label
from skimage.measure import regionprops as sk_regions
from skimage.transform import resize
# make every print call flush by default
print = functools.partial(print, flush=True)
#-------- Dataloder --------
# After augmentation with resize, crop the spleen area and then apply the transforms
class BoxCrop(object):
'''
    Cropping the image by its bounding-box label after augmentation
input: keys=["image", "label"]
label:
[[x1,y1,x2,y2,z1,z2,class]...]
image:
[1,x,y,z]
output dictionary add
im_info: [x,y,z,scale_x_y,scale_z]
num_box: 1 (All is one in our data)
'''
def __init__(self,keys):
self.keys = keys
def __call__(self, data):
d = dict(data)
image = d['image']
label = d['label']
# only one label
if type(label) == type(np.array([])):
label_list = label.tolist()
else:
# more than one label
# select the first label
label_list = eval(label)[0]
if label_list[1]>=label_list[3] or label_list[0]>=label_list[2] or label_list[4]>=label_list[5]:
raise RuntimeError(f"{d['image_meta_dict']['filename_or_obj']} bounding box error")
#print(f"{d['image_meta_dict']['filename_or_obj']} bounding box error ")
out_image = image[0, int(label_list[1]):int(label_list[3]), int(label_list[0]):int(label_list[2]), int(label_list[4]):int(label_list[5])]
d['image'] = np.expand_dims(out_image,axis=0)
d['label'] = label_list[6]
#print(d['image'].shape)
return d
# Duplicate the dataset num_samples times
class Dulicated(object):
'''
    Duplicate data for augmentation
'''
def __init__(self,
keys,
num_samples: int = 1):
self.keys = keys
self.num_samples = num_samples
def __call__(self, data):
d = dict(data)
image = d['image']
label = d['label']
        results = [dict(data) for _ in range(self.num_samples)]  # one shallow copy per duplicated sample
for key in data.keys():
for i in range(self.num_samples):
results[i][key] = data[key]
return results
#return d
# True label
class Annotate(object):
'''
transform mask to bounding box label after augmentation
check the image shape to know scale_x_y, scale_z
input: keys=["image", "label"]
output dictionary add
im_info: [x,y,z,scale_x_y,scale_z]
num_box: 1 (All is one in our data)
'''
def __init__(self,keys):
self.keys = keys
def __call__(self, data):
d = dict(data)
#image = d[self.keys[0]]
#label = d[self.keys[1]]
image = d['image']
label = d['label']
label = label.squeeze(0)
annotations = np.zeros((1, 7))
annotation = mask2boundingbox(label)
if annotation == 0:
annotation = annotations
raise ValueError('Dataloader data no annotations')
#print("Dataloader data no annotations")
else:
# add class label
cls = d['class']
annotation = np.array(annotation)
annotation = np.append(annotation, cls)
#annotation = np.expand_dims(annotation,0)
#print(annotation.shape)
#print(image.shape)
d['label'] = annotation
return d
def mask2boundingbox(label):
if torch.is_tensor(label):
label = label.numpy()
    sk_mask = sk_label(label)
    regions = sk_regions(sk_mask)  # regionprops over the connected-component labelling
#global top, left, low, bottom, right, height
#print(regions)
# check regions is empty
if not regions:
return 0
for region in regions:
# print('[INFO]bbox: ', region.bbox)
# region.bbox (x1,y1,z1,x2,y2,z2)
# top, left, low, bottom, right, height = region.bbox
y1, x1, z1, y2, x2, z2 = region.bbox
# return left, top, right, bottom, low, height
return x1, y1, x2, y2, z1, z2
#-------- Running setting --------
'''
def adjust_learning_rate_by_step(optimizer, epoch, init_lr, decay_rate=.5 ,lr_decay_epoch=40):
#Sets the learning rate to initial LR decayed by e^(-0.1*epochs)
lr = init_lr * (decay_rate ** (epoch // lr_decay_epoch))
for param_group in optimizer.param_groups:
#param_group['lr'] = param_group['lr'] * math.exp(-decay_rate*epoch)
param_group['lr'] = lr
#lr = init_lr * (0.1**(epoch // lr_decay_epoch))
#print('LR is set to {}'.format(param_group['lr']))
return optimizer , lr
def adjust_learning_rate(optimizer, epoch, init_lr, decay_rate=.5):
#Sets the learning rate to initial LR decayed by e^(-0.1*epochs)
lr = init_lr * decay_rate
for param_group in optimizer.param_groups:
#param_group['lr'] = param_group['lr'] * math.exp(-decay_rate*epoch)
param_group['lr'] = lr
#lr = init_lr * (0.1**(epoch // lr_decay_epoch))
#print('LR is set to {}'.format(param_group['lr']))
return optimizer , lr
'''
def train(model, device, data_num, epochs, optimizer, loss_function, train_loader, valid_loader, early_stop, scheduler, check_path):
    # let the ini config file be updated
#global best_metric
#global best_metric_epoch
#val_interval = 2
best_metric = -1
best_metric_epoch = -1
trigger_times = 0
#epoch_loss_values = list()
writer = SummaryWriter()
for epoch in range(epochs):
print("-" * 10)
print(f"epoch {epoch + 1}/{epochs}")
# record ram memory used
process = psutil.Process(os.getpid())
print(f'RAM used:{process.memory_info().rss/ 1024 ** 3} GB')
model.train()
epoch_loss = 0
step = 0
for batch_data in train_loader:
step += 1
inputs, labels = batch_data['image'].to(device), batch_data['label'].long().to(device)
optimizer.zero_grad()
#inputs, labels = Variable(inputs), Variable(labels)
outputs = model(inputs)
#print(f'outputs:{outputs.size()}')
#print(f'labels:{labels.size()}')
loss = loss_function(outputs, labels)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_len = data_num // train_loader.batch_size
print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step)
epoch_loss /= step
config.epoch_loss_values.append(epoch_loss)
print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")
# Early stopping & save best weights by using validation
metric = validation(model, valid_loader, device)
scheduler.step(metric)
# checkpoint setting
if metric > best_metric:
# reset trigger_times
trigger_times = 0
best_metric = metric
best_metric_epoch = epoch + 1
torch.save(model.state_dict(), f"{check_path}/{best_metric}.pth")
print('trigger times:', trigger_times)
print("saved new best metric model")
else:
trigger_times += 1
print('trigger times:', trigger_times)
# Save last 3 epoch weight
if early_stop - trigger_times <= 3 or epochs - epoch <= 3:
torch.save(model.state_dict(), f"{check_path}/{metric}_last.pth")
print("save last metric model")
print(
"current epoch: {} current accuracy: {:.4f} best accuracy: {:.4f} at epoch {}".format(
epoch + 1, metric, best_metric, best_metric_epoch
)
)
writer.add_scalar("val_accuracy", metric, epoch + 1)
# early stop
if trigger_times >= early_stop:
print('Early stopping!\nStart to test process.')
print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
return model
print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
config.best_metric = best_metric
config.best_metric_epoch = best_metric_epoch
writer.close()
#print(f'training_torch best_metric:{best_metric}',flush =True)
#print(f'training_torch config.best_metric:{config.best_metric}',flush =True)
return model
class AngleLoss_predict(nn.Module):
def __init__(self, gamma=0):
super(AngleLoss_predict, self).__init__()
self.gamma = gamma
self.it = 1
self.LambdaMin = 5.0
self.LambdaMax = 1500.0
self.lamb = 1500.0
def forward(self, input, target):
cos_theta, phi_theta = input
target = target.view(-1, 1) # size=(B,1)
index = cos_theta.data * 0.0 # size=(B, Classnum)
# index = index.scatter(1, target.data.view(-1, 1).long(), 1)
#index = index.byte()
index = index.bool()
index = Variable(index)
# index = Variable(torch.randn(1,2)).byte()
self.lamb = max(self.LambdaMin, self.LambdaMax / (1 + 0.1 * self.it))
output = cos_theta * 1.0 # size=(B,Classnum)
output1 = output.clone()
# output1[index1] = output[index] - cos_theta[index] * (1.0 + 0) / (1 + self.lamb)
# output1[index1] = output[index] + phi_theta[index] * (1.0 + 0) / (1 + self.lamb)
output[index] = output1[index]- cos_theta[index] * (1.0 + 0) / (1 + self.lamb)+ phi_theta[index] * (1.0 + 0) / (1 + self.lamb)
return(output)
def validation(model, val_loader, device):
#metric_values = list()
model.eval()
with torch.no_grad():
num_correct = 0.0
metric_count = 0
for val_data in val_loader:
val_images, val_labels = val_data['image'].to(device), val_data['label'].to(device)
val_outputs = model(val_images)
            # based on AngleLoss
if isinstance(val_outputs, tuple):
val_outputs = AngleLoss_predict()(val_outputs,val_labels)
value = torch.eq(val_outputs.argmax(dim=1), val_labels)
metric_count += len(value)
num_correct += value.sum().item()
metric = num_correct / metric_count
config.metric_values.append(metric)
#print(f'validation metric:{config.metric_values}',flush =True)
return metric
def plot_loss_metric(epoch_loss_values,metric_values,save_path):
plt.figure("train", (12, 6))
plt.subplot(1, 2, 1)
plt.title("Epoch Average Loss")
x = [i + 1 for i in range(len(epoch_loss_values))]
y = epoch_loss_values
plt.xlabel("epoch")
plt.plot(x, y)
plt.subplot(1, 2, 2)
plt.title("Val Accuracy")
x = [i + 1 for i in range(len(metric_values))]
y = metric_values
plt.xlabel("epoch")
plt.plot(x, y)
plt.savefig(f'{save_path}/train_loss_metric.png')
def kfold_split(file, kfold, seed, type, fold):
if type == 'pos':
d = {}
file_list = ['file']
file_list.extend([f'pos_split_df_{i}' for i in range(kfold)])
d['file'] = file
for i in range(kfold):
d[f'test_pos_df_{i}'] = d[file_list[i]].groupby(["gender","age_range","spleen_injury_class"],group_keys=False).apply(lambda x: x.sample(frac=1/(kfold-i),random_state=1))
d[f'pos_split_df_{i}'] = d[file_list[i]].drop(d[f'test_pos_df_{i}'].index.to_list())
output_file = d[f'test_pos_df_{fold}']
elif type == 'neg':
file_list = [f'neg_split_df_{i}' for i in range(kfold)]
file_list = np.array_split(file.sample(frac=1,random_state=seed), kfold)
output_file = file_list[fold]
return output_file
def Data_progressing(pos_file, neg_file, box_df, imbalance_data_ratio, data_split_ratio, seed, fold, save_file = False, cropping = True):
# Pos data progress
for index, row in pos_file.iterrows():
        if row['OIS'] == row['OIS']:  # NaN != NaN, so this keeps rows with a non-missing OIS
pos_file.loc[index,'spleen_injury_grade'] = row['OIS']
else:
pos_file.loc[index,'spleen_injury_grade'] = row['R_check']
new_col= 'age_range'
new_col_2 = 'spleen_injury_class'
bins = [0,30,100]
bins_2 = [0,2,5]
label_2 = ['OIS 1,2','OIS 3,4,5']
pos_file[new_col] = pd.cut(x=pos_file.age, bins=bins)
pos_file[new_col_2] = pd.cut(x=pos_file.spleen_injury_grade, bins=bins_2, labels=label_2)
# positive need select column and split in kfold
test_pos_df = kfold_split(pos_file, int(1/data_split_ratio[2]), seed, 'pos', fold)
train_pos_file = pos_file.drop(test_pos_df.index.to_list())
valid_pos_df = train_pos_file.groupby(['gender','age_range','spleen_injury_class'],group_keys=False).apply(lambda x: x.sample(frac=data_split_ratio[1]/(1-data_split_ratio[2]),random_state=seed))
train_pos_df = train_pos_file.drop(valid_pos_df.index.to_list())
# negative only need split in kfold
neg_sel_df = neg_file.sample(n=len(pos_file),random_state=seed)
test_neg_df = kfold_split(neg_sel_df, int(1/data_split_ratio[2]), seed, 'neg', fold)
train_neg_file = neg_file.drop(test_neg_df.index.to_list())
valid_neg_df = train_neg_file.sample(n=len(valid_pos_df),random_state=seed)
train_neg_df = train_neg_file.drop(valid_neg_df.index.to_list()).sample(n=len(train_pos_df)*imbalance_data_ratio,random_state=seed)
train_df = pd.concat([train_neg_df,train_pos_df])
valid_df = pd.concat([valid_neg_df,valid_pos_df])
test_df = pd.concat([test_neg_df,test_pos_df])
train_data = box_df[box_df.Path.isin(train_df.source.to_list())]
valid_data = box_df[box_df.Path.isin(valid_df.source.to_list())]
test_data = box_df[box_df.Path.isin(test_df.source.to_list())]
train_df['spleen_injury'] = np.array([0 if i else 1 for i in train_df.spleen_injury_class.isna().tolist()])
valid_df['spleen_injury'] = np.array([0 if i else 1 for i in valid_df.spleen_injury_class.isna().tolist()])
test_df['spleen_injury'] = np.array([0 if i else 1 for i in test_df.spleen_injury_class.isna().tolist()])
if save_file:
test_df_output = pd.merge(test_data.loc[:,['ID','Path','BBox','Posibility']],test_df,left_on='Path',right_on='source',suffixes = ['','_x'])
        valid_df_output = pd.merge(valid_data.loc[:,['ID','Path','BBox','Posibility']],valid_df,left_on='Path',right_on='source',suffixes = ['','_x'])
test_df_output = test_df_output.drop(['ID_x'],axis=1)
valid_df_output = valid_df_output.drop(['ID_x'],axis=1)
test_df_output = test_df_output.loc[:,test_df_output.columns[~test_df_output.columns.str.contains('Unnamed')]]
valid_df_output = valid_df_output.loc[:,valid_df_output.columns[~valid_df_output.columns.str.contains('Unnamed')]]
valid_df_output.to_csv(f'{save_file}/fold{fold}_valid.csv',index = False)
test_df_output.to_csv(f'{save_file}/fold{fold}_test.csv',index = False)
if cropping:
train_data_dicts = []
for index,row in train_data.iterrows():
image = row['Path']
label = row['BBox']
train_data_dicts.append({'image':image,'label':label})
valid_data_dicts = []
for index,row in valid_data.iterrows():
image = row['Path']
label = row['BBox']
valid_data_dicts.append({'image':image,'label':label})
test_data_dicts = []
for index,row in test_data.iterrows():
image = row['Path']
label = row['BBox']
test_data_dicts.append({'image':image,'label':label})
else:
train_data_dicts =[
{"image": image_name, "label": label_name}
for image_name, label_name in zip([i for i in train_df.source.tolist()], [i for i in train_df.spleen_injury.tolist()] )
]
valid_data_dicts =[
{"image": image_name, "label": label_name}
for image_name, label_name in zip([i for i in valid_df_output.source.tolist()], [i for i in valid_df_output.spleen_injury.tolist()] )
]
test_data_dicts =[
{"image": image_name, "label": label_name}
for image_name, label_name in zip([i for i in test_df_output.source.tolist()], [i for i in test_df_output.spleen_injury.tolist()] )
]
return train_data_dicts, valid_data_dicts, test_data_dicts
class FocalLoss(nn.Module):
def __init__(self, class_num, alpha=None, gamma=2, size_average=True):
"""
        Focal loss: -α(1-yi)**γ * ce_loss(xi, yi)
        A step-by-step implementation of the focal loss function.
        :param alpha: class weight α. A list gives per-class weights; a scalar yields weights [α, 1-α, 1-α, ...],
            commonly used in object detection to suppress the background class (RetinaNet uses 0.25)
        :param gamma: focusing parameter γ that down-weights easy samples (RetinaNet uses 2)
        :param num_classes: number of classes
        :param size_average: loss reduction mode; defaults to the mean
"""
super(FocalLoss, self).__init__()
        if alpha is None:  # alpha is the class-balancing factor
self.alpha = Variable(torch.ones(class_num, 1))
else:
if isinstance(alpha, list):
self.alpha = torch.Tensor(alpha)
else:
self.alpha = torch.zeros(class_num)
self.alpha[0] += alpha
self.alpha[1:] += (1-alpha)
        self.gamma = gamma  # focusing exponent
        self.class_num = class_num  # number of classes
        self.size_average = size_average  # whether to mean-reduce the returned loss
def forward(self, preds, labels):
"""
        Compute the focal loss.
        :param preds: predicted class scores, size [B,N,C] (detection) or [B,C] (classification); B batch, N boxes, C classes
        :param labels: ground-truth classes, size [B,N] or [B]
:return:
"""
# assert preds.dim()==2 and labels.dim()==1
preds = preds.view(-1,preds.size(-1))
self.alpha = self.alpha.to(preds.device)
        preds_softmax = F.softmax(preds, dim=1)  # softmax (not log_softmax) because the probabilities themselves are reused below
        preds_softmax = preds_softmax.clamp(min=0.0001,max=1.0)  # clamp away from zero so the log cannot go to NaN
        preds_logsoft = torch.log(preds_softmax)
        preds_softmax = preds_softmax.gather(1,labels.view(-1,1))  # this implements nll_loss (cross entropy = log_softmax + nll)
        preds_logsoft = preds_logsoft.gather(1,labels.view(-1,1))
        alpha = self.alpha.gather(0,labels.view(-1))  # per-sample alpha; keep self.alpha intact across calls
        loss = -torch.mul(torch.pow((1-preds_softmax), self.gamma), preds_logsoft)  # torch.pow((1-preds_softmax), self.gamma) is the (1-pt)**γ focal term
        loss = torch.mul(alpha, loss.t())
if self.size_average:
loss = loss.mean()
else:
loss = loss.sum()
return loss
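A minimal standalone sketch of the mask-to-box step that Annotate relies on above, run on a toy 3D mask (the array shape and blob location are made up for illustration):
import numpy as np
from skimage.measure import label as sk_label
from skimage.measure import regionprops as sk_regions
mask = np.zeros((8, 8, 4), dtype=np.uint8)
mask[2:5, 3:6, 1:3] = 1  # a single rectangular blob
regions = sk_regions(sk_label(mask))
y1, x1, z1, y2, x2, z2 = regions[0].bbox  # same unpacking as mask2boundingbox
print(x1, y1, x2, y2, z1, z2)  # 3 2 6 5 1 3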
|
houhsein/Spleen_injury_detection
|
classification/utils/training_torch_utils.py
|
training_torch_utils.py
|
py
| 19,509 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "warnings.filterwarnings",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.is_tensor",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "skimage.measure.label",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "skimage.measure.regionprops",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.tensorboard.SummaryWriter",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "psutil.Process",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "os.getpid",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "utils.config.epoch_loss_values.append",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "utils.config.epoch_loss_values",
"line_number": 201,
"usage_type": "attribute"
},
{
"api_name": "utils.config",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "torch.save",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "utils.config.best_metric",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "utils.config",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "utils.config.best_metric_epoch",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "utils.config",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 245,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 245,
"usage_type": "name"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "torch.eq",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "utils.config.metric_values.append",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "utils.config.metric_values",
"line_number": 288,
"usage_type": "attribute"
},
{
"api_name": "utils.config",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 295,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 299,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 300,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 305,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "numpy.array_split",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "pandas.cut",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "pandas.cut",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 412,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 412,
"usage_type": "name"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 424,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 424,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 446,
"usage_type": "name"
},
{
"api_name": "torch.log",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "torch.mul",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "torch.pow",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "torch.mul",
"line_number": 453,
"usage_type": "call"
}
] |
25760579432
|
from opencage.geocoder import OpenCageGeocode
import xlrd
import xlwt
from xlwt import Workbook
import pandas as pd
key ="fd4f682cf2014f3fbd321ab141454138"
# get api key from: https://opencagedata.com
geocoder = OpenCageGeocode(key)
loc = ("/Users/ashwinisriram/Documents/Lat long/corrected.xlsx")
wb = xlrd.open_workbook(loc)
sheet = wb.sheet_by_index(0)
sheet.cell_value(0, 0)
# Workbook is created
wb = Workbook()
# add_sheet is used to create sheet.
sheet1 = wb.add_sheet('Sheet 1')
# Define a dictionary containing data
data={'Customer_code':[],'City':[],'State':[]}
branch_district = ""
for r in range(4000,4500):
customer_code=str(sheet.cell_value(r, 0))
# sheet1.write(i, 1, sheet.cell_value(r, 1))
# sheet1.write(i, 2, sheet.cell_value(r, 2))
branch = str(sheet.cell_value(r, 1))
district= str(sheet.cell_value(r, 2))
data['Customer_code'].append(customer_code)
data['City'].append(branch)
data['State'].append(district)
df=pd.DataFrame(data)
# Convert the dictionary into DataFrame
# Observe the result
print(df)
list_lat = [] # create empty lists
list_long = []
link=[]
for index, row in df.iterrows(): # iterate over rows in dataframe
City = row['City']
State = row['State']
query = str(City)+','+str(State)
results = geocoder.geocode(query)
try:
lat = results[0]['geometry']['lat']
long = results[0]['geometry']['lng']
list_lat.append(lat)
list_long.append(long)
link.append("http://www.google.com/maps/place/"+str(lat)+','+str(long))
except IndexError:
list_lat.append('Nan')
list_long.append('Nan')
link.append("link unavailable")
# create new columns from lists
df['lat'] = list_lat
df['lon'] = list_long
df['link']=link
# function to find the coordinate
# of a given city
print(df)
# create excel writer object
writer = pd.ExcelWriter('output2.xlsx')
# write dataframe to excel
df.to_excel(writer,'sheet2')
# save the excel
writer.save()
print('DataFrame is written successfully to Excel File.')
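For clarity, a minimal sketch of the result shape the try/except above guards against (the coordinates are illustrative, not a real geocode): geocoder.geocode returns a possibly empty list, and each hit carries a geometry dict.
results = [{"geometry": {"lat": 12.97, "lng": 77.59}}]
try:
    lat, lng = results[0]["geometry"]["lat"], results[0]["geometry"]["lng"]
    print("http://www.google.com/maps/place/" + str(lat) + ',' + str(lng))
except IndexError:
    print("link unavailable")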
|
Ashwini-Sriram/Latlong
|
alter.py
|
alter.py
|
py
| 2,116 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "opencage.geocoder.OpenCageGeocode",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "xlrd.open_workbook",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "xlwt.Workbook",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pandas.ExcelWriter",
"line_number": 103,
"usage_type": "call"
}
] |
22525757483
|
"""
Author: Matthew Smith (45326242)
Date: 15/04/2021
Title: AERO4450 Design Report Progress Check
"""
import numpy as np
import matplotlib.pyplot as plt
import math
import scipy.sparse as sps
import scipy.sparse.linalg as splinalg
# Parameters
M_N2 = 28 # g/mol
M_F2 = 44 # g/mol
M_O2 = 32 # g/mol
mdot_a = 900 # g/s
mdot_f = 30 # g/s
T_in = 800 # K
T_act = 40000 # K
A = 5*10**11 # (1/s)
T_ref = 298.15 # K
# Thermodynamic Properties (formation enthalpy,Cp)
P = {'F2': (0,2000),
'FO2': (-12,2200),
'O2': (0,1090),
'N2': (0,1170)
}
# Preliminaries
# Task 1
b = (0.77*M_O2)/(0.23*M_N2)
m_air = 2*M_O2/0.23
Zst = M_F2/(M_F2 + m_air)
AFst = m_air/M_F2
Zavg = mdot_f/(mdot_a + mdot_f)
AFavg = mdot_a/mdot_f
print("Zst =",Zst)
print("AFst =",AFst)
print("Zavg =",Zavg)
print("AFavg =",AFavg)
# Task 2
Y_pc_max = 2*(M_F2/2 + M_O2)/(2*b*M_N2 + 2*(M_F2/2 + M_O2))
# Define the piecewise function Ypc(Z)
def Y_pc(Z):
if Z <= Zst:
grad = Y_pc_max/Zst
c = 0
Y = grad*Z + c
if Z > Zst:
grad = -Y_pc_max/(1-Zst)
c = -grad
Y = grad*Z + c
return Y
# Plot Y_pc(Z)
plt.figure(figsize=(10,8))
plt.plot([0,Zst],[0,Y_pc_max],'b-')
plt.plot([Zst,1],[Y_pc_max,0],'b-')
plt.plot([0,Zst],[Y_pc_max,Y_pc_max],'r--')
plt.plot([Zst,Zst],[0,Y_pc_max],'r--')
plt.xticks([0.0,0.137,0.2,0.4,0.6,0.8,1.0])
plt.yticks([0.0,0.2,0.335,0.4,0.6,0.8,1.0])
plt.xlabel("Mixture Fraction (Z)")
plt.ylabel("Mass Fraction (Y)")
plt.title("Mass Fraction of FO2 vs. Mixture Fraction")
plt.xlim(0,1)
plt.ylim(0,1)
plt.show()
print("Ymax =",Y_pc_max)
# Task 4
# Find ao and af
ao = M_O2/(M_F2/2 + M_O2)
af = 0.5*M_F2/(M_F2/2 + M_O2)
print("ao =",ao)
print("af =",af)
def Y_O2(Z,Y_FO2):
Y = 0.23*(1-Z) - ao*Y_FO2
# Ensure that Y is non-negative
if Y < 0:
return 0
else:
return Y
def Y_F2(Z,Y_FO2):
Y = Z - af*Y_FO2
# Ensure that Y is non-negative
if Y < 0:
return 0
else:
return Y
# YN2 is a conserved scalar
def Y_N2(Z):
return 0.77*(1-Z)
# Sum of all Y's should be 1
def Y_Total(Z,Y_FO2):
return Y_O2(Z,Y_FO2) + Y_N2(Z) + Y_F2(Z,Y_FO2) + Y_FO2
# Create lists for all mass fractions
Zs = np.linspace(0,1,200)
O2 = [Y_O2(Z,Y_pc(Z)) for Z in Zs]
F2 = [Y_F2(Z,Y_pc(Z)) for Z in Zs]
N2 = [Y_N2(Z) for Z in Zs]
FO2 = [Y_pc(Z) for Z in Zs]
Total = [Y_Total(Z,Y_pc(Z)) for Z in Zs]
# Plot the mass fractions vs. Z
plt.figure(figsize=(10,8))
plt.plot(Zs,O2,'c-',label='O2')
plt.plot(Zs,F2,'m-',label='F2')
plt.plot(Zs,N2,'g-',label='N2')
plt.plot(Zs,Total,'k-',label='Sum')
plt.plot(Zs,FO2,'b-',label='FO2')
plt.plot([Zst,Zst],[0,1],'r--',label='Zst')
plt.xlabel("Mixture Fraction (Z)")
plt.ylabel("Mass Fraction (Y)")
plt.xlim(0,1)
plt.ylim(0,1.1)
plt.yticks([0.0,0.2,0.23,0.4,0.6,0.77,0.8,1.0])
plt.legend()
plt.show()
# Task 5
def phi(prop,Z,c):
# Y_FO2 depends on combustion progress
Y_FO2 = c*Y_pc(Z)
# Define formation enthalpy
if prop == 'h_f':
val = (P['F2'][0]*Y_F2(Z,Y_FO2) + P['FO2'][0]*Y_FO2 +
P['O2'][0]*Y_O2(Z,Y_FO2) + P['N2'][0]*Y_N2(Z))*10**6
# Define heat capacity
if prop == 'Cp':
val = (P['F2'][1]*Y_F2(Z,Y_FO2) + P['FO2'][1]*Y_FO2 +
P['O2'][1]*Y_O2(Z,Y_FO2) + P['N2'][1]*Y_N2(Z))
# Define total enthalpy
if prop == 'h':
val = phi('h_f',Z,c)*Y_FO2 + (T_in - T_ref)*phi('Cp',Z,c)
return val
# Task 6
# Total enthalpy is a conserved scalar
def h(Z,c):
return phi('h',0,c) + Z*(phi('h',1,c) - phi('h',0,c))
def T(Z,c):
return T_ref + (h(Z,c) - phi('h_f',Z,c))/phi('Cp',Z,c)
def W(Z,c):
Y_FO2 = c*Y_pc(Z)
return Y_F2(Z,Y_FO2)*Y_O2(Z,Y_FO2)*A*np.exp(-T_act/T(Z,c))
# Task 7
Zs = np.linspace(0,1,500)
# Plot the temperature vs. Z for different combustion progresses
plot1 = []
plot2 = []
plot3 = []
plot4 = []
for z in Zs:
for c in [0,1/3,2/3,1]:
if c == 1/3:
plot1.append(T(z,c))
if c == 2/3:
plot2.append(T(z,c))
if c == 0:
plot3.append(T(z,c))
if c == 1:
plot4.append(T(z,c))
plt.figure(figsize=(10,8))
plt.plot(Zs,plot1,'r-',label='c = 1/3')
plt.plot(Zs,plot2,'b-',label='c = 2/3')
plt.plot(Zs,plot3,'g-',label='c = 0')
plt.plot(Zs,plot4,'m-',label='c = 1')
plt.title('Temperature vs. Z for Different c Values')
plt.xlabel('Mixture Fraction (Z)')
plt.ylabel('Temperature (K)')
plt.xlim(0,1)
plt.ylim(500,3500)
plt.yticks([500,800,1000,1500,2000,2500,3000,3500])
plt.legend()
plt.show()
# Plot the reaction rate vs. Z for different combustion progresses
plot1 = []
plot2 = []
for z in Zs:
for c in [1/3,2/3]:
if c == 1/3:
plot1.append(W(z,c))
if c == 2/3:
plot2.append(W(z,c))
plt.figure(figsize=(10,8))
plt.plot(Zs,plot1,'r-',label='c = 1/3')
plt.plot(Zs,plot2,'b-',label='c = 2/3')
plt.title('Reaction Rate vs. Z for Different c Values')
plt.xlabel('Mixture Fraction (Z)')
plt.ylabel('W (1/s)')
plt.xlim(0,1)
plt.legend()
plt.show()
# Flamelet Model
# Task 1
nZ = 101
dZ = 1/(nZ-1)
Z_values = np.linspace(0,1,nZ)
# Define the flamelet model that outputs the steady-state mass fractions for a given
# Nst
def flamelet_model(Nst):
W_max = 500
# Set time-step and CFL number
dt = 0.01/W_max
CFL = dt*Nst/(dZ**2)
t = 0
# Initial conditions
current_Y = np.array([Y_pc(z) for z in Z_values])
# Initial reaction rates
current_W = np.zeros(nZ)
for i in range(1,nZ-1):
c = current_Y[i]/Y_pc(i*dZ)
current_W[i] = W(i*dZ,c)
# Define implicit coefficient matrix
implicit_matrix = ((1+2*CFL) * sps.eye(nZ, k=0)
-CFL * sps.eye(nZ, k=-1)
-CFL * sps.eye(nZ, k=+1))
# Dirichlet boundary conditions
B = implicit_matrix.tolil()
B[0,:], B[nZ-1,:] = 0, 0
B[0,0], B[nZ-1,nZ-1] = 1, 1
implicit_matrix = B.tocsr()
# Begin general updates until steady-state solution is achieved or FO2 goes
# extinct
previous_Y = np.zeros(nZ)
while abs(np.amax(current_Y) - np.amax(previous_Y)) > 1*10**-7:
t += dt
previous_Y = current_Y.copy()
# Use sparse matrix solver
current_Y = splinalg.spsolve(implicit_matrix,(previous_Y+current_W*dt))
# Update reaction rates
for i in range(1,nZ-1):
c = current_Y[i]/Y_pc(i*dZ)
current_W[i] = W(i*dZ,c)
print('Number of time steps used =', t/dt)
return current_Y
# Task 2
# Show steady-state solution for Nst = 30 (subcritical)
Y_ss = flamelet_model(30)
Ypc = [Y_pc(Z) for Z in Z_values]
plt.figure(figsize=(10,8))
plt.plot(Z_values,Y_ss,'b-',label='Steady-State Solution')
plt.plot(Z_values,Ypc,'r--',label='Y_pc(Z)')
plt.title('Mass Fraction of FO2 vs. Mixture Fraction for Nst = 30')
plt.xlabel('Mixture Fraction (Z)')
plt.ylabel('Mass Fraction (Y)')
plt.xlim(0,1)
plt.ylim(0,0.4)
plt.legend()
plt.show()
# Task 3
# Golden ratio
gr = (math.sqrt(5) + 1) / 2
# Define Golden-Section Search function
def gss(f, a, b, tol=0.01):
# Find initial c and d values
c = b - (b - a) / gr
d = a + (b - a) / gr
while abs(b - a) > tol:
# If f(c) goes to extinction, return 100
if np.amax(f(c)) < 10**-3:
x = 100
# If f(c) reaches steady-state, return max Y_FO2
else:
x = np.amax(f(c))
# If f(d) goes to extinction, return 100
if np.amax(f(d)) < 10**-3:
y = 100
# If f(d) reaches steady-state, return max Y_FO2
else:
y = np.amax(f(d))
# When f(c) and f(d) go to extinction, a = a, b = c
        if x == 100 and y == 100:
b = c
c = b - (b - a) / gr
d = a + (b - a) / gr
continue
# When f(c) and f(d) both have a steady solution, a = d, b = b
        if x != 100 and y != 100:
a = d
c = b - (b - a) / gr
d = a + (b - a) / gr
continue
# If f(c) < f(d), b = d, a = a
if x < y:
b = d
else:
a = c
c = b - (b - a) / gr
d = a + (b - a) / gr
return (b + a) / 2
#print(gss(flamelet_model,50.5,51,tol=0.01))
# ^^ uncomment this if you want to see the golden search result
# It takes roughly 5 mins to run though
# Critical value found form golden search
Ncr = 50.64387
print('Ncr =',Ncr)
# Plot critical solution
Y_ss = flamelet_model(Ncr)
Ypc = [Y_pc(Z) for Z in Z_values]
plt.figure(figsize=(10,8))
plt.plot(Z_values,Y_ss,'b-',label='Steady-State Solution')
plt.plot(Z_values,Ypc,'r--',label='Y_pc(Z)')
plt.title('Mass Fraction of FO2 vs. Mixture Fraction for Ncr')
plt.xlabel('Mixture Fraction (Z)')
plt.ylabel('Mass Fraction (Y)')
plt.xlim(0,1)
plt.ylim(0,0.4)
plt.legend()
plt.show()
# Plot critical temperatures
Temps = np.zeros(nZ)
Temps[0] = T(0,0)
Temps[nZ-1] = T(1,0)
for i in range(1,nZ-1):
c = Y_ss[i]/Y_pc(i*dZ)
Temps[i] = T(i*dZ,c)
T_a = np.amax(Temps)
plt.figure(figsize=(10,8))
plt.plot(Z_values,Temps,'b-')
plt.plot([0,1],[T_a,T_a],'r--')
plt.title('Temperature vs. Mixture Fraction')
plt.xlabel('Mixture Fraction (Z)')
plt.ylabel('Temperature (K)')
plt.xlim(0,1)
plt.ylim(750,3000)
plt.yticks([750,1000,1250,1500,1750,2000,2250,2500,2750,2812.34,3000])
plt.show()
print('Adiabatic Temp =',T_a)
# Task 4
# Find residence time
t_res = (Zavg - Zavg**2)/(2*Ncr)
print('Residence Time =',t_res)
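As a quick standalone check of the Task 1 numbers, the same stoichiometry in a few lines of plain Python (values repeated from the parameters above):
M_F2, M_O2 = 44, 32
mdot_a, mdot_f = 900, 30
m_air = 2 * M_O2 / 0.23            # air mass per mole of fuel
Zst = M_F2 / (M_F2 + m_air)        # stoichiometric mixture fraction
Zavg = mdot_f / (mdot_a + mdot_f)  # mean mixture fraction
print(round(Zst, 3), round(Zavg, 3))  # ~0.137 and ~0.032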
|
msmit677/AERO4450
|
AERO4450_Combustion_Modelling.py
|
AERO4450_Combustion_Modelling.py
|
py
| 9,866 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "numpy.exp",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 195,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.eye",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "scipy.sparse.eye",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 247,
"usage_type": "name"
},
{
"api_name": "scipy.sparse.eye",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.linalg.spsolve",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.linalg",
"line_number": 265,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 279,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 282,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 283,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 285,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "math.sqrt",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 348,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 349,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 350,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 351,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 352,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 353,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 354,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 355,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 356,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 357,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 367,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 368,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 369,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 370,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 371,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 372,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 373,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 374,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 375,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 376,
"usage_type": "name"
}
] |
14188272016
|
from fastapi import FastAPI
app = FastAPI()
COLUMN_NAME = "name"
COLUMN_ID = "id"
FAKE_DB = [
{"id": 1, "name": "Vladimir"},
{"id": 2, "name": "Polina"},
{"id": 3, "name": "Aleksander"}
]
def find_friend_name(friend_id, db_name):
for row in db_name:
if row.get(COLUMN_ID) == friend_id:
return row.get(COLUMN_NAME)
return None
@app.get("/friends/{friend_id}")
async def get_friend_name(friend_id: int):
friend_name = find_friend_name(friend_id, FAKE_DB)
if friend_name is None:
return {"error": f"No such friend with id {friend_id}"}
return {"friend_name": friend_name}
@app.get("/")
async def root():
return {"message": "Hello world!"}
|
DanilaLabydin/Python-tasks-solving-practice
|
app/main.py
|
main.py
|
py
| 715 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fastapi.FastAPI",
"line_number": 4,
"usage_type": "call"
}
] |
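A minimal smoke test for the friends API in the row above, assuming the module is importable as `main` and `httpx` is installed (required by FastAPI's TestClient); the import path is illustrative:

from fastapi.testclient import TestClient

from main import app  # hypothetical import path; adjust to wherever app/main.py lives

client = TestClient(app)

# Known id -> the friend's name comes back.
assert client.get("/friends/2").json() == {"friend_name": "Polina"}

# Unknown id -> the handler falls through to the error payload.
assert client.get("/friends/99").json() == {"error": "No such friend with id 99"}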
74281365627
|
from __future__ import annotations
import re
from dataclasses import asdict, dataclass
from typing import Optional, Sized, TypeVar
import torch
import torchinfo
from accelerate.utils.random import set_seed
from torch.utils.data import DataLoader
from simpletrainer.utils.common import pretty_repr
T = TypeVar('T')
set_seed = set_seed
@dataclass
class DataInfo:
batch_size: int
batch_size_per_device: int
num_sampels: Optional[int]
num_batches_per_epoch: Optional[int]
def __repr__(self) -> str:
return pretty_repr(asdict(self), self.__class__.__name__)
def get_batch_size_from_dataloader(dataloader: DataLoader) -> int:
if dataloader.batch_size is None:
try:
return dataloader.batch_sampler.batch_size # type: ignore
except AttributeError:
raise ValueError(
                'Cannot get batch size from dataloader; `BatchSampler` with varying batch size is not supported yet.'
)
else:
return dataloader.batch_size
def get_num_samples_from_dataloader(dataloader: DataLoader) -> Optional[int]:
if isinstance(dataloader.dataset, Sized):
return len(dataloader.dataset)
elif isinstance(dataloader.sampler, Sized):
return len(dataloader.sampler)
else:
sampler = getattr(dataloader.batch_sampler, 'sampler')
if isinstance(sampler, Sized):
return len(sampler)
else:
return
def get_data_info(dataloader: DataLoader, world_size: int = 1) -> DataInfo:
num_samples = get_num_samples_from_dataloader(dataloader)
try:
num_batches_per_epoch = len(dataloader)
    except TypeError:  # e.g. an IterableDataset whose length is unknown
num_batches_per_epoch = None
batch_size_per_device = get_batch_size_from_dataloader(dataloader)
batch_size = batch_size_per_device * world_size
return DataInfo(
batch_size=batch_size,
batch_size_per_device=batch_size_per_device,
num_sampels=num_samples,
num_batches_per_epoch=num_batches_per_epoch,
)
def get_model_info(
model: torch.nn.Module,
input_data: Optional[torchinfo.torchinfo.INPUT_DATA_TYPE] = None,
device: Optional[torch.device] = None,
) -> torchinfo.ModelStatistics:
try:
model_statistics = torchinfo.summary(model, input_data=input_data, verbose=0, device=device)
except Exception:
model_statistics = torchinfo.summary(model, verbose=0, device=device)
return model_statistics
def get_parameter_id_group_map(
optimizer: torch.optim.Optimizer,
) -> dict[int, str]:
parameter_id_group_map = {}
for group, params in enumerate(optimizer.param_groups):
for param in params['params']:
parameter_id_group_map[id(param)] = str(group)
return parameter_id_group_map
def get_params_with_pattern(model: torch.nn.Module, pattern: re.Pattern):
params = []
for name, param in model.named_parameters():
if pattern.search(name):
params.append(param)
return params
def get_module_learning_rate_summary(module: torch.nn.Module, optimizer: torch.optim.Optimizer):
lr_dict: dict[str, float] = {}
names = {param: name for name, param in module.named_parameters()}
for group in optimizer.param_groups:
if 'lr' not in group:
continue
lr = group['lr']
for param in group['params']:
if param.requires_grad:
lr_dict[names[param]] = lr
else:
lr_dict[names[param]] = 0.0
return lr_dict
def get_module_parameter_summary(model: torch.nn.Module):
parameter_mean: dict[str, float] = {}
parameter_std: dict[str, float] = {}
for name, param in model.named_parameters():
if param.data.numel() > 0:
parameter_mean[name] = float(param.data.mean().item())
if param.data.numel() > 1:
parameter_std[name] = float(param.data.std().item())
return parameter_mean, parameter_std
def get_module_gradient_summary(model: torch.nn.Module):
gradient_mean: dict[str, float] = {}
gradient_std: dict[str, float] = {}
for name, param in model.named_parameters():
if param.grad is not None:
if param.grad.is_sparse:
grad_data = param.grad.data._values()
else:
grad_data = param.grad.data
# skip empty gradients
if torch.prod(torch.tensor(grad_data.shape)).item() > 0:
gradient_mean[name] = float(grad_data.mean().item())
if grad_data.numel() > 1:
gradient_std[name] = float(grad_data.std().item())
return gradient_mean, gradient_std
|
Moka-AI/simpletrainer
|
simpletrainer/utils/torch.py
|
torch.py
|
py
| 4,653 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "typing.TypeVar",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "accelerate.utils.random.set_seed",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "simpletrainer.utils.common.pretty_repr",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "dataclasses.asdict",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "typing.Sized",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "typing.Sized",
"line_number": 44,
"usage_type": "argument"
},
{
"api_name": "typing.Sized",
"line_number": 48,
"usage_type": "argument"
},
{
"api_name": "typing.Optional",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "torch.nn",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "torchinfo.torchinfo",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "torch.device",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "torchinfo.summary",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "torchinfo.summary",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "torchinfo.ModelStatistics",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "torch.optim",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "re.Pattern",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "torch.optim",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "torch.prod",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 142,
"usage_type": "call"
}
] |
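A quick sketch of `get_data_info` from the row above on a plain in-memory dataset; assumes only torch is installed, and keeps the dataclass's own (misspelled) `num_sampels` field name:

import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.randn(100, 3))
loader = DataLoader(dataset, batch_size=8)

info = get_data_info(loader, world_size=2)
# 100 samples, ceil(100 / 8) = 13 batches per epoch,
# batch size 8 per device, 8 * 2 = 16 globally.
print(info.num_sampels, info.num_batches_per_epoch, info.batch_size)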
34214358930
|
import json
# Set file paths
basePath = 'D:\\NTCIR-12_MathIR_arXiv_Corpus\\'
inputPath = basePath + "output_FeatAna\\"
index_file = 'inverse_semantic_index_formula_catalog(physics_all).json'
#basePath = 'D:\\NTCIR-12_MathIR_Wikipedia_Corpus\\'
#inputPath = basePath + "output_RE\\"
#index_file = 'inverse_semantic_index_formula_catalog(Wikipedia).json'
# Load inverse index
with open(inputPath + index_file,'r',encoding='utf8') as f:
formula_index = json.load(f)
# Load example queries
with open('../examples_list/formula_examples.json', 'r', encoding='utf8') as f:
example_queries = json.load(f)
results = {}
for example_query in example_queries:
GoldID = example_query['GoldID']
FormulaName = example_query['formula_name']
# retrieve only results that are common in all query word results
common_results = {}
for query_word in FormulaName.split():
try:
for formula in formula_index[query_word].items():
try:
common_results[formula[0]] += 1
                except KeyError:  # first occurrence of this formula
                    common_results[formula[0]] = 1
        except KeyError:  # query word absent from the inverse index
            pass
ranking = {}
for common_result in common_results.items():
if True: #common_result[1] == len(FormulaName.split()):
for query_word in FormulaName.split():
try:
ranking[common_result[0]] += formula_index[query_word][common_result[0]]
            except KeyError:  # formula not yet in the ranking dict
                try:
                    ranking[common_result[0]] = formula_index[query_word][common_result[0]]
                except KeyError:  # this query word has no entry for the formula
                    pass
result = {k: v for k, v in sorted(ranking.items(), key=lambda item: item[1],reverse=True)}
results[GoldID] = (FormulaName,result)
# output to csv
csv_list = []
csv_list.append("GoldID\tName\tFormula\t(Score,Rank)\tDCG\tnDCG\n")
for result in results.items():
# display only first hits or ranking cutoff
displayed = False
counter = 0
for formula in result[1][1].items():
if counter < 10: # True: #displayed == False:
csv_list.append(result[0] + "\t" + result[1][0] + "\t"
+ formula[0].replace("\t","").replace("\n","") + "\t\t\t\n")
displayed = True
counter += 1
with open("inverse_formula_index_results.csv", 'w', encoding='utf8') as f:
f.writelines(csv_list)
print("end")
|
pratyushshukla19/Minor-Project-2
|
semanticsearch/modes13-15/evaluate_inverse_formula_index.py
|
evaluate_inverse_formula_index.py
|
py
| 2,411 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
}
] |
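A toy illustration of the ranking step in the script above, with a hand-made inverse index (word -> {formula: score}) standing in for the JSON file:

# Scores accumulate per formula across all query words.
formula_index = {
    "kinetic": {"E = 1/2 m v^2": 3, "p = m v": 1},
    "energy":  {"E = 1/2 m v^2": 5, "E = m c^2": 4},
}

query = "kinetic energy"
ranking = {}
for word in query.split():
    for formula, score in formula_index.get(word, {}).items():
        ranking[formula] = ranking.get(formula, 0) + score

# "E = 1/2 m v^2" ranks first with 3 + 5 = 8 points.
print(sorted(ranking.items(), key=lambda kv: kv[1], reverse=True))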
41149275833
|
""" Internet Validators
- ValidateEmail
- ValidateIP
- ValidateURL
"""
import socket
import re
from email_validator import validate_email, EmailNotValidError
from flask_validator import Validator
class ValidateEmail(Validator):
""" Validate Email type.
Check if the new value is a valid e-mail.
Using this library to validate https://github.com/JoshData/python-email-validator
Args:
field: SQLAlchemy column to validate
        allow_smtputf8: (bool) Set to False to prohibit internationalized addresses that would require the SMTPUTF8 extension.
        check_deliverability: (bool) Set to False to skip the domain name resolution check.
        allow_empty_local: (bool) Set to True to allow an empty local part (i.e. @example.com),
            e.g. for validating Postfix aliases.
        allow_null: (bool) Allow null values
throw_exception: (bool) Throw a ValidateError if the validation fails
"""
allow_smtputf8 = True
check_deliverability = True
allow_empty_local = False
    def __init__(self, field, allow_smtputf8=True, check_deliverability=True, allow_empty_local=False,
allow_null=True, throw_exception=False, message=None):
self.allow_smtputf8 = allow_smtputf8
self.check_deliverability = check_deliverability
self.allow_empty_local = allow_empty_local
Validator.__init__(self, field, allow_null, throw_exception, message)
def check_value(self, value):
try:
validate_email(value,
allow_smtputf8=self.allow_smtputf8,
check_deliverability=self.check_deliverability,
allow_empty_local=self.allow_empty_local)
return True
except EmailNotValidError:
return False
class ValidateIP(Validator):
""" Validate Regex
Compare a value against a regular expresion
Args:
field: SQLAlchemy column to validate
ipv6: Match against IPV6
allow_null: (bool) Allow null values
throw_exception: (bool) Throw a ValidateError if the validation fails
"""
ipv6 = None
def __init__(self, field, ipv6=False, allow_null=True, throw_exception=False, message=None):
self.ipv6 = ipv6
Validator.__init__(self, field, allow_null, throw_exception, message)
def check_value(self, value):
try:
if not self.ipv6:
socket.inet_pton(socket.AF_INET, value)
else:
socket.inet_pton(socket.AF_INET6, value)
return True
except socket.error:
return False
class ValidateURL(Validator):
""" Validate URL
Check if the values is a valid URL
Args:
field: SQLAlchemy column to validate
allow_null: (bool) Allow null values. Default True
throw_exception: (bool) Throw a ValidateError if the validation fails
"""
regex = r'^[a-z]+://(?P<host>[^/:]+)(?P<port>:[0-9]+)?(?P<path>\/.*)?$'
def check_value(self, value):
if re.match(self.regex, value):
return True
else:
return False
|
xeBuz/Flask-Validator
|
flask_validator/constraints/internet.py
|
internet.py
|
py
| 3,190 |
python
|
en
|
code
| 28 |
github-code
|
6
|
[
{
"api_name": "flask_validator.Validator",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "flask_validator.Validator.__init__",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "flask_validator.Validator",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "email_validator.validate_email",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "email_validator.EmailNotValidError",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "flask_validator.Validator",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "flask_validator.Validator.__init__",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "flask_validator.Validator",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "socket.inet_pton",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "socket.inet_pton",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET6",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "socket.error",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "flask_validator.Validator",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "re.match",
"line_number": 101,
"usage_type": "call"
}
] |
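The ValidateURL pattern above can also be exercised on its own, outside SQLAlchemy; the test strings here are illustrative only:

import re

regex = r'^[a-z]+://(?P<host>[^/:]+)(?P<port>:[0-9]+)?(?P<path>\/.*)?$'

assert re.match(regex, "https://example.com:8080/index.html")
assert re.match(regex, "ftp://files.example.org")
assert re.match(regex, "example.com/no-scheme") is None  # a scheme is required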
5005445920
|
from __future__ import annotations
from pathlib import Path
from typing import Any, cast
import _testutils
import pytest
from lxml.html import (
HtmlElement as HtmlElement,
find_class,
find_rel_links,
iterlinks,
make_links_absolute,
parse,
resolve_base_href,
rewrite_links,
)
reveal_type = getattr(_testutils, "reveal_type_wrapper")
def test_input_content_type(h_filepath: Path) -> None:
fio = open(h_filepath, "rb")
tree = parse(h_filepath)
for bad_input in [h_filepath, fio, tree]:
with pytest.raises(
AttributeError, match="object has no attribute 'find_rel_links'"
):
_ = find_rel_links(cast(Any, bad_input), "stylesheet")
fio.close()
links = find_rel_links(str(h_filepath), "stylesheet")
reveal_type(links)
assert links == find_rel_links(tree.getroot(), "stylesheet")
assert links == find_rel_links(h_filepath.read_text(), "stylesheet")
assert links == find_rel_links(h_filepath.read_bytes(), "stylesheet")
def test_find_class(h_filepath: Path) -> None:
elems = find_class(h_filepath.read_text(), "single")
reveal_type(elems)
for e in elems:
reveal_type(e)
def test_iterlinks(h_filepath: Path) -> None:
results = iterlinks(h_filepath.read_text())
reveal_type(results)
for r in results:
assert len(r) == 4
reveal_type(r[0])
reveal_type(r[1])
reveal_type(r[2])
reveal_type(r[3])
class TestOutputType:
BASE = "http://dummy.link"
def test_make_links_absolute(self, h_filepath: Path) -> None:
in_data1 = h_filepath.read_bytes()
with pytest.raises(
TypeError, match="No base_url given, and the document has no base_url"
):
out_data1 = make_links_absolute(in_data1)
out_data1 = make_links_absolute(in_data1, self.BASE)
assert type(in_data1) == type(out_data1)
in_data2 = h_filepath.read_text()
with pytest.raises(TypeError, match="Cannot mix str and non-str"):
out_data2 = make_links_absolute(
in_data2, cast(Any, self.BASE.encode("ascii"))
)
out_data2 = make_links_absolute(in_data2, self.BASE)
assert type(in_data2) == type(out_data2)
tree = parse(h_filepath)
in_data3 = tree.getroot()
out_data3 = make_links_absolute(in_data3, self.BASE)
assert type(in_data3) == type(out_data3)
def test_resolve_base_href(self, h_filepath: Path) -> None:
in_data1 = h_filepath.read_bytes()
out_data1 = resolve_base_href(in_data1)
assert type(in_data1) == type(out_data1)
in_data2 = h_filepath.read_text()
out_data2 = resolve_base_href(in_data2)
assert type(in_data2) == type(out_data2)
tree = parse(h_filepath)
in_data3 = tree.getroot()
out_data3 = resolve_base_href(in_data3)
assert type(in_data3) == type(out_data3)
def test_rewrite_links(self, h_filepath: Path) -> None:
in_data1 = h_filepath.read_bytes()
out_data1 = rewrite_links(in_data1, lambda _: self.BASE)
assert type(in_data1) == type(out_data1)
in_data2 = h_filepath.read_text()
with pytest.raises(TypeError, match="can only concatenate str"):
out_data2 = rewrite_links(
in_data2, lambda _: cast(Any, self.BASE.encode("ASCII"))
)
out_data2 = rewrite_links(in_data2, lambda _: self.BASE)
assert type(in_data2) == type(out_data2)
tree = parse(h_filepath)
in_data3 = tree.getroot()
out_data3 = rewrite_links(in_data3, lambda _: None)
assert type(in_data3) == type(out_data3)
|
abelcheung/types-lxml
|
test-rt/test_html_link_funcs.py
|
test_html_link_funcs.py
|
py
| 3,706 |
python
|
en
|
code
| 23 |
github-code
|
6
|
[
{
"api_name": "pathlib.Path",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "lxml.html.parse",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "lxml.html.find_rel_links",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "typing.cast",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "lxml.html.find_rel_links",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "lxml.html.find_rel_links",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "lxml.html.find_rel_links",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "lxml.html.find_rel_links",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "lxml.html.find_class",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "lxml.html.iterlinks",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "pytest.raises",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "lxml.html.make_links_absolute",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "lxml.html.make_links_absolute",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "lxml.html.make_links_absolute",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "typing.cast",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_number": 71,
"usage_type": "argument"
},
{
"api_name": "lxml.html.make_links_absolute",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "lxml.html.parse",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "lxml.html.make_links_absolute",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "lxml.html.resolve_base_href",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "lxml.html.resolve_base_href",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "lxml.html.parse",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "lxml.html.resolve_base_href",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "lxml.html.rewrite_links",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "lxml.html.rewrite_links",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "typing.cast",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_number": 99,
"usage_type": "argument"
},
{
"api_name": "lxml.html.rewrite_links",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "lxml.html.parse",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "lxml.html.rewrite_links",
"line_number": 105,
"usage_type": "call"
}
] |
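A direct illustration of the behaviour the tests above assert for make_links_absolute: str in, str out, with relative links resolved against the base URL:

from lxml.html import make_links_absolute

html = '<html><body><a href="about.html">about</a></body></html>'
out = make_links_absolute(html, "http://dummy.link")

assert isinstance(out, str)
assert "http://dummy.link/about.html" in out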
77938817
|
"""
file structure:
flip_labels_and_scans.py
scan_directrory - raw scans folder
label_directrory - labels folder
save_dir_scan - flipped scans folder (where they will be saved)
save_dir_labels - flipped labels folder (where they will be saved)
This script flips nii (nifti) labels and scans along the sagittal plane. The plane the flipping occurs on can be modified by changing the transformation matrix in the flip3dlabel and flip3dscan functions.
This script assumes the following file naming conventions:
scans: "scanIdentifier_somesuffix.nii"
labels: "scanIdentifier_50um_segmentation_IE-label.nii", the suffix can be modified by altering "label_name" in the "scan_flip_iterator" function.
note that scanIdentifier should be unique.
"""
#imports
import numpy as np
import SimpleITK as sitk
import os
#specify directory of scans you would like to flip
scan_directrory = 'scan_154um'
#specify directory of where labelmaps are
label_directrory = 'lab'
#specify directory where you want to save flipped scans
save_dir_scan = 'scan_save'
#specify directory where you want to save flipped labels
save_dir_labels = 'lab_save'
def get_center(img):
"""
This function returns the physical center point of a 3d sitk image
:param img: The sitk image we are trying to find the center of
:return: The physical center point of the image
"""
width, height, depth = img.GetSize()
return img.TransformIndexToPhysicalPoint((int(np.ceil(width/2)),
int(np.ceil(height/2)),
int(np.ceil(depth/2))))
def flip3dlabel(img):
"""
    This function flips the sitk label passed to it with NN interpolation
:param img: An sitk labelmap
:return: The flipped label
"""
affineTrans = sitk.AffineTransform(3)
image_center = get_center(img)
affineTrans.SetMatrix([-1,0,0,0,1,0,0,0,1])
affineTrans.SetCenter(image_center)
flipped = sitk.Resample(img, affineTrans,sitk.sitkNearestNeighbor)
return flipped
def flip3dscan(img,lab):
"""
    This function flips the sitk image passed to it with BSpline interpolation
:param img: An sitk image
:param lab: An sitk label associated with the given image - used to maintain alignment
:return: The flipped image
"""
affineTrans = sitk.AffineTransform(3)
image_center = get_center(lab)
affineTrans.SetMatrix([-1,0,0,0,1,0,0,0,1])
affineTrans.SetCenter(image_center)
interpolator = sitk.sitkBSpline
flipped = sitk.Resample(img, img, affineTrans,
interpolator, -2000)
return flipped
def label_flip_iterator(file):
"""
This function is called each time a label is flipped. Naming and saving is done here.
:param file: filename of label
"""
prefix = file.split("_")[0] #get the sample prefix IE '1932L'
name_without_filetype = file.split(".nii")[0] #file name before the extension (.nii)
newname = name_without_filetype+"_flipped.nii"
lab = sitk.ReadImage(label_directrory+'/'+file)
flipped_lab = flip3dlabel(lab)
sitk.WriteImage(flipped_lab,save_dir_labels+"/"+newname)#labels are saved with _flipped appended to their original names
def scan_flip_iterator(file):
"""
This function is called each time a scan is flipped. Naming and saving is done here.
:param file: filename of scan
"""
prefix = file.split("_")[0] #get the scan prefix IE '1932L'
name_without_filetype = file.split(".nii")[0] #everything before the extension (.nii)
newname = name_without_filetype+"_flipped.nii"
label_name = prefix+"_50um_segmentation_IE-label_flipped.nii" #labels corresponding to scans need this naming convention following prefix
im = sitk.ReadImage(scan_directrory+"/"+file)
lab = sitk.ReadImage(save_dir_labels+'/'+label_name)
flipped_im = flip3dscan(im,lab) #flip the image with respect to its already flipped label
sitk.WriteImage(flipped_im,save_dir_scan+"/"+newname) #scans are saved with _flipped appended to their original names
dir=os.listdir(label_directrory)
for i in range(0,len(dir)): #iterate through the directory of labels
label_flip_iterator(dir[i])
dir=os.listdir(scan_directrory)
for i in range(0,len(dir)): #iterate through the directory of raw scans
scan_flip_iterator(dir[i])
|
kylerioux/python_ML_scripts
|
3d_image_preprocessing/flip_scans_and_labels.py
|
flip_scans_and_labels.py
|
py
| 4,139 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.ceil",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "SimpleITK.AffineTransform",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "SimpleITK.Resample",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "SimpleITK.sitkNearestNeighbor",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "SimpleITK.AffineTransform",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "SimpleITK.sitkBSpline",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "SimpleITK.Resample",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "SimpleITK.ReadImage",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "SimpleITK.WriteImage",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "SimpleITK.ReadImage",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "SimpleITK.ReadImage",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "SimpleITK.WriteImage",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 112,
"usage_type": "call"
}
] |
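A minimal sanity check of flip3dlabel from the script above on a synthetic volume (no files on disk), assuming default origin and unit spacing:

import numpy as np
import SimpleITK as sitk

arr = np.zeros((8, 8, 8), dtype=np.uint8)
arr[:, :, 1] = 1                   # mark the sagittal slice at x = 1
img = sitk.GetImageFromArray(arr)  # numpy (z, y, x) -> sitk (x, y, z)

out = sitk.GetArrayFromImage(flip3dlabel(img))

# flipping about the ceil-rounded centre x = 4 maps x = 1 onto x = 7
assert out[:, :, 7].all() and not out[:, :, 1].any()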
73083659707
|
import numpy as np
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
def has_converged(centers, new_centers):
return set([tuple(a) for a in centers]) == set([tuple(a) for a in new_centers])
def kmeans(X, K):
    if K == 2:
        # hand-picked start centroids from the original script (K == 2 only)
        centroids = np.array([[1.0, 1.0], [5.0, 7.0]])
    else:
        # for other K, sample K distinct points from X as start centroids
        centroids = X[np.random.choice(X.shape[0], K, replace=False)]
it = 0
while True:
it += 1
D = cdist(X, centroids)
labels = np.argmin(D, axis=1)
new_centroids = np.zeros((K, X.shape[1]))
for k in range(K):
new_centroids[k, :] = np.mean(X[labels == k, :], axis=0)
display(X, K, labels)
plt.show()
if has_converged(centroids, new_centroids):
break
centroids = new_centroids
return labels, centroids
def display(X, K, labels):
for i in range(K):
X0 = X[labels == i, :]
plt.plot(X0[:, 0], X0[:, 1], '.')
def error(X, K, labels):
    total = 0  # avoid shadowing the built-in sum
    for i in range(K):
        X0 = X[labels == i, :]
        total += np.std(X0)
    print(total / K)  # mean within-cluster standard deviation
def random_data():
for i in range(6):
mean = 200 * np.random.random_sample((1, 2))
X0 = np.random.multivariate_normal(mean[0], [[10, 0], [0, 10]], np.random.randint(20, 50))
if i == 0:
X = X0
else:
X = np.concatenate((X, X0))
return X
from sklearn.cluster import KMeans
A = np.array([[1.0, 1.5, 3.0, 5.0, 3.5, 4.5, 3.5]])
B = np.array([[1.0, 2.0, 4.0, 7.0, 5.0, 5.0, 4.5]])
X = np.append(A.T, B.T, axis=1)
# X = random_data()
for K in range(2, 10):
(labels, centroids) = kmeans(X, K)
display(X, K, labels)
plt.show()
error(X, K, labels)
cls = KMeans(n_clusters=K, random_state=0)
cls.fit(X)
lbl = cls.labels_
display(X, K, lbl)
plt.show()
|
cuongdd2/cs582
|
lab6/prob4.py
|
prob4.py
|
py
| 1,807 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.distance.cdist",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "numpy.std",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.random.random_sample",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.multivariate_normal",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
}
] |
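A toy run of the scratch kmeans above (each iteration pops a scatter plot via plt.show, so this is interactive); K = 2 keeps the hand-picked start centroids meaningful:

import numpy as np

X_toy = np.array([[1.0, 1.0], [1.2, 0.8], [5.0, 7.0], [5.2, 6.8]])
labels_toy, centroids_toy = kmeans(X_toy, 2)

# the two tight pairs land in separate clusters
assert labels_toy[0] == labels_toy[1] and labels_toy[2] == labels_toy[3]
print(centroids_toy)  # roughly [[1.1, 0.9], [5.1, 6.9]]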
74732381948
|
import numpy as np
import tensorflow as tf
import cv2
def colormap_jet(img):
color_image = cv2.applyColorMap(np.uint8(img), cv2.COLORMAP_JET)
return color_image
def color_disparity(disparity):
with tf.variable_scope('color_disparity'):
batch_size = disparity.shape[0]
color_maps = []
for i in range(batch_size):
color_disp = tf.py_func(colormap_jet, [-disparity[i]], tf.uint8)
color_maps.append(color_disp)
color_batch = tf.stack(color_maps, axis=0)
return color_batch
def count_text_lines(file_path):
f = open(file_path, 'r')
lines = f.readlines()
f.close()
return len(lines)
|
fabiotosi92/monoResMatch-Tensorflow
|
utils.py
|
utils.py
|
py
| 677 |
python
|
en
|
code
| 117 |
github-code
|
6
|
[
{
"api_name": "cv2.applyColorMap",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.COLORMAP_JET",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.variable_scope",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tensorflow.py_func",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tensorflow.uint8",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.stack",
"line_number": 18,
"usage_type": "call"
}
] |
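colormap_jet from the row above works standalone on any uint8-compatible array; a sketch with a synthetic disparity ramp (no TensorFlow session needed):

import numpy as np

ramp = np.tile(np.linspace(0, 255, 256, dtype=np.uint8), (64, 1))
color = colormap_jet(ramp)

assert color.shape == (64, 256, 3)  # BGR jet-coloured image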
14098998919
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
===================================
Timer --- Create a timer decorator.
===================================
Largely this module was simply practice on writing decorators.
Might need to review logging best practices. I don't want the logger from
this module to emit anything, but it seems tedious to place that burden
on any module that imports from here.
.. seealso::
:mod:`cProfile`
:mod:`pstats`
:mod:`timeit`
:magic:`timeit`
"""
import datetime
import functools
import logging
from os import scandir
from runpy import run_path
import time
from timeit import Timer
from IPython.core.getipython import get_ipython
# noinspection PyProtectedMember
from IPython.core.magics.execution import _format_time as format_delta
logging.basicConfig(level=logging.INFO)
def timer(func):
"""Print the runtime of the decorated function.
Utilizes `time.perf_counter`.
.. todo:: Begin using the :mod:`timeit` module.
There are more specialized ways of profiling things in
other modules; however, this works for a rough estimate.
Parameters
----------
func : function
Function to profile
    Returns
    -------
    function
        The wrapped function; its return value is passed through unchanged.
"""
@functools.wraps(func)
def wrapper_timer(*args, **kwargs):
start_time = time.perf_counter()
value = func(*args, **kwargs)
end_time = time.perf_counter()
run_time = end_time - start_time
logging.info(f"Finished {func.__name__!r} in {run_time:.4f} secs")
return value
return wrapper_timer
# class ModuleTimer()
# I mean while we're practicing decorators throw this in the mix
def debug(func):
"""Print the function signature and return value"""
@functools.wraps(func)
def wrapper_debug(*args, **kwargs):
args_repr = [repr(a) for a in args] # 1
kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()] # 2
signature = ", ".join(args_repr + kwargs_repr) # 3
print(f"Calling {func.__name__}({signature})")
value = func(*args, **kwargs)
print(f"{func.__name__!r} returned {value!r}") # 4
return value
return wrapper_debug
def exc_timer(statement, setup=None):
"""A non-decorator implementation that uses `timeit`."""
t = Timer(stmt=statement, setup=setup) # outside the try/except
try:
return t.timeit()
except Exception: # noqa E722
t.print_exc()
class ArgReparser:
"""Class decorator that echoes out the arguments a function was called with."""
def __init__(self, func):
"""Initialize the reparser with the function it wraps."""
self.func = func
def __call__(self, *args, **kwargs):
print("entering function " + self.func.__name__)
i = 0
for arg in args:
print("arg {0}: {1}".format(i, arg))
i = i + 1
return self.func(*args, **kwargs)
def time_dir(directory=None):
"""How long does it take to exec(compile(file)) every file in the startup dir?"""
if directory is None:
directory = get_ipython().startup_dir
result = []
for i in scandir("."):
if i.name.endswith(".py"):
file = i.name
print(file)
print(time.time())
start_time = time.time()
exec(compile(open(file).read(), "timer", "exec"))
end = time.time()
diff = end - start_time
print(f"{diff}")
result.append((file, diff))
return result
class LineWatcher:
"""Class that implements a basic timer.
Registers the `start` and `stop` methods with the IPython events API.
"""
def __init__(self):
"""Define the classes start_time parameter."""
self.start_time = self.start()
def start(self):
"""Return `time.time`."""
return time.time()
def __repr__(self):
return f"{self.__class__.__name__} {self.start_time}"
def stop(self):
"""Determine the difference between start time and end time."""
stop_time = time.time()
diff = abs(stop_time - self.start_time)
print("time: {}".format(format_delta(diff)))
return diff
def load_ipython_extension(ip=None, line_watcher=None):
"""Initialize a `LineWatcher` and register start and stop with IPython."""
if ip is None:
ip = get_ipython()
if ip is None:
return
if line_watcher is None:
line_watcher = LineWatcher()
ip.events.register("pre_run_cell", line_watcher.start)
ip.events.register("post_run_cell", line_watcher.stop)
def unload_ipython_extension(ip=None, line_watcher=None):
if ip is None:
ip = get_ipython()
if ip is None:
return
if line_watcher is None:
line_watcher = LineWatcher()
ip.events.unregister("pre_run_cell", line_watcher.start)
ip.events.unregister("post_run_cell", line_watcher.stop)
|
farisachugthai/dynamic_ipython
|
default_profile/util/timer.py
|
timer.py
|
py
| 5,023 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "time.perf_counter",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "timeit.Timer",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "IPython.core.getipython.get_ipython",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "os.scandir",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "IPython.core.magics.execution._format_time",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "IPython.core.getipython.get_ipython",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "IPython.core.getipython.get_ipython",
"line_number": 177,
"usage_type": "call"
}
] |
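Minimal use of the @timer decorator above; the INFO line is emitted through the module-level logging.basicConfig:

@timer
def busy(n):
    return sum(i * i for i in range(n))

busy(10**6)
# -> INFO:root:Finished 'busy' in 0.0xxx secs  (the timing varies per machine)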
26436839942
|
from PIL import Image
imgx = 512
imgy = 512
image = Image.new("RGB",(imgx,imgy))
for x in range(imgx):
for y in range(imgy):
        # (x//64) % 2 is only ever 0 or 1, so the original "== 2" tests were
        # dead code; alternating square parity gives the intended checkerboard
        if ((x // 64) + (y // 64)) % 2 == 1:
image.putpixel ((x,y), (0,0,0) )
else:
image.putpixel ((x,y), (250,0,0) )
image.save("demo_image.png", "PNG")
|
gbroady19/CS550
|
intropil.py
|
intropil.py
|
py
| 334 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PIL.Image.new",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 6,
"usage_type": "name"
}
] |
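For cross-checking the per-pixel loop above, an equivalent vectorized checkerboard with numpy (numpy is an extra assumption; the output file name is illustrative):

import numpy as np
from PIL import Image

ys, xs = np.mgrid[0:512, 0:512]
black = ((xs // 64 + ys // 64) % 2 == 1)   # same square parity as the loop
rgb = np.zeros((512, 512, 3), dtype=np.uint8)
rgb[~black] = (250, 0, 0)                  # red squares on even parity
Image.fromarray(rgb).save("demo_image_np.png", "PNG")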
13563263971
|
import asyncio
import json
from hoshino import aiorequests
apiroot = 'https://help.tencentbot.top'
async def getprofile(viewer_id: int, interval: int = 1, full: bool = False) -> dict:
reqid = json.loads(await aiorequests.get(f'{apiroot}/enqueue?full={full}&target_viewer_id={viewer_id}').content.decode('utf8'))['reqeust_id']
if reqid is None:
return "id err"
while True:
query = json.loads(await aiorequests.get(f'{apiroot}/query?request_id={reqid}').content.decode('utf8'))
status = query['status']
if status == 'done':
return query['data']
elif status == 'queue':
            await asyncio.sleep(interval)  # don't block the event loop while polling
else: # notfound or else
return "queue"
async def queryarena(defs: list, page: int) -> dict:
return json.loads(await aiorequests.get(f'{apiroot}/arena?def={",".join([str(x) for x in defs])}&page={page}').content.decode('utf8'))
|
pcrbot/arena_query_push
|
queryapi.py
|
queryapi.py
|
py
| 933 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "json.loads",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "hoshino.aiorequests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "hoshino.aiorequests",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "hoshino.aiorequests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "hoshino.aiorequests",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "hoshino.aiorequests.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "hoshino.aiorequests",
"line_number": 25,
"usage_type": "name"
}
] |
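The enqueue/poll protocol above has a generic shape that can be sketched without the hoshino stack; fake_fetch below is a hypothetical stand-in for the aiorequests call plus JSON decoding:

import asyncio

async def fake_fetch(url: str) -> dict:
    # stand-in for `aiorequests.get(url)` + json.loads; always reports done
    return {"status": "done", "data": {"viewer_id": 123}}

async def poll(reqid: str, interval: float = 1.0) -> dict:
    while True:
        query = await fake_fetch(f"https://help.tencentbot.top/query?request_id={reqid}")
        if query["status"] == "done":
            return query["data"]
        await asyncio.sleep(interval)  # yield to the event loop between polls

print(asyncio.run(poll("req-1")))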
12646834769
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
df = pd.read_csv('dividedsamples/training.csv')
dfval = pd.read_csv('dividedsamples/testing.csv')
train_features = df.copy()
test_features = dfval.copy()
train_labels = train_features.pop('price')
test_labels = test_features.pop('price')
regressor = LinearRegression()
regressor.fit(train_features, train_labels)
coeff_df = pd.DataFrame(regressor.coef_, train_features.columns, columns=['Coefficient'])
print(coeff_df)
y_pred = regressor.predict(test_features)
boi = pd.DataFrame({'Actual': test_labels, 'Predicted': y_pred})
print(boi)
|
WayneFerrao/autofocus
|
linreg.py
|
linreg.py
|
py
| 717 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 22,
"usage_type": "call"
}
] |
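A common follow-up to the actual-vs-predicted frame above is to quantify the fit; this sketch reuses test_labels and y_pred from the script:

from sklearn import metrics

print("MAE:", metrics.mean_absolute_error(test_labels, y_pred))
print("MSE:", metrics.mean_squared_error(test_labels, y_pred))
print("R^2:", metrics.r2_score(test_labels, y_pred))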
40527670685
|
from django import forms
from django.forms import TextInput, SplitDateTimeWidget
class NumberInput(TextInput):
"""
HTML5 Number input
Left for backwards compatibility
"""
input_type = 'number'
class AdminDateWidget(forms.DateInput):
@property
def media(self):
js = ["calendar.js", "admin/DateTimeShortcuts.js"]
return forms.Media(js=["admin/js/%s" % path for path in js])
def __init__(self, attrs=None, format=None):
final_attrs = {'class': 'form-control datepicker',
'size': '10', 'type': 'date'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminDateWidget, self).__init__(attrs=final_attrs, format=format)
class AdminTimeWidget(forms.TimeInput):
@property
def media(self):
js = ["calendar.js", "admin/DateTimeShortcuts.js"]
return forms.Media(js=["admin/js/%s" % path for path in js])
def __init__(self, attrs=None, format=None):
final_attrs = {'class': 'form-control timepicker',
'size': '8', 'type': 'time'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTimeWidget, self).__init__(attrs=final_attrs, format=format)
class LteAdminSplitDateTime(forms.SplitDateTimeWidget):
#template_name = 'admin/widgets/split_datetime.html'
def __init__(self, attrs=None):
widgets = [AdminDateWidget, AdminTimeWidget]
# Note that we're calling MultiWidget, not SplitDateTimeWidget, because
# we want to define widgets.
forms.MultiWidget.__init__(self, widgets, attrs)
|
ricardochaves/django-adminlte
|
adminlte/widgets.py
|
widgets.py
|
py
| 1,631 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "django.forms.TextInput",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.forms.DateInput",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.forms.Media",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.forms.TimeInput",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.forms.Media",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.forms.SplitDateTimeWidget",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "django.forms.MultiWidget.__init__",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "django.forms.MultiWidget",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 54,
"usage_type": "name"
}
] |
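A small sketch of the attrs merging in AdminDateWidget above; constructing the widget needs Django installed but no configured settings, and the placeholder value is illustrative:

w = AdminDateWidget(attrs={"placeholder": "YYYY-MM-DD"})
print(w.attrs)
# {'class': 'form-control datepicker', 'size': '10',
#  'type': 'date', 'placeholder': 'YYYY-MM-DD'}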
4041441314
|
__author__ = 'yueli'
import numpy as np
import matplotlib.pyplot as plt
from config.config import *
mrList = np.linspace(1, 13, 13)
negativeList = [-1, -1, -10, -10, -1, -1, -1, -10 ,-1, -1, -1, -10, -10]
noMapReplyList = np.linspace(0, 0, 13)
rlocSet1 = [-10, 1, 1, 1, -10, -10, 1, 1, 1, -10, 1, 1, 1]
rlocSet2 = [-10, 2, 2, 2, 2, 2, 2, 2, 2, -10, 2, 2, 2]
rlocSet3 = [-10, -10, 3, 3, -10, -10, -10, 3, -10, -10, -10, 3, 3]
plt.xlim(0.5, 13.5)
plt.ylim(-1.5, 3.5)
plt.scatter(mrList,negativeList, color = 'blue')
plt.scatter(mrList,noMapReplyList, color = 'yellow')
plt.scatter(mrList,rlocSet1, color = 'purple')
plt.scatter(mrList,rlocSet2, color = 'red')
plt.scatter(mrList,rlocSet3, color = 'green')
plt.xlabel("13 different Map Resolvers")
plt.ylabel("Responses from MRs")
plt.title("Responses from 13 MRs for EID-153.16.49.112 at liege(by MR)")
plt.xticks(mrList, ['MR1', 'MR2', 'MR3', 'MR4', 'MR5', 'MR6', 'MR7', 'MR8', 'MR9', 'MR10', 'MR11', 'MR12', 'MR13'])  # 13 labels to match the 13 tick positions
plt.yticks([-1, 0, 1, 2, 3], ['Negative Reply', 'No Map Reply', '82.121.231.67', '192.168.1.66', '132.227.85.231'])
# plt.savefig(
# os.path.join(PLOT_DIR, 'Plot_variable_MR', 'Plot_variable_MR.eps'),
# dpi=300,
# transparent=True
# )
plt.show()
|
hansomesong/TracesAnalyzer
|
Plot/Plot_variable_MR/Plot_variable_MR.py
|
Plot_variable_MR.py
|
py
| 1,248 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "numpy.linspace",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
}
] |
14400394656
|
#!/usr/bin/env python3.8
def missing_element(arr1, arr2):
arr1.sort()
arr2.sort()
for num1, num2 in zip(arr1, arr2):
if num1 != num2:
return num1
return -1
def missing_element1(arr1, arr2):
count = {}
output = []
for i in arr1:
if i in count:
count[i] += 1
else:
count[i] = 1
for i in arr2:
if i in count:
count[i] -= 1
else:
count[i] = -1
for k in count:
if count[k] > 0:
output.append(k)
return output
import collections
def missing_element2(arr1, arr2):
count = collections.defaultdict(int)
output = []
for i in arr2:
count[i] += 1
for i in arr1:
if count[i] == 0:
output.append(i)
else:
count[i] -= 1
return output
def missing_element3(arr1, arr2):
return sum(arr1) - sum(arr2)
def missing_element4(arr1, arr2):
result = 0
for num in arr1+arr2:
result ^= num
return result
arr1 = [5,5,7,7]
arr2 = [5,7,7]
print(missing_element4(arr1, arr2))  # -> 5
print(ord("A") ^ ord("A"))  # -> 0: equal values cancel under XOR
|
dnootana/Python
|
concepts/arrays/find_missing_element.py
|
find_missing_element.py
|
py
| 950 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.defaultdict",
"line_number": 36,
"usage_type": "call"
}
] |
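Why missing_element4 works: x ^ x == 0 and 0 ^ x == x, so every value that appears in both arrays cancels and only the missing one survives. Note this (like the sum-based missing_element3) assumes exactly one element is missing; missing_element1/2 handle the general case.

acc = 0
for n in [5, 5, 7, 7] + [5, 7, 7]:
    acc ^= n  # pairs cancel: 5 appears 3 times -> 5; 7 appears 4 times -> 0
print(acc)  # 5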
32644908087
|
"""Guide Eye 01 module"""
from functools import partial
from mgear.shifter.component import guide
from mgear.core import transform, pyqt
from mgear.vendor.Qt import QtWidgets, QtCore
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
from maya.app.general.mayaMixin import MayaQDockWidget
from . import settingsUI as sui
# guide info
AUTHOR = "Jeremie Passerin, Miquel Campos"
URL = ", www.miquletd.com"
EMAIL = ", "
VERSION = [1, 0, 0]
TYPE = "eye_01"
NAME = "eye"
DESCRIPTION = "eye control rig"
##########################################################
# CLASS
##########################################################
class Guide(guide.ComponentGuide):
"""Component Guide Class"""
compType = TYPE
compName = NAME
description = DESCRIPTION
author = AUTHOR
url = URL
email = EMAIL
version = VERSION
def postInit(self):
"""Initialize the position for the guide"""
self.save_transform = ["root", "look"]
def addObjects(self):
"""Add the Guide Root, blade and locators"""
# eye guide
self.root = self.addRoot()
vTemp = transform.getOffsetPosition(self.root, [0, 0, 1])
self.look = self.addLoc("look", self.root, vTemp)
centers = [self.root, self.look]
self.dispcrv = self.addDispCurve("crv", centers)
def addParameters(self):
"""Add the configurations settings"""
self.pUpVDir = self.addEnumParam(
"upVectorDirection", ["X", "Y", "Z"], 1)
self.pIkRefArray = self.addParam("ikrefarray", "string", "")
self.pUseIndex = self.addParam("useIndex", "bool", False)
self.pParentJointIndex = self.addParam(
"parentJointIndex", "long", -1, None, None)
##########################################################
# Setting Page
##########################################################
class settingsTab(QtWidgets.QDialog, sui.Ui_Form):
"""The Component settings UI"""
def __init__(self, parent=None):
super(settingsTab, self).__init__(parent)
self.setupUi(self)
class componentSettings(MayaQWidgetDockableMixin, guide.componentMainSettings):
"""Create the component setting window"""
def __init__(self, parent=None):
self.toolName = TYPE
        # Delete old instances of the component settings window.
pyqt.deleteInstances(self, MayaQDockWidget)
super(self.__class__, self).__init__(parent=parent)
self.settingsTab = settingsTab()
self.setup_componentSettingWindow()
self.create_componentControls()
self.populate_componentControls()
self.create_componentLayout()
self.create_componentConnections()
def setup_componentSettingWindow(self):
self.mayaMainWindow = pyqt.maya_main_window()
self.setObjectName(self.toolName)
self.setWindowFlags(QtCore.Qt.Window)
self.setWindowTitle(TYPE)
self.resize(350, 350)
def create_componentControls(self):
return
def populate_componentControls(self):
"""Populate Controls
Populate the controls values from the custom attributes of the
component.
"""
# populate tab
self.tabs.insertTab(1, self.settingsTab, "Component Settings")
# populate component settings
self.settingsTab.upVectorDirection_comboBox.setCurrentIndex(
self.root.attr("upVectorDirection").get())
ikRefArrayItems = self.root.attr("ikrefarray").get().split(",")
for item in ikRefArrayItems:
self.settingsTab.ikRefArray_listWidget.addItem(item)
def create_componentLayout(self):
self.settings_layout = QtWidgets.QVBoxLayout()
self.settings_layout.addWidget(self.tabs)
self.settings_layout.addWidget(self.close_button)
self.setLayout(self.settings_layout)
def create_componentConnections(self):
cBox = self.settingsTab.upVectorDirection_comboBox
cBox.currentIndexChanged.connect(
partial(self.updateComboBox,
self.settingsTab.upVectorDirection_comboBox,
"upVectorDirection"))
self.settingsTab.ikRefArrayAdd_pushButton.clicked.connect(
partial(self.addItem2listWidget,
self.settingsTab.ikRefArray_listWidget,
"ikrefarray"))
self.settingsTab.ikRefArrayRemove_pushButton.clicked.connect(
partial(self.removeSelectedFromListWidget,
self.settingsTab.ikRefArray_listWidget,
"ikrefarray"))
self.settingsTab.ikRefArray_listWidget.installEventFilter(self)
def eventFilter(self, sender, event):
if event.type() == QtCore.QEvent.ChildRemoved:
if sender == self.settingsTab.ikRefArray_listWidget:
self.updateListAttr(sender, "ikrefarray")
return True
else:
return QtWidgets.QDialog.eventFilter(self, sender, event)
def dockCloseEventTriggered(self):
pyqt.deleteInstances(self, MayaQDockWidget)
|
mgear-dev/mgear4
|
release/scripts/mgear/shifter_classic_components/eye_01/guide.py
|
guide.py
|
py
| 5,095 |
python
|
en
|
code
| 209 |
github-code
|
6
|
[
{
"api_name": "mgear.shifter.component.guide.ComponentGuide",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "mgear.shifter.component.guide",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "mgear.core.transform.getOffsetPosition",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "mgear.core.transform",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "mgear.vendor.Qt.QtWidgets.QDialog",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "mgear.vendor.Qt.QtWidgets",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "maya.app.general.mayaMixin.MayaQWidgetDockableMixin",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "mgear.shifter.component.guide.componentMainSettings",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "mgear.shifter.component.guide",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "mgear.core.pyqt.deleteInstances",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "maya.app.general.mayaMixin.MayaQDockWidget",
"line_number": 86,
"usage_type": "argument"
},
{
"api_name": "mgear.core.pyqt",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "mgear.core.pyqt.maya_main_window",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "mgear.core.pyqt",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "mgear.vendor.Qt.QtCore.Qt",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "mgear.vendor.Qt.QtCore",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "mgear.vendor.Qt.QtWidgets.QVBoxLayout",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "mgear.vendor.Qt.QtWidgets",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "mgear.vendor.Qt.QtCore.QEvent",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "mgear.vendor.Qt.QtCore",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "mgear.vendor.Qt.QtWidgets.QDialog.eventFilter",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "mgear.vendor.Qt.QtWidgets.QDialog",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "mgear.vendor.Qt.QtWidgets",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "mgear.core.pyqt.deleteInstances",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "maya.app.general.mayaMixin.MayaQDockWidget",
"line_number": 163,
"usage_type": "argument"
},
{
"api_name": "mgear.core.pyqt",
"line_number": 163,
"usage_type": "name"
}
] |
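A note on the eventFilter pattern in the guide settings above: the dialog installs itself as an event filter on the list widget, handles exactly one event type, and defers everything else to the base class. A minimal, self-contained sketch of the same pattern, written against plain PySide2 rather than mgear's vendored Qt, with a hypothetical on_child_removed callback:
from PySide2 import QtCore, QtWidgets
class FilteredDialog(QtWidgets.QDialog):
    def __init__(self, parent=None):
        super(FilteredDialog, self).__init__(parent)
        self.listWidget = QtWidgets.QListWidget(self)
        # Route the list widget's events through this dialog.
        self.listWidget.installEventFilter(self)
    def eventFilter(self, sender, event):
        if event.type() == QtCore.QEvent.ChildRemoved:
            # An item was removed from the watched widget.
            if sender == self.listWidget:
                self.on_child_removed(sender)
            return True
        # Fall through to the default dialog behavior.
        return QtWidgets.QDialog.eventFilter(self, sender, event)
    def on_child_removed(self, widget):
        print("item removed from", widget)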
3344378919
|
import logging
import sys
from loguru import logger
from starlette.config import Config
from starlette.datastructures import Secret
from app.core.logger import InterceptHandler
config = Config(".env")
API_PREFIX = "/api"
VERSION = "0.1.0"
DEBUG: bool = config("DEBUG", cast=bool, default=False)
MAX_CONNECTIONS_COUNT: int = config("MAX_CONNECTIONS_COUNT", cast=int, default=10)
MIN_CONNECTIONS_COUNT: int = config("MIN_CONNECTIONS_COUNT", cast=int, default=10)
HOST: str = config("HOST", cast=str, default="0.0.0.0")
PORT: int = config("PORT", cast=int, default=35100)
SECRET_KEY: Secret = config("SECRET_KEY", cast=Secret, default="")
PROJECT_NAME: str = config("PROJECT_NAME", default="augmentation")
# logging configuration
LOGGING_LEVEL = logging.DEBUG if DEBUG else logging.INFO
logging.basicConfig(
handlers=[InterceptHandler(level=LOGGING_LEVEL)], level=LOGGING_LEVEL
)
logger.configure(handlers=[{"sink": sys.stderr, "level": LOGGING_LEVEL}])
FASTTEXT_PATH = config("FASTTEXT_PATH", default="./model/cc.vi.300.vec")
PHOBERT_PATH = config("PHOBERT_PATH", default="./model/PhoBERT_base_fairseq")
STOPWORD_PATH = config("STOPWORD_PATH", default="./data/vietnamese-stopwords.txt")
IRRELEVANT_WORD_PATH = config("IRRELEVANT_WORD_PATH", default="./data/irrelevant_words.txt")
EDIT_DISTANCE_PATH = config("EDIT_DISTANCE_PATH", default="./data/edit_distance.txt")
MAX_CACHE_SIZE = config("MAX_CACHE_SIZE", cast=int, default=1000)
PHO_NLP_URL = config("PHO_NLP_URL", default="http://172.29.13.23:20217/")
VN_CORE_PATH = config("VN_CORE_PATH", default="http://172.29.13.23")
VN_CORE_PORT = config("VN_CORE_PORT", cast=int, default=20215)
|
hieunt2501/text-augmentation
|
app/core/config.py
|
config.py
|
py
| 1,648 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "starlette.config.Config",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "starlette.datastructures.Secret",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "logging.DEBUG",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "app.core.logger.InterceptHandler",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "loguru.logger.configure",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "sys.stderr",
"line_number": 29,
"usage_type": "attribute"
}
] |
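The config module above leans on starlette's Config lookup order: a real environment variable wins over the .env file, which wins over the supplied default, and the cast is applied to whichever string is found. A small sketch of that precedence (the .env contents here are an assumption for illustration):
import os
from starlette.config import Config
# Assume .env contains the single line:  DEBUG=true
config = Config(".env")
DEBUG = config("DEBUG", cast=bool, default=False)   # True, read from .env
PORT = config("PORT", cast=int, default=35100)      # 35100, from the default
os.environ["PORT"] = "9000"
PORT = config("PORT", cast=int, default=35100)      # 9000, the env var wins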
14712079581
|
from typing import List, Optional
from fastapi import APIRouter, Header
from fastapi.exceptions import HTTPException
from server.models.subscription import (
ExchangeKlineSubscriptionRequest,
ExchangeSubscription,
ExchangeSubscriptionType,
)
router = APIRouter()
@router.get("/")
async def list(x_connection_id: str = Header()) -> List[ExchangeSubscription]:
return await ExchangeSubscription.find(
ExchangeSubscription.type == ExchangeSubscriptionType.KLINE,
ExchangeSubscription.connection == x_connection_id,
).to_list()
@router.get("/{symbol:path}/interval/{interval:path}/")
async def retrieve(
symbol: str,
interval: str,
x_connection_id: str = Header(),
) -> Optional[ExchangeSubscription]:
try:
return await ExchangeSubscription.find_one(
ExchangeSubscription.type == ExchangeSubscriptionType.KLINE,
ExchangeSubscription.symbol == symbol,
ExchangeSubscription.interval == interval,
ExchangeSubscription.connection == x_connection_id,
)
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.get("/{symbol:path}/")
async def list_symbol(
symbol: str,
x_connection_id: str = Header(),
) -> List[ExchangeSubscription]:
try:
return await ExchangeSubscription.find(
ExchangeSubscription.type == ExchangeSubscriptionType.KLINE,
ExchangeSubscription.symbol == symbol,
ExchangeSubscription.connection == x_connection_id,
).to_list()
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.post("/")
async def create(
request: ExchangeKlineSubscriptionRequest, x_connection_id: str = Header()
) -> ExchangeSubscription:
try:
existing_subscription = await ExchangeSubscription.find_one(
ExchangeSubscription.type == ExchangeSubscriptionType.KLINE,
ExchangeSubscription.symbol == request.symbol,
ExchangeSubscription.interval == request.interval,
ExchangeSubscription.connection == x_connection_id,
)
if existing_subscription:
return existing_subscription
subscription = ExchangeSubscription(
type=ExchangeSubscriptionType.KLINE,
interval=request.interval,
connection=x_connection_id,
symbol=request.symbol,
)
return await subscription.create()
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.delete("/")
async def destroy(
request: ExchangeKlineSubscriptionRequest, x_connection_id: str = Header()
):
try:
item = await ExchangeSubscription.find_one(
ExchangeSubscription.type == ExchangeSubscriptionType.KLINE,
ExchangeSubscription.interval == request.interval,
ExchangeSubscription.connection == x_connection_id,
ExchangeSubscription.symbol == request.symbol,
)
if not item:
raise HTTPException(status_code=400, detail="subscription not found")
await ExchangeSubscription.delete(item)
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
|
masked-trader/raccoon-exchange-service
|
src/server/routes/subscription/kline.py
|
kline.py
|
py
| 3,263 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fastapi.APIRouter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "fastapi.Header",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.find",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.type",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscriptionType.KLINE",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscriptionType",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.connection",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "fastapi.Header",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.find_one",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.type",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscriptionType.KLINE",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscriptionType",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.symbol",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.interval",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.connection",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "fastapi.exceptions.HTTPException",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "fastapi.Header",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.find",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.type",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscriptionType.KLINE",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscriptionType",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.symbol",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.connection",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "fastapi.exceptions.HTTPException",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeKlineSubscriptionRequest",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "fastapi.Header",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.find_one",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.type",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscriptionType.KLINE",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscriptionType",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.symbol",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.interval",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.connection",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "server.models.subscription.ExchangeSubscriptionType.KLINE",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscriptionType",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "fastapi.exceptions.HTTPException",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeKlineSubscriptionRequest",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "fastapi.Header",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.find_one",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.type",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscriptionType.KLINE",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscriptionType",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.interval",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.connection",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.symbol",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "fastapi.exceptions.HTTPException",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "server.models.subscription.ExchangeSubscription.delete",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "server.models.subscription.ExchangeSubscription",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "fastapi.exceptions.HTTPException",
"line_number": 103,
"usage_type": "call"
}
] |
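All of the kline routes above read the connection id from an x-connection-id request header (FastAPI derives the header name from the x_connection_id parameter). A client-side sketch, assuming the router is mounted at /subscriptions/kline on a local server; the base URL and connection id are illustrative only:
import httpx
BASE = "http://localhost:8000/subscriptions/kline"   # assumed mount point
HEADERS = {"x-connection-id": "conn-123"}            # hypothetical connection id
# Create a subscription (idempotent: an existing one is returned as-is).
created = httpx.post(BASE + "/", headers=HEADERS,
                     json={"symbol": "BTCUSDT", "interval": "1m"})
created.raise_for_status()
# List every kline subscription registered for this connection.
subs = httpx.get(BASE + "/", headers=HEADERS).json()
print(subs)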
74866931066
|
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import os
class HobsHeader(object):
sim_head = '"SIMULATED EQUIVALENT"'
obs_head = '"OBSERVED VALUE"'
obs_name = '"OBSERVATION NAME"'
date = 'DATE'
dyear = 'DECIMAL_YEAR'
header = {sim_head: None,
obs_head: None,
obs_name: None,
date: None,
dyear: None}
class HobsOut(dict):
"""
Reads output data from Hobs file and prepares it for post processing.
Class sets observations to an ordered dictionary based on observation name
If the observation name is consistent for a site, a time series is created
for plotting!
Parameters
----------
filename : str
hobs filename
strip_after : str
flag to indicate a character to strip the hobs label after for
grouping wells.
Example: OBS_1
OBS_2
strip_after could be set to "_" and then all OBS observations will
be stored under the OBS key. This is extremely useful for plotting
and calculating statistics
"""
def __init__(self, filename, strip_after=""):
super(HobsOut, self).__init__()
self.name = filename
self._strip_after = strip_after
self._dataframe = None
self.__read_hobs_output()
def __read_hobs_output(self):
"""
Method to read a hobs output file. Dynamically sets header information
and reads associated values.
Sets values to HobsOut dictionary
"""
with open(self.name) as hobout:
for ix, line in enumerate(hobout):
if ix == 0:
self.__set_header(line)
else:
self.__set_dictionary_values(line)
def __set_dictionary_values(self, line):
"""
Method to set incoming hobs line to dictionary data values
Args:
line: (str)
"""
t = line.strip().split()
obsname = t[HobsHeader.header[HobsHeader.obs_name]]
dict_name = obsname
if self._strip_after:
dict_name = obsname.split(self._strip_after)[0]
simval = float(t[HobsHeader.header[HobsHeader.sim_head]])
obsval = float(t[HobsHeader.header[HobsHeader.obs_head]])
residual = simval - obsval
date = self.__set_datetime_object(t[HobsHeader.header[HobsHeader.date]])
decimal_date = float(t[HobsHeader.header[HobsHeader.dyear]])
if dict_name in self:
self[dict_name]['simval'].append(simval)
self[dict_name]['obsval'].append(obsval)
self[dict_name]['date'].append(date)
self[dict_name]['decimal_date'].append(decimal_date)
self[dict_name]['residual'].append(residual)
self[dict_name]["obsname"].append(obsname)
else:
self[dict_name] = {"obsname": [obsname], "date": [date],
"decimal_date": [decimal_date],
"simval": [simval], "obsval": [obsval],
"residual": [residual]}
def __set_header(self, line):
"""
Reads header line and sets header index
Parameters
----------
line : str
first line of the HOB file
"""
n = 0
s = ""
for i in line:
s += i
if s in HobsHeader.header:
HobsHeader.header[s] = n
n += 1
s = ""
elif s in (" ", "\t", "\n"):
s = ""
else:
pass
for key, value in HobsHeader.header.items():
if value is None:
raise AssertionError("HobsHeader headings must be updated")
def __set_datetime_object(self, s):
"""
Reformats a string of YYYY-mm-dd to a datetime object
Parameters
----------
s : str
string of YYYY-mm-dd
Returns
-------
datetime.datetime
"""
return dt.datetime.strptime(s, "%Y-%m-%d")
def __get_date_string(self, date):
"""
Parameters
----------
date: datetime.datetime object
Returns
-------
string
"""
return date.strftime("%Y/%m/%d")
@property
def obsnames(self):
"""
Return a list of obsnames from the HobsOut dictionary
"""
return self.keys()
def to_dataframe(self):
"""
Method to get a pandas dataframe object of the
HOBs data.
Returns
-------
pd.DataFrame
"""
import pandas as pd
if self._dataframe is None:
df = None
for hobsname, d in self.items():
t = pd.DataFrame(d)
if df is None:
df = t
else:
df = pd.concat([df, t], ignore_index=True)
self._dataframe = df
return self._dataframe
def get_sum_squared_errors(self, obsname):
"""
Returns the sum of squared errors from the residual
Parameters
----------
obsname : str
observation name
Returns
-------
float: sum of square error
"""
return sum([i**2 for i in self[obsname]['residual']])
def get_rmse(self, obsname):
"""
Returns the RMSE from the residual
Parameters
----------
obsname : str
observation name
Returns
-------
float: rmse
"""
return np.sqrt(np.mean([i**2 for i in self[obsname]['residual']]))
def get_number_observations(self, obsname):
"""
Returns the number of observations for an obsname
Parameters
----------
obsname : str
observation name
Returns
-------
int
"""
return len(self[obsname]['simval'])
def get_maximum_residual(self, obsname):
"""
Returns the datetime.date and maximum residual value
Parameters
----------
obsname : str
observation name
Returns
-------
tuple: (datetime.date, residual)
"""
data = self[obsname]['residual']
index = data.index(max(data))
date = self[obsname]['date'][index]
return date, max(data)
def get_minimum_residual(self, obsname):
"""
Returns the datetime.date, minimum residual value
Parameters
----------
obsname : str
observation name
Returns
-------
tuple: (datetime.date, residual)
"""
data = self[obsname]['residual']
index = data.index(min(data))
date = self[obsname]['date'][index]
return date, min(data)
def get_mean_residual(self, obsname):
"""
Returns the mean residual value
Parameters
----------
obsname : str
observation name
Returns
-------
float: mean residual
"""
data = self[obsname]['residual']
return np.mean(data)
def get_median_residual(self, obsname):
"""
Returns the median residual value
Parameters
----------
obsname : str
observation name
Returns
-------
float: median residual
"""
data = self[obsname]['residual']
return np.median(data)
def get_maximum_residual_heads(self, obsname):
"""
Returns the datetime.date, simulated, and observed
heads at the maximum residual value
Parameters
----------
obsname : str
observation name
Returns
-------
tuple: (datetime.date, simulated head, observed head)
"""
resid = self[obsname]['residual']
index = resid.index(max(resid))
observed = self[obsname]['obsval'][index]
simulated = self[obsname]['simval'][index]
date = self[obsname]['date'][index]
return date, simulated, observed
def get_minimum_residual_heads(self, obsname):
"""
Returns the datetime.date, simulated, and observed
heads at the minimum residual value
Parameters
----------
obsname : str
observation name
Returns
-------
tuple: (datetime.date, simulated head, observed head)
"""
resid = self[obsname]['residual']
index = resid.index(min(resid))
observed = self[obsname]['obsval'][index]
simulated = self[obsname]['simval'][index]
date = self[obsname]['date'][index]
return date, simulated, observed
def get_residual_bias(self, filter=None):
"""
Method to determine the bias of measurements +-
by checking the residual. Returns fraction of residuals
> 0.
Parameters
----------
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false to use
Returns
-------
(float) fraction of residuals greater than zero
"""
nobs = 0.
ngreaterzero = 0.
for obsname, meta_data in self.items():
if self.__filter(obsname, filter):
continue
residual = np.array(meta_data['residual'])
rgreaterzero = sum((residual > 0))
nobs += residual.size
ngreaterzero += rgreaterzero
try:
bias = ngreaterzero / nobs
except ZeroDivisionError:
raise ZeroDivisionError("No observations found!")
return bias
def write_dbf(self, dbfname, filter=None):
"""
Method to write a dbf file from a the HOBS dictionary
Parameters
----------
dbfname : str
dbf file name
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false for write to file
"""
import shapefile
data = []
for obsname, meta_data in self.items():
if self.__filter(obsname, filter):
continue
for ix, val in enumerate(meta_data['simval']):
data.append([obsname,
self.__get_date_string(meta_data['date'][ix]),
val,
meta_data['obsval'][ix],
meta_data['residual'][ix]])
try:
# traps for pyshp 1 vs. pyshp 2
w = shapefile.Writer(dbf=dbfname)
except Exception:
w = shapefile.Writer()
w.field("HOBSNAME", fieldType="C")
w.field("HobsDate", fieldType="D")
w.field("HeadSim", fieldType='N', decimal=8)
w.field("HeadObs", fieldType="N", decimal=8)
w.field("Residual", fieldType="N", decimal=8)
for rec in data:
w.record(*rec)
try:
w.save(dbf=dbfname)
except AttributeError:
w.close()
def write_min_max_residual_dbf(self, dbfname, filter=None):
"""
Method to write a dbf of transient observations
using observation statistics
Parameters
----------
dbfname : str
dbf file name
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false for write to file
"""
import shapefile
data = []
for obsname, meta_data in self.items():
if self.__filter(obsname, filter):
continue
max_date, resid_max = self.get_maximum_residual(obsname)
min_date, resid_min = self.get_minimum_residual(obsname)
simval_max, obsval_max = self.get_maximum_residual_heads(obsname)[1:]
simval_min, obsval_min = self.get_minimum_residual_heads(obsname)[1:]
data.append([obsname,
self.get_number_observations(obsname),
self.__get_date_string(max_date), resid_max,
self.__get_date_string(min_date), resid_min,
simval_max, obsval_max, simval_min, obsval_min])
try:
# traps for pyshp 1 vs. pyshp 2
w = shapefile.Writer(dbf=dbfname)
except Exception:
w = shapefile.Writer()
w.field("HOBSNAME", fieldType="C")
w.field("FREQUENCY", fieldType="N")
w.field("MaxDate", fieldType="C")
w.field("MaxResid", fieldType='N', decimal=8)
w.field("MinDate", fieldType="C", decimal=8)
w.field("MinResid", fieldType="N", decimal=8)
w.field("MaxHeadSim", fieldType="N", decimal=8)
w.field("MaxHeadObs", fieldType="N", decimal=8)
w.field("MinHeadSim", fieldType="N", decimal=8)
w.field("MinHeadObs", fieldType="N", decimal=8)
for rec in data:
w.record(*rec)
try:
w.save(dbf=dbfname)
except AttributeError:
w.close()
def __filter(self, obsname, filter):
"""
Boolean filtering method; checks whether the observation name
is in the filter.
Parameters
----------
obsname : str
observation name
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false for write to file
Returns
-------
bool: True if obsname in filter
"""
if filter is None:
return False
elif isinstance(filter, list) or isinstance(filter, tuple):
if obsname in filter:
return True
elif isinstance(filter, str):
if obsname == filter:
return True
elif callable(filter):
if filter(obsname):
return True
else:
raise Exception("Filter is not an appropriate type")
return False
def write_summary_statistics_csv(self, csvname, filter=None):
"""
Method to write summary calibration statistics to a
CSV file for analysis and reports
Parameters
----------
csvname : str
csv file name
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false for write to file
"""
data = []
header = ["Well name", "Average", "Median",
"Minimum", "Maximum", "RMSE ft", "Frequency"]
for obsname, meta_data in sorted(self.items()):
if self.__filter(obsname, filter):
continue
resid_mean = self.get_mean_residual(obsname)
resid_median = self.get_median_residual(obsname)
resid_max = self.get_maximum_residual(obsname)[-1]
resid_min = self.get_minimum_residual(obsname)[-1]
rmse = self.get_rmse(obsname)
frequency = self.get_number_observations(obsname)
data.append((obsname, resid_mean, resid_median,
resid_min, resid_max, rmse, frequency))
data = np.array(data, dtype=[('id', 'O'), ('mean', float),
('med', float), ('min', float),
('max', float), ('rmse', float),
('num', int)])
with open(csvname, "w") as foo:
foo.write(",".join(header) + "\n")
np.savetxt(foo, data, fmt="%15s,%.2f,%.2f,%.2f,%.2f,%.2f,%d")
def plot(self, obsname, *args, **kwargs):
"""
Plotting functionality from the hobs dictionary
Parameters
----------
obsname: str
hobs package observation name
*args: matplotlib args
**kwargs: matplotlib kwargs
Returns
-------
matplotlib.pyplot.axes object
"""
simulated = True
if "observed" in kwargs:
simulated = False
kwargs.pop('observed')
observed = True
if "simulated" in kwargs:
observed = False
kwargs.pop('simulated')
if obsname not in self:
raise AssertionError("Obsname {}: not valid".format(obsname))
axes = False
if 'ax' in kwargs:
ax = kwargs.pop('ax')
axes = True
if not axes:
ax = plt.subplot(111)
obsval = self[obsname]['obsval']
simval = self[obsname]['simval']
date = self[obsname]['date']
if observed:
kwargs['label'] = "Observed"
kwargs['color'] = 'r'
ax.plot(date, obsval, *args, **kwargs)
if simulated:
kwargs['label'] = "Simulated"
kwargs['color'] = 'b'
ax.plot(date, simval, *args, **kwargs)
return ax
def plot_measured_vs_simulated(self, filter=None, **kwargs):
"""
Plots measured vs. simulated data along a 1:1 profile.
Parameters
----------
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false for write to file
**kwargs: matplotlib.pyplot plotting kwargs
Returns
-------
matplotlib.pyplot.axes object
"""
axes = plt.subplot(111)
for obsname, meta_data in self.items():
if self.__filter(obsname, filter):
continue
simulated = meta_data['simval']
observed = meta_data['obsval']
axes.plot(observed, simulated, 'bo', markeredgecolor='k')
return axes
def plot_simulated_vs_residual(self, filter=None,
histogram=False, **kwargs):
"""
Creates a matplotlib plot of simulated heads vs residual
Parameters
----------
filter: (str, list, tuple, or function)
filtering criteria for writing statistics.
Function must return True for filter out, false for write to file
histogram: (bool)
Boolean variable that defines either a scatter plot (False)
or a histogram (True) of residuals
**kwargs: matplotlib.pyplot plotting kwargs
Returns
-------
matplotlib.pyplot.axes object
"""
axes = plt.subplot(111)
if not histogram:
for obsname, meta_data in self.items():
if self.__filter(obsname, filter):
continue
residual = meta_data['residual']
observed = meta_data['obsval']
axes.plot(observed, residual, 'bo', markeredgecolor="k")
else:
bins = np.arange(-25, 26, 5)
d = {}
for ix, abin in enumerate(bins):
frequency = 0
for obsname, meta_data in self.items():
if self.__filter(obsname, filter):
continue
for residual in meta_data['residual']:
if ix == 0:
if residual < abin:
frequency += 1
elif ix == (len(bins) - 1):
if residual > abin:
frequency += 1
else:
if bins[ix - 1] <= residual < abin:
frequency += 1
if ix == 0:
name = "Less than {}".format(abin)
elif ix == (len(bins) - 1):
name = "Greater than {}".format(abin)
else:
name = "{} to {}".format(bins[ix - 1] + 1, abin)
d[ix + 1] = {'name': name,
'frequency': frequency}
tick_num = []
tick_name = []
for index, meta_data in sorted(d.items()):
axes.bar(index, meta_data['frequency'], width=0.8,
**kwargs)
tick_num.append(index)
tick_name.append(meta_data['name'])
plt.xticks(tick_num, tick_name, rotation=45, fontsize=10)
plt.xlim([0.5, len(tick_num) + 1])
plt.subplots_adjust(left=0.12, bottom=0.22,
right=0.90, top=0.90,
wspace=0.20, hspace=0.20)
plt.ylabel("Frequency")
return axes
if __name__ == "__main__":
ws = r'C:\Users\jlarsen\Desktop\Lucerne\Lucerne_OWHM\V0_initial_from_MODOPTIM\output'
hobs_name = "hobs.out"
tmp = HobsOut(os.path.join(ws, hobs_name))
tmp.plot("04N01W01R04S", "o-")
plt.legend(loc=0, numpoints=1)
plt.show()
print('break')
|
jlarsen-usgs/HydrographTools
|
hobs_output.py
|
hobs_output.py
|
py
| 22,180 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.strptime",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "shapefile.Writer",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "shapefile.Writer",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "shapefile.Writer",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "shapefile.Writer",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 553,
"usage_type": "call"
},
{
"api_name": "numpy.int",
"line_number": 556,
"usage_type": "attribute"
},
{
"api_name": "numpy.savetxt",
"line_number": 560,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 597,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 597,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 633,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 633,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 668,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 668,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 682,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 724,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 724,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 725,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 725,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots_adjust",
"line_number": 726,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 726,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 729,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 729,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 738,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 738,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 740,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 740,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 741,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 741,
"usage_type": "name"
}
] |
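HobsOut keys everything by the (optionally stripped) observation name, so the statistics, export, and plotting methods all hang off the same dictionary. A short usage sketch, assuming a MODFLOW HOB output file hobs.out in the working directory and a hypothetical observation group OBS:
import matplotlib.pyplot as plt
# Group transient observations OBS_1, OBS_2, ... under the single key "OBS".
hobs = HobsOut("hobs.out", strip_after="_")
for name in hobs.obsnames:
    print(name, "rmse:", hobs.get_rmse(name),
          "n:", hobs.get_number_observations(name))
ax = hobs.plot("OBS", "o-")
plt.legend(loc=0, numpoints=1)
plt.show()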
18660136090
|
import os
import sys
try:
from dreamberd import interprete
except ModuleNotFoundError:
sys.exit("Use -m keyword.")
from argparse import ArgumentParser
parser = ArgumentParser(
prog="DreamBerd Interpreter (Python)",
description="The perfect programming language.",
)
parser.add_argument("content", help="The file or code to run.")
args = parser.parse_args()
if os.path.exists(args.content):
with open(args.content, "r", encoding="utf-8") as file:
content: str = file.read()
else:
content = args.content
interprete(content)
|
AWeirdScratcher/dreamberd-interpreter
|
dreamberd/__main__.py
|
__main__.py
|
py
| 559 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.exit",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "dreamberd.interprete",
"line_number": 24,
"usage_type": "call"
}
] |
10233608865
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING
from twitchio import User, PartialUser, Chatter, PartialChatter, Channel, Clip
from .errors import BadArgument
if TYPE_CHECKING:
from .core import Context
__all__ = (
"convert_Chatter",
"convert_Clip",
"convert_Channel",
"convert_PartialChatter",
"convert_PartialUser",
"convert_User",
)
async def convert_Chatter(ctx: Context, arg: str) -> Chatter:
"""
Converts the argument into a chatter in the chat. If the chatter is not found, BadArgument is raised.
"""
arg = arg.lstrip("@")
resp = [x for x in filter(lambda c: c.name == arg, ctx.chatters or tuple())]
if not resp:
raise BadArgument(f"The user '{arg}' was not found in {ctx.channel.name}'s chat.")
return resp[0]
async def convert_PartialChatter(ctx: Context, arg: str) -> PartialChatter:
"""
Converts the argument into a chatter in the chat. As opposed to Chatter converter, this will return a PartialChatter regardless of the cache state.
"""
return PartialChatter(ctx._ws, name=arg.lstrip("@"), channel=ctx.channel, message=None)
async def convert_Clip(ctx: Context, arg: str) -> Clip:
finder = re.search(r"(https://clips.twitch.tv/)?(?P<slug>.*)", arg)
if not finder:
raise RuntimeError(
"regex failed to match"
) # this should never ever raise, but its here to make type checkers happy
slug = finder.group("slug")
clips = await ctx.bot.fetch_clips([slug])
if not clips:
raise BadArgument(f"Clip '{slug}' was not found")
return clips[0]
async def convert_User(ctx: Context, arg: str) -> User:
"""
Similar to convert_Chatter, but fetches from the twitch API instead,
returning a :class:`twitchio.User` instead of a :class:`twitchio.Chatter`.
To use this, you must have a valid client id and API token or client secret
"""
arg = arg.lstrip("@")
user = await ctx.bot.fetch_users(names=[arg])
if not user:
raise BadArgument(f"User '{arg}' was not found.")
return user[0]
async def convert_PartialUser(ctx: Context, arg: str) -> User:
"""
This is simply a shorthand to :ref:`~convert_User`, as fetching from the api will return a full user model
"""
return await convert_User(ctx, arg)
async def convert_Channel(ctx: Context, arg: str) -> Channel:
if arg not in ctx.bot._connection._cache:
raise BadArgument(f"Not connected to channel '{arg}'")
return ctx.bot.get_channel(arg)
_mapping = {
User: convert_User,
PartialUser: convert_PartialUser,
Channel: convert_Channel,
Chatter: convert_Chatter,
PartialChatter: convert_PartialChatter,
Clip: convert_Clip,
}
|
PythonistaGuild/TwitchIO
|
twitchio/ext/commands/builtin_converter.py
|
builtin_converter.py
|
py
| 2,755 |
python
|
en
|
code
| 714 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "core.Context",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "errors.BadArgument",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "twitchio.Chatter",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "core.Context",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "twitchio.PartialChatter",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "twitchio.PartialChatter",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "core.Context",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "re.search",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "errors.BadArgument",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "twitchio.Clip",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "core.Context",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "errors.BadArgument",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "twitchio.User",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "core.Context",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "twitchio.User",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "core.Context",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "errors.BadArgument",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "twitchio.Channel",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "twitchio.User",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "twitchio.PartialUser",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "twitchio.Channel",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "twitchio.Chatter",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "twitchio.PartialChatter",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "twitchio.Clip",
"line_number": 89,
"usage_type": "name"
}
] |
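The _mapping dict at the end is what lets a command framework choose a converter from a parameter's type annotation. A framework-agnostic sketch of that dispatch using inspect (the converters here are stand-ins, not the twitchio ones):
import asyncio
import inspect
async def convert_int(ctx, arg: str) -> int:
    return int(arg)
async def convert_upper(ctx, arg: str) -> str:
    return arg.upper()
_mapping = {int: convert_int, str: convert_upper}
async def invoke(func, ctx, *raw_args):
    # Skip the ctx parameter, then convert each argument by its annotation.
    params = list(inspect.signature(func).parameters.values())[1:]
    converted = []
    for param, raw in zip(params, raw_args):
        converter = _mapping.get(param.annotation)
        converted.append(await converter(ctx, raw) if converter else raw)
    return await func(ctx, *converted)
async def greet(ctx, times: int, name: str):
    print(name * times)
asyncio.run(invoke(greet, None, "2", "hi"))   # prints "HIHI"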
27094902824
|
from typing import Any, Callable, Dict
from torchvision import transforms as T
from rikai.types.vision import Image
"""
Adapted from https://github.com/pytorch/pytorch.github.io/blob/site/assets/hub/pytorch_vision_resnet.ipynb
""" # noqa E501
def pre_processing(options: Dict[str, Any]) -> Callable:
"""
All pre-trained models expect input images normalized in the same way, i.e.
mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W
are expected to be at least 224. The images have to be loaded in to a range
of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std =
[0.229, 0.224, 0.225].
"""
return T.Compose(
[
T.Resize(256),
T.CenterCrop(224),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
def post_processing(options: Dict[str, Any]) -> Callable:
def post_process_func(batch):
results = []
for result in batch:
results.append(result.detach().cpu().tolist())
return results
return post_process_func
OUTPUT_SCHEMA = "array<float>"
|
World-shi/rikai
|
python/rikai/contrib/torchhub/pytorch/vision/resnet.py
|
resnet.py
|
py
| 1,166 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "typing.Dict",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.CenterCrop",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 30,
"usage_type": "name"
}
] |
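pre_processing above is the standard ImageNet recipe, so it can be exercised on any RGB image without the rest of the rikai stack. A quick sketch, assuming a local test image dog.jpg (the options dict is unused by these functions):
from PIL import Image
transform = pre_processing({})
img = Image.open("dog.jpg").convert("RGB")    # assumed test image
tensor = transform(img)
print(tensor.shape)                           # torch.Size([3, 224, 224])
batch = tensor.unsqueeze(0)                   # models expect (N, 3, H, W)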
3574016217
|
"""Python module for common workflows and library methods.
Authors: Prasad Hegde
"""
import os
import json
import pathlib
import inspect
import random
import string
class Workflows():
"""
Common Workflows and library methods
"""
def get_config_data(self, test_method):
"""
This routine returns the config data specific to the test case
:param test_method: Name of the test method
:return: tuple containing global_config and test_args
"""
path = pathlib.Path(inspect.getfile(self.__class__)).parent.absolute()
config_path = os.path.join(path, "config.json")
with open(config_path) as f_in:
config_data = json.load(f_in)
return config_data["global_config"], config_data["test_args"][self.__class__.__name__]\
[test_method]
@staticmethod
def generate_new_email(length=16, suffix=None):
"""
This routine generates a new email id
:param length: Length of the email(int)
:param suffix: domain(str)
:return: email id (str)
"""
retval = ''.join(random.choice(string.ascii_lowercase + string.digits) \
for i in range(length))
return retval + suffix if suffix else retval
@staticmethod
def verify_response_header(expected_header, actual_header):
"""
This routine is used to validate expected response header against actual
:param expected_header: dict
:param actual_header: dict
:return: Boolean
"""
if not any(item in actual_header.items() for item in expected_header.items()):
return False
return True
@staticmethod
def verify_response_time(expected_response_time, actual_response_time):
"""
This routine is used to verify the response time of an API call
:param expected_response_time: maximum allowed response time (sec)
:param actual_response_time: actual response time (sec)
:return: Boolean
"""
if actual_response_time <= expected_response_time:
return True
return False
@staticmethod
def update_user_details(test_args, **kwargs):
"""
This Routine is used to build user details
:param test_args: test args of the test method
:param kwargs: first_name, last_name, dob, image_url, email_id
:return: user data (dict)
"""
first_name = kwargs.get('first_name', test_args["updated_user_details"]["first_name"])
last_name = kwargs.get('last_name', test_args["updated_user_details"]["last_name"])
dob = kwargs.get('dob', test_args["updated_user_details"]["dob"])
image_url = kwargs.get('image_url', test_args["updated_user_details"]["image_url"])
email = kwargs.get('email_id', None)
user_data = {"first_name": first_name, "last_name": last_name, "date_of_birth": dob,
"image_url": image_url}
if email:
user_data["email"] = email
return user_data
|
prasadhegde60/showoff.ie
|
workflows/workflows.py
|
workflows.py
|
py
| 3,021 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pathlib.Path",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "inspect.getfile",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "string.ascii_lowercase",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "string.digits",
"line_number": 39,
"usage_type": "attribute"
}
] |
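A brief sketch of the helpers above in isolation (all values are chosen for illustration):
wf = Workflows()
email = wf.generate_new_email(length=8, suffix="@example.com")
print(email)    # e.g. 'k3v9x0qa@example.com'
print(wf.verify_response_time(2.0, 0.35))    # True: within the 2 s budget
print(wf.verify_response_header(
    {"Content-Type": "application/json"},
    {"Content-Type": "application/json", "Server": "nginx"}))   # True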
23515271400
|
# First Solution: 58476KB / 200ms / 674B
def BS(array,start,end):
while start<=end:
mid = (start+end)//2
if array[mid][1] == 1 and array[mid-1][1]==2: return mid
elif array[mid][1] == 2: start = mid+1
else: end = mid-1
return None
def Solution(data):
data = sorted(data.items(), key=lambda x:(-x[1],x[0]))
midpoint = BS(data,0,N+M-1)
if midpoint is None:
print(0)
else:
print(midpoint)
stdout.write('\n'.join(map(str,dict(data[:midpoint]).keys())))
from sys import stdin,stdout
from collections import Counter
N, M = map(int, stdin.readline().split())
data = Counter([stdin.readline().rstrip() for _ in range(N+M)])
Solution(data)
# ---------------------------------------------------------
# More Advanced Solution: 41884KB / 124ms / 272B
from sys import stdin,stdout
N, M = map(int, stdin.readline().split())
hear = set([stdin.readline().rstrip() for _ in range(N)])
see = set([stdin.readline().rstrip() for _ in range(M)])
common = sorted(list(hear & see))
print(len(common))
stdout.write('\n'.join(common))
|
Soohee410/Algorithm-in-Python
|
BOJ/Silver/1764.py
|
1764.py
|
py
| 1,094 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "sys.stdin.readline",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "collections.Counter",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.stdin.readline",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "sys.stdin.readline",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "sys.stdin.readline",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "sys.stdin.readline",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "sys.stdout.write",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 39,
"usage_type": "name"
}
] |
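The second solution is faster because the set intersection runs in O(N+M) on average, while the first pays for a full sort plus a binary search. The same idea on a small example in the style of the problem's sample input:
hear = {"ohhenrie", "charlie", "baesangwook"}
see = {"obama", "baesangwook", "ohhenrie", "clinton"}
common = sorted(hear & see)
print(len(common))          # 2
print("\n".join(common))    # baesangwook, then ohhenrie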
38703775254
|
import json
def getAdminAccount():
with open("./Data/admins.json", "r") as file:
JSON = file.read()
accounts = json.loads(JSON)
return accounts
def getAccount():
with open("./Data/accounts.json", "r") as file:
JSON = file.read()
accounts = json.loads(JSON)
return accounts
|
Coincoin008/DrawPlz-localhost-version-
|
getAccounts.py
|
getAccounts.py
|
py
| 333 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.loads",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 18,
"usage_type": "call"
}
] |
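The read-then-loads pair in both functions can be collapsed with json.load, which parses straight from the file object; a sketch of the equivalent shorter form:
import json
def get_accounts(path="./Data/accounts.json"):
    with open(path, "r") as file:
        return json.load(file)   # parses directly from the file object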
72067617789
|
import math as ma
import multiprocessing as mp
import time
def first_test(n):
""" test naïf de primalité
retourne True si l'entier est premier, et inversement
n : un entier naturel
"""
for a in range(2, int(ma.sqrt(n) + 1)):
if n % a == 0:
return False
return True
def pi(x):
"""retourne le nombre de premiers inférieurs à x
X : un réel
"""
cpt = 0
for n in range(1, int(x)):
if first_test(n):
cpt += 1
return cpt
def gen_carmichael(t):
"""retourne tous les nombres de Carmichael inférieurs à x
t : un réel
"""
res = []
for x in range(3, int(t), 2):  # Carmichael numbers are odd
valid = False
for y in range(2, x):
if ma.gcd(x, y) == 1:
if pow(y, x-1, x) != 1:
valid = False
break
else:
valid = True
if valid:
res.append(x)
return res
def worker_proc(x):
valid = False
for y in range(2, x):
if ma.gcd(x, y) == 1:
if pow(y, x - 1, x) != 1:
return
else:
valid = True
if valid:
print(x)
def gen_carmichael_mp(t):
"""retourne tous les nombres de Carmichael inférieurs à t
version multiprocess
t : un réel
"""
pool = mp.Pool(processes=mp.cpu_count())
pool.map(worker_proc, range(3, int(t), 2))
def gen_carmichael_3(k):
""" genère les nombres de Carmicheal de longueur binaire k à partir de trois diviseurs premiers
k : un entier
"""
# t = int(t)
prime = []
for n in range(3, 2 ** k, 2):
if first_test(n):
prime.append(n)
res = []
for i_a, a in enumerate(prime):
for i_b, b in enumerate(prime[:i_a]):
ab = a * b
for c in prime[:i_b]:
# we have three primes; test whether their product is a Carmichael number
# worker_proc(a * b * c)
tst = ab * c - 1
if tst.bit_length() != k:
continue
if tst % 2 == 0 and tst % (a - 1) == 0 and tst % (b - 1) == 0 and tst % (c - 1) == 0 and a % (b * c) != 0:
res.append(tst + 1)
return sorted(res)
def gen_carmichael_3_all(t):
""" genère un nombre de Carmicheal inférieur t à partir de trois diviseurs premiers
version sans contrainte de taille
"""
t = int(t)
prime = []
for n in range(3, t, 2):
if first_test(n):
prime.append(n)
res = []
for i_a, a in enumerate(prime):
for i_b, b in enumerate(prime[:i_a]):
ab = a * b
for c in prime[:i_b]:
tst = ab * c - 1
if tst % 2 == 0 and tst % (a - 1) == 0 and tst % (b - 1) == 0 and tst % (c - 1) == 0 and a % (b * c) != 0:
res.append(tst + 1)
return sorted(res)
def gen_carmichael_2(p):
"""retourne tous les nombre de carmichael de la forme pqr pour un p donné"""
prime = []
for n in range(3, 2 * p * (p ** 2 + 1), 2):
if n == p:
continue
if first_test(n):
prime.append(n)
res = []
for i_r, r in enumerate(prime):
for q in prime[:i_r]:
tst = p * q * r - 1
if tst % 2 == 0 and tst % (p - 1) == 0 and tst % (q - 1) == 0 and tst % (r - 1) == 0 and r % (p * q) != 0:
res.append(tst + 1)
return sorted(res)
if __name__ == '__main__':
t = time.time()
gen_carmichael_mp(10000)
print("mt : ", str(time.time() - t))
t = time.time()
print(gen_carmichael(64000))
print("naif : ", str(time.time() - t))
t = time.time()
print(gen_carmichael_3_all(100))
print("3 : ", str(time.time() - t))
|
BasileLewan/ProjetCOMPLEX
|
Ex2.py
|
Ex2.py
|
py
| 3,830 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "math.sqrt",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "math.gcd",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "math.gcd",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "multiprocessing.cpu_count",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 139,
"usage_type": "call"
}
] |
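The generators above test Fermat's condition base by base; Korselt's criterion is the equivalent closed form (n is Carmichael iff n is composite, squarefree, and p - 1 divides n - 1 for every prime p dividing n), which makes a much cheaper check. A short sketch:
def is_carmichael(n):
    # Korselt's criterion: n must be composite, squarefree, and
    # (p - 1) | (n - 1) for every prime factor p of n.
    if n < 3 or n % 2 == 0:
        return False              # Carmichael numbers are odd
    m, p, factors = n, 2, []
    while p * p <= m:
        if m % p == 0:
            m //= p
            if m % p == 0:
                return False      # square factor -> not squarefree
            factors.append(p)
        else:
            p += 1
    if m > 1:
        factors.append(m)
    if len(factors) < 2:
        return False              # prime, not composite
    return all((n - 1) % (p - 1) == 0 for p in factors)
print([n for n in range(3, 2000) if is_carmichael(n)])   # [561, 1105, 1729]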
650432757
|
import os
import luigi
import json
import z5py
import numpy as np
from ..cluster_tasks import WorkflowBase
from ..relabel import RelabelWorkflow
from ..relabel import find_uniques as unique_tasks
from ..node_labels import NodeLabelWorkflow
from ..features import RegionFeaturesWorkflow
from .. import write as write_tasks
from . import size_filter_blocks as size_filter_tasks
from . import background_size_filter as bg_tasks
from . import filling_size_filter as filling_tasks
from . import filter_blocks as filter_tasks
from . import id_filter as id_tasks
from . import orphan_assignments as orphan_tasks
from . import graph_watershed_assignments as gws_tasks
from . import graph_connected_components as cc_tasks
class SizeFilterWorkflow(WorkflowBase):
input_path = luigi.Parameter()
input_key = luigi.Parameter()
output_path = luigi.Parameter()
output_key = luigi.Parameter()
size_threshold = luigi.IntParameter()
hmap_path = luigi.Parameter(default='')
hmap_key = luigi.Parameter(default='')
relabel = luigi.BoolParameter(default=True)
preserve_zeros = luigi.BoolParameter(default=False)
def _bg_filter(self, dep):
filter_task = getattr(bg_tasks,
self._get_task_name('BackgroundSizeFilter'))
dep = filter_task(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
input_path=self.input_path,
input_key=self.input_key,
output_path=self.output_path,
output_key=self.output_key,
dependency=dep)
return dep
def _ws_filter(self, dep):
filter_task = getattr(filling_tasks,
self._get_task_name('FillingSizeFilter'))
dep = filter_task(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
input_path=self.input_path,
input_key=self.input_key,
output_path=self.output_path,
output_key=self.output_key,
hmap_path=self.hmap_path,
hmap_key=self.hmap_key,
preserve_zeros=self.preserve_zeros,
dependency=dep)
return dep
def requires(self):
un_task = getattr(unique_tasks,
self._get_task_name('FindUniques'))
dep = un_task(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
input_path=self.input_path,
input_key=self.input_key,
return_counts=True,
dependency=self.dependency,
prefix='size_filter')
sf_task = getattr(size_filter_tasks,
self._get_task_name('SizeFilterBlocks'))
dep = sf_task(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
input_path=self.input_path,
input_key=self.input_key,
size_threshold=self.size_threshold,
dependency=dep)
if self.hmap_path == '' or self.hmap_path is None:
assert self.hmap_key == '' or self.hmap_key is None
dep = self._bg_filter(dep)
else:
assert self.hmap_key != ''
dep = self._ws_filter(dep)
if self.relabel:
dep = RelabelWorkflow(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
target=self.target,
input_path=self.output_path,
input_key=self.output_key,
assignment_path=self.output_path,
assignment_key='relabel_size_filter',
prefix='size_filter',
dependency=dep)
return dep
@staticmethod
def get_config():
configs = super(SizeFilterWorkflow, SizeFilterWorkflow).get_config()
configs.update({'size_filter_blocks': size_filter_tasks.SizeFilterBlocksLocal.default_task_config(),
'background_size_filter': bg_tasks.BackgroundSizeFilterLocal.default_task_config(),
'filling_size_filter': filling_tasks.FillingSizeFilterLocal.default_task_config(),
**RelabelWorkflow.get_config()})
return configs
class FilterLabelsWorkflow(WorkflowBase):
input_path = luigi.Parameter()
input_key = luigi.Parameter()
label_path = luigi.Parameter()
label_key = luigi.Parameter()
node_label_path = luigi.Parameter()
node_label_key = luigi.Parameter()
output_path = luigi.Parameter()
output_key = luigi.Parameter()
filter_labels = luigi.ListParameter()
def requires(self):
dep = NodeLabelWorkflow(tmp_folder=self.tmp_folder, config_dir=self.config_dir,
target=self.target, max_jobs=self.max_jobs,
ws_path=self.input_path, ws_key=self.input_key,
input_path=self.label_path, input_key=self.label_key,
output_path=self.node_label_path,
output_key=self.node_label_key,
prefix='filter_labels', max_overlap=True,
dependency=self.dependency)
id_task = getattr(id_tasks,
self._get_task_name('IdFilter'))
id_filter_path = os.path.join(self.output_path, 'filtered_ids.json')
dep = id_task(tmp_folder=self.tmp_folder, config_dir=self.config_dir,
dependency=dep, max_jobs=self.max_jobs,
node_label_path=self.node_label_path,
node_label_key=self.node_label_key,
output_path=id_filter_path,
filter_labels=self.filter_labels)
filter_task = getattr(filter_tasks,
self._get_task_name('FilterBlocks'))
dep = filter_task(tmp_folder=self.tmp_folder, config_dir=self.config_dir,
dependency=dep, max_jobs=self.max_jobs,
input_path=self.input_path, input_key=self.input_key,
filter_path=id_filter_path,
output_path=self.output_path, output_key=self.output_key)
return dep
@staticmethod
def get_config():
configs = super(FilterLabelsWorkflow, FilterLabelsWorkflow).get_config()
configs.update({'id_filter':
id_tasks.IdFilterLocal.default_task_config(),
'filter_blocks':
filter_tasks.FilterBlocksLocal.default_task_config(),
**NodeLabelWorkflow.get_config()})
return configs
class ApplyThreshold(luigi.Task):
feature_path = luigi.Parameter()
feature_key = luigi.Parameter()
out_path = luigi.Parameter()
threshold = luigi.FloatParameter()
threshold_mode = luigi.Parameter(default='less')
dependency = luigi.TaskParameter()
threshold_modes = ('less', 'greater', 'equal')
def requires(self):
return self.dependency
def run(self):
f = z5py.File(self.feature_path)
ds = f[self.feature_key]
feats = ds[:, 0]
assert self.threshold_mode in self.threshold_modes
if self.threshold_mode == 'less':
filter_ids = feats < self.threshold
elif self.threshold_mode == 'greater':
filter_ids = feats > self.threshold
elif self.threshold_mode == 'equal':
filter_ids = feats == self.threshold
filter_ids = np.where(filter_ids)[0].tolist()
with open(self.out_path, 'w') as f:
json.dump(filter_ids, f)
def output(self):
return luigi.LocalTarget(self.out_path)
class FilterByThresholdWorkflow(WorkflowBase):
input_path = luigi.Parameter()
input_key = luigi.Parameter()
seg_in_path = luigi.Parameter()
seg_in_key = luigi.Parameter()
seg_out_path = luigi.Parameter()
seg_out_key = luigi.Parameter()
threshold = luigi.FloatParameter()
relabel = luigi.BoolParameter(default=True)
def requires(self):
# calculate the region features
feat_path = os.path.join(self.tmp_folder, 'reg_feats.n5')
feat_key = 'feats'
dep = RegionFeaturesWorkflow(tmp_folder=self.tmp_folder, max_jobs=self.max_jobs,
target=self.target, config_dir=self.config_dir,
input_path=self.input_path, input_key=self.input_key,
labels_path=self.seg_in_path, labels_key=self.seg_in_key,
output_path=feat_path, output_key=feat_key)
# apply threshold to get the ids to filter out
id_filter_path = os.path.join(self.tmp_folder, 'filtered_ids.json')
dep = ApplyThreshold(feature_path=feat_path, feature_key=feat_key,
out_path=id_filter_path, threshold=self.threshold,
dependency=dep)
# filter all blocks
filter_task = getattr(filter_tasks,
self._get_task_name('FilterBlocks'))
dep = filter_task(tmp_folder=self.tmp_folder, config_dir=self.config_dir,
dependency=dep, max_jobs=self.max_jobs,
input_path=self.seg_in_path, input_key=self.seg_in_key,
filter_path=id_filter_path,
output_path=self.seg_out_path, output_key=self.seg_out_key)
if self.relabel:
dep = RelabelWorkflow(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
target=self.target,
input_path=self.seg_out_path,
input_key=self.seg_out_key,
assignment_path=self.seg_out_path,
assignment_key='assignments/relabel_filter',
prefix='pp_filter_by_threshold',
dependency=dep)
return dep
@staticmethod
def get_config():
configs = super(FilterByThresholdWorkflow, FilterByThresholdWorkflow).get_config()
configs.update({'filter_blocks': filter_tasks.FilterBlocksLocal.default_task_config(),
**RegionFeaturesWorkflow.get_config()})
return configs
class FilterOrphansWorkflow(WorkflowBase):
graph_path = luigi.Parameter()
graph_key = luigi.Parameter()
path = luigi.Parameter()
segmentation_key = luigi.Parameter()
assignment_key = luigi.Parameter()
output_path = luigi.Parameter()
assignment_out_key = luigi.Parameter()
output_key = luigi.Parameter(default=None)
relabel = luigi.BoolParameter(default=False)
def requires(self):
assert False, "FIXME not debugged yet"
dep = self.dependency
orphan_task = getattr(orphan_tasks,
self._get_task_name('OrphanAssignments'))
dep = orphan_task(tmp_folder=self.tmp_folder, max_jobs=self.max_jobs,
config_dir=self.config_dir, dependency=dep,
graph_path=self.graph_path, graph_key=self.graph_key,
assignment_path=self.path, assignment_key=self.assignment_key,
output_path=self.path, output_key=self.assignment_out_key,
relabel=self.relabel)
if self.output_key is not None:
write_task = getattr(write_tasks,
self._get_task_name('Write'))
dep = write_task(tmp_folder=self.tmp_folder, max_jobs=self.max_jobs,
config_dir=self.config_dir, dependency=dep,
input_path=self.path, input_key=self.segmentation_key,
output_path=self.path, output_key=self.output_key,
assignment_path=self.output_path, assignment_key=self.assignment_out_key,
identifier='filter-orphans')
return dep
@staticmethod
def get_config():
configs = super(FilterOrphansWorkflow, FilterOrphansWorkflow).get_config()
configs.update({'orphan_assignments': orphan_tasks.OrphanAssignmentsLocal.default_task_config(),
'write': write_tasks.WriteLocal.default_task_config()})
return configs
class ConnectedComponentsWorkflow(WorkflowBase):
problem_path = luigi.Parameter()
graph_key = luigi.Parameter()
assignment_path = luigi.Parameter()
assignment_key = luigi.Parameter()
output_path = luigi.Parameter()
assignment_out_key = luigi.Parameter()
output_key = luigi.Parameter(default='')
path = luigi.Parameter(default='')
fragments_key = luigi.Parameter(default='')
def requires(self):
cc_task = getattr(cc_tasks,
self._get_task_name('GraphConnectedComponents'))
dep = cc_task(max_jobs=self.max_jobs, tmp_folder=self.tmp_folder,
config_dir=self.config_dir,
problem_path=self.problem_path,
graph_key=self.graph_key,
assignment_path=self.assignment_path,
assignment_key=self.assignment_key,
output_path=self.output_path,
output_key=self.assignment_out_key,
dependency=self.dependency)
if self.output_key != '':
write_task = getattr(write_tasks,
self._get_task_name('Write'))
assert self.fragments_key != '' and self.path != ''
dep = write_task(tmp_folder=self.tmp_folder, max_jobs=self.max_jobs,
config_dir=self.config_dir, dependency=dep,
input_path=self.path, input_key=self.fragments_key,
output_path=self.output_path, output_key=self.output_key,
assignment_path=self.output_path,
assignment_key=self.assignment_out_key,
identifier='graph-connected-components')
return dep
@staticmethod
def get_config():
configs = super(ConnectedComponentsWorkflow, ConnectedComponentsWorkflow).get_config()
configs.update({'graph_connected_components': cc_tasks.GraphConnectedComponentsLocal.default_task_config(),
'write': write_tasks.WriteLocal.default_task_config()})
return configs
class SizeFilterAndGraphWatershedWorkflow(WorkflowBase):
problem_path = luigi.Parameter()
graph_key = luigi.Parameter()
features_key = luigi.Parameter()
    # path to the container holding segmentation, fragments and assignments
    path = luigi.Parameter()
    # key of the merged segmentation
    segmentation_key = luigi.Parameter()
    # key of the underlying fragments
    fragments_key = luigi.Parameter(default='')
    # key of the fragment-segment assignment
    assignment_key = luigi.Parameter()
# the size filter threshold
size_threshold = luigi.IntParameter(default=None)
target_number = luigi.IntParameter(default=None)
relabel = luigi.BoolParameter(default=False)
from_costs = luigi.BoolParameter(default=False)
output_path = luigi.Parameter()
assignment_out_key = luigi.Parameter()
output_key = luigi.Parameter(default='')
def find_sizes(self, dep):
        # find segments that should be merged according to the size filter
un_task = getattr(unique_tasks,
self._get_task_name('FindUniques'))
dep = un_task(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
input_path=self.path,
input_key=self.segmentation_key,
return_counts=True,
dependency=dep,
prefix='size-filter-and-graph-watershed')
sf_task = getattr(size_filter_tasks,
self._get_task_name('SizeFilterBlocks'))
dep = sf_task(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
input_path=self.path,
input_key=self.segmentation_key,
size_threshold=self.size_threshold,
target_number=self.target_number,
dependency=dep)
return dep
def requires(self):
assert (self.size_threshold is None) != (self.target_number is None)
dep = self.dependency
# find the sizes for all segments
dep = self.find_sizes(dep)
# run graph watershed to merge in all small segments
filter_path = os.path.join(self.tmp_folder, 'discard_ids.npy')
gws_task = getattr(gws_tasks,
self._get_task_name('GraphWatershedAssignments'))
dep = gws_task(tmp_folder=self.tmp_folder, max_jobs=self.max_jobs,
config_dir=self.config_dir, dependency=dep,
problem_path=self.problem_path, graph_key=self.graph_key,
features_key=self.features_key,
assignment_path=self.path, assignment_key=self.assignment_key,
output_path=self.output_path, output_key=self.assignment_out_key,
filter_nodes_path=filter_path,
relabel=self.relabel, from_costs=self.from_costs)
if self.output_key != '':
assert self.fragments_key != ''
write_task = getattr(write_tasks,
self._get_task_name('Write'))
dep = write_task(tmp_folder=self.tmp_folder, max_jobs=self.max_jobs,
config_dir=self.config_dir, dependency=dep,
input_path=self.path, input_key=self.fragments_key,
output_path=self.output_path, output_key=self.output_key,
assignment_path=self.output_path, assignment_key=self.assignment_out_key,
identifier='size-filter-graph-ws')
return dep
@staticmethod
def get_config():
configs = super(SizeFilterAndGraphWatershedWorkflow,
SizeFilterAndGraphWatershedWorkflow).get_config()
configs.update({'size_filter_blocks': size_filter_tasks.SizeFilterBlocksLocal.default_task_config(),
'graph_watershed_assignments':
gws_tasks.GraphWatershedAssignmentsLocal.default_task_config(),
'write': write_tasks.WriteLocal.default_task_config()})
return configs
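# Hypothetical usage sketch (paths, keys and scheduling values invented for
# illustration; assumes WorkflowBase provides tmp_folder, max_jobs, config_dir
# and target, as the requires() methods above suggest):
#
#   task = SizeFilterWorkflow(tmp_folder='./tmp', max_jobs=4,
#                             config_dir='./configs', target='local',
#                             input_path='seg.n5', input_key='volumes/seg',
#                             output_path='seg.n5',
#                             output_key='volumes/seg_filtered',
#                             size_threshold=100)
#   luigi.build([task], local_scheduler=True)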
| constantinpape/cluster_tools | cluster_tools/postprocess/postprocess_workflow.py | postprocess_workflow.py | py | 19,543 | python | en | code | 32 | github-code | 6 |
[
{
"api_name": "cluster_tasks.WorkflowBase",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "luigi.Parameter",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "luigi.IntParameter",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "luigi.BoolParameter",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "luigi.BoolParameter",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "relabel.find_uniques",
"line_number": 65,
"usage_type": "argument"
},
{
"api_name": "relabel.RelabelWorkflow",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "relabel.RelabelWorkflow.get_config",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "relabel.RelabelWorkflow",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "cluster_tasks.WorkflowBase",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "luigi.Parameter",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "luigi.ListParameter",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "node_labels.NodeLabelWorkflow",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "node_labels.NodeLabelWorkflow.get_config",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "node_labels.NodeLabelWorkflow",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "luigi.Task",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "luigi.Parameter",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "luigi.FloatParameter",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "luigi.TaskParameter",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "z5py.File",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "luigi.LocalTarget",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "cluster_tasks.WorkflowBase",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "luigi.Parameter",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "luigi.FloatParameter",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "luigi.BoolParameter",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "features.RegionFeaturesWorkflow",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "relabel.RelabelWorkflow",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "features.RegionFeaturesWorkflow.get_config",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "features.RegionFeaturesWorkflow",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "cluster_tasks.WorkflowBase",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "luigi.Parameter",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "luigi.BoolParameter",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "cluster_tasks.WorkflowBase",
"line_number": 297,
"usage_type": "name"
},
{
"api_name": "luigi.Parameter",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "cluster_tasks.WorkflowBase",
"line_number": 344,
"usage_type": "name"
},
{
"api_name": "luigi.Parameter",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "luigi.IntParameter",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "luigi.IntParameter",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "luigi.BoolParameter",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "luigi.BoolParameter",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "relabel.find_uniques",
"line_number": 371,
"usage_type": "argument"
},
{
"api_name": "os.path.join",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 401,
"usage_type": "attribute"
}
] | 24698015874 |
##
# The model uses elements from both the Transformer Encoder as introduced in
# “Attention is All You Need” (https://arxiv.org/pdf/1706.03762.pdf) and the
# Message Passing Neural Network (MPNN) as described in "Neural Message Passing
# for Quantum Chemistry" paper (https://arxiv.org/pdf/1704.01212.pdf) .
#
# The overall architecture most closely resembles the Transformer Encoder with
# stacked encoder blocks and layers connected through residual connections with
# layer norm. In this case, however, the encoder blocks are built up of two
# message passing layers, followed by three different types of attention layers
# with a final pointwise feed-forward network.
#
# Both message passing layers use a slightly modified version of the edge
# networks as detailed in the MPNN paper. The first layer allows message passing
# between bonded atoms, whereas the second layer does so for the atom pairs for
# which we need to predict the scalar coupling constant. Unlike the attention
# layers the message passing layers' parameters are tied across blocks.
#
# The three attention layers are:
# 1. distance based gaussian attention
# 2. graph distance based attention
# 3. scaled dot product self attention
#
# Although the final layers in the block resemble the encoder blocks of the
# Transformer model, there are several additional layers designed specifically
# to capture the structure and relationships among atoms in a molecule.
#
# Much of the code is adopted from the annotated version of the Transformer
# paper, which can be found here
# (http://nlp.seas.harvard.edu/2018/04/03/attention.html).
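#
# Informal sketch of one encoder block as implemented below (pre-norm residual
# sublayers, cf. SublayerConnection; the Linear wrappers come from
# MessagePassingLayer):
#
#   x <- x + Dropout(Linear(BondMessagePassing(LayerNorm(x))))
#   x <- x + Dropout(Linear(ScPairMessagePassing(LayerNorm(x))))
#   x <- x + Dropout(EuclDistAttention(LayerNorm(x)))
#   x <- x + Dropout(GraphDistAttention(LayerNorm(x)))
#   x <- x + Dropout(SelfAttention(LayerNorm(x)))
#   x <- x + Dropout(FeedForward(LayerNorm(x)))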
import math
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from fcnet import FullyConnectedNet, hidden_layer
from scatter import scatter_mean
from layernorm import LayerNorm
def clones(module, N):
"""Produce N identical layers."""
return torch.nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super().__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"""Apply residual connection to any sublayer with the same size."""
return x + self.dropout(sublayer(self.norm(x)))
def _gather_nodes(x, idx, sz_last_dim):
idx = idx.unsqueeze(-1).expand(-1, -1, sz_last_dim)
return x.gather(1, idx)
class ENNMessage(nn.Module):
"""
    The edge network message passing function from the MPNN paper. Optionally
    adds an additional cosine-angle-based attention mechanism over incoming
messages.
"""
PAD_VAL = -999
def __init__(self, d_model, d_edge, kernel_sz, enn_args={}, ann_args=None):
super().__init__()
assert kernel_sz <= d_model
self.d_model, self.kernel_sz = d_model, kernel_sz
self.enn = FullyConnectedNet(d_edge, d_model*kernel_sz, **enn_args)
if ann_args: self.ann = FullyConnectedNet(1, d_model, **ann_args)
else: self.ann = None
def forward(self, x, edges, pairs_idx, angles=None, angles_idx=None, t=0):
"""Note that edges and pairs_idx raw inputs are for a unidirectional
graph. They are expanded to allow bidirectional message passing."""
if t==0:
self.set_a_mat(edges)
if self.ann: self.set_attn(angles)
# concat reversed pairs_idx for bidirectional message passing
self.pairs_idx = torch.cat([pairs_idx, pairs_idx[:,:,[1,0]]], dim=1)
return self.add_message(torch.zeros_like(x), x, angles_idx)
def set_a_mat(self, edges):
n_edges = edges.size(1)
a_vect = self.enn(edges)
a_vect = a_vect / (self.kernel_sz ** .5) # rescale
mask = edges[:,:,0,None].expand(a_vect.size())==self.PAD_VAL
a_vect = a_vect.masked_fill(mask, 0.0)
self.a_mat = a_vect.view(-1, n_edges, self.d_model, self.kernel_sz)
# concat a_mats for bidirectional message passing
self.a_mat = torch.cat([self.a_mat, self.a_mat], dim=1)
def set_attn(self, angles):
angles = angles.unsqueeze(-1)
self.attn = self.ann(angles)
mask = angles.expand(self.attn.size())==self.PAD_VAL
self.attn = self.attn.masked_fill(mask, 0.0)
def add_message(self, m, x, angles_idx=None):
"""Add message for atom_{i}: m_{i} += sum_{j}[attn_{ij} A_{ij}x_{j}]."""
# select the 'x_{j}' feeding into the 'm_{i}'
x_in = _gather_nodes(x, self.pairs_idx[:,:,1], self.d_model)
# do the matrix multiplication 'A_{ij}x_{j}'
        if self.kernel_sz==self.d_model: # full matrix multiplication
ax = (x_in.unsqueeze(-2) @ self.a_mat).squeeze(-2)
else: # do a convolution
x_padded = F.pad(x_in, self.n_pad)
x_unfolded = x_padded.unfold(-1, self.kernel_sz, 1)
ax = (x_unfolded * self.a_mat).sum(-1)
        # apply attention
if self.ann:
n_pairs = self.pairs_idx.size(1)
# average all attn(angle_{ijk}) per edge_{ij}.
# i.e.: attn_{ij} = sum_{k}[attn(angle_{ijk})] / n_angles_{ij}
ave_att = scatter_mean(self.attn, angles_idx, num=n_pairs, dim=1,
out=torch.ones_like(ax))
ax = ave_att * ax
# sum up all 'A_{ij}h_{j}' per node 'i'
idx_0 = self.pairs_idx[:,:,0,None].expand(-1, -1, self.d_model)
return m.scatter_add(1, idx_0, ax)
@property
def n_pad(self):
k = self.kernel_sz
return (k // 2, k // 2 - int(k % 2 == 0))
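# Illustrative shape walk-through for ENNMessage (sizes invented; assumes
# fcnet.FullyConnectedNet works with its default keyword arguments here):
#
#   mess = ENNMessage(d_model=16, d_edge=6, kernel_sz=16)
#   x = torch.randn(2, 5, 16)               # (batch, n_atoms, d_model)
#   edges = torch.randn(2, 4, 6)            # (batch, n_edges, d_edge)
#   pairs = torch.randint(0, 5, (2, 4, 2))  # bonded atom index pairs
#   m = mess(x, edges, pairs)               # -> (2, 5, 16)
#
# With kernel_sz == d_model each edge gets a full (d_model x d_model) matrix;
# with kernel_sz < d_model the product A_{ij}x_{j} is computed as a cheaper
# sliding-window convolution over the node state.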
class MultiHeadedDistAttention(nn.Module):
"""Generalizes the euclidean and graph distance based attention layers."""
def __init__(self, h, d_model):
super().__init__()
self.d_model, self.d_k, self.h = d_model, d_model // h, h
self.attn = None
self.linears = clones(nn.Linear(d_model, d_model), 2)
def forward(self, dists, x, mask):
batch_size = x.size(0)
x = self.linears[0](x).view(batch_size, -1, self.h, self.d_k)
x, self.attn = self.apply_attn(dists, x, mask)
x = x.view(batch_size, -1, self.h * self.d_k)
return self.linears[-1](x)
def apply_attn(self, dists, x, mask):
attn = self.create_raw_attn(dists, mask)
attn = attn.transpose(-2,-1).transpose(1, 2)
x = x.transpose(1, 2)
x = torch.matmul(attn, x)
x = x.transpose(1, 2).contiguous()
return x, attn
def create_raw_attn(self, dists, mask):
pass
class MultiHeadedGraphDistAttention(MultiHeadedDistAttention):
"""Attention based on an embedding of the graph distance matrix."""
MAX_GRAPH_DIST = 10
def __init__(self, h, d_model):
super().__init__(h, d_model)
self.embedding = nn.Embedding(self.MAX_GRAPH_DIST+1, h)
def create_raw_attn(self, dists, mask):
emb_dists = self.embedding(dists)
mask = mask.unsqueeze(-1).expand(emb_dists.size())
emb_dists = emb_dists.masked_fill(mask==0, -1e9)
return F.softmax(emb_dists, dim=-2).masked_fill(mask==0, 0)
class MultiHeadedEuclDistAttention(MultiHeadedDistAttention):
"""Attention based on a parameterized normal pdf taking a molecule's
euclidean distance matrix as input."""
def __init__(self, h, d_model):
super().__init__(h, d_model)
self.log_prec = nn.Parameter(torch.Tensor(1, 1, 1, h))
self.locs = nn.Parameter(torch.Tensor(1, 1, 1, h))
nn.init.normal_(self.log_prec, mean=0.0, std=0.1)
nn.init.normal_(self.locs, mean=0.0, std=1.0)
def create_raw_attn(self, dists, mask):
dists = dists.unsqueeze(-1).expand(-1, -1, -1, self.h)
z = torch.exp(self.log_prec) * (dists - self.locs)
pdf = torch.exp(-0.5 * z ** 2)
return pdf / pdf.sum(dim=-2, keepdim=True).clamp(1e-9)
def attention(query, key, value, mask=None, dropout=None):
"""Compute 'Scaled Dot Product Attention'."""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        scores = scores.masked_fill(mask==0, -1e9)
    p_attn = F.softmax(scores, dim=-1)
    if mask is not None:  # only re-mask when a mask was given (mask defaults to None)
        p_attn = p_attn.masked_fill(mask==0, 0)
if dropout is not None: p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
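# Minimal shape check for the helper above (tensors invented for illustration):
#
#   q = k = v = torch.randn(2, 8, 10, 64)   # (batch, heads, seq, d_k)
#   m = torch.ones(2, 1, 10, 10)            # 1 = attend, 0 = masked out
#   out, p = attention(q, k, v, m)          # out: (2, 8, 10, 64); p sums to 1
#                                           # over the last dim where unmasked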
class MultiHeadedSelfAttention(nn.Module):
"""Applies self-attention as described in the Transformer paper."""
def __init__(self, h, d_model, dropout=0.1):
super().__init__()
self.d_model, self.d_k, self.h = d_model, d_model // h, h
self.attn = None
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.dropout = nn.Dropout(p=dropout) if dropout > 0.0 else None
def forward(self, x, mask):
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
batch_size = x.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = [
l(x).view(batch_size, -1, self.h, self.d_k).transpose(1, 2)
for l in self.linears[:3]
]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask, self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous()
x = x.view(batch_size, -1, self.d_model)
return self.linears[-1](x)
class AttendingLayer(nn.Module):
"""Stacks the three attention layers and the pointwise feedforward net."""
def __init__(self, size, eucl_dist_attn, graph_dist_attn, self_attn, ff,
dropout):
super().__init__()
self.eucl_dist_attn = eucl_dist_attn
self.graph_dist_attn = graph_dist_attn
self.self_attn = self_attn
self.ff = ff
self.subconns = clones(SublayerConnection(size, dropout), 4)
self.size = size
def forward(self, x, eucl_dists, graph_dists, mask):
eucl_dist_sub = lambda x: self.eucl_dist_attn(eucl_dists, x, mask)
x = self.subconns[0](x, eucl_dist_sub)
graph_dist_sub = lambda x: self.graph_dist_attn(graph_dists, x, mask)
x = self.subconns[1](x, graph_dist_sub)
self_sub = lambda x: self.self_attn(x, mask)
x = self.subconns[2](x, self_sub)
return self.subconns[3](x, self.ff)
class MessagePassingLayer(nn.Module):
"""Stacks the bond and scalar coupling pair message passing layers."""
def __init__(self, size, bond_mess, sc_mess, dropout, N):
super().__init__()
self.bond_mess = bond_mess
self.sc_mess = sc_mess
self.linears = clones(nn.Linear(size, size), 2*N)
self.subconns = clones(SublayerConnection(size, dropout), 2*N)
def forward(self, x, bond_x, sc_pair_x, angles, mask, bond_idx, sc_idx,
angles_idx, t=0):
bond_sub = lambda x: self.linears[2*t](
self.bond_mess(x, bond_x, bond_idx, angles, angles_idx, t))
x = self.subconns[2*t](x, bond_sub)
sc_sub = lambda x: self.linears[(2*t)+1](
self.sc_mess(x, sc_pair_x, sc_idx, t=t))
return self.subconns[(2*t)+1](x, sc_sub)
class Encoder(nn.Module):
"""Encoder stacks N attention layers and one message passing layer."""
def __init__(self, mess_pass_layer, attn_layer, N):
super().__init__()
self.mess_pass_layer = mess_pass_layer
self.attn_layers = clones(attn_layer, N)
self.norm = LayerNorm(attn_layer.size)
def forward(self, x, bond_x, sc_pair_x, eucl_dists, graph_dists, angles,
mask, bond_idx, sc_idx, angles_idx):
"""Pass the inputs (and mask) through each block in turn. Note that for
each block the same message passing layer is used."""
for t, attn_layer in enumerate(self.attn_layers):
x = self.mess_pass_layer(x, bond_x, sc_pair_x, angles, mask,
bond_idx, sc_idx, angles_idx, t)
x = attn_layer(x, eucl_dists, graph_dists, mask)
return self.norm(x)
# After N blocks of message passing and attending, the encoded atom states are
# transferred to the head of the model: a customized feed-forward net for
# predicting the scalar coupling (sc) constant.
# First the relevant pairs of atom states for each sc constant in the batch
# are selected, concatenated and stacked. Also concatenated to the encoded
# states are a set of raw molecule and sc pair specific features. These states
# are fed into a residual block comprised of a dense layer followed by a type
# specific dense layer of dimension 'd_ff' (the same as the dimension used for
# the pointwise feed-forward net).
# The processed states are passed through to a relatively small feed-forward
# net, which predicts each sc contribution separately plus a residual.
# Ultimately, the predictions of these contributions and the residual are summed
# to predict the sc constant.
def create_contrib_head(d_in, d_ff, act, dropout=0.0, layer_norm=True):
layers = hidden_layer(d_in, d_ff, False, dropout, layer_norm, act)
layers += hidden_layer(d_ff, 1, False, 0.0) # output layer
return nn.Sequential(*layers)
class ContribsNet(nn.Module):
"""The feed-forward net used for the sc contribution and final sc constant
predictions."""
N_CONTRIBS = 5
    CONTRIB_SCALES = [1, 250, 45, 35, 500]  # scales used to make the 5 predictions of similar magnitude
def __init__(self, d_in, d_ff, vec_in, act, dropout=0.0, layer_norm=True):
super().__init__()
contrib_head = create_contrib_head(d_in, d_ff, act, dropout, layer_norm)
self.blocks = clones(contrib_head, self.N_CONTRIBS)
def forward(self, x):
        ys = torch.cat(
            [b(x)/s for b, s in zip(self.blocks, self.CONTRIB_SCALES)], dim=-1)
return torch.cat([ys[:,:-1], ys.sum(dim=-1, keepdim=True)], dim=-1)
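# Note on ContribsNet's output layout: columns 0..3 hold the four scaled
# contribution predictions, and the final column is the sum over all five
# heads (the fifth acting as a residual), which serves as the sc constant
# prediction.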
class MyCustomHead(nn.Module):
"""Joins the sc type specific residual block with the sc contribution
feed-forward net."""
PAD_VAL = -999
N_TYPES = 8
def __init__(self, d_input, d_ff, d_ff_contribs, pre_layers=[],
post_layers=[], act=nn.ReLU(True), dropout=3*[0.], norm=False):
super().__init__()
fc_pre = hidden_layer(d_input, d_ff, False, dropout[0], norm, act)
self.preproc = nn.Sequential(*fc_pre)
fc_type = hidden_layer(d_ff, d_input, False, dropout[1], norm, act)
self.types_net = clones(nn.Sequential(*fc_type), self.N_TYPES)
self.contribs_net = ContribsNet(
d_input, d_ff_contribs, d_ff, act, dropout[2], layer_norm=norm)
def forward(self, x, sc_types):
# stack inputs with a .view for easier processing
x, sc_types = x.view(-1, x.size(-1)), sc_types.view(-1)
mask = sc_types != self.PAD_VAL
x, sc_types = x[mask], sc_types[mask]
x_ = self.preproc(x)
x_types = torch.zeros_like(x)
for i in range(self.N_TYPES):
t_idx = sc_types==i
if torch.any(t_idx): x_types[t_idx] = self.types_net[i](x_[t_idx])
            else:
                # fake call: only needed for distributed training, so that all
                # processes produce gradients for every parameter
                x_types = x_types + 0.0 * self.types_net[i](x_)
x = x + x_types
return self.contribs_net(x)
class Transformer(nn.Module):
"""Molecule transformer with message passing."""
def __init__(self, d_atom, d_bond, d_sc_pair, d_sc_mol, N=6, d_model=512,
d_ff=2048, d_ff_contrib=128, h=8, dropout=0.1, kernel_sz=128,
enn_args={}, ann_args={}):
super().__init__()
assert d_model % h == 0
self.d_model = d_model
c = copy.deepcopy
bond_mess = ENNMessage(d_model, d_bond, kernel_sz, enn_args, ann_args)
sc_mess = ENNMessage(d_model, d_sc_pair, kernel_sz, enn_args)
eucl_dist_attn = MultiHeadedEuclDistAttention(h, d_model)
graph_dist_attn = MultiHeadedGraphDistAttention(h, d_model)
self_attn = MultiHeadedSelfAttention(h, d_model, dropout)
ff = FullyConnectedNet(d_model, d_model, [d_ff], dropout=[dropout])
message_passing_layer = MessagePassingLayer(
d_model, bond_mess, sc_mess, dropout, N)
attending_layer = AttendingLayer(
d_model, c(eucl_dist_attn), c(graph_dist_attn), c(self_attn), c(ff),
dropout
)
self.projection = nn.Linear(d_atom, d_model)
self.encoder = Encoder(message_passing_layer, attending_layer, N)
self.write_head = MyCustomHead(
2 * d_model + d_sc_mol, d_ff, d_ff_contrib, norm=True)
def forward(self, atom_x, bond_x, sc_pair_x, sc_mol_x, eucl_dists,
graph_dists, angles, mask, bond_idx, sc_idx, angles_idx,
sc_types):
x = self.encoder(
self.projection(atom_x), bond_x, sc_pair_x, eucl_dists, graph_dists,
angles, mask, bond_idx, sc_idx, angles_idx
)
# for each sc constant in the batch select and concat the relevant pairs
# of atom states.
x = torch.cat(
[_gather_nodes(x, sc_idx[:,:,0], self.d_model),
_gather_nodes(x, sc_idx[:,:,1], self.d_model),
sc_mol_x], dim=-1
)
return self.write_head(x, sc_types)
| robinniesert/kaggle-champs | model.py | model.py | py | 17,738 | python | en | code | 48 | github-code | 6 |
[
{
"api_name": "torch.nn.ModuleList",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "layernorm.LayerNorm",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "fcnet.FullyConnectedNet",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "fcnet.FullyConnectedNet",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.pad",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "scatter.scatter_mean",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "torch.ones_like",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "torch.matmul",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "torch.nn.init.normal_",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.normal_",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "torch.exp",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "torch.exp",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "torch.matmul",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 201,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 250,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 250,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 269,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 269,
"usage_type": "name"
},
{
"api_name": "layernorm.LayerNorm",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "fcnet.hidden_layer",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "fcnet.hidden_layer",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 308,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 308,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 324,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 324,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 331,
"usage_type": "name"
},
{
"api_name": "fcnet.hidden_layer",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 334,
"usage_type": "name"
},
{
"api_name": "fcnet.hidden_layer",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 336,
"usage_type": "name"
},
{
"api_name": "torch.zeros_like",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "torch.any",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 355,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 355,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 363,
"usage_type": "attribute"
},
{
"api_name": "fcnet.FullyConnectedNet",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "torch.nn.Linear",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 378,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 392,
"usage_type": "call"
}
] | 4755956537 |
import argparse
import os
import sys
import time
import json
import pickle
from nltk.corpus import wordnet as wn
import numpy as np
import torch
import random
from aligner import Aligner
import log
logger = log.get_logger('root')
logger.propagate = False
def get_print_result(sample_group: dict, sample_result: dict, nonce_word):
candidates = sample_group['candidates']
info = sample_group['common_ancestor_info']
print_str = "\n===============================================================================================\n"
print_str += f'Number of Candidates: {len(candidates)}\n\n'
print_str += f"\nCommon {info['relation']} synset: {info['ancestor_name']}\n{wn.synset(info['ancestor_name']).definition()}\n\n"
# Use the line below for CoDA5
# print_str += f"\nCommon {info['relation']} synset: {info['ancestor_synset']}\n{info['ancestor_definition']}\n\n"
for candidate_no, candidate in enumerate(candidates):
print_str += f"\n{candidate_no+1}) Synset: {candidate['synset_name']} ({candidate['words_in_contexts'][0]})\n"
print_str += f"Definition: {candidate['definition']}\n"
print_str += f"Context: {candidate['contexts'][0]}\n"
print_str += "\n\n"
print_str += f"Predicted alignment: {sample_result['predicted_alignment']}\n"
print_str += f"Alignment Score: {sample_result['alignment_score']}\n"
return print_str
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# file parameters
parser.add_argument('--output_folder', default=None, type=str, required=True)
parser.add_argument('--data_file', default=None, type=str, required=True)
# parameters for the model to generate definitions
parser.add_argument('--model_cls', choices=['bert','roberta','gpt-2'], default='gpt-2')
parser.add_argument('--model_name', type=str, default='gpt2-medium')
parser.add_argument('--word_type', choices=['n','v'], default='n')
parser.add_argument('--nonce_word', type=str, default='bkatuhla')
parser.add_argument('--max_def_len', type=int, default=48,
help='maximum definition length after tokenization')
parser.add_argument('--max_batch_size', type=int, default=48,
help='maximum batch size')
parser.add_argument('--gpu_id', type=int, default=0,
help='id of the gpu that will be used during evaluations')
parser.add_argument('--seed', type=int, default=42,
help='seed for selecting random train samples for one of few shot evaluation')
args = parser.parse_args()
random.seed(args.seed)
with open(args.data_file, 'r') as handle:
CoDA = json.load(handle)
data = args.data_file.split("/")[-1][:-5] # don't take .json
print_file = f'{args.model_name}_on_{data}_{args.word_type}_nonce_{args.nonce_word}_some_results.txt'
save_file = f'{args.model_name}_on_{data}_{args.word_type}_nonce_{args.nonce_word}_results.pickle'
if not os.path.exists(args.output_folder):
os.mkdir(args.output_folder)
f_out = open(os.path.join(args.output_folder, print_file), "w", encoding='UTF-8')
f_out.close()
aligner = Aligner(
model_cls=args.model_cls,
pretrained_model=args.model_name,
gpu_id=args.gpu_id,
max_def_len=args.max_def_len,
max_batch_size=args.max_batch_size,
nonce_word=args.nonce_word,
word_type=args.word_type
)
    # per-group results are collected here; sample_result is rebuilt inside
    # the loop for every synset group
    all_results = []
sample_groups = CoDA[args.word_type]
for group_no, sample_group in enumerate(sample_groups):
        target_scores, predicted_alignment, alignment_score = aligner.align(sample_group)
        sample_result = {}
        sample_result['target_scores'] = target_scores
        sample_result['predicted_alignment'] = predicted_alignment
        sample_result['alignment_score'] = alignment_score
all_results.append(sample_result)
if (group_no+1) % 25 == 0:
logger.info(f'{group_no+1}/{len(sample_groups)} synset groups processed')
        if (group_no+1) % max(1, len(sample_groups) // 20) == 0:  # guard against fewer than 20 groups
with open(os.path.join(args.output_folder, print_file), "a", encoding='UTF-8') as f_out:
f_out.write(get_print_result(sample_group, sample_result, args.nonce_word))
with open(os.path.join(args.output_folder, save_file), "wb") as handle:
pickle.dump(all_results, handle)
with open(os.path.join(args.output_folder, save_file), "wb") as handle:
pickle.dump(all_results, handle)
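# Example invocation (hypothetical paths, using the argparse flags defined
# above):
#   python evaluate_PLMs.py --output_folder results/ \
#       --data_file data/CoDA.json --model_cls gpt-2 \
#       --model_name gpt2-medium --word_type n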
| lksenel/CoDA21 | Evaluation/evaluate_PLMs.py | evaluate_PLMs.py | py | 4,847 | python | en | code | 2 | github-code | 6 |
[
{
"api_name": "log.get_logger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet.synset",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "aligner.Aligner",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "aligner.align",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "pickle.dump",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "pickle.dump",
"line_number": 121,
"usage_type": "call"
}
] | 29528497286 |
#!/usr/bin/python3
import html
import re
import random
import json
import requests
from bs4 import BeautifulSoup
PATTERN = re.compile(r'/video(\d+)/.*')
def _fetch_page(page_number):
url = 'https://www.xvideos.com/porn/portugues/' + str(page_number)
res = requests.get(url)
if res.status_code != 200:
raise Exception('Response Error: ' + str(res.status_code))
return BeautifulSoup(res.text, 'html.parser')
def _find_videos(soup):
    for element in soup.select('.thumb-block > .thumb-under > p > a'):
        try:
            reference = PATTERN.match(element['href']).group(1)
        except AttributeError:
            # href did not match the video pattern; skip this element instead
            # of yielding a stale or undefined reference
            continue
        yield element['title'], reference, element['href']
def _get_comments(video_ref):
url_mask = 'https://www.xvideos.com/threads/video-comments/get-posts/top/{0}/0/0'
url = url_mask.format(video_ref)
res = requests.post(url)
if res.status_code != 200:
raise Exception('Response Error: ' + str(res.status_code))
json_obj = json.loads(res.text)['posts']
json_obj = json_obj['posts']
    try:
        for attr, val in json_obj.items():
            content = html.unescape(val['message'])
            author = html.unescape(val['name'])
            if '<a href=' not in content:
                yield author, content
    except (AttributeError, IndexError):
        raise IndexError
def choose_random_porn_comment():
for _ in range(10):
page = _fetch_page(random.randint(1, 40))
videos = _find_videos(page)
try:
title, reference, url = random.choice(list(videos))
comments = _get_comments(reference)
author, content = random.choice(list(comments))
except IndexError:
continue
return author, content, title, url
raise Exception('Too hard')
def _fetch_tag_page(page_number, tag):
if tag is not None:
url = 'https://www.xvideos.com/?k='+ str(tag) +'&p=' + str(page_number)
else:
url = 'https://www.xvideos.com/new/' + str(page_number)
res = requests.get(url)
if res.status_code != 200:
raise Exception('Response Error: ' + str(res.status_code))
return BeautifulSoup(res.text, 'html.parser')
def choose_random_video(tag=None):
for _ in range(10):
page = _fetch_tag_page(random.randint(1, 4), tag)
videos = _find_videos(page)
try:
title, reference, url = random.choice(list(videos))
url = 'https://xvideos.com'+url
return url
except IndexError:
raise Exception('Response Error: Bad search term')
raise Exception('Too hard')
def main():
# comment = choose_random_porn_comment()
# print(*comment, sep='\n')
video = choose_random_video()
print(video, sep='\n')
if __name__ == '__main__':
main()
| marquesgabriel/bot-xvideos-telegram | xvideos.py | xvideos.py | py | 2,846 | python | en | code | 2 | github-code | 6 |
[
{
"api_name": "re.compile",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "html.unescape",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "html.unescape",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 84,
"usage_type": "call"
}
] | 71455581947 |
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve
import matplotlib.pyplot as plt
from pathlib import Path
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import RobustScaler
class ClassifierComparison:
def __init__(self, dataset_path):
self.dataset_path = dataset_path
self.data = None
self.data_test = None
self.target_test = None
self.target = None
self.model_lr = None
self.model_knn = None
self.model_mlp = None
def load_data(self):
df = pd.read_csv(self.dataset_path)
df1 = df.copy(deep=True) # making a copy of the dataframe to protect original data
        # define the columns to be encoded and scaled
        categorical_columns = ['sex', 'exng', 'caa', 'cp', 'fbs', 'restecg', 'slp', 'thall']
        continuous_columns = ["age", "trtbps", "chol", "thalachh", "oldpeak"]
        # encoding the categorical columns
        df1 = pd.get_dummies(df1, columns=categorical_columns, drop_first=True)
        # defining the features and target
        X = df1.drop(['output'], axis=1)
        y = df1[['output']]
        # instantiating the scaler
        scaler = RobustScaler()
        # scaling the continuous features; RobustScaler centers on the median
        # and scales by the IQR, which makes the scaling robust to outliers
        X[continuous_columns] = scaler.fit_transform(X[continuous_columns])
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
self.data = X_train
self.target = y_train
self.data_test = X_test
self.target_test = y_test
def train_models(self):
self.model_lr = LogisticRegression()
self.model_lr.fit(self.data, self.target)
self.model_knn = KNeighborsClassifier()
self.model_knn.fit(self.data, self.target)
self.model_mlp = MLPClassifier()
self.model_mlp.fit(self.data.astype(float), self.target)
def predict(self):
lr_predictions = self.model_lr.predict(self.data_test)
knn_predictions = self.model_knn.predict(self.data_test)
mlp_predictions = self.model_mlp.predict(self.data_test)
return lr_predictions, knn_predictions, mlp_predictions
def compare_metrics(self):
lr_predictions, knn_predictions, mlp_predictions = self.predict()
lr_accuracy = accuracy_score(self.target_test, lr_predictions)
knn_accuracy = accuracy_score(self.target_test, knn_predictions)
mlp_accuracy = accuracy_score(self.target_test, mlp_predictions)
print(f"Logistic Regression Accuracy: {lr_accuracy:.4f}")
print(f"KNN Accuracy: {knn_accuracy:.4f}")
print(f"MLP Accuracy: {mlp_accuracy:.4f}")
def plot_roc_auc_curves(self):
lr_probabilities = self.model_lr.predict_proba(self.data_test)[:, 1]
knn_probabilities = self.model_knn.predict_proba(self.data_test)[:, 1]
mlp_probabilities = self.model_mlp.predict_proba(self.data_test)[:, 1]
lr_auc = roc_auc_score(self.target_test, lr_probabilities)
knn_auc = roc_auc_score(self.target_test, knn_probabilities)
mlp_auc = roc_auc_score(self.target_test, mlp_probabilities)
fpr_lr, tpr_lr, _ = roc_curve(self.target_test, lr_probabilities)
fpr_knn, tpr_knn, _ = roc_curve(self.target_test, knn_probabilities)
fpr_mlp, tpr_mlp, _ = roc_curve(self.target_test, mlp_probabilities)
plt.figure(figsize=(6, 3))
plt.plot(fpr_lr, tpr_lr, label=f"Logistic Regression (AUC = {lr_auc:.2f})")
plt.plot(fpr_knn, tpr_knn, label=f"KNN (AUC = {knn_auc:.2f})")
plt.plot(fpr_mlp, tpr_mlp, label=f"MLP (AUC = {mlp_auc:.2f})")
plt.plot([0, 1], [0, 1], linestyle='--', color='black')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curves')
plt.legend()
plt.tight_layout()
plt.show()
# Usage Example
cc = ClassifierComparison(Path('/Users/anmolgorakshakar/Downloads/heart.csv'))
cc.load_data()
cc.train_models()
cc.compare_metrics()
cc.plot_roc_auc_curves()
| anmol6536/binder_project | hw6_comparing_models.py | hw6_comparing_models.py | py | 4,387 | python | en | code | 0 | github-code | 6 |
[
{
"api_name": "pandas.read_csv",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.RobustScaler",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors.KNeighborsClassifier",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "sklearn.neural_network.MLPClassifier",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_curve",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_curve",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_curve",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 108,
"usage_type": "call"
}
] | 21881567037 |
# app.py
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
import pandas as pd
import uvicorn
app = FastAPI()
# This middleware is required in order to accept requests from other domains such as a React app running on 'localhost:3000'
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
df = pd.read_csv('./sentiment140/training.1600000.processed.noemoticon.csv',
names=['score', 'id', 'date', 'col4', 'author', 'tweet'])
# Train the model at startup (nothing is actually loaded from disk)
model = MultinomialNB()
vectorizer = CountVectorizer()
# Assuming df is your DataFrame from which you trained the model
X_train_vectorized = vectorizer.fit_transform(df['tweet'])
y_train = df['score'].astype(str)
model.fit(X_train_vectorized, y_train)
class SentimentRequest(BaseModel):
text: str
class SentimentResponse(BaseModel):
prediction: int
@app.post("/predict_sentiment", response_model=SentimentResponse)
def predict_sentiment(request: SentimentRequest):
global df # declare df as global so we can use it in this endpoint
text = request.text
# Vectorize the input text
text_vectorized = vectorizer.transform([text])
# Make prediction
    prediction = int(model.predict(text_vectorized)[0])  # labels were stored as strings; the response model expects an int
    # Append this prediction to the dataset
    new_entry = pd.DataFrame({'score': [prediction], 'tweet': [text]})
    df = pd.concat([df, new_entry], ignore_index=True)
    # header=False keeps the file loadable with the explicit names=[...] used at startup
    df.to_csv('./sentiment140/training.1600000.processed.noemoticon.csv', index=False, header=False)
return {"prediction": prediction}
@app.get("/get_last_5")
def get_last_5():
global df
last_5_entries = df.tail(5)
last_5_entries_trimmed = last_5_entries[['score', 'tweet']].to_dict(orient='records')
return last_5_entries_trimmed
if __name__ == "__main__":
uvicorn.run("app:app", port=8000, reload=True)
|
pnavab/tweet-sentiment-NLP
|
app.py
|
app.py
|
py
| 2,079 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fastapi.FastAPI",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "fastapi.middleware.cors.CORSMiddleware",
"line_number": 15,
"usage_type": "argument"
},
{
"api_name": "pandas.read_csv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sklearn.naive_bayes.MultinomialNB",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.CountVectorizer",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "uvicorn.run",
"line_number": 67,
"usage_type": "call"
}
] |
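One hedged note on the endpoint above: df.to_csv rewrites the entire 1.6M-row training file on every request. Below is a minimal append-only sketch; the temp-file path and the two-column layout are demo assumptions, not part of the original app.
# A minimal sketch: append only the new row instead of rewriting the whole CSV.
import pandas as pd
import tempfile, os

csv_path = os.path.join(tempfile.gettempdir(), "predictions_demo.csv")  # demo path: an assumption

def append_prediction(score: int, tweet: str) -> None:
    row = pd.DataFrame({"score": [score], "tweet": [tweet]})
    # mode="a" appends; write the header only when the file does not exist yet
    row.to_csv(csv_path, mode="a", index=False, header=not os.path.exists(csv_path))

append_prediction(4, "loving this")
append_prediction(0, "terrible day")
print(pd.read_csv(csv_path))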
33837428124
|
import array
import struct
import sys
from collections import namedtuple
import plotly.express as px
import numpy as np
from scipy.ndimage import uniform_filter1d
from statsmodels.nonparametric.smoothers_lowess import lowess
import matplotlib.pyplot as plt
from math import degrees, atan
import scipy.signal
TYPE_DIGITAL = 0
TYPE_ANALOG = 1
expected_version = 0
AnalogData = namedtuple('AnalogData', ('begin_time', 'sample_rate', 'downsample', 'num_samples', 'samples'))
def parse_analog(f):
# Parse header
identifier = f.read(8)
if identifier != b"<SALEAE>":
raise Exception("Not a saleae file")
version, datatype = struct.unpack('=ii', f.read(8))
if version != expected_version or datatype != TYPE_ANALOG:
raise Exception("Unexpected data type: {}".format(datatype))
# Parse analog-specific data
begin_time, sample_rate, downsample, num_samples = struct.unpack('=dqqq', f.read(32))
# Parse samples
samples = array.array("f")
samples.fromfile(f, num_samples)
return AnalogData(begin_time, sample_rate, downsample, num_samples, samples)
if __name__ == '__main__':
times = []
volts = []
anchor = 0
filename = sys.argv[1]
print("Opening " + filename)
with open(filename, 'rb') as f:
data = parse_analog(f)
# Print out all analog data
print("Begin time: {}".format(data.begin_time))
print("Sample rate: {}".format(data.sample_rate))
print("Downsample: {}".format(data.downsample))
print("Number of samples: {}".format(data.num_samples))
j = 0
for idx, voltage in enumerate(data.samples):
sample_num = idx * data.downsample
#thing/(thing/sec) = thing*(sec/thing) = sec
time = data.begin_time + (float(sample_num) / data.sample_rate)
times.append(time)
volts.append(min(voltage,1.3345))
j = j + 1
volts = scipy.ndimage.median_filter(volts, int((data.sample_rate/data.downsample)*.002)+1)
#volts = uniform_filter1d(volts, size=int((data.sample_rate/data.downsample)*.002))
"""
filtered = lowess(volts, times, frac=0.0005)
plt.plot(filtered[:, 0], filtered[:, 1], 'r-', linewidth=3)
plt.show()
"""
upper_bound = lower_bound = volts[0]
for i in range(0,int(data.num_samples*.2)):
upper_bound = max(upper_bound, volts[i])
lower_bound = min(lower_bound, volts[i])
v_noise = .0
sample_size = .3
slope_range = int(data.num_samples*.05)
temp_threshold = 0.0
angle_threshold = 30.0
tslope_range = 10
"""
for s in range(100,11000,100):
i = 0
while i < int(data.num_samples*sample_size):
l_b = max(i-s,0)
r_b = min(i+s,data.num_samples)
v_noise = volts[r_b] - volts[l_b]
if temp_threshold <= abs(degrees(atan(v_noise/((times[r_b]-times[l_b]))))):
temp_threshold = abs(degrees(atan(v_noise/((times[r_b]-times[l_b])))))
print("({},{})({},{})".format(times[l_b], volts[l_b], times[r_b], volts[r_b]))
i = i + 1
print("Temp Threshold: {}".format(temp_threshold))
if temp_threshold < angle_threshold:
angle_threshold = temp_threshold
slope_range = s
"""
print("Angle Threshold: {}".format(angle_threshold))
start = 0
state = 0
#red is horizontal, b is rise, green is fall
colors = ['r','b','g']
i = 1
angle_threshold = 1
slope_range = int(data.num_samples*.002)
while i < data.num_samples:
l_b = max(i-slope_range,0)
r_b = min(i+slope_range,data.num_samples-1)
v_noise = volts[r_b] - volts[l_b]
angle = degrees(atan(v_noise/((times[r_b]-times[l_b]))))
if abs(angle) <= angle_threshold and state != 0:
#print("Horizontal line detected: {}\n".format(angle))
plt.plot(times[start:i], volts[start:i], colors[state])
state = 0
start = i
elif angle > angle_threshold and state != 1:
#print("Rise detected: {}\n".format(angle))
plt.plot(times[start:i], volts[start:i], colors[state])
state = 1
start = i
elif angle < -angle_threshold and state != 2:
#print("Descent detected: {}\n".format(angle))
plt.plot(times[start:i], volts[start:i], colors[state])
state = 2
start = i
i = i + 1
plt.plot(times[start:i], volts[start:i], colors[state])
#plt.plot(times, volts)
plt.show()
|
nkelly1322/analog_analysis
|
AnalogAnalysis.py
|
AnalogAnalysis.py
|
py
| 4,559 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.namedtuple",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "array.array",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "scipy.ndimage.ndimage.median_filter",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.ndimage",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "scipy.ndimage",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "math.degrees",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "math.atan",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 137,
"usage_type": "name"
}
] |
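For reference, the '=dqqq' format string parsed above packs one float64 and three int64 values into exactly 32 bytes, which is why the header read is f.read(32). A minimal round-trip sketch with made-up values:
# Pack and unpack the analog header layout; all values here are demo assumptions.
import struct

header = struct.pack('=dqqq', 0.5, 50_000_000, 1, 1024)
print(len(header))  # 32 bytes: 8 (double) + 3 * 8 (int64)
begin_time, sample_rate, downsample, num_samples = struct.unpack('=dqqq', header)
print(begin_time, sample_rate, downsample, num_samples)
# wall-clock time of sample i, as computed in the loop above:
i = 10
print(begin_time + (i * downsample) / sample_rate)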
5569399042
|
"""Display image captured from image sensor"""
import numpy as np
import cv2
import socket
import tkinter
import pandas as pd
import datetime
import time
import os
class ImageGUI(object):
def __init__(self):
#self.buffer_size = 128 * 128 * 3 # picture size
self.buffer_size = (16384 * 2 + 2048 * 2) # picture size
self.img_buf_index = 0
self.img_buf_size = 3
self.img_buf = np.array([[[0] * 128] * 128] * self.img_buf_size)
self.array_buf = np.array([[0]*(128*128)]*3)
self.array_out_buf = np.array([[0]*(4*8*64)]*2)
self.array_out_shape = np.array([[[0] * 64] * 32] * 2)
self.array_pod_out = np.array([[0] * 64] * 32)
# udp must send bytes object
self.enquire_command = bytes([int('0x55', 16), 1]) # 0x55, 0x01
self.start_command = bytes([int('0x55', 16), 2])
self.stop_command = bytes([int('0x55', 16), 3])
self.stop_reply = bytes([int('0xaa', 16), int('0xf3', 16)])
        # Create the main window that holds the other widgets
self.root = tkinter.Tk()
        # Set the main window title
self.root.title("University of Macau AMSV Image Sensor Control")
self.root.geometry('500x300')
        # Create an input box and set its size
self.input_ip = tkinter.Entry(self.root,width=50)
        # Create a listbox for status messages
self.display_info = tkinter.Listbox(self.root, width=50)
        # Create buttons
        #self.result_button = tkinter.Button(self.root, command = self.find_position, text = "Search")
self.connect_button = tkinter.Button(self.root, command = self.connect_fun, text = "Connect")
self.image_start_button = tkinter.Button(self.root, command = self.trans_start, text = "Start")
self.image_stop_button = tkinter.Button(self.root, command = self.image_save_stop, text = "Save")
    # Lay out the widgets
def gui_arrang(self):
self.input_ip.pack()
self.connect_button.place(x=100,y=220,height=50,width=100)
self.image_start_button.place(x=200,y=220,height=50,width=100)
self.image_stop_button.place(x=300,y=220,height=50,width=100)
self.display_info.pack()
def connect_fun(self, print_en = 1):
self.ip_addr = self.input_ip.get()
self.udp_server_ip_addr = self.ip_addr # target IP address
self.udp_port = 7 # port
self.soc=socket.socket()
self.soc.connect((self.udp_server_ip_addr, self.udp_port))
        if(print_en == 1):
            self.display_info.insert(0,"Connected successfully")
        # Note: the socket is left open here on purpose; trans_start() and
        # image_save_stop() send on self.soc immediately after connecting,
        # so closing it at this point would break those calls.
def trans_start(self): # This function will be executed when 'Start' button is clicked
        ## Write into the status listbox
        #self.display_info.insert(0,input_str)
#end=self.soc.send(self.start_command) # send 'start' command
while True:
t1_init = time.perf_counter()
self.connect_fun(0)
#for mean_cnt in range(10):
#buf_index = 0
#print('Tcp send')
self.soc.send(self.start_command) # send 'start' command
int_dat = []
cmd_dat = []
cmd_rec = []
############## receive data and cmd ###############
#print('Tcp receive')
cmd_rec = self.soc.recv(self.buffer_size)
for i in cmd_rec[:]: # transform bytes into int
cmd_dat.append(int(i))
if (int(cmd_dat[0]) == int('0x55', 16)) and (int(cmd_dat[1]) == int('0', 16)):
int_dat = cmd_dat[2:]
total_len = len(int_dat)
#print('Tcp receive num:', total_len)
while total_len < (16384 * 2 + 2048 * 2):
#if total_len > 36000:
# break
tcp_dat = self.soc.recv(self.buffer_size) # receive data again
for i in tcp_dat[:]: # transform bytes into int
int_dat.append(int(i))
total_len = len(int_dat)
#print('Tcp receive num:', total_len)
#if total_len < (16384 * 2 + 2048 * 2):
# print('TCP data lost! Receive Num:', total_len)
# self.soc.close()
# self.connect_fun()
# continue
                self.array_buf[0][:] = np.array(int_dat[0:16384]) # Pod data before exposure
self.array_buf[1][:] = np.array(int_dat[16384:32768])
array_out_temp1 = np.array(int_dat[32768: (32768 + 2048)])
                array_out_temp2 = np.array(int_dat[(32768 + 2048) : (32768 + 4096)]) # Pod data after exposure
# change the 8bit array_out_buf data into 64 bit
array_shape_temp1 = array_out_temp1.reshape((32, 64), order = 'F')
array_shape_temp2 = array_out_temp2.reshape((32, 64), order = 'F')
                self.array_out_shape[0] = array_shape_temp1 # OUTR OUTL OUTU OUTD data from the FPGA
                # self.array_out_shape[1] = array_shape_temp2 # OUTR OUTL OUTU OUTD data from the chip
                self.array_pod_out = array_shape_temp2 # Pod data for the chip's OUT outputs
self.array_buf[2] = self.array_buf[0] - self.array_buf[1]
self.img_buf[0] = self.tcp_data2mat(self.array_buf[2]) # reform bytes data into picture structure
img = np.mat(self.img_buf[0].astype(np.uint8)) # transform img_data into uint8 matrix
x, y = img.shape[0:2]
img_test1 = cv2.resize(img, (int(y * 6), int(x * 6))) # picture reshape (scaling)
#print('Open-CV show picture')
cv2.imshow('frame', img_test1)
if cv2.waitKey(1) & 0xFF == ord('c'):
#self.buf_img = img
#self.buf_tcp_dat = before_array
return 0
else:
print('Frame lost! ERROR_code:' + str(cmd_dat[:2]))
continue
self.soc.close()
frame_rate = 1/(time.perf_counter() - t1_init)
print('Frame Rate:%5.3f' % frame_rate, '/s')
def tcp_data2mat(self, int_dat):
#temp = np.array(int_dat)
#self.img_buf_index = (self.img_buf_index + 1) % self.img_buf_size
t1 = int_dat.reshape(-1, 16)
t2 = int_dat.reshape(-1, 16).T
t3 = t2[0][:].reshape(64,4,2,2)
pic = np.array([[0]*128]*128) # generate a 128x128 zero array
for i in range(16):
for j in range(64):
for k in range(4):
pic[2*j ][8*i+2*k ] = t2[i][0+4*k+16*j]
pic[2*j ][8*i+2*k+1] = t2[i][1+4*k+16*j]
pic[2*j+1][8*i+2*k+1] = t2[i][2+4*k+16*j]
pic[2*j+1][8*i+2*k ] = t2[i][3+4*k+16*j]
return pic
def image_save_stop(self):
# stop transfer
self.connect_fun(0)
end=self.soc.send(self.stop_command)
image_dat = self.soc.recv(10)
# create folder
folder_name = "./Pic_data/" + time.strftime("%Y%m%d%H%M%S")
dir_exist = os.path.isdir(folder_name)
if not dir_exist:
os.makedirs(folder_name)
#time_info = time.strftime("%Y%m%d%H%M%S")
# save data
save = pd.DataFrame(self.img_buf[0])
save.to_csv(folder_name + '/img_data.csv')
save = pd.DataFrame(self.array_buf[0])
save.to_csv(folder_name + '/before_exposure.csv')
save = pd.DataFrame(self.array_buf[1])
save.to_csv(folder_name + '/after_exposure.csv')
save = pd.DataFrame(self.array_buf[2])
save.to_csv(folder_name + '/sub_data.csv')
#save = pd.DataFrame(self.array_out_buf[0])
#save.to_csv(folder_name + '/out_chip_data.csv')
save = pd.DataFrame(self.array_out_shape[0])
save.to_csv(folder_name + '/out_fpga_data.csv')
#save = pd.DataFrame(self.array_out_shape[1])
#save.to_csv(folder_name + '/out_chip_data.csv')
save = pd.DataFrame(self.array_pod_out)
save.to_csv(folder_name + '/out_pod_data.csv')
#save = pd.DataFrame(self.array_out_buf[1])
#save.to_csv(folder_name + '/out_fpga_data.csv')
if(image_dat == self.stop_reply):
            self.display_info.insert(0,'Stopped and saved successfully!')
def image_show(self):
# Image show
cap = cv2.VideoCapture(0)
cap.open(0)
while True:
ret, frame = cap.read()
if not ret:
print("Can't receive frame (stream end?). Exiting ...")
break
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('frame', gray)
if cv2.waitKey(1) == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
def main():
    # Create the GUI object
    FL = ImageGUI()
    # Arrange the layout
    FL.gui_arrang()
    # Run the Tk main loop
tkinter.mainloop()
pass
if __name__ == "__main__":
main()
########### UDP client Transfer #########
#from socket import *
#HOST = '192.168.1.10'
#PORT = 8080
#BUFSIZ = 1024
#ADDRESS = (HOST, PORT)
#udpClientSocket = socket(AF_INET, SOCK_DGRAM)
#
#while True:
# data = bytes([int('0xFE', 16), 0,2,0,1])
# if not data:
# break
#
# # Send data
# udpClientSocket.sendto(data, ADDRESS)
# # Receive data
# data, ADDR = udpClientSocket.recvfrom(BUFSIZ)
# if not data:
# break
# print("Server response:", data)
#
#udpClientSocket.close()
######## TCP Client Transfer #########
#client_sock = socket.socket()
#client_sock.connect(('192.168.1.10', 7))
## Send a connect message
#stop_command = bytes([int('0x55', 16), 3]) # udp must send bytes object
#client_sock.send(stop_command)
#while 1:
# recv_dat = client_sock.recv(1024)
# print(recv_dat)
# # Handle user input
# aa = input("echo >>:")
# if aa == 'exit':
# break
# while not aa:
# aa = input("echo >>:")
# # These send/receive lines are the key part
# client_sock.send(aa.encode('utf-8'))
#client_sock.close()
|
yg99992/Image_transfer_open_source
|
python_code/Image_show.py
|
Image_show.py
|
py
| 10,237 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tkinter.Entry",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tkinter.Listbox",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.mat",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "tkinter.mainloop",
"line_number": 226,
"usage_type": "call"
}
] |
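The receive loop in trans_start() above keeps calling recv until the expected frame size arrives, because TCP recv may return fewer bytes than requested. A minimal standalone sketch of that pattern (the socketpair demo is illustration only, not part of the original program):
# Read exactly n bytes from a stream socket.
import socket

def recv_exact(sock: socket.socket, n: int) -> bytes:
    """recv may return fewer bytes than asked for; loop until n arrive."""
    chunks = []
    remaining = n
    while remaining > 0:
        chunk = sock.recv(remaining)
        if not chunk:
            raise ConnectionError("socket closed before full frame arrived")
        chunks.append(chunk)
        remaining -= len(chunk)
    return b"".join(chunks)

a, b = socket.socketpair()
a.sendall(b"\x55\x00" + bytes(14))
print(len(recv_exact(b, 16)))  # 16
a.close(); b.close()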
12746754821
|
import requests
from bs4 import BeautifulSoup as bs
import smtplib
URL = "https://www.amazon.in/9500-15-6-inch-i7-10750H-NVIDIA1650-Graphics/dp/B08BZPRWR5/ref=sr_1_4?dchild=1&keywords=Dell+XPS+15&qid=1602254565&sr=8-4"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36",
}
def check_price():
"""Check Price of Product"""
page = requests.get(URL, headers=headers)
soup = bs(page.content, 'html.parser')
title = soup.find(id="productTitle").get_text()
price = soup.find(id="priceblock_ourprice").get_text()
    # str.replace returns a new string, so the results must be reassigned;
    # the stray character is a non-breaking space (\xa0), not "\xa;"
    price = price.replace("₹", "").replace(",", "").replace("\xa0", "").strip()
    converted_price = float(price[0:5])
if (converted_price < 2000000.00):
send_mail()
print(converted_price)
def send_mail():
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.ehlo()
server.login("[email protected]", "Baahubali")
subject = "Price went down for DELL XPS 15"
body = ("Check it out: https://www.amazon.de/Dell-Generation-i7-10750H-N18P-G62-DDR4-2933MHz/dp/B088TWQ1V8/ref=sr_1_1?__mk_de_DE=%C3%85M%C3%85%C5%BD%C3%95%C3%91&crid=1QODNEAOK4F7R&dchild=1&keywords=dell+xps+15&qid=1602067797&quartzVehicle=93-295&replacementKeywords=dell+xps&sprefix=Dell+XPS+%2Caps%2C281&sr=8-1")
msg = f"Subject: {subject} \n\n {body}"
server.sendmail(
"[email protected]",
"[email protected]",
msg
)
print("Email sent")
server.quit()
check_price()
|
Programmer-X31/PythonProjects
|
Project Amazon Scraper/main.py
|
main.py
|
py
| 1,679 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "smtplib.SMTP",
"line_number": 30,
"usage_type": "call"
}
] |
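The chained price.replace(...) calls above only take effect once their results are reassigned; a more compact sketch that strips every non-digit, non-dot character in one pass (the sample string is an assumption):
# Normalize a localized price string to a float in one regex pass.
import re

def parse_price(raw: str) -> float:
    cleaned = re.sub(r"[^\d.]", "", raw)  # drops the currency symbol, commas, \xa0, spaces
    return float(cleaned)

print(parse_price("₹ 1,59,990.00"))  # 159990.0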
6018276646
|
# https://pypi.org/project/emoji/
from PIL import Image, ImageDraw, ImageFont
import emoji
print(emoji.demojize('Python 👍'))
print(emoji.emojize("Python :thumbs_up:"))
# Create a blank RGBA-mode image
img = Image.new('RGBA', (200, 200), color='white')
# Get the Unicode string for the emoji character
emoji_unicode = emoji.emojize(':thumbs_up:')
# Get the drawing object and fonts
draw = ImageDraw.Draw(img)
font_path = r'H:\Snippets\Program-Learning\Python\modules\utils\SourceHanSansCN-Medium.otf'
emoji_font_path = r'H:\Snippets\Program-Learning\Python\modules\utils\SEGUIEMJ.TTF'
font = ImageFont.truetype(font_path, 24, encoding='unic')
emoji_font = ImageFont.truetype(emoji_font_path, 24)
# Create the image and drawing object
image = Image.new("RGB", (200, 200), (255, 255, 255))
draw = ImageDraw.Draw(image)
# Draw the text
text = "Hello, 世界 👍"
x, y = 50, 50
for char in text:
    # If the character is an emoji
if char.encode('unicode_escape').decode('utf-8').startswith('\\U'):
draw.text((x, y+8), char, font=emoji_font,
fill=None, embedded_color=True)
size = draw.textlength(char, font=emoji_font)
else:
draw.text((x, y), char, font=font, fill=(0, 0, 0))
size = draw.textlength(char, font=font)
x += size
# Display the image
# image.show()
original_list = ['❤❤️']
new_list = ["".join([char for char in string if char.encode(
'unicode_escape').decode('utf-8') != '\\ufe0f']) for string in original_list]
print(new_list)
|
Yuelioi/Program-Learning
|
Python/modules/utils/_emoji.py
|
_emoji.py
|
py
| 1,542 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "emoji.demojize",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "emoji.emojize",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "PIL.Image.new",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "emoji.emojize",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw.Draw",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFont.truetype",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFont.truetype",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "PIL.Image.new",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "PIL.ImageDraw.Draw",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw",
"line_number": 26,
"usage_type": "name"
}
] |
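The unicode_escape check above only matches astral-plane characters (escapes starting with \U), so BMP emoji such as ❤ (U+2764) fall through to the text branch. A minimal per-character sketch using the emoji package, assuming emoji>=2.0 where is_emoji() is available:
# Classify each character as emoji or plain text.
import emoji

for ch in "Hi ❤ 👍":
    print(repr(ch), emoji.is_emoji(ch))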
30502016476
|
from django.urls import path
from . import views
app_name = 'home'
urlpatterns = [
path('', views.index, name='index'),
path('login/', views.login_view, name='login'),
path('registration/', views.registration, name='registration'),
path('logout/', views.logout_view, name='logout'),
path('profile/', views.profile_view, name='profile'),
path('profile/edit/', views.edit_profile_view, name='profile_edit')
]
|
Arctik124/tekken_league
|
home/urls.py
|
urls.py
|
py
| 434 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
}
] |
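Since the URLconf above sets app_name = 'home' and names every route, views and templates can reverse them as 'home:<name>'. A minimal self-contained sketch follows; the stub view and the settings.configure() scaffolding are demo assumptions, not part of the project:
# Reverse namespaced URL names outside a full Django project.
import django
from django.conf import settings

settings.configure(ROOT_URLCONF=__name__)  # use this module as the URLconf
django.setup()

from django.http import HttpResponse
from django.urls import include, path, reverse

def index(request):
    return HttpResponse("ok")

urlpatterns = [
    path("", include(([path("", index, name="index"),
                       path("login/", index, name="login")], "home"))),
]

print(reverse("home:index"))  # /
print(reverse("home:login"))  # /login/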
39485139620
|
#!/usr/bin/env python3
"""lc3_achi.py -- achievements module"""
import time
import dataset
from flask import session
lc3_achivements = [{'id': 0, 'hidden': False, 'title': 'Sleepless', 'desc': 'Submit a correct flag at night'},
{'id': 3, 'hidden': False, 'title': 'CTF Initiate', 'desc': 'Solve one problem'}
]
def check_and_set(dbfile, id):
    db = dataset.connect('sqlite:///ctf.db')  # note: the dbfile argument is currently unused; the path is hardcoded
achis = db.query('''select a.achi_id from achivements a
where a.user_id = :user_id''', user_id=session['user_id'])
achi = [a['achi_id'] for a in list(achis)]
if id in achi:
db.executable.close()
return False
else:
new_achi = dict(achi_id=id, user_id=session['user_id'])
db['achivements'].insert(new_achi)
db.executable.close()
return True
def chkachi(dbfile, action, **kw):
new_achi = False
return new_achi
|
Himanshukr000/CTF-DOCKERS
|
lc3ctf/examples/lc3achi/lc3achi.py
|
lc3achi.py
|
py
| 922 |
python
|
en
|
code
| 25 |
github-code
|
6
|
[
{
"api_name": "dataset.connect",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 22,
"usage_type": "name"
}
] |
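The check_and_set() flow above is query-then-insert with a named bind parameter. A minimal sketch of the same pattern against an in-memory SQLite database (the dataset package creates tables and columns lazily on first insert):
# Parameterized query plus insert with the dataset library.
import dataset

db = dataset.connect("sqlite:///:memory:")
db["achivements"].insert(dict(achi_id=0, user_id=1))  # table name kept as in the source

rows = db.query("select achi_id from achivements where user_id = :user_id", user_id=1)
print([r["achi_id"] for r in rows])  # [0]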
6018330446
|
from playwright.sync_api import sync_playwright
def test_props():
with sync_playwright() as p:
browser = p.chromium.launch(headless=False)
page = browser.new_page()
page.goto("https://image.baidu.com/")
        # Upload a file
        file_path = r"C:/Users/yl/Desktop/1.png"
        page.locator("input[type=file]").set_input_files(file_path)
        # # Fill an input
        # page.fill("#username", "yuellili")
        # # Click
        # page.click("#submit")
        # # Get an iframe element
        # page.frame_locator("iframe")
        # Get the class attribute
        # page.get_attribute(selector=".video-title.tit", name="class")
        # Set the dropdown selections
page.select_option(".province", label="湖南省")
page.select_option(".city", value="长沙市")
def main():
# test_xpath()
# test_css()
# test_playwright_selector()
test_props()
if __name__ == "__main__":
main()
|
Yuelioi/Program-Learning
|
Python/modules/web/Playwright/元素操作.py
|
元素操作.py
|
py
| 987 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "playwright.sync_api.sync_playwright",
"line_number": 5,
"usage_type": "call"
}
] |
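select_option() accepts label=, value=, or index= selectors. A minimal sketch that runs against an inline page instead of the external site above (assumes the Playwright browsers are installed):
# Drive a <select> element on a self-contained page.
from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    browser = p.chromium.launch()
    page = browser.new_page()
    page.set_content("""
        <select class="city">
          <option value="changsha">Changsha</option>
          <option value="beijing">Beijing</option>
        </select>
    """)
    page.select_option(".city", value="beijing")
    print(page.input_value(".city"))  # beijing
    browser.close()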
21833663632
|
from flask import Flask, render_template, request
from werkzeug.utils import redirect
from scrapper import weather_search
app = Flask("Weather Scrapper")
@app.route("/")
def home():
    area = request.args.get('area')
    if area:
        weather_element = weather_search(area)
        print(weather_element)
        return render_template("index.html", searchingBy=area, today_temp=weather_element[0], high_temp=weather_element[3], low_temp=weather_element[2])
    # Without a query string the view must still return a response, not None
    return render_template("index.html")
app.run('0.0.0.0', port=5000)
|
Sunggjinn/weather-closet
|
app.py
|
app.py
|
py
| 509 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "scrapper.weather_search",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 14,
"usage_type": "call"
}
] |
35940096218
|
import pygame
import random
from typing import Callable
from pygame import Vector2, Rect, Color, Surface
pygame.init()
class Signal:
def __init__(self):
self.handlers = []
def connect(self, handler: callable) -> None:
self.handlers.append(handler)
def disconnect(self, handler: callable) -> None:
self.handlers.remove(handler)
def disconnect_all(self) -> None:
self.handlers.clear()
def emit(self, *args, **kwargs) -> None:
for handler in self.handlers:
handler(*args, **kwargs)
class Buff:
def __init__(self):
self.name = "Buff"
self.desc = ""
def __str__(self) -> str:
return f"{self.name}({self.desc})"
def use(self, player: "Player") -> None:
pass
class HealthBuff(Buff):
def __init__(self, value: int):
super().__init__()
        self.name = "Press On"
        self.desc = f"Restores {value} health."
self.value = value
def use(self, player: "Player") -> None:
player.health += self.value
class BulletDamage(Buff):
def __init__(self, value: int):
super().__init__()
        self.name = "Awakened Blood"
        self.desc = f"Each bullet deals {value} more damage."
self.value = value
def use(self, player: "Player") -> None:
player.gun.bullet_damage += self.value
class BulletSpeed(Buff):
def __init__(self, value: int):
super().__init__()
        self.name = "Swift Resolution"
        self.desc = f"Bullet speed increased by {value}."
self.value = value
def use(self, player: "Player") -> None:
player.gun.bullet_speed += self.value
class FireRateBuff(Buff):
def __init__(self, value: int):
super().__init__()
        self.name = "Unstoppable Speed"
        self.desc = f"Fire rate increased by {value}."
self.value = value
def use(self, player: "Player") -> None:
player.gun.firing_rate += self.value
class FireBulletCountBuff(Buff):
def __init__(self, value: int):
super().__init__()
        self.name = "Firepower Coverage"
        self.desc = f"Fires {value} more bullet(s) per shot."
self.value = value
def use(self, player: "Player") -> None:
player.gun.fire_bullet_count += self.value
class BulletKnockbackForce(Buff):
def __init__(self, value: int):
super().__init__()
        self.name = "Brute Force"
        self.desc = f"Knockback force increased by {value}."
self.value = value
def use(self, player: "Player") -> None:
player.gun.bullet_knockback_force += self.value
Buffs = [
(HealthBuff, 5, 15),
(BulletDamage, 1, 30),
(BulletSpeed, 20, 100),
(FireRateBuff, 1, 2),
(FireBulletCountBuff, 1, 2),
(BulletKnockbackForce, 1, 5)
]
class Node2D:
def __init__(self, parent: "Node2D", pos: Vector2, size: Vector2, z_index: int = 0):
self.parent = parent
self.pos = pos
self.size = size
self.collision_rect = Rect(pos, size)
self.z_index = z_index
self.visible = True
self.can_paused = True
self.children = []
self.can_collide = False
self.has_collided_signal = Signal()
self.set_parent(parent)
def update(self, delta: float) -> None:
pass
def draw(self, surface: Surface) -> None:
if not self.visible: return
def set_parent(self, parent: "Node2D") -> None:
self.parent = parent
if parent is None:
return
if not self in parent.children:
parent.children.append(self)
def add_child(self, child: "Node2D") -> None:
self.children.append(child)
child.set_parent(self)
def remove_child(self, child: "Node2D") -> None:
if child not in self.children:
return
self.children.remove(child)
child.set_parent(None)
def remove_all_children(self) -> None:
for child in self.children[:]:
self.children.remove(child)
child.set_parent(None)
def remove(self) -> None:
if self.parent is None:
return
self.parent.remove_child(self)
self.parent = None
def get_all_children(self) -> list:
all_children = []
for child in self.children:
all_children.append(child)
all_children.extend(child.get_all_children())
return all_children
def get_root(self) -> "Root":
if isinstance(self, Root):
return self
if isinstance(self.parent, Root):
return self.parent
return self.parent.get_root()
def get_rect(self) -> Rect:
return Rect(self.pos, self.size)
def add_in_group(self, name: str) -> None:
root = self.get_root()
if root is None: return
group = root.groups.get(name)
if group is None:
group = []
root.groups[name] = group
group.append(self)
class Root(Node2D):
instance = None
def __new__(cls, *args, **kwargs):
if cls.instance is None:
cls.instance = super().__new__(cls)
return cls.instance
def __init__(self):
super().__init__(None, Vector2(0, 0), Vector2(pygame.display.get_surface().get_size()))
self.groups = {}
self.delta = 0.0
self.clear_color = Color(0, 0, 0)
self.mouse_pos = Vector2(0, 0)
self.__pause_time = 0
self.__pause_duration = 0
self.__is_paused = False
def update(self, delta: float) -> None:
self.delta = delta
self.mouse_pos = pygame.mouse.get_pos()
def get_nodes_in_group(self, name: str) -> list:
group = self.groups.get(name)
if group is None:
return []
return group
def get_first_node_in_group(self, name: str) -> Node2D:
group = self.groups.get(name)
if group is None:
return None
return group[0]
def pause(self, value: bool) -> None:
self.__is_paused = value
if value:
self.__pause_time = pygame.time.get_ticks()
else:
self.__pause_duration += pygame.time.get_ticks() - self.__pause_time
def is_paused(self) -> bool:
return self.__is_paused
def get_ticks(self, offset: int = 0) -> int:
return pygame.time.get_ticks() - self.__pause_duration + offset
class Sprite2D(Node2D):
def __init__(self, parent: Node2D, pos: Vector2, image: Surface):
super().__init__(parent, pos, Vector2(image.get_size()))
self.image = image
self.size = Vector2(image.get_size())
def draw(self, surface: Surface) -> None:
super().draw(surface)
surface.blit(self.image, self.pos)
class HealthBar(Sprite2D):
def __init__(self, parent: Node2D, max_health: int, pos: Vector2, size: Vector2, border: int = 1):
super().__init__(parent, pos, Surface(size))
self.max_health = max_health
self.health = max_health
self.border = border
self.border_color = Color(255, 255, 255)
self.value_color = Color(255, 0, 0)
self.z_index = 5
self.image.set_colorkey((0, 0, 0))
def draw(self, surface: Surface) -> None:
self.image.fill((0, 0, 0))
pygame.draw.rect(self.image, self.border_color, (0, 0, self.size.x, self.size.y), self.border)
pygame.draw.rect(self.image, self.value_color, (self.border, self.border, (self.size.x - self.border * 2) * self.health * 1.0 / self.max_health, self.size.y - self.border * 2))
super().draw(surface)
class Bullet(Sprite2D):
def __init__(self, parent: Node2D, pos: Vector2, direction: Vector2):
super().__init__(parent, pos, Surface((10, 10)))
self.speed = 800
self.damage = 5
self.knockback_force = 5
self.can_penetrate = False
self.direction = direction
self.z_index = 2
self.image.set_colorkey((0, 0, 0))
self.can_collide = True
self.pos -= self.size / 2
def update(self, delta: float) -> None:
self.pos += self.speed * self.direction * delta
self.collision_rect.topleft = self.pos
rect = pygame.display.get_surface().get_rect()
if self.pos.x < 0 or self.pos.x > rect.width or self.pos.y < 0 or self.pos.y > rect.height:
self.remove()
def draw(self, surface: Surface) -> None:
pygame.draw.circle(self.image, (0, 255, 0), self.size / 2, self.size.x / 2)
super().draw(surface)
class Gun(Node2D):
def __init__(self, parent: Node2D):
super().__init__(parent, Vector2(parent.get_rect().center), Vector2())
self.z_index = 3
self.__laste_fire_time = 0
self.firing_rate = 3
self.bullet_damage = 5
self.bullet_speed = 800
self.bullet_knockback_force = 5
self.bullet_can_penetrate = False
self.fire_bullet_count = 1
def _create_bullet(self, direction: Vector2) -> None:
bullet = Bullet(self, self.pos.copy(), direction)
bullet.can_penetrate = self.bullet_can_penetrate
bullet.damage = self.bullet_damage
bullet.speed = self.bullet_speed
bullet.knockback_force = self.bullet_knockback_force
def _create_multiple_bullets(self, count: int, base_direction: Vector2, rotate_angle: float) -> None:
for i in range(1, count + 1):
angle = base_direction
if self.fire_bullet_count % 2 == 0:
if i == 1:
angle = base_direction.rotate(rotate_angle / 2)
else:
angle = base_direction.rotate((i-1) * rotate_angle + rotate_angle / 2)
else:
angle = base_direction.rotate(i * rotate_angle)
self._create_bullet(angle)
def fire(self, direction: Vector2) -> None:
if self.get_root().get_ticks() - self.__laste_fire_time < 1000.0 / self.firing_rate:
return
half = self.fire_bullet_count // 2
self._create_multiple_bullets(half, direction, 5)
if not self.fire_bullet_count % 2 == 0:
self._create_bullet(direction)
self._create_multiple_bullets(half, direction, -5)
self.__laste_fire_time = self.get_root().get_ticks()
class Player(Sprite2D):
def __init__(self, parent: Node2D, pos: Vector2):
super().__init__(parent, pos, Surface(Vector2(60, 60)))
self.speed = 500
self.z_index = 1
self.can_collide = True
self.image.fill((255, 0, 0))
self.limit_rect = Rect(pygame.display.get_surface().get_rect())
self.died_signal = Signal()
self.max_health = 100
self.health = self.max_health
self.score = 0
self.kill_count = 0
self.add_in_group("player")
self.gun = Gun(self)
self.gun.bullet_damage = 50
self.gun.firing_rate = 5
self.gun.bullet_speed = 650
self.gun.bullet_knockback_force = 5
self.gun.fire_bullet_count = 1
# self.gun.bullet_can_penetrate = True
self._init_data = self._get_init_data()
def __str__(self) -> str:
return f"""Player(
bullet_damage: {self.gun.bullet_damage},
firing_rate: {self.gun.firing_rate},
bullet_speed: {self.gun.bullet_speed},
bullet_knockback_force: {self.gun.bullet_knockback_force},
fire_bullet_count: {self.gun.fire_bullet_count},
health: {self.health},
)"""
def update(self, delta: float) -> None:
keys = pygame.key.get_pressed()
direction = Vector2()
if keys[pygame.K_w]:
direction.y = -1
if keys[pygame.K_s]:
direction.y = 1
if keys[pygame.K_a]:
direction.x = -1
if keys[pygame.K_d]:
direction.x = 1
pos = self.pos
direction = direction.normalize() if direction.length() != 0 else direction
pos += direction * self.speed * delta
if pos.x < self.limit_rect.left:
pos.x = self.limit_rect.left
if pos.x > self.limit_rect.right - self.size.x:
pos.x = self.limit_rect.right - self.size.x
if pos.y < self.limit_rect.top:
pos.y = self.limit_rect.top
if pos.y > self.limit_rect.bottom - self.size.y:
pos.y = self.limit_rect.bottom - self.size.y
self.pos = pos
self.collision_rect.topleft = self.pos
self.gun.pos = Vector2(self.get_rect().center)
shoot_direction = Vector2(pygame.mouse.get_pos()) - self.get_rect().center
shoot_direction = shoot_direction.normalize() if shoot_direction.length()!= 0 else shoot_direction
# if pygame.mouse.get_pressed()[0]:
self.gun.fire(shoot_direction)
def set_health(self, health: int) -> None:
self.health = pygame.math.clamp(health, 0, self.max_health)
if self.health <= 0:
self.died_signal.emit()
def _get_init_data(self) -> dict:
return {
"pos": self.pos.copy(),
"speed": self.speed,
"health": self.health,
"score": self.score,
"kill_count": self.kill_count,
"max_health": self.max_health,
"bullet_can_penetrate": self.gun.bullet_can_penetrate,
"bullet_damage": self.gun.bullet_damage,
"firing_rate": self.gun.firing_rate,
"bullet_speed": self.gun.bullet_speed,
"bullet_knockback_force": self.gun.bullet_knockback_force,
"fire_bullet_count": self.gun.fire_bullet_count,
}
def restore_init_data(self) -> None:
self.pos = self._init_data["pos"]
self.speed = self._init_data["speed"]
self.health = self._init_data["health"]
self.score = self._init_data["score"]
self.kill_count = self._init_data["kill_count"]
self.max_health = self._init_data["max_health"]
self.gun.bullet_can_penetrate = self._init_data["bullet_can_penetrate"]
self.gun.bullet_damage = self._init_data["bullet_damage"]
self.gun.firing_rate = self._init_data["firing_rate"]
self.gun.bullet_speed = self._init_data["bullet_speed"]
self.gun.bullet_knockback_force = self._init_data["bullet_knockback_force"]
self.gun.fire_bullet_count = self._init_data["fire_bullet_count"]
class Cursor(Sprite2D):
def __init__(self, parent: Node2D):
super().__init__(parent, Vector2(0, 0), Surface((12, 12)))
self.z_index = 9999
self.thickness = 2
self.color = Color((0, 255, 0))
self.image.set_colorkey((0, 0, 0))
self.can_paused = False
pygame.mouse.set_visible(False)
def update(self, delta: float) -> None:
self.pos = pygame.mouse.get_pos()
def draw(self, surface: Surface) -> None:
pygame.draw.line(self.image, self.color, Vector2(self.size.x / 2 - self.thickness / 2, 0), Vector2(self.size.x / 2 - self.thickness / 2, self.size.y), self.thickness)
pygame.draw.line(self.image, self.color, Vector2(0, self.size.y / 2 - self.thickness / 2), Vector2(self.size.x, self.size.y / 2 - self.thickness / 2), self.thickness)
super().draw(surface)
class Enemy(Sprite2D):
init_data = {}
def __init__(self, parent: Node2D, pos: Vector2):
super().__init__(parent, pos, Surface((30, 30)))
self.speed = 80
self.z_index = 0
self.image.fill((255, 255, 255))
self.can_collide = True
self.player = self.get_root().get_first_node_in_group("player")
self.max_health = 500
self.health = self.max_health
Enemy.init_data = self._get_init_data()
self.health_bar = HealthBar(self, self.max_health, Vector2(self.pos.x, self.pos.y - 15), Vector2(self.size.x, 8), 2)
self.has_collided_signal.connect(self._on_has_collided_signal)
def update(self, delta: float) -> None:
self.collision_rect.topleft = self.pos
self.health_bar.pos = Vector2(self.pos.x, self.pos.y - 15)
direction = Vector2(self.player.get_rect().center) - Vector2(self.get_rect().center)
if direction.length()!= 0:
direction = direction.normalize()
self.pos += direction * self.speed * delta
def draw(self, surface: Surface) -> None:
pygame.draw.circle(self.image, (255, 0, 0), self.size / 2, 7.5)
super().draw(surface)
def _get_init_data(self) -> dict:
return {
"speed": self.speed,
"health": self.health,
"max_health": self.max_health,
}
def _on_has_collided_signal(self, node: Node2D) -> None:
if isinstance(node, Bullet):
self.health -= node.damage
self.health_bar.health = self.health
self.pos += node.direction * node.knockback_force
if self.health <= 0:
self.player.score += 5
self.player.kill_count += 1
self.remove()
if not node.can_penetrate:
node.remove()
if isinstance(node, Player):
self.player.score -= 10
self.player.set_health(self.player.health - 10)
self.player.kill_count += 1
self.remove()
class Lable(Node2D):
def __init__(self, parent: Node2D, pos: Vector2, text: str = ""):
super().__init__(parent, pos, Vector2())
self.font = pygame.font.SysFont("SimHei", 30)
self.font_color = Color(255, 255, 255)
self.text_surfaces = []
self.__text = text
self.set_text(text)
def update(self, delta: float) -> None:
self.text_surfaces.clear()
lines = self.__text.split("\n")
line_height = self.font.get_linesize()
y = self.pos.y
max_width = 0
for line in lines:
text_surface = self.font.render(line, True, self.font_color)
self.text_surfaces.append((text_surface, Vector2(self.pos.x, y)))
y += line_height
if text_surface.get_width() > max_width:
max_width = text_surface.get_width()
self.size = Vector2(max_width, len(lines) * line_height)
def draw(self, surface: Surface) -> None:
for text_surface, pos in self.text_surfaces:
surface.blit(text_surface, pos)
def set_text(self, text: str) -> None:
self.__text = text
# self.text_surfaces.clear()
# lines = self.__text.split("\n")
# line_height = self.font.get_linesize()
# y = self.pos.y
# max_width = 0
# for line in lines:
# text_surface = self.font.render(line, True, self.font_color)
# self.text_surfaces.append((text_surface, Vector2(self.pos.x, y)))
# y += line_height
# if text_surface.get_width() > max_width:
# max_width = text_surface.get_width()
# self.size = Vector2(max_width, len(lines) * line_height)
def get_text(self) -> str:
return self.__text
class Button(Node2D):
def __init__(self, parent: Node2D, pos: Vector2, text: str):
super().__init__(parent, pos, Vector2())
self.padding = Vector2(10)
self.is_pressed = False
self.text_lbl = Lable(self, pos + self.padding, text)
self.bg_color = Color(0, 0, 0)
self.border_color = Color(255, 255, 255)
self.border_width = 3
self.hot_keys = []
self.hot_key_pressed = False
self.set_text(text)
self.pressed_singal = Signal()
def update(self, delta: float) -> None:
self.size = Vector2(self.text_lbl.size) + self.padding * 2
self.text_lbl.pos = self.pos + self.padding
self.text_lbl.z_index = self.z_index
self.text_lbl.can_paused = self.can_paused
self.text_lbl.visible = self.visible
if self.visible:
if pygame.mouse.get_pressed()[0] and self.get_rect().collidepoint(pygame.mouse.get_pos()) and not self.is_pressed:
self.pressed_singal.emit()
self.is_pressed = True
if not pygame.mouse.get_pressed()[0]:
self.is_pressed = False
keys = pygame.key.get_pressed()
for key in self.hot_keys:
if keys[key]:
if self.hot_key_pressed:
break
self.pressed_singal.emit()
self.hot_key_pressed = True
break
else:
self.hot_key_pressed = False
def draw(self, surface: Surface) -> None:
pygame.draw.rect(surface, self.bg_color, Rect(self.pos.x, self.pos.y, self.size.x, self.size.y))
pygame.draw.rect(surface, self.border_color, Rect(self.pos.x, self.pos.y, self.size.x, self.size.y), self.border_width)
def set_text(self, text: str) -> None:
self.text_lbl.set_text(text)
self.size = Vector2(self.text_lbl.size) + self.padding * 2
def get_text(self) -> str:
return self.text_lbl.get_text()
class BuffPanel(Sprite2D):
def __init__(self, parent: Node2D):
super().__init__(parent, Vector2(0, 0), Surface(pygame.display.get_surface().get_size(), pygame.SRCALPHA))
self.image.fill(Color(0, 0, 0, 100))
self.buff_btns = []
self.buff_btn1 = Button(self, Vector2(10, 10), "buff1")
self.buff_btn2 = Button(self, Vector2(120, 10), "buff2")
self.buff_btn3 = Button(self, Vector2(230, 10), "buff3")
self.buff_btns.append(self.buff_btn1)
self.buff_btns.append(self.buff_btn2)
self.buff_btns.append(self.buff_btn3)
self.player = self.get_root().get_first_node_in_group("player")
self.visible = False
self.can_paused = False
for btn in self.buff_btns:
btn.can_paused = self.can_paused
def update(self, delta: float) -> None:
width = 0
max_height = 0
for btn in self.buff_btns:
btn.z_index = self.z_index
width += btn.size.x
if btn.size.y > max_height:
max_height = btn.size.y
pos = Vector2((self.size.x - width - 2 * 20) / 2, (self.size.y - max_height) / 2)
for btn in self.buff_btns:
btn.pos = pos.copy()
pos.x += btn.size.x + 20
btn.visible = self.visible
def draw(self, surface: Surface) -> None:
super().draw(surface)
def _on_buff_btn_pressed(self, buff: Buff) -> None:
buff.use(self.player)
self.visible = False
self.get_root().pause(not self.get_root().is_paused())
def _bind_buff(self, btn: Button) -> None:
b = random.choice(Buffs)
buff = b[0](random.randint(b[1], b[2]))
btn.set_text(f"{buff.name}\n{buff.desc}")
btn.pressed_singal.disconnect_all()
btn.pressed_singal.connect(lambda: self._on_buff_btn_pressed(buff))
def display(self) -> None:
for btn in self.buff_btns:
self._bind_buff(btn)
self.visible = True
class GameOverPanel(Sprite2D):
def __init__(self, parent: Node2D):
super().__init__(parent, Vector2(0, 0), Surface(pygame.display.get_surface().get_size(), pygame.SRCALPHA))
self.image.fill(Color(0, 0, 0, 100))
self.z_index = 99
self.can_paused = False
self.visible = False
        self.lbl = Lable(self, Vector2(), "Game Over")
self.lbl.font_color = Color(255, 0, 0)
self.lbl.font = pygame.font.SysFont("SimHei", 50)
self.lbl.can_paused = False
        self.restart_bnt = Button(self, Vector2(10, 10), "Restart")
self.restart_bnt.can_paused = False
def update(self, delta: float) -> None:
self.lbl.visible = self.visible
self.restart_bnt.visible = self.visible
self.lbl.z_index = self.z_index
self.restart_bnt.z_index = self.z_index
self.lbl.pos = (self.size - self.lbl.size) / 2
self.lbl.pos.y -= 100
self.restart_bnt.pos = (self.size - self.restart_bnt.size) / 2
self.restart_bnt.pos.y += self.lbl.size.y
class TopUI(Node2D):
def __init__(self, parent: Node2D):
super().__init__(parent, Vector2(0, 0), Vector2(pygame.display.get_surface().get_size()))
self.z_index = 99
self.add_in_group("top_ui")
self.player = self.get_root().get_first_node_in_group("player")
self.player_health_bar = HealthBar(self, self.player.max_health, Vector2(), Vector2(400, 20), 3)
self.player_health_bar.z_index = self.z_index
self.player_health_bar.pos.x = (self.size.x - self.player_health_bar.size.x) / 2
self.player_health_bar.pos.y = self.size.y - self.player_health_bar.size.y - 10
self.player_health_lbl = Lable(self, Vector2(10, 10))
self.player_health_lbl.z_index = self.z_index
self.player_health_lbl.pos.y = self.player_health_bar.pos.y - self.player_health_lbl.size.y - 5
self.score_lbl = Lable(self, Vector2(10, 10))
self.score_lbl.z_index = self.z_index
self.timer_lbl = Lable(self, Vector2(10, 10))
self.timer_lbl.z_index = self.z_index
self.kill_count_lbl = Lable(self, Vector2(10, 10))
self.kill_count_lbl.z_index = self.z_index
self.buff_panel = BuffPanel(self)
self.buff_panel.z_index = self.z_index + 1
self.buff_panel.visible = False
        pause_btn = Button(self, Vector2(10, 10), "Pause")
pause_btn.visible = False
pause_btn.can_paused = False
pause_btn.hot_keys.append(pygame.K_ESCAPE)
def _on_pause_btn_pressed():
if self.over_panel.visible: return
if self.buff_panel.visible: return
self.get_root().pause(not self.get_root().is_paused())
pause_btn.pressed_singal.connect(_on_pause_btn_pressed)
self.over_panel = GameOverPanel(self)
self.over_panel.z_index = self.z_index + 1
def update(self, delta: float) -> None:
self.player_health_bar.health = self.player.health
lbl_text = f"{self.player.health}/{self.player.max_health}"
self.player_health_lbl.set_text(lbl_text)
self.player_health_lbl.pos.x = (self.size.x - self.player_health_lbl.size.x) / 2
        self.score_lbl.set_text(f"Score: {self.player.score}")
self.timer_lbl.pos.x = (self.size.x - self.timer_lbl.size.x) / 2
self.kill_count_lbl.pos.x = self.size.x - self.kill_count_lbl.size.x - 10
        self.kill_count_lbl.set_text(f"Kills: {self.player.kill_count}")
def _convert_time(self, time: int) -> str:
minutes = time // 60
seconds = time % 60
return f"{minutes:02}:{seconds:02}"
def update_timer_lbl(self, offset: int) -> None:
self.timer_lbl.set_text(f"{self._convert_time(int(self.get_root().get_ticks(offset) / 1000))}")
class MainScene(Node2D):
def __init__(self, root: Root):
super().__init__(root, Vector2(0, 0), Vector2(pygame.display.get_surface().get_size()))
self.z_index = 0
self.max_enemy_count = 10
self.create_enemies_range = 100
self.update_buff_time = 10
self.enemy_health = 500
self.enemy_speed = 80
Cursor(self)
self.player = Player(self, self.size / 2)
self.enemies = Node2D(self, Vector2(0, 0), Vector2(0, 0))
self.top_ui = TopUI(self)
self.player.died_signal.connect(self.game_over)
self.top_ui.over_panel.restart_bnt.pressed_singal.connect(self._on_game_over_btn_pressed)
self.start_time = self.get_root().get_ticks()
self.over_time = 0
def update(self, delta: float) -> None:
self.top_ui.update_timer_lbl(-self.over_time)
if len(self.enemies.children) < self.max_enemy_count:
pos = Vector2()
flag = random.randrange(0, 4)
if flag == 0:
pos = Vector2(random.randint(0, self.size.x), random.randint(-self.create_enemies_range, 0))
elif flag == 1:
pos = Vector2(random.randint(0, self.size.x), random.randint(self.size.y, self.size.y + self.create_enemies_range))
elif flag == 2:
pos = Vector2(random.randint(-self.create_enemies_range, 0), random.randint(0, self.size.y))
else:
pos = Vector2(random.randint(self.size.x, self.size.x + self.create_enemies_range), random.randint(0, self.size.y))
enemy = Enemy(self.enemies, pos)
enemy.max_health = self.enemy_health
enemy.health = self.enemy_health
enemy.speed = self.enemy_speed
for enemy in self.enemies.children[:]:
if enemy.health <= 0:
self.enemies.remove_child(enemy)
if self.get_root().get_ticks() - self.start_time >= self.update_buff_time * 1000:
if int((self.get_root().get_ticks() - self.start_time) / 1000) % self.update_buff_time == 0:
self.get_root().pause(True)
self.top_ui.buff_panel.display()
self.enemy_health += 100
self.enemy_speed += 5
self.start_time = self.get_root().get_ticks()
def game_over(self) -> None:
self.get_root().pause(True)
self.top_ui.over_panel.visible = True
self.over_time = self.get_root().get_ticks()
def _on_game_over_btn_pressed(self) -> None:
self.get_root().pause(False)
self.player.restore_init_data()
self.enemy_health = Enemy.init_data["health"]
self.enemy_speed = Enemy.init_data["speed"]
self.enemies.remove_all_children()
self.player.gun.remove_all_children()
self.top_ui.over_panel.visible = False
self.start_time = self.get_root().get_ticks()
class Game:
def __init__(self):
self.screen = pygame.display.set_mode((1280, 720))
self.clock = pygame.time.Clock()
self.running = True
self.root = Root()
self.root.clear_color = Color(47, 47, 47)
MainScene(self.root)
def run(self) -> None:
while self.running:
self.clock.tick(120)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
self.screen.fill(self.root.clear_color)
delta = self.clock.get_time() / 1000
self.root.update(delta)
for node in sorted(self.root.get_all_children(), key=lambda node: node.z_index):
if self.root.is_paused() and node.can_paused:
if node.visible:
node.draw(self.screen)
continue
node.update(delta)
if node.visible:
node.draw(self.screen)
if isinstance(node, Bullet):
for other_node in self.root.get_all_children():
if node.parent == other_node: continue
if not isinstance(other_node, Enemy): continue
if node.collision_rect.colliderect(other_node.collision_rect):
other_node.has_collided_signal.emit(node)
if not node.can_penetrate:
break
if isinstance(node, Player):
for other_node in self.root.get_all_children():
if not isinstance(other_node, Enemy): continue
if node.collision_rect.colliderect(other_node.collision_rect):
other_node.has_collided_signal.emit(node)
pygame.display.flip()
pygame.quit()
Game().run()
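Root.pause()/get_ticks() above implement a pause-aware clock: time spent paused is accumulated and subtracted from pygame.time.get_ticks(). A minimal framework-free sketch of the same idea using time.monotonic (an assumption; the original relies on pygame's tick counter):
# A pause-aware clock: elapsed time excludes everything spent paused.
import time

class PauseClock:
    def __init__(self):
        self._start = time.monotonic()
        self._pause_started = 0.0
        self._paused_total = 0.0
        self._paused = False

    def pause(self, value: bool) -> None:
        if value and not self._paused:
            self._pause_started = time.monotonic()      # remember when the pause began
        elif not value and self._paused:
            self._paused_total += time.monotonic() - self._pause_started
        self._paused = value

    def ticks_ms(self) -> int:
        return int((time.monotonic() - self._start - self._paused_total) * 1000)

clock = PauseClock()
clock.pause(True); time.sleep(0.05); clock.pause(False)
print(clock.ticks_ms())  # ~0: the 50 ms pause is excluded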
|
cliegc/simple-roguelike-game
|
main.py
|
main.py
|
py
| 32,075 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.init",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "pygame.Rect",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "pygame.Rect",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "pygame.Vector2",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "pygame.display.get_surface",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "pygame.Color",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "pygame.mouse.get_pos",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.get_ticks",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 262,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.get_ticks",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.get_ticks",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 272,
"usage_type": "attribute"
},
{
"api_name": "pygame.Vector2",
"line_number": 278,
"usage_type": "name"
},
{
"api_name": "pygame.Surface",
"line_number": 278,
"usage_type": "name"
},
{
"api_name": "pygame.Vector2",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 283,
"usage_type": "name"
},
{
"api_name": "pygame.Vector2",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "pygame.Surface",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "pygame.Color",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "pygame.Color",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 300,
"usage_type": "name"
},
{
"api_name": "pygame.draw.rect",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 302,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 303,
"usage_type": "attribute"
},
{
"api_name": "pygame.Vector2",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "pygame.Surface",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "pygame.display.get_surface",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 326,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface",
"line_number": 331,
"usage_type": "name"
},
{
"api_name": "pygame.draw.circle",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 332,
"usage_type": "attribute"
},
{
"api_name": "pygame.Vector2",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 351,
"usage_type": "name"
},
{
"api_name": "pygame.Vector2",
"line_number": 359,
"usage_type": "name"
},
{
"api_name": "pygame.Vector2",
"line_number": 373,
"usage_type": "name"
},
{
"api_name": "pygame.Vector2",
"line_number": 391,
"usage_type": "name"
},
{
"api_name": "pygame.Surface",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "pygame.display.get_surface",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 397,
"usage_type": "attribute"
},
{
"api_name": "pygame.key.get_pressed",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "pygame.key",
"line_number": 430,
"usage_type": "attribute"
},
{
"api_name": "pygame.Vector2",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "pygame.K_w",
"line_number": 432,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_s",
"line_number": 434,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_a",
"line_number": 436,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_d",
"line_number": 438,
"usage_type": "attribute"
},
{
"api_name": "pygame.Vector2",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "pygame.mouse.get_pos",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 458,
"usage_type": "attribute"
},
{
"api_name": "pygame.math.clamp",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "pygame.math",
"line_number": 464,
"usage_type": "attribute"
},
{
"api_name": "pygame.Vector2",
"line_number": 502,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 502,
"usage_type": "call"
},
{
"api_name": "pygame.Color",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "pygame.mouse.set_visible",
"line_number": 508,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 508,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.get_pos",
"line_number": 512,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 512,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface",
"line_number": 514,
"usage_type": "name"
},
{
"api_name": "pygame.draw.line",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 515,
"usage_type": "attribute"
},
{
"api_name": "pygame.Vector2",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "pygame.draw.line",
"line_number": 516,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 516,
"usage_type": "attribute"
},
{
"api_name": "pygame.Vector2",
"line_number": 516,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 524,
"usage_type": "name"
},
{
"api_name": "pygame.Surface",
"line_number": 525,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 544,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 546,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 552,
"usage_type": "name"
},
{
"api_name": "pygame.draw.circle",
"line_number": 553,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 553,
"usage_type": "attribute"
},
{
"api_name": "pygame.Vector2",
"line_number": 585,
"usage_type": "name"
},
{
"api_name": "pygame.Vector2",
"line_number": 586,
"usage_type": "call"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 587,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 587,
"usage_type": "attribute"
},
{
"api_name": "pygame.Color",
"line_number": 588,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 602,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 607,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 610,
"usage_type": "name"
},
{
"api_name": "pygame.Vector2",
"line_number": 638,
"usage_type": "name"
},
{
"api_name": "pygame.Vector2",
"line_number": 639,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 640,
"usage_type": "call"
},
{
"api_name": "pygame.Color",
"line_number": 643,
"usage_type": "call"
},
{
"api_name": "pygame.Color",
"line_number": 644,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 652,
"usage_type": "call"
},
{
"api_name": "pygame.mouse.get_pressed",
"line_number": 659,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 659,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.get_pos",
"line_number": 659,
"usage_type": "call"
},
{
"api_name": "pygame.mouse.get_pressed",
"line_number": 662,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 662,
"usage_type": "attribute"
},
{
"api_name": "pygame.key.get_pressed",
"line_number": 665,
"usage_type": "call"
},
{
"api_name": "pygame.key",
"line_number": 665,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface",
"line_number": 677,
"usage_type": "name"
},
{
"api_name": "pygame.draw.rect",
"line_number": 678,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 678,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 678,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 679,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 679,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 679,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 684,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 695,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 695,
"usage_type": "call"
},
{
"api_name": "pygame.display.get_surface",
"line_number": 695,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 695,
"usage_type": "attribute"
},
{
"api_name": "pygame.SRCALPHA",
"line_number": 695,
"usage_type": "attribute"
},
{
"api_name": "pygame.Color",
"line_number": 696,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 699,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 700,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 701,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 723,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 730,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 740,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 741,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 756,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 756,
"usage_type": "call"
},
{
"api_name": "pygame.display.get_surface",
"line_number": 756,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 756,
"usage_type": "attribute"
},
{
"api_name": "pygame.SRCALPHA",
"line_number": 756,
"usage_type": "attribute"
},
{
"api_name": "pygame.Color",
"line_number": 757,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 762,
"usage_type": "call"
},
{
"api_name": "pygame.Color",
"line_number": 763,
"usage_type": "call"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 764,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 764,
"usage_type": "attribute"
},
{
"api_name": "pygame.Vector2",
"line_number": 767,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 789,
"usage_type": "call"
},
{
"api_name": "pygame.display.get_surface",
"line_number": 789,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 789,
"usage_type": "attribute"
},
{
"api_name": "pygame.Vector2",
"line_number": 795,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 800,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 804,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 807,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 810,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 817,
"usage_type": "call"
},
{
"api_name": "pygame.K_ESCAPE",
"line_number": 820,
"usage_type": "attribute"
},
{
"api_name": "pygame.Vector2",
"line_number": 860,
"usage_type": "call"
},
{
"api_name": "pygame.display.get_surface",
"line_number": 860,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 860,
"usage_type": "attribute"
},
{
"api_name": "pygame.Vector2",
"line_number": 870,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 883,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 884,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 886,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 886,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 888,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 888,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 890,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 890,
"usage_type": "call"
},
{
"api_name": "pygame.Vector2",
"line_number": 892,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 892,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 932,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 932,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Clock",
"line_number": 933,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 933,
"usage_type": "attribute"
},
{
"api_name": "pygame.Color",
"line_number": 936,
"usage_type": "call"
},
{
"api_name": "pygame.event.get",
"line_number": 945,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 945,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 946,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.flip",
"line_number": 979,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 979,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 981,
"usage_type": "call"
}
] |
2794147606
|
#TRAINING OF A 2D CONVOLUTIONAL NETWORK - HSI CLASSIFICATION
#PCA is used for dimensionality reduction and extraction of spectral features. The convolutional network is fed
#an sxs window of the original image to generate spatial features from the convolution.
#A multinomial logistic regression classifier is used as the output layer. All layers are trained with supervision.
import warnings
warnings.filterwarnings('ignore')
from package.cargarHsi import CargarHsi
from package.prepararDatos import PrepararDatos
from package.PCA import princiapalComponentAnalysis
from package.MorphologicalProfiles import morphologicalProfiles
from package.dataLogger import DataLogger
from keras import layers
from keras import models
from keras import regularizers
from keras import backend as K
import matplotlib.pyplot as plt
import numpy as np
import os
#LOAD HSI IMAGE AND GROUND TRUTH
numTest = 10
dataSet = 'Urban'
test = 'pcaCNN2D' # pcaCNN2D eapCNN2D
fe_eap = False # false for PCA, true for EAP
ventana = 9 #2D PROCESSING WINDOW (sxs)
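#--- Illustrative sketch only (not called by this script): one way the sxs
#--- window around a labeled pixel could be cut from the HSI cube. The real
#--- extraction is done by PrepararDatos.extraerDatos2D; the function name,
#--- reflect padding and (rows, cols, bands) layout below are assumptions.
def _ejemplo_extraer_ventana(cube, row, col, s):
    half = s // 2
    #pad so that border pixels also get a full sxs neighborhood
    padded = np.pad(cube, ((half, half), (half, half), (0, 0)), mode='reflect')
    #after padding, original pixel (row, col) sits at (row + half, col + half)
    return padded[row:row + s, col:col + s, :]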
data = CargarHsi(dataSet)
imagen = data.imagen
groundTruth = data.groundTruth
#CREATE DATA LOGGER FILE
logger = DataLogger(fileName = dataSet, folder = test, save = True)
#PRINCIPAL COMPONENT ANALYSIS
pca = princiapalComponentAnalysis()
#imagenFE = pca.pca_calculate(imagen, varianza=0.95)
imagenFE = pca.pca_calculate(imagen, componentes=18)
print(imagenFE.shape)
#ESTIMATION OF EXTENDED ATTRIBUTE PROFILES
if fe_eap:
    mp = morphologicalProfiles()
    imagenFE = mp.EAP(imagenFE, num_thresholds=6)
    print(imagenFE.shape)
OA = 0
vectOA = np.zeros(numTest)
for i in range(0, numTest):
    #PREPARE TRAINING DATA
    preparar = PrepararDatos(imagenFE, groundTruth, False)
    datosEntrenamiento, etiquetasEntrenamiento, datosValidacion, etiquetasValidacion = preparar.extraerDatos2D(50, 30, ventana)
    datosPrueba, etiquetasPrueba = preparar.extraerDatosPrueba2D(ventana)
    #CONVOLUTIONAL NETWORK DEFINITION
    model = models.Sequential()
    model.add(layers.Conv2D(48, (5, 5), kernel_regularizer=regularizers.l2(0.001), activation='relu', input_shape=(datosEntrenamiento.shape[1], datosEntrenamiento.shape[2], datosEntrenamiento.shape[3])))
    #model.add(layers.MaxPooling2D((2,2), data_format='channels_last', strides=(1,1), padding='same'))
    model.add(layers.Conv2D(96, (3, 3), kernel_regularizer=regularizers.l2(0.001), activation='relu'))
    #model.add(layers.MaxPooling2D((2,2), data_format='channels_last', strides=(1,1), padding='same'))
    model.add(layers.Conv2D(96, (3, 3), kernel_regularizer=regularizers.l2(0.001), activation='relu'))
    #model.add(layers.MaxPooling2D((2,2), data_format='channels_last', strides=(1,1), padding='same'))
    #FULLY CONNECTED LAYERS
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(1024, kernel_regularizer=regularizers.l2(0.001), activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(1024, kernel_regularizer=regularizers.l2(0.001), activation='relu'))
    #ADD AN MLR CLASSIFIER ON TOP OF THE CONVNET (a softmax layer is equivalent to multinomial logistic regression)
    model.add(layers.Dense(groundTruth.max()+1, activation='softmax'))
    print(model.summary())
    #TRAIN THE CONVOLUTIONAL NETWORK
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
    history = model.fit(datosEntrenamiento, etiquetasEntrenamiento, epochs=35, batch_size=512, validation_data=(datosValidacion, etiquetasValidacion))
    #EVALUATE MODEL
    test_loss, test_acc = model.evaluate(datosPrueba, etiquetasPrueba)
    vectOA[i] = test_acc
    OA = OA + test_acc
    #LOG TRAINING DATA
    logger.savedataTrain(history)
    #SAVE CONVOLUTIONAL NETWORK MODEL
    model.save(os.path.join(logger.path, test + str(i) + '.h5'))
#GENERATE FINAL CLASSIFICATION MAP
print('dataOA = ' + str(vectOA))
print('OA = ' + str(OA/numTest))
datosSalida = model.predict(datosPrueba)
datosSalida = preparar.predictionToImage(datosSalida)
#PLOTS
data.graficarHsi_VS(groundTruth, datosSalida)
data.graficar_history(history)
K.clear_session()
logger.close()
|
davidruizhidalgo/unsupervisedRemoteSensing
|
2_Redes Supervisadas/hsi_CNN2D.py
|
hsi_CNN2D.py
|
py
| 4,213 |
python
|
es
|
code
| 13 |
github-code
|
6
|
[
{
"api_name": "warnings.filterwarnings",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "package.cargarHsi.CargarHsi",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "package.dataLogger.DataLogger",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "package.PCA.princiapalComponentAnalysis",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "package.MorphologicalProfiles.morphologicalProfiles",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "package.prepararDatos.PrepararDatos",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "keras.models",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "keras.regularizers.l2",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "keras.regularizers",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "keras.regularizers.l2",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "keras.regularizers",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "keras.regularizers.l2",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "keras.regularizers",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "keras.layers.Flatten",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "keras.layers.Dense",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "keras.regularizers.l2",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "keras.regularizers",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "keras.layers.Dense",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "keras.regularizers.l2",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "keras.regularizers",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "keras.layers.Dense",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "keras.layers",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "keras.backend.clear_session",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 94,
"usage_type": "name"
}
] |
26053689790
|
from itertools import permutations
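# Counts the distinct arrangements of the letters of "авторота" in which
# vowels and consonants strictly alternate (no two vowels and no two
# consonants are adjacent).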
vowels = ["о", "а"]
consonants = ["в", "т", "р"]
result = set()
for index, i in enumerate(permutations("авторота")):
    correct = True
    for symbol_index in range(0, len(i) - 1):
        if (i[symbol_index] in vowels and i[symbol_index + 1] in vowels) or \
                (i[symbol_index] in consonants and i[symbol_index + 1] in consonants):
            correct = False
            break
    if correct:
        result.add(i)
print(len(result))
|
Woolfer0097/UGE_IT
|
8 task/236.py
|
236.py
|
py
| 501 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "itertools.permutations",
"line_number": 7,
"usage_type": "call"
}
] |
30297740447
|
# Seq2Seq model with attention
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
from copy import copy
def init_weights(m):
    for name, param in m.named_parameters():
        if 'weight' in name:
            nn.init.normal_(param.data, mean=0, std=0.01)
        else:
            nn.init.constant_(param.data, 0)

def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

def train_an_epoch(model, iterator, optimizer, criterion, clip, device, scheduler, tf_ratio=1.):
    model.train()
    epoch_loss = 0
    for i, batch in enumerate(iterator):
        src = batch[0].float().to(device)
        trg = copy(batch[0]).float().to(device)
        optimizer.zero_grad()
        output = model(src, trg, tf_ratio).permute(1, 0, 2)
        loss = criterion(output, trg)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        scheduler.step()
        epoch_loss += loss.item()
    return epoch_loss / len(iterator)

def evaluate(model, iterator, criterion, device):
    model.eval()
    epoch_loss = 0
    with torch.no_grad():
        for i, batch in enumerate(iterator):
            src = batch[0].float().to(device)
            trg = copy(batch[0]).float().to(device)
            output = model(src, trg, 0).permute(1, 0, 2) #turn off teacher forcing
            loss = criterion(output, trg)
            epoch_loss += loss.item()
    return epoch_loss / len(iterator)

def epoch_time(start_time, end_time):
    elapsed_time = end_time - start_time
    elapsed_mins = int(elapsed_time / 60)
    elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
    return elapsed_mins, elapsed_secs
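# Hedged sketch of how the helpers above are presumably wired together; the
# optimizer, scheduler, loaders and hyperparameters are placeholders, not
# definitions from this file (kept commented out for that reason):
# for epoch in range(n_epochs):
#     start_time = time.time()
#     train_loss = train_an_epoch(model, train_loader, optimizer, criterion,
#                                 clip=1.0, device=device, scheduler=scheduler)
#     valid_loss = evaluate(model, valid_loader, criterion, device)
#     mins, secs = epoch_time(start_time, time.time())
#     print(f'Epoch {epoch}: {mins}m {secs}s | train {train_loss:.4f} | valid {valid_loss:.4f}')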
class Encoder(nn.Module):
    def __init__(self, num_layers=2, input_dim=10, emb_dim=64, enc_hid_dim=128, dec_hid_dim=128, dropout=0.5):
        super().__init__()
        self.embedding = nn.Linear(input_dim, emb_dim)
        self.norm = nn.LayerNorm(emb_dim)
        self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional = True, batch_first = True, num_layers=num_layers)
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)

    def forward(self, src):
        embedded = self.dropout(self.embedding(src))
        embedded = self.norm(embedded)
        #embedded = [batch size, n_time_steps, emb dim]
        outputs, hidden = self.rnn(embedded)
        #outputs = [batch size, n_time_steps, hid dim * num directions]
        #hidden = [n layers * num directions, batch size, hid dim]
        hidden = torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1)))
        #hidden is stacked [forward_1, backward_1, forward_2, backward_2, ...]
        #outputs are always from the last layer
        #hidden = [batch size, dec hid dim]
        return outputs, hidden

class Attention(nn.Module):
    def __init__(self, enc_hid_dim, dec_hid_dim):
        super().__init__()
        self.norm = nn.LayerNorm((enc_hid_dim * 2) + dec_hid_dim)
        self.attn = nn.Linear((enc_hid_dim * 2) + dec_hid_dim, dec_hid_dim)
        self.v = nn.Linear(dec_hid_dim, 1, bias = False)

    def forward(self, hidden, encoder_outputs):
        #hidden = [batch size, dec hid dim]
        #encoder_outputs = [batch size, src len, enc hid dim * 2]
        src_len = encoder_outputs.shape[1]
        #repeat decoder hidden state src_len times
        hidden = hidden.unsqueeze(1).repeat(1, src_len, 1)
        #hidden = [batch size, src len, dec hid dim]
        #encoder_outputs = [batch size, src len, enc hid dim * 2]
        energy = torch.tanh(self.attn(self.norm(torch.cat((hidden, encoder_outputs), dim = 2))))
        #energy = [batch size, src len, dec hid dim]
        attention = self.v(energy).squeeze(2)
        #attention = [batch size, src len]
        return F.softmax(attention, dim=1)

class Decoder(nn.Module):
    def __init__(self, attention, num_layer=1, output_dim=10, emb_dim=64, enc_hid_dim=128, dec_hid_dim=128, dropout=0.5):
        super().__init__()
        self.output_dim = output_dim
        self.attention = attention
        self.embedding = nn.Linear(output_dim, emb_dim)
        self.norm = nn.LayerNorm(emb_dim)
        self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim, num_layers=num_layer)
        self.fc_out = nn.Linear((enc_hid_dim * 2) + dec_hid_dim + emb_dim, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input, hidden, encoder_outputs, dec_hid=None):
        #input = [batch size, n_features]
        #hidden = [batch size, dec hid dim]
        #encoder_outputs = [batch size, n_time_steps, enc hid dim * 2]
        input = input.unsqueeze(1)
        #input = [batch size, 1, n_features]
        embedded = self.dropout(self.embedding(input)).permute(1, 0, 2)
        embedded = self.norm(embedded)
        #embedded = [1, batch_size, emb dim]
        a = self.attention(hidden, encoder_outputs)
        #a = [batch size, src len]
        a = a.unsqueeze(1)
        #a = [batch size, 1, src len]
        weighted = torch.bmm(a, encoder_outputs)
        #weighted = [batch size, 1, enc hid dim * 2]
        weighted = weighted.permute(1, 0, 2)
        #weighted = [1, batch size, enc hid dim * 2]
        rnn_input = torch.cat((embedded, weighted), dim = 2)
        #rnn_input = [1, batch size, (enc hid dim * 2) + emb dim]
        if dec_hid is None:
            output, dec_hid = self.rnn(rnn_input)
        else:
            output, dec_hid = self.rnn(rnn_input, dec_hid)
        embedded = embedded.squeeze(0)
        output = output.squeeze(0)
        weighted = weighted.squeeze(0)
        prediction = self.fc_out(torch.cat((output, weighted, embedded), dim = 1))
        #prediction = [batch size, output dim]
        return prediction, dec_hid

class Seq2Seq(nn.Module):
    def __init__(self, encoder, decoder, device):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.device = device

    def forward(self, src, trg, teacher_forcing_ratio = 0.5):
        #teacher_forcing_ratio is probability to use teacher forcing
        #e.g. if teacher_forcing_ratio is 0.75 we use teacher forcing 75% of the time
        batch_size = src.shape[0]
        trg_len = trg.shape[1]
        trg_vocab_size = self.decoder.output_dim
        #tensor to store decoder outputs
        outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(self.device)
        #encoder_outputs is all hidden states of the input sequence, back and forwards
        #hidden is the final forward and backward hidden states, passed through a linear layer
        encoder_outputs, hidden = self.encoder(src)
        #first input to the decoder (start token = [0, ..., 0])
        input = torch.zeros_like(trg[:, 0, :]).to(self.device)
        dec_hid = None
        for t in range(0, trg_len-1):
            #insert input token embedding, previous hidden state and all encoder hidden states
            #receive output tensor (predictions) and new hidden state
            output, dec_hid = self.decoder(input, hidden, encoder_outputs, dec_hid)
            #place predictions in a tensor holding predictions for each token
            outputs[t] = output
            hidden = dec_hid[-1, ...]
            #decide if we are going to use teacher forcing or not
            teacher_force = random.random() < teacher_forcing_ratio
            #if teacher forcing, use actual next token as next input
            #if not, use predicted token
            input = trg[:, t, :] if teacher_force else output
        return outputs
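# Minimal shape-check sketch (an addition, not part of the original training
# code); the dimensions follow the constructor defaults above.
if __name__ == "__main__":
    device = torch.device("cpu")
    enc = Encoder(num_layers=2, input_dim=10, emb_dim=64, enc_hid_dim=128, dec_hid_dim=128)
    attn = Attention(enc_hid_dim=128, dec_hid_dim=128)
    dec = Decoder(attn, num_layer=1, output_dim=10, emb_dim=64, enc_hid_dim=128, dec_hid_dim=128)
    model = Seq2Seq(enc, dec, device).to(device)
    src = torch.randn(4, 12, 10)   # [batch, time steps, features]
    trg = src.clone()              # autoencoder-style target, as in train_an_epoch
    out = model(src, trg, teacher_forcing_ratio=0.5)
    print(out.shape)               # torch.Size([12, 4, 10])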
|
three0-s/KT-ETRI
|
model.py
|
model.py
|
py
| 7,755 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.init.normal_",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.constant_",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "copy.copy",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils.clip_grad_norm_",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "torch.nn.LayerNorm",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "torch.nn.GRU",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "torch.tanh",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "torch.nn.LayerNorm",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "torch.tanh",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "torch.nn.LayerNorm",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "torch.nn.GRU",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "torch.bmm",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "torch.zeros",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 182,
"usage_type": "call"
}
] |
31698964136
|
import pytorch_lightning as pl
import torch
from src.training_class import CFG, BertModule
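# Assumed project layout (not shown in this file): src/training_class.py
# defines CFG, a config object with hyperparameters such as CFG.epochs, and
# BertModule, a pl.LightningModule wrapping the BERT classifier and its data.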
if __name__ == "__main__":
    torch.cuda.empty_cache()
    model = BertModule()
    trainer = pl.Trainer(
        accelerator="gpu",
        devices=1,
        max_epochs=CFG.epochs,
        precision=32,
        gradient_clip_val=1e-1,
        fast_dev_run=False,
        profiler=None,
        accumulate_grad_batches=4,  # effective batch size is 4x the dataloader batch size
        callbacks=None,
    )
    trainer.fit(model)
    trainer.validate(model)
|
ArturYasnov/Quora-Insincere-Questions-using-BERT
|
train.py
|
train.py
|
py
| 500 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.cuda.empty_cache",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "src.training_class.BertModule",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pytorch_lightning.Trainer",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "src.training_class.CFG.epochs",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "src.training_class.CFG",
"line_number": 13,
"usage_type": "name"
}
] |
6907369998
|
from flask import Flask, request, redirect
from twilio.twiml.messaging_response import MessagingResponse
from firebase import firebase
# from flask_cors import CORS
from twilio.rest import Client
import pyrebase
config = {
    "apiKey": "AIzaSyAEEO1frXfzyL6MCkRvgGz7qURfsTLajRc",
    "authDomain": "covid-19-fake-news-detector.firebaseapp.com",
    "databaseURL": "https://covid-19-fake-news-detector.firebaseio.com",
    "projectId": "covid-19-fake-news-detector",
    "storageBucket": "covid-19-fake-news-detector.appspot.com",
    "messagingSenderId": "401417810179",
    "appId": "1:401417810179:web:b5c7dac2f172bfdc11f936",
    "measurementId": "G-59YT063WPN"
}
fb = pyrebase.initialize_app(config)
db = fb.database()
app = Flask(__name__)
app.config.from_object(__name__)
firebase = firebase.FirebaseApplication("https://covid-19-fake-news-detector.firebaseio.com/", None)
@app.route("/status", methods=['POST'])
def sms_status(key):
update = firebase.get('/Incoming/'+key['name'],'status')
from_whatsapp_no = 'whatsapp:+14155238886'
to_whatsapp_no = 'whatsapp:+9189********'
account = "ACa0b9328e73aae3240844*******"
token = "cdd6da1ea1baf8050d20005d*******"
client = Client(account,token)
return str(client.messages.create(body= update, from_ =from_whatsapp_no, to = to_whatsapp_no))
@app.route("/sms", methods=['POST'])
def sms_reply():
# Fetch the message
usrid = request.form.get('From')
print(usrid)
msg = request.form.get('Body')
#json format for firebase
data = {
"userid": usrid,
"news": msg,
"status": "Wait, we are processing your request"
}
print("coming")
#Create db
key = firebase.post('/Incoming', data)
print(key['name'])
#read db
update = firebase.get('/Incoming/'+key['name'],'status')
print(update)
# Create reply
resp = MessagingResponse()
resp.message(update)
return str(resp)
# else:
# default = "Wait, we are processing your request"
# return (default)
if __name__ == "__main__":
    app.run(debug=True)
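# Example of exercising the /sms webhook locally; Twilio's webhook sends the
# same form-encoded fields (the number below is a placeholder):
#   curl -X POST http://localhost:5000/sms \
#        -d "From=whatsapp:+10000000000" -d "Body=some news headline"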
|
mayankchauhan96/Fake-news-detector
|
app.py
|
app.py
|
py
| 2,112 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pyrebase.initialize_app",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "firebase.firebase",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "firebase.firebase.FirebaseApplication",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "firebase.firebase.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "firebase.firebase",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "twilio.rest.Client",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "flask.request.form.get",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "firebase.firebase.post",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "firebase.firebase",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "firebase.firebase.get",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "firebase.firebase",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "twilio.twiml.messaging_response.MessagingResponse",
"line_number": 67,
"usage_type": "call"
}
] |
28509751362
|
# coding: utf-8
"""
Messente API
[Messente](https://messente.com) is a global provider of messaging and user verification services. * Send and receive SMS, Viber, WhatsApp and Telegram messages. * Manage contacts and groups. * Fetch detailed info about phone numbers. * Blacklist phone numbers to make sure you're not sending any unwanted messages. Messente builds [tools](https://messente.com/documentation) to help organizations connect their services to people anywhere in the world. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import inspect
import pprint
import re # noqa: F401
import six
from messente_api.configuration import Configuration
class DeliveryReportResponse(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'statuses': 'list[DeliveryResult]',
        'to': 'str',
        'omnimessage_id': 'str'
    }

    attribute_map = {
        'statuses': 'statuses',
        'to': 'to',
        'omnimessage_id': 'omnimessage_id'
    }

    def __init__(self, statuses=None, to=None, omnimessage_id=None, local_vars_configuration=None):  # noqa: E501
        """DeliveryReportResponse - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._statuses = None
        self._to = None
        self._omnimessage_id = None
        self.discriminator = None

        self.statuses = statuses
        self.to = to
        self.omnimessage_id = omnimessage_id

    @property
    def statuses(self):
        """Gets the statuses of this DeliveryReportResponse.  # noqa: E501

        Contains the delivery reports for each channel, ordered by send order  # noqa: E501

        :return: The statuses of this DeliveryReportResponse.  # noqa: E501
        :rtype: list[DeliveryResult]
        """
        return self._statuses

    @statuses.setter
    def statuses(self, statuses):
        """Sets the statuses of this DeliveryReportResponse.

        Contains the delivery reports for each channel, ordered by send order  # noqa: E501

        :param statuses: The statuses of this DeliveryReportResponse.  # noqa: E501
        :type statuses: list[DeliveryResult]
        """
        if self.local_vars_configuration.client_side_validation and statuses is None:  # noqa: E501
            raise ValueError("Invalid value for `statuses`, must not be `None`")  # noqa: E501

        self._statuses = statuses

    @property
    def to(self):
        """Gets the to of this DeliveryReportResponse.  # noqa: E501

        Phone number in e.164 format  # noqa: E501

        :return: The to of this DeliveryReportResponse.  # noqa: E501
        :rtype: str
        """
        return self._to

    @to.setter
    def to(self, to):
        """Sets the to of this DeliveryReportResponse.

        Phone number in e.164 format  # noqa: E501

        :param to: The to of this DeliveryReportResponse.  # noqa: E501
        :type to: str
        """
        if self.local_vars_configuration.client_side_validation and to is None:  # noqa: E501
            raise ValueError("Invalid value for `to`, must not be `None`")  # noqa: E501

        self._to = to

    @property
    def omnimessage_id(self):
        """Gets the omnimessage_id of this DeliveryReportResponse.  # noqa: E501

        Unique identifier for the omnimessage  # noqa: E501

        :return: The omnimessage_id of this DeliveryReportResponse.  # noqa: E501
        :rtype: str
        """
        return self._omnimessage_id

    @omnimessage_id.setter
    def omnimessage_id(self, omnimessage_id):
        """Sets the omnimessage_id of this DeliveryReportResponse.

        Unique identifier for the omnimessage  # noqa: E501

        :param omnimessage_id: The omnimessage_id of this DeliveryReportResponse.  # noqa: E501
        :type omnimessage_id: str
        """
        if self.local_vars_configuration.client_side_validation and omnimessage_id is None:  # noqa: E501
            raise ValueError("Invalid value for `omnimessage_id`, must not be `None`")  # noqa: E501

        self._omnimessage_id = omnimessage_id

    def to_dict(self, serialize=False):
        """Returns the model properties as a dict"""
        result = {}

        def convert(x):
            if hasattr(x, "to_dict"):
                args = inspect.getargspec(x.to_dict).args
                if len(args) == 1:
                    return x.to_dict()
                else:
                    return x.to_dict(serialize)
            else:
                return x

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            attr = self.attribute_map.get(attr, attr) if serialize else attr
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: convert(x),
                    value
                ))
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], convert(item[1])),
                    value.items()
                ))
            else:
                result[attr] = convert(value)

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DeliveryReportResponse):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, DeliveryReportResponse):
            return True

        return self.to_dict() != other.to_dict()
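# Minimal usage sketch (values below are placeholders, not from the API):
#   resp = DeliveryReportResponse(statuses=[], to="+37255000000", omnimessage_id="d2ca...")
#   print(resp.to_str())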
|
messente/messente-api-python
|
messente_api/models/delivery_report_response.py
|
delivery_report_response.py
|
py
| 6,393 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "messente_api.configuration.Configuration",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "inspect.getargspec",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "six.iteritems",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "pprint.pformat",
"line_number": 172,
"usage_type": "call"
}
] |
71889450427
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.views.generic import CreateView
from levangersundet.forms import DeltagerForm
from post_office import mail
class TestCreateView(CreateView):
    form_class = DeltagerForm
    template_name = 'test.html'

    def get_success_url(self):
        return '/nb/%s/' % self.testtype

    def form_valid(self, form):
        response = super(TestCreateView, self).form_valid(form)
        mail.send(
            [self.object.epost],
            settings.SERVER_EMAIL,
            template=self.testtype,
            context={'deltager': self.object},
            priority='now'
        )
        mail.send(
            [settings.TEST_NOTIFY_EMAIL],
            settings.SERVER_EMAIL,
            template='%s_notify' % self.testtype,
            context={'deltager': self.object},
            priority='now'
        )
        return response

    def dispatch(self, *args, **kwargs):
        self.testtype = kwargs.get('testtype', False)
        return super(TestCreateView, self).dispatch(*args, **kwargs)
|
fivethreeo/jsdev
|
mainapp/views.py
|
views.py
|
py
| 1,070 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.views.generic.CreateView",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "levangersundet.forms.DeltagerForm",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "post_office.mail.send",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "post_office.mail",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.SERVER_EMAIL",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "post_office.mail.send",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "post_office.mail",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.TEST_NOTIFY_EMAIL",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.SERVER_EMAIL",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 26,
"usage_type": "name"
}
] |
14018247881
|
import cv2
import numpy as np
def motion_detector(videofile):
    window_raw = "Raw video"
    window_preprocessed = "Preprocessed video"
    window_motion = "Video motion"
    window_finished = "Thermal Video"
    window_test1 = "Test1"
    cv2.namedWindow(window_raw)
    cv2.namedWindow(window_preprocessed)
    cv2.namedWindow(window_motion)
    cv2.namedWindow(window_finished)
    cv2.namedWindow(window_test1)
    # Set up video windows so that they don't overlap
    cv2.moveWindow(window_raw, 0, 0)
    cv2.moveWindow(window_preprocessed, 320, 0)
    cv2.moveWindow(window_motion, 0, 265)
    cv2.moveWindow(window_finished, 320, 265)
    cv2.moveWindow(window_test1, 640, 0)
    # Load video file
    if videofile is None:
        print("Could not find video file")
        return
    previous_frame = None
    frame_width = int(videofile.get(3))
    frame_height = int(videofile.get(4))
    size = (frame_width, frame_height)
    outer_bounds = [frame_width, 0, frame_height, 0]  # [xmin, xmax, ymin, ymax]
    result = cv2.VideoWriter('Results/Gas_detection.mp4', cv2.VideoWriter_fourcc(*'MP4V'), 18, size)
    high_activity_areas = [outer_bounds]
    activity_percentage = 0.8
    activity_area_pixel_margin = 50
    while True:
        # 1. Load image
        ret, frame = videofile.read()
        if ret:
            cv2.imshow(window_raw, frame)
            # 2. Prepare image; grayscale and blur
            prepared_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            prepared_frame = cv2.GaussianBlur(src=prepared_frame, ksize=(7,7), sigmaX=0)
            cv2.imshow(window_preprocessed, prepared_frame)
            # 3. Set previous frame and continue if there is None
            if (previous_frame is None):
                previous_frame = prepared_frame
                continue
            # calculate difference and update previous frame
            diff_frame = cv2.absdiff(src1=previous_frame, src2=prepared_frame)
            previous_frame = prepared_frame
            # 4. Dilate the image a bit to make differences more visible; more suitable for contour detection
            kernel = np.ones((1, 1))
            diff_frame = cv2.dilate(diff_frame, kernel, 1)
            # 5. Only keep areas that are different enough (> 3 / 255)
            thresh_frame = cv2.threshold(src=diff_frame, thresh=3, maxval=255, type=cv2.THRESH_BINARY)[1]
            cv2.imshow(window_motion, thresh_frame)
            finished_frame = frame
            contours, _ = cv2.findContours(image=thresh_frame, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE)
            for contour in contours:
                if cv2.contourArea(contour) < 5:
                    # too small: skip!
                    continue
                (x, y, w, h) = cv2.boundingRect(contour)
                cv2.rectangle(img=finished_frame, pt1=(x, y), pt2=(x + w, y + h), color=(0, 255, 0), thickness=2)
            cv2.imshow(window_finished, finished_frame)
            result.write(finished_frame)
            #----------------------------------------------------------------------------
            # for contour in contours:
            #     contour_placed_in_area = False
            #     if cv2.contourArea(contour) < 5:
            #         # too small: skip!
            #         continue
            #     [x, y, w, h] = cv2.boundingRect(contour)
            #     contour_border = [x, x+w, y, y+h]
            #     for area_border in high_activity_areas:
            #         # for i in range(0, 4):
            #         #     if(abs(contour_border[i]-area_border[i])>activity_area_pixel_margin):
            #         #         continue
            #         cont = cv2.drawContours(frame, area_border, -1, (255,0,0), 1)
            #         if(cv2.pointPolygonTest(cont, (x,y), True)):
            #             continue
            #         area_border = [min(area_border[0], contour_border[0]), max(area_border[1], contour_border[1]), min(area_border[2], contour_border[2]), max(area_border[3], contour_border[3])]
            #     cv2.rectangle(img=frame, pt1=(outer_bounds[0], outer_bounds[2]), pt2=(outer_bounds[1], outer_bounds[3]), color=(0, 0, 255), thickness=2)
            #     cv2.imshow(window_test1, frame)
            #----------------------------------------------------------------------------
        else:
            break
        # press escape to exit
        if (cv2.waitKey(30) == 27):
            return 0
    cv2.destroyAllWindows()
    # videofile.release()
    # result.release()
    return 1
# def main():
# cap = cv2.VideoCapture('/Users/MORFRE/Pictures/Mongstad/Flir dataset nov 2022/112ppm hydrogen/Leak/MOV_1669.mp4')
# motion_detector(cap)
|
Christdej/gas-analysis
|
src/gas_analysis/gas_detection.py
|
gas_detection.py
|
py
| 4,763 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "cv2.namedWindow",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.moveWindow",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.moveWindow",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.moveWindow",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.moveWindow",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.moveWindow",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.VideoWriter",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cv2.VideoWriter_fourcc",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "cv2.GaussianBlur",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "cv2.absdiff",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "cv2.dilate",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "cv2.findContours",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_EXTERNAL",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "cv2.contourArea",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "cv2.boundingRect",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 122,
"usage_type": "call"
}
] |
28220979750
|
#Import Necessary Packages
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import seaborn as sns
import ruptures as rpt
from statistics import stdev
import pandas as pd
def load_rms(path, sect, ref):
    raw_string = open('../../' + path + '/rmsd_' + sect + '_ref_' + ref + '.txt').readlines()
    #Convert data to float; the factor of 10 presumably converts nm to Angstroms (plot labels are in Angstroms)
    raw = np.zeros(len(raw_string))
    for i in range(len(raw_string)):
        raw[i] = float(raw_string[i])*10
    return raw
def plot_compare(RMSD_mean, RMSD_err, Label, sect, n, ref):
    rmsd_n = RMSD_mean[:,n]
    rmsd_err_n = RMSD_err[:,n]
    section = sect[n]
    # bar_color = ['gray', 'gray', 'pink', 'blue', 'pink', 'red', 'red']
    num = np.linspace(1, len(Label)+1, num = len(Label))
    fig = plt.figure(figsize=(18,10))
    ax1 = fig.add_subplot(111)
    ax1.set_title("RMSD for " + section + ' to ' + ref)
    ax1.set_ylabel(r'RMSD($\AA$)')
    ax1.bar(num, rmsd_n)
    plt.xticks(num, Label, fontsize=14)
    plt.errorbar(num, rmsd_n, yerr= rmsd_err_n, fmt='o', color='black')
    fig.savefig('RMSD_compare_' + section + '_' + ref + '.png')
    plt.close(fig)
def plot_kernel_mut(df, sect_name, sect_file, xmin, xmax):
    ax = sns.kdeplot(data = df, fill=True, alpha=0.5, common_grid = True)
    plt.setp(ax.get_legend().get_texts(), fontsize='12')  # for legend text
    plt.xlabel(r'RMSD($\AA$)', fontsize = 14)
    plt.xlim(xmin, xmax)
    plt.xticks(fontsize = 13)
    plt.yticks(fontsize = 13)
    plt.ylabel(r'Normalized Density', fontsize = 14)
    plt.title(str(sect_name) + r' RMSD Relative to WT Closed', fontsize = 15)
    plt.savefig('mutate_RMSD_' + str(sect_file) + '.png')
    plt.close()
def plot_kernel_cmpr_lig(apo_df, AD_df, BBR_df, mut, sect, n):
    df = pd.concat([apo_df, AD_df, BBR_df])
    sns.kdeplot(data = df, fill=True, alpha=0.5, common_norm = True, common_grid = True)
    plt.xlabel(r'RMSD($\AA$)', fontsize = 14)
    plt.ylabel(r'Normalized Density', fontsize = 14)
    plt.title(sect + ' RMSD Compared to WT Closed', fontsize = 15)
    plt.savefig('mutate_RMSD_' + sect + '_' + mut + '.png')
    plt.close()
def rmsd_sect(sect, file_path_close, file_path_close_AD, file_path_close_BBR, ref, n):
    rmsd_1sug = load_rms(file_path_close[0], sect, ref[n])
    rmsd_apo = load_rms(file_path_close[1], sect, ref[n])
    rmsd_L192F = load_rms(file_path_close[2], sect, ref[n])
    rmsd_E276F = load_rms(file_path_close[3], sect, ref[n])
    rmsd_F280Y = load_rms(file_path_close[4], sect, ref[n])
    rmsd_L195F = load_rms(file_path_close[5], sect, ref[n])
    rmsd_F196A = load_rms(file_path_close[6], sect, ref[n])
    rmsd_V287T = load_rms(file_path_close[7], sect, ref[n])
    rmsd_L192F_AD = load_rms(file_path_close_AD[0], sect, ref[n])
    rmsd_L192F_BBR = load_rms(file_path_close_BBR[0], sect, ref[n])
    rmsd_E276F_AD = load_rms(file_path_close_AD[1], sect, ref[n])
    rmsd_E276F_BBR = load_rms(file_path_close_BBR[1], sect, ref[n])
    rmsd_F280Y_AD = load_rms(file_path_close_AD[2], sect, ref[n])
    rmsd_F280Y_BBR = load_rms(file_path_close_BBR[2], sect, ref[n])
    rmsd_L195F_AD = load_rms(file_path_close_AD[3], sect, ref[n])
    rmsd_L195F_BBR = load_rms(file_path_close_BBR[3], sect, ref[n])
    rmsd_F196A_AD = load_rms(file_path_close_AD[4], sect, ref[n])
    rmsd_F196A_BBR = load_rms(file_path_close_BBR[4], sect, ref[n])
    rmsd_V287T_AD = load_rms(file_path_close_AD[5], sect, ref[n])
    rmsd_V287T_BBR = load_rms(file_path_close_BBR[5], sect, ref[n])
    return rmsd_1sug, rmsd_apo, rmsd_L192F, rmsd_E276F, rmsd_F280Y, rmsd_L195F, rmsd_F196A, rmsd_V287T, rmsd_L192F_AD, rmsd_E276F_AD, rmsd_F280Y_AD, rmsd_L195F_AD, rmsd_F196A_AD, rmsd_V287T_AD, rmsd_L192F_BBR, rmsd_E276F_BBR, rmsd_F280Y_BBR, rmsd_L195F_BBR, rmsd_F196A_BBR, rmsd_V287T_BBR
#File paths for all input files
file_path = ['../Apo_dis/analysis', 'L192F/Apo/analysis', 'E276F/Apo/analysis', 'F280Y/Apo/analysis', 'L195F/Apo/analysis', 'F196A/Apo/analysis', 'V287T/Apo/analysis'] #Indices to rank in order of closest activity to WT to Furthest
file_path_close = ['../Apo_1SUG/analysis/1sug', '../Apo_dis/analysis', 'L192F/Apo/analysis', 'E276F/Apo/analysis', 'F280Y/Apo/analysis', 'L195F/Apo/analysis', 'F196A/Apo/analysis', 'V287T/Apo/analysis']
file_path_close_AD = ['L192F/AD/analysis', 'E276F/AD/analysis', 'F280Y/AD/analysis', 'L195F/AD/analysis', 'F196A/AD/analysis', 'V287T/AD/analysis']
file_path_close_BBR = ['L192F/BBR/analysis', 'E276F/BBR/analysis', 'F280Y/BBR/analysis', 'L195F/BBR/analysis', 'F196A/BBR/analysis', 'V287T/BBR/analysis']
sections = ['WPD', 'WPD_a3', 'SBL', 'beg', 'P', 'CYS', 'a3', 'a3_top', 'a4', 'a5', 'a6', 'a6_bot', 'a7', 'Q']
ref = ['open', 'closed', 'self', 'F196A', 'V287T']
#open all files
RMSD_mean = np.zeros((len(file_path), len(sections))) #Mean for reference open
RMSD_err = np.zeros((len(file_path), len(sections))) #SEM for reference open
RMSD_mean_close = np.zeros((len(file_path_close), len(sections))) #Mean for reference closed
RMSD_err_close = np.zeros((len(file_path_close), len(sections))) #SEM for reference closed
RMSD_mean_close_AD = np.zeros((len(file_path_close_AD), len(sections))) #Mean for reference closed
RMSD_err_close_AD = np.zeros((len(file_path_close_AD), len(sections))) #SEM for reference closed
RMSD_mean_close_BBR = np.zeros((len(file_path_close_BBR), len(sections))) #Mean for reference closed
RMSD_err_close_BBR = np.zeros((len(file_path_close_BBR), len(sections))) #SEM for reference closed
#Save all rmsd values for a3_top, a4_top, and a6 helix
rmsd_a3_1sug, rmsd_a3_apo, rmsd_a3_L192F, rmsd_a3_E276F, rmsd_a3_F280Y, rmsd_a3_L195F, rmsd_a3_F196A, rmsd_a3_V287T, rmsd_a3_L192F_AD, rmsd_a3_E276F_AD, rmsd_a3_F280Y_AD, rmsd_a3_L195F_AD, rmsd_a3_F196A_AD, rmsd_a3_V287T_AD, rmsd_a3_L192F_BBR, rmsd_a3_E276F_BBR, rmsd_a3_F280Y_BBR, rmsd_a3_L195F_BBR, rmsd_a3_F196A_BBR, rmsd_a3_V287T_BBR = rmsd_sect('a3', file_path_close, file_path_close_AD, file_path_close_BBR, ref, 1)
rmsd_a3_top_1sug, rmsd_a3_top_apo, rmsd_a3_top_L192F, rmsd_a3_top_E276F, rmsd_a3_top_F280Y, rmsd_a3_top_L195F, rmsd_a3_top_F196A, rmsd_a3_top_V287T, rmsd_a3_top_L192F_AD, rmsd_a3_top_E276F_AD, rmsd_a3_top_F280Y_AD, rmsd_a3_top_L195F_AD, rmsd_a3_top_F196A_AD, rmsd_a3_top_V287T_AD, rmsd_a3_top_L192F_BBR, rmsd_a3_top_E276F_BBR, rmsd_a3_top_F280Y_BBR, rmsd_a3_top_L195F_BBR, rmsd_a3_top_F196A_BBR, rmsd_a3_top_V287T_BBR = rmsd_sect('a3_top', file_path_close, file_path_close_AD, file_path_close_BBR, ref, 1)
rmsd_a4_1sug, rmsd_a4_apo, rmsd_a4_L192F, rmsd_a4_E276F, rmsd_a4_F280Y, rmsd_a4_L195F, rmsd_a4_F196A, rmsd_a4_V287T, rmsd_a4_L192F_AD, rmsd_a4_E276F_AD, rmsd_a4_F280Y_AD, rmsd_a4_L195F_AD, rmsd_a4_F196A_AD, rmsd_a4_V287T_AD, rmsd_a4_L192F_BBR, rmsd_a4_E276F_BBR, rmsd_a4_F280Y_BBR, rmsd_a4_L195F_BBR, rmsd_a4_F196A_BBR, rmsd_a4_V287T_BBR = rmsd_sect('a4', file_path_close, file_path_close_AD, file_path_close_BBR, ref, 1)
rmsd_a6_1sug, rmsd_a6_apo, rmsd_a6_L192F, rmsd_a6_E276F, rmsd_a6_F280Y, rmsd_a6_L195F, rmsd_a6_F196A, rmsd_a6_V287T, rmsd_a6_L192F_AD, rmsd_a6_E276F_AD, rmsd_a6_F280Y_AD, rmsd_a6_L195F_AD, rmsd_a6_F196A_AD, rmsd_a6_V287T_AD, rmsd_a6_L192F_BBR, rmsd_a6_E276F_BBR, rmsd_a6_F280Y_BBR, rmsd_a6_L195F_BBR, rmsd_a6_F196A_BBR, rmsd_a6_V287T_BBR = rmsd_sect('a6', file_path_close, file_path_close_AD, file_path_close_BBR, ref, 1)
rmsd_a6_bot_1sug, rmsd_a6_bot_apo, rmsd_a6_bot_L192F, rmsd_a6_bot_E276F, rmsd_a6_bot_F280Y, rmsd_a6_bot_L195F, rmsd_a6_bot_F196A, rmsd_a6_bot_V287T, rmsd_a6_bot_L192F_AD, rmsd_a6_bot_E276F_AD, rmsd_a6_bot_F280Y_AD, rmsd_a6_bot_L195F_AD, rmsd_a6_bot_F196A_AD, rmsd_a6_bot_V287T_AD, rmsd_a6_bot_L192F_BBR, rmsd_a6_bot_E276F_BBR, rmsd_a6_bot_F280Y_BBR, rmsd_a6_bot_L195F_BBR, rmsd_a6_bot_F196A_BBR, rmsd_a6_bot_V287T_BBR = rmsd_sect('a6_bot', file_path_close, file_path_close_AD, file_path_close_BBR, ref, 1)
rmsd_CYS_1sug, rmsd_CYS_apo, rmsd_CYS_L192F, rmsd_CYS_E276F, rmsd_CYS_F280Y, rmsd_CYS_L195F, rmsd_CYS_F196A, rmsd_CYS_V287T, rmsd_CYS_L192F_AD, rmsd_CYS_E276F_AD, rmsd_CYS_F280Y_AD, rmsd_CYS_L195F_AD, rmsd_CYS_F196A_AD, rmsd_CYS_V287T_AD, rmsd_CYS_L192F_BBR, rmsd_CYS_E276F_BBR, rmsd_CYS_F280Y_BBR, rmsd_CYS_L195F_BBR, rmsd_CYS_F196A_BBR, rmsd_CYS_V287T_BBR = rmsd_sect('CYS', file_path_close, file_path_close_AD, file_path_close_BBR, ref, 1)
rmsd_beg_1sug, rmsd_beg_apo, rmsd_beg_L192F, rmsd_beg_E276F, rmsd_beg_F280Y, rmsd_beg_L195F, rmsd_beg_F196A, rmsd_beg_V287T, rmsd_beg_L192F_AD, rmsd_beg_E276F_AD, rmsd_beg_F280Y_AD, rmsd_beg_L195F_AD, rmsd_beg_F196A_AD, rmsd_beg_V287T_AD, rmsd_beg_L192F_BBR, rmsd_beg_E276F_BBR, rmsd_beg_F280Y_BBR, rmsd_beg_L195F_BBR, rmsd_beg_F196A_BBR, rmsd_beg_V287T_BBR = rmsd_sect('beg', file_path_close, file_path_close_AD, file_path_close_BBR, ref, 1)
#Load data for reference closed
for i in range(len(file_path_close)):
    for j in range(len(sections)):
        rmsd_Apo = load_rms(file_path_close[i], sections[j], ref[1])
        #Mean and SEM for each trajectory
        RMSD_mean_close[i][j] = np.mean(rmsd_Apo)
        RMSD_err_close[i][j] = stats.sem(rmsd_Apo)
#Load data for reference open
for i in range(len(file_path)):
    for j in range(len(sections)):
        rmsd = load_rms(file_path[i], sections[j], ref[0])
        #Mean and SEM for each trajectory
        RMSD_mean[i][j] = np.mean(rmsd)
        RMSD_err[i][j] = stats.sem(rmsd)
#Load data for ligand-bound trajectories, reference closed
for i in range(len(file_path_close_AD)):
    for j in range(len(sections)):
        rmsd_AD = load_rms(file_path_close_AD[i], sections[j], ref[1])
        RMSD_mean_close_AD[i][j] = np.mean(rmsd_AD)
        RMSD_err_close_AD[i][j] = stats.sem(rmsd_AD)
        rmsd_BBR = load_rms(file_path_close_BBR[i], sections[j], ref[1])
        RMSD_mean_close_BBR[i][j] = np.mean(rmsd_BBR)
        RMSD_err_close_BBR[i][j] = stats.sem(rmsd_BBR)
#Name Labels
Label = ['WT', 'L192F', 'E276F', 'F280Y', 'L195F', 'F196A', 'V287T']
Label_close = ['WT Close', 'WT Open', 'L192F', 'E276F', 'F280Y', 'L195F', 'F196A', 'V287T']
Labels_mut = ['L192F', 'E276F', 'F280Y', 'L195F', 'F196A', 'V287T']
#Plot all trajectories compared to the open (ref[0]) and closed (ref[1]) references
for i in range(len(sections)):
plot_compare(RMSD_mean, RMSD_err, Label, sections, i, ref[0])
plot_compare(RMSD_mean_close, RMSD_err_close, Label_close, sections, i, ref[1])
#Determine symmetric % difference from WT (difference divided by the mean of the two values)
RMSD_diff = np.zeros((len(sections), len(Labels_mut)))
for i in range(1, len(Label)):
n = i-1
for j in range(len(sections)):
WT = RMSD_mean[0][j]
Mut = RMSD_mean[i][j]
RMSD_diff[j][n] = ((Mut-WT)/((Mut+WT)/2)) * 100
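        #Example: WT = 1.0 and Mut = 1.5 gives ((1.5 - 1.0)/((1.5 + 1.0)/2)) * 100 = 40%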
#Plot heatmap comparing section RMSD of each mutant to WT
ax = plt.figure(figsize=(12, 6), frameon=False) # no visible frame
ax = sns.heatmap(RMSD_diff, annot=False, cmap = 'jet', cbar = True, cbar_kws={'label': 'Percentage Difference from WT'}, vmin = 0, vmax = 150, xticklabels = Labels_mut, yticklabels = sections)
#ax.add_artist(lines.Line2D([0, 20], [7, 7], color = 'black', linestyle= '--', linewidth = 4))
plt.title('Section RMSD Compared to WT')
plt.savefig('mutate_RMSD_Apo.png')
plt.close()
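#Note: with vmin = 0, any section where the mutant RMSD falls below WT (a negative
#percentage difference) saturates at the bottom of the color scale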
RMSD_mean_mut = np.zeros((len(Label_close), len(sections))) #Mean RMSD for each reference in the loop below
RMSD_err_mut = np.zeros((len(Label_close), len(sections))) #SEM for each reference in the loop below
#Plot RMSD relative to multiple reference structures (ref[0], ref[2], ref[3], ref[4])
for i in [0, 2, 3, 4]:
for j in range(len(sections)):
#Load Data
RMSD_mean_mut[0][j] = RMSD_mean_close[0][j]
RMSD_err_mut[0][j] = RMSD_err_close[0][j]
for k in range(1, len(Label_close)):
rmsd = load_rms(file_path_close[k], sections[j], ref[i])
RMSD_mean_mut[k][j] = np.mean(rmsd)
RMSD_err_mut[k][j] = stats.sem(rmsd)
plot_compare(RMSD_mean_mut, RMSD_err_mut, Label_close, sections, j, ref[i])
#Plot kernel density estimates (KDE)
#Compare a3_top for L192F, E276F, L195F, V287T
a3_top_Apo_open_df = pd.DataFrame({'Apo Open':rmsd_a3_top_apo})
a3_top_Apo_close_df = pd.DataFrame({'Apo Closed': rmsd_a3_top_1sug})
a3_top_L192F_df = pd.DataFrame({'L192F': rmsd_a3_top_L192F})
a3_top_L195F_df = pd.DataFrame({'L195F': rmsd_a3_top_L195F})
a3_top_F280Y_df = pd.DataFrame({'F280Y': rmsd_a3_top_F280Y})
a3_top_E276F_df = pd.DataFrame({'E276F': rmsd_a3_top_E276F})
a3_top_F196A_df = pd.DataFrame({'F196A': rmsd_a3_top_F196A})
a3_top_V287T_df = pd.DataFrame({'V287T': rmsd_a3_top_V287T})
a3_top_L192F_AD_df = pd.DataFrame({'L192F AD': rmsd_a3_top_L192F_AD})
a3_top_L195F_AD_df = pd.DataFrame({'L195F AD': rmsd_a3_top_L195F_AD})
a3_top_F280Y_AD_df = pd.DataFrame({'F280Y AD': rmsd_a3_top_F280Y_AD})
a3_top_E276F_AD_df = pd.DataFrame({'E276F AD': rmsd_a3_top_E276F_AD})
a3_top_F196A_AD_df = pd.DataFrame({'F196A AD': rmsd_a3_top_F196A_AD})
a3_top_V287T_AD_df = pd.DataFrame({'V287T AD': rmsd_a3_top_V287T_AD})
a3_top_L192F_BBR_df = pd.DataFrame({'L192F BBR': rmsd_a3_top_L192F_BBR})
a3_top_L195F_BBR_df = pd.DataFrame({'L195F BBR': rmsd_a3_top_L195F_BBR})
a3_top_F280Y_BBR_df = pd.DataFrame({'F280Y BBR': rmsd_a3_top_F280Y_BBR})
a3_top_E276F_BBR_df = pd.DataFrame({'E276F BBR': rmsd_a3_top_E276F_BBR})
a3_top_F196A_BBR_df = pd.DataFrame({'F196A BBR': rmsd_a3_top_F196A_BBR})
a3_top_V287T_BBR_df = pd.DataFrame({'V287T BBR': rmsd_a3_top_V287T_BBR})
df = pd.concat([a3_top_Apo_open_df, a3_top_Apo_close_df, a3_top_L192F_df, a3_top_E276F_df, a3_top_V287T_df, a3_top_F196A_df, a3_top_F280Y_df, a3_top_L195F_df])
plot_kernel_mut(df, r'Top of $\alpha$3', 'a3_top_all', 0, 2)
df = pd.concat([a3_top_L192F_df, a3_top_E276F_df, a3_top_V287T_df, a3_top_F196A_df, a3_top_F280Y_df, a3_top_L195F_df])
plot_kernel_mut(df, r'Top of $\alpha$3', 'a3_top_mut_all', 0, 2)
df = pd.concat([a3_top_Apo_open_df, a3_top_Apo_close_df, a3_top_V287T_df, a3_top_F280Y_df])
plot_kernel_mut(df, r'Top of $\alpha$3', 'a3_top_extr', 0, 2)
plot_kernel_cmpr_lig(a3_top_L192F_df, a3_top_L192F_AD_df, a3_top_L192F_BBR_df, 'L192F', sections[7], 7)
plot_kernel_cmpr_lig(a3_top_L195F_df, a3_top_L195F_AD_df, a3_top_L195F_BBR_df, 'L195F', sections[7], 7)
plot_kernel_cmpr_lig(a3_top_E276F_df, a3_top_E276F_AD_df, a3_top_E276F_BBR_df, 'E276F', sections[7], 7)
plot_kernel_cmpr_lig(a3_top_V287T_df, a3_top_V287T_AD_df, a3_top_V287T_BBR_df, 'V287T', sections[7], 7)
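#plot_kernel_cmpr_lig takes the section label and its index into sections; index 7 is
#assumed to correspond to a3_top here (indices 6, 8, and 11 to a3, a4, and a6 below)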
#Compare a3 for L192F, E276F, L195F, V287T
a3_Apo_open_df = pd.DataFrame({'Apo Open':rmsd_a3_apo})
a3_Apo_close_df = pd.DataFrame({'Apo Closed': rmsd_a3_1sug})
a3_L192F_df = pd.DataFrame({'L192F': rmsd_a3_L192F})
a3_L195F_df = pd.DataFrame({'L195F': rmsd_a3_L195F})
a3_F280Y_df = pd.DataFrame({'F280Y': rmsd_a3_F280Y})
a3_E276F_df = pd.DataFrame({'E276F': rmsd_a3_E276F})
a3_F196A_df = pd.DataFrame({'F196A': rmsd_a3_F196A})
a3_V287T_df = pd.DataFrame({'V287T': rmsd_a3_V287T})
a3_L192F_AD_df = pd.DataFrame({'L192F AD': rmsd_a3_L192F_AD})
a3_L195F_AD_df = pd.DataFrame({'L195F AD': rmsd_a3_L195F_AD})
a3_F280Y_AD_df = pd.DataFrame({'F280Y AD': rmsd_a3_F280Y_AD})
a3_E276F_AD_df = pd.DataFrame({'E276F AD': rmsd_a3_E276F_AD})
a3_F196A_AD_df = pd.DataFrame({'F196A AD': rmsd_a3_F196A_AD})
a3_V287T_AD_df = pd.DataFrame({'V287T AD': rmsd_a3_V287T_AD})
a3_L192F_BBR_df = pd.DataFrame({'L192F BBR': rmsd_a3_L192F_BBR})
a3_L195F_BBR_df = pd.DataFrame({'L195F BBR': rmsd_a3_L195F_BBR})
a3_F280Y_BBR_df = pd.DataFrame({'F280Y BBR': rmsd_a3_F280Y_BBR})
a3_E276F_BBR_df = pd.DataFrame({'E276F BBR': rmsd_a3_E276F_BBR})
a3_F196A_BBR_df = pd.DataFrame({'F196A BBR': rmsd_a3_F196A_BBR})
a3_V287T_BBR_df = pd.DataFrame({'V287T BBR': rmsd_a3_V287T_BBR})
df = pd.concat([a3_Apo_open_df, a3_Apo_close_df, a3_L192F_df, a3_E276F_df, a3_V287T_df, a3_F196A_df, a3_F280Y_df, a3_L195F_df])
plot_kernel_mut(df, r'$\alpha$3', 'a3_all', 0, 2)
df = pd.concat([a3_L192F_df, a3_E276F_df, a3_V287T_df, a3_F196A_df, a3_F280Y_df, a3_L195F_df])
plot_kernel_mut(df, r'$\alpha$3', 'a3_mut_all', 0, 2)
df = pd.concat([a3_Apo_open_df, a3_Apo_close_df, a3_V287T_df, a3_F280Y_df])
plot_kernel_mut(df, r'$\alpha$3', 'a3_mut_extr', 0, 2)
plot_kernel_cmpr_lig(a3_L192F_df, a3_L192F_AD_df, a3_L192F_BBR_df, 'L192F', sections[6], 6)
plot_kernel_cmpr_lig(a3_L195F_df, a3_L195F_AD_df, a3_L195F_BBR_df, 'L195F', sections[6], 6)
plot_kernel_cmpr_lig(a3_E276F_df, a3_E276F_AD_df, a3_E276F_BBR_df, 'E276F', sections[6], 6)
plot_kernel_cmpr_lig(a3_V287T_df, a3_V287T_AD_df, a3_V287T_BBR_df, 'V287T', sections[6], 6)
#Compare a4 across all mutants (ligand-bound comparisons for F196A and F280Y)
a4_Apo_open_df = pd.DataFrame({'Apo Open':rmsd_a4_apo})
a4_Apo_close_df = pd.DataFrame({'Apo Closed': rmsd_a4_1sug})
a4_L192F_df = pd.DataFrame({'L192F': rmsd_a4_L192F})
a4_L195F_df = pd.DataFrame({'L195F': rmsd_a4_L195F})
a4_F280Y_df = pd.DataFrame({'F280Y': rmsd_a4_F280Y})
a4_E276F_df = pd.DataFrame({'E276F': rmsd_a4_E276F})
a4_F196A_df = pd.DataFrame({'F196A': rmsd_a4_F196A})
a4_V287T_df = pd.DataFrame({'V287T': rmsd_a4_V287T})
a4_L192F_AD_df = pd.DataFrame({'L192F AD': rmsd_a4_L192F_AD})
a4_L195F_AD_df = pd.DataFrame({'L195F AD': rmsd_a4_L195F_AD})
a4_F280Y_AD_df = pd.DataFrame({'F280Y AD': rmsd_a4_F280Y_AD})
a4_E276F_AD_df = pd.DataFrame({'E276F AD': rmsd_a4_E276F_AD})
a4_F196A_AD_df = pd.DataFrame({'F196A AD': rmsd_a4_F196A_AD})
a4_V287T_AD_df = pd.DataFrame({'V287T AD': rmsd_a4_V287T_AD})
a4_L192F_BBR_df = pd.DataFrame({'L192F BBR': rmsd_a4_L192F_BBR})
a4_L195F_BBR_df = pd.DataFrame({'L195F BBR': rmsd_a4_L195F_BBR})
a4_F280Y_BBR_df = pd.DataFrame({'F280Y BBR': rmsd_a4_F280Y_BBR})
a4_E276F_BBR_df = pd.DataFrame({'E276F BBR': rmsd_a4_E276F_BBR})
a4_F196A_BBR_df = pd.DataFrame({'F196A BBR': rmsd_a4_F196A_BBR})
a4_V287T_BBR_df = pd.DataFrame({'V287T BBR': rmsd_a4_V287T_BBR})
df = pd.concat([a4_Apo_open_df, a4_Apo_close_df, a4_L192F_df, a4_E276F_df, a4_V287T_df, a4_F196A_df, a4_F280Y_df, a4_L195F_df])
plot_kernel_mut(df, r'$\alpha$4', 'a4_all', 0, 1.5)
df = pd.concat([a4_L192F_df, a4_E276F_df, a4_V287T_df, a4_F196A_df, a4_F280Y_df, a4_L195F_df])
plot_kernel_mut(df, r'$\alpha$4', 'a4_mut_all',0, 1.5)
df = pd.concat([a4_Apo_open_df, a4_Apo_close_df, a4_V287T_df, a4_F196A_df, a4_F280Y_df])
plot_kernel_mut(df, r'$\alpha$4', 'a4', 0, 1.5)
plot_kernel_cmpr_lig(a4_F196A_df, a4_F196A_AD_df, a4_F196A_BBR_df, 'F196A', sections[8], 8)
plot_kernel_cmpr_lig(a4_F280Y_df, a4_F280Y_AD_df, a4_F280Y_BBR_df, 'F280Y', sections[8], 8)
#a4_Apo_open_df = pd.DataFrame({'Apo Open':rmsd_a4_apo_rapo})
#a4_Apo_close_df = pd.DataFrame({'Apo Closed': rmsd_a4_1sug_rapo})
#a4_F196A_df = pd.DataFrame({'F196A': rmsd_a4_F196A_rapo})
#a4_F196A_AD_df = pd.DataFrame({'F196A AD': rmsd_a4_F196A_AD_rapo})
#a4_F196A_BBR_df = pd.DataFrame({'F196A BBR': rmsd_a4_F196A_BBR_rapo})
#df = pd.concat([a4_Apo_open_df, a4_Apo_close_df, a4_F196A_df, a4_F196A_AD_df, a4_F196A_BBR_df])
#ax = plt.figure(figsize=(12, 6), frameon=False) # no visible frame
#sns.kdeplot(data = df, fill=True, alpha=0.5, common_norm = True, common_grid = True)
#plt.xlabel(r'RMSD($\AA$)')
#plt.ylabel(r'Normalized Density')
#plt.title(r'$\alpha$-4 RMSD Compared to Apo F196A')
#plt.savefig('mutate_RMSD_a4_ref_F196A.png')
#plt.close()
#a6 comparison
a6_Apo_open_df = pd.DataFrame({'Apo Open':rmsd_a6_apo})
a6_Apo_close_df = pd.DataFrame({'Apo Closed': rmsd_a6_1sug})
a6_L192F_df = pd.DataFrame({'L192F': rmsd_a6_L192F})
a6_L195F_df = pd.DataFrame({'L195F': rmsd_a6_L195F})
a6_F280Y_df = pd.DataFrame({'F280Y': rmsd_a6_F280Y})
a6_E276F_df = pd.DataFrame({'E276F': rmsd_a6_E276F})
a6_F196A_df = pd.DataFrame({'F196A': rmsd_a6_F196A})
a6_V287T_df = pd.DataFrame({'V287T': rmsd_a6_V287T})
a6_L192F_AD_df = pd.DataFrame({'L192F AD': rmsd_a6_L192F_AD})
a6_L195F_AD_df = pd.DataFrame({'L195F AD': rmsd_a6_L195F_AD})
a6_F280Y_AD_df = pd.DataFrame({'F280Y AD': rmsd_a6_F280Y_AD})
a6_E276F_AD_df = pd.DataFrame({'E276F AD': rmsd_a6_E276F_AD})
a6_F196A_AD_df = pd.DataFrame({'F196A AD': rmsd_a6_F196A_AD})
a6_V287T_AD_df = pd.DataFrame({'V287T AD': rmsd_a6_V287T_AD})
a6_L192F_BBR_df = pd.DataFrame({'L192F BBR': rmsd_a6_L192F_BBR})
a6_L195F_BBR_df = pd.DataFrame({'L195F BBR': rmsd_a6_L195F_BBR})
a6_F280Y_BBR_df = pd.DataFrame({'F280Y BBR': rmsd_a6_F280Y_BBR})
a6_E276F_BBR_df = pd.DataFrame({'E276F BBR': rmsd_a6_E276F_BBR})
a6_F196A_BBR_df = pd.DataFrame({'F196A BBR': rmsd_a6_F196A_BBR})
a6_V287T_BBR_df = pd.DataFrame({'V287T BBR': rmsd_a6_V287T_BBR})
df = pd.concat([a6_Apo_open_df, a6_Apo_close_df, a6_L192F_df, a6_E276F_df, a6_V287T_df, a6_F196A_df, a6_F280Y_df, a6_L195F_df])
plot_kernel_mut(df, r'$\alpha$6', 'a6_all', 0, 2)
df = pd.concat([a6_L192F_df, a6_E276F_df, a6_V287T_df, a6_F196A_df, a6_F280Y_df, a6_L195F_df])
plot_kernel_mut(df, r'$\alpha$6', 'a6_mut_all', 0, 2)
plot_kernel_cmpr_lig(a6_L192F_df, a6_L192F_AD_df, a6_L192F_BBR_df, 'L192F', sections[11], 11)
plot_kernel_cmpr_lig(a6_L195F_df, a6_L195F_AD_df, a6_L195F_BBR_df, 'L195F', sections[11], 11)
plot_kernel_cmpr_lig(a6_E276F_df, a6_E276F_AD_df, a6_E276F_BBR_df, 'E276F', sections[11], 11)
plot_kernel_cmpr_lig(a6_V287T_df, a6_V287T_AD_df, a6_V287T_BBR_df, 'V287T', sections[11], 11)
#Just CYS215
cys_Apo_open_df = pd.DataFrame({'Apo Open':rmsd_CYS_apo})
cys_Apo_close_df = pd.DataFrame({'Apo Closed': rmsd_CYS_1sug})
cys_L192F_df = pd.DataFrame({'L192F': rmsd_CYS_L192F})
cys_L195F_df = pd.DataFrame({'L195F': rmsd_CYS_L195F})
cys_F280Y_df = pd.DataFrame({'F280Y': rmsd_CYS_F280Y})
cys_E276F_df = pd.DataFrame({'E276F': rmsd_CYS_E276F})
cys_F196A_df = pd.DataFrame({'F196A': rmsd_CYS_F196A})
cys_V287T_df = pd.DataFrame({'V287T': rmsd_CYS_V287T})
df = pd.concat([cys_Apo_open_df, cys_Apo_close_df, cys_L192F_df, cys_E276F_df, cys_V287T_df, cys_F196A_df, cys_F280Y_df, cys_L195F_df])
plot_kernel_mut(df, 'CYS215', 'cys_all', 0, 1)
df = pd.concat([cys_L192F_df, cys_E276F_df, cys_V287T_df, cys_F196A_df, cys_F280Y_df, cys_L195F_df])
plot_kernel_mut(df, 'CYS215', 'cys_mut_all', 0, 1)
rmsd_cys = [rmsd_CYS_1sug, rmsd_CYS_apo, rmsd_CYS_F196A]
ax = plt.figure(figsize=(12, 6), frameon=False) # no visible frame
sns.kdeplot(data = rmsd_cys, fill=True, alpha=0.5)
plt.title('CYS215 RMSD Compared to WT Closed')
plt.savefig('mutate_RMSD_cys_F196A.png')
plt.close()
#BEG loop (L1)
beg_Apo_open_df = pd.DataFrame({'Apo Open':rmsd_beg_apo})
beg_Apo_close_df = pd.DataFrame({'Apo Closed': rmsd_beg_1sug})
beg_L192F_df = pd.DataFrame({'L192F': rmsd_beg_L192F})
beg_L195F_df = pd.DataFrame({'L195F': rmsd_beg_L195F})
beg_F280Y_df = pd.DataFrame({'F280Y': rmsd_beg_F280Y})
beg_E276F_df = pd.DataFrame({'E276F': rmsd_beg_E276F})
beg_F196A_df = pd.DataFrame({'F196A': rmsd_beg_F196A})
beg_V287T_df = pd.DataFrame({'V287T': rmsd_beg_V287T})
df = pd.concat([beg_Apo_open_df, beg_Apo_close_df, beg_L192F_df, beg_E276F_df, beg_V287T_df, beg_F196A_df, beg_F280Y_df, beg_L195F_df])
plot_kernel_mut(df, 'L1', 'beg_all', 0, 4)
df = pd.concat([beg_L192F_df, beg_E276F_df, beg_V287T_df, beg_F196A_df, beg_F280Y_df, beg_L195F_df])
plot_kernel_mut(df, 'L1', 'beg_mut_all', 0, 4)
#Determine p-values for each of the sections of focus
file_p = open('p_values_mut.txt', 'w')
p = np.zeros((5, 7))
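#Rows of p correspond to the sections of focus (a3_top, a3, a4, a6_bot, CYS); columns
#0-5 are the six mutants and column 6 is the Apo closed (1sug) comparison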
st, p[0,0] = stats.ttest_ind(rmsd_a3_top_apo, rmsd_a3_top_L192F, equal_var = False) #Welch's t-test
st, p[0,1] = stats.ttest_ind(rmsd_a3_top_apo, rmsd_a3_top_E276F, equal_var = False) #Welch's t-test
st, p[0,2] = stats.ttest_ind(rmsd_a3_top_apo, rmsd_a3_top_F280Y, equal_var = False) #Welch's t-test
st, p[0,3] = stats.ttest_ind(rmsd_a3_top_apo, rmsd_a3_top_L195F, equal_var = False) #Welch's t-test
st, p[0,4] = stats.ttest_ind(rmsd_a3_top_apo, rmsd_a3_top_F196A, equal_var = False) #Welch's t-test
st, p[0,5] = stats.ttest_ind(rmsd_a3_top_apo, rmsd_a3_top_V287T, equal_var = False) #Welch's t-test
st, p[0,6] = stats.ttest_ind(rmsd_a3_top_apo, rmsd_a3_top_1sug, equal_var = False) #Welch's t-test
st, p[1,0] = stats.ttest_ind(rmsd_a3_apo, rmsd_a3_L192F, equal_var = False) #Welch's t-test
st, p[1,1] = stats.ttest_ind(rmsd_a3_apo, rmsd_a3_E276F, equal_var = False) #Welch's t-test
st, p[1,2] = stats.ttest_ind(rmsd_a3_apo, rmsd_a3_F280Y, equal_var = False) #Welch's t-test
st, p[1,3] = stats.ttest_ind(rmsd_a3_apo, rmsd_a3_L195F, equal_var = False) #Welch's t-test
st, p[1,4] = stats.ttest_ind(rmsd_a3_apo, rmsd_a3_F196A, equal_var = False) #Welch's t-test
st, p[1,5] = stats.ttest_ind(rmsd_a3_apo, rmsd_a3_V287T, equal_var = False) #Welch's t-test
st, p[1,6] = stats.ttest_ind(rmsd_a3_apo, rmsd_a3_1sug, equal_var = False) #Welch's t-test
st, p[2,0] = stats.ttest_ind(rmsd_a4_apo, rmsd_a4_L192F, equal_var = False) #Welch's t-test
st, p[2,1] = stats.ttest_ind(rmsd_a4_apo, rmsd_a4_E276F, equal_var = False) #Welch's t-test
st, p[2,2] = stats.ttest_ind(rmsd_a4_apo, rmsd_a4_F280Y, equal_var = False) #Welch's t-test
st, p[2,3] = stats.ttest_ind(rmsd_a4_apo, rmsd_a4_L195F, equal_var = False) #Welch's t-test
st, p[2,4] = stats.ttest_ind(rmsd_a4_apo, rmsd_a4_F196A, equal_var = False) #Welch's t-test
st, p[2,5] = stats.ttest_ind(rmsd_a4_apo, rmsd_a4_V287T, equal_var = False) #Welch's t-test
st, p[2,6] = stats.ttest_ind(rmsd_a4_apo, rmsd_a4_1sug, equal_var = False) #Welch's t-test
st, p[3,0] = stats.ttest_ind(rmsd_a6_bot_apo, rmsd_a6_bot_L192F, equal_var = False) #Welch's t-test
st, p[3,1] = stats.ttest_ind(rmsd_a6_bot_apo, rmsd_a6_bot_E276F, equal_var = False) #Welch's t-test
st, p[3,2] = stats.ttest_ind(rmsd_a6_bot_apo, rmsd_a6_bot_F280Y, equal_var = False) #Welch's t-test
st, p[3,3] = stats.ttest_ind(rmsd_a6_bot_apo, rmsd_a6_bot_L195F, equal_var = False) #Welch's t-test
st, p[3,4] = stats.ttest_ind(rmsd_a6_bot_apo, rmsd_a6_bot_F196A, equal_var = False) #Welch's t-test
st, p[3,5] = stats.ttest_ind(rmsd_a6_bot_apo, rmsd_a6_bot_V287T, equal_var = False) #Welch's t-test
st, p[3,6] = stats.ttest_ind(rmsd_a6_bot_apo, rmsd_a6_bot_1sug, equal_var = False) #Welch's t-test
st, p[4,0] = stats.ttest_ind(rmsd_CYS_apo, rmsd_CYS_L192F, equal_var = False) #Welch's t-test
st, p[4,1] = stats.ttest_ind(rmsd_CYS_apo, rmsd_CYS_E276F, equal_var = False) #Welch's t-test
st, p[4,2] = stats.ttest_ind(rmsd_CYS_apo, rmsd_CYS_F280Y, equal_var = False) #Welch's t-test
st, p[4,3] = stats.ttest_ind(rmsd_CYS_apo, rmsd_CYS_L195F, equal_var = False) #Welch's t-test
st, p[4,4] = stats.ttest_ind(rmsd_CYS_apo, rmsd_CYS_F196A, equal_var = False) #Welch's t-test
st, p[4,5] = stats.ttest_ind(rmsd_CYS_apo, rmsd_CYS_V287T, equal_var = False) #Welch's t-test
st, p[4,6] = stats.ttest_ind(rmsd_CYS_apo, rmsd_CYS_1sug, equal_var = False) #Welch's t-test
sections_mini = ['a3_top', 'a3', 'a4', 'a6_bot', 'CYS']
Labels_mut = ['L192F', 'E276F', 'F280Y', 'L195F', 'F196A', 'V287T', 'Apo Closed']
file_p.write('P values of RMSD (Apo closed reference structure) relative to Apo open\n')
for i in range(len(sections_mini)):
file_p.write(str(sections_mini[i]) + '\n')
for j in range(len(Labels_mut)):
file_p.write(Labels_mut[j] + ': ' + str(p[i,j]) + '\n')
p = np.zeros((5, 12))
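#Second pass: columns 0-5 compare each Apo mutant to its AD-bound run and columns 6-11
#to its BBR-bound run; rows are a3_top, a3, and a4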
st, p[0,0] = stats.ttest_ind(rmsd_a3_top_L192F, rmsd_a3_top_L192F_AD, equal_var = False) #Welch's t-test
st, p[0,1] = stats.ttest_ind(rmsd_a3_top_E276F, rmsd_a3_top_E276F_AD, equal_var = False) #Welch's t-test
st, p[0,2] = stats.ttest_ind(rmsd_a3_top_F280Y, rmsd_a3_top_F280Y_AD, equal_var = False) #Welch's t-test
st, p[0,3] = stats.ttest_ind(rmsd_a3_top_L195F, rmsd_a3_top_L195F_AD, equal_var = False) #Welch's t-test
st, p[0,4] = stats.ttest_ind(rmsd_a3_top_F196A, rmsd_a3_top_F196A_AD, equal_var = False) #Welch's t-test
st, p[0,5] = stats.ttest_ind(rmsd_a3_top_V287T, rmsd_a3_top_V287T_AD, equal_var = False) #Welch's t-test
st, p[0,6] = stats.ttest_ind(rmsd_a3_top_L192F, rmsd_a3_top_L192F_BBR, equal_var = False) #Welch's t-test
st, p[0,7] = stats.ttest_ind(rmsd_a3_top_E276F, rmsd_a3_top_E276F_BBR, equal_var = False) #Welch's t-test
st, p[0,8] = stats.ttest_ind(rmsd_a3_top_F280Y, rmsd_a3_top_F280Y_BBR, equal_var = False) #Welch's t-test
st, p[0,9] = stats.ttest_ind(rmsd_a3_top_L195F, rmsd_a3_top_L195F_BBR, equal_var = False) #Welch's t-test
st, p[0,10] = stats.ttest_ind(rmsd_a3_top_F196A, rmsd_a3_top_F196A_BBR, equal_var = False) #Welch's t-test
st, p[0,11] = stats.ttest_ind(rmsd_a3_top_V287T, rmsd_a3_top_V287T_BBR, equal_var = False) #Welch's t-test
st, p[1,0] = stats.ttest_ind(rmsd_a3_L192F, rmsd_a3_L192F_AD, equal_var = False) #Welch's t-test
st, p[1,1] = stats.ttest_ind(rmsd_a3_E276F, rmsd_a3_E276F_AD, equal_var = False) #Welch's t-test
st, p[1,2] = stats.ttest_ind(rmsd_a3_F280Y, rmsd_a3_F280Y_AD, equal_var = False) #Welch's t-test
st, p[1,3] = stats.ttest_ind(rmsd_a3_L195F, rmsd_a3_L195F_AD, equal_var = False) #Welch's t-test
st, p[1,4] = stats.ttest_ind(rmsd_a3_F196A, rmsd_a3_F196A_AD, equal_var = False) #Welch's t-test
st, p[1,5] = stats.ttest_ind(rmsd_a3_V287T, rmsd_a3_V287T_AD, equal_var = False) #Welch's t-test
st, p[1,6] = stats.ttest_ind(rmsd_a3_L192F, rmsd_a3_L192F_BBR, equal_var = False) #Welch's t-test
st, p[1,7] = stats.ttest_ind(rmsd_a3_E276F, rmsd_a3_E276F_BBR, equal_var = False) #Welch's t-test
st, p[1,8] = stats.ttest_ind(rmsd_a3_F280Y, rmsd_a3_F280Y_BBR, equal_var = False) #Welch's t-test
st, p[1,9] = stats.ttest_ind(rmsd_a3_L195F, rmsd_a3_L195F_BBR, equal_var = False) #Welch's t-test
st, p[1,10] = stats.ttest_ind(rmsd_a3_F196A, rmsd_a3_F196A_BBR, equal_var = False) #Welch's t-test
st, p[1,11] = stats.ttest_ind(rmsd_a3_V287T, rmsd_a3_V287T_BBR, equal_var = False) #Welch's t-test
st, p[2,0] = stats.ttest_ind(rmsd_a4_L192F, rmsd_a4_L192F_AD, equal_var = False) #Welch's t-test
st, p[2,1] = stats.ttest_ind(rmsd_a4_E276F, rmsd_a4_E276F_AD, equal_var = False) #Welch's t-test
st, p[2,2] = stats.ttest_ind(rmsd_a4_F280Y, rmsd_a4_F280Y_AD, equal_var = False) #Welch's t-test
st, p[2,3] = stats.ttest_ind(rmsd_a4_L195F, rmsd_a4_L195F_AD, equal_var = False) #Welch's t-test
st, p[2,4] = stats.ttest_ind(rmsd_a4_F196A, rmsd_a4_F196A_AD, equal_var = False) #Welch's t-test
st, p[2,5] = stats.ttest_ind(rmsd_a4_V287T, rmsd_a4_V287T_AD, equal_var = False) #Welch's t-test
st, p[2,6] = stats.ttest_ind(rmsd_a4_L192F, rmsd_a4_L192F_BBR, equal_var = False) #Welch's t-test
st, p[2,7] = stats.ttest_ind(rmsd_a4_E276F, rmsd_a4_E276F_BBR, equal_var = False) #Welch's t-test
st, p[2,8] = stats.ttest_ind(rmsd_a4_F280Y, rmsd_a4_F280Y_BBR, equal_var = False) #Welch's t-test
st, p[2,9] = stats.ttest_ind(rmsd_a4_L195F, rmsd_a4_L195F_BBR, equal_var = False) #Welch's t-test
st, p[2,10] = stats.ttest_ind(rmsd_a4_F196A, rmsd_a4_F196A_BBR, equal_var = False) #Welch's t-test
st, p[2,11] = stats.ttest_ind(rmsd_a4_V287T, rmsd_a4_V287T_BBR, equal_var = False) #Welch's t-test
sections_mini = ['a3_top', 'a3', 'a4']
Labels_mut = ['L192F', 'E276F', 'F280Y', 'L195F', 'F196A', 'V287T']
file_p.write('P values of RMSD (Apo closed reference structure) for ligand-bound runs relative to the Apo mutant\n')
for i in range(len(sections_mini)):
file_p.write(str(sections_mini[i]) + '\n')
for j in range(len(Labels_mut)):
n = j+6
file_p.write(Labels_mut[j] + ' AD: ' + str(p[i,j]) + '\n')
        file_p.write(Labels_mut[j] + ' BBR: ' + str(p[i,n]) + '\n')
file_p.close()
|
ajfriedman22/PTP1B
|
compare_mutant_scripts/rmsd_mut_compare.py
|
rmsd_mut_compare.py
|
py
| 30,484 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.zeros",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.errorbar",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "seaborn.kdeplot",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.setp",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "pandas.concat",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "seaborn.kdeplot",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "scipy.stats.sem",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "scipy.stats.sem",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "scipy.stats.sem",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "scipy.stats.sem",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "seaborn.heatmap",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "scipy.stats.sem",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 359,
"usage_type": "name"
},
{
"api_name": "seaborn.kdeplot",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 361,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 362,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 363,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 384,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 385,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 386,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 387,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 388,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 389,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 390,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 392,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 393,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 394,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 395,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 396,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 397,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 398,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 398,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 400,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 401,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 402,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 403,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 404,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 405,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 406,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 408,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 409,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 410,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 411,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 412,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 413,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 414,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 416,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 416,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 417,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 418,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 419,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 419,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 420,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 421,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 421,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 422,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 433,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 434,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 435,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 436,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 437,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 438,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 439,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 440,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 440,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 441,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 442,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 443,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 444,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 446,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 447,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 447,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 448,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 449,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 450,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 451,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 452,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 453,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 454,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 454,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 455,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 456,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 457,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 457,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 459,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 459,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 460,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 461,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 462,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 463,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 464,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 465,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 466,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 467,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 467,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 468,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 469,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 470,
"usage_type": "name"
}
] |
8266532966
|
import logging
import shutil
import sys
import click
from cekit.cache.artifact import ArtifactCache
from cekit.config import Config
from cekit.crypto import SUPPORTED_HASH_ALGORITHMS
from cekit.descriptor.resource import create_resource
from cekit.log import setup_logging
from cekit.tools import Map
from cekit.version import __version__
setup_logging()
LOGGER = logging.getLogger("cekit")
CONFIG = Config()
@click.group(context_settings=dict(max_content_width=100))
@click.option("-v", "--verbose", help="Enable verbose output.", is_flag=True)
@click.option(
"--config",
metavar="PATH",
help="Path to configuration file.",
default="~/.cekit/config",
show_default=True,
)
@click.option(
"--work-dir",
metavar="PATH",
help="Location of the working directory.",
default="~/.cekit",
show_default=True,
)
@click.version_option(message="%(version)s", version=__version__)
def cli(config, verbose, work_dir):
pass
@cli.command(name="ls", short_help="List cached artifacts")
def ls():
CacheCli.prepare().ls()
@cli.command(name="add", short_help="Add artifact to cache")
@click.argument("location", metavar="LOCATION")
@click.option("--md5", metavar="CHECKSUM", help="The md5 checksum of the artifact.")
@click.option("--sha1", metavar="CHECKSUM", help="The sha1 checksum of the artifact.")
@click.option(
"--sha256", metavar="CHECKSUM", help="The sha256 checksum of the artifact."
)
@click.option(
"--sha512", metavar="CHECKSUM", help="The sha512 checksum of the artifact."
)
def add(location, md5, sha1, sha256, sha512):
if not (md5 or sha1 or sha256 or sha512):
raise click.UsageError("At least one checksum must be provided")
CacheCli.prepare().add(location, md5, sha1, sha256, sha512)
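# Example invocation (assuming the cekit-cache console entry point wraps this group):
#   cekit-cache add https://example.com/artifact.jar --md5 d41d8cd98f00b204e9800998ecf8427e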
@cli.command(name="rm", short_help="Remove artifact from cache")
@click.argument("uuid", metavar="UUID")
def rm(uuid):
CacheCli.prepare().rm(uuid)
@cli.command(name="clear", short_help="Remove all artifacts from the cache")
def clear():
CacheCli.prepare().clear()
class CacheCli:
@staticmethod
def prepare():
"""Returns an initialized object of CacheCli"""
return CacheCli(Map(click.get_current_context().parent.params))
def __init__(self, args):
        # TODO: logging is configured only when adding an artifact; investigate whether this can be handled more cleanly
if args.verbose:
LOGGER.setLevel(logging.DEBUG)
else:
LOGGER.setLevel(logging.INFO)
CONFIG.configure(args.config, {"work_dir": args.work_dir})
def add(self, location, md5, sha1, sha256, sha512):
artifact_cache = ArtifactCache()
resource = {}
resource["url"] = location
if md5:
resource["md5"] = md5
if sha1:
resource["sha1"] = sha1
if sha256:
resource["sha256"] = sha256
if sha512:
resource["sha512"] = sha512
artifact = create_resource(resource)
cached = artifact_cache.cached(artifact)
if cached:
click.echo("Artifact {} is already cached!".format(location))
sys.exit(0)
try:
artifact_id = artifact_cache.add(artifact)
click.echo(
"Artifact {} cached with UUID '{}'".format(location, artifact_id)
)
except Exception as ex:
click.secho(
"Cannot cache artifact {}: {}".format(location, str(ex)), fg="red"
)
sys.exit(1)
def ls(self):
artifact_cache = ArtifactCache()
artifacts = artifact_cache.list()
if artifacts:
for artifact_filename, artifact in artifacts.items():
click.echo(
"\n{}:".format(
click.style(
artifact_filename.split(".")[0], fg="green", bold=True
)
)
)
for alg in SUPPORTED_HASH_ALGORITHMS:
if alg in artifact and artifact[alg]:
click.echo(
" {}: {}".format(
click.style(alg, bold=True), artifact[alg]
)
)
if artifact["names"]:
click.echo(" {}:".format(click.style("names", bold=True)))
for name in artifact["names"]:
click.echo(" - %s" % name)
else:
click.echo("No artifacts cached!")
def rm(self, uuid: str):
artifact_cache = ArtifactCache()
try:
artifact_cache.delete(uuid)
click.echo("Artifact with UUID '{}' removed".format(uuid))
except Exception:
click.secho(
"Artifact with UUID '{}' doesn't exists in the cache".format(uuid),
fg="yellow",
)
sys.exit(1)
def clear(self):
"""
Removes the artifact cache directory with all artifacts.
Use with caution!
"""
artifact_cache = ArtifactCache()
if not click.confirm(
"Are you sure to remove all artifacts from cache?", show_default=True
):
return
try:
shutil.rmtree(artifact_cache.cache_dir)
click.echo("Artifact cache cleared!")
except Exception:
click.secho(
"An error occurred while removing the artifact cache directory '{}'".format(
artifact_cache.cache_dir
),
fg="red",
)
sys.exit(1)
if __name__ == "__main__":
cli()
|
cekit/cekit
|
cekit/cache/cli.py
|
cli.py
|
py
| 5,730 |
python
|
en
|
code
| 70 |
github-code
|
6
|
[
{
"api_name": "cekit.log.setup_logging",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cekit.config.Config",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "click.group",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "click.version_option",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cekit.version.__version__",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "click.UsageError",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "cekit.tools.Map",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "click.get_current_context",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "cekit.cache.artifact.ArtifactCache",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "cekit.descriptor.resource.create_resource",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "click.secho",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "cekit.cache.artifact.ArtifactCache",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "click.style",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "cekit.crypto.SUPPORTED_HASH_ALGORITHMS",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "click.echo",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "click.style",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "click.style",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "cekit.cache.artifact.ArtifactCache",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "click.secho",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "cekit.cache.artifact.ArtifactCache",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "click.confirm",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "click.secho",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 189,
"usage_type": "call"
}
] |
25549591929
|
import logging
import os
import sys
def configLogger():
root = logging.getLogger()
root.setLevel(logging.DEBUG)
file_handler = logging.FileHandler(_get_logfile_name())
file_handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
root.addHandler(file_handler)
def _get_logfile_name():
executable_name = os.path.basename(sys.argv[0]).split('.')[0]
return '/tmp/logs/%s.log' % executable_name
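# Note: the root logger accepts DEBUG and above, but the file handler filters at INFO,
# so DEBUG records never reach the log file. The /tmp/logs/ directory must already
# exist, since logging.FileHandler does not create parent directories.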
|
cipriantruica/news_diffusion
|
news-spreading-master/logger/logger.py
|
logger.py
|
py
| 529 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "logging.FileHandler",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 18,
"usage_type": "attribute"
}
] |
1918587531
|
import sys
import datetime
import csv
from os import path
""""
This section describes all functions called when the program is started.
"""
def startup():
create_date_file()
create_bought_file()
create_sold_file()
def create_date_file():
"""Check if there is already a file present containing the date set as current date. If not: create one."""
if not path.exists('date.txt'):
date = str(datetime.date.today())
with open('date.txt', 'w') as file:
file.write(date)
def create_bought_file():
"""Check if there is already a bought.csv file present. If not: create one"""
if not path.exists('bought.csv'):
with open('bought.csv', 'w', newline='') as csvfile:
bought_creation = csv.writer(csvfile)
bought_creation.writerow(['id', 'product_name', 'buy_date', 'buy_price', 'expiration_date'])
def create_sold_file():
"""Check if there is already a sold.csv file present. If not: create one"""
if not path.exists('sold.csv'):
with open('sold.csv', 'w', newline='') as csvfile:
sold_creation = csv.writer(csvfile)
sold_creation.writerow(['id', 'bought_id', 'sell_date', 'sell_price'])
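# Usage sketch (added for illustration): running the module once creates
# date.txt, bought.csv and sold.csv in the working directory if they are missing.
if __name__ == '__main__':
startup()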
|
YorrickvB/SuperpyV2
|
startup.py
|
startup.py
|
py
| 1,282 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.exists",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "csv.writer",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "csv.writer",
"line_number": 33,
"usage_type": "call"
}
] |
10176346880
|
import requests
import re
import warnings
import json
class NHGIS:
'''API wrapper for the IPUMS NHGIS API.
API Documentation: https://developer.ipums.org/docs/get-started/
Arguments:
api_key: Authorization key required for use of the IPUMS API. *Required*
API keys can be obtained here: https://account.ipums.org/api_keys
'''
def __init__(self, api_key):
NHGIS.header = {"Authorization": api_key}
self.documentation_link = 'https://developer.ipums.org/docs/get-started/'
self.geographic_extent_options = {"010": "Nation",
"020": "Region",
"030": "Division2",
"040": "State",
"050": "State-County",
"140": "State-County-Census Tract",
"155": "State-Place-County",
"160": "State-Place",
"250": "American Indian Area/Alaska Native Area Hawaiian Home Land",
"310": "Metropolitan Statistical Area/Micropolitan Statistical Area",
"500": "State-Congressional District"}
self.dataset = Dataset
self.time_series = TimeSeries
def dataset_metadata(self, dataset=None, data_table=None):
'''
Returns metadata for IPUMS datasets.
Arguments:
dataset: Default: None. If None, metadata for all available datasets is returned. If the name of a
a dataset is given, metadata for the specified dataset is returned.
data_table: Default: None. Data table can only be specified when `dataset` != None.
If specified, metadata for the specified table is returned.
'''
if not dataset and not data_table:
url = "https://api.ipums.org/metadata/nhgis/datasets?version=v1"
elif dataset and not data_table:
url = f"https://api.ipums.org/metadata/nhgis/datasets/{dataset}?version=v1"
elif dataset and data_table:
url = f"https://api.ipums.org/metadata/nhgis/datasets/{dataset}/data_tables/{data_table}?version=v1"
elif not dataset and data_table:
raise ValueError('A dataset must be provided when data_table != None')
response = requests.get(url, headers=self.header)
return self._validate_response(response)
def time_series_metadata(self, data_table=None):
'''
Returns metadata for available time-series tables.
Arguments:
data_table: Default: None. If None, metadata for all data tables is returned.
If specified, metadata for the specified table is returned.
'''
if not data_table:
url = "https://api.ipums.org/metadata/nhgis/time_series_tables?version=v1"
else:
url = f"https://api.ipums.org/metadata/nhgis/time_series_tables/{data_table}?version=v1"
response = requests.get(url, headers=self.header)
return self._validate_response(response)
def shapefile_metadata(self):
'''
Returns metadata for available shapefiles.
Arguments:
No arguments are available for this method.
'''
url = "https://api.ipums.org/metadata/nhgis/shapefiles?version=v1"
response = requests.get(url, headers=self.header)
return self._validate_response(response)
def _validate_response(self, response):
if response.status_code == 200:
return response.json()
else:
raise ValueError(f'''A {response.status_code} error code was returned.\n
The following reason was given: {response.reason}
API Keys can be obtained here: https://account.ipums.org/api_keys
API documentation is here: {self.documentation_link}
If need be, api error code documentation can be found here
https://developer.mozilla.org/en-US/docs/Web/HTTP/Status''')
def _validate_shapefiles(self, shapefiles):
supported_files = [file['name'] for file in self.shapefile_metadata()]
for file in shapefiles:
if file not in supported_files:
raise ValueError(f'Shapefile: {file} could not be found.')
def _create_payload(self, datasets=None, time_series_tables=None, shapefiles=None,
data_format='csv_no_header', breakdown_and_data_type_layout='separate_files',
time_series_table_layout=None, geographic_extents=None, description=None):
'''
Receives Dataset class objects and returns a json payload formatted according to the specifications
of the IPUMS API.
'''
payload = {}
if datasets:
if isinstance(datasets, list):
payload['datasets'] = {}
for dataset in datasets:
if isinstance(dataset, Dataset):
if dataset.extent_required and not geographic_extents:
warnings.warn(f'''
Geographic extent is required for
Dataset {dataset.dataset} with the provided
geographic levels. It is recommended that this
extent be provided. By default, all geo extents are requested.
Geographic extent options can be accessed with the
NHGIS.geographic_extent_options attribute.''')
geographic_extents = ['*']
payload['datasets'].update(dataset.to_payload())
elif isinstance(dataset, dict):
payload['datasets'].update(dataset)
warnings.warn('''Validation is not provided for
dictionary inputs.
Use of NHGIS.dataset is recommended.''')
else:
raise ValueError(f'datasets cannot be datatype: {type(dataset)}')
if time_series_tables:
payload['time_series_tables'] = {}
if isinstance(time_series_tables, list):
for table in time_series_tables:
if isinstance(table, TimeSeries):
payload['time_series_tables'].update(table.to_payload())
elif isinstance(table, dict):
payload['time_series_tables'].update(table)
warnings.warn('''Validation is not provided for
dictionary inputs.
Use of NHGIS.time_series is recommended.''')
else:
raise ValueError(f'time_series_tables cannot be datatype: {type(table)}')
elif isinstance(time_series_tables, TimeSeries):
payload['time_series_tables'].update(time_series_tables.to_payload())
else:
raise ValueError('time_series_tables must be a list or a TimeSeries instance.')
if shapefiles:
payload['shapefiles'] = shapefiles
if time_series_tables:
payload['time_series_table_layout'] = time_series_table_layout
if geographic_extents:
payload['geographic_extents'] = geographic_extents
payload['data_format'] = data_format
if description:
payload['description'] = description
else:
payload['description'] = 'ipumspy extract'
if breakdown_and_data_type_layout:
payload['breakdown_and_data_type_layout'] = breakdown_and_data_type_layout
payload_json = json.dumps(payload)
payload_json = json.loads(payload_json)
return payload_json
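# For reference, a payload assembled above looks roughly like the sketch below;
# the dataset and table names are hypothetical placeholders, not validated here:
# {"datasets": {"1990_STF1": {"years": ["1990"], "breakdown_values": [],
# "data_tables": ["NP1"], "geog_levels": ["state"]}},
# "data_format": "csv_no_header", "description": "ipumspy extract",
# "breakdown_and_data_type_layout": "separate_files"}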
def create_extract(self, datasets=None, time_series_tables=None, shapefiles=None,
data_format='csv_no_header', breakdown_and_data_type_layout='separate_files',
time_series_table_layout=None, geographic_extents=None, description=None):
'''
Submit a data extract request to the IPUMS NHGIS API.
Currently, the IPUMS API does not support downloading directly from the API.
An email notification will be received confirming your extract request.
'''
url = "https://api.ipums.org/extracts/?product=nhgis&version=v1"
if shapefiles:
self._validate_shapefiles(shapefiles)
payload = self._create_payload(datasets=datasets, time_series_tables=time_series_tables, shapefiles=shapefiles,
data_format=data_format, breakdown_and_data_type_layout=breakdown_and_data_type_layout,
time_series_table_layout=time_series_table_layout,
geographic_extents=geographic_extents, description=description)
result = requests.post(url, headers=self.header, json=payload).json()
if 'number' in result:
self.extract_number = result['number']
return result
def extract_status(self, status_only=True):
'''
Returns the status of the most recent data extract request.
'''
url = f"https://api.ipums.org/extracts/{self.extract_number}?product=nhgis&version=v1"
response = requests.get(url, headers=self.header)
if status_only:
return response.json()['status']
else:
return response.json()
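# Illustrative end-to-end call (sketch only; the API key, dataset and table
# names below are placeholders and a live key is required):
# nhgis = NHGIS(api_key='MY_KEY')
# ds = nhgis.dataset('1990_STF1', data_tables=['NP1'], geog_levels=['state'])
# nhgis.create_extract(datasets=[ds], description='demo extract')
# nhgis.extract_status()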
class Dataset(NHGIS):
'''A wrapper for creating and validating requests to the IPUMS NHGIS API.
This class is used to format the json data structure for the NHGIS class.'''
def __init__(self, dataset, data_tables, geog_levels, years=None, breakdowns=[]):
self._validate(dataset, data_tables, geog_levels, years=years, breakdowns=breakdowns)
self.dataset = dataset
self.data_tables = data_tables
self.geog_levels = geog_levels
self.breakdowns = breakdowns
def _validate(self, dataset, data_tables, geog_levels, years=None, breakdowns=[]):
self.validate_types(dataset, data_tables, geog_levels, years, breakdowns)
metadata = self.dataset_metadata(dataset=dataset)
self.validate_data_tables(metadata, data_tables)
self.validate_geog_levels(metadata, geog_levels)
self.validate_years(metadata, years)
self.validate_breakdowns(metadata, breakdowns)
def validate_data_tables(self, metadata, data_tables):
supported_tables = [value['name'] for value in metadata['data_tables']]
for table in data_tables:
if table not in supported_tables:
raise ValueError(f'''Data Table: {table} is not supported for dataset: {metadata["name"]}''')
def validate_geog_levels(self, metadata, geog_levels):
supported_levels = [value['name'] for value in metadata['geog_levels']]
for level in geog_levels:
if level not in supported_levels:
raise ValueError(f'''Geo level: {level} is not supported for dataset: {metadata["name"]}''')
self.extent_required = False
for level in metadata['geog_levels']:
if level['name'] in geog_levels:
if level['has_geog_extent_selection']:
warnings.warn(f"""
Geographic level: '{level['name']}' for Dataset: '{metadata['name']}'
requires geog_extent selection when extraction is made.
Available geographic extent options can be accessed with the
`NHGIS.geographic_extent_options` attribute.
The `NHGIS.create_extract` method has a default geog_extent of ['*']
""")
self.extent_required = True
def validate_breakdowns(self, metadata, breakdowns):
if breakdowns:
if not 'breakdowns' in metadata:
raise ValueError(f'Breakdowns are not supported for {metadata["name"]}')
supported_breakdowns_list = metadata['breakdowns']
supported_breakdowns = []
for entry in supported_breakdowns_list:
supported_breakdowns += [value['name'] for value in entry['breakdown_values']]
for breakdown in breakdowns:
if breakdown not in supported_breakdowns:
raise ValueError(f'''Breakdown: {breakdown} is not supported for dataset: {metadata["name"]}''')
def is_multiyear(self, metadata):
year_count = re.findall(r'(\d{4})', metadata['name'])
if year_count:
count = len(set(year_count))
else:
count = 1
if count > 1:
return True
def year_range(self, metadata):
years = re.findall(r'(\d{4})', metadata['name'])
if years:
years = [int(year) for year in years]
return [year for year in range(years[0], years[1] + 1)]
def validate_years(self, metadata, years):
multiyear = self.is_multiyear(metadata)
if multiyear and not years:
raise ValueError(f'{metadata["name"]} has multiple years. `years` cannot be set to `None`.')
if multiyear and years:
supported_years = self.year_range(metadata)
for year in years:
if year not in supported_years:
raise ValueError(f'''Year: {year} is not supported for dataset: {metadata["name"]}.''')
if not multiyear and years:
if len(years) > 1:
raise ValueError(f'Dataset: {metadata["name"]} is not a multi year dataset, but multiple years were given.')
supported_year = int(re.findall(r'(\d{4})', metadata["name"])[0])
if not supported_year == years[0]:
raise ValueError(f'Dataset: {metadata["name"]} supports the year {supported_year}, but {years[0]} was given.')
if not multiyear and not years:
years = []
self.years = years
def validate_types(self, dataset, data_tables, geog_levels, years, breakdowns):
if type(dataset) != str:
raise ValueError('dataset variable must be string.')
if not type(data_tables) == list:
raise ValueError('data_tables variable must be a list.')
if not all(isinstance(item, str) for item in data_tables):
raise ValueError('data_tables variable must be a list of strings.')
if not type(geog_levels) == list:
raise ValueError('geog_levels variable must be a list.')
if not all(isinstance(item, str) for item in geog_levels):
raise ValueError('geog_levels variable must be a list of strings.')
if years:
if type(years) != list:
raise ValueError('year variable must be a list for multi year datasets.')
if not all(isinstance(item, int) for item in years):
raise ValueError('year variable must be a list of integers for multi year datasets.')
if breakdowns:
if type(breakdowns) != list:
raise ValueError('If breakdowns != None, the variable must be a list.')
if not all(isinstance(item, str) for item in breakdowns):
raise ValueError('If breakdowns != None, the variable must be a list of strings.')
def __repr__(self):
return f'''Dataset(dataset: {self.dataset},
Number of tables: {len(self.data_tables)},
Number of geographies: {len(self.geog_levels)},
Number of breakdowns: {len(self.breakdowns) if self.breakdowns else self.breakdowns},
years: {self.years})'''
def to_payload(self):
payload = {self.dataset: {
"years": [str(year) for year in self.years] if self.years else [],
"breakdown_values": self.breakdowns,
"data_tables": self.data_tables,
"geog_levels": self.geog_levels
}}
return payload
class TimeSeries(NHGIS):
'''A wrapper for creating and validating requests to the IPUMS NHGIS API.
This class is used to format the json data structure for the NHGIS class.'''
def __init__(self, data_table, geog_levels='macro'):
self.validate(data_table, geog_levels)
def validate(self, data_table, geog_levels):
self.validate_types(data_table, geog_levels)
metadata = self.time_series_metadata(data_table=data_table)
self.data_table = data_table
self.validate_geog_levels(metadata, geog_levels)
def validate_types(self, data_table, geog_levels):
if type(data_table) != str:
raise ValueError('`data_table` variable must be a string.')
if geog_levels != 'macro':
if type(geog_levels) != list:
raise ValueError('If `geog_levels` != "macro" `geog_levels` must be a list.')
if not all(isinstance(item, str) for item in geog_levels):
raise ValueError('If `geog_levels` != "macro" `geog_levels` must be a list of strings.')
def validate_geog_levels(self, metadata, geog_levels):
if geog_levels == 'macro':
self.geog_levels = [metadata['geog_levels'][0]['name']]
else:
supported_levels = [value['name'] for value in metadata['geog_levels']]
for level in geog_levels:
if level not in supported_levels:
raise ValueError(f'Time Series {metadata["name"]} does not support geo level: "{level}"')
self.geog_levels = geog_levels
def to_payload(self):
payload = {self.data_table: {
'geog_levels': self.geog_levels}}
return payload
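# to_payload output sketch (the table name and level are hypothetical placeholders):
# TimeSeries('CW3', geog_levels=['state']).to_payload()
# -> {'CW3': {'geog_levels': ['state']}}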
|
joelsewhere/ipumspy
|
ipumspy.py
|
ipumspy.py
|
py
| 18,512 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 309,
"usage_type": "call"
}
] |
73996523069
|
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import random
import os
import glob
import sys
import wandb
import gru_models
import build_vocab
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# TODO: Get rid of these, just pass them in to dataset.
BATCH_SIZE=16
TRAIN_TOKEN_LEN=256
#from vocab_building import load_tokenized_file, load_vocab, decode_vocab, nlp, get_vocab_indx_vector
def find_files(path): return glob.glob(path)
class RNN_Dataset_multiple_sources(torch.utils.data.Dataset):
def __init__(
self,
sequence_length,
type
):
folder = "../vocabs_and_tokens/" + type + "/"
data_folder = "../data/" + type + "/"
vocab_file = folder + "*.pt"
token_files = folder + "*.pkl"
self.sequence_length = sequence_length
self.all_categories, self.n_categories = self.setup_categories(data_folder)
self.load_words(vocab_file, token_files)
self.uniq_words = len(self.vocab)
# data_folder needs to be like '../data/reviews/'
def setup_categories(self, data_folder):
all_categories = []
for filename in find_files(data_folder + '*.txt'):
category = os.path.splitext(os.path.basename(filename))[0]
all_categories.append(category)
n_categories = len(all_categories)
if n_categories == 0:
raise RuntimeError('Data not found.')
print('# categories:', n_categories, all_categories)
#all_categories.remove('garden')
#all_categories.remove('music')
#all_categories.remove('small_combined')
#n_categories_languages = len(all_categories)
#print('# categories:', n_categories_languages, all_categories)
return all_categories, n_categories
def load_words(self, vocab_file, token_files):
# We want the vocab to be constructed from all sources, but we need the raw token sets for each separately.
# The category vector can just be a simple index vector.
self.vocab = build_vocab.load_vocab(find_files(vocab_file)[0])
token_files = find_files(token_files)
# This is only setup to handle two different categories right now
self.raw_tokens_1 = build_vocab.load_tokenized_file(token_files[0])
self.raw_tokens_2 = build_vocab.load_tokenized_file(token_files[1])
self.num_samples_1 = len(self.raw_tokens_1)
self.num_samples_2 = len(self.raw_tokens_2)
# This is iffy, because we aren't actually going through all of the "samples"
self.num_samples = max(1, ((self.num_samples_1 + self.num_samples_2) // TRAIN_TOKEN_LEN)) # Split raw tokens into groups of TRAIN_TOKEN_LEN
self.num_batches = max(1, self.num_samples // BATCH_SIZE)
print('Number of raw_tokens: ', len(self.raw_tokens_1 + self.raw_tokens_2))
print('Number of samples: ', self.num_samples)
print('Number of batches: ', self.num_batches)
return 1
def random_choice(self, l):
return l[random.randint(0, len(l)-1)]
def category_tensor(self, category):
li = self.all_categories.index(category)
if li == 0:
tensor = torch.zeros(self.sequence_length).to(device).long()
else:
tensor = torch.ones(self.sequence_length).to(device).long()
return tensor, li
def __len__(self):
return self.num_samples
def __getitem__(self, index):
# This should pick a random source, grab its category, and then grab a sequence associated with it.
# Pick random category
string_category= self.random_choice(self.all_categories)
category, category_index = self.category_tensor(string_category)
# Pick the right token samples based on the category
if category_index == 0:
current_sample = self.raw_tokens_1
else:
current_sample = self.raw_tokens_2
# We cut off the potential of it being too long
random_index = random.randint(0, len(current_sample) - (self.sequence_length + 1))
end_index = random_index + self.sequence_length
return ( # might break if it gets the very end?
torch.tensor(current_sample[random_index:end_index]).to(device), # x
torch.tensor(current_sample[random_index+1:end_index+1]).to(device), # y
category
)
def train(dataset, model, max_epochs, batch_size, cat = False):
train_losses = []
model.train()
dataloader = DataLoader(dataset, batch_size=batch_size, drop_last=True)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
for epoch in range(max_epochs):
total_loss = 0
for batch, (x, y, category) in enumerate(dataloader):
hidden_states = model.init_hidden(batch_size)
#print('x size: ', x.size()) # 16, 256
#print('category size: ', category.size()) # 16, 256
optimizer.zero_grad()
if cat:
y_pred, hidden_states = model(x, hidden_states, batch_size, category)
else:
y_pred, hidden_states = model(x, hidden_states, batch_size)
#print('y_pred size: ', y_pred.size()) # [16, 4822] for cells, [16, 256, 4822] normal
#print('y_pred transposed size: ', y_pred.transpose(1, 2).size()) # [16, 4822, 256]
loss = criterion(y_pred.transpose(1, 2), y)
total_loss += loss.item()
loss.backward()
optimizer.step()
print({ 'epoch': epoch, 'batch': batch, 'loss': loss.item() })
wandb.log({"loss":loss.item()})
batch_loss = total_loss/batch_size
train_losses.append(batch_loss)
wandb.log({"batch_loss":batch_loss})
return train_losses
def predict_with_category(dataset, model, text, category, next_words=100):
model.eval()
prediction = build_vocab.get_vocab_indx_vector(dataset.vocab, build_vocab.load_spacy, text)
tokens = torch.tensor(prediction).to(device)
# Get category tensor
li = dataset.all_categories.index(category)
if li == 0:
category = torch.zeros(len(prediction)).to(device).long()
else:
category = torch.ones(len(prediction)).to(device).long()
print('cat size: ', category.size())
print('prediction size: ', tokens.size())
state_h = model.init_hidden(1) # num_layers, batch_size, lstm_size
# Prime generation by feeding in initial input:
for p in range(len(tokens)-1):
_, state_h = model(tokens[p].view(1,-1), state_h, 1, category[p].view(1,-1))
#print('state_h size: ', state_h.size())
last_token = tokens[-1]
for i in range(0, next_words):
y_pred, state_h = model(last_token.view(1,-1), state_h, 1, category[0].view(1,-1))
#print('y_pred size: ', y_pred.size()) # [16, 256, 12923], should be [1, 1, 12923]
#print('y_pred[0][-1] size: ', y_pred[0][-1].size())
last_word_logits = y_pred[0][-1]
# These are the probabilities
p = torch.nn.functional.softmax(last_word_logits, dim=0)
word_index = torch.multinomial(p, 1)[0]
top_values = torch.topk(p, 5)
#top_words = top_values.indices
#top_probs = top_values.values
#print('word index: ', word_index)
#print('top_words: ', top_words.tolist())
#top_word_pred = decode_vocab(dataset.vocab, [word_index])
#top_words_pred = decode_vocab(dataset.vocab, top_words.tolist())
#print('The top word predicted was: ', top_word_pred)
#print('The top five predictions were: ', top_words_pred)
#print('Their probabilites are: ', top_probs)
prediction.append(word_index)
last_token = torch.tensor([word_index]).to(device)
final_prediction = build_vocab.decode_vocab(dataset.vocab, prediction)
return final_prediction
def train_wrapper(type, hidden_size, num_epochs):
# Create dataset
dataset = RNN_Dataset_multiple_sources(TRAIN_TOKEN_LEN, type)
input_size = dataset.uniq_words # Should be size of vocab?
n_layers = 3
print('----------------------')
print('Original GRU')
run = wandb.init(name='Original GRU',
project='controllableRNN',
config={
'dataset':type,
'epochs':num_epochs,
'hidden_size':hidden_size
},
reinit=True
)
# Model with normal pytorch GRU
category_model = gru_models.GRU_category(input_size, hidden_size, input_size, n_layers).to(device)
file_path = f"gru_trained_cat_" + type + ".pt"
losses_cat = train(dataset, category_model, num_epochs, BATCH_SIZE, cat=True)
torch.save(category_model.state_dict(), file_path)
run.finish()
"""print('----------------------')
print('Original GRU with cells')
# Model with GRU Cells
cells_category_model = gru_models.GRU_with_cells_category(input_size, hidden_size, input_size, n_layers).to(device)
file_path = f"gru_trained_cat_cells_" + type + ".pt"
losses_cat_cells = train(dataset, cells_category_model, num_epochs, BATCH_SIZE, True)
torch.save(cells_category_model.state_dict(), file_path)"""
print('----------------------')
print('Edited GRU')
run = wandb.init(name='Edited GRU',
project='controllableRNN',
config={
'dataset':type,
'epochs':num_epochs,
'hidden_size':hidden_size
},
reinit=True
)
# Model with edited GRU Cells
cells_category_edited_model = gru_models.GRU_with_cells_category_edited(input_size, hidden_size, input_size, n_layers).to(device)
file_path = f"gru_trained_cat_cells_edited_" + type + ".pt"
losses_cat_cells_edited = train(dataset, cells_category_edited_model, num_epochs, BATCH_SIZE, True)
torch.save(cells_category_edited_model.state_dict(), file_path)
run.finish()
# Create loss graph and save
"""fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(range(len(losses_cat)), losses_cat, label="original")
#ax.plot(range(len(losses_cat_cells)), losses_cat_cells, label="original with cells")
ax.plot(range(len(losses_cat_cells_edited)), losses_cat_cells_edited, label="edited")
plt.title("Loss over time")
plt.xlabel("Time")
plt.ylabel("Loss")
plt.legend()
plt.savefig('loss_' + str(type) + "_" + str(num_epochs) + "_" + str(hidden_size) + '.png')"""
def main():
wandb.login() # login to wandb
# Uncomment these to use arguments
#arguments = sys.argv[1:]
#type, num_epochs, hidden_size = arguments
#num_epochs = int(num_epochs)
#hidden_size = int(hidden_size)
print('TRAINING BOOKS- HIDDEN_SIZE-256 NUM_EPOCHS-50')
train_wrapper(type='books', hidden_size=256, num_epochs=50)
print('TRAINING BOOKS- HIDDEN_SIZE-512 NUM_EPOCHS-50')
train_wrapper(type='books', hidden_size=512, num_epochs=50)
print('TRAINING BOOKS- HIDDEN_SIZE-1024 NUM_EPOCHS-50')
train_wrapper(type='books', hidden_size=1024, num_epochs=50)
if __name__ == "__main__":
main()
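# Invocation sketch: `python train_gru.py` trains the three GRU variants on the
# 'books' corpus; uncommenting the sys.argv parsing in main() would allow e.g.
# `python train_gru.py books 50 256` (type, num_epochs, hidden_size).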
|
JayOrten/controllableRNN
|
scripts/train_gru.py
|
train_gru.py
|
py
| 11,462 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.device",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "build_vocab.load_vocab",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "build_vocab.load_tokenized_file",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "build_vocab.load_tokenized_file",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "wandb.log",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "wandb.log",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "build_vocab.get_vocab_indx_vector",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "build_vocab.load_spacy",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "torch.multinomial",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "torch.topk",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "build_vocab.decode_vocab",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "wandb.init",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "gru_models.GRU_category",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "wandb.init",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "gru_models.GRU_with_cells_category_edited",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "wandb.login",
"line_number": 298,
"usage_type": "call"
}
] |
14177953222
|
"""Holds environmental variables, sets up custom logger."""
import logging
import os
log = logging.getLogger(name="log")
# declare environment constants
COSMOSDB_CONNECTION_STRING: str = os.environ["COSMOSDB_CONNECTION_STRING"]
COSMOSDB_DATABASE_ID: str = os.environ["COSMOSDB_DATABASE_ID"]
COSMOSDB_CONTAINER_ID: str = os.environ["COSMOSDB_CONTAINER_ID"]
BLOB_SERVICE_CONNECTION_STRING: str = os.environ["BLOB_CONNECTION_STRING"]
BLOB_CONTAINER_NAME: str = os.environ["BLOB_CONTAINER_NAME"]
def logger(
logging_format: str = "%(levelname)s, %(name)s.%(funcName)s: %(message)s",
level: int = logging.INFO,
) -> None:
"""
Sets up custom logger.
Parameters:
logging_format (str, optional): Logging format. Defaults to "%(levelname)s, %(name)s.%(funcName)s: %(message)s".
level (int, optional): Logging level. Defaults to logging.INFO.
Returns:
None
"""
log.debug(msg="Setting up custom logger.")
log.setLevel(level=level)
handler = logging.StreamHandler(stream=None)
formatter = logging.Formatter(fmt=logging_format)
handler.setFormatter(fmt=formatter)
if log.hasHandlers():
log.handlers.clear()
log.addHandler(handler)
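# Usage sketch (illustrative; the module requires the COSMOSDB/BLOB environment
# variables to be set before import):
# logger(level=logging.DEBUG)
# log.info("custom logger ready")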
|
wieczorekgrzegorz/ksef-krportal-communication
|
utilities/setup.py
|
setup.py
|
py
| 1,194 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 36,
"usage_type": "call"
}
] |
16413370006
|
import json
import os
import pygame
from pprint import pformat
from pyggui.core import TRANSPARENT
pygame.font.init()
_DEFAULT_THEME_PATH = 'assets/themes/default_theme.json'
_DEFAULT_THEME_PATH = os.path.join(os.path.dirname(__file__), _DEFAULT_THEME_PATH)
NULL_THEME_DEFAULTS = {
"col": TRANSPARENT,
"width": 0,
"padding": [0, 0],
"radius": 0,
"font": {
"name": "calibri",
"size": 32
}
}
class Theme:
def __init__(self, file=None):
self.file = file
if self.file is None:
self.file = _DEFAULT_THEME_PATH
self.changed = False
self._all_styles = self._load_theme_json(self.file)
self._top_level_theme = self._all_styles.get("*")
self._styles = self._top_level_theme.copy()
@staticmethod
def _load_theme_json(file):
with open(file, mode='r') as theme_file:
themes = json.load(theme_file)
return themes
def get_widget_theme(self, widget=None):
widget_theme = self._top_level_theme.copy()
if widget is None:
return widget_theme
widget_type = widget.__class__.__name__.lower()
# Update the widget theme with the widget type level attributes
widget_type_theme = self._all_styles.get(widget_type, {})
widget_theme |= widget_type_theme
new_theme = Theme()
new_theme._styles = widget_theme.copy()
return new_theme
def __getitem__(self, item):
value = self._styles.get(item, None)
if value is None and any(key in item for key in NULL_THEME_DEFAULTS.keys()):
value = [v for k, v in NULL_THEME_DEFAULTS.items() if k == item or k in item][0]
return value
def __setitem__(self, item, value):
old_styles = self._styles.copy()
if item in self._styles:
self._styles[item] = value  # assign to the requested key, not a hard-coded one
self.changed = self.changed or old_styles != self._styles
def __repr__(self):
return pformat(self._styles)
@property
def font(self):
font_dict = self['font']
name = font_dict.get('name')
size = font_dict.get('size')
try:
font_ = pygame.font.Font(name, size)
except FileNotFoundError:
font_ = pygame.font.SysFont(name, size)
font_.set_bold(font_dict.get('bold', False))
font_.set_italic(font_dict.get('italic', False))
font_.set_underline(font_dict.get('underline', False))
return font_
def copy(self):
copy = Theme()
copy._styles = self._styles.copy()
return copy
if __name__ == '__main__':
theme = Theme()
print(theme)
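# Further illustration (sketch; assumes the bundled default theme defines 'font'):
print(theme['font'])
print(theme.font)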
|
sam57719/PygGUI
|
pyggui/theme.py
|
theme.py
|
py
| 2,672 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.font.init",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pyggui.core.TRANSPARENT",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pprint.pformat",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pygame.font.Font",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 87,
"usage_type": "attribute"
}
] |
41685726399
|
import boto3
import sys
import time
# pass the instance's Name tag value (e.g. 'ansible-controller') as the first CLI argument when running the script
#import json
ec2_client = boto3.client('ec2', region_name = "us-east-1")
instances = ec2_client.describe_instances()
for reservation in instances['Reservations']:
for instance in reservation["Instances"]:
if instance["Tags"][0]["Value"] == sys.argv[1]:
response = ec2_client.start_instances(InstanceIds=[instance["InstanceId"]])
istance_id = instance["InstanceId"]
while True:
print('checking the status........')
time.sleep(3)
status_value = ec2_client.describe_instance_status(InstanceIds=[instance_id])
if len(status_value['InstanceStatuses']) != 0:
if status_value['InstanceStatuses'][0]['InstanceState']['Name'] == 'running':
print(f"{sys.argv[1]} EC2 Instance got:- {status_value['InstanceStatuses'][0]['InstanceState']['Name']}")
break
print ("Press Enter to continue ..." )
input()
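# Invocation sketch: python aws-boto-start-instance.py ansible-controller
# The first CLI argument must match the instance's first tag value, and AWS
# credentials for us-east-1 are assumed to be configured.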
|
sudhann92/project-repo
|
aws-python/aws-boto-start-instance.py
|
aws-boto-start-instance.py
|
py
| 979 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "boto3.client",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 20,
"usage_type": "attribute"
}
] |
72137353787
|
import logging
import json
from discord import Interaction, app_commands, Role
from discord.app_commands import Choice
from discord.ext.commands import Bot, Cog
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
with open("config.json") as cfg_json:
cfg = json.loads(cfg_json.read())
owner_id = cfg["owner_id"]
async def log_reply(ctx: Interaction, response: str, ephemeral=True):
log = f"From {ctx.user}: {response}"
if ctx.guild:
log = f"From {ctx.user} in {ctx.guild.name}: {response}"
logger.info(log)
await ctx.response.send_message(response, ephemeral=ephemeral)
async def is_owner(ctx: Interaction) -> bool:
if ctx.user.id != owner_id:
await log_reply(ctx, f"**Error:** Only my owner can use this command")
return False
return True
class CommandError(Exception):
def __init__(self, msg: str):
self.msg = msg
super().__init__(msg)
@classmethod
async def send_err(cls, ctx: Interaction, msg: str):
self = cls(msg)
await log_reply(ctx, self.msg)
return self
class OwnerCog(Cog):
"""Commands that can only be used by the bot's owner"""
def __init__(self, bot: Bot):
self.bot = bot
@app_commands.command(name="sync")
@app_commands.check(is_owner)
async def sync_commands(self, ctx: Interaction):
"""(Owner only) Syncs app command info with Discord"""
await self.bot.tree.sync()
await log_reply(ctx, "Synced command tree with Discord")
emote = app_commands.Group(
name="emote",
description=("(Owner only) Modify DGG emote translations"),
)
@emote.command(name="add")
@app_commands.describe(
dgg_version="The emote as it's used in DGG",
disc_version="The emote as it's used in Discord",
)
@app_commands.check(is_owner)
async def add_emote(self, ctx: Interaction, dgg_version: str, disc_version: str):
"""(Owner only) Add or modify a DGG emote translation"""
self.bot.emotes[dgg_version] = disc_version
self.bot.save_cfg()
await log_reply(ctx, f"Translating {dgg_version} to {str(disc_version)}")
@emote.command(name="remove")
@app_commands.describe(dgg_version="The emote to remove (in DGG format)")
@app_commands.check(is_owner)
async def remove_emote(self, ctx: Interaction, dgg_version: str):
"""(Owner only) Remove a DGG emote translation"""
if dgg_version in self.bot.emotes.keys():
removed_emote = self.bot.emotes.pop(dgg_version)
self.bot.save_cfg()
await log_reply(ctx, f"Removed {removed_emote} from emotes")
else:
await log_reply(ctx, f"Couldn't find emote {dgg_version}")
config = app_commands.Group(
name="config",
description=("(Owner only) Modify the bot's config file"),
)
@config.command(name="remove")
@app_commands.choices(
mode=[
Choice(name="phrase", value="phrase"),
Choice(name="relay", value="relay"),
]
)
@app_commands.check(is_owner)
async def config_remove(self, ctx: Interaction, mode: str, value: str):
"""Remove a relay or phrase from the config file"""
if mode == "phrase" and value in self.bot.phrases:
del self.bot.phrases[value]
self.bot.save_cfg()
await log_reply(ctx, f"Removed '{value}' from phrases", ephemeral=False)
elif mode == "relay" and value in self.bot.relays:
del self.bot.relays[value]
self.bot.save_cfg()
await log_reply(ctx, f"Removed '{value}' from relays", ephemeral=False)
else:
await log_reply(ctx, f"Couldn't find '{value}' in {mode}s")
class PublicCog(Cog):
"""Commands that can be used by anybody"""
def __init__(self, bot: Bot):
self.bot = bot
async def get_relay_channel(self, ctx: Interaction) -> int:
if not ctx.guild:
err = "**Error:** This command is only usable in servers"
raise await CommandError(err).send_err(ctx, err)
if "dgg-relay-mod" not in (role.name for role in ctx.user.roles):
err = "**Error:** This command requires the 'dgg-relay-mod' role"
raise await CommandError(err).send_err(ctx, err)
relay_channel = None
for channel in ctx.guild.channels:
if channel.name == "dgg-relay":
relay_channel = channel.id
break
if not relay_channel:
err = f"**Error:** No '#dgg-relay' channel found in '{ctx.guild.name}'"
raise await CommandError(err).send_err(ctx, err)
return relay_channel
relay = app_commands.Group(
name="relay",
description="Relays DGG messages to servers",
)
@relay.command(name="add")
@app_commands.describe(dgg_username="The DGG user you want to relay messages from")
async def relay_add(self, ctx: Interaction, dgg_username: str):
"""Add a DGG user whose messages get forwarded to this server (case sensitive!)"""
relay_channel = await self.get_relay_channel(ctx)
if dgg_username not in self.bot.relays:
self.bot.relays[dgg_username] = []
logger.info(f"Added new relay list '{dgg_username}'")
if relay_channel not in self.bot.relays[dgg_username]:
self.bot.relays[dgg_username].append(relay_channel)
response = (
f"Messages from '{dgg_username}' will be relayed to '{ctx.guild.name}'"
)
else:
response = f"**Error:** '{dgg_username}' is already being relayed to '{ctx.guild.name}'"
self.bot.save_cfg()
await log_reply(ctx, response, ephemeral=False)
@relay.command(name="remove")
@app_commands.describe(dgg_username="The DGG user you want to stop relaying")
async def relay_remove(self, ctx: Interaction, dgg_username: str):
"""Remove a DGG user's relay from this server"""
relay_channel = await self.get_relay_channel(ctx)
response = None
if dgg_username in self.bot.relays.keys():
if relay_channel in self.bot.relays[dgg_username]:
self.bot.relays[dgg_username].remove(relay_channel)
response = f"Removed '{dgg_username}' relay from '{ctx.guild.name}'"
if not self.bot.relays[dgg_username]:
self.bot.relays.pop(dgg_username)
logger.info(f"Removed empty relay list for '{dgg_username}'")
self.bot.save_cfg()
if not response:
response = (
f"**Error:** '{dgg_username}' isn't being relayed to '{ctx.guild.name}'"
" (try the '/relay list' command)"
)
await log_reply(ctx, response, ephemeral=False)
@relay.command(name="list")
async def relay_list(self, ctx: Interaction):
"""Lists DGG users currently being relayed to this server."""
relay_channel = await self.get_relay_channel(ctx)
relays = []
for nickname in self.bot.relays:
for channel in self.bot.relays[nickname]:
if channel == relay_channel:
relays.append(nickname)
relays = "', '".join(relays)
response = f"This server gets messages from: '{relays}'"
if not relays:
response = "No relays are active for this server."
await log_reply(ctx, response, ephemeral=False)
live_notifications = app_commands.Group(
name="live-notifications",
description="Configure live notifications for Destiny",
)
@live_notifications.command(name="on")
async def live_notifications_on(self, ctx: Interaction):
"""Enable live notifications for this server"""
relay_channel = await self.get_relay_channel(ctx)
if relay_channel not in self.bot.live["channels"].keys():
self.bot.live["channels"][relay_channel] = {"role": None}
self.bot.live["channels"][relay_channel]["enabled"] = True
self.bot.save_cfg()
response = f"Live notifications enabled for {ctx.guild.name}"
await log_reply(ctx, response, ephemeral=False)
@live_notifications.command(name="off")
async def live_notifications_off(self, ctx: Interaction):
"""Disable live notifications for this server"""
relay_channel = await self.get_relay_channel(ctx)
if relay_channel not in self.bot.live["channels"].keys():
self.bot.live["channels"][relay_channel] = {"role": None}
self.bot.live["channels"][relay_channel]["enabled"] = False
self.bot.save_cfg()
response = f"Live notifications disabled for {ctx.guild.name}"
await log_reply(ctx, response, ephemeral=False)
@live_notifications.command(name="role")
@app_commands.describe(role="The role that will be pinged")
async def live_notifications_role(self, ctx: Interaction, role: Role):
"""Set a role that gets pinged for live notifications"""
relay_channel = await self.get_relay_channel(ctx)
if relay_channel not in self.bot.live["channels"].keys():
self.bot.live["channels"][relay_channel] = {"enabled": True}
self.bot.live["channels"][relay_channel]["role"] = role.id
self.bot.save_cfg()
response = (
f'"<@&{role.id}>" will be pinged for live notifications in {ctx.guild.name}'
)
await log_reply(ctx, response, ephemeral=False)
def check_prefs(self, disc_user):
if disc_user not in self.bot.user_prefs.keys():
self.bot.user_prefs[disc_user] = {"detect_presence": False, "ignores": []}
logger.info(f"Added new user '{disc_user}' to preferences list")
phrase = app_commands.Group(
name="phrase",
description="Relays DGG messages to users",
)
@phrase.command(name="add")
@app_commands.describe(
phrase="The phrase you want forwarded to you (most likely your DGG username)"
)
async def phrase_add(self, ctx: Interaction, phrase: str):
"""Add a phrase (usually a username) that will be forwarded
to you when it's used in DGG (case insensitive)"""
self.check_prefs(ctx.user.id)
if phrase not in self.bot.phrases:
self.bot.phrases[phrase] = []
logger.info(f"Added new phrase list for '{phrase}'")
if ctx.user.id not in self.bot.phrases[phrase]:
self.bot.phrases[phrase].append(ctx.user.id)
response = f"Forwarding '{phrase}' to {ctx.user}"
else:
response = f"**Error:** '{phrase}' is already being forwarded to {ctx.user}"
self.bot.save_cfg()
await log_reply(ctx, response)
@phrase.command(name="remove")
@app_commands.describe(phrase="The phrase you want to stop being forwarded")
async def phrase_remove(self, ctx: Interaction, phrase: str):
"""Stop a phrase from being forwarded to you"""
self.check_prefs(ctx.user.id)
response = None
if phrase in self.bot.phrases:
if ctx.user.id in self.bot.phrases[phrase]:
self.bot.phrases[phrase].remove(ctx.user.id)
response = f"No longer forwarding '{phrase}' to {ctx.user}"
if not self.bot.phrases[phrase]:
self.bot.phrases.pop(phrase)
logger.info(f"Removed empty phrase list '{phrase}'")
self.bot.save_cfg()
if not response:
response = (
f"**Error:** '{phrase}' isn't being forwarded to {ctx.user}"
" (try the '/phrase list' command)"
)
await log_reply(ctx, response)
@phrase.command(name="list")
async def phrase_list(self, ctx: Interaction):
"""List the phrases currently being forwarded to you"""
disc_user = ctx.user.id
user_phrases = []
for phrase in self.bot.phrases:
for user_id in self.bot.phrases[phrase]:
if user_id == disc_user:
user_phrases.append(phrase)
user_phrases = "', '".join(user_phrases)
response = f"Your phrases: '{user_phrases}'"
if not user_phrases:
response = "No phrases are being forwarded to you."
await log_reply(ctx, response)
@phrase.command(name="detect-dgg-presence")
@app_commands.describe(mode="Set to True to detect DGG presence. Default is False.")
async def detect_dgg_presence(self, ctx: Interaction, mode: bool):
"""Change behavior of the /phrase command by controlling when the bot messages you."""
self.check_prefs(ctx.user.id)
self.bot.user_prefs[ctx.user.id]["detect_presence"] = mode
self.bot.save_cfg()
word = "enabled" if mode else "disabled"
response = f"Presence detection {word} for {ctx.user.name}"
await log_reply(ctx, response)
ignore = app_commands.Group(
name="ignore",
description="Configure your DGG Relay ignore list",
)
@ignore.command(name="add")
@app_commands.describe(dgg_username="The user in DGG you want to ignore")
async def add_ignore(self, ctx: Interaction, dgg_username: str):
"""Ignore messages from a DGG user"""
self.check_prefs(ctx.user.id)
ignores = self.bot.user_prefs[ctx.user.id]["ignores"]
ignores.append(dgg_username)
self.bot.user_prefs[ctx.user.id]["ignores"] = list(set(ignores))
self.bot.save_cfg()
response = f"'{dgg_username}' added to your ignore list"
await log_reply(ctx, response)
@ignore.command(name="remove")
@app_commands.describe(dgg_username="The user in DGG you want to unignore")
async def remove_ignore(self, ctx: Interaction, dgg_username: str):
"""Remove someone from your ignore list"""
self.check_prefs(ctx.user.id)
ignores = self.bot.user_prefs[ctx.user.id]["ignores"]
if dgg_username not in ignores:
await log_reply(ctx, f"'{dgg_username}' is not in your ignore list")
return
self.bot.user_prefs[ctx.user.id]["ignores"].remove(dgg_username)
self.bot.save_cfg()
response = f"'{dgg_username}' removed from your ignore list"
await log_reply(ctx, response)
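# Extension entry point sketch (assumes discord.py 2.x, where load_extension
# awaits an async setup(); this hook is not present in the original source):
async def setup(bot: Bot):
await bot.add_cog(OwnerCog(bot))
await bot.add_cog(PublicCog(bot))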
|
tenacious210/dgg-relay
|
cogs.py
|
cogs.py
|
py
| 14,701 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "discord.Interaction",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.command",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.check",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.Group",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.describe",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.check",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.describe",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.check",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.Group",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.choices",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.Choice",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "discord.app_commands.Choice",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "discord.app_commands.check",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.Group",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.describe",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.describe",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.Group",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "discord.Role",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.describe",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.Group",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 242,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.describe",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.describe",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 267,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.describe",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.Group",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 313,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 320,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.describe",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 319,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.describe",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 331,
"usage_type": "name"
}
] |
12300903394
|
import numpy as np
from sklearn.datasets import make_classification
import pytest
from pygbm.binning import BinMapper
from pygbm.grower import TreeGrower
from pygbm import GradientBoostingRegressor
from pygbm import GradientBoostingClassifier
X, y = make_classification(n_samples=150, n_classes=2, n_features=5,
n_informative=3, n_redundant=0,
random_state=0)
def test_plot_grower(tmpdir):
pytest.importorskip('graphviz')
from pygbm.plotting import plot_tree
X_binned = BinMapper().fit_transform(X)
gradients = np.asarray(y, dtype=np.float32).copy()
hessians = np.ones(1, dtype=np.float32)
grower = TreeGrower(X_binned, gradients, hessians, max_leaf_nodes=5)
grower.grow()
filename = tmpdir.join('plot_grower.pdf')
plot_tree(grower, view=False, filename=filename)
assert filename.exists()
def test_plot_estimator(tmpdir):
pytest.importorskip('graphviz')
from pygbm.plotting import plot_tree
n_trees = 3
est = GradientBoostingRegressor(max_iter=n_trees)
est.fit(X, y)
for i in range(n_trees):
filename = tmpdir.join('plot_predictor.pdf')
plot_tree(est, tree_index=i, view=False, filename=filename)
assert filename.exists()
def test_plot_estimator_and_lightgbm(tmpdir):
pytest.importorskip('graphviz')
lightgbm = pytest.importorskip('lightgbm')
from pygbm.plotting import plot_tree
n_classes = 3
X, y = make_classification(n_samples=150, n_classes=n_classes,
n_features=5, n_informative=3, n_redundant=0,
random_state=0)
n_trees = 3
est_pygbm = GradientBoostingClassifier(max_iter=n_trees,
n_iter_no_change=None)
est_pygbm.fit(X, y)
est_lightgbm = lightgbm.LGBMClassifier(n_estimators=n_trees)
est_lightgbm.fit(X, y)
n_total_trees = n_trees * n_classes
for i in range(n_total_trees):
filename = tmpdir.join('plot_mixed_predictors.pdf')
plot_tree(est_pygbm, est_lightgbm=est_lightgbm, tree_index=i,
view=False, filename=filename)
assert filename.exists()
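# Run sketch: `pytest tests/test_plotting.py -q`; each test is skipped unless
# graphviz (and lightgbm, for the last test) can be imported.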
|
ogrisel/pygbm
|
tests/test_plotting.py
|
test_plotting.py
|
py
| 2,201 |
python
|
en
|
code
| 175 |
github-code
|
6
|
[
{
"api_name": "sklearn.datasets.make_classification",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pytest.importorskip",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygbm.binning.BinMapper",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pygbm.grower.TreeGrower",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pygbm.plotting.plot_tree",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pytest.importorskip",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pygbm.GradientBoostingRegressor",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pygbm.plotting.plot_tree",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pytest.importorskip",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pytest.importorskip",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.make_classification",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pygbm.GradientBoostingClassifier",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pygbm.plotting.plot_tree",
"line_number": 61,
"usage_type": "call"
}
] |
38958617780
|
from unittest import mock
from django.test import TestCase
from django.urls import resolve, reverse
from nltk import word_tokenize
from .models import ScrapeHistory
from .views import WordCountView
class ScrapeTest(TestCase):
def _mock_response(
self,
status=200,
content="CONTENT",
json_data=None,
raise_for_status=None):
mock_resp = mock.Mock()
# mock raise_for_status call w/optional error
mock_resp.raise_for_status = mock.Mock()
if raise_for_status:
mock_resp.raise_for_status.side_effect = raise_for_status
# set status code and content
mock_resp.status_code = status
mock_resp.content = content
# add json data if provided
if json_data:
mock_resp.json = mock.Mock(
return_value=json_data
)
return mock_resp
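    # Hedged usage sketch (mirroring the patched tests below): pair this helper
    # with mock.patch so that scrape.views.requests.get returns the canned
    # response instead of hitting the network, e.g.
    #   @mock.patch("scrape.views.requests.get")
    #   def test_example(self, m):
    #       m.return_value = self._mock_response(content="<html>fit</html>")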
def setUp(self):
self.url = reverse("wordcount")
self.post_params = {
"word": "fit",
"url": "https://www.virtusize.jp/"
}
def test_wordcount_url_resolves(self):
self.assertEqual(resolve(self.url).func.view_class, WordCountView)
def test_missing_param(self):
params = {
"word": "ABC"
}
response = self.client.post(
self.url, params, content_type='application/json')
self.assertEqual(response.status_code, 400)
def test_invalid_link_value(self):
params = {
"word": "WORd",
"url": "https://virtuse"
}
response = self.client.post(
self.url, params, content_type='application/json')
self.assertEqual(response.status_code, 500)
@mock.patch("scrape.views.requests.get")
def test_wordcount_request_to_url(self, m):
mock_resp = self._mock_response(
content="<html><body>this is fit</body></html>")
m.return_value = mock_resp
response = WordCountView.scrap_url_and_word_count(self,
"fit", "https://www.virtusize.jp/")
self.assertEqual(response, 1)
@mock.patch("scrape.views.WordCountView.scrap_url_and_word_count", return_value=1)
def test_wordcount_success_result(self, m):
response = self.client.post(
self.url, self.post_params, content_type='application/json')
result = response.json()
test_content = {
"status": "ok",
"count": 1
}
self.assertEqual(result, test_content)
@mock.patch("scrape.views.WordCountView.scrap_url_and_word_count", return_value=1)
def test_save_to_db(self, m):
_ = ScrapeHistory.objects.create(
url="https://www.virtusize.jp/",
word="fit",
word_count=1
)
_ = self.client.post(
self.url, self.post_params, content_type='application/json')
self.assertEqual(ScrapeHistory.objects.last().word_count, 1)
self.assertNotEqual(ScrapeHistory.objects.last().word_count, 13)
def test_word_tokenize(self):
txt = "<html><body>Virtusize works for innovative idea. <'new idea'> idea-ly Although there are lot of new ideas but it focuses e-commerce</body></html>"
words = word_tokenize(txt)
self.assertEqual(words.count("idea"), 2)
self.assertNotEqual(words.count("idea"), 1)
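# Hedged illustration (the sentence is an assumption; behavior follows nltk's
# default Treebank-style tokenizer): word_tokenize splits on whitespace and
# punctuation, so only bare "idea" tokens are counted:
#   >>> word_tokenize("one idea, then another idea").count("idea")
#   2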
|
iqbalalo/word_counter
|
src/scrape/tests.py
|
tests.py
|
py
| 3,441 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.test.TestCase",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "unittest.mock.Mock",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "unittest.mock",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "unittest.mock.Mock",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "unittest.mock",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "unittest.mock.Mock",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "unittest.mock",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "views.WordCountView",
"line_number": 46,
"usage_type": "argument"
},
{
"api_name": "django.urls.resolve",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "views.WordCountView.scrap_url_and_word_count",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "views.WordCountView",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "unittest.mock",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "unittest.mock",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "models.ScrapeHistory.objects.create",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "models.ScrapeHistory.objects",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "models.ScrapeHistory",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "models.ScrapeHistory.objects.last",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "models.ScrapeHistory.objects",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "models.ScrapeHistory",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "models.ScrapeHistory.objects.last",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "models.ScrapeHistory.objects",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "models.ScrapeHistory",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "unittest.mock",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "nltk.word_tokenize",
"line_number": 113,
"usage_type": "call"
}
] |
7661449629
|
import numpy as np
import netCDF4
from datetime import datetime, timedelta
from glob import glob
import os, sys
"""
This program is used to read input data.
"""
#******************************************
# Edit here (input file directories)
#------------------------------------------
slpbasedir = "/mnt/nas02/data/CMIP6"
tabasedir = "/mnt/nas02/data/CMIP6"
uabasedir = "/mnt/nas02/data/CMIP6"
vabasedir = "/mnt/nas02/data/CMIP6"
tsbasedir = "/mnt/nas02/data/CMIP6"
topobasedir= "/mnt/nas02/data/CMIP6"
landbasedir= "/mnt/nas02/data/CMIP6"
prbasedir = "/mnt/nas02/data/CMIP6" # not used for detection
#******************************************
dbasedir = {
"slp":slpbasedir,
"ta" :tabasedir,
"ua" :uabasedir,
"va" :vabasedir,
"sst":tsbasedir,
"topo":topobasedir,
"land":landbasedir,
"pr" :prbasedir, # not used for detection
}
dvar = {
"slp":"psl",
"ta" :"ta",
"ua" :"ua",
"va" :"va",
"sst":"ts",
"topo":"orog",
"land":"sftlf",
"pr" :"pr", # not used for detection
}
def ret_lats(model):
return np.load(slpbasedir + "/%s/lat.npy"%(model))
# MIROC6: -88.92773535 ~ 88.92773535, d=~1.4007664
def ret_lons(model):
return np.load(slpbasedir + "/%s/lon.npy"%(model))
# MIROC6: 0 ~ 358.59375, d=1.40625
def ret_ny(model):
return len(ret_lats(model))
def ret_nx(model):
return len(ret_lons(model))
# MIROC6: (128, 256)
def ret_miss(model):
modelname = model.split(".")[0]
if modelname=="MIROC6": miss_in= 9.969209968386869e+36
elif modelname=="MRI-ESM2-0": miss_in= 9.969209968386869e+36
elif modelname=="MPI-ESM1-2-HR":miss_in= 9.969209968386869e+36
    else: raise ValueError("unsupported model: %s" % modelname)  # avoid returning an undefined miss_in
    return miss_in
def Load_6hrPlev(model, var, DTime, plev):
modelname, expr, ens = model.split(".")
vname = dvar[var]
iplev = [850, 500, 250].index(plev)
# Search file
srcdir = dbasedir[var] + "/%s"%(model)
ssearch = srcdir + "/%s_6hrPlev*.nc"%(vname)
lsrcpath = glob(ssearch)
for srcpath in lsrcpath:
stime = os.path.basename(srcpath).split("_")[6].split(".")[0]
stime0, stime1 = stime.split("-")
dtime0 = datetime.strptime(stime0, "%Y%m%d%H%M")
dtime1 = datetime.strptime(stime1, "%Y%m%d%H%M")
if (dtime0<=DTime)&(DTime<=dtime1):
break
nc = netCDF4.Dataset(srcpath)
#print(nc.variables)
#print(srcpath)
# Find time index
basetime = {
("MIROC6","piControl"): datetime(3200,1,1),
("MRI-ESM2-0","piControl"): datetime(1850,1,1),
("MRI-ESM2-0","historical"): datetime(1850,1,1),
("MPI-ESM1-2-HR","piControl"): datetime(1850,1,1),
}[modelname,expr]
dtime0 = basetime + timedelta(days=float(nc.variables["time"][0]))
idxtime = int((DTime - dtime0).total_seconds()/21600) # 6-hour = 21600 sec
#print(DTime, dtime0)
#print(idxtime)
return nc.variables[vname][idxtime, iplev]
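# Worked example of the time-index arithmetic above (hedged, dates assumed):
#   dtime0 = datetime(1850, 1, 1); DTime = datetime(1850, 1, 2, 12)
#   (DTime - dtime0).total_seconds() / 21600 = 129600 / 21600 = 6,
# i.e. the seventh 6-hourly record in the file.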
def Load_6hrSfc(model, var, DTime):
modelname, expr, ens = model.split(".")
vname = dvar[var]
# Search file
srcdir = dbasedir[var] + "/%s"%(model)
ssearch = srcdir + "/%s_6hrPlev*.nc"%(vname)
lsrcpath = np.sort(glob(ssearch))
for srcpath in lsrcpath:
stime = os.path.basename(srcpath).split("_")[6].split(".")[0]
stime0, stime1 = stime.split("-")
dtime0 = datetime.strptime(stime0, "%Y%m%d%H%M")
dtime1 = datetime.strptime(stime1, "%Y%m%d%H%M")
if (dtime0<=DTime)&(DTime<=dtime1):
break
nc = netCDF4.Dataset(srcpath)
#print(nc.variables)
#print(srcpath)
# Find time index
basetime = {
("MIROC6","piControl"): datetime(3200,1,1),
("MRI-ESM2-0","piControl"): datetime(1850,1,1),
("MRI-ESM2-0","historical"): datetime(1850,1,1),
}[modelname,expr]
dtime0 = basetime + timedelta(days=float(nc.variables["time"][0]))
idxtime = int((DTime - dtime0).total_seconds()/21600) # 6-hour = 21600 sec
return nc.variables[vname][idxtime]
#return nc.variables[vname].shape
def Load_monSfc(model, var, Year, Mon):
modelname, expr, ens = model.split(".")
vname = dvar[var]
DTime = datetime(Year,Mon,1)
# Search file
srcdir = dbasedir[var] + "/%s"%(model)
ssearch = srcdir + "/%s_Amon*.nc"%(vname)
lsrcpath = np.sort(glob(ssearch))
for srcpath in lsrcpath:
stime = os.path.basename(srcpath).split("_")[6].split(".")[0]
stime0, stime1 = stime.split("-")
dtime0 = datetime.strptime(stime0, "%Y%m")
dtime1 = datetime.strptime(stime1, "%Y%m")
if (dtime0<=DTime)&(DTime<=dtime1):
break
nc = netCDF4.Dataset(srcpath)
#print(nc.variables)
#print(srcpath)
#print(nc.variables["time"][:])
#print(len(nc.variables["time"][:]))
# Find time index
Year0,Mon0 = dtime0.timetuple()[:2]
Year1,Mon1 = dtime1.timetuple()[:2]
    idxtime = int(Year-Year0)*12 -Mon0 + Mon  # months since (Year0, Mon0): (Year-Year0)*12 + (Mon-Mon0)
#print(idxtime)
return nc.variables[vname][idxtime]
def Load_const(model, var):
vname = dvar[var]
srcdir = dbasedir[var] + "/%s"%(model)
ssearch = srcdir + "/%s_*.nc"%(vname)
lsrcpath= glob(ssearch)
srcpath = lsrcpath[0]
nc = netCDF4.Dataset(srcpath)
#print(nc.variables)
return nc.variables[vname][:]
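# Hedged usage sketch (the model string, ensemble member and date below are
# assumptions, not values taken from this repository):
if __name__ == "__main__":
    model = "MIROC6.piControl.r1i1p1f1"
    dtime = datetime(3200, 1, 1, 6)
    slp = Load_6hrSfc(model, "slp", dtime)          # sea-level pressure slice
    ta850 = Load_6hrPlev(model, "ta", dtime, 850)   # temperature at 850 hPa
    orog = Load_const(model, "topo")                # time-invariant orography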
|
nbykutsumi/wsd
|
dataloader_CMIP6.py
|
dataloader_CMIP6.py
|
py
| 5,339 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "numpy.load",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.sort",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "numpy.sort",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 176,
"usage_type": "call"
}
] |
18266500320
|
"""This is Slate's Linear Algebra Compiler. This module is
responsible for generating C++ kernel functions representing
symbolic linear algebra expressions written in Slate.
This linear algebra compiler uses both Firedrake's form compiler,
the Two-Stage Form Compiler (TSFC) and COFFEE's kernel abstract
syntax tree (AST) optimizer. TSFC provides this compiler with
appropriate kernel functions (in C) for evaluating integral
expressions (finite element variational forms written in UFL).
COFFEE's AST base helps with the construction of code blocks
throughout the kernel returned by `compile_expression`.
The Eigen C++ library (http://eigen.tuxfamily.org/) is required, as
all low-level numerical linear algebra operations are performed using
this templated function library.
"""
from coffee import base as ast
from firedrake.constant import Constant
from firedrake.tsfc_interface import SplitKernel, KernelInfo
from firedrake.slate.slac.kernel_builder import LocalKernelBuilder
from firedrake import op2
from itertools import chain
from pyop2.utils import get_petsc_dir
from pyop2.datatypes import as_cstr
from tsfc.parameters import SCALAR_TYPE
import firedrake.slate.slate as slate
import numpy as np
__all__ = ['compile_expression']
PETSC_DIR = get_petsc_dir()
cell_to_facets_dtype = np.dtype(np.int8)
def compile_expression(slate_expr, tsfc_parameters=None):
"""Takes a Slate expression `slate_expr` and returns the appropriate
:class:`firedrake.op2.Kernel` object representing the Slate expression.
:arg slate_expr: a :class:'TensorBase' expression.
:arg tsfc_parameters: an optional `dict` of form compiler parameters to
be passed onto TSFC during the compilation of
ufl forms.
Returns: A `tuple` containing a `SplitKernel(idx, kinfo)`
"""
if not isinstance(slate_expr, slate.TensorBase):
raise ValueError("Expecting a `TensorBase` object, not %s" % type(slate_expr))
# TODO: Get PyOP2 to write into mixed dats
if slate_expr.is_mixed:
raise NotImplementedError("Compiling mixed slate expressions")
if len(slate_expr.ufl_domains()) > 1:
raise NotImplementedError("Multiple domains not implemented.")
# If the expression has already been symbolically compiled, then
# simply reuse the produced kernel.
if slate_expr._metakernel_cache is not None:
return slate_expr._metakernel_cache
# Create a builder for the Slate expression
builder = LocalKernelBuilder(expression=slate_expr,
tsfc_parameters=tsfc_parameters)
# Keep track of declared temporaries
declared_temps = {}
statements = []
# Declare terminal tensor temporaries
terminal_declarations = terminal_temporaries(builder, declared_temps)
statements.extend(terminal_declarations)
# Generate assembly calls for tensor assembly
subkernel_calls = tensor_assembly_calls(builder)
statements.extend(subkernel_calls)
# Create coefficient temporaries if necessary
if builder.action_coefficients:
coefficient_temps = coefficient_temporaries(builder, declared_temps)
statements.extend(coefficient_temps)
# Create auxiliary temporaries if necessary
if builder.aux_exprs:
aux_temps = auxiliary_temporaries(builder, declared_temps)
statements.extend(aux_temps)
# Generate the kernel information with complete AST
kinfo = generate_kernel_ast(builder, statements, declared_temps)
# Cache the resulting kernel
idx = tuple([0]*slate_expr.rank)
kernel = (SplitKernel(idx, kinfo),)
slate_expr._metakernel_cache = kernel
return kernel
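# Hedged usage note (the expression name is assumed): for a Slate TensorBase
# expression `expr`, the compiler returns a cached one-element tuple,
#   kernels = compile_expression(expr)   # -> (SplitKernel(idx, kinfo),)
# so repeated calls on the same expression reuse the compiled kernel.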
def generate_kernel_ast(builder, statements, declared_temps):
"""Glues together the complete AST for the Slate expression
contained in the :class:`LocalKernelBuilder`.
:arg builder: The :class:`LocalKernelBuilder` containing
all relevant expression information.
:arg statements: A list of COFFEE objects containing all
assembly calls and temporary declarations.
:arg declared_temps: A `dict` containing all previously
declared temporaries.
    Returns: A `KernelInfo` object describing the complete AST.
"""
slate_expr = builder.expression
if slate_expr.rank == 0:
# Scalars are treated as 1x1 MatrixBase objects
shape = (1,)
else:
shape = slate_expr.shape
# Now we create the result statement by declaring its eigen type and
# using Eigen::Map to move between Eigen and C data structs.
statements.append(ast.FlatBlock("/* Map eigen tensor into C struct */\n"))
result_sym = ast.Symbol("T%d" % len(declared_temps))
result_data_sym = ast.Symbol("A%d" % len(declared_temps))
result_type = "Eigen::Map<%s >" % eigen_matrixbase_type(shape)
result = ast.Decl(SCALAR_TYPE, ast.Symbol(result_data_sym, shape))
result_statement = ast.FlatBlock("%s %s((%s *)%s);\n" % (result_type,
result_sym,
SCALAR_TYPE,
result_data_sym))
statements.append(result_statement)
# Generate the complete c++ string performing the linear algebra operations
# on Eigen matrices/vectors
statements.append(ast.FlatBlock("/* Linear algebra expression */\n"))
cpp_string = ast.FlatBlock(metaphrase_slate_to_cpp(slate_expr,
declared_temps))
statements.append(ast.Incr(result_sym, cpp_string))
# Generate arguments for the macro kernel
args = [result, ast.Decl("%s **" % SCALAR_TYPE, builder.coord_sym)]
# Orientation information
if builder.oriented:
args.append(ast.Decl("int **", builder.cell_orientations_sym))
# Coefficient information
expr_coeffs = slate_expr.coefficients()
for c in expr_coeffs:
if isinstance(c, Constant):
ctype = "%s *" % SCALAR_TYPE
else:
ctype = "%s **" % SCALAR_TYPE
args.extend([ast.Decl(ctype, csym) for csym in builder.coefficient(c)])
# Facet information
if builder.needs_cell_facets:
args.append(ast.Decl("%s *" % as_cstr(cell_to_facets_dtype),
builder.cell_facet_sym))
# NOTE: We need to be careful about the ordering here. Mesh layers are
# added as the final argument to the kernel.
if builder.needs_mesh_layers:
args.append(ast.Decl("int", builder.mesh_layer_sym))
# Macro kernel
macro_kernel_name = "compile_slate"
stmts = ast.Block(statements)
macro_kernel = ast.FunDecl("void", macro_kernel_name, args,
stmts, pred=["static", "inline"])
# Construct the final ast
kernel_ast = ast.Node(builder.templated_subkernels + [macro_kernel])
# Now we wrap up the kernel ast as a PyOP2 kernel and include the
# Eigen header files
include_dirs = builder.include_dirs
include_dirs.extend(["%s/include/eigen3/" % d for d in PETSC_DIR])
op2kernel = op2.Kernel(kernel_ast,
macro_kernel_name,
cpp=True,
include_dirs=include_dirs,
headers=['#include <Eigen/Dense>',
'#define restrict __restrict'])
# Send back a "TSFC-like" SplitKernel object with an
# index and KernelInfo
kinfo = KernelInfo(kernel=op2kernel,
integral_type=builder.integral_type,
oriented=builder.oriented,
subdomain_id="otherwise",
domain_number=0,
coefficient_map=tuple(range(len(expr_coeffs))),
needs_cell_facets=builder.needs_cell_facets,
pass_layer_arg=builder.needs_mesh_layers)
return kinfo
def auxiliary_temporaries(builder, declared_temps):
"""Generates statements for assigning auxiliary temporaries
for nodes in an expression with "high" reference count.
Expressions which require additional temporaries are provided
by the :class:`LocalKernelBuilder`.
:arg builder: The :class:`LocalKernelBuilder` containing
all relevant expression information.
:arg declared_temps: A `dict` containing all previously
declared temporaries. This dictionary
is updated as auxiliary expressions
are assigned temporaries.
"""
statements = [ast.FlatBlock("/* Auxiliary temporaries */\n")]
results = [ast.FlatBlock("/* Assign auxiliary temps */\n")]
for exp in builder.aux_exprs:
if exp not in declared_temps:
t = ast.Symbol("auxT%d" % len(declared_temps))
result = metaphrase_slate_to_cpp(exp, declared_temps)
tensor_type = eigen_matrixbase_type(shape=exp.shape)
statements.append(ast.Decl(tensor_type, t))
statements.append(ast.FlatBlock("%s.setZero();\n" % t))
results.append(ast.Assign(t, result))
declared_temps[exp] = t
statements.extend(results)
return statements
def coefficient_temporaries(builder, declared_temps):
"""Generates coefficient temporary statements for assigning
coefficients to vector temporaries.
:arg builder: The :class:`LocalKernelBuilder` containing
all relevant expression information.
:arg declared_temps: A `dict` keeping track of all declared
temporaries. This dictionary is updated
as coefficients are assigned temporaries.
Action computations require creating coefficient temporaries to
compute the matrix-vector product. The temporaries are created by
inspecting the function space of the coefficient to compute node
and dof extents. The coefficient is then assigned values by looping
over both the node extent and dof extent (double FOR-loop). A double
FOR-loop is needed for each function space (if the function space is
mixed, then a loop will be constructed for each component space).
The general structure of each coefficient loop will be:
FOR (i1=0; i1<node_extent; i1++):
FOR (j1=0; j1<dof_extent; j1++):
wT0[offset + (dof_extent * i1) + j1] = w_0_0[i1][j1]
wT1[offset + (dof_extent * i1) + j1] = w_1_0[i1][j1]
.
.
.
where wT0, wT1, ... are temporaries for coefficients sharing the
same node and dof extents. The offset is computed based on whether
the function space is mixed. The offset is always 0 for non-mixed
coefficients. If the coefficient is mixed, then the offset is
incremented by the total number of nodal unknowns associated with
the component spaces of the mixed space.
"""
statements = [ast.FlatBlock("/* Coefficient temporaries */\n")]
i_sym = ast.Symbol("i1")
j_sym = ast.Symbol("j1")
loops = [ast.FlatBlock("/* Loops for coefficient temps */\n")]
for (nodes, dofs), cinfo_list in builder.action_coefficients.items():
# Collect all coefficients which share the same node/dof extent
assignments = []
for cinfo in cinfo_list:
fs_i = cinfo.space_index
offset = cinfo.offset_index
c_shape = cinfo.shape
actee = cinfo.coefficient
if actee not in declared_temps:
# Declare and initialize coefficient temporary
c_type = eigen_matrixbase_type(shape=c_shape)
t = ast.Symbol("wT%d" % len(declared_temps))
statements.append(ast.Decl(c_type, t))
statements.append(ast.FlatBlock("%s.setZero();\n" % t))
declared_temps[actee] = t
# Assigning coefficient values into temporary
coeff_sym = ast.Symbol(builder.coefficient(actee)[fs_i],
rank=(i_sym, j_sym))
index = ast.Sum(offset,
ast.Sum(ast.Prod(dofs, i_sym), j_sym))
coeff_temp = ast.Symbol(t, rank=(index,))
assignments.append(ast.Assign(coeff_temp, coeff_sym))
# Inner-loop running over dof extent
inner_loop = ast.For(ast.Decl("unsigned int", j_sym, init=0),
ast.Less(j_sym, dofs),
ast.Incr(j_sym, 1),
assignments)
# Outer-loop running over node extent
loop = ast.For(ast.Decl("unsigned int", i_sym, init=0),
ast.Less(i_sym, nodes),
ast.Incr(i_sym, 1),
inner_loop)
loops.append(loop)
statements.extend(loops)
return statements
def tensor_assembly_calls(builder):
"""Generates a block of statements for assembling the local
finite element tensors.
:arg builder: The :class:`LocalKernelBuilder` containing
all relevant expression information and
assembly calls.
"""
statements = [ast.FlatBlock("/* Assemble local tensors */\n")]
# Cell integrals are straightforward. Just splat them out.
statements.extend(builder.assembly_calls["cell"])
if builder.needs_cell_facets:
# The for-loop will have the general structure:
#
# FOR (facet=0; facet<num_facets; facet++):
# IF (facet is interior):
# *interior calls
# ELSE IF (facet is exterior):
# *exterior calls
#
# If only interior (exterior) facets are present,
# then only a single IF-statement checking for interior
# (exterior) facets will be present within the loop. The
# cell facets are labelled `1` for interior, and `0` for
# exterior.
statements.append(ast.FlatBlock("/* Loop over cell facets */\n"))
int_calls = list(chain(*[builder.assembly_calls[it_type]
for it_type in ("interior_facet",
"interior_facet_vert")]))
ext_calls = list(chain(*[builder.assembly_calls[it_type]
for it_type in ("exterior_facet",
"exterior_facet_vert")]))
# Compute the number of facets to loop over
domain = builder.expression.ufl_domain()
if domain.cell_set._extruded:
num_facets = domain.ufl_cell()._cells[0].num_facets()
else:
num_facets = domain.ufl_cell().num_facets()
if_ext = ast.Eq(ast.Symbol(builder.cell_facet_sym,
rank=(builder.it_sym,)), 0)
if_int = ast.Eq(ast.Symbol(builder.cell_facet_sym,
rank=(builder.it_sym,)), 1)
body = []
if ext_calls:
body.append(ast.If(if_ext, (ast.Block(ext_calls,
open_scope=True),)))
if int_calls:
body.append(ast.If(if_int, (ast.Block(int_calls,
open_scope=True),)))
statements.append(ast.For(ast.Decl("unsigned int",
builder.it_sym, init=0),
ast.Less(builder.it_sym, num_facets),
ast.Incr(builder.it_sym, 1), body))
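        # Illustrative shape of the generated loop (hedged pseudo-C; the
        # symbol names stand in for builder.it_sym and friends):
        #   for (unsigned int facet = 0; facet < num_facets; facet++) {
        #     if (cell_facets[facet] == 0) { /* exterior calls */ }
        #     if (cell_facets[facet] == 1) { /* interior calls */ }
        #   }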
if builder.needs_mesh_layers:
# In the presence of interior horizontal facet calls, an
# IF-ELIF-ELSE block is generated using the mesh levels
# as conditions for which calls are needed:
#
# IF (layer == bottom_layer):
# *bottom calls
# ELSE IF (layer == top_layer):
# *top calls
# ELSE:
# *top calls
# *bottom calls
#
# Any extruded top or bottom calls for extruded facets are
# included within the appropriate mesh-level IF-blocks. If
# no interior horizontal facet calls are present, then
# standard IF-blocks are generated for exterior top/bottom
# facet calls when appropriate:
#
# IF (layer == bottom_layer):
# *bottom calls
#
# IF (layer == top_layer):
# *top calls
#
# The mesh level is an integer provided as a macro kernel
# argument.
# FIXME: No variable layers assumption
statements.append(ast.FlatBlock("/* Mesh levels: */\n"))
num_layers = builder.expression.ufl_domain().topological.layers - 1
int_top = builder.assembly_calls["interior_facet_horiz_top"]
int_btm = builder.assembly_calls["interior_facet_horiz_bottom"]
ext_top = builder.assembly_calls["exterior_facet_top"]
ext_btm = builder.assembly_calls["exterior_facet_bottom"]
bottom = ast.Block(int_top + ext_btm, open_scope=True)
top = ast.Block(int_btm + ext_top, open_scope=True)
rest = ast.Block(int_btm + int_top, open_scope=True)
statements.append(ast.If(ast.Eq(builder.mesh_layer_sym, 0),
(bottom,
ast.If(ast.Eq(builder.mesh_layer_sym,
num_layers - 1),
(top, rest)))))
return statements
def terminal_temporaries(builder, declared_temps):
"""Generates statements for assigning auxiliary temporaries
for nodes in an expression with "high" reference count.
Expressions which require additional temporaries are provided
by the :class:`LocalKernelBuilder`.
:arg builder: The :class:`LocalKernelBuilder` containing
all relevant expression information.
:arg declared_temps: A `dict` keeping track of all declared
temporaries. This dictionary is updated
as terminal tensors are assigned temporaries.
"""
statements = [ast.FlatBlock("/* Declare and initialize */\n")]
for exp in builder.temps:
t = builder.temps[exp]
statements.append(ast.Decl(eigen_matrixbase_type(exp.shape), t))
statements.append(ast.FlatBlock("%s.setZero();\n" % t))
declared_temps[exp] = t
return statements
def parenthesize(arg, prec=None, parent=None):
"""Parenthesizes an expression."""
if prec is None or parent is None or prec >= parent:
return arg
return "(%s)" % arg
def metaphrase_slate_to_cpp(expr, temps, prec=None):
"""Translates a Slate expression into its equivalent representation in
the Eigen C++ syntax.
:arg expr: a :class:`slate.TensorBase` expression.
:arg temps: a `dict` of temporaries which map a given expression to its
corresponding representation as a `coffee.Symbol` object.
    :arg prec: an argument dictating the order of precedence in the linear
        algebra operations. This ensures that parentheticals are placed
        appropriately and that linear algebra operations are performed
        in the correct order.
Returns
This function returns a `string` which represents the C/C++ code
representation of the `slate.TensorBase` expr.
"""
# If the tensor is terminal, it has already been declared.
# Coefficients in action expressions will have been declared by now,
# as well as any other nodes with high reference count.
if expr in temps:
return temps[expr].gencode()
elif isinstance(expr, slate.Transpose):
tensor, = expr.operands
return "(%s).transpose()" % metaphrase_slate_to_cpp(tensor, temps)
elif isinstance(expr, slate.Inverse):
tensor, = expr.operands
return "(%s).inverse()" % metaphrase_slate_to_cpp(tensor, temps)
elif isinstance(expr, slate.Negative):
tensor, = expr.operands
result = "-%s" % metaphrase_slate_to_cpp(tensor, temps, expr.prec)
return parenthesize(result, expr.prec, prec)
elif isinstance(expr, (slate.Add, slate.Sub, slate.Mul)):
op = {slate.Add: '+',
slate.Sub: '-',
slate.Mul: '*'}[type(expr)]
A, B = expr.operands
result = "%s %s %s" % (metaphrase_slate_to_cpp(A, temps, expr.prec),
op,
metaphrase_slate_to_cpp(B, temps, expr.prec))
return parenthesize(result, expr.prec, prec)
elif isinstance(expr, slate.Action):
tensor, = expr.operands
c, = expr.actee
result = "(%s) * %s" % (metaphrase_slate_to_cpp(tensor,
temps,
expr.prec), temps[c])
return parenthesize(result, expr.prec, prec)
else:
raise NotImplementedError("Type %s not supported.", type(expr))
def eigen_matrixbase_type(shape):
"""Returns the Eigen::Matrix declaration of the tensor.
    :arg shape: a tuple of integers that denote the shape of the
:class:`slate.TensorBase` object.
    Returns: A string containing the declaration of the
        `slate.TensorBase` object in the appropriate Eigen C++ template
        library syntax.
"""
if len(shape) == 0:
rows = 1
cols = 1
elif len(shape) == 1:
rows = shape[0]
cols = 1
else:
if not len(shape) == 2:
raise NotImplementedError(
"%d-rank tensors are not supported." % len(shape)
)
rows = shape[0]
cols = shape[1]
if cols != 1:
order = ", Eigen::RowMajor"
else:
order = ""
return "Eigen::Matrix<double, %d, %d%s>" % (rows, cols, order)
|
hixio-mh/firedrake
|
firedrake/slate/slac/compiler.py
|
compiler.py
|
py
| 22,060 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "pyop2.utils.get_petsc_dir",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.dtype",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.int8",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "firedrake.slate.slate.TensorBase",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "firedrake.slate.slate",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "firedrake.slate.slac.kernel_builder.LocalKernelBuilder",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "firedrake.tsfc_interface.SplitKernel",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "coffee.base.FlatBlock",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "coffee.base.Symbol",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "coffee.base.Symbol",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "coffee.base.Decl",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "tsfc.parameters.SCALAR_TYPE",
"line_number": 132,
"usage_type": "argument"
},
{
"api_name": "coffee.base",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "coffee.base.Symbol",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "coffee.base.FlatBlock",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "tsfc.parameters.SCALAR_TYPE",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "coffee.base.FlatBlock",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "coffee.base.FlatBlock",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "coffee.base.Incr",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "coffee.base.Decl",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "tsfc.parameters.SCALAR_TYPE",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "coffee.base.Decl",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "firedrake.constant.Constant",
"line_number": 156,
"usage_type": "argument"
},
{
"api_name": "tsfc.parameters.SCALAR_TYPE",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "tsfc.parameters.SCALAR_TYPE",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "coffee.base.Decl",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "coffee.base.Decl",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "pyop2.datatypes.as_cstr",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "coffee.base.Decl",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "coffee.base.Block",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "coffee.base.FunDecl",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "coffee.base.Node",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "firedrake.op2.Kernel",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "firedrake.op2",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "firedrake.tsfc_interface.KernelInfo",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "coffee.base.FlatBlock",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "coffee.base.FlatBlock",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "coffee.base.Symbol",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "coffee.base.Decl",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "coffee.base.FlatBlock",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "coffee.base.Assign",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "coffee.base.FlatBlock",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 270,
"usage_type": "name"
},
{
"api_name": "coffee.base.Symbol",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 271,
"usage_type": "name"
},
{
"api_name": "coffee.base.Symbol",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "coffee.base.FlatBlock",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 273,
"usage_type": "name"
},
{
"api_name": "coffee.base.Symbol",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "coffee.base.Decl",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "coffee.base.FlatBlock",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "coffee.base.Symbol",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "coffee.base.Sum",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "coffee.base.Sum",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 295,
"usage_type": "name"
},
{
"api_name": "coffee.base.Prod",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "coffee.base.Symbol",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "coffee.base.Assign",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 297,
"usage_type": "name"
},
{
"api_name": "coffee.base.For",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 300,
"usage_type": "name"
},
{
"api_name": "coffee.base.Decl",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "coffee.base.Less",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "coffee.base.Incr",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "coffee.base.For",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "coffee.base.Decl",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "coffee.base.Less",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "coffee.base.Incr",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 308,
"usage_type": "name"
},
{
"api_name": "coffee.base.FlatBlock",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 326,
"usage_type": "name"
},
{
"api_name": "coffee.base.FlatBlock",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 346,
"usage_type": "name"
},
{
"api_name": "itertools.chain",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "coffee.base.Eq",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 361,
"usage_type": "name"
},
{
"api_name": "coffee.base.Symbol",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "coffee.base.Eq",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 363,
"usage_type": "name"
},
{
"api_name": "coffee.base.Symbol",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "coffee.base.If",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 367,
"usage_type": "name"
},
{
"api_name": "coffee.base.Block",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "coffee.base.If",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 370,
"usage_type": "name"
},
{
"api_name": "coffee.base.Block",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "coffee.base.For",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 373,
"usage_type": "name"
},
{
"api_name": "coffee.base.Decl",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "coffee.base.Less",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 375,
"usage_type": "name"
},
{
"api_name": "coffee.base.Incr",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 376,
"usage_type": "name"
},
{
"api_name": "coffee.base.FlatBlock",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 407,
"usage_type": "name"
},
{
"api_name": "coffee.base.Block",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 414,
"usage_type": "name"
},
{
"api_name": "coffee.base.Block",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 415,
"usage_type": "name"
},
{
"api_name": "coffee.base.Block",
"line_number": 416,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 416,
"usage_type": "name"
},
{
"api_name": "coffee.base.If",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 417,
"usage_type": "name"
},
{
"api_name": "coffee.base.Eq",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "coffee.base.If",
"line_number": 419,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 419,
"usage_type": "name"
},
{
"api_name": "coffee.base.Eq",
"line_number": 419,
"usage_type": "call"
},
{
"api_name": "coffee.base.FlatBlock",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 438,
"usage_type": "name"
},
{
"api_name": "coffee.base.Decl",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 441,
"usage_type": "name"
},
{
"api_name": "coffee.base.FlatBlock",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "coffee.base",
"line_number": 442,
"usage_type": "name"
},
{
"api_name": "firedrake.slate.slate.Transpose",
"line_number": 477,
"usage_type": "attribute"
},
{
"api_name": "firedrake.slate.slate",
"line_number": 477,
"usage_type": "name"
},
{
"api_name": "firedrake.slate.slate.Inverse",
"line_number": 481,
"usage_type": "attribute"
},
{
"api_name": "firedrake.slate.slate",
"line_number": 481,
"usage_type": "name"
},
{
"api_name": "firedrake.slate.slate.Negative",
"line_number": 485,
"usage_type": "attribute"
},
{
"api_name": "firedrake.slate.slate",
"line_number": 485,
"usage_type": "name"
},
{
"api_name": "firedrake.slate.slate.Add",
"line_number": 490,
"usage_type": "attribute"
},
{
"api_name": "firedrake.slate.slate",
"line_number": 490,
"usage_type": "name"
},
{
"api_name": "firedrake.slate.slate.Sub",
"line_number": 490,
"usage_type": "attribute"
},
{
"api_name": "firedrake.slate.slate.Mul",
"line_number": 490,
"usage_type": "attribute"
},
{
"api_name": "firedrake.slate.slate.Add",
"line_number": 491,
"usage_type": "attribute"
},
{
"api_name": "firedrake.slate.slate",
"line_number": 491,
"usage_type": "name"
},
{
"api_name": "firedrake.slate.slate.Sub",
"line_number": 492,
"usage_type": "attribute"
},
{
"api_name": "firedrake.slate.slate",
"line_number": 492,
"usage_type": "name"
},
{
"api_name": "firedrake.slate.slate.Mul",
"line_number": 493,
"usage_type": "attribute"
},
{
"api_name": "firedrake.slate.slate",
"line_number": 493,
"usage_type": "name"
},
{
"api_name": "firedrake.slate.slate.Action",
"line_number": 501,
"usage_type": "attribute"
},
{
"api_name": "firedrake.slate.slate",
"line_number": 501,
"usage_type": "name"
}
] |