Dataset schema (one row per Python source file):

| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |

Each record below gives the row metadata, the file content, and the per-file statistics.
hexsha: c30a8241bc4eb176e2d35bfc53ddbf79b7ca685f | size: 77 | ext: py | lang: Python
repo: DebasishMaji/PI | path: test/settings/test_kafka_consumer_config.py | head_hexsha: e293982cae8f8755d28d7b3de22966dc74759b90 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:

import unittest
class TestKafkaConsumerConfig(unittest.TestCase):
pass
avg_line_length: 12.833333 | max_line_length: 49 | alphanum_fraction: 0.805195 | count/score classes: 58 / 0.753247 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 0 / 0

hexsha: c30ea52dd60b15b77f690236c9544837627ac0f7 | size: 7,684 | ext: py | lang: Python
repo: demirdogukan/InsiderPycraft | path: Pycraft/StartupAnimation.py | head_hexsha: 5567107326fbd222a7df6aabf4ab265e0a157636 | licenses: ["MIT"]
max_stars_count: 22 (2021-03-25T17:47:45.000Z to 2022-03-29T01:56:12.000Z) | max_issues_count: 1 (2021-12-22T16:12:59.000Z) | max_forks_count: 3 (2021-09-05T14:10:05.000Z to 2022-01-10T12:57:34.000Z)
content:

if not __name__ == "__main__":
print("Started <Pycraft_StartupAnimation>")
class GenerateStartupScreen:
def __init__(self):
pass
def Start(self):
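        # Intro sequence: show the author name, then "presents", then the
        # "Pycraft" title, and finally scroll the title up and off screen.
        # Each phase runs at 60 FPS and enforces a minimum window size of 1280x720.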
try:
self.Display.fill(self.BackgroundCol)
self.mod_Pygame__.display.flip()
self.mod_Pygame__.display.set_caption(f"Pycraft: v{self.version}: Welcome")
PresentsFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 35)
PycraftFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 60)
NameFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 45)
NameText = NameFont.render("Tom Jebbo", True, self.FontCol)
NameTextWidth = NameText.get_width()
NameTextHeight = NameText.get_height()
PresentsText = PresentsFont.render("presents", True, self.FontCol)
PycraftText = PycraftFont.render("Pycraft", True, self.FontCol)
PycraftTextWidth = PycraftText.get_width()
PycraftTextHeight = PycraftText.get_height()
iteration = 0
clock = self.mod_Pygame__.time.Clock()
if self.RunFullStartup == True:
while iteration <= (60*3):
self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size()
self.Display.fill(self.BackgroundCol)
self.Display.blit(NameText, ((self.realWidth-NameTextWidth)/2, (self.realHeight-NameTextHeight)/2))
iteration += 1
if self.realWidth < 1280:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight)
if self.realHeight < 720:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720)
self.mod_Pygame__.display.flip()
clock.tick(60)
for event in self.mod_Pygame__.event.get():
if event.type == self.mod_Pygame__.QUIT:
self.Stop_Thread_Event.set()
self.Thread_StartLongThread.join()
self.Thread_AdaptiveMode.join()
self.Thread_StartLongThread.join()
self.mod_Pygame__.quit()
self.mod_Sys__.exit("Thanks for playing")
quit()
iteration = 0
while iteration <= (60*2):
self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size()
self.Display.fill(self.BackgroundCol)
self.Display.blit(NameText, ((self.realWidth-NameTextWidth)/2, (self.realHeight-NameTextHeight)/2))
self.Display.blit(PresentsText, ((((self.realWidth-NameTextWidth)/2)+120), ((self.realHeight-NameTextHeight)/2)+30))
iteration += 1
if self.realWidth < 1280:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight)
if self.realHeight < 720:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720)
self.mod_Pygame__.display.flip()
clock.tick(60)
for event in self.mod_Pygame__.event.get():
if event.type == self.mod_Pygame__.QUIT:
self.Stop_Thread_Event.set()
self.Thread_StartLongThread.join()
self.Thread_AdaptiveMode.join()
self.Thread_StartLongThread.join()
self.mod_Pygame__.quit()
self.mod_Sys__.exit("Thanks for playing")
quit()
iteration = 0
while iteration <= (60*3):
self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size()
self.Display.fill(self.BackgroundCol)
self.Display.blit(PycraftText, ((self.realWidth-PycraftTextWidth)/2, (self.realHeight-PycraftTextHeight)/2))
iteration += 1
if self.realWidth < 1280:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight)
if self.realHeight < 720:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720)
self.mod_Pygame__.display.flip()
clock.tick(60)
for event in self.mod_Pygame__.event.get():
if event.type == self.mod_Pygame__.QUIT:
self.Stop_Thread_Event.set()
self.Thread_StartLongThread.join()
self.Thread_AdaptiveMode.join()
self.Thread_StartLongThread.join()
self.mod_Pygame__.quit()
self.mod_Sys__.exit("Thanks for playing")
quit()
y = 0
while True:
self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size()
self.Display.fill(self.BackgroundCol)
self.Display.blit(PycraftText, ((self.realWidth-PycraftTextWidth)/2, ((self.realHeight-PycraftTextHeight)/2)-y))
y += 2
if self.realWidth < 1280:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight)
if self.realHeight < 720:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720)
self.mod_Pygame__.display.flip()
clock.tick(60)
for event in self.mod_Pygame__.event.get():
if event.type == self.mod_Pygame__.QUIT:
self.Stop_Thread_Event.set()
self.Thread_StartLongThread.join()
self.Thread_AdaptiveMode.join()
self.Thread_StartLongThread.join()
self.mod_Pygame__.quit()
self.mod_Sys__.exit("Thanks for playing")
quit()
if ((self.realHeight-PycraftTextHeight)/2)-y <= 0:
self.RunFullStartup = False
return None
except Exception as Message:
self.RunFullStartup = False
return Message
else:
print("You need to run this as part of Pycraft")
import tkinter as tk
from tkinter import messagebox
root = tk.Tk()
root.withdraw()
messagebox.showerror("Startup Fail", "You need to run this as part of Pycraft, please run the 'main.py' file")
                        quit()

avg_line_length: 53.361111 | max_line_length: 141 | alphanum_fraction: 0.511322 | count/score classes: 7,307 / 0.950937 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 394 / 0.051275

hexsha: c30f35ba35fbd12a0fe79d62b724f5343db144f1 | size: 2,985 | ext: py | lang: Python
repo: ParksProjets/kattis-hunter | path: kattishunter/kattis/submission.py | head_hexsha: c4990edf59fba6d91d22fdc126673781ab423d0f | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:

"""
Submit files for a Kattis problem.
Copyright (C) 2019, Guillaume Gonnet
This project is under the MIT license.
"""
import os.path as path
import re
from typing import Dict, List, Text
import requests
import logging
from .login import login
logger = logging.getLogger(__name__)
# Base headers to use.
HEADERS = {
"Accept": "text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8",
"Accept-Language": "en-US,en;q=0.5",
}
def retreive_csrf_token(config: Dict, pid: Text, retry = True):
"Retreive CSRF token from the submit page."
# Setup headers to send.
headers = HEADERS.copy()
headers["User-Agent"] = config["cache"]["user-agent"]
# Make the GET request.
url = config["url"]["submit"].format(pid=pid)
cookies = config["cache"].get("cookies", {})
res = requests.get(url, headers=headers, cookies=cookies,
allow_redirects=False)
config["cache"]["cookies"] = {**cookies, **res.cookies.get_dict()}
# Not logged, try to login first.
if res.status_code != 200:
if not retry:
logger.critical("Can't retrieve submit page from Kattis.")
login(config)
return retreive_csrf_token(config, pid, False)
# Find the CSRF token in response body.
pattern = r"name=\"csrf_token\".*?value=\"([0-9a-z]+)\""
match = re.search(pattern, res.text)
if match is None:
logger.critical("Can't find CSRF token in submit page.")
return match.group(1)
def read_file(filename: Text):
"Read a single file to send."
with open(filename, "rb") as file:
return file.read()
def read_files(files: List[Text]):
"Read files to send."
return [(
"sub_file[]",
(path.basename(file), read_file(file), "application/octet-stream")
) for file in files]
def submit_kattis(config: Dict, pid: Text, files: List[Text]):
"Submit files to a Kattis problem."
# Setup headers to send.
headers = HEADERS.copy()
headers["User-Agent"] = config["cache"]["user-agent"]
# Setup data to send.
data = {
"csrf_token": retreive_csrf_token(config, pid),
"type": "files",
"sub_code": "",
"problem": pid,
"language": "C++",
"submit": "Submit",
"submit_ctr": 10
}
# URL, files and cookies to use.
url = config["url"]["submit"].format(pid=pid)
files = read_files(files)
cookies = config["cache"]["cookies"]
# Make the POST request.
logger.debug("Submitting %d files for '%s'.", len(files), pid)
res = requests.post(url, data=data, files=files, headers=headers,
cookies=cookies)
config["cache"]["cookies"] = {**cookies, **res.cookies.get_dict()}
# Find submisson ID.
match = re.match(r"^.*/submissions/([0-9]+)$", res.url)
if not match:
logger.critical("Can't find submission ID from URL '%s'.", res.url)
sid = match.group(1)
logger.debug("Files sent to submission %s.", sid)
return sid
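
# Example usage (illustrative sketch; the config loader, problem id and
# solution paths below are assumptions, not part of this module):
#
#   config = load_config()   # dict providing the "url" and "cache" entries used above
#   sid = submit_kattis(config, "twostones", ["solution.cpp"])
#   print("Submitted as", sid)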
avg_line_length: 26.415929 | max_line_length: 83 | alphanum_fraction: 0.622781 | count/score classes: 0 / 0 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 1,159 / 0.388275

hexsha: c30f9cd42abaa561a1c9cda944cad65e60a4fabe | size: 785 | ext: py | lang: Python
repo: alanmitchell/mini-monitor | path: scripts/set_health_led.py | head_hexsha: 5d60e1f69fc61d53f3090a159445595a9987c36a | licenses: ["Apache-2.0"]
max_stars_count: 7 (2016-01-11T23:54:31.000Z to 2022-02-16T11:58:16.000Z) | max_issues_count: null | max_forks_count: 5 (2015-12-17T15:22:45.000Z to 2018-08-13T17:40:38.000Z)
content:

#!/usr/bin/env python3
"""Script to do basic health checks of the system and turn on an LED on
BCM pin 12 (pin 32 on header) if they pass, turn Off otherwise.
"""
import time
import RPi.GPIO as GPIO
import subprocess
# The BCM pin number that the LED is wired to. When the pin
# is at 3.3V the LED is On.
LED_PIN = 12
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(LED_PIN, GPIO.OUT)
# ----- Test for Internet availability.
# Try to ping for a minute before declaring that the Internet
# is not available
internet_available = False
for i in range(12):
if subprocess.call('/bin/ping -q -c1 8.8.8.8', shell=True) == 0:
internet_available = True
break
time.sleep(5)
# Set LED according to results of test
GPIO.output(LED_PIN, internet_available)
avg_line_length: 25.322581 | max_line_length: 71 | alphanum_fraction: 0.71465 | count/score classes: 0 / 0 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 430 / 0.547771

hexsha: c30fa03b89f6de54fae9f895f13e66390afbeacc | size: 2,676 | ext: py | lang: Python
repo: billyang98/UNITER | path: vqa_txt_data/compare_experiment_results.py | head_hexsha: c7f0833f14aa9dcb1e251a986c72e49edde1bdd4 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:

import json
import numpy as np
from tqdm import tqdm
# Change these based on experiment
#exp_dataset = 'mask_char_oov_test_set.db'
#exp_name = 'results_test_mask_char'
#exp_dataset = 'mask_2_oov_test_set.db'
#exp_name = 'results_test_mask_2'
#exp_dataset = 'mask_2_oov_test_set.db'
#exp_name = 'results_test_synonyms_mask_2_ensemble_all_5'
#exp_dataset = 'synonyms_mask_char_l03_oov_test_set.db'
#exp_name = 'results_test_synonyms_mask_char_l03'
#exp_dataset = 'synonyms_mask_char_03m_oov_test_set.db'
#exp_name = 'results_test_synonyms_mask_char_03m'
#exp_dataset = 'synonyms_mask_2_03l_oov_test_set.db'
#exp_name = 'results_test_synonyms_mask_2_03l'
exp_dataset = 'mask_2_oov_test_set.db'
exp_name = 'results_test_synonyms_mask_2_fixed'
q_list_file = '/scratch/cluster/billyang/vqa_dataset/txt_db/oov_datasets/{}/questions_changed.json'.format(exp_dataset)
exp_ans_file = '/scratch/cluster/billyang/uniter_image/vqa_joint_trained/{}/results_3000_all.json'.format(exp_name)
#exp_ans_file = '/scratch/cluster/billyang/uniter_image/vqa_joint_fixed_trained/{}/results_3000_all.json'.format(exp_name)
q_list = json.load(open(q_list_file))
exp_ans_list = json.load(open(exp_ans_file))
baseline_ans_list = json.load(open('/scratch/cluster/billyang/uniter_image/vqa_joint_trained/results_test_normal_test/results_3000_all.json'))
#baseline_ans_list = json.load(open('/scratch/cluster/billyang/uniter_image/vqa_joint_fixed_trained/results_test_normal_test_fixed/results_3000_all.json'))
exp_ans = {o['question_id']: o['answer'] for o in exp_ans_list}
baseline_ans = {o['question_id']: o['answer'] for o in baseline_ans_list}
gt_ans = json.load(open('oov_test_full_answers.json'))
results = {}
results['num_questions'] = len(q_list)
exp_tot_score = 0
bl_tot_score = 0
rtw = []
wtr = []
def getscore(answer, answers, scores):
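    # Return the ground-truth soft score for the predicted answer, or 0 when the
    # prediction does not appear among the annotated answer strings.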
if answer in answers:
return scores[answers.index(answer)]
return 0
for qid in tqdm(q_list):
exp_score = getscore(exp_ans[qid], gt_ans[qid]['strings'], gt_ans[qid]['scores'])
exp_tot_score += exp_score
bl_score = getscore(baseline_ans[qid], gt_ans[qid]['strings'], gt_ans[qid]['scores'])
bl_tot_score += bl_score
if exp_score > 0 and bl_score == 0:
wtr.append(qid)
if bl_score > 0 and exp_score == 0:
rtw.append(qid)
results['exp_score'] = exp_tot_score / len(q_list)
results['bl_score'] = bl_tot_score / len(q_list)
results['rtw'] = rtw
results['wtr'] = wtr
results['rtw_count'] = len(rtw)
results['wtr_count'] = len(wtr)
print("dumping")
json.dump(results, open('{}.json'.format(exp_name), 'w'))
# get new scores
# find answers wrong to right
# find answers right to wrong
avg_line_length: 32.240964 | max_line_length: 155 | alphanum_fraction: 0.763079 | count/score classes: 0 / 0 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 1,463 / 0.546712

hexsha: c31181ed7742f029eee26ce5c90c82ae4b887fbd | size: 528 | ext: py | lang: Python
repo: AlexRAV/flask-blog | path: app/articles/forms.py | head_hexsha: df8036e01794914ca0e88856ed93f8a91cc1d47a | licenses: ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:

# -*- coding: utf-8 -*-
"""Article forms."""
from flask_wtf import Form, FlaskForm
from wtforms import PasswordField, StringField, TextAreaField
from wtforms.validators import DataRequired, Email, EqualTo, Length
class NewArticleForm(FlaskForm):
title = StringField('Article title', validators=[DataRequired(), Length(min=5, max=200)])
body = TextAreaField('Article body', validators=[DataRequired(), Length(min=50)])
class NewCommentForm(FlaskForm):
body = TextAreaField('Comment', validators=[DataRequired()])
avg_line_length: 35.2 | max_line_length: 93 | alphanum_fraction: 0.746212 | count/score classes: 309 / 0.585227 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 81 / 0.153409

hexsha: c3119f2506c627ca857b498eb0bfe45c4bd66fbc | size: 9,582 | ext: py | lang: Python
repo: Rev-Jiang/Python | path: dataanalysis.py | head_hexsha: c91d5724a6843f095bfe1a05f65d9fc885e01b88 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:

#-*- coding: UTF-8 -*-
#the line above allows Chinese comments; otherwise the file is saved as ASCII by default
# Filename : dataanalysis.py
# author by : Rev_997
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def isiterable(obj):
try:
iter(obj)
return True
except TypeError:#not iterable
return False
#if it is not list or NumPy, transfer it
if not isinstance(x,list) and isiterable(x):
x=list(x)
#is and is not are used to judge if the varible is None, as None is unique.
a=None
a is None
from datetime import datetime
dt=datetime(2011,10,29,20,30,21)
dt.day
dt.minute
dt.date()
dt.time()
#datetime could be transfered to string by function striftime
dt.strftime('%m/%d/%Y %H:%M')
#string could be transfered to datetime by function strptime
datetime.strptime('20091031','%Y%m%d')
#substitute 0 for minutes and seconds
dt.replace(minute=0,second=0)
#the difference of two datetime objects produce a datetime.timedelta
dt2=datetime(2011,11,15,22,30)
delta=dt2-dt
delta
type(delta)
#add a timedelta to a datetime -- get a now datetime
dt+delta
#if elif else
if x:
pass
elif x is None:
pass
else:
pass
#for
for value in collection:
    # do something with value; use continue to skip an item or break to exit
    pass
for a,b,c in iterator:
    # do something with the unpacked values
    pass
#while
x=256
total=0
while x>0:
if total>500:
break
total+=x
x=x//2
def attempt_float(x):
try:
return float(x)
except:
return x
#once the float(x) is invalid, the except works
def attempt_float(x):
try:
return float(x)
except(TypeError,ValueError):
return x
#catch the abnormity
#value=true-expr if condition else false-expr
#same as
'''
if condition:
value=true-expr
else:
value=false-expr
'''
#about tuple
tup=4,5,6
tup
#(4,5,6)
#transfer to tuple
tuple([4,0,2])
tuple('string')
#tuple use + to generate longer tuple
#tuple.append()
#tuple.count()
#list.append()
#list.insert()
#list.pop()
#list.remove()
#list.extend()
#list.sort()
import bisect
c=[1,2,2,2,3,4,7]
#find the suitable position
bisect.bisect(c,2)
#insert the new number
bisect.insort(c,6)
###attention: bisect is suitable for ordered sequence
#----------------------------------------------------------------
#some function of list
#enumerate
for i,value in enumerate(collection):
    pass  # do something with value
some_list=['foo','bar','baz']
mapping=dict((v,i) for i,v in enumerate(some_list))
mapping
#sorted
sorted([7,2,4,6,3,5,2])
sorted('horse race')
#powerful with set
sorted(set('this is just some string'))
#zip
seq1=['foo','bar','baz']
seq2=['one','two','three']
zip(seq1,seq2)
seq3=[False,True]
zip(seq1,seq2,seq3)
#several arrays iterate together with zip
for i,(a,b) in enumerate(zip(seq1,seq2)):
print('%d: %s, %s' % (i,a,b))
#unzip
pitchers=[('Nolan','Ryan'),('Roger','Clemens'),('Schilling','Curt')]
first_names,last_names=zip(*pitchers)# * is meant zip(seq[0],seq[1],...,seq[len(seq)-1])
first_names
last_names
#reversed
list(reversed(range(10)))
#dictionary
empty_dict={}
d1={'a':'some value','b':[1,2,3,4]}
d1
#delete
del d1[5]
#or
ret=d1.pop('dummy')
ret
#get keys and values
d1.keys()
d1.values()
#combine two dictionaries
d1.update({'b':'foo','c':12})
d1
#match two list to be dictionary
'''
mapping={}
for key,value in zip(key_list,value_list):
mapping[key]=value
'''
mapping=dict(zip(range(5),reversed(range(5))))
mapping
#brief way to express circulation by dict
'''
if key in some_dict:
value=some_dict[key]
else:
value=default_value
'''
value=some_dict.get(key,default_value)
#the vlaue of dictionary is set as other list
'''
words=['apple','bat','bar','atom','book']
by_letter={}
for word in words:
letter=word[0]
if letter not in by_letter:
by_letter[letter]=[word]
else:
by_letter[letter].append(word)
by_letter
'''
by_letter.setdefault(letter,[]).append(word)
#or use defaultdict class in Module collections
from collections import defaultdict
by_letter=defaultdict(list)
for word in words:
by_letter[word[0]].append(word)
#the key of dictionary should be of hashability--unchangable
hash('string')
hash((1,2,(2,3)))
hash((1,2,[3,4]))#no hashability as list is changable
#to change a list to tuple is the easiest way to make it a key
d={}
d[tuple([1,2,3])]=5
d
#set
set([2,2,2,1,3,3])
{2,2,2,1,3,3}
a={1,2,3,4,5}
b={3,4,5,6,7,8}
#intersection
a|b
#union
a&b
#difference
a-b
#symmetric difference
a^b
#if is subset
a_set={1,2,3,4,5}
{1,2,3}.issubset(a_set)
a_set.issuperset({1,2,3})
#set could use the == to judge if the same
{1,2,3}=={3,2,1}
#the operation of the sets
a.add(x)
a.remove(x)
a.union(b)
a.intersection(b)
a.difference(b)
a.symmetric_difference(b)
a.issubset(b)
a.issuperset(b)
a.isdisjoint(b)
#the derivative of list&set&dictionary
'''
[expr for val in collection if condition]
is the same as
result=[]
for val in collection:
if condition:
result.append(expr)
'''
#list
#[expr for val in collection if condition]
strings=['a','as','bat','car','dove','python']
[x.upper() for x in strings if len(x)>2]
#dicrionary
#dict_comp={key-expr:value-expr for value in collection if condition}
loc_mapping={val:index for index, val in enumerate(strings)}
loc_mapping
#or
loc_mapping=dict((val,idx) for idx, val in enumerate(strings))
#set
#set_comp={expr for value in collection if condition}
unique_lengths={len(x) for x in strings}
unique_lengths
#list nesting derivative
all_data=[['Tom','Billy','Jeffery','Andrew','Wesley','Steven','Joe'],
['Susie','Casey','Jill','Ana','Eva','Jennifer','Stephanie']]
#find the names with two 'e' and put them in a new list
names_of_interest=[]
for names in all_data:
    enough_es=[name for name in names if name.count('e')>=2]
names_of_interest.extend(enough_es)
#which could be shorten as below:
result=[name for names in all_data for name in names
if name.count('e')>=2]
result
#flat a list consist of tuples
some_tuples=[(1,2,3),(4,5,6),(7,8,9)]
flattened=[x for tup in some_tuples for x in tup]
flattened
'''
flattened=[]
for tup in some_tuples:
for x in tup:
flattened.append(x)
'''
#which is different from:
[[x for x in tup] for tup in some_tuples]
#clean function
import re
def clean_strings(strings):
result=[]
for value in strings:
value=value.strip()
value=re.sub('[!#?]','',value) #Remove punctuation marks
value=value.title()
result.append(value)
return result
states=[' Alabama ','Georgia!','Georgia','georgia','FlOrIda','south carolina##','West virginia?']
clean_strings(states)
#or
def remove_punctuation(value):
return re.sub('[!#?]','',value)
clean_ops=[str.strip,remove_punctuation,str.title]
def clean_strings(strings,ops):
result=[]
for value in strings:
for function in ops:
value=function(value)
result.append(value)
return result
clean_strings(states,clean_ops)
#anonymous function
#lambda [arg1[, arg2, ... argN]]: expression
#exmaple 1
#use def define function
def add( x, y ):
return x + y
#use lambda expression
lambda x, y: x + y
#lambda permits default parameter
lambda x, y = 2: x + y
lambda *z: z
#call lambda function
a = lambda x, y: x + y
a( 1, 3 )
b = lambda x, y = 2: x + y
b( 1 )
b( 1, 3 )
c = lambda *z: z
c( 10, 'test')
#example2
#use def define function
def add( x, y ):
return x + y
#use lambda expression
lambda x, y: x + y
#lambda permits default parameter
lambda x, y = 2: x + y
lambda *z: z
#call lambda function
a = lambda x, y: x + y
a( 1, 3 )
b = lambda x, y = 2: x + y
b( 1 )
b( 1, 3 )
c = lambda *z: z
c( 10, 'test')
#example 3
def apply_to_list(some_list,f):
return [f(x) for x in some_list]
ints=[4,0,1,5,6]
apply_to_list(ints,lambda x:x*2)
#example 4
strings=['foo','card','bar','aaaa','abab']
strings.sort(key=lambda x: len(set(list(x))))
strings
#currying
'''
def add_numbers(x,y):
return x+y
add_five=lambda y:add_numbers(5,y)
'''
#partial function is to simplify the process
from functools import partial
add_five=partial(add_numbers,5)
#generator expression
gen=(x**2 for x in xrange(100))
gen
#the same:
def _make_gen():
for x in xrange(100):
yield x**2
gen=_make_gen()
#generator expression could be used in any python function acceptable of generator
sum(x**2 for x in xrange(100))
dict((i,i**2) for i in xrange(5))
#itertools module
import itertools
first_letter=lambda x:x[0]
names=['Alan','Adam','Wes','Will','Albert','Steven']
for letter,names in itertools.groupby(names,first_letter):
    print letter,list(names) #names is a generator
#some functions in itertools
imap(func,*iterables)
ifilter(func,iterable)
combinations(iterable,k)
permutations(iterable,k)
groupby(iterable[,keyfunc])
#documents and operation system
path='xxx.txt'
f=open(path)
for line in f:
pass
#remove EOL of every line
lines=[x.rstrip() for x in open(path)]
lines
#set a empty-lineproof doc
with open('tmp.txt','w') as handle:
handle.writelines(x for x in open(path) if len(x)>1)
open('tmp.txt').readlines()
#some function to construct documents
read([size])
readlines([size])
write(str)
close()
flush()
seek(pos)
tell()
closed
avg_line_length: 20.08805 | max_line_length: 100 | alphanum_fraction: 0.644124 | count/score classes: 0 / 0 | generators: 63 / 0.00655 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 4,392 / 0.456644

hexsha: c311dcd3f870bbdf6b67118d6ccc561653945f40 | size: 259 | ext: py | lang: Python
repo: panovr/Brain-Tumor-Segmentation | path: show_model_info.py | head_hexsha: bf1ac2360af46a484d632474ce93de339ad2b496 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:

import bts.model as model
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
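# Model/summary configuration: batch size and the filter widths passed to the
# DynamicUNet constructor below.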
BATCH_SIZE = 6
FILTER_LIST = [16,32,64,128,256]
unet_model = model.DynamicUNet(FILTER_LIST)
unet_model.summary(batch_size=BATCH_SIZE, device=device)
avg_line_length: 28.777778 | max_line_length: 69 | alphanum_fraction: 0.783784 | count/score classes: 0 / 0 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 11 / 0.042471

hexsha: c31289e9e024e29ebc9122d648b85bcf484eedb5 | size: 1,031 | ext: py | lang: Python
repo: andriis/bravado | path: docs/source/conf.py | head_hexsha: 0d2ef182df4eb38641282e2f839c4dc813ee4349 | licenses: ["BSD-3-Clause"]
max_stars_count: 600 (2015-05-20T00:37:21.000Z to 2022-03-09T03:48:38.000Z) | max_issues_count: 323 (2015-05-19T22:35:29.000Z to 2021-12-09T12:55:09.000Z) | max_forks_count: 137 (2015-05-14T19:51:58.000Z to 2022-01-31T19:36:32.000Z)
content:

# -*- coding: utf-8 -*-
import sphinx_rtd_theme
# -- General configuration -----------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bravado'
copyright = u'2013, Digium, Inc.; 2014-2015, Yelp, Inc'
exclude_patterns = []
pygments_style = 'sphinx'
autoclass_content = 'both'
# -- Options for HTML output ---------------------------------------------
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_static_path = ['_static']
htmlhelp_basename = 'bravado-pydoc'
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'bravado-core': ('https://bravado-core.readthedocs.io/en/latest/', None),
}
avg_line_length: 22.413043 | max_line_length: 77 | alphanum_fraction: 0.645975 | count/score classes: 0 / 0 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 639 / 0.619787

hexsha: c313212d51b9e2cc91e003a4faa89dafdee74dd8 | size: 13,900 | ext: py | lang: Python
repo: Fogapod/edgedb-python | path: edgedb/_testbase.py | head_hexsha: 377805660e3455bef536412bd5467b435753b3a5 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:

#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import atexit
import contextlib
import functools
import inspect
import json
import logging
import os
import re
import unittest
import edgedb
from edgedb import _cluster as edgedb_cluster
@contextlib.contextmanager
def silence_asyncio_long_exec_warning():
def flt(log_record):
msg = log_record.getMessage()
return not msg.startswith('Executing ')
logger = logging.getLogger('asyncio')
logger.addFilter(flt)
try:
yield
finally:
logger.removeFilter(flt)
_default_cluster = None
def _init_cluster(data_dir=None, *, cleanup_atexit=True):
if (not os.environ.get('EDGEDB_DEBUG_SERVER') and
not os.environ.get('EDGEDB_LOG_LEVEL')):
_env = {'EDGEDB_LOG_LEVEL': 'silent'}
else:
_env = {}
if data_dir is None:
cluster = edgedb_cluster.TempCluster(env=_env, testmode=True)
destroy = True
else:
cluster = edgedb_cluster.Cluster(data_dir=data_dir, env=_env)
destroy = False
if cluster.get_status() == 'not-initialized':
cluster.init()
cluster.start(port='dynamic')
cluster.set_superuser_password('test')
if cleanup_atexit:
atexit.register(_shutdown_cluster, cluster, destroy=destroy)
return cluster
def _start_cluster(*, cleanup_atexit=True):
global _default_cluster
if _default_cluster is None:
cluster_addr = os.environ.get('EDGEDB_TEST_CLUSTER_ADDR')
if cluster_addr:
conn_spec = json.loads(cluster_addr)
_default_cluster = edgedb_cluster.RunningCluster(**conn_spec)
else:
data_dir = os.environ.get('EDGEDB_TEST_DATA_DIR')
_default_cluster = _init_cluster(
data_dir=data_dir, cleanup_atexit=cleanup_atexit)
return _default_cluster
def _shutdown_cluster(cluster, *, destroy=True):
cluster.stop()
if destroy:
cluster.destroy()
class TestCaseMeta(type(unittest.TestCase)):
_database_names = set()
@staticmethod
def _iter_methods(bases, ns):
for base in bases:
for methname in dir(base):
if not methname.startswith('test_'):
continue
meth = getattr(base, methname)
if not inspect.iscoroutinefunction(meth):
continue
yield methname, meth
for methname, meth in ns.items():
if not methname.startswith('test_'):
continue
if not inspect.iscoroutinefunction(meth):
continue
yield methname, meth
@classmethod
def wrap(mcls, meth):
@functools.wraps(meth)
def wrapper(self, *args, __meth__=meth, **kwargs):
try_no = 1
while True:
try:
# There might be unobvious serializability
# anomalies across the test suite, so, rather
# than hunting them down every time, simply
# retry the test.
self.loop.run_until_complete(
__meth__(self, *args, **kwargs))
except edgedb.TransactionSerializationError:
if try_no == 3:
raise
else:
self.loop.run_until_complete(self.con.execute(
'ROLLBACK;'
))
try_no += 1
else:
break
return wrapper
@classmethod
def add_method(mcls, methname, ns, meth):
ns[methname] = mcls.wrap(meth)
def __new__(mcls, name, bases, ns):
for methname, meth in mcls._iter_methods(bases, ns.copy()):
if methname in ns:
del ns[methname]
mcls.add_method(methname, ns, meth)
cls = super().__new__(mcls, name, bases, ns)
if not ns.get('BASE_TEST_CLASS') and hasattr(cls, 'get_database_name'):
dbname = cls.get_database_name()
if name in mcls._database_names:
raise TypeError(
f'{name} wants duplicate database name: {dbname}')
mcls._database_names.add(name)
return cls
class TestCase(unittest.TestCase, metaclass=TestCaseMeta):
@classmethod
def setUpClass(cls):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
cls.loop = loop
@classmethod
def tearDownClass(cls):
cls.loop.close()
asyncio.set_event_loop(None)
def add_fail_notes(self, **kwargs):
if not hasattr(self, 'fail_notes'):
self.fail_notes = {}
self.fail_notes.update(kwargs)
@contextlib.contextmanager
def annotate(self, **kwargs):
# Annotate the test in case the nested block of code fails.
try:
yield
except Exception:
self.add_fail_notes(**kwargs)
raise
@contextlib.contextmanager
def assertRaisesRegex(self, exception, regex, msg=None,
**kwargs):
with super().assertRaisesRegex(exception, regex, msg=msg):
try:
yield
except BaseException as e:
if isinstance(e, exception):
for attr_name, expected_val in kwargs.items():
val = getattr(e, attr_name)
if val != expected_val:
raise self.failureException(
f'{exception.__name__} context attribute '
f'{attr_name!r} is {val} (expected '
f'{expected_val!r})') from e
raise
class ClusterTestCase(TestCase):
BASE_TEST_CLASS = True
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.cluster = _start_cluster(cleanup_atexit=True)
class ConnectedTestCaseMixin:
@classmethod
async def connect(cls, *,
cluster=None,
database='edgedb',
user='edgedb',
password='test'):
conargs = cls.get_connect_args(
cluster=cluster, database=database, user=user, password=password)
return await edgedb.async_connect(**conargs)
@classmethod
def get_connect_args(cls, *,
cluster=None,
database='edgedb',
user='edgedb',
password='test'):
if cluster is None:
cluster = cls.cluster
conargs = cluster.get_connect_args().copy()
conargs.update(dict(user=user,
password=password,
database=database))
return conargs
class DatabaseTestCase(ClusterTestCase, ConnectedTestCaseMixin):
SETUP = None
TEARDOWN = None
SCHEMA = None
SETUP_METHOD = None
TEARDOWN_METHOD = None
# Some tests may want to manage transactions manually,
# in which case ISOLATED_METHODS will be False.
ISOLATED_METHODS = True
# Turns on "EdgeDB developer" mode which allows using restricted
# syntax like FROM SQL and similar. It allows modifying standard
# library (e.g. declaring casts).
INTERNAL_TESTMODE = True
BASE_TEST_CLASS = True
def setUp(self):
if self.INTERNAL_TESTMODE:
self.loop.run_until_complete(
self.con.execute(
'CONFIGURE SESSION SET __internal_testmode := true;'))
if self.ISOLATED_METHODS:
self.xact = self.con.transaction()
self.loop.run_until_complete(self.xact.start())
if self.SETUP_METHOD:
self.loop.run_until_complete(
self.con.execute(self.SETUP_METHOD))
super().setUp()
def tearDown(self):
try:
if self.TEARDOWN_METHOD:
self.loop.run_until_complete(
self.con.execute(self.TEARDOWN_METHOD))
finally:
try:
if self.ISOLATED_METHODS:
self.loop.run_until_complete(self.xact.rollback())
del self.xact
if self.con.is_in_transaction():
self.loop.run_until_complete(
self.con.execute('ROLLBACK'))
raise AssertionError(
'test connection is still in transaction '
'*after* the test')
if not self.ISOLATED_METHODS:
self.loop.run_until_complete(
self.con.execute('RESET ALIAS *;'))
finally:
super().tearDown()
@classmethod
def setUpClass(cls):
super().setUpClass()
dbname = cls.get_database_name()
cls.admin_conn = None
cls.con = None
class_set_up = os.environ.get('EDGEDB_TEST_CASES_SET_UP')
# Only open an extra admin connection if necessary.
if not class_set_up:
script = f'CREATE DATABASE {dbname};'
cls.admin_conn = cls.loop.run_until_complete(cls.connect())
cls.loop.run_until_complete(cls.admin_conn.execute(script))
cls.con = cls.loop.run_until_complete(cls.connect(database=dbname))
if not class_set_up:
script = cls.get_setup_script()
if script:
# The setup is expected to contain a CREATE MIGRATION,
# which needs to be wrapped in a transaction.
tx = cls.con.transaction()
cls.loop.run_until_complete(tx.start())
cls.loop.run_until_complete(cls.con.execute(script))
cls.loop.run_until_complete(tx.commit())
del tx
@classmethod
def get_database_name(cls):
if cls.__name__.startswith('TestEdgeQL'):
dbname = cls.__name__[len('TestEdgeQL'):]
elif cls.__name__.startswith('Test'):
dbname = cls.__name__[len('Test'):]
else:
dbname = cls.__name__
return dbname.lower()
@classmethod
def get_setup_script(cls):
script = ''
# Look at all SCHEMA entries and potentially create multiple
# modules, but always create the 'test' module.
schema = ['\nmodule test {}']
for name, val in cls.__dict__.items():
m = re.match(r'^SCHEMA(?:_(\w+))?', name)
if m:
module_name = (m.group(1) or 'test').lower().replace(
'__', '.')
with open(val, 'r') as sf:
module = sf.read()
schema.append(f'\nmodule {module_name} {{ {module} }}')
# Don't wrap the script into a transaction here, so that
# potentially it's easier to stitch multiple such scripts
# together in a fashion similar to what `edb inittestdb` does.
script += f'\nSTART MIGRATION TO {{ {"".join(schema)} }};'
script += f'\nPOPULATE MIGRATION; \nCOMMIT MIGRATION;'
if cls.SETUP:
if not isinstance(cls.SETUP, (list, tuple)):
scripts = [cls.SETUP]
else:
scripts = cls.SETUP
for scr in scripts:
if '\n' not in scr and os.path.exists(scr):
with open(scr, 'rt') as f:
setup = f.read()
else:
setup = scr
script += '\n' + setup
return script.strip(' \n')
@classmethod
def tearDownClass(cls):
script = ''
class_set_up = os.environ.get('EDGEDB_TEST_CASES_SET_UP')
if cls.TEARDOWN and not class_set_up:
script = cls.TEARDOWN.strip()
try:
if script:
cls.loop.run_until_complete(
cls.con.execute(script))
finally:
try:
cls.loop.run_until_complete(cls.con.aclose())
if not class_set_up:
dbname = cls.get_database_name()
script = f'DROP DATABASE {dbname};'
cls.loop.run_until_complete(
cls.admin_conn.execute(script))
finally:
try:
if cls.admin_conn is not None:
cls.loop.run_until_complete(
cls.admin_conn.aclose())
finally:
super().tearDownClass()
class AsyncQueryTestCase(DatabaseTestCase):
BASE_TEST_CLASS = True
class SyncQueryTestCase(DatabaseTestCase):
BASE_TEST_CLASS = True
def setUp(self):
super().setUp()
cls = type(self)
cls.async_con = cls.con
conargs = cls.get_connect_args().copy()
conargs.update(dict(database=cls.async_con.dbname))
cls.con = edgedb.connect(**conargs)
def tearDown(self):
cls = type(self)
cls.con.close()
cls.con = cls.async_con
del cls.async_con
_lock_cnt = 0
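# Build a lock key that is unique per process and per call (pid * 1000 + counter).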
def gen_lock_key():
global _lock_cnt
_lock_cnt += 1
return os.getpid() * 1000 + _lock_cnt
avg_line_length: 30.151844 | max_line_length: 79 | alphanum_fraction: 0.566403 | count/score classes: 11,150 / 0.802158 | generators: 1,817 / 0.130719 | decorators: 7,936 / 0.570935 | async_functions: 350 / 0.02518 | documentation: 2,480 / 0.178417

hexsha: c315663a28ae143f4027a8b0b899801904c9cfc7 | size: 824 | ext: py | lang: Python
repo: mlshenkai/KGQA | path: konwledge_extraction/ner/bert_crf_ner/losses/focal_loss.py | head_hexsha: 08e72d68da6519aaca7f39fabf8c0194bebd0314 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:

# -*- coding: utf-8 -*-
# @Author: Kai Shen
# @Created Time: 2022/2/23 10:14 AM
# @Organization: YQN
# @Email: [email protected]
import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
def __init__(self, gamma=2, weight=None, ignore_index=-100):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.weight = weight
self.ignore_index = ignore_index
def forward(self, output, target):
"""
:param output: [N, CLASS]
:param target: [N,]
:return:
"""
        logpt = F.log_softmax(output, dim=1)  # [N, CLASS] log-probabilities
        pt = torch.exp(logpt)  # [N, CLASS] probabilities
        logpt = (1 - pt) ** self.gamma * logpt  # down-weight well-classified examples
        loss = F.nll_loss(logpt, target, self.weight, ignore_index=self.ignore_index)
return loss
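
# Illustrative usage sketch (shapes and values below are assumptions for
# demonstration): logits are [N, CLASS] scores and target holds class indices.
if __name__ == "__main__":
    example_logits = torch.randn(4, 9)           # 4 samples, 9 classes
    example_target = torch.tensor([0, 2, 1, 8])  # gold class indices
    criterion = FocalLoss(gamma=2)
    print(criterion(example_logits, example_target))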
avg_line_length: 28.413793 | max_line_length: 85 | alphanum_fraction: 0.598301 | count/score classes: 625 / 0.758495 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 241 / 0.292476

hexsha: c3159e702eacd0f494cdd9cb0e3428247b34b8ae | size: 669 | ext: py | lang: Python
repo: shandou/pyjanitor | path: tests/biology/test_join_fasta.py | head_hexsha: d7842613b4e4a7532a88f673fd54e94c3ba5a96b | licenses: ["MIT"]
max_stars_count: 1 (2021-03-25T10:46:57.000Z) | max_issues_count: null | max_forks_count: null
content:

import importlib
import os
import pytest
from helpers import running_on_ci
import janitor.biology # noqa: F403, F401
# Skip all tests if Biopython not installed
pytestmark = pytest.mark.skipif(
(importlib.util.find_spec("Bio") is None) & ~running_on_ci(),
reason="Biology tests relying on Biopython only required for CI",
)
@pytest.mark.biology
def test_join_fasta(biodf):
"""Test adding sequence from FASTA file in ``sequence`` column."""
df = biodf.join_fasta(
filename=os.path.join(pytest.TEST_DATA_DIR, "sequences.fasta"),
id_col="sequence_accession",
column_name="sequence",
)
assert "sequence" in df.columns
avg_line_length: 25.730769 | max_line_length: 71 | alphanum_fraction: 0.714499 | count/score classes: 0 / 0 | generators: 0 / 0 | decorators: 330 / 0.493274 | async_functions: 0 / 0 | documentation: 246 / 0.367713

hexsha: c3166bb775db3bf02b0cf82cc41168152ed9ad5b | size: 12,182 | ext: py | lang: Python
repo: ungarj/tilematrix | path: test/test_tilepyramid.py | head_hexsha: f5797cf2056f7de8de8f284db40b10943e5e40fb | licenses: ["MIT"]
max_stars_count: 16 (2016-07-27T22:21:12.000Z to 2022-01-15T18:13:43.000Z) | max_issues_count: 30 (2015-10-09T18:10:13.000Z to 2022-03-15T16:56:54.000Z) | max_forks_count: 2 (2021-07-30T07:23:13.000Z to 2021-09-13T12:24:11.000Z)
content:

"""TilePyramid creation."""
import pytest
from shapely.geometry import Point
from shapely.ops import unary_union
from types import GeneratorType
from tilematrix import TilePyramid, snap_bounds
def test_init():
"""Initialize TilePyramids."""
for tptype in ["geodetic", "mercator"]:
assert TilePyramid(tptype)
with pytest.raises(ValueError):
TilePyramid("invalid")
with pytest.raises(ValueError):
TilePyramid()
assert hash(TilePyramid(tptype))
def test_metatiling():
"""Metatiling setting."""
for metatiling in [1, 2, 4, 8, 16]:
assert TilePyramid("geodetic", metatiling=metatiling)
try:
TilePyramid("geodetic", metatiling=5)
raise Exception()
except ValueError:
pass
def test_tile_size():
"""Tile sizes."""
for tile_size in [128, 256, 512, 1024]:
tp = TilePyramid("geodetic", tile_size=tile_size)
assert tp.tile_size == tile_size
def test_intersect():
"""Get intersecting Tiles."""
# same metatiling
tp = TilePyramid("geodetic")
intersect_tile = TilePyramid("geodetic").tile(5, 1, 1)
control = {(5, 1, 1)}
test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)}
assert control == test_tiles
# smaller metatiling
tp = TilePyramid("geodetic")
intersect_tile = TilePyramid("geodetic", metatiling=2).tile(5, 1, 1)
control = {(5, 2, 2), (5, 2, 3), (5, 3, 3), (5, 3, 2)}
test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)}
assert control == test_tiles
# bigger metatiling
tp = TilePyramid("geodetic", metatiling=2)
intersect_tile = TilePyramid("geodetic").tile(5, 1, 1)
control = {(5, 0, 0)}
test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)}
assert control == test_tiles
intersect_tile = TilePyramid("geodetic").tile(4, 12, 31)
control = {(4, 6, 15)}
test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)}
assert control == test_tiles
# different CRSes
tp = TilePyramid("geodetic")
intersect_tile = TilePyramid("mercator").tile(5, 1, 1)
try:
test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)}
raise Exception()
except ValueError:
pass
def test_tilepyramid_compare(grid_definition_proj, grid_definition_epsg):
"""Comparison operators."""
gproj, gepsg = grid_definition_proj, grid_definition_epsg
# predefined
assert TilePyramid("geodetic") == TilePyramid("geodetic")
assert TilePyramid("geodetic") != TilePyramid("geodetic", metatiling=2)
assert TilePyramid("geodetic") != TilePyramid("geodetic", tile_size=512)
assert TilePyramid("mercator") == TilePyramid("mercator")
assert TilePyramid("mercator") != TilePyramid("mercator", metatiling=2)
assert TilePyramid("mercator") != TilePyramid("mercator", tile_size=512)
# epsg based
assert TilePyramid(gepsg) == TilePyramid(gepsg)
assert TilePyramid(gepsg) != TilePyramid(gepsg, metatiling=2)
assert TilePyramid(gepsg) != TilePyramid(gepsg, tile_size=512)
# proj based
assert TilePyramid(gproj) == TilePyramid(gproj)
assert TilePyramid(gproj) != TilePyramid(gproj, metatiling=2)
assert TilePyramid(gproj) != TilePyramid(gproj, tile_size=512)
# altered bounds
abounds = dict(**gproj)
abounds.update(bounds=(-5000000.0, -5000000.0, 5000000.0, 5000000.0))
assert TilePyramid(abounds) == TilePyramid(abounds)
assert TilePyramid(gproj) != TilePyramid(abounds)
# other type
assert TilePyramid("geodetic") != "string"
def test_grid_compare(grid_definition_proj, grid_definition_epsg):
"""Comparison operators."""
gproj, gepsg = grid_definition_proj, grid_definition_epsg
# predefined
assert TilePyramid("geodetic").grid == TilePyramid("geodetic").grid
assert TilePyramid("geodetic").grid == TilePyramid("geodetic", metatiling=2).grid
assert TilePyramid("geodetic").grid == TilePyramid("geodetic", tile_size=512).grid
assert TilePyramid("mercator").grid == TilePyramid("mercator").grid
assert TilePyramid("mercator").grid == TilePyramid("mercator", metatiling=2).grid
assert TilePyramid("mercator").grid == TilePyramid("mercator", tile_size=512).grid
# epsg based
assert TilePyramid(gepsg).grid == TilePyramid(gepsg).grid
assert TilePyramid(gepsg).grid == TilePyramid(gepsg, metatiling=2).grid
assert TilePyramid(gepsg).grid == TilePyramid(gepsg, tile_size=512).grid
# proj based
assert TilePyramid(gproj).grid == TilePyramid(gproj).grid
assert TilePyramid(gproj).grid == TilePyramid(gproj, metatiling=2).grid
assert TilePyramid(gproj).grid == TilePyramid(gproj, tile_size=512).grid
# altered bounds
abounds = dict(**gproj)
abounds.update(bounds=(-5000000.0, -5000000.0, 5000000.0, 5000000.0))
assert TilePyramid(abounds).grid == TilePyramid(abounds).grid
assert TilePyramid(gproj).grid != TilePyramid(abounds).grid
def test_tile_from_xy():
tp = TilePyramid("geodetic")
zoom = 5
# point inside tile
p_in = (0.5, 0.5, zoom)
control_in = [
((5, 15, 32), "rb"),
((5, 15, 32), "lb"),
((5, 15, 32), "rt"),
((5, 15, 32), "lt"),
]
for tile_id, on_edge_use in control_in:
tile = tp.tile_from_xy(*p_in, on_edge_use=on_edge_use)
assert tile.id == tile_id
assert Point(p_in[0], p_in[1]).within(tile.bbox())
# point is on tile edge
p_edge = (0, 0, zoom)
control_edge = [
((5, 16, 32), "rb"),
((5, 16, 31), "lb"),
((5, 15, 32), "rt"),
((5, 15, 31), "lt"),
]
for tile_id, on_edge_use in control_edge:
tile = tp.tile_from_xy(*p_edge, on_edge_use=on_edge_use)
assert tile.id == tile_id
assert Point(p_edge[0], p_edge[1]).touches(tile.bbox())
with pytest.raises(ValueError):
tp.tile_from_xy(180, -90, zoom, on_edge_use="rb")
with pytest.raises(ValueError):
tp.tile_from_xy(180, -90, zoom, on_edge_use="lb")
tile = tp.tile_from_xy(180, -90, zoom, on_edge_use="rt")
assert tile.id == (5, 31, 0)
tile = tp.tile_from_xy(180, -90, zoom, on_edge_use="lt")
assert tile.id == (5, 31, 63)
with pytest.raises(TypeError):
tp.tile_from_xy(-180, 90, zoom, on_edge_use="lt")
with pytest.raises(TypeError):
tp.tile_from_xy(-180, 90, zoom, on_edge_use="rt")
tile = tp.tile_from_xy(-180, 90, zoom, on_edge_use="rb")
assert tile.id == (5, 0, 0)
tile = tp.tile_from_xy(-180, 90, zoom, on_edge_use="lb")
assert tile.id == (5, 0, 63)
with pytest.raises(ValueError):
tp.tile_from_xy(-180, 90, zoom, on_edge_use="invalid")
def test_tiles_from_bounds(grid_definition_proj):
# global pyramids
tp = TilePyramid("geodetic")
parent = tp.tile(8, 5, 5)
from_bounds = set([t.id for t in tp.tiles_from_bounds(parent.bounds(), 9)])
children = set([t.id for t in parent.get_children()])
assert from_bounds == children
# non-global pyramids
tp = TilePyramid(grid_definition_proj)
parent = tp.tile(8, 0, 0)
from_bounds = set([t.id for t in tp.tiles_from_bounds(parent.bounds(), 9)])
children = set([t.id for t in parent.get_children()])
assert from_bounds == children
def test_tiles_from_bounds_batch_by_row():
tp = TilePyramid("geodetic")
bounds = (0, 0, 90, 90)
zoom = 8
tiles = tp.tiles_from_bounds(bounds, zoom, batch_by="row")
assert isinstance(tiles, GeneratorType)
assert list(tiles)
previous_row = None
tiles = 0
for tile_row in tp.tiles_from_bounds(bounds, zoom, batch_by="row"):
assert isinstance(tile_row, GeneratorType)
previous_tile = None
for tile in tile_row:
tiles += 1
if previous_row is None:
if previous_tile is not None:
assert tile.col == previous_tile.col + 1
else:
if previous_tile is not None:
assert tile.col == previous_tile.col + 1
assert tile.row == previous_tile.row
assert tile.row == previous_row + 1
previous_tile = tile
previous_row = tile.row
assert tiles == len(list(tp.tiles_from_bounds(bounds, zoom)))
def test_tiles_from_bounds_batch_by_column():
tp = TilePyramid("geodetic")
bounds = (0, 0, 90, 90)
zoom = 8
tiles = tp.tiles_from_bounds(bounds, zoom, batch_by="column")
assert isinstance(tiles, GeneratorType)
assert list(tiles)
previous_column = None
tiles = 0
for tile_column in tp.tiles_from_bounds(bounds, zoom, batch_by="column"):
assert isinstance(tile_column, GeneratorType)
previous_tile = None
for tile in tile_column:
tiles += 1
if previous_column is None:
if previous_tile is not None:
assert tile.row == previous_tile.row + 1
else:
if previous_tile is not None:
assert tile.row == previous_tile.row + 1
assert tile.col == previous_tile.col
assert tile.col == previous_column + 1
previous_tile = tile
previous_column = tile.col
assert tiles == len(list(tp.tiles_from_bounds(bounds, zoom)))
def test_tiles_from_bounds_batch_by_row_antimeridian_bounds():
tp = TilePyramid("geodetic")
bounds = (0, 0, 185, 95)
zoom = 8
tiles = tp.tiles_from_bounds(bounds, zoom, batch_by="row")
assert isinstance(tiles, GeneratorType)
assert list(tiles)
previous_row = None
tiles = 0
for tile_row in tp.tiles_from_bounds(bounds, zoom, batch_by="row"):
assert isinstance(tile_row, GeneratorType)
previous_tile = None
for tile in tile_row:
tiles += 1
if previous_row is None:
if previous_tile is not None:
assert tile.col > previous_tile.col
else:
if previous_tile is not None:
assert tile.col > previous_tile.col
assert tile.row == previous_tile.row
assert tile.row > previous_row
previous_tile = tile
previous_row = tile.row
assert tiles == len(list(tp.tiles_from_bounds(bounds, zoom)))
def test_tiles_from_bounds_batch_by_row_both_antimeridian_bounds():
tp = TilePyramid("geodetic")
bounds = (-185, 0, 185, 95)
zoom = 8
tiles = tp.tiles_from_bounds(bounds, zoom, batch_by="row")
assert isinstance(tiles, GeneratorType)
assert list(tiles)
previous_row = None
tiles = 0
for tile_row in tp.tiles_from_bounds(bounds, zoom, batch_by="row"):
assert isinstance(tile_row, GeneratorType)
previous_tile = None
for tile in tile_row:
tiles += 1
if previous_row is None:
if previous_tile is not None:
assert tile.col == previous_tile.col + 1
else:
if previous_tile is not None:
assert tile.col == previous_tile.col + 1
assert tile.row == previous_tile.row
assert tile.row == previous_row + 1
previous_tile = tile
previous_row = tile.row
assert tiles == len(list(tp.tiles_from_bounds(bounds, zoom)))
def test_snap_bounds():
bounds = (0, 1, 2, 3)
tp = TilePyramid("geodetic")
zoom = 8
snapped = snap_bounds(bounds=bounds, tile_pyramid=tp, zoom=zoom)
control = unary_union(
[tile.bbox() for tile in tp.tiles_from_bounds(bounds, zoom)]
).bounds
assert snapped == control
pixelbuffer = 10
snapped = snap_bounds(
bounds=bounds, tile_pyramid=tp, zoom=zoom, pixelbuffer=pixelbuffer
)
control = unary_union(
[tile.bbox(pixelbuffer) for tile in tp.tiles_from_bounds(bounds, zoom)]
).bounds
assert snapped == control
def test_deprecated():
tp = TilePyramid("geodetic")
assert tp.type
assert tp.srid
assert tp.tile_x_size(0)
assert tp.tile_y_size(0)
assert tp.tile_height(0)
assert tp.tile_width(0)
avg_line_length: 34.315493 | max_line_length: 86 | alphanum_fraction: 0.64111 | count/score classes: 0 / 0 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 1,057 / 0.086767

hexsha: c316f38f732cf3a6b4ada4ff98b624a3bbbf8f67 | size: 1,180 | ext: py | lang: Python
repo: giovannicuriel/report_builder | path: setup.py | head_hexsha: e728e77d7647f248198e39521278ed246171b256 | licenses: ["BSD-2-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2019-11-25T12:51:29.000Z)
content:

# -*- coding: utf-8 -*-
"""
setup.py script
"""
import io
from collections import OrderedDict
from setuptools import setup, find_packages
with io.open('README.md', 'rt', encoding='utf8') as f:
README = f.read()
setup(
name='reportbuilder',
version='0.0.1',
url='http://github.com/giovannicuriel/report-builder',
project_urls=OrderedDict((
('Code', 'https://github.com/giovannicuriel/report-builder.git'),
('Issue tracker', 'https://github.com/giovannicuriel/report-builder/issues'),
)),
license='BSD-2-Clause',
author='Giovanni Curiel dos Santos',
author_email='[email protected]',
description='Sample package for Python training courses',
long_description=README,
packages=["reportbuilder"],
include_package_data=True,
zip_safe=False,
    platforms=["any"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
],
install_requires=[
'flask==1.1.1'
],
entry_points={
'console_scripts': [
'report-builder = reportbuilder.app:main'
]
}
)
avg_line_length: 26.818182 | max_line_length: 85 | alphanum_fraction: 0.634746 | count/score classes: 0 / 0 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 582 / 0.49322

hexsha: c3184306de4eb3bd08a9f52149a34046ea7333f3 | size: 2,296 | ext: py | lang: Python
repo: lucaruzzola/pyfunds | path: pyfunds/option.py | head_hexsha: 498c5a0a3eb9423ca9f267b8d8c47f0f23987f3d | licenses: ["MIT"]
max_stars_count: 6 (2021-08-16T16:15:05.000Z to 2022-03-21T15:46:29.000Z) | max_issues_count: null | max_forks_count: null
content:

from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Callable, Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class NoElement(Exception):
pass
class Option(ABC, Generic[T]):
def __init__(self):
super().__init__()
@staticmethod
def apply(value: T):
return Some(value) if value is not None else Nothing()
@abstractmethod
def _is_empty(self) -> bool:
pass
@abstractmethod
def get(self) -> T:
pass
def get_or_else(self, default: T) -> T:
return default if self._is_empty() else self.get()
@staticmethod
def when(condition: bool, value: T) -> Option[T]:
return Some(value) if condition else Nothing()
def map(self, f: Callable[[T], U]) -> Option[U]:
return Some(f(self.get())) if not self._is_empty() else self
def flat_map(self, f: Callable[[T], Option[U]]) -> Option[U]:
return f(self.get()) if not self._is_empty() else self
def fold(self, default: U, fs: Callable[[T], U]) -> U:
return default if self._is_empty() else fs(self.get())
def __str__(self) -> str:
return f"Option is {'Some' if not self._is_empty() else 'Nothing'}" + (
f", with value: {self.get().__repr__()} of type {type(self.get())}"
if not self._is_empty()
else ""
)
def __repr__(self) -> str:
return "pyfunds.Option"
def __eq__(self, other: Option[T]) -> bool:
if self._is_empty():
return other._is_empty()
elif other._is_empty():
return False
else:
return self.get() == other.get()
def __ne__(self, other: Option[T]) -> bool:
return not self == other
class Some(Option[T]):
def __init__(self, value: T):
super().__init__()
self._value = value
def _is_empty(self) -> bool:
return False
def get(self) -> T:
return self._value
def __repr__(self) -> str:
return f"pyfunds.Some({self.get()})"
class Nothing(Option[T]):
def __init__(self):
super().__init__()
def _is_empty(self) -> bool:
return True
def get(self) -> T:
raise NoElement
def __repr__(self) -> str:
return "pyfunds.Nothing"
avg_line_length: 23.916667 | max_line_length: 79 | alphanum_fraction: 0.582753 | count/score classes: 2,131 / 0.928136 | generators: 0 / 0 | decorators: 336 / 0.146341 | async_functions: 0 / 0 | documentation: 197 / 0.085801

hexsha: c318b45aea4400e446baec5f077cb60419864b6f | size: 1,237 | ext: py | lang: Python
repo: Kaju-Bubanja/PoseCNN | path: generate_dataset/visualize_mask.py | head_hexsha: c2f7c4e8f98bc7c67d5cbc0be3167d3cb3bea396 | licenses: ["MIT"]
max_stars_count: 20 (2018-08-30T08:02:56.000Z to 2021-09-15T12:22:22.000Z) | max_issues_count: null | max_forks_count: 5 (2018-10-16T15:01:15.000Z to 2020-08-29T03:52:51.000Z)
content:

import cv2
import rosbag
import rospy
from cv_bridge import CvBridge
def main():
# bag = rosbag.Bag("/home/satco/PycharmProjects/PoseCNN/bag/dataset_one_box.bag")
bag = rosbag.Bag("/home/satco/PycharmProjects/PoseCNN/bag/test.bag")
# topics = ["/camera1/color/image_raw", "/camera2/color/image_raw"]
topics = ["/camera/color/image_raw"]
# counter = -20
counter = 0
bridge = CvBridge()
for topic, msg, t in bag.read_messages(topics=topics, start_time=rospy.Time(1537799716, 30952)):
print(msg.header.stamp)
# if topic == "/camera1/color/image_raw":
if topic == "/camera/color/image_raw":
# print(msg.header.stamp)
if counter < 0:
counter += 1
continue
# print("Showing image " + str(counter))
image = bridge.imgmsg_to_cv2(msg, "bgr8")
mask_name = "data/images/cube" + str(counter) + ".png"
mask = cv2.imread(mask_name)
alpha = 0.5
image_with_mask = cv2.addWeighted(mask, alpha, image, 1 - alpha, 0)
cv2.imshow("Image with mask", image_with_mask)
cv2.waitKey(5000)
counter += 1
if __name__ == "__main__":
main()
 | 35.342857 | 100 | 0.595796 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 426 | 0.344382 |
c318c41ae02a5b1bce71b7e42ebcd848cf95e1f3 | 909 | py | Python | itembase/core/urls/location_urls.py | wedwardbeck/ibase | 5647fa5aff6c1bdc99b6c93884ff0d5aef17d85b | ["MIT"] | null | null | null | itembase/core/urls/location_urls.py | wedwardbeck/ibase | 5647fa5aff6c1bdc99b6c93884ff0d5aef17d85b | ["MIT"] | 9 | 2020-01-17T14:16:08.000Z | 2020-02-18T15:07:40.000Z | itembase/core/urls/location_urls.py | wedwardbeck/ibase | 5647fa5aff6c1bdc99b6c93884ff0d5aef17d85b | ["MIT"] | null | null | null |
from django.urls import path
from itembase.core.views.location_views import LocationAddressCreateView, LocationAddressDetailView, \
LocationAddressUpdateView, LocationCreateView, LocationDeleteView, LocationDetailView, LocationListView, \
LocationUpdateView
app_name = "locations"
urlpatterns = [
path("", LocationListView.as_view(), name="list"),
path("new/", LocationCreateView.as_view(), name="new"),
path("edit/<int:pk>/", LocationUpdateView.as_view(), name="edit"),
path("delete/<int:pk>/", LocationDeleteView.as_view(), name="delete"),
path("<int:pk>/", LocationDetailView.as_view(), name="view"),
path('<int:pk>/address-new/', LocationAddressCreateView.as_view(), name='address-new'),
path('address/<int:pk>', LocationAddressDetailView.as_view(), name='address-view'),
path('address/edit/<int:pk>', LocationAddressUpdateView.as_view(), name='address-edit'),
]
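# Editor's note (illustrative, not part of the original file): because
# app_name = "locations" is declared above, a project-level URLconf can mount
# these routes under a namespace; the "locations/" prefix and project module
# below are assumptions.
#
#     # project/urls.py (sketch)
#     from django.urls import include, path
#
#     urlpatterns = [
#         path("locations/", include("itembase.core.urls.location_urls")),
#     ]
#
# Templates and views can then reverse names such as "locations:edit" or
# "locations:address-view".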
| 50.5 | 110 | 0.729373 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 200 | 0.220022 |
c318d66c0ef14a2821d36ef3adf7ffcb264139ea | 3,455 | py | Python | web/addons/product_margin/wizard/product_margin.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | ["Apache-2.0"] | 1 | 2019-12-29T11:53:56.000Z | 2019-12-29T11:53:56.000Z | odoo/addons/product_margin/wizard/product_margin.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | ["MIT"] | null | null | null | odoo/addons/product_margin/wizard/product_margin.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | ["MIT"] | 3 | 2020-10-08T14:42:10.000Z | 2022-01-28T14:12:29.000Z |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class product_margin(osv.osv_memory):
_name = 'product.margin'
_description = 'Product Margin'
_columns = {
'from_date': fields.date('From'),
'to_date': fields.date('To'),
'invoice_state': fields.selection([
('paid', 'Paid'),
('open_paid', 'Open and Paid'),
('draft_open_paid', 'Draft, Open and Paid'),
], 'Invoice State', select=True, required=True),
}
_defaults = {
'from_date': time.strftime('%Y-01-01'),
'to_date': time.strftime('%Y-12-31'),
'invoice_state': "open_paid",
}
def action_open_window(self, cr, uid, ids, context=None):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: the ID or list of IDs if we want more than one
@return:
"""
context = dict(context or {})
def ref(module, xml_id):
proxy = self.pool.get('ir.model.data')
return proxy.get_object_reference(cr, uid, module, xml_id)
model, search_view_id = ref('product', 'product_search_form_view')
model, graph_view_id = ref('product_margin', 'view_product_margin_graph')
model, form_view_id = ref('product_margin', 'view_product_margin_form')
model, tree_view_id = ref('product_margin', 'view_product_margin_tree')
#get the current product.margin object to obtain the values from it
records = self.browse(cr, uid, ids, context=context)
record = records[0]
context.update(invoice_state=record.invoice_state)
if record.from_date:
context.update(date_from=record.from_date)
if record.to_date:
context.update(date_to=record.to_date)
views = [
(tree_view_id, 'tree'),
(form_view_id, 'form'),
(graph_view_id, 'graph')
]
return {
'name': _('Product Margins'),
'context': context,
'view_type': 'form',
"view_mode": 'tree,form,graph',
'res_model': 'product.product',
'type': 'ir.actions.act_window',
'views': views,
'view_id': False,
'search_view_id': search_view_id,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 35.618557 | 81 | 0.589001 | 2,320 | 0.671102 | 0 | 0 | 0 | 0 | 0 | 0 | 1,930 | 0.558288 |
c31b0a85e27980acdba6410b67f84602e15446a0 | 1,038 | py | Python | scripts/analysis_one.py | VikkiMba/Programmable-matter | d340c0b370a7e610892ffd8351f7aa576928d05c | ["MIT"] | null | null | null | scripts/analysis_one.py | VikkiMba/Programmable-matter | d340c0b370a7e610892ffd8351f7aa576928d05c | ["MIT"] | null | null | null | scripts/analysis_one.py | VikkiMba/Programmable-matter | d340c0b370a7e610892ffd8351f7aa576928d05c | ["MIT"] | null | null | null |
name = input('Enter file name: ')
lst=list()
lst2=list()
with open(name) as f:
for line in f:
#print(line)
blops=line.rstrip()
blop=blops.split()
#for val in blop:
my_lst = [float(val) for val in blop]#list_comprehension
for num in my_lst:
if num <= 3.5:
lst.append(num)
if num >=4: lst2.append(num)
#num = float(val)
#print(num)
#text = f.read()
#print(text)
#print(type(text))
#print(type(line))
#print(blop)
#print(type(blop))
#print(lst)
#print(lst2)
import itertools
import matplotlib.pyplot as plt
import seaborn as sns
#for (f, b) in zip(lst2 ,lst):
#print (f, b)
#print(type(my_lst))
with open('neu_sam_4b.csv', 'w') as fh:
for (f, b) in zip(lst, lst2):
print(f,',',b, file=fh)
ext=lst
force=lst2
plt.plot(ext, force)
plt.xlabel('Extension')
plt.ylabel('Force')
plt.title('sample with 0.25wt%')
plt.tight_layout()
plt.show()
#for digit in lst:
#print(digit, file=fh)
 | 20.352941 | 64 | 0.578998 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 376 | 0.362235 |
c31bd0f2505a1c4be1c52fbd6469723bb696bfa9 | 2,470 | py | Python | account/models.py | Hasanozzaman-Khan/Django-User-Authentication | 96482a51ed01bbdc7092d6ca34383054967a8aa0 | ["MIT"] | null | null | null | account/models.py | Hasanozzaman-Khan/Django-User-Authentication | 96482a51ed01bbdc7092d6ca34383054967a8aa0 | ["MIT"] | null | null | null | account/models.py | Hasanozzaman-Khan/Django-User-Authentication | 96482a51ed01bbdc7092d6ca34383054967a8aa0 | ["MIT"] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
from PIL import Image
# Create your models here.
class Home(models.Model):
pass
class CustomUserManager(BaseUserManager):
"""Manager for user profiles"""
def create_user(self, email, first_name, last_name, password=None):
"""Create a new user profile"""
if not email:
raise ValueError("User must have an email address.")
email = self.normalize_email(email)
user = self.model(email=email, first_name=first_name, last_name=last_name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, first_name, last_name, password):
"""Create and save a new superuser with given details"""
user = self.create_user(email, first_name, last_name, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class CustomRegisterModel(AbstractBaseUser, PermissionsMixin):
""" Database model for users in the system """
email = models.EmailField(max_length=255, unique=True)
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
is_email_verified = models.BooleanField(default=False)
objects = CustomUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['first_name', 'last_name']
def get_full_name(self):
"""Retrieve full name of user"""
return self.first_name + " " + self.last_name
def get_short_name(self):
"""Retrieve short name of user"""
return self.first_name
def __str__(self):
"""Return string representation of our user"""
return self.email
class ProfileModel(models.Model):
user = models.OneToOneField(CustomRegisterModel, on_delete=models.CASCADE)
image = models.ImageField(default='default.jpg', upload_to='profile_picture')
def __str__(self):
return f"{self.user.first_name}'s profile"
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
img = Image.open(self.image.path)
if img.height > 300 or img.width > 300:
output_size = (300, 300)
img.thumbnail(output_size)
img.save(self.image.path)
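# Illustrative usage (editor's addition, not part of the original models
# module): creating accounts through the custom manager defined above, e.g.
# from a Django shell or a test case; the concrete values are invented.
#
#     user = CustomRegisterModel.objects.create_user(
#         email="[email protected]",
#         first_name="Jane",
#         last_name="Doe",
#         password="s3cret-pass",
#     )
#     admin = CustomRegisterModel.objects.create_superuser(
#         email="[email protected]",
#         first_name="Site",
#         last_name="Admin",
#         password="s3cret-pass",
#     )
#     user.get_full_name()   # "Jane Doe"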
| 30.493827 | 90 | 0.676923 | 2,288 | 0.926316 | 0 | 0 | 0 | 0 | 0 | 0 | 433 | 0.175304 |
c31dc50c8e3e9b895471f34a5cb531f2da5f9d94 | 316 | py | Python | Numeric Patterns/numericpattern37.py | vaidehisinha1/Python-PatternHouse | 49f71bcc5319a838592e69b0e49ef1edba32bf7c | ["MIT"] | null | null | null | Numeric Patterns/numericpattern37.py | vaidehisinha1/Python-PatternHouse | 49f71bcc5319a838592e69b0e49ef1edba32bf7c | ["MIT"] | 471 | 2022-01-15T07:07:18.000Z | 2022-02-28T16:01:42.000Z | Numeric Patterns/numericpattern37.py | vaidehisinha1/Python-PatternHouse | 49f71bcc5319a838592e69b0e49ef1edba32bf7c | ["MIT"] | 2 | 2022-01-17T09:43:16.000Z | 2022-01-29T15:15:47.000Z |
height = int(input())
for i in range(1,height+1) :
for j in range(1, i+1):
m = i*j
if(m <= 9):
print("",m,end = " ")
else:
print(m,end = " ")
print()
# Sample Input :- 5
# Output :-
# 1
# 2 4
# 3 6 9
# 4 8 12 16
# 5 10 15 20 25
| 13.73913 | 33 | 0.379747 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 89 | 0.281646 |
c31e95b8404220c927502906ac5a4aee6be489dd | 2,617 | py | Python | reviewboard/search/testing.py | pombredanne/reviewboard | 15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d | ["MIT"] | null | null | null | reviewboard/search/testing.py | pombredanne/reviewboard | 15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d | ["MIT"] | null | null | null | reviewboard/search/testing.py | pombredanne/reviewboard | 15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d | ["MIT"] | null | null | null |
"""Search-related testing utilities."""
import tempfile
import time
from contextlib import contextmanager
import haystack
from django.conf import settings
from django.core.management import call_command
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.admin.siteconfig import load_site_config
def reindex_search():
"""Rebuild the search index."""
call_command('rebuild_index', interactive=False)
# On Whoosh, the above is asynchronous, and we can end up trying to read
# before we end up writing, occasionally breaking tests. We need to
# introduce just a bit of a delay.
#
# Yeah, this is still sketchy, but we can't turn off the async behavior
# or receive notification that the write has completed.
time.sleep(0.1)
@contextmanager
def search_enabled(on_the_fly_indexing=False, backend_id='whoosh'):
"""Temporarily enable indexed search.
Args:
on_the_fly_indexing (bool, optional):
Whether or not to enable on-the-fly indexing.
backend_id (unicode, optional):
The search backend to enable. Valid options are "whoosh" (default)
and "elasticsearch".
"""
siteconfig = SiteConfiguration.objects.get_current()
old_backend_id = siteconfig.get('search_backend_id')
old_backend_settings = siteconfig.get('search_backend_settings')
if backend_id == 'whoosh':
backend_settings = {
'PATH': tempfile.mkdtemp(suffix='search-index',
dir=settings.SITE_DATA_DIR),
'STORAGE': 'file',
}
elif backend_id == 'elasticsearch':
backend_settings = {
'INDEX_NAME': 'reviewboard-tests',
'URL': 'http://es.example.com:9200/',
}
else:
raise NotImplementedError('Unexpected backend ID "%s"' % backend_id)
siteconfig.settings.update({
'search_enable': True,
'search_backend_id': backend_id,
'search_backend_settings': {
backend_id: backend_settings,
},
'search_on_the_fly_indexing': on_the_fly_indexing,
})
siteconfig.save(update_fields=('settings',))
load_site_config()
try:
yield
haystack.connections['default'].reset_sessions()
finally:
siteconfig.settings.update({
'search_enable': False,
'search_backend_id': old_backend_id,
'search_backend_settings': old_backend_settings,
'search_on_the_fly_indexing': False,
})
siteconfig.save(update_fields=('settings',))
load_site_config()
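# Illustrative usage (editor's addition): a test can combine the two helpers
# above to run assertions against a temporary Whoosh index; the surrounding
# test class and assertions are assumed.
#
#     def test_search_indexing(self):
#         with search_enabled(on_the_fly_indexing=True):
#             reindex_search()
#             # ... query the search view and assert on the results ...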
| 30.788235 | 78 | 0.659534 | 0 | 0 | 1,814 | 0.69316 | 1,830 | 0.699274 | 0 | 0 | 1,104 | 0.421857 |
c31e9aaa2f138851e26e15e8a729a624dea9ce5b | 6,434 | py | Python | pydron/config/config.py | DelphianCalamity/pydron | 1518dc71b5cf64fde563b864db2a4de74e092c8e | ["MIT"] | 5 | 2020-04-06T15:20:56.000Z | 2022-01-05T23:11:13.000Z | pydron/config/config.py | mahmoudimus/pydron | a7b484dec8bcc2730ba9bd76bc63bf3362c05e4d | ["MIT"] | null | null | null | pydron/config/config.py | mahmoudimus/pydron | a7b484dec8bcc2730ba9bd76bc63bf3362c05e4d | ["MIT"] | 2 | 2020-11-27T20:21:34.000Z | 2021-02-26T23:02:11.000Z |
# Copyright (C) 2015 Stefan C. Mueller
import json
import os.path
from remoot import pythonstarter, smartstarter
import anycall
from pydron.backend import worker
from pydron.interpreter import scheduler, strategies
from twisted.internet import defer
preload_packages = []
def load_config(configfile=None):
if not configfile:
candidates = []
if "PYDRON_CONF" in os.environ:
candidates.append(os.environ["PYDRON_CONF"])
candidates.append(os.path.abspath("pydron.conf"))
candidates.append(os.path.expanduser("~/pydron.conf"))
candidates.append("/etc/pydron.conf")
for candidate in candidates:
if os.path.exists(candidate):
configfile = candidate
break
else:
raise ValueError("Config file could not be found. Looked for %s" % repr(candidates))
with open(configfile, 'r') as f:
cfg = json.load(f)
def convert(obj):
if isinstance(obj, dict):
return {k:convert(v) for k,v in obj.iteritems()}
elif isinstance(obj, list):
return [convert(v) for v in obj]
elif isinstance(obj, unicode):
return str(obj)
else:
return obj
cfg = convert(cfg)
return cfg
def create_scheduler(config, pool):
if "scheduler" not in config:
strategy_name = "trivial"
else:
strategy_name = config["scheduler"]
if strategy_name == "trivial":
strategy = strategies.TrivialSchedulingStrategy(pool)
strategy = strategies.VerifySchedulingStrategy(strategy)
else:
raise ValueError("Unsupported scheduler: %s" % strategy_name)
return scheduler.Scheduler(pool, strategy)
def create_pool(config, rpcsystem, error_handler):
"""
starts workers and returns a pool of them.
Returns a Deferred that fires with the worker pool once all starters have
completed, and errbacks with the first failure if any worker fails to start.
The given `error_handler` is invoked for every failed start.
"""
starters = []
for starter_conf in config["workers"]:
starters.extend(_create_starters(starter_conf, rpcsystem))
pool = worker.Pool()
ds = []
for i, starter in enumerate(starters):
d = starter.start()
def success(worker, i, starter):
worker.nicename = "#%s" % i
pool.add_worker(worker)
def fail(failure):
error_handler(failure)
return failure
d.addCallback(success, i, starter)
ds.append(d)
d = defer.DeferredList(ds, fireOnOneErrback=True, consumeErrors=True)
def on_success(result):
return pool
def on_fail(firsterror):
return firsterror.value.subFailure
d.addCallbacks(on_success, on_fail)
return d
def create_rpc_system(conf):
port_range = _parse_port_range(conf.get("data_ports", 0))
return anycall.create_tcp_rpc_system(port_range = port_range)
def _create_starters(conf, rpcsystem):
global preload_packages
import pydron
data_ports = _parse_port_range(conf.get("data_ports", 0))
preconnect = conf.get("preconnect", True)
if 0 in data_ports:
# use automatically selected ports. this is not compatible
# with preconnect
preconnect = False
data_ports = [0]
if data_ports != [0] and len(data_ports) <= conf["cores"]:
if 0 not in data_ports:
raise ValueError("Not enough ports configured for %r" % conf)
starters = []
for i in range(conf["cores"]):
starter_type = conf["type"]
if starter_type == "multicore":
starter = _multicore_starter(conf, rpcsystem)
elif starter_type == "ssh":
starter = _ssh_starter(conf, rpcsystem)
elif starter_type == "cloud":
starter = _ec2_starter(conf, rpcsystem)
else:
raise ValueError("Not supported worker type %s" % repr(starter_type))
if data_ports == [0]:
port = 0
else:
port = data_ports[i]
smart = smartstarter.SmartStarter(starter,
rpcsystem,
anycall.create_tcp_rpc_system,
list(preload_packages)+[pydron],
preconnect = preconnect,
data_port = port)
starters.append(worker.WorkerStarter(smart))
return starters
def _multicore_starter(conf, rpcsystem):
return pythonstarter.LocalStarter()
def _ssh_starter(conf, rpcsystem):
starter = pythonstarter.SSHStarter(conf["hostname"],
username=conf["username"],
password=conf.get("password", None),
private_key_files=conf.get("private_key_files", []),
private_keys=conf.get("private_keys", []),
tmp_dir=conf.get("tmp_dir", "/tmp"))
return starter
def _ec2_starter(conf, rpcsystem):
starter = pythonstarter.EC2Starter(username=conf["username"],
provider=conf["provider"],
provider_keyid=conf["accesskeyid"],
provider_key=conf["accesskey"],
image_id=conf["imageid"],
size_id=conf["sizeid"],
public_key_file=conf["publickey"],
private_key_file=conf["privatekey"],
tmp_dir=conf.get("tmp_dir", "/tmp"))
return starter
def _parse_port_range(ports):
try:
return [int(ports)]
except ValueError:
pass
if isinstance(ports, list):
return [int(x) for x in ports]
min_port, max_port = str(ports).split('-', 1)
min_port = int(min_port)
max_port = int(max_port)
return range(min_port, max_port + 1)
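# Illustrative example (editor's addition): a minimal pydron.conf accepted by
# load_config()/create_pool() above. Hostnames, key paths and port ranges are
# invented; only keys actually read by the functions in this module are shown.
#
#     {
#         "scheduler": "trivial",
#         "data_ports": "41000-41063",
#         "workers": [
#             {"type": "multicore", "cores": 4},
#             {
#                 "type": "ssh",
#                 "cores": 2,
#                 "hostname": "worker1.example.com",
#                 "username": "pydron",
#                 "private_key_files": ["~/.ssh/id_rsa"],
#                 "data_ports": "42000-42063",
#                 "preconnect": false
#             }
#         ]
#     }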
| 32.331658 | 96 | 0.562791 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,039 | 0.161486 |
c31eedaebb01514423f430e25d6d4f8b0f2cba6b | 4,678 | py | Python | astropy/tests/plugins/display.py | guntbert/astropy | f2d2add09e5b1638b2698f19a4d46fcca19e82be | ["BSD-3-Clause"] | null | null | null | astropy/tests/plugins/display.py | guntbert/astropy | f2d2add09e5b1638b2698f19a4d46fcca19e82be | ["BSD-3-Clause"] | 10 | 2017-03-15T16:14:43.000Z | 2018-11-22T14:40:54.000Z | astropy/tests/plugins/display.py | guntbert/astropy | f2d2add09e5b1638b2698f19a4d46fcca19e82be | ["BSD-3-Clause"] | 1 | 2020-01-23T00:41:10.000Z | 2020-01-23T00:41:10.000Z |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This plugin provides customization of the header displayed by pytest for
reporting purposes.
"""
import os
import sys
import datetime
import locale
import math
from collections import OrderedDict
from astropy.tests.helper import ignore_warnings
from astropy.utils.introspection import resolve_name
PYTEST_HEADER_MODULES = OrderedDict([('Numpy', 'numpy'),
('Scipy', 'scipy'),
('Matplotlib', 'matplotlib'),
('h5py', 'h5py'),
('Pandas', 'pandas')])
# This always returns with Astropy's version
from astropy import __version__
TESTED_VERSIONS = OrderedDict([('Astropy', __version__)])
def pytest_report_header(config):
try:
stdoutencoding = sys.stdout.encoding or 'ascii'
except AttributeError:
stdoutencoding = 'ascii'
args = config.args
# TESTED_VERSIONS can contain the affiliated package version, too
if len(TESTED_VERSIONS) > 1:
for pkg, version in TESTED_VERSIONS.items():
if pkg not in ['Astropy', 'astropy_helpers']:
s = "\nRunning tests with {} version {}.\n".format(
pkg, version)
else:
s = "\nRunning tests with Astropy version {}.\n".format(
TESTED_VERSIONS['Astropy'])
# Per https://github.com/astropy/astropy/pull/4204, strip the rootdir from
# each directory argument
if hasattr(config, 'rootdir'):
rootdir = str(config.rootdir)
if not rootdir.endswith(os.sep):
rootdir += os.sep
dirs = [arg[len(rootdir):] if arg.startswith(rootdir) else arg
for arg in args]
else:
dirs = args
s += "Running tests in {}.\n\n".format(" ".join(dirs))
s += "Date: {}\n\n".format(datetime.datetime.now().isoformat()[:19])
from platform import platform
plat = platform()
if isinstance(plat, bytes):
plat = plat.decode(stdoutencoding, 'replace')
s += f"Platform: {plat}\n\n"
s += f"Executable: {sys.executable}\n\n"
s += f"Full Python Version: \n{sys.version}\n\n"
s += "encodings: sys: {}, locale: {}, filesystem: {}".format(
sys.getdefaultencoding(),
locale.getpreferredencoding(),
sys.getfilesystemencoding())
s += '\n'
s += f"byteorder: {sys.byteorder}\n"
s += "float info: dig: {0.dig}, mant_dig: {0.dig}\n\n".format(
sys.float_info)
for module_display, module_name in PYTEST_HEADER_MODULES.items():
try:
with ignore_warnings(DeprecationWarning):
module = resolve_name(module_name)
except ImportError:
s += f"{module_display}: not available\n"
else:
try:
version = module.__version__
except AttributeError:
version = 'unknown (no __version__ attribute)'
s += f"{module_display}: {version}\n"
# Helpers version
if 'astropy_helpers' in TESTED_VERSIONS:
astropy_helpers_version = TESTED_VERSIONS['astropy_helpers']
else:
try:
from astropy.version import astropy_helpers_version
except ImportError:
astropy_helpers_version = None
if astropy_helpers_version:
s += f"astropy_helpers: {astropy_helpers_version}\n"
special_opts = ["remote_data", "pep8"]
opts = []
for op in special_opts:
op_value = getattr(config.option, op, None)
if op_value:
if isinstance(op_value, str):
op = ': '.join((op, op_value))
opts.append(op)
if opts:
s += "Using Astropy options: {}.\n".format(", ".join(opts))
return s
def pytest_terminal_summary(terminalreporter):
"""Output a warning to IPython users in case any tests failed."""
try:
get_ipython()
except NameError:
return
if not terminalreporter.stats.get('failed'):
# Only issue the warning when there are actually failures
return
terminalreporter.ensure_newline()
terminalreporter.write_line(
'Some tests are known to fail when run from the IPython prompt; '
'especially, but not limited to tests involving logging and warning '
'handling. Unless you are certain as to the cause of the failure, '
'please check that the failure occurs outside IPython as well. See '
'http://docs.astropy.org/en/stable/known_issues.html#failing-logging-'
'tests-when-running-the-tests-in-ipython for more information.',
yellow=True, bold=True)
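# Illustrative usage (editor's addition): a package that uses this plugin can
# extend the report header from its conftest.py; the module and version
# strings below are examples only.
#
#     from astropy.tests.plugins.display import (PYTEST_HEADER_MODULES,
#                                                TESTED_VERSIONS)
#
#     PYTEST_HEADER_MODULES['scikit-image'] = 'skimage'
#     TESTED_VERSIONS['mypackage'] = '1.2.3'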
| 32.943662 | 78 | 0.614793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,679 | 0.358914 |
c31f343b321d0b0d195053ec8d68315783bcc174 | 2,782 | py | Python | packages/api-server/api_server/routes/lifts.py | Sald-for-Communication-and-IT/rmf-web | ec5996ab0b06440d7147170f3030b14c73d26116 | ["Apache-2.0"] | null | null | null | packages/api-server/api_server/routes/lifts.py | Sald-for-Communication-and-IT/rmf-web | ec5996ab0b06440d7147170f3030b14c73d26116 | ["Apache-2.0"] | null | null | null | packages/api-server/api_server/routes/lifts.py | Sald-for-Communication-and-IT/rmf-web | ec5996ab0b06440d7147170f3030b14c73d26116 | ["Apache-2.0"] | null | null | null |
from typing import Any, List, cast
from fastapi import Depends
from rx import operators as rxops
from api_server.base_app import BaseApp
from api_server.fast_io import FastIORouter, WatchRequest
from api_server.models import Lift, LiftHealth, LiftRequest, LiftState
from api_server.repositories import RmfRepository
from .utils import rx_watcher
class LiftsRouter(FastIORouter):
def __init__(self, app: BaseApp):
super().__init__(tags=["Lifts"])
@self.get("", response_model=List[Lift])
async def get_lifts(rmf_repo: RmfRepository = Depends(app.rmf_repo)):
return await rmf_repo.get_lifts()
@self.get("/{lift_name}/state", response_model=LiftState)
async def get_lift_state(
lift_name: str, rmf_repo: RmfRepository = Depends(app.rmf_repo)
):
"""
Available in socket.io
"""
return await rmf_repo.get_lift_state(lift_name)
@self.watch("/{lift_name}/state")
async def watch_lift_state(req: WatchRequest, lift_name: str):
lift_state = await get_lift_state(lift_name, RmfRepository(req.user))
if lift_state is not None:
await req.emit(lift_state.dict())
rx_watcher(
req,
app.rmf_events().lift_states.pipe(
rxops.filter(lambda x: cast(LiftState, x).lift_name == lift_name),
rxops.map(cast(Any, lambda x: cast(LiftState, x).dict())),
),
)
@self.get("/{lift_name}/health", response_model=LiftHealth)
async def get_lift_health(
lift_name: str, rmf_repo: RmfRepository = Depends(app.rmf_repo)
):
"""
Available in socket.io
"""
return await rmf_repo.get_lift_health(lift_name)
@self.watch("/{lift_name}/health")
async def watch_lift_health(req: WatchRequest, lift_name: str):
health = await get_lift_health(lift_name, RmfRepository(req.user))
if health is not None:
await req.emit(health.dict())
rx_watcher(
req,
app.rmf_events().lift_health.pipe(
rxops.filter(lambda x: cast(LiftHealth, x).id_ == lift_name),
rxops.map(cast(Any, lambda x: cast(LiftHealth, x).dict())),
),
)
@self.post("/{lift_name}/request")
def _post_lift_request(
lift_name: str,
lift_request: LiftRequest,
):
app.rmf_gateway().request_lift(
lift_name,
lift_request.destination,
lift_request.request_type,
lift_request.door_mode,
)
| 36.12987 | 86 | 0.586988 | 2,430 | 0.873472 | 0 | 0 | 2,259 | 0.812006 | 1,637 | 0.588426 | 221 | 0.079439 |
c320f2c59ba3ca84a73f6a79313b9f9398f03283 | 5,228 | py | Python | src/opnsense/scripts/suricata/queryAlertLog.py | ass-a2s/opnsense-core | a0634d180325f6afe3be7f514b4470e47ff5eb75 | ["BSD-2-Clause"] | 2 | 2019-03-15T03:35:54.000Z | 2019-03-15T07:50:36.000Z | src/opnsense/scripts/suricata/queryAlertLog.py | ass-a2s/opnsense-core | a0634d180325f6afe3be7f514b4470e47ff5eb75 | ["BSD-2-Clause"] | null | null | null | src/opnsense/scripts/suricata/queryAlertLog.py | ass-a2s/opnsense-core | a0634d180325f6afe3be7f514b4470e47ff5eb75 | ["BSD-2-Clause"] | null | null | null |
#!/usr/local/bin/python3.6
"""
Copyright (c) 2015-2019 Ad Schellevis <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
query suricata alert log
"""
import sys
import os.path
import re
import sre_constants
import shlex
import ujson
sys.path.insert(0, "/usr/local/opnsense/site-python")
from log_helper import reverse_log_reader
from params import update_params
from lib import suricata_alert_log
if __name__ == '__main__':
# handle parameters
parameters = {'limit': '0', 'offset': '0', 'filter': '', 'fileid': ''}
update_params(parameters)
# choose logfile by number
if parameters['fileid'].isdigit():
suricata_log = '%s.%d' % (suricata_alert_log, int(parameters['fileid']))
else:
suricata_log = suricata_alert_log
if parameters['limit'].isdigit():
limit = int(parameters['limit'])
else:
limit = 0
if parameters['offset'].isdigit():
offset = int(parameters['offset'])
else:
offset = 0
data_filters = {}
data_filters_comp = {}
for filter_txt in shlex.split(parameters['filter']):
filterField = filter_txt.split('/')[0]
if filter_txt.find('/') > -1:
data_filters[filterField] = '/'.join(filter_txt.split('/')[1:])
filter_regexp = data_filters[filterField]
filter_regexp = filter_regexp.replace('*', '.*')
filter_regexp = filter_regexp.lower()
try:
data_filters_comp[filterField] = re.compile(filter_regexp)
except sre_constants.error:
# remove illegal expression
# del data_filters[filterField]
data_filters_comp[filterField] = re.compile('.*')
# filter one specific log line
if 'filepos' in data_filters and data_filters['filepos'].isdigit():
log_start_pos = int(data_filters['filepos'])
else:
log_start_pos = None
# query suricata eve log
result = {'filters': data_filters, 'rows': [], 'total_rows': 0, 'origin': suricata_log.split('/')[-1]}
if os.path.exists(suricata_log):
for line in reverse_log_reader(filename=suricata_log, start_pos=log_start_pos):
try:
record = ujson.loads(line['line'])
except ValueError:
# can not handle line
record = {}
# only process valid alert items
if 'alert' in record:
# add position in file
record['filepos'] = line['pos']
record['fileid'] = parameters['fileid']
# flatten structure
record['alert_sid'] = record['alert']['signature_id']
record['alert_action'] = record['alert']['action']
record['alert'] = record['alert']['signature']
# use filters on data (using regular expressions)
do_output = True
for filterKeys in data_filters:
filter_hit = False
for filterKey in filterKeys.split(','):
if filterKey in record and data_filters_comp[filterKeys].match(
('%s' % record[filterKey]).lower()):
filter_hit = True
if not filter_hit:
do_output = False
if do_output:
result['total_rows'] += 1
if (len(result['rows']) < limit or limit == 0) and result['total_rows'] >= offset:
result['rows'].append(record)
elif result['total_rows'] > offset + limit:
# do not fetch data until end of file...
break
# only try to fetch one line when filepos is given
if log_start_pos is not None:
break
# output results
print(ujson.dumps(result))
| 39.606061 | 106 | 0.599273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,305 | 0.440895 |
c3214282673aaeda28c84c61fc6d8f9be877c23b | 3,592 | py | Python | cairis/gui/DictionaryListCtrl.py | RachelLar/cairis_update | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | ["Apache-2.0"] | null | null | null | cairis/gui/DictionaryListCtrl.py | RachelLar/cairis_update | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | ["Apache-2.0"] | null | null | null | cairis/gui/DictionaryListCtrl.py | RachelLar/cairis_update | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
from DictionaryEntryDialog import DictionaryEntryDialog
class DictionaryListCtrl(wx.ListCtrl):
def __init__(self,parent):
wx.ListCtrl.__init__(self,parent,PROJECTSETTINGS_LISTDICTIONARY_ID,size=wx.DefaultSize,style=wx.LC_REPORT | wx.LC_SORT_ASCENDING)
self.keys = []
self.InsertColumn(0,'Name')
self.SetColumnWidth(0,150)
self.InsertColumn(1,'Definition')
self.SetColumnWidth(1,300)
self.theSelectedIdx = -1
self.theMenu = wx.Menu()
self.theMenu.Append(DICTIONARYLISTCTRL_MENUADD_ID,'Add')
self.theMenu.Append(DICTIONARYLISTCTRL_MENUDELETE_ID,'Delete')
self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK,self.OnRightDown)
self.Bind(wx.EVT_LIST_ITEM_SELECTED,self.OnItemSelected)
self.Bind(wx.EVT_LIST_ITEM_DESELECTED,self.OnItemDeselected)
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED,self.onEntryActivated)
wx.EVT_MENU(self.theMenu,DICTIONARYLISTCTRL_MENUADD_ID,self.onAddEntry)
wx.EVT_MENU(self.theMenu,DICTIONARYLISTCTRL_MENUDELETE_ID,self.onDeleteEntry)
def OnItemSelected(self,evt):
self.theSelectedIdx = evt.GetIndex()
def OnItemDeselected(self,evt):
self.theSelectedIdx = -1
def OnRightDown(self,evt):
self.PopupMenu(self.theMenu)
def onAddEntry(self,evt):
dlg = DictionaryEntryDialog(self)
if (dlg.ShowModal() == DICTIONARYENTRY_BUTTONCOMMIT_ID):
name = dlg.name()
definition = dlg.definition()
idx = self.GetItemCount()
self.InsertStringItem(idx,name)
self.SetStringItem(idx,1,definition)
def onDeleteEntry(self,evt):
if (self.theSelectedIdx == -1):
errorText = 'No entry selected'
errorLabel = 'Delete definition'
dlg = wx.MessageDialog(self,errorText,errorLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
else:
selectedValue = self.GetItemText(self.theSelectedIdx)
self.DeleteItem(self.theSelectedIdx)
def onEntryActivated(self,evt):
self.theSelectedIdx = evt.GetIndex()
name = self.GetItemText(self.theSelectedIdx)
definition = self.GetItem(self.theSelectedIdx,1)
dlg = DictionaryEntryDialog(self,name,definition.GetText())
if (dlg.ShowModal() == DICTIONARYENTRY_BUTTONCOMMIT_ID):
self.SetStringItem(self.theSelectedIdx,0,dlg.name())
self.SetStringItem(self.theSelectedIdx,1,dlg.definition())
def load(self,entries):
self.keys = entries.keys()
self.keys.sort()
for name in self.keys:
idx = self.GetItemCount()
self.InsertStringItem(idx,name)
self.SetStringItem(idx,1,entries[name])
def dimensions(self):
entries = []
for x in range(self.GetItemCount()):
name = self.GetItemText(x)
definition = self.GetItem(x,1)
entries.append((name,definition.GetText()))
return entries
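# Illustrative usage (editor's addition): populating the control from a parent
# panel and reading it back; the dictionary contents are invented.
#
#     ctrl = DictionaryListCtrl(parentPanel)
#     ctrl.load({'asset': 'Something of value', 'threat': 'A potential loss'})
#     ...
#     pairs = ctrl.dimensions()   # [('asset', '...'), ('threat', '...')]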
| 37.030928 | 133 | 0.733018 | 2,693 | 0.749722 | 0 | 0 | 0 | 0 | 0 | 0 | 850 | 0.236637 |
c32367d43e08138167f815beb65fbee346856f66 | 1,965 | py | Python | old_test/test-large.py | briandobbins/pynio | 1dd5fc0fc133f2b8d329ae68929bd3c6c1c5fa7c | ["Apache-2.0"] | null | null | null | old_test/test-large.py | briandobbins/pynio | 1dd5fc0fc133f2b8d329ae68929bd3c6c1c5fa7c | ["Apache-2.0"] | null | null | null | old_test/test-large.py | briandobbins/pynio | 1dd5fc0fc133f2b8d329ae68929bd3c6c1c5fa7c | ["Apache-2.0"] | null | null | null |
from __future__ import print_function, division
import numpy as np
import Nio
import time, os
#
# Creating a file
#
init_time = time.clock()
ncfile = 'test-large.nc'
if (os.path.exists(ncfile)):
os.system("/bin/rm -f " + ncfile)
opt = Nio.options()
opt.Format = "LargeFile"
opt.PreFill = False
file = Nio.open_file(ncfile, 'w', options=opt)
file.title = "Testing large files and dimensions"
file.create_dimension('big', 2500000000)
bigvar = file.create_variable('bigvar', "b", ('big',))
print("created bigvar")
# note it is incredibly slow to write a scalar to a large file variable
# so create an temporary variable x that will get assigned in steps
x = np.empty(1000000,dtype = 'int8')
#print x
x[:] = 42
t = list(range(0,2500000000,1000000))
ii = 0
for i in t:
if (i == 0):
continue
print(t[ii],i)
bigvar[t[ii]:i] = x[:]
ii += 1
x[:] = 84
bigvar[2499000000:2500000000] = x[:]
bigvar[-1] = 84
bigvar.units = "big var units"
#print bigvar[-1]
print(bigvar.dimensions)
# check unlimited status
for dim in list(file.dimensions.keys()):
print(dim, " unlimited: ",file.unlimited(dim))
print(file)
print("closing file")
print('elapsed time: ',time.clock() - init_time)
file.close()
#quit()
#
# Reading a file
#
print('opening file for read')
print('elapsed time: ',time.clock() - init_time)
file = Nio.open_file(ncfile, 'r')
print('file is open')
print('elapsed time: ',time.clock() - init_time)
print(file.dimensions)
print(list(file.variables.keys()))
print(file)
print("reading variable")
print('elapsed time: ',time.clock() - init_time)
x = file.variables['bigvar']
print(x[0],x[1000000],x[249000000],x[2499999999])
print("max and min")
min = x[:].min()
max = x[:].max()
print(min, max)
print('elapsed time: ',time.clock() - init_time)
# check unlimited status
for dim in list(file.dimensions.keys()):
print(dim, " unlimited: ",file.unlimited(dim))
print("closing file")
print('elapsed time: ',time.clock() - init_time)
file.close()
| 23.674699 | 71 | 0.689567 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 622 | 0.316539 |
c323fd0a281a1d87543bdabbbe5b4427e5eec191 | 36,178 | py | Python | eeauditor/auditors/aws/Amazon_ECS_Auditor.py | kbhagi/ElectricEye | 31960e1e1cfb75c5d354844ea9e07d5295442823 | ["Apache-2.0"] | 442 | 2020-03-15T20:56:36.000Z | 2022-03-31T22:13:07.000Z | eeauditor/auditors/aws/Amazon_ECS_Auditor.py | kbhagi/ElectricEye | 31960e1e1cfb75c5d354844ea9e07d5295442823 | ["Apache-2.0"] | 57 | 2020-03-15T22:09:56.000Z | 2022-03-31T13:17:06.000Z | eeauditor/auditors/aws/Amazon_ECS_Auditor.py | kbhagi/ElectricEye | 31960e1e1cfb75c5d354844ea9e07d5295442823 | ["Apache-2.0"] | 59 | 2020-03-15T21:19:10.000Z | 2022-03-31T15:01:31.000Z |
#This file is part of ElectricEye.
#SPDX-License-Identifier: Apache-2.0
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
import boto3
import datetime
from check_register import CheckRegister
registry = CheckRegister()
# import boto3 clients
ecs = boto3.client("ecs")
# loop through ECS Clusters
def list_clusters(cache):
response = cache.get("list_clusters")
if response:
return response
cache["list_clusters"] = ecs.list_clusters()
return cache["list_clusters"]
@registry.register_check("ecs")
def ecs_cluster_container_insights_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[ECS.1] ECS clusters should have container insights enabled"""
response = list_clusters(cache)
myEcsClusters = response["clusterArns"]
for clusters in myEcsClusters:
clusterArn = str(clusters)
try:
response = ecs.describe_clusters(clusters=[clusterArn])
for clusterinfo in response["clusters"]:
clusterName = str(clusterinfo["clusterName"])
ecsClusterArn = str(clusterinfo["clusterArn"])
for settings in clusterinfo["settings"]:
contInsightsCheck = str(settings["value"])
# ISO Time
iso8601Time = (
datetime.datetime.utcnow()
.replace(tzinfo=datetime.timezone.utc)
.isoformat()
)
if contInsightsCheck == "disabled":
finding = {
"SchemaVersion": "2018-10-08",
"Id": ecsClusterArn + "/ecs-cluster-container-insights-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": ecsClusterArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[ECS.1] ECS clusters should have container insights enabled",
"Description": "ECS cluster "
+ clusterName
+ " does not have container insights enabled. Refer to the remediation instructions to remediate this behavior",
"Remediation": {
"Recommendation": {
"Text": "For information on configuring Container Insights for your cluster refer to the Setting Up Container Insights on Amazon ECS for Cluster- and Service-Level Metrics section of the Amazon CloudWatch User Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/deploy-container-insights-ECS-cluster.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsCluster",
"Id": ecsClusterArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"ClusterName": clusterName}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF DE.AE-3",
"NIST SP 800-53 AU-6",
"NIST SP 800-53 CA-7",
"NIST SP 800-53 IR-4",
"NIST SP 800-53 IR-5",
"NIST SP 800-53 IR-8",
"NIST SP 800-53 SI-4",
"AICPA TSC CC7.2",
"ISO 27001:2013 A.12.4.1",
"ISO 27001:2013 A.16.1.7",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": ecsClusterArn + "/ecs-cluster-container-insights-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": ecsClusterArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[ECS.1] ECS clusters should have container insights enabled",
"Description": "ECS cluster "
+ clusterName
+ " has container insights enabled.",
"Remediation": {
"Recommendation": {
"Text": "For information on configuring Container Insights for your cluster refer to the Setting Up Container Insights on Amazon ECS for Cluster- and Service-Level Metrics section of the Amazon CloudWatch User Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/deploy-container-insights-ECS-cluster.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsCluster",
"Id": ecsClusterArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"ClusterName": clusterName}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF DE.AE-3",
"NIST SP 800-53 AU-6",
"NIST SP 800-53 CA-7",
"NIST SP 800-53 IR-4",
"NIST SP 800-53 IR-5",
"NIST SP 800-53 IR-8",
"NIST SP 800-53 SI-4",
"AICPA TSC CC7.2",
"ISO 27001:2013 A.12.4.1",
"ISO 27001:2013 A.16.1.7",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("ecs")
def ecs_cluster_default_provider_strategy_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[ECS.2] ECS clusters should have a default cluster capacity provider strategy configured"""
response = list_clusters(cache)
myEcsClusters = response["clusterArns"]
for clusters in myEcsClusters:
clusterArn = str(clusters)
try:
response = ecs.describe_clusters(clusters=[clusterArn])
for clusterinfo in response["clusters"]:
clusterName = str(clusterinfo["clusterName"])
ecsClusterArn = str(clusterinfo["clusterArn"])
defaultProviderStratCheck = str(clusterinfo["defaultCapacityProviderStrategy"])
# ISO Time
iso8601Time = (
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
)
if defaultProviderStratCheck == "[]":
finding = {
"SchemaVersion": "2018-10-08",
"Id": ecsClusterArn + "/ecs-cluster-default-provider-strategy-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": ecsClusterArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[ECS.2] ECS clusters should have a default cluster capacity provider strategy configured",
"Description": "ECS cluster "
+ clusterName
+ " does not have a default provider strategy configured. Refer to the remediation instructions to remediate this behavior",
"Remediation": {
"Recommendation": {
"Text": "For information on cluster capacity provider strategies for your cluster refer to the Amazon ECS Cluster Capacity Providers section of the Amazon Elastic Container Service Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-capacity-providers.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsCluster",
"Id": ecsClusterArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"ClusterName": clusterName}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF ID.AM-2",
"NIST SP 800-53 CM-8",
"NIST SP 800-53 PM-5",
"AICPA TSC CC3.2",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.1.1",
"ISO 27001:2013 A.8.1.2",
"ISO 27001:2013 A.12.5.1",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": ecsClusterArn + "/ecs-cluster-default-provider-strategy-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": ecsClusterArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[ECS.2] ECS clusters should have a default cluster capacity provider strategy configured",
"Description": "ECS cluster "
+ clusterName
+ " has a default provider strategy configured.",
"Remediation": {
"Recommendation": {
"Text": "For information on cluster capacity provider strategies for your cluster refer to the Amazon ECS Cluster Capacity Providers section of the Amazon Elastic Container Service Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-capacity-providers.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsCluster",
"Id": ecsClusterArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"ClusterName": clusterName}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF ID.AM-2",
"NIST SP 800-53 CM-8",
"NIST SP 800-53 PM-5",
"AICPA TSC CC3.2",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.1.1",
"ISO 27001:2013 A.8.1.2",
"ISO 27001:2013 A.12.5.1",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("ecs")
def ecs_task_definition_privileged_container_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[ECS.3] ECS Task Definitions should not run privileged containers if not required"""
for taskdef in ecs.list_task_definitions(status='ACTIVE')['taskDefinitionArns']:
try:
response = ecs.describe_task_definition(taskDefinition=taskdef)["taskDefinition"]
taskDefinitionArn = str(response['taskDefinitionArn'])
tdefFamily = str(response["family"])
# Loop container definitions
for cdef in response["containerDefinitions"]:
# ISO Time
iso8601Time = (datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())
cdefName = str(cdef["name"])
# We are going to assume that if there is not a privileged flag...that it is ;)
try:
privCheck = str(cdef["privileged"])
except:
privCheck = 'UNKNOWN'
if privCheck != 'False':
finding = {
"SchemaVersion": "2018-10-08",
"Id": taskDefinitionArn + "/" + cdefName + "/ecs-task-definition-privileged-container-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": taskDefinitionArn + "/" + cdefName,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"TTPs/Privilege Escalation"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[ECS.3] ECS Task Definitions should not run privileged containers if not required",
"Description": "ECS Container Definition "
+ cdefName
+ " in Task Definition "
+ taskDefinitionArn
+ " has defined a Privileged container, which should be avoided unless absolutely necessary. Refer to the remediation instructions to remediate this behavior",
"Remediation": {
"Recommendation": {
"Text": "Containers running as Privileged will have Root permissions, this should be avoided if not needed. Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsTaskDefinition",
"Id": taskDefinitionArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"Family": tdefFamily,
"ContainerDefinitionName": cdefName
}
}
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": taskDefinitionArn + "/" + cdefName + "/ecs-task-definition-privileged-container-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": taskDefinitionArn + "/" + cdefName,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"TTPs/Privilege Escalation"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[ECS.3] ECS Task Definitions should not run privileged containers if not required",
"Description": "ECS Container Definition "
+ cdefName
+ " in Task Definition "
+ taskDefinitionArn
+ " has not defined a Privileged container.",
"Remediation": {
"Recommendation": {
"Text": "Containers running as Privileged will have Root permissions, this should be avoided if not needed. Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsTaskDefinition",
"Id": taskDefinitionArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"Family": tdefFamily,
"ContainerDefinitionName": cdefName
}
}
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("ecs")
def ecs_task_definition_security_labels_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[ECS.4] ECS Task Definitions for EC2 should have Docker Security Options (SELinux or AppArmor) configured"""
for taskdef in ecs.list_task_definitions(status='ACTIVE')['taskDefinitionArns']:
try:
response = ecs.describe_task_definition(taskDefinition=taskdef)["taskDefinition"]
taskDefinitionArn = str(response["taskDefinitionArn"])
tdefFamily = str(response["family"])
# If there is a network mode of "awsvpc" it is likely a Fargate task - even though EC2 compute can run with that...
# time for some funky edge cases, keep that in mind before you yeet an issue at me, please ;)
if str(response["networkMode"]) == 'awsvpc':
continue
else:
# Loop container definitions
for cdef in response["containerDefinitions"]:
# ISO Time
iso8601Time = (datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())
cdefName = str(cdef["name"])
try:
# This is a passing check
secOpts = str(cdef["dockerSecurityOptions"])
finding = {
"SchemaVersion": "2018-10-08",
"Id": taskDefinitionArn + "/" + cdefName + "/ecs-task-definition-security-labels-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": taskDefinitionArn + "/" + cdefName,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[ECS.4] ECS Task Definitions for EC2 should have Docker Security Options (SELinux or AppArmor) configured",
"Description": "ECS Container Definition "
+ cdefName
+ " in Task Definition "
+ taskDefinitionArn
+ " has Docker Security Options configured.",
"Remediation": {
"Recommendation": {
"Text": "Containers running on EC2 Compute-types should have Docker Security Options configured. Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions"
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsTaskDefinition",
"Id": taskDefinitionArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"Family": tdefFamily,
"ContainerDefinitionName": cdefName,
'DockerSecurityOptions': secOpts
}
}
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.IP-1",
"NIST SP 800-53 CM-2",
"NIST SP 800-53 CM-3",
"NIST SP 800-53 CM-4",
"NIST SP 800-53 CM-5",
"NIST SP 800-53 CM-6",
"NIST SP 800-53 CM-7",
"NIST SP 800-53 CM-9",
"NIST SP 800-53 SA-10",
"AICPA TSC A1.3",
"AICPA TSC CC1.4",
"AICPA TSC CC5.3",
"AICPA TSC CC6.2",
"AICPA TSC CC7.1",
"AICPA TSC CC7.3",
"AICPA TSC CC7.4",
"ISO 27001:2013 A.12.1.2",
"ISO 27001:2013 A.12.5.1",
"ISO 27001:2013 A.12.6.2",
"ISO 27001:2013 A.14.2.2",
"ISO 27001:2013 A.14.2.3",
"ISO 27001:2013 A.14.2.4",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED"
}
yield finding
except:
secOpts = str('["NO_OPTIONS"]')
finding = {
"SchemaVersion": "2018-10-08",
"Id": taskDefinitionArn + "/" + cdefName + "/ecs-task-definition-security-labels-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": taskDefinitionArn + "/" + cdefName,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "HIGH"},
"Confidence": 99,
"Title": "[ECS.4] ECS Task Definitions for EC2 should have Docker Security Options (SELinux or AppArmor) configured",
"Description": "ECS Container Definition "
+ cdefName
+ " in Task Definition "
+ taskDefinitionArn
+ " does not have any Docker Security Options configured. Refer to the remediation instructions to remediate this behavior",
"Remediation": {
"Recommendation": {
"Text": "Containers running on EC2 Compute-types should have Docker Security Options configured. Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions"
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsTaskDefinition",
"Id": taskDefinitionArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"Family": tdefFamily,
"ContainerDefinitionName": cdefName,
'DockerSecurityOptions': secOpts
}
}
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.IP-1",
"NIST SP 800-53 CM-2",
"NIST SP 800-53 CM-3",
"NIST SP 800-53 CM-4",
"NIST SP 800-53 CM-5",
"NIST SP 800-53 CM-6",
"NIST SP 800-53 CM-7",
"NIST SP 800-53 CM-9",
"NIST SP 800-53 SA-10",
"AICPA TSC A1.3",
"AICPA TSC CC1.4",
"AICPA TSC CC5.3",
"AICPA TSC CC6.2",
"AICPA TSC CC7.1",
"AICPA TSC CC7.3",
"AICPA TSC CC7.4",
"ISO 27001:2013 A.12.1.2",
"ISO 27001:2013 A.12.5.1",
"ISO 27001:2013 A.12.6.2",
"ISO 27001:2013 A.14.2.2",
"ISO 27001:2013 A.14.2.3",
"ISO 27001:2013 A.14.2.4",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE"
}
yield finding
except Exception as e:
print(e) | 57.60828 | 254 | 0.409199 | 0 | 0 | 34,833 | 0.962823 | 34,961 | 0.966361 | 0 | 0 | 14,280 | 0.394715 |
c324c7d6ffabe1bf0c4f2f6e3eba09b511032c92 | 7,470 | py | Python | Mask/Interpolate slider without prepolate.py | typedev/RoboFont-1 | 307c3c953a338f58cd0070aa5b1bb737bde08cc9 | [
"MIT"
]
| 1 | 2016-03-27T17:07:16.000Z | 2016-03-27T17:07:16.000Z | Mask/Interpolate slider without prepolate.py | typedev/RoboFont-1 | 307c3c953a338f58cd0070aa5b1bb737bde08cc9 | [
"MIT"
]
| null | null | null | Mask/Interpolate slider without prepolate.py | typedev/RoboFont-1 | 307c3c953a338f58cd0070aa5b1bb737bde08cc9 | [
"MIT"
]
| null | null | null | """
This slider controls interpolation between foreground and mask layers.
Initial position for slider is at 1.0 (current foreground outline)
Sliding left to 0.0 interpolates to mask
Sliding right to 3.0 extrapolates away from mask.
NOTE:
Running this script opens an observer on the current glyph in the Glyph View window.
The slider window must then be closed before it can be used on another glyph.
"""
from fontTools.misc.transform import Transform
from vanilla import *
g = CurrentGlyph()
g.prepareUndo('interpolate with mask')
################### PREPOLATION ###################################
## Auto contour order and startpoints for foreground:
#g.autoContourOrder()
#for c in g:
# c.autoStartSegment()
## Auto contour order and startpoints for mask:
g.flipLayers("foreground", "mask")
#g.autoContourOrder()
#for c in g:
# c.autoStartSegment()
## Gather point info for mask layer:
maskpoints = []
for i in range(len(g)):
maskpoints.append([])
for j in range(len(g[i])):
maskpoints[i].append((g[i][j].onCurve.x,g[i][j].onCurve.y))
## Gather point info for foreground layer:
g.flipLayers("mask", "foreground")
forepoints = []
for i in range(len(g)):
forepoints.append([])
for j in range(len(g[i])):
forepoints[i].append((g[i][j].onCurve.x,g[i][j].onCurve.y))
## Compare length of each contour in mask and foreground:
n = 0
print '-------------------------------'
print 'Checking ' + str(g.name) + ' without auto ordering'
def gradient(point1, point2):
grad = (point2[1] - point1[1])/(point2[0] - point1[0] + 0.9)
return grad
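# (Editor's note) the +0.9 in the denominator above is a crude guard against division by
# zero on vertical segments; near-vertical gradients are clamped to +/-100 further below.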
mismatched = []
if len(maskpoints) == len(forepoints):
for i in range(len(forepoints)):
print '-------------------------------'
if len(forepoints[i]) == len(maskpoints[i]):
print 'Contour ' + str(i) + ' matches'
else:
n = n + 1
print 'Contour ' + str(i) + ':'
print str(len(forepoints[i])) + ' points in foreground'
print str(len(maskpoints[i])) + ' points in mask'
print '-------------------------------'
if len(forepoints[i]) > len(maskpoints[i]):
count = len(maskpoints[i])
prob = 'mask'
else:
count = len(forepoints[i])
prob = 'foreground'
for j in range(-1,count - 1):
def foregradient(a,b):
foregrad = gradient(forepoints[a][b],forepoints[a][b+1])
return foregrad
def maskgradient(a,b):
maskgrad = gradient(maskpoints[a][b],maskpoints[a][b+1])
return maskgrad
foregrad = foregradient(i,j)
maskgrad = maskgradient(i,j)
if foregrad > 20:
foregrad = 100
if maskgrad > 20:
maskgrad = 100
if foregrad < -20:
foregrad = -100
if maskgrad < -20:
maskgrad = -100
if abs(foregrad - maskgrad) > 0.4:
mismatched.append(j+1)
mismatched = [mismatched[0]]
## Find second problem:
if prob == 'foreground':
foregrad = foregradient(i,j)
maskgrad = maskgradient(i,j+1)
else:
foregrad = foregradient(i,j+1)
maskgrad = maskgradient(i,j)
if foregrad > 20:
foregrad = 100
if maskgrad > 20:
maskgrad = 100
if foregrad < -20:
foregrad = -100
if maskgrad < -20:
maskgrad = -100
if abs(foregrad - maskgrad) > 0.4:
mismatched.append(j+1)
if abs(len(forepoints[i]) - len(maskpoints[i])) == 1:
if len(mismatched) == 1:
print 'Check between points ' + str(mismatched[0]) + ' and ' + str(mismatched[0] + 1)
else:
print 'Check amongst the last few points'
else:
if len(mismatched) == 2:
print 'Check between points ' + str(mismatched[0]) + ' and ' + str(mismatched[0] + 1)
print 'Check between points ' + str(mismatched[1]) + ' and ' + str(mismatched[1] + 1)
elif len(mismatched) == 1:
print 'Check between points ' + str(mismatched[0]) + ' and ' + str(mismatched[0] + 1)
print 'Check amongst the last few points'
else:
print 'Check amongst the last few points'
else:
print '-------------------------------'
print 'Foreground has ' + str(len(forepoints)) + ' contours'
print 'Mask has ' + str(len(maskpoints)) + ' contours'
print '-------------------------------'
################### INTERP SLIDER ###################################
## Collect mask points:
g.flipLayers("foreground", "mask")
all_mask_points = []
all_mask_points_length = []
for i in range(len(g)):
all_mask_points.append([])
for j in range(len(g[i].points)):
all_mask_points[i].append((g[i].points[j].x, g[i].points[j].y))
all_mask_points_length.append(j)
## Collect initial foreground points:
g.flipLayers("mask", "foreground")
all_fore_points = []
all_fore_points_length = []
for i in range(len(g)):
all_fore_points.append([])
for j in range(len(g[i].points)):
all_fore_points[i].append((g[i].points[j].x, g[i].points[j].y))
all_fore_points_length.append(j)
## Check for compatibility:
if n > 0:
pass
else:
## if compatible, interpolate:
def interp_fore(Glif, int_val):
for i in range(len(Glif)):
for j in range(len(Glif[i].points)):
fore_point = all_fore_points[i][j]
mask_point = all_mask_points[i][j]
Glif[i].points[j].x = mask_point[0] + ((fore_point[0] - mask_point[0]) * int_val)
Glif[i].points[j].y = mask_point[1] + ((fore_point[1] - mask_point[1]) * int_val)
class InterpWithMaskWindow:
def __init__(self, glyph):
if glyph is None:
print "There should be a glyph window selected."
return
self.glyph = glyph
self.w = Window((600, 36),"Interpolate Foreground with Mask (no AutoOrder):")
self.w.int = Slider((10, 6, -10, 22), value=1,
maxValue=3,
minValue=0,
callback=self.adjust)
self.w.open()
def adjust(self, sender):
int_val = self.w.int.get()
print round(int_val, 2)
Glif = self.glyph
interp_fore(Glif, int_val)
Glif.update()
OpenWindow(InterpWithMaskWindow, CurrentGlyph())
g.update()
g.performUndo()
t = Transform().translate(0, 0)
g.transform(t, doComponents=True)
g.update()
| 30.614754 | 105 | 0.493574 | 792 | 0.106024 | 0 | 0 | 0 | 0 | 0 | 0 | 1,800 | 0.240964 |
c3261a4d2211366618f8d261cfec66b8e3825641 | 429 | py | Python | ex062.py | paulo-caixeta/Exercicios_Curso_Python | 3b77925499c174ea9ff81dec65d6319125219b9a | [
"MIT"
]
| null | null | null | ex062.py | paulo-caixeta/Exercicios_Curso_Python | 3b77925499c174ea9ff81dec65d6319125219b9a | [
"MIT"
]
| null | null | null | ex062.py | paulo-caixeta/Exercicios_Curso_Python | 3b77925499c174ea9ff81dec65d6319125219b9a | [
"MIT"
]
 | null | null | null | # Continuation of ex061 (Arithmetic Progression terms)
print('Gerador de PA')
print('-=' * 10)
primeiro = int(input('Primeiro termo: '))
razão = int(input('Razão: '))
i = 0
n = 10
novos = 10
total = 0
while novos != 0:
total = total + novos
while i < total:
termo = primeiro + razão * i
i += 1
print(termo, end=' -> ')
print('PAUSA')
novos = int(input('Deseja mostrar mais termos? Quantos? '))
print('FIM') | 23.833333 | 63 | 0.578089 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.329493 |
c3266d2fbd585c2d178a2034896380f763b83e0c | 4,151 | py | Python | order/tests.py | DanLivassan/bookstore | f054c3dcb7d6b57c24f98ea28a23de0061d2ccf2 | [
"MIT"
]
| null | null | null | order/tests.py | DanLivassan/bookstore | f054c3dcb7d6b57c24f98ea28a23de0061d2ccf2 | [
"MIT"
]
| 1 | 2022-02-25T01:38:50.000Z | 2022-02-25T01:38:50.000Z | order/tests.py | DanLivassan/bookstore | f054c3dcb7d6b57c24f98ea28a23de0061d2ccf2 | [
"MIT"
]
| null | null | null | from random import randint
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from order.serializers import OrderSerializer
from product.models import Product
from order.models import Order
from rest_framework import status
from rest_framework.test import APIClient
MAX_PER_PAGE = 5
def sample_user(email='[email protected]', password='Sstring1'):
"""Creste a sample user"""
return get_user_model().objects.create_user(email, password)
def sample_product(title="My Product", price=1000, description="My product description"):
raw_product = {
'title': title,
'price': price,
'description': description,
}
return Product.objects.create(**raw_product)
def sample_order(user, products):
raw_order = {
# 'products': [product, product],
'user': user
}
order = Order.objects.create(**raw_order)
for product in products:
order.products.add(product)
return order
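# (Editor's note) many-to-many fields such as products cannot be passed to objects.create(),
# which is why they are attached with .add() only after the Order row exists.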
class ModelTests(TestCase):
def test_order_creation_and_str(self):
"""Test that order is created and str is showed properly"""
product1 = sample_product()
product2 = sample_product()
user = sample_user()
order = sample_order(user, [product1, product2])
self.assertEqual(2, len(order.products.all()))
self.assertIn(user.email, str(order))
self.assertIn("2", str(order))
class SerializerTests(TestCase):
def test_order_serializer(self):
"""Test that order serializer"""
product1 = sample_product()
product2 = sample_product()
user = sample_user()
order = sample_order(user, [product1, product2])
serialized_data = OrderSerializer(order)
data = serialized_data.data
self.assertEqual(len(data['products']), 2)
self.assertEqual(data['total'], 2000)
class PublicApiTests(TestCase):
def setUp(self) -> None:
self.client = APIClient()
def test_that_write_methods_fails(self):
"""Test if unauthenticated user perform write methods fails"""
url = reverse('product-list', args=['v1'])
post_response = self.client.post(url, {})
put_response = self.client.put(url, {})
patch_response = self.client.patch(url, {})
self.assertEqual(status.HTTP_403_FORBIDDEN,
post_response.status_code)
self.assertEqual(status.HTTP_403_FORBIDDEN,
put_response.status_code)
self.assertEqual(status.HTTP_403_FORBIDDEN,
patch_response.status_code)
def test_get_order_list(self):
"""Test that orders are retrieved porperly and if the sum of prices matches with sum of products price"""
url = reverse('order-list', args=['v1'])
total = 0
products = []
for i in range(5):
price = randint(1000, 2000)
total += price
products.append(sample_product(price=price))
user = sample_user()
sample_order(user=user, products=products)
response = self.client.get(url)
api_order = response.data['results'][0]
self.assertEqual(api_order['total'], total)
self.assertEqual(len(api_order['products']), 5)
class PrivateApiTest(TestCase):
def setUp(self) -> None:
self.user = sample_user(email='[email protected]')
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_create_a_new_order(self):
"""Test that an order are created porperly"""
url = reverse('order-list', args=['v1'])
sample_product()
sample_product()
products = Product.objects.all()
user = sample_user()
order_payload = {
'user': user.id,
'products_ids': [p.id for p in products]
}
response = self.client.post(url, order_payload)
orders = Order.objects.all()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(orders.count(), 1)
self.assertEqual(orders[0].products.count(), 2)
| 33.747967 | 113 | 0.645387 | 3,134 | 0.754999 | 0 | 0 | 0 | 0 | 0 | 0 | 596 | 0.14358 |
c326bebf1fd8cf9fedf46e490c5cf11624fd3c7e | 6,950 | py | Python | sam-app/tests/unit/test_apns.py | mgacy/Adequate-Backend | 7f62f692a3fff53f825e597289515bffadb8f25c | [
"MIT"
]
| 1 | 2021-06-03T07:27:18.000Z | 2021-06-03T07:27:18.000Z | sam-app/tests/unit/test_apns.py | mgacy/Adequate-Backend | 7f62f692a3fff53f825e597289515bffadb8f25c | [
"MIT"
]
| 3 | 2021-04-06T18:36:02.000Z | 2021-06-16T04:22:27.000Z | sam-app/tests/unit/test_apns.py | mgacy/Adequate-Backend | 7f62f692a3fff53f825e597289515bffadb8f25c | [
"MIT"
]
| null | null | null | import unittest
from .mocks import BotoSessionMock
from push_notification import apns
class APNSTestCase(unittest.TestCase):
def_apns_category = 'MGDailyDealCategory'
# def setUp(self):
# def tearDown(self):
# push_notification
# push_background
# make_new_deal_message
# make_delta_message
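    # (Editor's note) the expected strings in the tests below document the SNS message
    # envelope used for APNs pushes: a JSON object with a "default" fallback message plus
    # an "APNS" and/or "APNS_SANDBOX" key whose value is the stringified APNs payload.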
def test_make_delta_comment_1(self):
deal_id = 'a6k5A000000kP9LQAU'
delta_type = 'commentCount'
delta_value = 5
message = {
'id': deal_id,
'delta_type': delta_type,
'delta_value': delta_value
}
expected = (
'{"aps": {"content-available": 1}, '
'"deal-id": "a6k5A000000kP9LQAU", '
'"delta-type": "commentCount", '
'"delta-value": 5}'
)
result = apns.make_delta_message(message)
self.assertEqual(result, expected)
def test_make_delta_status_1(self):
deal_id = 'a6k5A000000kP9LQAU'
delta_type = 'launchStatus'
delta_value = 'launch'
message = {
'id': deal_id,
'delta_type': delta_type,
'delta_value': delta_value
}
expected = (
'{"aps": {"content-available": 1}, '
'"deal-id": "a6k5A000000kP9LQAU", '
'"delta-type": "launchStatus", '
'"delta-value": "launch"}'
)
result = apns.make_delta_message(message)
self.assertEqual(result, expected)
# publish_message
def test_publish_delta_status_prod(self):
message = (
'{"aps": {"content-available": 1}, '
'"deal-id": "a6k5A000000kP9LQAU", '
'"delta-type": "launchStatus", '
'"delta-value": "launch"}'
)
# deal_id = 'a6k5A000000kP9LQAU'
# delta_type = 'launchStatus'
# delta_value = 'launch'
# message = (
# '{"aps": {"content-available": 1}, '
# f'"deal-id": "{deal_id}", '
# f'"delta-type": "{delta_type}", '
# f'"delta-value": "{delta_value}"'
# '}'
# )
session = BotoSessionMock()
default_message='default message'
apns_server = 'prod'
apns.publish_message(session,
topic_arn='fake_topic_arn',
apns_server=apns_server,
apns_message=message,
default_message=default_message)
expected = (
'{'
'"default": "default message", '
'"APNS": "{'
'\\"aps\\": {'
'\\"content-available\\": 1'
'}, '
'\\"deal-id\\": \\"a6k5A000000kP9LQAU\\", '
'\\"delta-type\\": \\"launchStatus\\", '
'\\"delta-value\\": \\"launch\\"'
'}"'
'}'
)
result = session.client.message
self.assertEqual(result, expected)
def test_publish_delta_status_dev(self):
message = (
'{"aps": {"content-available": 1}, '
'"deal-id": "a6k5A000000kP9LQAU", '
'"delta-type": "launchStatus", '
'"delta-value": "launch"}'
)
session = BotoSessionMock()
default_message='default message'
apns_server = 'dev'
apns.publish_message(session,
topic_arn='fake_topic_arn',
apns_server=apns_server,
apns_message=message,
default_message=default_message)
expected = (
'{'
'"default": "default message", '
'"APNS_SANDBOX": "{'
'\\"aps\\": {'
'\\"content-available\\": 1'
'}, '
'\\"deal-id\\": \\"a6k5A000000kP9LQAU\\", '
'\\"delta-type\\": \\"launchStatus\\", '
'\\"delta-value\\": \\"launch\\"'
'}"'
'}'
)
result = session.client.message
self.assertEqual(result, expected)
def test_publish_delta_status_both(self):
message = (
'{"aps": {"content-available": 1}, '
'"deal-id": "a6k5A000000kP9LQAU", '
'"delta-type": "launchStatus", '
'"delta-value": "launch"}'
)
session = BotoSessionMock()
default_message='default message'
apns_server = 'both'
apns.publish_message(session,
topic_arn='fake_topic_arn',
apns_server=apns_server,
apns_message=message,
default_message=default_message)
expected = (
'{'
'"default": "default message", '
'"APNS": "{'
'\\"aps\\": {'
'\\"content-available\\": 1'
'}, '
'\\"deal-id\\": \\"a6k5A000000kP9LQAU\\", '
'\\"delta-type\\": \\"launchStatus\\", '
'\\"delta-value\\": \\"launch\\"'
'}", '
'"APNS_SANDBOX": "{'
'\\"aps\\": {'
'\\"content-available\\": 1'
'}, '
'\\"deal-id\\": \\"a6k5A000000kP9LQAU\\", '
'\\"delta-type\\": \\"launchStatus\\", '
'\\"delta-value\\": \\"launch\\"'
'}"'
'}'
)
result = session.client.message
self.assertEqual(result, expected)
def test_publish_invalid_server(self):
session = BotoSessionMock()
topic_arn='fake_topic_arn'
apns_server = 'meh'
apns_message ='{"aps": {"content-available": 1}'
default_message='default message'
self.assertRaises(
ValueError, apns.publish_message, session, topic_arn, apns_server, apns_message, default_message)
# _make_background_notification
def test_make_background_notification_no_additional(self):
additional = None
expected = {
'aps': {
'content-available': 1
}
}
result = apns._make_background_notification(additional)
self.assertEqual(result, expected)
def test_make_background_notification_with_additional(self):
deal_id = 'a6k5A000000kP9LQAU'
delta_type = 'commentCount'
delta_value = 5
additional = {
'id': deal_id,
'delta_type': delta_type,
'delta_value': delta_value
}
expected = {
'aps': {
'content-available': 1
},
'id': deal_id,
'delta_type': delta_type,
'delta_value': delta_value
}
result = apns._make_background_notification(additional)
self.assertDictEqual(result, expected)
# _make_notification
# def test_make_notification_1(self):
# raise_for_status
| 29.079498 | 109 | 0.489784 | 6,861 | 0.987194 | 0 | 0 | 0 | 0 | 0 | 0 | 2,474 | 0.355971 |
c327543b799027a0d190954bd8149ab8b7d7603f | 809 | py | Python | scrapets/extract.py | ownport/scrapets | e52609aae4d55fb9d4315f90d4e2fe3804ef8ff6 | [
"MIT"
]
| 2 | 2017-06-22T15:45:52.000Z | 2019-08-23T03:34:40.000Z | scrapets/extract.py | ownport/scrapets | e52609aae4d55fb9d4315f90d4e2fe3804ef8ff6 | [
"MIT"
]
| 9 | 2016-10-23T17:56:34.000Z | 2016-12-12T10:39:23.000Z | scrapets/extract.py | ownport/scrapets | e52609aae4d55fb9d4315f90d4e2fe3804ef8ff6 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
from HTMLParser import HTMLParser
# -------------------------------------------------------
#
# LinkExtractor: extract links from html page
#
class BaseExtractor(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self._links = []
@property
def links(self):
return self._links
class LinkExtractor(BaseExtractor):
def handle_starttag(self, tag, attrs):
if tag == 'a':
links = [v for k,v in attrs if k == 'href' and v not in self._links]
self._links.extend(links)
class ImageLinkExtractor(BaseExtractor):
def handle_starttag(self, tag, attrs):
if tag == 'img':
links = [v for k,v in attrs if k == 'src' and v not in self._links]
self._links.extend(links)
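# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Python 2 module (HTMLParser import path), so usage follows the same interpreter:
#
#   extractor = LinkExtractor()
#   extractor.feed('<a href="http://example.com/">example</a>')
#   print extractor.links        # -> ['http://example.com/']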
| 20.74359 | 80 | 0.566131 | 631 | 0.779975 | 0 | 0 | 58 | 0.071693 | 0 | 0 | 148 | 0.182942 |
c327b1f258b5961f3d1085b7f824f0cb5ee2f32a | 2,227 | py | Python | tour/forms.py | superdev0505/mtp-web | 8288765a89daaa7b02dfd7e78cc51c4f12d7fcce | [
"MIT"
]
| null | null | null | tour/forms.py | superdev0505/mtp-web | 8288765a89daaa7b02dfd7e78cc51c4f12d7fcce | [
"MIT"
]
| null | null | null | tour/forms.py | superdev0505/mtp-web | 8288765a89daaa7b02dfd7e78cc51c4f12d7fcce | [
"MIT"
]
| null | null | null | ## Django Packages
from django import forms
from django_select2 import forms as s2forms
## App packages
from .models import *
from datetime import datetime
from bootstrap_datepicker_plus import DatePickerInput, TimePickerInput, DateTimePickerInput, MonthPickerInput, YearPickerInput
from tags_input import fields
from lib.classes import CustomTagsInputField
############################################################################
############################################################################
class TourForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['name'] = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'data-validation': 'required'}),
required=False)
self.fields['description'] = forms.CharField(
widget=forms.Textarea(attrs={'class': 'form-control', 'rows': 4, 'data-validation': 'required'}),
required=False)
self.fields['tour_tag'] = CustomTagsInputField(
TourTag.objects.filter(is_actived=True),
create_missing=True,
required=False,
)
#
class Meta:
model = Tour
fields = (
'name',
'description',
'tour_tag'
)
class TourSearchForm(forms.Form):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['username'] = forms.CharField(
label='Username',
widget=forms.TextInput(attrs={'class': 'form-control'}),
required=False
)
self.fields['name'] = forms.CharField(
label='Tour Name',
widget=forms.TextInput(attrs={'class': 'form-control'}),
required=False
)
self.fields['tour_tag'] = CustomTagsInputField(
TourTag.objects.filter(is_actived=True),
create_missing=False,
required=False,
)
def _my(self, username):
self.fields['username'] = forms.CharField(
label='',
widget=forms.TextInput(attrs={'class': 'form-control d-none', 'value': username}),
required=False
)
| 31.814286 | 133 | 0.562191 | 1,710 | 0.767849 | 0 | 0 | 0 | 0 | 0 | 0 | 482 | 0.216435 |
c327d2cfdab4947c294367dcb469b4dd7dc0ab92 | 485 | py | Python | bots/test_analyseGithub.py | RSE2-D2/RSE2-D2 | eb535669cbc476b67d7cb6e1092eb0babe2f24df | [
"MIT"
]
| 3 | 2020-04-02T09:39:44.000Z | 2020-04-02T14:26:48.000Z | bots/test_analyseGithub.py | RSE2-D2/RSE2-D2 | eb535669cbc476b67d7cb6e1092eb0babe2f24df | [
"MIT"
]
| 16 | 2020-04-02T08:21:16.000Z | 2020-04-02T15:44:29.000Z | bots/test_analyseGithub.py | RSE2-D2/RSE2-D2 | eb535669cbc476b67d7cb6e1092eb0babe2f24df | [
"MIT"
]
| 1 | 2020-04-02T08:36:41.000Z | 2020-04-02T08:36:41.000Z | import analyseGithub
def test_containsGithubURL_empty():
assert not analyseGithub.containsGitHubURL("")
def test_containsGithubURL_noUrl():
assert not analyseGithub.containsGitHubURL("Some test tweet")
def test_containsGithubURL_url():
repo = "https://github.com/git/git"
assert analyseGithub.containsGitHubURL(repo)
def test_extractGitHubLink():
repo = "https://github.com/git/git"
assert analyseGithub.extractGitHubLink(f"{repo} more tweet") == "git/git"
| 30.3125 | 77 | 0.762887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.214433 |
c3283cdb2fefed11f9dc322c324670fa2d4fbccd | 1,069 | py | Python | tests/unit/utils/filebuffer_test.py | gotcha/salt | 7b84c704777d3d2062911895dc3fdf93d40e9848 | [
"Apache-2.0"
]
| 2 | 2019-03-30T02:12:56.000Z | 2021-03-08T18:59:46.000Z | tests/unit/utils/filebuffer_test.py | gotcha/salt | 7b84c704777d3d2062911895dc3fdf93d40e9848 | [
"Apache-2.0"
]
| null | null | null | tests/unit/utils/filebuffer_test.py | gotcha/salt | 7b84c704777d3d2062911895dc3fdf93d40e9848 | [
"Apache-2.0"
]
| 1 | 2020-12-04T11:28:06.000Z | 2020-12-04T11:28:06.000Z | # -*- coding: utf-8 -*-
'''
tests.unit.utils.filebuffer_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:codeauthor: :email:`Pedro Algarvio ([email protected])`
:copyright: © 2012 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
'''
# Import salt libs
from saltunittest import TestCase, TestLoader, TextTestRunner
from salt.utils.filebuffer import BufferedReader, InvalidFileMode
class TestFileBuffer(TestCase):
def test_read_only_mode(self):
with self.assertRaises(InvalidFileMode):
BufferedReader('/tmp/foo', mode='a')
with self.assertRaises(InvalidFileMode):
BufferedReader('/tmp/foo', mode='ab')
with self.assertRaises(InvalidFileMode):
BufferedReader('/tmp/foo', mode='w')
with self.assertRaises(InvalidFileMode):
BufferedReader('/tmp/foo', mode='wb')
if __name__ == "__main__":
loader = TestLoader()
tests = loader.loadTestsFromTestCase(TestFileBuffer)
TextTestRunner(verbosity=1).run(tests)
| 30.542857 | 75 | 0.663237 | 463 | 0.43271 | 0 | 0 | 0 | 0 | 0 | 0 | 381 | 0.356075 |
c328c8c92438a707d941d9592d5c8e996b8cd217 | 2,881 | py | Python | ranking_baselines/ARCII/rank_metrics.py | dileep1996/mnsrf_ranking_suggestion | 5bd241fb49f08fa4937539991e12e5a502d5a072 | [
"MIT"
]
| 1 | 2020-02-04T18:27:25.000Z | 2020-02-04T18:27:25.000Z | ranking_baselines/DRMM/rank_metrics.py | dileep1996/mnsrf_ranking_suggestion | 5bd241fb49f08fa4937539991e12e5a502d5a072 | [
"MIT"
]
| null | null | null | ranking_baselines/DRMM/rank_metrics.py | dileep1996/mnsrf_ranking_suggestion | 5bd241fb49f08fa4937539991e12e5a502d5a072 | [
"MIT"
]
| null | null | null | ###############################################################################
# Author: Wasi Ahmad
# Project: ARC-II: Convolutional Matching Model
# Date Created: 7/18/2017
#
# File Description: This script contains ranking evaluation functions.
###############################################################################
import torch, numpy
def mean_average_precision(logits, target):
"""
Compute mean average precision.
:param logits: 2d tensor [batch_size x num_clicks_per_query]
:param target: 2d tensor [batch_size x num_clicks_per_query]
:return: mean average precision [a float value]
"""
assert logits.size() == target.size()
sorted, indices = torch.sort(logits, 1, descending=True)
map = 0
for i in range(indices.size(0)):
average_precision = 0
num_rel = 0
for j in range(indices.size(1)):
if target[i, indices[i, j].data[0]].data[0] == 1:
num_rel += 1
average_precision += num_rel / (j + 1)
average_precision = average_precision / num_rel
map += average_precision
return map / indices.size(0)
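# (Editor's note) assumes every query row contains at least one relevant item; if a row has
# none, num_rel stays 0 and the average_precision division above raises ZeroDivisionError.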
def NDCG(logits, target, k):
"""
Compute normalized discounted cumulative gain.
:param logits: 2d tensor [batch_size x rel_docs_per_query]
:param target: 2d tensor [batch_size x rel_docs_per_query]
    :return: normalized discounted cumulative gain [a float value]
"""
assert logits.size() == target.size()
assert logits.size(1) >= k, 'NDCG@K cannot be computed, invalid value of K.'
sorted, indices = torch.sort(logits, 1, descending=True)
NDCG = 0
for i in range(indices.size(0)):
DCG_ref = 0
num_rel_docs = torch.nonzero(target[i].data).size(0)
for j in range(indices.size(1)):
if j == k:
break
if target[i, indices[i, j].data[0]].data[0] == 1:
DCG_ref += 1 / numpy.log2(j + 2)
DCG_gt = 0
for j in range(num_rel_docs):
if j == k:
break
DCG_gt += 1 / numpy.log2(j + 2)
NDCG += DCG_ref / DCG_gt
return NDCG / indices.size(0)
def MRR(logits, target):
"""
Compute mean reciprocal rank.
:param logits: 2d tensor [batch_size x rel_docs_per_query]
:param target: 2d tensor [batch_size x rel_docs_per_query]
:return: mean reciprocal rank [a float value]
"""
assert logits.size() == target.size()
sorted, indices = torch.sort(logits, 1, descending=True)
total_reciprocal_rank = 0
for i in range(indices.size(0)):
for j in range(indices.size(1)):
if target[i, indices[i, j].data[0]].data[0] == 1:
total_reciprocal_rank += 1.0 / (j + 1)
break
return total_reciprocal_rank / logits.size(0)
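# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# The functions above index tensors via .data[0], i.e. they were written for the old
# torch.autograd.Variable API; shapes are [batch_size x docs_per_query] with binary labels.
#
#   from torch.autograd import Variable
#   logits = Variable(torch.Tensor([[0.9, 0.2, 0.7], [0.1, 0.8, 0.3]]))
#   target = Variable(torch.Tensor([[1, 0, 0], [0, 1, 1]]))
#   mean_average_precision(logits, target); NDCG(logits, target, k=2); MRR(logits, target)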
| 34.710843 | 81 | 0.560222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,081 | 0.375217 |
c3292201406d3697087e8916c4dd2621e50dc55a | 192 | py | Python | src/wwucs/bot/__init__.py | reillysiemens/wwucs-bot | 9e48ba5dc981e36cd8b18345bcbd3768c3deeeb8 | [
"0BSD"
]
| null | null | null | src/wwucs/bot/__init__.py | reillysiemens/wwucs-bot | 9e48ba5dc981e36cd8b18345bcbd3768c3deeeb8 | [
"0BSD"
]
| null | null | null | src/wwucs/bot/__init__.py | reillysiemens/wwucs-bot | 9e48ba5dc981e36cd8b18345bcbd3768c3deeeb8 | [
"0BSD"
]
| null | null | null | """WWUCS Bot module."""
__all__ = [
"__author__",
"__email__",
"__version__",
]
__author__ = "Reilly Tucker Siemens"
__email__ = "[email protected]"
__version__ = "0.1.0"
| 16 | 38 | 0.651042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.598958 |
c3295559778e2a7c61a68e36cb3971cb3e83f7f7 | 10,638 | py | Python | deploy/python/det_keypoint_unite_infer.py | Amanda-Barbara/PaddleDetection | 65ac13074eaaa2447c644a2df71969d8a3dd1fae | [
"Apache-2.0"
]
| null | null | null | deploy/python/det_keypoint_unite_infer.py | Amanda-Barbara/PaddleDetection | 65ac13074eaaa2447c644a2df71969d8a3dd1fae | [
"Apache-2.0"
]
| null | null | null | deploy/python/det_keypoint_unite_infer.py | Amanda-Barbara/PaddleDetection | 65ac13074eaaa2447c644a2df71969d8a3dd1fae | [
"Apache-2.0"
]
| null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import cv2
import math
import numpy as np
import paddle
import yaml
from det_keypoint_unite_utils import argsparser
from preprocess import decode_image
from infer import Detector, DetectorPicoDet, PredictConfig, print_arguments, get_test_images, bench_log
from keypoint_infer import KeyPointDetector, PredictConfig_KeyPoint
from visualize import visualize_pose
from benchmark_utils import PaddleInferBenchmark
from utils import get_current_memory_mb
from keypoint_postprocess import translate_to_ori_images
KEYPOINT_SUPPORT_MODELS = {
'HigherHRNet': 'keypoint_bottomup',
'HRNet': 'keypoint_topdown'
}
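# (Editor's note) maps a keypoint architecture name to its paradigm; the combined
# detector + keypoint pipeline below asserts the top-down branch ('HRNet') via this table.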
def predict_with_given_det(image, det_res, keypoint_detector,
keypoint_batch_size, run_benchmark):
rec_images, records, det_rects = keypoint_detector.get_person_from_rect(
image, det_res)
keypoint_vector = []
score_vector = []
rect_vector = det_rects
keypoint_results = keypoint_detector.predict_image(
rec_images, run_benchmark, repeats=10, visual=False)
keypoint_vector, score_vector = translate_to_ori_images(keypoint_results,
np.array(records))
keypoint_res = {}
keypoint_res['keypoint'] = [
keypoint_vector.tolist(), score_vector.tolist()
] if len(keypoint_vector) > 0 else [[], []]
keypoint_res['bbox'] = rect_vector
return keypoint_res
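# (Editor's note) keypoint_res layout: {'keypoint': [keypoints, scores], 'bbox': det_rects},
# with keypoints/scores already translated back to original-image coordinates.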
def topdown_unite_predict(detector,
topdown_keypoint_detector,
image_list,
keypoint_batch_size=1,
save_res=False):
det_timer = detector.get_timer()
store_res = []
for i, img_file in enumerate(image_list):
# Decode image in advance in det + pose prediction
det_timer.preprocess_time_s.start()
image, _ = decode_image(img_file, {})
det_timer.preprocess_time_s.end()
if FLAGS.run_benchmark:
results = detector.predict_image(
[image], run_benchmark=True, repeats=10)
cm, gm, gu = get_current_memory_mb()
detector.cpu_mem += cm
detector.gpu_mem += gm
detector.gpu_util += gu
else:
results = detector.predict_image([image], visual=False)
results = detector.filter_box(results, FLAGS.det_threshold)
if results['boxes_num'] > 0:
keypoint_res = predict_with_given_det(
image, results, topdown_keypoint_detector, keypoint_batch_size,
FLAGS.run_benchmark)
if save_res:
save_name = img_file if isinstance(img_file, str) else i
store_res.append([
save_name, keypoint_res['bbox'],
[keypoint_res['keypoint'][0], keypoint_res['keypoint'][1]]
])
else:
results["keypoint"] = [[], []]
keypoint_res = results
if FLAGS.run_benchmark:
cm, gm, gu = get_current_memory_mb()
topdown_keypoint_detector.cpu_mem += cm
topdown_keypoint_detector.gpu_mem += gm
topdown_keypoint_detector.gpu_util += gu
else:
if not os.path.exists(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
visualize_pose(
img_file,
keypoint_res,
visual_thresh=FLAGS.keypoint_threshold,
save_dir=FLAGS.output_dir)
if save_res:
"""
1) store_res: a list of image_data
2) image_data: [imageid, rects, [keypoints, scores]]
3) rects: list of rect [xmin, ymin, xmax, ymax]
4) keypoints: 17(joint numbers)*[x, y, conf], total 51 data in list
5) scores: mean of all joint conf
"""
with open("det_keypoint_unite_image_results.json", 'w') as wf:
json.dump(store_res, wf, indent=4)
def topdown_unite_predict_video(detector,
topdown_keypoint_detector,
camera_id,
keypoint_batch_size=1,
save_res=False):
video_name = 'output.mp4'
if camera_id != -1:
capture = cv2.VideoCapture(camera_id)
else:
capture = cv2.VideoCapture(FLAGS.video_file)
video_name = os.path.split(FLAGS.video_file)[-1]
# Get Video info : resolution, fps, frame count
width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(capture.get(cv2.CAP_PROP_FPS))
frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
print("fps: %d, frame_count: %d" % (fps, frame_count))
if not os.path.exists(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
out_path = os.path.join(FLAGS.output_dir, video_name)
fourcc = cv2.VideoWriter_fourcc(* 'mp4v')
writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
index = 0
store_res = []
while (1):
ret, frame = capture.read()
if not ret:
break
index += 1
print('detect frame: %d' % (index))
frame2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
results = detector.predict_image([frame2], visual=False)
results = detector.filter_box(results, FLAGS.det_threshold)
if results['boxes_num'] == 0:
writer.write(frame)
continue
keypoint_res = predict_with_given_det(
frame2, results, topdown_keypoint_detector, keypoint_batch_size,
FLAGS.run_benchmark)
im = visualize_pose(
frame,
keypoint_res,
visual_thresh=FLAGS.keypoint_threshold,
returnimg=True)
if save_res:
store_res.append([
index, keypoint_res['bbox'],
[keypoint_res['keypoint'][0], keypoint_res['keypoint'][1]]
])
writer.write(im)
if camera_id != -1:
cv2.imshow('Mask Detection', im)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
writer.release()
print('output_video saved to: {}'.format(out_path))
if save_res:
"""
1) store_res: a list of frame_data
2) frame_data: [frameid, rects, [keypoints, scores]]
3) rects: list of rect [xmin, ymin, xmax, ymax]
4) keypoints: 17(joint numbers)*[x, y, conf], total 51 data in list
5) scores: mean of all joint conf
"""
with open("det_keypoint_unite_video_results.json", 'w') as wf:
json.dump(store_res, wf, indent=4)
def main():
deploy_file = os.path.join(FLAGS.det_model_dir, 'infer_cfg.yml')
with open(deploy_file) as f:
yml_conf = yaml.safe_load(f)
arch = yml_conf['arch']
detector_func = 'Detector'
if arch == 'PicoDet':
detector_func = 'DetectorPicoDet'
detector = eval(detector_func)(FLAGS.det_model_dir,
device=FLAGS.device,
run_mode=FLAGS.run_mode,
trt_min_shape=FLAGS.trt_min_shape,
trt_max_shape=FLAGS.trt_max_shape,
trt_opt_shape=FLAGS.trt_opt_shape,
trt_calib_mode=FLAGS.trt_calib_mode,
cpu_threads=FLAGS.cpu_threads,
enable_mkldnn=FLAGS.enable_mkldnn,
threshold=FLAGS.det_threshold)
topdown_keypoint_detector = KeyPointDetector(
FLAGS.keypoint_model_dir,
device=FLAGS.device,
run_mode=FLAGS.run_mode,
batch_size=FLAGS.keypoint_batch_size,
trt_min_shape=FLAGS.trt_min_shape,
trt_max_shape=FLAGS.trt_max_shape,
trt_opt_shape=FLAGS.trt_opt_shape,
trt_calib_mode=FLAGS.trt_calib_mode,
cpu_threads=FLAGS.cpu_threads,
enable_mkldnn=FLAGS.enable_mkldnn,
use_dark=FLAGS.use_dark)
keypoint_arch = topdown_keypoint_detector.pred_config.arch
assert KEYPOINT_SUPPORT_MODELS[
keypoint_arch] == 'keypoint_topdown', 'Detection-Keypoint unite inference only supports topdown models.'
# predict from video file or camera video stream
if FLAGS.video_file is not None or FLAGS.camera_id != -1:
topdown_unite_predict_video(detector, topdown_keypoint_detector,
FLAGS.camera_id, FLAGS.keypoint_batch_size,
FLAGS.save_res)
else:
# predict from image
img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
topdown_unite_predict(detector, topdown_keypoint_detector, img_list,
FLAGS.keypoint_batch_size, FLAGS.save_res)
if not FLAGS.run_benchmark:
detector.det_times.info(average=True)
topdown_keypoint_detector.det_times.info(average=True)
else:
mode = FLAGS.run_mode
det_model_dir = FLAGS.det_model_dir
det_model_info = {
'model_name': det_model_dir.strip('/').split('/')[-1],
'precision': mode.split('_')[-1]
}
bench_log(detector, img_list, det_model_info, name='Det')
keypoint_model_dir = FLAGS.keypoint_model_dir
keypoint_model_info = {
'model_name': keypoint_model_dir.strip('/').split('/')[-1],
'precision': mode.split('_')[-1]
}
bench_log(topdown_keypoint_detector, img_list, keypoint_model_info,
FLAGS.keypoint_batch_size, 'KeyPoint')
if __name__ == '__main__':
paddle.enable_static()
parser = argsparser()
FLAGS = parser.parse_args()
print_arguments(FLAGS)
FLAGS.device = FLAGS.device.upper()
assert FLAGS.device in ['CPU', 'GPU', 'XPU'
], "device should be CPU, GPU or XPU"
main()
| 39.254613 | 112 | 0.611957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,976 | 0.185749 |
c329db170d0245164f12a99cffcce2a4d1c0ef5a | 551 | py | Python | plugins/google_cloud_compute/komand_google_cloud_compute/actions/disk_detach/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
]
| 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/google_cloud_compute/komand_google_cloud_compute/actions/disk_detach/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
]
| 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/google_cloud_compute/komand_google_cloud_compute/actions/disk_detach/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
]
| 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | import insightconnect_plugin_runtime
from .schema import DiskDetachInput, DiskDetachOutput, Input, Component
class DiskDetach(insightconnect_plugin_runtime.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="disk_detach", description=Component.DESCRIPTION, input=DiskDetachInput(), output=DiskDetachOutput()
)
def run(self, params={}):
return self.connection.client.disk_detach(
params.get(Input.ZONE), params.get(Input.INSTANCE), params.get(Input.DEVICENAME)
)
| 34.4375 | 117 | 0.718693 | 438 | 0.794918 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.023593 |
c32a765467990449b567bcb5b74b49876b530290 | 431 | py | Python | troupon/payment/serializers.py | andela/troupon | 3704cbe6e69ba3e4c53401d3bbc339208e9ebccd | [
"MIT"
]
| 14 | 2016-01-12T07:31:09.000Z | 2021-11-20T19:29:35.000Z | troupon/payment/serializers.py | andela/troupon | 3704cbe6e69ba3e4c53401d3bbc339208e9ebccd | [
"MIT"
]
| 52 | 2015-09-02T14:54:43.000Z | 2016-08-01T08:22:21.000Z | troupon/payment/serializers.py | andela/troupon | 3704cbe6e69ba3e4c53401d3bbc339208e9ebccd | [
"MIT"
]
| 17 | 2015-09-30T13:18:48.000Z | 2021-11-18T16:25:12.000Z | """Serializers for the payment app."""
from rest_framework import serializers
from models import Purchases
class TransactionSerializer(serializers.ModelSerializer):
"""Serializer for Transaction instances.
"""
class Meta:
model = Purchases
fields = ('id', 'item', 'price', 'quantity', 'title',
'description', 'stripe_transaction_id',
'stripe_transaction_status')
| 26.9375 | 61 | 0.654292 | 320 | 0.742459 | 0 | 0 | 0 | 0 | 0 | 0 | 183 | 0.424594 |
c32ab97ad989123fa02793d4bdfb1b13b2fa964a | 4,817 | py | Python | mla/kmeans.py | anshulg5/MLAlgorithms | 6c12ebe64016eabb9527fb1f18be81cd3ff0c599 | [
"MIT"
]
| 1 | 2020-04-22T22:03:51.000Z | 2020-04-22T22:03:51.000Z | mla/kmeans.py | anshulg5/MLAlgorithms | 6c12ebe64016eabb9527fb1f18be81cd3ff0c599 | [
"MIT"
]
| 1 | 2021-06-25T15:40:35.000Z | 2021-06-25T15:40:35.000Z | mla/kmeans.py | anshulg5/MLAlgorithms | 6c12ebe64016eabb9527fb1f18be81cd3ff0c599 | [
"MIT"
]
| 2 | 2019-07-21T13:19:17.000Z | 2020-12-28T05:46:37.000Z | import random
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from mla.base import BaseEstimator
from mla.metrics.distance import euclidean_distance
random.seed(1111)
class KMeans(BaseEstimator):
"""Partition a dataset into K clusters.
Finds clusters by repeatedly assigning each data point to the cluster with
the nearest centroid and iterating until the assignments converge (meaning
they don't change during an iteration) or the maximum number of iterations
is reached.
Parameters
----------
K : int
The number of clusters into which the dataset is partitioned.
max_iters: int
The maximum iterations of assigning points to the nearest cluster.
Short-circuited by the assignments converging on their own.
init: str, default 'random'
The name of the method used to initialize the first clustering.
'random' - Randomly select values from the dataset as the K centroids.
'++' - Select a random first centroid from the dataset, then select
K - 1 more centroids by choosing values from the dataset with a
probability distribution proportional to the squared distance
from each point's closest existing cluster. Attempts to create
larger distances between initial clusters to improve convergence
rates and avoid degenerate cases.
"""
y_required = False
def __init__(self, K=5, max_iters=100, init='random'):
self.K = K
self.max_iters = max_iters
self.clusters = [[] for _ in range(self.K)]
self.centroids = []
self.init = init
    def _initialize_centroids(self, init):
"""Set the initial centroids."""
if init == 'random':
self.centroids = [self.X[x] for x in
random.sample(range(self.n_samples), self.K)]
elif init == '++':
self.centroids = [random.choice(self.X)]
while len(self.centroids) < self.K:
self.centroids.append(self._choose_next_center())
else:
raise ValueError('Unknown type of init parameter')
def _predict(self, X=None):
"""Perform the clustering on the dataset."""
        self._initialize_centroids(self.init)
centroids = self.centroids
for _ in range(self.max_iters):
self._assign(centroids)
centroids_old = centroids
centroids = [self._get_centroid(cluster) for cluster in self.clusters]
if self._is_converged(centroids_old, centroids):
break
self.centroids = centroids
return self._get_predictions()
def _get_predictions(self):
predictions = np.empty(self.n_samples)
for i, cluster in enumerate(self.clusters):
for index in cluster:
predictions[index] = i
return predictions
def _assign(self, centroids):
for row in range(self.n_samples):
for i, cluster in enumerate(self.clusters):
if row in cluster:
self.clusters[i].remove(row)
break
closest = self._closest(row, centroids)
self.clusters[closest].append(row)
def _closest(self, fpoint, centroids):
closest_index = None
closest_distance = None
for i, point in enumerate(centroids):
dist = euclidean_distance(self.X[fpoint], point)
if closest_index is None or dist < closest_distance:
closest_index = i
closest_distance = dist
return closest_index
def _get_centroid(self, cluster):
"""Get values by indices and take the mean."""
return [np.mean(np.take(self.X[:, i], cluster)) for i in range(self.n_features)]
def _dist_from_centers(self):
return np.array([min([euclidean_distance(x, c) for c in self.centroids]) for x in self.X])
def _choose_next_center(self):
distances = self._dist_from_centers()
probs = distances / distances.sum()
cumprobs = probs.cumsum()
r = random.random()
ind = np.where(cumprobs >= r)[0][0]
return self.X[ind]
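    # (Editor's note) the class docstring describes sampling proportional to the *squared*
    # distance (standard k-means++); as written, probs above weights by the plain distance.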
def _is_converged(self, centroids_old, centroids):
return True if sum([euclidean_distance(centroids_old[i], centroids[i]) for i in range(self.K)]) == 0 else False
def plot(self, data=None):
sns.set(style="white")
if data is None:
data = self.X
for i, index in enumerate(self.clusters):
point = np.array(data[index]).T
plt.scatter(*point, c=sns.color_palette("hls", self.K + 1)[i])
for point in self.centroids:
plt.scatter(*point, marker='x', linewidths=10)
plt.show()
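# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Assumes the BaseEstimator API exposes fit()/predict() that forward to _predict() above:
#
#   X = np.random.rand(300, 2)           # 300 samples, 2 features
#   kmeans = KMeans(K=4, max_iters=150, init='++')
#   kmeans.fit(X)                        # stores the data on the estimator
#   assignments = kmeans.predict()       # one cluster index per sample
#   kmeans.plot()                        # scatter plot of clusters and centroids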
| 34.654676 | 119 | 0.622379 | 4,619 | 0.958896 | 0 | 0 | 0 | 0 | 0 | 0 | 1,389 | 0.288354 |
c32ad26a1993eb568f93f3377b6a0497a0eab914 | 10,741 | py | Python | train_classifier.py | justusmattern/dist-embeds | 2a5fd97bcfc3eed5c7f11e76d82c4ff49709cbe8 | [
"MIT"
]
| null | null | null | train_classifier.py | justusmattern/dist-embeds | 2a5fd97bcfc3eed5c7f11e76d82c4ff49709cbe8 | [
"MIT"
]
| null | null | null | train_classifier.py | justusmattern/dist-embeds | 2a5fd97bcfc3eed5c7f11e76d82c4ff49709cbe8 | [
"MIT"
]
| null | null | null | import os
import sys
import argparse
import time
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
# from sru import *
import dataloader
import modules
class Model(nn.Module):
def __init__(self, embedding, hidden_size=150, depth=1, dropout=0.3, cnn=False, nclasses=2, args=None):
super(Model, self).__init__()
self.cnn = cnn
self.drop = nn.Dropout(dropout)
self.args = args
self.emb_layer = modules.EmbeddingLayer(
embs = dataloader.load_embedding(embedding), dist_embeds = self.args.dist_embeds
)
self.word2id = self.emb_layer.word2id
if cnn:
self.encoder = modules.CNN_Text(
self.emb_layer.n_d,
widths = [3,4,5],
filters=hidden_size
)
d_out = 3*hidden_size
else:
self.encoder = nn.LSTM(
self.emb_layer.n_d,
hidden_size//2,
depth,
dropout = dropout,
# batch_first=True,
bidirectional=True
)
d_out = hidden_size
# else:
# self.encoder = SRU(
# emb_layer.n_d,
# args.d,
# args.depth,
# dropout = args.dropout,
# )
# d_out = args.d
self.out = nn.Linear(d_out, nclasses)
def forward(self, input):
if self.cnn:
input = input.t()
if self.args.dist_embeds:
emb, kl_loss = self.emb_layer(input)
else:
emb = self.emb_layer(input)
emb = self.drop(emb)
if self.cnn:
output = self.encoder(emb)
else:
output, hidden = self.encoder(emb)
# output = output[-1]
output = torch.max(output, dim=0)[0].squeeze()
output = self.drop(output)
if self.args.dist_embeds:
return self.out(output), kl_loss
else:
return self.out(output)
def text_pred(self, text, batch_size=32):
batches_x = dataloader.create_batches_x(
text,
batch_size, ##TODO
self.word2id
)
outs = []
with torch.no_grad():
for x in batches_x:
x = Variable(x)
if self.cnn:
x = x.t()
emb = self.emb_layer(x)
if self.cnn:
output = self.encoder(emb)
else:
output, hidden = self.encoder(emb)
# output = output[-1]
output = torch.max(output, dim=0)[0]
outs.append(F.softmax(self.out(output), dim=-1))
return torch.cat(outs, dim=0)
def eval_model(niter, model, input_x, input_y):
model.eval()
# N = len(valid_x)
# criterion = nn.CrossEntropyLoss()
correct = 0.0
cnt = 0.
# total_loss = 0.0
with torch.no_grad():
for x, y in zip(input_x, input_y):
x, y = Variable(x, volatile=True), Variable(y)
if model.args.dist_embeds:
output, kl_loss = model(x)
else:
output = model(x)
# loss = criterion(output, y)
# total_loss += loss.item()*x.size(1)
pred = output.data.max(1)[1]
correct += pred.eq(y.data).cpu().sum()
cnt += y.numel()
model.train()
return correct.item()/cnt
def train_model(epoch, model, optimizer,
train_x, train_y,
test_x, test_y,
best_test, save_path):
model.train()
niter = epoch*len(train_x)
criterion = nn.CrossEntropyLoss()
cnt = 0
for x, y in zip(train_x, train_y):
niter += 1
cnt += 1
model.zero_grad()
x, y = Variable(x), Variable(y)
if model.args.dist_embeds:
output, kl_loss = model(x)
ce_loss = criterion(output, y)
loss = ce_loss + model.args.kl_weight*kl_loss
else:
output = model(x)
loss = criterion(output, y)
loss.backward()
optimizer.step()
test_acc = eval_model(niter, model, test_x, test_y)
if model.args.dist_embeds:
sys.stdout.write("Epoch={} iter={} lr={:.6f} train_loss_class={:.6f} train_loss_kl={:.6f} train_loss_ovr = {:.6f} test_err={:.6f}\n".format(
epoch, niter,
optimizer.param_groups[0]['lr'],
ce_loss.item(), kl_loss.item(), loss.item(),
test_acc
))
else:
sys.stdout.write("Epoch={} iter={} lr={:.6f} train_loss = {:.6f} test_err={:.6f}\n".format(
epoch, niter,
optimizer.param_groups[0]['lr'],
loss.item(),
test_acc
))
if test_acc > best_test:
best_test = test_acc
if save_path:
torch.save(model.state_dict(), save_path)
# test_err = eval_model(niter, model, test_x, test_y)
sys.stdout.write("\n")
return best_test
def save_data(data, labels, path, type='train'):
with open(os.path.join(path, type+'.txt'), 'w') as ofile:
for text, label in zip(data, labels):
ofile.write('{} {}\n'.format(label, ' '.join(text)))
def main(args):
if args.dataset == 'mr':
# data, label = dataloader.read_MR(args.path)
# train_x, train_y, test_x, test_y = dataloader.cv_split2(
# data, label,
# nfold=10,
# valid_id=args.cv
# )
#
# if args.save_data_split:
# save_data(train_x, train_y, args.path, 'train')
# save_data(test_x, test_y, args.path, 'test')
train_x, train_y = dataloader.read_corpus('data/mr/train.txt')
test_x, test_y = dataloader.read_corpus('data/mr/test.txt')
elif args.dataset == 'imdb':
train_x, train_y = dataloader.read_corpus(os.path.join('/data/medg/misc/jindi/nlp/datasets/imdb',
'train_tok.csv'),
clean=False, MR=True, shuffle=True)
test_x, test_y = dataloader.read_corpus(os.path.join('/data/medg/misc/jindi/nlp/datasets/imdb',
'test_tok.csv'),
clean=False, MR=True, shuffle=True)
else:
train_x, train_y = dataloader.read_corpus('/afs/csail.mit.edu/u/z/zhijing/proj/to_di/data/{}/'
'train_tok.csv'.format(args.dataset),
clean=False, MR=False, shuffle=True)
test_x, test_y = dataloader.read_corpus('/afs/csail.mit.edu/u/z/zhijing/proj/to_di/data/{}/'
'test_tok.csv'.format(args.dataset),
clean=False, MR=False, shuffle=True)
nclasses = max(train_y) + 1
# elif args.dataset == 'subj':
# data, label = dataloader.read_SUBJ(args.path)
# elif args.dataset == 'cr':
# data, label = dataloader.read_CR(args.path)
# elif args.dataset == 'mpqa':
# data, label = dataloader.read_MPQA(args.path)
# elif args.dataset == 'trec':
# train_x, train_y, test_x, test_y = dataloader.read_TREC(args.path)
# data = train_x + test_x
# label = None
# elif args.dataset == 'sst':
# train_x, train_y, valid_x, valid_y, test_x, test_y = dataloader.read_SST(args.path)
# data = train_x + valid_x + test_x
# label = None
# else:
# raise Exception("unknown dataset: {}".format(args.dataset))
# if args.dataset == 'trec':
# elif args.dataset != 'sst':
# train_x, train_y, valid_x, valid_y, test_x, test_y = dataloader.cv_split(
# data, label,
# nfold = 10,
# test_id = args.cv
# )
model = Model(args.embedding, args.d, args.depth, args.dropout, args.cnn, nclasses, args=args).cuda()
need_grad = lambda x: x.requires_grad
optimizer = optim.Adam(
filter(need_grad, model.parameters()),
lr = args.lr
)
train_x, train_y = dataloader.create_batches(
train_x, train_y,
args.batch_size,
model.word2id,
)
# valid_x, valid_y = dataloader.create_batches(
# valid_x, valid_y,
# args.batch_size,
# emb_layer.word2id,
# )
test_x, test_y = dataloader.create_batches(
test_x, test_y,
args.batch_size,
model.word2id,
)
best_test = 0
# test_err = 1e+8
for epoch in range(args.max_epoch):
best_test = train_model(epoch, model, optimizer,
train_x, train_y,
# valid_x, valid_y,
test_x, test_y,
best_test, args.save_path
)
if args.lr_decay>0:
optimizer.param_groups[0]['lr'] *= args.lr_decay
# sys.stdout.write("best_valid: {:.6f}\n".format(
# best_valid
# ))
sys.stdout.write("test_err: {:.6f}\n".format(
best_test
))
if __name__ == "__main__":
argparser = argparse.ArgumentParser(sys.argv[0], conflict_handler='resolve')
argparser.add_argument("--cnn", action='store_true', help="whether to use cnn")
argparser.add_argument("--lstm", action='store_true', help="whether to use lstm")
argparser.add_argument("--dataset", type=str, default="mr", help="which dataset")
argparser.add_argument("--embedding", type=str, required=True, help="word vectors")
argparser.add_argument("--batch_size", "--batch", type=int, default=32)
argparser.add_argument("--max_epoch", type=int, default=70)
argparser.add_argument("--d", type=int, default=150)
argparser.add_argument("--dropout", type=float, default=0.3)
argparser.add_argument("--depth", type=int, default=1)
argparser.add_argument("--lr", type=float, default=0.001)
argparser.add_argument("--lr_decay", type=float, default=0)
argparser.add_argument("--cv", type=int, default=0)
argparser.add_argument("--save_path", type=str, default='')
argparser.add_argument("--save_data_split", action='store_true', help="whether to save train/test split")
argparser.add_argument("--gpu_id", type=int, default=0)
argparser.add_argument("--kl_weight", type=float, default = 0.001)
argparser.add_argument("--dist_embeds", action='store_true')
args = argparser.parse_args()
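    # (Editor's note) illustrative invocation, assuming a local word-vector text file:
    #   python train_classifier.py --dataset mr --embedding glove.840B.300d.txt \
    #       --cnn --max_epoch 20 --save_path model.pt --dist_embeds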
# args.save_path = os.path.join(args.save_path, args.dataset)
print (args)
torch.cuda.set_device(args.gpu_id)
main(args)
| 34.536977 | 147 | 0.553114 | 2,615 | 0.24346 | 0 | 0 | 0 | 0 | 0 | 0 | 2,797 | 0.260404 |
c32b3afbd77078a6af646cb681c0da4280c9fc0a | 1,844 | py | Python | app/request/queue.py | infrared5/massroute-pi | c2e16d655058c6c5531ec66f8a82fe41ad4e8427 | [
"MIT"
]
| null | null | null | app/request/queue.py | infrared5/massroute-pi | c2e16d655058c6c5531ec66f8a82fe41ad4e8427 | [
"MIT"
]
| null | null | null | app/request/queue.py | infrared5/massroute-pi | c2e16d655058c6c5531ec66f8a82fe41ad4e8427 | [
"MIT"
]
| null | null | null | import logging
from time import sleep
logger = logging.getLogger(__name__)
class StopRequestQueue:
cursor = 0
queue = None
service = None
current_request = None
request_delay = 0 # seconds
def __init__(self, service, request_delay=10):
self.queue = []
self.service = service
self.request_delay = request_delay
def add_request(self, request):
self.queue.append(request)
logger.info("Request added for %r. Queue length at %d" % (request.stop_id, len(self.queue)))
def success(self, data):
logger.debug("Success returned")
# could become none upon stop(), considered inactive
if self.current_request is not None:
for component in self.current_request.components:
component.data(data)
sleep(self.request_delay)
self.next()
def failure(self, error):
logger.debug("Failure returned")
# could become none upon stop(), considered inactive
if self.current_request is not None:
for component in self.current_request.components:
component.error(error)
sleep(self.request_delay)
self.next()
def next(self, increment=True):
logger.info('next()')
self.cursor = self.cursor + 1 if increment else self.cursor
if self.cursor < len(self.queue):
self.current_request = self.queue[self.cursor]
self.service.access(self.current_request.stop_id, self)
"""
Not allowing wrapped cursor.
:next() is run through, then this queue is exited and the service
availability is checked again, starting the sequence again.
"""
# self.cursor = 0 if self.cursor == len(self.queue) - 1 else self.cursor + 1
def start(self):
logger.info('start()')
self.cursor = 0
self.next(False)
logger.info('start() - out')
def stop(self):
del self.queue[:]
self.current_request = None
| 28.8125 | 96 | 0.679501 | 1,766 | 0.957701 | 0 | 0 | 0 | 0 | 0 | 0 | 484 | 0.262473 |
c32c528f23adfd98c6057b14b36d3ef97d2f6fbf | 5,826 | py | Python | trimap_module.py | lnugraha/trimap_generator | a279562b0d0f387896330cf88549e67618d1eb7f | [
"MIT"
]
| 168 | 2018-04-14T09:46:03.000Z | 2022-03-29T08:14:11.000Z | trimap_module.py | lnugraha/trimap_generator | a279562b0d0f387896330cf88549e67618d1eb7f | [
"MIT"
]
| 7 | 2018-05-14T12:54:23.000Z | 2021-10-12T01:16:20.000Z | trimap_module.py | lnugraha/trimap_generator | a279562b0d0f387896330cf88549e67618d1eb7f | [
"MIT"
]
| 35 | 2019-05-13T03:13:11.000Z | 2022-03-22T11:55:58.000Z | #!/usr/bin/env python
import cv2, os, sys
import numpy as np
def extractImage(path):
    # error handler if the intended path is not found
image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
return image
def checkImage(image):
"""
Args:
image: input image to be checked
Returns:
        True if the image is strictly binary (only 0 and 255 pixel values)
Raises:
RGB image, grayscale image, all-black, and all-white image
"""
if len(image.shape) > 2:
print("ERROR: non-binary image (RGB)"); sys.exit();
smallest = image.min(axis=0).min(axis=0) # lowest pixel value: 0 (black)
largest = image.max(axis=0).max(axis=0) # highest pixel value: 1 (white)
if (smallest == 0 and largest == 0):
print("ERROR: non-binary image (all black)"); sys.exit()
elif (smallest == 255 and largest == 255):
print("ERROR: non-binary image (all white)"); sys.exit()
elif (smallest > 0 or largest < 255 ):
print("ERROR: non-binary image (grayscale)"); sys.exit()
else:
return True
class Toolbox:
def __init__(self, image):
self.image = image
@property
def printImage(self):
"""
Print image into a file for checking purpose
unitTest = Toolbox(image);
unitTest.printImage(image);
"""
f = open("image_results.dat", "w+")
for i in range(0, self.image.shape[0]):
for j in range(0, self.image.shape[1]):
f.write("%d " %self.image[i,j])
f.write("\n")
f.close()
@property
def displayImage(self):
"""
Display the image on a window
Press any key to exit
"""
cv2.imshow('Displayed Image', self.image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def saveImage(self, title, extension):
"""
Save as a specific image format (bmp, png, or jpeg)
"""
cv2.imwrite("{}.{}".format(title,extension), self.image)
def morph_open(self, image, kernel):
"""
Remove all white noises or speckles outside images
Need to tune the kernel size
Instruction:
unit01 = Toolbox(image);
kernel = np.ones( (9,9), np.uint8 );
morph = unit01.morph_open(input_image, kernel);
"""
bin_open = cv2.morphologyEx(self.image, cv2.MORPH_OPEN, kernel)
return bin_open
def morph_close(self, image, kernel):
"""
Remove all black noises or speckles inside images
Need to tune the kernel size
Instruction:
unit01 = Toolbox(image);
        kernel = np.ones( (11,11), np.uint8 );
morph = unit01.morph_close(input_image, kernel);
"""
bin_close = cv2.morphologyEx(self.image, cv2.MORPH_CLOSE, kernel)
return bin_close
def trimap(image, name, size, number, erosion=False):
"""
This function creates a trimap based on simple dilation algorithm
Inputs [4]: a binary image (black & white only), name of the image, dilation pixels
the last argument is optional; i.e., how many iterations will the image get eroded
Output : a trimap
"""
checkImage(image)
row = image.shape[0]
col = image.shape[1]
pixels = 2*size + 1 ## Double and plus 1 to have an odd-sized kernel
kernel = np.ones((pixels,pixels),np.uint8) ## Pixel of extension I get
if erosion is not False:
erosion = int(erosion)
erosion_kernel = np.ones((3,3), np.uint8) ## Design an odd-sized erosion kernel
image = cv2.erode(image, erosion_kernel, iterations=erosion) ## How many erosion do you expect
        image = np.where(image > 0, 255, image) ## Any gray-colored pixel becomes white (smoothing)
# Error-handler to prevent entire foreground annihilation
if cv2.countNonZero(image) == 0:
print("ERROR: foreground has been entirely eroded")
sys.exit()
dilation = cv2.dilate(image, kernel, iterations = 1)
dilation = np.where(dilation == 255, 127, dilation) ## WHITE to GRAY
remake = np.where(dilation != 127, 0, dilation) ## Smoothing
remake = np.where(image > 127, 200, dilation) ## mark the tumor inside GRAY
    remake = np.where(remake < 127, 0, remake)   ## Embellishment
    remake = np.where(remake > 200, 0, remake)   ## Embellishment
remake = np.where(remake == 200, 255, remake) ## GRAY to WHITE
#############################################
# Ensures only three pixel values available #
# TODO: Optimization with Cython #
#############################################
for i in range(0,row):
for j in range (0,col):
if (remake[i,j] != 0 and remake[i,j] != 255):
remake[i,j] = 127
path = "./images/results/" ## Change the directory
new_name = '{}px_'.format(size) + name + '_{}.png'.format(number)
cv2.imwrite(os.path.join(path, new_name) , remake)
#############################################
### TESTING SECTION ###
#############################################
if __name__ == '__main__':
path = "./images/test_images/test_image_11.png"
image = extractImage(path)
size = 10
number = path[-5]
title = "test_image"
unit01 = Toolbox(image);
kernel1 = np.ones( (11,11), np.uint8 )
unit01.displayImage
opening = unit01.morph_close(image,kernel1)
trimap(opening, title, size, number, erosion=False)
unit02 = Toolbox(opening)
unit02.displayImage
########################################################
## Default instruction (no binary opening or closing ##
## trimap(image, title, size, number, erosion=False); ##
########################################################
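########################################################
## Sketch of the optional erosion path (values below  ##
## are illustrative, not from the repo); eroding first##
## shrinks the foreground so the unknown band hugs    ##
## the object more tightly:                           ##
## img = extractImage("./images/test_images/test_image_11.png")
## trimap(img, "test_image", size=10, number=1, erosion=2)
########################################################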
| 35.309091 | 120 | 0.562993 | 1,781 | 0.305699 | 0 | 0 | 653 | 0.112084 | 0 | 0 | 2,697 | 0.462925 |
c32c82635b0888813302ed2b3bc7efe4aeb79fdb | 4,047 | py | Python | integration/keeper_secrets_manager_ansible/tests/keeper_init.py | Keeper-Security/secrets-manager | 0044dec7f323ae2e531f52ef2435bd7205949fe9 | [
"MIT"
]
| 9 | 2022-01-10T18:39:45.000Z | 2022-03-06T03:51:41.000Z | integration/keeper_secrets_manager_ansible/tests/keeper_init.py | Keeper-Security/secrets-manager | 0044dec7f323ae2e531f52ef2435bd7205949fe9 | [
"MIT"
]
| 10 | 2022-01-27T00:51:05.000Z | 2022-03-30T08:42:01.000Z | integration/keeper_secrets_manager_ansible/tests/keeper_init.py | Keeper-Security/secrets-manager | 0044dec7f323ae2e531f52ef2435bd7205949fe9 | [
"MIT"
]
| 6 | 2021-12-17T18:59:26.000Z | 2022-03-28T16:47:28.000Z | import unittest
from unittest.mock import patch
import os
from .ansible_test_framework import AnsibleTestFramework, RecordMaker
import keeper_secrets_manager_ansible.plugins
import tempfile
records = {
"TRd_567FkHy-CeGsAzs8aA": RecordMaker.make_record(
uid="TRd_567FkHy-CeGsAzs8aA",
title="JW-F1-R1",
fields={
"password": "ddd"
}
),
"A_7YpGBUgRTeDEQLhVRo0Q": RecordMaker.make_file(
uid="A_7YpGBUgRTeDEQLhVRo0Q",
title="JW-F1-R2-File",
files=[
{"name": "nailing it.mp4", "type": "video/mp4", "url": "http://localhost/abc", "data": "ABC123"},
{"name": "video_file.mp4", "type": "video/mp4", "url": "http://localhost/xzy", "data": "XYZ123"},
]
)
}
def mocked_get_secrets(*args):
if len(args) > 0:
uid = args[0][0]
ret = [records[uid]]
else:
ret = [records[x] for x in records]
return ret
class KeeperInitTest(unittest.TestCase):
def setUp(self):
self.yml_file_name = "test_keeper.yml"
self.json_file_name = "test_keeper.json"
# Add in addition Python libs. This includes the base
# module for Keeper Ansible and the Keeper SDK.
self.base_dir = os.path.dirname(os.path.realpath(__file__))
self.ansible_base_dir = os.path.join(self.base_dir, "ansible_example")
self.yml_file = os.path.join(os.path.join(self.ansible_base_dir, self.yml_file_name))
self.json_file = os.path.join(os.path.join(self.ansible_base_dir, self.json_file_name))
for file in [self.yml_file, self.json_file]:
if os.path.exists(file) is True:
os.unlink(file)
def tearDown(self):
for file in [self.yml_file, self.json_file]:
if os.path.exists(file) is True:
os.unlink(file)
def _common(self):
with tempfile.TemporaryDirectory() as temp_dir:
a = AnsibleTestFramework(
base_dir=self.ansible_base_dir,
playbook=os.path.join("playbooks", "keeper_init.yml"),
inventory=os.path.join("inventory", "all"),
plugin_base_dir=os.path.join(os.path.dirname(keeper_secrets_manager_ansible.plugins.__file__)),
vars={
"keeper_token": "US:XXXXXX",
"keeper_config_file": self.yml_file_name,
"show_config": True
}
)
r, out, err = a.run()
result = r[0]["localhost"]
            self.assertEqual(result["ok"], 2, "expected 2 ok tasks")
self.assertEqual(result["failures"], 0, "failures was not 0")
self.assertEqual(result["changed"], 0, "0 things didn't change")
self.assertTrue(os.path.exists(self.yml_file), "test_keeper.yml does not exist")
a = AnsibleTestFramework(
base_dir=self.ansible_base_dir,
playbook=os.path.join("playbooks", "keeper_init.yml"),
inventory=os.path.join("inventory", "all"),
plugin_base_dir=os.path.join(os.path.dirname(keeper_secrets_manager_ansible.plugins.__file__)),
vars={
"keeper_token": "US:XXXXXX",
"keeper_config_file": self.json_file_name,
"show_config": False
}
)
r, out, err = a.run()
result = r[0]["localhost"]
            self.assertEqual(result["ok"], 2, "expected 2 ok tasks")
self.assertEqual(result["failures"], 0, "failures was not 0")
self.assertEqual(result["changed"], 0, "0 things didn't change")
self.assertTrue(os.path.exists(self.json_file), "test_keeper.json does not exist")
# @unittest.skip
@patch("keeper_secrets_manager_core.core.SecretsManager.get_secrets", side_effect=mocked_get_secrets)
def test_keeper_get_mock(self, _):
self._common()
@unittest.skip
def test_keeper_get_live(self):
self._common()
| 37.472222 | 111 | 0.596244 | 3,104 | 0.766988 | 0 | 0 | 236 | 0.058315 | 0 | 0 | 998 | 0.246602 |
c32db06fd5b31b4a1c0ead0ae4470b3896648d58 | 1,180 | py | Python | wonambi/attr/__init__.py | wonambi-python/wonambi | 4e2834cdd799576d1a231ecb48dfe4da1364fe3a | [
"BSD-3-Clause"
]
| 63 | 2017-12-30T08:11:17.000Z | 2022-01-28T10:34:20.000Z | wonambi/attr/__init__.py | wonambi-python/wonambi | 4e2834cdd799576d1a231ecb48dfe4da1364fe3a | [
"BSD-3-Clause"
]
| 23 | 2017-09-08T08:29:49.000Z | 2022-03-17T08:19:13.000Z | wonambi/attr/__init__.py | wonambi-python/wonambi | 4e2834cdd799576d1a231ecb48dfe4da1364fe3a | [
"BSD-3-Clause"
]
| 12 | 2017-09-18T12:48:36.000Z | 2021-09-22T07:16:07.000Z | """Packages containing all the possible attributes to recordings, such as
- channels (module "chan") with class:
- Chan
- anatomical info (module "anat") with class:
- Surf
- annotations and sleep scores (module "annotations") with class:
- Annotations
Possibly include forward and inverse models.
These attributes are only "attached" to the DataType, there should not be any
consistency check when you load them. The risk is that attributes do not refer
to the correct datatype, but the advantage is that we cannot keep track of all
the possible inconsistencies (f.e. if the channel names are not the same
between the actual channels and those stored in the Channels class).
In addition, these classes are often used in isolation, even without a dataset,
so do not assume that any of the classes in the module can call the main
dataset. In other words, these classes shouldn't have methods calling the
datatype, but there can be functions in the modules that use both the
dataset and the classes below.
"""
from .chan import Channels
from .anat import Brain, Surf, Freesurfer
from .annotations import Annotations, create_empty_annotations
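# Typical consumer-side usage is to import these classes directly, e.g.
#   from wonambi.attr import Channels, Annotations
# (a sketch; constructor arguments depend on the caller's channel and annotation files)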
| 43.703704 | 79 | 0.766102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,047 | 0.887288 |
c32fe65d24a5f464b2f3a2a3ac48a2c68f408fd3 | 1,418 | py | Python | Corpus/Pyramid Score/PyrEval/Pyramid/parameters.py | LCS2-IIITD/summarization_bias | d66846bb7657439347f4714f2672350447474c5a | [
"MIT"
]
| 1 | 2020-11-11T19:48:10.000Z | 2020-11-11T19:48:10.000Z | Corpus/Pyramid Score/PyrEval/Pyramid/parameters.py | LCS2-IIITD/summarization_bias | d66846bb7657439347f4714f2672350447474c5a | [
"MIT"
]
| null | null | null | Corpus/Pyramid Score/PyrEval/Pyramid/parameters.py | LCS2-IIITD/summarization_bias | d66846bb7657439347f4714f2672350447474c5a | [
"MIT"
]
| null | null | null | """
=========== What is Matter Parameters ===================
"""
#tups = [(125.0, 1.0), (125.0, 1.5), (125.0, 2.0), (125.0, 2.5), (125.0, 3.0), (150.0, 1.0), (150.0, 1.5), (150.0, 2.0), (150.0, 2.5), (150.0, 3.0), (175.0, 1.0), (175.0, 1.5), (175.0, 2.0), (175.0, 2.5), (175.0, 3.0), (200.0, 1.0), (200.0, 1.5), (200.0, 2.0), (200.0, 2.5), (200.0, 3.0), (225.0, 1.0), (225.0, 1.5), (225.0, 2.0), (225.0, 2.5), (225.0, 3.0), (250.0, 1.0), (250.0, 1.5), (250.0, 2.0), (250.0, 2.5), (250.0, 3.0)]
"""
=========== DUC Data ==========
"""
#tups = [(64.0, 1.0), (64.0, 1.5), (64.0, 2.0), (64.0, 2.5), (70.0, 1.0), (70.0, 1.5), (70.0, 2.0), (70.0, 2.5), (76.0, 1.0), (76.0, 1.5), (76.0, 2.0), (76.0, 2.5), (82.0, 1.0), (82.0, 1.5), (82.0, 2.0), (82.0, 2.5), (88.0, 1.0), (88.0, 1.5), (88.0, 2.0), (88.0, 2.5), (96.0, 1.0), (96.0, 1.5), (96.0, 2.0), (96.0, 2.5), (100.0, 1.0), (100.0, 1.5), (100.0, 2.0), (100.0, 2.5)]
#b = [1.0,1.5,2.0,2.5,3.0]
# alpha should be from [10,40]
#a = range(len(segpool)+10,len(segpool)+60,10)
#tups = list(itertools.product(a,b))
#print "Alll combinations ", tups
#tups = [(125, 1.0), (125, 1.5), (125, 2.0), (125, 2.5), (125, 3.0), (135, 1.0), (135, 1.5), (135, 2.0), (135, 2.5), (135, 3.0), (145, 1.0), (145, 1.5), (145, 2.0), (145, 2.5), (145, 3.0), (155, 1.0), (155, 1.5), (155, 2.0), (155, 2.5), (155, 3.0), (165, 1.0), (165, 1.5), (165, 2.0), (165, 2.5), (165, 3.0)]
#thresholds = [83] | 78.777778 | 428 | 0.43512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,405 | 0.990832 |
c33017f8651ee60d0cc6f759fc632d532c899c80 | 3,213 | py | Python | deploy/terraform/tasks.py | kinecosystem/blockchain-ops | fc21bbd2d3d405844857a8b3413718bacbaad294 | [
"MIT"
]
| 15 | 2018-08-08T23:47:53.000Z | 2020-02-13T17:14:15.000Z | deploy/terraform/tasks.py | kinfoundation/stellar-ops | fc21bbd2d3d405844857a8b3413718bacbaad294 | [
"MIT"
]
| 21 | 2018-10-16T09:20:32.000Z | 2019-12-15T19:01:56.000Z | deploy/terraform/tasks.py | yonikashi/blocktest | db044d74afc62f80f8f74060830347e82dd03adb | [
"MIT"
]
| 9 | 2018-11-05T17:28:55.000Z | 2019-08-02T20:10:14.000Z | """Call various Terraform actions."""
import os
import os.path
from invoke import task
import jinja2
import yaml
TERRAFORM_VERSION = '0.11.7'
@task
def install(c, ostype='linux', version=TERRAFORM_VERSION):
"""Download a local version of Terraform."""
if ostype == 'mac':
ostype = 'darwin'
file = f'terraform_{version}_{ostype}_amd64.zip'
if os.path.exists('terraform'):
print('Terraform file found')
return
print(f'Downloading Terraform {version}')
c.run(f'wget -q https://releases.hashicorp.com/terraform/{version}/{file}')
c.run(f'unzip {file}')
c.run(f'rm {file}')
MAIN_TF_FILE = 'stellar-network.tf'
@task
def template(c, vars_file='vars.yml'):
    """Process Terraform files that require templating.
    Terraform and HCL have limitations that can be easily solved using template
languages like Jinja.
For example, avoiding redundancy when calling a module multiple times with
just a single different variable value every time.
"""
print('generating terraform files from templates')
with open(vars_file) as f:
variables = yaml.load(f)
for root, _, files in os.walk("."):
for file in files:
stripped_file, ext = os.path.splitext(file)
if ext != '.j2':
continue
out_file = f'{root}/{stripped_file}'
print(f'processing file {root}/{file}')
with open(f'{root}/{file}') as f:
tmplate = jinja2.Template(f.read(), extensions=['jinja2.ext.do'])
out = tmplate.render(variables, env_vars=os.environ)
with open(out_file, 'w') as f:
f.write(out)
c.run(f'./terraform fmt {out_file}')
@task(template)
def init(c):
"""Call terraform init."""
print('initializing')
c.run('./terraform init')
@task(init)
def new_workspace(c, vars_file='vars.yml'):
"""Set terraform workspace."""
print('setting workspace')
with open(vars_file) as f:
variables = yaml.load(f)
workspace = variables['stellar']['network_name']
c.run(f'./terraform workspace new {workspace}')
@task(init)
def workspace(c, vars_file='vars.yml'):
"""Set terraform workspace."""
print('setting workspace')
with open(vars_file) as f:
variables = yaml.load(f)
workspace = variables['stellar']['network_name']
c.run(f'./terraform workspace select {workspace}')
@task(workspace)
def modules(c):
"""Call terraform get."""
print('getting modules')
c.run('./terraform get')
@task(modules)
def plan(c, destroy=False):
"""Call terraform plan."""
print('planning')
c.run('./terraform plan {}'.format('-destroy' if destroy else ''))
@task(modules)
def apply(c, yes=False):
"""Call terraform destroy."""
print('applying')
c.run('./terraform apply {}'.format('-auto-approve' if yes else ''))
@task(modules)
def destroy(c, yes=False):
"""Call terraform destroy."""
print('destroying')
c.run('./terraform destroy {}'.format('-auto-approve' if yes else ''))
@task(modules)
def output(c):
"""Call terraform output."""
print('printing output')
c.run('./terraform output')
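# Typical invocation order from the repo root (a sketch; assumes vars.yml is filled in
# and provider credentials are available in the environment):
#   invoke install new-workspace plan
#   invoke apply --yes
#   invoke output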
| 24.157895 | 81 | 0.62714 | 0 | 0 | 0 | 0 | 3,000 | 0.933707 | 0 | 0 | 1,470 | 0.457516 |
c331cb67fa44126ad7899136fc1a363b37ea7fe2 | 263 | py | Python | gdal/swig/python/scripts/gdal2xyz.py | Sokigo-GLS/gdal | 595f74bf60dff89fc5df53f9f4c3e40fc835e909 | [
"MIT"
]
| null | null | null | gdal/swig/python/scripts/gdal2xyz.py | Sokigo-GLS/gdal | 595f74bf60dff89fc5df53f9f4c3e40fc835e909 | [
"MIT"
]
| null | null | null | gdal/swig/python/scripts/gdal2xyz.py | Sokigo-GLS/gdal | 595f74bf60dff89fc5df53f9f4c3e40fc835e909 | [
"MIT"
]
| null | null | null | import sys
# import osgeo.utils.gdal2xyz as a convenience to use as a script
from osgeo.utils.gdal2xyz import * # noqa
from osgeo.utils.gdal2xyz import main
from osgeo.gdal import deprecation_warn
deprecation_warn('gdal2xyz', 'utils')
sys.exit(main(sys.argv))
| 26.3 | 65 | 0.787072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.334601 |
c33241bd3d20aeeac4e2cda557798ad660937ce2 | 587 | py | Python | inferencia/task/person_reid/body_reid/model/body_reid_model_factory.py | yuya-mochimaru-np/inferencia | e09f298d0a80672fc5bb9383e23c941290eff334 | [
"Apache-2.0"
]
| null | null | null | inferencia/task/person_reid/body_reid/model/body_reid_model_factory.py | yuya-mochimaru-np/inferencia | e09f298d0a80672fc5bb9383e23c941290eff334 | [
"Apache-2.0"
]
| 5 | 2021-07-25T23:19:29.000Z | 2021-07-26T23:35:13.000Z | inferencia/task/person_reid/body_reid/model/body_reid_model_factory.py | yuya-mochimaru-np/inferencia | e09f298d0a80672fc5bb9383e23c941290eff334 | [
"Apache-2.0"
]
| 1 | 2021-09-18T12:06:13.000Z | 2021-09-18T12:06:13.000Z | from .body_reid_model_name import BodyReidModelName
class BodyReidModelFactory():
def create(model_name,
model_path,
model_precision):
if model_name == BodyReidModelName.osnet_x0_25.value:
from .model.osnet.osnet_x0_25 import OSNetX025
return OSNetX025(model_path,
model_precision)
else:
msg = "{} is not implemented. Choose from {}.".format(
model_name,
BodyReidModelName.names()
)
raise NotImplementedError(msg)
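# Example wiring (a sketch; the weights path and precision string are placeholders):
#   model = BodyReidModelFactory.create(
#       BodyReidModelName.osnet_x0_25.value,
#       "/path/to/osnet_x0_25_weights.pth",
#       "FP16")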
| 32.611111 | 66 | 0.577513 | 532 | 0.906303 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.068143 |
c3327d62d6a5e087ae5d5a099ea856c563dc576f | 3,931 | py | Python | cheatingbee/twitter.py | exoskellyman/cheatingbee | 1dd0710f9be8f40c3f23aa5bcac568588ac8feeb | [
"MIT"
]
| null | null | null | cheatingbee/twitter.py | exoskellyman/cheatingbee | 1dd0710f9be8f40c3f23aa5bcac568588ac8feeb | [
"MIT"
]
| null | null | null | cheatingbee/twitter.py | exoskellyman/cheatingbee | 1dd0710f9be8f40c3f23aa5bcac568588ac8feeb | [
"MIT"
]
| null | null | null | import datetime
import io
import os
import tweepy
from dotenv import load_dotenv
from PIL import Image, ImageDraw, ImageFont
class Twitter:
"""
A class used to manage the connection with the Twitter API
...
Methods
-------
post_tweet(solver_answers, nyt_answers, pangrams)
        Creates the tweet text and posts a picture with today's answers
"""
def __init__(self):
load_dotenv()
api_key = os.environ.get('TWITTER_API')
api_key_secret = os.environ.get('TWITTER_API_SECRET')
access_token = os.environ.get('TWITTER_ACCESS')
access_token_secret = os.environ.get('TWITTER_ACCESS_SECRET')
auth = tweepy.OAuthHandler(api_key, api_key_secret)
auth.set_access_token(access_token, access_token_secret)
self.api = tweepy.API(auth)
    def post_tweet(self, solver_answers, nyt_answers, pangrams):
        """Composes the tweet text and posts a picture with today's answers
marked as NSFW to avoid spoilers
Parameters
----------
solver_answers: list, required
The answers returned by the solver
nyt_answers: list, required
The answers of todays New York Times Spelling Bee
pangrams: list, required
The pangrams in the answers of todays New York Times Spelling Bee
"""
pangrams.sort()
nyt_answers.sort()
text = ("Pangram(s):\n"
+ self.__make_rows(pangrams)
+ '\n\nAnswers:\n'
+ self.__make_rows(nyt_answers))
pic = self.__create_pic(text)
media = self.api.media_upload(
filename=str(datetime.date.today()),
file=pic,
possibly_sensitive=True)
if len(solver_answers) == len(nyt_answers):
tweet = "Cheating Bee got all {} answers on todays #SpellingBee!🐝🎓"
tweet = tweet + "\n\nNeed help with todays puzzle? Click the image below!"
tweet = tweet.format(len(nyt_answers))
else:
tweet = "Cheating Bee got {}/{} answers on todays #SpellingBee!🐝"
tweet = tweet + "\n\nNeed help with todays puzzle? Click the image below!"
tweet = tweet.format(len(solver_answers), len(nyt_answers))
self.api.update_status(status=tweet, media_ids=[media.media_id])
def __make_rows(self, word_list):
"""Formats a list of words into a string with rows five words long
Parameters
----------
word_list: list, required
A list of words
Returns
-------
str
The word list composed to a string with rows of five words
"""
text = ''
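        # Worked example (illustrative): a seven-word list comes out as
        # "w1, w2, w3, w4, w5,\nw6, w7": full rows of five, then the
        # remainder on a final line with no trailing comma.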
for i in range(0, len(word_list), 5):
if i + 5 < len(word_list):
text = text + ', '.join(word_list[i:i+5]) + ',\n'
else:
text = text + ', '.join(word_list[i:len(word_list)])
return text
def __create_pic(self, text):
"""Creates an image with and fills it with the text provided
Parameters
----------
text: str, required
The text string to be drawn on the picture
Returns
-------
file
The picture as a file object
"""
font_size = 20
# number of lines plus 3 for padding
height = (text.count('\n') + 3) * font_size
# longest line in string length times font size at a ratio of .65
width = int(
max([len(x) for x in text.splitlines()]) * font_size * 0.65)
pic = Image.new("RGB", (width, height), (255, 255, 255))
font = ImageFont.truetype("Pillow/Tests/fonts/FreeMono.ttf", font_size)
drawing = ImageDraw.Draw(pic)
drawing.multiline_text((10, 10), text, font=font, fill=(0, 0, 0))
b = io.BytesIO()
pic.save(b, 'png')
b.seek(0)
return b
| 34.787611 | 86 | 0.58204 | 3,812 | 0.967513 | 0 | 0 | 0 | 0 | 0 | 0 | 1,769 | 0.448985 |
c332b852b9ea902789bff01e3510374ac9b4407d | 106 | py | Python | variables.py | MuhweziDeo/python_refresher | 0d100f88524ff780f1cee8afabfee1025c648f8b | [
"MIT"
]
| null | null | null | variables.py | MuhweziDeo/python_refresher | 0d100f88524ff780f1cee8afabfee1025c648f8b | [
"MIT"
]
| null | null | null | variables.py | MuhweziDeo/python_refresher | 0d100f88524ff780f1cee8afabfee1025c648f8b | [
"MIT"
]
| null | null | null | x = 2
print(x)
# multiple assignment
a, b, c, d = (1, 2, 5, 9)
print(a, b, c, d)
print(type(str(a)))
| 8.833333 | 25 | 0.528302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.198113 |
c332e2fe6b727044df2454bc3e05a8e3dca73a1d | 4,773 | py | Python | examples/authentication/demo_auth.py | jordiyeh/safrs | eecfaf6d63ed44b9dc44b7b86c600db02989b512 | [
"MIT"
]
| null | null | null | examples/authentication/demo_auth.py | jordiyeh/safrs | eecfaf6d63ed44b9dc44b7b86c600db02989b512 | [
"MIT"
]
| null | null | null | examples/authentication/demo_auth.py | jordiyeh/safrs | eecfaf6d63ed44b9dc44b7b86c600db02989b512 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
#
# This is a demo application to demonstrate the functionality of the safrs_rest REST API with authentication
#
# you will have to install the requirements:
# pip3 install passlib flask_httpauth flask_login
#
# This script can be run standalone like this:
# python3 demo_auth.py [Listener-IP]
# This will run the example on http://Listener-Ip:5000
#
# - A database is created and an item is added
# - User is created and the User endpoint is protected by user:admin & pass: adminPASS
# - swagger2 documentation is generated
#
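# Quick smoke test once it is running (a sketch; the exact collection route is listed
# in the generated swagger at /api, so adjust the path if it differs):
# curl -u admin:adminPASS http://localhost:5000/users
#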
import sys
import os
import logging
import builtins
from functools import wraps
from flask import Flask, redirect, jsonify, make_response
from flask import abort, request, g, url_for
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column, Integer, String
from safrs import SAFRSBase, SAFRSJSONEncoder, Api, jsonapi_rpc
from flask_swagger_ui import get_swaggerui_blueprint
from flask_sqlalchemy import SQLAlchemy
from flask_httpauth import HTTPBasicAuth
from passlib.apps import custom_app_context as pwd_context
from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
from flask_login import LoginManager, UserMixin, \
login_required, login_user, logout_user
db = SQLAlchemy()
auth = HTTPBasicAuth()
# Example sqla database object
class Item(SAFRSBase, db.Model):
'''
description: Item description
'''
__tablename__ = 'items'
id = Column(String, primary_key=True)
name = Column(String, default = '')
class User(SAFRSBase, db.Model):
'''
description: User description
'''
__tablename__ = 'users'
id = db.Column(String, primary_key=True)
username = db.Column(db.String(32), index=True)
password_hash = db.Column(db.String(64))
custom_decorators = [auth.login_required]
@jsonapi_rpc(http_methods = ['POST'])
def hash_password(self, password):
self.password_hash = pwd_context.encrypt(password)
@jsonapi_rpc(http_methods = ['POST'])
def verify_password(self, password):
return pwd_context.verify(password, self.password_hash)
@jsonapi_rpc(http_methods = ['POST'])
def generate_auth_token(self, expiration=600):
s = Serializer(app.config['SECRET_KEY'], expires_in=expiration)
return s.dumps({'id': self.id})
@staticmethod
@jsonapi_rpc(http_methods = ['POST'])
def verify_auth_token(token):
s = Serializer(app.config['SECRET_KEY'])
try:
data = s.loads(token)
except SignatureExpired:
return None # valid token, but expired
except BadSignature:
return None # invalid token
user = User.query.get(data['id'])
return user
def start_app(app):
api = Api(app, api_spec_url = '/api/swagger', host = '{}:{}'.format(HOST,PORT), schemes = [ "http" ] )
item = Item(name='test',email='em@il')
user = User(username='admin')
user.hash_password('adminPASS')
api.expose_object(Item)
api.expose_object(User)
# Set the JSON encoder used for object to json marshalling
app.json_encoder = SAFRSJSONEncoder
# Register the API at /api/docs
swaggerui_blueprint = get_swaggerui_blueprint('/api', '/api/swagger.json')
app.register_blueprint(swaggerui_blueprint, url_prefix='/api')
print('Starting API: http://{}:{}/api'.format(HOST,PORT))
app.run(host=HOST, port = PORT)
#
# APP Initialization
#
app = Flask('demo_app')
app.config.update( SQLALCHEMY_DATABASE_URI = 'sqlite://',
SQLALCHEMY_TRACK_MODIFICATIONS = False,
SECRET_KEY = b'sdqfjqsdfqizroqnxwc',
DEBUG = True)
HOST = sys.argv[1] if len(sys.argv) > 1 else '0.0.0.0'
PORT = 5000
db.init_app(app)
#
# Authentication and custom routes
#
@auth.verify_password
def verify_password(username_or_token, password):
user = User.verify_auth_token(username_or_token)
if not user:
# try to authenticate with username/password
user = User.query.filter_by(username=username_or_token).first()
if not user or not user.verify_password(password):
return False
print('Authentication Successful for "{}"'.format(user.username))
return True
@app.route('/')
def goto_api():
return redirect('/api')
@app.teardown_appcontext
def shutdown_session(exception=None):
'''cfr. http://flask.pocoo.org/docs/0.12/patterns/sqlalchemy/'''
db.session.remove()
# Start the application
with app.app_context():
db.create_all()
start_app(app)
| 31.82 | 109 | 0.673581 | 1,451 | 0.304002 | 0 | 0 | 1,565 | 0.327886 | 0 | 0 | 1,306 | 0.273622 |
c3335a14a14888a29737d6b5d92bb38bedb9c886 | 2,045 | py | Python | ev3/sensors/color.py | NewThingsCo/ev3-controller | 70d30617fa3ea6ef73a39a8c5360e8e4c72a9e98 | [
"BSD-2-Clause"
]
| 1 | 2019-08-06T10:16:39.000Z | 2019-08-06T10:16:39.000Z | ev3/sensors/color.py | NewThingsCo/ev3-controller | 70d30617fa3ea6ef73a39a8c5360e8e4c72a9e98 | [
"BSD-2-Clause"
]
| null | null | null | ev3/sensors/color.py | NewThingsCo/ev3-controller | 70d30617fa3ea6ef73a39a8c5360e8e4c72a9e98 | [
"BSD-2-Clause"
]
| 1 | 2018-03-06T10:59:50.000Z | 2018-03-06T10:59:50.000Z | import goless
import time
from sys import platform
if platform == "linux" or platform == "linux2":
import brickpi3
def start_color_sensor(brick, port, channel):
print("start color sensor")
setup_sensor(brick, port)
goless.go(run_color_sensor, brick, port, channel)
print("color sensor started")
def setup_sensor(brick, port):
brick.set_sensor_type(port, brick.SENSOR_TYPE.EV3_COLOR_REFLECTED)
error = True
while error:
time.sleep(0.1)
try:
brick.get_sensor(port)
error = False
except brickpi3.SensorError as error:
error = True
def run_color_sensor(brick, port, channel):
# sensor_modes = [(brick.SENSOR_TYPE.EV3_COLOR_REFLECTED, 'reflect'),
# (brick.SENSOR_TYPE.EV3_COLOR_AMBIENT, 'ambiet'),
# (brick.SENSOR_TYPE.EV3_COLOR_COLOR, 'color'),
# (brick.SENSOR_TYPE.EV3_COLOR_COLOR_COMPONENTS, 'color_components')]
sensor_modes = [(brick.SENSOR_TYPE.EV3_COLOR_COLOR, 'color')]
while True:
for sensor_mode in sensor_modes:
time.sleep(0.01)
sensor_value = read_sensor(brick, port, sensor_mode[0])
if sensor_value:
channel.send((sensor_mode[1], sensor_value))
if isinstance(sensor_value, brickpi3.SensorError):
break
def read_sensor(brick, port, sensor_type):
try:
brick.set_sensor_type(port, sensor_type)
time.sleep(0.01)
return brick.get_sensor(port)
except brickpi3.SensorError as error:
print("error color", error)
return error
if __name__ == '__main__':
print('for local testing read 100 color readings from port 1')
brick = brickpi3.BrickPi3()
readings = goless.chan()
start_color_sensor(brick, brick.PORT_3, readings)
for i in range(100):
case, val = goless.select([goless.rcase(readings)])
print(case, val)
print('100 reading are done, time to clean and exit')
brick.reset_all()
| 30.984848 | 89 | 0.642543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 471 | 0.230318 |
c333f525069086ebb8689eece355d91dd6b64f69 | 8,757 | py | Python | model/BPE.py | djmhunt/TTpy | 0f0997314bf0f54831494b2ef1a64f1bff95c097 | [
"MIT"
]
| null | null | null | model/BPE.py | djmhunt/TTpy | 0f0997314bf0f54831494b2ef1a64f1bff95c097 | [
"MIT"
]
| 4 | 2020-04-19T11:43:41.000Z | 2020-07-21T09:57:51.000Z | model/BPE.py | djmhunt/TTpy | 0f0997314bf0f54831494b2ef1a64f1bff95c097 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
:Author: Dominic Hunt
"""
import logging
import numpy as np
import scipy as sp
import collections
import itertools
from model.modelTemplate import Model
class BPE(Model):
"""The Bayesian predictor model
Attributes
----------
Name : string
The name of the class used when recording what has been used.
Parameters
----------
alpha : float, optional
Learning rate parameter
epsilon : float, optional
Noise parameter. The larger it is the less likely the model is to choose the highest expected reward
number_actions : integer, optional
The maximum number of valid actions the model can expect to receive.
Default 2.
number_cues : integer, optional
The initial maximum number of stimuli the model can expect to receive.
Default 1.
number_critics : integer, optional
The number of different reaction learning sets.
Default number_actions*number_cues
validRewards : list,np.ndarray, optional
The different reward values that can occur in the task. Default ``array([0, 1])``
action_codes : dict with string or int as keys and int values, optional
A dictionary used to convert between the action references used by the
task or dataset and references used in the models to describe the order
in which the action information is stored.
dirichletInit : float, optional
The initial values for values of the dirichlet distribution.
Normally 0, 1/2 or 1. Default 1
prior : array of floats in ``[0, 1]``, optional
Ignored in this case
stimFunc : function, optional
The function that transforms the stimulus into a form the model can
understand and a string to identify it later. Default is blankStim
rewFunc : function, optional
The function that transforms the reward into a form the model can
understand. Default is blankRew
decFunc : function, optional
The function that takes the internal values of the model and turns them
in to a decision. Default is model.decision.discrete.weightProb
See Also
--------
model.BP : This model is heavily based on that one
"""
def __init__(self, alpha=0.3, epsilon=0.1, dirichletInit=1, validRewards=np.array([0, 1]), **kwargs):
super(BPE, self).__init__(**kwargs)
self.alpha = alpha
self.epsilon = epsilon
self.validRew = validRewards
self.rewLoc = collections.OrderedDict(((k, v) for k, v in itertools.izip(self.validRew, range(len(self.validRew)))))
self.dirichletVals = np.ones((self.number_actions, self.number_cues, len(self.validRew))) * dirichletInit
self.expectations = self.updateExpectations(self.dirichletVals)
self.parameters["epsilon"] = self.epsilon
self.parameters["alpha"] = self.alpha
self.parameters["dirichletInit"] = dirichletInit
# Recorded information
self.recDirichletVals = []
def returnTaskState(self):
""" Returns all the relevant data for this model
Returns
-------
results : dict
The dictionary contains a series of keys including Name,
Probabilities, Actions and Events.
"""
results = self.standardResultOutput()
results["dirichletVals"] = np.array(self.recDirichletVals)
return results
def storeState(self):
"""
Stores the state of all the important variables so that they can be
accessed later
"""
self.storeStandardResults()
self.recDirichletVals.append(self.dirichletVals.copy())
def rewardExpectation(self, observation):
"""Calculate the estimated reward based on the action and stimuli
This contains parts that are task dependent
Parameters
----------
observation : {int | float | tuple}
The set of stimuli
Returns
-------
actionExpectations : array of floats
The expected rewards for each action
stimuli : list of floats
The processed observations
activeStimuli : list of [0, 1] mapping to [False, True]
A list of the stimuli that were or were not present
"""
activeStimuli, stimuli = self.stimulus_shaper.processStimulus(observation)
actionExpectations = self._actExpectations(self.dirichletVals, stimuli)
return actionExpectations, stimuli, activeStimuli
def delta(self, reward, expectation, action, stimuli):
"""
Calculates the comparison between the reward and the expectation
Parameters
----------
reward : float
The reward value
expectation : float
The expected reward value
action : int
The chosen action
stimuli : {int | float | tuple | None}
The stimuli received
Returns
-------
delta
"""
modReward = self.reward_shaper.processFeedback(reward, action, stimuli)
return modReward
def updateModel(self, delta, action, stimuli, stimuliFilter):
"""
Parameters
----------
delta : float
The difference between the reward and the expected reward
action : int
The action chosen by the model in this trialstep
stimuli : list of float
The weights of the different stimuli in this trialstep
stimuliFilter : list of bool
A list describing if a stimulus cue is present in this trialstep
"""
# Find the new activities
self._newExpect(action, delta, stimuli)
# Calculate the new probabilities
# We need to combine the expectations before calculating the probabilities
actionExpectations = self._actExpectations(self.dirichletVals, stimuli)
self.probabilities = self.calcProbabilities(actionExpectations)
def _newExpect(self, action, delta, stimuli):
self.dirichletVals[action, :, self.rewLoc[delta]] += self.alpha * stimuli/np.sum(stimuli)
self.expectations = self.updateExpectations(self.dirichletVals)
def _actExpectations(self, dirichletVals, stimuli):
# If there are multiple possible stimuli, filter by active stimuli and calculate
# calculate the expectations associated with each action.
if self.number_cues > 1:
actionExpectations = self.calcActExpectations(self.actStimMerge(dirichletVals, stimuli))
else:
actionExpectations = self.calcActExpectations(dirichletVals[:, 0, :])
return actionExpectations
def calcProbabilities(self, actionValues):
# type: (np.ndarray) -> np.ndarray
"""
Calculate the probabilities associated with the actions
Parameters
----------
actionValues : 1D ndArray of floats
Returns
-------
probArray : 1D ndArray of floats
The probabilities associated with the actionValues
"""
cbest = actionValues == max(actionValues)
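        # Worked example (illustrative): with epsilon = 0.1, number_actions = 2 and
        # actionValues = [0.2, 0.8], cbest = [False, True], deltaEpsilon = 0.05 and
        # bestEpsilon = 0.95, giving probArray = [0.05, 0.95] (sums to 1).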
deltaEpsilon = self.epsilon * (1 / self.number_actions)
bestEpsilon = (1 - self.epsilon) / np.sum(cbest) + deltaEpsilon
probArray = bestEpsilon * cbest + deltaEpsilon * (1 - cbest)
return probArray
def actorStimulusProbs(self):
"""
Calculates in the model-appropriate way the probability of each action.
Returns
-------
probabilities : 1D ndArray of floats
The probabilities associated with the action choices
"""
probabilities = self.calcProbabilities(self.expectedRewards)
return probabilities
def actStimMerge(self, dirichletVals, stimuli):
dirVals = dirichletVals * np.expand_dims(np.repeat([stimuli], self.number_actions, axis=0), 2)
actDirVals = np.sum(dirVals, 1)
return actDirVals
def calcActExpectations(self, dirichletVals):
actExpect = np.fromiter((np.sum(sp.stats.dirichlet(d).mean() * self.validRew) for d in dirichletVals), float, count=self.number_actions)
return actExpect
def updateExpectations(self, dirichletVals):
def meanFunc(p, r=[]):
return np.sum(sp.stats.dirichlet(p).mean() * r)
expectations = np.apply_along_axis(meanFunc, 2, dirichletVals, r=self.validRew)
return expectations
| 33.680769 | 145 | 0.628183 | 8,552 | 0.97659 | 0 | 0 | 0 | 0 | 0 | 0 | 4,917 | 0.561494 |
c336028d3170491bb761554d05258241830c82fc | 1,688 | py | Python | affiliates/banners/tests/__init__.py | glogiotatidis/affiliates | 34d0ded8e24be9dd207d6419a5157dc8ce34bc06 | [
"BSD-3-Clause"
]
| 15 | 2015-01-01T07:17:44.000Z | 2020-11-09T06:28:29.000Z | affiliates/banners/tests/__init__.py | glogiotatidis/affiliates | 34d0ded8e24be9dd207d6419a5157dc8ce34bc06 | [
"BSD-3-Clause"
]
| 16 | 2015-02-25T23:17:27.000Z | 2015-08-20T10:28:18.000Z | affiliates/banners/tests/__init__.py | glogiotatidis/affiliates | 34d0ded8e24be9dd207d6419a5157dc8ce34bc06 | [
"BSD-3-Clause"
]
| 12 | 2015-01-17T20:57:03.000Z | 2019-11-03T15:04:31.000Z | from django.db.models.signals import post_init
from factory import DjangoModelFactory, Sequence, SubFactory
from factory.django import mute_signals
from affiliates.banners import models
class CategoryFactory(DjangoModelFactory):
FACTORY_FOR = models.Category
name = Sequence(lambda n: 'test{0}'.format(n))
class BannerFactory(DjangoModelFactory):
ABSTRACT_FACTORY = True
category = SubFactory(CategoryFactory)
name = Sequence(lambda n: 'test{0}'.format(n))
destination = 'https://mozilla.org/'
visible = True
class ImageBannerFactory(BannerFactory):
FACTORY_FOR = models.ImageBanner
@mute_signals(post_init)
class ImageVariationFactory(DjangoModelFactory):
ABSTRACT_FACTORY = True
color = 'Blue'
locale = 'en-us'
image = 'uploads/image_banners/test.png'
class ImageBannerVariationFactory(ImageVariationFactory):
FACTORY_FOR = models.ImageBannerVariation
banner = SubFactory(ImageBannerFactory)
class TextBannerFactory(BannerFactory):
FACTORY_FOR = models.TextBanner
class TextBannerVariationFactory(DjangoModelFactory):
FACTORY_FOR = models.TextBannerVariation
banner = SubFactory(TextBannerFactory)
locale = 'en-us'
text = Sequence(lambda n: 'test{0}'.format(n))
class FirefoxUpgradeBannerFactory(BannerFactory):
FACTORY_FOR = models.FirefoxUpgradeBanner
@mute_signals(post_init)
class FirefoxUpgradeBannerVariationFactory(ImageVariationFactory):
FACTORY_FOR = models.FirefoxUpgradeBannerVariation
banner = SubFactory(FirefoxUpgradeBannerFactory)
image = 'uploads/firefox_upgrade_banners/test.png'
upgrade_image = 'uploads/firefox_upgrade_banners/test_upgrade.png'
| 25.575758 | 70 | 0.776659 | 1,423 | 0.843009 | 0 | 0 | 513 | 0.30391 | 0 | 0 | 193 | 0.114336 |
c337200f464a7d012b7b952e50ed5709111473ef | 7,996 | py | Python | cradlepy/framework/http.py | cblanquera/cradlepy | 1634fe38a0cc58f92dbfc2b0c84ace9d16821c3c | [
"MIT"
]
| null | null | null | cradlepy/framework/http.py | cblanquera/cradlepy | 1634fe38a0cc58f92dbfc2b0c84ace9d16821c3c | [
"MIT"
]
| null | null | null | cradlepy/framework/http.py | cblanquera/cradlepy | 1634fe38a0cc58f92dbfc2b0c84ace9d16821c3c | [
"MIT"
]
| null | null | null | from .request import Request
from .response import Response
class HttpRequestCookieTrait:
'Designed for the Request Object; Adds methods to store COOKIE data'
def get_cookies(self, *args):
'Returns COOKIE given name or all COOKIE'
return self.get('cookie', *args)
def remove_cookies(self, *args):
'Removes COOKIE given name or all COOKIE'
return self.remove('cookie', *args)
def has_cookies(self, *args):
'Returns true if has COOKIE given name or if COOKIE is set'
return self.exists('cookie', *args)
def set_cookies(self, data, *args):
'Sets COOKIE'
if isinstance(data, (list, dict, tuple)):
return self.set('cookie', data)
if len(args) == 0:
return self
return self.set('cookie', data, *args)
class HttpRequestGetTrait:
'Designed for the Request Object; Adds methods to store GET data'
def get_get(self, *args):
'Returns GET given name or all GET'
return self.get('get', *args)
def remove_get(self, *args):
'Removes GET given name or all GET'
return self.remove('get', *args)
def has_get(self, *args):
'Returns true if has GET given name or if GET is set'
return self.exists('get', *args)
def set_get(self, data, *args):
'Sets GET'
if isinstance(data, (list, dict, tuple)):
return self.set('get', data)
if len(args) == 0:
return self
return self.set('get', data, *args)
class HttpRequestPostTrait:
'Designed for the Request Object; Adds methods to store POST data'
def get_post(self, *args):
'Returns POST given name or all POST'
return self.get('post', *args)
def remove_post(self, *args):
'Removes POST given name or all POST'
return self.remove('post', *args)
def has_post(self, *args):
'Returns true if has POST given name or if POST is set'
return self.exists('post', *args)
def set_post(self, data, *args):
'Sets POST'
if isinstance(data, (list, dict, tuple)):
return self.set('post', data)
if len(args) == 0:
return self
return self.set('post', data, *args)
class HttpRequestServerTrait:
'Designed for the Request Object; Adds methods to store SERVER data'
def get_method(self):
'Returns method if set'
pass
def get_path(self, name = None):
'Returns path data given name or all path data'
pass
def get_query(self):
'Returns string query if set'
pass
def get_server(self, name = None):
'Returns SERVER data given name or all SERVER data'
pass
def has_server(self, name = None):
'Returns SERVER data given name or all SERVER data'
pass
def is_method(self, method):
'Returns true if method is the one given'
pass
def set_method(self, method):
'Sets request method'
pass
def set_path(self, path):
'Sets path given in string or array form'
pass
def set_query(self, query):
'Sets query string'
pass
def set_server(self, server):
'Sets SERVER'
pass
class HttpRequestSessionTrait:
'Designed for the Request Object; Adds methods to store SESSION data'
def get_session(self, *args):
'Returns SESSION given name or all SESSION'
return self.get('session', *args)
def remove_session(self, *args):
'Removes SESSION given name or all SESSION'
self.remove('session', *args)
#TODO: link session object
return self
def has_session(self, *args):
'Returns true if has SESSION given name or if SESSION is set'
return self.exists('session', *args)
def set_session(self, data, *args):
'Sets SESSION'
if isinstance(data, (list, dict, tuple)):
return self.set('session', data)
if len(args) == 0:
return self
self.set('session', data, *args)
#TODO: link session object
return self
class HttpRequest(
Request,
HttpRequestCookieTrait,
HttpRequestGetTrait,
HttpRequestPostTrait,
HttpRequestServerTrait,
HttpRequestSessionTrait
):
'Http Request Object'
def load(self):
'Loads default data given by WSGI'
pass
class HttpResponseHeaderTrait:
'Designed for the Response Object; Adds methods to process headers'
def add_header(self, name, value = None):
'Adds a header parameter'
pass
def get_headers(self, name = None):
        'Returns either the header value given the name or all headers'
pass
def remove_header(self, name):
'Removes a header parameter'
pass
class HttpResponsePageTrait:
'Designed for the Response Object; Adds methods to process REST type responses'
def add_meta(self, name, content):
'Adds a page meta item'
pass
def get_flash(self):
'Returns flash data'
pass
def get_meta(self, *args):
'Returns meta given path or all meta data'
pass
def get_page(self, *args):
'Returns page data given path or all page data'
pass
def has_page(self, *args):
        'Returns true if there is any page data'
pass
def remove_page(self, *args):
'Removes arbitrary page data'
pass
def set_flash(self, message, type = 'info'):
'Sets a Page flash'
pass
def set_page(self, *args):
'Sets arbitrary page data'
pass
def set_title(self, title):
'Sets a Page title'
pass
class HttpResponseStatusTrait:
'Designed for the Response Object; Adds methods to process status codes'
def get_status(self):
'Returns the status code'
pass
def set_status(self, code, status):
'Sets a status code'
pass
class HttpResponse(
Response,
HttpResponseHeaderTrait,
HttpResponsePageTrait,
HttpResponseStatusTrait
):
'Http Response Object'
def load(self):
'Loads default data'
pass
class HttpRouterTrait:
    'Designed for the HttpHandler; we are splitting this out to lessen the confusion'
def all(self, path, callback):
'Adds routing middleware for all methods'
pass
def delete(self, path, callback):
'Adds routing middleware for delete method'
pass
def get(self, path, callback):
'Adds routing middleware for get method'
pass
def get_router(self):
'Returns a router object'
pass
def post(self, path, callback):
'Adds routing middleware for post method'
pass
def put(self, path, callback):
'Adds routing middleware for put method'
pass
def route(self, method, path, callback):
'Adds routing middleware'
pass
def set_router(self, router):
'Sets the router to use'
pass
def trigger_route(self, method, path, *args):
'Manually trigger a route'
pass
class HttpRouterInterface:
'Handles method-path matching and routing'
def __init__(self, handler = None):
'Allow to pass a custom EventHandler'
pass
def process(self, request, *args):
'Process routes'
pass
def route(self, method, pattern, callback):
'Adds routing middleware'
pass
class HttpRouter(HttpRouterInterface):
'Handles method-path matching and routing'
def __init__(self, handler = None):
'Allow to pass a custom EventHandler'
pass
def process(self, request, *args):
'Process routes'
pass
def route(self, method, pattern, callback):
'Adds routing middleware'
pass
class HttpDispatcher:
pass
class HttpHandler:
pass
class HttpDispatcherTrait:
pass
class HttpTrait:
pass
| 23.380117 | 83 | 0.617184 | 7,902 | 0.988244 | 0 | 0 | 0 | 0 | 0 | 0 | 2,824 | 0.353177 |
c3372092201a1e6f33ba16a8e3cd911550232f4d | 326 | py | Python | src/plugins/command/main.py | AlexCaranha/MyLauncher | d15037d5e26eee61e851a938b432ee1107f441ab | [
"MIT"
]
| null | null | null | src/plugins/command/main.py | AlexCaranha/MyLauncher | d15037d5e26eee61e851a938b432ee1107f441ab | [
"MIT"
]
| null | null | null | src/plugins/command/main.py | AlexCaranha/MyLauncher | d15037d5e26eee61e851a938b432ee1107f441ab | [
"MIT"
]
| null | null | null |
import pluggy
hookimpl = pluggy.HookimplMarker('mylauncher')
def get_class():
return CommandPlugin()
class CommandPlugin:
@hookimpl
def setup(self):
print("Setup ...")
@hookimpl
def get_alias(self):
return "command"
@hookimpl
def execute(self, input:str):
return None
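# Host-side registration sketch (assumes the launcher defines matching hookspecs
# for 'setup', 'get_alias' and 'execute' under the 'mylauncher' project name):
#   pm = pluggy.PluginManager('mylauncher')
#   pm.register(get_class())
#   pm.hook.setup()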
| 15.52381 | 46 | 0.631902 | 216 | 0.662577 | 0 | 0 | 179 | 0.54908 | 0 | 0 | 32 | 0.09816 |
c3374bd201ea3739cfe629bae1ecfda55d32a4e4 | 5,022 | py | Python | setup.py | kmike/UnbalancedDataset | 777f26cee73c04ae2f3d59e43c990cbfd1725b23 | [
"MIT"
]
| 6 | 2016-06-02T09:27:41.000Z | 2021-04-21T06:46:12.000Z | setup.py | kmike/UnbalancedDataset | 777f26cee73c04ae2f3d59e43c990cbfd1725b23 | [
"MIT"
]
| null | null | null | setup.py | kmike/UnbalancedDataset | 777f26cee73c04ae2f3d59e43c990cbfd1725b23 | [
"MIT"
]
| 1 | 2018-08-25T03:11:05.000Z | 2018-08-25T03:11:05.000Z | #! /usr/bin/env python
"""Toolbox for unbalanced dataset in machine learning."""
from setuptools import setup, find_packages
import os
import sys
import setuptools
from distutils.command.build_py import build_py
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
descr = """Toolbox for unbalanced dataset in machine learning."""
DISTNAME = 'unbalanced_dataset'
DESCRIPTION = 'Toolbox for unbalanced dataset in machine learning.'
LONG_DESCRIPTION = descr
MAINTAINER = 'Fernando Nogueira, Guillaume Lemaitre'
MAINTAINER_EMAIL = '[email protected], [email protected]'
URL = 'https://github.com/fmfn/UnbalancedDataset'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'https://github.com/fmfn/UnbalancedDataset'
# This is a bit (!) hackish: we are setting a global variable so that the main
# unbalanced_dataset __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by UnbalancedDataset to
# recursively build the compiled extensions in sub-packages is based on
# the Python import machinery.
builtins.__UNBALANCED_DATASET_SETUP__ = True
with open('unbalanced_dataset/__init__.py') as fid:
for line in fid:
if line.startswith('__version__'):
VERSION = line.strip().split()[-1][1:-1]
break
with open('requirements.txt') as fid:
INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]
# requirements for those browsing PyPI
REQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]
REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]
REQUIRES = [r.replace('[array]', '') for r in REQUIRES]
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(
ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('unbalanced_dataset')
return config
if __name__ == "__main__":
try:
from numpy.distutils.core import setup
extra = {'configuration': configuration}
# Do not try and upgrade larger dependencies
for lib in ['scipy', 'numpy', 'matplotlib']:
try:
__import__(lib)
INSTALL_REQUIRES = [i for i in INSTALL_REQUIRES
if lib not in i]
except ImportError:
pass
except ImportError:
if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'--version',
'clean')):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install UnbalancedDataset when Numpy is not yet
# present in the system.
from setuptools import setup
extra = {}
else:
            print('To install UnbalancedDataset from source, you need numpy.\n' +
'Install numpy with pip:\n' +
'pip install numpy\n'
'Or use your operating system package manager.')
sys.exit(1)
setup(
name=DISTNAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
license=LICENSE,
download_url=DOWNLOAD_URL,
version=VERSION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
install_requires=INSTALL_REQUIRES,
requires=REQUIRES,
packages=setuptools.find_packages(exclude=['doc']),
include_package_data=True,
zip_safe=False, # the package can run out of an .egg file
cmdclass={'build_py': build_py},
**extra
)
| 37.477612 | 79 | 0.597172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,041 | 0.406412 |
c33796a44fa6fcd9a0852b7e8f6f953771655ffa | 64 | py | Python | ores/scoring/models/__init__.py | elukey/ores | 18f6e2da642dd124daf4dc122d58032eb15485c9 | [
"MIT"
]
| 69 | 2015-07-15T15:04:12.000Z | 2018-08-20T15:00:27.000Z | ores/scoring/models/__init__.py | elukey/ores | 18f6e2da642dd124daf4dc122d58032eb15485c9 | [
"MIT"
]
| 146 | 2015-06-13T18:56:49.000Z | 2018-08-17T22:38:52.000Z | ores/scoring/models/__init__.py | elukey/ores | 18f6e2da642dd124daf4dc122d58032eb15485c9 | [
"MIT"
]
| 34 | 2018-10-15T16:58:50.000Z | 2022-03-08T20:01:34.000Z | from .rev_id_scorer import RevIdScorer
__all__ = ["RevIdScorer"]
| 16 | 38 | 0.8125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c3386296bb9f34b0112bf5ce7c89306471d38bbf | 4,311 | py | Python | conjuntos.py | Tiesco789/guppe | 464702a2d618e149439a9b5c763f82c5376d2c32 | [
"MIT"
]
| null | null | null | conjuntos.py | Tiesco789/guppe | 464702a2d618e149439a9b5c763f82c5376d2c32 | [
"MIT"
]
| null | null | null | conjuntos.py | Tiesco789/guppe | 464702a2d618e149439a9b5c763f82c5376d2c32 | [
"MIT"
]
| null | null | null | """
Sets
- When we say "set" in any programming language, we are referring to set theory from mathematics
- In Python these collections are called sets
That said, just as in mathematics:
- Sets do not hold duplicate values;
- Sets do not keep their values in any order;
- Elements are not accessed by index, i.e. sets are not indexed;
Sets are a good choice when we need to store elements but do not care about their order,
and when we do not need to worry about keys, values or duplicate items.
Sets are written in Python with curly braces {}
Difference between sets and maps (dictionaries) in Python:
- A dictionary has key/value pairs
- A set has only values
# Defining a set
# Form 1
s = set({1, 2, 3, 4, 5, 6, 7, 2, 3})  # Note that we have repeated values
print(s)
print(type(s))
# NOTE: When creating a set, if a value that already exists is added, it is ignored without raising an error and does not become part of the set
# Form 2
s = {1, 2, 3, 4, 5, 5}
print(s)
print(type(s))
# We can check whether a given value is contained in a set
if 3 in s:
    print('Found the value 3')
else:
    print('Did not find the value 3')
# Remember: besides not having duplicate values, the values are not ordered
dados = 99, 2, 34, 23, 2, 12, 1, 44, 5, 34
# Lists accept duplicate values, so we have 10 elements
lista = list(dados)
print(f"List: {lista} with {len(lista)} elements")
# Tuples accept duplicate values, so we have 10 elements
tupla = tuple(dados)
print(f"Tuple: {tupla} with {len(tupla)} elements")
# Dictionaries do not accept duplicate keys, so we have 8 elements
dicionario = {}.fromkeys(dados, 'dict')
print(f"Dictionary: {dicionario} with {len(dicionario)} elements")
# Sets do not accept duplicate values, so we have 8 elements
conjunto = set(dados)
print(f"Set: {conjunto} with {len(conjunto)} elements")
# Just like the other Python collections, we can mix data types in sets
s = {1, 'b', True, 1.23, 44}
print(s)
print(type(s))
# We can iterate over a set as usual
for valor in s:
    print(valor)
# Interesting uses of sets
# Imagine we built a visitor registration form for a fair or museum,
# where visitors type in the city they came from.
# We append each city to a Python list, since a list lets us add new elements and keep repetitions
cidades = ['Belo Horizante', 'São Paulo', 'Campo Grande',
           'Cuiaba', 'Campo Grande', 'São Paulo', 'Cuiaba']
print(cidades)
print(len(cidades))
# Now we need to know how many distinct (unique) cities we have.
# What would you do? Loop over the list?
# We can simply use a set for that
print(len(set(cidades)))
s = {1, 2, 3}
s.add(4)
print(s)
s = {1, 2, 3}
s.remove(3)
print(s)
s.discard(2)
print(s)
# Copying one set into another
# Form 1 - copy() makes an independent copy
novo = s.copy()
print(novo)
novo.add(4)
print(novo)
print(s)
# Form 2 - plain assignment: both names point to the same set
novo = s
novo.add(4)
print(novo)
print(s)
s = {1, 2, 3}
print(s)
s.clear()
print(s)
# We need to build a set with the unique student names
# Form 1 - using union
# unicos1 = estudantes_python.union(estudantes_java)
# print(unicos1)
# Form 2 - using the | pipe operator
unicos2 = estudantes_python | estudantes_java
print(unicos2)
# Build a set of the students enrolled in both courses
# Form 1 - using intersection
ambos1 = estudantes_python.intersection(estudantes_java)
print(ambos1)
# Form 2 - using the & operator
ambos2 = estudantes_python & estudantes_java
print(ambos2)
# Mathematical set methods
# Imagine we have two sets: one with the students of the Python course and one
# with the students of the Java course
estudantes_python = {'Pedro', 'Maria', 'Cláudia', 'João', 'Marcos', 'Patricia'}
estudantes_java = {'Ana', 'Maria', 'Cláudia', 'João', 'Marcos', 'Patricia'}
# Note that some students who study Python also study Java.
# Build a set of the students who are not in the other course
so_python = estudantes_python.difference(estudantes_java)
print(so_python)
so_java = estudantes_java.difference(estudantes_python)
print(so_java)
"""
| 27.812903 | 140 | 0.740895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,384 | 0.999772 |
c338d3a2b98ef137f9b2463dce7a00499cad0407 | 1,346 | py | Python | tests/test_gpreg.py | cdgreenidge/gdec | 1ee6ab0156fa8f74683f5b7a7dfcb2c3f2a57d7f | [
"MIT"
]
| null | null | null | tests/test_gpreg.py | cdgreenidge/gdec | 1ee6ab0156fa8f74683f5b7a7dfcb2c3f2a57d7f | [
"MIT"
]
| null | null | null | tests/test_gpreg.py | cdgreenidge/gdec | 1ee6ab0156fa8f74683f5b7a7dfcb2c3f2a57d7f | [
"MIT"
]
| null | null | null | """Test gpreg.py."""
from typing import Tuple
import numpy as np
import pytest
from gdec import gpreg, npgp
@pytest.fixture(scope="module")
def dataset() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
np.random.seed(42)
amplitude = 1.0
lengthscale = 12
sigma = 0.5
n = 128
def spectrum_fn(w: np.ndarray) -> np.ndarray:
return npgp.rbf_spectrum(w, amplitude, lengthscale)
basis, freqs = npgp.real_fourier_basis(n)
coef_vars = npgp.rbf_spectrum(freqs, amplitude, lengthscale)
z = np.arange(n)
w = np.sqrt(coef_vars) * np.random.randn(n)
f = basis @ w
x = z.repeat(1)
f_x = f.repeat(1)
y = sigma * np.random.randn(*f_x.shape) + f_x
return x[:, None], y, z[:, None], f
def test_you_can_train_periodic_gp_regression_on_the_synthetic_dataset(dataset):
X, y, z, f = dataset
grid_size = np.unique(X).size
model = gpreg.PeriodicGPRegression()
model.fit(X, y, grid_size=grid_size)
f_est = model.predict(z)
error = np.max(np.abs(f - f_est))
assert error < 0.3
def test_training_pid_on_float_dataset_raises_value_error(dataset):
X, y, _, _ = dataset
X = X.astype(np.float32)
grid_size = np.unique(X).size
model = gpreg.PeriodicGPRegression()
with pytest.raises(ValueError):
model.fit(X, y, grid_size=grid_size)
| 26.392157 | 80 | 0.665676 | 0 | 0 | 0 | 0 | 639 | 0.47474 | 0 | 0 | 28 | 0.020802 |
c33952e0e337955829e818701b7429be3b750ed1 | 1,008 | py | Python | gizer/all_schema_engines.py | racker/gizer | 4600999c35e99bce54071ea4f952b09b3fd5dc9b | [
"Apache-2.0"
]
| null | null | null | gizer/all_schema_engines.py | racker/gizer | 4600999c35e99bce54071ea4f952b09b3fd5dc9b | [
"Apache-2.0"
]
| null | null | null | gizer/all_schema_engines.py | racker/gizer | 4600999c35e99bce54071ea4f952b09b3fd5dc9b | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
__author__ = "Yaroslav Litvinov"
__copyright__ = "Copyright 2016, Rackspace Inc."
__email__ = "[email protected]"
from mongo_schema import schema_engine
import os
def get_schema_files(schemas_dirpath):
""" get list of js / json files resided in dirpath param. """
res = []
for fname in os.listdir(schemas_dirpath):
if fname.endswith('json') or fname.endswith('js'):
res.append(fname)
res.sort()
return res
def get_schema_engines_as_dict(schemas_dirpath):
""" Load schema engines into dict.
Basename of schema file should be the name of collection"""
js_schema_files = get_schema_files(schemas_dirpath)
schemas = {}
for fname in js_schema_files:
collection_name = os.path.splitext(os.path.basename(fname))[0]
schema_path = os.path.join(schemas_dirpath, fname)
schemas[collection_name] = \
schema_engine.create_schema_engine(collection_name, schema_path)
return schemas
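if __name__ == "__main__":
    # Illustrative usage sketch (added; not part of the original module).
    # "./schemas" is a placeholder directory of *.js / *.json schema files.
    import sys
    schemas_dir = sys.argv[1] if len(sys.argv) > 1 else './schemas'
    for collection, engine in get_schema_engines_as_dict(schemas_dir).items():
        print("%s -> %s" % (collection, engine))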
| 33.6 | 76 | 0.709325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 274 | 0.271825 |
c33a308f4b93fcb1577dde001c5501e493e62b57 | 146 | py | Python | WeatherPy/config.py.py | Brownc03/python-api-challenge | 24af57a6652b2990e8bdbc1e8e01566a2e7878b8 | [
"ADSL"
]
| null | null | null | WeatherPy/config.py.py | Brownc03/python-api-challenge | 24af57a6652b2990e8bdbc1e8e01566a2e7878b8 | [
"ADSL"
]
| null | null | null | WeatherPy/config.py.py | Brownc03/python-api-challenge | 24af57a6652b2990e8bdbc1e8e01566a2e7878b8 | [
"ADSL"
]
| null | null | null | # OpenWeatherMap API Key
weather_api_key = "ae41fcf95db0d612b74e2b509abe9684"
# Google API Key
g_key = "AIzaSyCuF1rT6NscWq62bcBm0tZM7hKlaeWfONQ" | 29.2 | 52 | 0.842466 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.787671 |
c33b670e9c5af9440c581f7412728d80706d9eb8 | 5,240 | py | Python | bin/runinterpret.py | christine-liu/somaticCNVpipeline | 254b709e611e56e5c891c663508ac79fa1093c07 | [
"MIT"
]
| null | null | null | bin/runinterpret.py | christine-liu/somaticCNVpipeline | 254b709e611e56e5c891c663508ac79fa1093c07 | [
"MIT"
]
| 2 | 2018-03-09T00:22:18.000Z | 2019-03-12T11:26:42.000Z | bin/runinterpret.py | christine-liu/somaticCNVpipeline | 254b709e611e56e5c891c663508ac79fa1093c07 | [
"MIT"
]
| 6 | 2018-03-09T02:10:49.000Z | 2020-05-14T09:19:11.000Z | #!usr/bin/python
import os
import numpy as np
import common
from interpret import qcfile, funcfile, analyzefiles
def runAll(args):
print('\n\n\nYou have requested to analyze CNV call data')
print('\tWARNING:')
print('\t\tIF USING ANY REFERENCES OTHER THAN THOSE I PROVIDE I CANNOT GUARANTEE RESULT ACCURACY')
print('\n')
#Set up environment#
args.AnalysisDirectory = common.fixDirName(args.AnalysisDirectory)
folderDict = {'LowessBinCounts': args.lowess,
'Segments': args.segments,
'PipelineStats': args.countstats}
for i in list(folderDict.keys()):
if not folderDict[i]:
folderDict[i] = args.AnalysisDirectory + i + '/'
else:
folderDict[i] = common.fixDirName(folderDict[i])
QCdir = args.AnalysisDirectory + 'QC/'
CNVdir = args.AnalysisDirectory + 'CNVlists/'
summaryDir = args.AnalysisDirectory + 'SummaryFiles/'
PloidyPlotDir = args.AnalysisDirectory + 'PloidyDeterminationPlots/'
CNplotDir = args.AnalysisDirectory + 'CopyNumberProfilePlots/'
ChromPlotDir = args.AnalysisDirectory + 'ChromosomeCopyNumberPlots/'
for i in [args.AnalysisDirectory, QCdir, CNVdir, summaryDir, PloidyPlotDir, CNplotDir, ChromPlotDir]:#
common.makeDir(i)
#get list of samples to process
#will involve checking infofile (if present) and whether required input files exist
sampleFiles = common.getSampleList(folderDict['Segments'], args.samples, 'segments')
sampleNames = [x.split('/')[-1].split('.')[0] for x in sampleFiles]
# info = common.importInfoFile(args.infofile, args.columns, 'interpret')
# if args.infofile:
# refArray = info
# else:
# thisDtype = info
# refArray = np.array(
# [ (x, 1, 'unk',) for x in sampleNames],
# dtype=thisDtype)
#QC assessment#
# qcfile.runQCone(sampleNames[0], args.species, folderDict['PipelineStats'], folderDict['LowessBinCounts'], folderDict['Segments'], QCdir, PloidyPlotDir)
argList = [(x, args.species, folderDict['PipelineStats'], folderDict['LowessBinCounts'], folderDict['Segments'], QCdir, PloidyPlotDir) for x in sampleNames]
common.daemon(qcfile.runQCone, argList, 'assess sample quality')
analysisSamples = []
ploidyDict = {}
genderDict = {}
mergeQCfile = summaryDir + 'QCmetrics.txt'
OUT = open(mergeQCfile, 'w')
OUT.write('Name\tReads\tMAPD\tCS\tPloidy\tGender\tPASS\n')
for i in sampleNames:
IN = open(QCdir + i + '.qcTEMP.txt', 'r')
data = IN.readline()
OUT.write(data)
data = data.rstrip().split('\t')
if data[-1] == 'True':
analysisSamples.append(i)
ploidyDict[i] = float(data[4])
genderDict[i] = data[-2]
IN.close()
os.remove(QCdir + i + '.qcTEMP.txt')
OUT.close()
os.rmdir(QCdir)
#FUnC: CNV filtering#
if args.nofilter:
        print('\nFURTHER CODE IS ONLY DEVELOPED FOR WHEN FUnC IS IMPLEMENTED, EXITING NOW\n\n\n')
raise SystemExit
# funcfile.FUnCone(analysisSamples[0], args.species, folderDict['Segments'], CNVdir,
# ploidyDict[analysisSamples[0]], genderDict[analysisSamples[0]])
argList = [(x, args.species, folderDict['Segments'], CNVdir, ploidyDict[x], genderDict[x]) for x in analysisSamples]
common.daemon(funcfile.FUnCone, argList, 'remove unreliable CNV calls')
#CNV analysis#
# summaryStats = analyzefiles.analyzeOne(analysisSamples[0], args.species, CNVdir, folderDict['LowessBinCounts'], CNplotDir, ChromPlotDir, ploidyDict[analysisSamples[0]], genderDict[analysisSamples[0]])
# summaryStats = [summaryStats]
argList = [(x, args.species, CNVdir, folderDict['LowessBinCounts'], CNplotDir, ChromPlotDir, ploidyDict[x], genderDict[x]) for x in analysisSamples]
summaryStats = common.daemon(analyzefiles.analyzeOne, argList, 'create summary files')
cellStatsFile = summaryDir + 'CellStats.txt'
chromAmpFile = summaryDir + 'ChromosomeAmplifiedPercent.txt'
chromDelFile = summaryDir + 'ChromosomeDeletedPercent.txt'
#write summary statistics files#
with open(cellStatsFile, 'w') as CELL, open(chromAmpFile, 'w') as AMP, open(chromDelFile, 'w') as DEL:
CELL.write('Sample\tDeletionNumber\tAmplificationNumber\tTotalCNVnumber\tDeletedMB\tAmplifiedMB\tNetDNAalterdMB\n')
chromHeader = 'Sample\t' + '\t'.join(summaryStats[0]['chroms']) + '\n'
AMP.write(chromHeader)
DEL.write(chromHeader)
for i,j in enumerate(analysisSamples):
CELL.write(str(j + '\t'))
cellOut = [summaryStats[i]['cellStats']['delCount'],
summaryStats[i]['cellStats']['ampCount'],
summaryStats[i]['cellStats']['delCount'] + summaryStats[i]['cellStats']['ampCount'],
np.round(summaryStats[i]['cellStats']['delMB'], 3),
np.round(summaryStats[i]['cellStats']['ampMB'], 3),
np.round(summaryStats[i]['cellStats']['ampMB'] - summaryStats[i]['cellStats']['delMB'], 3)]
cellOut = '\t'.join(map(str, cellOut)) + '\n'
CELL.write(cellOut)
AMP.write(str(j + '\t'))
ampOut = [np.round(summaryStats[i]['chromAmp'][x], 3) for x in summaryStats[0]['chroms']]
ampOut = '\t'.join(map(str, ampOut)) + '\n'
AMP.write(ampOut)
DEL.write(str(j + '\t'))
delOut = [np.round(summaryStats[i]['chromDel'][x], 3) for x in summaryStats[0]['chroms']]
delOut = '\t'.join(map(str, delOut)) + '\n'
DEL.write(delOut)
print('\nCNV analysis complete\n\n\n')
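if __name__ == '__main__':
    # Illustrative sketch (added): in the real pipeline the argument object is built
    # by the package's CLI entry point; the option names below are inferred from how
    # runAll() uses `args`, and the defaults are placeholders.
    import argparse
    parser = argparse.ArgumentParser(description='Interpret somatic CNV calls')
    parser.add_argument('AnalysisDirectory')
    parser.add_argument('--lowess', default=None)
    parser.add_argument('--segments', default=None)
    parser.add_argument('--countstats', default=None)
    parser.add_argument('--samples', default=None)
    parser.add_argument('--species', default='human')
    parser.add_argument('--nofilter', action='store_true')
    runAll(parser.parse_args())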
| 31.566265 | 202 | 0.698473 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,137 | 0.407824 |
c33c7a593798637e5989678bfdadfbeb83157154 | 29,527 | py | Python | mbio/EM/mrc.py | wzmao/mbio | af78cfdb47577199585179c3b04cc6cf3d6b401c | [
"MIT"
]
| 2 | 2015-05-28T12:23:02.000Z | 2018-05-25T14:01:17.000Z | mbio/EM/mrc.py | wzmao/mbio | af78cfdb47577199585179c3b04cc6cf3d6b401c | [
"MIT"
]
| null | null | null | mbio/EM/mrc.py | wzmao/mbio | af78cfdb47577199585179c3b04cc6cf3d6b401c | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""This module contains the MRC file class.
"""
__author__ = 'Wenzhi Mao'
__all__ = ['MRC']
class MRCHeader():
"""A header class for mrc file."""
def __init__(self, filename=None, **kwargs):
"""Provide the filename to parse or set it later."""
self.nx = self.ny = self.nz = None
self.mode = None
self.nxstart = self.nystart = self.nzstart = None
self.mx = self.my = self.mz = None
self.cella = [None] * 3
self.cellb = [None] * 3
self.mapc = None
self.mapr = None
self.maps = None
self.dmin = self.dmax = self.dmean = None
self.ispg = None
self.nsymbt = None
self.extra = None
self.origin = [None] * 3
self.map = None
self.machst = None
self.rms = None
self.nlabels = None
self.label = [None] * 10
self.symdata = None
self.xstart = self.ystart = self.zstart = None
if filename:
from os.path import exists, isfile
if exists(filename) and isfile(filename):
from .Cmrc import readHeader
compress = 1 if filename.lower().endswith('.gz') else 0
temp = readHeader(
filename=filename, header=self, compress=compress)
if isinstance(temp, tuple):
from ..IO.output import printError
if temp[0] == None:
printError(temp[1])
else:
printError("Couldn't parse the Error information.")
return None
else:
from numpy import array, argsort
self = temp
for i in xrange(10):
self.label[i] = self.label[i][:80]
if self.label[i].find('\0') != -1:
self.label[i] = self.label[i][
:self.label[i].find("\0")]
elif self.label[i] == ' ' * 80:
self.label[i] = ''
self.label[i] = self.label[i].rstrip()
if self.symdata:
self.symdata = self.symdata[:80]
if self.symdata.find('\0') != -1:
self.symdata = self.symdata[
:self.symdata.find('\0')]
if self.extra:
self.extra = self.extra[:80]
if self.extra.find('\0') != -1:
self.extra = self.extra[:self.extra.find('\0')]
if self.origin == [0, 0, 0]:
self.xstart, self.ystart, self.zstart = array(
[self.nxstart * self.cella[0] / self.mx, self.nystart * self.cella[1] / self.my, self.nzstart * self.cella[2] / self.mz])[argsort([self.mapc, self.mapr, self.maps])]
self.origin = list(array([self.xstart, self.ystart, self.zstart])[
[self.mapc - 1, self.mapr - 1, self.maps - 1]])
self.nxstart = self.nystart = self.nzstart = 0
else:
self.nxstart = self.nystart = self.nzstart = 0
self.xstart, self.ystart, self.zstart = array(
self.origin)[argsort([self.mapc, self.mapr, self.maps])]
else:
from ..IO.output import printError
printError("The file doesn't exists or is not a file.")
def parseHeader(self, filename=None, **kwargs):
"""Parse the MRC header information from the given file."""
if filename:
from os.path import exists, isfile
if exists(filename) and isfile(filename):
from .Cmrc import readHeader
compress = 1 if filename.lower().endswith('.gz') else 0
temp = readHeader(
filename=filename, header=self, compress=compress)
if isinstance(temp, tuple):
from ..IO.output import printError
if temp[0] == None:
printError(temp[1])
else:
printError("Couldn't parse the Error information.")
return None
else:
from numpy import array, argsort
self = temp
for i in xrange(10):
self.label[i] = self.label[i][:80]
if self.label[i].find('\0') != -1:
self.label[i] = self.label[i][
:self.label[i].find("\0")]
elif self.label[i] == ' ' * 80:
self.label[i] = ''
self.label[i] = self.label[i].rstrip()
if self.symdata:
self.symdata = self.symdata[:80]
if self.symdata.find('\0') != -1:
self.symdata = self.symdata[
:self.symdata.find('\0')]
if self.extra:
self.extra = self.extra[:80]
if self.extra.find('\0') != -1:
self.extra = self.extra[:self.extra.find('\0')]
if self.origin == [0, 0, 0]:
self.xstart, self.ystart, self.zstart = array(
[self.nxstart * self.cella[0] / self.mx, self.nystart * self.cella[1] / self.my, self.nzstart * self.cella[2] / self.mz])[argsort([self.mapc, self.mapr, self.maps])]
self.origin = list(array([self.xstart, self.ystart, self.zstart])[
[self.mapc - 1, self.mapr - 1, self.maps - 1]])
self.nxstart = self.nystart = self.nzstart = 0
else:
self.nxstart = self.nystart = self.nzstart = 0
self.xstart, self.ystart, self.zstart = array(
self.origin)[argsort([self.mapc, self.mapr, self.maps])]
else:
from ..IO.output import printError
printError("The file doesn't exists or is not a file.")
else:
from ..IO.output import printError
printError("The filename must be provided.")
def printInformation(self, **kwargs):
"""Print the information from the header."""
from ..IO.output import printInfo as p
p("Num of columns, rows and sections: {0} {1} {2}".format(
self.nx, self.ny, self.nz))
p("Mode: {0}".format(self.mode))
p("Num of First column, row, section: {0} {1} {2}".format(
self.nxstart, self.nystart, self.nzstart))
p("Num of intervals along x, y, z: {0} {1} {2}".format(
self.mx, self.my, self.mz))
p("Cell dimensions in angstroms: {0:.2f} {1:.2f} {2:.2f}".format(
self.cella[0], self.cella[1], self.cella[2]))
p("Cell angles in degrees: {0:.2f} {1:.2f} {2:.2f}".format(
self.cellb[0], self.cellb[1], self.cellb[2]))
p("Axis for cols, rows, sections: {0} {1} {2}".format(
self.mapc, self.mapr, self.maps))
p("Min, max, mean density value: {0:.6f} {1:.6f} {2:.6f}".format(
self.dmin, self.dmax, self.dmean))
p("Space group number: {0}".format(self.ispg))
p("Origin in X,Y,Z: {0:.4f} {1:.4f} {2:.4f}".format(
self.origin[0], self.origin[1], self.origin[2]))
p("Machine stamp: {0}".format(self.machst))
p("rms deviationfrom mean density: {0}".format(self.rms))
p("Num of labels being used: {0}".format(self.nlabels))
if self.nlabels != 0:
p("Labels:")
for i in self.label:
if i != "":
p("\t{0}".format(i))
p("Num of bytes for symmetry data: {0}".format(self.nsymbt))
if self.nsymbt != 0:
p("\t{0}".format(self.symdata))
def getMatrixShape(self, **kwargs):
"""Get the data shape from the header information.
        Caution: it could be different from the data array."""
if (isinstance(self.nx, int) and
isinstance(self.ny, int) and isinstance(self.nz, int)):
return (self.nx, self.ny, self.nz)
else:
from ..IO.output import printError
printError("There is no header information here.")
return None
def __repr__(self):
return "MRCHeader"
def setValue(self, label, value=None, **kwargs):
"""Set the value for a label."""
setattr(self, label, value)
def getValue(self, label, default=None, **kwargs):
"""Get the value for a label."""
getattr(self, label, default)
class MRC():
"""This is a class to read and write MRC file.
The data will always been store as x,y,z oreder."""
def __init__(self, filename=None, **kwargs):
"""Parse data from the given file."""
self.header = MRCHeader()
self.data = None
if filename:
self.parseData(filename=filename, **kwargs)
def __getattr__(self, name, **kwargs):
if name in ['data', 'header']:
return getattr(self, name)
else:
try:
return getattr(self.header, name)
except:
return None
def __setattr__(self, name, value, **kwargs):
if name == 'data':
self.__dict__[name] = value
elif name == 'header':
self.__dict__[name] = value
else:
if name in self.header.__dict__.keys():
setattr(self.header, name, value)
elif name in self.__dict__.keys():
setattr(self, name, value)
else:
pass
def __repr__(self):
return "MRC"
def __str__(self):
return "MRC"
def __dir__(self, **kwargs):
return self.__dict__.keys() + self.header.__dict__.keys()
def parseHeader(self, filename=None, **kwargs):
"""Parse the header only from a given file.
If the data will be parsed in the future, the header will be overwrited
by the new data file's header."""
if filename:
from os.path import exists, isfile
if exists(filename) and isfile(filename):
self.header = MRCHeader(filename=filename)
else:
from ..IO.output import printError
printError("The file doesn't exists or is not a file.")
else:
from ..IO.output import printError
printError("The filename must be provided.")
def parseData(self, filename=None, **kwargs):
"""Parse the data and header from a given file.
If the header or data have already exists, all will be overwrited."""
if filename:
from os.path import exists, isfile
if exists(filename) and isfile(filename):
from .Cmrc import readData
from numpy import zeros, int8, int16, float32, uint8, uint16
from ..IO.output import printInfo, printError, printUpdateInfo
if getattr(self, 'header', None):
del self.header
if kwargs.get('output', True):
printUpdateInfo(
"Parsing the Header from file {0}.".format(filename))
self.header = MRCHeader(filename=filename)
if getattr(self, 'data', None):
printInfo("Some data exists already, overwrite it.")
del self.data
if self.header.mode in [3, 4]:
printError(
"Sorry, we don't support the complex format yet.")
del self.data
self.data = None
return None
else:
if self.header.mode == 0:
self.data = zeros(
(self.header.nz, self.header.ny, self.header.nx), dtype=int8)
elif self.header.mode == 1:
self.data = zeros(
(self.header.nz, self.header.ny, self.header.nx), dtype=int16)
elif self.header.mode == 2:
self.data = zeros(
(self.header.nz, self.header.ny, self.header.nx), dtype=float32)
elif self.header.mode == 5:
self.data = zeros(
(self.header.nz, self.header.ny, self.header.nx), dtype=uint8)
elif self.header.mode == 6:
self.data = zeros(
(self.header.nz, self.header.ny, self.header.nx), dtype=uint16)
else:
printError(
"Couldn't understand the mode {0}".format(self.header.mode))
del self.data
self.data = None
return None
if kwargs.get('output', True):
printUpdateInfo(
"Parsing the Data from file {0}.".format(filename))
self.data = self.data - 1
compress = 1 if filename.lower().endswith('.gz') else 0
temp = readData(
filename=filename, nsymbt=self.header.nsymbt,
datamode=self.header.mode, data=self.data,
size=self.header.nz * self.header.ny * self.header.nx,
compress=compress)
if isinstance(temp, tuple):
del self.data
self.data = None
if temp[0] == None:
printError(temp[1])
else:
printError("Couldn't parse the Error information.")
return None
else:
from numpy import transpose, argsort
if set([self.header.mapc, self.header.mapr, self.header.maps]) != set([1, 2, 3]):
printError(
"The MRC header contains no clear axis.(mapc, mapr and maps must cotain all 1,2,3.)")
printError("Keep the data as it.")
self.data = temp
return None
else:
temporder = [
self.header.maps, self.header.mapr, self.header.mapc]
self.data = transpose(temp, argsort(temporder))
del temp
if self.header.transend:
self.data.byteswap(True)
else:
printError("The file doesn't exists or is not a file.")
return None
else:
printError("The filename must be provided.")
return None
def writeData(self, filename, skipupdate=False, force=False, **kwargs):
"""Write the MRC file into file.
The header and data format will automaticly update.
You could skip the update using `skipupdate` option.
You could force it to overwrite files with `force` option."""
from ..IO.output import printInfo, printError
from os.path import exists, isfile
from numpy import transpose, array
if filename:
if exists(filename):
if not isfile(filename):
printError("The path is not a file.")
return None
else:
if not force:
back = raw_input(
"* File {0} exists, do you want to overwrite it?(y/n)".format(filename))
while back.strip().lower() not in ['y', 'n']:
back = raw_input(
"* File {0} exists, do you want to overwrite it?(y/n)".format(filename))
if back.strip().lower() == 'n':
printInfo("File not write.")
return None
else:
printError("The filename must be provided.")
return None
if isinstance(self.data, type(None)):
printError("No data to write.")
return None
find = False
for i in xrange(10):
if self.label[i].startswith("Written by mbio"):
find = True
from time import ctime
from .. import __version__
self.label[i] = "Written by mbio {0} {1}".format(
__version__, ctime())
self.label = self.label[:i] + \
self.label[i + 1:] + [self.label[i]]
self.label = [j for j in self.label if j != ""]
self.label = self.label + [""] * (10 - len(self.label))
break
if not find:
if self.nlabels != 10:
from time import ctime
from .. import __version__
self.label[self.nlabels] = "Written by mbio {0} {1}".format(
__version__, ctime())
self.nlabels += 1
if not skipupdate:
self.update()
from .Cmrc import writeData
if set([self.header.mapc, self.header.mapr, self.header.maps]) != set([1, 2, 3]):
printError(
"The MRC header contains no clear axis.(mapc, mapr and maps must cotain all 1,2,3.)")
printError("Change it automaticly.")
self.header.mapc, self.header.mapr, self.header.maps = 1, 2, 3
self.header.nxstart, self.header.nystart, self.header.nzstart = array(
[self.header.nxstart, self.header.nystart, self.header.nzstart])[[self.header.mapc - 1, self.header.mapr - 1, self.header.maps - 1]]
if kwargs.get('output', True):
printInfo("Writing MRC to {0}".format(filename))
compress = 1 if filename.lower().endswith('.gz') else 0
temp = writeData(header=self.header, data=transpose(
self.data, (self.header.maps - 1, self.header.mapr - 1, self.header.mapc - 1)), filename=filename, compress=compress)
if isinstance(temp, tuple):
if temp[0] == None:
print temp
printError(temp[1])
else:
printError("Couldn't parse the Error information.")
return None
elif temp == 0:
return None
else:
printError("Couldn't parse the Error information.")
def update(self, **kwargs):
"""Update the MRC header information from the data array.
Update the MRC data format based on the `header.mode`
Include: nx, ny, nz, dmin, dmax, dmean, rms, nsymbt, nlabels and sort label
nxstart, nystart, nzstart, xstart, ystart, zstart, map.
Correct mapc, mapr and maps automaticly."""
from numpy import array, int8, int16, float32, uint8, uint16, argsort
from ..IO.output import printError
from platform import architecture
if set([self.header.mapc, self.header.mapr, self.header.maps]) != set([1, 2, 3]):
printError(
"The MRC header contains no clear axis.(mapc, mapr and maps must cotain all 1,2,3.)")
printError("Change it automaticly.")
self.header.mapc, self.header.mapr, self.header.maps = 1, 2, 3
self.header.nx, self.header.ny, self.header.nz = array(
self.data.shape)[[self.header.mapc - 1, self.header.mapr - 1, self.header.maps - 1]]
if self.header.origin != [0., 0., 0.]:
self.header.nxstart = self.header.nystart = self.header.nzstart = 0
self.header.xstart, self.header.ystart, self.header.zstart = array(
self.header.origin)[argsort([self.header.mapc, self.header.mapr, self.header.maps])]
elif self.header.nxstart != 0 or self.header.nystart != 0 or self.header.nzstart != 0:
self.header.xstart, self.header.ystart, self.header.zstart = array(
[self.header.nxstart * self.header.cella[0] / self.header.mx, self.header.nystart * self.header.cella[1] / self.header.my, self.header.nzstart * self.header.cella[2] / self.header.mz])[argsort([self.header.mapc, self.header.mapr, self.header.maps])]
# self.header.nxstart, self.header.nystart, self.header.nzstart = array(
# [self.header.nxstart, self.header.nystart, self.header.nzstart])[[self.header.mapc - 1, self.header.mapr - 1, self.header.maps - 1]]
else:
self.header.xstart, self.header.ystart, self.header.zstart = 0., 0., 0.
self.header.dmin = self.data.min()
self.header.dmax = self.data.max()
self.header.dmean = self.data.mean()
self.header.rms = (((self.data - self.data.mean()) ** 2).mean()) ** .5
# if architecture()[0].find('32')!=-1:
# temp1=0.
# temp2=0.
# temp3=0.
# for i in self.data:
# for j in i:
# for k in j:
# temp1+=k**2
# temp2+=k
# temp3+=1
# self.header.rms = (temp1/temp3-(temp2/temp3)**2)**.5
# else:
# self.header.rms = (((self.data - self.data.mean())**2).mean())**.5
if self.header.symdata:
self.header.nsymbt = 80
self.header.symdata = self.header.symdata[:80]
else:
self.header.nsymbt = 0
self.header.symdata = None
self.header.nlabels = sum(
[1 if i != "" else 0 for i in self.header.label])
self.header.label = [i[:80] for i in self.header.label if i != ""]
self.header.label = self.header.label + \
[""] * (10 - len(self.header.label))
self.header.map = "MAP "
if {0: int8, 1: int16, 2: float32, 5: uint8, 6: uint16}[self.header.mode] != self.data.dtype:
self.data = array(self.data,
dtype={0: int8, 1: int16, 2: float32, 5: uint8, 6: uint16}[self.header.mode])
def truncMatrix(self, index=[None, None, None, None, None, None], **kwargs):
"""Trunc the matrix by index. Related values will change accordingly.
You need provide the start and end index(will be included) of x,y and z.
Exapmle:
MRC.truncMatrix([xstart, xend, ystart, yend, zstart, zend])
You could use *None* to indicate start from begin or to the end.
"""
from ..IO.output import printError, printInfo
from numpy import array
if len(index) != 6:
printError("Must provide 6 indeces.")
return None
if index == [None] * 6:
printInfo("Nothing changed.")
return None
xstart, xend, ystart, yend, zstart, zend = index
if xstart == None:
xstart = 0
if ystart == None:
ystart = 0
if zstart == None:
zstart = 0
if xend == None:
xend = self.data.shape[0] + 1
else:
xend += 1
if yend == None:
yend = self.data.shape[1] + 1
else:
yend += 1
if zend == None:
zend = self.data.shape[2] + 1
else:
zend += 1
if not 0 <= xstart <= self.data.shape[0]:
printError("xstart is not in the range of x.")
return None
if not 0 <= xend <= self.data.shape[0]:
printError("xend is not in the range of x.")
return None
if not xstart < xend:
printError("xstart must less than xend.")
return None
if not 0 <= ystart <= self.data.shape[1]:
printError("ystart is not in the range of y.")
return None
if not 0 <= yend <= self.data.shape[1]:
printError("yend is not in the range of y.")
return None
if not ystart < yend:
printError("ystart must less than yend.")
return None
if not 0 <= zstart <= self.data.shape[2]:
printError("zstart is not in the range of z.")
return None
if not 0 <= zend <= self.data.shape[2]:
printError("zend is not in the range of z.")
return None
if not zstart < zend:
printError("zstart must less than zend.")
return None
self.data = self.data[xstart:xend, ystart:yend, zstart:zend]
xstep, ystep, zstep = array(
self.header.cella) * 1.0 / array([self.header.mx, self.header.my, self.header.mz])
self.header.xstart += xstart * xstep
self.header.ystart += ystart * ystep
self.header.zstart += zstart * zstep
if self.header.origin == [0, 0, 0]:
self.header.nxstart += xstart
self.header.nystart += ystart
self.header.nzstart += zstart
else:
self.header.nxstart = 0
self.header.nystart = 0
self.header.nzstart = 0
self.header.origin = list(array([self.header.xstart, self.header.ystart, self.header.zstart])[
[self.header.mapc - 1, self.header.mapr - 1, self.header.maps - 1]])
self.header.nx, self.header.ny, self.header.nz = array(
self.data.shape)[[self.header.mapc - 1, self.header.mapr - 1, self.header.maps - 1]]
# def getMatrixShape(self, **kwargs):
# """Get the data shape from the header information.
# Caution: it could be different with the data array."""
# if (isinstance(self.header.nx, int) and
# isinstance(self.header.ny, int) and isinstance(self.header.nz, int)):
# return (self.header.nx, self.header.ny, self.header.nz)
# else:
# from ..IO.output import printError
# printError("There is no header information here.")
# return None
def getGridCoords(self, **kwargs):
"""Return the x, y and z coordinate for the whole grid."""
from numpy import array, arange, argsort
xstep, ystep, zstep = array(
self.header.cella) * 1.0 / array([self.header.mx, self.header.my, self.header.mz])
if self.header.origin == [0, 0, 0]:
xcoor = (self.header.nxstart + arange(self.header.nx)) * xstep
ycoor = (self.header.nystart + arange(self.header.ny)) * ystep
zcoor = (self.header.nzstart + arange(self.header.nz)) * zstep
coor = array([xcoor, ycoor, zcoor])[
argsort([self.header.mapc, self.header.mapr, self.header.maps])]
return list(coor)
else:
xcoor = arange(self.header.nx) * xstep + self.header.origin[0]
ycoor = arange(self.header.ny) * ystep + self.header.origin[1]
zcoor = arange(self.header.nz) * zstep + self.header.origin[2]
coor = array([xcoor, ycoor, zcoor])[
argsort([self.header.mapc, self.header.mapr, self.header.maps])]
return list(coor)
def getGridSteps(self, **kwargs):
"""Return the x, y and z coordinate steps."""
from numpy import array, arange, argsort
step = array(array(self.header.cella) * 1.0 /
array([self.header.mx, self.header.my, self.header.mz]))
step = step[
argsort([self.header.mapc, self.header.mapr, self.header.maps])]
return step
def getArray(self, **kwargs):
"""Get the data from the MRC class"""
return self.data
def setMode(self, mode=2, **kwargs):
"""Set the data format for the data.
        The data will be converted to the new format accordingly.
Data type :
0 image : signed 8-bit bytes range -128 to 127
1 image : 16-bit halfwords
2 image : 32-bit reals
3 transform : complex 16-bit integers (not support now)
4 transform : complex 32-bit reals (not support now)
5 image : unsigned 8-bit range 0 to 255
6 image : unsigned 16-bit range 0 to 65535"""
from numpy import array, int8, int16, float32, uint8, uint16
from ..IO.output import printError
        if mode not in xrange(7):
            printError("Mode must be 0,1,2,3,4,5,6.")
            return None
        elif mode in [3, 4]:
            printError("Sorry, the complex format is not supported now.")
            return None
        self.header.mode = mode
if {0: int8, 1: int16, 2: float32, 5: uint8, 6: uint16}[self.header.mode] != self.data.dtype:
self.data = array(self.data,
dtype={0: int8, 1: int16, 2: float32, 5: uint8, 6: uint16}[self.header.mode])
def printInformation(self, **kwargs):
"""Print the information from the header."""
self.header.printInformation()
def __del__(self):
del self.data
del self
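if __name__ == '__main__':
    # Illustrative usage sketch (added). 'example.mrc' and the truncation indices
    # are placeholders; the MRC class itself is defined above in this module.
    m = MRC('example.mrc')
    m.printInformation()
    print(m.header.getMatrixShape())
    m.truncMatrix([0, 49, 0, 49, None, None])
    m.writeData('example_trunc.mrc', force=True)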
| 44.602719 | 265 | 0.507739 | 29,403 | 0.9958 | 0 | 0 | 0 | 0 | 0 | 0 | 6,119 | 0.207234 |
c33c9ccdc0ba66d9833eb045f4eb9b0711984aa5 | 2,416 | py | Python | links/management/commands/seed_data.py | darth-dodo/hackernews-backend | 402497a47271a90402624ed2c34b46ac08638440 | [
"MIT"
]
| 3 | 2020-04-20T09:15:39.000Z | 2020-05-25T18:27:44.000Z | links/management/commands/seed_data.py | darth-dodo/hackernews-backend | 402497a47271a90402624ed2c34b46ac08638440 | [
"MIT"
]
| null | null | null | links/management/commands/seed_data.py | darth-dodo/hackernews-backend | 402497a47271a90402624ed2c34b46ac08638440 | [
"MIT"
]
| 1 | 2022-01-29T06:05:15.000Z | 2022-01-29T06:05:15.000Z | from random import randint
from django.core.management.base import BaseCommand
from django.db import transaction
from faker import Faker
from hn_users.models import HNUser, User
from links.models import Link, Vote
faker = Faker()
class Command(BaseCommand):
help = "Generate Links from a small user subset"
def add_arguments(self, parser):
parser.add_argument("no_of_users", type=int, nargs="?", default=4)
parser.add_argument("no_of_links", type=int, nargs="?", default=20)
@transaction.atomic()
def handle(self, *args, **options):
no_of_users = options.get("no_of_users")
no_of_links = options.get("no_of_links")
for user in range(no_of_users):
user = self._create_user()
hn_user = self._create_hn_user(django_user=user)
for link in range(no_of_links):
generated_link = self._create_link()
generated_link.refresh_from_db()
self.stdout.write(
self.style.SUCCESS(
f"Link {generated_link.url} generated with {generated_link.link_votes.count()} votes"
)
)
def _create_link(self):
all_users_count = HNUser.objects.count()
number_of_users_who_voted = randint(1, all_users_count) # nosec
randomly_ordered_users = HNUser.objects.all().order_by("?") # nosec
random_users = randomly_ordered_users[:number_of_users_who_voted]
hn_user = HNUser.objects.all().order_by("?").first()
link = Link()
link.posted_by = hn_user
link.url = faker.url()
link.description = faker.text()
link.save()
for random_user in random_users:
vote = Vote()
vote.link = link
vote.user = random_user
vote.save()
return link
def _create_user(self):
simple_profile = faker.simple_profile()
user = User()
user.email = simple_profile["mail"]
user.username = simple_profile["username"]
user.first_name = simple_profile["name"].split(" ")[0]
user.last_name = simple_profile["name"].split(" ")[-1]
user.set_password(faker.password())
user.save()
return user
def _create_hn_user(self, django_user):
hn_user = HNUser()
hn_user.bio = faker.text()
hn_user.django_user = django_user
hn_user.save()
| 31.789474 | 105 | 0.624586 | 2,180 | 0.902318 | 0 | 0 | 640 | 0.264901 | 0 | 0 | 238 | 0.09851 |
c33fdea58a4606282019dc0ca418482457a10cef | 3,010 | py | Python | locations/spiders/cenex.py | mfjackson/alltheplaces | 37c90b4041c80a574e6e4c2f886883e97df4b636 | [
"MIT"
]
| null | null | null | locations/spiders/cenex.py | mfjackson/alltheplaces | 37c90b4041c80a574e6e4c2f886883e97df4b636 | [
"MIT"
]
| null | null | null | locations/spiders/cenex.py | mfjackson/alltheplaces | 37c90b4041c80a574e6e4c2f886883e97df4b636 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
class CenexSpider(scrapy.Spider):
name = "cenex"
item_attributes = {"brand": "Cenex", "brand_wikidata": "Q5011381"}
allowed_domains = ["www.cenex.com"]
def start_requests(self):
yield scrapy.http.JsonRequest(
"https://www.cenex.com/Common/Services/InteractiveMap.svc/GetLocations",
method="POST",
data={
"SearchRequest": {
"Metadata": {"MapId": "", "Categories": []},
"Query": {
"SearchLat": 0,
"SearchLong": 0,
"LocationTypes": [1, 16, 15],
"Amenities": [],
"Organizations": ["28e93e82-edfa-418e-90aa-7ded057a0c68"],
"NELat": 90,
"NELong": 180,
"SWLat": -90,
"SWLong": -180,
},
},
"MapItemId": "40381d43-1c05-43e0-8477-78737b9974df",
"AllOrganizationIds": [
"b4ed9d2c-cc3b-4ce0-b642-79d75eac11fa",
"cb27078e-9b6a-4f4d-ac81-eb1d163a5ff6",
"68be9e56-ff49-4724-baf0-90fc833fb459",
"28e93e82-edfa-418e-90aa-7ded057a0c68",
],
"ServiceUrl": "https://locatorservice.chsinc.ds/api/search",
},
)
def parse(self, response):
result = json.loads(response.body_as_unicode())
for store in result["SearchResponse"]["Locations"]:
amenities = "|".join([a["Name"] for a in store["Amenities"]])
yield GeojsonPointItem(
lon=store["Long"],
lat=store["Lat"],
ref=store["LocationId"],
name=store["Name"],
addr_full=" ".join([store["Address1"], store["Address2"]]).strip(),
city=store["City"],
state=store["State"],
postcode=store["Zip"],
country="US",
phone=store["Phone"],
website=store["WebsiteUrl"],
opening_hours="24/7" if "24-Hour" in amenities else None,
extras={
"amenity:fuel": True,
"atm": "ATM" in amenities,
"car_wash": "Car Wash" in amenities,
"fuel:biodiesel": "Biodiesel" in amenities or None,
"fuel:diesel": "Diesel" in amenities or None,
"fuel:e85": "Flex Fuels" in amenities or None,
"fuel:HGV_diesel": "Truck Stop" in amenities or None,
"fuel:propane": "Propane" in amenities or None,
"hgv": "Truck Stop" in amenities or None,
"shop": "convenience" if "Convenience Store" in amenities else None,
},
)
| 40.675676 | 88 | 0.469435 | 2,912 | 0.967442 | 2,737 | 0.909302 | 0 | 0 | 0 | 0 | 1,001 | 0.332558 |
c3402d0f4328c3cbff771ca36bde6cdd1c05dd43 | 6,794 | py | Python | core/dbt/flags.py | tskleonard/dbt-core | c112050455e1f7b984c5c0d42a57a90a0d4d7053 | [
"Apache-2.0"
]
| null | null | null | core/dbt/flags.py | tskleonard/dbt-core | c112050455e1f7b984c5c0d42a57a90a0d4d7053 | [
"Apache-2.0"
]
| null | null | null | core/dbt/flags.py | tskleonard/dbt-core | c112050455e1f7b984c5c0d42a57a90a0d4d7053 | [
"Apache-2.0"
]
| null | null | null | import os
import multiprocessing
if os.name != "nt":
# https://bugs.python.org/issue41567
import multiprocessing.popen_spawn_posix # type: ignore
from pathlib import Path
from typing import Optional
# PROFILES_DIR must be set before the other flags
# It also gets set in main.py and in set_from_args because the rpc server
# doesn't go through exactly the same main arg processing.
DEFAULT_PROFILES_DIR = os.path.join(os.path.expanduser("~"), ".dbt")
PROFILES_DIR = os.path.expanduser(os.getenv("DBT_PROFILES_DIR", DEFAULT_PROFILES_DIR))
STRICT_MODE = False # Only here for backwards compatibility
FULL_REFRESH = False # subcommand
STORE_FAILURES = False # subcommand
# Global CLI commands
USE_EXPERIMENTAL_PARSER = None
STATIC_PARSER = None
WARN_ERROR = None
WRITE_JSON = None
PARTIAL_PARSE = None
USE_COLORS = None
DEBUG = None
LOG_FORMAT = None
VERSION_CHECK = None
FAIL_FAST = None
SEND_ANONYMOUS_USAGE_STATS = None
PRINTER_WIDTH = 80
WHICH = None
INDIRECT_SELECTION = None
LOG_CACHE_EVENTS = None
EVENT_BUFFER_SIZE = 100000
QUIET = None
# Global CLI defaults. These flags are set from three places:
# CLI args, environment variables, and user_config (profiles.yml).
# Environment variables use the pattern 'DBT_{flag name}', like DBT_PROFILES_DIR
flag_defaults = {
"USE_EXPERIMENTAL_PARSER": False,
"STATIC_PARSER": True,
"WARN_ERROR": False,
"WRITE_JSON": True,
"PARTIAL_PARSE": True,
"USE_COLORS": True,
"PROFILES_DIR": DEFAULT_PROFILES_DIR,
"DEBUG": False,
"LOG_FORMAT": None,
"VERSION_CHECK": True,
"FAIL_FAST": False,
"SEND_ANONYMOUS_USAGE_STATS": True,
"PRINTER_WIDTH": 80,
"INDIRECT_SELECTION": "eager",
"LOG_CACHE_EVENTS": False,
"EVENT_BUFFER_SIZE": 100000,
"QUIET": False,
}
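# For example, PRINTER_WIDTH is resolved in this order (first one set wins):
#   1. the parsed CLI args:       args.printer_width
#   2. the environment variable:  DBT_PRINTER_WIDTH
#   3. profiles.yml user config:  user_config.printer_width
#   4. the default above:         80
# (Illustrative comment added here; the lookup itself is implemented in
# get_flag_value() below.)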
def env_set_truthy(key: str) -> Optional[str]:
"""Return the value if it was set to a "truthy" string value, or None
otherwise.
"""
value = os.getenv(key)
if not value or value.lower() in ("0", "false", "f"):
return None
return value
def env_set_bool(env_value):
if env_value in ("1", "t", "true", "y", "yes"):
return True
return False
def env_set_path(key: str) -> Optional[Path]:
value = os.getenv(key)
if value is None:
return value
else:
return Path(value)
MACRO_DEBUGGING = env_set_truthy("DBT_MACRO_DEBUGGING")
DEFER_MODE = env_set_truthy("DBT_DEFER_TO_STATE")
ARTIFACT_STATE_PATH = env_set_path("DBT_ARTIFACT_STATE_PATH")
ENABLE_LEGACY_LOGGER = env_set_truthy("DBT_ENABLE_LEGACY_LOGGER")
def _get_context():
# TODO: change this back to use fork() on linux when we have made that safe
return multiprocessing.get_context("spawn")
# This is not a flag, it's a place to store the lock
MP_CONTEXT = _get_context()
def set_from_args(args, user_config):
# N.B. Multiple `globals` are purely for line length.
# Because `global` is a parser directive (as opposed to a language construct)
# black insists in putting them all on one line
global STRICT_MODE, FULL_REFRESH, WARN_ERROR, USE_EXPERIMENTAL_PARSER, STATIC_PARSER
global WRITE_JSON, PARTIAL_PARSE, USE_COLORS, STORE_FAILURES, PROFILES_DIR, DEBUG, LOG_FORMAT
global INDIRECT_SELECTION, VERSION_CHECK, FAIL_FAST, SEND_ANONYMOUS_USAGE_STATS
global PRINTER_WIDTH, WHICH, LOG_CACHE_EVENTS, EVENT_BUFFER_SIZE, QUIET
STRICT_MODE = False # backwards compatibility
# cli args without user_config or env var option
FULL_REFRESH = getattr(args, "full_refresh", FULL_REFRESH)
STORE_FAILURES = getattr(args, "store_failures", STORE_FAILURES)
WHICH = getattr(args, "which", WHICH)
# global cli flags with env var and user_config alternatives
USE_EXPERIMENTAL_PARSER = get_flag_value("USE_EXPERIMENTAL_PARSER", args, user_config)
STATIC_PARSER = get_flag_value("STATIC_PARSER", args, user_config)
WARN_ERROR = get_flag_value("WARN_ERROR", args, user_config)
WRITE_JSON = get_flag_value("WRITE_JSON", args, user_config)
PARTIAL_PARSE = get_flag_value("PARTIAL_PARSE", args, user_config)
USE_COLORS = get_flag_value("USE_COLORS", args, user_config)
PROFILES_DIR = get_flag_value("PROFILES_DIR", args, user_config)
DEBUG = get_flag_value("DEBUG", args, user_config)
LOG_FORMAT = get_flag_value("LOG_FORMAT", args, user_config)
VERSION_CHECK = get_flag_value("VERSION_CHECK", args, user_config)
FAIL_FAST = get_flag_value("FAIL_FAST", args, user_config)
SEND_ANONYMOUS_USAGE_STATS = get_flag_value("SEND_ANONYMOUS_USAGE_STATS", args, user_config)
PRINTER_WIDTH = get_flag_value("PRINTER_WIDTH", args, user_config)
INDIRECT_SELECTION = get_flag_value("INDIRECT_SELECTION", args, user_config)
LOG_CACHE_EVENTS = get_flag_value("LOG_CACHE_EVENTS", args, user_config)
EVENT_BUFFER_SIZE = get_flag_value("EVENT_BUFFER_SIZE", args, user_config)
QUIET = get_flag_value("QUIET", args, user_config)
def get_flag_value(flag, args, user_config):
lc_flag = flag.lower()
flag_value = getattr(args, lc_flag, None)
if flag_value is None:
# Environment variables use pattern 'DBT_{flag name}'
env_flag = f"DBT_{flag}"
env_value = os.getenv(env_flag)
if env_value is not None and env_value != "":
env_value = env_value.lower()
# non Boolean values
if flag in [
"LOG_FORMAT",
"PRINTER_WIDTH",
"PROFILES_DIR",
"INDIRECT_SELECTION",
"EVENT_BUFFER_SIZE",
]:
flag_value = env_value
else:
flag_value = env_set_bool(env_value)
elif user_config is not None and getattr(user_config, lc_flag, None) is not None:
flag_value = getattr(user_config, lc_flag)
else:
flag_value = flag_defaults[flag]
if flag in ["PRINTER_WIDTH", "EVENT_BUFFER_SIZE"]: # must be ints
flag_value = int(flag_value)
if flag == "PROFILES_DIR":
flag_value = os.path.abspath(flag_value)
return flag_value
def get_flag_dict():
return {
"use_experimental_parser": USE_EXPERIMENTAL_PARSER,
"static_parser": STATIC_PARSER,
"warn_error": WARN_ERROR,
"write_json": WRITE_JSON,
"partial_parse": PARTIAL_PARSE,
"use_colors": USE_COLORS,
"profiles_dir": PROFILES_DIR,
"debug": DEBUG,
"log_format": LOG_FORMAT,
"version_check": VERSION_CHECK,
"fail_fast": FAIL_FAST,
"send_anonymous_usage_stats": SEND_ANONYMOUS_USAGE_STATS,
"printer_width": PRINTER_WIDTH,
"indirect_selection": INDIRECT_SELECTION,
"log_cache_events": LOG_CACHE_EVENTS,
"event_buffer_size": EVENT_BUFFER_SIZE,
"quiet": QUIET,
}
| 35.94709 | 97 | 0.704592 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,258 | 0.332352 |
c341709c7b99e4263e43265985148f2594b1d447 | 2,223 | py | Python | DataShine/DataShine.py | monk-after-90s/DataShine | e707d5a737ad1aca84a2646aa6d39fcfe430b58d | [
"MIT"
]
| null | null | null | DataShine/DataShine.py | monk-after-90s/DataShine | e707d5a737ad1aca84a2646aa6d39fcfe430b58d | [
"MIT"
]
| null | null | null | DataShine/DataShine.py | monk-after-90s/DataShine | e707d5a737ad1aca84a2646aa6d39fcfe430b58d | [
"MIT"
]
| null | null | null | import asyncio
import functools
from copy import deepcopy
from ensureTaskCanceled import ensureTaskCanceled
def _no_closed(method):
'''
Can not be run when closed.
:return:
'''
@functools.wraps(method)
def wrapper(*args, **kwargs):
self = args[0]
if self._closed:
raise RuntimeError(f'{repr(self)} is already closed.')
return method(*args, **kwargs)
return wrapper
class DataShine:
def __init__(self):
self._unlocked = asyncio.Event()
self._unlocked.set()
self._period_change_event = asyncio.Event()
self._data_container = None
self._q = asyncio.Queue()
self._q_hanler_task = asyncio.create_task(self._q_hanler())
self._closed = False
async def close(self):
'''
Close the DataShine instance.
:return:
'''
await ensureTaskCanceled(self._q_hanler_task)
self._closed = True
async def _q_hanler(self):
while True:
new_data = await self._q.get()
self._q.task_done()
self._data_container = new_data
self._period_change_event.clear()
self._period_change_event.set()
self._period_change_event.clear()
await asyncio.sleep(0)
@_no_closed
async def push_data(self, data):
'''
        Load the lamp with a piece of data to be taken, and "shine" it to notify monitors that new data has arrived.
:param data:
:return:
'''
self._q.put_nowait(data)
@property
def data(self):
'''
Query the data last pushed.
:return:
'''
return self._data_container
@_no_closed
async def wait_data_shine(self):
'''
        Wait for the shined data. If you wait too late, you will lose the chance to get that data. If you cannot await the data
        in time every time but still have to handle all of it, cache the data in an instance of asyncio.Queue.
:return:
'''
await self._period_change_event.wait()
return deepcopy(self._data_container)
if __name__ == '__main__':
async def test():
pass
    asyncio.run(test())
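
    # Added usage sketch (illustrative): push a value and receive it from a
    # concurrently waiting task. The names below (_demo, the pushed dict) are
    # examples only, not part of the original module.
    async def _demo():
        shine = DataShine()
        waiter = asyncio.create_task(shine.wait_data_shine())
        await asyncio.sleep(0)  # let the waiter reach wait() before pushing
        await shine.push_data({'tick': 1})
        print('received:', await waiter)
        await shine.close()

    asyncio.run(_demo())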
| 24.977528 | 123 | 0.604139 | 1,689 | 0.759784 | 0 | 0 | 1,022 | 0.459739 | 1,185 | 0.533063 | 668 | 0.300495 |
c343356ef27f41702366f05b06f61bd4669c4a8d | 13,886 | py | Python | src/python/deepseq2.py | yotamfr/prot2vec | eaee36f9e3929054b1c324acd053a52d0e7be2bd | [
"MIT"
]
| 8 | 2017-10-01T14:34:25.000Z | 2021-04-27T13:18:00.000Z | src/python/deepseq2.py | yotamfr/prot2vec | eaee36f9e3929054b1c324acd053a52d0e7be2bd | [
"MIT"
]
| 1 | 2020-01-23T17:17:18.000Z | 2020-01-23T17:17:18.000Z | src/python/deepseq2.py | yotamfr/prot2vec | eaee36f9e3929054b1c324acd053a52d0e7be2bd | [
"MIT"
]
| 1 | 2018-05-04T04:54:32.000Z | 2018-05-04T04:54:32.000Z | import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
from src.python.baselines import *
from pymongo import MongoClient
from tqdm import tqdm
import tensorflow as tf
### Keras
from keras import optimizers
from keras.models import Model
from keras.layers import Input, Dense, Embedding, Activation
from keras.layers import Conv2D, Conv1D
from keras.layers import Dropout, BatchNormalization
from keras.layers import MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, GlobalAveragePooling1D
from keras.layers import Concatenate, Flatten, Reshape
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint, LambdaCallback, LearningRateScheduler
# from keras.losses import hinge, binary_crossentropy
from keras import backend as K
from sklearn.metrics import log_loss
import math
import argparse
sess = tf.Session()
K.set_session(sess)
LR = 0.001
BATCH_SIZE = 32
LONG_EXPOSURE = True
t0 = datetime(2014, 1, 1, 0, 0)
t1 = datetime(2014, 9, 1, 0, 0)
MAX_LENGTH = 2000
MIN_LENGTH = 30
def get_classes(db, onto, start=t0, end=t1):
q1 = {'DB': 'UniProtKB',
'Evidence': {'$in': exp_codes},
'Date': {"$lte": start},
'Aspect': ASPECT}
q2 = {'DB': 'UniProtKB',
'Evidence': {'$in': exp_codes},
'Date': {"$gt": start, "$lte": end},
'Aspect': ASPECT}
def helper(q):
seq2go, _ = GoAnnotationCollectionLoader(
db.goa_uniprot.find(q), db.goa_uniprot.count(q), ASPECT).load()
for i, (k, v) in enumerate(seq2go.items()):
sys.stdout.write("\r{0:.0f}%".format(100.0 * i / len(seq2go)))
seq2go[k] = onto.propagate(v)
return reduce(lambda x, y: set(x) | set(y), seq2go.values(), set())
return onto.sort(helper(q1) | helper(q2))
def get_training_and_validation_streams(db, limit=None):
q_train = {'DB': 'UniProtKB',
'Evidence': {'$in': exp_codes},
'Date': {"$lte": t0},
'Aspect': ASPECT}
seq2go_trn, _ = GoAnnotationCollectionLoader(db.goa_uniprot.find(q_train), db.goa_uniprot.count(q_train), ASPECT).load()
query = {"_id": {"$in": unique(list(seq2go_trn.keys())).tolist()}}
count = limit if limit else db.uniprot.count(query)
source = db.uniprot.find(query).batch_size(10)
if limit: source = source.limit(limit)
stream_trn = DataStream(source, count, seq2go_trn)
q_valid = {'DB': 'UniProtKB',
'Evidence': {'$in': exp_codes},
'Date': {"$gt": t0, "$lte": t1},
'Aspect': ASPECT}
seq2go_tst, _ = GoAnnotationCollectionLoader(db.goa_uniprot.find(q_valid), db.goa_uniprot.count(q_valid), ASPECT).load()
query = {"_id": {"$in": unique(list(seq2go_tst.keys())).tolist()}}
count = limit if limit else db.uniprot.count(query)
source = db.uniprot.find(query).batch_size(10)
if limit: source = source.limit(limit)
stream_tst = DataStream(source, count, seq2go_tst)
return stream_trn, stream_tst
class DataStream(object):
def __init__(self, source, count, seq2go):
self._count = count
self._source = source
self._seq2go = seq2go
def __iter__(self):
count = self._count
source = self._source
seq2go = self._seq2go
for k, seq in UniprotCollectionLoader(source, count):
if not MIN_LENGTH <= len(seq) <= MAX_LENGTH:
continue
x = [AA.aa2index[aa] for aa in seq]
yield k, x, seq2go[k]
def __len__(self):
return self._count
def step_decay(epoch):
initial_lrate = LR
drop = 0.5
epochs_drop = 1.0
lrate = max(0.0001, initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop)))
return lrate
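# (Added note) With LR = 0.001, drop = 0.5 and epochs_drop = 1.0 this halves the
# learning rate every epoch, floored at 1e-4: epoch 0 -> 0.0005, epoch 1 -> 0.00025,
# epoch 2 -> 0.000125, epoch 3 onwards -> 0.0001.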
def OriginalIception(inpt, num_channels=64):
# tower_0 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt)
tower_1 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt)
tower_1 = Conv1D(num_channels, 3, padding='same', activation='relu')(tower_1)
tower_2 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt)
tower_2 = Conv1D(num_channels, 5, padding='same', activation='relu')(tower_2)
# tower_3 = MaxPooling1D(3, padding='same')(inpt)
# tower_3 = Conv1D(num_channels, 1, padding='same')(tower_3)
return Concatenate(axis=2)([tower_1, tower_2,])
def LargeInception(inpt, num_channels=64):
tower_1 = Conv1D(num_channels, 6, padding='same', activation='relu')(inpt)
tower_1 = BatchNormalization()(tower_1)
tower_1 = Conv1D(num_channels, 6, padding='same', activation='relu')(tower_1)
tower_2 = Conv1D(num_channels, 10, padding='same', activation='relu')(inpt)
tower_2 = BatchNormalization()(tower_2)
tower_2 = Conv1D(num_channels, 10, padding='same', activation='relu')(tower_2)
return Concatenate(axis=2)([tower_1, tower_2])
def SmallInception(inpt, num_channels=150):
tower_1 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt)
tower_1 = Conv1D(num_channels, 5, padding='same', activation='relu')(tower_1)
# tower_1 = BatchNormalization()(tower_1)
tower_2 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt)
tower_2 = Conv1D(num_channels, 15, padding='same', activation='relu')(tower_2)
# tower_2 = BatchNormalization()(tower_2)
return Concatenate(axis=2)([tower_1, tower_2])
def Classifier(inp1d, classes):
out = Dense(len(classes))(inp1d)
out = BatchNormalization()(out)
out = Activation('sigmoid')(out)
return out
def MotifNet(classes, opt):
inpt = Input(shape=(None,))
out = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inpt)
out = Conv1D(250, 15, activation='relu', padding='valid')(out)
out = Dropout(0.2)(out)
out = Conv1D(100, 15, activation='relu', padding='valid')(out)
out = SmallInception(out)
out = Dropout(0.2)(out)
out = SmallInception(out)
out = Dropout(0.2)(out)
out = Conv1D(250, 5, activation='relu', padding='valid')(out)
out = Dropout(0.2)(out)
out = Classifier(GlobalMaxPooling1D()(out), classes)
model = Model(inputs=[inpt], outputs=[out])
model.compile(loss='binary_crossentropy', optimizer=opt)
return model
def Inception(inpt, tower1=6, tower2=10):
tower_1 = Conv1D(64, 1, padding='same', activation='relu')(inpt)
tower_1 = Conv1D(64, tower1, padding='same', activation='relu')(tower_1)
tower_2 = Conv1D(64, 1, padding='same', activation='relu')(inpt)
tower_2 = Conv1D(64, tower2, padding='same', activation='relu')(tower_2)
# tower_3 = MaxPooling1D(3, strides=1, padding='same')(inpt)
# tower_3 = Conv1D(64, 1, padding='same', activation='relu')(tower_3)
return Concatenate(axis=2)([tower_1, tower_2])
def ProteinInception(classes, opt):
inpt = Input(shape=(None,))
img = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inpt)
feats = Inception(Inception(img))
out = Classifier(GlobalMaxPooling1D()(feats), classes)
model = Model(inputs=[inpt], outputs=[out])
model.compile(loss='binary_crossentropy', optimizer=opt)
return model
def Features(inpt):
feats = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inpt)
feats = Conv1D(250, 15, activation='relu', padding='valid')(feats)
feats = Dropout(0.3)(feats)
feats = Conv1D(100, 15, activation='relu', padding='valid')(feats)
feats = Dropout(0.3)(feats)
feats = Conv1D(100, 15, activation='relu', padding='valid')(feats)
feats = Dropout(0.3)(feats)
feats = Conv1D(250, 15, activation='relu', padding='valid')(feats)
feats = Dropout(0.3)(feats)
feats = GlobalMaxPooling1D()(feats)
return feats
def DeeperSeq(classes, opt):
inp = Input(shape=(None,))
out = Classifier(Features(inp), classes)
model = Model(inputs=[inp], outputs=[out])
model.compile(loss='binary_crossentropy', optimizer=opt)
return model
def batch_generator(stream, onto, classes):
s_cls = set(classes)
data = dict()
def labels2vec(lbl):
y = np.zeros(len(classes))
for go in onto.propagate(lbl, include_root=False):
if go not in s_cls:
continue
y[classes.index(go)] = 1
return y
def pad_seq(seq, max_length=MAX_LENGTH):
delta = max_length - len(seq)
left = [PAD for _ in range(delta // 2)]
right = [PAD for _ in range(delta - delta // 2)]
seq = left + seq + right
return np.asarray(seq)
def prepare_batch(sequences, labels):
b = max(map(len, sequences)) + 100
Y = np.asarray([labels2vec(lbl) for lbl in labels])
X = np.asarray([pad_seq(seq, b) for seq in sequences])
return X, Y
for k, x, y in stream:
lx = len(x)
if lx in data:
data[lx].append([k, x, y])
ids, seqs, lbls = zip(*data[lx])
if len(seqs) == BATCH_SIZE:
yield ids, prepare_batch(seqs, lbls)
del data[lx]
else:
data[lx] = [[k, x, y]]
for packet in data.values():
ids, seqs, lbls = zip(*packet)
yield ids, prepare_batch(seqs, lbls)
class LossHistory(Callback):
def __init__(self):
self.losses = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
def train(model, gen_xy, length_xy, epoch, num_epochs,
history=LossHistory(), lrate=LearningRateScheduler(step_decay)):
pbar = tqdm(total=length_xy)
for _, (X, Y) in gen_xy:
model.fit(x=X, y=Y,
batch_size=BATCH_SIZE,
epochs=num_epochs if LONG_EXPOSURE else epoch + 1,
verbose=0,
validation_data=None,
initial_epoch=epoch,
callbacks=[history])
pbar.set_description("Training Loss:%.5f" % np.mean(history.losses))
pbar.update(len(Y))
pbar.close()
def zeroone2oneminusone(vec):
return np.add(np.multiply(np.array(vec), 2), -1)
def oneminusone2zeroone(vec):
return np.divide(np.add(np.array(vec), 1), 2)
def calc_loss(y_true, y_pred):
return np.mean([log_loss(y, y_hat) for y, y_hat in zip(y_true, y_pred) if np.any(y)])
def predict(model, gen_xy, length_xy, classes):
pbar = tqdm(total=length_xy, desc="Predicting...")
i, m, n = 0, length_xy, len(classes)
ids = list()
y_pred, y_true = np.zeros((m, n)), np.zeros((m, n))
for i, (keys, (X, Y)) in enumerate(gen_xy):
k = len(Y)
ids.extend(keys)
y_hat, y = model.predict(X), Y
y_pred[i:i + k, ], y_true[i:i + k, ] = y_hat, y
pbar.update(k)
pbar.close()
return ids, y_true, y_pred
def evaluate(y_true, y_pred, classes):
y_pred = y_pred[~np.all(y_pred == 0, axis=1)]
y_true = y_true[~np.all(y_true == 0, axis=1)]
prs, rcs, f1s = performance(y_pred, y_true, classes)
return calc_loss(y_true, y_pred), prs, rcs, f1s
def add_arguments(parser):
parser.add_argument("--mongo_url", type=str, default='mongodb://localhost:27017/',
help="Supply the URL of MongoDB"),
parser.add_argument("--aspect", type=str, choices=['F', 'P', 'C'],
default="F", help="Specify the ontology aspect.")
parser.add_argument("--init_epoch", type=int, default=0,
help="Which epoch to start training the model?")
parser.add_argument("--arch", type=str, choices=['deepseq', 'motifnet', 'inception'],
default="deepseq", help="Specify the model arch.")
parser.add_argument('-r', '--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
ASPECT = args.aspect # default: Molecular Function
client = MongoClient(args.mongo_url)
db = client['prot2vec']
print("Loading Ontology...")
onto = get_ontology(ASPECT)
# classes = get_classes(db, onto)
classes = onto.classes
classes.remove(onto.root)
assert onto.root not in classes
opt = optimizers.Adam(lr=LR, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
if args.arch == 'inception':
model = ProteinInception(classes, opt)
LONG_EXPOSURE = False
num_epochs = 200
elif args.arch == 'deepseq':
model = DeeperSeq(classes, opt)
LONG_EXPOSURE = True
num_epochs = 20
elif args.arch == 'motifnet':
model = MotifNet(classes, opt)
LONG_EXPOSURE = False
num_epochs = 200
else:
print('Unknown model arch')
exit(0)
if args.resume:
model.load_weights(args.resume)
print("Loaded model from disk")
model.summary()
for epoch in range(args.init_epoch, num_epochs):
trn_stream, tst_stream = get_training_and_validation_streams(db)
train(model, batch_generator(trn_stream, onto, classes), len(trn_stream), epoch, num_epochs)
_, y_true, y_pred = predict(model, batch_generator(tst_stream, onto, classes), len(tst_stream), classes)
loss, prs, rcs, f1s = evaluate(y_true, y_pred, classes)
i = np.argmax(f1s)
f_max = f1s[i]
print("[Epoch %d/%d] (Validation Loss: %.5f, F_max: %.3f, precision: %.3f, recall: %.3f)"
% (epoch + 1, num_epochs, loss, f1s[i], prs[i], rcs[i]))
model_str = '%s-%d-%.5f-%.2f' % (args.arch, epoch + 1, loss, f_max)
model.save_weights("checkpoints/%s.hdf5" % model_str)
with open("checkpoints/%s.json" % model_str, "w+") as f:
f.write(model.to_json())
np.save("checkpoints/%s.npy" % model_str, np.asarray(classes))
| 32.983373 | 124 | 0.631931 | 722 | 0.051995 | 1,581 | 0.113856 | 0 | 0 | 0 | 0 | 1,869 | 0.134596 |
c343f679c520b2ae7bc168a588760750faee9e80 | 5,977 | py | Python | inbima.py | SkoltechAI/inbima | 4c22a864208091e3fb41ea7703c463c4189e78d1 | [
"MIT"
]
| null | null | null | inbima.py | SkoltechAI/inbima | 4c22a864208091e3fb41ea7703c463c4189e78d1 | [
"MIT"
]
| null | null | null | inbima.py | SkoltechAI/inbima | 4c22a864208091e3fb41ea7703c463c4189e78d1 | [
"MIT"
]
| null | null | null | import matplotlib.pyplot as plt
import openpyxl
import sys
from fs import FS
from journals import Journals
from utils import load_sheet
from utils import log
from word import Word
YEARS = [2017, 2018, 2019, 2020, 2021]
class InBiMa():
def __init__(self, is_new_folder=False):
self.fs = FS(is_new_folder)
if is_new_folder: return
self.wb = openpyxl.load_workbook(self.fs.get_path('cait.xlsx'))
log('Excel file is opened', 'res')
self.team = load_sheet(self.wb['team'])
self.grants = load_sheet(self.wb['grants'])
self.papers = load_sheet(self.wb['papers'])
self.journals = Journals(load_sheet(self.wb['journals']))
# self.journals_ref = self.load_journals_ref()
self.task = {
'authors': ['#cichocki'],
'grants': ['#megagrant1'],
}
log('Excel file is parsed', 'res')
# log('Journal info is loaded', 'res')
for uid in self.team.keys():
self.task['authors'] = [uid]
self.export_word_cv()
self.export_grant_papers()
self.export_stat()
def export_word_cv(self):
if len(self.task.get('authors', [])) != 1:
text = 'export_word_cv (task should contain only one author)'
log(text, 'err')
return
person = self.team.get(self.task['authors'][0])
if person is None:
text = 'export_word_cv (invalid team member uid in task)'
log(text, 'err')
return
uid = person['id']
stat = self.get_papers_stat(uid, YEARS)
photo_logo = self.fs.download_photo_logo()
photo_person = self.fs.download_photo(uid[1:], person.get('photo'))
self.word = Word(YEARS, self.get_papers)
self.word.add_person_info(person, photo_person, photo_logo)
self.word.add_person_stat(stat)
self.word.add_note(is_grant=True)
self.word.add_break()
self.word.add_paper_list(stat, author=person['id'])
fname = 'CAIT_' + person['surname'] + '_' + person['name'] + '.docx'
fpath = self.fs.get_path(fname)
self.word.save(fpath)
log(f'Document "{fpath}" is saved', 'res')
def export_grant_papers(self):
if len(self.task.get('grants', [])) != 1:
text = 'export_grant_papers (task should contain only one grant)'
log(text, 'err')
return
grant = self.grants.get(self.task['grants'][0])
if grant is None:
text = 'export_grant_papers (invalid grant uid in task)'
log(text, 'err')
return
uid = grant['id']
stat = self.get_papers_stat(years=YEARS, grant=uid)
photo_logo = self.fs.download_photo_logo()
head = grant.get('head', '')
head = self.team[head]
self.word = Word(YEARS, self.get_papers)
self.word.add_grant_info(grant, head, photo_logo)
self.word.add_note(is_grant=True)
self.word.add_break()
self.word.add_paper_list(stat, grant=uid, with_links=True)
fname = 'CAIT_' + uid[1:] + '.docx'
fpath = self.fs.get_path(fname)
self.word.save(fpath)
log(f'Document "{fpath}" is saved', 'res')
def export_stat(self):
stats = {}
for uid in self.team.keys():
if self.team[uid].get('active') != 'Yes':
continue
if self.team[uid].get('lead') != 'Yes':
continue
stats[uid] = self.get_papers_stat(uid, YEARS)
for uid, stat in stats.items():
x = YEARS
y = [stat[y]['total'] for y in YEARS]
plt.plot(x, y, marker='o', label=uid)
plt.legend(loc='best')
fpath = self.fs.get_path('plot.png')
plt.savefig(fpath)
log(f'Figure "{fpath}" is saved', 'res')
def get_papers(self, author=None, year=None, q=None, grant=None):
res = {}
for title, paper in self.papers.items():
if year and int(year) != int(paper['year']):
continue
if author and not author in paper['authors_parsed']:
continue
if grant and not grant in paper.get('grant', ''):
continue
if q is not None:
journal = self.journals.data[paper['journal']]
q1 = journal.get('sjr_q1', '')
q2 = journal.get('sjr_q2', '')
if q == 1 and len(q1) < 2:
continue
if q == 2 and (len(q1) >= 2 or len(q2) < 2):
continue
if q == 0 and (len(q1) >= 2 or len(q2) >= 2):
continue
res[title] = paper
res[title]['journal_object'] = self.journals.data[paper['journal']]
return res
def get_papers_stat(self, author=None, years=[], grant=None):
res = {}
for year in years:
res[year] = {
'q1': len(self.get_papers(author, year, q=1, grant=grant)),
'q2': len(self.get_papers(author, year, q=2, grant=grant)),
'q0': len(self.get_papers(author, year, q=0, grant=grant)),
'total': len(self.get_papers(author, year, grant=grant))
}
res['total'] = {
'q1': sum(res[year]['q1'] for year in years),
'q2': sum(res[year]['q2'] for year in years),
'q0': sum(res[year]['q0'] for year in years),
'total': sum(res[year]['total'] for year in years),
}
return res
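# Usage (illustrative): "python inbima.py" builds the Word/plot reports, "python inbima.py -f"
# only prepares a fresh working folder, and "python inbima.py -j <journal title>" prints
# reference info for the given journal (see the argument handling below).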
if __name__ == '__main__':
args = sys.argv[1:]
if len(args) == 0:
ibm = InBiMa()
elif len(args) == 1 and args[0] == '-f':
ibm = InBiMa(is_new_folder=True)
elif len(args) == 2 and args[0] == '-j':
journals = Journals()
journals.load_ref()
journals.log_ref(title=args[1])
else:
raise ValueError('Invalid arguments for script')
| 32.840659 | 79 | 0.543918 | 5,356 | 0.896102 | 0 | 0 | 0 | 0 | 0 | 0 | 919 | 0.153756 |
c3462530e3c62749cd08fd4db0ee3cc3926324bb | 2,183 | py | Python | UkDatabaseAPI/UkDatabaseAPI/database/mongo_db.py | kplachkov/UkDatabase | 51db3183a86d3b07e0f97cc685f6f47ad4a8fc2e | [
"Apache-2.0"
]
| null | null | null | UkDatabaseAPI/UkDatabaseAPI/database/mongo_db.py | kplachkov/UkDatabase | 51db3183a86d3b07e0f97cc685f6f47ad4a8fc2e | [
"Apache-2.0"
]
| 3 | 2018-04-02T20:32:51.000Z | 2019-02-09T16:19:39.000Z | UkDatabaseAPI/UkDatabaseAPI/database/mongo_db.py | kplachkov/UkDatabase | 51db3183a86d3b07e0f97cc685f6f47ad4a8fc2e | [
"Apache-2.0"
]
| null | null | null | import pymongo
from bson.json_util import dumps
from pymongo import MongoClient
from UkDatabaseAPI.database.database import Database
from UkDatabaseAPI.database.query_builder.mongo_query_builder import MongoQueryBuilder
MONGO_URI = "mongodb://localhost:27017"
"""str: The MongoDB URI."""
class MongoDB(Database):
def __init__(self):
"""Client for a MongoDB instance."""
# Opening db connection.
self.__client = MongoClient(MONGO_URI)
self.__db = self.__client.UkDatabase
def __del__(self):
"""Close the connection."""
self.close_connection()
def crate_collection_text_index(self):
"""Create a text index for the collection."""
self.__db.posts.create_index([('TEXT', pymongo.TEXT)], name='text', default_language='english')
def close_connection(self):
"""Close the connection."""
self.__client.close()
def find_posts(self, text: str, post_pub_date: str, number_of_results: int) -> str:
"""Find posts containing text or/and within a time range.
Args:
text: The text search criterion, from the URL argument.
post_pub_date: The date or time range search criterion, from the URL argument.
number_of_results: The number of results to return, from the URL argument.
Returns:
The posts containing the text or/and within a time range.
"""
queries = {}
if text:
queries.update(MongoQueryBuilder
.get_query_for_search_by_text(text))
if post_pub_date:
queries.update(MongoQueryBuilder
.get_query_for_search_by_post_date(post_pub_date))
result = self.__db.posts.find({"$and": [queries]}, {"score": {"$meta": "textScore"}})
if number_of_results:
# If int argument provided by the URL, the results are limited and sorted.
result = result.sort([("score", {"$meta": "textScore"})]).limit(number_of_results)
else:
# Return all matched results sorted.
result = result.sort([("score", {"$meta": "textScore"})])
return dumps(result)
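# Example usage (illustrative sketch; assumes MongoDB is running at MONGO_URI and the
# "posts" collection of the UkDatabase database is populated):
#   db = MongoDB()
#   db.crate_collection_text_index()
#   posts_json = db.find_posts("election", "2018-01-01", 10)
#   db.close_connection()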
| 38.982143 | 103 | 0.639945 | 1,890 | 0.865781 | 0 | 0 | 0 | 0 | 0 | 0 | 841 | 0.38525 |
c34648b7e6fe0e43164dec6e0c0022e1e1efabdd | 1,485 | py | Python | fb/forms.py | pure-python/brainmate | 79c83e707a4811dd881832d22f17c29f33c4d7f2 | [
"Apache-2.0"
]
| null | null | null | fb/forms.py | pure-python/brainmate | 79c83e707a4811dd881832d22f17c29f33c4d7f2 | [
"Apache-2.0"
]
| 1 | 2016-04-14T14:42:52.000Z | 2016-04-14T14:42:52.000Z | fb/forms.py | pure-python/brainmate | 79c83e707a4811dd881832d22f17c29f33c4d7f2 | [
"Apache-2.0"
]
| null | null | null | from django.forms import (
Form, CharField, Textarea, PasswordInput, ChoiceField, DateField,
ImageField, BooleanField, IntegerField, MultipleChoiceField
)
from django import forms
from fb.models import UserProfile
class UserPostForm(Form):
text = CharField(widget=Textarea(
        attrs={'rows': 1, 'cols': 40, 'class': 'form-control', 'placeholder': "What's on your mind?"}))
class UserPostCommentForm(Form):
text = CharField(widget=Textarea(
        attrs={'rows': 1, 'cols': 50, 'class': 'form-control', 'placeholder': "Write a comment..."}))
class UserLogin(Form):
username = CharField(max_length=30)
password = CharField(widget=PasswordInput)
class UserProfileForm(Form):
first_name = CharField(max_length=100, required=False)
last_name = CharField(max_length=100, required=False)
gender = ChoiceField(choices=UserProfile.GENDERS, required=False)
date_of_birth = DateField(required=False)
avatar = ImageField(required=False)
OPTIONS = (
("Cars", "Cars"),
("Dogs", "Dogs"),
("Sports", "Sports"),
)
interests = MultipleChoiceField(widget=forms.CheckboxSelectMultiple,
choices=OPTIONS, required=False)
class QuestionFrom(Form):
question_description = CharField(max_length=300)
points = IntegerField()
class AddAnswerForm(Form):
answer_description = CharField(max_length=30)
correct_answer = BooleanField(required=False)
| 31.595745 | 102 | 0.682828 | 1,246 | 0.839057 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.117172 |
c346562511e160197f5f2be08e436cdf509a8cc0 | 28,863 | py | Python | Galaxy_Invander/user23_fTVPDKIDhRdCfUp.py | triump0870/Interactive_Programming_Python | 97e0f1f5639aecac683053ed742632db14dc6954 | [
"Apache-2.0"
]
| 1 | 2015-06-09T22:40:15.000Z | 2015-06-09T22:40:15.000Z | Galaxy_Invander/user23_fTVPDKIDhRdCfUp.py | triump0870/Interactive_Programming_Python | 97e0f1f5639aecac683053ed742632db14dc6954 | [
"Apache-2.0"
]
| null | null | null | Galaxy_Invander/user23_fTVPDKIDhRdCfUp.py | triump0870/Interactive_Programming_Python | 97e0f1f5639aecac683053ed742632db14dc6954 | [
"Apache-2.0"
]
| null | null | null | # Simple implementation of GalaxyInvanders game
# Rohan Roy (India) - 3 Nov 2013
# www.codeskulptor.org/#user23_fTVPDKIDhRdCfUp
VER = "1.0"
# "add various aliens"
import simplegui, math, random, time
#Global const
FIELD_WIDTH = 850
FIELD_HEIGHT = 500
TOP_MARGIN = 75
LEFT_MARGIN = 25
ALIEN_WIDTH = 48
ALIEN_HEIGHT = 55
PLAYER_SPEED = 10
BULLET_SPEED = 10
BULLET_POWER = 1
BONUS_SPEED = 10
ALIEN_SPEED = [3, 5]
# Images:
pImage = simplegui.load_image('https://dl.dropbox.com/s/zhnjucatewcmfs4/player.png')
aImages = []
for i in range(7):
aImages.append([])
aImages[0].append(simplegui.load_image('https://dl.dropbox.com/s/0cck7w6r0mt8pzz/alien_1_1.png'))
aImages[0].append(simplegui.load_image('https://dl.dropbox.com/s/j0kubnhzajbdngu/alien_1_2.png'))
aImages[0].append(simplegui.load_image('https://dl.dropbox.com/s/zkeu6hqh9bakj25/alien_1_3.png'))
aImages[1].append(simplegui.load_image('https://dl.dropbox.com/s/e75mkcylat70lnd/alien_2_1.png'))
aImages[1].append(simplegui.load_image('https://dl.dropbox.com/s/pgjvaxg0z6rhco9/alien_2_2.png'))
aImages[1].append(simplegui.load_image('https://dl.dropbox.com/s/en0hycfsi3cuzuo/alien_2_3.png'))
aImages[2].append(simplegui.load_image('https://dl.dropbox.com/s/fu9weoll70acs8f/alien_3_1.png'))
aImages[2].append(simplegui.load_image('https://dl.dropbox.com/s/b2rxru2nt5q2r1u/alien_3_2.png'))
aImages[2].append(simplegui.load_image('https://dl.dropbox.com/s/x66vgj9fc2jlg53/alien_3_3.png'))
aImages[3].append(simplegui.load_image('https://dl.dropbox.com/s/7o04ljg52kniyac/alien_4_1.png'))
aImages[3].append(simplegui.load_image('https://dl.dropbox.com/s/b3v6tvami0rvl6r/alien_4_2.png'))
aImages[3].append(simplegui.load_image('https://dl.dropbox.com/s/j451arcevsag36h/alien_4_3.png'))
aImages[4].append(simplegui.load_image('https://dl.dropbox.com/s/jlhdigkm79nncnm/alien_5_1.png'))
aImages[4].append(simplegui.load_image('https://dl.dropbox.com/s/wvlvjsa8yl6gka3/alien_5_2.png'))
aImages[4].append(simplegui.load_image('https://dl.dropbox.com/s/rrg4y1tnsbrh04r/alien_5_3.png'))
aImages[5].append(simplegui.load_image('https://dl.dropbox.com/s/oufyfy590tzf7cx/alien_6_1.png'))
aImages[5].append(simplegui.load_image('https://dl.dropbox.com/s/p4ehd9f6mo2xfzc/alien_6_2.png'))
aImages[5].append(simplegui.load_image('https://dl.dropbox.com/s/815gq3xyh6wmc0t/alien_6_3.png'))
aImages[6].append(simplegui.load_image('https://dl.dropbox.com/s/bv4ycocuomsvj50/alien_7_1.png'))
aImages[6].append(simplegui.load_image('https://dl.dropbox.com/s/krs2gtvdxxve79z/alien_7_2.png'))
aImages[6].append(simplegui.load_image('https://dl.dropbox.com/s/v2wczi8lxwczq87/alien_7_3.png'))
#backgrounds
bckg = []
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/ibfu2t9vrh4bhxd/back01.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/pcl8vzby25ovis8/back02.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/g8nwo1t9s4i9usg/back03.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/ee8oilluf7pe98h/back04.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/7jfgjoxinzwwlx4/back05.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/wh01g2q3607snvz/back06.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/b72ltp2xii9utnr/back07.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/av73jek8egezs1w/back08.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/ik54ttfklv3x3ai/back09.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/e9e6kpyg3yuoenc/back10.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/zrabwnnvlwvn7it/back11.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/a2infkx0rmn8b8m/back12.jpg"))
# sounds
sndPlayer = simplegui.load_sound('https://dl.dropbox.com/s/vl3as0o2m2wvlwu/player_shoot.wav')
sndAlien = simplegui.load_sound('https://dl.dropbox.com/s/m4x0tldpze29hcr/alien_shoot.wav')
sndPlayerExplosion = simplegui.load_sound('https://dl.dropbox.com/s/10fn2wh7kk7uoxh/explosion%2001.wav')
sndAlienHit = simplegui.load_sound('https://dl.dropbox.com/s/80qdvup27n8j6r1/alien_hit.wav')
sndAlienExplosion = simplegui.load_sound('https://dl.dropbox.com/s/qxm3je9vdlb469g/explosion_02.wav')
sndBonus = simplegui.load_sound('https://dl.dropbox.com/s/tzp7e20e5v19l01/bonus.wav')
sndPause = simplegui.load_sound('https://dl.dropbox.com/s/uzs9nixpd22asno/pause.wav')
sndTheme = simplegui.load_sound('https://dl.dropbox.com/s/52zo892uemfkuzm/theme_01.mp3')
sounds = [sndPlayer, sndAlien, sndPlayerExplosion, sndAlienExplosion, \
sndBonus, sndPause, sndTheme, sndAlienHit]
#Global variables
GameRunning = False
GameEnded = False
player_speed = 0
mes = ""
timer_counter = 0
lives = 0
level = 1
scores = 0
killed = 0
current_back = 0
paused = False
shoot_count = 0
level_time = []
ready, go = False, False
#player = [FIELD_WIDTH //2, FIELD_HEIGHT - 30 + TOP_MARGIN]
#game objects
user_bullet = []
weapon_level = 1
weapon_speed = BULLET_SPEED
alien_bullets = []
alien_fleet = None
player = None
frame = None
aTimer = None
dTimer = None
bonuses = []
dCounter = 0
back = False
bonus_count = [0, 0, 0, 0]
player_killed = False
player_killed_at = 0
level_map = []
for i in range(7):
level_map.append([])
level_map[0] = [ 0, 0, 0, 0]
level_map[1] = [129, 0, 0, 0]
level_map[2] = [195, 129, 0, 0]
level_map[3] = [255, 195, 60, 0]
level_map[4] = [255, 231, 195, 195]
level_map[5] = [255, 255, 231, 195]
level_map[6] = [255, 255, 255, 231]
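# Each level_map row above is an 8-bit mask over the 8 fleet columns: a set bit marks a
# column whose alien is drawn from the next (tougher) tier for that level
# (see is_high_level inside AliensFleet.__init__).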
def draw_text(canvas, text, point, size, delta, color):
canvas.draw_text(text, point, size, color[0])
canvas.draw_text(text, [point[0]-delta[0], \
point[1]-delta[1]], size, color[1])
class Bonus:
def __init__ (self, kind, point):
self.kind = kind
self.x = point[0]
self.y = point[1]
self.v = BONUS_SPEED #velocity
        self.width = 36
        self.height = 36
def move(self):
self.y += self.v
return self
def draw(self, canvas):
if self.kind == 0: #speed of bullet
canvas.draw_circle([self.x, self.y], 15, 3, "LightBlue")
canvas.draw_text("WS", [self.x-12, self.y+5], self.width //2, "LightBlue")
elif self.kind == 1: #weapon level
canvas.draw_circle([self.x, self.y], 15, 3, "Red")
canvas.draw_text("WL", [self.x-12, self.y+5], self.width //2, "Red")
elif self.kind == 2: #life
canvas.draw_circle([self.x, self.y], 15, 3, "LightGreen")
canvas.draw_text("LF", [self.x-12, self.y+5], self.width //2, "LightGreen")
        elif self.kind == 3: #weapon power
            canvas.draw_circle([self.x, self.y], 15, 3, "#8010df")
            canvas.draw_text("WP", [self.x-12, self.y+5], self.width //2, "#8010df")
return self
def execute(self):
global weapon_speed, weapon_level, player, scores, bonus_count
bonus_count[self.kind] += 1
if self.kind == 0: #speed of bullet
weapon_speed += 1
delta = round(math.pow(20, (1 + (1.0*level-1)/32))*5)
scores = scores + delta
elif self.kind == 1: #weapon level
weapon_level += 1
delta = round(math.pow(30, (1 + (1.0*level-1)/32))*5)
scores = scores + delta
elif self.kind == 2: #life
player.lives += 1
delta = round(math.pow(100, (1 + (1.0*level-1)/32))*5)
scores = scores + delta
elif self.kind == 3: #weapon power
player.power += 0.1
delta = round(math.pow(100, (1 + (1.0*level-1)/32))*5)
scores = scores + delta
sndBonus.play()
return self
def dHandler():
global dCounter, back, player_killed
dCounter += 1
if dCounter % 10 == 0:
if back:
frame.set_canvas_background("Red")
else:
frame.set_canvas_background("black")
back = not back;
if dCounter > 50:
dCounter = 0
player_killed = False
dTimer.stop()
frame.set_canvas_background("black")
class Bullet:
def __init__ (self, point, color, velocity):
self.x = point[0]
self.y = point[1]
self.color = color
self.v = velocity
self.width = 1
self.height = 1
def draw(self, canvas):
canvas.draw_line([self.x, self.y-5], [self.x, self.y+5], 3, self.color)
def move(self):
self.y += self.v
class Alien:
def __init__(self, point, kind):
self.x = point[0]
self.y = point[1]
self.kind = kind
self.flying = False
self.vy = 0
self.vx = 0
self.health = self.get_max_health()
self.width = 20
self.height = 20
def get_max_health(self):
return 1+0.6 * self.kind[1]
def shoot(self):
if len(alien_bullets)<level*2:
bullet = Bullet([self.x, self.y], "LightRed", BULLET_SPEED)
alien_bullets.append(bullet)
sndAlien.play()
def move(self, point):
if self.flying:
koef = 1.5
self.y += (self.vy / koef)
if self.x>player.x:
self.x -= (self.vx / koef)
else:
self.x += (self.vx / koef)
if self.vx<ALIEN_SPEED[0]:
self.vx += 1
if self.vy<ALIEN_SPEED[1]:
self.vy += 1
else:
self.x = point[0]
self.y = point[1]
def draw(self, canvas):
if aImages[self.kind[1]][self.kind[0]].get_width()==0:
w = 15
h = 15
canvas.draw_circle([self.x, self.y], 15, 5, "Red")
else:
# img = aImages[self.kind[1]][self.kind[0]]
img = aImages[self.kind[1]][self.kind[0]]
self.width = w = img.get_width()
self.height = h = img.get_height()
canvas.draw_image(img, (w//2, h//2), (w, h), (self.x, self.y), (w, h))
if self.health<>self.get_max_health():
ratio = w * (self.health*1.0) / self.get_max_health()
canvas.draw_line([self.x-w//2, self.y-h//2-3], [self.x+w//2, self.y-h//2-3], 4, "red")
canvas.draw_line([self.x-w//2, self.y-h//2-3], [self.x-w//2+ratio, self.y-h//2-3], 4, "green")
return canvas
class AliensFleet:
def __init__ (self, point):
def is_high_level(place):
map_ = (level-1)%7
row = level_map[map_][place[1]] #255 - 0
return (row & (1 << place[0]))<>0
self.x = point[0]
self.y = point[1]
self.aliens = []
self.pattern = [255, 255, 255, 255]
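        # self.pattern keeps one 8-bit mask per fleet row; a set bit means the alien in
        # that column is still alive (check_death() clears bits as aliens are destroyed).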
self.y_velocity = ALIEN_HEIGHT//3 + 1
self.x_velocity = - ALIEN_WIDTH//3 + 1
for i in range(self.get_aliens_count()):
point = self.get_alien_position(i)
place = self.get_alien_place(i)
alien_level = (level-1)//7 + is_high_level(place)
alien = Alien(point, [random.randrange(3), alien_level])
self.aliens.append(alien)
def get_aliens_count(self):
c = 0
for i in range(4):
for j in range(8):
if (self.pattern[i] & (1 << j))<>0:
c+=1
return c
def get_alien_position(self, n):
#returns a screen x, y of alien with number n
point = self.get_alien_place(n)
x = point[0]*(ALIEN_WIDTH + 3) + self.x
y = point[1]*(ALIEN_HEIGHT + 3) +self.y
point = [x, y]
return point
def get_alien_place(self, n):
#returns a fleet x, y of alien with number n
x, y, c = 0, 0, 0
for i in range(4):
for j in range(8):
if (self.pattern[i] & (1 << j))<>0:
if c==n:
x, y = j, i
c+=1
point = [x, y]
return point
def move_aliens(self):
i = 0
for alien in self.aliens:
point = self.get_alien_position(i)
alien.move(point)
i += 1
return self
def move_down(self):
self.y += self.y_velocity
if self.y>400:
player.explode()
self.y = 100
self.move_aliens()
def move_side(self):
self.x -= self.x_velocity
# check borders of fleet:
left = 8
right = -1
for i in range(len(self.aliens)):
point = self.get_alien_place(i)
if point[0]<left:
left = point[0]
if point[0]>right:
right = point[0]
if (self.x+(left+1)*60 < LEFT_MARGIN + 10) or (self.x + (right+1)*45>FIELD_WIDTH-LEFT_MARGIN-60):
self.x_velocity = -self.x_velocity
self.move_aliens()
def draw(self, canvas):
for alien in self.aliens:
alien.draw(canvas)
def make_shoot(self):
for alien in self.aliens:
if len(alien_bullets) < level * 3 + 1:
if random.randrange(101)<2: #
alien.shoot()
return self
def alien_fly(self):
i = 0
for alien in self.aliens:
if alien.flying:
i += 1
if (i<1+level) and (random.randrange(1000)<3) and (time.time()-level_time[len(level_time)-1]>60):
alien.flying=True
def check_death(self):
global scores, killed, player
i = 0
for bullet in user_bullet:
for i in range(len(self.aliens)):
alien = self.aliens[i]
if isBulletHit(bullet, alien):
if alien.health-player.power<=0:
point = self.get_alien_place(i)
sndAlienExplosion.play()
self.aliens.remove(alien)
x = ~int((1 << point[0]))
self.pattern[point[1]] = self.pattern[point[1]] & x
user_bullet.remove(bullet)
delta = round(math.pow(5, (1 + (1.0*level-1)/32))*5)
scores = scores + delta
killed += 1
x = random.randrange(1000)
if x<5:
bonus = Bonus(3, [alien.x, alien.y])
bonuses.append(bonus)
elif x<50:
bonus = Bonus(2, [alien.x, alien.y])
bonuses.append(bonus)
elif x<120:
bonus = Bonus(1, [alien.x, alien.y])
bonuses.append(bonus)
elif x<200:
bonus = Bonus(0, [alien.x, alien.y])
bonuses.append(bonus)
if killed % 500 == 0:
player.lives += 1
sndBonus.play()
break
else:
user_bullet.remove(bullet)
alien.health -= player.power
sndAlienHit.play()
i += 1
class Player:
def __init__(self, point, lives):
self.x = point[0]
self.y = point[1]
self.lives = 3
self.speed = player_speed
self.power = BULLET_POWER
self.width = 20
self.height = 20
def draw(self, canvas):
draw_user_image(canvas, [self.x, self.y])
def move(self):
self.x += player_speed
if self.x<LEFT_MARGIN*2:
self.x = LEFT_MARGIN*2
if self.x>FIELD_WIDTH:
self.x=FIELD_WIDTH
def draw_lives_counter(self, canvas):
if self.lives < 5:
for i in range(self.lives):
draw_user_image(canvas, [150+i*35, 15])
else:
draw_user_image(canvas, [150, 15])
canvas.draw_text(" x "+str(int(self.lives)), [170, 25], 25, "Yellow")
def explode(self):
        # 'bonuses' is included so the reset below clears the module-level list
        global dTimer, alien_bullets, user_bullet, weapon_level, weapon_speed
        global alien_fleet, player_killed_at, player_killed, player_speed, bonuses
player_speed = 0
player_killed_at = time.time()
sndPlayerExplosion.play()
for alien in alien_fleet.aliens:
alien.flying = False
player_killed = True
alien_bullets = []
user_bullet = []
bonuses = []
weapon_level = level // 10 + 1
weapon_speed = BULLET_SPEED
self.lives -= 1
if self.lives<0:
stop_game()
dTimer = simplegui.create_timer(25, dHandler)
dTimer.start()
#helper functions
def dummy(key):
return key
def pause():
global paused
paused = not paused
sndPause.play()
def draw_user_image(canvas, point):
# draw a image of user ship
#
global player
if pImage.get_width()==0:
canvas.draw_circle(point, 12, 5, "Yellow")
else:
canvas.draw_image(pImage, (25, 36), (49, 72), point, (34, 50))
player.width = pImage.get_width()
player.height = pImage.get_height()
return canvas
def draw_lives(canvas):
# draw lives counter
canvas.draw_text("Lives : ", [30, 25], 25, "Red")
if player<>None:
player.draw_lives_counter(canvas)
return canvas
def draw_weapons(canvas):
    canvas.draw_text("Weapon : ", [30, 60], 25, "Red")
    canvas.draw_text("Rocket lvl: "+str(int(weapon_level)), [135, 60], 25, "Yellow")
    canvas.draw_text("WS:"+str(weapon_speed/10.0), [280, 48], 10, "#00c5fe")
    canvas.draw_text("WP:"+str(player.power), [280, 61], 10, "#00c5fe")
return canvas
def draw_level(canvas):
canvas.draw_text("Level : ", [FIELD_WIDTH-200, 50], 50, "Red")
canvas.draw_text(str(level), [FIELD_WIDTH-50, 50], 50, "Yellow")
return canvas
def draw_scores(canvas):
canvas.draw_text(str(int(scores)), [400, 50], 50, "LightBlue")
return canvas
def draw_screen(canvas):
# border of board
canvas.draw_image(bckg[current_back], (425, 250), (850, 500), \
(LEFT_MARGIN+FIELD_WIDTH//2, TOP_MARGIN+FIELD_HEIGHT//2),\
(FIELD_WIDTH, FIELD_HEIGHT))
canvas.draw_polygon([[LEFT_MARGIN, TOP_MARGIN],
[LEFT_MARGIN, FIELD_HEIGHT+TOP_MARGIN],
[FIELD_WIDTH+LEFT_MARGIN, FIELD_HEIGHT+TOP_MARGIN],
[FIELD_WIDTH+LEFT_MARGIN, TOP_MARGIN]], 2, 'Orange')
return canvas
def draw_start_screen(canvas):
img_count = 1 + len(aImages)*(len(aImages[0])) + len(bckg)
loaded_img_count = 0
if pImage.get_width()<>0:
loaded_img_count += 1
for bImage in bckg:
if bImage.get_width()<>0:
loaded_img_count += 1
for aImg in aImages:
for img in aImg:
if img.get_width()<>0:
loaded_img_count += 1
loaded_sounds = 0
for snd in sounds:
if snd <> None:
loaded_sounds += 1
draw_text(canvas, "SPACE INVANDERS", [220, 150], 50, [3, 3], ["blue", "yellow"])
canvas.draw_text("ver. - "+VER, [600, 170], 20, "yellow")
canvas.draw_text("03 nov. 2013", [600, 190], 20, "yellow")
draw_text(canvas, "CONTROLS:", [110, 210], 24, [2, 2], ["green", "yellow"])
draw_text(canvas, "Arrows - to left and right, space - to fire, P to pause game", [110, 240], 24, [2, 2], ["green", "yellow"])
draw_text(canvas, "Bonuses: ", [110, 280], 24, [2, 2], ["green", "yellow"])
b = Bonus(0, [125, 310])
b.draw(canvas)
draw_text(canvas, " - increase user's bullet speed", [150, 320], 24, [2, 2], ["green", "yellow"])
b = Bonus(1, [125, 350])
b.draw(canvas)
draw_text(canvas, " - increase user's bullet number", [150, 360], 24, [2, 2], ["green", "yellow"])
b = Bonus(2, [125, 390])
b.draw(canvas)
draw_text(canvas, " - add life", [150, 400], 24, [2, 2], ["green", "yellow"])
b = Bonus(3, [125, 430])
b.draw(canvas)
draw_text(canvas, " - increase weapon power", [150, 440], 24, [2, 2], ["green", "yellow"])
if loaded_img_count<img_count:
draw_text(canvas, "Please, wait for loading...", [280, 500], 40, [3, 3], ["Blue", "Yellow"])
s = "Loaded "+str(loaded_img_count)+" images of "+str(img_count)
draw_text(canvas, s, [110, 550], 20, [2, 2], ["Blue", "yellow"])
s = "Loaded "+str(loaded_sounds)+" sounds of "+str(len(sounds))
draw_text(canvas, s, [510, 550], 20, [2, 2], ["Blue", "yellow"])
else:
draw_text(canvas, "Click to start game", [300, 500], 40, [3, 3], ["Blue", "yellow"])
frame.set_mouseclick_handler(click_handler)
return canvas
def draw_end_screen(canvas):
draw_text(canvas, "Game over!", [350, 180], 50, [2, 2], ["Blue", "Yellow"])
draw_text(canvas, "Your score is "+str(int(scores)), [330, 240], 35, [2, 2], ["blue", "Yellow"])
draw_text(canvas, "You shoot "+str(int(shoot_count))+" times", [150, 320], 24, [2, 2], ["blue", "Yellow"])
draw_text(canvas, "You kill a "+str(killed)+" aliens", [150, 360], 24, [2, 2], ["blue", "Yellow"])
if shoot_count == 0:
s = "0"
else:
s = str(int(10000*float(killed)/shoot_count)/100.0)
draw_text(canvas, "Your accuracy is "+s+"%", [150, 400], 24, [2, 2], ["blue", "Yellow"])
i = 0
for bc in bonus_count:
b = Bonus(i, [505, 310 + 40*i])
b.draw(canvas)
draw_text(canvas, " - used "+str(bonus_count[i])+" times", [530, 320+40*i], 24, [2, 2], ["blue", "yellow"])
i += 1
draw_text(canvas, "Click to start new game", [300, 500], 40, [2, 2], ["blue", "Yellow"])
canvas.draw_text("ver. - "+VER, [600, 540], 15, "yellow");
return canvas
def draw_game_objects(canvas):
player.draw(canvas)
#draw_user_image(canvas, Player)
for bullet in alien_bullets:
bullet.draw(canvas)
for bullet in user_bullet:
bullet.draw(canvas)
for bonus in bonuses:
bonus.draw(canvas)
alien_fleet.draw(canvas)
readyGo()
if paused:
draw_text(canvas, "P A U S E", [380, 350], 50, [2, 2], ["Green", "Yellow"])
if ready:
draw_text(canvas, "R E A D Y", [380, 350], 50, [2, 2], ["Green", "Yellow"])
if go:
draw_text(canvas, "G O ! ! !", [380, 350], 50, [2, 2], ["Green", "Yellow"])
sndTheme.play()
return canvas
def moving_objects():
global timer_counter
if not GameRunning:
return None
if paused or ready or go or player_killed:
return None
timer_counter += 1
player.move()
for alien in alien_fleet.aliens:
if alien.flying:
alien.move([0,0])
if isBulletHit(alien, player):
player.explode()
if alien.y>FIELD_HEIGHT + TOP_MARGIN+20:
alien.y = TOP_MARGIN
for bonus in bonuses:
bonus.move();
if bonus.y > FIELD_HEIGHT + TOP_MARGIN+20:
bonuses.remove(bonus)
if isBulletHit(bonus, player):
bonus.execute()
bonuses.remove(bonus)
for bullet in user_bullet:
bullet.move()
alien_fleet.check_death()
for bullet in user_bullet:
if bullet.y<TOP_MARGIN+25:
user_bullet.remove(bullet)
# for bullet in alien_bullets:
bullets_to_delete = []
for bullet in list(alien_bullets):
bullet.move()
if bullet.y > FIELD_HEIGHT + TOP_MARGIN -10:
bullets_to_delete.append(bullet)
if isBulletHit(bullet, player):
player.explode()
for bullet in bullets_to_delete:
if bullet in alien_bullets:
alien_bullets.remove(bullet)
alien_fleet.make_shoot()
alien_fleet.alien_fly()
if level<30:
x = 60 - level
else:
x = 1
if timer_counter % x == 0:
alien_fleet.move_side()
if timer_counter % (100 + x) == 0:
alien_fleet.move_down()
if alien_fleet.get_aliens_count() == 0:
new_level()
# Handler to draw on canvas
def draw(canvas):
draw_screen(canvas)
canvas.draw_text(mes, [250, 250], 40, "Yellow")
######################
#check a begin of game
#
if GameEnded:
draw_end_screen(canvas)
elif not GameRunning:
draw_start_screen(canvas)
else:
##################
# game info
draw_lives(canvas)
draw_weapons(canvas)
draw_level(canvas)
draw_scores(canvas)
draw_game_objects(canvas)
return canvas
def readyGo():
global ready, go
ready = time.time()-level_time[len(level_time)-1]<0.7
go = (not ready) and time.time()-level_time[len(level_time)-1]<1.5
player_killed = time.time() - player_killed_at < 1.2
#Initialization and start of game
def start_game():
global GameRunning, alien_fleet, player, GameEnded
global scores, killed, level, level_time, bonus_count
scores = 0
bonus_count = [0, 0, 0, 0]
killed = 0
level = 0
GameEnded = False
GameRunning = True
new_level()
player = Player([FIELD_WIDTH //2, FIELD_HEIGHT + TOP_MARGIN-20], 3)
return None
def stop_game():
global GameRunning, GameEnded
# aTimer.stop()
GameEnded = True
GameRunning = False
level_time.append(time.time())
frame.set_keydown_handler(dummy)
frame.set_keyup_handler(dummy)
return None
# Handler for mouse click
def click_handler(position):
if not GameRunning:
start_game()
#else:
# stop_game()
return position
#### keydown_handler
def keydown(key):
global keypressed, mes, shoot_count, player_speed
keypressed = key
if (key == simplegui.KEY_MAP['p']) or \
(key == simplegui.KEY_MAP['P']):
pause()
else:
if (key == simplegui.KEY_MAP['right']):
#player.move('right')
player_speed = PLAYER_SPEED
elif (key == simplegui.KEY_MAP['left']):
# player.move('left')
player_speed = -PLAYER_SPEED
        if (key == simplegui.KEY_MAP['space']) and (GameRunning):
if len(user_bullet) < weapon_level:
b = Bullet([player.x, player.y], "LightBlue", -weapon_speed)
user_bullet.append(b)
sndPlayer.play()
shoot_count += 1
return
#### keyup_handler to stop keydown
def keyup(key):
global player_speed
#if keytimer.is_running():
# keytimer.stop()
if (key == simplegui.KEY_MAP['right'])or(key == simplegui.KEY_MAP['left']):
player_speed = 0
return
def isBulletHit(bullet, obj):
if (bullet.y+bullet.height//2+2 > obj.y-obj.height // 2) and (bullet.y-bullet.height//2-2<obj.y+obj.height//2):
if (bullet.x+bullet.width//2 +2> obj.x - obj.width//2) and (bullet.x-bullet.width//2 -2< obj.x + obj.width//2):
return True
else:
return False
else:
return False
def new_level():
global level, alien_fleet, user_bullet, alien_bullets, current_back, player
global level_time, player_speed
level_time.append(time.time())
current_back = random.randrange(12)
level += 1
player_speed = 0
user_bullet = []
alien_bullets = []
alien_fleet = AliensFleet([250, 100])
if level % 10 == 0:
player.lives += 1
sndBonus.play()
# Create a frame and assign callbacks to event handlers
frame = simplegui.create_frame("Galaxian", 900, 600, 0)
frame.set_draw_handler(draw)
frame.set_keydown_handler(keydown)
frame.set_keyup_handler(keyup)
aTimer = simplegui.create_timer(60, moving_objects)
aTimer.start()
# Start the frame animation
frame.start()
| 35.988778 | 131 | 0.562346 | 10,997 | 0.381007 | 0 | 0 | 0 | 0 | 0 | 0 | 4,679 | 0.162111 |
c3482f320f27c64a0db4f2f20db98025fee332ce | 1,664 | py | Python | components/py-flask-wa/app.py | ajayns/amoc-project | c22ae62789568c1a784f165fbd4547ac20c290a0 | [
"MIT"
]
| 26 | 2017-04-21T06:05:44.000Z | 2020-03-09T11:41:34.000Z | components/py-flask-wa/app.py | ajayns/amoc-project | c22ae62789568c1a784f165fbd4547ac20c290a0 | [
"MIT"
]
| 6 | 2017-04-16T03:53:28.000Z | 2019-02-26T07:02:48.000Z | components/py-flask-wa/app.py | ajayns/amoc-project | c22ae62789568c1a784f165fbd4547ac20c290a0 | [
"MIT"
]
| 5 | 2017-06-09T06:44:59.000Z | 2019-12-13T07:34:11.000Z | from flask import Flask, jsonify, request, render_template, redirect
from flask_pymongo import PyMongo
from werkzeug import secure_filename
import base64
app = Flask(__name__)
app.config['MONGO_DBNAME'] = 'restdb'
app.config['MONGO_URI'] = 'mongodb://localhost:27017/restdb'
mongo = PyMongo(app)
@app.route('/')
def index():
return render_template("index.html")
@app.route('/w')
def webcam():
return render_template("webcam.html")
@app.route('/img')
def img():
i = request.query_string
f = open('a.png','wb')
f.write(i.decode('base64'))
return "success <img src='" + i + "'>"
@app.route('/hello')
def hello():
return "hello world"
@app.route('/star', methods=['GET'])
def get_all_stars():
star = mongo.db.stars
output = []
for s in star.find():
output.append({'name' : s['name'], 'distance' : s['distance']})
return jsonify(output)
@app.route('/star/', methods=['GET'])
def get_one_star(name):
star = mongo.db.stars
s = star.find_one({'name' : name})
if s:
output = {'name': s['name'], 'distance': s['distance']}
else:
output = "No such name"
return jsonify(output)
@app.route('/star', methods=['POST'])
def add_star():
star = mongo.db.stars
name = request.json['name']
distance = request.json['distance']
star_id = star.insert({'name': name, 'distance': distance})
new_star = star.find_one({'_id': star_id})
output = {'name' : new_star['name'], 'distance' : new_star['distance']}
return jsonify(output)
@app.route('/uploader', methods=['POST'])
def upload_file():
f = request.files['file']
f.save(secure_filename('1'))
return "uploaded"
if __name__ == '__main__':
app.run(debug=True)
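# Example request against the /star endpoint once the app is running (illustrative):
#   curl -X POST http://localhost:5000/star \
#        -H "Content-Type: application/json" \
#        -d '{"name": "Sirius", "distance": "8.6 ly"}'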
| 24.115942 | 73 | 0.658053 | 0 | 0 | 0 | 0 | 1,298 | 0.780048 | 0 | 0 | 406 | 0.24399 |
c34951b6e45c7100c95839ca25a8df621a593d38 | 2,190 | py | Python | wc_lang/util.py | KarrLab/wc_lang | 113a8b473576fa9c13688d2deb71b4b2ab400a03 | [
"MIT"
]
| 7 | 2018-05-14T09:26:14.000Z | 2021-05-20T01:11:45.000Z | wc_lang/util.py | KarrLab/wc_lang | 113a8b473576fa9c13688d2deb71b4b2ab400a03 | [
"MIT"
]
| 142 | 2018-03-14T16:50:56.000Z | 2021-01-03T16:25:23.000Z | wc_lang/util.py | KarrLab/wc_lang | 113a8b473576fa9c13688d2deb71b4b2ab400a03 | [
"MIT"
]
| 4 | 2019-01-06T08:32:23.000Z | 2021-05-20T01:11:49.000Z | """ Utilities
:Author: Jonathan Karr <[email protected]>
:Date: 2016-11-10
:Copyright: 2016, Karr Lab
:License: MIT
"""
from obj_tables import get_models as base_get_models
from wc_lang import core
from wc_lang import io
from wc_utils.util import git
def get_model_size(model):
""" Get numbers of model components
Args:
model (:obj:`core.Model`): model
Returns:
:obj:`dict`: dictionary with numbers of each type of model component
"""
return {
"submodels": len(model.get_submodels()),
"compartments": len(model.get_compartments()),
"species_types": len(model.get_species_types()),
"species": len(model.get_species()),
"parameters": len(model.get_parameters()),
"references": len(model.get_references()),
"reactions": len(model.get_reactions()),
}
def get_model_summary(model):
""" Get textual summary of a model
Args:
model (:obj:`core.Model`): model
Returns:
:obj:`str`: textual summary of the model
"""
return "Model with:" \
+ "\n{:d} submodels".format(len(model.get_submodels())) \
+ "\n{:d} compartments".format(len(model.get_compartments())) \
+ "\n{:d} species types".format(len(model.get_species_types())) \
+ "\n{:d} species".format(len(model.get_species())) \
+ "\n{:d} parameters".format(len(model.get_parameters())) \
+ "\n{:d} references".format(len(model.get_references())) \
+ "\n{:d} dFBA objective reactions".format(len(model.get_dfba_obj_reactions())) \
+ "\n{:d} reactions".format(len(model.get_reactions())) \
+ "\n{:d} rate laws".format(len(model.get_rate_laws()))
def get_models(inline=True):
""" Get list of models
Args:
inline (:obj:`bool`, optional): if true, return inline models
Returns:
:obj:`list` of :obj:`class`: list of models
"""
return base_get_models(module=core, inline=inline)
def gen_ids(model):
""" Generate ids for model objects
Args:
model (:obj:`core.Model`): model
"""
for obj in model.get_related():
if hasattr(obj, 'gen_id'):
obj.id = obj.gen_id()
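# Example (illustrative sketch; assumes `model` is a wc_lang core.Model instance, e.g. one
# loaded with wc_lang.io):
#   print(get_model_summary(model))
#   sizes = get_model_size(model)
#   gen_ids(model)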
| 28.441558 | 89 | 0.616895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,018 | 0.46484 |
c349777d037bf08d8ee79327a13369ab404b7431 | 5,267 | py | Python | synapse/tests/test_tools_autodoc.py | kcreyts/synapse | fe740fd1e0febfa32f8d431b32ab48f8a0cf306e | [
"Apache-2.0"
]
| 1 | 2021-02-15T22:07:05.000Z | 2021-02-15T22:07:05.000Z | synapse/tests/test_tools_autodoc.py | kcreyts/synapse | fe740fd1e0febfa32f8d431b32ab48f8a0cf306e | [
"Apache-2.0"
]
| null | null | null | synapse/tests/test_tools_autodoc.py | kcreyts/synapse | fe740fd1e0febfa32f8d431b32ab48f8a0cf306e | [
"Apache-2.0"
]
| null | null | null | import synapse.common as s_common
import synapse.tests.utils as s_t_utils
import synapse.tools.autodoc as s_autodoc
class TestAutoDoc(s_t_utils.SynTest):
async def test_tools_autodoc_docmodel(self):
with self.getTestDir() as path:
argv = ['--doc-model', '--savedir', path]
outp = self.getTestOutp()
self.eq(await s_autodoc.main(argv, outp=outp), 0)
with s_common.genfile(path, 'datamodel_types.rst') as fd:
buf = fd.read()
s = buf.decode()
self.isin('Base types are defined via Python classes.', s)
self.isin('synapse.models.inet.Addr', s)
self.isin('Regular types are derived from BaseTypes.', s)
self.isin(r'inet\:server', s)
with s_common.genfile(path, 'datamodel_forms.rst') as fd:
buf = fd.read()
s = buf.decode()
self.isin('Forms are derived from types, or base types. Forms represent node types in the graph.', s)
self.isin(r'inet\:ipv4', s)
self.notin(r'file\:bytes:.created', s)
self.isin('Universal props are system level properties which may be present on every node.', s)
self.isin('.created', s)
self.notin('..created\n', s)
self.isin('An example of ``inet:dns:a``\\:', s)
async def test_tools_autodoc_confdefs(self):
with self.getTestDir() as path:
argv = ['--savedir', path, '--doc-conf',
'synapse.tests.test_lib_stormsvc.StormvarServiceCell']
outp = self.getTestOutp()
self.eq(await s_autodoc.main(argv, outp=outp), 0)
with s_common.genfile(path, 'conf_stormvarservicecell.rst') as fd:
buf = fd.read()
s = buf.decode()
self.isin('autodoc-stormvarservicecell-conf', s)
self.isin('StormvarServiceCell Configuration Options', s)
self.isin('See :ref:`devops-cell-config` for', s)
self.isin('auth\\:passwd', s)
self.isin('Environment Variable\n ``SYN_STORMVARSERVICECELL_AUTH_PASSWD``', s)
self.isin('``--auth-passwd``', s)
argv.append('--doc-conf-reflink')
argv.append('`Configuring a Cell Service <https://synapse.docs.vertex.link/en/latest/synapse/devguides/devops_cell.html>`_')
# truncate the current file
with s_common.genfile(path, 'conf_stormvarservicecell.rst') as fd:
fd.truncate()
outp = self.getTestOutp()
self.eq(await s_autodoc.main(argv, outp=outp), 0)
with s_common.genfile(path, 'conf_stormvarservicecell.rst') as fd:
buf = fd.read()
s = buf.decode()
self.isin('StormvarServiceCell Configuration Options', s)
self.isin('See `Configuring a Cell Service <https://synapse', s)
async def test_tools_autodoc_stormsvc(self):
with self.getTestDir() as path:
argv = ['--savedir', path, '--doc-storm',
'synapse.tests.test_lib_stormsvc.StormvarServiceCell']
outp = self.getTestOutp()
self.eq(await s_autodoc.main(argv, outp=outp), 0)
with s_common.genfile(path, 'stormsvc_stormvarservicecell.rst') as fd:
buf = fd.read()
s = buf.decode()
self.isin('StormvarServiceCell Storm Service', s)
self.isin('This documentation is generated for version 0.0.1 of the service.', s)
self.isin('Storm Package\\: stormvar', s)
self.isin('.. _stormcmd-stormvar-magic:\n', s)
self.isin('magic\n-----', s)
self.isin('Test stormvar support', s)
self.isin('forms as input nodes', s)
self.isin('``test:str``', s)
self.isin('nodes in the graph', s)
self.isin('``test:comp``', s)
self.isin('nodedata with the following keys', s)
self.isin('``foo`` on ``inet:ipv4``', s)
async def test_tools_autodoc_stormtypes(self):
with self.getTestDir() as path:
argv = ['--savedir', path, '--doc-stormtypes']
outp = self.getTestOutp()
self.eq(await s_autodoc.main(argv, outp=outp), 0)
with s_common.genfile(path, 'stormtypes_libs.rst') as fd:
libbuf = fd.read()
libtext = libbuf.decode()
self.isin('.. _stormlibs-lib-print:\n\n$lib.print(mesg, \\*\\*kwargs)\n============================',
libtext)
self.isin('Print a message to the runtime.', libtext)
self.isin('\\*\\*kwargs (any): Keyword arguments to substitute into the mesg.', libtext)
self.isin('.. _stormlibs-lib-time:\n\n*********\n$lib.time\n*********', libtext)
self.isin('A Storm Library for interacting with timestamps.', libtext)
with s_common.genfile(path, 'stormtypes_prims.rst') as fd:
primbuf = fd.read()
primstext = primbuf.decode()
self.isin('.. _stormprims-storm-auth-user:\n\n*****************\nstorm\\:auth\\:user\n*****************', primstext)
self.isin('iden\n====\n\nThe User iden.', primstext)
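# These tests follow the stock unittest pattern; an illustrative invocation would be
# "python -m pytest synapse/tests/test_tools_autodoc.py".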
| 41.801587 | 136 | 0.566926 | 5,147 | 0.977217 | 0 | 0 | 0 | 0 | 5,086 | 0.965635 | 2,021 | 0.38371 |
c34a6dd4d560ec8071e9109e3ca674e32bbace38 | 4,174 | py | Python | tf_idf.py | ricosr/retrieval_chatbot | 567e860f09771cae19e32b3bf20b5ce87266cda6 | [
"MIT"
]
| 16 | 2018-12-04T13:55:56.000Z | 2021-11-21T05:53:57.000Z | tf_idf.py | ricosr/retrieval_chatbot | 567e860f09771cae19e32b3bf20b5ce87266cda6 | [
"MIT"
]
| 5 | 2019-05-21T12:40:18.000Z | 2019-05-31T18:23:51.000Z | tf_idf.py | ricosr/retrieval_chatbot | 567e860f09771cae19e32b3bf20b5ce87266cda6 | [
"MIT"
]
| 4 | 2018-11-22T13:45:05.000Z | 2019-09-16T16:30:28.000Z | # -*- coding: utf-8 -*-
import pickle
import os
import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.externals import joblib
from sklearn.metrics.pairwise import cosine_similarity
class TfIdf:
def __init__(self, config):
self.config = config
self.model_dict = {}
self.vector_context_ls = []
self.vector_utterrance_ls = []
self.load_models(self.config)
def load_models(self, config):
try:
for file_name, file_path in self.config.model_dict.items():
self.model_dict[file_name] = joblib.load(file_path)
except Exception as e:
pass
def select_model(self, file_name):
try:
self.current_model = self.model_dict[file_name]
except Exception as e:
pass
def predict_tfidf(self, utterances, context_ls):
for each_context in context_ls:
if each_context == (0, 0):
continue
self.vector_context_ls.append(self.current_model.transform(
[self.word_segment(each_context[0]) + self.word_segment(each_context[1])]))
self.vector_utterrance_ls.append(self.current_model.transform(
[self.word_segment(utterances) + self.word_segment(each_context[1])]))
def calculate_distances(self):
result_ls = []
for tfidf_c, tfidf_u in zip(self.vector_context_ls, self.vector_utterrance_ls):
result_ls.append(self.calculate_cos_similarity(tfidf_c, tfidf_u))
result_ls = self.normalization(result_ls)
self.vector_utterrance_ls.clear()
self.vector_context_ls.clear()
return result_ls
def calculate_cos_similarity(self, x, y):
x = x.reshape(1, -1)
y = y.reshape(1, -1)
return cosine_similarity(x, y)
def word_segment(self, chinese_characters):
seg_list = [each_word for each_word in jieba.cut(chinese_characters, cut_all=False)]
return " ".join(seg_list)
def normalization(self, ratio_ls):
max_ratio = max(ratio_ls)
min_ratio = min(ratio_ls)
if max_ratio == min_ratio:
return [1]*len(ratio_ls)
return [(each_ratio - min_ratio) / (max_ratio - min_ratio) for each_ratio in ratio_ls]
class TrainTfIdf:
def __init__(self, config):
self.config = config
self.files_dict = {}
self.load_stop_words(self.config)
def load_pickle(self, file=None):
if file:
with open(self.config.file_dict[file], 'rb') as fp:
self.files_dict[file] = pickle.load(fp)
else:
for file_name, path in self.config.file_dict.items():
with open(path, 'rb') as fp:
self.files_dict[file_name] = pickle.load(fp)
def word_segment(self, chinese_characters):
seg_list = [each_word for each_word in jieba.cut(chinese_characters, cut_all=False)]
return " ".join(seg_list)
def load_stop_words(self, config):
with open(config.stop_words, 'rb') as fpr:
self.stop_words = pickle.load(fpr)
# def remove_stop_words(self, cut_words):
# cut_words_ls = cut_words.split(' ')
# for i in range(len(cut_words_ls)):
# if cut_words_ls[i] in self.stop_words:
# cut_words_ls[i] = 0
# while True:
# if 0 in cut_words_ls:
# cut_words_ls.remove(0)
# else:
# break
# return ' '.join(cut_words_ls)
def train(self):
if not os.path.exists("model"):
os.mkdir("model")
for file_name, content in self.files_dict.items(): # content:[[question, answer]]
tmp_content = map(lambda each_chat: map(self.word_segment, each_chat), content)
content_str_ls = [' '.join(list(each_chat)) for each_chat in tmp_content]
# no_stop_content_ls = list(map(self.remove_stop_words, content_str_ls))
vectorizer = TfidfVectorizer(stop_words=self.stop_words)
vectorizer.fit_transform(content_str_ls)
joblib.dump(vectorizer, 'model/{}.pkl'.format(file_name))
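# Example usage (illustrative sketch; the config objects, pickle paths and model name are
# assumptions, not part of the original code):
#   trainer = TrainTfIdf(train_config)   # train_config.file_dict / stop_words point at pickled corpora
#   trainer.load_pickle()
#   trainer.train()                      # writes model/<name>.pkl per corpus file
#   matcher = TfIdf(run_config)          # run_config.model_dict maps names to the saved .pkl files
#   matcher.select_model('chat')
#   matcher.predict_tfidf(utterance, context_list)
#   scores = matcher.calculate_distances()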
| 36.614035 | 94 | 0.626977 | 3,952 | 0.946814 | 0 | 0 | 0 | 0 | 0 | 0 | 530 | 0.126977 |
c34b267716c64dbcac0061ea5f7b0de5338ff153 | 19,015 | py | Python | d373c7/pytorch/models/classifiers.py | t0kk35/d373c7 | 7780b97545e581244fb4fb74347bb1b052b9ec3f | [
"Apache-2.0"
]
| 1 | 2021-07-23T18:04:55.000Z | 2021-07-23T18:04:55.000Z | d373c7/pytorch/models/classifiers.py | t0kk35/d373c7 | 7780b97545e581244fb4fb74347bb1b052b9ec3f | [
"Apache-2.0"
]
| null | null | null | d373c7/pytorch/models/classifiers.py | t0kk35/d373c7 | 7780b97545e581244fb4fb74347bb1b052b9ec3f | [
"Apache-2.0"
]
| null | null | null | """
Module for classifier Models
(c) 2020 d373c7
"""
import logging
import torch
import torch.nn as nn
from .common import PyTorchModelException, ModelDefaults, _History, _ModelGenerated, _ModelStream
from .encoders import GeneratedAutoEncoder
from ..layers import LSTMBody, ConvolutionalBody1d, AttentionLastEntry, LinearEncoder, TensorDefinitionHead
from ..layers import TransformerBody, TailBinary
from ..loss import SingleLabelBCELoss
from ...features import TensorDefinition, TensorDefinitionMulti
from typing import List, Dict, Union
logger = logging.getLogger(__name__)
class BinaryClassifierHistory(_History):
loss_key = 'loss'
acc_key = 'acc'
def __init__(self, *args):
dl = self._val_argument(args)
h = {m: [] for m in [BinaryClassifierHistory.loss_key, BinaryClassifierHistory.acc_key]}
_History.__init__(self, dl, h)
self._running_loss = 0
self._running_correct_cnt = 0
self._running_count = 0
@staticmethod
def _reshape_label(pr: torch.Tensor, lb: torch.Tensor) -> torch.Tensor:
if pr.shape == lb.shape:
return lb
elif len(pr.shape)-1 == len(lb.shape) and pr.shape[-1] == 1:
return torch.unsqueeze(lb, dim=len(pr.shape)-1)
else:
raise PyTorchModelException(
f'Incompatible shapes for prediction and label. Got {pr.shape} and {lb.shape}. Can not safely compare'
)
def end_step(self, *args):
BinaryClassifierHistory._val_is_tensor(args[0])
BinaryClassifierHistory._val_is_tensor_list(args[1])
BinaryClassifierHistory._val_is_tensor(args[2])
pr, lb, loss = args[0], args[1][0], args[2]
lb = BinaryClassifierHistory._reshape_label(pr, lb)
self._running_loss += loss.item()
self._running_correct_cnt += torch.sum(torch.eq(torch.ge(pr, 0.5), lb)).item()
self._running_count += pr.shape[0]
super(BinaryClassifierHistory, self).end_step(pr, lb, loss)
def end_epoch(self):
self._history[BinaryClassifierHistory.loss_key].append(round(self._running_loss/self.steps, 4))
self._history[BinaryClassifierHistory.acc_key].append(round(self._running_correct_cnt/self.samples, 4))
self._running_correct_cnt = 0
self._running_count = 0
self._running_loss = 0
super(BinaryClassifierHistory, self).end_epoch()
def step_stats(self) -> Dict:
r = {
BinaryClassifierHistory.loss_key: round(self._running_loss/self.step, 4),
BinaryClassifierHistory.acc_key: round(self._running_correct_cnt/self._running_count, 4)
}
return r
def early_break(self) -> bool:
return False
class ClassifierDefaults(ModelDefaults):
def __init__(self):
super(ClassifierDefaults, self).__init__()
self.emb_dim(4, 100, 0.2)
self.linear_batch_norm = True
self.inter_layer_drop_out = 0.1
self.default_series_body = 'recurrent'
self.attention_drop_out = 0.0
self.convolutional_dense = True
self.convolutional_drop_out = 0.1
self.transformer_positional_logic = 'encoding'
self.transformer_positional_size = 16
self.transformer_drop_out = 0.2
def emb_dim(self, minimum: int, maximum: int, dropout: float):
self.set('emb_min_dim', minimum)
self.set('emb_max_dim', maximum)
self.set('emb_dropout', dropout)
@property
def linear_batch_norm(self) -> bool:
"""Define if a batch norm layer will be added before the final hidden layer.
:return: bool
"""
return self.get_bool('lin_batch_norm')
@linear_batch_norm.setter
def linear_batch_norm(self, flag: bool):
"""Set if a batch norm layer will be added before the final hidden layer.
:return: bool
"""
self.set('lin_batch_norm', flag)
@property
def inter_layer_drop_out(self) -> float:
"""Defines a value for the inter layer dropout between linear layers. If set, then dropout will be applied
between linear layers.
:return: A float value, the dropout aka p value to apply in the nn.Dropout layers.
"""
return self.get_float('lin_interlayer_drop_out')
@inter_layer_drop_out.setter
def inter_layer_drop_out(self, dropout: float):
"""Define a value for the inter layer dropout between linear layers. If set, then dropout will be applied
between linear layers.
:param dropout: The dropout aka p value to apply in the nn.Dropout layers.
"""
self.set('lin_interlayer_drop_out', dropout)
@property
def default_series_body(self) -> str:
"""Defines the default body type for series, which is a tensor of rank 3 (including batch).
This could be for instance 'recurrent'.
:return: A string value, the default body type to apply to a rank 3 tensor stream.
"""
return self.get_str('def_series_body')
@default_series_body.setter
def default_series_body(self, def_series_body: str):
"""Defines the default body type for series, which is a tensor of rank 3 (including batch).
This could be for instance 'recurrent'.
:param def_series_body: A string value, the default body type to apply to a rank 3 tensor stream.
"""
self.set('def_series_body', def_series_body)
@property
def attention_drop_out(self) -> float:
"""Define a value for the attention dropout. If set, then dropout will be applied after the attention layer.
:return: The dropout aka p value to apply in the nn.Dropout layers.
"""
return self.get_float('attn_drop_out')
@attention_drop_out.setter
def attention_drop_out(self, dropout: float):
"""Define a value for the attention dropout. If set, then dropout will be applied after the attention layer.
:param dropout: The dropout aka p value to apply in the nn.Dropout layers.
"""
self.set('attn_drop_out', dropout)
@property
    def convolutional_drop_out(self) -> float:
        """Define a value for the convolutional dropout. If set, then dropout will be applied after the convolutional layers.
:return: The dropout aka p value to apply in the nn.Dropout layers.
"""
return self.get_float('conv_body_dropout')
@convolutional_drop_out.setter
    def convolutional_drop_out(self, dropout: float):
        """Define a value for the convolutional dropout. If set, then dropout will be applied after the convolutional layers.
:param dropout: The dropout aka p value to apply in the nn.Dropout layers.
"""
self.set('conv_body_dropout', dropout)
@property
def convolutional_dense(self) -> bool:
"""Defines if convolutional bodies are dense. Dense bodies mean that the input to the layer is added to the
output. It forms a sort of residual connection. The input is concatenated along the features axis. This
allows the model to work with the input if that turns out to be useful.
:return: A boolean value, indicating if the input will be added to the output or not.
"""
return self.get_bool('conv_body_dense')
@convolutional_dense.setter
def convolutional_dense(self, dense: bool):
"""Defines if convolutional bodies are dense. Dense bodies mean that the input to the layer is added to the
output. It forms a sort of residual connection. The input is concatenated along the features axis. This
allows the model to work with the input if that turns out to be useful.
:param dense: A boolean value, indicating if the input will be added to the output or not.
"""
self.set('conv_body_dense', dense)
@property
def transformer_positional_logic(self) -> str:
"""Sets which positional logic is used in transformer blocks. 'encoding' : The system will use the encoding,
'embedding' : The system will use an embedding layer.
:return: A string value defining which positional logic to use.
"""
return self.get_str('trans_pos_logic')
@transformer_positional_logic.setter
def transformer_positional_logic(self, positional_logic: str):
"""Sets which positional logic is used in transformer blocks. 'encoding' : The system will use the encoding,
'embedding' : The system will use an embedding layer.
:param positional_logic: A string value defining which positional logic to use.
"""
self.set('trans_pos_logic', positional_logic)
@property
def transformer_positional_size(self) -> int:
"""Sets the positional size of transformer blocks. The size is the number of elements added to each transaction
in the series to help the model determine the position of transactions in the series.
:return: An integer value. The number of elements output by the positional logic
"""
return self.get_int('trans_pos_size')
@transformer_positional_size.setter
def transformer_positional_size(self, positional_size: int):
"""Sets the positional size of transformer blocks. The size is the number of elements added to each transaction
in the series to help the model determine the position of transactions in the series.
:param positional_size: An integer value. The number of elements output by the positional logic
"""
self.set('trans_pos_size', positional_size)
@property
def transformer_drop_out(self) -> float:
"""Defines the drop out to apply in the transformer layer
        :return: A float value. The drop out value to apply in transformer layers
"""
return self.get_float('trans_dropout')
@transformer_drop_out.setter
def transformer_drop_out(self, dropout: float):
"""Defines the drop out to apply in the transformer layer
:param dropout: The drop out value to apply in transformer layers
"""
self.set('trans_dropout', dropout)
class GeneratedClassifier(_ModelGenerated):
"""Generate a Pytorch classifier model. This class will create a model that fits the input and label definition
of the TensorDefinition.
Args:
tensor_def: A TensorDefinition or TensorDefinitionMulti object describing the various input and output features
c_defaults: (Optional) ClassifierDefaults object defining the defaults which need to be used.
        kwargs: Various named parameters which can be used to drive the type of classifier and the capacity of the model.
"""
def __init__(self, tensor_def: Union[TensorDefinition, TensorDefinitionMulti],
c_defaults=ClassifierDefaults(), **kwargs):
tensor_def_m = self.val_is_td_multi(tensor_def)
super(GeneratedClassifier, self).__init__(tensor_def_m, c_defaults)
# Set-up stream per tensor_definition
label_td = self.label_tensor_def(tensor_def_m)
feature_td = [td for td in self._tensor_def.tensor_definitions if td not in label_td]
streams = [_ModelStream(td.name) for td in feature_td]
if self.is_param_defined('transfer_from', kwargs):
# We're being asked to do transfer learning.
# TODO we'll need a bunch of validation here.
om = self.get_gen_model_parameter('transfer_from', kwargs)
logger.info(f'Transferring from model {om.__class__}')
# The Source model is an auto-encoder
if isinstance(om, GeneratedAutoEncoder):
self.set_up_heads(c_defaults, feature_td, streams)
# Copy and freeze the TensorDefinitionHead, this should normally be the first item.
for s, oms in zip(streams, om.streams):
for sly in oms:
if isinstance(sly, TensorDefinitionHead):
src = self.is_tensor_definition_head(sly)
trg = self.is_tensor_definition_head(s.layers[0])
trg.copy_state_dict(src)
trg.freeze()
logger.info(f'Transferred and froze TensorDefinitionHead {trg.tensor_definition.name}')
elif isinstance(sly, LinearEncoder):
# If no linear layers defined then try and copy the encoder linear_layers
if not self.is_param_defined('linear_layers', kwargs):
linear_layers = sly.layer_definition
# Add last layer. Because this is binary, it has to have size of 1.
linear_layers.append((1, 0.0))
tail = TailBinary(
sum(s.out_size for s in streams), linear_layers, c_defaults.linear_batch_norm
)
tail_state = tail.state_dict()
                                # Source encoder state; only its first weight/bias pair is copied into the tail below.
source_state = list(sly.state_dict().values())
for i, sk in enumerate(tail_state.keys()):
if i < 2:
tail_state[sk].copy_(source_state[i])
# Load target Dict in the target layer.
tail.load_state_dict(tail_state)
for i, p in enumerate(tail.parameters()):
if i < 2:
p.requires_grad = False
logger.info(f'Transferred and froze Linear Encoder layers {sly.layer_definition}')
else:
# Set-up a head layer to each stream. This is done in the parent class.
self.set_up_heads(c_defaults, feature_td, streams)
# Add Body to each stream.
for td, s in zip(feature_td, streams):
self._add_body(s, td, kwargs, c_defaults)
# Create tail.
linear_layers = self.get_list_parameter('linear_layers', int, kwargs)
# Add dropout parameter this will make a list of tuples of (layer_size, dropout)
linear_layers = [(i, c_defaults.inter_layer_drop_out) for i in linear_layers]
# Add last layer. Because this is binary, it has to have size of 1.
linear_layers.append((1, 0.0))
tail = TailBinary(sum(s.out_size for s in streams), linear_layers, c_defaults.linear_batch_norm)
# Assume the last entry is the label
self._y_index = self._x_indexes[-1] + 1
self.streams = nn.ModuleList(
[s.create() for s in streams]
)
self.tail = tail
# Last but not least, set-up the loss function
self.set_loss_fn(SingleLabelBCELoss())
def _add_body(self, stream: _ModelStream, tensor_def: TensorDefinition, kwargs: dict, defaults: ClassifierDefaults):
if tensor_def.rank == 2:
# No need to add anything to the body, rank goes directly to the tail.
return
elif tensor_def.rank == 3:
# Figure out to which body to use.
if self.is_param_defined('recurrent_layers', kwargs):
body_type = 'recurrent'
elif self.is_param_defined('convolutional_layers', kwargs):
body_type = 'convolutional'
elif self.is_param_defined('attention_heads', kwargs):
body_type = 'transformer'
else:
body_type = defaults.default_series_body
# Set-up the body.
if body_type.lower() == 'recurrent':
self._add_recurrent_body(stream, kwargs, defaults)
elif body_type.lower() == 'convolutional':
self._add_convolutional_body(stream, tensor_def, kwargs, defaults)
elif body_type.lower() == 'transformer':
self._add_transformer_body(stream, tensor_def, kwargs, defaults)
else:
raise PyTorchModelException(
f'Do not know how to build body of type {body_type}'
)
def _add_recurrent_body(self, stream: _ModelStream, kwargs: dict, defaults: ClassifierDefaults):
attn_heads = self.get_int_parameter('attention_heads', kwargs, 0)
# attn_do = defaults.attention_drop_out
rnn_features = self.get_int_parameter(
'recurrent_features', kwargs, self.closest_power_of_2(int(stream.out_size / 3))
)
rnn_layers = self.get_int_parameter('recurrent_layers', kwargs, 1)
# Add attention if requested
if attn_heads > 0:
attn = AttentionLastEntry(stream.out_size, attn_heads, rnn_features)
stream.add('Attention', attn, attn.output_size)
# Add main rnn layer
rnn = LSTMBody(stream.out_size, rnn_features, rnn_layers, True, False)
stream.add('Recurrent', rnn, rnn.output_size)
def _add_convolutional_body(self, stream: _ModelStream, tensor_def: TensorDefinition, kwargs: dict,
defaults: ClassifierDefaults):
s_length = [s[1] for s in tensor_def.shapes if len(s) == 3][0]
convolutional_layers = self.get_list_of_tuples_parameter('convolutional_layers', int, kwargs, None)
dropout = defaults.convolutional_drop_out
dense = defaults.convolutional_dense
cnn = ConvolutionalBody1d(stream.out_size, s_length, convolutional_layers, dropout, dense)
stream.add('Convolutional', cnn, cnn.output_size)
def _add_transformer_body(self, stream: _ModelStream, tensor_def: TensorDefinition, kwargs: dict,
defaults: ClassifierDefaults):
s_length = [s[1] for s in tensor_def.shapes if len(s) == 3][0]
attention_head = self.get_int_parameter('attention_heads', kwargs, 1)
feedforward_size = self.get_int_parameter(
'feedforward_size', kwargs, self.closest_power_of_2(int(stream.out_size / 3))
)
drop_out = defaults.transformer_drop_out
positional_size = defaults.transformer_positional_size
positional_logic = defaults.transformer_positional_logic
trans = TransformerBody(
stream.out_size,
s_length,
positional_size,
positional_logic,
attention_head,
feedforward_size,
drop_out
)
stream.add('Transformer', trans, trans.output_size)
def get_y(self, ds: List[torch.Tensor]) -> List[torch.Tensor]:
return ds[self._y_index: self._y_index+1]
def history(self, *args) -> _History:
return BinaryClassifierHistory(*args)
def forward(self, x: List[torch.Tensor]):
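        # Run each input stream on its own subset of the batch tensors (selected via
        # self.head_indexes), then let the binary tail combine the stream outputs.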
y = [s([x[i] for i in hi]) for hi, s in zip(self.head_indexes, self.streams)]
y = self.tail(y)
return y
| 46.041162 | 120 | 0.645438 | 18,427 | 0.969077 | 0 | 0 | 6,996 | 0.36792 | 0 | 0 | 6,935 | 0.364712 |
c34bda54e37900d299bfad9266c734ecc115936d | 5,369 | py | Python | qklnn/plots/hyperpar_scan.py | cambouvy/BSc-Thesis-Project | ca2504cb828ab068545e130eac393ceb34f2a457 | [
"MIT"
]
| 1 | 2021-10-02T11:15:10.000Z | 2021-10-02T11:15:10.000Z | qklnn/plots/hyperpar_scan.py | cambouvy/BSc-Thesis-Project | ca2504cb828ab068545e130eac393ceb34f2a457 | [
"MIT"
]
| null | null | null | qklnn/plots/hyperpar_scan.py | cambouvy/BSc-Thesis-Project | ca2504cb828ab068545e130eac393ceb34f2a457 | [
"MIT"
]
| null | null | null | import re
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use("pdf")
import matplotlib.pyplot as plt
from matplotlib import gridspec
from peewee import AsIs, JOIN, prefetch, SQL
from IPython import embed
from bokeh.layouts import row, column
from bokeh.plotting import figure, show, output_file
from bokeh.transform import linear_cmap
from bokeh.models import (
ColumnDataSource,
Range1d,
LabelSet,
Label,
Rect,
HoverTool,
Div,
)
from qlknn.NNDB.model import (
Network,
PureNetworkParams,
PostprocessSlice,
NetworkMetadata,
TrainMetadata,
Postprocess,
db,
Hyperparameters,
)
from qlknn.plots.statistical_spread import get_base_stats
from qlknn.misc.to_precision import to_precision
# First, get some statistics
target_names = ["efeTEM_GB"]
hyperpars = ["cost_stable_positive_scale", "cost_l2_scale"]
# hyperpars = ['cost_stable_positive_scale', 'cost_stable_positive_offset']
goodness_pars = [
"rms",
"no_pop_frac",
"no_thresh_frac",
"pop_abs_mis_median",
"thresh_rel_mis_median",
"wobble_qlkunstab",
]
try:
report = get_base_stats(target_names, hyperpars, goodness_pars)
except Network.DoesNotExist:
report = pd.DataFrame(columns=goodness_pars, index=["mean", "stddev", "stderr"])
query = (
Network.select(
Network.id.alias("network_id"),
PostprocessSlice,
Postprocess.rms,
Hyperparameters,
)
.join(PostprocessSlice, JOIN.LEFT_OUTER)
.switch(Network)
.join(Postprocess, JOIN.LEFT_OUTER)
.switch(Network)
.where(Network.target_names == target_names)
.switch(Network)
.join(PureNetworkParams)
.join(Hyperparameters)
.where(Hyperparameters.cost_stable_positive_offset.cast("numeric") == -5)
.where(Hyperparameters.cost_stable_positive_function == "block")
)
if query.count() > 0:
results = list(query.dicts())
df = pd.DataFrame(results)
# df['network'] = df['network'].apply(lambda el: 'pure_' + str(el))
# df['l2_norm'] = df['l2_norm'].apply(np.nanmean)
df.drop(["id", "network"], inplace=True, axis="columns")
df.set_index("network_id", inplace=True)
stats = df
stats = stats.applymap(np.array)
stats = stats.applymap(lambda x: x[0] if isinstance(x, np.ndarray) and len(x) == 1 else x)
stats.dropna(axis="columns", how="all", inplace=True)
stats.dropna(axis="rows", how="all", inplace=True)
stats = stats.loc[:, hyperpars + goodness_pars]
stats.reset_index(inplace=True)
# stats.set_index(hyperpars, inplace=True)
# stats.sort_index(ascending=False, inplace=True)
# stats = stats.groupby(level=list(range(len(stats.index.levels)))).mean() #Average equal hyperpars
# stats.reset_index(inplace=True)
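# Collapse networks that share the same hyperparameter combination: keep their ids
# plus the mean and the standard deviation of every goodness metric.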
aggdict = {"network_id": lambda x: tuple(x)}
aggdict.update({name: "mean" for name in goodness_pars})
stats_mean = stats.groupby(hyperpars).agg(aggdict)
aggdict.update({name: "std" for name in goodness_pars})
stats_std = stats.groupby(hyperpars).agg(aggdict)
stats = stats_mean.merge(stats_std, left_index=True, right_index=True, suffixes=("", "_std"))
stats.reset_index(inplace=True)
for name in hyperpars:
stats[name] = stats[name].apply(str)
for name in goodness_pars:
fmt = lambda x: "" if np.isnan(x) else to_precision(x, 4)
fmt_mean = stats[name].apply(fmt)
stats[name + "_formatted"] = fmt_mean
fmt = lambda x: "" if np.isnan(x) else to_precision(x, 2)
fmt_std = stats[name + "_std"].apply(fmt)
prepend = lambda x: "+- " + x if x != "" else x
stats[name + "_std_formatted"] = fmt_std.apply(prepend)
x = np.unique(stats[hyperpars[1]].values)
x = sorted(x, key=lambda x: float(x))
y = np.unique(stats[hyperpars[0]].values)
y = sorted(y, key=lambda x: float(x))
source = ColumnDataSource(stats)
plotmode = "bokehz"
hover = HoverTool(
tooltips=[
("network_id", "@network_id"),
(hyperpars[0], "@" + hyperpars[0]),
(hyperpars[1], "@" + hyperpars[1]),
]
)
plots = []
for statname in goodness_pars:
fmt = lambda x: "" if np.isnan(x) else to_precision(x, 2)
title = "{:s} (ref={:s}±{:s})".format(
statname,
fmt(report[statname]["mean"]),
fmt(report[statname]["stddev"] + report[statname]["stderr"]),
)
p = figure(title=title, tools="tap", toolbar_location=None, x_range=x, y_range=y)
p.add_tools(hover)
color = linear_cmap(statname, "Viridis256", min(stats[statname]), max(stats[statname]))
p.rect(
x=hyperpars[1],
y=hyperpars[0],
width=1,
height=1,
source=source,
fill_color=color,
line_color=None,
nonselection_fill_alpha=0.4,
nonselection_fill_color=color,
)
non_selected = Rect(fill_alpha=0.8)
label_kwargs = dict(
x=hyperpars[1],
y=hyperpars[0],
level="glyph",
source=source,
text_align="center",
text_color="red",
)
labels = LabelSet(text=statname + "_formatted", text_baseline="bottom", **label_kwargs)
labels_std = LabelSet(text=statname + "_std_formatted", text_baseline="top", **label_kwargs)
p.add_layout(labels)
p.add_layout(labels_std)
p.xaxis.axis_label = hyperpars[1]
p.yaxis.axis_label = hyperpars[0]
plots.append(p)
from bokeh.layouts import layout, widgetbox
title = Div(text=",".join(target_names))
l = layout([[title], [plots]])
show(l)
| 30.856322 | 99 | 0.67778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 948 | 0.176536 |
c34c38cdb59ab3adcb9297c65de4aee9cda600b1 | 1,328 | py | Python | py3canvas/tests/shared_brand_configs.py | tylerclair/py3canvas | 7485d458606b65200f0ffa5bbe597a9d0bee189f | [
"MIT"
]
| null | null | null | py3canvas/tests/shared_brand_configs.py | tylerclair/py3canvas | 7485d458606b65200f0ffa5bbe597a9d0bee189f | [
"MIT"
]
| null | null | null | py3canvas/tests/shared_brand_configs.py | tylerclair/py3canvas | 7485d458606b65200f0ffa5bbe597a9d0bee189f | [
"MIT"
]
| null | null | null | """SharedBrandConfigs API Tests for Version 1.0.
This is a testing template for the generated SharedBrandConfigsAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.shared_brand_configs import SharedBrandConfigsAPI
from py3canvas.apis.shared_brand_configs import Sharedbrandconfig
class TestSharedBrandConfigsAPI(unittest.TestCase):
"""Tests for the SharedBrandConfigsAPI."""
def setUp(self):
self.client = SharedBrandConfigsAPI(
secrets.instance_address, secrets.access_token
)
def test_share_brandconfig_theme(self):
"""Integration test for the SharedBrandConfigsAPI.share_brandconfig_theme method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_update_shared_theme(self):
"""Integration test for the SharedBrandConfigsAPI.update_shared_theme method."""
# This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_un_share_brandconfig_theme(self):
"""Integration test for the SharedBrandConfigsAPI.un_share_brandconfig_theme method."""
id = None # Change me!!
r = self.client.un_share_brandconfig_theme(id)
| 37.942857 | 126 | 0.752259 | 1,014 | 0.763554 | 0 | 0 | 0 | 0 | 0 | 0 | 668 | 0.503012 |
c34deabfbf09d812a3e974c9b52d0665996b8dda | 1,095 | py | Python | apps/cars/tests/api/abstract/abstract_base_api_test.py | agorsk1/car-rating-app | 354c5933f4cbad69c9a57d1839f9086cd5cf9a1d | [
"MIT"
]
| 1 | 2022-03-03T11:15:25.000Z | 2022-03-03T11:15:25.000Z | apps/cars/tests/api/abstract/abstract_base_api_test.py | agorsk1/car-rating-app | 354c5933f4cbad69c9a57d1839f9086cd5cf9a1d | [
"MIT"
]
| null | null | null | apps/cars/tests/api/abstract/abstract_base_api_test.py | agorsk1/car-rating-app | 354c5933f4cbad69c9a57d1839f9086cd5cf9a1d | [
"MIT"
]
| null | null | null | from abc import ABC, abstractmethod
from django.test import TestCase
from rest_framework.generics import GenericAPIView
from rest_framework.test import APIRequestFactory
from apps.cars.factory import UserFactory
class AbstractBaseTest(object):
class AbstractBaseApiTestCase(TestCase, ABC):
"""
Abstract Base TestCase class.
"""
def setUp(self) -> None:
"""Base setup"""
self.user = UserFactory.create()
self.request_factory = APIRequestFactory()
self.view = self._view()
self.endpoint = self._endpoint()
@abstractmethod
def _view(self) -> GenericAPIView.as_view():
"""Abstract method that returns YourApiToTest.as_view()"""
pass
@abstractmethod
def _endpoint(self) -> str:
"""Abstract method that return endpoint string E.g /cars/"""
pass
@abstractmethod
def test_anonymous_request(self, *args, **kwargs) -> None:
"""test if anonymous user cannot access endpoint"""
pass
| 29.594595 | 72 | 0.621918 | 877 | 0.800913 | 0 | 0 | 460 | 0.420091 | 0 | 0 | 238 | 0.217352 |
c34e3c84ae9852ef18383b6753e4f283c886e50c | 995 | py | Python | templating-tool.py | salayatana66/vw-serving-flask | 7b91f986b0e03e9784cf481b1f8833508dc40bfb | [
"BSD-2-Clause-FreeBSD"
]
| 4 | 2020-10-01T17:31:00.000Z | 2021-05-09T12:21:41.000Z | templating-tool.py | salayatana66/vw-serving-flask | 7b91f986b0e03e9784cf481b1f8833508dc40bfb | [
"BSD-2-Clause-FreeBSD"
]
| null | null | null | templating-tool.py | salayatana66/vw-serving-flask | 7b91f986b0e03e9784cf481b1f8833508dc40bfb | [
"BSD-2-Clause-FreeBSD"
]
| 2 | 2020-10-01T17:31:01.000Z | 2020-10-02T17:48:01.000Z | """
A simple templating tool for Dockerfiles
"""
import sys
import os
import click
import jinja2
import yaml
@click.group()
def cli():
""" @Unimplemented """
pass
@cli.command()
@click.argument("template", required=True, type=str)
@click.option("-y", "--yaml_file", required=True,
help="Yaml file with keys for template",
type=str)
def from_yaml(template, yaml_file):
"""
Fills in template file fields using the
yaml_file
"""
temp_path = os.path.expanduser(
os.path.expandvars(template))
yml_path = os.path.expanduser(
os.path.expandvars(yaml_file))
with open(temp_path, 'r') as tfile:
temp_jin = jinja2.Template(tfile.read())
with open(yml_path, 'r') as yfile:
yml_loaded = yaml.load(yfile, Loader=yaml.BaseLoader)
temp_rend = temp_jin.render(**yml_loaded)
sys.stdout.write(temp_rend)
sys.stdout.flush()
cli.add_command(from_yaml)
if __name__ == '__main__':
cli()
| 19.509804 | 61 | 0.648241 | 0 | 0 | 0 | 0 | 811 | 0.815075 | 0 | 0 | 216 | 0.217085 |
c3508126a16d94b29f0bc62586532976da28f49d | 11,552 | py | Python | verbforms.py | wmcooper2/Clean-Code-English-Tests | a966ed40e13608a75bb618d35bf812d9229cacc3 | [
"MIT"
]
| null | null | null | verbforms.py | wmcooper2/Clean-Code-English-Tests | a966ed40e13608a75bb618d35bf812d9229cacc3 | [
"MIT"
]
| 1 | 2018-09-02T12:46:41.000Z | 2018-09-02T12:55:30.000Z | verbforms.py | wmcooper2/TotalEnglishAssistant | a966ed40e13608a75bb618d35bf812d9229cacc3 | [
"MIT"
]
| null | null | null | """File for holding the different verb forms for all of the verbs in the Total
English book series."""
verb_forms = {
'become' :
{
'normal' : 'become',
'present' : ['become','becomes'],
'past' : 'became',
'past participle' : 'become',
'gerund' : 'becoming',
},
'be':
{
'normal' : 'be',
'present' : ['am','is','are'],
'past' : ['was', 'were'],
'past participle' : 'been',
'gerund' : 'being',
},
'begin':
{
'normal' : 'begin',
'present' : ['begin','begins'],
'past' : 'began',
'past participle' : 'begun',
'gerund' : 'beginning',
},
'blow':
{
'normal' : 'blow',
'present' : ['blow', 'blows'],
'past' : 'blew',
'past participle' : 'blown',
'gerund' : 'blowing',
},
'bring':
{
'normal' : 'bring',
'present' : ['bring','brings'],
'past' : 'brought',
'past participle' : 'brought',
'gerund' : 'bringing',
},
'build':
{
'normal' : 'build',
'present' : ['build','builds'],
'past' : 'built',
'past participle' : 'built',
'gerund' : 'building',
},
'burn':
{
'normal' : 'burn',
'present' : ['burn','burns'],
'past' : ['burned','burnt'],
'past participle' : ['burned','burnt'],
'gerund' : 'burning',
},
'buy':
{
'normal' : 'buy',
'present' : ['buy','buys'],
'past' : 'bought',
'past participle' : 'bought',
'gerund' : 'buying',
},
'catch':
{
'normal' : 'catch',
'present' : ['catch','catches'],
'past' : 'caught',
'past participle' : 'caught',
'gerund' : 'catching',
},
'choose':
{
'normal' : 'choose',
'present' : ['choose','chooses'],
'past' : 'chose',
'past participle' : 'chosen',
'gerund' : 'choosing',
},
'come':
{
'normal' : 'come',
'present' : ['come','comes'],
'past' : 'came',
'past participle' : 'come',
'gerund' : 'coming',
},
'cut':
{
'normal' : 'cut',
'present' : ['cut','cuts'],
'past' : 'cut',
'past participle' : 'cut',
'gerund' : 'cutting',
},
'do':
{
'normal' : 'do',
'present' : ['do','does'],
'past' : 'did',
'past participle' : 'done',
'gerund' : 'doing',
},
'drink':
{
'normal' : 'drink',
'present' : ['drink','drinks'],
'past' : 'drank',
'past participle' : 'drunk',
'gerund' : 'drinking',
},
'eat':
{
'normal' : 'eat',
'present' : ['eat','eats'],
'past' : 'ate',
'past participle' : 'eaten',
'gerund' : 'eating',
},
'feel':
{
'normal' : 'feel',
'present' : ['feel','feels'],
'past' : 'felt',
'past participle' : 'felt',
'gerund' : 'feeling',
},
'fight':
{
'normal' : 'fight',
'present' : ['fight','fights'],
'past' : 'fought',
'past participle' : 'fought',
'gerund' : 'fighting',
},
'find':
{
'normal' : 'find',
'present' : ['find','finds'],
'past' : 'found',
'past participle' : 'found',
'gerund' : 'finding',
},
'fly':
{
'normal' : 'fly',
'present' : ['fly','flies'],
'past' : 'flew',
'past participle' : 'flown',
'gerund' : 'flying',
},
'forget':
{
'normal' : 'forget',
'present' : ['forget','forgets'],
'past' : 'forgot',
'past participle' : ['forgotten','forgot'],
'gerund' : 'forgetting',
},
'get':
{
'normal' : 'get',
'present' : ['get','gets'],
'past' : 'got',
'past participle' : ['gotten','got'],
'gerund' : 'getting',
},
'give':
{
'normal' : 'give',
'present' : ['give','gives'],
'past' : 'gave',
'past participle' : 'given',
'gerund' : 'giving',
},
'go':
{
'normal' : 'go',
'present' : ['go','goes'],
'past' : 'went',
'past participle' : 'gone',
'gerund' : 'going',
},
'grow':
{
'normal' : 'grow',
'present' : ['grow','grows'],
'past' : 'grew',
'past participle' : 'grown',
'gerund' : 'growing',
},
'have':
{
'normal' : 'have',
'present' : ['have','has'],
'past' : 'had',
'past participle' : 'had',
'gerund' : 'having',
},
'hear':
{
'normal' : 'hear',
'present' : ['hear','hears'],
'past' : 'heard',
'past participle' : 'heard',
'gerund' : 'hearing',
},
'hit':
{
'normal' : 'hit',
'present' : ['hit','hits'],
'past' : 'hit',
'past participle' : 'hit',
'gerund' : 'hitting',
},
'hold':
{
'normal' : 'hold',
'present' : ['hold','holds'],
'past' : 'held',
'past participle' : 'held',
'gerund' : 'holding',
},
'hurt':
{
'normal' : 'hurt',
'present' : ['hurt','hurts'],
'past' : 'hurt',
'past participle' : 'hurt',
'gerund' : 'hurting',
},
'keep':
{
'normal' : 'keep',
'present' : ['keep','keeps'],
'past' : 'kept',
'past participle' : 'kept',
'gerund' : 'keeping',
},
'know':
{
'normal' : 'know',
'present' : ['know','knows'],
'past' : 'knew',
'past participle' : 'known',
'gerund' : 'knowing',
},
'lead':
{
'normal' : 'lead',
'present' : ['lead','leads'],
'past' : 'led',
'past participle' : 'led',
'gerund' : 'leading',
},
'leave':
{
'normal' : 'leave',
'present' : ['leave','leaves'],
'past' : 'left',
'past participle' : 'left',
'gerund' : 'leaving',
},
'lend':
{
'normal' : 'lend',
'present' : ['lend','lends'],
'past' : 'lent',
'past participle' : 'lent',
'gerund' : 'lending',
},
'lie':
{
'normal' : 'lie',
'present' : ['lie','lies'],
'past' : 'lay',
'past participle' : 'lain',
'gerund' : 'lying',
},
'lose':
{
'normal' : 'lose',
'present' : ['lose','loses'],
'past' : 'lost',
'past participle' : 'lost',
'gerund' : 'losing',
},
'make':
{
'normal' : 'make',
'present' : ['make','makes'],
'past' : 'made',
'past participle' : 'made',
'gerund' : 'making',
},
'mean':
{
'normal' : 'mean',
'present' : ['mean','means'],
'past' : 'meant',
'past participle' : 'meant',
'gerund' : 'meaning',
},
'meet':
{
'normal' : 'meet',
'present' : ['meet','meets'],
'past' : 'met',
'past participle' : 'met',
'gerund' : 'meeting',
},
'put':
{
'normal' : 'put',
'present' : ['put','puts'],
'past' : 'put',
'past participle' : 'put',
'gerund' : 'putting',
},
'read':
{
'normal' : 'read',
'present' : ['read','reads'],
'past' : 'read',
'past participle' : 'read',
'gerund' : 'reading',
},
'ride':
{
'normal' : 'ride',
'present' : ['ride','rides'],
'past' : 'rode',
'past participle' : 'ridden',
'gerund' : 'riding',
},
'ring':
{
'normal' : 'ring',
'present' : ['ring','rings'],
'past' : 'rang',
'past participle' : 'rung',
'gerund' : 'ringing',
},
'run':
{
'normal' : 'run',
'present' : ['run','runs'],
'past' : 'ran',
'past participle' : 'run',
'gerund' : 'running',
},
'say':
{
'normal' : 'say',
'present' : ['say','says'],
'past' : 'said',
'past participle' : 'said',
'gerund' : 'saying',
},
'see':
{
'normal' : 'see',
'present' : ['see','sees'],
'past' : 'saw',
'past participle' : 'seen',
'gerund' : 'seeing',
},
'sell':
{
'normal' : 'sell',
'present' : ['sell','sells'],
'past' : 'sold',
'past participle' : 'sold',
'gerund' : 'selling',
},
'send':
{
'normal' : 'send',
'present' : ['send','sends'],
'past' : 'sent',
'past participle' : 'sent',
'gerund' : 'sending',
},
'shake':
{
'normal' : 'shake',
'present' : ['shake','shakes'],
'past' : 'shook',
'past participle' : 'shaken',
'gerund' : 'shaking',
},
'show':
{
'normal' : 'show',
'present' : ['show','shows'],
'past' : 'showed',
'past participle' : 'shown',
'gerund' : 'showing',
},
'shut':
{
'normal' : 'shut',
'present' : ['shut','shuts'],
'past' : 'shut',
'past participle' : 'shut',
'gerund' : 'shutting',
},
'sing':
{
'normal' : 'sing',
'present' : ['sing','sings'],
'past' : 'sang',
'past participle' : 'sung',
'gerund' : 'singing',
},
'sit':
{
'normal' : 'sit',
'present' : ['sit','sits'],
'past' : 'sat',
'past participle' : 'sat',
'gerund' : 'sitting',
},
'sleep':
{
'normal' : 'sleep',
'present' : ['sleep','sleeps'],
'past' : 'slept',
'past participle' : 'slept',
'gerund' : 'sleeping',
},
'smell':
{
'normal' : 'smell',
'present' : ['smell','smells'],
    'past' : ['smelled','smelt'],
    'past participle' : ['smelled','smelt'],
'gerund' : 'smelling',
},
'speak':
{
'normal' : 'speak',
'present' : ['speak','speaks'],
'past' : 'spoke',
'past participle' : 'spoken',
'gerund' : 'speaking',
},
'spend':
{
'normal' : 'spend',
'present' : ['spend','spends'],
'past' : 'spent',
'past participle' : 'spent',
'gerund' : 'spending',
},
'stand':
{
'normal' : 'stand',
'present' : ['stand','stands'],
'past' : 'stood',
'past participle' : 'stood',
'gerund' : 'standing',
},
'swim':
{
'normal' : 'swim',
'present' : ['swim','swims'],
'past' : 'swam',
'past participle' : 'swum',
'gerund' : 'swimming',
},
'take':
{
'normal' : 'take',
'present' : ['take','takes'],
'past' : 'took',
'past participle' : 'taken',
'gerund' : 'taking',
},
'teach':
{
'normal' : 'teach',
'present' : ['teach','teaches'],
'past' : 'taught',
'past participle' : 'taught',
'gerund' : 'teaching',
},
'tell':
{
'normal' : 'tell',
'present' : ['tell','tells'],
'past' : 'told',
'past participle' : 'told',
'gerund' : 'telling',
},
'think':
{
'normal' : 'think',
'present' : ['think','thinks'],
'past' : 'thought',
'past participle' : 'thought',
'gerund' : 'thinking',
},
'throw':
{
'normal' : 'throw',
'present' : ['throw','throws'],
'past' : 'threw',
'past participle' : 'thrown',
'gerund' : 'throwing',
},
'understand':
{
'normal' : 'understand',
'present' : ['understand','understands'],
'past' : 'understood',
'past participle' : 'understood',
    'gerund' : 'understanding',
},
'wear':
{
'normal' : 'wear',
'present' : ['wear','wears'],
'past' : 'wore',
'past participle' : 'worn',
'gerund' : 'wearing',
},
'win':
{
'normal' : 'win',
'present' : ['win','wins'],
'past' : 'won',
'past participle' : 'won',
'gerund' : 'winning',
},
'write':
{
'normal' : 'write',
'present' : ['write','writes'],
'past' : 'wrote',
'past participle' : 'written',
    'gerund' : 'writing',
    },
}
| 18.814332 | 78 | 0.428497 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,678 | 0.578082 |
c3518b43a0aa4df0b06d8f5ad7ea43c927361987 | 169 | py | Python | bokeh/themes/__init__.py | quasiben/bokeh | 738343bd18c851dfd1fdc82cf35fe3eb4cdfd475 | [
"BSD-3-Clause"
]
| null | null | null | bokeh/themes/__init__.py | quasiben/bokeh | 738343bd18c851dfd1fdc82cf35fe3eb4cdfd475 | [
"BSD-3-Clause"
]
| null | null | null | bokeh/themes/__init__.py | quasiben/bokeh | 738343bd18c851dfd1fdc82cf35fe3eb4cdfd475 | [
"BSD-3-Clause"
]
| null | null | null | ''' Provides API for loading themes
'''
from __future__ import absolute_import
from os.path import join
from .theme import Theme
default = Theme(json={})
del join
| 12.071429 | 38 | 0.739645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.236686 |
c351ebb4f07cf7eccdee13a557a0b9df8efb0303 | 4,321 | py | Python | files/spam-filter/tracspamfilter/captcha/keycaptcha.py | Puppet-Finland/puppet-trac | ffdf467ba80ff995778c30b0bdc6dc3e7d4e6cd3 | [
"BSD-2-Clause"
]
| null | null | null | files/spam-filter/tracspamfilter/captcha/keycaptcha.py | Puppet-Finland/puppet-trac | ffdf467ba80ff995778c30b0bdc6dc3e7d4e6cd3 | [
"BSD-2-Clause"
]
| null | null | null | files/spam-filter/tracspamfilter/captcha/keycaptcha.py | Puppet-Finland/puppet-trac | ffdf467ba80ff995778c30b0bdc6dc3e7d4e6cd3 | [
"BSD-2-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Dirk Stöcker <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://projects.edgewall.com/trac/.
import hashlib
import random
import urllib2
from trac.config import Option
from trac.core import Component, implements
from trac.util.html import tag
from tracspamfilter.api import user_agent
from tracspamfilter.captcha import ICaptchaMethod
class KeycaptchaCaptcha(Component):
"""KeyCaptcha implementation"""
implements(ICaptchaMethod)
private_key = Option('spam-filter', 'captcha_keycaptcha_private_key', '',
"""Private key for KeyCaptcha usage.""", doc_domain="tracspamfilter")
user_id = Option('spam-filter', 'captcha_keycaptcha_user_id', '',
"""User id for KeyCaptcha usage.""", doc_domain="tracspamfilter")
def generate_captcha(self, req):
session_id = "%d-3.4.0.001" % random.randint(1, 10000000)
sign1 = hashlib.md5(session_id + req.remote_addr +
self.private_key).hexdigest()
sign2 = hashlib.md5(session_id + self.private_key).hexdigest()
varblock = "var s_s_c_user_id = '%s';\n" % self.user_id
varblock += "var s_s_c_session_id = '%s';\n" % session_id
varblock += "var s_s_c_captcha_field_id = 'keycaptcha_response_field';\n"
varblock += "var s_s_c_submit_button_id = 'keycaptcha_response_button';\n"
varblock += "var s_s_c_web_server_sign = '%s';\n" % sign1
varblock += "var s_s_c_web_server_sign2 = '%s';\n" % sign2
varblock += "document.s_s_c_debugmode=1;\n"
fragment = tag(tag.script(varblock, type='text/javascript'))
fragment.append(
tag.script(type='text/javascript',
src='http://backs.keycaptcha.com/swfs/cap.js')
)
fragment.append(
tag.input(type='hidden', id='keycaptcha_response_field',
name='keycaptcha_response_field')
)
fragment.append(
tag.input(type='submit', id='keycaptcha_response_button',
name='keycaptcha_response_button')
)
req.session['captcha_key_session'] = session_id
return None, fragment
def verify_key(self, private_key, user_id):
if private_key is None or user_id is None:
return False
# FIXME - Not yet implemented
return True
def verify_captcha(self, req):
session = None
if 'captcha_key_session' in req.session:
session = req.session['captcha_key_session']
del req.session['captcha_key_session']
response_field = req.args.get('keycaptcha_response_field')
val = response_field.split('|')
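        # The widget posts a '|'-separated field; judging from how it is used below it
        # holds [signature, token, validation URL, session id] (assumption, not documented here).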
s = hashlib.md5('accept' + val[1] + self.private_key +
val[2]).hexdigest()
self.log.debug("KeyCaptcha response: %s .. %s .. %s",
response_field, s, session)
if s == val[0] and session == val[3]:
try:
request = urllib2.Request(
url=val[2],
headers={"User-agent": user_agent}
)
response = urllib2.urlopen(request)
return_values = response.read()
response.close()
except Exception, e:
self.log.warning("Exception in KeyCaptcha handling (%s)", e)
else:
self.log.debug("KeyCaptcha check result: %s", return_values)
if return_values == '1':
return True
self.log.warning("KeyCaptcha returned invalid check result: "
"%s (%s)", return_values, response_field)
else:
self.log.warning("KeyCaptcha returned invalid data: "
"%s (%s,%s)", response_field, s, session)
return False
def is_usable(self, req):
return self.private_key and self.user_id
| 38.238938 | 82 | 0.610507 | 3,566 | 0.825081 | 0 | 0 | 0 | 0 | 0 | 0 | 1,600 | 0.370199 |
c353ac7f88d4d2f15d7dbe0bb2a19e95c08d7680 | 3,222 | py | Python | app/model/causalnex.py | splunk/splunk-mltk-container-docker | 6e98e5984d99d7a3318f3e68c224d2a5163b717b | [
"Apache-2.0"
]
| 20 | 2019-10-28T10:10:00.000Z | 2022-02-17T02:31:54.000Z | app/model/causalnex.py | splunk/splunk-mltk-container-docker | 6e98e5984d99d7a3318f3e68c224d2a5163b717b | [
"Apache-2.0"
]
| 13 | 2019-11-22T16:00:02.000Z | 2022-01-12T10:57:08.000Z | app/model/causalnex.py | splunk/splunk-mltk-container-docker | 6e98e5984d99d7a3318f3e68c224d2a5163b717b | [
"Apache-2.0"
]
| 15 | 2019-10-25T23:19:43.000Z | 2022-03-27T16:49:21.000Z | #!/usr/bin/env python
# coding: utf-8
# In[18]:
# this definition exposes all python module imports that should be available in all subsequent commands
import json
import numpy as np
import pandas as pd
from causalnex.structure import DAGRegressor
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
# ...
# global constants
MODEL_DIRECTORY = "/srv/app/model/data/"
# In[22]:
# this cell is not executed from MLTK and should only be used for staging data into the notebook environment
def stage(name):
with open("data/"+name+".csv", 'r') as f:
df = pd.read_csv(f)
with open("data/"+name+".json", 'r') as f:
param = json.load(f)
return df, param
# In[24]:
# initialize your model
# available inputs: data and parameters
# returns the model object which will be used as a reference to call fit, apply and summary subsequently
def init(df,param):
model = DAGRegressor(
alpha=0.1,
beta=0.9,
fit_intercept=True,
hidden_layer_units=None,
dependent_target=True,
enforce_dag=True,
)
return model
# In[26]:
# train your model
# returns a fit info json object and may modify the model object
def fit(model,df,param):
target=param['target_variables'][0]
#Data prep for processing
y_p = df[target]
y = y_p.values
X_p = df[param['feature_variables']]
X = X_p.to_numpy()
X_col = list(X_p.columns)
#Scale the data
ss = StandardScaler()
X_ss = ss.fit_transform(X)
y_ss = (y - y.mean()) / y.std()
scores = cross_val_score(model, X_ss, y_ss, cv=KFold(shuffle=True, random_state=42))
    print(f'MEAN R2: {np.mean(scores):.3f}')
X_pd = pd.DataFrame(X_ss, columns=X_col)
y_pd = pd.Series(y_ss, name=target)
model.fit(X_pd, y_pd)
info = pd.Series(model.coef_, index=X_col)
#info = pd.Series(model.coef_, index=list(df.drop(['_time'],axis=1).columns))
return info
# In[28]:
# apply your model
# returns the calculated results
def apply(model,df,param):
data = []
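    # Collect every learned edge (parent node -> column) of the fitted DAG together
    # with its weight.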
for col in list(df.columns):
s = model.get_edges_to_node(col)
for i in s.index:
data.append([i,col,s[i]]);
graph = pd.DataFrame(data, columns=['src','dest','weight'])
#results to send back to Splunk
graph_output=graph[graph['weight']>0]
return graph_output
# In[ ]:
# save model to name in expected convention "<algo_name>_<model_name>"
def save(model,name):
#with open(MODEL_DIRECTORY + name + ".json", 'w') as file:
# json.dump(model, file)
return model
# In[ ]:
# load model from name in expected convention "<algo_name>_<model_name>"
def load(name):
model = {}
#with open(MODEL_DIRECTORY + name + ".json", 'r') as file:
# model = json.load(file)
return model
# In[ ]:
# return a model summary
def summary(model=None):
returns = {"version": {"numpy": np.__version__, "pandas": pd.__version__} }
return returns
| 16.272727 | 108 | 0.624146 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,305 | 0.405028 |
c3545eaf7cf8c0dfbca19e2063b2250b17a5d6be | 6,500 | py | Python | Assignment1/Q4/q4.py | NavneelSinghal/COL774 | d8b473b9cd05984ef4ffe8642ce3ce5cb9a17252 | [
"MIT"
]
| null | null | null | Assignment1/Q4/q4.py | NavneelSinghal/COL774 | d8b473b9cd05984ef4ffe8642ce3ce5cb9a17252 | [
"MIT"
]
| null | null | null | Assignment1/Q4/q4.py | NavneelSinghal/COL774 | d8b473b9cd05984ef4ffe8642ce3ce5cb9a17252 | [
"MIT"
]
| null | null | null | import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
matplotlib.use('Agg')
import math
import numpy as np
import sys
from os.path import join, isfile
import warnings
warnings.filterwarnings("ignore")
def gda(x, y):
x = x.T
y = y.T
# phi = P(y = 1)
# mu[i] = mean of the feature vectors of the ith class
# sigma = common co-variance matrix
# M[i] = number of data points of class i
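    # Closed-form maximum-likelihood estimates:
    #   phi   = (1/m) * sum_i 1{y_i = 1}
    #   mu[k] = mean of x_i over the samples with y_i = k
    #   sigma = (1/m) * sum_i (x_i - mu[y_i]) (x_i - mu[y_i])^T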
phi, mu, sigma, M = 0, np.array([0., 0.]), 0, np.array([0, 0])
m = y.shape[0]
M[1] = np.sum(y)
M[0] = m - M[1]
phi = M[1] / m
mu = np.array([np.sum(np.array([x[j] for j in range(m) if y[j] == i]), axis=0) / M[i] for i in range(2)])
sigma = np.sum(np.array([np.outer(x[i] - mu[y[i]], x[i] - mu[y[i]]) for i in range(m)]), axis=0).astype(float) / m
return phi, mu, sigma
def gda_general(x, y):
x = x.T
y = y.T
# phi = P(y = 1)
# mu[i] = mean of the feature vectors of the ith class
# sigma[i] = co-variance matrix for the ith class
# M[i] = number of data points of class i
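    # Same estimates as gda() above, except each class keeps its own covariance:
    #   sigma[k] = (1/M[k]) * sum_{i : y_i = k} (x_i - mu[k]) (x_i - mu[k])^T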
phi, mu, sigma, M = 0, np.array([0., 0.]), 0, np.array([0, 0])
m = y.shape[0]
M[1] = np.sum(y)
M[0] = m - M[1]
phi = M[1] / m
mu = np.array([np.sum(np.array([x[j] for j in range(m) if y[j] == i]), axis=0) / M[i] for i in range(2)])
sigma = np.array([np.sum(np.array([np.outer(x[i] - mu[k], x[i] - mu[k]) for i in range(m) if y[i] == k]), axis=0) / M[k] for k in range(2)]).astype(float)
return phi, mu, sigma
def main():
# read command-line arguments
data_dir = sys.argv[1]
out_dir = sys.argv[2]
part = sys.argv[3]
# check for existence of input files
for c in ['x', 'y']:
if not isfile(join(data_dir, 'q4' + c + '.dat')):
raise Exception('q4' + c + '.dat not found')
# read from csv file
x = np.array(np.genfromtxt(join(data_dir, 'q4x.dat'))).T
y = np.array([0 if yi == 'Alaska' else 1 for yi in np.loadtxt(join(data_dir, 'q4y.dat'), dtype=str)])
# normalisation
x_mean = np.array([0.0] * 2)
x_stddev = np.array([0.0] * 2)
for i in range(2):
x_mean[i] = np.mean(x[i])
x[i] -= np.full_like(x[i], np.mean(x[i]))
x_stddev[i] = np.sqrt(np.sum(x[i] ** 2) / x[i].shape[0])
x[i] /= np.sqrt(np.sum(x[i] ** 2) / x[i].shape[0])
# part A
# running GDA with common co-variance matrix
phi, mu, sigma = gda(x, y)
if part == 'a':
output_file = open(join(out_dir, '4aoutput.txt'), mode='w')
output_file.write('phi = ' + str(phi) + '\n')
output_file.write('mu[0] = ' + str(mu[0]) + '\n')
output_file.write('mu[1] = ' + str(mu[1]) + '\n')
output_file.write('sigma = \n' + str(sigma) + '\n')
output_file.close()
print('phi = ' + str(phi))
print('mu[0] = ' + str(mu[0]))
print('mu[1] = ' + str(mu[1]))
print('sigma = \n' + str(sigma))
return 0
# part B, C
fig4b, ax4b = plt.subplots()
# filter by y-values
x0, x1 = [], []
for i in range(y.shape[0]):
if y[i] == 0:
x0.append([x[0][i], x[1][i]])
else:
x1.append([x[0][i], x[1][i]])
x0 = np.array(x0).T
x1 = np.array(x1).T
# plot classes
alaska = ax4b.scatter(x0[0] * x_stddev[0] + x_mean[0], x0[1] * x_stddev[1] + x_mean[1], c='red', s=6)
canada = ax4b.scatter(x1[0] * x_stddev[0] + x_mean[0], x1[1] * x_stddev[1] + x_mean[1], c='blue', s=6)
ax4b.set_xlabel('Fresh water ring dia.')
ax4b.set_ylabel('Marine water ring dia.')
fig4b.legend((alaska, canada), ('Alaska', 'Canada'))
if part == 'b':
fig4b.savefig(join(out_dir, '1b_plot.png'))
plt.show()
return 0
# linear boundary computation - equation in report
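    # Setting P(y=1|x) = P(y=0|x) gives the line theta[0] + theta[1:] . x = 0 with
    #   theta[0]  = log(phi/(1-phi)) + (mu[0]^T S^-1 mu[0] - mu[1]^T S^-1 mu[1]) / 2
    #   theta[1:] = S^-1 (mu[1] - mu[0]),  where S is the shared covariance matrix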
sigma_inverse = np.linalg.inv(sigma)
theta = np.array([0., 0., 0.])
theta[0] = np.log(phi / (1 - phi))
for i in range(2):
mui = np.array([mu[i]])
        theta[0] += ((-1) ** i) * np.matmul(np.matmul(mui, sigma_inverse), mui.T) / 2
theta[1:] = np.matmul(np.array([mu[1] - mu[0]]), sigma_inverse)
# plotting the boundary
rx = np.arange(-3, 4)
ry = (-theta[0] - theta[1] * rx) / theta[2]
ax4b.plot(rx * x_stddev[0] + x_mean[0], ry * x_stddev[1] + x_mean[1])
#plt.show()
if part == 'c':
fig4b.savefig(join(out_dir, '1c_plot.png'))
plt.show()
return 0
# part D
# running generalised GDA
phi, mu, sigma = gda_general(x, y)
if part == 'd':
output_file = open(join(out_dir, '4doutput.txt'), mode='w')
output_file.write('phi = ' + str(phi) + '\n')
output_file.write('mu[0] = ' + str(mu[0]) + '\n')
output_file.write('mu[1] = ' + str(mu[1]) + '\n')
output_file.write('sigma[0] = \n' + str(sigma[0]) + '\n')
output_file.write('sigma[1] = \n' + str(sigma[1]) + '\n')
output_file.close()
print('phi = ' + str(phi))
print('mu[0] = ' + str(mu[0]))
print('mu[1] = ' + str(mu[1]))
print('sigma[0] = \n' + str(sigma[0]))
print('sigma[1] = \n' + str(sigma[1]))
return 0
# part E
# quadratic boundary computation - equation in report
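    # Setting P(y=1|x) = P(y=0|x) with per-class covariances S0, S1 gives
    #   x^T Q x + L . x + c = 0, where
    #   Q = (S0^-1 - S1^-1) / 2
    #   L = mu[1]^T S1^-1 - mu[0]^T S0^-1
    #   c = log(phi/(1-phi)) + log(|S0|/|S1|)/2 + (mu[0]^T S0^-1 mu[0] - mu[1]^T S1^-1 mu[1]) / 2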
constant = np.log(phi / (1 - phi)) + np.log(np.linalg.det(sigma[0]) / np.linalg.det(sigma[1])) / 2
linear = 0
quadratic = 0
for i in range(2):
sigma_inverse = np.linalg.inv(sigma[i])
mui = np.array([mu[i]])
prod = np.matmul(mui, sigma_inverse)
constant += ((-1) ** i) * np.matmul(prod, mui.T) / 2
linear += ((-1) ** (i + 1)) * prod
quadratic += ((-1) ** i) * sigma_inverse / 2
constant = constant[0][0]
linear = linear[0]
# note that here x transposed is the feature vector (as x is a row vector)
# and similarly mu[i] is also a row vector, which explains the equations above
# equation is x * quadratic * x.T + linear * x.T + constant = 0
# plotting the quadratic boundary
Z = 0
X, Y = np.meshgrid(np.linspace(-4, 4, 100), np.linspace(-4, 4, 100))
Z += quadratic[0, 0] * (X ** 2) + (quadratic[0, 1] + quadratic[1, 0]) * X * Y + (quadratic[1, 1]) * (Y ** 2)
Z += linear[0] * X + linear[1] * Y
Z += constant
ax4b.contour(X * x_stddev[0] + x_mean[0], Y * x_stddev[1] + x_mean[1], Z, 0)
if part == 'e':
fig4b.savefig(join(out_dir, '1e_plot.png'))
plt.show()
# part F - in the report
return 0
if __name__ == '__main__':
main()
| 31.400966 | 158 | 0.534923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,432 | 0.220308 |
c35493185a871b0c5b3f41a18ba8dd0865c75b5e | 1,521 | py | Python | var/spack/repos/builtin/packages/bcache/package.py | milljm/spack | b476f8aa63d48f4b959522ece0406caa32992d4a | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
]
| null | null | null | var/spack/repos/builtin/packages/bcache/package.py | milljm/spack | b476f8aa63d48f4b959522ece0406caa32992d4a | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
]
| null | null | null | var/spack/repos/builtin/packages/bcache/package.py | milljm/spack | b476f8aa63d48f4b959522ece0406caa32992d4a | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
]
| null | null | null | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Bcache(MakefilePackage):
"""Bcache is a patch for the Linux kernel to use SSDs to cache other block
devices."""
homepage = "http://bcache.evilpiepirate.org"
url = "https://github.com/g2p/bcache-tools/archive/v1.0.8.tar.gz"
version('1.0.8', sha256='d56923936f37287efc57a46315679102ef2c86cd0be5874590320acd48c1201c')
version('1.0.7', sha256='64d76d1085afba8c3d5037beb67bf9d69ee163f357016e267bf328c0b1807abd')
version('1.0.6', sha256='9677c6da3ceac4e1799d560617c4d00ea7e9d26031928f8f94b8ab327496d4e0')
version('1.0.5', sha256='1449294ef545b3dc6f715f7b063bc2c8656984ad73bcd81a0dc048cbba416ea9')
version('1.0.4', sha256='102ffc3a8389180f4b491188c3520f8a4b1a84e5a7ca26d2bd6de1821f4d913d')
depends_on('libuuid')
depends_on('util-linux')
depends_on('gettext')
depends_on('pkgconfig', type='build')
def setup_build_environment(self, env):
env.append_flags('LDFLAGS', '-lintl')
patch('func_crc64.patch', sha256='558b35cadab4f410ce8f87f0766424a429ca0611aa2fd247326ad10da115737d')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('bcache-register', prefix.bin)
install('bcache-super-show', prefix.bin)
install('make-bcache', prefix.bin)
install('probe-bcache', prefix.bin)
| 40.026316 | 104 | 0.738988 | 1,300 | 0.854701 | 0 | 0 | 0 | 0 | 0 | 0 | 952 | 0.625904 |
c354a245e57f7c727ba4576fb602286db50cc1a3 | 645 | py | Python | core/migrations/0010_wagtailsitepage_screenshot.py | admariner/madewithwagtail | a43b3263c0f151ece4994fccd561b0575db4979f | [
"MIT"
]
| null | null | null | core/migrations/0010_wagtailsitepage_screenshot.py | admariner/madewithwagtail | a43b3263c0f151ece4994fccd561b0575db4979f | [
"MIT"
]
| null | null | null | core/migrations/0010_wagtailsitepage_screenshot.py | admariner/madewithwagtail | a43b3263c0f151ece4994fccd561b0575db4979f | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-21 23:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0009_wagtail112upgrade'),
]
operations = [
migrations.RenameField(
model_name='wagtailsitepage',
old_name='image_desktop',
new_name='site_screenshot',
),
migrations.RemoveField(
model_name='wagtailsitepage',
name='image_phone',
),
migrations.RemoveField(
model_name='wagtailsitepage',
name='image_tablet',
),
]
| 22.241379 | 48 | 0.568992 | 534 | 0.827907 | 0 | 0 | 0 | 0 | 0 | 0 | 211 | 0.327132 |
c3556300b12020a7a08798e9741d8eecbab08f07 | 1,555 | py | Python | circuitpython/schedule.py | Flameeyes/birch-books-smarthome | 245a8afc848b2a8cf4dbcde31b36716b44937200 | [
"MIT"
]
| null | null | null | circuitpython/schedule.py | Flameeyes/birch-books-smarthome | 245a8afc848b2a8cf4dbcde31b36716b44937200 | [
"MIT"
]
| null | null | null | circuitpython/schedule.py | Flameeyes/birch-books-smarthome | 245a8afc848b2a8cf4dbcde31b36716b44937200 | [
"MIT"
]
| null | null | null | # SPDX-FileCopyrightText: © 2020 The birch-books-smarthome Authors
# SPDX-License-Identifier: MIT
BOOKSTORE_GROUND_FLOOR = 0x0007
BOOKSTORE_FIRST_FLOOR = 0x0008
BOOKSTORE_TERRARIUM = 0x0010
BOOKSTORE_BEDROOM = 0x0020
HOUSE_BASEMENT = 0x0040
HOUSE_GROUND_FLOOR = 0x0380
HOUSE_BEDROOM_LIGHT = 0x0400
HOUSE_BEDROOM_LAMP = 0x0800
HOUSE_FIREPLACE_1 = 0x1000
HOUSE_FIREPLACE_2 = 0x2000
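# Each SCHEDULE entry is one animation step: a bitmask of the lights (constants
# above) that are switched on during that step.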
SCHEDULE = [
BOOKSTORE_BEDROOM | HOUSE_BEDROOM_LIGHT,
BOOKSTORE_TERRARIUM | BOOKSTORE_BEDROOM | HOUSE_BEDROOM_LIGHT,
BOOKSTORE_TERRARIUM | BOOKSTORE_FIRST_FLOOR | HOUSE_BEDROOM_LIGHT,
BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR | HOUSE_GROUND_FLOOR,
BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR | HOUSE_GROUND_FLOOR,
BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR,
BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR,
BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR,
BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR,
BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR | HOUSE_GROUND_FLOOR,
BOOKSTORE_TERRARIUM | BOOKSTORE_FIRST_FLOOR | HOUSE_GROUND_FLOOR,
BOOKSTORE_TERRARIUM | BOOKSTORE_FIRST_FLOOR | HOUSE_BASEMENT | HOUSE_BEDROOM_LIGHT,
BOOKSTORE_TERRARIUM | BOOKSTORE_BEDROOM | HOUSE_BASEMENT | HOUSE_BEDROOM_LAMP,
BOOKSTORE_BEDROOM | HOUSE_BEDROOM_LAMP,
0,
0,
]
TEST_SCHEDULE = [
BOOKSTORE_GROUND_FLOOR,
BOOKSTORE_FIRST_FLOOR,
BOOKSTORE_TERRARIUM,
BOOKSTORE_BEDROOM,
HOUSE_BASEMENT,
HOUSE_GROUND_FLOOR,
HOUSE_BEDROOM_LIGHT,
HOUSE_BEDROOM_LAMP,
HOUSE_FIREPLACE_1,
HOUSE_FIREPLACE_2,
]
| 33.804348 | 87 | 0.803859 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.062339 |
c3561322c8fe83a3cce278173951cb1c3bdb4ed4 | 284 | py | Python | imdb/utils.py | rinkurajole/imdb_sanic_app | 502852b911eb2cfdc5dfcdb4fba585b91e2ce7c6 | [
"BSD-3-Clause"
]
| null | null | null | imdb/utils.py | rinkurajole/imdb_sanic_app | 502852b911eb2cfdc5dfcdb4fba585b91e2ce7c6 | [
"BSD-3-Clause"
]
| null | null | null | imdb/utils.py | rinkurajole/imdb_sanic_app | 502852b911eb2cfdc5dfcdb4fba585b91e2ce7c6 | [
"BSD-3-Clause"
]
| null | null | null | import bcrypt
salt = bcrypt.gensalt()
def generate_hash(passwd, salt=salt):
return str(bcrypt.hashpw(passwd, salt))
def match_password(req_pwd, db_pwd):
db_pwd = db_pwd.replace('b\'','').replace('\'','').encode('utf-8')
return db_pwd == bcrypt.hashpw(req_pwd, db_pwd)
| 23.666667 | 70 | 0.683099 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 0.070423 |
c3562d1b40f2737d409b58bb9f4467b0ae1bbe8c | 14,200 | py | Python | test/test_modules/test_math.py | dragonteros/unsuspected-hangeul | 52dda3768809f5ba91e4fd7bb754223737b2da3d | [
"MIT"
]
| 62 | 2019-02-24T17:45:04.000Z | 2021-06-14T07:34:57.000Z | test/test_modules/test_math.py | dragonteros/unsuspected-hangeul | 52dda3768809f5ba91e4fd7bb754223737b2da3d | [
"MIT"
]
| 11 | 2019-02-25T17:19:45.000Z | 2020-07-18T05:04:17.000Z | test/test_modules/test_math.py | dragonteros/unsuspected-hangeul | 52dda3768809f5ba91e4fd7bb754223737b2da3d | [
"MIT"
]
| 2 | 2019-02-25T07:51:14.000Z | 2019-09-23T12:36:08.000Z | from test.test_base import TestBase
class TestMath(TestBase):
def test_isclose(self):
_test = self._assert_execute
_test('ㄹ (ㄱㅇㄱ ㄱㅇㄱ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄴ', 'True')
_test('ㅂ ㅅ ㅂ ㅂㅎㄹ (ㄱㅇㄱ ㄱㅇㄱ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄴ', 'True')
_test('ㅂ ㅅ ㅈ ㅂㅎㄹ (ㄱㅇㄱ ㄱㅇㄱ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄴ', 'True')
_test('ㅈㅈㅈ ㄴㄱ ㅅㅎㄷ ㅅㅈㅈ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'False')
_test('ㅈㅈㅈㅈㅈㅈㅈ ㄴㄱ ㅅㅎㄷ ㅅㅈㅈㅈㅈㅈㅈ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'False')
_test('ㅅㄷㄱ ㅈ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ (ㅂ ㅅ ㅂ ㅂㅎㄹ) (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'False')
_test('ㄴㅈㄱㄹㄴㄹㄱ ㅅㄴㅂㄱㄱㄴㄱ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ (ㅂ ㅅ ㅂ ㅂㅎㄹ) (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True')
_test('(ㅂ ㅅ ㅈ ㅂㅎㄹ) (ㄱ ㅂ ㅅ ㅂ ㅂㅎㄹ ㅄㅎㄷ) ㅅㅎㄷ, ㄴㄱ, (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True')
_test('ㄱㄴ ㄷ (ㄱㅇㄱ ㄴㅇㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㄴㅇㄱ ㅅㅎㄷ, ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄷ', 'True')
_test('ㄱㄴ ㄹ (ㄱㅇㄱ ㄴㅇㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㄴㅇㄱ ㅅㅎㄷ, ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄷ', 'True')
_test('ㄱㄴ ㅁ (ㄱㅇㄱ ㄴㅇㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㄴㅇㄱ ㅅㅎㄷ, ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄷ', 'True')
_test('ㄴㄱ ㄷ (ㄱㅇㄱ ㄴㅇㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㄴㅇㄱ ㅅㅎㄷ, ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄷ', 'True')
_test('ㄴㄱ ㄹ (ㄱㅇㄱ ㄴㅇㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㄴㅇㄱ ㅅㅎㄷ, ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄷ', 'True')
_test('ㄴㄱ ㅁ (ㄱㅇㄱ ㄴㅇㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㄴㅇㄱ ㅅㅎㄷ, ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄷ', 'True')
def test_isnan(self):
_test = self._assert_execute
_test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄱ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄴ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) ㄴ ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('ㄱ (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('ㄴ (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('ㅂ ㅅ ㅁ ㅂㅎㄹ ㄴㄱ ㄱㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) ㄴ ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄱ (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄴ (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄱ (ㅂ ㅅ ㅁ ㅂㅎㄹ ㄴㄱ ㄱㅎㄷ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄱ (ㅂ ㅅ ㅁ ㅂㅎㄹ ㄴㄱ ㄱㅎㄷ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
def test_isinf(self):
_test = self._assert_execute
_test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄱ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄴ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㅂ ㅅ ㅁ ㅂㅎㄹ ㄴㄱ ㄱㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) ㄴ ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㅁ ㅂㅎㄹ ㄴㄱ ㄱㅎㄷ) ㄴ ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('ㄱ (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('ㄴ (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('ㄱ (ㅂ ㅅ ㅁ ㅂㅎㄹ ㄴㄱ ㄱㅎㄷ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) ㄴ ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄱ (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄴ (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
def test_abs(self):
_test = self._assert_execute
_test('ㄱ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0')
_test('ㄱ ㄷ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.0')
_test('ㄱ ㅄㅎㄴ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.0')
_test('ㄴ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '1')
_test('ㄴㄱ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '1')
_test('ㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '2')
_test('ㄷㄱ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '2')
_test('ㄷ ㄴㄱ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.5')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.5')
_test('ㅁ ㄴㄱ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.25')
_test('ㅁㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.25')
_test('ㄴ ㅄㅎㄴ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '1.0')
_test('ㄴㄱ ㅄㅎㄴ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '1.0')
_test('ㄷ ㄴㄱ ㅅㅎㄷ ㅄㅎㄴ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.5')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅄㅎㄴ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.5')
_test('ㄱ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '1.0')
_test('ㄱ ㄴㄱ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '1.0')
_test('ㄱ ㄷ ㄴㄱ ㅅㅎㄷ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.5')
_test('ㄱ ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.5')
_test('ㄹ ㅁ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㄹㄱ ㅁ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㄹ ㅁㄱ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㄹㄱ ㅁㄱ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㅂ ㅁㄴㄱ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂㄴㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㅂㄱ ㅁㄴㄱ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂㄴㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㅂ ㅁㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂㄴㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㅂㄱ ㅁㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂㄴㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㄴㄱ ㄷ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㄴㄱ ㄹ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㄴㄱ ㅁ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㅁㄱ ㄷ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㄱㄴ ㄹ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㄱㄷ ㅁ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
def test_log(self):
_test = self._assert_execute
_test('ㄴ [((ㅂ ㅅ ㅈ ㅂㅎㄹ) (ㄱㅇㄱ ㅂ ㅅ ㄺ ㅂㅎㅀㄴ) ㅅㅎㄷ) ㄱㅇㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄴㄱ [(ㅂ ㅅ ㅈ ㅂㅎㄹ (ㄱㅇㄱ ㅂ ㅅ ㄺ ㅂㅎㅀㄴ) ㅅㅎㄷ) ㄱㅇㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄷㄱ [(ㅂ ㅅ ㅈ ㅂㅎㄹ (ㄱㅇㄱ ㅂ ㅅ ㄺ ㅂㅎㅀㄴ) ㅅㅎㄷ) ㄱㅇㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄷ [(ㅂ ㅅ ㅈ ㅂㅎㄹ (ㄱㅇㄱ ㅂ ㅅ ㄺ ㅂㅎㅀㄴ) ㅅㅎㄷ) ㄱㅇㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄴㄱ ㄹ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ [(ㅂ ㅅ ㅈ ㅂㅎㄹ (ㄱㅇㄱ ㅂ ㅅ ㄺ ㅂㅎㅀㄴ) ㅅㅎㄷ) ㄱㅇㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
def test_trig(self):
_test = self._assert_execute
_test('ㄱ (ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True')
_test('[ㅈㄱ ㅅㄱ ㅂㄱ ㅁㄱ ㄺ ㄷㄱ ㄴㄱ ㄱ ㄴ ㄷ ㄹ ㅁ ㅂ ㅅ ㅈ] ㅁㅀㅈㄴㄱ [ㅂ ㅅ ㅂ ㅂㅎㄹ (ㄱㅇㄱ ㄷ ㄱㅎㄷ ㄷ ㄴㄱ ㅅㅎㄷ ㄷㅎㄷ) ㄱㅎㄷ (ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ] ㅁㄷㅎㄷ (ㄱ ㅁㅂㅎㄴ)ㅎㄴ', 'True')
_test('[ㅈㄱ ㅅㄱ ㅂㄱ ㅁㄱ ㄺ ㄷㄱ ㄴㄱ ㄱ ㄴ ㄷ ㄹ ㅁ ㅂ ㅅ ㅈ] ㅁㅀㅈㄴㄱ [ㅂ ㅅ ㅂ ㅂㅎㄹ (ㄱㅇㄱ ㄷ ㄱㅎㄷ ㄷㄱ ㄴㄱ ㅅㅎㄷ ㄷㅎㄷ) ㄱㅎㄷ (ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄴㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ] ㅁㄷㅎㄷ (ㄱ ㅁㅂㅎㄴ)ㅎㄴ', 'True')
_test('ㄱ (ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True')
_test('[ㅈㄱ ㅅㄱ ㅂㄱ ㅁㄱ ㄺ ㄷㄱ ㄴㄱ ㄱ ㄴ ㄷ ㄹ ㅁ ㅂ ㅅ ㅈ] ㅁㅀㅈㄴㄱ [ㅂ ㅅ ㅂ ㅂㅎㄹ (ㄱㅇㄱ ㄷ ㄱㅎㄷ) ㄱㅎㄷ (ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ] ㅁㄷㅎㄷ (ㄱ ㅁㅂㅎㄴ)ㅎㄴ', 'True')
_test('[ㅈㄱ ㅅㄱ ㅂㄱ ㅁㄱ ㄺ ㄷㄱ ㄴㄱ ㄱ ㄴ ㄷ ㄹ ㅁ ㅂ ㅅ ㅈ] ㅁㅀㅈㄴㄱ [ㅂ ㅅ ㅂ ㅂㅎㄹ (ㄱㅇㄱ ㄷ ㄱㅎㄷ ㄴ ㄷㅎㄷ) ㄱㅎㄷ (ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄴㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ] ㅁㄷㅎㄷ (ㄱ ㅁㅂㅎㄴ)ㅎㄴ', 'True')
_test('ㄱ (ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) ㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True')
_test('(ㅂ ㅅ ㅂ ㅂㅎㄹ ㅁ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ) (ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True')
_test('(ㅂ ㅅ ㅂ ㅂㅎㄹ ㅁㄱ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ) (ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) ㄴㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True')
_test('(ㅂ ㅅ ㅂ ㅂㅎㄹ ㄹ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ) (ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) (ㄹ ㄷ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ) (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True')
_test('(ㅂ ㅅ ㅂ ㅂㅎㄹ ㅅ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ) (ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) (ㄹ ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ) (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True')
_test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ) ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄴ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ) ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄴㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ) ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄷ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ) ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄷㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ) ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㅂ ㅅ ㅂ ㅂㅎㄹ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ) ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅅㅎㄷ) (ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄷㄱ ㅅㅎㄷ) ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄴ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅅㅎㄷ) (ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄷㄱ ㅅㅎㄷ) ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄴㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅅㅎㄷ) (ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄷㄱ ㅅㅎㄷ) ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄷ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅅㅎㄷ) (ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄷㄱ ㅅㅎㄷ) ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄷㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅅㅎㄷ) (ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄷㄱ ㅅㅎㄷ) ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㅂ ㅅ ㅂ ㅂㅎㄹ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅅㅎㄷ) (ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄷㄱ ㅅㅎㄷ) ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
def test_asin(self):
_test = self._assert_execute
_test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄴ [(ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄴㄱ [(ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄱ ㄴ ㅄㅎㄷ [(ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄴ ㄴ ㅄㅎㄷ [(ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄴ[(ㄱㅇㄱ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄴㄱ[(ㄱㅇㄱ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄱ ㄴ ㅄㅎㄷ[(ㄱㅇㄱ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄴ ㄴ ㅄㅎㄷ[(ㄱㅇㄱ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')

    def test_acos(self):
        _test = self._assert_execute
        _test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄴ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄱ ㄴ ㅄㅎㄷ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄴ ㄴ ㅄㅎㄷ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄴ[(ㄱㅇㄱ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄴㄱ[(ㄱㅇㄱ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄱ ㄴ ㅄㅎㄷ[(ㄱㅇㄱ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄴ ㄴ ㅄㅎㄷ[(ㄱㅇㄱ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')

    def test_atan(self):
        _test = self._assert_execute
        _test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄴ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄴㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄱ ㄴ ㅄㅎㄷ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄴ ㄴ ㅄㅎㄷ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄴ[(ㄱㅇㄱ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
        _test('ㄴㄱ[(ㄱㅇㄱ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')

    def test_atan2(self):
        _test = self._assert_execute
        _test('ㄱ ㄴ ㅂ ㅅ ㄴㄷ ㅂㅎㄹ ㅎㄷ', '0.0')
        _test('ㄱ ㄴㄱ ㅂ ㅅ ㄴㄷ ㅂㅎㄹ ㅎㄷ (ㅂ ㅅ ㅂ ㅂㅎㄹ)ㄶㄷ', 'True')
        _test('(ㄴ ㄱ ㅂ ㅅ ㄴㄷ ㅂㅎㄹ ㅎㄷ) ㄷ ㄱㅎㄷ (ㅂ ㅅ ㅂ ㅂㅎㄹ)ㄶㄷ', 'True')
        _test('(ㄴㄱ ㄱ ㅂ ㅅ ㄴㄷ ㅂㅎㄹ ㅎㄷ) ㄷㄱ ㄱㅎㄷ (ㅂ ㅅ ㅂ ㅂㅎㄹ)ㄶㄷ', 'True')
        _test('[ㄴ ㄴㄱ ㄷ ㄷㄱ ㄹ ㄺ ㅁㅀㅅ] [(ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ, ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ, ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄷ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ] ㅁㄷㅎㄷ (ㄱ ㅁㅂㅎㄴ)ㅎㄴ', 'True')

    def test_trunc(self):
        _test = self._assert_execute
        _test('ㄱ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '0')
        _test('ㅁ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '0')
        _test('ㅁㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '0')
        _test('ㄷ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '0')
        _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '0')
        _test('ㄴ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '1')
        _test('ㄴㄱ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '-1')
        _test('ㄷ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '1')
        _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '-1')
        _test('ㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '2')
        _test('ㄷㄱ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '-2')
        _test('ㄷ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '2')
        _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '-2')

    def test_floor(self):
        _test = self._assert_execute
        _test('ㄱ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '0')
        _test('ㅁ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '0')
        _test('ㅁㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '-1')
        _test('ㄷ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '0')
        _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '-1')
        _test('ㄴ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '1')
        _test('ㄴㄱ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '-1')
        _test('ㄷ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '1')
        _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '-2')
        _test('ㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '2')
        _test('ㄷㄱ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '-2')
        _test('ㄷ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '2')
        _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '-3')

    def test_round(self):
        _test = self._assert_execute
        _test('ㄱ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '0')
        _test('ㅁ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '0')
        _test('ㅁㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '0')
        _test('ㄷ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '0')
        _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '0')
        _test('ㄴ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '1')
        _test('ㄴㄱ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '-1')
        _test('ㄷ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '2')
        _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '-2')
        _test('ㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '2')
        _test('ㄷㄱ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '-2')
        _test('ㄷ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '2')
        _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '-2')

    def test_ceil(self):
        _test = self._assert_execute
        _test('ㄱ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '0')
        _test('ㅁ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '1')
        _test('ㅁㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '0')
        _test('ㄷ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '1')
        _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '0')
        _test('ㄴ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '1')
        _test('ㄴㄱ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '-1')
        _test('ㄷ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '2')
        _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '-1')
        _test('ㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '2')
        _test('ㄷㄱ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '-2')
        _test('ㄷ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '3')
        _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '-2')

    def test_round_to_inf(self):
        _test = self._assert_execute
        _test('ㄱ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '0')
        _test('ㅁ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '1')
        _test('ㅁㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '-1')
        _test('ㄷ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '1')
        _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '-1')
        _test('ㄴ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '1')
        _test('ㄴㄱ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '-1')
        _test('ㄷ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '2')
        _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '-2')
        _test('ㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '2')
        _test('ㄷㄱ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '-2')
        _test('ㄷ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '3')
        _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '-3')
| 55.905512 | 158 | 0.520352 | 24,351 | 0.998401 | 0 | 0 | 0 | 0 | 0 | 0 | 19,677 | 0.806765 |
c3565453ab31565d1b32ad8f383deec201854e66 | 1,563 | py | Python | services/smtp.py | sourceperl/docker.mqttwarn | 9d87337f766843c8bdee34eba8d29776e7032009 | [
"MIT"
]
| null | null | null | services/smtp.py | sourceperl/docker.mqttwarn | 9d87337f766843c8bdee34eba8d29776e7032009 | [
"MIT"
]
| null | null | null | services/smtp.py | sourceperl/docker.mqttwarn | 9d87337f766843c8bdee34eba8d29776e7032009 | [
"MIT"
]
| 2 | 2016-09-03T09:12:17.000Z | 2020-03-03T11:58:40.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Jan-Piet Mens <jpmens()gmail.com>'
__copyright__ = 'Copyright 2014 Jan-Piet Mens'
__license__ = """Eclipse Public License - v 1.0 (http://www.eclipse.org/legal/epl-v10.html)"""
import smtplib
from email.mime.text import MIMEText
def plugin(srv, item):
    srv.logging.debug("*** MODULE=%s: service=%s, target=%s", __file__, item.service, item.target)

    smtp_addresses = item.addrs
    server = item.config['server']
    sender = item.config['sender']
    starttls = item.config['starttls']
    username = item.config['username']
    password = item.config['password']

    msg = MIMEText(item.message)
    msg['Subject'] = item.get('title', "%s notification" % (srv.SCRIPTNAME))
    msg['To'] = ", ".join(smtp_addresses)
    msg['From'] = sender
    msg['X-Mailer'] = srv.SCRIPTNAME

    try:
        srv.logging.debug("Sending SMTP notification to %s [%s]..." % (item.target, smtp_addresses))
        server = smtplib.SMTP(server)
        server.set_debuglevel(0)
        server.ehlo()
        if starttls:
            server.starttls()
        if username:
            server.login(username, password)
        server.sendmail(sender, smtp_addresses, msg.as_string())
        server.quit()
        srv.logging.debug("Successfully sent SMTP notification")
    except Exception, e:
        srv.logging.warn("Error sending notification to SMTP recipient %s [%s]: %s" % (item.target, smtp_addresses, str(e)))
        return False

    return True
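
# --- Hedged configuration sketch (not part of the original plugin) ----------
# The keys read from item.config above would normally come from an mqttwarn
# ini section along these lines; the section layout follows mqttwarn's usual
# [config:<service>] convention, and the concrete values and target names are
# illustrative assumptions, not taken from this repository.
#
#   [config:smtp]
#   server    = 'localhost:25'
#   sender    = "mqttwarn <mqttwarn@localhost>"
#   username  = None
#   password  = None
#   starttls  = False
#   targets   = {
#       'admins' : [ 'root@localhost' ],
#       }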
| 33.978261 | 124 | 0.621241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 466 | 0.298145 |
c3566cc0d033b24fec07c1d00481ebc4541fed37 | 1,865 | py | Python | xknx/knxip/disconnect_request.py | Trance-Paradox/xknx | d5603361080f96aafd19c14d17fb1ff391064b3f | [
"MIT"
]
| null | null | null | xknx/knxip/disconnect_request.py | Trance-Paradox/xknx | d5603361080f96aafd19c14d17fb1ff391064b3f | [
"MIT"
]
| null | null | null | xknx/knxip/disconnect_request.py | Trance-Paradox/xknx | d5603361080f96aafd19c14d17fb1ff391064b3f | [
"MIT"
]
| null | null | null | """
Module for Serialization and Deserialization of a KNX Disconnect Request information.
Disconnect requests are used to disconnect a tunnel from a KNX/IP device.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
from xknx.exceptions import CouldNotParseKNXIP
from .body import KNXIPBody
from .hpai import HPAI
from .knxip_enum import KNXIPServiceType
if TYPE_CHECKING:
    from xknx.xknx import XKNX


class DisconnectRequest(KNXIPBody):
    """Representation of a KNX Disconnect Request."""

    SERVICE_TYPE = KNXIPServiceType.DISCONNECT_REQUEST

    def __init__(
        self,
        xknx: XKNX,
        communication_channel_id: int = 1,
        control_endpoint: HPAI = HPAI(),
    ):
        """Initialize DisconnectRequest object."""
        super().__init__(xknx)
        self.communication_channel_id = communication_channel_id
        self.control_endpoint = control_endpoint

    def calculated_length(self) -> int:
        """Get length of KNX/IP body."""
        return 2 + HPAI.LENGTH

    def from_knx(self, raw: bytes) -> int:
        """Parse/deserialize from KNX/IP raw data."""
        if len(raw) < 2:
            raise CouldNotParseKNXIP("Disconnect info has wrong length")
        self.communication_channel_id = raw[0]
        # raw[1] is reserved
        return self.control_endpoint.from_knx(raw[2:]) + 2

    def to_knx(self) -> bytes:
        """Serialize to KNX/IP raw data."""
        return (
            bytes((self.communication_channel_id, 0x00))  # 2nd byte is reserved
            + self.control_endpoint.to_knx()
        )

    def __str__(self) -> str:
        """Return object as readable string."""
        return (
            "<DisconnectRequest "
            f'CommunicationChannelID="{self.communication_channel_id}" '
            f'control_endpoint="{self.control_endpoint}" />'
        )
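
# --- Hedged usage sketch (not part of the original xknx module) -------------
# Round-trip example for the body defined above. An existing XKNX() instance
# named `xknx` is assumed, and the HPAI keyword arguments are assumptions
# about its constructor rather than something shown in this file.
#
#   request = DisconnectRequest(
#       xknx,
#       communication_channel_id=7,
#       control_endpoint=HPAI(ip_addr="192.168.1.2", port=3671),
#   )
#   raw = request.to_knx()           # serialize only the body, without KNX/IP header
#   parsed = DisconnectRequest(xknx)
#   parsed.from_knx(raw)             # parsed.communication_channel_id == 7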
| 29.603175 | 85 | 0.657909 | 1,433 | 0.768365 | 0 | 0 | 0 | 0 | 0 | 0 | 615 | 0.329759 |
c357fdd1d20a6b3edc1499c2dfe1f260522fc967 | 391 | py | Python | src/database_setup.py | liuchanglilian/crowdsourcing-text-msg | 8270a8175bc78141d9eff00b53f4b292d0d2678c | [
"MIT"
]
| null | null | null | src/database_setup.py | liuchanglilian/crowdsourcing-text-msg | 8270a8175bc78141d9eff00b53f4b292d0d2678c | [
"MIT"
]
| null | null | null | src/database_setup.py | liuchanglilian/crowdsourcing-text-msg | 8270a8175bc78141d9eff00b53f4b292d0d2678c | [
"MIT"
]
| null | null | null | from src.sqlite_helper import create_message_table, drop_message_table
"""
This script will create a SQLite table for you; it is meant to be run once as a one-time setup.
The table is named message and will store all posted messages.
"""
create_message_table()
"""
If you need to drop the message table, un-comment the following code by removing the # sign at the beginning.
"""
#
# drop_message_table()
#
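
# --- Hedged sketch (not part of the original script) ------------------------
# src/sqlite_helper.py is not shown here; a minimal implementation of the two
# helpers imported above could look like this. The database file name and the
# column layout of the message table are assumptions for illustration only.
#
#   import sqlite3
#
#   def create_message_table(db_path="messages.db"):
#       with sqlite3.connect(db_path) as conn:
#           conn.execute(
#               "CREATE TABLE IF NOT EXISTS message ("
#               " id INTEGER PRIMARY KEY AUTOINCREMENT,"
#               " body TEXT NOT NULL,"
#               " created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP)"
#           )
#
#   def drop_message_table(db_path="messages.db"):
#       with sqlite3.connect(db_path) as conn:
#           conn.execute("DROP TABLE IF EXISTS message")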
| 24.4375 | 108 | 0.769821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 289 | 0.73913 |
c3590d5e9d8eea5dee2b2753a4c5f63a26af1754 | 5,401 | py | Python | home/pedrosenarego/zorba/zorba1.0.py | rv8flyboy/pyrobotlab | 4e04fb751614a5cb6044ea15dcfcf885db8be65a | [
"Apache-2.0"
]
| 63 | 2015-02-03T18:49:43.000Z | 2022-03-29T03:52:24.000Z | home/pedrosenarego/zorba/zorba1.0.py | hirwaHenryChristian/pyrobotlab | 2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9 | [
"Apache-2.0"
]
| 16 | 2016-01-26T19:13:29.000Z | 2018-11-25T21:20:51.000Z | home/pedrosenarego/zorba/zorba1.0.py | hirwaHenryChristian/pyrobotlab | 2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9 | [
"Apache-2.0"
]
| 151 | 2015-01-03T18:55:54.000Z | 2022-03-04T07:04:23.000Z | from java.lang import String
import threading
import random
import codecs
import io
import itertools
import time
import os
import urllib2
import textwrap
import socket
import shutil
#############################################################
# This is the Zorba
#
#############################################################
# All bot specific configuration goes here.
leftPort = "/dev/ttyACM1"
rightPort = "/dev/ttyACM0"
headPort = leftPort
gesturesPath = "/home/pedro/Dropbox/pastaPessoal/3Dprinter/inmoov/scripts/zorba/gestures"
botVoice = "WillBadGuy"
#starting the INMOOV
i01 = Runtime.createAndStart("i01", "InMoov")
i01.setMute(True)
##############STARTING THE RIGHT HAND#########
i01.rightHand = Runtime.create("i01.rightHand", "InMoovHand")
# tweaking default settings of the right hand
i01.rightHand.thumb.setMinMax(20,155)
i01.rightHand.index.setMinMax(30,130)
i01.rightHand.majeure.setMinMax(38,150)
i01.rightHand.ringFinger.setMinMax(30,170)
i01.rightHand.pinky.setMinMax(30,150)
i01.rightHand.thumb.map(0,180,20,155)
i01.rightHand.index.map(0,180,30,130)
i01.rightHand.majeure.map(0,180,38,150)
i01.rightHand.ringFinger.map(0,180,30,175)
i01.rightHand.pinky.map(0,180,30,150)
#################
#################STARTING RIGHT ARM###############
i01.startRightArm(rightPort)
#i01.rightArm = Runtime.create("i01.rightArm", "InMoovArm")
## tweak default RightArm
i01.detach()
i01.rightArm.bicep.setMinMax(0,60)
i01.rightArm.bicep.map(0,180,0,60)
i01.rightArm.rotate.setMinMax(46,130)
i01.rightArm.rotate.map(0,180,46,130)
i01.rightArm.shoulder.setMinMax(0,155)
i01.rightArm.shoulder.map(0,180,0,155)
i01.rightArm.omoplate.setMinMax(8,85)
i01.rightArm.omoplate.map(0,180,8,85)
########STARTING SIDE NECK CONTROL########
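# Side-to-side neck control: the left and right neck servos are mounted as a
# mirrored pair and are driven symmetrically around a common rest position
# (restPos + delta on one side, restPos - delta on the other).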
def neckMoveTo(restPos, delta):
    leftneckServo.moveTo(restPos + delta)
    rightneckServo.moveTo(restPos - delta)
leftneckServo = Runtime.start("leftNeck","Servo")
rightneckServo = Runtime.start("rightNeck","Servo")
right = Runtime.start("i01.right", "Arduino")
#right.connect(rightPort)
leftneckServo.attach(right, 13)
rightneckServo.attach(right, 12)
restPos = 90
delta = 20
neckMoveTo(restPos,delta)
#############STARTING THE HEAD##############
i01.head = Runtime.create("i01.head", "InMoovHead")
# tweaking default settings of the head
i01.head.jaw.setMinMax(35,75)
i01.head.jaw.map(0,180,35,75)
i01.head.jaw.setRest(35)
#tweaking default settings of eyes
i01.head.eyeY.setMinMax(0,180)
i01.head.eyeY.map(0,180,70,110)
i01.head.eyeY.setRest(90)
i01.head.eyeX.setMinMax(0,180)
i01.head.eyeX.map(0,180,70,110)
i01.head.eyeX.setRest(90)
i01.head.neck.setMinMax(40,142)
i01.head.neck.map(0,180,40,142)
i01.head.neck.setRest(70)
i01.head.rothead.setMinMax(21,151)
i01.head.rothead.map(0,180,21,151)
i01.head.rothead.setRest(88)
#########STARTING MOUTH CONTROL###############
i01.startMouthControl(leftPort)
i01.mouthControl.setmouth(0,180)
######################################################################
# mouth service, speech synthesis
mouth = Runtime.createAndStart("i01.mouth", "AcapelaSpeech")
mouth.setVoice(botVoice)
######################################################################
# helper function to help debug the recognized text from webkit/sphinx
######################################################################
def heard(data):
    print "Speech Recognition Data:"+str(data)
######################################################################
# Create ProgramAB chat bot ( This is the inmoov "brain" )
######################################################################
zorba2 = Runtime.createAndStart("zorba", "ProgramAB")
zorba2.startSession("Pedro", "zorba")
######################################################################
# Html filter to clean the output from programab. (just in case)
htmlfilter = Runtime.createAndStart("htmlfilter", "HtmlFilter")
######################################################################
# the "ear" of the inmoov TODO: replace this with just base inmoov ear?
ear = Runtime.createAndStart("i01.ear", "WebkitSpeechRecognition")
ear.addListener("publishText", python.name, "heard");
ear.addMouth(mouth)
######################################################################
# MRL Routing webkitspeechrecognition/ear -> program ab -> htmlfilter -> mouth
######################################################################
ear.addTextListener(zorba2)
zorba2.addTextListener(htmlfilter)
htmlfilter.addTextListener(mouth)
#starting the INMOOV
i01 = Runtime.createAndStart("i01", "InMoov")
i01.setMute(True)
i01.mouth = mouth
######################################################################
# Launch the web gui and create the webkit speech recognition gui
# This service works in Google Chrome only with the WebGui
#################################################################
webgui = Runtime.createAndStart("webgui","WebGui")
######################################################################
# Helper functions and various gesture definitions
######################################################################
i01.loadGestures(gesturesPath)
ear.startListening()
######################################################################
# starting services
######################################################################
i01.startRightHand(rightPort)
i01.detach()
leftneckServo.detach()
rightneckServo.detach()
i01.startHead(leftPort)
i01.detach()
| 31.95858 | 89 | 0.587854 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,721 | 0.503796 |