id (stringlengths 1-265) | text (stringlengths 6-5.19M) | dataset_id (stringclasses, 7 values)
---|---|---|
109883 | <gh_stars>0
world_cities = ['Dubai', 'New Orleans', 'Santorini', 'Gaza', 'Seoul']
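# Note: sorted() returns a new, sorted copy and leaves the original list
# unchanged, while list.sort() and list.reverse() modify the list in place.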
print('***********')
print(world_cities)
print('***********')
print(sorted(world_cities))
print('***********')
print(world_cities)
print('***********')
print(sorted(world_cities, reverse=True))
print('***********')
print(world_cities)
print('***********')
world_cities.reverse()
print('***********')
print(world_cities)
world_cities.reverse()
print('***********')
print(world_cities)
print('***********')
world_cities.sort()
print(world_cities)
| StarcoderdataPython |
11832 | <filename>main.py
import pandas as pd
import numpy as np
import io
import time
import uuid
from flask import Flask, render_template, request, redirect, url_for, Response, session, send_file, make_response, send_from_directory
from os.path import join, dirname, realpath
from werkzeug.wsgi import FileWrapper
app = Flask(__name__)
app.config["DEBUG"] = True
app.config["UPLOAD_FOLDER"] = 'media/dataset'
app.config["EXPORT_FOLDER_CSV"] = 'media/result'
app.config["SECRET_KEY"] = '<KEY>'
app.config['SESSION_TYPE'] = 'filesystem'
@app.route('/')
def index():
return render_template('index.html')
@app.route("/", methods=['POST'])
def uploadExcel():
start_id = request.form['id']
uploaded_file = request.files['file']
if uploaded_file.filename != '':
file_path = join(app.config['UPLOAD_FOLDER'], uploaded_file.filename)
uploaded_file.save(file_path)
cleanExcel(file_path, start_id)
csv_name = session['csv_name']
return redirect(url_for('success', file_id=csv_name))
else:
return redirect(url_for('index'))
@app.route('/export/<file_id>', methods=['GET','POST'])
def success(file_id):
filename = session['csv_name'] if "csv_name" in session else ""
return render_template('success.html', filename=file_id)
@app.route('/downloads/<path:filename>', methods=['GET','POST'])
def download(filename):
uploads = join(app.root_path, app.config['EXPORT_FOLDER_CSV'])
return send_from_directory(directory=uploads, filename=filename)
def cleanExcel(file_path, start_id):
xls = pd.read_excel(file_path)
    # replace() is not in-place; assign the result back
    xls = xls.replace(to_replace=[r"\\t|\\n|\\r", "\t|\n|\r"], value=["", ""], regex=True)
print("Jumlah awal: {}".format(xls.shape))
xls.rename(columns = {
'NIK':'nik',
'NAMA':'nama',
'JENIS_KELAMIN':'jkel',
'TANGGAL_LAHIR':'tgl_lahir',
'NO_HP':'telp',
'INSTANSI_PEKERJAAN':'instansi',
'ALAMAT KTP': 'alamat',
'ALAMAT_KTP': 'alamat',
'KODE_KAB_KOTA_TEMPAT_KERJA': 'kab_id',
'KODE_KATEGORI': 'kategori'
}, inplace = True)
xls['nik'] = xls['nik'].astype(str)
xls.insert(0, 'id', range(int(start_id), int(start_id) + len(xls)))
xls.insert(2, 'nama_ktp', xls['nama'])
xls.insert(6, 'status', 0)
# del xls['NO']
del xls['UMUR']
del xls['JENIS_PEKERJAAN']
xls.drop(xls[xls['tgl_lahir'].isnull()].index, inplace = True)
xls.drop(xls[xls['nik'].isnull()].index, inplace = True)
xls.drop(xls[xls['nik'].str.len() > 16].index, inplace = True)
xls.drop(xls[xls['nik'].str.len() < 16].index, inplace = True)
xls.drop(xls[xls.duplicated(['nik'])].index, inplace = True)
if xls['tgl_lahir'].dtypes == 'object':
xls['tgl_lahir'] = pd.to_datetime(xls['tgl_lahir'])
if xls['telp'].dtypes == 'float64':
xls['telp'] = xls['telp'].astype(str)
xls['telp'] = xls['telp'].str.split('.').str[0]
xls['telp'] = xls['telp'].replace('nan',np.NaN)
xls['telp'] = '0' + xls['telp']
if xls['telp'].dtypes == 'object':
xls['telp'] = xls['telp'].str.split('/').str[0]
        xls['telp'] = xls['telp'].str.replace(r'\+62', '0', regex=True)
xls['telp'] = xls['telp'].str.replace(' ','')
xls['telp'] = xls['telp'].str.replace('-','')
if xls['kab_id'].dtypes == 'float64':
xls['kab_id'] = xls['kab_id'].astype(str)
xls['kab_id'] = xls['kab_id'].str.split('.').str[0]
xls['kab_id'] = xls['kab_id'].replace('nan',np.NaN)
if xls['kategori'].dtypes == 'int64':
xls['kategori'] = xls['kategori'].astype(str)
xls['kategori'] = xls['kategori'].apply(lambda x: '0' + x if len(x) == 1 else x)
    xls['alamat'] = xls['alamat'].str.replace(';', '', regex=False)
print("Jumlah akhir: {}".format(xls.shape))
uid = str(uuid.uuid4())[:4]
path_file = 'media/result/'
outfile_name = '{0}{1}'.format(time.strftime("%Y%m%d-%H%M%S-"),uid)
session['csv_name'] = f'{outfile_name}'
xls.to_csv(f'{path_file}{outfile_name}.csv', index=False, header=True, encoding="utf-8")
if __name__ == '__main__':
    app.run(debug=True)
| StarcoderdataPython |
4824277 | import random
from operator import add
class SlipperyGrid:
"""
Slippery grid-world modelled as an MDP
...
Attributes
----------
shape: list
1d list with two elements: 1st element is the num of row cells and the 2nd is the num of column cells (default [40, 40])
initial_state : list
1d list with two elements (default [0, 39])
slip_probability: float
probability of slipping (default 0.15)
sink_states : list
sinks states if any (default [])
Methods
-------
reset()
resets the MDP state
step(action)
changes the state of the MDP upon executing an action, where the action set is {right,up,left,down,stay}
state_label(state)
outputs the label of input state
"""
def __init__(
self,
shape=None,
initial_state=None,
slip_probability=0.15,
sink_states=None
):
if sink_states is None:
sink_states = []
if shape is None:
self.shape = [40, 40]
else:
self.shape = shape
if initial_state is None:
self.initial_state = [0, 39]
else:
self.initial_state = initial_state
self.current_state = self.initial_state.copy()
self.slip_probability = slip_probability
self.sink_states = sink_states
self.labels = None
# directional actions
self.action_space = [
"right",
"up",
"left",
"down",
"stay"
]
def reset(self):
self.current_state = self.initial_state.copy()
def step(self, action):
# check if the agent is in a sink state
if self.current_state in self.sink_states:
next_state = self.current_state
else:
# slipperiness
if random.random() < self.slip_probability:
action = random.choice(self.action_space)
# grid movement dynamics:
if action == 'right':
next_state = list(map(add, self.current_state, [0, 1]))
elif action == 'up':
next_state = list(map(add, self.current_state, [-1, 0]))
elif action == 'left':
next_state = list(map(add, self.current_state, [0, -1]))
elif action == 'down':
next_state = list(map(add, self.current_state, [1, 0]))
elif action == 'stay':
next_state = self.current_state
# check for boundary violations
if next_state[0] == self.shape[0]:
next_state[0] = self.shape[0] - 1
if next_state[1] == self.shape[1]:
next_state[1] = self.shape[1] - 1
if -1 in next_state:
next_state[next_state.index(-1)] = 0
# check for obstacles
if 'obstacle' in self.state_label(next_state):
next_state = self.current_state
# update current state
self.current_state = next_state
return next_state
def state_label(self, state):
return self.labels[state[0], state[1]]
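
# --- Illustrative usage sketch (not part of the original module). ---
# A labels array must be supplied before step() is called, because
# state_label() indexes it; a numpy array of strings is assumed here.
if __name__ == '__main__':
    import numpy as np

    grid = SlipperyGrid(shape=[5, 5], initial_state=[0, 4], slip_probability=0.1)
    grid.labels = np.full((5, 5), 'safe', dtype=object)  # no 'obstacle' cells in this sketch
    grid.reset()
    for _ in range(5):
        action = random.choice(grid.action_space)
        state = grid.step(action)
        print(action, state, grid.state_label(state))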
| StarcoderdataPython |
60797 | from readthedocs.api.v2.views.footer_views import BaseFooterHTML
from readthedocs.core.utils.extend import SettingsOverrideObject
from readthedocs.embed.views import EmbedAPIBase
class BaseProxiedFooterHTML(BaseFooterHTML):
# DRF has BasicAuthentication and SessionAuthentication as default classes.
    # We don't support either of them on the community site.
authentication_classes = []
class ProxiedFooterHTML(SettingsOverrideObject):
_default_class = BaseProxiedFooterHTML
class ProxiedEmbedAPIBase(EmbedAPIBase):
# DRF has BasicAuthentication and SessionAuthentication as default classes.
    # We don't support either of them on the community site.
authentication_classes = []
class ProxiedEmbedAPI(SettingsOverrideObject):
_default_class = ProxiedEmbedAPIBase
| StarcoderdataPython |
3393939 | <reponame>TylerYep/wolfbot<filename>tests/solvers/state_test.py<gh_stars>1-10
from tests.conftest import set_roles
from wolfbot import const
from wolfbot.enums import Role, SwitchPriority
from wolfbot.solvers import SolverState
from wolfbot.statements import Statement
class TestSolverState:
"""Tests for the SolverState class."""
@staticmethod
def test_constructor() -> None:
"""Should initialize a SolverState."""
result = SolverState((frozenset({Role.VILLAGER}),), path=(True,))
assert isinstance(result, SolverState)
@staticmethod
def test_eq(example_small_solverstate: SolverState) -> None:
"""Should be able to compare two identical SolverStates."""
possible_roles = (
frozenset({Role.SEER}),
frozenset({Role.ROBBER, Role.VILLAGER, Role.SEER}),
frozenset({Role.ROBBER}),
)
switches = ((SwitchPriority.ROBBER, 2, 0),)
path = (True,)
result = SolverState(possible_roles, switches, path)
assert result == example_small_solverstate
@staticmethod
def test_get_role_counts() -> None:
"""
        Should return a dict with counts of all certain roles.
"""
set_roles(Role.WOLF, Role.SEER, Role.VILLAGER, Role.ROBBER, Role.VILLAGER)
possible_roles_list = (
frozenset({Role.VILLAGER}),
frozenset({Role.SEER}),
frozenset({Role.VILLAGER}),
) + (const.ROLE_SET,) * 2
result = SolverState(possible_roles_list).get_role_counts()
assert result == {Role.SEER: 0, Role.VILLAGER: 0, Role.WOLF: 1, Role.ROBBER: 1}
@staticmethod
def test_repr() -> None:
"""Should pretty-print SolverStates using the custom formatter."""
result = SolverState((frozenset({Role.VILLAGER}),), path=(True,))
assert (
str(result)
== repr(result)
== (
"SolverState(\n"
" possible_roles=(frozenset([Role.VILLAGER]),),\n"
" path=(True,),\n"
" role_counts={\n"
" Role.INSOMNIAC: 1,\n"
" Role.VILLAGER: 2,\n"
" Role.ROBBER: 1,\n"
" Role.DRUNK: 1,\n"
" Role.WOLF: 2,\n"
" Role.SEER: 1,\n"
" Role.TANNER: 1,\n"
" Role.MASON: 2,\n"
" Role.MINION: 1,\n"
" Role.TROUBLEMAKER: 1,\n"
" Role.HUNTER: 1\n"
" },\n"
" count_true=1\n"
")"
)
)
class TestIsConsistent:
"""Tests for the is_consistent function."""
@staticmethod
def test_is_consistent_on_empty_state(
example_small_solverstate: SolverState, example_statement: Statement
) -> None:
"""
Should check a new statement against an empty SolverState for consistency.
"""
start_state = SolverState()
result = start_state.is_consistent(example_statement)
assert result == example_small_solverstate
@staticmethod
def test_invalid_state(example_statement: Statement) -> None:
"""Should return None for inconsistent states."""
start_state = SolverState((frozenset({Role.VILLAGER}),) * 3, path=(True,))
invalid_state = start_state.is_consistent(example_statement)
assert invalid_state is None
@staticmethod
def test_is_consistent_on_existing_state(
example_medium_solverstate: SolverState,
) -> None:
"""
Should check a new statement against accumulated statements for consistency.
Should not change result.path - that is done in the switching_solver function.
"""
possible_roles = (frozenset({Role.SEER}),) + (const.ROLE_SET,) * (
const.NUM_ROLES - 1
)
example_solverstate = SolverState(possible_roles, path=(True,))
new_statement = Statement(
"next", ((2, frozenset({Role.DRUNK})),), ((SwitchPriority.DRUNK, 2, 5),)
)
result = example_solverstate.is_consistent(new_statement)
assert result == example_medium_solverstate
@staticmethod
def test_is_consistent_deepcopy_mechanics(
example_medium_solverstate: SolverState,
) -> None:
"""
Modifying one SolverState should not affect
other SolverStates created by is_consistent.
"""
possible_roles = (frozenset({Role.SEER}),) + (const.ROLE_SET,) * (
const.NUM_ROLES - 1
)
example = SolverState(possible_roles, path=(True,))
new_statement = Statement(
"next", ((2, frozenset({Role.DRUNK})),), ((SwitchPriority.DRUNK, 2, 5),)
)
result = example.is_consistent(new_statement)
example.possible_roles += (frozenset({Role.NONE}),)
example.switches += ((SwitchPriority.DRUNK, 5, 5),)
example.possible_roles = (example.possible_roles[0] & {Role.NONE},)
assert result == example_medium_solverstate
| StarcoderdataPython |
3391774 | import random, time, sys, subprocess, threading, pycurl, os, requests
from colorama import Fore
class Proxy_Checker():
def __init__(self):
subprocess.call('clear', shell=True)
sys.setrecursionlimit(10**6)
print(f"""{Fore.BLUE}
██▓███ ██▀███ ▒█████ ▒██ ██▓██ ██▓ ▄▄▄█████▓▒█████ ▒█████ ██▓
▓██░ ██▓██ ▒ ██▒██▒ ██▒▒ █ █ ▒░▒██ ██▒ ▓ ██▒ ▓▒██▒ ██▒██▒ ██▓██▒
▓██░ ██▓▓██ ░▄█ ▒██░ ██░░ █ ░ ▒██ ██░ ▒ ▓██░ ▒▒██░ ██▒██░ ██▒██░
▒██▄█▓▒ ▒██▀▀█▄ ▒██ ██░░ █ █ ▒ ░ ▐██▓░ ░ ▓██▓ ░▒██ ██▒██ ██▒██░
▒██▒ ░ ░██▓ ▒██░ ████▓▒▒██▒ ▒██▒ ░ ██▒▓░ ▒██▒ ░░ ████▓▒░ ████▓▒░██████▒
▒▓▒░ ░ ░ ▒▓ ░▒▓░ ▒░▒░▒░▒▒ ░ ░▓ ░ ██▒▒▒ ▒ ░░ ░ ▒░▒░▒░░ ▒░▒░▒░░ ▒░▓ ░
░▒ ░ ░▒ ░ ▒░ ░ ▒ ▒░░░ ░▒ ░▓██ ░▒░ ░ ░ ▒ ▒░ ░ ▒ ▒░░ ░ ▒ ░
░░ ░░ ░░ ░ ░ ▒ ░ ░ ▒ ▒ ░░ ░ ░ ░ ░ ▒ ░ ░ ░ ▒ ░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
░ ░
""")
self.yes = ["yes", "y", "ye", "Y", "YES", 'YE']
self.no = ["no", "n", "NO", "n"]
self.thr = 100
self.TARGET = input(f"{Fore.BLUE}[CONSOLE] Please enter full target url to check the proxies: ")
self.verbose = input(f"{Fore.BLUE}[CONSOLE] Input bad requests too?: ")
self.proxy_type = input(f"{Fore.BLUE}[CONSOLE] Proxy type (http, socks4, socks5): ")
get_proxies = input(f'{Fore.BLUE}[CONSOLE] Get the proxies or you already have http proxy list? (get/n):')
if get_proxies == 'get':
try:
os.remove("ProxyChecker/http_proxies.txt")
os.remove("ProxyChecker/good_proxies.txt")
except:
pass
proxylist = open(f'ProxyChecker/{self.proxy_type}_proxies.txt', 'a+')
try:
r1 = requests.get(f'https://api.proxyscrape.com?request=getproxies&proxytype={self.proxy_type}&ssl=yes')
proxylist.write(r1.text)
except:
pass
proxylist.close()
self.proxy_file = f'ProxyChecker/{self.proxy_type}_proxies.txt'
else:
self.proxy_file = input(f"{Fore.BLUE}[CONSOLE] Please enter the proxy filename: ")
self.timeout = int(input(f"{Fore.BLUE}[CONSOLE] Please enter proxy timeout (10-100): "))
self.pro = open("ProxyChecker/good_proxies.txt", "a+")
self.checked = 0
self.good = 0
self.headers= [
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36',
'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/4.4.3.4000',
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
]
def proxy_checker(self, proxy):
if self.proxy_type == "http":
try:
ip, port = proxy.split(":")[0].replace('\n', ''), proxy.split(":")[1].replace('\n', '')
c = pycurl.Curl()
c.setopt(pycurl.URL, self.TARGET)
c.setopt(pycurl.PROXY, ip)
c.setopt(pycurl.PROXYPORT, int(port))
c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_HTTP)
c.setopt(pycurl.HTTPHEADER, [f'user-agent: {random.choice(self.headers)}'])
c.setopt(pycurl.CONNECTTIMEOUT, self.timeout)
c.setopt(pycurl.WRITEFUNCTION, lambda x: None)
c.perform()
if c.getinfo(pycurl.HTTP_CODE) != 403:
print(f"{Fore.GREEN}Good Proxy: {proxy}")
self.good += 1
self.pro.write(f"{proxy}\n")
else:
if self.verbose in self.yes:
print(f"{Fore.RED}Bad Proxy: {proxy}")
except pycurl.error:
if self.verbose in self.yes:
print(f"{Fore.RED}Bad Proxy: {proxy}")
except Exception as e:
print(f'{Fore.RED}{e}')
elif self.proxy_type == "socks4":
try:
ip, port = proxy.split(":")[0].replace('\n', ''), proxy.split(":")[1].replace('\n', '')
c = pycurl.Curl()
c.setopt(pycurl.URL, self.TARGET)
c.setopt(pycurl.PROXY, ip)
c.setopt(pycurl.PROXYPORT, int(port))
c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS4)
c.setopt(pycurl.HTTPHEADER, [f'user-agent: {random.choice(self.headers)}'])
c.setopt(pycurl.CONNECTTIMEOUT, self.timeout)
c.setopt(pycurl.WRITEFUNCTION, lambda x: None)
c.perform()
print(f"{Fore.GREEN}Good Proxy: {proxy}")
self.good += 1
self.pro.write(f"{proxy}\n")
except pycurl.error:
if self.verbose in self.yes:
print(f"{Fore.RED}Bad Proxy: {proxy}")
except Exception as e:
print(f'{Fore.RED}{e}')
elif self.proxy_type == "socks5":
try:
ip, port = proxy.split(":")[0].replace('\n', ''), proxy.split(":")[1].replace('\n', '')
c = pycurl.Curl()
c.setopt(pycurl.URL, self.TARGET)
c.setopt(pycurl.PROXY, ip)
c.setopt(pycurl.PROXYPORT, int(port))
c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5)
c.setopt(pycurl.HTTPHEADER, [f'user-agent: {random.choice(self.headers)}'])
c.setopt(pycurl.CONNECTTIMEOUT, self.timeout)
c.setopt(pycurl.WRITEFUNCTION, lambda x: None)
c.perform()
print(f"{Fore.GREEN}Good Proxy: {proxy}")
self.good += 1
self.pro.write(f"{proxy}\n")
except pycurl.error:
if self.verbose in self.yes:
print(f"{Fore.RED}Bad Proxy: {proxy}")
except Exception as e:
print(f'{Fore.RED}{e}')
def start(self):
print(f"{Fore.YELLOW}[CONSOLE] Okay! I'm searching for the best proxies. It may take some time...")
proxys = open(f"{self.proxy_file}", "r", encoding="utf-8", errors='ignore')
proxies = [proxy.replace("\n", "") for proxy in proxys]
threads = []
length = 0
for _ in proxies:
length +=1
while True:
if threading.active_count() <self.thr:
if self.checked < length:
t = threading.Thread(target=self.proxy_checker, args=(proxies[self.checked],))
threads.append(t)
t.start()
self.checked +=1
else:
print(f"\n\n{Fore.RED}[CONSOLE] Closing proxy threads.")
for th in threads:
th.join()
print(f"\n\n{Fore.YELLOW}[CONSOLE] Found {self.good} proxies out of {length}.")
proxys.close()
self.pro.close()
                    return
| StarcoderdataPython |
3340585 | from math import cos, sin, pi
card_w, card_h = map(int, input().split())
env_w, env_h = map(int, input().split())
eps = 1e-10
if card_w == env_h and card_h == env_w:
print('Possible')
exit()
def check_insert(w, h):
if w <= env_w + eps and h <= env_h + eps:
return True
else:
return False
def rotate_on_degree(deg):
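    # Axis-aligned bounding box of the card rotated by `deg` degrees
    # (for 0 <= deg <= 90):  w = card_w*cos(t) + card_h*sin(t),
    #                        h = card_w*sin(t) + card_h*cos(t)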
rad = pi * deg / 180
w = cos(rad) * card_w + sin(rad) * card_h
h = sin(rad) * card_w + cos(rad) * card_h
return w, h
i = 0
while i < 90:
w, h = rotate_on_degree(i)
res = check_insert(w, h)
if res:
print('Possible')
exit()
i += 0.0005
print('Impossible')
| StarcoderdataPython |
1749863 | # -*- coding: utf-8 -*-
import scrapy
class VisirSlurperItem(scrapy.Item):
url = scrapy.Field()
article_text = scrapy.Field()
author = scrapy.Field()
possible_authors = scrapy.Field()
date_published = scrapy.Field()
headline = scrapy.Field()
description = scrapy.Field()
body = scrapy.Field()
id = scrapy.Field()
category = scrapy.Field()
| StarcoderdataPython |
3281340 | ##
## controlpanels.py - part of wxfalsecolor
##
## $Id$
## $URL$
import os
import wx
import wx.lib.foldpanelbar as fpb
# work around for bug in some new wxPython versions
if not 'FPB_DEFAULT_STYLE' in dir(fpb):
fpb.FPB_DEFAULT_STYLE = fpb.FPB_VERTICAL
import wx.lib.buttons as buttons
class BaseControlPanel(wx.Panel):
def __init__(self, parent, wxapp, *args, **kwargs):
"""save wxapp and call self.layout() to create buttons"""
wx.Panel.__init__(self, parent, *args, **kwargs)
self.wxapp = wxapp
self._log = wxapp._log
self._cmdLine = None
self.layout()
def createCenteredGrid(self, layout):
"""arrange controls in centered grid sizer"""
## create grid sizer
grid = wx.GridBagSizer(2,2)
for r,row in enumerate(layout):
c1,c2 = row
if c2:
grid.Add( c1, (r,0), flag=wx.EXPAND|wx.LEFT|wx.RIGHT, border=5)
grid.Add( c2, (r,1), flag=wx.EXPAND|wx.LEFT|wx.RIGHT, border=5)
else:
grid.Add( c1, (r,0), (1,2), flag=wx.EXPAND|wx.LEFT|wx.RIGHT, border=5)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(wx.Panel(self), proportion=1, flag=wx.EXPAND,border=0)
sizer.Add(grid)
sizer.Add(wx.Panel(self), proportion=1, flag=wx.EXPAND,border=0)
self.SetSizer(sizer)
self.SetInitialSize()
def layout(self):
"""create buttons here"""
pass
class FalsecolorControlPanel(BaseControlPanel):
def __init__(self, parent, wxapp, *args, **kwargs):
self.positions = ['WS','W','WN','NW','N','NE','EN','E','ES','SE','S','SW']
BaseControlPanel.__init__(self, parent, wxapp, *args, **kwargs)
def layout(self):
"""create control elements in grid layout"""
## type choice button
self.fc_type = wx.Choice(self, wx.ID_ANY, choices=["color fill", "c-lines", "c-bands"])
self.fc_type.SetStringSelection("color fill")
self.Bind(wx.EVT_CHOICE, self.updateFCButton, self.fc_type)
self.legpos = wx.Choice(self, wx.ID_ANY, choices=self.positions, size=(60,-1))
self.legpos.SetStringSelection("WS")
self.Bind(wx.EVT_CHOICE, self.updatePosition, self.legpos)
self.inside = wx.CheckBox(self, wx.ID_ANY, 'in')
self.Bind(wx.EVT_CHECKBOX, self.updateFCButton, self.inside)
self.label = wx.TextCtrl(self, wx.ID_ANY, "cd/m2", size=(50,-1))
self.scale = wx.TextCtrl(self, wx.ID_ANY, "1000", size=(50,-1))
self.steps = wx.TextCtrl(self, wx.ID_ANY, "8", size=(50,-1))
self.logv = wx.TextCtrl(self, wx.ID_ANY, "2", size=(50,-1))
self.maskv = wx.TextCtrl(self, wx.ID_ANY, "0.001", size=(50,-1))
self.fc_log = wx.CheckBox(self, wx.ID_ANY, 'log')
self.fc_mask = wx.CheckBox(self, wx.ID_ANY, 'mask')
self.fc_col = wx.CheckBox(self, wx.ID_ANY, 'old colours')
self.fc_extr = wx.CheckBox(self, wx.ID_ANY, 'show extremes')
self.fc_zero = wx.CheckBox(self, wx.ID_ANY, '0 based leg')
self.legW = wx.TextCtrl(self, wx.ID_ANY, "100", size=(50,-1))
self.legH = wx.TextCtrl(self, wx.ID_ANY, "200", size=(50,-1))
## 'hidden' option for background image
self._background = ""
## 'falsecolor' button
self.doFCButton = buttons.GenButton(self, wx.ID_ANY, label='falsecolor')
self.doFCButton.Bind(wx.EVT_LEFT_DOWN, self.doFalsecolor)
self.doFCButton.Disable()
## bind events
self.Bind(wx.EVT_TEXT, self.updateFCButton, self.label)
self.Bind(wx.EVT_TEXT, self.updateFCButton, self.scale)
self.Bind(wx.EVT_TEXT, self.updateFCButton, self.steps)
self.Bind(wx.EVT_TEXT, self.updateFCButton, self.logv)
self.Bind(wx.EVT_TEXT, self.updateFCButton, self.maskv)
self.Bind(wx.EVT_TEXT, self.updateFCButton, self.legW)
self.Bind(wx.EVT_TEXT, self.updateFCButton, self.legH)
self.Bind(wx.EVT_CHECKBOX, self.updateFCButton, self.fc_log)
self.Bind(wx.EVT_CHECKBOX, self.updateFCButton, self.fc_mask)
self.Bind(wx.EVT_CHECKBOX, self.updateFCButton, self.fc_col)
self.Bind(wx.EVT_CHECKBOX, self.updateFCButton, self.fc_extr)
self.Bind(wx.EVT_CHECKBOX, self.updateFCButton, self.fc_zero)
layout = [(self.fc_type, None),
(self.inside, self.legpos),
(wx.Panel(self,wx.ID_ANY,size=(-1,10)), None),
(wx.StaticText(self, wx.ID_ANY, "legend:"), self.label),
(wx.StaticText(self, wx.ID_ANY, "scale:"), self.scale),
(wx.StaticText(self, wx.ID_ANY, "steps:"), self.steps),
(wx.Panel(self,wx.ID_ANY,size=(-1,10)), None),
(self.fc_log, self.logv),
(self.fc_mask, self.maskv),
(self.fc_col, None),
(self.fc_extr, None),
(self.fc_zero, None),
(wx.Panel(self,wx.ID_ANY,size=(-1,10)), None),
(wx.StaticText(self, wx.ID_ANY, "leg-w:"), self.legW),
(wx.StaticText(self, wx.ID_ANY, "leg-h:"), self.legH),
(wx.Panel(self,wx.ID_ANY,size=(-1,10)), None),
(self.doFCButton, None),
(wx.Panel(self,wx.ID_ANY,size=(-1,5)), None)]
## arrange in grid
self.createCenteredGrid(layout)
def doFalsecolor(self, event):
"""start conversion to falsecolor and update button"""
args = self.getFCArgs()
if self.wxapp.doFalsecolor(args) == True:
self._cmdLine = " ".join(self.getFCArgs())
self.doFCButton.SetLabel("update fc")
self.doFCButton.Disable()
self.doFCButton.SetBackgroundColour(wx.WHITE)
else:
self.doFCButton.SetLabel("update fc")
self.doFCButton.Enable()
self.doFCButton.SetBackgroundColour(wx.Colour(255,140,0))
self.doFCButton.Refresh()
def enableFC(self, text=""):
"""enable and update doFCButton"""
self._log.debug("enableFC(): text='%s'" % text)
self.doFCButton.Enable()
if text != "":
self.doFCButton.SetLabel(text)
self.doFCButton.Refresh()
def getFCArgs(self):
"""collect command line args as list"""
args = []
#args.extend(["-t", "./tempdir"])
if self.fc_type.GetCurrentSelection() > 0:
args.append(["", "-cl", "-cb"][self.fc_type.GetCurrentSelection()])
position = self.positions[self.legpos.GetCurrentSelection()]
if self.inside.GetValue():
position = "-" + position
args.extend(["-lp", position])
args.extend(["-lw", self.legW.GetValue()])
args.extend(["-lh", self.legH.GetValue()])
args.extend(["-l", self.getFCLableText()])
args.extend(["-s", self.scale.GetValue()])
args.extend(["-n", self.steps.GetValue()])
if self.fc_log.GetValue():
args.extend(["-log", self.logv.GetValue()])
if self.fc_mask.GetValue():
args.extend(["-mask", self.maskv.GetValue()])
if self.fc_col.GetValue():
args.append("-spec")
if self.fc_extr.GetValue():
args.append("-e")
if self.fc_zero.GetValue():
args.append("-z")
if self._background != "":
args.extend(["-p", self._background])
self._log.debug("getFCArgs(): args=%s" % str(args))
return args
def getFCLableText(self):
"""return value of label text box"""
return self.label.GetValue()
def reset(self, label="cd/m2"):
"""reset controls to initial values"""
self._log.debug("resetting falsecolor controls ...")
self.enableFC("convert fc")
self.label.SetValue(label)
self.fc_type.SetSelection(0)
self.fc_log.SetValue(False)
self.fc_mask.SetValue(False)
self.fc_col.SetValue(False)
self.fc_extr.SetValue(False)
self.fc_zero.SetValue(False)
self.scale.SetValue("1000")
self.steps.SetValue("8")
self.legpos.SetStringSelection("WS")
self.inside.SetValue(False)
self.legW.SetValue("100")
self.legH.SetValue("200")
self._background = ""
self._cmdLine = None
def setFromArgs(self, args):
"""set control values from command line args"""
args.append("#")
args.reverse()
ignore = ["-i", "-ip", "-df", "-t", "-r", "-g", "-b"]
set_cmdline = True
while args:
arg = args.pop()
self._log.debug("setFromArgs() arg='%s'" % arg)
if arg == "#":
pass
elif arg == "-d":
pass
elif arg == "-v":
pass
elif arg == "-m":
pass
elif arg == "-nofc":
set_cmdline = False
elif arg in ignore:
v = args.pop()
elif arg == "-p":
self._background = self.validatePath(args.pop())
elif arg == "-cl":
self.fc_type.SetSelection(1)
elif arg == "-cb":
self.fc_type.SetSelection(2)
elif arg == "-e":
self.fc_extr.SetValue(True)
elif arg == "-l":
self.label.SetValue(args.pop())
elif arg == "-log":
self.fc_log.SetValue(True)
self.logv.SetValue(args.pop())
elif arg == "-lh":
self.legH.SetValue(args.pop())
elif arg == "-lw":
self.legW.SetValue(args.pop())
elif arg == "-lp":
v = args.pop()
if v.startswith("-"):
self.inside.SetValue(True)
v = v[1:]
self.legpos.SetStringSelection(v)
elif arg == "-mask":
self.fc_mask.SetValue(True)
self.maskv.SetValue(args.pop())
elif arg == "-n":
self.steps.SetValue(args.pop())
elif arg == "-s":
self.scale.SetValue(args.pop())
elif arg == "-spec":
self.fc_col.SetValue(True)
elif arg == "-z":
self.fc_zero.SetValue(True)
if set_cmdline:
self._cmdLine = " ".join(self.getFCArgs())
else:
## _cmdLine needs to be set for updateFCButton
self._cmdLine = ""
## set button label
self.wxapp.expandControlPanel(1)
self.updateFCButton()
#self.doFCButton.Disable()
def updateFCButton(self, event=None):
"""set label of falsecolor button to 'update'"""
if self._cmdLine == None:
return
newCmd = " ".join(self.getFCArgs())
if self._cmdLine != newCmd:
self.doFCButton.SetLabel("update fc")
self.doFCButton.Enable()
self.doFCButton.SetBackgroundColour(wx.Colour(255,140,0))
else:
self.doFCButton.Disable()
self.doFCButton.SetBackgroundColour(wx.WHITE)
self.doFCButton.Refresh()
def updatePosition(self, event):
"""update height and width when position changes"""
pos = self.positions[self.legpos.GetCurrentSelection()]
pos = pos.replace("-", "")
if pos.startswith("W") or pos.startswith("E"):
self.legW.SetValue("100")
self.legH.SetValue("200")
else:
self.legW.SetValue("400")
self.legH.SetValue("50")
def validatePath(self, path):
"""return path if file exists, otherwise empty string"""
if os.path.isfile(path):
return path
else:
return ""
class DisplayControlPanel(BaseControlPanel):
def layout(self):
"""creates layout of ximage buttons"""
self.acuity = wx.CheckBox(self, wx.ID_ANY, 'acuity loss')
self.glare = wx.CheckBox(self, wx.ID_ANY, 'veiling glare')
self.contrast = wx.CheckBox(self, wx.ID_ANY, 'contrast')
self.colour = wx.CheckBox(self, wx.ID_ANY, 'color loss')
self.exposure = wx.CheckBox(self, wx.ID_ANY, 'exp')
self.expvalue = wx.TextCtrl(self, wx.ID_ANY, "+0", size=(50,-1))
self.linear = wx.CheckBox(self, wx.ID_ANY, 'linear response')
self.centre = wx.CheckBox(self, wx.ID_ANY, 'centre-w. avg')
self.dsprange = wx.CheckBox(self, wx.ID_ANY, 'display range')
self.dsp_min = wx.TextCtrl(self, wx.ID_ANY, "0.5", size=(40,-1))
self.dsp_max = wx.TextCtrl(self, wx.ID_ANY, "200", size=(40,-1))
dsp_box = wx.BoxSizer(wx.HORIZONTAL)
dsp_box.Add(self.dsp_min, proportion=0, flag=wx.EXPAND|wx.ALL, border=0)
        dsp_box.Add(wx.StaticText(self, wx.ID_ANY, "to", style=wx.ALIGN_CENTER), proportion=1, flag=wx.EXPAND|wx.LEFT|wx.RIGHT, border=0)
dsp_box.Add(self.dsp_max, proportion=0, flag=wx.EXPAND|wx.ALL, border=0)
self.Bind(wx.EVT_CHECKBOX, self.updatePcondButton, self.acuity)
self.Bind(wx.EVT_CHECKBOX, self.updatePcondButton, self.glare)
self.Bind(wx.EVT_CHECKBOX, self.updatePcondButton, self.contrast)
self.Bind(wx.EVT_CHECKBOX, self.updatePcondButton, self.colour)
self.Bind(wx.EVT_CHECKBOX, self.OnExposure, self.exposure)
self.Bind(wx.EVT_TEXT, self.OnExpValue, self.expvalue)
self.Bind(wx.EVT_CHECKBOX, self.updatePcondButton, self.linear)
self.Bind(wx.EVT_CHECKBOX, self.updatePcondButton, self.centre)
self.Bind(wx.EVT_CHECKBOX, self.OnDspRange, self.dsprange)
self.Bind(wx.EVT_TEXT, self.OnDspValue, self.dsp_min)
self.Bind(wx.EVT_TEXT, self.OnDspValue, self.dsp_max)
self.pcondButton = buttons.GenButton(self, wx.ID_ANY, label='apply pcond', size=(-1,24))
self.pcondButton.Bind(wx.EVT_BUTTON, self.OnDoPcond)
self.pcondButton.Disable()
saveBitmap = wx.Button(self, wx.ID_ANY, "save bitmap")
saveBitmap.Bind(wx.EVT_BUTTON, self.OnSaveBitmap)
layout = [(self.acuity, None),
(self.glare, None),
(self.contrast, None),
(self.colour, None),
(wx.Panel(self,wx.ID_ANY,size=(-1, 5)), None),
(self.exposure, self.expvalue),
(self.linear, None),
(self.centre, None),
(wx.Panel(self,wx.ID_ANY,size=(-1, 5)), None),
(self.dsprange, None),
(dsp_box, None),
(wx.Panel(self,wx.ID_ANY,size=(-1, 5)), None),
(self.pcondButton, None),
(wx.Panel(self,wx.ID_ANY,size=(-1,10)), None),
(saveBitmap, None),
(wx.Panel(self,wx.ID_ANY,size=(-1, 5)), None)]
## arrange in grid
self.createCenteredGrid(layout)
def disablePcondButton(self):
"""disable pcond button and change colour"""
self.pcondButton.Disable()
self.pcondButton.SetBackgroundColour(wx.WHITE)
def getPcondArgs(self):
"""collect pcond arguments and return as list"""
args = []
if self.acuity.GetValue(): args.append("-a");
if self.glare.GetValue(): args.append("-v");
if self.contrast.GetValue(): args.append("-s");
if self.colour.GetValue(): args.append("-c");
if self.linear.GetValue(): args.append("-l");
if self.centre.GetValue(): args.append("-w");
if self.exposure.GetValue():
args.append("-e")
args.append(self.expvalue.GetValue())
if self.dsprange.GetValue():
try:
black = float(self.dsp_min.GetValue())
white = float(self.dsp_max.GetValue())
if black <= 0:
                    black = 0.5
if white <= 0:
white = 100
range = white/black
args.extend(["-u", "%d" % white, "-d", "%d" % range])
except ValueError:
pass
self._log.debug("getPcondArgs() args=%s" % str(args))
return args
def OnDoPcond(self, event):
"""run pcond and update imagepanel"""
if self.wxapp.rgbeImg:
args = self.getPcondArgs()
if self.wxapp.doPcond(args) == True:
self._cmdLine = " ".join(args)
self.disablePcondButton()
def OnDspRange(self, event):
"""check than min and max display settings are numbers"""
if self.dsprange.GetValue() == False:
self.updatePcondButton(event)
return
try:
black = float(self.dsp_min.GetValue())
if black <= 0:
self.dsp_min.SetValue("0.5")
except ValueError:
self.dsp_min.SetValue("0.5")
try:
white = float(self.dsp_max.GetValue())
if white <= 0:
self.dsp_max.SetValue("200")
except ValueError:
self.dsp_max.SetValue("200")
self.updatePcondButton(event)
def OnDspValue(self, event):
"""set display range to True on dsp value change"""
if self.dsp_min.GetValue() == "" or self.dsp_max.GetValue() == "":
return
else:
try:
black = float(self.dsp_min.GetValue())
white = float(self.dsp_max.GetValue())
self.dsprange.SetValue(True)
self.updatePcondButton(event)
except ValueError:
pass
def OnExposure(self, event):
"""select 'linear' cb if exposure is enabled"""
if self.exposure.GetValue() == True:
self.linear.SetValue(True)
try:
v = float(self.expvalue.GetValue())
except ValueError:
self.expvalue.SetValue("+0")
self.updatePcondButton(event)
def OnExpValue(self, event):
"""enable exposure cb on expvalue change"""
try:
v = float(self.expvalue.GetValue())
self.exposure.SetValue(True)
self.linear.SetValue(True)
self.updatePcondButton(event)
except ValueError:
self.exposure.SetValue(False)
def OnSaveBitmap(self, event):
"""call imagepanel's saveBitmap() function"""
self.wxapp.imagepanel.saveBitmap()
def reset(self):
"""set buttons to initial state"""
self.acuity.SetValue(False)
self.glare.SetValue(False)
self.contrast.SetValue(False)
self.colour.SetValue(False)
self.expvalue.SetValue("+0")
self.exposure.SetValue(False)
self.linear.SetValue(False)
self.centre.SetValue(False)
self.pcondButton.Enable()
self.pcondButton.SetBackgroundColour(wx.WHITE)
def updatePcondButton(self, event):
"""enable pcond button if new options are selected"""
if not self.wxapp.rgbeImg:
self.disablePcondButton()
return
if " ".join(self.getPcondArgs()) != self._cmdLine:
self.pcondButton.Enable()
self.pcondButton.SetBackgroundColour(wx.Colour(255,140,0))
else:
self.disablePcondButton()
class LablesControlPanel(BaseControlPanel):
def __init__(self, parent, wxapp, *args, **kwargs):
wx.Panel.__init__(self, parent, *args, **kwargs)
self.wxapp = wxapp
self._log = wxapp._log
self._layout()
def _layout(self):
sizer = wx.BoxSizer(wx.VERTICAL)
self.loadClearButton = wx.Button(self, wx.ID_ANY, "no data")
self.loadClearButton.Bind(wx.EVT_BUTTON, self.OnShowValues)
self.loadClearButton.Disable()
sizer.Add(self.loadClearButton, proportion=0, flag=wx.EXPAND|wx.LEFT|wx.RIGHT, border=5)
lable = wx.StaticText(self, wx.ID_ANY, "text:")
self.lableText = wx.TextCtrl(self, wx.ID_ANY, "")
self.Bind(wx.EVT_TEXT, self.OnTextChange, self.lableText)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.Add(lable, flag=wx.ALL, border=2)
hsizer.Add(self.lableText, proportion=1)
sizer.Add(hsizer, proportion=0, flag=wx.EXPAND|wx.ALL, border=10)
spacer = wx.Panel(self, wx.ID_ANY, size=(-1,5))
sizer.Add(spacer, proportion=0, flag=wx.EXPAND|wx.ALL, border=0)
saveBitmap = wx.Button(self, wx.ID_ANY, "save bitmap")
saveBitmap.Bind(wx.EVT_BUTTON, self.OnSaveBitmap)
sizer.Add(saveBitmap, proportion=0, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=5)
## add spacer and set size
spacer = wx.Panel(self, wx.ID_ANY, size=(-1,5))
sizer.Add(spacer, proportion=0, flag=wx.EXPAND|wx.ALL, border=0)
self.SetSizer(sizer)
self.SetInitialSize()
def getLableText(self):
return self.lableText.GetValue()
def OnShowValues(self, event):
"""load data from image and clear labels"""
self.wxapp.imagepanel.clearLabels()
self.loadClearButton.Disable()
self.wxapp.statusbar.SetStatusText("loading image data ...")
if self.wxapp.loadValues() == False:
self.loadClearButton.SetLabel("no data")
self.wxapp.statusbar.SetStatusText("Error loading image data!")
elif self.wxapp.loadingCanceled == True:
self.wxapp.statusbar.SetStatusText("Loading of image data canceled.")
self.loadClearButton.SetLabel("load data")
self.loadClearButton.Enable()
else:
## if we have data
self.loadClearButton.SetLabel("clear lables")
self.wxapp.statusbar.SetStatusText("")
if self.wxapp.rgbeImg.isIrridiance():
self.setLable("Lux")
else:
self.setLable("cd/m2")
def OnSaveBitmap(self, event):
"""call imagepanel's saveBitmap() function"""
self.wxapp.imagepanel.saveBitmap()
def OnTextChange(self, event):
"""call imagepanel's saveBitmap() function"""
self.wxapp.imagepanel.UpdateDrawing()
def reset(self):
self.loadClearButton.Enable()
self.loadClearButton.SetLabel("load data")
self.setLable(" ")
def setLable(self, text):
self.lableText.SetValue(text)
class MiscControlPanel(wx.Panel):
def __init__(self, parent, wxapp, *args, **kwargs):
wx.Panel.__init__(self, parent, *args, **kwargs)
self.wxapp = wxapp
self._layout()
def _layout(self):
"""create buttons for various functions"""
sizer = wx.BoxSizer(wx.VERTICAL)
buttons = [("show header", self.wxapp.showHeaders, 20),
("check update", self.wxapp.check_for_update, 10),
("about", self.wxapp.showAboutDialog, 5)]
## create buttons and spacers
for label, func, space in buttons:
button = wx.Button(self, wx.ID_ANY, label)
button.Bind(wx.EVT_BUTTON, func)
sizer.Add(button, proportion=0, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=5)
if space > 0:
spacer = wx.Panel(self, wx.ID_ANY, size=(-1,space))
sizer.Add(spacer, proportion=0, flag=wx.EXPAND|wx.ALL, border=0)
## set sizer and finish
self.SetSizer(sizer)
self.SetInitialSize()
class MyFoldPanelBar(fpb.FoldPanelBar):
"""base for FoldPanelBar in controlls panel"""
def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, *args,**kwargs):
fpb.FoldPanelBar.__init__(self, parent, id, pos, size, *args, **kwargs)
def OnPressCaption(self, event):
"""collapse all other panels on EVT_CAPTIONBAR event"""
fpb.FoldPanelBar.OnPressCaption(self, event)
for i in range(self.GetCount()):
p = self.GetFoldPanel(i)
if p != event._tag:
self.Collapse(p)
class FoldableControlsPanel(wx.Panel):
"""combines individual feature panels"""
def __init__(self, parent, wxapp, style=wx.DEFAULT_FRAME_STYLE):
wx.Panel.__init__(self, parent, id=wx.ID_ANY)
self.wxapp = wxapp
self.SetSize((140,350))
self._layout()
self.Bind(wx.EVT_SIZE, self.setBarSize)
def expand(self, idx):
"""expand element <idx> on self.pnl"""
if not self.pnl:
return False
total = self.pnl.GetCount()
if idx >= total:
return False
for i in range(total):
panel = self.pnl.GetFoldPanel(i)
self.pnl.Collapse(panel)
panel = self.pnl.GetFoldPanel(idx)
self.pnl.Expand(panel)
def _layout(self, vertical=True):
bar = MyFoldPanelBar(self, style=fpb.FPB_DEFAULT_STYLE|fpb.FPB_VERTICAL)
item = bar.AddFoldPanel("lables", collapsed=False)
self.lablecontrols = LablesControlPanel(item, self.wxapp)
bar.AddFoldPanelWindow(item, self.lablecontrols, flags=fpb.FPB_ALIGN_WIDTH)
item = bar.AddFoldPanel("falsecolor", collapsed=True)
self.fccontrols = FalsecolorControlPanel(item, self.wxapp)
bar.AddFoldPanelWindow(item, self.fccontrols, flags=fpb.FPB_ALIGN_WIDTH)
item = bar.AddFoldPanel("display", collapsed=True)
self.displaycontrols = DisplayControlPanel(item, self.wxapp)
bar.AddFoldPanelWindow(item, self.displaycontrols, flags=fpb.FPB_ALIGN_WIDTH)
item = bar.AddFoldPanel("misc", collapsed=True)
pc_controls = MiscControlPanel(item, self.wxapp)
bar.AddFoldPanelWindow(item, pc_controls)
if hasattr(self, "pnl"):
self.pnl.Destroy()
self.pnl = bar
size = self.GetClientSize()
self.pnl.SetDimensions(0, 0, size.GetWidth(), size.GetHeight())
def setBarSize(self, event):
size = event.GetSize()
self.pnl.SetDimensions(0, 0, size.GetWidth(), size.GetHeight())
| StarcoderdataPython |
36991 | <filename>ETLPipeline (1).py
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
'''
input:
messages_filepath: The path of messages dataset.
categories_filepath: The path of categories dataset.
output:
df: The merged dataset
'''
    # load messages dataset from the given path
    disastermessages = pd.read_csv(messages_filepath)
    disastermessages.head()
    # load categories dataset from the given path
    disastercategories = pd.read_csv(categories_filepath)
    disastercategories.head()
df = pd.merge(disastermessages, disastercategories, left_on='id', right_on='id', how='outer')
return df
def clean_data(df):
'''
input:
df: The merged dataset in previous step.
output:
df: Dataset after cleaning.
'''
disastercategories = df.categories.str.split(';', expand = True)
# select the first row of the categories dataframe
row = disastercategories.iloc[0,:]
# use this row to extract a list of new column names for categories.
# one way is to apply a lambda function that takes everything
# up to the second to last character of each string with slicing
disastercategory_colnames = row.apply(lambda x:x[:-2])
print(disastercategory_colnames)
    disastercategories.columns = disastercategory_colnames
for column in disastercategories:
# set each value to be the last character of the string
disastercategories[column] = disastercategories[column].str[-1]
# convert column from string to numeric
        disastercategories[column] = disastercategories[column].astype(int)
disastercategories.head()
    # drop the original categories column from `df` and append the cleaned columns
    df = df.drop('categories', axis=1)
    df = pd.concat([df, disastercategories], axis=1)
    df.head()
# check number of duplicates
print('Number of duplicated rows: {} out of {} samples'.format(df.duplicated().sum(),df.shape[0]))
df.drop_duplicates(subset = 'id', inplace = True)
return df
def save_data(df):
engine = create_engine('sqlite:///disastermessages.db')
df.to_sql('df', engine, index=False)
def main():
    if len(sys.argv) == 4:
        messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
        print('Loading data...')
        df = load_data(messages_filepath, categories_filepath)
        print('Cleaning data...')
        df = clean_data(df)
        # save_data() currently writes to a fixed sqlite file, so
        # database_filepath is parsed but not passed through
        save_data(df)
        print('Cleaned data saved to database!')
    else:
        print('Please provide the filepaths of the messages and categories '\
              'datasets as the first and second argument respectively, as '\
              'well as the filepath of the database to save the cleaned data '\
              'to as the third argument. \n\nExample: python process_data.py '\
              'disaster_messages.csv disaster_categories.csv '\
              'DisasterResponse.db')
if __name__ == '__main__':
main()
| StarcoderdataPython |
137984 | #!/usr/bin/python
"""Example of a guild - Qt hybrid system.
This example shows three ways to connect guild pipelines to Qt
objects. Version 1 (preferred) uses a normal guild pipeline with a
hybrid (guild/Qt) display object. Version 2 uses a hybrid source and a
standard Qt display, which is what you'd do if you don't want to
modify your Qt component. Version 3 introduces a hybrid "bridge"
between a guild source and a Qt display - useful if you don't want to
modify either.
"""
from __future__ import print_function
import re
import subprocess
import sys
import time
from guild.actor import *
from guild.qtactor import ActorSignal, QtActorMixin
from PyQt4 import QtGui, QtCore
def VideoFileReader(file_name):
# get video dims
proc_pipe = subprocess.Popen([
'ffmpeg', '-loglevel', 'info', '-i', file_name,
], stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=10**8)
stdout, stderr = proc_pipe.communicate()
pattern = re.compile('Stream.*Video.* ([0-9]{2,})x([0-9]+)')
for line in str(stderr).split('\n'):
match = pattern.search(line)
if match:
xlen, ylen = map(int, match.groups())
break
else:
print('Could not get video dimensions of', file_name)
return
try:
bytes_per_frame = xlen * ylen * 3
proc_pipe = subprocess.Popen([
'ffmpeg', '-loglevel', 'warning', '-i', file_name,
'-f', 'image2pipe', '-pix_fmt', 'rgb24', '-vcodec', 'rawvideo', '-'
], stdout=subprocess.PIPE, bufsize=bytes_per_frame)
while True:
raw_image = proc_pipe.stdout.read(bytes_per_frame)
if len(raw_image) < bytes_per_frame:
break
yield xlen, ylen, raw_image
finally:
proc_pipe.terminate()
proc_pipe.stdout.close()
class Player(Actor):
def __init__(self, video_file):
self.video_file = video_file
self.paused = False
super(Player, self).__init__()
def gen_process(self):
self.reader = VideoFileReader(self.video_file)
raw_image = None
while True:
yield 1
if not (self.paused and raw_image):
try:
xlen, ylen, raw_image = next(self.reader)
except StopIteration:
break
image = QtGui.QImage(
raw_image, xlen, ylen, QtGui.QImage.Format_RGB888)
self.output(image)
time.sleep(1.0/25)
@actor_method
def set_paused(self, paused):
self.paused = paused
def onStop(self):
self.reader.close()
class PlayerQt(QtActorMixin, QtCore.QObject):
signal = QtCore.pyqtSignal(QtGui.QImage)
def __init__(self, video_file):
self.video_file = video_file
self.paused = False
super(PlayerQt, self).__init__()
def gen_process(self):
self.reader = VideoFileReader(self.video_file)
raw_image = None
while True:
yield 1
if not (self.paused and raw_image):
try:
xlen, ylen, raw_image = next(self.reader)
except StopIteration:
break
image = QtGui.QImage(
raw_image, xlen, ylen, QtGui.QImage.Format_RGB888)
self.signal.emit(image)
time.sleep(1.0/25)
@actor_method
@QtCore.pyqtSlot(bool)
def set_paused(self, paused):
self.paused = paused
def onStop(self):
self.reader.close()
class Display(QtGui.QLabel):
@QtCore.pyqtSlot(QtGui.QImage)
def show_frame(self, frame):
pixmap = QtGui.QPixmap.fromImage(frame)
self.setPixmap(pixmap)
class DisplayActor(QtActorMixin, QtGui.QLabel):
@actor_method
def show_frame(self, frame):
pixmap = QtGui.QPixmap.fromImage(frame)
self.setPixmap(pixmap)
input = show_frame
class MainWindow(QtGui.QMainWindow):
def __init__(self, video_file):
super(MainWindow, self).__init__()
self.setWindowTitle("Guild video player")
# create guild pipeline
# version 1: guild player -> hybrid display
self.player = Player(video_file).go()
display = DisplayActor().go()
pipeline(self.player, display)
self.actors = [self.player, display]
# version 2: hybrid player -> Qt display
## self.player = PlayerQt(video_file).go()
## display = Display()
## self.player.signal.connect(display.show_frame)
## self.actors = [self.player]
# version 3: guild player -> hybrid bridge -> Qt display
## self.player = Player(video_file).go()
## bridge = ActorSignal().go()
## display = Display()
## pipeline(self.player, bridge)
## bridge.signal.connect(display.show_frame)
## self.actors = [self.player, bridge]
# central widget
widget = QtGui.QWidget()
grid = QtGui.QGridLayout()
grid.setColumnStretch(4, 1)
widget.setLayout(grid)
self.setCentralWidget(widget)
grid.addWidget(display, 0, 0, 1, 6)
# pause button
pause_button = QtGui.QCheckBox('pause')
pause_button.clicked.connect(self.player.set_paused)
pause_button.setShortcut('Space')
grid.addWidget(pause_button, 1, 0)
# quit button
quit_button = QtGui.QPushButton('quit')
quit_button.clicked.connect(self.shutdown)
quit_button.setShortcut('Ctrl+Q')
grid.addWidget(quit_button, 1, 5)
self.show()
def shutdown(self):
stop(*self.actors)
wait_for(*self.actors)
QtGui.QApplication.instance().quit()
if len(sys.argv) != 2:
print('usage: %s video_file' % sys.argv[0])
sys.exit(1)
app = QtGui.QApplication([])
main = MainWindow(sys.argv[1])
app.exec_()
| StarcoderdataPython |
3331105 | <reponame>McStasMcXtrace/ufit
# -*- coding: utf-8 -*-
# *****************************************************************************
# ufit, a universal scattering fitting suite
#
# Copyright (c) 2013-2019, <NAME> and contributors. All rights reserved.
# Licensed under a 2-clause BSD license, see LICENSE.
# *****************************************************************************
"""Model of S(q,w) with resolution convolution for TAS."""
import inspect
from numpy import zeros
from ufit.rescalc import resmat, calc_MC, calc_MC_cluster, calc_MC_mcstas, \
load_cfg, load_par, PARNAMES, CFGNAMES, plot_resatpoint
from ufit.models.base import Model
from ufit.param import prepare_params, update_params
from ufit.pycompat import string_types
__all__ = ['ConvolvedScatteringLaw']
class ConvolvedScatteringLaw(Model):
"""Model using a scattering law given as a function and a set of resolution
parameters for a triple-axis spectrometer.
Signature of the S(q,w) function given by *sqw*::
sqw(h, k, l, E, QE0, Sigma, par0, par1, ...)
    If the function is given as a string, it is taken as ``module:function``.
The calculation is then clustered using SSH and ufit.cluster.
h,k,l,E are arrays of the Monte-Carlo point coordinates, QE0 is the center
of the ellipse, Sigma are the ellipse widths around the center. A constant
background is always included as a parameter named "bkgd".
*instfiles* must be ('instr.cfg', 'instr.par').
Initial parameters can include parameters named cfg_... and par_...; this
will add instrument configuration and parameters to the fitting parameters.
They can be given initial values or a None value, in which case the initial
value will come from the .cfg or .par file.
Use cfg_ALL=1 or par_ALL=1 to add all cfg or par entries to the fit
parameters (this is mostly useful to interactively play around with the
resolution in one scan).
"""
nsamples = -1 # for plotting: plot only 4x as many points as datapoints
def __init__(self, sqw, instfiles, NMC=2000, name=None, cluster=False,
mcstas=None, matrix=None, mathkl=None, **init):
self._cluster = False
self._mcstas = mcstas
if isinstance(sqw, string_types):
modname, funcname = sqw.split(':')
mod = __import__(modname)
code = open(mod.__file__.rstrip('c')).read()
self._sqwfunc = funcname
self._sqwcode = code
self._sqw = getattr(mod, funcname)
self.name = funcname
self._cluster = cluster
else: # cannot cluster
self._sqw = sqw
self.name = name or sqw.__name__
init['NMC'] = str(NMC) # str() makes it a fixed parameter
instparnames = []
par_all = False
cfg_all = False
self._instpars = []
cfg_orig = load_cfg(instfiles[0])
par_orig = load_par(instfiles[1])
for par in init:
if par == 'par_ALL':
par_all = True
elif par == 'cfg_ALL':
cfg_all = True
elif par.startswith('par_'):
if par[4:] in PARNAMES:
self._instpars.append(par[4:])
instparnames.append(par)
if init[par] is None:
init[par] = str(par_orig[par[4:]])
else:
raise Exception('invalid instrument parameter: %r' % par)
elif par.startswith('cfg_'):
if par[4:] in CFGNAMES:
self._instpars.append(CFGNAMES.index(par[4:]))
instparnames.append(par)
if init[par] is None:
init[par] = str(cfg_orig[CFGNAMES.index(par[4:])])
else:
raise Exception('invalid instrument configuration: %r' % par)
if par_all:
instparnames = [pn for pn in instparnames if not pn.startswith('par_')]
self._instpars = [pn for pn in self._instpars if not isinstance(pn, string_types)]
instparnames.extend('par_' + pn for pn in PARNAMES)
self._instpars.extend(PARNAMES)
for pn in PARNAMES:
init['par_' + pn] = str(par_orig[pn])
if cfg_all:
instparnames = [pn for pn in instparnames if not pn.startswith('cfg_')]
self._instpars = [ip for ip in self._instpars if isinstance(ip, string_types)]
instparnames.extend('cfg_' + x for x in CFGNAMES)
self._instpars.extend(range(len(CFGNAMES)))
for i in range(len(CFGNAMES)):
init['cfg_' + CFGNAMES[i]] = str(cfg_orig[i])
# numba compat
arg_sqw = getattr(self._sqw, 'py_func', self._sqw)
self._pvs = self._init_params(name,
['NMC', 'bkgd'] +
inspect.getargspec(arg_sqw)[0][6:] +
instparnames, init)
self._ninstpar = len(instparnames)
self._resmat = resmat(cfg_orig, par_orig)
if matrix is not None:
self._resmat.fixed_res = True
self._resmat.setNPMatrix(matrix, mathkl)
# self._resmat.NP = matrix
self._resmat.R0_corrected = 1.0
def fcn(self, p, x):
parvalues = [p[pv] for pv in self._pvs]
# t1 = time.time()
# print 'Sqw: values = ', parvalues
if self._ninstpar:
sqwpar = parvalues[2:-self._ninstpar]
for pn, pv in zip(self._instpars, parvalues[-self._ninstpar:]):
if isinstance(pn, string_types):
self._resmat.par[pn] = pv
else:
self._resmat.cfg[pn] = pv
use_caching = False
else:
sqwpar = parvalues[2:]
use_caching = True
if self._mcstas:
res = calc_MC_mcstas(x, sqwpar, self._sqw, self._resmat,
parvalues[0])
elif self._cluster:
res = calc_MC_cluster(x, sqwpar, self._sqwcode,
self._sqwfunc, self._resmat, parvalues[0],
use_caching=use_caching)
else:
res = calc_MC(x, sqwpar, self._sqw, self._resmat,
parvalues[0], use_caching=use_caching)
res += parvalues[1] # background
# t2 = time.time()
# print 'Sqw: iteration = %.3f sec' % (t2-t1)
return res
def resplot(self, h, k, l, e):
self._resmat.sethklen(h, k, l, e)
plot_resatpoint(self._resmat.cfg, self._resmat.par, self._resmat)
def simulate(self, data):
varying, varynames, dependent, _ = prepare_params(self.params, data.meta)
pd = dict((p.name, p.value) for p in self.params)
update_params(dependent, data.meta, pd)
yy = self.fcn(pd, data.x)
new = data.copy()
new.y = yy
new.dy = zeros(len(yy))
return new
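
# --- Illustrative sketch (an assumption, not part of ufit): a minimal S(q,w)
# function matching the signature documented in the class docstring above.
# h, k, l, E are arrays of Monte-Carlo point coordinates; E0, gamma and amp
# become fit parameters of the model, in addition to NMC and bkgd.
def example_sqw(h, k, l, E, QE0, Sigma, E0, gamma, amp):
    """Lorentzian intensity centred at E0, independent of q (toy model)."""
    return amp * gamma**2 / ((E - E0)**2 + gamma**2)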
| StarcoderdataPython |
97329 | from datetime import timedelta, datetime
from celery.schedules import crontab
from celery.task.base import periodic_task
from couchlog.models import ExceptionRecord
from django.conf import settings
@periodic_task(run_every=crontab(minute=0, hour=0), queue=getattr(settings, 'CELERY_PERIODIC_QUEUE', 'celery'))
def purge_old_logs():
key = datetime.now() - timedelta(weeks=52)
results = ExceptionRecord.view(
"couchlog/all_by_date",
reduce=False,
startkey=[key.isoformat()],
descending=True,
limit=1000,
include_docs=False)
db = ExceptionRecord.get_db()
docs = []
for result in results:
docs.append({
'_id': result['id'],
'_rev': db.get_rev(result['id']),
'_deleted': True,
})
    db.bulk_save(docs, use_uuids=False)
| StarcoderdataPython |
110063 | <filename>tests/test_action_lib_remove_old_snapshots.py
from manage_iq_base_action_test_case import ManageIQBaseActionTestCase
from lib.remove_old_snapshots import RemoveOldSnapshots
from st2common.runners.base_action import Action
import mock
import datetime
import requests
class TestRemoveOldSnapshots(ManageIQBaseActionTestCase):
__test__ = True
action_cls = RemoveOldSnapshots
def _mock_response(self, status=200, content="CONTENT", json_data=None, raise_for_status=None):
"""Since we will be makeing alot of rest calls that
all raise for status, we are creating this helper
method to build the mock reponse for us to reduce
duplicated code.
"""
mock_resp = mock.Mock()
mock_resp.raise_for_status = mock.Mock()
if raise_for_status:
mock_resp.raise_for_status.side_effect = raise_for_status
mock_resp.status_code = status
mock_resp.content = content
if json_data:
mock_resp.json = mock.Mock(
return_value=json_data
)
return mock_resp
def test_init(self):
action = self.get_action_instance({})
self.assertIsInstance(action, RemoveOldSnapshots)
self.assertIsInstance(action, Action)
def test_create_session(self):
action = self.get_action_instance({})
result = action.create_session('user', 'pass')
self.assertIsInstance(result, requests.Session)
self.assertEqual(result.auth, ('user', 'pass'))
self.assertEqual(result.verify, False)
def test_get_vms(self):
action = self.get_action_instance({})
action.server = "server.tld"
mock_response = mock.MagicMock()
mock_response.json.return_value = {'resources': 'vms_json'}
mock_session = mock.MagicMock()
mock_session.get.return_value = mock_response
action.session = mock_session
result = action.get_vms()
self.assertEquals(result, 'vms_json')
mock_session.get.assert_called_with("https://server.tld/api/vms?"
"expand=resources&attributes=id,"
"snapshots,name")
mock_response.raise_for_status.assert_called_with()
def test_compile_regexes(self):
action = self.get_action_instance({})
regex_list = [".*", "^#DONTDELETE$"]
result = action.compile_regexes(regex_list)
self.assertIsInstance(result, list)
self.assertEquals(len(result), len(regex_list))
for i in range(len(result)):
self.assertEquals(result[i].pattern, regex_list[i])
def test_matches_pattern_list_match(self):
action = self.get_action_instance({})
regex_list = ["abc123", "^.*#DONTDELETE$"]
pattern_list = action.compile_regexes(regex_list)
name = "xxx #DONTDELETE"
result = action.matches_pattern_list(name, pattern_list)
self.assertTrue(result)
def test_matches_pattern_list_no_match(self):
action = self.get_action_instance({})
regex_list = ["abc123", "^#DONTDELETE$"]
pattern_list = action.compile_regexes(regex_list)
name = "xxx"
result = action.matches_pattern_list(name, pattern_list)
self.assertFalse(result)
# Test run when there are no old snapshots on any VMs
@mock.patch("lib.remove_old_snapshots.RemoveOldSnapshots.create_session")
@mock.patch("lib.remove_old_snapshots.RemoveOldSnapshots.current_time_utc")
def test_run_no_old_snapshots(self, mock_current_time_utc, mock_create_session):
action = self.get_action_instance({})
# expected
kwargs_dict = {'server': 'test.dev.encore.tech',
'username': 'user',
'password': '<PASSWORD>',
'max_age_days': 3,
'name_ignore_regexes': ['^.*#DONTDELETE$']}
test_get = {'resources': [{'name': 'test',
'snapshots': [{'created_on': '2018-01-04T00:00:00Z',
'vm_or_template_id': '1000000000001',
'id': '1000000000002',
'name': 'test1'},
{'created_on': '2018-01-05T00:00:00Z',
'vm_or_template_id': '1000000000003',
'id': '1000000000004',
'name': 'test2'}]}]}
# mocks
# One of the dates in "expected_results" is 4 days older than the following date
mock_current_time_utc.return_value = datetime.datetime(2018, 1, 5, 0, 0)
mock_session = mock.MagicMock()
mock_create_session.return_value = mock_session
mock_response = self._mock_response(json_data=test_get)
mock_session.get.return_value = mock_response
# execute
result_value = action.run(**kwargs_dict)
expected_results = (True, {'deleted_snapshots': [],
'ignored_snapshots': []})
# asserts
mock_session.get.assert_called_with("https://test.dev.encore.tech/api/"
"vms?expand=resources&attributes=id,snapshots,name")
self.assertTrue(mock_response.raise_for_status.called)
self.assertEqual(result_value, expected_results)
# Test run when there are old snapshots on a VM
@mock.patch("lib.remove_old_snapshots.RemoveOldSnapshots.create_session")
@mock.patch("lib.remove_old_snapshots.RemoveOldSnapshots.current_time_utc")
def test_run_old_snapshots(self, mock_current_time_utc, mock_create_session):
action = self.get_action_instance({})
# expected
kwargs_dict = {'server': 'test.dev.encore.tech',
'username': 'user',
'password': '<PASSWORD>',
'max_age_days': 3,
'name_ignore_regexes': ['^.*#DONTDELETE$']}
test_get = {'resources': [{'name': 'test',
'snapshots': [{'created_on': '2018-01-01T00:00:00Z',
'vm_or_template_id': '1000000000001',
'id': '1000000000002',
'name': 'test1'},
{'created_on': '2018-01-05T00:00:00Z',
'vm_or_template_id': '1000000000003',
'id': '1000000000004',
'name': 'test2'},
{'created_on': '2018-01-01T00:00:00Z',
'vm_or_template_id': '1000000000005',
'id': '1000000000005',
'name': 'abc #DONTDELETE'}]}]}
expected_results = (True, {'deleted_snapshots': ['test: test1'],
'ignored_snapshots': ['test: abc #DONTDELETE']})
# mocks
# One of the dates in "expected_results" is 4 days older than the following date
mock_current_time_utc.return_value = datetime.datetime(2018, 1, 5, 0, 0)
mock_session = mock.MagicMock()
mock_create_session.return_value = mock_session
mock_response = self._mock_response(json_data=test_get)
mock_session.get.return_value = mock_response
# execute
result_value = action.run(**kwargs_dict)
# asserts
mock_session.get.assert_called_with("https://test.dev.encore.tech/api/"
"vms?expand=resources&attributes=id,snapshots,name")
mock_session.delete.assert_called_with("https://test.dev.encore.tech/api/vms/"
"1000000000001/snapshots/1000000000002")
self.assertTrue(mock_response.raise_for_status.called)
self.assertEqual(result_value, expected_results)
| StarcoderdataPython |
# Finds a root of an equation from two initial guesses p0 and p1 using the Secant Method.
import math
def equation(x):
return math.cos(x) - x #Original equation.
def iterate(p1, p0, i):
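    # Secant update: p = p1 - f(p1) * (p1 - p0) / (f(p1) - f(p0))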
p = p1 - ((equation(p1) * (p1 - p0)) / (equation(p1) - equation(p0)))
print("\n(p" + str(i) + ", f(p" + str(i) + ")) = " + str((p, equation(p))))
    if abs(equation(p)) > 1e-12:
        iterate(p, p1, i + 1)
def main():
p0 = float(input("p0 = "))
# p0 = math.radians(int(input("p0 = ")))
# p1 = int(input("p1 = "))
    p1 = float(input("p1 = "))
print("\n(p0, f(p0)) = " + str((p0, equation(p0))))
print("\n(p1, f(p1)) = " + str((p1, equation(p1))))
iterate(p1, p0, 2)
if __name__ == "__main__":
main() | StarcoderdataPython |
1795246 | """
K-Means
"""
import logging as log
import numpy as np
import random
log.basicConfig(format="%(message)s", level=log.INFO)
def load_data(file_path):
"""加载数据
源数据格式为多行,每行为两个浮点数,分别表示 (x,y)
"""
data = []
with open(file_path, 'r', encoding='utf-8') as fr:
for line in fr.read().splitlines():
line_float = list(map(float, line.split('\t')))
data.append(line_float)
data = np.array(data)
return data
def score_euclidean(a, b):
"""计算两个点之间的欧式距离"""
s = np.sqrt(np.sum(np.power(a - b, 2)))
return s
def rand_center(data, k):
"""随机采样 k 个样本作为聚类中心"""
centers = np.array(random.sample(list(data), k))
return centers
def k_means(data, k, max_iter=100, score=score_euclidean, e=1e-6):
"""
    K-Means algorithm.
    Typical termination conditions for K-Means are:
    1. No sample changes its cluster assignment
    2. The maximum number of iterations is reached
    3. The required precision is achieved (?)
    Returns the cluster centers and the clustering result.
"""
    # number of samples
    n = len(data)
    # store the results
    # each entry is a pair [label, score]: the cluster the sample belongs to and its distance to that centroid
ret = np.array([[-1, np.inf]] * n)
    # pick the initial cluster centers
    centers = rand_center(data, k)
    changed = True  # whether any sample changed its cluster assignment
    n_iter = 0  # iteration counter
while changed and n_iter < max_iter:
changed = False
n_iter += 1
        for i in range(n):  # for each sample
i_score = np.inf
i_label = -1
            for j in range(k):  # compare against each centroid
s_ij = score(data[i], centers[j])
if s_ij < i_score:
i_score = s_ij
i_label = j
            if ret[i, 0] != i_label:  # the sample's cluster assignment changed
changed = True
ret[i, :] = i_label, i_score
        # update the cluster centers
        log.info(centers)
        for i in range(k):
            data_i = data[ret[:, 0] == i]  # samples assigned to cluster i
            centers[i, :] = np.mean(data_i, axis=0)  # new center is the mean of its samples
    log.info(n_iter)  # number of iterations
return centers, ret
def _test():
""""""
file_path = r"./data.txt"
data = load_data(file_path)
print(data)
print(np.shape(data)[1])
s = score_euclidean(data[0], data[1])
print(s)
centers = rand_center(data, 3)
print(centers)
if __name__ == '__main__':
""""""
# _test()
file_path = "./data.txt"
data = load_data(file_path)
centers, ret = k_means(data, 3)
# print(ret)
| StarcoderdataPython |
1771281 | from setuptools import setup
setup(
name='PyMoe',
version='1.0.7',
packages=['Pymoe', 'Pymoe.Anilist', 'Pymoe.Kitsu', 'Pymoe.VNDB', 'Pymoe.Bakatsuki'],
url='https://github.com/ccubed/PyMoe',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description="PyMoe is the only lib you'll ever need if you need the animu or mangu on the Python Platform. It supports AniList, VNDB, Kitsu and AniDB.",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries'
],
keywords="Anime Manga LN VN VNDB Anilist Kitsu AniDB MyAnimeList MAL Bakatsuki",
install_requires=['requests', 'bs4', 'ujson']
)
| StarcoderdataPython |
3229431 | <gh_stars>0
from django.test import TestCase, Client
from chat.views import *
from socket import socket
import os
import json
class UserTest(TestCase):
def setUp(self) -> None:
self.client = Client()
self.email = '<EMAIL>'
self.user_id = ''
self.login_data = ''
self.socket = ('127.0.0.1', int(os.environ.get('TCP_SERVER_PORT')))
def login(self, use_id=False, password='<PASSWORD>'):
login_data = {
"identity": self.email,
"password": password
}
if use_id:
login_data['identity'] = str(self.user_id)
response = self.client.post('/users/auth', login_data, content_type='application/json')
self.assertEqual(len(response.json()['data']['token']), 64)
self.login_data = response.json()['data']
def register_and_login(self):
"""测试注册之后登录"""
register_data = {
"email": self.email,
"nickname": "楷禅",
"password": "<PASSWORD>",
"code": ''
}
        # get the verification code
response = self.client.get('/code', {'email': self.email, 'type': 1})
self.assertEqual(response.json().get('mes'), '')
register_data['code'] = VerCode.objects.get(email=self.email).content
response = self.client.post('/users/', register_data, content_type='application/json')
self.user_id = response.json().get('data')
        # after successful registration the user id should be between 10000 and 99999
self.assertIn(self.user_id, range(10000, 100000))
        # test logging in with the email address
self.login()
        # test logging in with the account id
self.login(use_id=True)
def get_userinfo(self):
""""""
response = self.client.get(f'/users/{self.user_id}')
self.assertEqual(response.json()['data']['email'], self.email)
def reset_password(self):
data = {
'email': self.email,
'password': '<PASSWORD>',
'code': ''
}
        # get the verification code
response = self.client.get('/code', {'email': self.email, 'type': 2})
self.assertEqual(response.json().get('mes'), '')
data['code'] = VerCode.objects.get(email=self.email).content
response = self.client.put('/users/password', data, content_type='application/json')
self.assertEqual(response.json().get('mes'), '')
def get_tcp_connection(self):
client = socket()
client.connect(self.socket)
client.sendall(json.dumps({
'Authorization': self.login_data.get('token')
}).encode())
data = json.loads(client.recv(4096))
self.assertEqual(data.get('mes'), '')
def test_user(self):
self.register_and_login()
self.get_userinfo()
self.reset_password()
| StarcoderdataPython |
190987 | import bpy
import sys
import addon_utils
from pathlib import Path
def get_python_path():
if bpy.app.version < (2,9,0):
python_path = bpy.app.binary_path_python
else:
python_path = sys.executable
return Path(python_path)
python_path = get_python_path()
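# Resolve Blender's install directories plus the user and addon script paths.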
blender_path = Path(bpy.app.binary_path)
blender_directory = blender_path.parent
use_own_python = blender_directory in python_path.parents
version = bpy.app.version
scripts_folder = blender_path.parent / f"{version[0]}.{version[1]}" / "scripts"
user_addon_directory = Path(bpy.utils.user_resource('SCRIPTS', "addons"))
addon_directories = tuple(map(Path, addon_utils.paths()))
| StarcoderdataPython |
87639 | <reponame>mcvine/mcvine
#!/usr/bin/env python
#
#
import unittest
import journal
svq_f = lambda qx,qy,qz: qx*qx
def createSvq():
import histogram as H
qxaxis = H.axis( 'Qx', boundaries = H.arange(-5, 5.0, 0.1) )
qyaxis = H.axis( 'Qy', boundaries = H.arange(-5, 6.0, 0.1) )
qzaxis = H.axis( 'Qz', boundaries = H.arange(-5, 7.0, 0.1) )
svq = H.histogram(
'svq',
[ qxaxis, qyaxis, qzaxis ],
fromfunction = svq_f
)
return svq
import mcni, mccomposite, mccomponents.sample as ms, mccomponents.homogeneous_scatterer as mh
class SvQkernel_TestCase(unittest.TestCase):
def test1(self):
'SvQkernel'
svq = createSvq()
gridsvq = ms.gridsvq( svq )
svqkernel = ms.svqkernel(
1., 1.,
SvQ=gridsvq,
)
csvqkernel = mh.scattererEngine( svqkernel )
ev = mcni.neutron( r = (-5,0,0), v = (3000,0,0) )
self.assertAlmostEqual( csvqkernel.scattering_coefficient(ev), 1 )
self.assertAlmostEqual( csvqkernel.absorption_coefficient(ev), 2200./3000. )
csvqkernel.scatter(ev)
print(dir(csvqkernel))
return
pass # end of SvQkernel_TestCase
if __name__ == "__main__": unittest.main()
# End of file
| StarcoderdataPython |
1727292 | <reponame>JiriVales/orienteering-tools<filename>python-scripts/vegetation/Create-cultivated-land-(height&RUIAN).py
##Create cultivated land (height&RUIAN)=name
##ruianparcelswithattributes=vector
##expressionforextractcorrespondingpolygonsfromruianbyattribute=string"druhpozemk" IN ('2')
##vegetationheightopenland=vector
##cultivatedland=output vector
outputs_QGISSELECTBYEXPRESSION_1=processing.runalg('qgis:selectbyexpression', ruianparcelswithattributes,expressionforextractcorrespondingpolygonsfromruianbyattribute,0)
outputs_QGISSAVESELECTEDFEATURES_1=processing.runalg('qgis:saveselectedfeatures', outputs_QGISSELECTBYEXPRESSION_1['RESULT'],None)
outputs_QGISEXTRACTBYLOCATION_1=processing.runalg('qgis:extractbylocation', vegetationheightopenland,outputs_QGISSAVESELECTEDFEATURES_1['OUTPUT_LAYER'],['intersects'],1.0,cultivatedland) | StarcoderdataPython |
4825753 | import pyexcel as pe
import xlrd
import logging
def process_excel(filename):
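    # CSV input yields a single sheet; Excel workbooks yield every sheet along with its merged-cell ranges from xlrd.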
if filename.endswith(".csv"):
# add merged cells here please
sheet = pe.get_sheet(file_name=filename)
yield sheet, "name", ()
else:
book = pe.get_book(file_name=filename)
sheets = book.to_dict()
booklrd = xlrd.open_workbook(filename)
for name in sheets.keys():
sheet = book[name]
fill_merged_cells(sheet, booklrd.sheet_by_name(name).merged_cells)
yield sheet, name, booklrd.sheet_by_name(name).merged_cells
# filling the empty cells which are located in the merged cells in reality but are read as empty by pyexcel
def fill_merged_cells(sheet, merged_blocks):
for block in merged_blocks:
val = sheet[block[0], block[2]]
for row in range(block[0], block[1]):
for col in range(block[2], block[3]):
try:
sheet[row, col] = val
                except Exception:
# There is a difference in the way xlrd and pyexcel handle merged cells
# Sometimes we end up accessing out of range cells
logging.error("Tried to access out of range cell.")
break
| StarcoderdataPython |
1683898 | import os
import sys
import tempfile
import contextlib
import shutil
import distutils
import setuptools
import setuptools.command.build_ext
import cppimport.config
from cppimport.filepaths import make_absolute
if sys.version_info[0] == 2:
import StringIO as io
else:
import io
@contextlib.contextmanager
def stdchannel_redirected(stdchannel):
"""
Redirects stdout or stderr to a StringIO object. As of python 3.4, there is a
standard library contextmanager for this, but backwards compatibility!
"""
try:
s = io.StringIO()
old = getattr(sys, stdchannel)
setattr(sys, stdchannel, s)
yield s
finally:
setattr(sys, stdchannel, old)
# Subclass setuptools Extension to add a parameter specifying where the shared
# library should be placed after being compiled
class ImportCppExt(setuptools.Extension):
def __init__(self, libdest, *args, **kwargs):
self.libdest = libdest
setuptools.Extension.__init__(self, *args, **kwargs)
# Subclass setuptools build_ext to put the compiled shared library in the
# appropriate place in the source tree.
class BuildImportCppExt(setuptools.command.build_ext.build_ext):
def copy_extensions_to_source(self):
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = self.get_ext_filename(fullname)
src_filename = os.path.join(self.build_lib, filename)
dest_filename = os.path.join(ext.libdest, os.path.basename(filename))
distutils.file_util.copy_file(
src_filename, dest_filename,
verbose = self.verbose, dry_run = self.dry_run
)
# Patch for parallel compilation with distutils
# From: http://stackoverflow.com/questions/11013851/speeding-up-build-process-with-distutils
def parallel_compile(self, sources, output_dir = None, macros = None,
include_dirs = None, debug = 0, extra_preargs = None, extra_postargs = None,
depends = None):
# these lines are copied directly from distutils.ccompiler.CCompiler
macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
output_dir, macros, include_dirs, sources, depends, extra_postargs
)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
# Determine the number of compilation threads. Unless there are special
# circumstances, this is the number of cores on the machine
N = 1
try:
import multiprocessing
import multiprocessing.pool
N = multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
pass
def _single_compile(obj):
try:
src, ext = build[obj]
except KeyError:
return
# import time
# start = time.time()
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# end = time.time()
# print("took " + str(end - start) + " to compile " + str(obj))
# imap is evaluated on demand, converting to list() forces execution
list(multiprocessing.pool.ThreadPool(N).imap(_single_compile,objects))
return objects
def build_module(module_data):
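    # Build the extension with setuptools in a temporary directory, then copy the shared library next to the source file.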
build_path = tempfile.mkdtemp()
full_module_name = module_data['fullname']
filepath = module_data['filepath']
cfg = module_data['cfg']
module_data['abs_include_dirs'] = [
make_absolute(module_data['filedirname'], d)
for d in cfg.get('include_dirs', [])
] + [os.path.dirname(filepath)]
module_data['abs_library_dirs'] = [
make_absolute(module_data['filedirname'], d)
for d in cfg.get('library_dirs', [])
]
module_data['dependency_dirs'] = (
module_data['abs_include_dirs'] + [module_data['filedirname']]
)
module_data['extra_source_filepaths'] = [
make_absolute(module_data['filedirname'], s)
for s in cfg.get('sources', [])
]
ext = ImportCppExt(
os.path.dirname(filepath),
full_module_name,
language = 'c++',
sources = (
module_data['extra_source_filepaths'] +
[module_data['rendered_src_filepath']]
),
include_dirs = module_data['abs_include_dirs'],
extra_compile_args = cfg.get('extra_compile_args', []),
extra_link_args = cfg.get('extra_link_args', []),
library_dirs = module_data['abs_library_dirs'],
libraries = cfg.get('libraries', [])
)
args = ['build_ext', '--inplace']
args.append('--build-temp=' + build_path)
args.append('--build-lib=' + build_path)
if cppimport.config.quiet:
args.append('-q')
else:
args.append('-v')
setuptools_args = dict(
name = full_module_name,
ext_modules = [ext],
script_args = args,
cmdclass = {
'build_ext': BuildImportCppExt
}
)
# Monkey patch in the parallel compiler if requested.
py33orgreater = sys.version_info[0] >= 3 and sys.version_info[1] >= 3
parallelize = cfg.get('parallel') and py33orgreater
if parallelize:
old_compile = distutils.ccompiler.CCompiler.compile
distutils.ccompiler.CCompiler.compile = parallel_compile
if cppimport.config.quiet:
with stdchannel_redirected("stdout"):
with stdchannel_redirected("stderr"):
setuptools.setup(**setuptools_args)
else:
setuptools.setup(**setuptools_args)
# Remove the parallel compiler to not corrupt the outside environment.
if parallelize:
distutils.ccompiler.CCompiler.compile = old_compile
shutil.rmtree(build_path)
| StarcoderdataPython |
199918 | <filename>tests/generator/test_legacy_array.py
from responses import RequestsMock
from tests import loader
def test_formula_prefix(responses: RequestsMock, tmpdir):
responses.add(
responses.GET,
url="http://test/",
json={
"swagger": "2.0",
"paths": {
"/test": {
"get": {
"responses": {200: {"description": "successful operation"}},
}
}
},
},
match_querystring=True,
)
generated_functions = loader.load(
tmpdir,
{
"legacy": {
"open_api": {"definition": "http://test/"},
"formulas": {"legacy_array": {"lock_excel": True}},
}
},
)
responses.add(
responses.GET,
url="http://test/test",
json={},
match_querystring=True,
)
assert generated_functions.legacy_legacy_get_test() == [[""]]
| StarcoderdataPython |
3367127 | # Copyright (c) 2020. Author: <NAME>, <EMAIL>
# Ref: https://github.com/yanx27/Pointnet_Pointnet2_pytorch/blob/master/data_utils/ShapeNetDataLoader.py
import os, json, torch, warnings, numpy as np
from PC_Augmentation import pc_normalize
from torch.utils.data import Dataset
warnings.filterwarnings('ignore')
class PartNormalDataset(Dataset):
"""
Data Source: https://shapenet.cs.stanford.edu/media/shapenetcore_partanno_segmentation_benchmark_v0_normal.zip
"""
def __init__(self, root, num_point=2048, split='train', use_normal=False):
self.catfile = os.path.join(root, 'synsetoffset2category.txt')
self.use_normal = use_normal
self.num_point = num_point
self.cache_size = 20000
self.datapath = []
self.root = root
self.cache = {}
self.meta = {}
self.cat = {}
with open(self.catfile, 'r') as f:
for line in f:
ls = line.strip().split()
self.cat[ls[0]] = ls[1]
# self.cat -> {'class name': syn_id, ...}
# self.meta -> {'class name': file list, ...}
# self.classes -> {'class name': class id, ...}
# self.datapath -> [('class name', single file) , ...]
self.classes = dict(zip(self.cat, range(len(self.cat))))
train_ids = self.read_fns(os.path.join(self.root, 'train_test_split', 'shuffled_train_file_list.json'))
test_ids = self.read_fns(os.path.join(self.root, 'train_test_split', 'shuffled_test_file_list.json'))
val_ids = self.read_fns(os.path.join(self.root, 'train_test_split', 'shuffled_val_file_list.json'))
for item in self.cat:
dir_point = os.path.join(self.root, self.cat[item])
fns = sorted(os.listdir(dir_point))
self.meta[item] = []
            if split == 'trainval':
fns = [fn for fn in fns if ((fn[0:-4] in train_ids) or (fn[0:-4] in val_ids))]
            elif split == 'test':
fns = [fn for fn in fns if fn[0:-4] in test_ids]
else:
                print('Unknown split: %s [Options: trainval, test]. Exiting...' % split)
exit(-1)
for fn in fns:
token = (os.path.splitext(os.path.basename(fn))[0])
self.meta[item].append(os.path.join(dir_point, token + '.txt'))
for item in self.cat:
for fn in self.meta[item]:
self.datapath.append((item, fn))
self.seg_classes = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35],
'Rocket': [41, 42, 43], 'Car': [8, 9, 10, 11], 'Laptop': [28, 29],
'Cap': [6, 7], 'Skateboard': [44, 45, 46], 'Lamp': [24, 25, 26, 27],
'Mug': [36, 37], 'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Knife': [22, 23],
'Table': [47, 48, 49], 'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40],
'Chair': [12, 13, 14, 15]}
@staticmethod
def read_fns(path):
with open(path, 'r') as file:
ids = set([str(d.split('/')[2]) for d in json.load(file)])
return ids
def __getitem__(self, index):
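        # Return (points, class id, per-point part labels) for one shape, resampled to num_point points and normalised.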
if index in self.cache:
pts, cls, seg = self.cache[index]
else:
fn = self.datapath[index]
cat, pt = fn[0], np.loadtxt(fn[1]).astype(np.float32)
cls = np.array([self.classes[cat]]).astype(np.int32)
pts = pt[:, :6] if self.use_normal else pt[:, :3]
seg = pt[:, -1].astype(np.int32)
if len(self.cache) < self.cache_size:
self.cache[index] = (pts, cls, seg)
choice = np.random.choice(len(seg), self.num_point, replace=True)
pts[:, 0:3] = pc_normalize(pts[:, 0:3])
pts, seg = pts[choice, :], seg[choice]
return pts, cls, seg
def __len__(self):
return len(self.datapath)
if __name__ == "__main__":
root = '../data/shapenetcore_partanno_segmentation_benchmark_v0_normal/'
TRAIN_DATASET = PartNormalDataset(root=root, num_point=2048, split='trainval', use_normal=False)
trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=24, shuffle=True, num_workers=4)
for i, data in enumerate(trainDataLoader):
points, label, target = data
| StarcoderdataPython |
4802690 | import setuptools
import pkg
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name=pkg.name,
version=pkg.version,
author=pkg.author,
author_email=pkg.author_email,
description=pkg.description,
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/bthornton191/adamspy",
packages=setuptools.find_packages(exclude=['test', 'pkg']),
package_data={'adamspy.adripy': ['templates/*'], 'adamspy.postprocess': ['aview_scripts/*']},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: Microsoft :: Windows",
],
install_requires = pkg.install_requires
) | StarcoderdataPython |
3374839 | <reponame>MarcSkovMadsen/panel-presentation<gh_stars>10-100
import panel as pn
import param
from .base import ComponentBase
import numpy as np
class Trend(ComponentBase):
component = param.Parameter(pn.indicators.Trend)
reference = param.String("https://panel.holoviz.org/reference/indicators/Trend.html#indicators-gallery-trend")
imports = """\
import panel as pn
import numpy as np
pn.extension(sizing_mode="stretch_width")
"""
def example(self, theme="default", accent_base_color="blue"):
def get_widget(theme="default", accent_base_color="blue"):
data = {'x': np.arange(50), 'y': np.random.randn(50).cumsum()}
trend = pn.indicators.Trend(
title='Price', data=data, plot_color=accent_base_color, sizing_mode="stretch_width", height=500
)
return "Hello"
# trend = get_widget(theme=theme, accent_base_color=accent_base_color)
# component = pn.Column(
# trend,
# sizing_mode="stretch_both",
# )
        component = get_widget(theme=theme, accent_base_color=accent_base_color)
return component | StarcoderdataPython |
1763624 | import re
from pydantic import BaseModel, validator
def validate_image(image: str) -> str:
if re.fullmatch(r"https?://[\w!?/+\-_~;.,*&@#$%()'[\]]+", image) is None:
raise ValueError('malformed image url: %s' % image)
return image
def validate_name(name: str) -> str:
if len(name) == 0:
raise ValueError('name is required')
return name
def validate_min(min_v: int, values) -> int:
if min_v > values.get('max'):
raise ValueError('max must be larger than min')
return min_v
def validate_nat(v: int) -> int:
if v < 0:
raise ValueError('must be larger than 0')
return v
class ContainerBase(BaseModel):
id: str
image: str
name: str
max: int
min: int
_validate_image = validator('image', allow_reuse=True)(validate_image)
_validate_name = validator('name', allow_reuse=True)(validate_name)
_validate_max = validator('max', allow_reuse=True)(validate_nat)
_validate_min = validator('min', allow_reuse=True)(validate_min)
class Container(ContainerBase):
class Config:
orm_mode = True
| StarcoderdataPython |
106523 | <filename>rtamt/exception/stl/exception.py
class STLException(Exception):
pass
class STLParseException(Exception):
pass
class STLOfflineException(Exception):
pass | StarcoderdataPython |
3373807 | <filename>ilrdc/util/__init__.py
from .url_downloader import download_url
from .sound_url_modifier import modify_sound_url
| StarcoderdataPython |
104029 | """Preprocessing of raw LAU2 data to bring it into normalised form."""
import geopandas as gpd
import pandas as pd
from renewablepotentialslib.shape_utils import to_multi_polygon
OUTPUT_DRIVER = "GeoJSON"
KOSOVO_MUNICIPALITIES = [f"RS{x:02d}" for x in range(1, 38)]
def merge_lau(path_to_shapes, path_to_attributes, path_to_output):
"""Merge LAU shapes with attributes."""
shapes = gpd.read_file(path_to_shapes)
shapes.geometry = shapes.geometry.map(to_multi_polygon)
attributes = gpd.read_file(path_to_attributes)
attributes = pd.DataFrame(attributes) # to be able to remove the geo information
del attributes["geometry"]
all_shapes = shapes.merge(attributes, on="COMM_ID", how="left")
all_shapes_no_kosovo = _remove_kosovo(all_shapes)
all_shapes_no_kosovo.to_file(path_to_output, driver=OUTPUT_DRIVER)
def _remove_kosovo(shapes):
"""Identify and remove municipalities in Kosovo.
Those municipalities must be removed as we do not have load data and pycountry
cannot handle them at the moment (as of 2018, Kosovo does not have a standardised
country code).
"""
return shapes.set_index("COMM_ID").drop(KOSOVO_MUNICIPALITIES).reset_index()
if __name__ == "__main__":
merge_lau(
path_to_shapes=snakemake.input.shapes,
path_to_attributes=snakemake.input.attributes,
path_to_output=snakemake.output[0]
)
| StarcoderdataPython |
1717619 | """
Tests for functions in ensemble_tools.py.
Authors: <NAME> & <NAME>
Note that functions that start with an underscore (_) are designed for local
use only by the primary functions within ensemble_tools.py. Therefore, testing
of those local scripts does not include checking for irrational inputs that
would cause meaningless results and/or an exception since such inputs are
checked for earlier in the primary functions.
"""
import numpy as np
import pytest
import sys
from ensemble.ensemble_tools import (
_gumbel_cdf,
_probability_from_members,
probability_from_members,
_prob_from_outside_rank_gumbel,
_prob_from_outside_rank_exp,
_deterministic_event_prob,
probability_from_members,
prob_between_values,
ensemble_verification_rank,
_validate_arg_type
)
# Define ensemble datasets to test with
MEMBERS_ALPHA = np.array([[0.0, 0.0, 0.0, 0.0, 0.1, 0.2, 0.5, 0.5, 0.5, 1.0]])
MEMBERS_BRAVO = np.array([[-1.0, -1.0, 0.0, 0.0, 0.1, 0.2, 0.5, 0.5, 1.0, 1.0]])
MEMBERS_CHARLIE = np.array([[7, 7, 7, 7, 7, 7, 7, 7, 7, 7]])
# Set the roundoff decimals for testing precision
ROUNDOFF = 5
# ------------------------------------------------------------------------------
# _gumbel_cdf
# ------------------------------------------------------------------------------
@pytest.mark.parametrize("members, x, expected",
[(MEMBERS_ALPHA[0], 1.1, 0.97949),
(MEMBERS_ALPHA[0], 10.0, 1.0),
(MEMBERS_ALPHA[0], -10.0, 0.0),
])
def test_gumbel_cdf(members, x, expected):
print("I AM TESTING _gumbel_cdf")
assert np.round(_gumbel_cdf(members, x), ROUNDOFF) == expected
# ------------------------------------------------------------------------------
# _prob_from_outside_rank_gumbel
# ------------------------------------------------------------------------------
@pytest.mark.parametrize("threshold, members, threshold_position, expected",
[(1000, MEMBERS_ALPHA, 'above_all_members', 0.0),
(-1000, MEMBERS_ALPHA, 'below_all_members', 1),
(1.1, MEMBERS_ALPHA, 'mouse_burgers', 1e32),
(1.1, MEMBERS_ALPHA, 'above_all_members', 'between_0_1')])
def test_prob_from_outside_rank_gumbel(
threshold, members, threshold_position, expected
):
prob = _prob_from_outside_rank_gumbel(threshold, members, threshold_position)
if isinstance(expected, float) or isinstance(expected, int) :
assert(np.round(prob, ROUNDOFF) == expected),\
(f"prob={prob}, expected={expected}")
else :
assert(prob < 1. and prob > 0.),\
("Expected prob between zero and one but got {prob}")
# ------------------------------------------------------------------------------
# _prob_from_outside_rank_exp
# ------------------------------------------------------------------------------
# this function estimates Prob(V >= thresh | ensemble_members & exp left tail)
# where V is the verifying value, thresh is a user specified threshold for some
# event, and ensemble_members are predictions from e.g. crps-net.
@pytest.mark.parametrize("threshold, members",
[(0.5, np.array([1,2,3,4])),])
def test_prob_from_outside_rank_exp(threshold, members) :
n_bins = len(members) + 1
n_prob_per_bin = 1 / n_bins
assert(_prob_from_outside_rank_exp(np.min(members), members) == (1-n_prob_per_bin)),\
(f"when thresh is tied with lower member, there is a full bin of probability below")
    assert(_prob_from_outside_rank_exp(0, members) == 1),\
    ("Can't be negative, so the probability at or above 0 is always 1 regardless of members")
prob = _prob_from_outside_rank_exp(threshold, members)
assert(prob < 1 and prob > 0),\
("probability of this tail must be between 0 and 1")
# ------------------------------------------------------------------------------
# probability_from_members
# ------------------------------------------------------------------------------
# Tests for ValueErrors
@pytest.mark.parametrize("threshold, members, operator_str, presorted, positive_definite",
[(-0.15, MEMBERS_ALPHA, 'greater', True, True),
(0.1, MEMBERS_BRAVO, 'greater', True, True),
(0.1, np.array([[1, 2, 3]]), 'greater', True, True),
(0.1, MEMBERS_ALPHA, '<NAME>!', True, True),
(0.1, np.array([[1, 2, 3, 4, 5, 6, np.NaN, 8, 9, 10]]), 'greater', True, True),
] )
def test1_probability_from_members(threshold, members, operator_str, presorted, positive_definite):
with pytest.raises(ValueError):
probability_from_members(threshold, members, operator_str, presorted,
positive_definite)
# Tests for TypeErrors
@pytest.mark.parametrize("threshold, members, operator_str, presorted, positive_definite",
[('string', MEMBERS_ALPHA, 'greater', True, True),
(1, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 'greater', True, True),
(1, MEMBERS_ALPHA, True, True, True),
(1, MEMBERS_ALPHA, 'greater', 'string', True),
(1, MEMBERS_ALPHA, 'greater', True, 999),
] )
def test2_probability_from_members(threshold, members, operator_str, presorted, positive_definite):
with pytest.raises(TypeError):
probability_from_members(threshold, members, operator_str, presorted,
positive_definite)
# ------------------------------------------------------------------------------
# _probability_from_members
# ------------------------------------------------------------------------------
MEMBER_POSITIVE_DEF = np.array([0, 1, 2, 3, 4, 5])
# Tests for threshold between members
@pytest.mark.parametrize("threshold, members, operator_str, presorted, positive_definite, expected",
[(0.15, MEMBERS_ALPHA, 'greater', True, True, 0.5),
(0.15, MEMBERS_ALPHA, 'greater', False, False, 0.5),
(0.15, MEMBERS_ALPHA, 'greater_equal', True, True, 0.5),
(0.15, MEMBERS_ALPHA, 'greater_equal', True, False, 0.5),
(0.15, MEMBERS_ALPHA, 'less', True, True, 0.5),
(0.15, MEMBERS_ALPHA, 'less', True, False, 0.5),
(0.15, MEMBERS_ALPHA, 'less_equal', True, True, 0.5),
(0.15, MEMBERS_ALPHA, 'less_equal', False, False, 0.5),
(-100, MEMBER_POSITIVE_DEF, 'less', True, True, 0.0),
(-100, MEMBER_POSITIVE_DEF, 'less', False, True, 0.0),
] )
def test1__probability_from_members(threshold, members, operator_str, presorted,
positive_definite, expected):
assert _probability_from_members(threshold, members, operator_str, presorted,
positive_definite)[0][0] == expected
# Tests for threshold outside of members
@pytest.mark.parametrize("threshold, members, operator_str, presorted, positive_definite, expected",
[(1.1, MEMBERS_ALPHA, 'greater', True, True, 0.06111),
(7, MEMBERS_CHARLIE, 'greater', True, True, 0.0),
(8, MEMBERS_CHARLIE, 'greater', True, True, 0.0),
(8, MEMBERS_CHARLIE, 'greater_equal', True, True, 0.0),
(7, MEMBERS_CHARLIE, 'greater_equal', True, True, 1.0)
] )
def test1b__probability_from_members(threshold, members, operator_str, presorted,
positive_definite, expected):
assert np.round(_probability_from_members(threshold, members, operator_str, presorted,
positive_definite)[0][0], ROUNDOFF) == expected
# Tests for handling zeros
@pytest.mark.parametrize("threshold, members, operator_str, presorted, positive_definite, expected",
[(0.0, MEMBERS_ALPHA, 'greater', True, True, np.round(7./11, ROUNDOFF)),
(0.0, MEMBERS_ALPHA, 'greater', True, False, np.round(7./11, ROUNDOFF)),
(0.0, MEMBERS_ALPHA, 'greater_equal', True, False, np.round(10./11, ROUNDOFF)),
(0.0, MEMBERS_ALPHA, 'greater_equal', True, True, 1.0)
] )
def test2__probability_from_members(threshold, members, operator_str, presorted,
positive_definite, expected):
assert np.round(_probability_from_members(threshold, members, operator_str, presorted,
positive_definite)[0][0], ROUNDOFF) == expected
# Tests for handling tie between threshold and members
@pytest.mark.parametrize("threshold, members, operator_str, presorted, positive_definite, expected",
[(0.5, MEMBERS_ALPHA, 'greater', True, True, np.round(2./11, ROUNDOFF)),
(0.5, MEMBERS_ALPHA, 'greater', False, False, np.round(2./11, ROUNDOFF)),
(0.5, MEMBERS_ALPHA, 'greater_equal', True, True, np.round(4./11, ROUNDOFF)),
(0.5, MEMBERS_ALPHA, 'greater_equal', False, False, np.round(4./11, ROUNDOFF)),
(0.5, MEMBERS_ALPHA, 'less', True, True, np.round(7./11, ROUNDOFF)),
(0.5, MEMBERS_ALPHA, 'less_equal', True, True, np.round(9./11, ROUNDOFF)),
(-1.0, MEMBERS_BRAVO, 'less', True, False, np.round(1./11, ROUNDOFF)),
(-1.0, MEMBERS_BRAVO, 'less_equal', True, False, np.round(2./11, ROUNDOFF)),
] )
def test3__probability_from_members(threshold, members, operator_str, presorted,
positive_definite, expected):
assert np.round(_probability_from_members(threshold, members, operator_str, presorted,
positive_definite)[0][0], ROUNDOFF) == expected
# ------------------------------------------------------------------------------
# prob_between_values
# ------------------------------------------------------------------------------
MEMBERS_DELTA = np.array([[0.0, 0.0, 0.0, 0.0, 0.1, 0.2, 0.5, 0.5, 0.5, 1.]])
# TODO: Break this into logical separations
@pytest.mark.parametrize("members, lower, upper, bracket, positive_definite, expected",
[(MEMBERS_DELTA, 0., 0.1, "()", True, np.round(1/11, ROUNDOFF)),
(MEMBERS_DELTA, 0., 0.1, "[)", True, np.round(5/11, ROUNDOFF)),
(MEMBERS_DELTA, 0., 0.1, "[]", True, np.round(5/11, ROUNDOFF)),
(MEMBERS_DELTA, 0., 0.1, "(]", True, np.round(1/11, ROUNDOFF)),
(MEMBERS_DELTA, 0., 0.5, "()", True, np.round(3/11, ROUNDOFF)),
(MEMBERS_DELTA, 0., 1., "()", True, np.round(6/11, ROUNDOFF)),
(MEMBERS_DELTA, 0., 1., "[]", True, np.round(10/11, ROUNDOFF)),
(MEMBERS_DELTA, 0., 1., "[]", False, np.round(9/11, ROUNDOFF)),
(MEMBERS_DELTA, 10., 11., "[]", False, np.round(0/11, ROUNDOFF)),
(MEMBERS_CHARLIE, 0., 10., "[]", False, np.round(11/11, ROUNDOFF)),
(MEMBERS_CHARLIE, 6.99, 7.01, "[]", False, np.round(11/11, ROUNDOFF)),
(MEMBERS_CHARLIE, 6.99, 7.01, "()", False, np.round(11/11, ROUNDOFF)),
(MEMBERS_CHARLIE, 7, 7.0001, "()", False, np.round(0/11, ROUNDOFF)),
(MEMBERS_CHARLIE, 7, 7.0001, "[)", False, np.round(11/11, ROUNDOFF)),
(MEMBERS_CHARLIE, 7.1, 7.2, "[]", False, np.round(0/11, ROUNDOFF)),
(MEMBERS_CHARLIE, 6.9, 6.91, "[]", False, np.round(0/11, ROUNDOFF)),
])
def test1_prob_between_values(members, lower, upper, bracket,
positive_definite, expected) :
assert np.round(prob_between_values(members, lower, upper,
bracket, positive_definite)[0][0], ROUNDOFF) == expected
# Validate the parameters
# ------------------------------------------------------------------------------
# ensemble_verification_rank
# ------------------------------------------------------------------------------
N = int(1e6)
MEMBERS_ZERO = np.zeros((N, 5))
V_ = np.zeros(shape=(N, 1))
@pytest.mark.parametrize("v_, M, expected",
[(V_, MEMBERS_ZERO, np.array([0, 1, 2, 3, 4, 5])),
] )
def test1_ensemble_verification_rank(v_, M, expected) :
ranks = ensemble_verification_rank(v_, M)
val, count = np.unique(ranks, return_counts=True)
prob_per_bin = 1 / (M.shape[1] + 1)
proportion = count / M.shape[0]
# TODO: assert counts roughly equal to expected value
assert (val == expected).any() #and np.testing.assert_almost_equal(proportion[0], prob_per_bin, decimal=7)
# ------------------------------------------------------------------------------
# _validate_arg_type
# ------------------------------------------------------------------------------
# 1) correct int type
# 2) give a str when expecting an int, catch TypeError
# 3) give a list when expecting a str, catch TypeError
# 4) give a dict when expecting a dict
@pytest.mark.parametrize("parameter_name, parameter, expected_type, raises",
[("horse_face", 1, int, None),
("horse_face", "moose", int, TypeError),
("wild horses", ["rat"], str, TypeError),
("dog gunna hunt", {"cute":"dog"}, dict, None)])
def test__validate_arg_type(parameter_name, parameter, expected_type, raises) :
"""Test to make sure the function fails when expected."""
if raises is not None :
# We expect this to raise an error
with pytest.raises(raises) :
_validate_arg_type(parameter_name, parameter, expected_type)
else :
_validate_arg_type(parameter_name, parameter, expected_type)
# ------------------------------------------------------------------------------
# _deterministic_event_prob
# ------------------------------------------------------------------------------
@pytest.mark.parametrize("forecast, thresh, operator_str, expected",
[(1, 1.1, "less", 0),
(1, 0.9, "less", 1),
(1, 1, "less", 1),
(1, 1, "less_equal", 1),
(1, 0, "less_equal", 1),
(5, 10, "less_equal", 0),
(5, 10, "not_a_valid_operator", 1e64),
])
def test_deterministic_event_prob(forecast, thresh, operator_str, expected) :
prob = _deterministic_event_prob(forecast, thresh, operator_str)
assert(prob == expected),\
(f"prob={prob}, expected={expected}")
# THESE NEED TESTS
# _deterministic_event_prob
# probability_from_members
| StarcoderdataPython |
3337077 | """Implementation of quicksort in Python."""
def quick_sort(iter):
"""Sort the iterable using the merge sort method."""
if not isinstance(iter, (list, tuple)):
raise TypeError("Input only a list/tuple of integers")
if len(iter) < 2:
return iter
if not all(isinstance(x, (int, float)) for x in iter):
raise ValueError("Input only a list/tuple of integers")
small = []
large = []
pivot = iter[0]
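    # Partition the remaining items around the pivot, then recursively sort both halves.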
for idx in range(len(iter)):
if iter[idx] < pivot and not idx == 0:
small.append(iter[idx])
elif iter[idx] >= pivot and not idx == 0:
large.append(iter[idx])
small = quick_sort(small)
large = quick_sort(large)
small.append(pivot)
small.extend(large)
return small
if __name__ == '__main__': # pragma: no cover
from timeit import Timer
random = Timer(
'quick_sort([randint(0, 1000) for x in range(100)][::-1])',
"from __main__ import quick_sort; from random import randint"
)
print("""
Quicksort is a divide and conquer algorithm. Quicksort first
divides a large array into two smaller sub-arrays: the low
elements and the high elements. Quicksort can then recursively
sort the sub-arrays.
""")
print("Random input (100 numbers from 0-1000 sorted) over 1000 trials:")
print(random.timeit(number=1000))
| StarcoderdataPython |
43966 | # For SSH
import Exscript
# For Color Font
from colorama import init as colorama_init
from colorama import Fore
colorama_init(autoreset=True)
username = "user1"
password = "<PASSWORD>"
ip4 = "192.168.33.3"
# Establish the SSH session
session = Exscript.protocols.SSH2()
session.connect(ip4)
# Log in to the router
account = Exscript.Account(name=username, password=password)
session.login(account)
print("===== Step 1. run show command =====")
session.execute("show configuration interfaces ge-0/0/1")
print(Fore.YELLOW + session.response)
print("===== Step 2. configure =====")
session.execute("configure")
config_txt = "set interfaces ge-0/0/1 disable"
session.execute(config_txt)
print(Fore.YELLOW + session.response)
print("===== Step 3. commit check =====")
session.execute("show | compare")
print(Fore.YELLOW + session.response)
session.execute("commit check")
print(session.response)
print("===== Step 4. commit =====")
# Ask the user for y or n
print(Fore.YELLOW + "Do you commit? y/n")
choice = input()
if choice == "y":
session.execute("commit")
print(session.response)
else:
session.execute("rollback")
print(session.response)
session.execute("exit")
print(session.response)
print("===== Step 5. run show command(again) =====")
session.execute("show configuration interfaces ge-0/0/1")
print(Fore.YELLOW + session.response)
session.send("exit")
session.close() | StarcoderdataPython |
1660834 | from oem_format_minimize.main import MinimalFormat
from oem_format_msgpack.main import MessagePackFormat
class MessagePackMinimalFormat(MessagePackFormat, MinimalFormat):
__key__ = 'minimize+msgpack'
__extension__ = 'min.mpack'
| StarcoderdataPython |
'''Write a program that reads 6 integers
and shows the sum of only the even ones;
if a value is odd, ignore it.'''
valor = 0
contador = 0
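# Read the numbers and accumulate only the even ones.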
for x in range(1, 7):
x = int(input('Digite um numero inteiro: '))
if x % 2 == 0:
valor += x
contador += 1
print('Total de numeros pares digitado: {} '.format(contador))
print('O resultado da soma dos valores foi: {}'.format(valor))
| StarcoderdataPython |
45375 | from yuuhpizzakebab import app, admin_required, login_required
from .models import Pizza, Topping
from flask import render_template, session, redirect, url_for, request, flash
@app.route('/pizzas')
def list_pizzas():
"""Shows a list of pizzas."""
return render_template('pizza/pizzas.html',
pizzas=Pizza.get_all(),
selecting=request.args.get('selecting'))
@app.route('/pizza/create', methods=['GET', 'POST'])
@admin_required
def create_pizza():
"""Creates a new pizza.
Creates a new pizza with POST and associated any selected toppings with it.
Shows a form to fill with GET.
"""
if request.method == 'POST':
name = request.form['pizza_name']
price = request.form['pizza_price']
image_url = request.form['pizza_image_url']
selected_toppings = request.form.getlist('toppings')
p = Pizza(None, name, price, image_url, [])
success = p.save()
if not success:
flash('Some fields need to be filled', 'alert-danger')
return render_template('pizza/edit_pizza.html',
pizza=pizza,
available_toppings=Topping.get_all())
for t in selected_toppings:
topping_id = int(t)
p.add_topping(topping_id)
return redirect(url_for('list_pizzas'))
return render_template('pizza/edit_pizza.html',
available_toppings=Topping.get_all())
@app.route('/pizza/edit/<int:pizza_id>', methods=['GET', 'POST'])
@admin_required
def edit_pizza(pizza_id):
"""Edits a pizza.
arguments:
pizza_id -- id of the pizza
Saves the information with POST.
Shows a form to edit the contents with GET.
"""
if request.method == 'POST':
name = request.form['pizza_name']
price = request.form['pizza_price']
image_url = request.form['pizza_image_url']
selected_toppings = request.form.getlist('toppings')
p = Pizza(pizza_id, name, price, image_url, [])
success = p.save()
if not success:
flash('Some fields need to be filled', 'alert-danger')
return render_template('pizza/edit_pizza.html',
pizza=p,
available_toppings=Topping.get_all())
p.remove_toppings()
for t in selected_toppings:
topping_id = int(t)
p.add_topping(topping_id)
return redirect(url_for('list_pizzas'))
pizza = Pizza.get_by_id(pizza_id)
if not pizza:
return redirect(url_for('list_pizzas'))
return render_template('pizza/edit_pizza.html',
pizza=pizza,
available_toppings=Topping.get_all())
@app.route('/pizza/delete/<int:pizza_id>')
@admin_required
def delete_pizza(pizza_id):
"""Deletes a pizza.
arguments:
pizza_id -- id of the pizza
"""
Pizza.delete_by_id(pizza_id)
flash('Removed pizza', 'alert-success')
return redirect(url_for('list_pizzas'))
| StarcoderdataPython |
3226985 | <filename>wmdadict/migrations/0030_auto_20170917_2104.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-17 11:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wmdadict', '0029_auto_20170917_2049'),
]
operations = [
migrations.RemoveField(
model_name='bmdwfield',
name='element_type_old',
),
migrations.RemoveField(
model_name='bmdwfield',
name='required_old',
),
migrations.RemoveField(
model_name='bmdwfield',
name='type_old',
),
migrations.RemoveField(
model_name='emdissemantics',
name='required_old',
),
migrations.AlterField(
model_name='bmdwfield',
name='dict_field',
field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.DictionaryField', verbose_name='WMDA Dictionary Field'),
preserve_default=False,
),
# migrations.AlterField(
# model_name='bmdwfield',
# name='element_type',
# field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.BmdwElementType'),
# preserve_default=False,
# ),
# migrations.AlterField(
# model_name='bmdwfield',
# name='required',
# field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.RequiredFieldType'),
# preserve_default=False,
# ),
# migrations.AlterField(
# model_name='bmdwfield',
# name='type',
# field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.BmdwFieldType'),
# preserve_default=False,
# ),
migrations.AlterField(
model_name='emdisfield',
name='dict_field',
field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.DictionaryField', verbose_name='WMDA Dictionary Field'),
preserve_default=False,
),
migrations.AlterField(
model_name='emdisfield',
name='emdis_type',
field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.EmdisFieldType', verbose_name='field type'),
preserve_default=False,
),
migrations.AlterField(
model_name='emdissemantics',
name='emdis_field',
field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.EmdisField'),
preserve_default=False,
),
migrations.AlterField(
model_name='emdissemantics',
name='emdis_message',
field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.EmdisMessage'),
preserve_default=False,
),
migrations.AlterField(
model_name='emdissemantics',
name='required',
field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.RequiredFieldType'),
preserve_default=False,
),
migrations.AlterField(
model_name='formfields',
name='dict_field',
field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.DictionaryField', verbose_name='WMDA Dictionary Field'),
preserve_default=False,
),
migrations.AlterField(
model_name='formfields',
name='wmda_form',
field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.WmdaForm'),
preserve_default=False,
),
]
| StarcoderdataPython |
3204805 | <reponame>thunder-/paprika<gh_stars>0
from paprika.repositories.ChunkRepository import ChunkRepository
from paprika.repositories.ProcessActionPropertyRepository import ProcessActionPropertyRepository
from paprika.repositories.ProcessPropertyRepository import ProcessPropertyRepository
from paprika.repositories.ProcessRepository import ProcessRepository
from paprika.system.logger.Logger import Logger
from paprika.actions.Actionable import Actionable
class State(Actionable):
def __init__(self):
Actionable.__init__(self)
def execute(self, connector, process_action):
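        # Copy the state, message and backtrace recorded on the process over to its chunk and log the result.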
job_name = process_action['job_name']
logger = Logger(connector, self)
chunk_repository = ChunkRepository(connector)
process_repository = ProcessRepository(connector)
process_property_repository = ProcessPropertyRepository(connector)
process_action_property_repository = ProcessActionPropertyRepository(connector)
# retrieve the chunk properties
process = process_repository.find_by_id(process_action['pcs_id'])
chunk_id = process_property_repository.get_property(process, 'chunk_id')
message = process_property_repository.get_property(process, 'message')
backtrace = process_property_repository.get_property(process, 'backtrace')
chunk = chunk_repository.find_by_id(chunk_id)
state = process_action_property_repository.get_property(process_action, 'state')
chunk['state'] = state
chunk['message'] = message
chunk['backtrace'] = backtrace
chunk_repository.state(chunk)
logger.info(job_name, 'job_name: ' + job_name + " state: " + chunk['state']) | StarcoderdataPython |
3389174 | <reponame>tylerclair/py3canvas
"""ErrorReports API Tests for Version 1.0.
This is a testing template for the generated ErrorReportsAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.error_reports import ErrorReportsAPI
from py3canvas.apis.error_reports import Errorreport
class TestErrorReportsAPI(unittest.TestCase):
"""Tests for the ErrorReportsAPI."""
def setUp(self):
self.client = ErrorReportsAPI(secrets.instance_address, secrets.access_token)
def test_create_error_report(self):
"""Integration test for the ErrorReportsAPI.create_error_report method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
| StarcoderdataPython |
58518 | import numpy as np
import matplotlib.pyplot as plt
import random
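# Simulate DOT supply, bancor reserve and token price over 30 years for several connector weights (cw).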
s0=4.96*10**8
mtop=5.04*10**8
oneday=24*60*60
oneyear=365
t=[i for i in range(1,oneyear*30)]
bprice0=6.4669
bprice=bprice0
bnum0=1000000
b0=1.0*bnum0
cw0=0.065
cw1=0.075
cw2=0.08
cw3=0.1
realmine=0
p0list=[]
b0list=[]
s0list=[]
p1list=[]
b1list=[]
s1list=[]
p2list=[]
b2list=[]
s2list=[]
p3list=[]
b3list=[]
s3list=[]
b=b0
p0=b0/(cw0*s0)
percentforbancor=0.08
dotpersec=2
s=1.0*s0
newprice=p=p0
sp=s0*p0
allmine=0
for i in t:
l=i/(365*4+1)
mineperday=(1.0*dotpersec/(2**l))*oneday
if allmine+mineperday>mtop:
realmine=mtop-allmine
else:
realmine=mineperday
allmine+=realmine
s+=realmine
newprice=b/(cw0*s)
b+=realmine*percentforbancor*newprice
newprice=b/(cw0*s)
b0list.append(b/10**6)
s0list.append(s/10**6)
p0list.append(newprice*10**3)
b=b0
s=s0
realmine=allmine=0
for i in t:
l=i/(365*4+1)
mineperhalfday=(1.0*dotpersec/(2**l))*oneday
if allmine+mineperhalfday>mtop:
realmine=mtop-allmine
else:
realmine=mineperhalfday
allmine+=realmine
s+=realmine
newprice=b/(cw1*s)
b+=realmine*percentforbancor*newprice
newprice=b/(cw1*s)
b1list.append(b/10**6)
s1list.append(s/10**6)
p1list.append(newprice*10**3)
b=b0
s=s0
realmine=allmine=0
for i in t:
l=i/(365*4+1)
mineperhalfday=(1.0*dotpersec/(2**l))*oneday
if allmine+mineperhalfday>mtop:
realmine=mtop-allmine
else:
realmine=mineperhalfday
allmine+=realmine
s+=realmine
newprice=b/(cw2*s)
b+=realmine*percentforbancor*newprice
newprice=b/(cw2*s)
b2list.append(b/10**6)
s2list.append(s/10**6)
p2list.append(newprice*10**3)
b=b0
s=s0
realmine=allmine=0
for i in t:
l=i/(365*4+1)
mineperhalfday=(1.0*dotpersec/(2**l))*oneday
if allmine+mineperhalfday>mtop:
realmine=mtop-allmine
else:
realmine=mineperhalfday
allmine+=realmine
s+=realmine
newprice=b/(cw3*s)
b+=realmine*percentforbancor*newprice
newprice=b/(cw3*s)
b3list.append(b/10**6)
s3list.append(s/10**6)
p3list.append(newprice*10**3)
sp=plt.subplot(311)
sp.plot(t,np.array(s0list),color="black")
sp.plot(t,np.array(s1list),color="red")
sp.plot(t,np.array(s2list),color="green")
sp.plot(t,np.array(s3list),color="grey")
bp=plt.subplot(312)
bp.plot(t,np.array(b0list),color="black")
bp.plot(t,np.array(b1list),color="red")
bp.plot(t,np.array(b2list),color="green")
bp.plot(t,np.array(b3list),color="grey")
pp=plt.subplot(313)
pp.plot(t,np.array(p0list),color="black")
pp.plot(t,np.array(p1list),color="red")
pp.plot(t,np.array(p2list),color="green")
pp.plot(t,np.array(p3list),color="grey")
plt.legend()
plt.rcParams['font.sans-serif']=['AR PL UKai CN']
pp.set_title('Price-Day')
pp.set_ylabel("10^-3EOS")
sp.set_title("Supply-Day cw b="+str(cw0)+"/r"+str(cw1)+"/g"+str(cw2)+"/y"+str(cw3)+" rate="+str(percentforbancor))
sp.set_ylabel("mDOT")
bp.set_title("Reserve-Day")
bp.set_ylabel("mEOS")
plt.tight_layout()
plt.show()
| StarcoderdataPython |
152085 | <reponame>City-of-Helsinki/atv<filename>services/tests/test_admin.py<gh_stars>0
from django.urls import reverse
def test_admin_service_list_view_query_count_not_too_big(
admin_client, django_assert_max_num_queries, service_api_key_factory
):
admin_view_url = reverse("admin:services_service_changelist")
with django_assert_max_num_queries(10):
admin_client.get(admin_view_url)
service_api_key_factory.create_batch(11)
with django_assert_max_num_queries(10):
admin_client.get(admin_view_url)
def test_admin_service_api_key_list_view_query_count_not_too_big(
admin_client, django_assert_max_num_queries, service_api_key_factory
):
admin_view_url = reverse("admin:services_serviceapikey_changelist")
with django_assert_max_num_queries(10):
admin_client.get(admin_view_url)
service_api_key_factory.create_batch(11)
with django_assert_max_num_queries(10):
admin_client.get(admin_view_url)
def test_admin_service_api_key_change_view_query_count_not_too_big(
admin_client, django_assert_max_num_queries, service_api_key, service_factory
):
admin_view_url = reverse(
"admin:services_serviceapikey_change", args=(service_api_key.pk,)
)
with django_assert_max_num_queries(10):
admin_client.get(admin_view_url)
service_factory.create_batch(11)
with django_assert_max_num_queries(10):
admin_client.get(admin_view_url)
| StarcoderdataPython |
115750 | <reponame>PrabhuJoseph/cloudbreak
import json
import logging
from logging.handlers import RotatingFileHandler
class MetricsLogger:
def __init__(self, name, path, max_bytes, backup_count, debug=False):
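        # Log each event as a single JSON line to a size-rotated file.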
fmt = '%(message)s'
logging.basicConfig(
level=logging.INFO,
format=fmt,
)
formatter = logging.Formatter(fmt)
handler = RotatingFileHandler(path, maxBytes=max_bytes, backupCount=backup_count, mode='a')
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
self.metrics_logger = logging.getLogger(name)
self.metrics_logger.addHandler(handler)
def process(self, event):
self.metrics_logger.info(json.dumps(event)) | StarcoderdataPython |
4819402 | <filename>codes/main.py<gh_stars>1-10
import cv2
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
import LeNet_modified as lenet
from sklearn.utils import shuffle
# import disparity image
disp_gt=cv2.imread('scene1.truedisp.pgm',0) # import image, 0 indicates reading the image as a grayscale image
if disp_gt is None: # check if the image has been correctly imported
print('none')
row,col=disp_gt.shape
num_img=25
# import images
img_stack=np.ndarray(shape=(row,col,25))
for i in range(5):
for j in range(5):
k=i*5+j
im_temp=cv2.imread('scene1.row'+str(i+1)+'.col'+str(j+1)+'.ppm',0)
        if im_temp is None:
            print('image not imported correctly')
        img_stack[:, :, k]=im_temp[18:-18,18:-18]
        img_stack[:,:,k]=np.float64(img_stack[:,:,k])
disp_img_stack=np.ndarray(shape=(row,col,21),dtype=np.float64)
# plt.imshow(img_stack[:,:,1], cmap = 'gray', interpolation = 'bicubic')
# plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
# plt.show()
for i in range(0,21):
disp_img_stack[:,:,i]=cv2.imread('disp_map_win_size'+str(i+5)+'.jpg',0)
disp_img_stack[:,:,i]=np.float64(disp_img_stack[:,:,i])
# initialize the win_size_map, whose values are the number of windows should be used for every pixel
win_size_map=np.zeros(shape=(row,col),dtype=np.int64)
disp_gt_stack=np.ndarray(shape=(row,col,21),dtype=np.float64)
for i in range(21):
np.copyto(disp_gt_stack[:,:,i],disp_gt,casting='unsafe')
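# For every pixel pick the window-size index whose computed disparity is closest to the ground truth.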
diff_stack=np.abs(disp_gt_stack-disp_img_stack)
np.argmin(diff_stack,axis=2,out=win_size_map)
plt.imshow(win_size_map, cmap = 'gray', interpolation = 'bicubic')
plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
plt.show()
#print((win_size_map.shape[0]-15+1)*(win_size_map.shape[1]-15+1))
""" creating the training and testing set"""
rmin=7
rmax=row-7
colmin=7
colmax=col-7
# training_set={}
# training_set['image']=[]
# training_set['']
X=[]
Y=[]
for i in range(rmin,rmax):
for j in range(colmin,colmax):
Y.append(win_size_map[i,j])
X.append(img_stack[i-7:i+8,j-7:j+8,:])
print(np.max(win_size_map),' ',np.min(win_size_map))
train_x,x_test,train_y,y_test=train_test_split(X,Y,test_size=0.2)
# #
# x_train,x_validation,y_train,y_validation=train_test_split(train_x,train_y,test_size=0.2)
# #
# lenet.train(x_train=x_train,y_train=y_train,x_validation=x_validation,y_validation=y_validation)
# lenet.test(x_test,y_test,50)
#win_size_map_pre=np.zeros(shape=(rmax-rmin,colmax-colmin),dtype=np.int64)
out=lenet.predict(X,num_channel=25,batch_size=len(X))
out=out+5
# # for i in range(rmin,rmax):
# # for j in range(colmin,colmax):
# # #print(np.expand_dims(img_stack[i-7:i+8,j-7:j+8,:],0),np.shape(np.expand_dims(img_stack[i-7:i+8,j-7:j+8,:],0)))
# # win_size_map_pre[i,j]=lenet.predict(np.expand_dims(img_stack[i-7:i+8,j-7:j+8,:],0))[0]
win_size_map_pre=np.reshape(out,(rmax-rmin,colmax-colmin))
# plt.imshow(win_size_map_pre, cmap = 'gray', interpolation = 'bicubic')
# plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
# plt.show()
cv2.imwrite('result.jpg',np.uint8(win_size_map_pre)) | StarcoderdataPython |
1751576 | #!/usr/bin/env python
#
# filter-noisy-assembler-warnings.py
# Author: <NAME>
# <https://stackoverflow.com/a/41515691>
import sys
for line in sys.stdin:
# If line is a 'noisy' warning, don't print it or the following two lines.
if ('warning: section' in line and 'is deprecated' in line
or 'note: change section name to' in line):
next(sys.stdin)
next(sys.stdin)
else:
sys.stderr.write(line)
sys.stderr.flush()
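
# Hypothetical usage sketch (an assumption, not part of the original script):
# the filter is meant to sit on a compiler's stderr stream, e.g.
#
#   cc -c foo.c 2>&1 | python filter-noisy-assembler-warnings.py
#
# Matching "section ... is deprecated" warnings are swallowed together with
# their two follow-up lines; everything else is forwarded to stderr.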
| StarcoderdataPython |
3235784 | # Generated by Django 3.1.3 on 2020-11-26 13:55
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0012_auto_20201126_1918'),
]
operations = [
migrations.AddField(
model_name='reservation',
name='check_in',
field=models.DateField(null=True),
),
migrations.AddField(
model_name='reservation',
name='check_out',
field=models.DateField(null=True),
),
migrations.AlterField(
model_name='reservation',
name='timestamp',
field=models.DateTimeField(default=datetime.datetime(2020, 11, 26, 19, 25, 30, 362943)),
),
]
| StarcoderdataPython |
72225 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: create_template_with_yaml.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from collector_service_sdk.model.collector_service import collector_template_pb2 as collector__service__sdk_dot_model_dot_collector__service_dot_collector__template__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='create_template_with_yaml.proto',
package='template',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x1f\x63reate_template_with_yaml.proto\x12\x08template\x1a\x46\x63ollector_service_sdk/model/collector_service/collector_template.proto\"\\\n&CreateCollectorTemplateWithYamlRequest\x12\x12\n\nconfigYaml\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x10\n\x08objectId\x18\x03 \x01(\t\"\x96\x01\n.CreateCollectorTemplateWithYamlResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x32\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32$.collector_service.CollectorTemplateb\x06proto3')
,
dependencies=[collector__service__sdk_dot_model_dot_collector__service_dot_collector__template__pb2.DESCRIPTOR,])
_CREATECOLLECTORTEMPLATEWITHYAMLREQUEST = _descriptor.Descriptor(
name='CreateCollectorTemplateWithYamlRequest',
full_name='template.CreateCollectorTemplateWithYamlRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='configYaml', full_name='template.CreateCollectorTemplateWithYamlRequest.configYaml', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='template.CreateCollectorTemplateWithYamlRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objectId', full_name='template.CreateCollectorTemplateWithYamlRequest.objectId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=117,
serialized_end=209,
)
_CREATECOLLECTORTEMPLATEWITHYAMLRESPONSEWRAPPER = _descriptor.Descriptor(
name='CreateCollectorTemplateWithYamlResponseWrapper',
full_name='template.CreateCollectorTemplateWithYamlResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='template.CreateCollectorTemplateWithYamlResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='template.CreateCollectorTemplateWithYamlResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='template.CreateCollectorTemplateWithYamlResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='template.CreateCollectorTemplateWithYamlResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=212,
serialized_end=362,
)
_CREATECOLLECTORTEMPLATEWITHYAMLRESPONSEWRAPPER.fields_by_name['data'].message_type = collector__service__sdk_dot_model_dot_collector__service_dot_collector__template__pb2._COLLECTORTEMPLATE
DESCRIPTOR.message_types_by_name['CreateCollectorTemplateWithYamlRequest'] = _CREATECOLLECTORTEMPLATEWITHYAMLREQUEST
DESCRIPTOR.message_types_by_name['CreateCollectorTemplateWithYamlResponseWrapper'] = _CREATECOLLECTORTEMPLATEWITHYAMLRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CreateCollectorTemplateWithYamlRequest = _reflection.GeneratedProtocolMessageType('CreateCollectorTemplateWithYamlRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATECOLLECTORTEMPLATEWITHYAMLREQUEST,
'__module__' : 'create_template_with_yaml_pb2'
# @@protoc_insertion_point(class_scope:template.CreateCollectorTemplateWithYamlRequest)
})
_sym_db.RegisterMessage(CreateCollectorTemplateWithYamlRequest)
CreateCollectorTemplateWithYamlResponseWrapper = _reflection.GeneratedProtocolMessageType('CreateCollectorTemplateWithYamlResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _CREATECOLLECTORTEMPLATEWITHYAMLRESPONSEWRAPPER,
'__module__' : 'create_template_with_yaml_pb2'
# @@protoc_insertion_point(class_scope:template.CreateCollectorTemplateWithYamlResponseWrapper)
})
_sym_db.RegisterMessage(CreateCollectorTemplateWithYamlResponseWrapper)
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
1646355 | ## ALL RIGHTS RESERVED.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
## Neither the name of the SONATA-NFV, 5GTANGO [, ANY ADDITIONAL AFFILIATION]
## nor the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## This work has been performed in the framework of the SONATA project,
## funded by the European Commission under Grant number 671517 through
## the Horizon 2020 and 5G-PPP programmes. The authors would like to
## acknowledge the contributions of their colleagues of the SONATA
## partner consortium (www.sonata-nfv.eu).
##
## This work has been performed in the framework of the 5GTANGO project,
## funded by the European Commission under Grant number 761493 through
## the Horizon 2020 and 5G-PPP programmes. The authors would like to
## acknowledge the contributions of their colleagues of the 5GTANGO
## partner consortium (www.5gtango.eu).
# encoding: utf-8
from rest_framework import serializers
from api.models import *
from api.serializers import *
from django.contrib.auth.models import User
from django.core import serializers as core_serializers
#######################################################################################################
class SntPasMonResDetailSerializer(serializers.ModelSerializer):
class Meta:
model = passive_monitoring_res
fields = ('service_id','test_id','created','terminated','data')
class SntPasMonResSerializer(serializers.ModelSerializer):
class Meta:
model = passive_monitoring_res
fields = ('service_id','test_id','created','terminated')
class SntSNMPOidSerializer(serializers.ModelSerializer):
class Meta:
model = monitoring_snmp_oids
fields = ('oid', 'metric_name', 'metric_type', 'unit', 'mib_name')
class SntSNMPEntSerializer(serializers.ModelSerializer):
#user = serializers.PrimaryKeyRelatedField(read_only=False, queryset=monitoring_users.objects.all())
#user = SntUserSerializer()
oids = SntSNMPOidSerializer(many=True)
class Meta:
model = monitoring_snmp_entities
fields = ('id', 'ip', 'port','username', 'interval', 'entity_type','oids' , 'entity_id', 'version', 'auth_protocol', 'security_level', 'status')
class SntSNMPEntFullSerializer(serializers.ModelSerializer):
oids = SntSNMPOidSerializer(many=True)
class Meta:
model = monitoring_snmp_entities
fields = ('id', 'ip', 'port','username', 'password', 'interval', 'entity_type','oids' , 'entity_id', 'version', 'auth_protocol', 'security_level', 'status')
lookup_field = {'ip', 'port'}
def create(self, validated_data):
oids_data = validated_data.pop('oids')
ent_ip = validated_data['ip']
ent_port = validated_data['port']
ent = monitoring_snmp_entities.objects.all().filter(ip=ent_ip,port=ent_port)
print (ent.count())
if ent.count() > 0:
ent.delete()
entity = monitoring_snmp_entities.objects.create(**validated_data)
for oid in oids_data:
monitoring_snmp_oids.objects.create(snmp_entity=entity, **oid)
return entity
class SntSmtpSerializerCreate(serializers.ModelSerializer):
class Meta:
model = monitoring_smtp
fields = ('id', 'smtp_server', 'port', 'user_name', 'password', 'component', 'sec_type')
class SntSmtpSerializerList(serializers.ModelSerializer):
class Meta:
model = monitoring_smtp
fields = ('id', 'smtp_server', 'port', 'user_name', 'component', 'sec_type', 'created')
class SntSmtpSerializerCred(serializers.ModelSerializer):
class Meta:
model = monitoring_smtp
fields = ('id', 'password')
class SntSPSerializer(serializers.ModelSerializer):
class Meta:
model = monitoring_service_platforms
fields = ('id', 'sonata_sp_id', 'name', 'manager_url','created')
class SntPOPSerializer(serializers.ModelSerializer):
class Meta:
model = monitoring_pops
fields = ('id', 'sonata_pop_id','sonata_sp_id' ,'name', 'prom_url','created')
lookup_field = 'sonata_pop_id'
class SntUserSerializer(serializers.ModelSerializer):
class Meta:
model = monitoring_users
fields = ('id', 'first_name', 'last_name', 'email', 'sonata_userid', 'created','type','mobile')
lookup_field = {'email','mobile'}
class SntServicesSerializer(serializers.ModelSerializer):
#user = serializers.PrimaryKeyRelatedField(read_only=False, queryset=monitoring_users.objects.all())
#user = SntUserSerializer()
ns_instance_id = serializers.CharField(source='sonata_srv_id')
test_id = serializers.CharField(source='description')
class Meta:
model = monitoring_services
fields = ('ns_instance_id', 'test_id','created','terminated')
class SntServicesFullSerializer(serializers.ModelSerializer):
#user = serializers.PrimaryKeyRelatedField(read_only=False, queryset=monitoring_users.objects.all())
user = SntUserSerializer()
class Meta:
model = monitoring_services
fields = ('id', 'sonata_srv_id', 'name', 'description', 'created', 'user', 'host_id','pop_id')
class SntFunctionsSerializer(serializers.ModelSerializer):
service = serializers.PrimaryKeyRelatedField(read_only=False, queryset=monitoring_services.objects.all())
#service = SntServicesSerializer()
class Meta:
model = monitoring_functions
fields = ('id', 'sonata_func_id', 'name', 'description', 'created', 'service', 'host_id','pop_id')
class SntServicesDelSerializer(serializers.ModelSerializer):
class Meta:
model = monitoring_services
fields = ('id', 'sonata_srv_id', 'name', 'description', 'created')
lookup_field = 'sonata_srv_id'
class SntFunctionsFullSerializer(serializers.ModelSerializer):
#service = serializers.PrimaryKeyRelatedField(read_only=False, queryset=monitoring_services.objects.all())
service = SntServicesSerializer()
class Meta:
model = monitoring_functions
fields = ('id', 'sonata_func_id', 'name', 'description', 'created', 'service', 'host_id', 'pop_id')
class SntMetricsSerializer(serializers.ModelSerializer):
#function = serializers.PrimaryKeyRelatedField(read_only=False, queryset=monitoring_functions.objects.all())
#function = SntFunctionsSerializer()
class Meta:
model = monitoring_metrics
fields = ('id', 'name', 'description', 'threshold', 'interval','cmd', 'function', 'created',)
class SntNewMetricsSerializer(serializers.ModelSerializer):
#function = serializers.PrimaryKeyRelatedField(read_only=False, queryset=monitoring_functions.objects.all())
#function = SntFunctionsSerializer()
class Meta:
model = monitoring_metrics
fields = ('name', 'description', 'threshold', 'interval','cmd', 'function', 'created')
class SntMetricsFullSerializer(serializers.ModelSerializer):
#function = serializers.PrimaryKeyRelatedField(read_only=False, queryset=monitoring_functions.objects.all())
function = SntFunctionsSerializer()
class Meta:
model = monitoring_metrics
fields = ('id', 'name', 'description', 'threshold', 'interval','cmd', 'function', 'created',)
class SntMetricsSerializer1(serializers.ModelSerializer):
sonata_func_id = serializers.PrimaryKeyRelatedField(read_only=False, queryset=monitoring_functions.objects.all())
class Meta:
model = monitoring_metrics
fields = ('id', 'name', 'description', 'threshold', 'interval','cmd', 'sonata_func_id', 'created',)
class SntNotifTypeSerializer(serializers.ModelSerializer):
class Meta:
model = monitoring_notif_types
fields = ('id', 'type',)
class SntServicesLightSerializer(serializers.ModelSerializer):
class Meta:
model = monitoring_services
fields = ('sonata_srv_id', 'name')
lookup_field = 'sonata_srv_id'
class SntRulesSerializer(serializers.ModelSerializer):
#service = serializers.PrimaryKeyRelatedField(read_only=False, queryset=monitoring_services.objects.all())
#notification_type = serializers.PrimaryKeyRelatedField(read_only=False, queryset=monitoring_notif_types.objects.all())
service = SntServicesLightSerializer()
notification_type = SntNotifTypeSerializer()
class Meta:
model = monitoring_rules
fields = ('id', 'name', 'duration', 'summary', 'description', 'condition', 'notification_type','service', 'created',)
lookup_field = 'consumer'
class SntRulesPerSrvSerializer(serializers.ModelSerializer):
notification_type = SntNotifTypeSerializer()
class Meta:
model = monitoring_rules
fields = ('id','function','vdu', 'name', 'duration', 'summary', 'description', 'condition', 'notification_type', 'created',)
class SntNewFunctionsSerializer(serializers.ModelSerializer):
#service = serializers.PrimaryKeyRelatedField(read_only=False, queryset=monitoring_services.objects.all())
#service = SntServicesSerializer()
metrics = SntNewMetricsSerializer(many=True)
class Meta:
model = monitoring_functions
fields = ('sonata_func_id', 'name', 'description', 'created', 'host_id', 'pop_id', 'metrics')
class LightFunctionsSerializer(serializers.ModelSerializer):
vnfr_id = serializers.CharField(source='sonata_func_id')
vc_id = serializers.CharField(source='host_id')
vim_id = serializers.CharField()
vim_endpoint = serializers.CharField(source='pop_id')
class Meta:
model = monitoring_functions
fields = ('vnfr_id','vc_id', 'vim_id', 'vim_endpoint')
class SntNewRulesSerializer(serializers.ModelSerializer):
#service = serializers.PrimaryKeyRelatedField(read_only=False, queryset=monitoring_services.objects.all())
#function = serializers.PrimaryKeyRelatedField(read_only=False, queryset=monitoring_functions.objects.all())
#notification_type = serializers.PrimaryKeyRelatedField(read_only=False, queryset=monitoring_notif_types.objects.all())
class Meta:
model = monitoring_rules
fields = ('name', 'duration', 'summary', 'description', 'condition', 'notification_type', 'created',)
class NewServiceSerializer(serializers.Serializer):
service = SntServicesSerializer()
functions = SntNewFunctionsSerializer(many=True)
rules = SntNewRulesSerializer(many=True)
class LightServiceSerializer(serializers.Serializer):
ns_instance_uuid = serializers.CharField()
test_id = serializers.CharField()
functions = LightFunctionsSerializer(many=True)
class promMetricLabelSerializer(serializers.Serializer):
metric_name = ''
class promMetricsListSerializer(serializers.Serializer):
metrics = promMetricLabelSerializer(many=True)
class promLabelsSerializer(serializers.Serializer):
labels = {'label':'id'}
class SntPromMetricSerializer(serializers.Serializer):
name = serializers.CharField()
start = serializers.DateTimeField()
end = serializers.DateTimeField()
labels = promLabelsSerializer(many=True)
step = serializers.CharField()
class CommentSerializer(serializers.Serializer):
email = serializers.EmailField()
content = SntRulesSerializer(many=True)
created = serializers.DateTimeField()
class wsLabelSerializer(serializers.Serializer):
label = ''
class SntWSreqSerializer(serializers.Serializer):
metric = serializers.CharField()
filters = wsLabelSerializer(many=True)
class SntSPSerializer(serializers.ModelSerializer):
class Meta:
model = monitoring_service_platforms
fields = ('id', 'sonata_sp_id', 'name', 'manager_url','created')
class SntPOPSerializer(serializers.ModelSerializer):
class Meta:
model = monitoring_pops
fields = ('id', 'sonata_pop_id','sonata_sp_id' ,'name', 'prom_url','created')
class SntRulesConfSerializer(serializers.Serializer):
rules = SntRulesPerSrvSerializer(many=True)
class SntActMonResSerializer(serializers.ModelSerializer):
class Meta:
model = active_monitoring_res
fields = ('test_id', 'service_id', 'timestamp', 'config')
class SntActMonResDetailSerializer(serializers.ModelSerializer):
class Meta:
model = active_monitoring_res
fields = ('service_id','timestamp','data')
class SntActMonResDataSerializer(serializers.ModelSerializer):
class Meta:
model = active_monitoring_res
fields = ('id', 'data')
class SntRulesFSerializer(serializers.ModelSerializer):
notification_type = SntNotifTypeSerializer()
class Meta:
model = monitoring_rules
fields = (
'id', 'name', 'duration', 'summary', 'description', 'condition', 'notification_type', 'service', 'created',)
class SntRulesVduSerializer(serializers.Serializer):
vdu_id = serializers.CharField()
rules = SntRulesFSerializer(many=True)
class SntRulesVnfSerializer(serializers.Serializer):
vnf_id = serializers.CharField()
vdus = SntRulesVduSerializer(many=True)
class SntPLCRulesConfSerializer(serializers.Serializer):
plc_contract = serializers.CharField()
vnfs = SntRulesVnfSerializer(many=True)
class Meta:
fields = ('service_id', 'plc_contract','rules')
class SntSLARulesConfSerializer(serializers.Serializer):
sla_contract = serializers.CharField()
rules = SntRulesVnfSerializer(many=True)
class Meta:
fields = ('service_id', 'sla_contract','rules')
class CommentSerializer(serializers.Serializer):
email = serializers.EmailField()
content = serializers.CharField(max_length=200)
created = serializers.DateTimeField()
class Meta:
fields = ('email', 'content')
class HealthSerializer (serializers.Serializer):
alive_since = serializers.DateTimeField()
class Meta:
fields = ('alive_since')
class SntAlertsSerializer(serializers.Serializer):
alertname = serializers.CharField()
topic = serializers.CharField()
serviceID = serializers.CharField()
functionID = serializers.CharField()
resource_id = serializers.CharField()
alertstate = serializers.CharField()
class SntPromTargetUrlsSerializer(serializers.Serializer):
MANO_TYPES = (
('osm', 'osm'),
('sonata', 'sonata'),
('onap', 'onap'),
)
sp_ip = serializers.CharField()
type = serializers.ChoiceField(choices=MANO_TYPES)
sp_name = serializers.CharField()
targets = serializers.ListField(child=serializers.CharField(max_length=32, allow_blank=True))
class SntAlertsListSerializer(serializers.Serializer):
status = serializers.CharField()
alerts = SntAlertsSerializer(many=True)
class SntPromTargetSerializer(serializers.Serializer):
targets=SntPromTargetUrlsSerializer(many=True)
class SntPromUrlsSerializer(serializers.Serializer):
targets = serializers.ListField(child=serializers.CharField(max_length=32, allow_blank=True))
class SntPromTargetsSerializer(serializers.Serializer):
static_configs = SntPromUrlsSerializer(many=True)
job_name = serializers.CharField()
class SntPromConfSerializer(serializers.Serializer):
config = serializers.CharField()
class SntPromConfSerialazer (serializers.Serializer):
    targets = serializers.JSONField()
class SntPromTargetListSerializer(serializers.Serializer):
targets=SntPromTargetsSerializer(many=True) | StarcoderdataPython |
3238200 | <reponame>fpischedda/yaff
"""
Yaff game runner
It runs a Yaff based application, specifying its package name, for example:
$ yaff run example
this script will look up the module example.main and will try to execute
the run function
"""
import importlib.util
import sys
import click
@click.group()
def cli():
pass
@cli.command()
@click.option(
'--module-name',
default='main',
help='The name of the module containing the run function')
@click.argument('name')
def run(module_name, name):
sys.path.insert(0, '.')
module = importlib.import_module(name)
module = getattr(module, module_name)
module.run()
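
# The entry-point guard below is an addition (not in the original file); it is
# a common pattern for click-based CLIs and lets the group defined above be
# executed directly, e.g. `python main.py run example`.
if __name__ == '__main__':
    cli()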
| StarcoderdataPython |
3254138 | <reponame>hcallen/python-xmatters<gh_stars>0
import requests
from oauthlib.oauth2 import LegacyApplicationClient
from requests_oauthlib import OAuth2Session
from xmatters import errors as err
from xmatters.connection import Connection
class OAuth2Auth(Connection):
_endpoints = {'token': '/oauth2/token'}
def __init__(self, api_base_url, client_id, username=None, password=None, **kwargs):
self.api_base_url = api_base_url
self.client_id = client_id
self.username = username
self.password = password
self._token = kwargs.get('token')
self._refresh_token = kwargs.get('refresh_token')
self.token_storage = kwargs.get('token_storage')
self.session = None
token_url = '{}{}'.format(self.api_base_url, self._endpoints.get('token'))
client = LegacyApplicationClient(client_id=self.client_id)
auto_refresh_kwargs = {'client_id': self.client_id}
token_updater = self.token_storage.write_token if self.token_storage else None
self.session = OAuth2Session(client=client, auto_refresh_url=token_url,
auto_refresh_kwargs=auto_refresh_kwargs,
token_updater=token_updater)
        # OAuth2Session must be initiated before inheritance in order to process passed XMSession kwargs
super(OAuth2Auth, self).__init__(self, **kwargs)
self._set_token()
def refresh_token(self):
"""
Refreshes the session token.
Token is automatically applied to the session and stored in token_storage (if defined).
:return: token object
:rtype: dict
"""
refresh_token = self._refresh_token or self.token.get('refresh_token')
token = self.session.refresh_token(token_url=self.session.auto_refresh_url, refresh_token=refresh_token,
kwargs=self.session.auto_refresh_kwargs)
self._update_storage()
        # Return in case the user wants it
return token
def fetch_token(self):
"""
Fetches session token.
Token is automatically applied to the session and stored in token_storage (if defined).
:return: token object
:rtype: dict
"""
token = self.session.fetch_token(token_url=self.session.auto_refresh_url, username=self.username,
password=self.password, include_client_id=True)
self._update_storage()
        # Return in case the user wants it
return token
def _update_storage(self):
if self.token_storage:
self.token_storage.write_token(self.token)
def _set_token(self):
if self._token:
self.token = self._token
self._update_storage()
elif self._refresh_token:
self.refresh_token()
elif None not in (self.username, self.password):
self.fetch_token()
elif self.token_storage and self.token_storage.read_token():
self.token = self.token_storage.read_token()
else:
raise err.AuthorizationError('Unable to obtain token with provided arguments')
@property
def token(self):
return self.session.token if self.session else self._token
@token.setter
def token(self, token):
if self.session:
self.session.token = token
else:
self._token = token
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
def __str__(self):
return self.__repr__()
class BasicAuth(Connection):
"""
    Used to authenticate requests using basic authentication.
    :param api_base_url: xMatters instance url or xMatters instance base url
    :type api_base_url: str
:param username: xMatters username
:type username: str
:param password: <PASSWORD>
:type password: str
"""
def __init__(self, api_base_url, username, password, **kwargs):
self.username = username
self.password = password
self.session = requests.Session()
self.api_base_url = api_base_url
self.session.auth = (self.username, self.password)
super(BasicAuth, self).__init__(self, **kwargs)
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
def __str__(self):
return self.__repr__()
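
# Hypothetical usage sketch (the URL and credentials below are assumptions,
# not part of the original module, and the rest of the python-xmatters package
# must be installed). BasicAuth simply attaches the credentials to a requests
# session, while OAuth2Auth fetches an OAuth2 token on construction and
# therefore needs a reachable xMatters instance.
if __name__ == '__main__':
    basic = BasicAuth('https://example.xmatters.com/api/xm/1', 'my_user', 'my_password')
    oauth = OAuth2Auth('https://example.xmatters.com/api/xm/1', client_id='my-client-id',
                       username='my_user', password='my_password')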
| StarcoderdataPython |
189450 | from __future__ import annotations
import asyncio
import functools
import inspect
import logging
import uuid
from dataclasses import asdict, dataclass
from enum import Enum
from typing import Any, Awaitable, Callable, Optional
from ocpp.charge_point import camel_to_snake_case, remove_nones, snake_to_camel_case
from ocpp.exceptions import NotImplementedError, OCPPError
from ocpp.messages import Call, MessageType, unpack, validate_payload
log = logging.getLogger("ocpp-asgi")
class Subprotocol(str, Enum):
ocpp16 = "ocpp1.6"
ocpp20 = "ocpp2.0"
ocpp201 = "ocpp2.0.1"
@dataclass
class OCPPAdapter:
"""OCPPAdapter encapsulates OCPP version specific call and call_result methods."""
ocpp_version: str
call: Awaitable[Any]
call_result: Awaitable[Any]
@dataclass
class RouterContext:
"""RouterContext instance is passed to router."""
scope: dict # Store ASGI scope dictionary as is
body: dict # Store ASGI content mainly to avoid parsing http event twice
subprotocol: Subprotocol
ocpp_adapter: OCPPAdapter
send: Callable[[str, bool, RouterContext], Awaitable[None]]
charging_station_id: str
queue: Any
call_lock: Any
@dataclass
class HandlerContext:
"""HandlerContext instance is passed to handler."""
charging_station_id: str
# References to RouterContext and Router added here so that
# we can send messages to specific Charging Station, which initiated messaging.
_router_context: RouterContext
_router: Router
async def send(self, message: dataclass) -> Any:
"""Send message to Charging Station within action handler."""
        # Use a lock to make sure that only one message can be sent at a
        # time.
async with self._router_context.call_lock:
return await self._router.call(
message=message, context=self._router_context
)
def subprotocol_to_ocpp_version(subprotocol: str) -> str:
"""Strip away ocpp prefix from"""
return subprotocol[4:]
class Router:
"""Router is a collection of ocpp action handlers."""
subprotocol: Subprotocol = None
def __init__(
self,
*,
subprotocol: Subprotocol,
response_timeout: Optional[int] = 30,
create_task: bool = True,
):
"""Initialize Router instance.
Args:
subprotocol (Subprotocol): Defines the ocpp protocol version for this router
response_timeout (int): When no response on a request is received
within this interval, a asyncio.TimeoutError is raised.
create_task (bool): Create asyncio.Task for executing
"after"-handler. Does not affect "on-handler".
"""
self.subprotocol = subprotocol
# The maximum time in seconds it may take for a CP to respond to a
# CALL. An asyncio.TimeoutError will be raised if this limit has been
# exceeded.
self._response_timeout = response_timeout
        # A dictionary that holds hooks for Actions. So if the CS receives a
        # Call it will look up the Action in this map and execute the
        # corresponding hooks if they exist.
# Dictionary contains the following structure for each Action:
# {
# Action.BootNotification: {
# "_on_action": <reference to "on_boot_notification">,
# "_after_action": <reference to "after_boot_notification">,
# "_skip_schema_validation": False,
# },
# }
self._route_map = {}
# Function used to generate unique ids for CALLs. By default
# uuid.uuid4() is used, but it can be changed. This is meant primarily
# for testing purposes to have predictable unique ids.
self._unique_id_generator = uuid.uuid4
# Use asyncio.create_task for "after"-handler.
self._create_task = create_task
self._response_queue = asyncio.Queue()
        # Dictionary for storing subscribers which are waiting for a
        # CallResult or CallError.
self.subscriptions = {}
def on(self, action, *, skip_schema_validation=False):
def decorator(func):
@functools.wraps(func)
def inner(*args, **kwargs):
return func(*args, **kwargs)
option = "_on_action"
if action not in self._route_map:
self._route_map[action] = {}
self._route_map[action][option] = func
self._route_map[action]["_skip_schema_validation"] = skip_schema_validation
return inner
return decorator
def after(self, action):
def decorator(func):
@functools.wraps(func)
def inner(*args, **kwargs):
return func(*args, **kwargs)
option = "_after_action"
if action not in self._route_map:
self._route_map[action] = {}
self._route_map[action][option] = func
return inner
return decorator
async def route_message(self, *, message: str, context: RouterContext):
"""
Route a message received from a Charging Station.
        If the message is of type Call, the corresponding hooks are executed.
If the message is of type CallResult or CallError the message is passed
to the call() function via the response_queue.
"""
try:
msg = unpack(message)
except OCPPError as e:
log.exception(
"Unable to parse message: '%s', it doesn't seem "
"to be valid OCPP: %s",
message,
e,
)
return
if msg.message_type_id == MessageType.Call:
await self._handle_call(msg, context=context)
elif msg.message_type_id in [
MessageType.CallResult,
MessageType.CallError,
]:
if msg.unique_id in self.subscriptions:
self.subscriptions[msg.unique_id].put_nowait(msg)
async def _handle_call(self, msg, *, context: RouterContext = None):
"""
        Execute all hooks installed based on the Action of the message.
        First the '_on_action' hook is executed and its response is returned to
        the client. If there is no '_on_action' hook for the Action in the
        message, a CallError with a NotImplementedError is returned.
Next the '_after_action' hook is executed.
"""
ocpp_version = subprotocol_to_ocpp_version(self.subprotocol)
try:
handlers = self._route_map[msg.action]
except KeyError:
raise NotImplementedError(f"No handler for '{msg.action}' " "registered.")
if not handlers.get("_skip_schema_validation", False):
validate_payload(msg, ocpp_version)
# OCPP uses camelCase for the keys in the payload. It's more pythonic
# to use snake_case for keyword arguments. Therefore the keys must be
# 'translated'. Some examples:
#
# * chargePointVendor becomes charge_point_vendor
        # * firmwareVersion becomes firmware_version
snake_case_payload = camel_to_snake_case(msg.payload)
try:
handler = handlers["_on_action"]
except KeyError:
raise NotImplementedError(f"No handler for '{msg.action}' " "registered.")
handler_context = HandlerContext(
charging_station_id=context.charging_station_id,
_router_context=context,
_router=self,
)
# Convert message to correct Call instance
class_ = getattr(context.ocpp_adapter.call, f"{msg.action}Payload")
payload = class_(**snake_case_payload)
try:
response = handler(payload=payload, context=handler_context)
if inspect.isawaitable(response):
response = await response
except Exception as e:
log.exception("Error while handling request '%s'", msg)
response = msg.create_call_error(e).to_json()
await self._send(message=response, is_response=True, context=context)
temp_response_payload = asdict(response)
# Remove nones ensures that we strip out optional arguments
# which were not set and have a default value of None
response_payload = remove_nones(temp_response_payload)
# The response payload must be 'translated' from snake_case to
# camelCase. So:
#
# * charge_point_vendor becomes chargePointVendor
# * firmware_version becomes firmwareVersion
camel_case_payload = snake_to_camel_case(response_payload)
response = msg.create_call_result(camel_case_payload)
if not handlers.get("_skip_schema_validation", False):
validate_payload(response, ocpp_version)
await self._send(message=response.to_json(), is_response=True, context=context)
try:
handler = handlers["_after_action"]
response = handler(payload=payload, context=handler_context)
if inspect.isawaitable(response):
if self._create_task:
# Create task to avoid blocking when making a call
# inside the after handler
asyncio.ensure_future(response)
else:
await response
except KeyError:
# '_on_after' hooks are not required. Therefore ignore exception
# when no '_on_after' hook is installed.
pass
async def call(self, *, message: Any, context: RouterContext):
ocpp_version = subprotocol_to_ocpp_version(self.subprotocol)
camel_case_payload = snake_to_camel_case(asdict(message))
call = Call(
unique_id=str(self._unique_id_generator()),
action=message.__class__.__name__[:-7],
payload=remove_nones(camel_case_payload),
)
validate_payload(call, ocpp_version)
await self._send(message=call.to_json(), is_response=False, context=context)
self.subscriptions[call.unique_id] = context.queue
try:
response = await asyncio.wait_for(
context.queue.get(), self._response_timeout
)
except asyncio.TimeoutError:
del self.subscriptions[call.unique_id]
raise
if response.message_type_id == MessageType.CallError:
log.warning("Received a CALLError: %s'", response)
raise response.to_exception()
else:
response.action = call.action
validate_payload(response, ocpp_version)
snake_case_payload = camel_to_snake_case(response.payload)
call_result = context.ocpp_adapter.call_result
cls = getattr(call_result, message.__class__.__name__)
return cls(**snake_case_payload)
async def _send(self, *, message: str, is_response: bool, context: RouterContext):
log.debug(f"{context.charging_station_id=} {message=}")
await context.send(message=message, is_response=is_response, context=context)
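
# Hypothetical usage sketch (assumed, not part of the original module): a
# router is created for one OCPP version and handlers are registered with the
# decorators defined above. The Action enum and call_result classes come from
# the separate `ocpp` package and are assumptions here.
if __name__ == "__main__":
    from ocpp.v201 import call_result
    from ocpp.v201.enums import Action

    router = Router(subprotocol=Subprotocol.ocpp201)

    @router.on(Action.Heartbeat)
    async def on_heartbeat(payload, context: HandlerContext):
        # Return the version-specific CallResult payload for this Action.
        return call_result.HeartbeatPayload(current_time="2024-01-01T00:00:00Z")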
| StarcoderdataPython |
1740750 | <reponame>ThundeRatz/vss_simulation
#!/usr/bin/env python3
# coding=utf-8
"""
File:
keyboard_node.py
Description:
Simple python routine to watch the keyboard or a joystick
to send velocity commands to a Gazebo simulation.
"""
import pygame
import sys
import rospy
from geometry_msgs.msg import Twist
# We will track the state of these keys
KEYS = [pygame.K_w, pygame.K_a, pygame.K_s, pygame.K_d]
# Indices of the joystick x and y axes
X_AXIS = 0
Y_AXIS = 4
INVERT_X_AXIS = True
INVERT_Y_AXIS = True
ROBOTS = 3
# Namespace of the topics we will publish to
DEFAULT_NAMESPACE = "/yellow_team/robot_"
DEFAULT_DEBUG = False
# The robot's maximum velocity is 2 m/s
MAX_LIN_VEL = 1.0 # m/s
# The robot's maximum velocity is 40 rad/s
MAX_ROT_VEL = 20 # rad/s
# Defines the acceleration ramp when using the keyboard
# Values as a percentage of the maximum velocity
KEYBOARD_LINEAR_STEP = 0.03
KEYBOARD_LINEAR_MAX = 1.0
KEYBOARD_ANGULAR_STEP = 0.03
KEYBOARD_ANGULAR_MAX = 0.6
# Commands range from -126 to 126 so that the bytes 0xFE and 0xFF
# are never used
SCALE = 126
def getNamespace(number):
return DEFAULT_NAMESPACE + f'{number}'
def drawConsole(win, font, console):
"""
Fills window console with the sentences stored in the list console
:param win: pygame.display Window object to be filled
:param font: pygame.Font Font style to be used
:param console: list<font.render> List of text to write
"""
img = font.render("Event console Area", 1, (155, 155, 155), (0, 0, 0))
win.blit(img, (2, 132))
ypos = 450
h = list(console)
h.reverse()
for line in h:
r = win.blit(line, (10, ypos))
win.fill(0, (r.right, r.top, 620, r.height))
ypos -= font.get_height()
def main(debug=DEFAULT_DEBUG):
vel_pub = []
rate = None
current_robot = 0
    # Initialize ROS configuration
rospy.init_node('vss_human_controller')
for i in range(ROBOTS):
vel_pub.append(rospy.Publisher(
getNamespace(i) + '/diff_drive_controller/cmd_vel',
Twist, queue_size=2))
rate = rospy.Rate(60) # 60hz
pygame.init()
    # Create the window
win = pygame.display.set_mode((640, 480), pygame.RESIZABLE)
pygame.display.set_caption("Keyboard Comunication Interface")
    # List of sentences to be shown in the console
console = []
font = pygame.font.Font(None, 26)
    # Dictionary to store the state of some keys
state = {}
for key in KEYS:
state[key] = False
    # Detect the joysticks connected to the computer
axis = [0.0, 0.0]
using_joystick = True
for x in range(pygame.joystick.get_count()):
j = pygame.joystick.Joystick(x)
j.init()
txt = "Enabled joystick: " + j.get_name()
print(txt)
img = font.render(txt, 1, (50, 200, 50), (0, 0, 0))
console.append(img)
if not pygame.joystick.get_count():
using_joystick = False
print("No Joysticks to Initialize")
img = font.render("No Joysticks to Initialize", 1,
(50, 200, 50), (0, 0, 0))
console.append(img)
vel_lin = 0.0
vel_ang = 0.0
running = True
while running:
for e in pygame.event.get():
            # Keyboard key press/release events
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_ESCAPE:
running = False
if e.key in KEYS:
state[e.key] = True
elif e.type == pygame.KEYUP:
if e.key in KEYS:
state[e.key] = False
            # Joystick axis motion
if e.type == pygame.JOYAXISMOTION:
if e.dict['axis'] in (X_AXIS, Y_AXIS):
if e.dict['axis'] == X_AXIS:
if INVERT_X_AXIS:
axis[0] = -e.value
else:
axis[0] = e.value
elif e.dict['axis'] == Y_AXIS:
if INVERT_Y_AXIS:
axis[1] = -e.value
else:
axis[1] = e.value
            # If any joystick button is pressed
if e.type == pygame.JOYBUTTONDOWN \
or e.type == pygame.JOYBUTTONUP \
or e.type == pygame.JOYHATMOTION:
txt = "%s: %s" % (pygame.event.event_name(e.type), e.dict)
print(txt)
img = font.render(txt, 1, (50, 200, 50), (0, 0, 0))
console.append(img)
console = console[-13:]
            # L1 pressed
if (e.type == pygame.JOYBUTTONDOWN and e.dict['button'] == 4) or (e.type == pygame.KEYDOWN and e.key == pygame.K_e):
current_robot += 1
current_robot %= ROBOTS
            # R1 pressed
if (e.type == pygame.JOYBUTTONDOWN and e.dict['button'] == 5) or (e.type == pygame.KEYDOWN and e.key == pygame.K_q):
current_robot -= 1
current_robot %= ROBOTS
elif e.type == pygame.VIDEORESIZE:
win = pygame.display.set_mode(e.size, pygame.RESIZABLE)
elif e.type == pygame.QUIT:
running = False
drawConsole(win, font, console)
pygame.display.flip()
if using_joystick:
txt = f"Linear: {int(axis[1]*SCALE)} Angular: {int(axis[0]*SCALE)}"
img = font.render(txt, 1, (50, 200, 50), (0, 0, 0))
console.append(img)
console = console[-13:]
if debug:
print(txt)
vel_cmd_twist = Twist()
vel_cmd_twist.linear.x = axis[1]*MAX_LIN_VEL
vel_cmd_twist.angular.z = axis[0]*MAX_ROT_VEL
vel_pub[current_robot].publish(vel_cmd_twist)
else:
if state[pygame.K_w] and not state[pygame.K_s]:
vel_lin += KEYBOARD_LINEAR_STEP
vel_lin = min(vel_lin, KEYBOARD_LINEAR_MAX)
elif state[pygame.K_s] and not state[pygame.K_w]:
vel_lin -= KEYBOARD_LINEAR_STEP
vel_lin = max(vel_lin, -KEYBOARD_LINEAR_MAX)
else:
vel_lin = 0.0
if state[pygame.K_a] and not state[pygame.K_d]:
vel_ang += KEYBOARD_ANGULAR_STEP
vel_ang = min(vel_ang, KEYBOARD_ANGULAR_MAX)
elif state[pygame.K_d] and not state[pygame.K_a]:
vel_ang -= KEYBOARD_ANGULAR_STEP
vel_ang = max(vel_ang, -KEYBOARD_ANGULAR_MAX)
else:
vel_ang = 0.0
txt = f"Linear: {int(vel_lin*SCALE)} Angular: {int(vel_ang*SCALE)}"
img = font.render(txt, 1, (50, 200, 50), (0, 0, 0))
console.append(img)
console = console[-13:]
if debug:
print(txt)
vel_cmd_twist = Twist()
vel_cmd_twist.linear.x = vel_lin * MAX_LIN_VEL
vel_cmd_twist.angular.z = vel_ang * MAX_ROT_VEL
vel_pub[current_robot].publish(vel_cmd_twist)
rate.sleep()
if __name__ == "__main__":
rospy.loginfo("Começando a brincadeira!")
# Clean ROS parameters from command line
myargv = rospy.myargv(argv=sys.argv)
print(myargv)
rospy.loginfo(myargv)
main()
| StarcoderdataPython |
1660418 | <filename>Django/middlewareimplement/my_middleware.py
# -*- coding: utf-8 -*-
# @Author: Clarence
# @Date: 2020-06-10 18:39:52
# @Last Modified by: Clarence
# @Last Modified time: 2020-06-10 18:46:26
# A simple implementation of middleware routing
class Router(object):
def __init__(self):
self.path_info = {}
def route(self, environ, start_response):
        application = self.path_info[environ['PATH_INFO']]
return application(environ, start_response)
def __call__(self, path):
def wrapper(application):
            self.path_info[path] = application
            return application
return wrapper
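
# Hypothetical usage sketch (assumed, not part of the original file): the
# router is used as a decorator factory that registers a WSGI application for
# a path, which is why wrapper() must return the wrapped application.
demo_router = Router()

@demo_router('/hello')
def hello_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

# route() dispatches on PATH_INFO to the registered application.
assert demo_router.route({'PATH_INFO': '/hello'}, lambda status, headers: None) == [b'hello']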
router = Router() | StarcoderdataPython |
86127 | <filename>icrawler/utils/session.py
import requests
from six.moves.urllib.parse import urlsplit
class Session(requests.Session):
def __init__(self, proxy_pool):
super(Session, self).__init__()
self.proxy_pool = proxy_pool
def _url_scheme(self, url):
return urlsplit(url).scheme
def get(self, url, **kwargs):
proxy = self.proxy_pool.get_next(protocol=self._url_scheme(url))
if proxy is None:
return super(Session, self).get(url, **kwargs)
try:
response = super(Session, self).get(url, proxies=proxy.format(),
**kwargs)
except requests.exceptions.ConnectionError:
self.proxy_pool.decrease_weight(proxy)
raise
except:
raise
else:
self.proxy_pool.increase_weight(proxy)
return response
def post(self, url, data=None, json=None, **kwargs):
proxy = self.proxy_pool.get_next(protocol=self._url_scheme(url))
if proxy is None:
            return super(Session, self).post(url, data, json, **kwargs)
try:
response = super(Session, self).post(url, data, json,
proxies=proxy.format(),
**kwargs)
except requests.exceptions.ConnectionError:
self.proxy_pool.decrease_weight(proxy)
raise
except:
raise
else:
self.proxy_pool.increase_weight(proxy)
return response
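
# Hypothetical usage sketch (assumed, not part of the original module): the
# session expects a proxy-pool object exposing get_next(), increase_weight()
# and decrease_weight(); icrawler.utils.ProxyPool is assumed to provide that
# interface.
if __name__ == '__main__':
    from icrawler.utils import ProxyPool

    session = Session(ProxyPool())
    print(session.get('https://example.com').status_code)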
| StarcoderdataPython |
3322496 | """
!!! Use this for ad-hoc updating of results for a known list of supplier IDs.
Takes a CSV file with rows in the format: Supplier ID, Supplier Name, Result
e.g:
123456, Supplier name 1, pass
123212, Supplier name 2, fail
234567, Supplier name 3, pass
The supplier name is cross-referenced against the supplier name in the Digital Marketplace for that supplier ID,
as a sanity check against sloppy CSV creation.
If the names don't match then the script will not update the framework status.
Usage:
scripts/insert-framework-results.py <framework_slug> <stage> <data_api_token> <filename> [<user>]
"""
import getpass
import sys
sys.path.insert(0, '.')
from docopt import docopt
from dmapiclient import DataAPIClient
from dmscripts.insert_framework_results import insert_results
from dmutils.env_helpers import get_api_endpoint_from_stage
if __name__ == '__main__':
arguments = docopt(__doc__)
client = DataAPIClient(get_api_endpoint_from_stage(arguments['<stage>']), arguments['<data_api_token>'])
output = sys.stdout
framework_slug = arguments['<framework_slug>']
filename = arguments['<filename>']
user = arguments['<user>'] or getpass.getuser()
insert_results(client, output, framework_slug, filename, user)
| StarcoderdataPython |
46313 | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='darch',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.0',
description='Deep Architect',
long_description=long_description,
# The project's main homepage.
url='https://github.com/negrinho/deep_architect',
# Author details
author='The Python Packaging Authority',
author_email='<EMAIL>',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='deep architect',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=['darch'],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'numpy',
'scipy',
'sklearn'
]
)
| StarcoderdataPython |
163736 | <reponame>CubexX/tram-bot<filename>utils.py
import re
import requests
from telegram import (InlineKeyboardButton, InlineKeyboardMarkup,
ReplyKeyboardMarkup)
from stations import invert_stations, stations
from storer import Storer
storer = Storer('bot.db')
def get_inline_keyboard(station_id):
return InlineKeyboardMarkup(
[[InlineKeyboardButton('Обновить 🔄', callback_data=station_id)]]
)
def get_response(station_id):
url = 'http://online.ettu.ru/station/{}'.format(station_id)
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.86 '
'Safari/537.36',
}
res = requests.get(url, headers=headers).text
return res
def get_schedule(html):
# Route name
route = re.findall('<p>(.*)</p>(.*)<img', html, re.DOTALL)[0][0].strip()
route = re.sub('<[^<]+?>', '', route)
tram_list = []
if "мин" in html:
tram_numbers = re.findall('center;"><b>(\d+)', html)
a = re.findall('right;">(.*)</div>', html)
i = 0
for n in tram_numbers:
tram_list.append('***{}*** - {} - {}\n'.format(n, a[i], a[i + 1]))
i += 2
else:
tram_list = 'Нет трамваев'
return '***{}***\n\n{}'.format(route, ''.join(tram_list))
def get_user_stations(uid):
users = storer.restore('users')
if len(users[uid]['routes']):
_list = []
for s in users[uid]['routes']:
_list.append('🚃 <b>{0}</b>\n'
'<i>Выбрать:</i> /st_{1}\n'
'<i>Удалить:</i> /fav_{1}\n\n'.format(stations[int(s)], s))
msg = '<b>Ваши остановки</b> \n\n' + ''.join(_list)
else:
msg = 'Список ваших остановок пуст'
return msg
def get_user_history(uid):
users = storer.restore('users')
if len(users[uid]['history']):
_list = []
history = users[uid]['history'][:5]
for s in history:
_list.append('🚃 <b>{0}</b>\n'
'<i>Выбрать:</i> /st_{1}\n'
'<i>В избранное:</i> /fav_{1}\n\n'.format(stations[int(s)], s))
msg = '<b>Ваша история</b> \n\n' + ''.join(_list)
else:
msg = 'История пуста'
return msg
def get_stations_by_char(char):
_list = []
for st in invert_stations:
if st[0][:1] == char:
_list.append('🚃 <b>{0}</b>\n'
'<i>Выбрать:</i> /st_{1}\n'
'<i>В избранное:</i> /fav_{1}\n\n'.format(st[0], st[1]))
if len(_list):
msg = '<b>Остановки на {}</b> \n\n{}'.format(char, ''.join(_list))
else:
msg = 'Не найдено'
return msg
default_keyboard = [
['Найти 🔎'],
['Мои остановки 📖'],
['История 📝']
]
chars = [['1', '4', '7', 'А', 'Б'], ['В', 'Г', 'Д', 'Е', 'Ж'],
['З', 'И', 'К', 'Л', 'М'], ['Н', 'О', 'П', 'Р', 'С'],
['Т', 'У', 'Ф', 'Х', 'Ц'], ['Ч', 'Ш', 'Щ', 'Э', 'Ю'], ['Я', '↩️']]
default_keyboard = ReplyKeyboardMarkup(default_keyboard, resize_keyboard=True)
chars_keyboard = ReplyKeyboardMarkup(chars)
| StarcoderdataPython |
1626373 | <reponame>marnunez/ezc3d
"""
Test for file IO
"""
from pathlib import Path
import numpy as np
import pytest
import ezc3d
def test_create_c3d():
c3d = ezc3d.c3d()
# Test the header
assert c3d['header']['points']['size'] == 0
assert c3d['header']['points']['frame_rate'] == 0.0
assert c3d['header']['points']['first_frame'] == 0
assert c3d['header']['points']['last_frame'] == 0
assert c3d['header']['analogs']['size'] == 0
assert c3d['header']['analogs']['frame_rate'] == 0.0
assert c3d['header']['analogs']['first_frame'] == 0
assert c3d['header']['analogs']['last_frame'] == -1
assert c3d['header']['events']['size'] == 18
assert c3d['header']['events']['events_time'] \
== (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
assert c3d['header']['events']['events_label'] \
== ('', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '')
# Test the parameters
assert c3d['parameters']['POINT']['USED']['value'][0] == 0
assert c3d['parameters']['POINT']['SCALE']['value'][0] == -1
assert c3d['parameters']['POINT']['RATE']['value'][0] == 0.0
assert c3d['parameters']['POINT']['FRAMES']['value'][0] == 0
assert len(c3d['parameters']['POINT']['LABELS']['value']) == 0
assert len(c3d['parameters']['POINT']['DESCRIPTIONS']['value']) == 0
assert len(c3d['parameters']['POINT']['UNITS']['value']) == 0
assert c3d['parameters']['ANALOG']['USED']['value'][0] == 0
assert len(c3d['parameters']['ANALOG']['LABELS']['value']) == 0
assert len(c3d['parameters']['ANALOG']['DESCRIPTIONS']['value']) == 0
assert c3d['parameters']['ANALOG']['GEN_SCALE']['value'][0] == 1
assert len(c3d['parameters']['ANALOG']['SCALE']['value']) == 0
assert len(c3d['parameters']['ANALOG']['OFFSET']['value']) == 0
assert len(c3d['parameters']['ANALOG']['UNITS']['value']) == 0
assert c3d['parameters']['ANALOG']['RATE']['value'][0] == 0.0
assert len(c3d['parameters']['ANALOG']['FORMAT']['value']) == 0
assert len(c3d['parameters']['ANALOG']['BITS']['value']) == 0
assert c3d['parameters']['FORCE_PLATFORM']['USED']['value'][0] == 0
assert len(c3d['parameters']['FORCE_PLATFORM']['TYPE']['value']) == 0
assert np.all(c3d['parameters']['FORCE_PLATFORM']['ZERO']['value'] == (1, 0))
assert len(c3d['parameters']['FORCE_PLATFORM']['CORNERS']['value']) == 0
assert len(c3d['parameters']['FORCE_PLATFORM']['ORIGIN']['value']) == 0
assert len(c3d['parameters']['FORCE_PLATFORM']['CHANNEL']['value']) == 0
assert len(c3d['parameters']['FORCE_PLATFORM']['CAL_MATRIX']['value']) == 0
# Test the data
assert c3d['data']['points'].shape == (4, 0, 0)
assert c3d['data']['analogs'].shape == (1, 0, 0)
def test_create_and_read_c3d():
# Load an empty c3d structure
c3d = ezc3d.c3d()
# Fill it with random data
point_names = ('point1', 'point2', 'point3', 'point4', 'point5')
point_frame_rate = 100
n_second = 2
points = np.random.rand(3, len(point_names), point_frame_rate * n_second)
analog_names = ('analog1', 'analog2', 'analog3', 'analog4', 'analog5', 'analog6')
analog_frame_rate = 1000
analogs = np.random.rand(1, len(analog_names), analog_frame_rate * n_second)
c3d['parameters']['POINT']['RATE']['value'] = [100]
c3d['parameters']['POINT']['LABELS']['value'] = point_names
c3d['data']['points'] = points
c3d['parameters']['ANALOG']['RATE']['value'] = [1000]
c3d['parameters']['ANALOG']['LABELS']['value'] = analog_names
c3d['data']['analogs'] = analogs
# Add a custom parameter to the POINT group
point_new_param = ("POINT", "newPointParam", (1.0, 2.0, 3.0))
c3d.add_parameter(point_new_param[0], point_new_param[1], point_new_param[2])
# Add a custom parameter a new group
new_group_param = ("NewGroup", "newGroupParam", ["MyParam1", "MyParam2"])
c3d.add_parameter(new_group_param[0], new_group_param[1], new_group_param[2])
# Write and read back the data
c3d.write("temporary.c3d")
c3d_to_compare = ezc3d.c3d("temporary.c3d")
# Test the header
assert c3d_to_compare['header']['points']['size'] == len(point_names)
assert c3d_to_compare['header']['points']['frame_rate'] == point_frame_rate
assert c3d_to_compare['header']['points']['first_frame'] == 0
assert c3d_to_compare['header']['points']['last_frame'] == point_frame_rate * n_second - 1
assert c3d_to_compare['header']['analogs']['size'] == len(analog_names)
assert c3d_to_compare['header']['analogs']['frame_rate'] == analog_frame_rate
assert c3d_to_compare['header']['analogs']['first_frame'] == 0
assert c3d_to_compare['header']['analogs']['last_frame'] == analog_frame_rate * n_second - 1
assert c3d_to_compare['header']['events']['size'] == 18
assert c3d_to_compare['header']['events']['events_time'] == \
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
assert c3d_to_compare['header']['events']['events_label'] == \
('', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '')
# Test the parameters
assert c3d_to_compare['parameters']['POINT']['USED']['value'][0] == len(point_names)
assert c3d_to_compare['parameters']['POINT']['SCALE']['value'][0] == -1.0
assert c3d_to_compare['parameters']['POINT']['RATE']['value'][0] == point_frame_rate
assert c3d_to_compare['parameters']['POINT']['FRAMES']['value'][0] == point_frame_rate * n_second
assert c3d_to_compare['parameters']['POINT']['LABELS']['value'] == list(point_names)
assert c3d_to_compare['parameters']['POINT']['DESCRIPTIONS']['value'] == ["" for _ in point_names]
assert len(c3d_to_compare['parameters']['POINT']['UNITS']['value']) == 0
assert np.all(c3d_to_compare['parameters'][point_new_param[0].upper()][point_new_param[1].upper()]['value']
== point_new_param[2])
assert c3d_to_compare['parameters']['ANALOG']['USED']['value'][0] == len(analog_names)
assert c3d_to_compare['parameters']['ANALOG']['LABELS']['value'] == list(analog_names)
assert c3d_to_compare['parameters']['ANALOG']['DESCRIPTIONS']['value'] == ["" for _ in analog_names]
assert c3d_to_compare['parameters']['ANALOG']['GEN_SCALE']['value'][0] == 1
assert np.all(c3d_to_compare['parameters']['ANALOG']['SCALE']['value'] == tuple([1.0 for _ in analog_names]))
assert np.all(c3d_to_compare['parameters']['ANALOG']['OFFSET']['value'] == tuple([0 for _ in analog_names]))
assert c3d_to_compare['parameters']['ANALOG']['UNITS']['value'] == ["" for _ in analog_names]
assert c3d_to_compare['parameters']['ANALOG']['RATE']['value'][0] == analog_frame_rate
assert len(c3d_to_compare['parameters']['ANALOG']['FORMAT']['value']) == 0
assert len(c3d_to_compare['parameters']['ANALOG']['BITS']['value']) == 0
assert c3d_to_compare['parameters']['FORCE_PLATFORM']['USED']['value'][0] == 0
assert len(c3d_to_compare['parameters']['FORCE_PLATFORM']['TYPE']['value']) == 0
assert np.all(c3d_to_compare['parameters']['FORCE_PLATFORM']['ZERO']['value'] == (1, 0))
assert len(c3d_to_compare['parameters']['FORCE_PLATFORM']['CORNERS']['value']) == 0
assert len(c3d_to_compare['parameters']['FORCE_PLATFORM']['ORIGIN']['value']) == 0
assert len(c3d_to_compare['parameters']['FORCE_PLATFORM']['CHANNEL']['value']) == 0
assert len(c3d_to_compare['parameters']['FORCE_PLATFORM']['CAL_MATRIX']['value']) == 0
assert c3d_to_compare['parameters'][new_group_param[0].upper()][new_group_param[1].upper()]['value'] \
== new_group_param[2]
# Test the data
assert c3d_to_compare['data']['points'].shape == (4, len(point_names), point_frame_rate * n_second)
assert c3d_to_compare['data']['analogs'].shape == (1, len(analog_names), analog_frame_rate * n_second)
# Compare the read c3d
np.testing.assert_almost_equal(c3d_to_compare['data']['points'][0:3, :, :], points)
np.testing.assert_almost_equal(c3d_to_compare['data']['analogs'], analogs)
def test_create_and_read_c3d_with_nan():
# Load an empty c3d structure
c3d = ezc3d.c3d()
# Fill it with random data
point_names = ('point1', 'point2')
point_frame_rate = 100
n_second = 2
points = np.random.rand(3, len(point_names), point_frame_rate * n_second) * np.nan
analog_names = ('analog1', 'analog2')
analog_frame_rate = 1000
analogs = np.random.rand(1, len(analog_names), analog_frame_rate * n_second) * np.nan
c3d['parameters']['POINT']['RATE']['value'] = [100]
c3d['parameters']['POINT']['LABELS']['value'] = point_names
c3d['data']['points'] = points
c3d['parameters']['ANALOG']['RATE']['value'] = [1000]
c3d['parameters']['ANALOG']['LABELS']['value'] = analog_names
c3d['data']['analogs'] = analogs
# Write and read back the data
c3d.write("temporary.c3d")
c3d_to_compare = ezc3d.c3d("temporary.c3d")
# Compare the read c3d
np.testing.assert_equal(np.sum(np.isnan(c3d_to_compare['data']['points'])),
3 * len(point_names) * point_frame_rate * n_second)
np.testing.assert_equal(np.sum(np.isnan(c3d_to_compare['data']['analogs'])),
len(analog_names) * analog_frame_rate * n_second)
@pytest.fixture(scope='module', params=["BTS", "Optotrak", "Qualisys", "Vicon"])
def c3d_build_rebuild(request):
base_folder = Path("test/c3dTestFiles")
orig_file = Path(base_folder / (request.param + ".c3d"))
rebuild_file = Path(base_folder / (request.param + "_after.c3d"))
original = ezc3d.c3d(orig_file.as_posix())
original.write(rebuild_file.as_posix())
rebuilt = ezc3d.c3d(rebuild_file.as_posix())
yield (original, rebuilt)
Path.unlink(rebuild_file)
def test_parse_and_rebuild(c3d_build_rebuild):
for i in c3d_build_rebuild:
assert isinstance(i, ezc3d.c3d)
orig, rebuilt = c3d_build_rebuild
assert orig == rebuilt
def test_parse_and_rebuild_header(c3d_build_rebuild):
orig, rebuilt = c3d_build_rebuild
assert orig['header'] == rebuilt['header']
def test_parse_and_rebuild_parameters(c3d_build_rebuild):
orig, rebuilt = c3d_build_rebuild
assert orig['parameters'] == rebuilt['parameters']
def test_parse_and_rebuild_data(c3d_build_rebuild):
orig, rebuilt = c3d_build_rebuild
assert orig['data'] == rebuilt['data']
| StarcoderdataPython |
15033 | <filename>tests/validation/test_is_subnational1.py
import unittest
from ebird.api.validation import is_subnational1
class IsSubnational1Tests(unittest.TestCase):
"""Tests for the is_subnational1 validation function."""
def test_is_subnational1(self):
self.assertTrue(is_subnational1("US-NV"))
def test_invalid_code_is_not_subnational1(self):
self.assertFalse(is_subnational1("U"))
self.assertFalse(is_subnational1("US-"))
def test_country_is_not_subnational1(self):
self.assertFalse(is_subnational1("US"))
def test_subnational2_is_not_subnational1(self):
self.assertFalse(is_subnational1("US-NV-VMT"))
def test_location_is_not_subnational1(self):
self.assertFalse(is_subnational1("L123456"))
| StarcoderdataPython |
3282518 | <filename>autoprompt/pre_defined_prompt.py
import time
import argparse
import json
import logging
from pathlib import Path
import random
import os
import pickle
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import transformers
from transformers import AutoConfig, AutoModelWithLMHead, AutoTokenizer
from tqdm import tqdm
import autoprompt.utils as utils
logger = logging.getLogger(__name__)
class PredictWrapper:
"""
PyTorch transformers model wrapper. Handles necc. preprocessing of inputs for triggers
experiments.
"""
def __init__(self, model):
self._model = model
def __call__(self, model_inputs, trigger_ids):
# Copy dict so pop operations don't have unwanted side-effects
model_inputs = model_inputs.copy()
trigger_mask = model_inputs.pop('trigger_mask')
predict_mask = model_inputs.pop('predict_mask')
model_inputs = replace_trigger_tokens(model_inputs, trigger_ids, trigger_mask)
logits, *_ = self._model(**model_inputs)
predict_logits = logits.masked_select(predict_mask.unsqueeze(-1)).view(logits.size(0), -1)
return predict_logits
class AccuracyFn:
"""
Computing the accuracy when a label is mapped to multiple tokens is difficult in the current
framework, since the data generator only gives us the token ids. To get around this we
    compare the target logp to the logp of all labels. If the target logp is greater than or
    equal to all but one of the label logps, we know the prediction is correct.
"""
def __init__(self, tokenizer, label_map, device, tokenize_labels=False):
self._all_label_ids = []
self._pred_to_label = []
logger.info(label_map)
for label, label_tokens in label_map.items():
self._all_label_ids.append(utils.encode_label(tokenizer, label_tokens, tokenize_labels).to(device))
self._pred_to_label.append(label)
logger.info(self._all_label_ids)
def __call__(self, predict_logits, gold_label_ids):
# Get total log-probability for the true label
gold_logp = get_loss(predict_logits, gold_label_ids)
# Get total log-probability for all labels
bsz = predict_logits.size(0)
all_label_logp = []
for label_ids in self._all_label_ids:
label_logp = get_loss(predict_logits, label_ids.repeat(bsz, 1))
all_label_logp.append(label_logp)
all_label_logp = torch.stack(all_label_logp, dim=-1)
_, predictions = all_label_logp.max(dim=-1)
predictions = [self._pred_to_label[x] for x in predictions.tolist()]
        # Count the labels whose total log-probability is no greater than the gold label's.
ge_count = all_label_logp.le(gold_logp.unsqueeze(-1)).sum(-1)
correct = ge_count.le(1) # less than in case of num. prec. issues
return correct.float()
# TODO: @rloganiv - This is hacky. Replace with something sensible.
def predict(self, predict_logits):
bsz = predict_logits.size(0)
all_label_logp = []
for label_ids in self._all_label_ids:
label_logp = get_loss(predict_logits, label_ids.repeat(bsz, 1))
all_label_logp.append(label_logp)
all_label_logp = torch.stack(all_label_logp, dim=-1)
_, predictions = all_label_logp.max(dim=-1)
predictions = [self._pred_to_label[x] for x in predictions.tolist()]
return predictions
def load_pretrained(model_name):
"""
Loads pretrained HuggingFace config/model/tokenizer, as well as performs required
initialization steps to facilitate working with triggers.
"""
config = AutoConfig.from_pretrained(model_name)
model = AutoModelWithLMHead.from_pretrained(model_name)
model.eval()
tokenizer = AutoTokenizer.from_pretrained(model_name, add_prefix_space=True)
utils.add_task_specific_tokens(tokenizer)
return config, model, tokenizer
def set_seed(seed: int):
"""Sets the relevant random seeds."""
random.seed(seed)
np.random.seed(seed)
torch.random.manual_seed(seed)
torch.cuda.manual_seed(seed)
def replace_trigger_tokens(model_inputs, trigger_ids, trigger_mask):
"""Replaces the trigger tokens in input_ids."""
out = model_inputs.copy()
input_ids = model_inputs['input_ids']
trigger_ids = trigger_ids.repeat(trigger_mask.size(0), 1)
try:
filled = input_ids.masked_scatter(trigger_mask, trigger_ids)
except RuntimeError:
filled = input_ids
out['input_ids'] = filled
return out
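# Illustrative sketch (added for clarity, not part of the original script): how
# replace_trigger_tokens overwrites the masked positions. The token ids below are made up;
# the helper is never called automatically.
def _demo_replace_trigger_tokens():
    model_inputs = {'input_ids': torch.tensor([[101, 0, 0, 102]])}
    trigger_mask = torch.tensor([[False, True, True, False]])
    trigger_ids = torch.tensor([[5, 6]])
    out = replace_trigger_tokens(model_inputs, trigger_ids, trigger_mask)
    # out['input_ids'] is tensor([[101, 5, 6, 102]]): the trigger ids fill the masked slots.
    return out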
def get_loss(predict_logits, label_ids):
predict_logp = F.log_softmax(predict_logits, dim=-1)
target_logp = predict_logp.gather(-1, label_ids)
target_logp = target_logp - 1e32 * label_ids.eq(0) # Apply mask
target_logp = torch.logsumexp(target_logp, dim=-1)
return -target_logp
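# Illustrative sketch (added for clarity, not part of the original script): get_loss masks
# out padding label ids (id 0) before the logsumexp, so only real label tokens contribute
# to the loss. The values below are made up; the helper is never called automatically.
def _demo_get_loss():
    predict_logits = torch.tensor([[2.0, 0.5, 0.1]])  # one example over a 3-token vocabulary
    label_ids = torch.tensor([[1, 0]])                # gold token id 1 plus a padding id 0
    return get_loss(predict_logits, label_ids)        # approximately -log softmax(logits)[1]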
def run_model(args):
set_seed(args.seed)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logger.info('Loading model, tokenizer, etc.')
config, model, tokenizer = load_pretrained(args.model_name)
model.to(device)
predictor = PredictWrapper(model)
if args.saved_label_map is not None:
with open(args.saved_label_map, 'rb') as f:
label_token_list = pickle.load(f)
label_map = {str(lbl_id): label_tokens for lbl_id, label_tokens in enumerate(label_token_list)}
logger.info(f"Label map: {label_map}")
else:
logger.info('No label map')
exit(-1)
templatizer = utils.TriggerTemplatizer(
args.template,
config,
tokenizer,
label_map=label_map,
label_field=args.label_field,
tokenize_labels=args.tokenize_labels,
add_special_tokens=False,
use_ctx=args.use_ctx
)
# Obtain the initial trigger tokens and label mapping
if args.initial_trigger_path:
with open(args.initial_trigger_path, 'rb') as f:
triggers = pickle.load(f)
best_triggers = tokenizer.tokenize(triggers[0])
print(f"best_triggers = {best_triggers}")
trigger_ids = tokenizer.convert_tokens_to_ids(best_triggers)
logger.debug(f'Initial trigger: {best_triggers}')
logger.debug(f'Trigger ids: {trigger_ids}')
assert len(trigger_ids) == templatizer.num_trigger_tokens
else:
logger.info('No triggers')
exit(-1)
trigger_ids = torch.tensor(trigger_ids, device=device).unsqueeze(0)
    # NOTE: Accuracy can only be computed if a fixed pool of labels is given, which currently
    # requires the label map to be specified. Since producing a label map may be cumbersome (e.g.,
    # for link prediction tasks), we just use (negative) loss as the evaluation metric in these cases.
if label_map:
evaluation_fn = AccuracyFn(tokenizer, label_map, device)
else:
evaluation_fn = lambda x, y: -get_loss(x, y)
logger.info('Loading datasets')
collator = utils.Collator(pad_token_id=tokenizer.pad_token_id)
dev_dataset = utils.load_trigger_dataset(args.dev, templatizer, use_ctx=args.use_ctx)
dev_loader = DataLoader(dev_dataset, batch_size=args.eval_size, shuffle=False, collate_fn=collator)
all_trg_domains = args.trg_domains.split(",")
all_test_loaders = []
for trg_domain in all_trg_domains:
test_dataset = utils.load_trigger_dataset(Path(os.path.join(args.test, trg_domain, 'test-labeled.tsv')),
templatizer, use_ctx=args.use_ctx)
test_loader = DataLoader(test_dataset, batch_size=args.eval_size, shuffle=False, collate_fn=collator)
all_test_loaders.append((trg_domain, test_loader))
logger.info('Evaluating')
numerator = 0
denominator = 0
for model_inputs, labels in tqdm(dev_loader):
model_inputs = {k: v.to(device) for k, v in model_inputs.items()}
labels = labels.to(device)
with torch.no_grad():
predict_logits = predictor(model_inputs, trigger_ids)
numerator += evaluation_fn(predict_logits, labels).sum().item()
denominator += labels.size(0)
best_dev_metric = numerator / (denominator + 1e-13)
logger.info(f'Dev metric: {best_dev_metric}')
logger.info(f'Trigger tokens: {tokenizer.convert_ids_to_tokens(trigger_ids.squeeze(0))}')
logger.info('Testing')
best_test_metric = {}
for (trg_domain, test_loader) in all_test_loaders:
numerator = 0
denominator = 0
for model_inputs, labels in tqdm(test_loader):
model_inputs = {k: v.to(device) for k, v in model_inputs.items()}
labels = labels.to(device)
with torch.no_grad():
predict_logits = predictor(model_inputs, trigger_ids)
numerator += evaluation_fn(predict_logits, labels).sum().item()
denominator += labels.size(0)
best_test_metric[trg_domain] = numerator / (denominator + 1e-13)
logger.info(f'test metric - {trg_domain}: {best_test_metric[trg_domain]}')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dev', type=Path, required=True, help='Dev data path')
parser.add_argument('--test', type=str, required=True, help='Test data path')
    parser.add_argument('--trg_domains', type=str, required=True, help='All test domains separated by commas.')
parser.add_argument('--template', type=str, help='Template string')
parser.add_argument('--saved_label_map', type=Path, default=None, help='label tokens path')
# LAMA-specific
parser.add_argument('--tokenize-labels', action='store_true',
                        help='If specified, labels are split into word pieces. '
                             'Needed for LAMA probe experiments.')
parser.add_argument('--initial_trigger_path', type=Path, default=None, help='Manual prompt path.')
parser.add_argument('--initial-trigger', nargs='+', type=str, default=None, help='Manual prompt')
parser.add_argument('--label-field', type=str, default='label',
help='Name of the label field')
parser.add_argument('--bsz', type=int, default=32, help='Batch size')
parser.add_argument('--eval-size', type=int, default=256, help='Eval size')
parser.add_argument('--model-name', type=str, default='bert-base-cased',
help='Model name passed to HuggingFace AutoX classes.')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--limit', type=int, default=None)
parser.add_argument('--use-ctx', action='store_true',
help='Use context sentences for relation extraction only')
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
if args.debug:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(level=level)
run_model(args)
| StarcoderdataPython |
66412 | # -*- coding: utf-8 -*
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import requests
import os
from astropy.coordinates import SkyCoord
import astropy.units as u
from astropy.table import Table, Column
from astropy.io.votable import parse
from astroquery import log
from astroquery.casda import Casda
try:
from unittest.mock import Mock, patch, PropertyMock, MagicMock
except ImportError:
pytest.skip("Install mock for the casda tests.", allow_module_level=True)
DATA_FILES = {'CIRCLE': 'cone.xml', 'RANGE': 'box.xml', 'DATALINK': 'datalink.xml', 'RUN_JOB': 'run_job.xml',
'COMPLETED_JOB': 'completed_job.xml', 'DATALINK_NOACCESS': 'datalink_noaccess.xml'}
class MockResponse:
def __init__(self, content):
self.content = content
self.text = content.decode()
def raise_for_status(self):
return
first_job_pass = True
def get_mockreturn(self, method, url, data=None, timeout=10,
files=None, params=None, headers=None, **kwargs):
log.debug("get_mockreturn url:{} params:{} kwargs:{}".format(url, params, kwargs))
if kwargs and 'auth' in kwargs:
auth = kwargs['auth']
if auth and (auth[0] != 'user' or auth[1] != 'password'):
log.debug("Rejecting credentials")
return create_auth_failure_response()
if 'data/async' in str(url):
# Responses for an asynchronous SODA job
if str(url).endswith('data/async'):
self.first_job_pass = True
return create_soda_create_response('111-000-111-000')
elif str(url).endswith('/phase') and method == 'POST':
key = "RUN_JOB"
elif str(url).endswith('111-000-111-000') and method == 'GET':
key = "RUN_JOB" if self.first_job_pass else "COMPLETED_JOB"
self.first_job_pass = False
else:
raise ValueError("Unexpected SODA async {} call to url {}".format(method, url))
elif 'datalink' in str(url):
if 'cube-244' in str(url):
key = 'DATALINK'
else:
key = 'DATALINK_NOACCESS'
else:
key = params['POS'].split()[0] if params['POS'] else None
filename = data_path(DATA_FILES[key])
log.debug('providing ' + filename)
content = open(filename, 'rb').read()
return MockResponse(content)
def create_soda_create_response(jobid):
job_url = 'https://casda.csiro.au/casda_data_access/data/async/' + jobid
create_response_headers = [
['location', job_url]
]
create_response = Mock(spec=requests.Response)
create_response.configure_mock(status_code=303, message='OK', headers=create_response_headers, url=job_url)
return create_response
def create_auth_failure_response():
unauthenticated_headers = [
['WWW-Authenticate', 'Basic realm="ATNF OPAL Login"']
]
create_response = MagicMock(spec=requests.Response)
attrs = {'raise_for_status.side_effect': requests.exceptions.HTTPError()}
create_response.configure_mock(status_code=401, message='OK', headers=unauthenticated_headers, **attrs)
return create_response
@pytest.fixture
def patch_get(request):
try:
mp = request.getfixturevalue("monkeypatch")
except AttributeError: # pytest < 3
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(requests.Session, 'request', get_mockreturn)
return mp
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
def isclose(value1, value2, abs_tol=1e-09):
return abs(value1 - value2) < abs_tol
def test_query_region_text_radius(patch_get):
ra = 333.9092
dec = -45.8418
radius = 0.5
query_payload = Casda.query_region('22h15m38.2s -45d50m30.5s', radius=radius * u.deg, cache=False,
get_query_payload=True)
assert isinstance(query_payload, dict)
assert 'POS' in query_payload
assert query_payload['POS'].startswith('CIRCLE 333')
pos_parts = query_payload['POS'].split(' ')
assert pos_parts[0] == 'CIRCLE'
assert isclose(float(pos_parts[1]), ra, abs_tol=1e-4)
assert isclose(float(pos_parts[2]), dec, abs_tol=1e-4)
assert isclose(float(pos_parts[3]), radius)
assert len(pos_parts) == 4
responses = Casda.query_region('22h15m38.2s -45d50m30.5s', radius=0.5 * u.deg, cache=False)
assert isinstance(responses, Table)
assert len(responses) == 3
def test_query_region_radius(patch_get):
ra = 333.9092
dec = -45.8418
radius = 0.5
centre = SkyCoord(ra, dec, unit=('deg', 'deg'))
query_payload = Casda.query_region(centre, radius=radius * u.deg, cache=False, get_query_payload=True)
assert isinstance(query_payload, dict)
assert 'POS' in query_payload
assert query_payload['POS'].startswith('CIRCLE 333')
pos_parts = query_payload['POS'].split(' ')
assert pos_parts[0] == 'CIRCLE'
assert isclose(float(pos_parts[1]), ra, abs_tol=1e-5)
assert isclose(float(pos_parts[2]), dec, abs_tol=1e-5)
assert isclose(float(pos_parts[3]), radius)
assert len(pos_parts) == 4
responses = Casda.query_region(centre, radius=0.5 * u.deg, cache=False)
assert isinstance(responses, Table)
assert len(responses) == 3
def test_query_region_async_radius(patch_get):
ra = 333.9092
dec = -45.8418
radius = 0.5
centre = SkyCoord(ra, dec, unit=('deg', 'deg'))
query_payload = Casda.query_region_async(centre, radius=radius * u.deg, cache=False, get_query_payload=True)
assert isinstance(query_payload, dict)
assert 'POS' in query_payload
assert query_payload['POS'].startswith('CIRCLE 333')
pos_parts = query_payload['POS'].split(' ')
assert pos_parts[0] == 'CIRCLE'
assert isclose(float(pos_parts[1]), ra, abs_tol=1e-5)
assert isclose(float(pos_parts[2]), dec, abs_tol=1e-5)
assert isclose(float(pos_parts[3]), radius)
assert len(pos_parts) == 4
responses = Casda.query_region_async(centre, radius=0.5 * u.deg, cache=False)
assert isinstance(responses, MockResponse)
def test_query_region_box(patch_get):
ra = 333.9092
dec = -45.8418
width = 0.5
height = 0.2
centre = SkyCoord(ra, dec, unit=('deg', 'deg'))
query_payload = Casda.query_region(centre, width=width * u.deg, height=height * u.deg, cache=False,
get_query_payload=True)
assert isinstance(query_payload, dict)
assert 'POS' in query_payload
assert query_payload['POS'].startswith('RANGE 333')
pos_parts = query_payload['POS'].split(' ')
assert pos_parts[0] == 'RANGE'
assert isclose(float(pos_parts[1]), ra - width / 2, abs_tol=1e-5)
assert isclose(float(pos_parts[2]), ra + width / 2, abs_tol=1e-5)
assert isclose(float(pos_parts[3]), dec - height / 2, abs_tol=1e-5)
assert isclose(float(pos_parts[4]), dec + height / 2, abs_tol=1e-5)
assert len(pos_parts) == 5
responses = Casda.query_region(centre, width=width * u.deg, height=height * u.deg, cache=False)
assert isinstance(responses, Table)
assert len(responses) == 2
def test_query_region_async_box(patch_get):
ra = 333.9092
dec = -45.8418
width = 0.5
height = 0.2
centre = SkyCoord(ra, dec, unit=('deg', 'deg'))
query_payload = Casda.query_region_async(centre, width=width * u.deg, height=height * u.deg, cache=False,
get_query_payload=True)
assert isinstance(query_payload, dict)
assert 'POS' in query_payload
assert query_payload['POS'].startswith('RANGE 333')
pos_parts = query_payload['POS'].split(' ')
assert pos_parts[0] == 'RANGE'
assert isclose(float(pos_parts[1]), ra - width / 2, abs_tol=1e-5)
assert isclose(float(pos_parts[2]), ra + width / 2, abs_tol=1e-5)
assert isclose(float(pos_parts[3]), dec - height / 2, abs_tol=1e-5)
assert isclose(float(pos_parts[4]), dec + height / 2, abs_tol=1e-5)
assert len(pos_parts) == 5
responses = Casda.query_region_async(centre, width=width * u.deg, height=height * u.deg, cache=False)
assert isinstance(responses, MockResponse)
def test_filter_out_unreleased():
all_records = parse(data_path('partial_unreleased.xml'), verify='warn').get_first_table().to_table()
assert all_records[0]['obs_release_date'] == '2017-08-02T03:51:19.728Z'
assert all_records[1]['obs_release_date'] == '2218-01-02T16:51:00.728Z'
assert all_records[2]['obs_release_date'] == ''
assert len(all_records) == 3
# This should filter out the rows with either a future obs_release_date or no obs_release_date
filtered = Casda.filter_out_unreleased(all_records)
assert filtered[0]['obs_release_date'] == '2017-08-02T03:51:19.728Z'
assert filtered[0]['obs_publisher_did'] == 'cube-502'
assert len(filtered) == 1
def test_stage_data_unauthorised(patch_get):
table = Table()
with pytest.raises(ValueError) as excinfo:
Casda.stage_data(table)
assert "Credentials must be supplied" in str(excinfo.value)
def test_stage_data_empty(patch_get):
table = Table()
casda = Casda('user', 'password')
urls = casda.stage_data(table)
assert urls == []
def test_stage_data_invalid_credentials(patch_get):
prefix = 'https://somewhere/casda/datalink/links?'
access_urls = [prefix + 'cube-220']
table = Table([Column(data=access_urls, name='access_url')])
casda = Casda('user', '<PASSWORD>')
with pytest.raises(requests.exceptions.HTTPError) as excinfo:
casda.stage_data(table)
def test_stage_data_no_link(patch_get):
prefix = 'https://somewhere/casda/datalink/links?'
access_urls = [prefix + 'cube-240']
table = Table([Column(data=access_urls, name='access_url')])
casda = Casda('user', 'password')
casda.POLL_INTERVAL = 1
with pytest.raises(ValueError) as excinfo:
casda.stage_data(table)
assert "You do not have access to any of the requested data files." in str(excinfo.value)
def test_stage_data(patch_get):
prefix = 'https://somewhere/casda/datalink/links?'
access_urls = [prefix + 'cube-244']
table = Table([Column(data=access_urls, name='access_url')])
casda = Casda('user', 'password')
casda.POLL_INTERVAL = 1
urls = casda.stage_data(table, verbose=True)
assert urls == ['http://casda.csiro.au/download/web/111-000-111-000/askap_img.fits.checksum',
'http://casda.csiro.au/download/web/111-000-111-000/askap_img.fits']
| StarcoderdataPython |
1663482 | <gh_stars>0
"""
this is a program intended to let the user sort a list
@author: <NAME>
"""
def insertion_sort(l):
""" scan each item in a list and findout if the current position number is less than target """
sorted_list = []
for item_compare in l:
for offset, sorted_number in enumerate(sorted_list.copy()):
if item_compare <= sorted_number:
sorted_list.insert(offset, item_compare)
break
else:
sorted_list.append(item_compare)
return sorted_list
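# Example (illustrative, not part of the original file):
#   insertion_sort([3, 1, 2]) -> [1, 2, 3]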
def list_copy(l):
    # return a copy of the list using a list comprehension
return [item for item in l]
def list_intersect(l1, l2):
    # return the items that appear in both lists
return [item for item in l1 if item in l2]
def list_difference(l1, l2):
    # return the items of l1 that do not appear in l2
return [item for item in l1 if item not in l2]
def remove_vowels(string):
    # return the string with all vowels removed
return "".join([item for item in string if item.lower() not in ['a', 'e', 'i', 'o', 'u']])
def check_pwd(password):
    # check that the password contains at least one uppercase letter, one lowercase letter and one digit
    return (len([item for item in password if item.isupper()]) > 0
            and len([item for item in password if item.islower()]) > 0
            and len([item for item in password if item.isdigit()]) > 0)
| StarcoderdataPython |
1718375 | <reponame>Pcosmin/Optimus
from pyspark.sql import DataFrame as SparkDataFrame
from dask.dataframe.core import DataFrame as DaskDataFrame
from optimus.helpers.columns import check_column_numbers
from optimus.helpers.columns import parse_columns
from optimus.plots.functions import plot_scatterplot, plot_boxplot, plot_frequency, plot_hist, \
plot_correlation, plot_qqplot
def plot(self):
df = self
class Plot:
@staticmethod
def hist(columns=None, buckets=10, output_format="plot", output_path=None):
"""
Plot histogram
:param columns: Columns to be printed
:param buckets: Number of buckets
:param output_format:
:param output_path: path where the image is going to be saved
:return:
"""
columns = parse_columns(self, columns)
data = self.cols.hist(columns, buckets)
for col_name in data.keys():
plot_hist({col_name: data[col_name]["hist"]}, output=output_format, path=output_path)
@staticmethod
def scatter(columns=None, buckets=30, output_format="plot", output_path=None):
"""
Plot scatter
:param columns: columns to be printed
:param buckets: number of buckets
:param output_format:
:param output_path: path where the image is going to be saved
:return:
"""
columns = parse_columns(self, columns, filter_by_column_dtypes=df.constants.NUMERIC_TYPES)
check_column_numbers(columns, "*")
data = self.cols.scatter(columns, buckets)
plot_scatterplot(data, output=output_format, path=output_path)
@staticmethod
def box(columns=None, output_format="plot", output_path=None):
"""
Plot boxplot
:param columns: Columns to be printed
:param output_format:
:param output_path: path where the image is going to be saved
:return:
"""
columns = parse_columns(self, columns, filter_by_column_dtypes=df.constants.NUMERIC_TYPES)
check_column_numbers(columns, "*")
for col_name in columns:
stats = self.cols.boxplot(col_name)
plot_boxplot({col_name: stats}, output=output_format, path=output_path)
@staticmethod
def frequency(columns=None, buckets=10, output_format="plot", output_path=None):
"""
Plot frequency chart
:param columns: Columns to be printed
:param buckets: Number of buckets
:param output_format:
:param output_path: path where the image is going to be saved
:return:
"""
columns = parse_columns(self, columns)
data = self.cols.frequency(columns, buckets)
for k, v in data.items():
plot_frequency({k: v}, output=output_format, path=output_path)
@staticmethod
def correlation(col_name, method="pearson", output_format="plot", output_path=None):
"""
Compute the correlation matrix for the input data set of Vectors using the specified method. Method
mapped from pyspark.ml.stat.Correlation.
:param col_name: The name of the column for which the correlation coefficient needs to be computed.
:param method: String specifying the method to use for computing correlation. Supported: pearson (default),
spearman.
:param output_format: Output image format
:param output_path: Output path
:return: Heatmap plot of the corr matrix using seaborn.
"""
cols_data = self.cols.correlation(col_name, method, output="array")
plot_correlation(cols_data, output=output_format, path=output_path)
@staticmethod
def qqplot(columns, n=100, output_format="plot", output_path=None):
"""
QQ plot
:param columns:
:param n: Sample size
:param output_format: Output format
:param output_path: Path to the output file
:return:
"""
df = self
columns = parse_columns(self, cols_args=columns, filter_by_column_dtypes=df.constants.NUMERIC_TYPES)
if columns is not None:
sample_data = df.ext.sample(n=n, random=True)
for col_name in columns:
plot_qqplot(col_name, sample_data, output=output_format, path=output_path)
return Plot()
SparkDataFrame.plot = property(plot)
DaskDataFrame.plot = property(plot)
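# Illustrative usage sketch (not part of the original module). It assumes an Optimus
# dataframe `df` with a numeric column named "age"; the column name is made up.
#
#   df.plot.hist(columns="age", buckets=20)
#   df.plot.frequency(columns="age", buckets=10)
#   df.plot.correlation("age", method="pearson")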
| StarcoderdataPython |
122453 | <filename>min_heap.py<gh_stars>1-10
#!/usr/bin/env python
# encoding: utf-8
'''
@author: <NAME>
@license: (C) Copyright @ <NAME>
@contact: <EMAIL>
@file: min_heap.py
@time: 2019/4/29 20:23
@desc:
'''
# please see the comments in max_heap.py
def min_heap(array):
if not array:
return None
length = len(array)
if length == 1:
return array
for i in range(length // 2 - 1, -1, -1):
current_idx = i
temp = array[current_idx]
flag = False
while not flag and 2 * current_idx + 1 < length:
left_idx = 2 * current_idx + 1
idx = left_idx
if left_idx + 1 < length and array[left_idx] > array[left_idx + 1]:
idx = left_idx + 1
if temp > array[idx]:
array[current_idx] = array[idx]
current_idx = idx
else:
flag = True
array[current_idx] = temp
if __name__ == '__main__':
array = [7,6,5,4,3,2,1]
min_heap(array)
print(array)
| StarcoderdataPython |
3244994 | """
Picklify is a function that works similar to memoization; it is meant for
functions that return a dictionary. Often, such functions will parse a file to
generate a dictionary that maps certain keys to values. To save on such
overhead costs, we "picklify" them the first time they are called (save the
dictionary in a pickle file), and then simply load the dictionary from the
saved pickle files the next time around.
"""
import pickle
import sys
from pathlib import Path
from .config import PICKLE_PATH
def picklify(dict_generator, *args, **kwargs):
"""
Given a function that returns an object such as a dictionary (only dict
fully supported), returns the dictionary generated by the function. The
function is only called if it has not been "picklified" (passed as an
argument to this function) before. Otherwise, its cached dictionary is
    returned instead. Thus getting the dictionary by wrapping this function
speeds up the dictionary creation overall.
Note that this function should not be called by two different functions
with the same name.
:param dict_generator: the function which generates a dictionary.
    :param *args: Any positional args to pass to dict_generator.
    :param **kwargs: Any keyword args to pass to dict_generator.
:returns: dictionary returned by dict_generator().
"""
# Danger! Never call picklify with functions that have the same name!
pickle_path = f"{PICKLE_PATH}/{dict_generator.__name__}.pickle"
try:
with open(pickle_path, "rb") as pickle_handle:
dict_to_return = pickle.load(pickle_handle)
except FileNotFoundError:
dict_to_return = dict_generator(*args, **kwargs)
try:
Path(pickle_path).parent.mkdir(parents=True, exist_ok=True)
with open(pickle_path, "wb") as pickle_handle:
pickle.dump(
dict_to_return,
pickle_handle,
protocol=pickle.HIGHEST_PROTOCOL,
)
except PermissionError:
print(
"Caching failed due to permission errors...", file=sys.stderr
)
return dict_to_return
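# Illustrative usage sketch (not part of the original module; the parser function below is
# hypothetical):
#
#   def load_word_counts():
#       ...  # parse a large file and return {word: count}
#
#   word_counts = picklify(load_word_counts)
#   # The first call builds the dict and caches it at f"{PICKLE_PATH}/load_word_counts.pickle";
#   # subsequent calls load the cached pickle instead of re-parsing.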
| StarcoderdataPython |
3333379 | #!/usr/bin/env python3
# Copyright (C) 2021, RTE (http://www.rte-france.com)
# SPDX-License-Identifier: CC-BY-4.0
"""
Script to test Pacemaker module: stop VM
"""
from vm_manager.helpers.pacemaker import Pacemaker
VM_NAME = "vm1"
SLEEP = 1
if __name__ == "__main__":
with Pacemaker(VM_NAME) as p:
state = p.show()
print(VM_NAME + " state: " + state)
if state == "Started":
print("Stop " + VM_NAME)
p.stop()
p.wait_for("Stopped")
print("VM " + VM_NAME + " stopped")
else:
raise Exception("Machine is already stopped")
| StarcoderdataPython |
1779302 | # Based on the 'util/collect_env.py' script from PyTorch.
# <https://github.com/pytorch/pytorch>
#
# From PyTorch:
#
# Copyright (c) 2016- Facebook, Inc (<NAME>)
# Copyright (c) 2014- Facebook, Inc (<NAME>)
# Copyright (c) 2011-2014 Idiap Research Institute (<NAME>)
# Copyright (c) 2012-2014 Deepmind Technologies (<NAME>)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (<NAME>)
# Copyright (c) 2006-2010 NEC Laboratories America (<NAME>, <NAME>, <NAME>, <NAME>)
# Copyright (c) 2006 Idiap Research Institute (<NAME>)
# Copyright (c) 2001-2004 Idiap Research Institute (<NAME>, <NAME>, <NAME>)
#
# From Caffe2:
#
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
#
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
#
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
#
# All contributions by Yangqing Jia:
# Copyright (c) 2015 Yangqing Jia
# All rights reserved.
#
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
#
# All contributions by Cruise LLC:
# Copyright (c) 2022 Cruise LLC.
# All rights reserved.
#
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
#
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
#
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
# Unlike the rest of PyTorch, this file must be Python 2 compliant.
# This script outputs relevant system environment info
# Run it with `python collect_env.py`.
import locale
import os
import re
import subprocess
import sys
from collections import namedtuple
try:
import compiler_gym
COMPILER_GYM_AVAILABLE = True
except (ImportError, NameError, AttributeError, OSError):
COMPILER_GYM_AVAILABLE = False
# System Environment Information
SystemEnv = namedtuple(
"SystemEnv",
[
"compiler_gym_version",
"is_debug_build",
"gcc_version",
"clang_version",
"cmake_version",
"os",
"libc_version",
"python_version",
"python_platform",
"pip_version", # 'pip' or 'pip3'
"pip_packages",
"conda_packages",
],
)
def run(command):
"""Returns (return-code, stdout, stderr)"""
p = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
)
raw_output, raw_err = p.communicate()
rc = p.returncode
if get_platform() == "win32":
enc = "oem"
else:
enc = locale.getpreferredencoding()
output = raw_output.decode(enc)
err = raw_err.decode(enc)
return rc, output.strip(), err.strip()
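# Example (illustrative): run("echo hi") would typically return (0, "hi", "").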
def run_and_read_all(run_lambda, command):
"""Runs command using run_lambda; reads and returns entire output if rc is 0"""
rc, out, _ = run_lambda(command)
if rc != 0:
return None
return out
def run_and_parse_first_match(run_lambda, command, regex):
"""Runs command using run_lambda, returns the first regex match if it exists"""
rc, out, _ = run_lambda(command)
if rc != 0:
return None
match = re.search(regex, out)
if match is None:
return None
return match.group(1)
def run_and_return_first_line(run_lambda, command):
"""Runs command using run_lambda and returns first line if output is not empty"""
rc, out, _ = run_lambda(command)
if rc != 0:
return None
return out.split("\n")[0]
def get_conda_packages(run_lambda):
conda = os.environ.get("CONDA_EXE", "conda")
out = run_and_read_all(run_lambda, conda + " list")
if out is None:
return out
# Comment starting at beginning of line
comment_regex = re.compile(r"^#.*\n")
return re.sub(comment_regex, "", out)
def get_gcc_version(run_lambda):
return run_and_parse_first_match(run_lambda, "gcc --version", r"gcc (.*)")
def get_clang_version(run_lambda):
return run_and_parse_first_match(
run_lambda, "clang --version", r"clang version (.*)"
)
def get_cmake_version(run_lambda):
return run_and_parse_first_match(run_lambda, "cmake --version", r"cmake (.*)")
def get_platform():
if sys.platform.startswith("linux"):
return "linux"
elif sys.platform.startswith("win32"):
return "win32"
elif sys.platform.startswith("cygwin"):
return "cygwin"
elif sys.platform.startswith("darwin"):
return "darwin"
else:
return sys.platform
def get_mac_version(run_lambda):
return run_and_parse_first_match(run_lambda, "sw_vers -productVersion", r"(.*)")
def get_windows_version(run_lambda):
system_root = os.environ.get("SYSTEMROOT", "C:\\Windows")
wmic_cmd = os.path.join(system_root, "System32", "Wbem", "wmic")
findstr_cmd = os.path.join(system_root, "System32", "findstr")
return run_and_read_all(
run_lambda, "{} os get Caption | {} /v Caption".format(wmic_cmd, findstr_cmd)
)
def get_lsb_version(run_lambda):
return run_and_parse_first_match(
run_lambda, "lsb_release -a", r"Description:\t(.*)"
)
def check_release_file(run_lambda):
return run_and_parse_first_match(
run_lambda, "cat /etc/*-release", r'PRETTY_NAME="(.*)"'
)
def get_os(run_lambda):
from platform import machine
platform = get_platform()
if platform == "win32" or platform == "cygwin":
return get_windows_version(run_lambda)
if platform == "darwin":
version = get_mac_version(run_lambda)
if version is None:
return None
return "macOS {} ({})".format(version, machine())
if platform == "linux":
# Ubuntu/Debian based
desc = get_lsb_version(run_lambda)
if desc is not None:
return "{} ({})".format(desc, machine())
# Try reading /etc/*-release
desc = check_release_file(run_lambda)
if desc is not None:
return "{} ({})".format(desc, machine())
return "{} ({})".format(platform, machine())
# Unknown platform
return platform
def get_python_platform():
import platform
return platform.platform()
def get_libc_version():
import platform
if get_platform() != "linux":
return "N/A"
return "-".join(platform.libc_ver())
def indent(s):
return " " + "\n ".join(s.split("\n"))
def get_pip_packages(run_lambda):
"""Returns `pip list` output. Note: will also find conda-installed pytorch
and numpy packages."""
# People generally have `pip` as `pip` or `pip3`
    # But here it is invoked as `python -mpip`
def run_with_pip(pip):
return run_and_read_all(run_lambda, pip + " list --format=freeze")
pip_version = "pip3" if sys.version[0] == "3" else "pip"
out = run_with_pip(sys.executable + " -mpip")
return pip_version, out
def get_cachingallocator_config():
ca_config = os.environ.get("PYTORCH_CUDA_ALLOC_CONF", "")
return ca_config
def get_env_info():
run_lambda = run
pip_version, pip_list_output = get_pip_packages(run_lambda)
if COMPILER_GYM_AVAILABLE:
version_str = compiler_gym.__version__
# NOTE(cummins): CompilerGym does not yet have a debug string.
debug_mode_str = "N/A"
else:
version_str = debug_mode_str = "N/A"
sys_version = sys.version.replace("\n", " ")
return SystemEnv(
compiler_gym_version=version_str,
is_debug_build=debug_mode_str,
python_version="{} ({}-bit runtime)".format(
sys_version, sys.maxsize.bit_length() + 1
),
python_platform=get_python_platform(),
pip_version=pip_version,
pip_packages=pip_list_output,
conda_packages=get_conda_packages(run_lambda),
os=get_os(run_lambda),
libc_version=get_libc_version(),
gcc_version=get_gcc_version(run_lambda),
clang_version=get_clang_version(run_lambda),
cmake_version=get_cmake_version(run_lambda),
)
env_info_fmt = """
CompilerGym: {compiler_gym_version}
Is debug build: {is_debug_build}
Python version: {python_version}
Python platform: {python_platform}
OS: {os}
GCC version: {gcc_version}
Clang version: {clang_version}
CMake version: {cmake_version}
Libc version: {libc_version}
Versions of all installed libraries:
{pip_packages}
{conda_packages}
""".strip()
def pretty_str(envinfo):
def replace_nones(dct, replacement="Could not collect"):
for key in dct.keys():
if dct[key] is not None:
continue
dct[key] = replacement
return dct
def replace_bools(dct, true="Yes", false="No"):
for key in dct.keys():
if dct[key] is True:
dct[key] = true
elif dct[key] is False:
dct[key] = false
return dct
def prepend(text, tag="[prepend]"):
lines = text.split("\n")
updated_lines = [tag + line for line in lines]
return "\n".join(updated_lines)
def replace_if_empty(text, replacement="No relevant packages"):
if text is not None and len(text) == 0:
return replacement
return text
def maybe_start_on_next_line(string):
# If `string` is multiline, prepend a \n to it.
if string is not None and len(string.split("\n")) > 1:
return "\n{}\n".format(string)
return string
mutable_dict = envinfo._asdict()
# Replace True with Yes, False with No
mutable_dict = replace_bools(mutable_dict)
# Replace all None objects with 'Could not collect'
mutable_dict = replace_nones(mutable_dict)
# If either of these are '', replace with 'No relevant packages'
mutable_dict["pip_packages"] = replace_if_empty(mutable_dict["pip_packages"])
mutable_dict["conda_packages"] = replace_if_empty(mutable_dict["conda_packages"])
# Tag conda and pip packages with a prefix
# If they were previously None, they'll show up as ie '[conda] Could not collect'
if mutable_dict["pip_packages"]:
mutable_dict["pip_packages"] = prepend(
mutable_dict["pip_packages"], " [{}] ".format(envinfo.pip_version)
)
if mutable_dict["conda_packages"]:
mutable_dict["conda_packages"] = prepend(
mutable_dict["conda_packages"], " [conda] "
)
return env_info_fmt.format(**mutable_dict)
def get_pretty_env_info():
return pretty_str(get_env_info())
def main():
print("Collecting environment information...")
print()
print(pretty_str(get_env_info()))
if __name__ == "__main__":
main()
| StarcoderdataPython |
1685521 | from setuptools import setup, find_packages
VERSION = "1.0"
DESCRIPTION = "DeepTile"
LONG_DESCRIPTION = "Large image tiling and stitching algorithm for deep learning libraries."
setup(
name="deeptile",
version=VERSION,
author="<NAME>",
author_email="<<EMAIL>>",
description=DESCRIPTION,
long_description_content_type="text/markdown",
long_description=LONG_DESCRIPTION,
packages=find_packages(),
install_requires=['dask', 'dask-image', 'nd2', 'numpy', 'scikit-image', 'tifffile'],
keywords=["segmentation", "stitching"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
]
)
| StarcoderdataPython |
1771365 | <filename>{{cookiecutter.project_slug}}/app/users/managers.py<gh_stars>0
from django.contrib.auth.models import BaseUserManager
class UserManager(BaseUserManager):
def __create_user(self,
email,
name,
                      password=None,
is_staff=False,
is_active=False,
is_superuser=False):
if not name:
raise ValueError('Users must have a name')
if not email:
raise ValueError('Users must have an email address')
email = self.normalize_email(email)
user = self.model(email=email.lower(),
name=name,
is_staff=is_staff,
is_active=is_active,
is_superuser=is_superuser)
if password is not None:
user.set_password(password)
else:
user.set_unusable_password()
user.save(using=self._db)
return user
def create_user(self, email, name, password):
return self.__create_user(email,
name,
password,
is_staff=False,
is_active=False,
is_superuser=False)
def create_superuser(self, email, name, password):
return self.__create_user(email,
name,
password,
is_staff=True,
is_active=True,
is_superuser=True)
def get_by_natural_key(self, email):
return self.get(email__iexact=self.normalize_email(email).lower())
def create(self, **kwargs):
"""
Important to have this to get factories working by default
"""
return self.create_user(**kwargs)
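# Illustrative usage sketch (not part of the original module; it assumes a custom User model
# with `objects = UserManager()`, and the field values are placeholders):
#
#   user = User.objects.create_user(email="<EMAIL>", name="Jane", password="<PASSWORD>")
#   admin = User.objects.create_superuser(email="<EMAIL>", name="Root", password="<PASSWORD>")
#
# create_user leaves the account inactive and non-staff, while create_superuser marks it
# active, staff and superuser, matching the flags passed to __create_user above.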
| StarcoderdataPython |
3235248 | '''
Super-resolution dataset
'''
import os
import glob
import imageio
import cv2
import numpy as np
from torch.utils.data import Dataset, DataLoader
# From sr_utils
def im_to_batch(im, sub_im_hw=(640, 640)):
'''
    Take a single image and return a list of sub-image tiles.
    :param im: np.array [h, w, c]
    :param sub_im_hw: tile size (height, width)
    :return: list of tiles and the (rows, cols) shape of the tile grid
'''
ori_hw = im.shape[:2]
h_c = int(np.ceil(ori_hw[0] / sub_im_hw[0]))
w_c = int(np.ceil(ori_hw[1] / sub_im_hw[1]))
sub_ims = []
for h_i in range(h_c):
for w_i in range(w_c):
nim = im[sub_im_hw[0] * h_i: sub_im_hw[0] * (h_i + 1), sub_im_hw[1] * w_i: sub_im_hw[1] * (w_i + 1)]
sub_ims.append(nim)
return sub_ims, (h_c, w_c)
def padding_im_with_multiples_of_n(im, n=32, borderType=cv2.BORDER_CONSTANT, value=None):
h_pad = im.shape[0] % n
w_pad = im.shape[1] % n
if h_pad > 0:
h_pad = n - h_pad
if w_pad > 0:
w_pad = n - w_pad
im = cv2.copyMakeBorder(im, 0, h_pad, 0, w_pad, borderType, value=value)
return im, [h_pad, w_pad]
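# Illustrative sketch (added for clarity, not part of the original file): pad an image to
# multiples of the tile size, then tile it. The shapes are made up; the helper is never called.
def _demo_tiling():
    im = np.zeros((700, 900, 3), dtype=np.uint8)
    im, _ = padding_im_with_multiples_of_n(im, n=128, value=0)  # pads to 768 x 1024
    tiles, (h_c, w_c) = im_to_batch(im, sub_im_hw=(128, 128))
    assert len(tiles) == h_c * w_c == 6 * 8                     # one 128x128 tile per grid cell
    return tiles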
class SrDatasetReader(Dataset):
def __init__(self, path=r'../datasets/faces', img_hw=(128, 128), iter_count=1000000):
# assert mini_dataset >= 0, 'mini_dataset must be equal or big than 0'
# self.use_random = use_random
suffix = {'.jpg', '.png', '.bmp'}
self.imgs_path = []
# for p in glob.iglob('%s/**' % path, recursive=True):
# if os.path.splitext(p)[1].lower() in suffix:
# self.imgs_path.append(p)
for p in os.listdir(path):
if os.path.splitext(p)[1].lower() in suffix:
self.imgs_path.append(os.path.join(path, p))
# if mini_dataset > 0:
# np.random.shuffle(self.imgs_path)
# self.imgs_path = self.imgs_path[:mini_dataset]
self.img_hw = img_hw
self.iter_count = iter_count
self.cache = []
# self.random_horizontal_flip = random_horizontal_flip
def __getitem__(self, _):
# if self.use_random:
# im = center_crop(im)
# im = cv2.resize(im, self.img_hw, interpolation=cv2.INTER_CUBIC)
# if self.random_horizontal_flip and np.random.uniform() > 0.5:
# im = np.array(im[:, ::-1])
if len(self.cache) == 0:
item = np.random.randint(0, len(self.imgs_path))
impath = self.imgs_path[item]
im = imageio.imread(impath)
if im.ndim == 2:
im = np.tile(im[..., None], (1, 1, 3))
elif im.shape[-1] == 4:
im = im[..., :3]
# 根据边长比例,自动缩放
# 缩放最大比例为1
h_scale_min = self.img_hw[0] / im.shape[0]
w_scale_min = self.img_hw[1] / im.shape[1]
scale_percent = np.random.uniform(min(h_scale_min, w_scale_min, 1.), 1.)
dst_wh = (int(im.shape[1] * scale_percent), int(im.shape[0] * scale_percent))
im = cv2.resize(im, dst_wh, interpolation=cv2.INTER_AREA)
            # Pad so that the image side lengths are multiples of the required tile size
h_pad = im.shape[0] % self.img_hw[0]
w_pad = im.shape[1] % self.img_hw[1]
if h_pad > 0:
h_pad = self.img_hw[0] - h_pad
if w_pad > 0:
w_pad = self.img_hw[1] - w_pad
im = cv2.copyMakeBorder(im, 0, h_pad, 0, w_pad, cv2.BORDER_CONSTANT, value=0.)
ca = im_to_batch(im, self.img_hw)[0]
self.cache.extend(ca)
im = self.cache.pop(0)
return im
def __len__(self):
# if self.use_random:
# return self.iter_count
# else:
return self.iter_count
if __name__ == '__main__':
data = SrDatasetReader(r'../datasets/绮丽')
for i in range(len(data)):
a = data[i]
print(a.shape)
if a.shape != (128, 128, 3):
            raise AssertionError('img shape is not equal to (128, 128, 3)')
cv2.imshow('test', cv2.cvtColor(a, cv2.COLOR_RGB2BGR))
cv2.waitKey(16)
| StarcoderdataPython |
15214 | from os import path
import autolens as al
import autolens.plot as aplt
from test_autogalaxy.simulators.imaging import instrument_util
test_path = path.join("{}".format(path.dirname(path.realpath(__file__))), "..", "..")
def pixel_scale_from_instrument(instrument):
"""
Returns the pixel scale from an instrument type based on real observations.
These options are representative of VRO, Euclid, HST, over-sampled HST and Adaptive Optics image.
Parameters
----------
instrument : str
A string giving the resolution of the desired instrument (VRO | Euclid | HST | HST_Up | AO).
"""
if instrument in "vro":
return (0.2, 0.2)
elif instrument in "euclid":
return (0.1, 0.1)
elif instrument in "hst":
return (0.05, 0.05)
elif instrument in "hst_up":
return (0.03, 0.03)
elif instrument in "ao":
return (0.01, 0.01)
else:
raise ValueError("An invalid instrument was entered - ", instrument)
def grid_from_instrument(instrument):
"""
Returns the `Grid` from an instrument type based on real observations.
These options are representative of VRO, Euclid, HST, over-sampled HST and Adaptive Optics image.
Parameters
----------
instrument : str
A string giving the resolution of the desired instrument (VRO | Euclid | HST | HST_Up | AO).
"""
if instrument in "vro":
return al.GridIterate.uniform(shape_2d=(80, 80), pixel_scales=0.2)
elif instrument in "euclid":
return al.GridIterate.uniform(shape_2d=(120, 120), pixel_scales=0.1)
elif instrument in "hst":
return al.GridIterate.uniform(shape_2d=(200, 200), pixel_scales=0.05)
elif instrument in "hst_up":
return al.GridIterate.uniform(shape_2d=(300, 300), pixel_scales=0.03)
elif instrument in "ao":
return al.GridIterate.uniform(shape_2d=(800, 800), pixel_scales=0.01)
else:
raise ValueError("An invalid instrument was entered - ", instrument)
def psf_from_instrument(instrument):
"""
Returns the *PSF* from an instrument type based on real observations.
These options are representative of VRO, Euclid, HST, over-sampled HST and Adaptive Optics image.
Parameters
----------
instrument : str
A string giving the resolution of the desired instrument (VRO | Euclid | HST | HST_Up | AO).
"""
if instrument in "vro":
return al.Kernel.from_gaussian(
shape_2d=(31, 31), sigma=0.5, pixel_scales=0.2, renormalize=True
)
elif instrument in "euclid":
return al.Kernel.from_gaussian(
shape_2d=(31, 31), sigma=0.1, pixel_scales=0.1, renormalize=True
)
elif instrument in "hst":
return al.Kernel.from_gaussian(
shape_2d=(31, 31), sigma=0.05, pixel_scales=0.05, renormalize=True
)
elif instrument in "hst_up":
return al.Kernel.from_gaussian(
shape_2d=(31, 31), sigma=0.05, pixel_scales=0.03, renormalize=True
)
elif instrument in "ao":
return al.Kernel.from_gaussian(
shape_2d=(31, 31), sigma=0.025, pixel_scales=0.01, renormalize=True
)
else:
raise ValueError("An invalid instrument was entered - ", instrument)
def simulator_from_instrument(instrument):
"""
Returns the *Simulator* from an instrument type based on real observations.
These options are representative of VRO, Euclid, HST, over-sampled HST and Adaptive Optics image.
Parameters
----------
instrument : str
A string giving the resolution of the desired instrument (VRO | Euclid | HST | HST_Up | AO).
"""
grid = grid_from_instrument(instrument=instrument)
psf = psf_from_instrument(instrument=instrument)
if instrument in "vro":
return al.SimulatorImaging(
exposure_time=100.0,
psf=psf,
background_sky_level=1.0,
add_poisson_noise=True,
)
elif instrument in "euclid":
return al.SimulatorImaging(
exposure_time=2260.0,
psf=psf,
background_sky_level=1.0,
add_poisson_noise=True,
)
elif instrument in "hst":
return al.SimulatorImaging(
exposure_time=2000.0,
psf=psf,
background_sky_level=1.0,
add_poisson_noise=True,
)
elif instrument in "hst_up":
return al.SimulatorImaging(
exposure_time=2000.0,
psf=psf,
background_sky_level=1.0,
add_poisson_noise=True,
)
elif instrument in "ao":
return al.SimulatorImaging(
exposure_time=1000.0,
psf=psf,
background_sky_level=1.0,
add_poisson_noise=True,
)
else:
raise ValueError("An invalid instrument was entered - ", instrument)
def simulate_imaging_from_instrument(instrument, dataset_name, galaxies):
# Simulate the imaging data, remembering that we use a special image which ensures edge-effects don't
    # degrade our modeling of the telescope optics (e.g. the PSF convolution).
grid = instrument_util.grid_from_instrument(instrument=instrument)
simulator = simulator_from_instrument(instrument=instrument)
# Use the input galaxies to setup a tracer, which will generate the image for the simulated imaging data.
tracer = al.Tracer.from_galaxies(galaxies=galaxies)
imaging = simulator.from_tracer_and_grid(tracer=tracer, grid=grid)
# Now, lets output this simulated imaging-data to the test_autoarray/simulator folder.
test_path = path.join(
"{}".format(path.dirname(path.realpath(__file__))), "..", ".."
)
dataset_path = path.join(test_path, "dataset", "imaging", dataset_name, instrument)
imaging.output_to_fits(
image_path=path.join(dataset_path, "image.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
overwrite=True,
)
    plotter = aplt.MatPlot2D(output=aplt.Output(path=dataset_path, format="png"))
aplt.Imaging.subplot_imaging(imaging=imaging, plotter=plotter)
aplt.imaging.individual(
imaging=imaging,
image=True,
noise_map=True,
psf=True,
signal_to_noise_map=True,
plotter=plotter,
)
aplt.Tracer.subplot_tracer(tracer=tracer, grid=grid, plotter=plotter)
aplt.Tracer.figures(
tracer=tracer,
grid=grid,
image=True,
source_plane=True,
convergence=True,
potential=True,
deflections=True,
plotter=plotter,
)
def load_test_imaging(dataset_name, instrument, name=None):
pixel_scales = instrument_util.pixel_scale_from_instrument(instrument=instrument)
test_path = path.join(
"{}".format(path.dirname(path.realpath(__file__))), "..", ".."
)
dataset_path = path.join(test_path, "dataset", "imaging", dataset_name, instrument)
return al.Imaging.from_fits(
image_path=path.join(dataset_path, "image.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
pixel_scales=pixel_scales,
name=name,
)
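# Illustrative usage sketch (not part of the original module; "example" and the galaxies list
# are hypothetical, and the galaxies would normally carry light and mass profiles):
#
#   simulate_imaging_from_instrument(instrument="hst", dataset_name="example", galaxies=galaxies)
#   imaging = load_test_imaging(dataset_name="example", instrument="hst")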
| StarcoderdataPython |
4800618 | import keras.backend as K
import numpy as np
import matplotlib.pylab as plt
import cPickle as pickle
import sys
import os
def find_top9_mean_act(data, Dec, target_layer, feat_map, batch_size=32):
"""
Find images with highest mean activation
args: data (numpy array) the image data
shape : (n_samples, n_channels, img_dim1, img_dim2)
Dec (DeconvNet) instance of the DeconvNet class
target_layer (str) Layer name we want to visualise
feat_map (int) index of the filter to visualise
batch_size (int) batch size
returns: top9 (numpy array) index of the top9 images that activate feat_map
"""
# Theano function to get the layer output
T_in, T_out = Dec[Dec.model.layers[0].name].input, Dec[target_layer].output
get_activation = K.function([T_in], T_out)
list_max = []
# Loop over batches and store the max activation value for each
# image in data for the target layer and target feature map
for nbatch in range(data.shape[0] / batch_size):
sys.stdout.write("\rProcessing batch %s/%s" %
(nbatch + 1, len(range(data.shape[0] / batch_size))))
sys.stdout.flush()
X = data[nbatch * batch_size: (nbatch + 1) * batch_size]
Dec.model.predict(X)
X_activ = get_activation([X])[:, feat_map, :, :]
X_sum = np.sum(X_activ, axis=(1,2))
list_max += X_sum.tolist()
# Only keep the top 9 activations
list_max = np.array(list_max)
i_sort = np.argsort(list_max)
top9 = i_sort[-9:]
print
return top9
def get_deconv_images(d_act_path, d_deconv_path, data, Dec):
"""
Deconvolve images specified in d_act. Then pickle these images
for future use
args: d_act_path (str) path to the dict that for each target layer
and for a selection of feature maps, holds the index
of the top9 images activating said feature maps
d_deconv_path (str) path to the dict that for each target layer
and for a selection of feature maps, holds the deconv
result of the top9 images activating said feature maps
data (numpy array) the image data
shape : (n_samples, n_channels, img_dim1, img_dim2)
Dec (DeconvNet) instance of the DeconvNet class
"""
# Load d_act
with open(d_act_path, 'r') as f:
d_act = pickle.load(f)
# Get the list of target layers
list_target = d_act.keys()
# Store deconv images in d_deconv
d_deconv = {}
# Iterate over target layers and feature maps
# and store the deconv image
for target_layer in list_target:
list_feat_map = d_act[target_layer].keys()
for feat_map in list_feat_map:
top9 = d_act[target_layer][feat_map]
X = data[top9]
X_out = Dec.get_deconv(X, target_layer, feat_map=feat_map)
key = target_layer + "_feat_" + str(feat_map)
d_deconv[key] = X_out
np.savez("./Data/dict_top9_deconv.npz", **d_deconv)
def format_array(arr):
"""
Utility to format array for tiled plot
args: arr (numpy array)
shape : (n_samples, n_channels, img_dim1, img_dim2)
"""
n_channels = arr.shape[1]
len_arr = arr.shape[0]
assert (n_channels == 1 or n_channels == 3), "n_channels should be 1 (Greyscale) or 3 (Color)"
if n_channels == 1:
arr = np.repeat(arr, 3, axis=1)
shape1, shape2 = arr.shape[-2:]
arr = np.transpose(arr, [1, 0, 2, 3])
arr = arr.reshape([3, len_arr, shape1 * shape2]).astype(np.float64)
arr = tuple([arr[i] for i in xrange(3)] + [None])
return arr, shape1, shape2
def scale_to_unit_interval(ndar, eps=1e-8):
""" Scales all values in the ndarray ndar to be between 0 and 1 """
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
which images are reshaped and layed out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
:type X: a 2-D ndarray or a tuple of 4 channels, elements of which can
be 2-D ndarrays or None;
:param X: a 2-D array in which every row is a flattened image.
:type img_shape: tuple; (height, width)
:param img_shape: the original shape of each image
:type tile_shape: tuple; (rows, cols)
:param tile_shape: the number of images to tile (rows, cols)
:param output_pixel_vals: if output should be pixel values (i.e. int8
values) or floats
:param scale_rows_to_unit_interval: if the values need to be scaled before
being plotted to [0,1] or not
:returns: array suitable for viewing as an image.
(See:`Image.fromarray`.)
:rtype: a 2-d array with same dtype as X.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output numpy ndarray to store the image
if output_pixel_vals:
# Convert to uint to make it look like an image indeed
out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype=X.dtype)
# colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in range(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
out_array[:, :, i] = np.zeros(out_shape,
dtype='uint8'
if output_pixel_vals else out_array.dtype
) + channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
out_array = np.zeros(out_shape, dtype='uint8' if output_pixel_vals else X.dtype)
for tile_row in range(tile_shape[0]):
for tile_col in range(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
X[tile_row * tile_shape[1] + tile_col].reshape(img_shape))
else:
this_img = X[tile_row * tile_shape[1] + tile_col].reshape(img_shape)
# add the slice to the corresponding position in the
# output array
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] \
= this_img * (255 if output_pixel_vals else 1)
return out_array
def plot_max_activation(d_act_path, d_deconv_path, data, target_layer, save=False):
"""
Plot original images (or cropped versions) and the deconvolution result
for images specified in d_act_path / d_dedeconv_path
args: d_act_path (str) path to the dict that for each target layer
and for a selection of feature maps, holds the index
of the top9 images activating said feature maps
d_deconv_path (str) path to the dict that for each target layer
and for a selection of feature maps, holds the deconv
result of the top9 images activating said feature maps
data (numpy array) the image data
shape : (n_samples, n_channels, img_dim1, img_dim2)
target_layer (str) name of the layer we want to visualise
save (bool) whether or not to save the result to a Figures folder
"""
# Load d_deconv
d_deconv = {}
arr_deconv = np.load(d_deconv_path)
for key in arr_deconv.keys():
layer, fmap = key.split("_feat_")
fmap = int(fmap)
try:
d_deconv[layer][fmap] = arr_deconv[key]
except KeyError:
d_deconv[layer] = {fmap: arr_deconv[key]}
# Load d_act
    with open(d_act_path, 'rb') as f:
d_act = pickle.load(f)
# Get the list of feature maps
list_feat_map = d_act[target_layer].keys()
# We'll crop images to identify the region
# the neuron has activated on
# dict to store cropped images
d_crop_deconv = {i: [] for i in list_feat_map}
d_crop_ori = {i: [] for i in list_feat_map}
# This will hold the image dimensions
max_delta_x = 0
max_delta_y = 0
# To crop images:
# First loop to get the largest image size required (bounding box)
for feat_map in list_feat_map:
X_deconv = d_deconv[target_layer][feat_map]
for k in range(X_deconv.shape[0]):
arr = np.argwhere(np.max(X_deconv[k], axis=0))
try:
(ystart, xstart), (ystop, xstop) = arr.min(0), arr.max(0) + 1
except ValueError:
print "Encountered a dead filter"
return
delta_x = xstop - xstart
delta_y = ystop - ystart
if delta_x > max_delta_x:
max_delta_x = delta_x
if delta_y > max_delta_y:
max_delta_y = delta_y
# Then loop to crop all images to the same size
    for feat_map in list_feat_map:
X_deconv = d_deconv[target_layer][feat_map]
X_ori = data[d_act[target_layer][feat_map]]
for k in range(X_deconv.shape[0]):
arr = np.argwhere(np.max(X_deconv[k], axis=0))
try:
(ystart, xstart), (ystop, xstop) = arr.min(0), arr.max(0) + 1
except ValueError:
print "Encountered a dead filter"
return
# Specific case to avoid array boundary issues
y_min, y_max = ystart, ystart + max_delta_y
if y_max >= X_deconv[k].shape[-2]:
y_min = y_min - (y_max - X_deconv[k].shape[-2])
y_max = X_deconv[k].shape[-2]
x_min, x_max = xstart, xstart + max_delta_x
if x_max >= X_deconv[k].shape[-1]:
x_min = x_min - (x_max - X_deconv[k].shape[-1])
x_max = X_deconv[k].shape[-1]
# Store the images in the dict
arr_deconv = X_deconv[k, :, y_min: y_max, x_min: x_max]
d_crop_deconv[feat_map].append(arr_deconv)
arr_ori = X_ori[k, :, y_min: y_max, x_min: x_max]
d_crop_ori[feat_map].append(arr_ori)
d_crop_deconv[feat_map] = np.array(d_crop_deconv[feat_map])
d_crop_ori[feat_map] = np.array(d_crop_ori[feat_map])
# List to hold the images in the tiled plot
list_input_img = []
list_output_img = []
# Loop over the feat maps to fill the lists above
for feat_map in list_feat_map:
arr_ori = d_crop_ori[feat_map]
arr_deconv = d_crop_deconv[feat_map]
arr_ori, shape1, shape2 = format_array(arr_ori)
arr_deconv, shape1, shape2 = format_array(arr_deconv)
input_map = tile_raster_images(arr_ori, img_shape=(shape1, shape2), tile_shape=(3, 3),
tile_spacing=(1,1), scale_rows_to_unit_interval=True,
output_pixel_vals=True)
output_map = tile_raster_images(arr_deconv, img_shape=(shape1, shape2), tile_shape=(3, 3),
tile_spacing=(1,1), scale_rows_to_unit_interval=True,
output_pixel_vals=True)
list_input_img.append(input_map)
list_output_img.append(output_map)
# Format the arrays for the plot
arr_ori1 = np.vstack(list_input_img[::2])
arr_ori2 = np.vstack(list_input_img[1::2])
arr_dec1 = np.vstack(list_output_img[::2])
arr_dec2 = np.vstack(list_output_img[1::2])
arr_ori = np.hstack((arr_ori1, arr_dec1))
arr_dec = np.hstack((arr_ori2, arr_dec2))
arr_full = np.hstack((arr_dec, arr_ori))
# RGB/GBR reordering
arr_full_copy = arr_full.copy()
arr_full[:, :, 0] = arr_full_copy[:, :, 2]
arr_full[:, :, 1] = arr_full_copy[:, :, 1]
arr_full[:, :, 2] = arr_full_copy[:, :, 0]
del arr_full_copy
# Plot and prettify
plt.imshow(arr_full, aspect='auto')
plt.tick_params(axis='both', which='both', bottom='off', top='off',
labelbottom='off', right='off', left='off', labelleft='off')
plt.xlabel(target_layer, fontsize=26)
# plt.axis("off")
plt.tight_layout()
if save:
if not os.path.exists("./Figures/"):
os.makedirs("./Figures")
plt.savefig("./Figures/%s.png" % target_layer, format='png', dpi=200)
else:
plt.show()
        input()
def plot_deconv(img_index, data, Dec, target_layer, feat_map, save=False):
"""
Plot original images (or cropped versions) and the deconvolution result
for images specified in img_index, for the target layer and feat_map
specified in the arguments
args: img_index (list/arr) array or list of index. These are the indices
of the images we want to plot
data (numpy array) the image data
shape : (n_samples, n_channels, img_dim1, img_dim2)
Dec (DeconvNet) instance of the DeconvNet class
target_layer (str) name of the layer we want to visualise
feat_map (int) index of the filter to visualise
"""
num_img = len(img_index)
assert np.isclose(np.sqrt(num_img), int(
np.sqrt(num_img))), "len(img_index) must be a perfect square"
mosaic_size = int(np.sqrt(num_img))
X_ori = data[img_index]
X_deconv = Dec.get_deconv(data[img_index], target_layer, feat_map=feat_map)
max_delta_x = 0
max_delta_y = 0
# To crop images:
# First loop to get the largest image size required (bounding box)
for k in range(X_deconv.shape[0]):
arr = np.argwhere(np.max(X_deconv[k], axis=0))
try:
(ystart, xstart), (ystop, xstop) = arr.min(0), arr.max(0) + 1
except ValueError:
print "Encountered a dead filter, retry with different img/filter"
return
delta_x = xstop - xstart
delta_y = ystop - ystart
if delta_x > max_delta_x:
max_delta_x = delta_x
if delta_y > max_delta_y:
max_delta_y = delta_y
list_deconv = []
list_ori = []
# Then loop to crop all images to the same size
for k in range(X_deconv.shape[0]):
arr = np.argwhere(np.max(X_deconv[k], axis=0))
try:
(ystart, xstart), (ystop, xstop) = arr.min(0), arr.max(0) + 1
except ValueError:
print "Encountered a dead filter, retry with different img/filter"
return
# Specific case to avoid array boundary issues
y_min, y_max = ystart, ystart + max_delta_y
if y_max >= X_deconv[k].shape[-2]:
y_min = y_min - (y_max - X_deconv[k].shape[-2])
y_max = X_deconv[k].shape[-2]
x_min, x_max = xstart, xstart + max_delta_x
if x_max >= X_deconv[k].shape[-1]:
x_min = x_min - (x_max - X_deconv[k].shape[-1])
x_max = X_deconv[k].shape[-1]
# Store the images in the dict
arr_deconv = X_deconv[k, :, y_min: y_max, x_min: x_max]
arr_ori = X_ori[k, :, y_min: y_max, x_min: x_max]
list_ori.append(arr_ori)
list_deconv.append(arr_deconv)
arr_deconv = np.array(list_deconv)
arr_ori = np.array(list_ori)
arr_ori, shape1, shape2 = format_array(arr_ori)
arr_deconv, _, _ = format_array(arr_deconv)
input_map = tile_raster_images(arr_ori, img_shape=(shape1, shape2),
tile_shape=(mosaic_size, mosaic_size),
tile_spacing=(1, 1), scale_rows_to_unit_interval=True,
output_pixel_vals=True)
output_map = tile_raster_images(arr_deconv, img_shape=(shape1, shape2),
tile_shape=(mosaic_size, mosaic_size),
tile_spacing=(1, 1), scale_rows_to_unit_interval=True,
output_pixel_vals=True)
arr_full = np.append(input_map, output_map, axis=1)
# RGB/GBR reordering
arr_full_copy = arr_full.copy()
arr_full[:, :, 0] = arr_full_copy[:, :, 2]
arr_full[:, :, 1] = arr_full_copy[:, :, 1]
arr_full[:, :, 2] = arr_full_copy[:, :, 0]
del arr_full_copy
# Plot and prettify
plt.imshow(arr_full, aspect='auto')
plt.tick_params(axis='both', which='both', bottom='off', top='off',
labelbottom='off', right='off', left='off', labelleft='off')
plt.xlabel(target_layer + "Filter: %s" % feat_map, fontsize=26)
# plt.axis("off")
plt.tight_layout()
if save:
if not os.path.exists("./Figures/"):
os.makedirs("./Figures")
plt.savefig("./Figures/sample_%s.png" % target_layer, format='png', dpi=200)
else:
plt.show()
        input()
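# --- Usage sketch (not from the original repo; illustrative only) -------------
# The data path, layer name and DeconvNet construction below are assumptions:
# adapt them to however the DeconvNet class and the image data are loaded in
# this project before running.
if __name__ == "__main__":
    data = np.load("./Data/X_sample.npy")  # expected shape: (n_samples, n_channels, img_dim1, img_dim2)
    Dec = DeconvNet()  # assumption: the DeconvNet class is defined/importable elsewhere in this repo
    # Deconvolve the top-9 activating images recorded in the pickled dict, then plot them
    get_deconv_images("./Data/dict_top9_max_activation.pkl",
                      "./Data/dict_top9_deconv.npz", data, Dec)
    plot_max_activation("./Data/dict_top9_max_activation.pkl",
                        "./Data/dict_top9_deconv.npz", data,
                        target_layer="convolution2d_13", save=True)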
| StarcoderdataPython |
4806321 | import cv2
import scipy
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from datetime import datetime
from pprint import pprint
from skimage import transform
from utils import detect_faces, detect_landmarks, generate_embedding, recognize
output_path = 'outputs'
test_file = 'videos/ongiocaudayroi1.mp4'
result_file = output_path+'/ongiocaudayroi1_detected.mp4'
tflite_model_path = 'tflite_models'
ishape = [512, 512, 3]
ID_map = [
'shark_do_thi_kim_lien', 'phung_dang_khoa', 'tu_long', 'manh_quynh', 'huong_giang', 'luxstay_van_dung', 'mi_du', 'truong_giang', 'map_vai_phu', 'vo_hoang_yen', 'hoang_thuy_linh', 'dieu_nhi', 'erik', 'le_duong_bao_lam', 'xuan_bac', 'lan_ngoc', 'tran_thanh', 'shark_nguyen_ngoc_thuy', 'duc_phuc', 'hari_won', 'tien_luat', 'tuan_tran', 'shark_pham_thanh_hung', 'miu_le', 'chi_tai', 'shark_nguyen_manh_dung', 'viet_huong', 'le_giang', 'le_tin', 'hong_kim_hanh', 'hoai_linh', 'vi_da', 'shark_nguyen_thanh_viet', 'linh_ngoc_dam']
interpreter1 = tf.lite.Interpreter(model_path=tflite_model_path+'/det_xsml_model.tflite')
interpreter1.allocate_tensors()
input_details1 = interpreter1.get_input_details()
output_details1 = interpreter1.get_output_details()
# pprint(input_details1)
# pprint(output_details1)
interpreter2 = tf.lite.Interpreter(model_path=tflite_model_path+'/ali_model08.tflite')
interpreter2.allocate_tensors()
input_details2 = interpreter2.get_input_details()
output_details2 = interpreter2.get_output_details()
# pprint(input_details2)
# pprint(output_details2)
interpreter3 = tf.lite.Interpreter(model_path=tflite_model_path+'/rec_model.tflite')
interpreter3.allocate_tensors()
input_details3 = interpreter3.get_input_details()
output_details3 = interpreter3.get_output_details()
# pprint(input_details2)
# pprint(output_details2)
cap = cv2.VideoCapture(test_file)
cap.set(cv2.CAP_PROP_POS_FRAMES, 10000)
# cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1024)
# cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1024)
processed_images = []
embedding2d = np.load(output_path+'/embedding2d.npy')
while True:
# Quit with 'q' press
if cv2.waitKey(1) & 0xFF == ord('q'):
break
success, pix = cap.read()
if success is not True:
break
pix = pix[:, :, :]
h, w, _ = pix.shape
size = min(h, w)
origin_y = 0
origin_x = (w - h)//2
if h > w:
origin_y = (h - w)//2
origin_x = 0
pix = pix[origin_y:origin_y+size, origin_x:origin_x+size, :]
pix = cv2.resize(pix, (ishape[1], ishape[0]))
pix = pix/np.max(pix)
pix = pix*255
# print('Start: {}'.format(datetime.now().time()))
imgpix = tf.constant(value=pix, dtype='float32')
bbox2d = detect_faces(
interpreter=interpreter1,
input_details=input_details1,
output_details=output_details1,
pix=imgpix)
# print('End: {}'.format(datetime.now().time()))
bboxes = []
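    # For each detected face box: grow it into a square crop with a ~10% margin
    # (zero-padding where the square runs past the frame border), then run the
    # landmark model on a 112x112 grayscale resample and the embedding model for
    # identification; boxes with neither landmarks nor a known identity are skipped.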
for y1, x1, y2, x2, _ in bbox2d:
h, w = y2 - y1, x2 - x1
y, x = int(y1 + 0.5*h), int(x1 + 0.5*w)
half_edge = int(1.1*max(h, w)/2)
edge = 2*half_edge
y1, x1, y2, x2 = int(y-half_edge), int(x-half_edge), int(y+half_edge), int(x+half_edge)
if y1+half_edge < 0 or x1+half_edge < 0 or y2-half_edge > ishape[0] or x2-half_edge > ishape[1]:
continue
iobject = np.zeros((edge, edge, 3))
_y1, _x1, _y2, _x2 = 0, 0, edge, edge
__y1, __x1, __y2, __x2 = y1, x1, y2, x2
if y1 < 0 and y1+half_edge > 0:
_y1 = -y1
__y1 = 0
if x1 < 0 and x1+half_edge > 0:
_x1 = -x1
__x1 = 0
if y2 > ishape[0] and y2-half_edge < ishape[0]:
_y2 = edge - (y2-ishape[0])
__y2 = ishape[0]
if x2 > ishape[1] and x2-half_edge < ishape[1]:
_x2 = edge - (x2-ishape[1])
__x2 = ishape[1]
iobject[_y1:_y2, _x1:_x2, :] = pix[__y1:__y2, __x1:__x2, :]
iobject = transform.resize(image=iobject, output_shape=[134, 134])
_iobject = np.mean(iobject, axis=-1, keepdims=True)
_iobject = _iobject/np.max(_iobject)
_iobject = _iobject*255
_iobject = np.array(_iobject, dtype='int32')
_iobject = _iobject[11:-11, 11:-11, :]
ya, xa, yb, xb, yc, xc, yd, xd, ye, xe = detect_landmarks(
interpreter=interpreter2,
input_details=input_details2,
output_details=output_details2,
pix=_iobject,
ishape=[112, 112, 1]) # (5, h, w)
likely_face = False
eye = ya != 0 or yb != 0
mouth = yc != 0 or yd != 0
left = ya != 0 or yc != 0
right = yb != 0 or yd != 0
left_eye_and_nose = ya != 0 or ye != 0
right_eye_and_nose = yb != 0 or ye != 0
left_mouth_and_nose = yc != 0 or ye != 0
right_mouth_and_nose = yd != 0 or ye != 0
if eye is True or mouth is True or left is True or right is True or left_eye_and_nose is True or right_eye_and_nose is True or left_mouth_and_nose is True or right_mouth_and_nose is True:
likely_face = True
name = ''
iobject = transform.resize(image=iobject, output_shape=[112, 112])
iobject = np.mean(iobject, axis=-1, keepdims=True)
iobject = np.concatenate([iobject, iobject, iobject], axis=-1)
iobject = np.array(iobject, dtype='int32')
embedding1d = generate_embedding(
interpreter=interpreter3,
input_details=input_details3,
output_details=output_details3,
pix=iobject)
oid = recognize(embedding2d=embedding2d, embedding1d=embedding1d)
name = '' if oid == -1 else ID_map[oid]
if likely_face is not True and name == '':
continue
scale = edge/112
ya, xa = y1 + int(scale*ya), x1 + int(scale*xa)
yb, xb = y1 + int(scale*yb), x1 + int(scale*xb)
yc, xc = y1 + int(scale*yc), x1 + int(scale*xc)
yd, xd = y1 + int(scale*yd), x1 + int(scale*xd)
ye, xe = y1 + int(scale*ye), x1 + int(scale*xe)
bboxes.append([y1, x1, y2, x2, ya, xa, yb, xb, yc, xc, yd, xd, ye, xe, name])
for y1, x1, y2, x2, ya, xa, yb, xb, yc, xc, yd, xd, ye, xe, name in bboxes:
cv2.circle(pix, (int(x1 + 0.5*(x2 - x1)), int(y1 + 0.5*(y2 - y1))), int(0.5*(y2 - y1)), [255, 255, 255], 1)
if ya != y1 and yb != y1 and yc != y1 and yd != y1 and ye != y1:
cv2.circle(pix, (xa, ya), 4, [255, 255, 0], -1)
cv2.circle(pix, (xb, yb), 4, [255, 255, 0], -1)
cv2.circle(pix, (xc, yc), 4, [0, 255, 255], -1)
cv2.circle(pix, (xd, yd), 4, [0, 255, 255], -1)
cv2.circle(pix, (xe, ye), 4, [255, 128, 255], -1)
cv2.putText(pix, name, (x1, y1-8), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255, 255, 0], 1)
# Display frame
pix = np.array(pix, dtype='uint8')
cv2.imshow('frame', pix)
processed_images.append(pix)
cap.release()
cv2.destroyAllWindows()
out = cv2.VideoWriter(result_file, cv2.VideoWriter_fourcc(*'MP4V'), 24, (ishape[1], ishape[0]))
for i in range(len(processed_images)):
out.write(processed_images[i])
out.release()
| StarcoderdataPython |
62881 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from perlin import generate_perlin
def gaussian_2d_fast(size, amp, mu_x, mu_y, sigma):
x = np.arange(0, 1, 1/size[0])
y = np.arange(0, 1, 1/size[1])
xs, ys = np.meshgrid(x,y)
dxs = np.minimum(np.abs(xs-mu_x), 1-np.abs(xs-mu_x))
dys = np.minimum(np.abs(ys-mu_y), 1-np.abs(ys-mu_y))
heat_map = amp*np.exp(-(dxs**2+dys**2)/(2*sigma**2))
return heat_map
def excitability_matrix(sigma_e, sigma_i, perlin_scale, grid_offset,
p_e=0.05, p_i=0.05, we=0.22, g=4,
n_row_e=120, n_row_i=60, mu_gwn=0, multiple_connections=True,
expected_connectivity=True, is_plot=True):
n_pop_e = n_row_e**2
n_pop_i = n_row_i**2
gL = 25 * 1e-9 # Siemens
p_max_e = p_e / (2 * np.pi * sigma_e**2)
p_max_i = p_i / (2 * np.pi * sigma_i**2)
# Two landscapes: e and i. The contribution of each neuron is stored separately in the n_row_e**2 matrices
e_landscape = np.zeros((n_row_e**2, n_row_e, n_row_e))
i_landscape = np.zeros((n_row_i**2, n_row_e, n_row_e))
perlin = generate_perlin(n_row_e, perlin_scale, seed_value=0)
x = np.arange(0,1,1/n_row_e)
y = np.arange(0,1,1/n_row_e)
X, Y = np.meshgrid(x,y)
U = np.cos(perlin)
V = np.sin(perlin)
# Excitatory
mu_xs = np.arange(0,1,1/n_row_e)
mu_ys = np.arange(0,1,1/n_row_e)
counter = 0
for i, mu_x in enumerate(mu_xs):
for j, mu_y in enumerate(mu_ys):
x_offset = grid_offset / n_row_e * np.cos(perlin[i,j])
y_offset = grid_offset / n_row_e * np.sin(perlin[i,j])
mh = gaussian_2d_fast((n_row_e, n_row_e), p_max_e, mu_x+x_offset, mu_y+y_offset, sigma_e)
if not multiple_connections:
#clip probabilities at 1
e_landscape[counter] = np.minimum(mh, np.ones(mh.shape))
else:
e_landscape[counter] = mh
counter += 1
# Inhibitory
mu_xs = np.arange(1/n_row_e,1+1/n_row_e,1/n_row_i)
mu_ys = np.arange(1/n_row_e,1+1/n_row_e,1/n_row_i)
counter = 0
for mu_x in mu_xs:
for mu_y in mu_ys:
mh = gaussian_2d_fast((n_row_e, n_row_e), p_max_i, mu_x, mu_y, sigma_i)
if not multiple_connections:
#clip probabilities at 1
i_landscape[counter] = np.minimum(mh, np.ones(mh.shape))
else:
i_landscape[counter] = mh
counter += 1
# in total there should be n_pop_e * (n_pop_e * p_max_e) = 10 368 000 e-connections
# and n_pop_i * (n_pop_e * 0.05) = 2 592 000 i-connections
num_e_connections = np.sum(e_landscape)
num_i_connections = np.sum(i_landscape)
if multiple_connections:
e_calibration = 1
i_calibration = 1
else:
e_calibration = n_pop_e * n_pop_e * p_e / num_e_connections
i_calibration = n_pop_i * n_pop_e * p_i / num_i_connections
print('e_calibration is ', e_calibration)
print('i_calibration is ', i_calibration)
if expected_connectivity:
# calculate expected number of connections
e_landscape = n_row_e**2*np.mean(e_landscape, axis=0)
i_landscape = n_row_i**2*np.mean(i_landscape, axis=0)
else: # we sample
sample_e_landscape = np.zeros((n_row_e, n_row_e))
for i in range(n_row_e):
for j in range(n_row_e):
neuron = e_landscape[:, i, j]
random_numbers = np.random.random(n_row_e**2)
num_connected = len(np.where(random_numbers<neuron)[0])
sample_e_landscape[i, j] = num_connected
sample_i_landscape = np.zeros((n_row_e, n_row_e))
for i in range(n_row_e):
for j in range(n_row_e):
neuron = i_landscape[:, i, j]
random_numbers = np.random.random(n_row_i**2)
num_connected = len(np.where(random_numbers<neuron)[0])
sample_i_landscape[i, j] = num_connected
e_landscape = sample_e_landscape
i_landscape = sample_i_landscape
# Now we fill a landscape with physical units (mV)
rest_pot = -70 # mV
thres_pot = -55 # mV
ext_pot = mu_gwn / gL * 1e3 #mV
no_activity_pot = rest_pot + ext_pot # -56 mV when mu_gwn = 350 pA
landscape = no_activity_pot * np.ones((n_row_e, n_row_e))
# Synapse strengths
we = we * e_calibration #mV
wi = -g * we * i_calibration / e_calibration #mV
landscape += we * e_landscape
landscape += wi * i_landscape
# scale X and Y quiver according to values in ei_landscape. first normalize landscape
norm_landscape = np.copy(landscape)
norm_landscape -= np.amin(norm_landscape)
norm_landscape /= np.amax(norm_landscape)
U = 0.5*np.multiply(U, norm_landscape)
V = 0.5*np.multiply(V, norm_landscape)
if is_plot:
# Plot
plt.figure(figsize=(8,8))
if expected_connectivity:
mode = 'Expected '
else:
mode = 'Sampled '
plt.title(mode+'EI landscape')
plt.imshow(landscape, origin='lower', extent=[0,1,0,1])
norm = mpl.colors.Normalize(vmin=round(np.amin(landscape)), vmax=round(np.amax(landscape)))
plt.colorbar(mpl.cm.ScalarMappable(norm=norm), label='mV')
plt.quiver(X, Y, U, V, units='xy', scale=50)
plt.suptitle(r'$\sigma_e=$'+str(sigma_e)+r', $\sigma_i=$'+str(sigma_i)+', perlin scale='+str(perlin_scale)+', g='+str(g),
fontsize=15)
plt.show()
# Plot binary landscape (below/above threshold)
        above_thres = np.where(np.reshape(landscape, n_row_e ** 2) > thres_pot)
        binary_landscape = np.zeros(n_row_e ** 2)
        binary_landscape[above_thres] = 1
        binary_landscape = np.reshape(binary_landscape, (n_row_e, n_row_e))
plt.figure(figsize=(8,8))
plt.title(mode+'EI landscape (binary)')
plt.imshow(binary_landscape, origin='lower', extent=[0,1,0,1])
plt.quiver(X, Y, U, V, units='xy', scale=50)
plt.suptitle(r'$\sigma_e=$'+str(sigma_e)+r', $\sigma_i=$'+str(sigma_i)+', perlin scale='+str(perlin_scale)+', g='+str(g),
fontsize=15)
plt.show()
return landscape, X, Y, U, V
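# Usage sketch (illustrative only; the parameter values below are assumptions,
# not taken from any original experiment):
if __name__ == '__main__':
    landscape, X, Y, U, V = excitability_matrix(
        sigma_e=0.05, sigma_i=0.1, perlin_scale=4, grid_offset=1.0,
        expected_connectivity=True, is_plot=True)
    print('landscape shape:', landscape.shape, 'mean membrane potential (mV):', landscape.mean())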
| StarcoderdataPython |
1783457 | <gh_stars>1-10
# Copyright 2019 <NAME> (<EMAIL>)
# ---------------------------
# Distributed under the MIT License:
# ==================================
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
# [SimpleFEMPy] A basic Python PDE solver with the finite elements method
# ------------------------------------------------------------------------------
# Utils: Subpackage with various util tools
# ------------------------------------------------------------------------------
# logger.py - Logger manager (with a built-in static instance)
# ==============================================================================
import logging
import sys
import inspect
import copy
from simplefempy.settings import LIB_SETTINGS, TK_INSTANCE
class SimpleFemPyError(Exception):
pass
# ---- ** ----
# From: https://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output
K, R, G, Y, B, M, C, W = range(8)
# These are the sequences need to get colored ouput
RESET_SEQ = '\033[0m'
COLOR_SEQ = '\033[0;%dm'
BOLD_SEQ = '\033[1m'
def formatter_message(message, use_color=True):
if use_color:
message = message.replace('$RESET', RESET_SEQ).replace('$BOLD', BOLD_SEQ)
else:
message = message.replace('$RESET', '').replace('$BOLD', '')
return message
COLORS = { 'WARNING': Y, 'INFO': C, 'DEBUG': M, 'ERROR': R }
class ColoredFormatter(logging.Formatter):
def __init__(self, msg, use_color=True):
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record):
rec = copy.copy(record) # copy the record object to avoid confusion between streams
lvl = rec.levelname
if self.use_color and lvl in COLORS:
levelname_color = COLOR_SEQ % (30 + COLORS[lvl]) + lvl + RESET_SEQ
rec.levelname = levelname_color
return logging.Formatter.format(self, rec)
# ---- ** ----
class Logger(object):
"""Object to log information according to given level (uses Python
base logger).
Parameters
----------
logging_level : str or int, optional
Level of display (to be checked).
prefix : str, optional
        Specific project name to show at the beginning of all logs. 'FEM' by
default.
use_color : bool, optional
If true, color is used to distinguish between the different levels of
logging.
stream : IO.stream or None, optional
If it is not None, then this Logger's output is redirected to the given
stream. Else, logging is done in the sys.stdout stream.
abort_errors : bool, optional
If false (by default), critical errors trigger an instant exit of the
program.
"""
LOGGING_LEVELS = {
'error': 40, 'warning': 30, 'info': 20, 'debug': 10,
40: 'error', 30: 'warning', 20: 'info', 10: 'debug'
}
def __init__(self, logging_level='info', prefix='FEM', use_color=True,
stream=None, abort_errors=False):
self.prefix = prefix
self.logger = logging.getLogger()
self.set_level(logging_level)
self.set_stream(stream, use_color)
self.abort_errors = abort_errors
@staticmethod
def _check_logging_level(logging_level, no_switch=False):
"""Returns the logging level after checking if it is valid or switching to
default 'info' mode.
Parameters
----------
logging_level : str or int
Level of display. If it is a string, it is converted to the matching
values if it is one of: 'critical', 'error', 'warning', 'info', 'debug';
else if switching is enabled, the default setting 'info' is taken. If it
is an int, it must be one of: 50, 40, 30, 20, 10; else if switching is
enabled the default setting 20 is taken.
no_switch : bool, optional
If true, an unknown logging level will be returned as is (cannot be used
but can be spotted as wrong). Else, it is switched to the default setting
('info' mode).
"""
if no_switch:
if isinstance(logging_level, str):
try: logging_level = Logger.LOGGING_LEVELS[logging_level]
except KeyError: pass
return logging_level
else:
if isinstance(logging_level, int) and not logging_level in [10,20,30,40,50]:
logging_level = 20
if isinstance(logging_level, str):
try: logging_level = Logger.LOGGING_LEVELS[logging_level]
except KeyError: logging_level = Logger.LOGGING_LEVELS['info']
return logging_level
def get_level(self):
"""Gets the current display level."""
return self.logger.getEffectiveLevel()
def set_level(self, logging_level):
"""Sets the display level.
Parameters
----------
logging_level : str or int, optional
Level of display (to be checked).
"""
l = Logger._check_logging_level(logging_level)
self.logger.setLevel(l)
def set_stream(self, stream, use_color):
"""Sets the output stream.
Parameters
----------
stream : IO.stream
Stream to output to.
"""
indent = 18 if use_color else 7
form = '[$BOLD{}$RESET.%(levelname){}s] %(message)s'.format(self.prefix, indent)
color_formatter = ColoredFormatter(formatter_message(form, use_color),
use_color)
stream_handler = logging.StreamHandler(stream)
stream_handler.setFormatter(color_formatter)
l = Logger._check_logging_level(self.get_level())
stream_handler.setLevel(l)
        self.logger.addHandler(stream_handler)
        # keep a reference so that sset_prefix() can swap the formatter later
        self.stream_handler = stream_handler
def set_errors(self, abort):
"""Sets the 'abort_errors' flag (if false, errors trigger an exit of the
program; else, the error must be handled elsewhere).
Parameters
----------
abort : bool
New value for the 'abort_errors' flag.
"""
self.abort_errors = abort
def log(self, msg, level='info', stackoffset=2):
"""Logs a message.
Parameters
----------
msg : str
Message to display.
level : str or int, optional
Level of logging for the message (can be: error', 'warning', 'info'
or 'debug' for a string; or 40, 30, 20 or 10 for an int).
"""
try: stackdata = inspect.stack()[1+stackoffset]
except IndexError: stackdata = inspect.stack()[-1]
# use tuple direct indexing for Python 2.x compatibility
caller_file = stackdata[1].split('/')[-1]
caller_func = stackdata[3]
lineno = stackdata[2]
if caller_func == '<module>': caller_func = ''
else: caller_func = ' - {}()'.format(caller_func)
msg = '({}:{}{}): {}'.format(caller_file, lineno, caller_func, msg)
l = Logger._check_logging_level(level, no_switch=True)
if l == logging.DEBUG: self.logger.debug(msg)
elif l == logging.INFO: self.logger.info(msg)
elif l == logging.WARNING: self.logger.warning(msg)
elif l == logging.ERROR: self.logger.error(msg)
else: self.error('Unknown level of logging: "%s".' % level)
def error(self, msg, stackoffset=2):
"""Warns the user of a fatal error and exists the program with error
return code (1).
Parameters
----------
msg : str
Error message to display.
"""
self.log(msg, level='error', stackoffset=stackoffset+1)
if TK_INSTANCE['app'] is not None: TK_INSTANCE['app'].exit()
if not self.abort_errors:
sys.exit(1)
else:
raise SimpleFemPyError
@staticmethod
def sget_level():
"""Static equivalent of :func:`.Logger.get_level`."""
return STATIC_LOGGER.get_level()
@staticmethod
def sset_level(logging_level):
"""Static equivalent of :func:`.Logger.set_level`."""
STATIC_LOGGER.set_level(logging_level)
@staticmethod
def sset_stream(stream, use_color=False):
"""Static equivalent of :func:`.Logger.set_stream`."""
STATIC_LOGGER.set_stream(stream, use_color)
@staticmethod
def sset_errors(abort):
"""Static equivalent of :func:`.Logger.set_errors`."""
STATIC_LOGGER.set_errors(abort)
@staticmethod
def slog(msg, level='info', stackoffset=1):
"""Static equivalent of :func:`.Logger.log`."""
STATIC_LOGGER.log(msg, level, stackoffset=stackoffset+1)
@staticmethod
def serror(msg, stackoffset=1):
"""Static equivalent of :func:`.Logger.error`."""
STATIC_LOGGER.error(msg, stackoffset=stackoffset+1)
@staticmethod
def sset_prefix(prefix):
"""Sets the prefix of the static logger.
Parameters
----------
prefix : str
Specific project name to show at the beginning of all logs.
"""
STATIC_LOGGER.prefix = prefix
formatter = logging.Formatter('[ {} ] . %(asctime)s :: %(levelname)s '
':: %(message)s'.format(prefix),
datefmt='%Y-%m-%d %H:%M:%S')
STATIC_LOGGER.stream_handler.setFormatter(formatter)
STATIC_LOGGER = Logger()
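# Usage sketch (illustrative, not part of the library): typical consumers go
# through the static helpers, which forward to the module-level STATIC_LOGGER.
if __name__ == '__main__':
    Logger.sset_level('debug')
    Logger.slog('stiffness matrix assembled', level='debug')
    Logger.slog('solver finished', level='info')
    # Logger.serror('mesh file not found')  # would exit, or raise SimpleFemPyError when abort_errors is set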
| StarcoderdataPython |
4815730 | import torch
import numpy as np
import torch.nn as nn
from torch.nn import functional as F
__all__ = ['BCELoss', 'BalancedBCELoss',
'DiceLoss', 'BCEDiceLoss', 'GeneralizedDiceLoss',
'BinaryFocalLoss',
'TverskyLoss']
# todo: study https://github.com/LIVIAETS/surface-loss/blob/master/losses.py
# todo: also summarize https://github.com/Hsuxu/Loss_ToolBox-PyTorch
# todo: compare with https://github.com/JunMa11/SegLoss/tree/master/losses_pytorch
"""
Links
https://www.cnblogs.com/banshaohuan/p/9493024.html
https://zhuanlan.zhihu.com/p/87959967?utm_source=wechat_session&utm_medium=social&utm_oi=999439652683530240
https://github.com/JunMa11/SegLoss
https://github.com/JunMa11/SegLoss/blob/master/losses_pytorch/dice_loss.py
https://github.com/Hsuxu/Loss_ToolBox-PyTorch/blob/master/FocalLoss/focal_loss.py
https://github.com/wolny/pytorch-3dunet/blob/master/pytorch3dunet/unet3d/losses.py#L110
https://github.com/LIVIAETS/surface-loss/blob/108bd9892adca476e6cdf424124bc6268707498e/losses.py
https://www.jianshu.com/p/30043bcc90b6
https://blog.csdn.net/CaiDaoqing/article/details/90457197
https://blog.csdn.net/m0_37477175/article/details/83004746#Dice_loss_70
https://blog.csdn.net/qq_42450404/article/details/92800575
https://blog.csdn.net/JMU_Ma/article/details/97533768
Common losses for semantic segmentation and their PyTorch implementations: https://blog.csdn.net/CaiDaoqing/article/details/90457197
https://blog.csdn.net/m0_37477175/article/details/83004746
"""
"""pytorch 实现dice loss的讨论:
https://github.com/pytorch/pytorch/issues/1249
nn.CrossEntropyLoss -> 多分类
nn.BCELoss -> 二分类
"""
# TODO: standardize the model output to logits and rewrite the `loss function`s accordingly
# TODO: if a softmax objective is used, label data must be generated from the raw gt; 4-class problem, [0,1,2,4] ---> map 4 to 3
# import niftynet.layer.loss_segmentation as loss_segmentation
# loss_segmentation.dice()
# ---------------------- handled as binary classification ----------------------------------- #
""" Model input is (batch-size, 4, patch-size); model output is (batch-size, 3, patch-size).
4 because all four modalities are used; 3 is the number of output label classes [1,2,4]. Make sure the generated truth data matches these values!!!!
idx=0 -> lesion with label value 1, idx=1 -> lesion with label value 2, idx=2 -> lesion with label value 4
"""
# 1 BCELoss
BCELoss = nn.BCEWithLogitsLoss
# nn.CrossEntropyLoss todo: note that in PyTorch, multi-class cross entropy does NOT expect softmax-ed inputs
# see the PyTorch source of the multi-class cross entropy for the reason: https://zhuanlan.zhihu.com/p/98785902
class BalancedBCELoss(nn.Module): # 2 BalancedBCELoss
def __init__(self):
"""
        Reference: https://blog.csdn.net/qq_34914551/article/details/101644942
"""
super(BalancedBCELoss, self).__init__()
def forward(self, logits, target, reduction='mean'):
pos = torch.eq(target, 1).float()
neg = torch.eq(target, 0).float()
num_pos = torch.sum(pos)
num_neg = torch.sum(neg)
num_total = num_pos + num_neg
alpha_pos = num_neg / num_total
alpha_neg = num_pos / num_total
weights = alpha_pos * pos + alpha_neg * neg
return F.binary_cross_entropy_with_logits(logits, target, weights, reduction=reduction)
class DiceLoss(nn.Module): # 3 DiceLoss
"""
    Dice Loss for medical image segmentation https://blog.csdn.net/JMU_Ma/article/details/97533768
from https://blog.csdn.net/CaiDaoqing/article/details/90457197
"""
def __init__(self):
super(DiceLoss, self).__init__()
def forward(self, logits, target):
        probs = torch.sigmoid(logits)  # convert logits to probabilities
N = target.size(0)
smooth = 1
input_flat = probs.view(N, -1)
target_flat = target.view(N, -1)
intersection = input_flat * target_flat
dice_loss = 2 * (intersection.sum(1) + smooth) / (input_flat.sum(1) + target_flat.sum(1) + smooth)
dice_loss = 1 - dice_loss.sum() / N
return dice_loss
class BCEDiceLoss(nn.Module):
"""Linear combination of BCE and Dice losses
from https://github.com/wolny/pytorch-3dunet/blob/master/pytorch3dunet/unet3d/losses.py#L165
"""
def __init__(self, alpha, beta):
super(BCEDiceLoss, self).__init__()
self.alpha = alpha
self.beta = beta
self.bce = nn.BCEWithLogitsLoss()
self.dice = DiceLoss()
def forward(self, logits, target):
return self.alpha * self.bce(logits, target) + self.beta * self.dice(logits, target)
class GeneralizedDiceLoss(nn.Module):
def __init__(self, epsilon=1e-6):
super(GeneralizedDiceLoss, self).__init__()
self.epsilon = epsilon
@staticmethod
def flatten(tensor):
"""Flattens a given tensor such that the channel axis is first.
The shapes are transformed as follows:
(N, C, D, H, W) -> (C, N * D * H * W)
"""
# number of channels
C = tensor.size(1)
# new axis order
axis_order = (1, 0) + tuple(range(2, tensor.dim()))
# Transpose: (N, C, D, H, W) -> (C, N, D, H, W)
transposed = tensor.permute(axis_order)
# Flatten: (C, N, D, H, W) -> (C, N * D * H * W)
return transposed.contiguous().view(C, -1)
def forward(self, logits, target):
        probs = torch.sigmoid(logits)  # convert logits to probabilities
probs = self.flatten(probs)
target = self.flatten(target)
if probs.size(0) == 1:
# for GDL to make sense we need at least 2 channels (see https://arxiv.org/pdf/1707.03237.pdf)
# put foreground and background voxels in separate channels
probs = torch.cat((probs, 1 - probs), dim=0)
target = torch.cat((target, 1 - target), dim=0)
# GDL weighting: the contribution of each label is corrected by the inverse of its volume
w_l = target.sum(-1)
w_l = 1 / (w_l * w_l).clamp(min=self.epsilon)
w_l.requires_grad = False
intersect = (probs * target).sum(-1)
intersect = intersect * w_l
denominator = (probs + target).sum(-1)
denominator = (denominator * w_l).clamp(min=self.epsilon)
# compute per channel Dice coefficient
per_channel_dice = 2 * (intersect.sum() / denominator.sum())
# average Dice score across all channels/classes
generalized_dice_loss = 1. - torch.mean(per_channel_dice)
return generalized_dice_loss
# class FocalLoss(nn.Module): # 4 FocalLoss
# """
#     use the BinaryFocalLoss below instead! See this focal-loss explainer for the reason:
# https://www.cnblogs.com/king-lps/p/9497836.html
# """
# def __init__(self, alpha=0.25, gamma=2, weight=None, ignore=255):
# # from https://blog.csdn.net/CaiDaoqing/article/details/90457197
# super(FocalLoss, self).__init__()
# self.alpha = alpha
# self.gamma = gamma
# self.weight = weight
# self.ignore = ignore
# self.bce_fn = nn.BCEWithLogitsLoss(weight=self.weight)
#
# def forward(self, logits, target):
# if self.ignore is not None:
# mask = target != self.ignore
# target = target[mask]
# logits = logits[mask]
#
# logpt = -self.bce_fn(logits, target)
# pt = torch.exp(logpt)
# focal_loss = -((1 - pt) ** self.gamma) * self.alpha * logpt
# return focal_loss
class BinaryFocalLoss(nn.Module):
"""
link: https://github.com/Hsuxu/Loss_ToolBox-PyTorch/blob/master/FocalLoss/focal_loss.py
This is a implementation of Focal Loss with smooth label cross entropy supported which is proposed in
'Focal Loss for Dense Object Detection. (https://arxiv.org/abs/1708.02002)'
Focal_Loss= -1*alpha*(1-pt)*log(pt)
:param num_class:
:param alpha: (tensor) 3D or 4D the scalar factor for this criterion
:param gamma: (float,double) gamma > 0 reduces the relative loss for well-classified examples (p>0.5) putting more
focus on hard misclassified example
:param reduction: `none`|`mean`|`sum`
:param **kwargs
balance_index: (int) balance class index, should be specific when alpha is float
"""
def __init__(self, alpha=None, gamma=2, ignore_index=None, reduction='mean'):
super(BinaryFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.ignore_index = ignore_index
self.reduction = reduction
if self.alpha is None: # raw default: [1.0, 1.0]
self.alpha = [0.25, 0.75]
self.smooth = 1e-6
assert self.reduction in ['none', 'mean', 'sum']
if self.alpha is None:
self.alpha = torch.ones(2)
elif isinstance(self.alpha, (list, np.ndarray)):
self.alpha = np.asarray(self.alpha)
self.alpha = np.reshape(self.alpha, (2, ))
assert self.alpha.shape[0] == 2, \
'the `alpha` shape is not match the number of class'
elif isinstance(self.alpha, (float, int)):
self.alpha = np.asarray([self.alpha, 1.0 - self.alpha], dtype=np.float).view(2)
else:
raise TypeError('{} not supported'.format(type(self.alpha)))
def forward(self, logits, target):
probs = torch.sigmoid(logits)
probs = torch.clamp(probs, self.smooth, 1.0 - self.smooth)
pos_mask = (target == 1).float()
neg_mask = (target == 0).float()
pos_loss = -self.alpha[0] * torch.pow(torch.sub(1.0, probs), self.gamma) * torch.log(probs) * pos_mask
neg_loss = -self.alpha[1] * torch.pow(probs, self.gamma) * torch.log(torch.sub(1.0, probs)) * neg_mask
neg_loss = neg_loss.sum()
pos_loss = pos_loss.sum()
num_pos = pos_mask.view(pos_mask.size(0), -1).sum()
num_neg = neg_mask.view(neg_mask.size(0), -1).sum()
        if num_pos == 0:  # todo: this only covers the no-lesion case; what if everything is lesion (num_neg == 0)?
bianry_focal_loss = neg_loss
else:
bianry_focal_loss = pos_loss / num_pos + neg_loss / num_neg
return bianry_focal_loss
class TverskyLoss(nn.Module):
def __init__(self, apply_nonlin=torch.sigmoid, batch_dice=False,
do_bg=True, smooth=1., square=False, alpha=0.3, beta=0.7):
"""
from https://github.com/JunMa11/SegLoss/blob/master/losses_pytorch/dice_loss.py
paper: https://arxiv.org/pdf/1706.05721.pdf
        changed apply_nonlin to torch.sigmoid (the original default was None)
"""
super(TverskyLoss, self).__init__()
self.square = square
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.alpha = alpha
self.beta = beta
@staticmethod
def sum_tensor(inp, axes, keepdim=False):
# copy from: https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/utilities/tensor_utilities.py
axes = np.unique(axes).astype(int)
if keepdim:
for ax in axes:
inp = inp.sum(int(ax), keepdim=True)
else:
for ax in sorted(axes, reverse=True):
inp = inp.sum(int(ax))
return inp
def get_tp_fp_fn(self, net_output, gt, axes=None, mask=None, square=False):
"""
net_output must be (b, c, x, y(, z)))
gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
if mask is provided it must have shape (b, 1, x, y(, z)))
:param net_output:
:param gt:
:param axes:
:param mask: mask must be 1 for valid pixels and 0 for invalid pixels
:param square: if True then fp, tp and fn will be squared before summation
:return:
"""
if axes is None:
axes = tuple(range(2, len(net_output.size())))
shp_x = net_output.shape
shp_y = gt.shape
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([i == j for i, j in zip(net_output.shape, gt.shape)]):
# if this is the case then gt is probably already a one hot encoding
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == "cuda":
y_onehot = y_onehot.cuda(net_output.device.index)
y_onehot.scatter_(1, gt, 1)
tp = net_output * y_onehot
fp = net_output * (1 - y_onehot)
fn = (1 - net_output) * y_onehot
if mask is not None:
tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1)
fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1)
fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1)
if square:
tp = tp ** 2
fp = fp ** 2
fn = fn ** 2
tp = self.sum_tensor(tp, axes, keepdim=False)
fp = self.sum_tensor(fp, axes, keepdim=False)
fn = self.sum_tensor(fn, axes, keepdim=False)
return tp, fp, fn
def forward(self, logits, target, loss_mask=None):
shp_x = logits.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
logits = self.apply_nonlin(logits)
tp, fp, fn = self.get_tp_fp_fn(logits, target, axes, loss_mask, self.square)
tversky = (tp + self.smooth) / (tp + self.alpha*fp + self.beta*fn + self.smooth)
if not self.do_bg:
if self.batch_dice:
tversky = tversky[1:]
else:
tversky = tversky[:, 1:]
tversky = tversky.mean()
return 1 - tversky
class BinaryTverskyLossV2(nn.Module):
def __init__(self, alpha=0.3, beta=0.7, ignore_index=None, reduction='mean'):
"""
        todo: same as the TverskyLoss above
from https://github.com/Hsuxu/Loss_ToolBox-PyTorch/blob/master/TverskyLoss/binarytverskyloss.py
"""
"""Dice loss of binary class
Args:
alpha: controls the penalty for false positives.
beta: penalty for false negative.
ignore_index: Specifies a target value that is ignored and does not contribute to the input gradient
reduction: Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'
Shapes:
output: A tensor of shape [N, 1,(d,) h, w] without sigmoid activation function applied
target: A tensor of shape same with output
Returns:
Loss tensor according to arg reduction
Raise:
Exception if unexpected reduction
"""
super(BinaryTverskyLossV2, self).__init__()
self.alpha = alpha
self.beta = beta
self.ignore_index = ignore_index
self.epsilon = 1e-6
self.reduction = reduction
s = self.beta + self.alpha
if s != 1:
self.beta = self.beta / s
self.alpha = self.alpha / s
def forward(self, logits, target):
batch_size = logits.size(0)
if self.ignore_index is not None:
valid_mask = (target != self.ignore_index).float()
logits = logits.float().mul(valid_mask) # can not use inplace for bp
target = target.float().mul(valid_mask)
probs = torch.sigmoid(logits).view(batch_size, -1)
target = target.view(batch_size, -1)
P_G = torch.sum(probs * target, 1) # TP
P_NG = torch.sum(probs * (1 - target), 1) # FP
NP_G = torch.sum((1 - probs) * target, 1) # FN
tversky_index = P_G / (P_G + self.alpha * P_NG + self.beta * NP_G + self.epsilon)
loss = 1. - tversky_index
# target_area = torch.sum(target_label, 1)
# loss[target_area == 0] = 0
if self.reduction == 'none':
loss = loss
elif self.reduction == 'sum':
loss = torch.sum(loss)
else:
loss = torch.mean(loss)
return loss
class LovaszSoftmaxLoss(nn.Module):
def __init__(self):
"""
from https://blog.csdn.net/CaiDaoqing/article/details/90457197
https://github.com/bermanmaxim/LovaszSoftmax/blob/7d48792d35a04d3167de488dd00daabbccd8334b/pytorch/lovasz_losses.py
"""
super(LovaszSoftmaxLoss, self).__init__()
def forward(self, logits, target):
pass
def lovasz_hinge(self, logits, labels, per_image=True, ignore=None):
"""
Binary Lovasz hinge loss
logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id
"""
if per_image:
loss = mean(self.lovasz_hinge_flat(* self.flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore))
for log, lab in zip(logits, labels))
else:
loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
return loss
@staticmethod
def lovasz_hinge_flat(logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
ignore: label to ignore
"""
if len(labels) == 0:
# only void pixels, the gradients should be 0
return logits.sum() * 0.
signs = 2. * labels.float() - 1.
errors = (1. - logits * Variable(signs))
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss
@staticmethod
def flatten_binary_scores(scores, labels, ignore=None):
"""
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = (labels != ignore)
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
# todo: also combine: 9. BCE + Dice loss, 10. Dice + Focal loss
# ---------------------- handled as multi-class classification ----------------------------------- #
""" 模型的输入是(batch-size, 4, patch-size) 模型的输出是(batch-size, 4, patch-size)
这是一个四分类问题实际标签0/1/2/3 需要对truth进一步处理为,[0,0,0,1] 表示为第四类(one-hot 独热编码)
3实际上在truth中为'4'
---------------------- 分割问题不需要全连接层的
F.softmax(logits, dim=1) 【注意】dim=1 !!!!
"""
if __name__ == '__main__':
    loss_function1 = DiceLoss()  # the FocalLoss class above is commented out, so use DiceLoss for this check
loss_function2 = BinaryFocalLoss(alpha=None)
loss_function3 = TverskyLoss()
loss_function4 = BinaryTverskyLossV2()
x = torch.rand((1, 3, 32, 32, 32))
y = (torch.randn(1, 3, 32, 32, 32) > 0.5).float()
loss1 = loss_function1(x, y)
loss2 = loss_function2(x, y)
loss3 = loss_function3(x, y)
loss4 = loss_function4(x, y)
print(loss1, loss2, loss3, loss4)
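    # Extra sanity checks (a sketch, not in the original test): the remaining
    # binary losses defined above accept the same (logits, target) pair.
    for extra_loss in [BCELoss(), BalancedBCELoss(), DiceLoss(),
                       GeneralizedDiceLoss(), BCEDiceLoss(alpha=1.0, beta=1.0)]:
        print(type(extra_loss).__name__, extra_loss(x, y).item())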
"""Discard - 丢弃不用 有参考价值的代码
class DiceLoss(nn.Module): # todo: needs to be re-implemented
def __init__(self):
super(DiceLoss, self).__init__()
def forward(self, inputs, target, loss_type='jaccard'):
smooth = 1e-5
inse = torch.sum(inputs * target)
if loss_type == 'jaccard':
xl = torch.sum(inputs * inputs)
r = torch.sum(target * target)
elif loss_type == 'sorensen':
xl = torch.sum(inputs)
r = torch.sum(target)
else:
raise Exception("Unknown loss_type")
dice = (2. * inse + smooth) / (xl + r + smooth)
dice_loss = 1.0 - float(torch.mean(dice))
return dice_loss
class FocalLoss(nn.Module):
""" """
copy from: https://github.com/Hsuxu/Loss_ToolBox-PyTorch/blob/master/FocalLoss/FocalLoss.py
This is a implementation of Focal Loss with smooth label cross entropy supported which is proposed in
'Focal Loss for Dense Object Detection. (https://arxiv.org/abs/1708.02002)'
Focal_Loss= -1*alpha*(1-pt)*log(pt)
:param num_class:
:param alpha: (tensor) 3D or 4D the scalar factor for this criterion
:param gamma: (float,double) gamma > 0 reduces the relative loss for well-classified examples (p>0.5) putting more
focus on hard misclassified example
:param smooth: (float,double) smooth value when cross entropy
:param balance_index: (int) balance class index, should be specific when alpha is float
:param size_average: (bool, optional) By default, the losses are averaged over each loss element in the batch.
""" """
def __init__(self, apply_nonlin=None, alpha=None, gamma=2, balance_index=0, smooth=1e-5, size_average=True):
super(FocalLoss, self).__init__()
self.apply_nonlin = apply_nonlin
self.alpha = alpha
self.gamma = gamma
self.balance_index = balance_index
self.smooth = smooth
self.size_average = size_average
if self.smooth is not None:
if self.smooth < 0 or self.smooth > 1.0:
raise ValueError('smooth value should be in [0,1]')
def forward(self, logit, target):
if self.apply_nonlin is not None:
logit = self.apply_nonlin(logit)
num_class = logit.shape[1]
if logit.dim() > 2:
# N,C,d1,d2 -> N,C,m (m=d1*d2*...)
logit = logit.view(logit.size(0), logit.size(1), -1)
logit = logit.permute(0, 2, 1).contiguous()
logit = logit.view(-1, logit.size(-1))
target = torch.squeeze(target, 1)
target = target.view(-1, 1)
print(logit.shape, target.shape)
#
alpha = self.alpha
if alpha is None:
alpha = torch.ones(num_class, 1)
elif isinstance(alpha, (list, np.ndarray)):
assert len(alpha) == num_class
alpha = torch.FloatTensor(alpha).view(num_class, 1)
alpha = alpha / alpha.sum()
elif isinstance(alpha, float):
alpha = torch.ones(num_class, 1)
alpha = alpha * (1 - self.alpha)
alpha[self.balance_index] = self.alpha
else:
raise TypeError('Not support alpha type')
if alpha.device != logit.device:
alpha = alpha.to(logit.device)
idx = target.cpu().long()
one_hot_key = torch.FloatTensor(target.size(0), num_class).zero_()
print(one_hot_key)
one_hot_key = one_hot_key.scatter_(1, idx, 1)
if one_hot_key.device != logit.device:
one_hot_key = one_hot_key.to(logit.device)
if self.smooth:
one_hot_key = torch.clamp(
one_hot_key, self.smooth / (num_class - 1), 1.0 - self.smooth)
pt = (one_hot_key * logit).sum(1) + self.smooth
logpt = pt.log()
gamma = self.gamma
alpha = alpha[idx]
alpha = torch.squeeze(alpha)
loss = -1 * alpha * torch.pow((1 - pt), gamma) * logpt
if self.size_average:
loss = loss.mean()
else:
loss = loss.sum()
return loss
class FocalLoss2(nn.Module):
r""""""
This criterion is a implemenation of Focal Loss, which is proposed in
Focal Loss for Dense Object Detection.
Loss(x, class) = - \alpha (1-softmax(x)[class])^gamma \log(softmax(x)[class])
The losses are averaged across observations for each minibatch.
Args:
alpha(1D Tensor, Variable) : the scalar factor for this criterion
gamma(float, double) : gamma > 0; reduces the relative loss for well-classified examples (p > .5),
putting more focus on hard, misclassified examples
size_average(bool): By default, the losses are averaged over observations for each minibatch.
However, if the field size_average is set to False, the losses are
instead summed for each minibatch.
""" """
def __init__(self, class_num, alpha=None, gamma=2, size_average=True):
super(FocalLoss2, self).__init__()
if alpha is None:
self.alpha = torch.ones(class_num, 1)
else:
if isinstance(alpha, Variable):
self.alpha = alpha
else:
self.alpha = alpha
self.gamma = gamma
self.class_num = class_num
self.size_average = size_average
def forward(self, inputs, targets):
N = inputs.size(0)
C = inputs.size(1)
P = inputs
class_mask = inputs.data.new(N, C).fill_(0)
print(class_mask.size())
ids = targets.view(-1, 1).cpu().long()
print(ids.size())
class_mask.scatter_(1, ids.data, 1.)
# print(class_mask)
if inputs.is_cuda and not self.alpha.is_cuda:
self.alpha = self.alpha.cuda()
alpha = self.alpha[ids.data.view(-1)]
probs = (P * class_mask).sum(1).view(-1, 1)
log_p = probs.log()
# print('probs size= {}'.format(probs.size()))
# print(probs)
batch_loss = -alpha * (torch.pow((1 - probs), self.gamma)) * log_p
# print('-----bacth_loss------')
# print(batch_loss)
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss.sum()
return loss
"""
| StarcoderdataPython |
1602146 | start, end = map(int, input().split())
if start == end:
print('O JOGO DUROU 24 HORA(S)')
elif start > end:
time = (24 - start) + end
if time >= 24:
day = time // 24
hours = time % 24
print(day, 'JOGO DUROU', hours, 'HORA(S)')
else:
print('O JOGO DUROU', time, 'HORA(S)')
elif start < end:
time = end - start
if time >= 24:
day = time // 24
hours = time % 24
print(day, 'JOGO DUROU', hours, 'HORA(S)')
else:
print('O JOGO DUROU', time, 'HORA(S)') | StarcoderdataPython |
1781783 | <gh_stars>0
# Generated by Django 3.1.4 on 2021-03-21 19:09
from django.db import migrations, models
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('flex', '0007_auto_20210321_1830'),
]
operations = [
migrations.AddField(
model_name='formpage',
name='subject_de',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='subject'),
),
migrations.AddField(
model_name='formpage',
name='subject_en',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='subject'),
),
migrations.AddField(
model_name='formpage',
name='thank_you_text_de',
field=wagtail.core.fields.RichTextField(blank=True, null=True),
),
migrations.AddField(
model_name='formpage',
name='thank_you_text_en',
field=wagtail.core.fields.RichTextField(blank=True, null=True),
),
]
| StarcoderdataPython |
1609257 | # Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import time
from typing import Dict, Any, Optional
from typing import Sequence
import numpy as np
import pandas as pd
import pytest
import cirq
import cirq.experiments.random_quantum_circuit_generation as rqcg
from cirq.experiments.xeb_simulation import simulate_2q_xeb_circuits
def test_simulate_2q_xeb_circuits():
q0, q1 = cirq.LineQubit.range(2)
circuits = [
rqcg.random_rotations_between_two_qubit_circuit(
q0, q1, depth=50, two_qubit_op_factory=lambda a, b, _: cirq.SQRT_ISWAP(a, b)
)
for _ in range(2)
]
cycle_depths = np.arange(3, 50, 9)
df = simulate_2q_xeb_circuits(circuits=circuits, cycle_depths=cycle_depths)
assert len(df) == len(cycle_depths) * len(circuits)
for (circuit_i, cycle_depth), row in df.iterrows():
assert 0 <= circuit_i < len(circuits)
assert cycle_depth in cycle_depths
assert len(row['pure_probs']) == 4
assert np.isclose(np.sum(row['pure_probs']), 1)
with multiprocessing.Pool() as pool:
df2 = simulate_2q_xeb_circuits(circuits, cycle_depths, pool=pool)
pd.testing.assert_frame_equal(df, df2)
def test_simulate_circuit_length_validation():
q0, q1 = cirq.LineQubit.range(2)
circuits = [
rqcg.random_rotations_between_two_qubit_circuit(
q0,
q1,
depth=10, # not long enough!
two_qubit_op_factory=lambda a, b, _: cirq.SQRT_ISWAP(a, b),
)
for _ in range(2)
]
cycle_depths = np.arange(3, 50, 9)
with pytest.raises(ValueError, match='.*not long enough.*'):
_ = simulate_2q_xeb_circuits(circuits=circuits, cycle_depths=cycle_depths)
def _ref_simulate_2q_xeb_circuit(task: Dict[str, Any]):
"""Helper function for simulating a given (circuit, cycle_depth)."""
circuit_i = task['circuit_i']
cycle_depth = task['cycle_depth']
circuit = task['circuit']
param_resolver = task['param_resolver']
circuit_depth = cycle_depth * 2 + 1
assert circuit_depth <= len(circuit)
tcircuit = circuit[:circuit_depth]
tcircuit = cirq.resolve_parameters_once(tcircuit, param_resolver=param_resolver)
pure_sim = cirq.Simulator()
psi = pure_sim.simulate(tcircuit)
psi = psi.final_state_vector
pure_probs = cirq.state_vector_to_probabilities(psi)
return {'circuit_i': circuit_i, 'cycle_depth': cycle_depth, 'pure_probs': pure_probs}
def _ref_simulate_2q_xeb_circuits(
circuits: Sequence['cirq.Circuit'],
cycle_depths: Sequence[int],
param_resolver: 'cirq.ParamResolverOrSimilarType' = None,
pool: Optional['multiprocessing.pool.Pool'] = None,
):
"""Reference implementation for `simulate_2q_xeb_circuits` that
does each circuit independently instead of using intermediate states.
You can also try editing the helper function to use QSimSimulator() for
benchmarking. This simulator does not support intermediate states, so
you can't use it with the new functionality.
https://github.com/quantumlib/qsim/issues/101
"""
tasks = []
for cycle_depth in cycle_depths:
for circuit_i, circuit in enumerate(circuits):
tasks += [
{
'circuit_i': circuit_i,
'cycle_depth': cycle_depth,
'circuit': circuit,
'param_resolver': param_resolver,
}
]
if pool is not None:
records = pool.map(_ref_simulate_2q_xeb_circuit, tasks, chunksize=4)
else:
records = [_ref_simulate_2q_xeb_circuit(record) for record in tasks]
return pd.DataFrame(records).set_index(['circuit_i', 'cycle_depth']).sort_index()
@pytest.mark.parametrize('multiprocess', (True, False))
def test_incremental_simulate(multiprocess):
q0, q1 = cirq.LineQubit.range(2)
circuits = [
rqcg.random_rotations_between_two_qubit_circuit(
q0, q1, depth=100, two_qubit_op_factory=lambda a, b, _: cirq.SQRT_ISWAP(a, b)
)
for _ in range(20)
]
cycle_depths = np.arange(3, 100, 9)
if multiprocess:
pool = multiprocessing.Pool()
else:
pool = None
start = time.perf_counter()
df_ref = _ref_simulate_2q_xeb_circuits(circuits=circuits, cycle_depths=cycle_depths, pool=pool)
end1 = time.perf_counter()
df = simulate_2q_xeb_circuits(circuits=circuits, cycle_depths=cycle_depths, pool=pool)
end2 = time.perf_counter()
if pool is not None:
pool.terminate()
print("\nnew:", end2 - end1, "old:", end1 - start)
pd.testing.assert_frame_equal(df_ref, df)
# Use below for approximate equality, if e.g. you're using qsim:
# assert len(df_ref) == len(df)
# assert df_ref.columns == df.columns
# for (i1, row1), (i2, row2) in zip(df_ref.iterrows(), df.iterrows()):
# assert i1 == i2
# np.testing.assert_allclose(row1['pure_probs'], row2['pure_probs'], atol=5e-5)
| StarcoderdataPython |
1767929 | <filename>02-Libraries-Implementation/csv/MergeCSV.py<gh_stars>1-10
import csv
def merge_csv(csv_list, output_path):
fieldnamese = list()
for file in csv_list:
with open(file, 'r') as input_csv:
fn = csv.DictReader(input_csv).fieldnames
fieldnamese.extend(x for x in fn if x not in fieldnamese)
with open(output_path, 'w', newline='') as output_csv:
writer = csv.DictWriter(output_csv, fieldnames=fieldnamese)
writer.writeheader()
for file in csv_list:
with open(file, 'r') as input_csv:
reader = csv.DictReader(input_csv)
for row in reader:
writer.writerow(row)
from string import Template
info = {
"title": "Algebra Engine",
"author": "<NAME>"
}
print(Template("This engine is ${title} writen by ${author}").substitute(info))
import collections
from collections import Counter
from collections import deque
def calculate_next_price(*args):
'''
This function is used to predict the next price of stock
:param args: list of input data
:return: integer value
'''
pass
def counter_implementation():
class1 = ["Edward", "Miyuki"]
counter_1 = Counter(class1)
    print(counter_1)
print(calculate_next_price.__doc__)
def namedtuple_implementation():
    Info = collections.namedtuple("Information", "Name Address Telephone")
info1 = Info("Edward", "Nagaoka", "0949619772")
print(info1)
def deque_implementation():
list_of_numbers = deque()
for i in range(10):
list_of_numbers.append(i)
for i in range(10):
list_of_numbers.appendleft(i)
print(list_of_numbers)
list_of_numbers.rotate(10)
print(list_of_numbers)
def main():
deque_implementation()
if __name__ == "__main__":
main()
| StarcoderdataPython |
440 | #!/usr/bin/env python
# -*- coding: utf-8 -*
import os
from setuptools import find_packages, setup
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
with open('requirements.txt') as f:
install_requires = f.read().splitlines()
setup(
name='persistent-celery-beat-scheduler',
version='0.1.1.dev0',
packages=find_packages('src', exclude=('tests',)),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
description=(
'Celery Beat Scheduler that stores the scheduler data in Redis.'
),
author='<NAME>',
author_email='<EMAIL>',
license='Apache 2',
long_description='https://github.com/richardasaurus/persistent-celery-beat-scheduler',
install_requires=install_requires,
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
],
)
| StarcoderdataPython |
87469 | <filename>plasma/child_chain/child_chain.py
import rlp
from ethereum import utils
from web3 import Web3
import json
from plasma.utils.utils import unpack_utxo_pos, get_sender, recoverPersonalSignature
from .block import Block
from .exceptions import (InvalidBlockMerkleException,
InvalidBlockSignatureException,
InvalidTxSignatureException, TxAlreadySpentException,
TxAmountMismatchException, InvalidOutputIndexNumberException,
InvalidTxCurrencyMismatch, InvalidUTXOOutput)
from .transaction import Transaction
from .root_event_listener import RootEventListener
ZERO_ADDRESS = b'\x00' * 20
ZERO_SIGNATURE = b'0x00' * 65
class ChildChain(object):
def __init__(self, root_chain, eth_node_endpoint):
self.root_chain = root_chain
self.blocks = {}
self.child_block_interval = 1000
self.current_block_number = self.child_block_interval
self.current_block = Block()
self.root_chain_event_listener = RootEventListener(root_chain, ['Deposit', 'ExitStarted'], eth_node_endpoint, confirmations=0)
# Register event listeners
self.root_chain_event_listener.on('Deposit', self.apply_deposit)
self.root_chain_event_listener.on('ExitStarted', self.apply_exit)
self.unspent_utxos = {}
self.open_orders = {}
def apply_exit(self, event):
event_args = event['args']
utxo_pos = event_args['utxoPos']
self.mark_utxo_spent(*unpack_utxo_pos(utxo_pos))
def apply_deposit(self, event):
event_args = event['args']
depositor = event_args['depositor']
blknum = event_args['depositBlock']
token = event_args['token']
amount = event_args['amount']
deposit_tx = Transaction(Transaction.TxnType.transfer,
0, 0, 0,
0, 0, 0,
Transaction.UTXOType.transfer, depositor, amount, 0, token,
0, ZERO_ADDRESS, 0, 0, ZERO_ADDRESS,
0, ZERO_ADDRESS, 0, 0, ZERO_ADDRESS,
0, ZERO_ADDRESS, 0, 0, ZERO_ADDRESS)
deposit_block = Block([deposit_tx])
self.blocks[blknum] = deposit_block
print("Child Chain: Applied Deposit on blknum %d\n %s" % (blknum, deposit_tx))
if depositor not in self.unspent_utxos:
self.unspent_utxos[Web3.toChecksumAddress(depositor)] = {}
self.unspent_utxos[Web3.toChecksumAddress(depositor)][(blknum, 0, 0)] = True
def apply_transaction(self, transaction):
tx = rlp.decode(utils.decode_hex(transaction), Transaction)
# Validate the transaction
self.validate_tx(tx)
# Mark the inputs as spent
self.mark_utxo_spent(tx.blknum1, tx.txindex1, tx.oindex1)
self.mark_utxo_spent(tx.blknum2, tx.txindex2, tx.oindex2)
self.current_block.transaction_set.append(tx)
print("Child Chain: Applied Transaction\n %s" % tx)
if tx.blknum1 != 0:
utxo1 = self._get_input_info(tx.blknum1, tx.txindex1, tx.oindex1, tx, 1)
if utxo1['utxotype'] == Transaction.UTXOType.make_order:
self.open_orders.pop((tx.blknum1, tx.txindex1, tx.oindex1))
else:
self.unspent_utxos[Web3.toChecksumAddress(utxo1['owner'])].pop((tx.blknum1, tx.txindex1, tx.oindex1))
if tx.blknum2 != 0:
utxo2 = self._get_input_info(tx.blknum2, tx.txindex2, tx.oindex2, tx, 2)
if utxo2['utxotype'] == Transaction.UTXOType.make_order:
self.open_orders.pop((tx.blknum2, tx.txindex2, tx.oindex2))
else:
self.unspent_utxos[Web3.toChecksumAddress(utxo2['owner'])].pop((tx.blknum2, tx.txindex2, tx.oindex2))
def _get_input_info(self, blknum, txidx, oindex, spending_tx, spending_utxo_num):
transaction = self.blocks[blknum].transaction_set[txidx]
if oindex == 0:
utxotype = transaction.utxotype1
owner = transaction.newowner1
amount = transaction.amount1
tokenprice = transaction.tokenprice1
spent = transaction.spent1
cur = transaction.cur1
elif oindex == 1:
utxotype = transaction.utxotype2
owner = transaction.newowner2
amount = transaction.amount2
tokenprice = transaction.tokenprice2
spent = transaction.spent2
cur = transaction.cur2
elif oindex == 2:
utxotype = transaction.utxotype3
owner = transaction.newowner3
amount = transaction.amount3
tokenprice = transaction.tokenprice3
spent = transaction.spent3
cur = transaction.cur3
elif oindex == 3:
utxotype = transaction.utxotype4
owner = transaction.newowner4
amount = transaction.amount4
tokenprice = transaction.tokenprice4
spent = transaction.spent4
cur = transaction.cur4
else:
raise InvalidOutputIndexNumberException("invalid utxo oindex number: %d" % oindex)
spending_tx_hash = None
spending_sig = None
if spending_tx and spending_utxo_num:
spending_tx_hash = spending_tx.hash
if spending_utxo_num == 1:
spending_sig = spending_tx.sig1
elif spending_utxo_num == 2:
spending_sig = spending_tx.sig2
return {'utxotype': Transaction.UTXOType(utxotype),
'owner': owner,
'amount': amount,
'currency': cur,
'tokenprice': tokenprice,
'spent': spent,
'spending_tx_hash': spending_tx_hash,
'spending_sig': spending_sig}
def _verify_signature(self, inputs, tx):
if (tx.sigtype == Transaction.SigType.utxo):
for input_utxo in inputs:
if (input_utxo['spending_sig'] == ZERO_SIGNATURE or get_sender(input_utxo['spending_tx_hash'], input_utxo['spending_sig']) != input_utxo['owner']):
raise InvalidTxSignatureException()
elif (tx.sigtype == Transaction.SigType.txn):
if (tx.txnsig == ZERO_SIGNATURE):
raise InvalidTxSignatureException()
signature_address = recoverPersonalSignature(tx.readable_str, tx.txnsig)
print("signature_address is %s" % signature_address.hex())
for input_utxo in inputs:
if input_utxo['utxotype'] == Transaction.UTXOType.transfer:
if input_utxo['owner'] != signature_address:
raise InvalidTxSignatureException()
def _validate_transfer_tx(self, tx, inputs, outputs):
input_amount = 0
tx_cur = None
self._verify_signature(inputs, tx)
for input in inputs:
if input['utxotype'] != Transaction.UTXOType.transfer:
raise InvalidUTXOType("invalid utxo input type (%s) for tx type (%s)" % (input['utxotype'].name, tx.txntype.name))
if input['spent']:
raise TxAlreadySpentException('failed to validate tx')
if tx_cur == None:
tx_cur = input['currency']
if input['currency'] != tx_cur:
raise InvalidTxCurrencyMismatch("currency mismatch in txn. txn currency (%s); utxo currency (%s)" % (tx_cur, input['currency']))
input_amount += input['amount']
output_amount = 0
for output in outputs:
if output['utxotype'] != Transaction.UTXOType.transfer:
raise InvalidUTXOType("invalid utxo output type (%s) for tx type (%s)" % (output['utxotype'].name, tx.txntype.name))
output_amount += output['amount']
if output['currency'] != tx_cur:
raise InvalidTxCurrencyMismatch("currency mismatch in txn. txn currency (%s); utxo currency (%s)" % (tx_cur, output['currency']))
if input_amount < output_amount:
raise TxAmountMismatchException('failed to validate tx')
def _validate_make_order_tx(self, tx, inputs, outputs):
input_amount = 0
tx_cur = None
self._verify_signature(inputs, tx)
for input in inputs:
if input['utxotype'] != Transaction.UTXOType.transfer:
raise InvalidUTXOType("invalid utxo input type (%s) for tx type (%s)" % (input['utxotype'].name, tx.txntype.name))
if input['spent']:
raise TxAlreadySpentException('failed to validate tx')
if tx_cur == None:
tx_cur = input['currency']
if tx_cur == ZERO_ADDRESS:
raise InvalidUTXOInputType("currency for input UTXO to make_order tx must NOT be Eth")
if input['currency'] != tx_cur:
raise InvalidTxCurrencyMismatch("currency mismatch in txn. txn currency (%s); utxo currency (%s)" % (tx_cur, input['currency']))
input_amount += input['amount']
# At least one of the outputs must be a make_order utxo.
output_amount = 0
has_make_order_utxo = False
for output in outputs:
if output['utxotype'] == Transaction.UTXOType.make_order:
has_make_order_utxo = True
if output['currency'] != tx_cur:
raise InvalidTxCurrencyMismatch("currency mismatch in txn. txn currency (%s); utxo currency (%s)" % (tx_cur, output['currency']))
output_amount += output['amount']
if not has_make_order_utxo:
raise InvalidTx()
if input_amount < output_amount:
raise TxAmountMismatchException('failed to validate tx')
def _validate_take_order_tx(self, tx, inputs, outputs):
make_order_utxo_input = None
transfer_eth_utxo_input = None
self._verify_signature(inputs, tx)
for input in inputs:
# This transaction type requires the following inputs
# 1) one of the inputs is a make_order utxo.
# 2) one of the inputs is a transfer utxo with currency eth.
if input['utxotype'] == Transaction.UTXOType.make_order:
make_order_utxo_input = input
if input['utxotype'] == Transaction.UTXOType.transfer:
transfer_eth_utxo_input = input
if input['currency'] != ZERO_ADDRESS:
raise InvalidTxCurrencyMismatch("in take_order tx, utxo transfer input must have Eth currency")
if input['spent']:
raise TxAlreadySpentException('failed to validate tx')
if make_order_utxo_input == None or \
transfer_eth_utxo_input == None:
raise InvalidUTXOType("invalid utxo input types for take_order tx")
token_transfer_utxo_output = None
eth_transfer_outputs = []
remainder_make_order_output = None
for output in outputs:
if output['utxotype'] == Transaction.UTXOType.transfer:
if output['currency'] != ZERO_ADDRESS:
                    # Is this the token_transfer_utxo_output
                    if output['currency'] != make_order_utxo_input['currency']:
                        raise InvalidTxCurrencyMismatch("currency mismatch in txn. txn currency (%s); utxo currency (%s)" % (make_order_utxo_input['currency'], output['currency']))
token_transfer_utxo_output = output
elif output['currency'] == ZERO_ADDRESS:
eth_transfer_outputs.append(output)
elif output['utxotype'] == Transaction.UTXOType.make_order and \
output['tokenprice'] == make_order_utxo_input['tokenprice'] and \
output['newowner'] == make_order_utxo_input['owner'] and \
output['currency'] == make_order_utxo_input['currency']:
# Is the remainder make order
remainder_make_order_output = output
else:
raise InvalidUTXOOutput("invalid eth transfer UTXO: %s" % (str(output)))
# Verify that the eth payment is in the tx
num_tokens_to_purchase = 0
if token_transfer_utxo_output:
num_tokens_to_purchase = token_transfer_utxo_output['amount']
min_ether_transfer = Web3.fromWei(num_tokens_to_purchase, 'ether') * make_order_utxo_input['tokenprice']
# Verify that there is at least one eth transfer output utxo to the maker with at least
# min_ether_transfer amount
payment_utxo_found = False
for eth_transfer_output in eth_transfer_outputs:
if eth_transfer_output['newowner'] == make_order_utxo_input['owner'] and \
eth_transfer_output['amount'] >= min_ether_transfer:
payment_utxo_found = True
break
if not payment_utxo_found:
raise InvalidUTXOOutput("must have a valid eth transfer utxo to maker for token purchase")
# Verify that the eth transfer output amounts sum to value less than eth transfer input utxo
total_output_eth_amount = sum([e['amount'] for e in eth_transfer_outputs])
if total_output_eth_amount > transfer_eth_utxo_input['amount']:
raise InvalidUTXOOutput("output eth amount greater than input eth amount")
# Verify that the output make order amount and token tranfer amount is equal to the make order input amount
total_token_transfer_amt = num_tokens_to_purchase + (remainder_make_order_output['amount'] if remainder_make_order_output else 0)
if total_token_transfer_amt != make_order_utxo_input['amount']:
raise InvalidUTXOOutput("token amount output(s) don't equal to input make order amount")
def validate_tx(self, tx):
inputs = [] # dict with keys of (utxotype, owner, amount, currency, tokenprice, spent, spender, signature)
outputs = [] # dict with keys of (utxotype, newowner, amount, tokenprice, currency)
for i_blknum, i_txidx, i_oidx, idx in [(tx.blknum1, tx.txindex1, tx.oindex1, 0), (tx.blknum2, tx.txindex2, tx.oindex2, 1)]:
if i_blknum != 0:
inputs.append(self._get_input_info(i_blknum, i_txidx, i_oidx, tx, idx+1))
for o_utxotype, o_newowner, o_amount, o_tokenprice, o_currency in [(tx.utxotype1, tx.newowner1, tx.amount1, tx.tokenprice1, tx.cur1),
(tx.utxotype2, tx.newowner2, tx.amount2, tx.tokenprice2, tx.cur2),
(tx.utxotype3, tx.newowner3, tx.amount3, tx.tokenprice3, tx.cur3),
(tx.utxotype4, tx.newowner4, tx.amount4, tx.tokenprice4, tx.cur4)]:
if o_utxotype != 0:
outputs.append({'utxotype': o_utxotype,
'newowner': o_newowner,
'amount': o_amount,
'tokenprice': o_tokenprice,
'currency': o_currency})
if tx.txntype == Transaction.TxnType.transfer:
self._validate_transfer_tx(tx, inputs, outputs)
elif tx.txntype == Transaction.TxnType.make_order:
self._validate_make_order_tx(tx, inputs, outputs)
elif tx.txntype == Transaction.TxnType.take_order:
self._validate_take_order_tx(tx, inputs, outputs)
def mark_utxo_spent(self, blknum, txindex, oindex):
if blknum == 0:
return
if oindex == 0:
self.blocks[blknum].transaction_set[txindex].spent1 = True
elif oindex == 1:
self.blocks[blknum].transaction_set[txindex].spent2 = True
elif oindex == 2:
self.blocks[blknum].transaction_set[txindex].spent3 = True
elif oindex == 3:
            self.blocks[blknum].transaction_set[txindex].spent4 = True
def submit_block(self, block):
block = rlp.decode(utils.decode_hex(block), Block)
if block.merklize_transaction_set() != self.current_block.merklize_transaction_set():
raise InvalidBlockMerkleException('input block merkle mismatch with the current block')
# self.root_chain.transact({'from': self.authority}).submitBlock(block.merkle.root)
self.blocks[self.current_block_number] = self.current_block
print("Child Chain: Submitted block\n %s" % self.current_block)
blkid = self.current_block_number
for txid in range(len(block.transaction_set)):
tx = block.transaction_set[txid]
for utxotype, new_address, oindex in [(tx.utxotype1, tx.newowner1, 0),
(tx.utxotype2, tx.newowner2, 1),
(tx.utxotype3, tx.newowner3, 2),
(tx.utxotype4, tx.newowner4, 3)]:
if utxotype == Transaction.UTXOType.make_order:
self.open_orders[(blkid, txid, oindex)] = True
elif utxotype == Transaction.UTXOType.transfer:
self.unspent_utxos[Web3.toChecksumAddress(new_address)][(blkid, txid, oindex)] = True
self.current_block_number += self.child_block_interval
print("going to set current_block to new block")
# WTF!!! Not quite sure why I need to explicitly pass in transaction_set = [] to the Block constructor
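        # Likely explanation (assumption, not verified against Block's source): if Block declares
        # transaction_set=[] as a mutable default argument, the same list object is shared across
        # calls, so passing a fresh empty list here keeps the new block from reusing the previous
        # block's transactions.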
self.current_block = Block(transaction_set = [])
print("new block has %d transactions" % len(self.current_block.transaction_set))
def get_transaction(self, blknum, txindex):
return rlp.encode(self.blocks[blknum].transaction_set[txindex]).hex()
def get_tx_pos(self, transaction):
decoded_tx = rlp.decode(utils.decode_hex(transaction), Transaction)
for blknum in self.blocks:
block = self.blocks[blknum]
for txindex in range(0, len(block.transaction_set)):
tx = block.transaction_set[txindex]
if (decoded_tx.hash == tx.hash):
return blknum, txindex
return None, None
def get_block(self, blknum):
return rlp.encode(self.blocks[blknum]).hex()
def get_current_block(self):
return rlp.encode(self.current_block).hex()
def get_current_block_num(self):
return self.current_block_number
def get_balances(self, address):
eth_balance = 0
pdex_balance = 0
for (blknum, txid, oindex) in self.unspent_utxos.get(address, {}).keys():
tx_info = self._get_input_info(blknum, txid, oindex, None, None)
if tx_info['currency'] == ZERO_ADDRESS:
eth_balance += tx_info['amount']
else:
pdex_balance += tx_info['amount']
return json.dumps([eth_balance, pdex_balance])
def get_utxos(self, address, currency):
utxos = []
for (blknum, txid, oindex) in self.unspent_utxos.get(Web3.toChecksumAddress(address), {}).keys():
tx_info = self._get_input_info(blknum, txid, oindex, None, None)
if tx_info['currency'] == utils.normalize_address(currency):
utxos.append([blknum, txid, oindex, tx_info['amount']])
print("get_utxos: returned utxos - %s" % str(utxos))
return rlp.encode(utxos).hex()
def get_open_orders(self):
open_orders = []
for (blknum, txid, oindex) in self.open_orders.keys():
tx_info = self._get_input_info(blknum, txid, oindex, None, None)
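            # utxo_pos packs (block number, tx index, output index) into a single integer:
            # blknum * 10**9 + txid * 10**4 + oindex, the inverse of the decoding in get_takeorder_txn.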
utxo_pos = blknum * 1000000000 + txid * 10000 + oindex * 1
open_orders.append([tx_info['amount'], tx_info['tokenprice'], '0x' + tx_info['owner'].hex(), utxo_pos])
open_orders.sort(key = lambda x: (x[1], x[3]))
return json.dumps(open_orders)
def get_makeorder_txn(self, address, currency, amount, tokenprice):
print("called get_makeorder_txn with params [%s, %s, %d, %d]" % (address, currency, amount, tokenprice))
encoded_utxos = self.get_utxos(address, currency)
utxos = rlp.decode(utils.decode_hex(encoded_utxos),
rlp.sedes.CountableList(rlp.sedes.List([rlp.sedes.big_endian_int,
rlp.sedes.big_endian_int,
rlp.sedes.big_endian_int,
rlp.sedes.big_endian_int])))
tx = None
# Find a utxos with enough tokens
for utxo in utxos:
if utxo[3] >= amount:
# generate the transaction object
change_amount = utxo[3] - amount
if change_amount:
tx = Transaction(Transaction.TxnType.make_order,
utxo[0], utxo[1], utxo[2],
0, 0, 0,
Transaction.UTXOType.make_order, utils.normalize_address(address), amount, tokenprice, utils.normalize_address(currency),
Transaction.UTXOType.transfer, utils.normalize_address(address), change_amount, 0, utils.normalize_address(currency),
0, ZERO_ADDRESS, 0, 0, ZERO_ADDRESS,
0, ZERO_ADDRESS, 0, 0, ZERO_ADDRESS)
else:
tx = Transaction(Transaction.TxnType.make_order,
utxo[0], utxo[1], utxo[2],
0, 0, 0,
Transaction.UTXOType.make_order, utils.normalize_address(address), amount, tokenprice, utils.normalize_address(currency),
0, ZERO_ADDRESS, 0, 0, ZERO_ADDRESS,
0, ZERO_ADDRESS, 0, 0, ZERO_ADDRESS,
0, ZERO_ADDRESS, 0, 0, ZERO_ADDRESS)
break
print("created make order tx: %s" % str(tx))
return (tx, tx.readable_str if tx else None)
def submit_signed_makeorder_txn(self, address, currency, amount, tokenprice, orig_makeorder_txn_hex, signature):
makeorder_txn, makeorder_txn_hex = self.get_makeorder_txn(address, currency, amount, tokenprice)
if (makeorder_txn_hex != orig_makeorder_txn_hex):
return False
else:
makeorder_txn.sigtype = Transaction.SigType.txn
makeorder_txn.txnsig = utils.decode_hex(utils.remove_0x_head(signature))
self.apply_transaction(rlp.encode(makeorder_txn, Transaction).hex())
return True
def get_takeorder_txn(self, address, utxopos, amount):
print("called get_takeorder_txn with params [%s, %d, %d]" % (address, utxopos, amount))
encoded_utxos = self.get_utxos(address, ZERO_ADDRESS)
blkid = int(utxopos / 1000000000)
txid = int((utxopos % 1000000000) / 10000)
oindex = utxopos % 10000
tx_info = self._get_input_info(blkid, txid, oindex, None, None)
print("make order info: %s" % str(tx_info))
utxos = rlp.decode(utils.decode_hex(encoded_utxos),
rlp.sedes.CountableList(rlp.sedes.List([rlp.sedes.big_endian_int,
rlp.sedes.big_endian_int,
rlp.sedes.big_endian_int,
rlp.sedes.big_endian_int])))
tx = None
purchase_price = Web3.fromWei(amount, 'ether') * tx_info['tokenprice']
# Find a utxos with enough ether
for utxo in utxos:
if utxo[3] >= purchase_price:
# generate the transaction object
ether_change_amount = utxo[3] - purchase_price
makeorder_change_amount = tx_info['amount'] - amount
if ether_change_amount:
ether_change_utxo = [Transaction.UTXOType.transfer, utils.normalize_address(address), int(ether_change_amount), 0, ZERO_ADDRESS]
else:
ether_change_utxo = [0, ZERO_ADDRESS, 0, 0, ZERO_ADDRESS]
if makeorder_change_amount:
makeorder_change_utxo = [Transaction.UTXOType.make_order, utils.normalize_address(tx_info['owner']), int(makeorder_change_amount), tx_info['tokenprice'], utils.normalize_address(tx_info['currency'])]
else:
makeorder_change_utxo = [0, ZERO_ADDRESS, 0, 0, ZERO_ADDRESS]
tx = Transaction(Transaction.TxnType.take_order,
utxo[0], utxo[1], utxo[2],
blkid, txid, oindex,
Transaction.UTXOType.transfer, utils.normalize_address(tx_info['owner']), int(purchase_price), 0, ZERO_ADDRESS, # The ether payment to seller
Transaction.UTXOType.transfer, utils.normalize_address(address), amount, 0, utils.normalize_address(tx_info['currency']), # The token transfer to buyer
*(ether_change_utxo + makeorder_change_utxo))
break
return (tx, tx.readable_str if tx else None)
def submit_signed_takeorder_txn(self, address, utxopos, amount, orig_takeorder_txn_hex, signature):
takeorder_txn, takeorder_txn_hex = self.get_takeorder_txn(address, utxopos, amount)
if (takeorder_txn_hex != orig_takeorder_txn_hex):
return False
else:
takeorder_txn.sigtype = Transaction.SigType.txn
takeorder_txn.txnsig = utils.decode_hex(utils.remove_0x_head(signature))
self.apply_transaction(rlp.encode(takeorder_txn, Transaction).hex())
return True
| StarcoderdataPython |
1624940 | <reponame>eldorbekpulatov/textractor<filename>app.py
# importing required modules
import os
import random
import textract
from flask import Flask, request, render_template, redirect, url_for
app = Flask(__name__)
UPLOAD_FOLDER = 'tmp/'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# allow files of a specific type
ALLOWED_EXTENSIONS = set([ "csv", "doc", "docx", "eml", "epub", "gif", "htm", "html",
"jpeg", "jpg", "json", "log", "mp3", "msg", "odt", "ogg",
"pdf", "png", "pptx", "ps", "psv", "rtf", "tff", "tif",
"tiff", "tsv", "txt", "wav", "xls", "xlsx"])
# function to check the file extension
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/favicon.ico')
def favicon():
return redirect(url_for('static', filename='favicon.ico'))
@app.route('/')
def index():
return render_template("file_upload_form.html")
@app.route('/upload', methods = ['POST', 'GET'])
def upload():
if request.method == 'POST':
if not request.files.get('files[]', None):
return redirect(url_for("index"))
else:
content = []
files = request.files.getlist('files[]')
for file in files:
file_id = random.randint(10000, 99999)
file_path = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
if file and allowed_file(file.filename):
file.save(file_path)
text = ""
try:
text = textract.process(file_path).decode("utf-8", "ignore")
except Exception as e:
text = str(e)
content.append({ "id": file_id, "name":file.filename, "type": file.content_type, "text": text })
os.remove(file_path)
file_id += 1
return render_template("file_search_form.html", data=content)
else:
return redirect(url_for("index"))
if __name__ == '__main__':
app.run(debug=True, use_debugger=True, use_reloader=True) | StarcoderdataPython |
31497 | <reponame>storagebot/readthedocs.org
import os
import shutil
import codecs
import logging
import zipfile
from django.template import Template, Context
from django.contrib.auth.models import SiteProfileNotAvailable
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from builds import utils as version_utils
from core.utils import copy_to_app_servers, copy_file_to_app_servers
from doc_builder.base import BaseBuilder, restoring_chdir
from projects.exceptions import ProjectImportError
from projects.utils import run
from tastyapi import apiv2
log = logging.getLogger(__name__)
class Builder(BaseBuilder):
"""
Mkdocs builder
"""
def clean(self):
pass
def find_conf_file(self, project, version='latest'):
if project.conf_py_file:
log.debug('Inserting conf.py file path from model')
            return os.path.join(project.checkout_path(version), project.conf_py_file)
files = project.find('mkdocs.yml', version)
if not files:
files = project.full_find('mkdocs.yml', version)
if len(files) == 1:
return files[0]
elif len(files) > 1:
for file in files:
if file.find('doc', 70) != -1:
return file
else:
# Having this be translatable causes this odd error:
# ProjectImportError(<django.utils.functional.__proxy__ object at 0x1090cded0>,)
raise ProjectImportError(u"Conf File Missing. Please make sure you have a mkdocs.yml in your project.")
@restoring_chdir
def build(self, **kwargs):
project = self.version.project
os.chdir(project.checkout_path(self.version.slug))
if project.use_virtualenv:
build_command = "%s build --theme=readthedocs" % (
project.venv_bin(version=self.version.slug,
bin='mkdocs')
)
else:
build_command = "mkdocs build --theme=readthedocs"
build_results = run(build_command, shell=True)
return build_results
def move(self, **kwargs):
project = self.version.project
build_path = os.path.join(project.checkout_path(self.version.slug), 'site')
if os.path.exists(build_path):
#Copy the html files.
target = project.rtd_build_path(self.version.slug)
if "_" in project.slug:
new_slug = project.slug.replace('_', '-')
new_target = target.replace(project.slug, new_slug)
#Only replace 1, so user_builds doesn't get replaced >:x
targets = [target, new_target]
else:
targets = [target]
for target in targets:
if getattr(settings, "MULTIPLE_APP_SERVERS", None):
log.info("Copying docs to remote server.")
copy_to_app_servers(build_path, target)
else:
if os.path.exists(target):
shutil.rmtree(target)
log.info("Copying docs on the local filesystem")
shutil.copytree(build_path, target)
else:
log.warning("Not moving docs, because the build dir is unknown.")
| StarcoderdataPython |
3333066 | <reponame>Nv7-GitHub/mold
def read(file):
with open(file) as f:
return f.read() | StarcoderdataPython |
67249 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from constructs import digraph
class TestDirectedGraph(testtools.TestCase):
def test_not_empty(self):
g = digraph.DirectedGraph()
g.add_node(1)
self.assertFalse(g.empty())
self.assertEqual(1, len(g))
def test_is_empty(self):
g = digraph.DirectedGraph()
self.assertTrue(g.empty())
def test_nodes_iter(self):
g = digraph.DirectedGraph()
g.add_node(1)
g.add_node(2)
self.assertEqual([1, 2], list(g.nodes_iter()))
def test_edges_iter(self):
g = digraph.DirectedGraph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2)
g.add_edge(1, 1)
self.assertEqual([(1, 2), (1, 1)], list(g.edges_iter()))
def test_successors_predecessors(self):
g = digraph.DirectedGraph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2)
self.assertEqual([2], list(g.successors_iter(1)))
self.assertEqual([1], list(g.predecessors_iter(2)))
self.assertEqual([], list(g.predecessors_iter(1)))
self.assertEqual([], list(g.successors_iter(2)))
def test_add_bad_edge(self):
g = digraph.DirectedGraph()
g.add_node(1)
self.assertRaises(ValueError, g.add_edge, 1, 2)
def test_add_remove_node(self):
g = digraph.DirectedGraph()
g.add_node(1)
g.add_node(2)
self.assertEqual(2, len(g))
g.remove_node(1)
self.assertEqual(1, len(g))
def test_basic_iter(self):
g = digraph.DirectedGraph()
g.add_node(1)
g.add_node(2)
g.add_node(3)
self.assertEqual([1, 2, 3], list(g))
def test_freeze(self):
g = digraph.DirectedGraph()
g.add_node(1)
self.assertEqual(1, len(g))
g.freeze()
self.assertRaises(RuntimeError, g.remove_node, 1)
self.assertEqual(1, len(g))
self.assertRaises(RuntimeError, g.add_node, 2)
self.assertRaises(RuntimeError, g.add_edge, 1, 1)
def test_freeze_copy(self):
g = digraph.DirectedGraph()
g.add_node(1)
self.assertEqual(1, len(g))
g.freeze()
self.assertRaises(RuntimeError, g.remove_node, 1)
g2 = g.copy()
g2.add_node(2)
self.assertEqual(1, len(g))
self.assertEqual(2, len(g2))
g2.add_edge(1, 2)
self.assertEqual(1, len(list(g2.edges_iter())))
self.assertEqual(0, len(list(g.edges_iter())))
def test_add_remove_node_edges(self):
g = digraph.DirectedGraph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2)
self.assertEqual([(1, 2)], list(g.edges_iter()))
self.assertTrue(g.has_edge(1, 2))
self.assertEqual([1], list(g.predecessors_iter(2)))
g.remove_node(1)
self.assertEqual([], list(g.edges_iter()))
self.assertRaises(ValueError, g.has_edge, 1, 2)
self.assertFalse(g.has_node(1))
self.assertEqual([], list(g.predecessors_iter(2)))
| StarcoderdataPython |
3270475 | <reponame>dadito6/juego_memoria<gh_stars>0
import PySimpleGUI as sg
from src.GUI import board
from src.Controllers import config_theme as theme, Usuario, matriz
""" The board logic is handled here """
def start(username):
window = loop(username)
window.close()
def loop(username):
datos=Usuario.get_datos_usuario(username)
level= datos['config']['level']
ocurrencias=datos['config']['coincidences']
a= matriz.rellenar_matriz(level,ocurrencias)
window = board.build(a)
while True:
event, _values = window.read()
if event in (sg.WIN_CLOSED,'-CANCEL-', '-EXIT-'):
break
return window | StarcoderdataPython |
3254378 | <filename>src/background.py
# Copyright (C) 2022 viraelin
# License: MIT
from PyQt6.QtCore import *
from PyQt6.QtWidgets import *
from PyQt6.QtGui import *
class Background(QGraphicsRectItem):
def __init__(self) -> None:
super().__init__()
self.setZValue(-1000)
size = 800000
size_half = size / 2
rect = QRectF(-size_half, -size_half, size, size)
self.setRect(rect)
xp1 = QPoint(-size, 0)
xp2 = QPoint(size, 0)
self._line_x = QLine(xp1, xp2)
yp1 = QPoint(0, -size)
yp2 = QPoint(0, size)
self._line_y = QLine(yp1, yp2)
self._axis_color = QColor("#111111")
self._pen = QPen()
self._pen.setColor(self._axis_color)
self._pen.setWidth(4)
self._pen.setCosmetic(True)
self._pen.setStyle(Qt.PenStyle.SolidLine)
self._pen.setCapStyle(Qt.PenCapStyle.SquareCap)
self._pen.setJoinStyle(Qt.PenJoinStyle.MiterJoin)
self._background_color = QColor("#222222")
self._brush = QBrush()
self._brush.setColor(self._background_color)
self._brush.setStyle(Qt.BrushStyle.SolidPattern)
def paint(self, painter: QPainter, option: QStyleOptionGraphicsItem, widget: QWidget) -> None:
painter.setPen(self._pen)
painter.setBrush(self._brush)
painter.drawRect(self.rect())
painter.drawLine(self._line_x)
painter.drawLine(self._line_y)
| StarcoderdataPython |
39363 | <reponame>csb6/libtcod-ada<filename>third_party/libtcod/.ci/conan_build.py<gh_stars>100-1000
#!/usr/bin/env python3
"""Build script for conan-package-tools:
https://github.com/conan-io/conan-package-tools
"""
import os
import subprocess
from cpt.packager import ConanMultiPackager
try:
version = subprocess.check_output(
["git", "describe", "--abbrev=0"], universal_newlines=True
)
except subprocess.CalledProcessError:
version = "0.0"
if __name__ == "__main__":
if "CI" in os.environ:
os.environ["CONAN_SYSREQUIRES_MODE"] = "enabled"
# Fix GitHub Actions version tag.
if os.environ.get("GITHUB_REF", "").startswith("refs/tags/"):
version = os.environ["GITHUB_REF"].replace("refs/tags/", "")
builder = ConanMultiPackager(
username="hexdecimal",
channel="conan",
upload="https://api.bintray.com/conan/hexdecimal/conan",
upload_only_when_tag=True,
reference="libtcod/" + version,
remotes=[
"https://conan.bintray.com",
"https://api.bintray.com/conan/bincrafters/public-conan",
],
cppstds=["14"],
visual_runtimes=["MD", "MDd"],
# test_folder="tests/",
build_policy="missing",
upload_dependencies="all",
)
builder.add_common_builds(pure_c=False)
builder.run()
| StarcoderdataPython |
152534 | import frappe
from erpnext.compliance.utils import get_default_license
from frappe.modules.utils import sync_customizations
def execute():
sync_customizations("bloomstack_core")
compliance_info = frappe.get_all('Compliance Info', fields=['name'])
if not compliance_info:
return
sales_orders = frappe.get_all("Sales Order",fields=["customer", "name"])
sales_invoices = frappe.get_all("Sales Invoice",fields=["customer", "name"])
	delivery_notes = frappe.get_all("Delivery Note", fields=["customer", "name"])
quotations = frappe.get_all("Quotation",fields=["party_name", "name", "quotation_to"])
supplier_quotations = frappe.get_all("Supplier Quotation",fields=["supplier", "name"])
purchase_orders = frappe.get_all("Purchase Order",fields=["supplier", "name"])
purchase_invoices = frappe.get_all("Purchase Invoice",fields=["supplier", "name"])
purchase_receipts = frappe.get_all("Purchase Receipt",fields=["supplier", "name"])
for doc in sales_orders:
license = get_default_license("Customer", doc.customer)
if not license:
continue
frappe.db.set_value("Sales Order", doc.name, "license", license)
for doc in sales_invoices:
license = get_default_license("Customer", doc.customer)
if not license:
continue
frappe.db.set_value("Sales Invoice", doc.name, "license", license)
for doc in delivery_notes:
license = get_default_license("Customer", doc.customer)
if not license:
continue
frappe.db.set_value("Delivery Note", doc.name, "license", license)
for doc in quotations:
if doc.quotation_to == "Customer":
license = get_default_license("Customer", doc.party_name)
if not license:
continue
frappe.db.set_value("Quotation", doc.name, "license", license)
for doc in supplier_quotations:
license = get_default_license("Supplier", doc.supplier)
if not license:
continue
frappe.db.set_value("Supplier Quotation", doc.name, "license", license)
for doc in purchase_orders:
license = get_default_license("Supplier", doc.supplier)
if not license:
continue
frappe.db.set_value("Purchase Order", doc.name, "license", license)
for doc in purchase_invoices:
license = get_default_license("Supplier", doc.supplier)
if not license:
continue
frappe.db.set_value("Purchase Invoice", doc.name, "license", license)
for doc in purchase_receipts:
license = get_default_license("Supplier", doc.supplier)
if not license:
continue
frappe.db.set_value("Purchase Receipt", doc.name, "license", license)
| StarcoderdataPython |
50658 | <reponame>rwst/wikidata-molbio
import pronto, six, csv, os, json, argparse, sys, datetime
"""
Uses eyeballed A004.txt. Cleans and moves all items chunkwise (CHUNKSIZE).
"""
CHUNKSIZE = 10
# Initiate the parser
parser = argparse.ArgumentParser()
# Read arguments from the command line
args = parser.parse_args()
# Check for --version or -V
script = os.path.basename(sys.argv[0])[:-3]
done = set()
with open('A004.txt', 'r') as af:
curr_chunk = 0
d0 = []
wd0 = []
d1 = []
for line in af.readlines():
d = line.rstrip().split('|')
d0.append(d[0])
wd0.append('wd:' + d[0])
d1.append(d[1])
curr_chunk = curr_chunk + 1
if curr_chunk < CHUNKSIZE:
continue
query = """
SELECT ?it ?prop ?val (LANG(?val) AS ?lang)
WHERE
{{
VALUES ?it {{ {} }}
?it ?prop ?val.
}}
""".format(' '.join(wd0))
f = open('{}.rq'.format(script), 'w')
f.write(query)
f.close()
print('performing query...', file=sys.stderr)
ret = os.popen('wd sparql {}.rq >{}.json'.format(script, script))
if ret.close() is not None:
raise
file = open('{}.json'.format(script))
s = file.read()
jol = json.loads(s)
items = {}
for d in jol:
it = d.get('it')
p = d.get('prop')
v = d.get('val')
l = d.get('lang')
i = items.get(it)
if i is None:
items[it] = [(p,v,l)]
else:
i.append((p,v,l))
jj = ''
for it in items.keys():
alangs = set()
dlangs = set()
llangs = set()
stmts = {}
for p,v,l in items.get(it):
if p[:29] == 'http://www.wikidata.org/prop/':
s = stmts.get(p[29:])
if s is None:
stmts[p[29:]] = [v]
else:
s.append(v)
if p == 'http://www.w3.org/2004/02/skos/core#altLabel':
alangs.add(l)
if p == 'http://schema.org/description':
dlangs.add(l)
if p == 'http://www.w3.org/2000/01/rdf-schema#label':
llangs.add(l)
j = { 'id': it }
if len(alangs) > 0:
al = {}
for alang in alangs:
al[alang] = []
j['aliases'] = al
if len(dlangs) > 0:
dl = {}
for dlang in dlangs:
dl[dlang] = None
j['descriptions'] = dl
if len(llangs) > 0:
ll = {}
for llang in llangs:
ll[llang] = None
j['labels'] = ll
claims = {}
for p in stmts.keys():
c = []
for stmt in stmts.get(p):
c.append({ 'id': stmt, 'remove': True })
claims[p] = c
if len(claims) > 0:
j['claims'] = claims
jj = jj + json.dumps(j) + '\n'
f = open('{}.json1'.format(script), 'w')
f.write(jj)
f.close()
ret = os.popen('wd ee -bv -s del-manually-selected-obsolete-go-entities --no-exit-on-error <{}.json1'.format(script))
print(ret.read())
if ret.close() is not None:
print('ERROR')
f = open('{}.mtxt'.format(script), 'w')
for (fr,to) in zip(d0, d1):
f.write('{} {}\n'.format(fr, to))
f.close()
ret = os.popen('wd me -bv -s del-manually-selected-obsolete-go-entities --no-exit-on-error <{}.mtxt'.format(script))
print(ret.read())
if ret.close() is not None:
print('ERROR')
curr_chunk = 0
d0 = []
wd0 = []
d1 = []
| StarcoderdataPython |
11734 | import re
import time
from lemoncheesecake.events import TestSessionSetupEndEvent, TestSessionTeardownEndEvent, \
TestEndEvent, SuiteSetupEndEvent, SuiteTeardownEndEvent, SuiteEndEvent, SteppedEvent
from lemoncheesecake.reporting.report import ReportLocation
DEFAULT_REPORT_SAVING_STRATEGY = "at_each_failed_test"
def _is_end_of_result_event(event):
if isinstance(event, TestEndEvent):
return ReportLocation.in_test(event.test)
if isinstance(event, SuiteSetupEndEvent):
return ReportLocation.in_suite_setup(event.suite)
if isinstance(event, SuiteTeardownEndEvent):
return ReportLocation.in_suite_teardown(event.suite)
if isinstance(event, TestSessionSetupEndEvent):
return ReportLocation.in_test_session_setup()
if isinstance(event, TestSessionTeardownEndEvent):
return ReportLocation.in_test_session_teardown()
return None
def save_at_each_suite_strategy(event, _):
return isinstance(event, SuiteEndEvent)
def save_at_each_test_strategy(event, _):
return _is_end_of_result_event(event) is not None
def save_at_each_failed_test_strategy(event, report):
location = _is_end_of_result_event(event)
if location:
result = report.get(location)
return result and result.status == "failed"
else:
return False
def save_at_each_log_strategy(event, _):
return isinstance(event, SteppedEvent)
class SaveAtInterval(object):
def __init__(self, interval):
self.interval = interval
self.last_saving = None
def __call__(self, event, report):
now = time.time()
if self.last_saving:
must_be_saved = now > self.last_saving + self.interval
if must_be_saved:
self.last_saving = now
return must_be_saved
else:
self.last_saving = now # not a saving but an initialization
return False
def make_report_saving_strategy(expression):
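    """Map a report-saving-strategy expression to a callable (or None).
    Static names such as "at_each_failed_test" resolve to the strategy functions above,
    "every_<N>s" resolves to a SaveAtInterval(N), and anything else raises ValueError.
    """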
# first, try with a static expression
static_expressions = {
"at_end_of_tests": None, # no need to an intermediate report saving in this case
"at_each_suite": save_at_each_suite_strategy,
"at_each_test": save_at_each_test_strategy,
"at_each_failed_test": save_at_each_failed_test_strategy,
"at_each_log": save_at_each_log_strategy,
"at_each_event": save_at_each_log_strategy # deprecated since 1.4.5, "at_each_log" must be used instead
}
try:
return static_expressions[expression]
except KeyError:
pass
# second, try with "every_Ns"
m = re.compile(r"^every[_ ](\d+)s$").match(expression)
if m:
return SaveAtInterval(int(m.group(1)))
# ok... nothing we know about
raise ValueError("Invalid expression '%s' for report saving strategy" % expression)
| StarcoderdataPython |
3288871 | import csv
from collections import defaultdict
# Nested tally: genre_count[genre][sub_key] -> number of matching rows
genre_count = defaultdict(lambda: defaultdict(int))
with open('data/lean1.csv', 'r', newline='') as csv_file:
reader = csv.reader(csv_file)
for row in reader:
genre_count[row[0]][row[1]] += 1 | StarcoderdataPython |
1604677 | from __future__ import annotations
from typing import Any
import enum
class EnumMeta(enum.EnumMeta):
def __repr__(cls) -> str:
return f"{cls.__name__}[{', '.join([f'{member.name}={repr(member.value)}' for member in cls])}]"
def __str__(cls) -> str:
return cls.__name__
def __call__(cls, *args: Any, **kwargs: Any):
try:
return super().__call__(*args, **kwargs)
except ValueError as ex:
msg, = ex.args
raise ValueError(f"{msg}, must be one of: {', '.join([repr(member.value) for member in cls])}.")
@property
def names(cls) -> list[str]:
"""A list of the names in this Enum."""
return [member.name for member in cls]
@property
def values(cls) -> list[Any]:
"""A list of the values in this Enum."""
return [member.value for member in cls]
def is_enum(cls, candidate: Any) -> bool:
"""Returns True if the candidate is a subclass of Enum, otherwise returns False."""
try:
return issubclass(candidate, enum.Enum)
except TypeError:
return False
class ValueEnumMeta(EnumMeta):
def __getattribute__(cls, name: str) -> Any:
value = super().__getattribute__(name)
if isinstance(value, cls):
value = value.value
return value
class BaseEnum(enum.Enum):
def __repr__(self) -> str:
return f"{type(self).__name__}(name={self.name}, value={repr(self.value)})"
def __hash__(self) -> int:
return id(self)
def __eq__(self, other: Any) -> bool:
return other is self or other == self.value
def __ne__(self, other: Any) -> bool:
return other is not self and other != self.value
def __str__(self) -> str:
return str(self.value)
def map_to(self, mapping: dict, else_: Any = None, raise_for_failure: bool = True) -> Any:
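        """Return the value mapped to this member in `mapping`, or `else_` when absent;
        if nothing is found and `raise_for_failure` is True, raise ValueError instead."""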
if (ret := mapping.get(self, else_)) is None and raise_for_failure:
raise ValueError(f"No mapping for '{self}' found in {mapping}.")
return ret
class Enum(BaseEnum, enum.Enum, metaclass=EnumMeta):
    """A subclass of enum.Enum with additional methods."""
class ValueEnum(BaseEnum, metaclass=ValueEnumMeta):
"""A subclass of subtypes.Enum. Attribute access on descendants of this class returns the value corresponding to that name, rather than returning the member."""
| StarcoderdataPython |
115166 | """PreProcess Data
Process data for training.
.. helpdoc::
This widget pre-processes data so that it can be more efficiently used in prediction. This involves removing predictors with near zero variance (using nearZeroVar()), predictors with high correlation (using findCorrelation()), and reducing predictors in design matrices.
"""
"""<widgetXML>
<name>PreProcess Data</name>
<icon>default.png</icon>
<tags>
<tag priority='47'>Caret</tag>
</tags>
<summary>Find internal correlations within data and provide an index of items to remove that will resolve these correlations.</summary>
<author>
<authorname>Red-R Core Team</authorname>
<authorcontact>http://www.red-r.org/contact</authorcontact>
</author>
</widgetXML>
"""
from OWRpy import *
import redRGUI, signals
import libraries.RedRCaret.signalClasses as caret
class RedRfindCorrelation(OWRpy):
settingsList = []
def __init__(self, **kwargs):
OWRpy.__init__(self, **kwargs)
self.require_librarys(["caret"])
""" ..rrvarnames::"""
self.setRvariableNames(["findCorrelation", "nearZero", "findCorrelationOutput", "preProcess"])
self.data = {}
self.RFunctionParam_x = ''
self.RFunctionParam_data = ''
#self.inputs.addInput("x", "Correlation Matrix", signals.base.RMatrix, self.processx)
""".. rrsignals::
:description: `A Caret data signal or data container`
"""
self.inputs.addInput("data", "Data Table / Sample List", caret.CaretData.CaretData , self.processdata)
""".. rrsignals::
:description: `A processed Caret data signal`
"""
self.outputs.addOutput("findCorrelation Output","Reduced Data Table", caret.CaretData.CaretData)
""".. rrsignals::
:description: `A special Caret model for use in applying preprocessing to other data sets. This is a consequence of Caret's configuration and, confusing as it is, one can use Predict to apply these predictions to a new data container.`
"""
self.outputs.addOutput("preprocess model", "PreProcess Model (To Calibrate Test Data)", caret.CaretModelFit.CaretModelFit)
grid = redRGUI.base.gridBox(self.controlArea)
""".. rrgui::"""
self.nearZero = redRGUI.base.radioButtons(grid.cell(0,0), label = 'Remove Near Zero Variance Predictors?', buttons = ['Yes', 'No'], setChecked = 'Yes', callback = self.nzvShowHide)
self.nzvBox = redRGUI.base.widgetBox(grid.cell(0,0))
""".. rrgui::"""
self.freqCut = redRGUI.base.lineEdit(self.nzvBox, label = 'Frequency Cut:', text = '95/5')
""".. rrgui::"""
self.uniqueCut = redRGUI.base.lineEdit(self.nzvBox, label = 'Unique Cut:', text = '10')
""".. rrgui::"""
self.preProcess = redRGUI.base.radioButtons(grid.cell(0,1), label = 'Perform Pre Processing?', buttons = ['Yes', 'No'], setChecked = 'Yes', callback = self.nzvShowHide)
""".. rrgui::"""
self.preprocessMethodsCombo = redRGUI.base.listBox(grid.cell(0,1), label = 'Pre Process Methods', items = [("BoxCox", "BoxCox"), ("center", "Center"), ("scale", "Scale"), ("range", "Range"), ("knnImpute", "KNN Impute"), ("bagImpute", "Bag Impute"), ("pca", "Principal Components"), ("ica", "Independent Components"), ("spatialSign", "Spatial Sign")])
""".. rrgui::"""
self.preprocessMethodsCombo.setSelectionMode(QAbstractItemView.ExtendedSelection)
""".. rrgui::"""
self.preprocessMethodsCombo.setSelectedIds(["center", "scale"])
""".. rrgui::"""
self.preprocessTresh = redRGUI.base.spinBox(grid.cell(0,2), label = 'Pre Process Threshold:', min = 0, value = 0.95, decimals = 3)
""".. rrgui::"""
self.preProcessNARM = redRGUI.base.radioButtons(grid.cell(0,2), label = 'Remove NA?', buttons = [('TRUE', 'Yes'), ('FALSE', 'No')], setChecked = 'TRUE', callback = self.nzvShowHide)
""".. rrgui::"""
self.preProcessKNN = redRGUI.base.spinBox(grid.cell(0,2), label = 'Pre Process Threshold:', min = 0, value = 5, decimals = 0)
""".. rrgui::"""
self.preProcessKNNSUM = redRGUI.base.comboBox(grid.cell(0,2), label = 'KNN Summary', items = [('mean', 'Mean'), ('median', 'Median'), ('min', 'Minimum'), ('max', 'Maximum')])
""".. rrgui::"""
self.preProcessFUDGE = redRGUI.base.spinBox(grid.cell(0,2), label = 'Fudge Value:', min = 0, value = 0.2, decimals = 3)
""".. rrgui::"""
self.preProcessNUMUNI = redRGUI.base.spinBox(grid.cell(0,2), label = 'Box-Cot Unique Values', min = 2, value = 3, decimals = 0)
""".. rrgui::"""
self.RFunctionParamcutoff_spinBox = redRGUI.base.spinBox(grid.cell(0,2), label = "Max Correlation Coef (/100):", min = 1, max = 99, value = 90)
""".. rrgui::"""
redRGUI.base.commitButton(self.bottomAreaRight, "Commit", callback = self.commitFunction)
""".. rrgui::"""
self.RoutputWindow = redRGUI.base.textEdit(self.controlArea, label = "R Output Window")
def nzvShowHide(self):
if unicode(self.nearZero.getChecked()) == 'Yes':
self.nzvBox.show()
else:
self.nzvBox.hide()
def processdata(self, data):
if data:
self.RFunctionParam_data=data.getData()
self.RFunctionParam_classes = data.getClasses()
else:
self.RFunctionParam_data=''
self.RFunctionParam_classes = ''
def commitFunction(self):
if unicode(self.RFunctionParam_data) == '':
self.status.setText('No Data To Work On')
return
## findCorrelation params
injection = []
string = ',cutoff='+unicode(float(self.RFunctionParamcutoff_spinBox.value())/100)+''
injection.append(string)
inj = ''.join(injection)
## nzv parame
nzvInjection = []
nzvInjection.append(',freqCut = '+unicode(self.freqCut.text()))
nzvInjection.append(',uniqueCut = '+unicode(self.uniqueCut.text()))
nzvInj = ''.join(nzvInjection)
## if nzv checked
if unicode(self.nearZero.getChecked()) == 'Yes':
self.R('%s<-nearZeroVar(%s, %s)' % (unicode(self.Rvariables['nearZero']), unicode(self.RFunctionParam_data), unicode(nzvInj)))
cor = 'cor(%s)' % unicode(self.RFunctionParam_data)
self.R(self.Rvariables['findCorrelation']+'<-findCorrelation(x=%s %s)' % (cor, inj))
remove = 'c(%s, %s)' % (self.Rvariables['findCorrelation'], self.Rvariables['nearZero'])
## else nzv not checked
else:
cor = 'cor(%s)' % unicode(self.RFunctionParam_data)
self.R(self.Rvariables['findCorrelation']+'<-findCorrelation(x=%s %s)' % (cor, inj))
remove = self.Rvariables['findCorrelation']
## at this point we should remove those columns that do not supply any data or that are correlated.
self.R(self.Rvariables['findCorrelationOutput']+'<-'+self.RFunctionParam_data+'[, -'+remove+']', wantType = 'NoConversion')
## preprocess fits a model that must then be used as a predictor for each set of data. In this case there is a predition function that should be run, in other cases the prediction function should be run on other attached data sources.
if self.preProcess.getChecked() == 'Yes':
self.R('%(OUTPUT)s<-preProcess(%(DATA)s, method = %(METHOD)s, threshold = %(THRESH)s, na.remove = %(NARM)s, k = %(KNN)s, knnSummary = %(KNNSUM)s, outcome = %(OUTCOME)s, fudge = %(FUDGE)s, numUnique = %(NUMUNI)s)' %
{
'OUTPUT':self.Rvariables['preProcess']
,'DATA':'%s' % self.Rvariables['findCorrelationOutput']
,'METHOD':'c(%s)' % (','.join(['"%s"' % i for i in self.preprocessMethodsCombo.selectedIds()]))
,'THRESH':str(self.preprocessTresh.value())
,'NARM':self.preProcessNARM.getCheckedId()
,'KNN':str(self.preProcessKNN.value())
,'KNNSUM':self.preProcessKNNSUM.currentId()
,'OUTCOME':'%s' % self.RFunctionParam_classes
,'FUDGE':str(self.preProcessFUDGE.value())
,'NUMUNI':str(self.preProcessNUMUNI.value())
}
, wantType = 'NoConversion')
self.R('%(OUTPUT)s<-predict(%(PREPRO)s, %(OUTPUT)s)' % {
'OUTPUT':self.Rvariables['findCorrelationOutput'],
'PREPRO':self.Rvariables['preProcess']
},
wantType = 'NoConversion')
newData = caret.CaretData.CaretData(self, data = self.Rvariables['findCorrelationOutput'], classes = self.RFunctionParam_classes, parent = self.Rvariables['findCorrelationOutput'])
self.rSend("findCorrelation Output", newData)
newDataPreprocess = caret.CaretModelFit.CaretModelFit(self, data = self.Rvariables['preProcess'])
self.rSend("preprocess model", newDataPreprocess)
self.RoutputWindow.clear()
self.RoutputWindow.insertPlainText('Removed %s samples from the data.' % self.R('length('+self.Rvariables['findCorrelation']+')'))
| StarcoderdataPython |
1764795 | from pyexpat import features
from darts.models.forecasting.gradient_boosted_model import LightGBMModel
import wandb
from darts.models import TCNModel
import pandas as pd
from darts.metrics import mape, mae
from darts import TimeSeries
from darts.dataprocessing.transformers import Scaler
from copy import deepcopy
import numpy as np
import logging
import click
from functools import partial
from aeml.models.utils import split_data, choose_index
from aeml.causalimpact.utils import get_timestep_tuples, get_causalimpact_splits
import pickle
from aeml.causalimpact.utils import _select_unrelated_x
from aeml.models.gbdt.gbmquantile import LightGBMQuantileRegressor
from aeml.models.gbdt.run import run_model
from aeml.models.gbdt.settings import *
from darts.dataprocessing.transformers import Scaler
from darts import TimeSeries
import pandas as pd
from copy import deepcopy
import time
log = logging.getLogger(__name__)
MEAS_COLUMNS = ["TI-19", "TI-3", "FI-19", "FI-11", "TI-1213", "TI-35", "delta_t"]
to_exclude = {
0: ["TI-19"],
1: ["FI-19"],
2: ["TI-3"],
3: ["FI-11"],
4: ["FI-11"],
5: ["TI-1213", "TI-19"],
6: [],
}
TARGETS_clean = ["2-Amino-2-methylpropanol C4H11NO", "Piperazine C4H10N2"]
sweep_config = {
"metric": {"goal": "minimize", "name": "mae_valid"},
"method": "bayes",
"parameters": {
"lags": {"min": 1, "max": 200, "distribution": "int_uniform"},
"feature_lag": {"max": -1, "min": -200},
"n_estimators": {"min": 50, "max": 1000},
"bagging_freq": {"min": 0, "max": 10, "distribution": "int_uniform"},
"bagging_fraction": {"min": 0.001, "max": 1.0},
"num_leaves": {"min": 1, "max": 200, "distribution": "int_uniform"},
"extra_trees": {"values": [True, False]},
"max_depth": {"values": [-1, 10, 20, 40, 80, 160, 320]},
},
}
sweep_id = wandb.sweep(sweep_config, project="aeml")
def get_data(x, y, target, targets_clean):
targets = targets_clean[target]
train, valid, test, ts, ts1 = split_data(x, y, targets, 0.5)
return (train, valid, test)
def load_data(datafile="../../../paper/20210624_df_cleaned.pkl"):
df = pd.read_pickle(datafile)
Y = TimeSeries.from_dataframe(df, value_cols=TARGETS_clean).astype(np.float32)
X = TimeSeries.from_dataframe(df, value_cols=MEAS_COLUMNS).astype(np.float32)
transformer = Scaler()
X = transformer.fit_transform(X)
y_transformer = Scaler()
Y = y_transformer.fit_transform(Y)
return X, Y, transformer, y_transformer
def select_columns(day):
feat_to_exclude = to_exclude[day]
feats = [f for f in MEAS_COLUMNS if f not in feat_to_exclude]
return feats
with open("step_times.pkl", "rb") as handle:
times = pickle.load(handle)
DF = pd.read_pickle("20210508_df_for_causalimpact.pkl")
def inner_train_test(x, y, day, target):
run = wandb.init()
features = select_columns(day)
y = y[TARGETS_clean[target]]
x = x[features]
x_trains = []
y_trains = []
before, during, after, way_after = get_causalimpact_splits(x, y, day, times, DF)
# if len(before[0]) > len(way_after[0]):
x_trains.append(before[0])
y_trains.append(before[1])
# else:
x_trains.append(way_after[0])
y_trains.append(way_after[1])
xscaler = Scaler(name="x-scaler")
yscaler = Scaler(name="y-scaler")
longer = np.argmax([len(x_trains[0]), len(x_trains[1])])
shorter = np.argmin([len(x_trains[0]), len(x_trains[1])])
print(len(x_trains[0]), len(x_trains[1]))
x_trains[longer] = xscaler.fit_transform(x_trains[longer])
y_trains[longer] = yscaler.fit_transform(y_trains[longer])
x_trains[shorter] = xscaler.transform(x_trains[shorter])
y_trains[shorter] = yscaler.transform(y_trains[shorter])
    steps = len(during[0])
if steps > len(x_trains[shorter]):
ts = choose_index(x, 0.3)
x_before, x_after = x_trains[longer].split_before(ts)
y_before, y_after = y_trains[longer].split_before(ts)
y_trains[shorter] = y_before
y_trains[longer] = y_after
x_trains[shorter] = x_before
x_trains[longer] = x_after
print(steps, len(x_trains[shorter]))
train = (x_trains[longer], y_trains[longer])
valid = (x_trains[shorter], y_trains[shorter])
log.info("initialize model")
model = LightGBMModel(
lags=run.config.lags,
lags_past_covariates=[
run.config.feature_lag,
]
* len(features),
n_estimators=run.config.n_estimators,
bagging_freq=run.config.bagging_freq,
bagging_fraction=run.config.bagging_fraction,
num_leaves=run.config.num_leaves,
extra_trees=run.config.extra_trees,
max_depth=run.config.max_depth,
output_chunk_length=steps,
objective="quantile",
alpha=0.5,
)
log.info("fit")
model.fit(series=train[1], past_covariates=train[0], verbose=False)
log.info("historical forecast train set")
backtest_train = model.historical_forecasts(
train[1],
past_covariates=train[0],
start=0.3,
forecast_horizon=steps,
stride=1,
retrain=False,
verbose=False,
)
log.info("historical forecast valid")
backtest_valid = model.historical_forecasts(
valid[1],
past_covariates=valid[0],
start=0.5,
forecast_horizon=steps,
stride=1,
retrain=False,
verbose=False,
)
log.info("getting scores")
# mape_valid = mape(valid[1][TARGETS_clean[0]], backtest_valid["0"])
# mape_train = mape(train[1][TARGETS_clean[0]], backtest_train["0"])
mae_valid = mae(valid[1][TARGETS_clean[target]], backtest_valid["0"])
mae_train = mae(train[1][TARGETS_clean[target]], backtest_train["0"])
# wandb.log({"mape_valid": mape_valid})
# wandb.log({"mape_train": mape_train})
log.info(f"MAE valid {mae_valid}")
wandb.log({"mae_valid": mae_valid})
wandb.log({"mae_train": mae_train})
@click.command()
@click.argument("day", type=click.INT, default=1)
@click.argument("target", type=click.INT, default=0)
def train_test(day, target):
print("get data")
x, y, _, _ = load_data("20210508_df_for_causalimpact.pkl")
optimizer_func = partial(inner_train_test, day=day, x=x, y=y, target=target)
wandb.agent(sweep_id, function=optimizer_func, project="aeml")
if __name__ == "__main__":
train_test()
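# Example invocation (script name assumed): `python gbdt_sweep.py 3 0`
# starts the wandb sweep agent for day 3 and TARGETS_clean[0] (2-amino-2-methylpropanol).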
| StarcoderdataPython |
3218328 | # Source Server Stats
# File: sourcestats/util/__init__.py
# Desc: general utilities
from hashlib import sha1
import requests
from flask import jsonify, abort
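# Lazily populated cache mapping Steam app id -> app name (see get_source_apps below).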
SOURCE_APPS = None
def get_source_apps():
global SOURCE_APPS
if SOURCE_APPS is None:
response = requests.get('http://api.steampowered.com/ISteamApps/GetAppList/v2').json()
SOURCE_APPS = {
app['appid']: app['name']
for app in response['applist']['apps']
}
return SOURCE_APPS
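# Usage sketch: hash_address(('192.0.2.1', 27015)) returns a stable hex digest for a server address.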
def hash_address(address):
    host, port = address
    hasher = sha1()
    # sha1 requires bytes; encoding keeps this working on Python 3 (no-op for ASCII hosts on Python 2)
    hasher.update(host.encode('utf-8'))
    hasher.update(str(port).encode('utf-8'))
    return hasher.hexdigest()
def api_abort(status_code, message):
response = jsonify(error=message, status=status_code)
response.status_code = status_code
abort(response)
| StarcoderdataPython |
1635722 | # python3 compatibility while retaining checking
# for both str and unicode in python2
try:
string_types = (str, unicode)
except NameError:
string_types = (str,)
def is_string_type(val):
return isinstance(val, string_types)
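# is_string_type('x') is True on both interpreters; on python2 it also accepts unicode values.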
try:
    # reduce lives in functools on python3 (and is also available there on python2.6+)
    from functools import reduce
except ImportError:
    # very old python2: fall back to the builtin reduce
    pass
reduce = reduce
try:
import urllib.request as urllib2
import http.client as httplib
from urllib.parse import urlparse, urljoin, parse_qsl, parse_qs
from urllib.request import urlopen
from _thread import allocate_lock
unichr = chr
except ImportError:
# Python 2
import httplib
import urllib2
from urlparse import urlparse, urljoin, parse_qs, parse_qsl
from urllib import urlopen
from thread import allocate_lock
unichr = unichr
| StarcoderdataPython |
1761244 | from setuptools import setup
setup(
name='ppmp',
version='1.0.1',
description='Prediction of Perturbations of Modular Protein structures by triplet analysis',
author='<NAME>',
author_email='<EMAIL>',
packages=['ppmp'],
install_requires=['matplotlib', 'numpy', 'pandas', 'seaborn', 'scipy', 'tqdm', 'biopython', 'statsmodels'],
)
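# Local install sketch (assumed workflow): run `pip install .` from the repository root.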
| StarcoderdataPython |
4834571 | # File: tests/test_fms_api_match_details_parser.py
import json
from datetime import datetime
import unittest2
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from datafeeds.parsers.fms_api.fms_api_match_parser import FMSAPIMatchDetailsParser
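# The JSON files referenced below are captured FMS API score-breakdown responses used as test fixtures.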
class TestFMSAPIMatchDetailsParser(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
def tearDown(self):
self.testbed.deactivate()
def test_parse_no_matches(self):
with open('test_data/fms_api/2016_no_score_breakdown.json', 'r') as f:
matches = FMSAPIMatchDetailsParser(2016, 'nyny').parse(json.loads(f.read()))
self.assertTrue(isinstance(matches, dict))
self.assertEqual(len(matches), 0)
def test_parse_qual(self):
with open('test_data/fms_api/2016_nyny_qual_breakdown.json', 'r') as f:
matches = FMSAPIMatchDetailsParser(2016, 'nyny').parse(json.loads(f.read()))
self.assertTrue(isinstance(matches, dict))
self.assertEqual(len(matches), 88)
def test_parse_playoff(self):
with open('test_data/fms_api/2016_nyny_playoff_breakdown.json', 'r') as f:
matches = FMSAPIMatchDetailsParser(2016, 'nyny').parse(json.loads(f.read()))
self.assertTrue(isinstance(matches, dict))
self.assertEqual(len(matches), 15)
def test_parse_playoff_with_octofinals(self):
with open('test_data/fms_api/2016_micmp_staging_playoff_breakdown.json', 'r') as f:
matches = FMSAPIMatchDetailsParser(2016, 'micmp').parse(json.loads(f.read()))
self.assertTrue(isinstance(matches, dict))
        self.assertEqual(len(matches), 36)
| StarcoderdataPython |