id (stringlengths 1-8) | text (stringlengths 6-1.05M) | dataset_id (stringclasses 1 value) |
---|---|---|
243907
|
<filename>filewithlock.py
import codecs
import os
import time
def wait_lock(filename):
while os.path.exists(filename):
time.sleep(0.001)
def add_lock(filename):
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
if not os.path.exists(filename):
codecs.open(filename, 'w', 'utf-8').close()
def release_lock(filename):
if os.path.exists(filename):
os.remove(filename)
class FileWithLock:
def __init__(self, filename, mode, encoding, errors, buffering):
self.filename = filename
self.mode = mode
self.encoding = encoding
self.errors = errors
self.buffering = buffering
self.file = None
def __enter__(self):
if self.mode == 'r':
if not os.path.exists(self.filename):
codecs.open(self.filename, 'w', 'utf-8').close()
wait_lock(self.filename + '.readlock')
add_lock(self.filename + '.writelock')
elif self.mode == 'w':
wait_lock(self.filename + '.writelock')
add_lock(self.filename + '.readlock')
add_lock(self.filename + '.writelock')
else:
            raise ValueError('invalid mode: \'{}\''.format(self.mode))
self.file = codecs.open(self.filename, self.mode, self.encoding,
self.errors, self.buffering)
return self.file
def __exit__(self, exc_type, exc_value, traceback):
if self.mode == 'r':
release_lock(self.filename + '.writelock')
elif self.mode == 'w':
release_lock(self.filename + '.readlock')
release_lock(self.filename + '.writelock')
else:
            raise ValueError('invalid mode: \'{}\''.format(self.mode))
self.file.close()
def open(filename, mode='r', encoding='utf-8', errors='strict', buffering=1):
return FileWithLock(filename, mode, encoding, errors, buffering)
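# --- Illustrative usage (an added sketch, not part of the original module) ---
# The module-level open() above shadows the builtin and guards access with
# '.readlock'/'.writelock' marker files; 'demo.txt' is an arbitrary example path.
if __name__ == '__main__':
    with open('demo.txt', 'w') as f:   # waits for '.writelock', then creates both lock files
        f.write(u'hello\n')
    with open('demo.txt', 'r') as f:   # waits for '.readlock', then creates '.writelock'
        print(f.read())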
|
StarcoderdataPython
|
1710192
|
<gh_stars>0
class Node:
def __init__(self, data, next):
self.data = data
self.next = next
|
StarcoderdataPython
|
1878033
|
import argparse
import math
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--deploy', action='store_true')
group.add_argument('--test', action='store_true')
def solver(inputString, node1="YOU", node2="SAN"):
inputString = inputString.strip()
lines = inputString.split()
parents = {}
for line in lines:
nodeA, nodeB = line.strip().split(')')
parents[nodeB] = nodeA
answer = math.inf
mapNode1 = {}
node = node1
counter = 0
mapNode1[node] = counter
while node in parents:
node = parents[node]
counter += 1
mapNode1[node] = counter
node = node2
counter = 0
while node in parents:
node = parents[node]
counter += 1
if node in mapNode1:
answer = min(answer, counter + mapNode1[node] - 2)
return str(answer)
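# (Added explanatory note) solver() builds a child-to-parent map from the "A)B" orbit
# lines, records the distance from node1 ("YOU") to each of its ancestors in mapNode1,
# then walks upward from node2 ("SAN"); every shared ancestor scores
# counter + mapNode1[node] - 2 (the two endpoints themselves are not transfers),
# and the minimum of those scores is returned.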
def test():
assert(solver("""
COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L
K)YOU
I)SAN"""
) == "4")
if __name__ == '__main__':
args = parser.parse_args()
if args.test:
print("Running test")
test()
print("Test passed successfully")
elif args.deploy:
in_f = open("6.in", "r")
out_f = open("6b.out", "w")
inputs = in_f.read()
output = solver(inputs)
print(output)
out_f.write("{}\n".format(output))
in_f.close()
out_f.close()
print("Finished running")
|
StarcoderdataPython
|
1701695
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Pixel Starships Market API
# ----- Packages ------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import datetime
import csv
import numpy as np
import os
import pandas as pd
import pss_core as core
import pss_prestige as p
import re
import urllib.request
import xml.etree.ElementTree
# Discord limits messages to 2000 characters
MESSAGE_CHARACTER_LIMIT = 2000
HOME = os.getenv('HOME')
base_url = 'http://{}/'.format(core.get_production_server())
# ----- Utilities -----------------------------------------------------
def save_raw_text(raw_text, filename):
with open(filename, 'w') as f:
f.write(raw_text)
def get_base_url(api_version=1, https=False):
if https is True:
prefix = 'https://'
else:
prefix = 'http://'
if api_version==2:
return prefix + 'api2.pixelstarships.com/'
else:
return prefix + 'api.pixelstarships.com/'
# ----- Get Latest Version --------------------------------------------
def get_latest_version():
    url = base_url + 'SettingService/GetLatestVersion?languageKey=en'
data = urllib.request.urlopen(url).read()
return data.decode()
# ----- Item Designs --------------------------------------------------
def get_item_designs():
url = base_url + 'ItemService/ListItemDesigns2?languageKey=en'
data = urllib.request.urlopen(url).read()
return data.decode()
def save_item_design_raw(raw_text):
now = datetime.datetime.now()
filename = 'data/items-{}.txt'.format(now.strftime('%Y%m%d'))
save_raw_text(raw_text, filename)
def load_item_design_raw(refresh=False):
now = datetime.datetime.now()
    filename = 'data/items-{}.txt'.format(now.strftime('%Y%m%d'))
if os.path.isfile(filename) and refresh is False:
with open(filename, 'r') as f:
raw_text = f.read()
else:
raw_text = get_item_designs()
save_item_design_raw(raw_text)
return raw_text
def parse_item_designs(raw_text):
d = {}
# r_lookup = {}
root = xml.etree.ElementTree.fromstring(raw_text)
for c in root:
# print(c.tag) # ListItemDesigns
for cc in c:
# print(cc.tag) # ItemDesigns
for ccc in cc:
# print(ccc.tag) # ItemDesign
if ccc.tag != 'ItemDesign':
continue
item_name = ccc.attrib['ItemDesignName']
d[item_name] = ccc.attrib
# r_lookup[int(ccc.attrib['ItemDesignId'])] = item_name
return d
def xmltext_to_df(raw_text):
df = pd.DataFrame()
root = xml.etree.ElementTree.fromstring(raw_text)
for c in root:
for cc in c:
for i, ccc in enumerate(cc):
df = df.append(pd.DataFrame(ccc.attrib, index=[i]))
return df
# ----- Lists ---------------------------------------------------------
def get_lists(df_items):
item_rarities = list(df_items.Rarity.unique())
item_enhancements = list(df_items.EnhancementType.unique())
item_types = list(df_items.ItemType.unique())
item_subtypes = list(df_items.ItemSubType.unique())
return item_rarities, item_enhancements, item_types, item_subtypes
# ----- Parsing -------------------------------------------------------
def fix_item(item):
# Convert to lower case & non alpha-numeric
item = re.sub('[^a-z0-9]', '', item.lower())
item = re.sub('anonmask', 'anonymousmask', item)
item = re.sub('armour', 'armor', item)
item = re.sub('bunny', 'rabbit', item)
item = re.sub("(darkmatterrifle|dmr)(mark|mk)?(ii|2)", "dmrmarkii", item)
item = re.sub('golden', 'gold', item)
return item
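# Illustrative check of fix_item() (an added sketch, not part of the original script):
# it lower-cases, strips non-alphanumeric characters, then applies the alias
# substitutions, so spelling variants collapse to the same lookup key.
def _demo_fix_item():
    assert fix_item("Golden Armour!") == "goldarmor"
    assert fix_item("Anon Mask") == "anonymousmask"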
def filter_item_designs(search_str, rtbl, filter):
item_original = list(rtbl.keys())
item_lookup = [ fix_item(s) for s in item_original ]
item_fixed = fix_item(search_str)
txt = ''
for i, item_name in enumerate(item_lookup):
m = re.search(item_fixed, item_name)
if m is not None:
item_name = item_original[i]
d = rtbl[item_name]
# Filter out items
if (item_name == 'Gas' or
item_name == 'Mineral' or
d['MissileDesignId'] != '0' or
d['CraftDesignId'] != '0' or
d['CharacterDesignId'] != '0'):
continue
# Process
# item_price = d['FairPrice']
item_price = d['MarketPrice']
item_slot = re.sub('Equipment', '', d['ItemSubType'])
item_stat = d['EnhancementType']
item_stat_value = d['EnhancementValue']
if filter == 'price':
if item_price == '0':
item_price = 'NA'
txt += '{}: {}\n'.format(item_name, item_price)
elif filter == 'stats':
if item_stat == 'None':
continue
txt += '{}: {} +{} ({})\n'.format(item_name,
item_stat, item_stat_value, item_slot)
else:
print('Invalid filter')
quit()
if len(txt) == 0:
return None
else:
return txt.strip('\n')
def get_real_name(search_str, rtbl):
item_original = list(rtbl.keys())
item_lookup = [ fix_item(s) for s in item_original ]
item_fixed = fix_item(search_str)
try:
# Attempt to find an exact match
idx = item_lookup.index(item_fixed)
return item_original[idx]
except:
# Perform search if the exact match failed
m = [ re.search(item_fixed, n) is not None for n in item_lookup ]
item = pd.Series(item_original)[m]
if len(item) > 0:
return item.iloc[0]
else:
return None
# ----- Item Stats ----------------------------------------------------
def get_item_stats(item_name):
raw_text = load_item_design_raw()
item_lookup = parse_item_designs(raw_text)
market_txt = filter_item_designs(item_name, item_lookup, filter='stats')
if market_txt is not None:
market_txt = '**Item Stats**\n' + market_txt
return market_txt
# ----- Best Items ----------------------------------------------------
def rtbl2items(rtbl):
df_rtbl = pd.DataFrame(rtbl).T
m1 = df_rtbl.EnhancementType != 'None'
m2 = df_rtbl.ItemSubType.str.contains('Equipment')
df_items = df_rtbl[m1 & m2].copy()
df_items.ItemSubType = df_items.ItemSubType.str.replace('Equipment', '')
df_items.ItemSubType = df_items.ItemSubType.str.lower()
df_items.EnhancementType = df_items.EnhancementType.str.lower()
df_items.EnhancementValue = df_items.EnhancementValue.astype(float)
return df_items
def filter_item(df_items, slot, enhancement, cols=None):
slot = slot.lower()
enhancement = enhancement.lower()
m1 = df_items.ItemSubType == slot
m2 = df_items.EnhancementType == enhancement
if cols is None:
return df_items[m1 & m2].sort_values(
'EnhancementValue', ascending=False).copy()
else:
return df_items.loc[m1 & m2, cols].sort_values(
'EnhancementValue', ascending=False).copy()
def itemfilter2txt(df_filter):
if len(df_filter) == 0:
return None
txt = ''
for row in df_filter.iterrows():
data = row[1]
mprice = data['MarketPrice']
if mprice == '0':
mprice = 'NA'
txt += '{}: {} ({} bux)\n'.format(data[0], data[1], mprice)
return txt
# ----- Item Ingredients ----------------------------------------------
def get_item_rlookup(df):
item_rlookup = {}
for row in df.iterrows():
data = row[1]
item_rlookup[data['ItemDesignId']] = data['ItemDesignName']
return item_rlookup
def get_recipe(df, item_rlookup, item_name):
ingredients = df.loc[df['ItemDesignName'] == item_name, 'Ingredients']
if len(ingredients) == 1:
ingredients = ingredients.values[0]
if len(ingredients) == 0:
return None
ingredients = ingredients.split('|')
recipe = {}
for ingredient in ingredients:
item_id, item_qty = ingredient.split('x')
recipe[item_rlookup[item_id]] = int(item_qty)
return recipe
else:
return None
def print_recipe(recipe, df_items):
txt = ''
total = 0
for ingredient in recipe.keys():
qty = recipe[ingredient]
fprice = df_items.loc[df_items['ItemDesignName'] == ingredient, 'FairPrice'].iloc[0]
mprice = df_items.loc[df_items['ItemDesignName'] == ingredient, 'MarketPrice'].iloc[0]
if mprice == '0':
mprice = np.nan
txt += '{} x {} (price: NA)\n'.format(qty, ingredient)
else:
mprice = int(mprice)
txt += '{} x {} ({} bux): {} bux\n'.format(qty, ingredient, mprice, qty*mprice)
total += qty*mprice
if np.isnan(total):
txt += 'Crafting Cost: NA'
else:
txt += 'Crafting Cost: {} bux'.format(total)
return txt
def collapse_recipe(recipe, df_items, item_rlookup):
collapse = False
sub_recipe = {}
for ingredient in recipe.keys():
qty = recipe[ingredient]
sub_ingredients = get_recipe(df_items, item_rlookup, ingredient)
if sub_ingredients is None:
if ingredient in sub_recipe.keys():
sub_recipe[ingredient] += recipe[ingredient]
else:
sub_recipe[ingredient] = recipe[ingredient]
else:
for sub_ingredient in sub_ingredients:
if sub_ingredient in sub_recipe.keys():
sub_recipe[sub_ingredient] += qty * sub_ingredients[sub_ingredient]
else:
sub_recipe[sub_ingredient] = qty * sub_ingredients[sub_ingredient]
collapse = True
# print('{} x {}: {}'.format(qty, ingredient, sub_ingredients))
if collapse is True:
return sub_recipe
else:
return None
def get_multi_recipe(name, levels=1):
raw_text = load_item_design_raw()
item_lookup = parse_item_designs(raw_text)
real_name = get_real_name(name, item_lookup)
df_items = xmltext_to_df(raw_text)
item_rlookup = get_item_rlookup(df_items)
recipe = get_recipe(df_items, item_rlookup, real_name)
txt = ''
level = 1
while recipe is not None:
txt += print_recipe(recipe, df_items)
recipe = collapse_recipe(recipe, df_items, item_rlookup)
level += 1
if level > levels:
break
if recipe is not None:
txt += '\n\n'
return txt
def get_item_recipe(name, levels=5):
raw_text = load_item_design_raw()
item_lookup = parse_item_designs(raw_text)
# print('name = {}'.format(name))
real_name = get_real_name(name, item_lookup)
# print('real_name = {}'.format(real_name))
    if real_name is not None:
        content = get_multi_recipe(real_name, levels)
        return content, real_name
    return None, None
# ----- Lists ---------------------------------------------------------
def get_item_list():
raw_text = load_item_design_raw()
df_items = xmltext_to_df(raw_text)
items = list(df_items['ItemDesignName'])
# print('List of items: ' + ', '.join(items))
return core.list_to_text(items)
# ----- Main ----------------------------------------------------------
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=
'Pixel Starships Market API')
parser.add_argument('--market', action='store_true',
help='Get Market Data')
parser.add_argument('--subtype', default='None',
help='Subtype for market data')
parser.add_argument('--rarity', default='None',
help='Rarity for market data')
parser.add_argument('--stats', default=None,
help='Get Stats on Item')
parser.add_argument('--recipe', default=None,
help='Get Recipe for Item')
parser.add_argument('--price', default=None,
help='Get Price on Item')
parser.add_argument('--list', action='store_true',
help='Get List of items')
args = parser.parse_args()
if args.list is True:
# python3 pss_market.py --list
txt_list = get_item_list()
for txt in txt_list:
print(txt)
elif args.stats is not None:
# python3 pss_market.py --stats 'assault armor'
pass
elif args.recipe is not None:
name = args.recipe
content, real_name = get_item_recipe(name, levels=5)
if real_name is not None:
content = '**Recipe for {}**\n'.format(real_name) + content
content = content + '\n\nNote: bux prices listed here may not always be accurate due to transfers between alts/friends or other reasons'
print(content)
elif args.price is not None:
# python3 pss_market.py --price 'assault armor'
item_name = args.price
raw_text = load_item_design_raw()
rtbl = parse_item_designs(raw_text)
real_name = get_real_name(item_name, rtbl)
if real_name is not None:
print('Getting the price of {}'.format(real_name))
mkt_text = filter_item_designs(real_name, rtbl, filter='price')
print(mkt_text)
else:
print('{} not found'.format(item_name))
else:
print('Problem parsing argument list')
print('args.stats = {}'.format(args.stats))
print('args.price = {}'.format(args.price))
|
StarcoderdataPython
|
3315603
|
""" Module classes:
NewPostHandler - Handler for creating a new blog post.
"""
from base import BaseHandler
from models.post import BlogEntry
##############################################################################
class NewPostHandler(BaseHandler):
"""Handler for creating a new blog post."""
def render_error(self, author, error=""):
blog_entries = BlogEntry.first_ten_list(author)
# Get all rating information for this user and author:
if blog_entries and self.logged_in():
blog_entries = BlogEntry.add_ratings(entries=blog_entries,
username=self.account)
self.render("home.html",
account=self.account,
author=author,
blog_entries=blog_entries,
error_msg=error)
def render_main(self, author, subject="", content="", error=""):
self.render("newpost.html",
account=self.account,
author=author,
subject=subject,
content=content,
error=error)
def get(self, author):
if not self.logged_in():
self.redirect("/login")
return
if self.user_and_author(author):
self.render_main(author)
else:
self.render_error(
author,
error="You can only create posts on your own blog.")
def post(self, author):
if not self.logged_in():
self.redirect("/login")
return
if self.user_and_author(author):
subject = self.request.get("subject").strip()
content = self.request.get("content").strip()
if subject and content:
new_entry = BlogEntry.create(
author=self.account,
subject=subject,
content=content)
self.redirect("/post/%s" % str(new_entry.key.id()))
else:
error = "Subject and content, please!"
self.render_main(author, subject, content, error)
else:
self.redirect("error_404.html")
|
StarcoderdataPython
|
3443214
|
import magma
def SoCDataType(addr_width, data_width):
"""
This function returns a class (parameterized by @addr_width and
@data_width) which can be used as the magma ports with these inputs
and outputs
1. rd_en
2. rd_addr
3. rd_data
4. wr_strb
5. wr_addr
6. wr_data
"""
_SoCDataType = magma.Tuple(
wr_strb=magma.In(magma.Bits[int(data_width/8)]),
wr_addr=magma.In(magma.Bits[addr_width]),
wr_data=magma.In(magma.Bits[data_width]),
rd_en=magma.In(magma.Bit),
rd_addr=magma.In(magma.Bits[addr_width]),
rd_data=magma.Out(magma.Bits[data_width]))
return _SoCDataType
|
StarcoderdataPython
|
5159093
|
#!/usr/bin/env python
"""Entry points for the grr-response-client-builder pip package."""
# pylint: disable=g-import-not-at-top
def ClientBuild():
from grr_response_client_builder import client_build
client_build.Run()
|
StarcoderdataPython
|
1795369
|
<reponame>dre2004/django-datatables-boilerplate<filename>model/urls.py
from django.contrib import admin
from django.urls import path
from .views import (RegionsJson, RegionsView)
urlpatterns = [
path('RegionsJson/', RegionsJson.as_view(), name='RegionsJson'),
path('regions/', RegionsView.as_view(), name='RegionsView')
]
|
StarcoderdataPython
|
8030074
|
<gh_stars>0
from abc import ABC, abstractmethod
class Storage(ABC):
@abstractmethod
def __init__(self, config): pass
@abstractmethod
def get_cdn_url(self, path): pass
@abstractmethod
def exists(self, path): pass
@abstractmethod
def uploadf(self, file, key, **kwargs): pass
def upload(self, filename: str, key: str, **kwargs):
with open(filename, 'rb') as f:
self.uploadf(f, key, **kwargs)
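# --- Illustrative sketch (added; not part of the original module) ---
# A minimal local-filesystem implementation of the Storage interface above; the
# base-directory/URL layout is an assumption made only for this example.
import os
import shutil
class LocalStorage(Storage):
    def __init__(self, config):
        self.root = config.get("root", "./archive")
        self.base_url = config.get("base_url", "file://" + os.path.abspath(self.root))
    def get_cdn_url(self, path):
        return "{}/{}".format(self.base_url, path)
    def exists(self, path):
        return os.path.exists(os.path.join(self.root, path))
    def uploadf(self, file, key, **kwargs):
        dest = os.path.join(self.root, key)
        os.makedirs(os.path.dirname(dest) or ".", exist_ok=True)
        with open(dest, "wb") as out:
            shutil.copyfileobj(file, out)  # the inherited upload() feeds an open file here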
|
StarcoderdataPython
|
3592727
|
<gh_stars>0
n=int(input())
t=1
for i in range(0,n):
for j in range(0,i+1):
print(t,end="")
t=t+1
print("\r")
|
StarcoderdataPython
|
6430932
|
class Colors:
HEADER = "\033[35m"
OKBLUE = "\033[34m"
OKGREEN = "\033[32m"
WARNING = "\033[33m"
FAIL = "\033[31m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
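# Illustrative usage (an added note, not part of the original module): these are ANSI
# escape sequences, so wrapping text between a colour code and ENDC colours it in most terminals.
if __name__ == "__main__":
    print(Colors.OKGREEN + "ok" + Colors.ENDC + " / " + Colors.FAIL + "failed" + Colors.ENDC)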
|
StarcoderdataPython
|
3263984
|
<gh_stars>0
import numpy as np
import torch
from model.vcn import VCN
class VCN_Wrapped(VCN):
"""L is previous frame, R is current frame, flow is L refference frame"""
# check partial GRU implementation commit
def __init__(self, size, md, fac, meanL, meanR):
"""
md - maximum disparity, flow for a pixel can be -md to md pixels
fac - width of search windows is unchanged, height is divided by fac
"""
assert size[0] == 1, 'VCN_Wrapped can only support one image at a time'
super().__init__(size, md, fac)
self.register_buffer('meanL', self.__get_mean_buffer(meanL))
self.register_buffer('meanR', self.__get_mean_buffer(meanR))
def forward(self, im_prev: torch.Tensor, im_new: torch.Tensor, disc_aux=None):
"""im_prev, im_new should be dim(3,height,width) and RGB format, normalized in [0..1]"""
assert im_prev.dim() == 3 and im_new.dim() == 3, 'VCN_Wrapped can only support one image at a time'
im_prev = im_prev.flip(0)
im_new = im_new.flip(0)
im_prev = im_prev - self.meanL.view(3, 1, -1)
im_new = im_new - self.meanR.view(3, 1, -1)
im = torch.stack((im_prev, im_new))
assert im.dtype is torch.float
return super().forward(im, disc_aux)
def __get_mean_buffer(self, meanArray: np.ndarray):
if meanArray is None:
return torch.tensor([0.33, 0.33, 0.33]).float()
meanArray = np.asarray(meanArray)
if meanArray.ndim == 1:
meanArray = meanArray[np.newaxis, :]
return torch.from_numpy(meanArray.mean(0)).float()
|
StarcoderdataPython
|
11223983
|
from time import sleep
n1 = int(input('Digite o primeiro valor: '))
n2 = int(input('Digite o segundo valor: '))
opcao = 0
while opcao != 5:
print(''' [ 1 ] SOMAR
[ 2 ] MULTIPLICAR
[ 3 ] MAIOR
[ 4 ] NOVOS VALORES
[ 5 ] SAIR''')
opcao = int(input('>>>>> O que deseja fazer com os valores? '))
if opcao == 1:
soma = n1+n2
print(f'A soma entre {n1} e {n2} é {soma}')
elif opcao == 2:
mult = n1*n2
print(f'A multiplicação de {n1} x {n2} é {mult}')
elif opcao == 3:
if n1>n2:
maior = n1
else:
maior = n2
print(f'Entre {n1} e {n2} o maior é {maior}')
elif opcao == 4:
print('Informe os números novamente')
n1 = int(input('Digite o primeiro valor: '))
n2 = int(input('Digite o segundo valor: '))
elif opcao == 5:
print('Finalizando...')
else:
print('Opção inválida. Tente novamente.')
print('-='*15)
sleep(2)
print('Fim do programa! Volte sempre.')
|
StarcoderdataPython
|
59222
|
"""
Module contenant les classes utiles à la modélisation sous forme de graphe :
Sommet, Arc, Graphe
auteur : cmarichal
"""
from typing import Tuple, List
from math import floor
import numpy as np
from classes_traitement_plan import CouleurSpeciale, Plan
class Sommet:
"""Sommet ayant une position et un numéro"""
def __init__(self, numero: int, pos: Tuple[int, int], majeur: bool = False):
self.pos = pos
self.numero = numero
self.majeur = majeur
class Arc:
"""Arc comportant 2 sommets, une longueur et une route"""
def __init__(self, sommet_depart: Sommet, sommet_arrive: Sommet, longueur: int):
self.sommets = (sommet_depart, sommet_arrive)
self.longueur = longueur
class Graphe:
"""Graphe mathématique comportant la liste des sommets et la matrice d'adjacence"""
def __init__(self):
self.matrice_adjacence = np.array([])
self.liste_sommets = []
@staticmethod
def graphe_from_plan(plan: Plan):
"""retourne le graphe associé à une image prétraitée"""
nouveau_graphe = Graphe()
sommets, coefprop = Graphe.cherche_sommets(plan)
nouveau_graphe.liste_sommets = sommets
Gr = []
for i in range(len(sommets)): # crée une matrice de zéros d'une taille adaptée
Gr.append([0] * len(sommets))
for i in range(len(sommets) - 1):
for k in range(i + 1, len(sommets)):
if plan.verifLignePaint(sommets[i].pos, sommets[k].pos): # vérifie que 2 sommets sont reliés par un arc
x = sommets[i].pos[0] - sommets[k].pos[0]
y = sommets[i].pos[1] - sommets[k].pos[1]
Gr[i][k] = floor(coefprop * np.sqrt(x ** 2 + y ** 2)) # distance entre les sommets
Gr[k][i] = Gr[i][k] # matrice symetrique
else:
Gr[i][k] = -1 # sommet inaccessible
Gr[k][i] = -1
nouveau_graphe.matrice_adjacence = np.array(Gr)
return nouveau_graphe
@staticmethod
def cherche_sommets(plan: Plan) -> Tuple[List[Sommet], float]:
"""repère les sommets/pixels rouges"""
sommets = []
echelle = []
for i in range(len(plan.image_255)):
for j in range(len(plan.image_255[0])):
code_pixel = list(plan.image_255[i][j])
if code_pixel == CouleurSpeciale.ROUGE.value:
sommets.append(Sommet(numero=len(sommets), pos=(i, j)))
elif code_pixel == CouleurSpeciale.ROSE.value:
sommets.append(Sommet(numero=len(sommets), pos=(i, j), majeur=True))
elif code_pixel == CouleurSpeciale.VIOLET.value:
echelle.append((i, j))
coefprop = plan.echelle / (echelle[1][1] - echelle[0][1]) # coefficient de propotionnalité pixels/metres
return sommets, coefprop
def get_liste_arcs_graphe(self) -> List[Tuple[int, int]]:
"""renvoie la liste de tous les arcs"""
L = []
for i in range(len(self.matrice_adjacence)):
for j in range(len(self.matrice_adjacence[0])):
if self.matrice_adjacence[i][j] != 0 and self.matrice_adjacence[i][j] != -1:
L.append((i, j))
return L
def get_liste_sommets_majeurs(self) -> List[Sommet]:
"""Renvoie la liste des sommets majeurs"""
sommets_majeurs = []
for sommet in self.liste_sommets:
if sommet.majeur:
sommets_majeurs.append(sommet)
return sommets_majeurs
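# --- Illustrative sketch (added; not part of the original module) ---
# Graphe.graphe_from_plan needs a real annotated Plan image (and an importable
# classes_traitement_plan module), so this only shows the plain Sommet/Arc
# containers with made-up coordinates.
if __name__ == "__main__":
    a = Sommet(numero=0, pos=(10, 12))
    b = Sommet(numero=1, pos=(40, 12), majeur=True)
    route = Arc(a, b, longueur=30)
    print(route.sommets[0].numero, "->", route.sommets[1].numero, ":", route.longueur, "m")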
|
StarcoderdataPython
|
3385084
|
<reponame>willcodefortea/wagtail<filename>wagtail/wagtailimages/admin_urls.py
from django.conf.urls import url
from wagtail.wagtailimages.views import images, chooser, multiple
urlpatterns = [
url(r'^$', images.index, name='wagtailimages_index'),
url(r'^(\d+)/$', images.edit, name='wagtailimages_edit_image'),
url(r'^(\d+)/delete/$', images.delete, name='wagtailimages_delete_image'),
url(r'^(\d+)/generate_url/$', images.url_generator, name='wagtailimages_url_generator'),
url(r'^(\d+)/generate_url/(.*)/$', images.generate_url, name='wagtailimages_generate_url'),
url(r'^add/$', images.add, name='wagtailimages_add_image'),
url(r'^usage/(\d+)/$', images.usage, name='wagtailimages_image_usage'),
url(r'^multiple/add/$', multiple.add, name='wagtailimages_add_multiple'),
url(r'^multiple/(\d+)/$', multiple.edit, name='wagtailimages_edit_multiple'),
url(r'^multiple/(\d+)/delete/$', multiple.delete, name='wagtailimages_delete_multiple'),
url(r'^chooser/$', chooser.chooser, name='wagtailimages_chooser'),
url(r'^chooser/(\d+)/$', chooser.image_chosen, name='wagtailimages_image_chosen'),
url(r'^chooser/upload/$', chooser.chooser_upload, name='wagtailimages_chooser_upload'),
url(r'^chooser/(\d+)/select_format/$', chooser.chooser_select_format, name='wagtailimages_chooser_select_format'),
]
|
StarcoderdataPython
|
4937968
|
<filename>qutebrowser/quteconfig.py
# Autogenerated config.py
# Documentation:
# qute://help/configuring.html
# qute://help/settings.html
# Uncomment this to still load settings configured via autoconfig.yml
# config.load_autoconfig()
# Load a restored tab as soon as it takes focus.
# Type: Bool
c.session.lazy_restore = True
# Turn on Qt HighDPI scaling. This is equivalent to setting
# QT_AUTO_SCREEN_SCALE_FACTOR=1 in the environment. It's off by default
# as it can cause issues with some bitmap fonts. As an alternative to
# this, it's possible to set font sizes and the `zoom.default` setting.
# Type: Bool
c.qt.highdpi = True
# Always restore open sites when qutebrowser is reopened.
# Type: Bool
c.auto_save.session = True
# Automatically start playing `<video>` elements. Note: On Qt < 5.11,
# this option needs a restart and does not support URL patterns.
# Type: Bool
c.content.autoplay = False
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'file://*')
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'chrome://*/*')
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'qute://*/*')
# Enable smooth scrolling for web pages. Note smooth scrolling does not
# work with the `:scroll-px` command.
# Type: Bool
c.scrolling.smooth = True
|
StarcoderdataPython
|
4889081
|
###############################################################
####### PROCESSING OF TREES ###################################
###############################################################
# structure of the tree:
# 0: name, 1: parent, 2: tab of children, 3: length, 4: isdup, 5:species, 6:bootstrap , 7: bppnumber, 8: ND
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def getbppnumber(tree,node):
return tree[node][7]
def writeBootstrap(tree,node,value):
tree[node][6] = value
def getBootstrap(tree,node):
return tree[node][6]
def addNode(tree):
id_node = 0
    while id_node in tree:
id_node = id_node + 1
tree[id_node] = ["N"+str(id_node),-1,[],0,"","",""]
return id_node
def getAncestor(tree):
if tree.has_key("ancestor"):
return tree["ancestor"]
else:
return -1
def setAncestor(tree,node):
tree["ancestor"] = node
def getLength(tree,node):
return tree[node][3]
def setLength(tree,node,l):
tree[node][3] = l
def getName(tree,node):
return tree[node][0]
def setName(tree,node,name):
tree[node][0] = name
def getSpecies(tree,node):
return tree[node][5]
def writeSpecies(tree,node,annot):
tree[node][5] = annot
def getNodes(tree):
    clefs = list(tree.keys())
c = 0
while c < len(clefs):
if (clefs[c] == "sequence" or
clefs[c] == "ancestor" or
len(tree[clefs[c]]) == 0):
del clefs[c]
else:
c = c + 1
return clefs
def getParent(tree,node):
return tree[node][1]
def getChildNumber(tree,n,c):
children = getChildren(tree,n)
if children[0] == c:
return 0
else:
return 1
def setParent(tree,node,p):
tree[node][1] = p
def addChild(tree,pere,node):
tree[pere][2].append(node)
def removeLeaf(tree,l):
#root = getRoot(tree)
#print "remove",l,writeTree(tree,root,False)
if isRoot(tree,l):
del tree[l]
else:
pere = getParent(tree,l)
if isRoot(tree,pere) and len(getChildren(tree,pere)) == 2:
#print "son of the root"
b = getBrother(tree,l)
tree[b][1] = -1
del tree[pere]
del tree[l]
elif len(getChildren(tree,pere)) == 2:
b = getBrother(tree,l)
grandpere = getParent(tree,pere)
setParent(tree,b,grandpere)
number = getChildNumber(tree,grandpere,pere)
setChild(tree,grandpere,number,b)
tree[b][3] = tree[b][3]+tree[pere][3]
del tree[pere]
del tree[l]
elif isRoot(tree,pere) and len(getChildren(tree,pere)) == 1:
del tree[l]
del tree[pere]
elif len(getChildren(tree,pere)) > 2:
number = getChildNumber(tree,pere,l)
del tree[pere][2][number]
del tree[l]
def removeNodeAndChildren(tree,node):
children = list(getChildren(tree,node))
for child in children:
removeNodeAndChildren(tree,child)
removeNode(tree,node)
def removeNode(tree,node):
# print "effacement du noeud",node
del tree[node]
def removeChildAndChildren(tree,pere,node):
numero = 0
while node != getChild(tree,pere,numero):
numero = numero + 1
del tree[pere][2][numero]
removeNodeAndChildren(tree,node)
def removeChild(tree,pere,node):
numero = 0
while node != getChild(tree,pere,numero):
numero = numero + 1
del tree[pere][2][numero]
removeNode(tree,node)
def getChild(tree,node,k):
return tree[node][2][k]
def setChild(tree,node,k,c):
tree[node][2][k] = c
def getNumberOfChildren(tree,node):
return len(tree[node][2])
def getChildren(tree,node):
return tree[node][2]
def getBrother(tree,node):
anc = getParent(tree,node)
if (getChild(tree,anc,0) == node):
return getChild(tree,anc,1)
else:
return getChild(tree,anc,0)
def isLeaf(tree,node):
return (len(getChildren(tree,node)) == 0)
def isRoot(tree,node):
return (tree[node][1] == -1)
def isDup(tree,node):
return (tree[node][4] == "D")
def getND(tree,node):
    if "ND" in tree[node]:
return tree[node]["ND"]
else:
return ""
def lastCommonAncestor(tree,a,b):
ancestor = -1
ancestorsa = [a]
while not isRoot(tree,a):
a = getParent(tree,a)
ancestorsa.append(a)
ancestorsb = [b]
while not isRoot(tree,b):
b = getParent(tree,b)
ancestorsb.append(b)
# print ancestorsa,ancestorsb
while len(ancestorsa) > 0 and len(ancestorsb) > 0 and ancestorsa[-1] == ancestorsb[-1]:
ancestor = ancestorsa[-1]
del ancestorsa[-1]
del ancestorsb[-1]
# print "ancestor",ancestor
return ancestor
def distanceFrom(tree,a,b):
ancestor = lastCommonAncestor(tree,a,b)
distance = 0
while a != ancestor:
#print tree[a]
distance = distance + tree[a][3]
a = getParent(tree,a)
while b != ancestor:
#print tree[b]
distance = distance + tree[b][3]
b = getParent(tree,b)
return distance
def getLeaves(tree,a):
# print "getleaves",a
if isLeaf(tree,a):
return [a]
else:
#print "non feuille",child1(a),child2(a)
result = []
children = list(getChildren(tree,a))
for child in children:
result = result + getLeaves(tree,child)
return result
def writeTree(tree,a,NHX):
# print a,tree[a]
if isLeaf(tree,a):
if isRoot(tree,a):
chaine = "("
else:
chaine = ""
chaine = chaine + tree[a][0]
if tree[a][3] != -1:
#~ print tree[a][3]
chaine = chaine + ":" + str(tree[a][3])
if NHX and tree[a][5] != "":
chaine = chaine + "[&&NHX:S="+tree[a][5]+"]"
if isRoot(tree,a):
chaine = chaine + ")" + str(getBootstrap(tree,a))
else:
chaine = "("
children = list(getChildren(tree,a))
for child in children:
chaine = chaine + writeTree(tree,child,NHX)+","
chaine = chaine[:-1]+")"+str(getBootstrap(tree,a))
if (not isRoot(tree,a)) and tree[a][3] != -1:
chaine = chaine + ":" + str(tree[a][3])
if NHX and (tree[a][4] != "" or tree[a][5] != ""):
chaine = chaine + "[&&NHX:"
if tree[a][5] != "":
chaine = chaine + "S="+tree[a][5]
if tree[a][4] == "D" or tree[a][4] == "WGD":
chaine = chaine+":D=Y"
chaine = chaine + "]"
if isRoot(tree,a):
chaine = chaine + ";"
return chaine
def writeTreeNexus(tree,a,tab):
# print a,tree[a]
if isLeaf(tree,a):
if isRoot(tree,a):
chaine = "("
else:
chaine = ""
chaine = chaine + tree[a][0]
chaine = chaine + "[&!color=#"+tab[a]+"]"
if tree[a][3] != -1:
#~ print tree[a][3]
chaine = chaine + ":" + str(tree[a][3])
if isRoot(tree,a):
chaine = chaine + ")"
else:
chaine = "("
children = list(getChildren(tree,a))
for child in children:
chaine = chaine + writeTreeNexus(tree,child,tab)+","
chaine = chaine[:-1]+")"
chaine = chaine + "[&!color=#"+tab[a]+"]"
if (not isRoot(tree,a)) and tree[a][3] != -1:
chaine = chaine + ":" + str(tree[a][3])
if isRoot(tree,a):
chaine = chaine + ";"
return chaine
def getRoot(tree):
keys = getNodes(tree)
#print tree
#print keys
start = keys[0]
while (not isRoot(tree,start)):
start = getParent(tree,start)
return start
def getNodesBetween(tree,a,b):
chemin = []
ancestor = -1
ancestorsa = []
while not isRoot(tree,a):
a = getParent(tree,a)
ancestorsa.append(a)
ancestorsb = []
while not isRoot(tree,b):
b = getParent(tree,b)
ancestorsb.append(b)
while len(ancestorsa) > 0 and len(ancestorsb) > 0 and ancestorsa[-1] == ancestorsb[-1]:
ancestor = ancestorsa[-1]
del ancestorsa[-1]
del ancestorsb[-1]
# print "ancestor",ancestor
return ancestorsa+[ancestor]+ancestorsb
def isAncestor(tree,a,b):
if isRoot(tree,a):
result = True
else:
result = False
current = b
while ((not result) and (not isRoot(tree,current))):
if current == a:
result = True
else:
current = getParent(tree,current)
return result
def treeCopy(tree):
result = {}
for k in tree.keys():
if k == "ancestor" or k == "sequence":
result[k] = tree[k]
else:
result[k] = [tree[k][0],tree[k][1],list(tree[k][2]),tree[k][3],tree[k][4],tree[k][5],tree[k][6]]
return result
def changeRoot(tree,newRoot): # the new root is between newRoot and its parent NEVER TESTED
#~ print "changeroot",newRoot,getRoot(tree),getParent(tree,newRoot)
if (not isRoot(tree,newRoot)) and (not isRoot(tree,getParent(tree,newRoot))):
#~ print "changeroot"
root = getRoot(tree)
        new_id = addNode(tree)
tree[new_id][2] = [newRoot,getParent(tree,newRoot)]
tree[newRoot][1] = new_id
tree[newRoot][3] = tree[newRoot][3]/2
current = getParent(tree,newRoot)
prec = new_id
current_length = tree[newRoot][3]/2
while getParent(tree,current) != root:
            if tree[current][2][0] == prec:
tree[current][2][0] = getParent(tree,current)
else:
tree[current][2][1] = getParent(tree,current)
tree[current][1] = prec
temp = current_length
current_length = tree[current][3]
tree[current][3] = temp
prec = current
current = getParent(tree,current)
        if tree[current][2][0] == prec:
tree[current][2][0] = getBrother(tree,current)
else:
tree[current][2][1] = getBrother(tree,current)
tree[current][1] = prec
temp = current_length
current_length = tree[current][3]
tree[current][3] = temp
tree[getBrother(tree,current)][1] = current
tree[getBrother(tree,current)][3] = tree[getBrother(tree,current)][3] + current_length
del tree[root]
def SPR(tree,a,b):
#~ print a,b,getName(tree,a),getName(tree,b)
#~ print writeTree(tree,getParent(tree,a),False)
parent = getParent(tree,a)
great_parent = getParent(tree,getParent(tree,a))
brother = getBrother(tree,a)
tree[brother][1] = great_parent
child = getChildren(tree,great_parent)[0]
if child == getParent(tree,a):
tree[great_parent][2][0] = brother
else:
tree[great_parent][2][1] = brother
del tree[parent]
#~ print writeTree(tree,great_parent,False)
parent = getParent(tree,b)
new_node = addNode(tree)
tree[new_node][1] = parent
tree[new_node][2] = [a,b]
tree[a][1] = new_node
tree[b][1] = new_node
child = getChildren(tree,parent)[0]
if child == b:
tree[parent][2][0] = new_node
else:
tree[parent][2][1] = new_node
#~ print writeTree(tree,parent,False)
def NNI(tree,node):
if (not isRoot(tree,node)) and (not isLeaf(tree,node)):
parent = getParent(tree,node)
if isRoot(tree,parent):
brother = getBrother(tree,node)
if not isLeaf(tree,brother):
son1 = getChildren(tree,node)[0]
son2 = getChildren(tree,node)[1]
son3 = getChildren(tree,brother)[0]
son4 = getChildren(tree,brother)[1]
setChild(tree,node,1,son4)
setChild(tree,brother,1,son2)
setParent(tree,son2,brother)
setParent(tree,son4,node)
else:
brother = getBrother(tree,node)
if getChildren(tree,parent)[0] == brother:
no_brother = 0
else:
no_brother = 1
son1 = getChildren(tree,node)[0]
setChild(tree,node,0,brother)
setChild(tree,parent,no_brother,son1)
setParent(tree,son1,parent)
setParent(tree,brother,node)
def getLeavesNames(tree):
result = []
root = getRoot(tree)
leaves = getLeaves(tree,root)
for l in leaves:
result.append(getName(tree,l))
return result
def unroot(tree):
nodes = getNodes(tree)
if len(nodes) > 3:
root = getRoot(tree)
children = getChildren(tree,root)
if len(children) == 2:
new_root = children[0]
tree[new_root][1] = -1
tree[new_root][2].append(children[1])
tree[children[1]][1] = new_root
tree[children[1]][3] = tree[new_root][3] + tree[children[1]][3]
tree[children[1]][6] = max(tree[new_root][6],tree[children[1]][6])
tree[new_root][3] = -1
del tree[root]
def contractunsupported(tree,threshold):
result = 0
unroot(tree)
nodes = getNodes(tree)
#print "begin",len(nodes)
for n in nodes:
if isfloat(tree[n][6]):
tree[n][6] = float(tree[n][6])
else:
tree[n][6] = 0.0
if (not isRoot(tree,n)) and (not isLeaf(tree,n)) and (tree[n][6] < threshold):
#~ print "CONTRACTION",float(tree[n][6]),threshold,
parent = getParent(tree,n)
children = getChildren(tree,n)
for c in children:
tree[parent][2].append(c)
tree[c][1] = parent
removeChild(tree,parent,n)
result = result + 1
#nodes = getNodes(tree)
#print "end",len(nodes)
return result
def ultrametricize(tree):
root = getRoot(tree)
leaves = getLeaves(tree,root)
maximum = 0
index = -1
for l in leaves:
d = distanceFrom(tree,root,l)
if d > maximum:
maximum = d
index = l
#~ print getName(tree,l),"maximum",maximum
i = index
marque = []
while i != root:
marque.append(i)
i = getParent(tree,i)
marque.append(root)
while len(marque) < len(getNodes(tree)):
#~ print len(marque),len(getNodes(tree))
maximum_non_marque = 0
index = -1
for l in leaves:
d = distanceFrom(tree,root,l)
if (d > maximum_non_marque) and (not l in marque):
maximum_non_marque = d
index = l
#~ print getName(tree,l),"distance",distanceFrom(tree,root,l)
i = index
distance_from_marque = 0
while not i in marque:
distance_from_marque = distance_from_marque + getLength(tree,i)
i = getParent(tree,i)
ratio = (maximum - distanceFrom(tree,i,root)) / distance_from_marque
i = index
while not i in marque:
marque.append(i)
setLength(tree,i,getLength(tree,i) * ratio)
i = getParent(tree,i)
#~ else:
#~ print getName(tree,l),"distance",distanceFrom(tree,root,l)
def constructSupportFromBootstrapTrees(tree,setoftrees):
support = {}
leaves = getLeavesNames(tree)
pos = {}
for i in range(len(leaves)):
pos[leaves[i]] = i
bipartitions = {}
def complement(seq):
result = []
for s in seq.split("_")[0]:
if s == "1":
result.append("0")
else:
result.append("1")
return "".join(result)
def seq(leafset,node):
result = ["0"]*len(leaves)
for l in leafset:
result[pos[l]] = "1"
return "".join(result)
def constructBipartAndReturnLeaves(tree,node):
if isLeaf(tree,node):
return [getName(tree,node)]
else:
c = getChildren(tree,node)
tab0 = constructBipartAndReturnLeaves(tree,c[0])
tab1 = constructBipartAndReturnLeaves(tree,c[1])
result = tab0+tab1
if len(c) > 2:
tab2 = constructBipartAndReturnLeaves(tree,c[2])
result = result + tab2
if not isRoot(tree,node):
support[node] = 0
s = seq(tab0+tab1,node)
bipartitions[s] = node
bipartitions[complement(s)] = node
return result
root = getRoot(tree)
constructBipartAndReturnLeaves(tree,root)
def testBipartAndReturnLeaves(tree,node):
#print "boot"
if isLeaf(tree,node):
return [getName(tree,node)]
else:
#print "nonleafboot"
c = getChildren(tree,node)
tab0 = testBipartAndReturnLeaves(tree,c[0])
tab1 = testBipartAndReturnLeaves(tree,c[1])
result = tab0+tab1
if len(c) > 2:
tab2 = testBipartAndReturnLeaves(tree,c[2])
result = result + tab2
if not isRoot(tree,node):
s = seq(tab0+tab1,node)
#print s
                if s in bipartitions:
#print "bip trouve"
support[bipartitions[s]] = support[bipartitions[s]] + 1
#if bipartitions.has_key(complement(s)):
#support[bipartitions[complement(s)] = support[bipartitions[complement(s)]] + 1
return result
for t in setoftrees:
root = getRoot(t)
testBipartAndReturnLeaves(t,root)
if len(setoftrees) > 0:
for k in support.keys():
writeBootstrap(tree,k,support[k]/float(len(setoftrees)))
#root = getRoot(tree)
#print writeTree(tree,root,False)
def RF(arbre1,arbre2):
root1 = getRoot(arbre1)
root2 = getRoot(arbre2)
nodes1 = getNodes(arbre1)
nodes2 = getNodes(arbre2)
clades1 = []
for n in nodes1:
leaves = getLeaves(arbre1,n)
if len(leaves) > 1:
clade = []
for l in leaves:
clade.append(getName(arbre1,l).split("|")[0].split("__")[0])
clade.sort()
clades1.append(clade)
clades2 = []
for n in nodes2:
leaves = getLeaves(arbre2,n)
if len(leaves) > 1:
clade = []
for l in leaves:
clade.append(getName(arbre2,l).split("|")[0].split("__")[0])
clade.sort()
clades2.append(clade)
distance = 0
for c in clades1:
if not c in clades2:
distance = distance + 1
#print 1,c
for c in clades2:
if not c in clades1:
distance = distance + 1
#print 2,c
return distance/2
def commonTriplets(arbre1,arbre2):
result = 0
triplets = {}
root1 = getRoot(arbre1)
leaves1 = getLeaves(arbre1,root1)
for n1 in range(len(leaves1)):
for n2 in range(n1+1,len(leaves1)):
for n3 in range(n2+1,len(leaves1)):
ids = [leaves1[n1],leaves1[n2],leaves1[n3]]
                ids.sort(key=lambda x: getName(arbre1, x))
names = [getName(arbre1,ids[0]),getName(arbre1,ids[1]),getName(arbre1,ids[2])]
LCA12 = lastCommonAncestor(arbre1,ids[0],ids[1])
LCA13 = lastCommonAncestor(arbre1,ids[0],ids[2])
LCA23 = lastCommonAncestor(arbre1,ids[1],ids[2])
#print LCA12,LCA13,LCA23
if LCA12 == LCA13:
triplets['_'.join(names)] = 1
if LCA12 == LCA23:
triplets['_'.join(names)] = 2
if LCA13 == LCA23:
triplets['_'.join(names)] = 3
#print names,triplets['_'.join(names)]
root2 = getRoot(arbre2)
leaves2 = getLeaves(arbre2,root2)
for n1 in range(len(leaves2)):
for n2 in range(n1+1,len(leaves2)):
for n3 in range(n2+1,len(leaves2)):
#print n1,n2,n3,result
ids = [leaves2[n1],leaves2[n2],leaves2[n3]]
                ids.sort(key=lambda x: getName(arbre2, x))
names = [getName(arbre2,ids[0]),getName(arbre2,ids[1]),getName(arbre2,ids[2])]
                if '_'.join(names) in triplets:
LCA12 = lastCommonAncestor(arbre2,ids[0],ids[1])
LCA13 = lastCommonAncestor(arbre2,ids[0],ids[2])
LCA23 = lastCommonAncestor(arbre2,ids[1],ids[2])
if LCA12 == LCA13 and triplets['_'.join(names)] == 1:
#print names,"yes",triplets['_'.join(names)]
result = result + 1
elif LCA12 == LCA23 and triplets['_'.join(names)] == 2:
#print names,"yes",triplets['_'.join(names)]
result = result + 1
elif LCA13 == LCA23 and triplets['_'.join(names)] == 3:
#print names,"yes",triplets['_'.join(names)]
result = result + 1
#else:
#print names
#else:
#print names,"not found"
return result
# structure of the tree:
# 0: name, 1: parent, 2: tab of children, 3: length, 4: isdup, 5:species, 6:bootstrap
#####################################################
#####################################################
# Traversal of one tree
#
#####################################################
#####################################################
def readTree(treeseq):
###############################################
######### TREE READING ########################
###############################################
tree = {"sequence":treeseq}
id_node = 0
nb_parenth = 0
bppnumber = 0
pile = []
t = 0
while t < len(treeseq):
if treeseq[t] == "(":
id_node = id_node + 1
nb_parenth = nb_parenth + 1
tree[id_node]={}
tree[id_node][0] = "N"+str(id_node)
tree[id_node][1] = -1
tree[id_node][2] = []
tree[id_node][3] = 0
tree[id_node][4] = ""
tree[id_node][5] = ""
tree[id_node][6] = ""
tree[id_node][7] = -1
# [nom,pere,[enfants],longueur,annotD,dotannot,bootstrap,bppnumber]
# print "ouverture",tree[id_node]
if len(pile) > 0:
tree[id_node][1] = pile[-1]
pile.append(id_node)
t = t + 1
elif treeseq[t] == ")":
t = t + 1
nb_parenth = nb_parenth - 1
tree[pile[-1]][7] = bppnumber
bppnumber = bppnumber + 1
#~ print nb_parenth,"(-1)",treeseq[t:t+80]
if treeseq[t] == "@":
t = t + 1
tree["ancestor"] = pile[-1]
while (treeseq[t] != ":" and
treeseq[t] != ";" and
treeseq[t] != "[" and
treeseq[t] != ")" and
treeseq[t] != ","):
tree[pile[-1]][6] = tree[pile[-1]][6] + treeseq[t]
t = t + 1
if treeseq[t] == ":":
debut = t + 1
while treeseq[t] != "," and treeseq[t]!=")" and treeseq[t] != "[" and treeseq[t] != ";":
t = t + 1
longueur = float(treeseq[debut:t])
tree[pile[-1]][3] = longueur
while treeseq[t] != "," and treeseq[t] != ")" and treeseq[t] != "[" and treeseq[t] != ";":
t = t + 1
if treeseq[t] == "[":
debut = t + 1
t = debut + treeseq[debut:].find("]")
chaine = treeseq[debut:t]
mots = chaine.split(":")
for m in mots:
if m == "D=Y" or m == "D=T" or m == "Ev=GDup":
tree[pile[-1]][4] = "D"
if m[:2] == "S=":
tree[pile[-1]][5] = m[2:]
if m[:2] == "B=":
tree[pile[-1]][6] = m[2:]
if m[:3] == "ND=":
tree[pile[-1]]["ND"] = m[3:]
if isfloat(m):
tree[pile[-1]][6] = float(m)
t = t + 1
if treeseq[t] == ":":
debut = t + 1
while treeseq[t] != "," and treeseq[t]!=")" and treeseq[t] != "[" and treeseq[t] != ";":
t = t + 1
longueur = float(treeseq[debut:t])
tree[pile[-1]][3] = longueur
while treeseq[t] != "," and treeseq[t] != ")" and treeseq[t] != "[" and treeseq[t] != ";":
t = t + 1
del pile[-1]
if treeseq[t] == ";":
t = len(treeseq)
elif treeseq[t] == ";":
t = len(treeseq)
elif treeseq[t]==",":
t = t + 1
elif treeseq[t]==" ":
t = t + 1
else: # nom d'une feuille
#print "nom_de_feuille"
id_node = id_node + 1
tree[id_node] = {}
tree[id_node][1] = -1
tree[id_node][2] = []
tree[id_node][3] = 0
tree[id_node][4] = ""
tree[id_node][5] = ""
tree[id_node][6] = ""
tree[id_node][7] = bppnumber
bppnumber = bppnumber + 1
if len(pile)>0:
tree[id_node][1]=pile[-1]
pile.append(id_node)
debut = t
while (treeseq[t]!="," and
treeseq[t]!=")" and
treeseq[t]!=":" and
treeseq[t]!=";" and
treeseq[t]!="\n" and
treeseq[t] != "["):
t=t+1
nom = treeseq[debut:t].strip()
tree[pile[-1]][0] = nom
#~ print nom
if treeseq[t]==":":
debut = t + 1
while treeseq[t]!="," and treeseq[t]!=")" and treeseq[t] != "[" and treeseq[t] != ";":
t = t + 1
longueur = float(treeseq[debut:t])
tree[id_node][3] = longueur
#print "fin nom"
if treeseq[t] == "[":
debut = t + 1
t = debut + treeseq[debut:].find("]")
chaine = treeseq[debut:t]
#print chaine
mots = chaine.split(":")
for m in mots:
if m[:2] == "S=":
tree[pile[-1]][5] = m[2:]
if m[:3] == "ND=":
tree[pile[-1]]["ND"] = m[3:]
t = t + 1
if treeseq[t]==":":
debut = t + 1
while treeseq[t]!="," and treeseq[t]!=")" and treeseq[t] != "[" and treeseq[t] != ";":
t = t + 1
longueur = float(treeseq[debut:t])
tree[id_node][3] = longueur
del pile[-1]
#print tree
    # fill in each node's list of children
nodes = list(getNodes(tree))
for node in nodes:
if not isRoot(tree,node):
pere = getParent(tree,node)
addChild(tree,pere,node)
return tree
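# --- Illustrative usage (an added sketch, not part of the original library) ---
# readTree() parses a Newick string into the dict structure documented at the top of
# this file; the tiny tree below is a made-up example.
if __name__ == "__main__":
    t = readTree("((A:1,B:2):0.5,C:3);")
    print(getLeavesNames(t))                 # -> ['A', 'B', 'C']
    print(writeTree(t, getRoot(t), False))   # re-serialises the parsed tree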
|
StarcoderdataPython
|
3227398
|
<filename>custom_components/tion/fan.py
"""
Fan controls for Tion breezers
"""
from __future__ import annotations
import logging
from datetime import timedelta
from functools import cached_property
from typing import Any
from homeassistant.components.climate.const import PRESET_BOOST, PRESET_NONE
from homeassistant.components.fan import FanEntityDescription, FanEntity, SUPPORT_SET_SPEED, SUPPORT_PRESET_MODE, \
DIRECTION_FORWARD
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import TionInstance
from .climate import TionClimateEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=30)
config = FanEntityDescription(
key="fan_speed",
entity_category=EntityCategory.CONFIG,
name="fan speed",
entity_registry_enabled_default=True,
icon="mdi:fan",
)
async def async_setup_entry(hass: HomeAssistant, _config: ConfigEntry, async_add_entities):
"""Set up the sensor entry"""
async_add_entities([TionFan(config, hass.data[DOMAIN][_config.unique_id])])
return True
class TionFan(FanEntity, CoordinatorEntity):
_attr_supported_features = SUPPORT_PRESET_MODE | SUPPORT_SET_SPEED
_attr_oscillating = False
_attr_preset_modes = [PRESET_NONE, PRESET_BOOST]
_attr_speed_count = len(TionClimateEntity.attr_fan_modes())
_attr_current_direction = DIRECTION_FORWARD
_mode_percent_mapping = {
0: 0,
1: 17,
2: 33,
3: 50,
4: 67,
5: 83,
6: 100,
}
_percent_mode_mapping = {
0: 0,
16: 1,
33: 2,
50: 3,
66: 4,
83: 5,
100: 6,
}
    # Home Assistant uses a float speed step and ceil() to determine the supported speed percents.
def set_preset_mode(self, preset_mode: str) -> None:
pass
    def set_direction(self, direction: str) -> None:
        raise NotImplementedError
    def turn_on(self, percentage: int | None = None, preset_mode: str | None = None, **kwargs) -> None:
        raise NotImplementedError
    def oscillate(self, oscillating: bool) -> None:
        raise NotImplementedError
    def turn_off(self, **kwargs: Any) -> None:
        pass
    def set_percentage(self, percentage: int) -> None:
        raise NotImplementedError
def __init__(self, description: FanEntityDescription, instance: TionInstance):
"""Initialize the fan."""
CoordinatorEntity.__init__(self=self, coordinator=instance, )
self.entity_description = description
self._attr_name = f"{instance.name} {description.name}"
self._attr_device_info = instance.device_info
self._attr_unique_id = f"{instance.unique_id}-{description.key}"
self._saved_fan_mode = None
_LOGGER.debug(f"Init of fan {self.name} ({instance.unique_id})")
_LOGGER.debug(f"Speed step is {self.percentage_step}")
def percent2mode(self, percentage: int) -> int:
result = 0
try:
return self._percent_mode_mapping[percentage]
except KeyError:
_LOGGER.warning(f"Could not to convert {percentage} to mode with {self._percent_mode_mapping}. "
f"Will use fall back method.")
for i in range(len(TionClimateEntity.attr_fan_modes())):
if percentage < self.percentage_step * i:
break
else:
result = i
else:
result = 6
return result
def mode2percent(self) -> int | None:
return self._mode_percent_mapping[self.fan_mode] if self.fan_mode is not None else None
async def async_set_percentage(self, percentage: int) -> None:
"""Set the speed of the fan, as a percentage."""
await self.coordinator.set(fan_speed=self.percent2mode(percentage), is_on=percentage > 0)
@cached_property
def boost_fan_mode(self) -> int:
return max(TionClimateEntity.attr_fan_modes())
@property
def fan_mode(self):
return self.coordinator.data.get(self.entity_description.key)
async def async_set_preset_mode(self, preset_mode: str) -> None:
if preset_mode == PRESET_BOOST and self.preset_mode != PRESET_BOOST:
if self._saved_fan_mode is None:
self._saved_fan_mode = int(self.fan_mode)
await self.coordinator.set(fan_speed=self.boost_fan_mode)
if preset_mode == PRESET_NONE and self.preset_mode == PRESET_BOOST:
if self._saved_fan_mode is not None:
await self.coordinator.set(fan_speed=self._saved_fan_mode)
self._saved_fan_mode = None
self._attr_preset_mode = preset_mode
async def async_turn_on(self, percentage: int | None = None, preset_mode: str | None = None, **kwargs, ) -> None:
target_speed = 2 if self._saved_fan_mode is None else self._saved_fan_mode
self._saved_fan_mode = None
await self.coordinator.set(fan_speed=target_speed, is_on=True)
async def async_turn_off(self, **kwargs: Any) -> None:
if self._saved_fan_mode is None and self.fan_mode > 0:
self._saved_fan_mode = self.fan_mode
await self.coordinator.set(is_on=False)
def _handle_coordinator_update(self) -> None:
self._attr_assumed_state = False if self.coordinator.last_update_success else True
self._attr_is_on = self.coordinator.data.get("is_on")
self._attr_percentage = self.mode2percent() if self._attr_is_on else 0 # should check attr to avoid deadlock
self.async_write_ha_state()
|
StarcoderdataPython
|
5069088
|
<reponame>aminhp93/learning_python
from django.db import models
from django.urls import reverse
# Create your models here.
class Tag(models.Model):
tag = models.SlugField(unique=True)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.tag
def get_absolute_url(self):
return reverse("tags:related", kwargs={"slug": self.tag})
|
StarcoderdataPython
|
9759306
|
<filename>misinformation/extractors/extract_article.py
import datetime
from contextlib import suppress
import re
from ReadabiliPy.readabilipy import simple_json_from_html_string
from .extract_element import extract_element
from .extract_datetime import extract_datetime_string
def xpath_extract_spec(xpath_expression, match_rule="single", warn_if_missing=True):
extract_spec = {
"select_method": "xpath",
"select_expression": xpath_expression,
"match_rule": match_rule,
"warn_if_missing": warn_if_missing
}
return extract_spec
def extract_article(response, config, db_entry=None, content_digests=False, node_indexes=False):
# Initialise an article dictionary
article = {
"site_name": config["site_name"],
"article_url": response.url,
"title": None,
"byline": None,
"publication_datetime": None,
"content": None,
"plain_content": None,
"plain_text": None,
"metadata": None,
}
# Include data from the db entry if available
if db_entry:
article["crawl_id"] = db_entry.crawl_id
article["crawl_datetime"] = db_entry.crawl_datetime.replace(tzinfo=datetime.timezone.utc).isoformat()
# Set default article fields by running readability on full page HTML
page_html = extract_element(response, xpath_extract_spec("/html", "largest"))
# Always extract the article elements from the page_html with ReadabiliPy first
default_readability_article = simple_json_from_html_string(page_html, content_digests, node_indexes, use_readability=False)
article['title'] = default_readability_article['title']
article["publication_datetime"] = default_readability_article["date"]
article["byline"] = default_readability_article["byline"]
article["content"] = default_readability_article["content"]
article["plain_content"] = default_readability_article["plain_content"]
article["plain_text"] = default_readability_article["plain_text"]
    # Next overwrite with site config versions, if they exist
if "article" in config:
# Attempt to extract article HTML, using a blank entry if nothing can be extracted
article_html = extract_element(response, config["article"]["content"])
if not article_html:
article_html = ""
readabilipy_article = simple_json_from_html_string(article_html, content_digests, node_indexes, use_readability=False)
article["content"] = readabilipy_article["content"]
article["plain_content"] = readabilipy_article["plain_content"]
article["plain_text"] = readabilipy_article["plain_text"]
# Check whether we extracted an empty article and reject if so
if article["content"] == "<div></div>":
article["content"] = None
article["plain_content"] = None
article["plain_text"] = None
# Try to extract other data if the article has identified content
if "content" in article and article["content"]:
# Extract title if in config
with suppress(KeyError):
article["title"] = extract_element(response, config["article"]["title"], postprocessing_fn=simplify_extracted_title)
# Extract byline
with suppress(KeyError):
article["byline"] = extract_element(response, config["article"]["byline"], postprocessing_fn=simplify_extracted_byline)
# Extract publication_datetime
with suppress(KeyError):
datetime_string = extract_element(response, config["article"]["publication_datetime"])
iso_string = None
if "datetime_formats" in config["article"]["publication_datetime"]:
datetime_formats = config["article"]["publication_datetime"]['datetime_formats']
# Only one format should match, so we just use the first one in the list that does
for dt_format in datetime_formats:
iso_string = extract_datetime_string(datetime_string, dt_format)
if iso_string:
break
else:
iso_string = extract_datetime_string(datetime_string)
article["publication_datetime"] = iso_string
# Extract additional article metadata
if "metadata" in config:
# Initialise metadata field
metadata = dict()
# Attempt to extract all metadata fields
for fieldname in config["metadata"]:
metadata[fieldname] = extract_element(response, config["metadata"][fieldname])
article["metadata"] = metadata
return article
def simplify_extracted_byline(bylines):
"""Simplify all bylines in list by removing attribution words, rejecting bylines without authors and removing
anything bracketed at the end of the byline or after a forward slash or vertical bar (usually a site name)"""
def simplify_single_byline(byline):
remove_from_start = ["by ", "By ", "and "]
remove_from_end = [","]
no_author_here = ["and", "By", ","]
remove_after = ["/", "(", "|"]
# Remove these from start of the byline string if present
for start_string in remove_from_start:
if byline.startswith(start_string):
byline = byline.replace(start_string, "")
# Remove these from end of byline string if present
for end_string in remove_from_end:
if byline.endswith(end_string):
byline = byline.replace(end_string, "")
# Remove any part of the byline string following a termination marker
for remove_string in remove_after:
byline = byline.split(remove_string)[0]
# Replace any whitespace with a single space
byline = re.sub(r"\s+", ' ', byline)
# Remove leading and trailing whitespace
byline = byline.strip()
# Ignore any byline string that does not contain an author
if byline in no_author_here:
return None
return byline
# Simplify each byline in the list and create a new list, removing all None
bylines = list(filter(None, map(simplify_single_byline, bylines)))
# Remove duplicated authors
return list(dict.fromkeys(bylines))
def simplify_extracted_title(titles):
"""Simplify titles by removing anything after a vertical bar (usually a site name)"""
def simplify_single_title(title):
remove_after = ["|"] # Add to this list if needed
for remove_string in remove_after:
title = title.split(remove_string)[0]
return title.strip()
return list(map(simplify_single_title, titles))
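# Illustrative usage sketch (not part of the original module): a quick check of
# the two simplifiers defined above. The sample bylines and title are made up
# purely to demonstrate the stripping, filtering and de-duplication behaviour.
def _example_simplify_usage():
    bylines = ["By Jane Doe", "Jane Doe / Example News", "and", "By Jane Doe"]
    titles = ["Big Story | Example News"]
    print(simplify_extracted_byline(bylines))  # expected: ['Jane Doe']
    print(simplify_extracted_title(titles))    # expected: ['Big Story']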
|
StarcoderdataPython
|
1805323
|
"""
Arguments for configuration
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import argparse
import io
import sys
import random
import numpy as np
import os
import paddle
import paddle.fluid as fluid
def str2bool(v):
"""
String to Boolean
"""
# because argparse does not support to parse "true, False" as python
# boolean directly
return v.lower() in ("true", "t", "1")
class ArgumentGroup(object):
"""
Argument Class
"""
def __init__(self, parser, title, des):
self._group = parser.add_argument_group(title=title, description=des)
def add_arg(self, name, type, default, help, **kwargs):
"""
Add argument
"""
type = str2bool if type == bool else type
self._group.add_argument(
"--" + name,
default=default,
type=type,
help=help + ' Default: %(default)s.',
**kwargs)
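# Minimal usage sketch (not in the original file): how ArgumentGroup is commonly
# wired to an argparse parser. The group and argument names below are
# hypothetical and only exercise the add_arg helper defined above.
def _example_argument_group():
    parser = argparse.ArgumentParser(__doc__)
    model_g = ArgumentGroup(parser, "model", "model configuration and paths.")
    model_g.add_arg("use_cuda", bool, False, "Whether to run on GPU.")
    model_g.add_arg("batch_size", int, 32, "Mini-batch size for training.")
    return parser.parse_args([])  # parse defaults only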
def print_arguments(args):
"""
Print Arguments
"""
print('----------- Configuration Arguments -----------')
for arg, value in sorted(six.iteritems(vars(args))):
print('%s: %s' % (arg, value))
print('------------------------------------------------')
def init_checkpoint(exe, init_checkpoint_path, main_program):
"""
Init CheckPoint
"""
    assert os.path.exists(
        init_checkpoint_path), "[%s] can't be found." % init_checkpoint_path
    try:
        checkpoint_path = os.path.join(init_checkpoint_path, "checkpoint")
        fluid.load(main_program, checkpoint_path, exe)
    except Exception:
        # Fall back to loading init_checkpoint_path directly if there is no "checkpoint" file
        fluid.load(main_program, init_checkpoint_path, exe)
print("Load model from {}".format(init_checkpoint_path))
def data_reader(file_path, word_dict, num_examples, phrase, epoch, max_seq_len):
"""
Convert word sequence into slot
"""
unk_id = len(word_dict)
pad_id = 0
all_data = []
with io.open(file_path, "r", encoding='utf8') as fin:
for line in fin:
if line.startswith('text_a'):
continue
cols = line.strip().split("\t")
if len(cols) != 2:
sys.stderr.write("[NOTICE] Error Format Line!")
continue
label = int(cols[1])
wids = [word_dict[x] if x in word_dict else unk_id
for x in cols[0].split(" ")]
seq_len = len(wids)
if seq_len < max_seq_len:
for i in range(max_seq_len - seq_len):
wids.append(pad_id)
else:
wids = wids[:max_seq_len]
seq_len = max_seq_len
all_data.append((wids, label, seq_len))
if phrase == "train":
random.shuffle(all_data)
num_examples[phrase] = len(all_data)
def reader():
"""
Reader Function
"""
for epoch_index in range(epoch):
for doc, label, seq_len in all_data:
yield doc, label, seq_len
return reader
def load_vocab(file_path):
"""
load the given vocabulary
"""
vocab = {}
with io.open(file_path, 'r', encoding='utf8') as f:
wid = 0
for line in f:
if line.strip() not in vocab:
vocab[line.strip()] = wid
wid += 1
vocab["<unk>"] = len(vocab)
return vocab
def init_pretraining_params(exe,
pretraining_params_path,
main_program,
use_fp16=False):
"""load params of pretrained model, NOT including moment, learning_rate"""
    assert os.path.exists(pretraining_params_path), \
        "[%s] can't be found." % pretraining_params_path
fluid.load(main_program, pretraining_params_path, exe)
print("Load pretraining parameters from {}.".format(
pretraining_params_path))
|
StarcoderdataPython
|
1728031
|
first.loc['US':, ['tail_num', 'origin', 'dest']]
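# Self-contained sketch (an assumption, not from the original snippet): the line
# above presumes an existing DataFrame named `first`, indexed by carrier code.
# The toy frame below is hypothetical and only shows what the label-based slice
# 'US': combined with the column list selects.
import pandas as pd

def _example_first_selection():
    first = pd.DataFrame(
        {"tail_num": ["N1", "N2", "N3"],
         "origin": ["JFK", "LAX", "ORD"],
         "dest": ["SFO", "SEA", "BOS"]},
        index=pd.Index(["AA", "US", "WN"], name="carrier"),  # sorted, so label slicing works
    )
    return first.loc["US":, ["tail_num", "origin", "dest"]]  # rows from 'US' onward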
|
StarcoderdataPython
|
148804
|
<filename>cli/minitrino/cli.py
#!usr/bin/env/python3
# -*- coding: utf-8 -*-
import os
import click
from minitrino import settings
from minitrino import components
from pathlib import Path
CONTEXT_SETTINGS = {"auto_envvar_prefix": "MINITRINO"}
pass_environment = click.make_pass_decorator(components.Environment, ensure=True)
class CommandLineInterface(click.MultiCommand):
def list_commands(self, ctx):
cmd_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "cmd"))
retval = []
for filename in os.listdir(cmd_dir):
if filename.endswith(".py") and filename.startswith("cmd_"):
retval.append(filename[4:-3])
retval.sort()
return retval
def get_command(self, ctx, name):
try:
mod = __import__(f"minitrino.cmd.cmd_{name}", None, None, ["cli"])
except ImportError:
return
return mod.cli
@click.command(cls=CommandLineInterface, context_settings=CONTEXT_SETTINGS)
@click.option(
"-v",
"--verbose",
is_flag=True,
default=False,
help=("""Enable verbose output."""),
)
@click.option(
"-e",
"--env",
default=[],
type=str,
multiple=True,
help=(
"""Add or override environment variables.
Environment variables are sourced from the Minitrino library's root
'minitrino.env' file as well as the user config file in
'~/.minitrino/minitrino.cfg'. Variables supplied by this option will
override values from either of those sources. The variables will also be
passed to the environment of the shell executing commands during the
'provision' command."""
),
)
@pass_environment
def cli(ctx, verbose, env):
"""Welcome to the Minitrino command line interface.
To report issues and ask questions, please file a GitHub issue and apply a
descriptive label at the GitHub repository:
https://github.com/jefflester/minitrino
"""
ctx._user_init(verbose, env)
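# Illustrative sketch (not part of the original CLI): subcommands are discovered
# from files named cmd_<name>.py in the cmd/ package, each exposing a click
# command bound to the name `cli`. A hypothetical minitrino/cmd/cmd_hello.py
# could look like this:
#
#     import click
#     from minitrino.cli import pass_environment
#
#     @click.command("hello")
#     @pass_environment
#     def cli(ctx):
#         """Print a greeting."""
#         click.echo("Hello from Minitrino!")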
|
StarcoderdataPython
|
11338676
|
<gh_stars>1-10
import random
import json
class TextGenerator(object):
"""
    Generates sentences based on a Markov chain.
    Attributes
    ----------
    chain : list
        List of triplets used for the Markov chain.
"""
def __init__(self, chain_json_filepath):
"""
        Initializer.
        Parameters
        ----------
        chain_json_filepath : str
            Path to the JSON file containing the chain data.
"""
self.chain = self.get_chain_data(chain_json_filepath)
def get_chain_data(self, filepath):
"""
        Loads the chain data from the JSON file.
        Returns
        -------
        chain : list
            List of triplets that make up the chain.
"""
chain = []
with open(filepath, 'r') as f:
for raw in json.load(f):
chain.append({'prefix1': raw[0], 'prefix2': raw[1], 'suffix': raw[2], 'freq': raw[3]})
return chain
def generate(self):
"""
        Generates a single sentence at random.
"""
morphemes = []
first_triplet = self.get_first_triplet()
morphemes.append(first_triplet['prefix2'])
morphemes.append(first_triplet['suffix'])
while morphemes[-1] != "EOS":
prefix1 = morphemes[-2]
prefix2 = morphemes[-1]
triplet = self.get_triplet(prefix1, prefix2)
morphemes.append(triplet[2])
result = "".join(morphemes[:-1])
return result
def get_triplet(self, prefix1, prefix2):
"""
        Randomly picks a suffix that follows prefix1 and prefix2.
        Parameters
        ----------
        prefix1 : str
            The first prefix.
        prefix2 : str
            The second prefix.
"""
chain = []
for triplet_block in self.chain:
            # Both prefixes must match so the chain follows the current two-word state
            if triplet_block['prefix1'] == prefix1 and triplet_block['prefix2'] == prefix2:
                chain.append(triplet_block)
triplet = self.get_probable_triplet(chain)
return (triplet["prefix1"], triplet["prefix2"], triplet["suffix"])
def get_first_triplet(self):
"""
        Randomly picks a triplet that can start a sentence (prefix1 == "BOS").
        Returns
        -------
        triplet : dict
            The starting triplet chosen in proportion to its frequency.
"""
chain = []
for triplet_block in self.chain:
if triplet_block['prefix1'] == "BOS":
chain.append(triplet_block)
triplet = self.get_probable_triplet(chain)
return triplet
def search_chain(self, *queries):
"""
        Retrieves chain entries that match the given search conditions.
        Parameters
        ----------
        queries : tuple or list
            Words to look for among prefix1, prefix2 and suffix.
        Returns
        -------
        result : list
            List of matching chain entries.
        """
        # Minimal completion (the original body was missing): keep every block
        # in which each queried word appears in one of the three positions.
        return [block for block in self.chain
                if all(q in (block['prefix1'], block['prefix2'], block['suffix'])
                       for q in queries)]
def get_probable_triplet(self, triplet_blocks):
"""
        Picks one triplet from the given list, weighted by frequency.
        Parameters
        ----------
        triplet_blocks : list
            List of triplets together with their frequency information.
        Returns
        -------
        triplet : dict
            The triplet chosen in proportion to its frequency.
"""
probability = []
for (index, triplet_block) in enumerate(triplet_blocks):
for i in range(triplet_block['freq']):
probability.append(index)
triplet_block_index = random.choice(probability)
return triplet_blocks[triplet_block_index]
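# Usage sketch (not in the original module): the chain JSON is assumed to be a
# list of [prefix1, prefix2, suffix, freq] rows, matching get_chain_data above.
# The file name below is hypothetical.
def _example_generate(chain_json_filepath="chain.json"):
    generator = TextGenerator(chain_json_filepath)
    for _ in range(3):
        print(generator.generate())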
|
StarcoderdataPython
|
1837333
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013-2016 Online SAS and Contributors. All Rights Reserved.
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Licensed under the BSD 2-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the
# License at https://opensource.org/licenses/BSD-2-Clause
from . import API
REGIONS = {
'par1': {
'url': 'https://api-fr-par.scaleway.com/instance/v1/zones/fr-par-1/',
},
'ams1': {
'url': 'https://api-nl-ams.scaleway.com/instance/v1/zones/nl-ams-1/',
},
'fr-par-1': {
'url': 'https://api-fr-par.scaleway.com/instance/v1/zones/fr-par-1/',
},
'fr-par-2': {
'url': 'https://api-fr-par.scaleway.com/instance/v1/zones/fr-par-2/',
},
'nl-ams-1': {
'url': 'https://api-nl-ams.scaleway.com/instance/v1/zones/nl-ams-1/',
},
'pl-waw-1': {
'url': 'https://api-pl-waw.scaleway.com/instance/v1/zones/pl-waw-1/',
}
}
class ComputeAPI(API):
""" The default region is par1 as it was the first availability zone
provided by Scaleway, but it could change in the future.
"""
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
base_url = kwargs.pop('base_url', None)
assert region is None or base_url is None, \
"Specify either region or base_url, not both."
if base_url is None:
region = region or 'par1'
assert region in REGIONS, \
"'%s' is not a valid Scaleway region." % region
base_url = REGIONS.get(region)['url']
super(ComputeAPI, self).__init__(base_url=base_url, **kwargs)
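# Usage sketch (an assumption, not from the original file): ComputeAPI is built
# with either a region name or an explicit base_url, never both. Passing
# auth_token through **kwargs to the base API class is assumed here; the token
# value is a placeholder.
def _example_compute_api(token="<secret-token>"):
    api_by_region = ComputeAPI(region="fr-par-1", auth_token=token)
    api_by_url = ComputeAPI(base_url=REGIONS["nl-ams-1"]["url"], auth_token=token)
    return api_by_region, api_by_url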
|
StarcoderdataPython
|
12817774
|
<gh_stars>0
#!/usr/bin/env python
from builtins import classmethod
import pgzero
from pgzero.screen import Screen
from pgzero.loaders import ImageLoader, SoundLoader
from pgzero.keyboard import Keyboard
from pgzero.constants import mouse
from pgzero.clock import clock
from pgzero.actor import Actor
from pgzero.rect import Rect, ZRect
import pygame
import pygame.time
from pgzero.ptext import getfont
#from pgzero.animation import Animation
from pgzero.game import PGZeroGame
from pgzero import clock, music, tone
pgzero.ptext.DEFAULT_FONT_NAME = 'tomorrow-regular'
print("Strar")
WIDTH = 800
HEIGHT = 600
GAP = 10
import pgzrun
'''
if pgzrun.__name__ == 'qwe':
pgzero = module()
clock = module()
music = module()
tone = module()
Actor = type()
keyboard = Keyboard()
animate = function()
Rect = type()
ZRect = type()
images = ImageLoader()
sounds = SoundLoader()
mouse = EnumMeta()
keys = EnumMeta()
keymods = EnumMeta()
exit = function()
pgzrun = module()
#draw = function()
screen = Screen()
'''
class Scene:
MAX_TIME = 3.0
bottles = []
time_left = MAX_TIME
def tick(self, dt):
self.time_left -= dt
class GameSession:
scene = Scene()
def tick(self, dt):
self.scene.tick(dt)
if self.scene.time_left < 0:
self.scene = Scene()
GS = GameSession()
def update(dt):
GS.tick(dt)
MX = 0
MY = 0
def on_mouse_move(pos, rel, buttons):
global MX, MY
MX, MY = pos
def draw():
#screen = pgzero.game.screen
global screen
assert isinstance(screen, Screen)
font = getfont(fontname="tomorrow-regular", fontsize=32)
screen.clear()
screen.draw.text("Hello World", pos=(100, 100), fontname="tomorrow-regular", fontsize=32)
screen.draw.text("DEFAULT_FONT_NAME", pos=(300, 100))
screen.draw.text(str(GS.scene.time_left), pos=(30, 10))
screen.draw.circle((400, 300), 30, 'white')
# draw background
# draw bottles
bottles=[
(100, 100), (200, 100),(300, 100),
(100, 200), (200, 200),(300, 200),
(100, 300), (200, 300),(300, 300),
]
bottle_width=60
bottle_height=90
for x, y in bottles:
screen.draw.rect(Rect(x, y, bottle_width, bottle_height), 'brown')
# draw bullets/stones/lasso/tomahawks/effects
# draw smoke
# draw timer
bar_width = 100
tl = bar_width / GS.scene.MAX_TIME * GS.scene.time_left
screen.draw.filled_rect(Rect(WIDTH - bar_width - GAP, HEIGHT - 20 - GAP,
tl, 20), 'red')
# draw crosshair
screen.draw.circle((MX, MY), 20, 'white')
# draw FPS
# ???
#i=0
#for i in globals():
# print (i, '=', type(globals()[i]).__name__, '()')
'''
#import gamelib
if False:
class Mod:
@staticmethod
def draw():
global draw
draw()
DISPLAY_FLAGS = 0
m=Mod()
pgzero.loaders.set_root('.')
PGZeroGame.show_default_icon()
pygame.display.set_mode((100, 100), DISPLAY_FLAGS)
from pgzero.builtins import *
from pgzero.game import screen
#m.__dict__.update(builtins.__dict__)
PGZeroGame(m).run()
if __name__ == '__main__':
#gamelib.run()
pass
'''
pgzrun.go()
|
StarcoderdataPython
|
8146874
|
#!/usr/bin/env python2.7
import tweepy
import time
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import json
from unidecode import unidecode
from Adafruit_Thermal import *
printer = Adafruit_Thermal("/dev/ttyAMA0", 9600, timeout=5)
consumer_key = "YOUR KEY HERE"
consumer_secret = "YOUR SECRET HERE"
access_key = "YOUR KEY HERE"
access_secret = "YOUR SECRET HERE"
#auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
#auth.set_access_token(access_key, access_secret)
#api = tweepy.API(auth)
#new_tweets = tweepy.Cursor(api.search, q='tweetstorm').items(10)
#for tweet in new_tweets:
# printer.print(tweet.text)
#printer.invertOn()
class StdOutListener(StreamListener):
""" A listener handles tweets that are received from the stream.
This is a basic listener that just prints received tweets to stdout.
"""
def on_data(self, data):
tweeted = unidecode(json.loads(data)['text'])
#printer.inverseOn()
#printer.setSize('M')
#printer.setLineHeight(50)
printer.feed(2)
printer.println(tweeted)
print(tweeted)
return True
def on_error(self, status):
print(status)
if __name__ == '__main__':
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
stream = Stream(auth, l)
stream.filter(track=['STRING TO TRACK 1', 'STRING TO TRACK 2'])
|
StarcoderdataPython
|
6644543
|
# Given a mixed array of number and string representations of integers,
# add up the string integers and subtract this from the total of the non-string integers.
# def div_con(x):
# X = []
# Y = []
# sumX = 0
# sumY = 0
# for num in x:
# if type(num) == int:
# X.append(num)
# sumX = sum(X)
# else:
# MyInt = int(num)
# Y.append(MyInt)
# sumY = sum(Y)
# return sumX - sumY
def div_con(x):
return sum(n if isinstance(n, int) else -int(n) for n in x)
if __name__ == '__main__':
print(div_con([9, 3, '7', '3']))
|
StarcoderdataPython
|
1707178
|
print(())
print((1,))
print((1,2,3))
print(tuple())
print(tuple((1,)))
print(tuple((1,2,3)))
print(tuple([1,2,3]))
|
StarcoderdataPython
|
6503468
|
<gh_stars>1-10
"""
For Django-Rest-Framework Serialization
http://www.django-rest-framework.org/api-guide/serializers/
Serializers allow complex data (e.g. querysets and model instances)
to be converted to native Python datatypes that can be easily rendered
into JSON, XML or other types. Serializers also provide deserialization,
allowing parsed data to be converted back into complex types,
after first validating the incoming data.
For testing, we have jobs here
http://localhost:8000/api/job/?format=json
"""
from .models import Job
from rest_framework import serializers
# Serializers define the API representation
class JobSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Job
fields = ('company', 'title', 'description', 'status',
'salary_min', 'salary_max', 'location')
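# Illustrative sketch (not in the original module): a typical way this serializer
# is exposed through a DRF ViewSet. The ViewSet name is hypothetical and the
# router registration (e.g. router.register(r'job', JobViewSet)) would normally
# live in urls.py rather than here.
from rest_framework import viewsets

class JobViewSet(viewsets.ModelViewSet):
    queryset = Job.objects.all()
    serializer_class = JobSerializer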
|
StarcoderdataPython
|
1607803
|
# SPDX-License-Identifier: Apache-2.0
import json
import os
from ..case.test_case import TestCase
from typing import List, Text, Optional
DATA_DIR = os.path.join(
os.path.dirname(os.path.realpath(os.path.dirname(__file__))),
'data')
def load_model_tests(
data_dir: Text = DATA_DIR,
kind: Optional[Text] = None,
) -> List[TestCase]:
'''Load model test cases from on-disk data files.
'''
supported_kinds = os.listdir(data_dir)
if kind not in supported_kinds:
raise ValueError("kind must be one of {}".format(supported_kinds))
testcases = []
kind_dir = os.path.join(data_dir, kind)
for test_name in os.listdir(kind_dir):
case_dir = os.path.join(kind_dir, test_name)
# skip the non-dir files, such as generated __init__.py.
rtol = 1e-3
atol = 1e-7
if not os.path.isdir(case_dir):
continue
if os.path.exists(os.path.join(case_dir, 'model.onnx')):
url = None
            # Strip the 'test_' prefix to recover the model name
            model_name = test_name[len('test_'):]
model_dir: Optional[Text] = case_dir
else:
with open(os.path.join(case_dir, 'data.json')) as f:
data = json.load(f)
url = data['url']
model_name = data['model_name']
rtol = data.get('rtol', 1e-3)
atol = data.get('atol', 1e-7)
model_dir = None
testcases.append(
TestCase(
name=test_name,
url=url,
model_name=model_name,
model_dir=model_dir,
model=None,
data_sets=None,
kind=kind,
rtol=rtol,
atol=atol,
))
return testcases
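# Usage sketch (assumption): 'node' is one of the kind sub-directories expected
# under DATA_DIR in the upstream layout; adjust to whatever directories exist.
def _example_load_node_tests():
    return load_model_tests(kind='node')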
|
StarcoderdataPython
|
252834
|
class VerifierError(Exception):
pass
class VerifierTranslatorError(Exception):
pass
__all__ = ["VerifierError", "VerifierTranslatorError"]
|
StarcoderdataPython
|
9602674
|
<gh_stars>1-10
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import graphics
from graphics.utils.utils_perspective import lookatnp, perspectiveprojectionnp
from graphics.utils.utils_sphericalcoord import get_spherical_coords_x
from graphics.render.base import Render as Dib_Renderer
import os
import sys
import math
import torch
import numpy as np
import tqdm
import imageio
# from PIL import Image
import kaolin as kal
from kaolin.rep import TriangleMesh
sys.path.append(str(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../DIB-R')))
current_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.join(current_dir, 'data')
output_directory = os.path.join(data_dir, 'results')
output_directory_dib = os.path.join(output_directory, 'dib')
os.makedirs(output_directory_dib, exist_ok=True)
def main():
filename_input = os.path.join(data_dir, 'banana.obj')
filename_output = os.path.join(output_directory, 'example1.gif')
###########################
# camera settings
###########################
camera_distance = 2
elevation = 30
###########################
# load object
###########################
mesh = TriangleMesh.from_obj(filename_input)
vertices = mesh.vertices
faces = mesh.faces.int()
face_textures = (faces).clone()
vertices = vertices[None, :, :].cuda()
faces = faces[None, :, :].cuda()
    face_textures = face_textures[None, :, :].cuda()
###########################
# normalize verts
###########################
vertices_max = vertices.max()
vertices_min = vertices.min()
vertices_middle = (vertices_max + vertices_min) / 2.
vertices = vertices - vertices_middle
coef = 5
vertices = vertices * coef
###########################
# DIB-Renderer
###########################
renderer = Dib_Renderer(256, 256, mode='VertexColor')
textures = torch.ones(1, vertices.shape[1], 3).cuda()
loop = tqdm.tqdm(list(range(0, 360, 4)))
loop.set_description('Drawing Dib_Renderer VertexColor')
writer = imageio.get_writer(os.path.join(output_directory_dib, 'rotation_VertexColor.gif'), mode='I')
for azimuth in loop:
renderer.set_look_at_parameters([90 - azimuth], [elevation], [camera_distance])
predictions, _, _ = renderer.forward(points=[vertices, faces[0].long()], colors=[textures])
image = predictions.detach().cpu().numpy()[0]
writer.append_data((image * 255).astype(np.uint8))
writer.close()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6570847
|
<gh_stars>10-100
#
# The MIT License (MIT)
#
# This file is part of RLScore
#
# Copyright (c) 2008 - 2016 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from scipy import sparse as sp
from rlscore.utilities import array_tools
class PolynomialKernel(object):
"""Polynomial kernel.
k(xi,xj) = (gamma * <xi, xj> + coef0)**degree
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_bvectors, n_features]
Basis vectors
gamma : float, optional (default 1.0)
Kernel parameter
coef0 : float, optional (default 0.)
Kernel parameter
degree : int, optional (default 2)
Kernel parameter
Attributes
----------
X : {array-like, sparse matrix}, shape = [n_bvectors, n_features]
Basis vectors
gamma : float
Kernel parameter
coef0 : float
Kernel parameter
degree : int
Kernel parameter
"""
def __init__(self, X, degree=2, gamma=1.0, coef0=0):
X = array_tools.as_2d_array(X, True)
self.train_X = X
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
def getKM(self, X):
"""Returns the kernel matrix between the basis vectors and X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
K : array, shape = [n_samples, n_bvectors]
kernel matrix
"""
X = array_tools.as_2d_array(X, True)
test_X = X
degree, coef0, gamma = self.degree, self.coef0, self.gamma
if sp.issparse(test_X):
test_X = array_tools.spmat_resize(test_X, self.train_X.shape[1])
else:
test_X = array_tools.as_dense_matrix(test_X)
train_X = self.train_X
K = array_tools.as_array(train_X * test_X.T)
K *= gamma
K += coef0
K = K ** degree
return K.T
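# Worked sketch (not part of the library code): with degree=2, gamma=1.0 and
# coef0=0 the kernel is the squared dot product, so for the toy vectors below
# K[0, 0] = (1*1 + 2*0)**2 = 1 and K[1, 0] = (1*0 + 2*1)**2 = 4.
def _example_polynomial_kernel():
    import numpy as np
    X_train = np.array([[1.0, 2.0]])             # one basis vector
    X_test = np.array([[1.0, 0.0], [0.0, 1.0]])  # two test samples
    kernel = PolynomialKernel(X_train, degree=2, gamma=1.0, coef0=0)
    return kernel.getKM(X_test)                  # shape (2, 1): [[1.], [4.]]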
|
StarcoderdataPython
|
6466283
|
class Apple(object):
def JustAMethod(self, abc):
self.unused = abc
self.x = abc
self.y = abc
return self
def SetX(a, x):
a.unused = 0
a.x = x
def SetY(a, y):
a.y = y
def Sum(a):
return a.x + a.y
a = Apple()
SetX(a, 20)
SetY(a, 3)
assert 23 == Sum(a)
print(Sum(a))
|
StarcoderdataPython
|
8081998
|
from eth_wallet.cli.eth_wallet_cli import(
eth_wallet_cli,
)
from click.testing import(
CliRunner,
)
def call_eth_wallet(fnc=None, parameters=None, envs=None):
"""
Creates testing environment for cli application
:param fnc: command to run
:param parameters: program cmd argument
    :param envs: extra environment variables passed to the CLI runner
:return: invoked cli runner
"""
fnc = fnc or eth_wallet_cli
runner = CliRunner()
envs = envs or {}
parameters = parameters or []
    # catch_exceptions=False lets exceptions propagate, which makes debugging easier
return runner.invoke(fnc, args=parameters, env=envs, catch_exceptions=False)
|
StarcoderdataPython
|
8042579
|
<reponame>ttung/starfish
import os
import starfish
from starfish.image import ApplyTransform, Filter, LearnTransform, Segmentation
from starfish.spots import SpotFinder, TargetAssignment
from starfish.types import Axes
test = os.getenv("TESTING") is not None
def iss_pipeline(fov, codebook):
primary_image = fov.get_image(starfish.FieldOfView.PRIMARY_IMAGES)
# register the raw image
learn_translation = LearnTransform.Translation(reference_stack=fov.get_image('dots'),
axes=Axes.ROUND, upsampling=100)
transforms_list = learn_translation.run(primary_image.max_proj(Axes.CH, Axes.ZPLANE))
warp = ApplyTransform.Warp()
registered = warp.run(primary_image, transforms_list=transforms_list, in_place=False, verbose=True)
# filter raw data
masking_radius = 15
filt = Filter.WhiteTophat(masking_radius, is_volume=False)
filtered = filt.run(registered, verbose=True, in_place=False)
# detect spots using laplacian of gaussians approach
p = SpotFinder.BlobDetector(
min_sigma=1,
max_sigma=10,
num_sigma=30,
threshold=0.01,
measurement_type='mean',
)
intensities = p.run(
filtered,
blobs_image=fov.get_image('dots'),
blobs_axes=(Axes.ROUND, Axes.ZPLANE))
# decode the pixel traces using the codebook
decoded = codebook.decode_per_round_max(intensities)
# segment cells
seg = Segmentation.Watershed(
nuclei_threshold=.16,
input_threshold=.22,
min_distance=57,
)
label_image = seg.run(primary_image, fov.get_image('dots'))
# assign spots to cells
ta = TargetAssignment.Label()
assigned = ta.run(label_image, decoded)
return assigned, label_image
# process all the fields of view, not just one
def process_experiment(experiment: starfish.Experiment):
decoded_intensities = {}
regions = {}
for i, (name_, fov) in enumerate(experiment.items()):
decoded, segmentation_results = iss_pipeline(fov, experiment.codebook)
decoded_intensities[name_] = decoded
regions[name_] = segmentation_results
if test and i == 1:
# only run through 2 fovs for the test
break
return decoded_intensities, regions
# run the script
if test:
# TODO: (ttung) Pending a fix for https://github.com/spacetx/starfish/issues/700, it's not
# possible to validate the schema for this experiment.
with starfish.config.environ(VALIDATION_STRICT="false"):
exp = starfish.Experiment.from_json(
"https://d2nhj9g34unfro.cloudfront.net/browse/formatted/20180926/iss_breast/experiment.json")
else:
exp = starfish.Experiment.from_json("iss/formatted/experiment.json")
decoded_intensities, regions = process_experiment(exp)
|
StarcoderdataPython
|
6693543
|
<gh_stars>1-10
import torch
from .jacobian import jacobian_backward, jacobian_norm, jacobian
import numpy as np
import pytest
def test_jacobian_backward():
"""Test the jacobian backprop function for a linear system
y = A x
For a linear system the gradient of Frobenious norm of the jacobian should
be exactly equal to the original matrix.
del_A ||y_x||^2/2 = del_A || A ||^2 / 2 = A I = A
"""
a = torch.rand(10, 10, requires_grad=True)
x = torch.rand(10, requires_grad=True)
y = a.matmul(x)
jacobian_backward(y, x)
A = a.grad.numpy()
np.testing.assert_allclose(A, a.detach().numpy())
def test_jacobian_norm():
a = torch.rand(10, 10, requires_grad=True)
x = torch.rand(10, requires_grad=True)
y = a.matmul(x)
out = jacobian_norm(y, x)
expected = a.norm()**2 / 2
assert out.item() == pytest.approx(expected.item())
# test gradient (see test_jacobian_backward docstring)
out.backward()
actual = a.grad.numpy()
expected = a.data.numpy()
np.testing.assert_allclose(actual, expected)
def test_jacobian():
a = torch.rand(10, 10, requires_grad=True)
x = torch.rand(10, requires_grad=True)
y = a.matmul(x)
out = jacobian(y, x)
actual = out.data.numpy()
expected = a.data.numpy()
np.testing.assert_allclose(actual, expected)
|
StarcoderdataPython
|
3364846
|
'''
Functional tests for cassandra timeseries
'''
import time
import datetime
import os
import cql
from . import helpers
from .helpers import unittest, os, Timeseries
@unittest.skipUnless( os.environ.get('TEST_CASSANDRA','true').lower()=='true', 'skipping cassandra' )
class CassandraApiTest(helpers.ApiHelper):
def setUp(self):
self.client = cql.connect('localhost', 9160, os.environ.get('CASSANDRA_KEYSPACE','kairos'), cql_version='3.0.0')
super(CassandraApiTest,self).setUp()
def test_url_parse(self):
assert_equals( 'CassandraSeries',
Timeseries( 'cql://localhost', type='series' ).__class__.__name__ )
# Not running gregorian tests because they run in the "far future" where long
# TTLs are not supported.
#@unittest.skipUnless( os.environ.get('TEST_CASSANDRA','true').lower()=='true', 'skipping cassandra' )
#class CassandraGregorianTest(helpers.GregorianHelper):
#def setUp(self):
#self.client = cql.connect('localhost', 9160, os.environ.get('CASSANDRA_KEYSPACE','kairos'), cql_version='3.0.0')
#super(CassandraGregorianTest,self).setUp()
@unittest.skipUnless( os.environ.get('TEST_CASSANDRA','true').lower()=='true', 'skipping cassandra' )
class CassandraSeriesTest(helpers.SeriesHelper):
def setUp(self):
self.client = cql.connect('localhost', 9160, os.environ.get('CASSANDRA_KEYSPACE','kairos'), cql_version='3.0.0')
super(CassandraSeriesTest,self).setUp()
@unittest.skipUnless( os.environ.get('TEST_CASSANDRA','true').lower()=='true', 'skipping cassandra' )
class CassandraHistogramTest(helpers.HistogramHelper):
def setUp(self):
self.client = cql.connect('localhost', 9160, os.environ.get('CASSANDRA_KEYSPACE','kairos'), cql_version='3.0.0')
super(CassandraHistogramTest,self).setUp()
@unittest.skipUnless( os.environ.get('TEST_CASSANDRA','true').lower()=='true', 'skipping cassandra' )
class CassandraCountTest(helpers.CountHelper):
def setUp(self):
self.client = cql.connect('localhost', 9160, os.environ.get('CASSANDRA_KEYSPACE','kairos'), cql_version='3.0.0')
super(CassandraCountTest,self).setUp()
@unittest.skipUnless( os.environ.get('TEST_CASSANDRA','true').lower()=='true', 'skipping cassandra' )
class CassandraGaugeTest(helpers.GaugeHelper):
def setUp(self):
self.client = cql.connect('localhost', 9160, os.environ.get('CASSANDRA_KEYSPACE','kairos'), cql_version='3.0.0')
super(CassandraGaugeTest,self).setUp()
@unittest.skipUnless( os.environ.get('TEST_CASSANDRA','true').lower()=='true', 'skipping cassandra' )
class CassandraSetTest(helpers.SetHelper):
def setUp(self):
self.client = cql.connect('localhost', 9160, os.environ.get('CASSANDRA_KEYSPACE','kairos'), cql_version='3.0.0')
super(CassandraSetTest,self).setUp()
|
StarcoderdataPython
|
367992
|
import os
import torch
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import torch.nn as nn
import argparse
import models
import math
parser = argparse.ArgumentParser(description='sample.py')
parser.add_argument('-init', default='The meaning of life is ',
help="""Initial text """)
parser.add_argument('-load_model', default='',
help="""Model filename to load""")
parser.add_argument('-seq_length', type=int, default=50,
help="""Maximum sequence length""")
parser.add_argument('-temperature', type=float, default=0.4,
help="""Temperature for sampling.""")
parser.add_argument('-neuron', type=int, default=0,
help="""Neuron to read.""")
parser.add_argument('-overwrite', type=float, default=0,
help="""Value used to overwrite the neuron. 0 means don't overwrite.""")
parser.add_argument('-layer', type=int, default=-1,
help="""Layer to read. -1 = last layer""")
# GPU
parser.add_argument('-cuda', action='store_true',
help="""Use CUDA""")
opt = parser.parse_args()
def batchify(data, bsz):
tokens = len(data.encode())
ids = torch.LongTensor(tokens)
token = 0
for char in data.encode():
ids[token] = char
token += 1
nbatch = ids.size(0) // bsz
ids = ids.narrow(0, 0, nbatch * bsz)
ids = ids.view(bsz, -1).t().contiguous()
return ids
def color(p):
p = math.tanh(3*p)*.5+.5
q = 1.-p*1.3
r = 1.-abs(0.5-p)*1.3+.3*q
p=1.3*p-.3
i = int(p*255)
j = int(q*255)
k = int(r*255)
if j<0:
j=0
if k<0:
k=0
if k >255:
k=255
if i<0:
i = 0
return ('\033[38;2;%d;%d;%dm' % (j, k, i)).encode()
batch_size = 1
checkpoint = torch.load(opt.load_model)
embed = checkpoint['embed']
rnn = checkpoint['rnn']
loss_fn = nn.CrossEntropyLoss()
text = batchify(opt.init, batch_size)
def make_cuda(state):
if isinstance(state, tuple):
return (state[0].cuda(), state[1].cuda())
else:
return state.cuda()
batch = Variable(text)
states = rnn.state0(batch_size)
if isinstance(states, tuple):
hidden, cell = states
else:
hidden = states
last = hidden.size(0)-1
if opt.layer <= last and opt.layer >= 0:
last = opt.layer
if opt.cuda:
batch =batch.cuda()
states = make_cuda(states)
embed.cuda()
rnn.cuda()
loss_avg = 0
loss = 0
gen = bytearray()
for t in range(text.size(0)):
emb = embed(batch[t])
ni = (batch[t]).data[0]
states, output = rnn(emb, states)
if isinstance(states, tuple):
hidden, cell = states
else:
hidden = states
feat = hidden.data[last,0,opt.neuron]
if ni< 128:
col = color(feat)
gen+=(col)
gen.append(ni)
print(opt.init)
if opt.temperature == 0:
topv, topi = output.data.topk(1)
ni = topi[0][0]
gen.append(ni)
inp = Variable(topi[0], volatile=True)
if opt.cuda:
inp = inp.cuda()
for t in range(opt.seq_length):
emb = embed(inp)
states, output = rnn(emb, states)
topv, topi = output.data.topk(1)
ni = topi[0][0]
gen.append(ni)
inp = Variable(topi[0])
if opt.cuda:
inp = inp.cuda()
else:
probs = F.softmax(output[0].squeeze().div(opt.temperature)).data.cpu()
ni = torch.multinomial(probs,1)[0]
feat = hidden.data[last,0,opt.neuron]
if ni < 128:
col = color(feat)
gen+=(col)
gen.append(ni)
inp = Variable(torch.LongTensor([ni]), volatile=True)
if opt.cuda:
inp = inp.cuda()
for t in range(opt.seq_length):
emb = embed(inp)
states, output = rnn(emb, states)
if isinstance(states, tuple):
hidden, cell = states
else:
hidden = states
feat = hidden.data[last,0,opt.neuron]
if isinstance(output, list):
output =output[0]
probs = F.softmax(output.squeeze().div(opt.temperature)).data.cpu()
ni = torch.multinomial(probs,1)[0]
if ni< 128:
col = color(feat)
gen+=(col)
gen.append(ni)
inp = Variable(torch.LongTensor([ni]))
if opt.cuda:
inp = inp.cuda()
if opt.overwrite != 0:
hidden.data[last,0,opt.neuron] = opt.overwrite
gen+=('\033[0m').encode()
print(gen.decode("utf-8",errors = 'ignore' ))
|
StarcoderdataPython
|
228948
|
import meshed as ms
import pytest
@pytest.fixture
def simple_graph():
return dict(a='c', b='cd', c='abd', e='')
def test_edge_reversed_graph(simple_graph):
g = simple_graph
assert ms.makers.edge_reversed_graph(g) == {
'c': ['a', 'b'],
'd': ['b', 'c'],
'a': ['c'],
'b': ['c'],
'e': [],
}
reverse_g_with_sets = ms.makers.edge_reversed_graph(g, set, set.add)
assert reverse_g_with_sets == {
'c': {'a', 'b'},
'd': {'b', 'c'},
'a': {'c'},
'b': {'c'},
'e': set([]),
}
assert ms.makers.edge_reversed_graph(dict(e='', a='e')) == {
'e': ['a'],
'a': [],
}
assert ms.makers.edge_reversed_graph(dict(a='e', e='')) == {
'e': ['a'],
'a': [],
}
|
StarcoderdataPython
|
3563567
|
#Definition of the Workload: https://dumps.wikimedia.org/other/pagecounts-raw/
#
import locale
locale.getdefaultlocale()
from datetime import datetime, date, time
import pandas as pd
import calendar
class RequestSummary:
def __init__(self, project, titlePage, numberRequests, sizeContentBytes, year, month, day, hour):
self.project = project
self.titlePage = titlePage
self.totalNumberRequests = numberRequests
self.totalSizeContentBytes = sizeContentBytes
self.occurrences = pd.DataFrame({'date':[self.getDateTimeStamp(year, month, day, hour)], 'requests':[numberRequests]})
def getProject(self):
return self.project
def getTitlePage(self):
return self.titlePage
def getNumberRequests(self):
return self.totalNumberRequests
def getSizeContentBytes(self):
return self.totalSizeContentBytes
def getOccurrences(self):
return self.occurrences
def getDateTimeStamp(self, year, month, day, hour):
d = date(year, month, day)
t = time(hour, 00)
d = datetime.combine(d, t)
return calendar.timegm(d.timetuple())
def getStringTime(self, timestamp):
return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
def addOccurence(self, titlePage, numberRequests, sizeContentBytes, year, month, day, hour):
workloadOccurrence = self.occurrences.loc[self.occurrences['date'] == self.getDateTimeStamp(year, month, day, hour)]
if workloadOccurrence.empty:
workloadOccurrence = pd.DataFrame([[self.getDateTimeStamp(year, month, day, hour), numberRequests]], columns=['date', 'requests'])
self.occurrences = self.occurrences.append(workloadOccurrence, ignore_index=True)
else:
index = self.occurrences[self.occurrences['date'] ==
self.getDateTimeStamp(year, month, day, hour)].index.tolist()[0]
self.occurrences.set_value(index, 'requests', numberRequests + workloadOccurrence['requests'])
def groupWorkloadPerDay(self, df):
dfCopy = df.copy(deep=True);
dfCopy['date'] = pd.to_datetime(dfCopy['date'],unit='s')
groupedFrame = dfCopy.groupby(pd.DatetimeIndex(dfCopy['date']).normalize()).sum()
groupedFrame.columns = ['date']
return groupedFrame
def groupWorkloadPerMonth(self, df):
dfCopy = df.copy(deep=True);
dfCopy['date'] = pd.to_datetime(dfCopy['date'],unit='s')
groupedFrame = dfCopy.groupby(pd.DatetimeIndex(dfCopy['date']).normalize().month).sum()
groupedFrame.columns = ['date']
return groupedFrame
def sortOccurrencesPerTimeStamp(self, wO):
dfCopy = wO.copy(deep=True);
return dfCopy.sort_index(by=['date'], ascending=True)
def sortOccurrencePerNumberOfRequests(self, wO):
dfCopy = wO.copy(deep=True);
return dfCopy.sort_index(by=['requests'], ascending=True)
def __str__(self):
return self.project + ' ' + self.titlePage + ' ' + str(self.totalNumberRequests) + ' ' + str(self.totalSizeContentBytes)
|
StarcoderdataPython
|
5109635
|
import os
import sys
import unittest
import django
def runtests():
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.test_settings'
django.setup()
setup_file = sys.modules['__main__'].__file__
setup_dir = os.path.abspath(os.path.dirname(setup_file))
return unittest.defaultTestLoader.discover(setup_dir)
if __name__ == '__main__':
runtests()
|
StarcoderdataPython
|
9778591
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dataset_metadata.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow_transform.saved import saved_transform_io
import unittest
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import lookup_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
def _create_test_saved_model():
export_path = os.path.join(tempfile.mkdtemp(), 'export')
with tf.Graph().as_default():
with tf.Session().as_default() as session:
input_float = tf.placeholder(tf.float32, shape=[1])
output = (input_float - 2.0) / 5.0
inputs = {'x': input_float}
outputs = {'x_scaled': output}
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
return export_path
class SavedTransformIOTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
cls._test_saved_model = _create_test_saved_model()
def test_apply_saved_transform(self):
with tf.Graph().as_default() as graph:
with tf.Session().as_default() as session:
input_floats = tf.constant([1237.0]) # tf.float32
input_features = {'x': input_floats}
_, transformed_features = (
saved_transform_io.partially_apply_saved_transform_internal(
self._test_saved_model, input_features))
self.assertEqual(['x_scaled'], list(transformed_features))
result_tensor = transformed_features['x_scaled']
self.assertTrue(isinstance(result_tensor, tf.Tensor))
self.assertAllEqual(session.run(result_tensor), [247.0])
self.assertEqual(graph.get_tensor_by_name('Const:0'), input_floats)
self.assertEqual(
graph.get_tensor_by_name('transform/truediv:0'),
result_tensor)
def test_apply_transform_extra_features_no_passthrough(self):
with self.assertRaises(ValueError):
with tf.Graph().as_default():
with tf.Session().as_default():
input_floats = tf.constant([1234.0]) # tf.float32
input_features = {'x': input_floats,
'extra_1': tf.constant('1'),
'extra_2': tf.constant('2')}
saved_transform_io.partially_apply_saved_transform_internal(
self._test_saved_model, input_features)
def test_apply_transform_type_mismatch(self):
with self.assertRaises(ValueError):
with tf.Graph().as_default():
with tf.Session().as_default():
input_strings = tf.constant(['bogus']) # tf.string
input_features = {'x': input_strings}
saved_transform_io.partially_apply_saved_transform_internal(
self._test_saved_model, input_features)
def test_apply_transform_shape_mismatch(self):
with self.assertRaises(ValueError):
with tf.Graph().as_default():
with tf.Session().as_default():
input_floats = tf.constant(1234.0) # tf.float32
input_features = {'x': input_floats}
saved_transform_io.partially_apply_saved_transform_internal(
self._test_saved_model, input_features)
def test_apply_saved_transform_to_tensor_inside_scope(self):
with tf.Graph().as_default():
with tf.name_scope('my_scope'):
with tf.Session().as_default() as session:
input_floats = tf.constant([1237.0]) # tf.float32
input_features = {'x': input_floats}
_, transformed_features = (
saved_transform_io.partially_apply_saved_transform_internal(
self._test_saved_model, input_features))
self.assertEqual(['x_scaled'], list(transformed_features))
result_tensor = transformed_features['x_scaled']
self.assertAllEqual(session.run(result_tensor), [247.0])
def test_apply_saved_transform_to_tensor_outside_scope(self):
with tf.Graph().as_default():
input_floats = tf.constant([1237.0]) # tf.float32
with tf.name_scope('my_scope'):
with tf.Session().as_default() as session:
input_features = {'x': input_floats}
_, transformed_features = (
saved_transform_io.partially_apply_saved_transform_internal(
self._test_saved_model, input_features))
self.assertEqual(['x_scaled'], list(transformed_features))
result_tensor = transformed_features['x_scaled']
self.assertAllEqual(session.run(result_tensor), [247.0])
def test_dense_roundtrip(self):
export_path = os.path.join(tempfile.mkdtemp(), 'export')
with tf.Graph().as_default():
with tf.Session().as_default() as session:
input_float = tf.placeholder(tf.float32)
# show that unrelated & unmapped placeholders do not interfere
tf.placeholder(tf.int64)
output = input_float / 5.0
inputs = {'input': input_float}
outputs = {'output': output}
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
with tf.Graph().as_default():
with tf.Session().as_default() as session:
# Using a computed input gives confidence that the graphs are fused.
input_float = tf.constant(25.0) * 2
inputs = {'input': input_float}
_, outputs = (
saved_transform_io.partially_apply_saved_transform_internal(
export_path, inputs))
result = session.run(outputs['output'])
# (25 * 2) / 5 = 10
self.assertEqual(10.0, result)
def test_table_roundtrip(self):
export_path = os.path.join(tempfile.mkdtemp(), 'export')
with tf.Graph().as_default():
with tf.Session().as_default() as session:
input_string = tf.placeholder(tf.string)
# Map string through a table, in this case based on a constant tensor.
table = lookup_ops.index_table_from_tensor(
tf.constant(['cat', 'dog', 'giraffe']))
output = table.lookup(input_string)
inputs = {'input': input_string}
outputs = {'output': output}
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
with tf.Graph().as_default():
with tf.Session().as_default() as session:
# Using a computed input gives confidence that the graphs are fused.
input_string = tf.constant('dog')
inputs = {'input': input_string}
_, outputs = (
saved_transform_io.partially_apply_saved_transform_internal(
export_path, inputs))
session.run(tf.tables_initializer())
result = session.run(outputs['output'])
self.assertEqual(1, result)
def test_sparse_roundtrip(self):
export_path = os.path.join(tempfile.mkdtemp(), 'export')
with tf.Graph().as_default():
with tf.Session().as_default() as session:
input_float = tf.sparse_placeholder(tf.float32)
output = input_float / 5.0
inputs = {'input': input_float}
outputs = {'output': output}
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
with tf.Graph().as_default():
with tf.Session().as_default() as session:
indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
values = np.array([1.0, 2.0], dtype=np.float32)
shape = np.array([7, 9, 2], dtype=np.int64)
input_sparse = tf.SparseTensor(
indices=indices, values=values, dense_shape=shape)
# Using a computed input gives confidence that the graphs are fused
inputs = {'input': input_sparse * 10}
_, outputs = (
saved_transform_io.partially_apply_saved_transform_internal(
export_path, inputs))
output_sparse = outputs['output']
self.assertTrue(isinstance(output_sparse, tf.SparseTensor))
result = session.run(output_sparse)
# indices and shape unchanged; values divided by 2
self.assertEqual(indices.tolist(), result.indices.tolist())
self.assertEqual([2.0, 4.0], result.values.tolist())
self.assertEqual(shape.tolist(), result.dense_shape.tolist())
def test_stale_asset_collections_are_cleaned(self):
vocabulary_file = os.path.join(
compat.as_bytes(test.get_temp_dir()), compat.as_bytes('asset'))
file_io.write_string_to_file(vocabulary_file, 'foo bar baz')
export_path = os.path.join(tempfile.mkdtemp(), 'export')
# create a SavedModel including assets
with tf.Graph().as_default():
with tf.Session().as_default() as session:
input_string = tf.placeholder(tf.string)
# Map string through a table loaded from an asset file
table = lookup_ops.index_table_from_file(
vocabulary_file, num_oov_buckets=12, default_value=12)
output = table.lookup(input_string)
inputs = {'input': input_string}
outputs = {'output': output}
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
# Load it and save it again repeatedly, verifying that the asset collections
# remain valid.
for _ in [1, 2, 3]:
with tf.Graph().as_default() as g:
with tf.Session().as_default() as session:
input_string = tf.constant('dog')
inputs = {'input': input_string}
_, outputs = (
saved_transform_io.partially_apply_saved_transform_internal(
export_path, inputs))
self.assertEqual(
1, len(g.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
self.assertEqual(
0, len(g.get_collection(tf.saved_model.constants.ASSETS_KEY)))
# Check that every ASSET_FILEPATHS refers to a Tensor in the graph.
# If not, get_tensor_by_name() raises KeyError.
for asset_path in g.get_collection(ops.GraphKeys.ASSET_FILEPATHS):
tensor_name = asset_path.name
g.get_tensor_by_name(tensor_name)
export_path = os.path.join(tempfile.mkdtemp(), 'export')
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4890741
|
from flask import Flask
from flask import render_template
from flask import url_for
from flask import request
from flask import redirect
from flask import session
from flask_bootstrap import Bootstrap
import sqlite3 as sql
app = Flask(__name__)
# Creates the secret key, should be more secure in production.
app.secret_key = 'secretkey'
# Static routes, some not used
@app.route("/login")
def login_page():
return render_template("login.html")
@app.route("/createaccount")
def createaccount_page():
return render_template("createaccount.html")
@app.route("/main")
def main_page():
return render_template("main.html")
@app.route("/about")
def about_page():
return render_template("about.html")
# Creates the database. Delete the old database, uncomment the following script, and run it to create
# the database. Creates the users and transactions tables.
# def create_database():
# conn = sql.connect("bank.db")
# conn.execute("CREATE TABLE users (userid INTEGER PRIMARY KEY AUTOINCREMENT, username TEXT, password TEXT, fname TEXT, lname TEXT, phonenumber INTEGER, email TEXT, balance INT)")
# conn.execute("CREATE TABLE transactions (transactionid INTEGER PRIMARY KEY AUTOINCREMENT, userid INTEGER, type TEXT, amount INTEGER, CONSTRAINT fk_users FOREIGN KEY (userid) REFERENCES users(userid))")
# conn.close()
# create_database()
# Code for the test page. Simply displays all of the information from the database on this page for both tables.
@app.route("/testpage")
def list_data():
con = sql.connect("bank.db")
con.row_factory = sql.Row
cur = con.cursor()
cur.execute("SELECT * FROM users")
rows = cur.fetchall()
con.commit()
con = sql.connect("bank.db")
con.row_factory = sql.Row
cur = con.cursor()
cur.execute("SELECT * FROM transactions")
    rowstransactions = cur.fetchall()
    return render_template("testpage.html", rows=rows, rows2=rowstransactions)
# Used in createaccount.html form. Extracts information from the form and inserts it into the database.
# Then sends back to the login page if completed successfully.
@app.route("/addrec", methods=["POST"])
def addrec():
if request.method == "POST":
username = request.form["username"]
password = request.form["password"]
fname = request.form["firstname"]
lname = request.form["lastname"]
phone = request.form["phonenumber"]
email = request.form["email"]
with sql.connect("bank.db") as con:
cur = con.cursor()
cur.execute("INSERT INTO users (username, password, fname, lname, phonenumber, email, balance) VALUES (?, ?, ?, ?, ?, ?, 0)"
, [username, password, fname, lname, phone, email])
con.commit()
return redirect(url_for('login_page'))
# Takes information from login.html. Stores the form values as session variables. If not done
# this way, the app will crash when navigating away from the main page. After storing the values,
# the page redirects to the createmain method.
@app.route('/form_login', methods=['POST', 'GET'])
def login():
session['loginusername'] = request.form['username']
session['loginpassword'] = request.form['password']
return redirect(url_for('createmain'))
#Populates the main.html file with information.
@app.route('/newmain')
def createmain():
#Takes values from sessions and makes them local variables
loginusername = session['loginusername']
loginpassword = session['loginpassword']
    # Gets all of the information for the user where username = loginusername. Stores it in the testquery variable.
con = sql.connect("bank.db")
con.row_factory = sql.Row
cur = con.cursor()
cur.execute("SELECT * FROM users WHERE username=? AND password=?", [loginusername, loginpassword])
testquery = cur.fetchone()
con.commit()
    # Used to get the userid. The query returns a row (tuple); this extracts the single userid value.
con = sql.connect("bank.db")
con.row_factory = sql.Row
cur = con.cursor()
query = cur.execute("SELECT userid FROM users WHERE username = ? ", [loginusername])
    # Checks whether a row exists first; if not, sets the user id to 0 so that the application does not crash.
row = cur.fetchone()
if row:
session['userid'] = row[0]
else:
session['userid'] = 0
# Gets all of the transactions when equal to session stored user id.
con = sql.connect("bank.db")
con.row_factory = sql.Row
cur = con.cursor()
cur.execute("SELECT * FROM transactions WHERE userid = ?", [session['userid']])
    rowstransactions2 = cur.fetchall()
con.commit()
# if testquery finds a match of username and password, displays main.html with populated data. Else, returns to login page
if testquery:
return render_template('main.html', row = testquery, rows3 = rowstransactions2)
else:
return redirect(url_for('login_page'))
# Adds a transaction to the transactions table from the main.html form
@app.route('/addtransaction', methods=['POST', 'GET'])
def addtransaction():
#receives information from the form, determines what radio button was picked
formamount = request.form['amount']
button = request.form['flexRadioDefault']
transactiontype = "Deposit"
if button == 'Deposit':
transactiontype = 'Deposit'
else:
transactiontype = 'Withdrawal'
# inserts into table, and then updates the balance for the user id depending on choice of deposit or withdrawal
with sql.connect("bank.db") as con:
cur = con.cursor()
cur.execute("INSERT INTO transactions (userid, type, amount) VALUES (?, ?, ?)" , [session['userid'], transactiontype, formamount])
if transactiontype == 'Deposit':
cur.execute("UPDATE users SET balance = (balance + ?) WHERE username = ?", [formamount, session['loginusername']])
elif transactiontype == 'Withdrawal':
cur.execute("UPDATE users SET balance = (balance - ?) WHERE username = ?", [formamount, session['loginusername']])
con.commit()
return redirect(url_for('createmain'))
if __name__ == '__main__':
app.run(debug=True)
|
StarcoderdataPython
|
3272565
|
import logging
from test.rules.inspect.utils import cache_nlp, dep_list, if_inside
from test.rules.utils.load_dataset import load_dataset
from src.utils.spacy import get_spacy
logger = logging.getLogger(__name__)
if __name__ == "__main__":
data = list(load_dataset("pretrained_data/task_core_aux_cond/all.jsonl"))
nlp = get_spacy()
aux: str = "Aux"
for check in dep_list:
total, match = 0, 0
for i, sent, anno in data:
sent_processed = cache_nlp(nlp, sent)
if if_inside(sent_processed, check):
total += 1
for start, end, lab in anno:
if lab == aux:
match += 1
break
if total > 0 and match / total > 0.4:
logger.error(
f"{check} - Match {match} out of {total} with {match / total if total else 0}"
)
else:
logger.warning(
f"{check} - Match {match} out of {total} with {match / total if total else 0}"
)
# agent - Match 41 out of 55 with 0.7454545454545455
# oprd - Match 5 out of 9 with 0.5555555555555556
# relcl - Match 130 out of 187 with 0.6951871657754011
|
StarcoderdataPython
|
5146231
|
import numpy as np
import pandas as pd
from tqdm import tqdm as tqdm
import torch
from core.data.utils import AdversarialDatasetWithPerturbation
# Assumption: replicate_input (used in line_search below) is not defined in this
# file; advertorch provides a helper with this name and behaviour.
from advertorch.utils import replicate_input
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def get_orthogonal_vector(r):
"""
Returns a random unit vector orthogonal to given unit vector r.
"""
r = r / torch.norm(r.view(-1), p=2)
p = torch.rand(r.numel()).to(device)
p = p - p.dot(r.view(-1))*r.view(-1)
p = p / torch.norm(p, p=2)
p = p.view(r.shape)
assert np.isclose(torch.dot(p.view(-1), r.view(-1)).item(), 0, atol=1e-6) == True, 'p and r are not orthogonal.'
return p
def line_search(model, x, r, y, precision=0.1, ord=2, max_alpha=35, normalize_r=True, clip_min=0., clip_max=1., ortho=False):
"""
Perform line search to find margin.
"""
x, r = x.unsqueeze(0), r.unsqueeze(0)
pert_preds = model(torch.clamp(x+r, 0, 1))
if normalize_r:
r = r / r.view(-1).norm(p=ord)
if ortho:
r = get_orthogonal_vector(r)
orig_preds = model(x)
orig_labels = orig_preds.argmax(dim=1)
pert_x = replicate_input(x)
for a in range(0, max_alpha + 1): # fast search
pert_labels = model(pert_x).argmax(dim=1)
if pert_labels != orig_labels:
break
pert_x = x + a*r
alpha = a
pert_x = replicate_input(x)
if alpha != max_alpha: # fine-tune search with given precision
for a in np.arange(alpha - 1, alpha + precision, precision):
pert_labels = model(pert_x).argmax(dim=1)
if pert_labels != orig_labels:
break
pert_x = x + a*r
margin = a
else:
margin = max_alpha
pert_labels = pert_preds.argmax(dim=1)
return {'mar': margin, 'true': y, 'orig_pred': orig_labels.item(), 'pert_pred': pert_labels.item()}
def measure_margin(trainer, data_path, precision, ord=2, ortho=False, verbose=False):
"""
Estimate margin using line search.
"""
if ord not in [2, np.inf]:
raise NotImplementedError('Only ord=2 and ord=inf have been implemented!')
trainer.model.eval()
mar_adv_any = []
dataset = AdversarialDatasetWithPerturbation(data_path)
for x, r, y in tqdm(dataset, disable=not verbose):
x, r = x.to(device), r.to(device)
mar_any = line_search(trainer.model, x, r, y, ord=ord, precision=precision, ortho=ortho)
mar_adv_any.append(mar_any)
assert len(mar_adv_any) == len(dataset), 'Lengths must match'
mar_adv_any = pd.DataFrame(mar_adv_any)
mar10, mar50, mar90 = np.percentile(mar_adv_any['mar'], [10, 50, 90])
out_margin = {'mean_margin': np.mean(mar_adv_any['mar']), '10_margin': mar10, '50_margin': mar50, '90_margin': mar90}
return mar_adv_any, out_margin
|
StarcoderdataPython
|
3426109
|
<reponame>jpypi/dup-image-search
#!/usr/bin/python
"""
find_all_image_duplicates.py
Given an input of hashes and corresponding filenames, generates 2 files:
exact_duplicates.txt - contains the listing of image filenames that are
duplicates and what they are duplicates of
corrupt_images.txt - contains a list of images that were corrupt in
some way
:author: <NAME>
:license: MIT
"""
import sys
import argparse
import filecmp
from PIL import Image
def is_valid_image(filepath):
try:
image = Image.open(filepath)
image.verify()
except IndexError:
return False
except IOError:
return False
return True
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("filename", help="file containing hashes")
args = parser.parse_args()
duplicates_file = open("exact_duplicates.txt", "w")
corruption_file = open("corrupt_images.txt", "w")
line_counter = 0
duplicate_counter = 0
corruption_counter = 0
hash_collisions = 0
with open(args.filename, "r") as f:
last_hash = None
identical_hash_filenames = []
line = f.readline()
while line:
line_arr = line.strip().split()
hash = line_arr[0]
image_filename = " ".join(line_arr[1:])
if(hash == last_hash):
found = False
for file in identical_hash_filenames:
if(filecmp.cmp(image_filename, file)):
duplicates_file.write(
"{0},{1}\n".format(image_filename, file))
duplicate_counter += 1
found = True
break
if(not found):
if(is_valid_image(image_filename)):
identical_hash_filenames.append(image_filename)
hash_collisions += 1
else:
corruption_file.write(
"{0}\n".format(image_filename))
corruption_counter += 1
else:
if(is_valid_image(image_filename)):
identical_hash_filenames = [image_filename]
else:
identical_hash_filenames = []
corruption_file.write(
"{0}\n".format(image_filename))
corruption_counter += 1
last_hash = hash
line_counter += 1
if(line_counter % 50000 == 0):
print "Update: scanned {0!s} files.".format(line_counter)
line = f.readline()
print "Scanned {0!s} files.".format(line_counter)
print "Total exact duplicates: {0!s}.".format(duplicate_counter)
print "Total corrupt files: {0!s}.".format(corruption_counter)
print "Hash collisions: {0!s}.".format(hash_collisions)
print "See {0} and {1} for more details.".format(
"exact_duplicates.txt", "corrupt_images.txt")
if __name__ == "__main__":
main(sys.argv[1:])
|
StarcoderdataPython
|
387976
|
import numpy as np
from os import listdir
from os.path import isfile, join, dirname
from scipy.io import loadmat
meta_clsloc_file = join(dirname(__file__), "data", "meta_clsloc.mat")
synsets = loadmat(meta_clsloc_file)["synsets"][0]
synsets_imagenet_sorted = sorted([(int(s[0]), str(s[1][0])) for s in synsets[:1000]],
key=lambda v:v[1])
corr = {}
for j in range(1000):
corr[synsets_imagenet_sorted[j][0]] = j
corr_inv = {}
for j in range(1,1001):
corr_inv[corr[j]] = j
def depthfirstsearch(id_, out=None):
if out is None:
out = []
if isinstance(id_, int):
pass
else:
id_ = next(int(s[0]) for s in synsets if s[1][0] == id_)
out.append(id_)
children = synsets[id_-1][5][0]
for c in children:
depthfirstsearch(int(c), out)
return out
def synset_to_dfs_ids(synset):
ids = [x for x in depthfirstsearch(synset) if x <= 1000]
ids = [corr[x] for x in ids]
return ids
def synset_to_id(synset):
a = next((i for (i,s) in synsets if s == synset), None)
return a
def id_to_synset(id_):
return str(synsets[corr_inv[id_]-1][1][0])
def id_to_words(id_):
return synsets[corr_inv[id_]-1][2][0]
def pprint_output(out, n_max_synsets=10):
best_ids = out.argsort()[::-1][:10]
for u in best_ids:
print("%.2f"% round(100*out[u],2)+" : "+id_to_words(u))
|
StarcoderdataPython
|
196323
|
import requests
from allauth.socialaccount import app_settings
from allauth.socialaccount.providers.oauth2_provider.client import (
OAuth2Client,
OAuth2Error,
)
from .provider import UntappdProvider
class UntappdOAuth2Client(OAuth2Client):
"""
Custom client because Untappd:
* uses redirect_url instead of redirect_uri
* nests access_token inside an extra 'response' object
"""
def get_access_token(self, code):
data = {
"client_id": self.consumer_key,
"redirect_url": self.callback_url,
"grant_type": "authorization_code",
"response_type": "code",
"client_secret": self.consumer_secret,
"code": code,
}
params = None
self._strip_empty_keys(data)
url = self.access_token_url
if self.access_token_method == "GET":
params = data
data = None
# Allow custom User Agent to comply with Untappd API
settings = app_settings.PROVIDERS.get(UntappdProvider.id, {})
headers = {"User-Agent": settings.get("USER_AGENT", "django-allauth")}
# TODO: Proper exception handling
resp = requests.request(
self.access_token_method,
url,
params=params,
data=data,
headers=headers,
)
access_token = None
if resp.status_code == 200:
access_token = resp.json()["response"]
if not access_token or "access_token" not in access_token:
raise OAuth2Error("Error retrieving access token: %s" % resp.content)
return access_token
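# Settings sketch (illustrative): Untappd requires a custom User-Agent, which this client reads
# from the allauth provider settings, e.g. in Django settings.py (assuming the standard
# SOCIALACCOUNT_PROVIDERS setting backs app_settings.PROVIDERS):
#
#   SOCIALACCOUNT_PROVIDERS = {
#       "untappd": {"USER_AGENT": "my-app/1.0 (contact@example.com)"},
#   }
#
# The "untappd" key assumes UntappdProvider.id == "untappd"; check the provider module if unsure.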
|
StarcoderdataPython
|
384925
|
<gh_stars>0
"""
Python pickle and jsonpickle demo object serialization
Serialize an object, save it to a file and use it later on the state
that it was pickled.
AUTHOR
<NAME>
DATE
17/01/2020
# refactor1 to generate a two teams game and play inning by inning
# refactor2 keep the score
# refactor3 allow to make changes to the lineup with the bench
# refactor4 allow mercy rule
# refactor5 display the score once is available
# TODO: ramdomly generate a rain delay game if it is before the
# 5th inning, then save the game using pickling and then load it
# back and continue
"""
from csv import DictReader
from random import choice
# import pickle
# import jsonpickle
class Player():
"""
A class used to represent a baseball player
Attributes
----------
name : str
name of the player
position : str
field position of the player
plays : list
plays the player has made during a game
Methods
-------
make_a_play(new_play)
append a player's new play to a list
get_plays()
return a list of the players plays during a game
"""
def __init__(self, name, position):
"""
initialize the player's name, position and create an
empty list of plays
"""
self.name = name
self.position = position
self.plays = []
def __repr__(self):
return f"Player class: name={self.name}, position={self.position}"
# getters and setters the Python way
@property
def name(self):
"""
return the player's name @property
"""
return self.__name
@name.setter
def name(self, new_name):
"""
set the player's name property
"""
if new_name == "":
new_name = "Unknowm Player"
self.__name = new_name
@property
def position(self):
"""
return the player's fielding position abbreaviation @property
"""
return self.__position
@position.setter
def position(self, new_position):
"""
set the player's name property, if blank set to Utility player
"""
if new_position == "":
new_position = "Utility"
self.__position = new_position
def make_a_play(self, new_play):
"""
create a new game play for the player
"""
self.plays.append(new_play)
def get_plays(self):
"""
return the list of plays of the player in a game
"""
return self.plays
class Team():
"""
A class used to represent a baseball team
Attributes
----------
name : str
name of the team
players : list
list of players in the lineup
bench : list
list of the players sitting on the bench
at_bat : int
current player at bat (1 - 9)
inning : int
current inning
outs : int
current number of outs
hit : tuple
abbreviations that represent a non-out play
out : tuple
abbreviations that represent an out play
plays : tuple
all possible plays
score : list
score for each inning
Methods
-------
name(self)
return name of the Team
lineup(self, new_player)
create a new lineup position for player, if the
lineup is completed with 9 players, add the player
to the bench
game(self)
generate a 9 inning game
scoring_sheet(self)
display the generated game scoring sheets
"""
hit = {"H": 1, "2H": 2, "3H": 3, "HR": 4}
safe = {"BB": 1, "IBB": 1, "HBP": 1}
out = ("KS", "Kl", "1-3", "2-3", "U3", "4-3",
"5-3", "6-3", "F1", "F2", "F3", "F4",
"F5", "F6", "F7", "F8", "F9", "FO"
)
def __init__(self, name):
self.name = name
self.players = []
self.bench = []
self.at_bat = 1
self.inning = 1
self.outs = 0
self.plays = list(self.hit.keys()) + \
list(self.safe.keys()) + \
list(self.out)
self.score = []
self.bases = []
def __repr__(self):
return f"Team class: name={self.name}"
# getters and setters the Python way
@property
def name(self):
"""
Getter of name
"""
return self.__name
@name.setter
def name(self, new_name):
if new_name == "":
new_name = "Unknowm Club"
self.__name = new_name
def lineup(self, new_player):
"""
Add a new player to the lineup. If the lineup is
filled with the 9 batters, add the player to the
bench
"""
if isinstance(new_player, Player):
if len(self.players) < 9:
self.players.append(new_player)
else:
self.bench.append(new_player)
def game(self):
"""
generate a team's half game
to be refactored to have two teams cometing with each other
"""
# refactor: keep the score
# refactor1 to generate a two teams game and play inning by inning
# refactor3 allow to make changes to the lineup with the bench
# refactor4 allow mercy rule
def clear_score():
self.score = [0] * 10 # index 0 is left untouched
def empty_bases():
# players on base 0: HP, 1: 1B, 2: 2B, 3: 3B
# if bases length > 4, players have been pushed by a play
self.bases = empty_base * 4
def play_bases(player):
self.bases[0] = player # player is at bat on home plate
play = choice(self.plays) # and makes a random play
player.make_a_play(play) # persist the play within the player
if play in self.out:
self.outs += 1
if self.outs == 3:
self.outs = 0
self.inning += 1
empty_bases()
else:
# get how many the player advances by concatenating
# the hits and safe plays and getting the value
how_many_bases = {**self.hit, **self.safe}[play]
self.bases = (empty_base * how_many_bases) + [player] + \
self.bases[1:]
bases_pushed = self.bases[4:] # any length > 4 is a play push
# sum of pushed bases with players on them
runs_scored = sum([1 for player in bases_pushed
if isinstance(player, Player)])
self.score[self.inning] += runs_scored
self.bases = self.bases[:4]
# the game starts here!
empty_base = [None]
clear_score()
empty_bases()
if len(self.players) == 9: # enough players?
while self.inning < 10: # games are 9 inning max
if self.at_bat > 9: # rotate the lineup
self.at_bat = 1
idx = self.at_bat - 1
play_bases(self.players[idx])
self.at_bat += 1
def scoring_sheet(self):
"""
Return the score of the Team in the game
"""
return self.score
def box_score(self):
"""
game's box score for one of the teams
"""
print(f"{self.name}")
print("\nBox Score")
print("---------")
idx = 0
while idx < len(self.players):
print(f"{idx + 1}. {self.players[idx].name}")
player_plays = self.players[idx].get_plays()
for play in player_plays:
print(f" {play}", end="")
print("")
idx += 1
# TODO: ramdomly generate a rain delay game if it is before the
# 5th inning, then save the game using pickling and then load it
# back and continue
def main():
"""
generate a baseball game
"""
brussels_kangaroos = Team("<NAME>")
# ----------------------
# to refactor all these:
# ----------------------
# get the players and build a lineup and bench
with open("players.csv") as file:
csv_reader = DictReader(file)
for player_row in csv_reader:
player = Player(player_row["name"], player_row["position"])
brussels_kangaroos.lineup(player)
brussels_kangaroos.game()
bru_score = brussels_kangaroos.scoring_sheet()
scoring_display = " " * 8
for inning in range(1, len(bru_score)):
scoring_display += " | " + str(inning)
if inning == 9:
scoring_display += " | \n"
scoring_display += "-" * 72
scoring_display += "\n"
scoring_display += brussels_kangaroos.name[:8]
idx = 0
while idx < len(bru_score):
if idx > 0:
scoring_display += " | " + str(bru_score[idx])
if idx == 9:
scoring_display += f" | {sum(bru_score)}\n"
idx += 1
#
hoboken_pioneers = Team("Hoboken Pioneers")
# get the players and build a lineup and bench
with open("players_hoboken.csv") as file:
csv_reader = DictReader(file)
for player_row in csv_reader:
player = Player(player_row["name"], player_row["position"])
hoboken_pioneers.lineup(player)
hoboken_pioneers.game()
hob_score = hoboken_pioneers.scoring_sheet()
scoring_display += "-" * 72
scoring_display += "\n"
idx = 0
scoring_display += hoboken_pioneers.name[:8]
while idx < len(hob_score):
if idx > 0:
scoring_display += " | " + str(hob_score[idx])
if idx == 9:
scoring_display += f" | {sum(hob_score)}\n"
idx += 1
print(scoring_display)
brussels_kangaroos.box_score()
hoboken_pioneers.box_score()
if __name__ == "__main__":
main()
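# players.csv sketch (illustrative): the DictReader calls above only require "name" and
# "position" columns, e.g.
#
#   name,position
#   John Doe,SS
#   Jane Roe,CF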
|
StarcoderdataPython
|
8029490
|
<filename>tests/demos/demo_URL_recognition.py
# # -*- coding:utf-8 -*-
# Author:wancong
# Date: 2018-04-30
from pyhanlp import *
def demo_URL_recognition(text):
""" 演示URL识别
>>> text = '''HanLP的项目地址是https://github.com/hankcs/HanLP,
... 发布地址是https://github.com/hankcs/HanLP/releases,
... 我有时候会在www.hankcs.com上面发布一些消息,
... 我的微博是http://weibo.com/hankcs/,会同步推送hankcs.com的新闻。
... 听说.中国域名开放申请了,但我并没有申请hankcs.中国,因为穷……
... '''
>>> demo_URL_recognition(text)
[HanLP/nx, 的/ude1, 项目/n, 地址/n, 是/vshi, https://github.com/hankcs/HanLP/xu, ,/w,
/w, 发布/v, 地址/n, 是/vshi, https://github.com/hankcs/HanLP/releases/xu, ,/w,
/w, 我/rr, 有时候/d, 会/v, 在/p, www.hankcs.com/xu, 上面/f, 发布/v, 一些/m, 消息/n, ,/w,
/w, 我/rr, 的/ude1, 微博/n, 是/vshi, http://weibo.com/hankcs//xu, ,/w, 会/v,
同步/vd, 推送/nz, hankcs.com/xu, 的/ude1, 新闻/n, 。/w,
/w, 听说/v, ./w, 中国/ns, 域名/n, 开放/v, 申请/v, 了/ule, ,/w, 但/c, 我/rr, 并/cc,
没有/v, 申请/v, hankcs.中国/xu, ,/w, 因为/c, 穷/a, ……/w,
/w]
https://github.com/hankcs/HanLP
https://github.com/hankcs/HanLP/releases
www.hankcs.com
http://weibo.com/hankcs/
hankcs.com
hankcs.中国
"""
Nature = JClass("com.hankcs.hanlp.corpus.tag.Nature")
Term = JClass("com.hankcs.hanlp.seg.common.Term")
URLTokenizer = JClass("com.hankcs.hanlp.tokenizer.URLTokenizer")
term_list = URLTokenizer.segment(text)
print(term_list)
for term in term_list:
if term.nature == Nature.xu:
print(term.word)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True, optionflags=doctest.NORMALIZE_WHITESPACE)
|
StarcoderdataPython
|
5139123
|
<reponame>jicewarwick/DingTalkMessageBot<filename>DingTalkMessageBot.py
import base64
import hashlib
import hmac
import json
import time
import urllib.parse
import requests
class DingTalkMessageBot(object):
msg_template = {
"msgtype": "text",
"text": {
"content": ""
}
}
header = {
'Content-Type': 'application/json',
'Charset': 'UTF-8'
}
def __init__(self, token: str, secret: str = None):
self.token = token
self.secret = secret
def _generate_url(self):
if self.secret:
timestamp = str(round(time.time() * 1000))
secret_enc = self.secret.encode('utf-8')
string_to_sign = '{}\n{}'.format(timestamp, self.secret)
string_to_sign_enc = string_to_sign.encode('utf-8')
hmac_code = hmac.new(secret_enc, string_to_sign_enc, digestmod=hashlib.sha256).digest()
sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
return f'https://oapi.dingtalk.com/robot/send?access_token={self.token}×tamp={timestamp}&sign={sign}'
else:
return f'https://oapi.dingtalk.com/robot/send?access_token={self.token}'
def send_message(self, msg: str):
info = self.msg_template.copy()
info['text']['content'] = msg
requests.post(self._generate_url(), json=info, headers=self.header)
@classmethod
def from_config(cls, json_loc: str, bot_name: str):
with open(json_loc, 'r', encoding='utf-8') as f:
config = json.load(f)
assert 'ding_bot' in config.keys(), 'config file must contain entry "ding_bot"'
assert bot_name in config['ding_bot'].keys(), f'config file does not contain {bot_name} in entry "ding_bot"'
assert 'token' in config['ding_bot'][bot_name].keys(), f"config does not provide {bot_name}'s token"
token = config['ding_bot'][bot_name]['token']
if 'secret' in config['ding_bot'][bot_name].keys():
secret = config['ding_bot'][bot_name]['secret']
else:
secret = None
return cls(token, secret)
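# Usage sketch (illustrative; the token/secret values are placeholders):
#
#   bot = DingTalkMessageBot(token='xxxx', secret='SECyyyy')
#   bot.send_message('build finished')
#
# Or load credentials from a JSON file shaped like
#   {"ding_bot": {"alerts": {"token": "xxxx", "secret": "SECyyyy"}}}
# via DingTalkMessageBot.from_config('config.json', 'alerts').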
|
StarcoderdataPython
|
209130
|
<gh_stars>1-10
"""Tests numerical inverse kinematics pipeline.
"""
import math
import unittest
import random
from colony_picker.inverse_kinematics import*
from tests.helper_functions_for_tests import*
from colony_picker.dh_params import AR3_DH_PARAMS, AR3_NUM_JOINTS
animation_test_warning = "Only one animation test should be run at a time."
class TestInverseKinematics(unittest.TestCase):
"""A unit test harness for testing the numerical inverse kinematics pipeline
"""
def test_get_t_mat(self):
"""Tests that we can accurately compute the position and orientation
of a single joint by comparing our solution to <NAME>'s solution
for the AR3 robot arm given a single theta and set of dh params for that
joint.
"""
single_joint_dh_param = AR3_DH_PARAMS[:, 0]
# Testing theta = 0 and thetas greater than 2*pi to confirm we can
# handle angles greater than a period.
test_0_solution = np.array([[1, 0, 0, 64.2], [0, 0, 1, 0], [
0, -1, 0, 169.77], [0, 0, 0, 1]])
self.assertTrue(
np.allclose(get_t_mat(0, single_joint_dh_param), test_0_solution))
self.assertTrue(
np.allclose(get_t_mat(math.pi*2, single_joint_dh_param),
test_0_solution))
self.assertTrue(
np.allclose(get_t_mat(math.pi*4, single_joint_dh_param),
test_0_solution))
# Testing theta = pi/2, confirming we can handle thetas other than zero
# and that we can handle thetas within a quadrant.
test_1_solution = np.array(
[[0, 0, -1, 0], [1, 0, 0, 64.2], [0, -1, 0, 169.77], [0, 0, 0, 1]])
self.assertTrue(
np.allclose(get_t_mat(math.pi/2, single_joint_dh_param),
test_1_solution))
# Testing theta = pi, confirming that we can handle the case when theta
# is half a period and generally confirming we can handle thetas other
# than zero.
test_2_solution = np.array(
[[-1, 0, 0, -64.2], [0, 0, -1, 0], [0, -1, 0, 169.77],
[0, 0, 0, 1]])
self.assertTrue(
np.allclose(get_t_mat(math.pi, single_joint_dh_param),
test_2_solution))
def test_get_t_mats(self):
"""Confirming that our forward kinematics pipeline results in the same
values as those <NAME> provided for the AR3 robot arm given
different sets of thetas.
"""
# Confirming we get the same result as <NAME> when all thetas = 0.
test_0_thetas = np.zeros(6)
test_0_solution = np.array(
[[0, 0, -1, 628.08], [0, -1, 0, 0], [-1, 0, 0, 169.77],
[0, 0, 0, 1]])
self.assertTrue(np.allclose(get_t_mats(
test_0_thetas, AR3_DH_PARAMS)[-1], test_0_solution))
# Confirming we get the same result as <NAME> when all thetas are
# not the same.
test_1_thetas = np.array(
[0, math.pi/4, math.pi/2, 3*math.pi/4, math.pi, 2*math.pi])
test_1_solution = np.array(
[[-0.5, 0.5, -0.7071067812, 148.0770064],
[0.7071067812, 0.7071067812, 0, 0],
[0.5, -0.5, -0.7071067812, -177.6881301], [0, 0, 0, 1]])
# Confirming we get the same result as <NAME> when most thetas are
# greater than a period.
self.assertTrue(np.allclose(get_t_mats(
test_1_thetas, AR3_DH_PARAMS)[-1], test_1_solution))
test_2_thetas = np.array([360, 405, 765, 450, 495, 540])*(math.pi/180)
test_2_solution = np.array([[0, -1, 0, 279.8675683],
[-0.7071067812, 0, 0.7071067812, -25.63262082],
[-0.7071067812, 0, -0.7071067812, -242.8949474],
[0, 0, 0, 1]])
self.assertTrue(np.allclose(get_t_mats(
test_2_thetas, AR3_DH_PARAMS)[-1], test_2_solution))
def test_wrap_joint_angles(self):
"""Confirming that the wrap function properly confines angles in all
quadrants that are greater than 360 degrees to a period of -180 -> 180
degrees.
"""
# Testing angles in different quadrants and in different representations
# (i.e. 367 and 7 which are the same angle, but both representations are
# tested).
test_cases = [(7, 7), (367, 7), (540, -180),
(-75, -75), (-360, 0), (-367, -7),
(-460, -100)]
for test_case in test_cases:
joint_angle, euler_angle = test_case
joint_angle_arr = np.array([joint_angle])
euler_angle_arr = np.array([euler_angle])
self.assertTrue(wrap_joint_angles(
joint_angle_arr, radians=False) == euler_angle_arr)
def test_find_joint_angles_random_pose(self):
"""Testing that any random pose can be solved for with smart seed,
regardless of the initial thetas fed into the optimization algorithm.
"""
iters = 100
title_str = "find joint angles random pose"
print_decorated_title(title_str)
thetas_init = np.zeros(AR3_NUM_JOINTS)
for iter in range(iters):
# Confirming that optimization solves always given any reasonable
# desired end effector pose.
test_joint_angles = np.array(
[random.uniform(-math.pi, math.pi) for i in range(AR3_NUM_JOINTS)])
target_pose = get_end_effector_pose(
test_joint_angles, AR3_DH_PARAMS)
solved_joint_angles, solved = find_joint_angles(
thetas_init, target_pose, AR3_DH_PARAMS, smart_seed=True)
self.assertTrue(solved)
# Confirming that optimization is solving for the correct thetas.
solved_pose = get_end_effector_pose(
solved_joint_angles, AR3_DH_PARAMS)
self.assertTrue(np.allclose(get_error_vector(
target_pose, solved_pose), np.zeros(4), atol=1e-02))
draw_loading_bar(iter, iters, title_str)
if PRINT_PROGRESS:
print()
def test_find_joint_angles_incremental_pose(self):
"""Testing that incrementally moving the end effector, by requiring the
movement of a single joint, rather than choosing random positions, and
seeding the optimization algorithm with the current thetas (instead of
using smart seed), always solves.
"""
iters = 100
title_str = "find joint angles incremental pose"
print_decorated_title(title_str)
thetas_init = np.zeros(AR3_NUM_JOINTS)
for iter in range(iters):
rand_idx = random.randint(0, AR3_NUM_JOINTS - 1)
rand_theta = random.uniform(-math.pi, math.pi)
test_joint_angles = thetas_init
# Choosing a set of thetas with only one theta value different
# than the seed.
test_joint_angles[rand_idx] = rand_theta
target_pose = get_end_effector_pose(
test_joint_angles, AR3_DH_PARAMS)
solved_joint_angles, solved = find_joint_angles(
thetas_init, target_pose, AR3_DH_PARAMS)
self.assertTrue(solved)
# Updating the seed to be the current thetas.
thetas_init = solved_joint_angles
draw_loading_bar(iter, iters, title_str)
if PRINT_PROGRESS:
print()
@ unittest.skip(animation_test_warning)
def test_animate_forward_kinematics(self):
"""Testing that each joint rotates around the previous joint's
rotational axis when changing the corresponding theta and that this
behavior is observed for every joint angle possible. Also testing that
the structure of the arm matches the kinematics diagram provided by the
manufacturer.
"""
animate_forward_kinematics(AR3_DH_PARAMS)
@ unittest.skip(animation_test_warning)
def test_animate_inverse_kinematics_sphere(self):
"""Tests that when attempting impossible poses, the inverse kinematics
pipeline does well at estimating the joint angles. Also tests that
inverse kinematics solves for a variety of reasonable end effector poses
that are both very close to the current pose and very far away.
Human input allows us to test desired poses that are reasonable without
needing to generate them with the forward kinematics pipeline first.
"""
animate_inverse_kinematics_sphere(AR3_DH_PARAMS)
@ unittest.skip(animation_test_warning)
def test_animate_inverse_kinematics_sliders(self):
"""Allows us to confirm that inverse kinematics results in the end
effector moving as expected allong the x, y, and z axes as well as
rotate in x, y, and z according to euler ZYX convention.
"""
animate_inverse_kinematics_sliders(AR3_DH_PARAMS)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
12845700
|
<filename>jit_compiling/test.py
import torch
from torch.utils.cpp_extension import load
norm = load(name="two_norm",
sources=["two_norm/two_norm_bind.cpp", "two_norm/two_norm_kernel.cu"],
verbose=True)
n,m = 8,3
a = torch.randn(n,m)
b = torch.randn(n,m)
c = torch.zeros(1)
print("a:\n",a)
print("\nb:\n",b)
a = a.cuda()
b = b.cuda()
c = c.cuda()
norm.two_norm(a,b,c,n,m)
torch.cuda.synchronize()
print("\nresult by two_norm:",c)
print("\nresult by torch.norm:",torch.norm(a-b))
|
StarcoderdataPython
|
5049265
|
#! /bin/env python
import sys, os
import yaml
# set up PYTHONPATH
path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
libpath = path
sys.path.append(libpath)
import mcb
from mcb.config import Config
from mcb.runner import Runner
from mcb.frontends.cli import getCliRunner
config = mcb.config.Config()
data = yaml.load(open(path + '/tests/conf1.yaml'))
config.outputs = data['outputs']
SERVICES_TO_TEST=[
#'mcb.services.github.GithubService',
#'mcb.services.dropbo.DropboxService'
#'mcb.services.email.EmailImapService',
#'mcb.services.google.CalendarService',
'mcb.services.google.GmailService'
]
for name, conf in data['services'].items():
if name in SERVICES_TO_TEST:
config.services = {name: conf}
runner = getCliRunner(config)
runner.run()
#runner.saveConfig()
|
StarcoderdataPython
|
6417187
|
#!/usr/bin/env python
# encoding: utf-8
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
"""Base Configuration"""
    SECRET_KEY = os.environ.get('SECRET_KEY') or '<PASSWORD>'  # Modify your SECRET KEY; make it sufficiently complex
    TITLE = 'PersonalResume'  # Resume title, e.g. "Jack Ma's resume"
    SUB_TITLE = '好的东西往往都是很难描述的'  # Resume subtitle, a one-sentence self-introduction, e.g. "Good things are often hard to describe."
    READ_PASSWORD = '<PASSWORD>'  # Password for viewing the resume
    ADMIN_PASSWORD = '<PASSWORD>'  # Password for managing the resume
BASE_DIR = basedir
UPLOAD_FOLDER = basedir
PDF_OPTIONS = {
'page-size': 'Letter',
'margin-top': '0.75in',
'margin-right': '0.75in',
'margin-bottom': '0.75in',
'margin-left': '0.75in',
'encoding': "UTF-8",
'no-outline': None
}
@classmethod
def init_app(cls, app):
pass
class ModifiedConfig(Config):
"""Modified Your Configuration"""
@classmethod
def init_app(cls, app):
Config.init_app(app)
config = {
'modified': ModifiedConfig,
'default': ModifiedConfig,
}
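# Usage sketch (illustrative; assumes the usual Flask pattern — no app factory is shown in this file):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object(config['default'])
#   config['default'].init_app(app)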
|
StarcoderdataPython
|
9638950
|
<gh_stars>0
# Base classes
class BaseCompute:
def __init__(self, driver):
self.driver = driver
def create_vm(self, name=None, image_id=None, size_id=None, subnet=None, add_public_ip=True):
"""
:param name: String with a name for this new node
:type name: ``str``
:param size_id: The size identifier of resources allocated to this node.
:type size_id: ``str``
:param image_id: OS image identifier to boot on node.
:type image_id: ``str``
:param subnet: The subnet for this new node.
:type subnet: ``Subnet``
:param add_public_ip: If must be created a public IP for this new node.
:type add_public_ip: ``boolean``
"""
raise NotImplementedError(
'create_vm not implemented for this driver')
def list_vms(self):
raise NotImplementedError(
'list_vms not implemented for this driver')
def list_images(self):
raise NotImplementedError(
'list_images not implemented for this driver')
def get_image(self, image_id):
raise NotImplementedError(
'get_image not implemented for this driver')
def list_sizes(self):
raise NotImplementedError(
'list_sizes not implemented for this driver')
def get_size(self, size_id):
raise NotImplementedError(
'get_size not implemented for this driver')
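# Sketch of a concrete driver subclass (illustrative only; return shapes are assumptions):
#
#   class DummyCompute(BaseCompute):
#       def list_images(self):
#           return [{'id': 'img-1', 'name': 'ubuntu-20.04'}]
#
#       def get_image(self, image_id):
#           return next((i for i in self.list_images() if i['id'] == image_id), None)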
|
StarcoderdataPython
|
3554092
|
#!/usr/bin/env python
"""
Created by howie.hu at 30/03/2018.
"""
import time
from pprint import pprint
from talospider import Spider, Item, TextField, AttrField
from talospider.utils import get_random_user_agent
from owllook.database.mongodb import PyMongoDb, MotorBase
from owllook.utils.tools import async_callback
class HYNovelInfoItem(Item):
"""
    Defines an Item class that inherits from Item
"""
novel_name = AttrField(css_select="meta[property='og:title']", attr='content')
author = AttrField(css_select="meta[property='og:novel:author']", attr='content')
cover = AttrField(css_select="meta[property='og:image']", attr='content')
abstract = AttrField(css_select="meta[property='og:description']", attr='content')
status = AttrField(css_select="meta[property='og:novel:status']", attr='content')
novels_type = AttrField(css_select="meta[property='og:novel:category']", attr='content')
novel_chapter_url = AttrField(css_select='div#voteList a.index', attr='href')
latest_chapter = AttrField(css_select="meta[property='og:novel:latest_chapter_name']", attr='content')
latest_chapter_url = AttrField(css_select="meta[property='og:novel:latest_chapter_url']", attr='content')
latest_chapter_time = AttrField(css_select="meta[property='og:novel:update_time']", attr='content')
# novel_name = TextField(css_select='div.c-left>div.mod>div.hd>h2')
# author = TextField(css_select='div.author-zone div.right a.name strong')
# cover = AttrField(css_select='img.book-cover', attr='src')
# abstract = TextField(css_select='pre.note')
# status = ''
# novels_type = TextField(css_select='div.c-left>div.mod>div.hd>p.infos>span.cate>a')
# latest_chapter = ''
# novel_chapter_url = AttrField(css_select='div#voteList a.index', attr='href')
def tal_cover(self, cover):
if 'https' in cover:
return cover
else:
return cover.replace('http', 'https')
def tal_novels_type(self, novels_type):
types_dict = {
'社会': '都市'
}
print(types_dict.get(str(novels_type).strip(), novels_type))
return types_dict.get(str(novels_type).strip(), novels_type)
def tal_latest_chapter_time(self, latest_chapter_time):
return latest_chapter_time.replace(u'今天', str(time.strftime("%Y-%m-%d ", time.localtime()))).replace(u'昨日', str(
time.strftime("%Y-%m-%d ", time.localtime(time.time() - 24 * 60 * 60))))
class HYNovelInfoSpider(Spider):
start_urls = []
request_config = {
'RETRIES': 3,
'TIMEOUT': 10
}
headers = {
"User-Agent": get_random_user_agent()
}
all_novels_col = PyMongoDb().db.all_novels
all_novels_info_col = PyMongoDb().db.all_novels_info
def parse(self, res):
item_data = HYNovelInfoItem.get_item(html=res.html)
item_data['target_url'] = res.url
item_data['spider'] = 'heiyan'
item_data['updated_at'] = time.strftime("%Y-%m-%d %X", time.localtime())
print('获取 {} 小说信息成功'.format(item_data['novel_name']))
print(item_data)
self.all_novels_info_col.update({'novel_name': item_data['novel_name'], 'spider': 'heiyan'}, item_data,
upsert=True)
async_callback(self.save, res_dic=item_data)
async def save(self, **kwargs):
        # Save into the database
res_dic = kwargs.get('res_dic')
try:
motor_db = MotorBase().get_db()
await motor_db.all_novels_info.update_one({
'novel_name': res_dic['novel_name'], 'spider': 'heiyan'},
{'$set': res_dic},
upsert=True)
except Exception as e:
self.logger.exception(e)
if __name__ == '__main__':
HYNovelInfoSpider.start_urls = ['http://www.heiyan.com/book/62599']
# HYNovelInfoSpider.start_urls = [each.get('novel_url', '') for each in search_author('火星引力', 'qidian')]
print(HYNovelInfoSpider.start_urls)
HYNovelInfoSpider.start()
|
StarcoderdataPython
|
1639889
|
import remoto
import json
import ceph_medic
from ceph_medic import terminal
def get_mon_report(conn):
command = [
'ceph',
'--cluster=%s' % ceph_medic.metadata['cluster_name'],
'report'
]
out, err, code = remoto.process.check(
conn,
command
)
if code > 0:
terminal.error('failed to connect to the cluster to fetch a report from the monitor')
terminal.error('command: %s' % ' '.join(command))
for line in err:
terminal.error(line)
raise RuntimeError()
try:
return json.loads(b''.join(out).decode('utf-8'))
except ValueError:
return {}
def get_cluster_nodes(conn):
"""
Ask a monitor (with a pre-made connection) about all the nodes in
a cluster. This will be able to get us all known MONs and OSDs.
It returns a dictionary with a mapping that looks like::
{
'mons': [
{
'host': 'node1',
'public_ip': '192.168.1.100',
},
],
'osds': [
{
'host': 'node2',
'public_ip': '192.168.1.101',
},
{
'host': 'node3',
'public_ip': '192.168.1.102',
},
]
}
"""
report = get_mon_report(conn)
nodes = {'mons': [], 'osds': []}
try:
# XXX Is this really needed? in what case we wouldn't have a monmap
# with mons?
mons = report['monmap']['mons']
except KeyError:
raise SystemExit(report)
for i in mons:
nodes['mons'].append({
'host': i['name'],
'public_ip': _extract_ip_address(i['public_addr'])
})
osds = report['osd_metadata']
for i in osds:
nodes['osds'].append({
'host': i['hostname'],
'public_ip': _extract_ip_address(i['front_addr'])
})
return nodes
# XXX does not support IPV6
def _extract_ip_address(string):
"""
Addresses from Ceph reports can come up with subnets and ports using ':'
and '/' to identify them properly. Parse those types of strings to extract
just the IP.
"""
port_removed = string.split(':')[0]
return port_removed.split('/')[0]
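# e.g. _extract_ip_address('192.168.1.100:6789/0') -> '192.168.1.100'
#      _extract_ip_address('192.168.1.100/24')     -> '192.168.1.100'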
|
StarcoderdataPython
|
9719786
|
# coding=utf-8
"""
Dummy package that holds templates for code injection
"""
|
StarcoderdataPython
|
1997270
|
###############################################################################
# RobustScaler
import numpy
from nimbusml import FileDataStream
from nimbusml.datasets import get_dataset
from nimbusml.preprocessing.normalization import RobustScaler
# data input (as a FileDataStream)
path = get_dataset('infert').as_filepath()
data = FileDataStream.read_csv(path, sep=',')
print(data.head())
# row_num education age parity induced case spontaneous stratum pooled.stratum
# 0 1 0-5yrs 26 6 1 1 2 1 3
# 1 2 0-5yrs 42 1 1 1 0 2 1
# 2 3 0-5yrs 39 6 2 1 0 3 4
# 3 4 0-5yrs 34 4 2 1 0 4 2
# 4 5 6-11yrs 35 3 1 1 1 5 32
# transform usage
xf = RobustScaler(
center=True, scale=True,
columns={'age_norm': 'age', 'par_norm': 'parity'})
# fit and transform
features = xf.fit_transform(data)
print(features.head(n=10))
# row_num education age parity induced case spontaneous stratum pooled.stratum age_norm par_norm
# 0 1 0-5yrs 26 6 1 1 2 1 3 -0.434783 1.6
# 1 2 0-5yrs 42 1 1 1 0 2 1 0.956522 -0.4
# 2 3 0-5yrs 39 6 2 1 0 3 4 0.695652 1.6
# 3 4 0-5yrs 34 4 2 1 0 4 2 0.260870 0.8
# 4 5 6-11yrs 35 3 1 1 1 5 32 0.347826 0.4
# 5 6 6-11yrs 36 4 2 1 1 6 36 0.434783 0.8
# 6 7 6-11yrs 23 1 0 1 0 7 6 -0.695652 -0.4
# 7 8 6-11yrs 32 2 0 1 0 8 22 0.086957 0.0
# 8 9 6-11yrs 21 1 0 1 1 9 5 -0.869565 -0.4
# 9 10 6-11yrs 28 2 0 1 0 10 19 -0.260870 0.0
|
StarcoderdataPython
|
11236574
|
# encoding: utf-8
"""
@author: xyliao
@contact: <EMAIL>
"""
from copy import deepcopy
import numpy as np
import torch
from mxtorch import meter
from mxtorch.trainer import Trainer, ScheduledOptim
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from tqdm import tqdm
import models
from config import opt
from data import TextDataset, TextConverter
def get_data(convert):
dataset = TextDataset(opt.txt, opt.len, convert.text_to_arr)
return DataLoader(dataset, opt.batch_size, shuffle=True, num_workers=opt.num_workers)
def get_model(convert):
model = getattr(models, opt.model)(convert.vocab_size,
opt.embed_dim,
opt.hidden_size,
opt.num_layers,
opt.dropout)
if opt.use_gpu:
model = model.cuda()
return model
def get_loss(score, label):
return nn.CrossEntropyLoss()(score, label.view(-1))
def get_optimizer(model):
optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
return ScheduledOptim(optimizer)
def pick_top_n(preds, top_n=5):
top_pred_prob, top_pred_label = torch.topk(preds, top_n, 1)
top_pred_prob /= torch.sum(top_pred_prob)
top_pred_prob = top_pred_prob.squeeze(0).cpu().numpy()
top_pred_label = top_pred_label.squeeze(0).cpu().numpy()
c = np.random.choice(top_pred_label, size=1, p=top_pred_prob)
return c
class CharRNNTrainer(Trainer):
def __init__(self, convert):
self.convert = convert
model = get_model(convert)
criterion = get_loss
optimizer = get_optimizer(model)
super().__init__(model, criterion, optimizer)
self.config += ('text: ' + opt.txt + '\n' + 'train text length: ' + str(opt.len) + '\n')
self.config += ('predict text length: ' + str(opt.predict_len) + '\n')
self.metric_meter['loss'] = meter.AverageValueMeter()
def train(self, kwargs):
self.reset_meter()
self.model.train()
train_data = kwargs['train_data']
for data in tqdm(train_data):
x, y = data
y = y.long()
if opt.use_gpu:
x = x.cuda()
y = y.cuda()
x, y = Variable(x), Variable(y)
# Forward.
score, _ = self.model(x)
loss = self.criterion(score, y)
# Backward.
self.optimizer.zero_grad()
loss.backward()
# Clip gradient.
nn.utils.clip_grad_norm(self.model.parameters(), 5)
self.optimizer.step()
self.metric_meter['loss'].add(loss.data[0])
# Update to tensorboard.
if (self.n_iter + 1) % opt.plot_freq == 0:
self.writer.add_scalar('perplexity', np.exp(self.metric_meter['loss'].value()[0]), self.n_plot)
self.n_plot += 1
self.n_iter += 1
# Log the train metrics to dict.
self.metric_log['perplexity'] = np.exp(self.metric_meter['loss'].value()[0])
def test(self, kwargs):
"""Set beginning words and predicted length, using model to generate texts.
Returns:
predicted generating text
"""
self.model.eval()
begin = np.array([i for i in kwargs['begin']])
begin = np.random.choice(begin, size=1)
text_len = kwargs['predict_len']
samples = [self.convert.word_to_int(c) for c in begin]
input_txt = torch.LongTensor(samples)[None]
if opt.use_gpu:
input_txt = input_txt.cuda()
input_txt = Variable(input_txt)
_, init_state = self.model(input_txt)
result = samples
model_input = input_txt[:, -1][:, None]
for i in range(text_len):
out, init_state = self.model(model_input, init_state)
pred = pick_top_n(out.data)
model_input = Variable(torch.LongTensor(pred))[None]
if opt.use_gpu:
model_input = model_input.cuda()
result.append(pred[0])
# Update generating txt to tensorboard.
self.writer.add_text('text', self.convert.arr_to_text(result), self.n_plot)
self.n_plot += 1
print(self.convert.arr_to_text(result))
def predict(self, begin, predict_len):
self.model.eval()
samples = [self.convert.word_to_int(c) for c in begin]
input_txt = torch.LongTensor(samples)[None]
if opt.use_gpu:
input_txt = input_txt.cuda()
input_txt = Variable(input_txt)
_, init_state = self.model(input_txt)
result = samples
model_input = input_txt[:, -1][:, None]
for i in range(predict_len):
out, init_state = self.model(model_input, init_state)
pred = pick_top_n(out.data)
model_input = Variable(torch.LongTensor(pred))[None]
if opt.use_gpu:
model_input = model_input.cuda()
result.append(pred[0])
text = self.convert.arr_to_text(result)
print('Generate text is: {}'.format(text))
with open(opt.write_file, 'a') as f:
f.write(text)
def load_state_dict(self, checkpoints):
self.model.load_state_dict(torch.load(checkpoints))
def get_best_model(self):
if self.metric_log['perplexity'] < self.best_metric:
self.best_model = deepcopy(self.model.state_dict())
self.best_metric = self.metric_log['perplexity']
def train(**kwargs):
opt._parse(kwargs)
torch.cuda.set_device(opt.ctx)
convert = TextConverter(opt.txt, max_vocab=opt.max_vocab)
train_data = get_data(convert)
char_rnn_trainer = CharRNNTrainer(convert)
char_rnn_trainer.fit(train_data=train_data,
epochs=opt.max_epoch,
begin=opt.begin,
predict_len=opt.predict_len)
def predict(**kwargs):
opt._parse(kwargs)
torch.cuda.set_device(opt.ctx)
convert = TextConverter(opt.txt, max_vocab=opt.max_vocab)
char_rnn_trainer = CharRNNTrainer(convert)
char_rnn_trainer.load_state_dict(opt.load_model)
char_rnn_trainer.predict(opt.begin, opt.predict_len)
if __name__ == '__main__':
import fire
fire.Fire()
|
StarcoderdataPython
|
1897531
|
from __future__ import annotations
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Dict, Optional
from lander.ext.parser._cidata import CiMetadata
from lander.ext.parser._datamodel import DocumentMetadata
from lander.ext.parser._gitdata import GitRepository
from lander.ext.parser.texutils.extract import get_macros
from lander.ext.parser.texutils.normalize import read_tex_file, replace_macros
if TYPE_CHECKING:
from pathlib import Path
from lander.settings import BuildSettings
__all__ = ["Parser"]
class Parser(metaclass=ABCMeta):
"""Base class for TeX document metadata parsing extensions.
Parameters
----------
settings : `lander.settings.BuildSettings`
The build settings for this site, which includes command-line and YAML
configuration overrides of metadata.
"""
def __init__(self, *, settings: BuildSettings) -> None:
self._settings = settings
_tex_source = read_tex_file(self.tex_path)
self._tex_macros = get_macros(_tex_source)
self._tex_source = self.normalize_source(_tex_source)
try:
self._git_repository: Optional[
GitRepository
] = GitRepository.create(self.tex_path.parent)
except Exception:
self._git_repository = None
self._ci_metadata = CiMetadata.create()
self._metadata = self.extract_metadata()
@property
def settings(self) -> BuildSettings:
"""The build settings."""
return self._settings
@property
def tex_path(self) -> Path:
""""Path to the root TeX source file."""
return self.settings.source_path
@property
def tex_source(self) -> str:
"""TeX source, which has been normalized."""
return self._tex_source
@property
def tex_macros(self) -> Dict[str, str]:
"""TeX macros detected by
`lander.ext.parser.texutils.extract.get_macros`.
Keys are command names (including the slash) and values are the values
of those macros.
This property is useful because the normalized source in `tex_source`
typically has macro definitions clobbered.
"""
return self._tex_macros
@property
def ci_metadata(self) -> CiMetadata:
"""Metadata from the CI environment
This attribute is instantiate automatically and is available to the
`extract_metadata` hook for use by parser implementations.
"""
return self._ci_metadata
@property
def git_repository(self) -> Optional[GitRepository]:
"""Metadata from the local Git repository
This attribute is instantiate automatically and is available to the
`extract_metadata` hook for use by parser implementations.
"""
return self._git_repository
@property
def metadata(self) -> DocumentMetadata:
"""Metadata about the document."""
return self._metadata
def normalize_source(self, tex_source: str) -> str:
"""Process the TeX source after it is read, but before metadata
is extracted.
Parameters
----------
tex_source
TeX source content.
Returns
-------
tex_source
Normalized TeX source content.
"""
macros = get_macros(tex_source)
return replace_macros(tex_source, macros)
@abstractmethod
def extract_metadata(self) -> DocumentMetadata:
"""Hook for implementing metadata extraction.
Returns
-------
metadata
The metadata parsed from the document source.
"""
raise NotImplementedError
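# Minimal subclass sketch (illustrative; the DocumentMetadata field names used here are assumptions):
#
#   class MyParser(Parser):
#       def extract_metadata(self) -> DocumentMetadata:
#           title = self.tex_macros.get('\\title', 'Untitled')
#           return DocumentMetadata(title=title)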
|
StarcoderdataPython
|
12800598
|
# -*- coding: utf-8 -*-
# vim:fileencoding=utf-8 ai ts=4 sts=4 et sw=4
"""Tests for wafer.user.models"""
from django.test import TestCase
from wafer.tests.utils import create_user
class UserModelTestCase(TestCase):
def test_str_method_issue192(self):
"""Test that str(user) works correctly"""
user = create_user('test')
self.assertEqual(str(user.userprofile), 'test')
|
StarcoderdataPython
|
197004
|
import requests
from bs4 import BeautifulSoup
import urllib.request
link = ""
ptc = requests.get(link)
html = ptc.content
sp = BeautifulSoup(html, "html.parser")
for img in sp.find_all('img'):
print(img.get("src"))
'''
https://cidades.ibge.gov.br/brasil/rn/natal/panorama
'''
'''
print(ptc) response time
print(html) all of the site's HTML content
print(sp) only the site's parsed HTML content
i = sp.find_all('link') shows all the links, or any other tag you want
print(i)
'''
|
StarcoderdataPython
|
5014556
|
<reponame>OgiBalboa/E-Book_Cryptology<filename>main.py
"""
This application was developed for the Mechatronics Engineering Department of the
Faculty of Technology at Marmara University. It is an encryption system for e-books.
@author: ogibalboa
Date : 05.06.2020
"""
import sys
sys.path.append("bin")
from PyQt5 import QtCore, QtGui, QtWidgets,uic
from subprocess import call
import datetime
from db import db,permission
import os
from pylocker import ServerLocker
from cryptography.fernet import Fernet
import logos_rc
from auth import AuthMenu
from admin_panel import AdminPanel
from book import Book
db = db()
if permission(db) == False:
exit()
#lock = ServerLocker(password =
global library
library = {}
class MainMenu(QtWidgets.QMainWindow):
def __init__(self):
super(MainMenu,self).__init__()
uic.loadUi('ui/mainmenu.ui',self)
self.admin_panel = None
self.email = ""
self.no = 0
self.password = ""
self.add_book_btn.clicked.connect(lambda: self.add_book(self.code_input.text()))
self.openbook_btn.clicked.connect(self.open_book)
self.open_admin_panel_btn.clicked.connect(self.open_admin_panel)
self.refresh_btn.clicked.connect(self.update_library)
self.open_admin_panel_btn.hide()
self.admin = False
self.db = db
self.dlg = WaitDialog()
self.dlg.close()
def submit(self):
user_info = db.students.child(self.no).get()
if user_info["secret"] == "admin":
self.admin = True
self.open_admin_panel_btn.show()
self.update_library()
self.db.st_books = self.db.db.reference("books/" + self.no + "/st_books")
def open_book(self):
name = self.tableWidget.selectedItems()[0].data(0)+".epub"
cpath = os.getcwd()
path = os.path.join(cpath,"res","lib",name)
os.system('sumatra -restrict -view "single page" "' + path +'"')
def update_library(self):
try:
for book in db.students.child(self.no).child("st_books").get().items():
info = db.books.child(book[0]).get()
if info == None:
continue
date = book[1]
library.update({book: Book(info["name"], info["supervisor"], info["lecture"],
date)})
except : pass
self.check_library()
def add_book(self,code):
name = None
for item in db.codes.order_by_child('code').equal_to(code).get().items():
name = item[1]["c_book"]
date = item[1]["date"]
for book in db.books.order_by_child('name').equal_to(name).get().items():
book = book[1]
library.update({name:Book(book["name"],book["supervisor"],book["lecture"],date)})
if not name == None:
db.storage.blob("books/"+name+".epub").download_to_filename(os.path.join(os.getcwd(),"res","lib",name+".epub"))
self.db.st_books()
self.check_library()
def check_library(self,):
global library
while self.tableWidget.rowCount() > 0:
self.tableWidget.removeRow(0);
for row,book in enumerate(library.values()):
self.tableWidget.insertRow(row)
self.tableWidget.setItem(row,0,QtWidgets.QTableWidgetItem(book.name))
self.tableWidget.setItem(row,1,QtWidgets.QTableWidgetItem(book.lecture))
self.tableWidget.setItem(row,2,QtWidgets.QTableWidgetItem(book.supervisor))
self.tableWidget.setItem(row,3,QtWidgets.QTableWidgetItem(book.date))
def open_admin_panel(self):
self.admin_panel.show()
def setWaiting(self,status: bool,text):
if status == True:
self.dlg.setTitle(text)
self.dlg.show()
else:
self.dlg.close()
class WaitDialog(QtWidgets.QProgressDialog):
def __init__(self):
super(WaitDialog, self).__init__()
self.setAutoClose(True)
self.btn = QtWidgets.QPushButton('Cancel')
self.btn.setEnabled(False)
self.setCancelButton(self.btn)
def setTitle(self,text):
self.setWindowTitle(text)
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
app.setStyle("fusion")
menu = MainMenu()
auth = AuthMenu(menu)
admin_panel = AdminPanel(menu)
menu.admin_panel = admin_panel
#ui = Auth(mainwin)
auth.show()
sys.exit(app.exec_())
|
StarcoderdataPython
|
6655073
|
<gh_stars>0
# Generated by Django 2.1.2 on 2018-11-29 18:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('students', '0002_student_classes'),
]
operations = [
migrations.AddField(
model_name='student',
name='type',
field=models.CharField(default=1, max_length=200),
preserve_default=False,
),
]
|
StarcoderdataPython
|
1903485
|
<reponame>frsierrag/accesoriesStoreApp<filename>blog/migrations/versions/d5fdc4591e9e_modelo_usuario.py
"""modelo usuario
Revision ID: d5fdc4591e9e
Revises:
Create Date: 2020-12-12 21:25:31.881940
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<PASSWORD>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('products',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('nombre', sa.String(length=100), nullable=False),
sa.Column('precio', sa.Float(), nullable=True),
sa.Column('image', sa.String(length=255), nullable=True),
sa.Column('cantidad', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('hash_clave', sa.String(length=128), nullable=True),
sa.Column('admin', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
op.create_table('association',
sa.Column('users_id', sa.Integer(), nullable=False),
sa.Column('products_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['products_id'], ['products.id'], ),
sa.ForeignKeyConstraint(['users_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('users_id', 'products_id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('association')
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
op.drop_table('products')
# ### end Alembic commands ###
|
StarcoderdataPython
|
8085250
|
from blaze.expr import *
from blaze.expr.split import *
from blaze.api.dplyr import transform
import datashape
from datashape import dshape
from datashape.predicates import isscalar
t = TableSymbol('t', '{name: string, amount: int, id: int}')
a = Symbol('a', '1000 * 2000 * {x: float32, y: float32}')
def test_path_split():
expr = t.amount.sum() + 1
assert path_split(t, expr).isidentical(t.amount.sum())
expr = t.amount.distinct().sort()
assert path_split(t, expr).isidentical(t.amount.distinct())
t2 = transform(t, id=t.id * 2)
expr = by(t2.id, amount=t2.amount.sum()).amount + 1
assert path_split(t, expr).isidentical(by(t2.id, amount=t2.amount.sum()))
expr = count(t.amount.distinct())
assert path_split(t, expr).isidentical(t.amount.distinct())
expr = summary(total=t.amount.sum())
assert path_split(t, expr).isidentical(expr)
def test_sum():
(chunk, chunk_expr), (agg, agg_expr) = split(t, t.amount.sum())
assert chunk.schema == t.schema
assert chunk_expr.isidentical(chunk.amount.sum(keepdims=True))
assert isscalar(agg.dshape.measure)
assert agg_expr.isidentical(sum(agg))
def test_sum_with_axis_argument():
chunk = Symbol('chunk', '100 * 100 * {x: float32, y: float32}')
(chunk, chunk_expr), (agg, agg_expr) = split(a, a.x.sum(axis=0), chunk=chunk)
assert chunk.schema == a.schema
assert agg_expr.dshape == a.x.sum(axis=0).dshape
assert chunk_expr.isidentical(chunk.x.sum(axis=0, keepdims=True))
assert agg_expr.isidentical(agg.sum(axis=0))
def test_split_reasons_correctly_about_uneven_aggregate_shape():
x = Symbol('chunk', '10 * 10 * int')
chunk = Symbol('chunk', '3 * 3 * int')
(chunk, chunk_expr), (agg, agg_expr) = split(x, x.sum(axis=0),
chunk=chunk)
assert agg.shape == (4, 10)
def test_split_reasons_correctly_about_aggregate_shape():
chunk = Symbol('chunk', '100 * 100 * {x: float32, y: float32}')
(chunk, chunk_expr), (agg, agg_expr) = split(a, a.x.sum(), chunk=chunk)
assert agg.shape == (10, 20)
chunk = Symbol('chunk', '100 * 100 * {x: float32, y: float32}')
(chunk, chunk_expr), (agg, agg_expr) = split(a, a.x.sum(axis=0), chunk=chunk)
assert agg.shape == (10, 2000)
def test_distinct():
(chunk, chunk_expr), (agg, agg_expr) = split(t, count(t.amount.distinct()))
assert chunk.schema == t.schema
assert chunk_expr.isidentical(chunk.amount.distinct())
assert isscalar(agg.dshape.measure)
assert agg_expr.isidentical(count(agg.distinct()))
def test_summary():
(chunk, chunk_expr), (agg, agg_expr) = split(t, summary(a=t.amount.count(),
b=t.id.sum() + 1))
assert chunk.schema == t.schema
assert chunk_expr.isidentical(summary(a=chunk.amount.count(),
b=chunk.id.sum(), keepdims=True))
# assert not agg.schema == dshape('{a: int32, b: int32}')
assert agg_expr.isidentical(summary(a=agg.a.sum(),
b=agg.b.sum() + 1))
(chunk, chunk_expr), (agg, agg_expr) = \
split(t, summary(total=t.amount.sum()))
assert chunk_expr.isidentical(summary(total=chunk.amount.sum(),
keepdims=True))
assert agg_expr.isidentical(summary(total=agg.total.sum()))
def test_by_sum():
(chunk, chunk_expr), (agg, agg_expr) = \
split(t, by(t.name, total=t.amount.sum()))
assert chunk.schema == t.schema
assert chunk_expr.isidentical(by(chunk.name, total=chunk.amount.sum()))
assert not isscalar(agg.dshape.measure)
assert agg_expr.isidentical(by(agg.name, total=agg.total.sum()))
def test_by_count():
(chunk, chunk_expr), (agg, agg_expr) = \
split(t, by(t.name, total=t.amount.count()))
assert chunk_expr.isidentical(by(chunk.name, total=chunk.amount.count()))
assert agg_expr.isidentical(by(agg.name, total=agg.total.sum()))
def test_embarassing_rowwise():
(chunk, chunk_expr), (agg, agg_expr) = split(t, t.amount + 1)
assert chunk_expr.isidentical(chunk.amount + 1)
assert agg_expr.isidentical(agg)
def test_embarassing_selection():
(chunk, chunk_expr), (agg, agg_expr) = split(t, t[t.amount > 0])
assert chunk_expr.isidentical(chunk[chunk.amount > 0])
assert agg_expr.isidentical(agg)
x = Symbol('x', '24 * 16 * int32')
def test_nd_chunk():
c = Symbol('c', '4 * 4 * int32')
(chunk, chunk_expr), (agg, agg_expr) = split(x, x.sum(), chunk=c)
assert chunk.shape == (4, 4)
assert chunk_expr.isidentical(chunk.sum(keepdims=True))
assert agg.shape == (6, 4)
assert agg_expr.isidentical(agg.sum())
def test_nd_chunk_axis_args():
c = Symbol('c', '4 * 4 * int32')
(chunk, chunk_expr), (agg, agg_expr) = split(x, x.sum(axis=0), chunk=c)
assert chunk.shape == (4, 4)
assert chunk_expr.shape == (1, 4)
assert chunk_expr.isidentical(chunk.sum(keepdims=True, axis=0))
assert agg.shape == (6, 16)
assert agg_expr.isidentical(agg.sum(axis=0))
def test_agg_shape_in_tabular_case_with_explicit_chunk():
t = Symbol('t', '1000 * {name: string, amount: int, id: int}')
c = Symbol('chunk', 100 * t.schema)
expr = by(t.name, total=t.amount.sum())
(chunk, chunk_expr), (agg, agg_expr) = split(t, expr, chunk=c)
assert agg.dshape == dshape('var * {name: string, total: int}')
|
StarcoderdataPython
|
9735083
|
from models import Duck, Pink
from core.auth import authentication, authorization, check_scopes
from core.base import BaseMgr
from core.errors import InvalidCredential, RecordNotFound
from core.singleton import db, pat, redis, sendgrid
from core.utils import FromConf, random_b85
class PinkMgr(BaseMgr):
model = Pink
t_life = FromConf.load('TL_NEW_PINK')
@classmethod
def assign_token(cls, deps, amount):
check_scopes(deps)
deps_s = ','.join(deps)
tokens = [random_b85(k=20) for __ in range(amount)]
redis.mset({f'deps-{token}': deps_s for token in tokens}, ex=cls.t_life.seconds)
return tokens
@classmethod
def sign_up(cls, name: str, pwd, qq: int, other: str, email_token: str, deps_token: str):
if not (deps_s := redis.get(f'deps-{deps_token}')):
raise InvalidCredential(type_=InvalidCredential.T.new)
pink = cls.model(id=cls.gen_id(), name=name, email=None, qq=str(qq), other=other)
pink.pwd = <PASSWORD>
pink.deps = deps_s.split(',')
PinkMgr(pink).set_email(email_token)
db.session.add(pink)
sendgrid.send(to=pink.email, template_name='new pink', name=pink.name)
return pink
def update_info(self, qq: int, other: str):
if qq:
self.o.qq = str(qq)
if other:
self.o.other = other
def set_email(self, token):
payload = pat.decode(token)
if self.o.email != payload['old']:
raise InvalidCredential(type_=InvalidCredential.T.email)
self.o.email = payload['new']
def deactivate(self):
self.o.active = False
authentication.revoke_all_lemons(self.o)
def alter_ducks(self, add, remove):
conflicts = self.o.ducks.filter(Duck.node.in_(add.keys())).all()
for node in add.keys() - {conflict.node for conflict in conflicts}:
info = add[node]
DuckMgr.grant(pink_id=self.o.id,
node=node,
allow=info['allow'],
scopes=list(info['scopes']))
if remove:
self.o.ducks.filter(Duck.node.in_(remove)).delete()
authorization.DuckCache.clean(pink_id=self.o.id)
return (self.o.ducks.all(), conflicts)
class DuckMgr(BaseMgr):
model = Duck
def __init__(self, pink_id, node):
if not (obj := self.model.query.get((pink_id, node))):
raise RecordNotFound(cls_=self.model, id_=(','.join((pink_id, node))))
self.o = obj
@classmethod
def grant(cls, pink_id, node, allow, scopes):
duck = cls.model(pink_id=pink_id, node=node, allow=allow, scopes=scopes)
db.session.add(duck)
return duck
def modi_scopes(self, scopes: set):
self.o.scopes = list(scopes)
authorization.DuckCache.clean(self.o.pink_id)
return scopes
def add_scopes(self, scopes):
return self.modi_scopes(set(self.o.scopes) | scopes)
def remove_scopes(self, scopes):
return self.modi_scopes(set(self.o.scopes) - scopes)
|
StarcoderdataPython
|
4832096
|
from rest_framework import serializers
from . import models as services_models
class ServiceAgentSerializer(serializers.ModelSerializer):
owner = serializers.HyperlinkedRelatedField(
many=False, read_only=True, view_name='users:service_bus_details',
)
class Meta:
model = services_models.ServiceAgentBus
fields = '__all__'
|
StarcoderdataPython
|
3221157
|
max_1 = 1000
max_2 = 1000
max_3 = 1000
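# Track the row whose third column is smallest (note: despite the max_* names, this finds a minimum).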
with open('res.txt','r') as file:
for line in file.readlines():
t = line.split(';')
if float(t[2].replace("\n","")) < max_3:
max_1 = float(t[0])
max_2 = float(t[1])
max_3 = float(t[2].replace("\n",""))
print(max_1,max_2,max_3)
|
StarcoderdataPython
|
37552
|
<gh_stars>1-10
import numpy as np
import matplotlib.pyplot as plt
from tqdm import trange
from htm.bindings.sdr import SDR
from htm.bindings.algorithms import TemporalMemory
from htm.bindings.algorithms import SpatialPooler
from itertools import product
from copy import deepcopy
import json
EPS = 1e-12
class Memory:
"""
    The Memory object stores SDR representations of states and clusters them using a similarity measure.
    The SDR representations must have a fixed sparsity of active cells to work correctly.
Parameters
----------
size : int
        The size of the SDR representations that are stored.
    threshold: float
        The threshold is used to determine when it is necessary to create a new cluster.
Attributes
----------
size: int
        It stores the size argument.
    kernels : np.array
        This is the list of created cluster representations in dense form. It contains information about the
        frequency of each cell's activity (for each cluster) during operation. Its shape: (number of clusters, size).
    norms: np.array
        This is the list of representation counts for each cluster. Its shape: (number of clusters, 1).
    threshold: float
        It stores the threshold argument.
"""
def __init__(self, size, threshold=0.5):
self.kernels = None
self.norms = None
self.threshold = threshold
self.size = size
@property
def number_of_clusters(self):
if (self.kernels is not None) and (self.kernels.ndim == 2):
return self.kernels.shape[0]
else:
return 0
def add(self, state):
""" Add a new SDR representation (store and clusterize).
Parameters
----------
state: np.array
This is the SDR representation (sparse) that we want to store and cluster together with the other stored SDRs.
Returns
-------
"""
state_dense = np.zeros(self.size)
state_dense[state] = 1
sims = self.similarity(state_dense)
if np.sum(sims > self.threshold) == 0:
if self.kernels is None:
self.kernels = state_dense.reshape((1, -1))
self.norms = np.array([[1]])
else:
self.kernels = np.vstack((self.kernels, state_dense))
self.norms = np.vstack((self.norms, [1]))
else:
self.kernels[np.argmax(sims)] += state_dense
self.norms[np.argmax(sims)] += 1
def similarity(self, state):
"""This function evaluate similarity measure between stored clusters and new state.
Parameters
----------
state: np.array
The sparse representation of the state to be compared.
Returns
-------
similarities: np.array
The similarity measures for the given state. If the Memory object doesn't have any saved clusters, an empty
array is returned; otherwise the returned array contains the similarities between the state and each cluster.
Its shape: (number of kernels, 1).
"""
if self.kernels is None:
return np.array([])
else:
normalised_kernels = self.kernels / self.norms
sims = normalised_kernels @ state.T / (
np.sqrt(np.sum(normalised_kernels ** 2, axis=1)) * np.sqrt(state @ state.T))
similarities = sims.T
return similarities
def adopted_kernels(self, sparsity):
"""This function normalises stored representations and cuts them by sparsity threshold.
Parameters
----------
sparsity: float
The sparsity of active cells in stored SDR representations.
Returns
-------
clusters_representations: np.array
Normalised and cut representations of each cluster. The cutting is done by keeping the most frequently
active cells (their number is defined by sparsity) in the kernels attribute. All elements of the array are
in [0, 1]. The shape is (number of clusters, size).
"""
data = np.copy(self.kernels)
data[data < np.quantile(data, 1 - sparsity, axis=1).reshape((-1, 1))] = 0
clusters_representations = data / self.norms
return clusters_representations
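# Hedged usage sketch for Memory (illustrative only, not from the original code): feed it
# sparse SDR indices with a fixed number of active cells and it groups them into clusters;
# the size, sparsity and number of samples below are invented.
#
#   rng = np.random.default_rng(0)
#   mem = Memory(size=100, threshold=0.5)
#   for _ in range(20):
#       mem.add(rng.choice(100, size=5, replace=False))   # 5% sparsity
#   print(mem.number_of_clusters, mem.adopted_kernels(0.05).shape)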
class Empowerment:
"""
The Empowerment object contains everything necessary to evaluate 'empowerment' using a model of the environment.
This model is also created and trained here.
Parameters
----------
seed: int
The seed for random generator.
encode_size: int
The size of SDR representations which is taken by model.
tm_config: dict
It contains all parameters for initialisation of the TemporalMemory without the columnDimensions.
columnDimensions is defined inside Empowerment.
sparsity: float
The sparsity of SDR representations which are used in the TemporalMemory algorithm.
sp_config (optional): dict
It contains all parameters for initialisation of the SpatialPooler without the inputDimensions
and localAreaDensity. They are defined inside Empowerment. By default sp_config is None, which means no
SpatialPooler is used.
memory (optional): bool
This parameter defines whether the Memory is used for saving and clustering state representations.
By default it is False (the Memory is not used).
similarity_threshold (optional): float
This parameter determines the threshold for cluster creation. It is used when memory is True. By default: 0.6.
evaluate (optional): bool
This flag defines whether some statistics are stored to evaluate the learning process.
By default it is True.
Attributes
----------
evaluate: bool
It stores the same parameter.
anomalies: list
It stores the anomaly values of the TM for each time step after learning. Only when evaluate is True.
IoU: list
It stores the Intersection over Union values between TM predictions and the real activations for each time step
after learning. Only when evaluate is True.
sparsity: float
It stores the same parameter.
sp: SpatialPooler
It contains the SpatialPooler object if it was defined, else None
tm: TemporalMemory
It contains the TemporalMemory object.
size: int
It stores the encode_size parameter.
memory: Memory
It contains the Memory object if memory parameter is True, else None.
"""
def __init__(self, seed, encode_size, tm_config, sparsity,
sp_config=None,
memory=False,
similarity_threshold=0.6,
evaluate=True,
filename=None):
self.filename = filename
if self.filename is None:
self.evaluate = evaluate
if evaluate:
self.anomalies = []
self.IoU = []
self.sdr_0 = SDR(encode_size)
self.sdr_1 = SDR(encode_size)
self.sparsity = sparsity
if sp_config is not None:
self.sp = SpatialPooler(inputDimensions=[encode_size],
seed=seed,
localAreaDensity=sparsity,
**sp_config,
)
self.tm = TemporalMemory(
columnDimensions=self.sp.getColumnDimensions(),
seed=seed,
**tm_config,
)
self.sdr_sp = SDR(self.sp.getColumnDimensions())
self.size = self.sp.getColumnDimensions()[0]
else:
self.sp = None
self.tm = TemporalMemory(
columnDimensions=[encode_size],
seed=seed,
**tm_config,
)
self.size = self.tm.getColumnDimensions()[0]
if memory:
self.memory = Memory(self.tm.getColumnDimensions()[0], threshold=similarity_threshold)
else:
self.memory = None
else:
with open(self.filename) as json_file:
self.empowerment_data = json.load(json_file)
def eval_from_file(self, position):
return self.empowerment_data[str(position[0])][str(position[1])]
def eval_state(self, state, horizon, use_segments=False, use_memory=False):
"""This function evaluates empowerment for given state.
Parameters
----------
state: np.array
The SDR representation (sparse) of the state.
horizon: int
The horizon of evaluation for the given state. A good value is 3.
use_segments (optional): bool
The flag determines using of segments instead of cells to evaluate empowerment. By default: False.
use_memory (optional): bool
The flag determines using of the Memory object. Useful only if this object was initialised.
By default: False
Returns
-------
empowerment: float
The empowerment value (always >= 0).
p: np.array
The array of probabilities from which the empowerment was calculated.
start_state: np.array
The SDR representation of the given state as used in the TM. (It differs from the state parameter only if
sp is defined.)
"""
if self.sp is not None:
self.sdr_0.sparse = state
self.sp.compute(self.sdr_0, learn=False, output=self.sdr_sp)
sdr = self.sdr_sp
else:
self.sdr_0.sparse = state
sdr = self.sdr_0
start_state = np.copy(sdr.sparse)
data = np.zeros(self.tm.getColumnDimensions()[0])
for actions in range(horizon):
self.tm.reset()
self.tm.compute(sdr, learn=False)
self.tm.activateDendrites(learn=False)
predictiveCells = self.tm.getPredictiveCells().sparse
predictedColumnIndices = [self.tm.columnForCell(i) for i in predictiveCells]
sdr.sparse = np.unique(predictedColumnIndices)
if use_segments:
predictedColumnIndices = map(self.tm.columnForCell,
map(self.tm.connections.cellForSegment, self.tm.getActiveSegments()))
for i in predictedColumnIndices:
data[i] += 1
if self.memory is not None and use_memory:
if (self.memory.kernels is not None) and (self.memory.kernels.size > 0):
clusters = self.memory.adopted_kernels(self.sparsity)
mask = (clusters[:, data!=0].sum(axis=1) / (self.sparsity * self.size)) < self.memory.threshold
p = np.dot(clusters, data.T) / (self.sparsity * self.size)
p[mask] = 0
total_p = p.sum()
empowerment = np.sum(-p / (total_p + EPS) * np.log(p / (total_p + EPS), where=p != 0), where=p != 0)
p = p / (total_p + EPS)
return empowerment, p, start_state
else:
return 0, None, start_state
empowerment = np.sum(-data / (data.sum() + EPS) * np.log(data / (data.sum() + EPS), where=data != 0), where=data != 0)
p = data / (data.sum() + EPS)
return empowerment, p, start_state
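# Worked toy example of the entropy formula above (added comment, not original code):
# with predicted-state counts data = [2, 1, 1], p = data / data.sum() = [0.5, 0.25, 0.25]
# and the empowerment is -sum(p * log(p)) = -(0.5*ln 0.5 + 2 * 0.25*ln 0.25) ~= 1.04 nats;
# the more evenly the successor states are reachable, the larger the value.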
def eval_env(self, environment, horizon, use_segments=False, use_memory=False):
"""This function evaluate empowerment for every state in gridworld environment.
Parameters
----------
environment:
The gridworld environment to be evaluated.
horizon: int
The horizon of evaluation for each state. A good value is 3.
use_segments (optional): bool
The flag determines using of segments instead of cells to evaluate empowerment. By default: False.
use_memory (optional): bool
The flag determines using of the Memory object. Useful only if this object was initialised.
By default: False
Returns
-------
empowerment_map: np.array
This is the map of the environment with values of empowerment for each state.
"""
env = deepcopy(environment)
empowerment_map = np.zeros(env.env.shape)
for i in range(env.env.shape[0]):
for j in range(env.env.shape[1]):
if not env.env.entities['obstacle'].mask[i, j]:
env.env.agent.position = (i, j)
_, s, _ = env.observe()
empowerment_map[i, j] = self.eval_state(s, horizon, use_segments, use_memory)[0]
# plt.imshow(empowerment_map)
# plt.show()
return empowerment_map
def learn(self, state_0, state_1):
"""This function realize learning of TM.
Parameters
----------
state_0: np.array
The SDR representation of the state (sparse form).
state_1: np.array
The SDR representation of the next state (sparse form).
Returns
-------
"""
self.sdr_0.sparse = state_0
self.sdr_1.sparse = state_1
self.tm.reset()
if self.sp is not None:
self.sp.compute(self.sdr_0, learn=True, output=self.sdr_sp)
if self.memory is not None:
self.memory.add(self.sdr_sp.sparse)
self.tm.compute(self.sdr_sp, learn=True)
else:
if self.memory is not None:
self.memory.add(self.sdr_0.sparse)
self.tm.compute(self.sdr_0, learn=True)
if self.evaluate:
self.tm.activateDendrites(learn=False)
predictiveCells = self.tm.getPredictiveCells().sparse
predictedColumnIndices = np.unique([self.tm.columnForCell(i) for i in predictiveCells])
if self.sp is not None:
self.sp.compute(self.sdr_1, learn=True, output=self.sdr_sp)
self.tm.compute(self.sdr_sp, learn=True)
if self.evaluate:
intersection = np.intersect1d(self.sdr_sp.sparse, predictedColumnIndices)
union = np.union1d(self.sdr_sp.sparse, predictedColumnIndices)
else:
self.tm.compute(self.sdr_1, learn=True)
if self.evaluate:
intersection = np.intersect1d(self.sdr_1.sparse, predictedColumnIndices)
union = np.union1d(self.sdr_1.sparse, predictedColumnIndices)
if self.evaluate:
self.IoU.append(len(intersection) / len(union))
self.anomalies.append(self.tm.anomaly)
self.tm.reset()
def detailed_evaluate(self, env, horizon, use_segments=False, use_memory=False):
"""This function evaluate TM and real empowerment and confusion matrix for every state in gridworld environment.
Parameters
----------
env:
The gridworld environment to be evaluated.
horizon: int
The horizon of evaluation for each state. A good value is 3.
use_segments (optional): bool
The flag determines using of segments instead of cells to evaluate empowerment. By default: False.
use_memory (optional): bool
The flag determines using of the Memory object. Useful only if this object was initialised.
By default: False
Returns
-------
Plots normalised maps with TM and real empowerment. Also plots the confusion matrix in map style.
"""
confusion_data = np.zeros((env.env.shape[0] * env.env.shape[1], self.tm.getColumnDimensions()[0]))
empowerment_map = np.zeros(env.env.shape)
real_empowerment_map = np.zeros(env.env.shape)
for i in trange(env.env.shape[0]):
for j in range(env.env.shape[1]):
if not env.env.entities['obstacle'].mask[i, j]:
env.env.agent.position = (i, j)
_, s, _ = env.observe()
emp, _, s = self.eval_state(s, horizon, use_segments, use_memory)
empowerment_map[i, j] = emp
confusion_data[env.env.shape[1] * i + j, s] = 1
real_empowerment_map[i, j] = real_empowerment(env, (i, j), horizon)[0]
plt.figure(figsize=(10, 5))
plt.subplot(121)
mask = empowerment_map != 0
empowerment_map[mask] = (empowerment_map[mask != 0] - np.min(empowerment_map[mask])) / (
np.max(empowerment_map) - np.min(empowerment_map[mask]))
plt.imshow(empowerment_map)
plt.colorbar()
plt.title('TM')
plt.subplot(122)
mask = real_empowerment_map != 0
real_empowerment_map[mask] = (real_empowerment_map[mask != 0] - np.min(real_empowerment_map[mask])) / (
np.max(real_empowerment_map) - np.min(real_empowerment_map[mask]))
plt.imshow(real_empowerment_map)
plt.colorbar()
plt.title('Real')
plt.show()
intersection = confusion_data @ confusion_data.T
inv_mat = ~confusion_data.astype(bool)
union = inv_mat.shape[1] - inv_mat.astype(float) @ inv_mat.astype(float).T
iou = np.divide(intersection, union, out=np.zeros_like(intersection), where=union != 0)
plot_data = iou.reshape(env.env.shape[0], env.env.shape[1], env.env.shape[0], env.env.shape[1])
image = np.zeros((env.env.shape[0] ** 2, env.env.shape[0] ** 2))
for i in range(env.env.shape[0]):
for j in range(env.env.shape[1]):
image[env.env.shape[0] * i:env.env.shape[0] * (i + 1),
env.env.shape[1] * j:env.env.shape[1] * (j + 1)] = \
plot_data[i, j]
plt.figure(figsize=(15, 15))
plt.imshow(image)
plt.yticks([-0.5 + env.env.shape[0] * i for i in range(env.env.shape[0])])
plt.xticks([-0.5 + env.env.shape[1] * i for i in range(env.env.shape[0])])
plt.grid(linewidth=3)
plt.colorbar()
plt.show()
def draw_tm(tm, grid_step):
tm.activateDendrites(learn=False)
activeCells = tm.getActiveCells().dense
predictedCells = tm.getPredictiveCells().dense
data = np.zeros((tm.getColumnDimensions()[0], tm.getCellsPerColumn(), 3))
data[:, :, 0] = activeCells
data[:, :, 1] = predictedCells
plt.figure(figsize=(tm.getColumnDimensions()[0] / 10, tm.getCellsPerColumn() * 2))
plt.imshow(np.moveaxis(data, [0, 1, 2], [1, 0, 2]), aspect='auto')
plt.yticks([-0.5 + i for i in range(tm.getCellsPerColumn())])
plt.xticks([-0.5 + i * grid_step for i in range(tm.getColumnDimensions()[0] // grid_step)])
plt.grid(linewidth=2)
plt.show()
def draw_segments(tm):
data = np.zeros(tm.getCellsPerColumn() * tm.getColumnDimensions()[0])
max_seg = 0
for cell in trange(tm.getCellsPerColumn() * tm.getColumnDimensions()[0]):
segs = tm.connections.segmentsForCell(cell)
data[cell] = len(segs)
if len(segs) > max_seg:
max_seg = len(segs)
plt.figure(figsize=(tm.getColumnDimensions()[0] / 10, tm.getCellsPerColumn() * 2))
print(f'Number of segments. Max: {max_seg}')
plt.imshow(data.reshape((tm.getCellsPerColumn(), tm.getColumnDimensions()[0]), order='F'), aspect='auto')
plt.show()
def draw_active_segments(tm):
data = np.zeros(tm.getCellsPerColumn() * tm.getColumnDimensions()[0])
for seg in tm.getActiveSegments():
cell = tm.connections.cellForSegment(seg)
data[cell] += 1
plt.figure(figsize=(tm.getColumnDimensions()[0] / 10, tm.getCellsPerColumn() * 2))
print(f'Number of segments. Max: {data.max()}')
plt.imshow(data.reshape((tm.getCellsPerColumn(), tm.getColumnDimensions()[0]), order='F'), aspect='auto')
plt.show()
def moving_average(x, w):
return np.convolve(x, np.ones(w), 'valid') / w
def real_empowerment(env, position, horizon):
data = np.zeros(env.env.shape)
for actions in product(range(4), repeat=horizon):
env.env.agent.position = position
for a in actions:
env.act(a)
data[env.env.agent.position] += 1
return np.sum(-data / data.sum() * np.log(data / data.sum(), where=data != 0), where=data != 0), data
def learn(seed,
empowerment,
env,
steps,
dump_step=None,
horizon=3,
use_segments=False,
use_memory=False,
):
np.random.seed(seed)
visit_map = np.zeros(env.env.shape)
encode_sizes = []
for t in trange(steps):
visit_map[env.env.agent.position] += 1
a = np.random.randint(env.n_actions)
_, s0, _ = env.observe()
encode_sizes.append(len(s0))
env.act(a)
_, s1, _ = env.observe()
empowerment.learn(s0, s1)
if dump_step is not None:
if (t + 1) % dump_step == 0:
empowerment.eval_env(env, horizon, use_segments, use_memory)
plt.title('Visit')
plt.imshow(visit_map)
plt.colorbar()
plt.show()
plt.plot(moving_average(empowerment.anomalies, 100))
plt.title('Anomaly')
plt.ylim(0, 1)
plt.grid()
plt.show()
plt.plot(moving_average(empowerment.IoU, 100))
plt.title('Intersection over union')
plt.ylim(0, 1)
plt.grid()
plt.show()
plt.plot(moving_average(encode_sizes, 100))
plt.title('Number of active columns')
plt.show()
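# Hedged end-to-end sketch (not part of the original module): wiring Empowerment and the
# learn() helper above together, assuming a gridworld `env` object with the observe()/act()
# interface used in learn(); the sizes and configs below are placeholders, not tested values.
#
#   tm_config = dict(cellsPerColumn=16, activationThreshold=12, minThreshold=8,
#                    initialPermanence=0.5, connectedPermanence=0.4)
#   emp = Empowerment(seed=0, encode_size=env.output_sdr_size, tm_config=tm_config,
#                     sparsity=0.05, memory=True)
#   learn(seed=0, empowerment=emp, env=env, steps=5000, dump_step=1000, horizon=3,
#         use_memory=True)
#   emp.eval_env(env, horizon=3, use_memory=True)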
|
StarcoderdataPython
|
11204918
|
<gh_stars>1-10
"""
Test reload for trained models.
"""
import os
import pytest
import unittest
import tempfile
import numpy as np
import deepchem as dc
import tensorflow as tf
import scipy
from flaky import flaky
from sklearn.ensemble import RandomForestClassifier
from deepchem.molnet.load_function.chembl25_datasets import CHEMBL25_TASKS
from deepchem.feat import create_char_to_idx
def test_sklearn_classifier_reload():
"""Test that trained model can be reloaded correctly."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
sklearn_model = RandomForestClassifier()
model_dir = tempfile.mkdtemp()
model = dc.models.SklearnModel(sklearn_model, model_dir)
# Fit trained model
model.fit(dataset)
model.save()
# Load trained model
reloaded_model = dc.models.SklearnModel(None, model_dir)
reloaded_model.reload()
# Check predictions match on random sample
Xpred = np.random.rand(n_samples, n_features)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_multitaskregressor_reload():
"""Test that MultitaskRegressor can be reloaded correctly."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
model_dir = tempfile.mkdtemp()
model = dc.models.MultitaskRegressor(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples,
learning_rate=0.003,
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
# Reload trained model
reloaded_model = dc.models.MultitaskRegressor(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples,
learning_rate=0.003,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
Xpred = np.random.rand(n_samples, n_features)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < 0.1
def test_multitaskclassification_reload():
"""Test that MultitaskClassifier can be reloaded correctly."""
n_samples = 10
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
model_dir = tempfile.mkdtemp()
model = dc.models.MultitaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[.1],
batch_size=n_samples,
optimizer=dc.models.optimizers.Adam(
learning_rate=0.0003, beta1=0.9, beta2=0.999),
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Reload trained model
reloaded_model = dc.models.MultitaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[.1],
batch_size=n_samples,
optimizer=dc.models.optimizers.Adam(
learning_rate=0.0003, beta1=0.9, beta2=0.999),
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
Xpred = np.random.rand(n_samples, n_features)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_residual_classification_reload():
"""Test that a residual network can reload correctly."""
n_samples = 10
n_features = 5
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
model_dir = tempfile.mkdtemp()
model = dc.models.MultitaskClassifier(
n_tasks,
n_features,
layer_sizes=[20] * 10,
dropouts=0.0,
batch_size=n_samples,
residual=True,
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=500)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
# Reload trained model
reloaded_model = dc.models.MultitaskClassifier(
n_tasks,
n_features,
layer_sizes=[20] * 10,
dropouts=0.0,
batch_size=n_samples,
residual=True,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
Xpred = np.random.rand(n_samples, n_features)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_robust_multitask_classification_reload():
"""Test robust multitask overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean)
model_dir = tempfile.mkdtemp()
model = dc.models.RobustMultitaskClassifier(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples,
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=25)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
# Reloaded Trained Model
reloaded_model = dc.models.RobustMultitaskClassifier(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
Xpred = np.random.rand(n_samples, n_features)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_atomic_conv_model_reload():
from deepchem.models.atomic_conv import AtomicConvModel
from deepchem.data import NumpyDataset
model_dir = tempfile.mkdtemp()
batch_size = 1
N_atoms = 5
acm = AtomicConvModel(
n_tasks=1,
batch_size=batch_size,
layer_sizes=[
1,
],
frag1_num_atoms=5,
frag2_num_atoms=5,
complex_num_atoms=10,
model_dir=model_dir)
features = []
frag1_coords = np.random.rand(N_atoms, 3)
frag1_nbr_list = {0: [], 1: [], 2: [], 3: [], 4: []}
frag1_z = np.random.randint(10, size=(N_atoms))
frag2_coords = np.random.rand(N_atoms, 3)
frag2_nbr_list = {0: [], 1: [], 2: [], 3: [], 4: []}
frag2_z = np.random.randint(10, size=(N_atoms))
system_coords = np.random.rand(2 * N_atoms, 3)
system_nbr_list = {
0: [],
1: [],
2: [],
3: [],
4: [],
5: [],
6: [],
7: [],
8: [],
9: []
}
system_z = np.random.randint(10, size=(2 * N_atoms))
features.append(
(frag1_coords, frag1_nbr_list, frag1_z, frag2_coords, frag2_nbr_list,
frag2_z, system_coords, system_nbr_list, system_z))
features = np.asarray(features)
labels = np.random.rand(batch_size)
dataset = NumpyDataset(features, labels)
acm.fit(dataset, nb_epoch=1)
reloaded_model = AtomicConvModel(
n_tasks=1,
batch_size=batch_size,
layer_sizes=[
1,
],
frag1_num_atoms=5,
frag2_num_atoms=5,
complex_num_atoms=10,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
origpred = acm.predict(dataset)
reloadpred = reloaded_model.predict(dataset)
assert np.all(origpred == reloadpred)
def test_normalizing_flow_model_reload():
"""Test that NormalizingFlowModel can be reloaded correctly."""
from deepchem.models.normalizing_flows import NormalizingFlow, NormalizingFlowModel
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
tfk = tf.keras
model_dir = tempfile.mkdtemp()
Made = tfb.AutoregressiveNetwork(
params=2, hidden_units=[512, 512], activation='relu', dtype='float64')
flow_layers = [tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=Made)]
# 3D Multivariate Gaussian base distribution
nf = NormalizingFlow(
base_distribution=tfd.MultivariateNormalDiag(
loc=np.zeros(2), scale_diag=np.ones(2)),
flow_layers=flow_layers)
nfm = NormalizingFlowModel(nf, model_dir=model_dir)
target_distribution = tfd.MultivariateNormalDiag(loc=np.array([1., 0.]))
dataset = dc.data.NumpyDataset(X=target_distribution.sample(96))
final = nfm.fit(dataset, nb_epoch=1)
x = np.zeros(2)
lp1 = nfm.flow.log_prob(x).numpy()
assert nfm.flow.sample().numpy().shape == (2,)
reloaded_model = NormalizingFlowModel(nf, model_dir=model_dir)
reloaded_model.restore()
# Check that reloaded model can sample from the distribution
assert reloaded_model.flow.sample().numpy().shape == (2,)
lp2 = reloaded_model.flow.log_prob(x).numpy()
# Check that density estimation is same for reloaded model
assert np.all(lp1 == lp2)
def test_robust_multitask_regressor_reload():
"""Test that RobustMultitaskRegressor can be reloaded correctly."""
n_tasks = 10
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
model_dir = tempfile.mkdtemp()
model = dc.models.RobustMultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples,
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
# Reload trained model
reloaded_model = dc.models.RobustMultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
Xpred = np.random.rand(n_samples, n_features)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < 0.1
def test_IRV_multitask_classification_reload():
"""Test IRV classifier can be reloaded."""
n_tasks = 5
n_samples = 10
n_features = 128
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.randint(2, size=(n_samples, n_features))
y = np.ones((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
IRV_transformer = dc.trans.IRVTransformer(5, n_tasks, dataset)
dataset_trans = IRV_transformer.transform(dataset)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean)
model_dir = tempfile.mkdtemp()
model = dc.models.MultitaskIRVClassifier(
n_tasks,
K=5,
learning_rate=0.01,
batch_size=n_samples,
model_dir=model_dir)
# Fit trained model
model.fit(dataset_trans)
# Eval model on train
scores = model.evaluate(dataset_trans, [classification_metric])
assert scores[classification_metric.name] > .9
# Reload Trained Model
reloaded_model = dc.models.MultitaskIRVClassifier(
n_tasks,
K=5,
learning_rate=0.01,
batch_size=n_samples,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
Xpred = np.random.random(dataset_trans.X.shape)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset_trans, [classification_metric])
assert scores[classification_metric.name] > .9
@flaky
def test_progressive_classification_reload():
"""Test progressive multitask can reload."""
np.random.seed(123)
n_tasks = 5
n_samples = 10
n_features = 6
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean)
model_dir = tempfile.mkdtemp()
model = dc.models.ProgressiveMultitaskClassifier(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.001,
weight_init_stddevs=[.1],
alpha_init_stddevs=[.02],
batch_size=n_samples,
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=400)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
# Reload Trained Model
reloaded_model = dc.models.ProgressiveMultitaskClassifier(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.001,
weight_init_stddevs=[.1],
alpha_init_stddevs=[.02],
batch_size=n_samples,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
Xpred = np.random.rand(n_samples, n_features)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_progressivemultitaskregressor_reload():
"""Test that ProgressiveMultitaskRegressor can be reloaded correctly."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
model_dir = tempfile.mkdtemp()
model = dc.models.ProgressiveMultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.001,
weight_init_stddevs=[.1],
alpha_init_stddevs=[.02],
batch_size=n_samples,
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
# Reload trained model
reloaded_model = dc.models.ProgressiveMultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.001,
weight_init_stddevs=[.1],
alpha_init_stddevs=[.02],
batch_size=n_samples,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
Xpred = np.random.rand(n_samples, n_features)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < 0.1
def test_DAG_regression_reload():
"""Test DAG regressor reloads."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
mols = ["CC", "CCO", "CC", "CCC", "CCCCO", "CO", "CC", "CCCCC", "CCC", "CCCO"]
n_samples = len(mols)
X = featurizer(mols)
y = np.random.rand(n_samples, n_tasks)
dataset = dc.data.NumpyDataset(X, y)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
n_feat = 75
batch_size = 10
transformer = dc.trans.DAGTransformer(max_atoms=50)
dataset = transformer.transform(dataset)
model_dir = tempfile.mkdtemp()
model = dc.models.DAGModel(
n_tasks,
max_atoms=50,
n_atom_feat=n_feat,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression",
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .1
reloaded_model = dc.models.DAGModel(
n_tasks,
max_atoms=50,
n_atom_feat=n_feat,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression",
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
predmols = ["CCCC", "CCCCCO", "CCCCC"]
Xpred = featurizer(predmols)
predset = dc.data.NumpyDataset(Xpred)
predset = transformer.transform(predset)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .1
def test_weave_classification_reload():
"""Test weave model can be reloaded."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
# Load mini log-solubility dataset.
featurizer = dc.feat.WeaveFeaturizer()
tasks = ["outcome"]
mols = ["CC", "CCCCC", "CCCCC", "CCC", "COOO", "COO", "OO"]
n_samples = len(mols)
X = featurizer(mols)
y = [1, 1, 1, 1, 0, 0, 0]
dataset = dc.data.NumpyDataset(X, y)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
batch_size = 5
model_dir = tempfile.mkdtemp()
model = dc.models.WeaveModel(
n_tasks,
batch_size=batch_size,
learning_rate=0.01,
mode="classification",
dropouts=0.0,
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .6
# Check predictions match on random sample
predmols = ["CCCC", "CCCCCO", "CCCCC"]
Xpred = featurizer(predmols)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloaded_model = dc.models.WeaveModel(
n_tasks,
batch_size=batch_size,
learning_rate=0.003,
mode="classification",
dropouts=0.0,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
predmols = ["CCCC", "CCCCCO", "CCCCC"]
Xpred = featurizer(predmols)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .6
def test_MPNN_regression_reload():
"""Test MPNN can reload datasets."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
# Load mini log-solubility dataset.
featurizer = dc.feat.WeaveFeaturizer()
tasks = ["outcome"]
mols = ["C", "CO", "CC"]
n_samples = len(mols)
X = featurizer(mols)
y = np.random.rand(n_samples, n_tasks)
dataset = dc.data.NumpyDataset(X, y)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
n_atom_feat = 75
n_pair_feat = 14
batch_size = 10
model_dir = tempfile.mkdtemp()
model = dc.models.MPNNModel(
n_tasks,
n_atom_feat=n_atom_feat,
n_pair_feat=n_pair_feat,
T=2,
M=3,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression",
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=50)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .8
# Reload trained model
reloaded_model = dc.models.MPNNModel(
n_tasks,
n_atom_feat=n_atom_feat,
n_pair_feat=n_pair_feat,
T=2,
M=3,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression",
model_dir=model_dir)
reloaded_model.restore()
# Eval model on train
scores = reloaded_model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .8
# Check predictions match on random sample
predmols = ["CCCC", "CCCCCO", "CCCCC"]
Xpred = featurizer(predmols)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
def test_textCNN_classification_reload():
"""Test textCNN model reloadinng."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
featurizer = dc.feat.RawFeaturizer()
tasks = ["outcome"]
mols = ["C", "CO", "CC"]
n_samples = len(mols)
X = featurizer(mols)
y = np.random.randint(2, size=(n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, ids=mols)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
char_dict, length = dc.models.TextCNNModel.build_char_dict(dataset)
batch_size = 3
model_dir = tempfile.mkdtemp()
model = dc.models.TextCNNModel(
n_tasks,
char_dict,
seq_length=length,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="classification",
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=200)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .8
# Reload trained model
reloaded_model = dc.models.TextCNNModel(
n_tasks,
char_dict,
seq_length=length,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="classification",
model_dir=model_dir)
reloaded_model.restore()
# Eval model on train
scores = reloaded_model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .8
assert len(reloaded_model.model.get_weights()) == len(
model.model.get_weights())
for (reloaded, orig) in zip(reloaded_model.model.get_weights(),
model.model.get_weights()):
assert np.all(reloaded == orig)
# Check predictions match on random sample
predmols = ["CCCC", "CCCCCO", "CCCCC"]
Xpred = featurizer(predmols)
predset = dc.data.NumpyDataset(Xpred, ids=predmols)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
assert len(model.model.layers) == len(reloaded_model.model.layers)
def test_1d_cnn_regression_reload():
"""Test that a 1D CNN can reload."""
n_samples = 10
n_features = 3
n_tasks = 1
np.random.seed(123)
X = np.random.rand(n_samples, 10, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks)).astype(np.float32)
dataset = dc.data.NumpyDataset(X, y)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
model_dir = tempfile.mkdtemp()
model = dc.models.CNN(
n_tasks,
n_features,
dims=1,
dropouts=0,
kernel_size=3,
mode='regression',
learning_rate=0.003,
model_dir=model_dir)
# Fit trained model
model.fit(dataset, nb_epoch=200)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < 0.1
# Reload trained model
reloaded_model = dc.models.CNN(
n_tasks,
n_features,
dims=1,
dropouts=0,
kernel_size=3,
mode='regression',
learning_rate=0.003,
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
Xpred = np.random.rand(n_samples, 10, n_features)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < 0.1
def test_graphconvmodel_reload():
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
n_tasks = len(tasks)
mols = ["C", "CO", "CC"]
n_samples = len(mols)
X = featurizer(mols)
y = np.array([0, 1, 0])
dataset = dc.data.NumpyDataset(X, y)
classification_metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
batch_size = 10
model_dir = tempfile.mkdtemp()
model = dc.models.GraphConvModel(
len(tasks),
batch_size=batch_size,
batch_normalize=False,
mode='classification',
model_dir=model_dir)
model.fit(dataset, nb_epoch=10)
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] >= 0.6
# Reload trained Model
reloaded_model = dc.models.GraphConvModel(
len(tasks),
batch_size=batch_size,
batch_normalize=False,
mode='classification',
model_dir=model_dir)
reloaded_model.restore()
# Check predictions match on random sample
predmols = ["CCCC", "CCCCCO", "CCCCC"]
Xpred = featurizer(predmols)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# Eval model on train
scores = reloaded_model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .6
def test_chemception_reload():
"""Test that chemception models can be saved and reloaded."""
img_size = 80
img_spec = "engd"
res = 0.5
n_tasks = 1
featurizer = dc.feat.SmilesToImage(
img_size=img_size, img_spec=img_spec, res=res)
data_points = 10
mols = ["CCCCCCCC"] * data_points
X = featurizer(mols)
y = np.random.randint(0, 2, size=(data_points, n_tasks))
w = np.ones(shape=(data_points, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, mols)
classification_metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
model_dir = tempfile.mkdtemp()
model = dc.models.ChemCeption(
n_tasks=n_tasks,
img_spec="engd",
model_dir=model_dir,
mode="classification")
model.fit(dataset, nb_epoch=3)
# Reload Trained Model
reloaded_model = dc.models.ChemCeption(
n_tasks=n_tasks,
img_spec="engd",
model_dir=model_dir,
mode="classification")
reloaded_model.restore()
# Check predictions match on random sample
predmols = ["CCCC", "CCCCCO", "CCCCC"]
Xpred = featurizer(predmols)
predset = dc.data.NumpyDataset(Xpred)
origpred = model.predict(predset)
reloadpred = reloaded_model.predict(predset)
assert np.all(origpred == reloadpred)
# TODO: This test is a little awkward. The Smiles2Vec model awkwardly depends on a dataset_file being available on disk. This needs to be cleaned up to match the standard model handling API.
def test_smiles2vec_reload():
"""Test that smiles2vec models can be saved and reloaded."""
dataset_file = os.path.join(os.path.dirname(__file__), "chembl_25_small.csv")
max_len = 250
pad_len = 10
max_seq_len = 20
char_to_idx = create_char_to_idx(
dataset_file, max_len=max_len, smiles_field="smiles")
feat = dc.feat.SmilesToSeq(
char_to_idx=char_to_idx, max_len=max_len, pad_len=pad_len)
n_tasks = 5
data_points = 10
loader = dc.data.CSVLoader(
tasks=CHEMBL25_TASKS, smiles_field='smiles', featurizer=feat)
dataset = loader.create_dataset(
inputs=[dataset_file], shard_size=10000, data_dir=tempfile.mkdtemp())
y = np.random.randint(0, 2, size=(data_points, n_tasks))
w = np.ones(shape=(data_points, n_tasks))
dataset = dc.data.NumpyDataset(dataset.X[:data_points, :max_seq_len], y, w,
dataset.ids[:data_points])
classification_metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
model_dir = tempfile.mkdtemp()
model = dc.models.Smiles2Vec(
char_to_idx=char_to_idx,
max_seq_len=max_seq_len,
use_conv=True,
n_tasks=n_tasks,
model_dir=model_dir,
mode="classification")
model.fit(dataset, nb_epoch=3)
# Reload Trained Model
reloaded_model = dc.models.Smiles2Vec(
char_to_idx=char_to_idx,
max_seq_len=max_seq_len,
use_conv=True,
n_tasks=n_tasks,
model_dir=model_dir,
mode="classification")
reloaded_model.restore()
# Check predictions match on original dataset
origpred = model.predict(dataset)
reloadpred = reloaded_model.predict(dataset)
assert np.all(origpred == reloadpred)
# TODO: We need a cleaner usage example for this
def test_DTNN_regression_reload():
"""Test DTNN can reload datasets."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
current_dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(current_dir, "example_DTNN.mat")
dataset = scipy.io.loadmat(input_file)
X = dataset['X']
y = dataset['T']
w = np.ones_like(y)
dataset = dc.data.NumpyDataset(X, y, w, ids=None)
n_tasks = y.shape[1]
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
model_dir = tempfile.mkdtemp()
model = dc.models.DTNNModel(
n_tasks,
n_embedding=20,
n_distance=100,
learning_rate=1.0,
model_dir=model_dir,
mode="regression")
# Fit trained model
model.fit(dataset, nb_epoch=250)
# Eval model on train
pred = model.predict(dataset)
mean_rel_error = np.mean(np.abs(1 - pred / y))
assert mean_rel_error < 0.2
reloaded_model = dc.models.DTNNModel(
n_tasks,
n_embedding=20,
n_distance=100,
learning_rate=1.0,
model_dir=model_dir,
mode="regression")
reloaded_model.restore()
# Check predictions match on random sample
origpred = model.predict(dataset)
reloadpred = reloaded_model.predict(dataset)
assert np.all(origpred == reloadpred)
def generate_sequences(sequence_length, num_sequences):
for i in range(num_sequences):
seq = [
np.random.randint(10)
for x in range(np.random.randint(1, sequence_length + 1))
]
yield (seq, seq)
def test_seq2seq_reload():
"""Test reloading for seq2seq models."""
sequence_length = 8
tokens = list(range(10))
model_dir = tempfile.mkdtemp()
s = dc.models.SeqToSeq(
tokens,
tokens,
sequence_length,
encoder_layers=2,
decoder_layers=2,
embedding_dimension=150,
learning_rate=0.01,
dropout=0.1,
model_dir=model_dir)
# Train the model on random sequences. We aren't training long enough to
# really make it reliable, but I want to keep this test fast, and it should
# still be able to reproduce a reasonable fraction of input sequences.
s.fit_sequences(generate_sequences(sequence_length, 25000))
# Test it out.
tests = [seq for seq, target in generate_sequences(sequence_length, 50)]
pred1 = s.predict_from_sequences(tests, beam_width=1)
pred4 = s.predict_from_sequences(tests, beam_width=4)
reloaded_s = dc.models.SeqToSeq(
tokens,
tokens,
sequence_length,
encoder_layers=2,
decoder_layers=2,
embedding_dimension=150,
learning_rate=0.01,
dropout=0.1,
model_dir=model_dir)
reloaded_s.restore()
reloaded_pred1 = reloaded_s.predict_from_sequences(tests, beam_width=1)
assert len(pred1) == len(reloaded_pred1)
for (p1, r1) in zip(pred1, reloaded_pred1):
assert p1 == r1
reloaded_pred4 = reloaded_s.predict_from_sequences(tests, beam_width=4)
assert len(pred4) == len(reloaded_pred4)
for (p4, r4) in zip(pred4, reloaded_pred4):
assert p4 == r4
embeddings = s.predict_embeddings(tests)
pred1e = s.predict_from_embeddings(embeddings, beam_width=1)
pred4e = s.predict_from_embeddings(embeddings, beam_width=4)
reloaded_embeddings = reloaded_s.predict_embeddings(tests)
reloaded_pred1e = reloaded_s.predict_from_embeddings(
reloaded_embeddings, beam_width=1)
reloaded_pred4e = reloaded_s.predict_from_embeddings(
reloaded_embeddings, beam_width=4)
assert np.all(embeddings == reloaded_embeddings)
assert len(pred1e) == len(reloaded_pred1e)
for (p1e, r1e) in zip(pred1e, reloaded_pred1e):
assert p1e == r1e
assert len(pred4e) == len(reloaded_pred4e)
for (p4e, r4e) in zip(pred4e, reloaded_pred4e):
assert p4e == r4e
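# Hedged summary sketch (not an actual test from this file): the reload tests above all
# follow the same pattern, shown here with a hypothetical SomeModel class standing in for
# the concrete DeepChem models.
#
#   model_dir = tempfile.mkdtemp()
#   model = SomeModel(..., model_dir=model_dir)
#   model.fit(dataset, nb_epoch=N)                  # train and checkpoint into model_dir
#   reloaded = SomeModel(..., model_dir=model_dir)  # same hyperparameters
#   reloaded.restore()                              # load weights from model_dir
#   assert np.all(model.predict(predset) == reloaded.predict(predset))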
|
StarcoderdataPython
|
4869997
|
#!/usr/bin/env python3
#coding: utf-8
### 1st line allows executing this script by typing only its name in a terminal, with no need to precede it with the python command
### 2nd line declaring the source code charset should not be necessary, but for example pydoc requires it
__doc__ = "this module allows checking and getting info about the file system"# information describing the purpose of this module
__status__ = "Development"# should be one of 'Prototype' 'Development' 'Production' 'Deprecated' 'Release'
__version__ = "2.0.1"# version number, date or info about the last modification compared to the previous version
__license__ = "public domain"# ref to an official existing License
__date__ = "2020"# started creation date / year month day
__author__ = "N-zo <EMAIL>"# the original creator of this program
__maintainer__ = "Nzo"# person who currently makes improvements, replacing the author
__credits__ = []# past maintainers and any other helpers
__contact__ = "<EMAIL>"# current contact address for more info about this file
### import the required modules
import stat # module defines constants and functions for interpreting the results of os stat
import os
from os import path
#import pathlib # filesystem paths with semantics appropriate for different operating systems
#import urllib # modules for working with URLs
import shutil
import filecmp # this module defines functions to compare files and directories,
import mimetypes #giving files MIME types.
### types constants for file system items
TYPE_DIRECTORY='directory'
TYPE_SYMBOLIC_LINK ='symbolic_link'
TYPE_FILE='file'
TYPE_UNKNOW='unknow'
TYPE_SPECIAL_CHARACTER_DEVICE='Character_device'
TYPE_BLOK_DEVICE='Block_device'
TYPE_FIFO='fifo'
TYPE_SOCKET='socket'
def pathname(pathname):
"""check if the given path and name point to an existing file or directory"""
return path.exists(pathname)
def directory_pathname(pathname):
"""check if the given path and name point to an existing directory"""
return path.isdir(pathname)
def file_pathname(pathname):
"""check if the given path and name point to an existing file"""
return path.isfile(pathname)
def link_pathname(pathname):
"""check if the given path and name point to an existing link"""
return path.islink(pathname)
def file_size(pathname):
"""for given file path gives the size in bytes."""
#info=os.lstat(pathname)
#size = info[stat.ST_SIZE]
size = path.getsize(pathname)
### if pathname is a link, returns 4096 for a pointed directory or the size of the pointed file
### raises an error if the file does not exist or is inaccessible.
return size
def get_mimetype(pathname):
"""for the given file path return the mime type"""
return mimetypes.guess_type(pathname,strict=True)#strict means registered with IANA.
def get_type(pathname):
"""for the given path name return the type"""
info=os.lstat(pathname)
mode = info[stat.ST_MODE]
if stat.S_ISLNK(mode) :
return TYPE_SYMBOLIC_LINK
elif stat.S_ISDIR(mode):
return TYPE_DIRECTORY
elif stat.S_ISCHR(mode) :
return TYPE_SPECIAL_CHARACTER_DEVICE
elif stat.S_ISBLK(mode) :
return TYPE_BLOK_DEVICE
elif stat.S_ISREG(mode):
return TYPE_FILE
elif stat.S_ISFIFO(mode) :
return TYPE_FIFO
elif stat.S_ISSOCK(mode) :
return TYPE_SOCKET
else :
return TYPE_UNKNOW
def same_files(f1,f2):
"""check if the 2 files are the same,(compare byte by byte the contents)"""
### If shallow is true, files with identical os.stat() are equal.
### Otherwise, the contents of files are compared.
return filecmp.cmp(f1, f2, shallow=False)
def disk_usage(pathname):
"""Return disk usage statistics for the given path"""
### Return tuple with the attributes total,used,free in bytes.
### usage(total=118013599744, used=63686647808, free=48352747520)
return shutil.disk_usage(pathname)
def hardlink_qantum(pathname):
"""return the number of harlinks made for the given pathname
(returns the number of files with the same inode, so it's always at least 1)"""
info=os.lstat(pathname)
qantum = info[stat.ST_NLINK]
return qantum
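### hedged usage sketch added for illustration (not in the original module):
### quick self-check of the helpers above when the file is executed directly;
### the probed path is just an example.
if __name__ == "__main__":
    probe = os.getcwd()
    print("exists:", pathname(probe))
    print("type:", get_type(probe))
    print("mime type:", get_mimetype(probe))
    print("disk usage:", disk_usage(probe))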
|
StarcoderdataPython
|
152150
|
import databricks.koalas as ks
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from gators.converter.koalas_to_pandas import KoalasToPandas
ks.set_option("compute.default_index_type", "distributed-sequence")
@pytest.fixture
def data_ks():
X = ks.DataFrame(
{
"q": {0: 0.0, 1: 3.0, 2: 6.0},
"w": {0: 1.0, 1: 4.0, 2: 7.0},
"e": {0: 2.0, 1: 5.0, 2: 8.0},
}
)
y = ks.Series([0, 0, 1], name="TARGET")
return X, y, X.to_pandas(), y.to_pandas()
@pytest.mark.koalas
def test_ks(data_ks):
X_ks, y_ks, X_expected, y_expected = data_ks
X_new, y_new = KoalasToPandas().transform(X_ks, y_ks)
assert_frame_equal(X_new, X_expected)
assert_series_equal(y_new, y_expected)
def test_input():
with pytest.raises(TypeError):
_ = KoalasToPandas().transform(pd.DataFrame(), pd.DataFrame())
|
StarcoderdataPython
|
9632480
|
"""Extract minimal growth media and growth rates."""
import pandas as pd
from micom import load_pickle
from micom.media import minimal_medium
from micom.workflows import workflow
max_procs = 6
processes = []
def media_and_gcs(sam):
com = load_pickle("models/" + sam + ".pickle")
# Get growth rates
sol = com.cooperative_tradeoff(fraction=0.9)
rates = sol.members["growth_rate"].copy()
rates["community"] = sol.growth_rate
rates.name = sam
# Get the minimal medium
med = minimal_medium(com, 0.95*sol.growth_rate)
med.name = sam
return {"medium": med, "gcs": rates}
samples = pd.read_csv("recent.csv")
gcs = pd.DataFrame()
media = pd.DataFrame()
results = workflow(media_and_gcs, samples.run_accession, max_procs)
for s in results:
gcs = gcs.append(results["gcs"])
media = media.append(results["media"])
gcs.to_csv("growth_rates.csv")
media.to_csv("minimal_media.csv")
|
StarcoderdataPython
|
3520797
|
"""
In charge of evolving a population for the set
number of mating events.
"""
import sys
from kaplan.ga_input import read_ga_input, verify_ga_input
from kaplan.mol_input import read_mol_input, verify_mol_input
from kaplan.ring import Ring, RingEmptyError
from kaplan.tournament import run_tournament
from kaplan.output import run_output
def run_kaplan(ga_input_file, mol_input_file):
"""Run the Kaplan programme.
Parameters
----------
ga_input_file : str
The input file containing genetic algorithm constants.
mol_input_file : str
The input file containing the molecular information.
"""
# read in and verify ga_input_file
ga_input_dict = read_ga_input(ga_input_file)
verify_ga_input(ga_input_dict)
# read in and verify mol_input_file
# check that initial geometry converges
# and construct a parser object
mol_input_dict = read_mol_input(mol_input_file)
parser = verify_mol_input(mol_input_dict)
# check that inputs agree on a very trivial level
assert ga_input_dict['num_atoms'] == len(parser.coords)
# make a ring
ring = Ring(ga_input_dict['num_geoms'],
ga_input_dict['num_atoms'],
ga_input_dict['num_slots'],
ga_input_dict['pmem_dist'],
ga_input_dict['fit_form'],
ga_input_dict['coef_energy'],
ga_input_dict['coef_rmsd'],
parser)
# fill ring with an initial population
ring.fill(ga_input_dict['num_filled'], 0)
# run the mevs
for mev in range(ga_input_dict['num_mevs']):
try:
print(mev)
run_tournament(ga_input_dict['t_size'],
ga_input_dict['num_muts'],
ga_input_dict['num_swaps'],
ring, mev)
except RingEmptyError:
ring.fill(ga_input_dict['num_filled'], mev)
# run output
run_output(ring)
if __name__ == "__main__":
if len(sys.argv) != 3:
raise FileNotFoundError("Please include the ga_input_file and the\
mol_input_file as arguments to the program.")
run_kaplan(sys.argv[1], sys.argv[2])
|
StarcoderdataPython
|
128598
|
<gh_stars>1-10
"""
mem_fifo.py
============================================================================
Create a queue/storage for small amounts of data inside a given memory pool.
"""
import uctypes
import sys
import uasyncio  # needed for the awaitable peek/put/get helpers below
# from uasyncio.queues import QueueFull, QueueEmpty
FIFO_HEADER = {
"magic": 0 | uctypes.UINT32,
"rd_i": 4 | uctypes.UINT8,
"wr_i": 5 | uctypes.UINT8,
"elements": 6 | uctypes.UINT8,
"elem_size": 7 | uctypes.UINT8,
"full": 8 | uctypes.UINT8,
}
FIFO_MAGIC = 0x3ffd3e80
class QueueOverrun(BaseException):
pass
class QueueEmpty(BaseException):
pass
class MemFifo():
""" Class managing a simple FIFO queue"""
def __init__(self, mem_desc, struct):
"""
Attach to an existing queue or create a new one at the location defined by
the mem_desc parameter.
Parameters
----------
mem_desc
Memory descriptor with addr and size attributes describing the memory
area that backs the queue.
struct
Definition of a uctypes structure.
This describes the messages we will manage in the queue.
"""
hdr_size = uctypes.sizeof(FIFO_HEADER)
elem_size = uctypes.sizeof(struct)
hdr = uctypes.struct(mem_desc.addr, FIFO_HEADER)
entries = (mem_desc.size - hdr_size) // elem_size
if hdr.magic != FIFO_MAGIC or hdr.elem_size != elem_size or hdr.elements != entries:
print("MemFifo: init queue")
hdr.rd_i = 0
hdr.wr_i = 0
hdr.elem_size = elem_size
hdr.elements = entries
hdr.magic = FIFO_MAGIC
hdr.full = False
self._hdr = hdr
self._data_addr = mem_desc.addr + uctypes.sizeof(self._hdr)
self._struct = struct
def _incr_wrap(self, index):
index = index + 1
if index == self._hdr.elements:
index = 0
return index
def _enqueue(self, data):
"""
Adds a data record to the queue.
Raises a QueueOverrun exception when there are no more slots available
in the queue.
Parameters
----------
data
A uctypes structure holding the message to add to the queue.
The structure SHOULD conform to the definition given when creating
the MemFifo object or should at least have the same size.
Longer structures get silently truncated when added to the queue.
"""
hdr = self._hdr
if hdr.full:
raise QueueOverrun()
addr = self._data_addr + hdr.elem_size * hdr.wr_i
src = uctypes.bytearray_at(uctypes.addressof(data), hdr.elem_size)
dst = uctypes.bytearray_at(addr, hdr.elem_size)
dst[:] = src[:]
hdr.wr_i = self._incr_wrap(hdr.wr_i)
hdr.full = hdr.wr_i == hdr.rd_i
def _dequeue(self):
"""
Removes the first message from the queue and returns either the data as
uctype struct or None when the queue is empty.
The returned value references memory directly in the queue slot, so it might
change when enqueue() is called!
"""
hdr = self._hdr
if (not hdr.full) and (hdr.rd_i == hdr.wr_i):
return None
addr = self._data_addr + hdr.elem_size * hdr.rd_i
hdr.rd_i = self._incr_wrap(hdr.rd_i)
hdr.full = False
return uctypes.struct(addr, self._struct)
def peek_nowait(self):
if self.empty():
raise QueueEmpty()
hdr = self._hdr
addr = self._data_addr + hdr.elem_size * hdr.rd_i
return uctypes.struct(addr, self._struct)
async def peek(self):
while self.empty():
await uasyncio.sleep(0)
return self.peek_nowait()
# uasyncio-queue like interface
async def put(self, data):
while self._hdr.full:
await uasyncio.sleep(0)
return self._enqueue(data)
async def get(self):
r = self._dequeue()
while r is None:
await uasyncio.sleep(0)
r = self._dequeue()
return r
def put_nowait(self, data):
return self._enqueue(data)
def get_nowait(self):
if self.empty():
raise QueueEmpty()
        return self._dequeue()
def full(self):
return self._hdr.full
def empty(self):
hdr = self._hdr
return (hdr.rd_i == hdr.wr_i) and (not hdr.full)
def qsize(self):
return self._hdr.elements
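# --- usage sketch (illustrative, not part of the original module) ---
# A minimal, hedged example of how MemFifo might be used on a MicroPython
# board. The MSG layout and the _MemDesc helper are assumptions made up for
# this demo; the memory descriptor only needs .addr and .size attributes, and
# here it is backed by a plain bytearray instead of a real shared memory pool.
if __name__ == "__main__":
    MSG = {
        "kind": 0 | uctypes.UINT8,
        "value": 4 | uctypes.UINT32,
    }
    class _MemDesc:
        def __init__(self, buf):
            self.addr = uctypes.addressof(buf)
            self.size = len(buf)
    _pool = bytearray(64)                      # room for the header plus a few slots
    _fifo = MemFifo(_MemDesc(_pool), MSG)
    _buf = bytearray(uctypes.sizeof(MSG))      # backing storage for one message
    _msg = uctypes.struct(uctypes.addressof(_buf), MSG)
    _msg.kind = 1
    _msg.value = 42
    _fifo.put_nowait(_msg)                     # copies the message into the ring
    print(_fifo.get_nowait().value)            # -> 42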
|
StarcoderdataPython
|
6654684
|
<filename>test/programytest/storage/stores/file/store/test_patternnodes.py
import os
import os.path
import unittest
from programy.parser.pattern.factory import PatternNodeFactory
from programy.storage.stores.file.config import FileStorageConfiguration
from programy.storage.stores.file.config import FileStoreConfiguration
from programy.storage.stores.file.engine import FileStorageEngine
from programy.storage.stores.file.store.nodes import FilePatternNodeStore
class FilePatternNodeStoreTests(unittest.TestCase):
def test_initialise(self):
config = FileStorageConfiguration()
engine = FileStorageEngine(config)
engine.initialise()
store = FilePatternNodeStore(engine)
self.assertEqual(store.storage_engine, engine)
def test_storage_path(self):
config = FileStorageConfiguration()
engine = FileStorageEngine(config)
engine.initialise()
store = FilePatternNodeStore(engine)
self.assertEquals('/tmp/nodes/pattern_nodes.conf', store._get_storage_path())
self.assertIsInstance(store.get_storage(), FileStoreConfiguration)
def test_load_variables(self):
config = FileStorageConfiguration()
config._pattern_nodes_storage = FileStoreConfiguration(file=os.path.dirname(__file__) + os.sep + "data" + os.sep + "nodes" + os.sep + "pattern_nodes.conf", fileformat="text", encoding="utf-8", delete_on_start=False)
engine = FileStorageEngine(config)
engine.initialise()
store = FilePatternNodeStore(engine)
collection = PatternNodeFactory()
store.load(collection)
self.assertEqual(12, len(collection.nodes))
self.assertTrue(collection.exists("zeroormore"))
|
StarcoderdataPython
|
6566439
|
<filename>ja_tableau/test_tableau.py
import ja_language as ja_lan
import pandas as pd
if __name__ == "__main__":
# Inital JA Language Agent
ja_lan = ja_lan.language_translator()
try:
ja_lan_df = pd.read_pickle('ja_lan_env.pkl')
apply_lan = ja_lan_df['ja_lan'][0]
ja_lan.set_language_code(apply_lan)
print(ja_lan.print("[INFO]: Your apply language is {%s}" % apply_lan))
    except Exception:
print("[INFO]: No ja_lan_env.pkl found !")
print("Set language as default 'English' ")
|
StarcoderdataPython
|
9651598
|
<filename>appengine/src/greenday_api/user/messages.py
"""
Protorpc messages for the user API
"""
from protorpc import messages, message_types
from django.contrib.auth import get_user_model
from django_protorpc import DjangoProtoRPCMessage
class UserRequestMessage(DjangoProtoRPCMessage):
"""
ProtoRPC message definition to represent a user to be updated by
themself.
"""
first_name = messages.StringField(1)
last_name = messages.StringField(2)
email = messages.StringField(3)
profile_img_url = messages.StringField(4)
google_plus_profile = messages.StringField(5)
accepted_nda = messages.BooleanField(6)
    last_login = message_types.DateTimeField(7)  # field number 6 is already used by accepted_nda
class SuperUserRequestMessage(DjangoProtoRPCMessage):
"""
ProtoRPC message definition to represent a user to be updated by a
super user.
"""
class Meta:
model = get_user_model()
exclude = ('id', 'date_joined', 'username',)
class UserResponseMessage(DjangoProtoRPCMessage):
"""ProtoRPC message definition to represent a user that is stored."""
class Meta:
model = get_user_model()
exclude = (
'password',
'actions',
'owner_of_globaltags',
'owner_of_projecttags',
'owner_of_videotags',
'owner_of_tag_instances',
'owner_of_videos',
'related_videos',
'user_permissions',
'groups',
'videos',
'projectusers',
)
class UserResponseBasic(DjangoProtoRPCMessage):
""" Message to return a user's basic data """
id = messages.IntegerField(
1, variant=messages.Variant.INT32)
is_superuser = messages.BooleanField(2)
gaia_id = messages.StringField(3)
first_name = messages.StringField(4)
last_name = messages.StringField(5)
email = messages.StringField(6)
profile_img_url = messages.StringField(7)
google_plus_profile = messages.StringField(8)
class UserListResponse(DjangoProtoRPCMessage):
"""ProtoRPC message definition to represent a list of stored users."""
items = messages.MessageField(UserResponseBasic, 1, repeated=True)
is_list = messages.BooleanField(2)
class UserStatsResponse(DjangoProtoRPCMessage):
"""Message definition to represent a user's application stats"""
id = messages.IntegerField(
1, variant=messages.Variant.INT32)
videos_watched = messages.IntegerField(
2, variant=messages.Variant.INT32)
tags_added = messages.IntegerField(
3, variant=messages.Variant.INT32)
|
StarcoderdataPython
|
3498267
|
<filename>examples/DataAnalysis/CentralBase.py
# encoding: UTF-8
import sys
import json
from pymongo import MongoClient
from vnpy.trader.app.ctaStrategy.ctaBase import DATABASE_NAMES
import pandas as pd
import numpy as np
import datetime as dt
import talib as ta
from interval import Interval
import time
# direction
M_TO_UP = True
M_TO_DOWN = False
# whether a node or central base has been formally confirmed
M_FORMAL = True
M_TEMP = False
# top point or bottom point
M_TOP = 1
M_BOTTOM = -1
# constants
M_MAX_VAL = 5000
M_MIN_VAL = -5000
M_INVALID_INDEX = -1
# judgement of divergence (beichi) points and buy/sell points
M_NODECIDE = 0
M_FALSE = -1
M_TRUE = 1
# trade direction
M_BUY = 1
M_SELL = -1
# grandparent levels of the highest level that uses sub-level nodes to generate divergence
GRANDPA_CB_LEVER = []
class Node:
"""
趋势节点,包含(时间, 值)
low_id:次级中枢编号
low_count:当前点累计的次级中枢数目
ntype: 顶点或低点
isformal:正式或临时节点
"""
def __init__(self, time, value, ntype, low_id=None, low_count=None, isformal=M_TEMP):
self.datetime = time
self.value = value
self.ntype= ntype
self.low_id = low_id
self.low_count = low_count
self.isformal = isformal
class Centralbase:
"""
中枢定义
start:开始时间
end:结束时间
up:上边界
down: 下边界
start_node_id: 开始ID
end_node_id: 结束ID
ctype:类型计数
isformal:正式或临时节点
"""
def __init__(self, start, end, up, down, start_node_id=None, end_node_id=None, isformal=M_TEMP):
self.start = start
self.end = end
self.up = up
self.down = down
self.start_node_id = start_node_id
self.end_node_id = end_node_id
self.ctype = 0
self.isformal = isformal
self.max_val = M_MIN_VAL
self.min_val = M_MAX_VAL
self.max_node_id = M_INVALID_INDEX
self.min_node_id = M_INVALID_INDEX
def setCType(self, ctype):
self.ctype = ctype
def getCBInterval(self):
return Interval(lower_bound=self.down, upper_bound=self.up,
                        lower_closed=False, upper_closed=False)
class BeichiTime:
    '''
    time: time of the divergence (beichi)
    btype: divergence type and central-base level; positive means top divergence, negative means bottom divergence
    real_beichi: whether it is a real divergence; M_NODECIDE undecided, M_FALSE no, M_TRUE yes
    '''
def __init__(self, time, btype, node_id, real_time=None, low_cb_id=None):
self.time = time
self.btype = btype
self.node_id = node_id
self.real_beichi = M_NODECIDE
self.real_time = real_time
self.low_cb_id = low_cb_id
class BuyPoint:
def __init__(self, time, node_id,real_time = None):
self.time = time
self.real_time = real_time
self.node_id = node_id
self.real_buy = M_NODECIDE
class SellPoint:
def __init__(self, time, node_id,real_time = None):
self.time = time
self.real_time = real_time
self.node_id = node_id
self.real_sell = M_NODECIDE
class TradePoint:
def __init__(self, time, node_id,low_cb_id=None, real_time = None, trade_direct=M_BUY):
self.time = time
self.real_time = real_time
self.node_id = node_id
self.trade_direct = trade_direct
self.real_sell = M_NODECIDE
self.low_cb_id = low_cb_id
class TradeStrategy:
def __init__(self):
self.trade_point_list = []
self.trade_on_going = False
class KData:
def __init__(self):
self.data_dict={}
self.rely_dict={}
def addDataItem(self, name='D', item=None):
if item==None:
self.data_dict[name] = pd.DataFrame()
else:
            self.data_dict[name] = pd.concat([self.data_dict[name], item], ignore_index=False)
    def buildRelation(self, up_name='D', low_name='30MIN'):
self.rely_dict[up_name] = low_name
class CentralBaseSet:
def __init__(self, freq, dataframe, low_CB_set=None, all_data=None):
self.data = dataframe.set_index(dataframe['datetime'])
self.low_CB_set = low_CB_set
self.node_list = []
self.centralbase_list = []
self.freq = freq
self.beichi_list=[]
self.beichi_pc_list=[]
self.share_beichi_list=[]
self.first_sell_point_list = []
self.sec_sell_point_list = []
self.all_sell_point_list = []
self.first_buy_point_list = []
self.sec_buy_point_list = []
self.third_buy_point_list = []
self.beichi_processing = False
self.upgrade_cnt=0
self.seek_max=None
self.temp_open = None
self.temp_close = None
self.data_columns = self.data.columns
self.all_data = all_data
self.data = all_data
self.cur_time_index = None
self.cur_min_value = M_MAX_VAL
self.cur_min_node_id = 0
self.cur_max_value = M_MIN_VAL
self.cur_max_node_id = 0
        # list of trading strategies
        self.trade_strategy_list = []
        # central-base upgrade logic
self.cur_cut_low_id = -1
self.cur_cut_start_node_id = -1
self.cur_low_beichi_time = None
self.timespend1=0
self.timespend2=0
self.timespend3=0
self.timespend4=0
self.callcnt = 0
def analyze_CB_Step(self):
if self.low_CB_set == None:
self.getNodeList_KLine_Step()
else:
self.getNodeList_Lower_Step()
if self.freq=='D' :
if len(self.node_list)>2 and abs(self.node_list[-2].value-4.71)<0.001:
a=1
self.get_Centralbase_Step()
self.getBeichi_Share_With_LowBeichi_Step()
if self.low_CB_set == None or True:
self.beichi_processing = self.getBeichi_LastTwo_Step()
else:
self.beichi_processing = self.getBeichi_LastOne_Step()
self.update_max_min_value()
self.beichi_judge_step()
#self.sell_point_judge()
#self.SnakeTrade_step()
if self.freq=='D':
self.SnakeTrade_With_ShareBeichi_step()
def update_data(self, low_data_frame=None, data_item_series=None):
if not data_item_series.empty:
data = {}
for column in self.data_columns:
data[column] = []
data[column].append(data_item_series[column])
data_item = pd.DataFrame(data, columns=self.data_columns)
self.data = pd.concat([self.data, data_item], ignore_index=True)
self.data.set_index(self.data['datetime'], inplace=True)
self.indexGenerator_step()
'''
else:
if low_data_frame!=None:
if self.low_CB_set==None:
self.data = low_data_frame
else:
data_seg = low_data_frame.ix[low_data_frame.index>self.data.index[-1]]
if data_seg!=None:
open_p = data_seg.ix[0, 'open']
close_p = data_seg.ix[-1, 'close']
volumns = data_seg['volume'].sum()
'''
def update_data_time(self, low_data_frame=None, data_item_time=None):
if data_item_time !=None:
start = time.clock()
self.cur_time_index = data_item_time
self.indexGenerator_step()
self.timespend4 = self.timespend4 + (time.clock() - start)
'''
else:
if low_data_frame!=None:
if self.low_CB_set==None:
self.data = low_data_frame
else:
data_seg = low_data_frame.ix[low_data_frame.index>self.data.index[-1]]
if data_seg!=None:
open_p = data_seg.ix[0, 'open']
close_p = data_seg.ix[-1, 'close']
volumns = data_seg['volume'].sum()
'''
def __getmax(self, a, b):
return a if a>b else b
def __getmin(self, a,b):
return a if a<b else b
def resultToSource(self):
self.data['node'] = None
self.data['base_up'] = None
self.data['base_down'] = None
self.data['beichi'] = None
self.data['sharebeichi'] = None
self.data['panzhbeichi'] = None
self.data['sec_buy'] = None
for node in self.node_list:
time_seg = self.data.ix[self.data.index>=node.datetime, 'close']
time = time_seg.index[0]
if time!=None:
self.data.set_value(time, 'node', node.value)
for base in self.centralbase_list:
self.data.ix[base.start:base.end,'base_up'] = base.up
self.data.ix[base.start:base.end,'base_down'] = base.down
self.data.ix[base.start:base.end,'base_type'] = base.ctype
for beichi in self.beichi_list:
time_seg = self.data.ix[self.data.index>=beichi.time, 'close']
time = time_seg.index[0]
if time!=None:
self.data.set_value(time, 'beichi', self.data.ix[time, 'close'])
for sharebeichi in self.share_beichi_list:
time_seg = self.data.ix[self.data.index>=sharebeichi.time, 'close']
time = time_seg.index[0]
if time!=None:
self.data.set_value(time, 'sharebeichi', self.data.ix[time, 'close'])
for panzhbeichi in self.beichi_pc_list:
time_seg = self.data.ix[self.data.index>=panzhbeichi.time, 'close']
time = time_seg.index[0]
if time!=None:
self.data.set_value(time, 'panzhbeichi', self.data.ix[time, 'close'])
for sec_buy in self.sec_buy_point_list:
time_seg = self.data.ix[self.data.index>=sec_buy.time, 'close']
time = time_seg.index[0]
if time!=None:
self.data.set_value(time, 'sec_buy', self.data.ix[time, 'close'])
def indexGenerator_step(self):
#length = np.size(self.data, axis=0)
#if length<40:
#self.data['SMA5'] = ta.SMA(self.data['close'].values, timeperiod = 5) #5日均线
#self.data['SMA10'] = ta.SMA(self.data['close'].values, timeperiod = 10) #10日均线
macd_talib, signal, hist = ta.MACD(self.data['close'].values,fastperiod=12,signalperiod=9)
self.data['DIF'] = macd_talib #DIF
self.data['DEA'] = signal #DEA
self.data['MACD'] = hist #MACD
#else:
##self.data.ix[-40:,'SMA5'] = ta.SMA(self.data.ix[-40:,'close'].values, timeperiod = 5) #5日均线
##self.data.ix[-40:,'SMA10'] = ta.SMA(self.data.ix[-40:,'close'].values, timeperiod = 10) #10日均线
#macd_talib, signal, hist = ta.MACD(self.data.ix[-40:,'close'].values,fastperiod=12,signalperiod=9)
#self.data.ix[-1,'DIF'] = macd_talib[-1] #DIF
#self.data.ix[-1:,'DEA'] = signal[-1] #DEA
#self.data.ix[-1:,'MACD'] = hist[-1] #MACD
def getNodeList_KLine_Step(self):
#if self.data.empty:
#return
#time = self.data.index[-1]
if self.cur_time_index == None:
return
time = self.cur_time_index
open_price = self.data.ix[time, 'open']
close_price = self.data.ix[time, 'close']
up_flag = open_price <= close_price
if self.seek_max==None: #初始数据
if up_flag:
self.seek_max = M_TO_UP
self.node_list.append(Node(time, close_price, M_TOP , isformal=M_TEMP))
else:
self.seek_max = M_TO_DOWN
self.node_list.append(Node(time, close_price, M_BOTTOM, isformal=M_TEMP))
go_judge = True
if self.seek_max == M_TO_UP:
if abs(close_price - open_price) <=0.001: #排查十字星的情况
if close_price >= self.node_list[-1].value:
self.node_list[-1].datetime = time
self.node_list[-1].value = close_price
else:
go_judge = False
if up_flag and go_judge:
if close_price >= self.node_list[-1].value:
self.node_list[-1].datetime = time
self.node_list[-1].value = close_price
else:
if close_price < self.node_list[-1].value:
self.node_list[-1].isformal = M_FORMAL
self.node_list.append(Node(time, close_price, M_BOTTOM, isformal=M_TEMP))
self.seek_max = M_TO_DOWN
else:
if abs(close_price - open_price) <=0.001: #排查十字星的情况
if close_price <= self.node_list[-1].value:
self.node_list[-1].datetime = time
self.node_list[-1].value = close_price
else:
go_judge = False
if (not up_flag) and go_judge:
if close_price <= self.node_list[-1].value:
self.node_list[-1].datetime = time
self.node_list[-1].value = close_price
else:
if close_price > self.node_list[-1].value:
self.node_list[-1].isformal = M_FORMAL
self.node_list.append(Node(time, close_price, M_TOP, isformal=M_TEMP))
self.seek_max = M_TO_UP
def __getNodeListCross(self, start_id, end_id_include):
cross_itval = Interval.none()
i=start_id
while(i<end_id_include):
if cross_itval == Interval.none():
cross_itval = self.__getSegment(i)
else:
cross_itval = cross_itval & self.__getSegment(i)
i+=1
return cross_itval
def get_Centralbase_Step(self):
        '''
        Valid timings for this logic:
        1. the first central base;
        2. divergence handling;
        3. a new temporary node and a new formal node have formed
        '''
seg_list=[]
start = None
end = None
start_id = -1
end_id = -1
        cross_itval = Interval.none()
if self.freq=='5MIN' and len(self.node_list)>2 \
and abs(self.node_list[-2].value-5.29)<0.001\
and abs(self.node_list[-1].value-5.28)<0.001:
a=1
if self.freq=='5MIN' :
a=1
if self.freq=='30MIN' :
a=1
if self.freq=='D' :
a=1
if len(self.centralbase_list) ==0:#首个中枢
if len(self.node_list) > 3:
cross_itval = self.__getSegment(0) & self.__getSegment(1)
start = self.__getSegmentStart(0)
end = self.__getSegmentEnd(1)
newcbase = Centralbase(start, end, cross_itval.upper_bound, cross_itval.lower_bound, 0, 2, isformal=M_TEMP)
newcbase.setCType(self.__getCBType(newcbase))
newcbase.max_node_id, newcbase.max_val = self.__getMaxNode_Val(0, 2)
newcbase.min_node_id, newcbase.min_val = self.__getMinNode_Val(0, 2)
self.centralbase_list.append(newcbase)
else:
end_node_id = self.centralbase_list[-1].end_node_id
start_node_id = self.centralbase_list[-1].start_node_id
if len(self.node_list)-2 > end_node_id: #新临时NODE已经形成,新正式NODE形成
cross_itval = self.centralbase_list[-1].getCBInterval() & self.__getSegment(end_node_id)
if cross_itval != Interval.none():#新正式段与原中枢相交,更新中枢信息
#if end_node_id-start_node_id >=4 :
##切割中枢
#self.centralbase_list[-1].isformal = M_FORMAL
#cross_itval = self.__getSegment(start_node_id) & self.__getSegment(start_node_id+2)
#self.centralbase_list[-1].up = cross_itval.upper_bound
#self.centralbase_list[-1].down = cross_itval.lower_bound
#self.centralbase_list[-1].end_node_id = start_node_id+3
#self.centralbase_list[-1].end = self.node_list[start_node_id+3].datetime
#self.centralbase_list[-1].max_node_id, self.centralbase_list[-1].max_val = self.__getMaxNode_Val(start_node_id, start_node_id+3)
#self.centralbase_list[-1].min_node_id, self.centralbase_list[-1].min_val = self.__getMinNode_Val(start_node_id, start_node_id+3)
##添加新中枢
#cross_itval = self.centralbase_list[-1].getCBInterval() & self.__getSegment(start_node_id+3) & self.__getSegment(start_node_id+4)
#start = self.node_list[start_node_id+3].datetime
#end = self.node_list[end_node_id+1].datetime
#newcbase = Centralbase(start, end, cross_itval.upper_bound, cross_itval.lower_bound, start_node_id+3, end_node_id+1, isformal=M_TEMP)
#newcbase.setCType(self.__getCBType(newcbase))
#newcbase.max_node_id, newcbase.max_val = self.__getMaxNode_Val(start_node_id+3, end_node_id+1)
#newcbase.min_node_id, newcbase.min_val = self.__getMinNode_Val(start_node_id+3, end_node_id+1)
#self.centralbase_list.append(newcbase)
#else:
self.centralbase_list[-1].up = cross_itval.upper_bound
self.centralbase_list[-1].down = cross_itval.lower_bound
self.centralbase_list[-1].end_node_id = end_node_id+1
self.centralbase_list[-1].end = self.node_list[end_node_id+1].datetime
#self.centralbase_list[-1].setCType(self.__getCBType(newcbase=None, isnew=False, cb_id=len(self.centralbase_list)-1))
self.get_panzheng_beichi_step()
#更新极值信息
if self.node_list[end_node_id+1].value > self.centralbase_list[-1].max_val:
self.centralbase_list[-1].max_val = self.node_list[end_node_id+1].value
self.centralbase_list[-1].max_node_id = end_node_id+1
if self.node_list[end_node_id+1].value < self.centralbase_list[-1].min_val:
self.centralbase_list[-1].min_val = self.node_list[end_node_id+1].value
self.centralbase_list[-1].min_node_id = end_node_id+1
else:
self.centralbase_list[-1].isformal = M_FORMAL
#添加新中枢
cross_itval = self.__getSegment(end_node_id)
start = self.node_list[end_node_id].datetime
end = self.node_list[end_node_id+1].datetime
newcbase = Centralbase(start, end, cross_itval.upper_bound, cross_itval.lower_bound, end_node_id, end_node_id+1, isformal=M_TEMP)
newcbase.setCType(self.__getCBType(newcbase))
newcbase.max_node_id, newcbase.max_val = self.__getMaxNode_Val(end_node_id, end_node_id+1)
newcbase.min_node_id, newcbase.min_val = self.__getMinNode_Val(end_node_id, end_node_id+1)
self.centralbase_list.append(newcbase)
if self.centralbase_list[-1].ctype < self.centralbase_list[-2].ctype:
self.sec_sell_point_list.append(SellPoint(self.node_list[-2].datetime, len(self.node_list)-2,\
real_time=self.__get_lowest_current_time("5MIN")))
if (self.centralbase_list[-1].ctype >0) and (self.centralbase_list[-1].ctype*self.centralbase_list[-2].ctype<0):
self.third_buy_point_list.append(SellPoint(self.node_list[-2].datetime, len(self.node_list)-2,\
real_time=self.__get_lowest_current_time("5MIN")))
def getNodeList_Lower_Step(self):
lower_CB_list = self.low_CB_set.centralbase_list
length = len(lower_CB_list)
index = length-1
if length<2:
return
pre_base = lower_CB_list[-2]
base = lower_CB_list[-1]
if self.freq=='30MIN' and abs(base.up-4.35)<0.001:
a=1
if self.freq=='30MIN':
a=1
if (length==2) and len(self.node_list)==0:
self.seek_max = M_TO_UP
if 1==self.__get_CB_pos(pre_base, base):
self.seek_max = M_TO_DOWN
else:
self.seek_max = M_TO_UP
#生成新临时节点
self.__Make_New_Temp_Node_Lower(self.seek_max, base.start, base.end, index)
return
if self.cur_cut_low_id != index:
self.cur_cut_low_id = index
self.cur_cut_start_node_id = base.start_node_id
cur_base_start_node_id = self.cur_cut_start_node_id
cur_base_end_node_id = base.end_node_id
'''
#中枢升级逻辑
if (cur_base_end_node_id - cur_base_start_node_id)==9:
if self.freq=='D':
a=1
self.node_list.pop()
self.__Make_New_Temp_Node_Lower_WithID(self.seek_max, cur_base_start_node_id, cur_base_start_node_id+3, index)
self.node_list[-1].isformal = M_FORMAL
cur_base_start_node_id = cur_base_start_node_id+3
self.seek_max=self.__reverse_direct(self.seek_max)
self.__Make_New_Temp_Node_Lower_WithID(self.seek_max, cur_base_start_node_id, cur_base_start_node_id+3, index)
self.node_list[-1].isformal = M_FORMAL
cur_base_start_node_id = cur_base_start_node_id+3
#进行中枢计算
self.get_Centralbase_Step()
self.update_max_min_value()
self.seek_max=self.__reverse_direct(self.seek_max)
self.__Make_New_Temp_Node_Lower_WithID(self.seek_max, cur_base_start_node_id, cur_base_start_node_id+3, index)
cur_base_start_node_id = cur_base_start_node_id+3
self.cur_cut_start_node_id = cur_base_start_node_id
return
'''
if self.node_list[-1].isformal == M_FORMAL and (base.start<=self.node_list[-1].datetime and base.end>=self.node_list[-1].datetime):
return
if self.seek_max==M_TO_UP: #向上
#当前中枢在前一中枢下或相交,当前趋势结束
if((0<self.__get_CB_pos(pre_base, base)) and (index>self.node_list[-1].low_id)):
#更新正式节点信息
#self.__Update_Last_Node_Lower_WithID(self.seek_max, pre_base.start, pre_base.end, isformal=M_FORMAL)
self.node_list[-1].isformal = M_FORMAL
#生成新临时节点
self.seek_max = M_TO_DOWN
self.__Make_New_Temp_Node_Lower_WithID(self.seek_max, cur_base_start_node_id, base.end_node_id, index)
else:#趋势延续
low_node_time, low_node_value = self.__share_same_beichi_with_low_judge()
if low_node_time!=None and low_node_value!=None and False:
self.node_list[-1].isformal = M_FORMAL
self.node_list[-1].datetime = low_node_time
self.node_list[-1].value = low_node_value
self.node_list[-1].low_id = index
else:
self.__Update_Last_Node_Lower_WithID(self.seek_max, cur_base_start_node_id, base.end_node_id, isformal=M_TEMP,low_id=index)
else:
#当前中枢在前一中枢上或相交,当前趋势结束
if((0>self.__get_CB_pos(pre_base, base)) and (index>self.node_list[-1].low_id)):
#更新正式节点信息
#self.__Update_Last_Node_Lower(self.seek_max, pre_base.start, pre_base.end, isformal=M_FORMAL)
self.node_list[-1].isformal = M_FORMAL
#生成新临时节点
self.seek_max = M_TO_UP
self.__Make_New_Temp_Node_Lower_WithID(self.seek_max, cur_base_start_node_id, base.end_node_id, index)
else:#趋势延续
low_node_time, low_node_value = self.__share_same_beichi_with_low_judge()
if low_node_time!=None and low_node_value!=None and False:
self.node_list[-1].isformal = M_FORMAL
self.node_list[-1].datetime = low_node_time
self.node_list[-1].value = low_node_value
self.node_list[-1].low_id = index
else:
self.__Update_Last_Node_Lower_WithID(self.seek_max, cur_base_start_node_id, base.end_node_id, isformal=M_TEMP,low_id=index)
def __Make_New_Temp_Node_Lower(self, seek_max, start_time, end_time, low_id=None):
        '''
        Create a new temporary node.
        seek_max: relation of this temporary node to the previous node
        '''
lower_data = self.low_CB_set.data
if seek_max==M_TO_UP:
time,value = self.__getMaxIndex_Val(lower_data, start_time, end_time)
top_bottom = M_TOP
else:
time,value = self.__getMinIndex_Val(lower_data, start_time, end_time)
top_bottom = M_BOTTOM
if time==None:
time_seg = self.data.ix[self.data.index>end_time, 'close']
time = time_seg.index[0]
value = self.data.ix[0, 'close']
self.node_list.append(Node(time, value, top_bottom, low_id=low_id, isformal=M_TEMP))
def __Make_New_Temp_Node_Lower_WithID(self, seek_max, start_node_id, end_node_id, low_id=None):
        '''
        Create a new temporary node.
        seek_max: relation of this temporary node to the previous node
        '''
lower_node_list = self.low_CB_set.node_list
if seek_max==M_TO_UP:
node_id,value = self.__getMaxLowerNode_Val( start_node_id, end_node_id)
top_bottom = M_TOP
else:
node_id,value = self.__getMinLowerNode_Val( start_node_id, end_node_id)
top_bottom = M_BOTTOM
self.node_list.append(Node(lower_node_list[node_id].datetime, value, top_bottom, low_id=low_id, isformal=M_TEMP))
def __Update_Last_Node_Lower(self, seek_max, start_time, end_time, isformal=None, low_id = None) :
        '''
        Update the last node's information.
        seek_max: relation of this temporary node to the previous node
        '''
lower_data = self.low_CB_set.data
if seek_max==M_TO_UP:
time,value = self.__getMaxIndex_Val(lower_data, start_time, end_time)
else:
time,value = self.__getMinIndex_Val(lower_data, start_time, end_time)
if time==None:
time_seg = self.data.ix[self.data.index>end_time, 'close']
time = time_seg.index[0]
value = self.data.ix[0, 'close']
if ((seek_max==M_TO_UP) and (value>self.node_list[-1].value))\
or ((seek_max==M_TO_DOWN) and (value<self.node_list[-1].value)):
self.node_list[-1].datetime = time
self.node_list[-1].value = value
if low_id!=None:
self.node_list[-1].low_id = low_id
if isformal!=None:
self.node_list[-1].isformal = isformal
def __Update_Last_Node_Lower_WithID(self, seek_max, start_node_id, end_node_id, isformal=None, low_id = None) :
        '''
        Update the last node's information.
        seek_max: relation of this temporary node to the previous node
        '''
lower_node_list = self.low_CB_set.node_list
if seek_max==M_TO_UP:
node_id,value = self.__getMaxLowerNode_Val( start_node_id, end_node_id)
else:
node_id,value = self.__getMinLowerNode_Val( start_node_id, end_node_id)
if ((seek_max==M_TO_UP) and (value>self.node_list[-1].value))\
or ((seek_max==M_TO_DOWN) and (value<self.node_list[-1].value)):
self.node_list[-1].datetime = lower_node_list[node_id].datetime
self.node_list[-1].value = value
if low_id!=None:
self.node_list[-1].low_id = low_id
if isformal!=None:
self.node_list[-1].isformal = isformal
def __reverse_direct(self, seek_max):
if seek_max == M_TO_UP:
return M_TO_DOWN
else:
return M_TO_UP
def __get_lowest_current_time(self, freq):
low_cb_set = self.low_CB_set
while(low_cb_set!=None):
if low_cb_set.freq == freq:
return low_cb_set.cur_time_index
else:
low_cb_set = low_cb_set.low_CB_set
return self.cur_time_index
def get_lower_beichi(self):
if self.low_CB_set!=None:
low_beichi_list = self.low_CB_set.beichi_list
low_node_list = self.low_CB_set.node_list
if len(low_beichi_list)<=0 \
or len(low_node_list)<2 \
or len(self.centralbase_list)<=0:
return
if self.freq=='30MIN' :
if abs(low_node_list[-2].value-36)<0.001:
a=1
if (low_beichi_list[-1].time == low_node_list[-2].datetime) and (self.cur_low_beichi_time != low_node_list[-2].datetime):
self.cur_low_beichi_time = low_node_list[-2].datetime
base = self.centralbase_list[-1]
if(base.ctype<=-2):
if low_node_list[-2].value <= self.cur_min_value:#创新低
pre_macd = self.__getMACD_Sum(self.node_list[self.cur_min_node_id-1].datetime, self.node_list[self.cur_min_node_id].datetime, seekMax=False)
cur_macd = self.__getMACD_Sum(low_node_list[-3].datetime, low_node_list[-2].datetime, seekMax=False)
pre_vol = self.__getVolumn_Sum(self.node_list[self.cur_min_node_id-1].datetime, self.node_list[self.cur_min_node_id].datetime, seekMax=False)
cur_vol = self.__getVolumn_Sum(low_node_list[-3].datetime, low_node_list[-2].datetime, seekMax=False)
if (abs(cur_macd) < abs(pre_macd)) or (abs(cur_vol)<abs(pre_vol)):
self.beichi_list.append(BeichiTime(low_node_list[-2].datetime, base.ctype, len(self.node_list)-1,\
real_time=self.__get_lowest_current_time("5MIN")))
self.first_buy_point_list.append(BuyPoint(low_node_list[-2].datetime, len(self.node_list)-1,\
real_time=self.__get_lowest_current_time("5MIN")))
elif (base.ctype>=2):
if low_node_list[-2].value >= self.cur_max_value:#创新高
pre_macd = self.__getMACD_Sum(self.node_list[self.cur_max_node_id-1].datetime, self.node_list[self.cur_max_node_id].datetime, seekMax=True)
cur_macd = self.__getMACD_Sum(low_node_list[-3].datetime, low_node_list[-2].datetime, seekMax=True)
if abs(cur_macd) < abs(pre_macd):
self.beichi_list.append(BeichiTime(low_node_list[-2].datetime, base.ctype, len(self.node_list)-1,\
real_time=self.__get_lowest_current_time("5MIN")))
self.first_sell_point_list.append(SellPoint(low_node_list[-2].datetime, len(self.node_list)-1,\
real_time=self.__get_lowest_current_time("5MIN")))
def update_max_min_value(self):
        '''
        Update the current max/min values from the values of formal nodes.
        '''
if(len(self.centralbase_list)<2):
return
pre_base = self.centralbase_list[-2]
base = self.centralbase_list[-1]
if (self.cur_min_node_id == len(self.node_list)-2) \
or (self.cur_max_node_id == len(self.node_list)-2):
return
if base.ctype==0 or pre_base.ctype*base.ctype<0:
self.cur_max_node_id,self.cur_max_value = self.__getMaxNode_Val(base.start_node_id, base.end_node_id)
self.cur_min_node_id,self.cur_min_value = self.__getMinNode_Val(base.start_node_id, base.end_node_id)
else:
if self.node_list[-2].value <= self.cur_min_value:#创新低
self.cur_min_node_id = len(self.node_list)-2
self.cur_min_value = self.node_list[-2].value
if self.node_list[-2].value >= self.cur_max_value:#创新高
self.cur_max_node_id = len(self.node_list)-2
self.cur_max_value = self.node_list[-2].value
def getBeichi_LastTwo_Step(self):
        '''
        Incrementally detect divergence (beichi) nodes.
        Returns whether the node newly added to the current central base is a divergence point.
        When to call:
        a new formal node has been added to the central base, but the base's extreme-value info has not been updated yet.
        '''
if(len(self.centralbase_list)<2):
return False
if len(self.beichi_list)>0 and self.beichi_list[-1].time == self.node_list[-2].datetime:
return False
pre_base = self.centralbase_list[-2]
base = self.centralbase_list[-1]
cur_macd = 0
pre_macd = 0
cur_macd_lower = 0
pre_macd_lower = 0
if(base.ctype<=-2):
if self.node_list[-2].value < self.cur_min_value:#创新低
pre_macd = self.__getMACD_Sum(self.node_list[self.cur_min_node_id-1].datetime, self.node_list[self.cur_min_node_id ].datetime, seekMax=False)
cur_macd = self.__getMACD_Sum(self.node_list[-3].datetime, self.node_list[-2].datetime, seekMax=False)
pre_macd_lower = self.__getMACD_Sum_Lower(self.node_list[self.cur_min_node_id-1].datetime, self.node_list[self.cur_min_node_id ].datetime, seekMax=False)
cur_macd_lower = self.__getMACD_Sum_Lower(self.node_list[-3].datetime, self.node_list[-2].datetime, seekMax=False)
if abs(cur_macd) < abs(pre_macd) or abs(cur_macd_lower) < abs(pre_macd_lower) :
if self.freq=="D":
a=1
self.beichi_list.append(BeichiTime(self.node_list[-2].datetime,base.ctype, len(self.node_list)-2,\
real_time=self.__get_lowest_current_time("5MIN")))
self.first_buy_point_list.append(BuyPoint(self.node_list[-2].datetime,len(self.node_list)-2,\
real_time=self.__get_lowest_current_time("5MIN")))
return True
elif (base.ctype>=2):
if self.node_list[-2].value > self.cur_max_value:#创新高
pre_macd = self.__getMACD_Sum(self.node_list[self.cur_max_node_id-1].datetime, self.node_list[self.cur_max_node_id].datetime, seekMax=True)
cur_macd = self.__getMACD_Sum(self.node_list[-3].datetime, self.node_list[-2].datetime, seekMax=True)
pre_macd_lower = self.__getMACD_Sum_Lower(self.node_list[self.cur_max_node_id-1].datetime, self.node_list[self.cur_max_node_id].datetime, seekMax=True)
cur_macd_lower = self.__getMACD_Sum_Lower(self.node_list[-3].datetime, self.node_list[-2].datetime, seekMax=True)
if abs(cur_macd) < abs(pre_macd) or abs(cur_macd_lower) < abs(pre_macd_lower) :
self.beichi_list.append(BeichiTime(self.node_list[-2].datetime,base.ctype, len(self.node_list)-2,\
real_time=self.__get_lowest_current_time("5MIN")))
self.first_sell_point_list.append(SellPoint(self.node_list[-2].datetime, len(self.node_list)-2,\
real_time=self.__get_lowest_current_time("5MIN")))
return True
else:
return self.beichi_processing
return self.beichi_processing
def get_panzheng_beichi_step(self):
        '''
        Consolidation (panzheng) divergence.
        '''
if(len(self.centralbase_list)<=1) or len(self.node_list)<1:
return False
base = self.centralbase_list[-1]
start_node_id = base.start_node_id
end_node_id = base.end_node_id
if len(self.beichi_pc_list)>0 and self.node_list[end_node_id].datetime ==self.beichi_pc_list[-1].time:
return False
if end_node_id-start_node_id >=2:
if self.node_list[end_node_id].value<base.min_val:#创新低
min_node_id = base.min_node_id
pre_macd = self.__getMACD_Sum(self.node_list[min_node_id-1].datetime, self.node_list[min_node_id].datetime, seekMax=False)
cur_macd = self.__getMACD_Sum(self.node_list[end_node_id-1].datetime, self.node_list[end_node_id].datetime, seekMax=False)
pre_macd_lower = self.__getMACD_Sum_Lower(self.node_list[min_node_id-1].datetime, self.node_list[min_node_id].datetime, seekMax=False)
cur_macd_lower = self.__getMACD_Sum_Lower(self.node_list[end_node_id-1].datetime, self.node_list[end_node_id].datetime, seekMax=False)
if abs(cur_macd) < abs(pre_macd) or abs(cur_macd_lower) < abs(pre_macd_lower) :
self.beichi_pc_list.append(BeichiTime(self.node_list[end_node_id].datetime,-2, end_node_id,\
real_time=self.__get_lowest_current_time("5MIN")))
return True
elif self.node_list[end_node_id].value>base.max_val:#创新高
max_node_id = base.max_node_id
pre_macd = self.__getMACD_Sum(self.node_list[max_node_id-1].datetime, self.node_list[max_node_id].datetime, seekMax=True)
cur_macd = self.__getMACD_Sum(self.node_list[end_node_id-1].datetime, self.node_list[end_node_id].datetime, seekMax=True)
pre_macd_lower = self.__getMACD_Sum_Lower(self.node_list[max_node_id-1].datetime, self.node_list[max_node_id].datetime, seekMax=True)
cur_macd_lower = self.__getMACD_Sum_Lower(self.node_list[end_node_id-1].datetime, self.node_list[end_node_id].datetime, seekMax=True)
if abs(cur_macd) < abs(pre_macd) or abs(cur_macd_lower) < abs(pre_macd_lower) :
if self.freq=='30MIN':
a=1
self.beichi_pc_list.append(BeichiTime(self.node_list[end_node_id].datetime,2, end_node_id,\
real_time=self.__get_lowest_current_time("5MIN")))
return True
return False
def getBeichi_LastOne_Step(self):
        '''
        Incrementally detect divergence (beichi) nodes.
        Returns whether the node newly added to the current central base is a divergence point.
        '''
if(len(self.centralbase_list)<2):
return False
if self.node_list[-1].isformal != M_FORMAL:
return False
if len(self.beichi_list)>0 and self.beichi_list[-1].time == self.node_list[-1].datetime:
return False
if self.freq=='30MIN' :
a=1
pre_base = self.centralbase_list[-2]
base = self.centralbase_list[-1]
cur_macd = 0
pre_macd = 0
if(base.ctype<=-2):
if self.node_list[-1].value <= self.cur_min_value:#创新低
pre_macd = self.__getMACD_Sum(self.node_list[self.cur_min_node_id-1].datetime, self.node_list[self.cur_min_node_id].datetime, seekMax=False)
cur_macd = self.__getMACD_Sum(self.node_list[-2].datetime, self.node_list[-1].datetime, seekMax=False)
pre_vol = self.__getVolumn_Sum(self.node_list[self.cur_min_node_id-1].datetime, self.node_list[self.cur_min_node_id].datetime, seekMax=False)
cur_vol = self.__getVolumn_Sum(self.node_list[-2].datetime, self.node_list[-1].datetime, seekMax=False)
if (abs(cur_macd) < abs(pre_macd)) or (abs(cur_vol)<abs(pre_vol)):
self.beichi_list.append(BeichiTime(self.node_list[-1].datetime, base.ctype, len(self.node_list)-1,\
real_time=self.__get_lowest_current_time("5MIN")))
self.first_buy_point_list.append(BuyPoint(self.node_list[-1].datetime, len(self.node_list)-1,\
real_time=self.__get_lowest_current_time("5MIN")))
return True
elif (base.ctype>=2):
if self.node_list[-1].value >= self.cur_max_value:#创新高
pre_macd = self.__getMACD_Sum(self.node_list[self.cur_max_node_id-1].datetime, self.node_list[self.cur_max_node_id].datetime, seekMax=True)
cur_macd = self.__getMACD_Sum(self.node_list[-2].datetime, self.node_list[-1].datetime, seekMax=True)
if abs(cur_macd) < abs(pre_macd):
self.beichi_list.append(BeichiTime(self.node_list[-1].datetime, base.ctype, len(self.node_list)-1,\
real_time=self.__get_lowest_current_time("5MIN")))
self.first_sell_point_list.append(SellPoint(self.node_list[-1].datetime, len(self.node_list)-1,\
real_time=self.__get_lowest_current_time("5MIN")))
return True
else:
return self.beichi_processing
return self.beichi_processing
def getBeichi_Share_With_LowBeichi_Step(self):
if self.low_CB_set==None:
return
if len(self.centralbase_list)<1:
return
if len(self.share_beichi_list)>0 and self.share_beichi_list[-1].time == self.low_CB_set.node_list[-2].datetime:
return
base = self.centralbase_list[-1]
low_node_time, low_node_value = self.__share_same_beichi_with_low_judge()
if low_node_time!=None and low_node_value!=None :
self.share_beichi_list.append(BeichiTime(low_node_time, base.ctype, len(self.node_list)-1,\
real_time=self.__get_lowest_current_time("5MIN")))
def beichi_judge_step(self):
for beichi in self.beichi_list:
if beichi.real_beichi == M_NODECIDE:
if beichi.node_id + 4 == len(self.node_list):
if beichi.btype >0 and self.node_list[beichi.node_id].value>=self.node_list[-2].value: #顶背驰判断
beichi.real_beichi = M_TRUE
elif beichi.btype <0 and self.node_list[beichi.node_id].value<=self.node_list[-2].value: #低背驰判断
beichi.real_beichi = M_TRUE
else:
beichi.real_beichi = M_FALSE
def trade_strategy_step(self, high_cb_set):
for sec_buy_point in self.sec_buy_point_list:
if sec_buy_point.real_buy==M_NODECIDE:
if len(high_cb_set.centralbase_list)>0 and high_cb_set.centralbase_list[-1].ctype>=-2:
sec_buy_point.real_buy = M_TRUE
else:
sec_buy_point.real_buy = M_FALSE
def sell_point_judge(self):
if len(self.node_list)<3:
return
if len(self.first_buy_point_list)>0:
if len(self.node_list)-2 == (self.first_buy_point_list[-1].node_id+1):
self.all_sell_point_list.append(SellPoint(self.node_list[-2].datetime, len(self.node_list)-2,\
real_time=self.__get_lowest_current_time("5MIN")))
if len(self.sec_buy_point_list)>0:
if len(self.node_list)-2 == (self.sec_buy_point_list[-1].node_id+1):
self.all_sell_point_list.append(SellPoint(self.node_list[-2].datetime, len(self.node_list)-2,\
real_time=self.__get_lowest_current_time("5MIN")))
if len(self.third_buy_point_list)>0:
if len(self.node_list)-2 == (self.third_buy_point_list[-1].node_id+1):
self.all_sell_point_list.append(SellPoint(self.node_list[-2].datetime, len(self.node_list)-2,\
real_time=self.__get_lowest_current_time("5MIN")))
def __getMACD_Sum(self, start_time, end_time, seekMax=True):
data_seg = self.data.ix[(self.data.index>=start_time) & (self.data.index<=end_time) & (self.data.index <= self.cur_time_index), 'MACD']
if seekMax:
data_seg = data_seg[data_seg>0]
else:
data_seg = data_seg[data_seg<0]
#return data_seg.sum()
if data_seg.empty:
return 0
else:
return data_seg.mean()
def __getMACD_Sum_Lower(self, start_time, end_time, seekMax=True):
if self.low_CB_set!= None:
data_seg = self.low_CB_set.data.ix[(self.low_CB_set.data.index>=start_time) & (self.low_CB_set.data.index<=end_time) & (self.low_CB_set.data.index <= self.low_CB_set.cur_time_index), 'MACD']
else:
data_seg = self.data.ix[(self.data.index>=start_time) & (self.data.index<=end_time) & (self.data.index <= self.cur_time_index), 'MACD']
if seekMax:
data_seg = data_seg[data_seg>0]
else:
data_seg = data_seg[data_seg<0]
#return data_seg.sum()
if data_seg.empty:
return 0
else:
return data_seg.mean()
def __getVolumn_Sum(self, start_time, end_time, seekMax=True):
data_seg = self.data.ix[(self.data.index>=start_time) & (self.data.index<=end_time) & (self.data.index <= self.cur_time_index), 'volume']
#return data_seg.sum()
if data_seg.empty:
return 0
else:
return data_seg.mean()
def __getVolumn_Sum_Lower(self, start_time, end_time, seekMax=True):
if self.low_CB_set!= None:
data_seg = self.low_CB_set.data.ix[(self.low_CB_set.data.index>=start_time) & (self.low_CB_set.data.index<=end_time) & (self.low_CB_set.data.index <= self.low_CB_set.cur_time_index), 'volume']
else:
data_seg = self.data.ix[(self.data.index>=start_time) & (self.data.index<=end_time) & (self.data.index <= self.cur_time_index), 'volume']
#return data_seg.sum()
if data_seg.empty:
return 0
else:
return data_seg.mean()
def __get_CB_pos(self, first, second):
"""
获取两个中枢的相对位置:1前在后上,-1前在后下,0相交
"""
#if (first.up <=second.down):
#return -1
#elif (first.down >=second.up) :
#return 1
#else:
#return 0
if (first.up <second.up) and (first.down <=second.down):
return -1
elif (first.down >second.down) and (first.up >= second.up) :
return 1
else:
return 0
def __getMaxIndex_Val(self, data, start, end):
data_seg = data.ix[(data.index>=start)&(data.index<=end), 'close']
if data_seg.any():
return (data_seg.idxmax(), data_seg.max())
else:
return (None, None)
def __getMinIndex_Val(self, data, start, end):
data_seg = data.ix[(data.index>=start)&(data.index<=end), 'close']
if data_seg.any():
return (data_seg.idxmin(), data_seg.min())
else:
return (None, None)
def __getMaxNode_Val(self, start_in, end_in):
val = 0.0
val_index = -1
for index in range(start_in, end_in+1):
if self.node_list[index].value > val:
val = self.node_list[index].value
val_index = index
return (val_index, val)
def __getMaxLowerNode_Val(self, start_in, end_in):
val = 0.0
val_index = -1
for index in range(start_in, end_in+1):
if self.low_CB_set.node_list[index].value > val:
val = self.low_CB_set.node_list[index].value
val_index = index
return (val_index, val)
def __getMinNode_Val(self, start_in, end_in):
val = 5000
val_index = -1
for index in range(start_in, end_in+1):
if self.node_list[index].value < val:
val = self.node_list[index].value
val_index = index
return (val_index, val)
def __getMinLowerNode_Val(self, start_in, end_in):
val = 5000
val_index = -1
for index in range(start_in, end_in+1):
if self.low_CB_set.node_list[index].value < val:
val = self.low_CB_set.node_list[index].value
val_index = index
return (val_index, val)
def __getSegment(self, i):
"""
i from 0
"""
if i<0 or i>np.size(self.node_list)-1:
return None
return Interval(lower_bound=self.node_list[i].value, upper_bound=self.node_list[i+1].value,
                        lower_closed=False, upper_closed=False)
def __getSegmentStart(self, i):
"""
i from 0
"""
if i<0 or i>np.size(self.node_list)-1:
return None
return self.node_list[i].datetime
def __getSegmentEnd(self, i):
"""
i from 0
"""
if i<0 or i>np.size(self.node_list)-1:
return None
return self.node_list[i+1].datetime
def __getCBType(self, newcbase, isnew=True, cb_id=None):
if isnew:
if(np.size(self.centralbase_list)<1):
return 0
r_pos = self.__get_CB_pos(self.centralbase_list[-1], newcbase)
pre_ctype = self.centralbase_list[-1].ctype
#if pre_ctype==0:#前一个是起点或背驰形成的第一中枢
# return (-2*r_pos)
else:
if cb_id-1<0:
return 0
r_pos = self.__get_CB_pos(self.centralbase_list[cb_id-1], self.centralbase_list[cb_id])
pre_ctype = self.centralbase_list[cb_id-1].ctype
if self.centralbase_list[cb_id].ctype==0:#前一个是起点或背驰形成的第一中枢
return 0
if(0==r_pos):
return pre_ctype
else:
if((r_pos*pre_ctype) > 0):#转折
if abs(pre_ctype) >=3:
return (-2*r_pos)
else:
return (-2*r_pos)
elif((r_pos*pre_ctype) < 0):#延续
return (pre_ctype-r_pos)
else:
return(-1*r_pos)
def __share_same_beichi_with_low_judge(self):
if self.low_CB_set!=None:
low_beichi_list = self.low_CB_set.beichi_list
low_node_list = self.low_CB_set.node_list
if len(low_beichi_list)<=0 \
or len(low_node_list)<2 \
or len(self.centralbase_list)<=0:
return (None, None)
if self.freq=='30MIN' :
if abs(low_node_list[-2].value-36)<0.001:
a=1
if self.freq in GRANDPA_CB_LEVER:
if low_node_list[-1].isformal == M_TRUE:
low_node_time = low_node_list[-1].datetime
low_node_value = low_node_list[-1].value
else:
return (None, None)
else:
low_node_time = low_node_list[-2].datetime
low_node_value = low_node_list[-2].value
if (low_beichi_list[-1].time == low_node_time):
if self.freq=='D' :
a=1
base = self.centralbase_list[-1]
if(base.ctype<=-2):
if low_node_value < self.cur_min_value:#创新低
pre_macd = self.__getMACD_Sum(self.node_list[self.cur_min_node_id-1].datetime, self.node_list[self.cur_min_node_id].datetime, seekMax=False)
cur_macd = self.__getMACD_Sum(self.node_list[-2].datetime, low_node_time, seekMax=False)
pre_macd_lower = self.__getMACD_Sum_Lower(self.node_list[self.cur_min_node_id-1].datetime, self.node_list[self.cur_min_node_id ].datetime, seekMax=False)
cur_macd_lower = self.__getMACD_Sum_Lower(self.node_list[-2].datetime, low_node_time, seekMax=False)
pre_vol = self.__getVolumn_Sum(self.node_list[self.cur_min_node_id-1].datetime, self.node_list[self.cur_min_node_id].datetime, seekMax=False)
cur_vol = self.__getVolumn_Sum(self.node_list[-2].datetime, low_node_time, seekMax=False)
if (abs(cur_macd) < abs(pre_macd)) or (abs(cur_macd_lower)<abs(pre_macd_lower)):
return (low_node_time, low_node_value)
elif (base.ctype>=2):
if low_node_value > self.cur_max_value:#创新高
pre_macd = self.__getMACD_Sum(self.node_list[self.cur_max_node_id-1].datetime, self.node_list[self.cur_max_node_id].datetime, seekMax=True)
cur_macd = self.__getMACD_Sum(self.node_list[-2].datetime, low_node_time, seekMax=True)
pre_macd_lower = self.__getMACD_Sum_Lower(self.node_list[self.cur_max_node_id-1].datetime, self.node_list[self.cur_max_node_id].datetime, seekMax=True)
cur_macd_lower = self.__getMACD_Sum_Lower(self.node_list[-2].datetime, low_node_time, seekMax=True)
if abs(cur_macd) < abs(pre_macd) or abs(cur_macd_lower)<abs(pre_macd_lower):
return (low_node_time, low_node_value)
else:
return (None, None)
return (None, None)
def SnakeTrade_step(self):
        '''
        "Snake" trading strategy.
        '''
if len(self.node_list)<=2 or len(self.beichi_list)<=0:
return
if len(self.trade_strategy_list)>0 \
and self.trade_strategy_list[-1].trade_point_list[-1].node_id ==len(self.node_list)-2:
return
if (self.node_list[-2].datetime == self.beichi_list[-1].time) and self.beichi_list[-1].btype<0: #底背驰
if len(self.trade_strategy_list)>0:
self.trade_strategy_list[-1].trade_on_going = False
self.trade_strategy_list.append(TradeStrategy())
self.trade_strategy_list[-1].trade_point_list.append(\
TradePoint(time =self.beichi_list[-1].time,
node_id = self.beichi_list[-1].node_id,
real_time = self.beichi_list[-1].real_time,
trade_direct = M_BUY))
self.trade_strategy_list[-1].trade_on_going = True
elif(self.node_list[-2].ntype == M_TOP and len(self.trade_strategy_list)>0 and self.trade_strategy_list[-1].trade_on_going):
self.trade_strategy_list[-1].trade_point_list.append(TradePoint(time =self.node_list[-2].datetime, \
node_id = len(self.node_list)-2, \
real_time=self.__get_lowest_current_time("5MIN"), \
trade_direct = M_SELL))
elif(self.node_list[-2].ntype == M_BOTTOM and len(self.trade_strategy_list)>0 and self.trade_strategy_list[-1].trade_on_going):
if len(self.trade_strategy_list[-1].trade_point_list)==2:
if self.node_list[-2].value > self.node_list[self.trade_strategy_list[-1].trade_point_list[-2].node_id].value:
self.trade_strategy_list[-1].trade_point_list.append(TradePoint(time =self.node_list[-2].datetime, \
node_id = len(self.node_list)-2, \
real_time=self.__get_lowest_current_time("5MIN"), \
trade_direct = M_BUY))
else:
self.trade_strategy_list[-1].trade_on_going = False
elif (self.centralbase_list[-1].end_node_id - self.centralbase_list[-1].start_node_id)==1 and self.centralbase_list[-1].ctype>=2:
self.trade_strategy_list[-1].trade_point_list.append(TradePoint(time =self.node_list[-2].datetime, \
node_id = len(self.node_list)-2, \
real_time=self.__get_lowest_current_time("5MIN"), \
trade_direct = M_BUY))
else:
self.trade_strategy_list[-1].trade_on_going = False
def SnakeTrade_With_ShareBeichi_step(self):
        '''
        "Snake" trading strategy using shared divergence points.
        '''
if self.low_CB_set==None:
return
if len(self.node_list)<=2 or len(self.share_beichi_list)<=0:
return
if (self.low_CB_set.beichi_list[-1].time == self.share_beichi_list[-1].time) \
and self.low_CB_set.beichi_list[-1].time == self.low_CB_set.share_beichi_list[-1].time \
and self.share_beichi_list[-1].btype<0: #底背驰
if len(self.trade_strategy_list)<=0 or (not self.trade_strategy_list[-1].trade_on_going):
self.trade_strategy_list.append(TradeStrategy())
self.trade_strategy_list[-1].trade_point_list.append(\
TradePoint(time =self.share_beichi_list[-1].time,
node_id = self.share_beichi_list[-1].node_id,
low_cb_id=self.node_list[-1].low_id,
real_time = self.__get_lowest_current_time("5MIN"),
trade_direct = M_BUY))
self.trade_strategy_list[-1].trade_on_going = True
if len(self.beichi_list)>0 and self.beichi_list[-1].time == self.share_beichi_list[-1].time\
and self.share_beichi_list[-1].btype<0: #底背驰
if len(self.trade_strategy_list)<=0 or (not self.trade_strategy_list[-1].trade_on_going):
self.trade_strategy_list.append(TradeStrategy())
self.trade_strategy_list[-1].trade_point_list.append(\
TradePoint(time =self.share_beichi_list[-1].time,
node_id = self.share_beichi_list[-1].node_id,
low_cb_id=self.node_list[-1].low_id,
real_time = self.__get_lowest_current_time("5MIN"),
trade_direct = M_BUY))
self.trade_strategy_list[-1].trade_on_going = True
        # the central base is not established
if len(self.trade_strategy_list)>0 and self.trade_strategy_list[-1].trade_on_going:
buy_node_id = self.trade_strategy_list[-1].trade_point_list[-1].node_id
if len(self.node_list)-1 == buy_node_id+2:
pre_base = self.low_CB_set.centralbase_list[-2]
base = self.low_CB_set.centralbase_list[-1]
if (base.ctype<pre_base.ctype) and self.low_CB_set.node_list[base.start_node_id].value<self.node_list[buy_node_id].value:
self.trade_strategy_list[-1].trade_point_list.append(TradePoint(time =self.node_list[-1].datetime, \
node_id = len(self.node_list)-1, \
low_cb_id= self.node_list[-1].low_id,\
real_time=self.__get_lowest_current_time("5MIN"), \
trade_direct = M_SELL))
self.trade_strategy_list[-1].trade_on_going = False
        # shared top divergence at the sub-level
if len(self.trade_strategy_list)>0 and self.trade_strategy_list[-1].trade_on_going:
if (self.low_CB_set.beichi_list[-1].time == self.low_CB_set.share_beichi_list[-1].time) \
and self.low_CB_set.share_beichi_list[-1].btype>0:
self.trade_strategy_list[-1].trade_point_list.append(TradePoint(time =self.node_list[-1].datetime, \
node_id = len(self.node_list)-1, \
low_cb_id= self.node_list[-1].low_id,\
real_time=self.__get_lowest_current_time("5MIN"), \
trade_direct = M_SELL))
self.trade_strategy_list[-1].trade_on_going = False
        # top divergence at this level
if len(self.trade_strategy_list)>0 and self.trade_strategy_list[-1].trade_on_going:
if (self.share_beichi_list[-1].btype>0):
self.trade_strategy_list[-1].trade_point_list.append(TradePoint(time =self.node_list[-1].datetime, \
node_id = len(self.node_list)-1, \
low_cb_id= self.node_list[-1].low_id,\
real_time=self.__get_lowest_current_time("5MIN"), \
trade_direct = M_SELL))
self.trade_strategy_list[-1].trade_on_going = False
        # trend change at this level
if len(self.trade_strategy_list)>0 and self.trade_strategy_list[-1].trade_on_going:
buy_node_id = self.trade_strategy_list[-1].trade_point_list[-1].node_id
if (self.centralbase_list[-1].start_node_id > buy_node_id) and self.centralbase_list[-1].ctype<0:
self.trade_strategy_list[-1].trade_point_list.append(TradePoint(time =self.node_list[-1].datetime, \
node_id = len(self.node_list)-1, \
low_cb_id= self.node_list[-1].low_id,\
real_time=self.__get_lowest_current_time("5MIN"), \
trade_direct = M_SELL))
self.trade_strategy_list[-1].trade_on_going = False
|
StarcoderdataPython
|
57906
|
<gh_stars>1-10
import numpy as np
import pandas as pd
import os
import sys
"""
Storey Q-Values - https://github.com/StoreyLab/qvalue
--------------------
Python Wrapper
Author: <NAME>
https://github.com/broadinstitute/tensorqtl/blob/master/tensorqtl/rfunc.py
"""
def qvalue(p, lambda_qvalue=None):
"""Wrapper for qvalue::qvalue"""
import rpy2
from rpy2.robjects.packages import importr
    from collections.abc import Iterable
qvalue = importr("qvalue")
rp = rpy2.robjects.vectors.FloatVector(p)
if lambda_qvalue is None:
q = qvalue.qvalue(rp)
else:
if not isinstance(lambda_qvalue, Iterable):
lambda_qvalue = [lambda_qvalue]
rlambda = rpy2.robjects.vectors.FloatVector(lambda_qvalue)
q = qvalue.qvalue(rp, **{'lambda':rlambda})
qval = np.array(q.rx2('qvalues'))
pi0 = np.array(q.rx2('pi0'))[0]
return qval, pi0
def t_test(mat: pd.DataFrame, group_s: pd.Series, equal_var: bool = False) -> pd.DataFrame:
"""
t-test
---------------------
Args:
* mat: pd.DataFrame (genes x samples)
* group_s: series of groupings
* equal_var: wald-ttest (False)
"""
from scipy import stats
from statsmodels.stats.multitest import multipletests
mat = mat[group_s.index]
def _collapser(x, index, columns, name):
_df = pd.DataFrame(x, index=index, columns=columns).reset_index()
_id = _df.columns[0]
return pd.melt(
pd.DataFrame(x, index=index, columns=columns).reset_index(),
id_vars=_id,
).set_index(_id).rename(columns={'variable':group_s.name,'value':name})
groups = np.array(group_s)
X = mat.values
n_groups = np.unique(groups).shape[0]
n_genes = X.shape[0]
# Init np.arrays
t_stat = np.zeros((n_genes, n_groups))
pval = np.zeros((n_genes, n_groups))
pval_adj = np.zeros((n_genes, n_groups))
qval = np.zeros((n_genes, n_groups))
x_in = np.zeros((n_genes, n_groups))
x_out = np.zeros((n_genes, n_groups))
for idx,group in enumerate(np.unique(groups)):
mask = groups==group
if sum(mask) > 1:
X_in = X[:,mask]
X_out = X[:,~mask]
t_stat[:,idx], pval[:,idx] = stats.ttest_ind(X_in, X_out, axis=1, equal_var=equal_var)
_,pval_adj[:,idx],_,_ = multipletests(
pval[:,idx],
alpha=0.05,
method='fdr_bh',
is_sorted=False,
returnsorted=False
)
qval[:,idx],_ = qvalue(pval[:,idx])
x_in[:,idx] = np.mean(X_in,1)
x_out[:,idx] = np.mean(X_out,1)
# Collapse to dataframe
de_df = pd.concat([
_collapser(x_in, mat.index, np.unique(groups), 'x_in'),
_collapser(x_out, mat.index, np.unique(groups), 'x_out')['x_out'],
_collapser(t_stat, mat.index, np.unique(groups), 't')['t'],
_collapser(pval, mat.index, np.unique(groups), 'pval')['pval'],
_collapser(pval_adj, mat.index, np.unique(groups), 'pval_adj')['pval_adj'],
_collapser(qval, mat.index, np.unique(groups), 'qval')['qval']
],1)
# Fold-change
de_df['diff'] = de_df['x_in'] - de_df['x_out']
# Signed FC * -log10(qval)
de_df['gsea_rank'] = de_df['diff'] * -np.log10(de_df['pval_adj'])
return de_df
def mannwhitneyu(mat: pd.DataFrame, group_s: pd.Series) -> pd.DataFrame:
"""
mannwhitneyu
---------------------
Args:
* mat: pd.DataFrame (genes x samples)
* group_s: series of groupings
"""
from tqdm import tqdm
from scipy import stats
from statsmodels.stats.multitest import multipletests
from sys import stdout
mat = mat[group_s.index]
def _collapser(x, index, columns, name):
_df = pd.DataFrame(x, index=index, columns=columns).reset_index()
_id = _df.columns[0]
return pd.melt(
pd.DataFrame(x, index=index, columns=columns).reset_index(),
id_vars=_id,
).set_index(_id).rename(columns={'variable':group_s.name,'value':name})
groups = np.array(group_s)
X = mat.values
n_groups = np.unique(groups).shape[0]
n_genes = X.shape[0]
# Init np.arrays
u_stat = np.zeros((n_genes, n_groups))
pval = np.zeros((n_genes, n_groups))
pval_adj = np.zeros((n_genes, n_groups))
qval = np.zeros((n_genes, n_groups))
x_in = np.zeros((n_genes, n_groups))
x_out = np.zeros((n_genes, n_groups))
for idx,group in enumerate(np.unique(groups)):
stdout.write("\r{} of {}".format(idx+1, n_groups))
mask = groups==group
if sum(mask) > 1:
X_in = X[:,mask]
X_out = X[:,~mask]
for gn in range(X_in.shape[0]):
#u_stat[gn,idx], pval[gn,idx] = stats.mannwhitneyu(X_in[gn], X_out[gn])
u_stat[gn,idx], pval[gn,idx] = stats.mannwhitneyu(X_in[gn], X_out[gn], alternative='two-sided')
_,pval_adj[:,idx],_,_ = multipletests(
pval[:,idx],
alpha=0.05,
method='fdr_bh',
is_sorted=False,
returnsorted=False
)
            try:
                qval[:,idx],_ = qvalue(pval[:,idx])
            except Exception:
                try:
                    qval[:,idx],_ = qvalue(pval[:,idx], lambda_qvalue=0.5)
                except Exception:
                    qval[:,idx] = np.nan
x_in[:,idx] = np.mean(X_in,1)
x_out[:,idx] = np.mean(X_out,1)
# Collapse to dataframe
de_df = pd.concat([
_collapser(x_in, mat.index, np.unique(groups), 'x_in'),
_collapser(x_out, mat.index, np.unique(groups), 'x_out')['x_out'],
_collapser(u_stat, mat.index, np.unique(groups), 'u')['u'],
_collapser(pval, mat.index, np.unique(groups), 'pval')['pval'],
_collapser(pval_adj, mat.index, np.unique(groups), 'pval_adj')['pval_adj'],
_collapser(qval, mat.index, np.unique(groups), 'qval')['qval']
],1)
# Fold-change
de_df['diff'] = de_df['x_in'] - de_df['x_out']
# Signed FC * -log10(qval)
de_df['gsea_rank'] = de_df['diff'] * -np.log10(de_df['pval_adj'])
return de_df
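# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). It assumes R
# and the Bioconductor "qvalue" package are reachable through rpy2, and uses a
# small synthetic genes-x-samples matrix purely to show the expected shapes.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    demo_mat = pd.DataFrame(
        rng.normal(size=(200, 12)),
        index=["gene%d" % i for i in range(200)],
        columns=["s%d" % i for i in range(12)],
    )
    demo_groups = pd.Series(["A"] * 6 + ["B"] * 6, index=demo_mat.columns, name="cluster")
    demo_de = t_test(demo_mat, demo_groups)   # per-gene, per-group stats with q-values
    print(demo_de.sort_values("gsea_rank").head())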
|
StarcoderdataPython
|
1888135
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import unittest
from kubernetes import config
from liminal.kubernetes import volume_util
try:
config.load_kube_config()
except Exception:
msg = "Kubernetes is not running\n"
sys.stdout.write(f"INFO: {msg}")
class TestKubernetesVolume(unittest.TestCase):
def setUp(self) -> None:
self.config = {
'volumes': [
{
'volume': 'gettingstartedvol-test',
'claim_name': 'gettingstartedvol-test-pvc',
'local': {
'path': '.'
}
}
]
}
def test_volume_config(self):
volumes_config = volume_util.get_volume_configs(self.config, ".")
self.assertEqual(
str(self.config['volumes'][0]),
str(volumes_config[0]))
def test_create_volume(self):
self._delete_volumes()
self._create_volumes()
matching_volumes = volume_util._list_persistent_volumes(self.config['volumes'][0]['volume'])
        self.assertTrue(self.config['volumes'][0]['volume'] in matching_volumes[0]['metadata']['name'])
        self.assertTrue(self.config['volumes'][0]['claim_name'] in matching_volumes[0]['spec']['claim_ref']['name'])
def test_delete_volume(self):
self._create_volumes()
self._delete_volumes()
matching_volumes = volume_util._list_persistent_volumes(self.config['volumes'][0]['volume'])
self.assertEqual([],matching_volumes)
def _create_volumes(self):
volume_util.create_local_volumes(self.config, ".")
def _delete_volumes(self):
volume_util.delete_local_volumes(self.config, ".")
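# Not part of the original file: lets this test module be run directly, assuming
# a reachable Kubernetes cluster as checked at import time above.
if __name__ == '__main__':
    unittest.main()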
|
StarcoderdataPython
|
6421108
|
from x64dbg import *
def main():
start, end = Gui.SelectionGet(Gui.Window.DisassemblyWindow)
print("Disassembly Window: 0x%X - 0x%X" % (start, end))
start, end = Gui.Disassembly.SelectionGet()
print("Disassembly Window: 0x%X - 0x%X" % (start, end))
start, end = Gui.Dump.SelectionGet()
print("Dump Window: 0x%X - 0x%X" % (start, end))
start, end = Gui.Stack.SelectionGet()
print("Stack Window: 0x%X - 0x%X" % (start, end))
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3292300
|
<reponame>CrazyDi/Python1
import asyncio
async def handle_echo(reader, writer):
data = await reader.read(1024)
message = data.decode()
addr = writer.get_extra_info("peername")
print("received %r from %r" % (message, addr))
# writer.close()
if __name__ == "__main__":
loop = asyncio.new_event_loop()
coro = asyncio.start_server(handle_echo, "127.0.0.1", 10001, loop=loop)
server = loop.run_until_complete(coro)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
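# Companion client sketch (not part of the original snippet; tcp_send is a made-up
# helper and would normally live in its own script, since the block above runs the
# server loop forever). Host and port mirror the values hard-coded in the server.
# Example, from a separate process: asyncio.run(tcp_send("hello"))
async def tcp_send(message):
    reader, writer = await asyncio.open_connection("127.0.0.1", 10001)
    writer.write(message.encode())
    await writer.drain()
    writer.close()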
|
StarcoderdataPython
|
3344107
|
#!/bin/python3.4
#Django
#author: <NAME>
#
from django.contrib import admin
from .models import Post
admin.site.register(Post)
|
StarcoderdataPython
|
92800
|
from django import forms
from .utils import get_coins_list
class ChooseCoinToPayForm(forms.Form):
currency = forms.ChoiceField(choices=get_coins_list(),
widget=forms.RadioSelect(),
label='',
required=True)
|
StarcoderdataPython
|
97577
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import queue
import threading
import subprocess
import datetime
import time
import codecs
# weather forecast
import speech_api_weather as weather_api
import speech_api_weather_key as weather_key
qPathTTS = 'temp/a3_5tts_txt/'
qPathWork = 'temp/a3_9work/'
qBusyCtrl = qPathWork + 'busy_speechctl.txt'
qBusyInput = qPathWork + 'busy_voice2wav.txt'
qBusySTT = qPathWork + 'busy_sttcore.txt'
qBusyTTS = qPathWork + 'busy_ttscore.txt'
qBusyPlay = qPathWork + 'busy_playvoice.txt'
def qBusyCheck(file, sec):
chktime = time.time()
while (os.path.exists(file)) and ((time.time() - chktime) < sec):
time.sleep(0.10)
if (os.path.exists(file)):
return 'busy'
else:
return 'none'
def speech_wait(idolsec=2, maxwait=15, ):
global qBusyCtrl
global qBusyInput
global qBusySTT
global qBusyTTS
global qBusyPlay
busy_flag = True
chktime1 = time.time()
while (busy_flag == True) and ((time.time() - chktime1) < maxwait):
busy_flag = False
chktime2 = time.time()
while ((time.time() - chktime2) < idolsec):
if (qBusyCheck(qBusySTT , 0) == 'busy') \
or (qBusyCheck(qBusyTTS , 0) == 'busy') \
or (qBusyCheck(qBusyPlay, 0) == 'busy'):
busy_flag = True
time.sleep(0.10)
break
def tts_speech(runMode, id, speechText, idolsec=2, maxwait=15, ):
global qPathTTS
speech_wait(idolsec,maxwait)
print(speechText)
if (speechText != ''):
now=datetime.datetime.now()
stamp=now.strftime('%Y%m%d-%H%M%S')
wrkFile = qPathTTS + stamp + '.' + id + '.txt'
try:
w = codecs.open(wrkFile, 'w', 'utf-8')
w.write(speechText)
w.close()
w = None
except:
w = None
def speech_run(runMode, speechs, lang='ja,hoya,', idolsec=2, maxwait=15, ):
speech_wait(idolsec,maxwait)
seq = 0
for speech in speechs:
txt = lang + str(speech['text'])
seq+= 1
id = 'weather.' + '{:02}'.format(seq)
tts_speech(runMode, id, txt, 0, 0, )
time.sleep(speech['wait'])
qLogNow=datetime.datetime.now()
qLogFlie = 'temp/_log/' + qLogNow.strftime('%Y%m%d-%H%M%S') + '_' + os.path.basename(__file__) + '.log'
def qLogOutput(pLogText='', pDisplay=True, pOutfile=True):
#try:
if (pDisplay == True):
print(str(pLogText))
if (pOutfile == True):
w = codecs.open(qLogFlie, 'a', 'utf-8')
w.write(str(pLogText) + '\n')
w.close()
w = None
#except:
#pass
if (__name__ == '__main__'):
qLogOutput('')
qLogOutput('weather___:init')
qLogOutput('weather___:exsample.py runMode, inpText, ')
runMode = 'debug'
inpText = u'三木市'
if (len(sys.argv) >= 2):
runMode = sys.argv[1]
if (len(sys.argv) >= 3):
inpText = sys.argv[2]
qLogOutput('weather___:runMode =' + str(runMode ))
qLogOutput('weather___:inpText =' + str(inpText ))
qLogOutput('weather___:start')
if (True):
tenkiAPI = weather_api.WeatherAPI()
city = inpText
lang = 'ja,hoya,'
api = 'openweathermap'
key = weather_key.getkey(api)
weather, temp_max, temp_min, humidity = \
tenkiAPI.getWeather(api, key, city, )
if (weather != ''):
speechs = []
speechs.append({'text':city + u'、今日の天気は、「' + weather + u'」です。', 'wait':0, })
if (temp_max != ''):
speechs.append({'text':u'最高気温は、' + temp_max + u'℃。', 'wait':0, })
if (temp_min != ''):
speechs.append({'text':u'最低気温は、' + temp_min + u'℃。', 'wait':0, })
if (humidity != ''):
speechs.append({'text':u'湿度は、' + humidity + u'%です。', 'wait':0, })
speech_run(runMode, speechs, lang, )
speech_run(runMode, speechs, '' , )
else:
txt = u'ごめんなさい。外部のAIに聞いてみます。'
tts_speech(runMode, 'weather.00', txt, )
time.sleep(5.00)
speechtext = 'ja,hoya,' + city + u'の天気?'
smart = 'auto'
smtspk= subprocess.Popen(['python', '_handsfree_smart_speaker.py', runMode, speechtext, smart, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
smtspk.wait()
smtspk.terminate()
smtspk = None
qLogOutput('weather___:terminate')
qLogOutput('weather___:bye!')
|
StarcoderdataPython
|
158533
|
<filename>exceltogdx/exceltogdx.py
from gdxpds import load_gdxcc
from gdxpds.write_gdx import Translator
from openpyxl import load_workbook
from io import BytesIO
import pandas as pd
import numpy as np
import logging
import os
import re
logging.getLogger('gdxpds').setLevel(logging.ERROR)
def xlsdynamicecke(typ, cell, rdim, cdim, sheetname, wb, verbose=False):
'''
    Reads a set or a parameter table from an excel sheet, expanding from the start
    cell until an empty cell is found in the index (rows) or headings (columns).
    typ: string 'set' or 'par'
    cell: string in excel format of the top-left corner cell of the table.
    rdim: indicates the number of columns from the beginning that are sets
    cdim: indicates the number of rows from the top that are sets
    sheetname: self-explanatory
    wb: is the workbook of an excel file instance of 'from openpyxl import load_workbook'
    eg. xlsdynamicecke('set', 'C5', 1, 0, 'sheet1', workbook.object)
    Returns the set elements (list) or the table values (list of rows).
'''
cell = cell.upper()
sheet = wb[sheetname]
string = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
def col2num(letters):
'''
column letter to column number
'''
num = 0
for c in letters:
if c in string:
num = num * 26 + (ord(c.upper()) - ord('A')) + 1
return num
def colnum_string(n):
strings = ""
while n > 0:
n, remainder = divmod(n - 1, 26)
strings = chr(65 + remainder) + strings
return strings
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
def atoi(text):
return int(text) if text.isdigit() else text
return [atoi(c) for c in re.split(r'(\d+)', text)]
cut = 0
for s in cell:
if s in string:
cut += 1
else:
break
rowstr = cell[cut:]
colstr = cell[:cut]
row = int(rowstr)
col = col2num(colstr.upper())
if typ == 'par':
if cdim == 0:
j = 0
for i, r in enumerate(sheet.iter_rows(min_row=row, min_col=col, max_col=col, values_only=True)):
j = i
if r[0] is None:
j = i - 1
break
max_col = rdim + 1
max_row = row + j
rng = colnum_string(col) + str(row - 1) + ':' + colnum_string(max_col) + str(max_row)
data = sheet[rng]
output = [[cells.value for cells in row] for row in data]
if verbose:
print(rng)
print(output[:3])
else:
j = 0
for i, c in enumerate(sheet.iter_cols(min_row=row, max_row=row, min_col=col+rdim, values_only=True)):
j = i
if c[0] is None:
j = i - 1
break
max_col = col + j + rdim
for i, r in enumerate(sheet.iter_rows(min_row=row+cdim+1, min_col=col, max_col=col, values_only=True)):
j = i
if r[0] is None:
j = i - 1
break
max_row = row + j + cdim + 1
rng = cell + ':' + colnum_string(max_col) + str(max_row)
data = sheet[rng]
output = [[cells.value for cells in row] for row in data]
if verbose:
print(rng)
print(output[:3])
elif typ == 'set':
setls = []
if rdim == 1:
for i, r in enumerate(sheet.iter_rows(min_row=row, min_col=col, max_col=col, values_only=True)):
if r[0] is not None:
setls.append(r[0])
else:
break
if all([isinstance(s, (int, float)) for s in list(set(setls))]):
output = sorted(list(set(setls)))
else:
output = sorted(list(set(setls)), key=natural_keys)
elif cdim == 1:
for i, c in enumerate(sheet.iter_cols(min_row=row, max_row=row, min_col=col, values_only=True)):
if c[0] is not None:
setls.append(c[0])
else:
break
if all([isinstance(s, (int, float)) for s in list(set(setls))]):
output = sorted(list(set(setls)))
else:
output = sorted(list(set(setls)), key=natural_keys)
else:
raise ValueError('Set must have either rdim or cdim as 1, check dim in py sheet')
del sheet
return output
def exceltogdx(excel_file, gdx_file, csv_file=None, csv_copy=None, verbose=False, gams_dir=None):
'''
excel_file: input file path
gdx_file: output file path
csv_file: if None, it looks at excel file to find sheet with name 'py'
that contains the instructions to get sets and parameters.
Otherwise, csv file path.
csv_copy: indicate folder where csv files are saved. None (Default): no csv files are created.
'''
load_gdxcc(gams_dir)
if csv_file is None:
mapping = pd.read_excel(excel_file, sheet_name='py', index_col='symbol', engine='openpyxl')
else:
mapping = pd.read_csv(csv_file, index_col='symbol')
print(f"Loading excel file: {excel_file}")
with open(excel_file, 'rb') as f:
datas = BytesIO(f.read())
wb = load_workbook(datas, data_only=True)
dc = {}
df = pd.DataFrame()
for k, v in mapping.iterrows():
if verbose:
print(v['type'],': ', k)
xlsvalues = xlsdynamicecke(v['type'], v['startcell'], v['rdim'], v['cdim'], v['sheet_name'], wb, verbose=verbose)
if v['type'] == 'par':
df = pd.DataFrame(xlsvalues)
if v['cdim'] == 0:
df = df.T.set_index(0, append=False).T
try:
df = df.set_index(df.columns[list(range(v['rdim']))].to_list())
except KeyError:
raise KeyError("each rdim in parameter '{}' must have a heading (Don't leave it empty), not required for cdim".format(k))
df.index.names = list(range(1,df.index.nlevels+1))
elif v['cdim'] == 1:
df = df.T.set_index(0, append=False).T
try:
df = df.set_index(df.columns[list(range(v['rdim']))].to_list())
except KeyError:
raise KeyError("each rdim in parameter '{}' must have a heading (Don't leave it empty), not required for cdim".format(k))
df = df.stack([0]*df.columns.nlevels)
df.index.names = list(range(1,df.index.nlevels+1))
df = pd.DataFrame(df)
elif v['cdim'] > 1:
df = df.T.set_index(list(range(v['cdim'])), append=False).T
try:
df = df.set_index(df.columns[list(range(v['rdim']))].to_list())
except KeyError:
raise KeyError("each rdim in parameter '{}' must have a heading (Don't leave it empty), not required for cdim".format(k))
df = df.stack([0]*df.columns.nlevels)
df.index.names = list(range(1,df.index.nlevels+1))
df = pd.DataFrame(df)
else:
raise Exception('is "{}" a parameter?, verify cdim on "py" sheet. cdim must be positive integer'.format(k))
df = df.reset_index().rename(columns={df.columns.to_list()[-1]: 'value'}).astype(object)
df.loc[df[df['value'].astype('str').str.lower() == 'inf'].index, 'value'] = np.inf
df.loc[df[df['value'].astype('str').str.lower() == '+inf'].index, 'value'] = np.inf
df.loc[df[df['value'].astype('str').str.lower() == '-inf'].index, 'value'] = -np.inf
df.loc[df[df['value'].astype('str').str.lower() == 'eps'].index, 'value'] = np.finfo(float).eps # np.nextafter(0,1)
df.loc[:,[c for c in df.columns if (c != 'value' and df[c].dtypes == float)]] = df[[c for c in df.columns if (c != 'value' and df[c].dtypes == float)]].astype(int)
df.loc[:,[c for c in df.columns if c != 'value']] = df[[c for c in df.columns if c != 'value']].astype(str)
dc[k] = df.rename(columns={c: '*' for c in df.columns if c != 'value'})
elif v['type'] == 'set':
df = pd.DataFrame({'*': xlsvalues})
df.loc[:, 'value'] = 'True'
df.dropna(inplace=True)
dc[k] = df
if csv_copy is not None:
os.makedirs(csv_copy, exist_ok=True)
name = v['type'] + '_' + k + '.csv'
df.to_csv(os.path.join(csv_copy, name), index=False)
os.makedirs(os.path.abspath(os.path.join(gdx_file, os.pardir)), exist_ok=True)
print(f'Generating gdx file: {gdx_file}')
translator = Translator(dc)
translator.gams_dir = gams_dir
translator.save_gdx(gdx_file)
translator.gdx
print('GDX Done!')
return dc
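# Usage sketch (not part of the original module); the paths below are placeholders,
# not files shipped with the package.
if __name__ == '__main__':
    symbols = exceltogdx(
        excel_file='inputs/model_data.xlsx',  # placeholder workbook containing a 'py' sheet
        gdx_file='outputs/model_data.gdx',    # placeholder output path
        csv_copy='outputs/csv',               # optional: also dump each symbol as csv
    )
    print(list(symbols.keys()))               # symbols written to the gdx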
|
StarcoderdataPython
|
9799148
|
<filename>utils/data/samplers/range_sampler.py
import torch
from torch.utils.data.sampler import Sampler
class RangeSampler(Sampler):
def __init__(self, start_ind, end_ind):
self.start_ind = start_ind
self.end_ind = end_ind
def __iter__(self):
indices = torch.arange(self.start_ind, self.end_ind).tolist()
return iter(indices)
def __len__(self):
return self.end_ind - self.start_ind
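# Usage sketch (not part of the original file): plugs RangeSampler into a DataLoader
# over a toy TensorDataset so that only items 10..19 are iterated.
if __name__ == "__main__":
    from torch.utils.data import DataLoader, TensorDataset
    toy_data = TensorDataset(torch.arange(100))
    loader = DataLoader(toy_data, batch_size=5, sampler=RangeSampler(10, 20))
    for batch in loader:
        print(batch)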
|
StarcoderdataPython
|
6684245
|
# -*- coding: utf-8 -*-
"""
Serializers for the data models of the Tamarin connector (タマリンコネクタ).
@author: <EMAIL>
"""
from rest_framework import serializers
from . import models
class UserSerializer(serializers.ModelSerializer):
"""[Userモデルのシリアライザ]"""
class Meta:
model = models.User
fields = ["id", "username", "date_updated", "scene_tag", "scene_color", "context_tag", "download_rule"]
class MediaSerializer(serializers.ModelSerializer):
"""[Mediaモデルのシリアライザ]"""
class Meta:
model = models.Media
fields = ["id", "owner", "date_taken", "content_type", "author_name", "scene_tag", "context_tag", "encryption_key", "encrypted_data"]
class HistorySerializer(serializers.ModelSerializer):
"""[Historyモデルのシリアライザ]"""
class Meta:
model = models.History
fields = ["id", "date_occurred", "type", "user", "media"]
class FeedbackSerializer(serializers.ModelSerializer):
"""[Feedbackモデルのシリアライザ]"""
class Meta:
model = models.Feedback
fields = ["id", "date_occurred", "author_name", "comment"]
|
StarcoderdataPython
|
70347
|
"""
The ``cpp_pimpl`` test project.
"""
from testing.hierarchies import clike, directory, file, namespace
def default_class_hierarchy_dict():
"""Return the default class hierarchy dictionary."""
return {
namespace("pimpl"): {
clike("class", "Planet"): {},
clike("class", "Earth"): {},
clike("class", "EarthImpl"): {},
clike("class", "Earth_v2"): {},
clike("class", "Jupiter"): {},
clike("class", "JupiterImpl"): {},
clike("class", "Jupiter_v2"): {},
namespace("detail"): {
clike("class", "EarthImpl"): {},
clike("class", "JupiterImpl"): {}
}
}
}
def default_file_hierarchy_dict():
"""Return the default file hierarchy dictionary."""
return {
directory("include"): {
directory("pimpl"): {
file("planet.hpp"): {
namespace("pimpl"): {
clike("class", "Planet"): {}
}
},
file("earth.hpp"): {
namespace("pimpl"): {
clike("class", "Earth"): {},
clike("class", "EarthImpl"): {},
clike("class", "Earth_v2"): {},
namespace("detail"): {
clike("class", "EarthImpl"): {}
}
}
},
file("jupiter.hpp"): {
namespace("pimpl"): {
clike("class", "Jupiter"): {},
clike("class", "JupiterImpl"): {},
clike("class", "Jupiter_v2"): {},
namespace("detail"): {
clike("class", "JupiterImpl"): {}
}
}
}
}
}
}
|
StarcoderdataPython
|
3531623
|
from graph import *
filename = input()
G = Graph(filename)
dist_arrays = []
n_max = 0
for i in range (G.n_vertices):
dist_arrays.append(G.dijkstra(i))
for j in range(len(dist_arrays[i])):
if n_max < len(str(dist_arrays[i][j])):
n_max = len(str(dist_arrays[i][j]))
for i in range (len(dist_arrays)):
string = ""
for j in range (len(dist_arrays[i])):
string = str(dist_arrays[i][j]).rjust(n_max)
print(string, end=" ")
print()
|
StarcoderdataPython
|
296225
|
<gh_stars>1-10
'''
Includes:
* Function to compute the IoU, and ARIou180, similarity for rectangular, 2D bounding boxes
* Function for coordinate conversion for rectangular, 2D bounding boxes
Copyright (C) 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Modifications author : <NAME>
'''
from __future__ import division
import numpy as np
# removed conversions other than minmax2centroids and centroids2minmax
# removed border pixels : always half
def convert_coordinates_axis_aligned(tensor, start_index, conversion):
'''
Convert coordinates for axis-aligned 2D boxes between two coordinate formats.
Creates a copy of `tensor`, i.e. does not operate in place. Currently there are
2 supported coordinate formats that can be converted from and to each other:
1) (xmin, xmax, ymin, ymax) - the 'minmax' format
2) (cx, cy, w, h) - the 'centroids' format
Arguments:
tensor (array): A Numpy nD array containing the four consecutive coordinates
to be converted somewhere in the last axis.
start_index (int): The index of the first coordinate in the last axis of `tensor`.
conversion (str, optional): The conversion direction. Can be 'minmax2centroids',
'centroids2minmax'.
Returns:
A Numpy nD array, a copy of the input tensor with the converted coordinates
in place of the original coordinates and the unaltered elements of the original
tensor elsewhere.
'''
ind = start_index
tensor1 = np.copy(tensor).astype(np.float)
if conversion == 'minmax2centroids':
tensor1[..., ind] = (tensor[..., ind] + tensor[..., ind + 1]) / 2.0 # Set cx
tensor1[..., ind + 1] = (tensor[..., ind + 2] + tensor[..., ind + 3]) / 2.0 # Set cy
tensor1[..., ind + 2] = tensor[..., ind + 1] - tensor[..., ind] # Set w
tensor1[..., ind + 3] = tensor[..., ind + 3] - tensor[..., ind + 2] # Set h
elif conversion == 'centroids2minmax':
tensor1[..., ind] = tensor[..., ind] - tensor[..., ind + 2] / 2.0 # Set xmin
tensor1[..., ind + 1] = tensor[..., ind] + tensor[..., ind + 2] / 2.0 # Set xmax
tensor1[..., ind + 2] = tensor[..., ind + 1] - tensor[..., ind + 3] / 2.0 # Set ymin
tensor1[..., ind + 3] = tensor[..., ind + 1] + tensor[..., ind + 3] / 2.0 # Set ymax
else:
raise ValueError(
"Unexpected conversion value. Supported values are 'minmax2centroids', 'centroids2minmax', 'corners2centroids', 'centroids2corners', 'minmax2corners', and 'corners2minmax'.")
return tensor1
def get_corners(labels):
'''
Get corners coordinates for 2D rotated boxes
Arguments:
labels (array): A Numpy nD array containing the five consecutive coordinate : (cx, cy, w, h, angle).
Returns:
A 4-tuple containing the values for the 4 corners.
Each corner is a n * 2 values tuple containing the x and y coordinates, n being the number of boxes in labels
'''
cx, cy, w, h, angle = 1, 2, 3, 4, 5
# get center of boxes
centers = np.array([labels[:, cx], labels[:, cy]])
# get vertices of boxes
dxcos = labels[:, w] * np.cos(labels[:, angle]) / 2
dxsin = labels[:, w] * np.sin(labels[:, angle]) / 2
dycos = labels[:, h] * np.cos(labels[:, angle]) / 2
dysin = labels[:, h] * np.sin(labels[:, angle]) / 2
toplefts = centers + np.array([-dxcos - dysin, -dxsin + dycos])
toprights = centers + np.array([-dxcos - -dysin, -dxsin + -dycos])
bottomlefts = centers + np.array([dxcos - dysin, dxsin + dycos])
bottomrights = centers + np.array([dxcos - -dysin, dxsin + -dycos])
return toplefts, toprights, bottomlefts, bottomrights
def get_centroids_coords(toplefts, toprights, bottomlefts, bottomrights):
'''
Get centroids coordinates for 2D boxes
Arguments:
toplefts (tuple) : n * 2 value tuple containing the x and y coordinates of top lefts corners
toprights (tuple): n * 2 value tuple containing the x and y coordinates of top rights corners
bottomlefts (tuple): n * 2 value tuple containing the x and y coordinates of bottom lefts corners
bottomrights (tuple): n * 2 value tuple containing the x and y coordinates of bottom rights corners
Returns:
A Numpy nD array containing the coordinates of the boxes in the centroids format
'''
cx = np.mean([toplefts[0], toprights[0], bottomlefts[0], bottomrights[0]], axis=0)
cy = np.mean([toplefts[1], toprights[1], bottomlefts[1], bottomrights[1]], axis=0)
w = np.sqrt((toplefts[0] - bottomlefts[0]) ** 2 + (toplefts[1] - bottomlefts[1]) ** 2)
h = np.sqrt((toplefts[0] - toprights[0]) ** 2 + (toplefts[1] - toprights[1]) ** 2)
angle = np.arctan((toplefts[1] - bottomlefts[1]) / (toplefts[0] - bottomlefts[0]))
angle = np.mod(angle, np.pi)
return cx, cy, w, h, angle
def rotate_box(boxes, rect_base):
'''
Return rotated rectangles by the angle of rect_base. Only the coordinates cx, cy are rotated.
We do all the transpose operations in order to save time during batch generation
Arguments:
boxes (array): A Numpy nD array containing the boxes to be rotated
rect_base (array): the box which contains the angle to rotate the boxes in the array boxes
Returns:
A Numpy nD array with the same size as the argument boxes, the rotated boxes.
'''
# get angle of rect_base
theta = rect_base[4]
# transition matrix
trans = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
boxes = np.copy(boxes.T)
rot = np.copy(boxes[:2])
rect_base_coords = rect_base[:2]
# translate boxes by rect_base coordinates
rot = (rot.T - rect_base_coords)
# rotate cx, cy of boxes
rot = np.dot(rot, trans)
# translate back again with coordinates of rect_base
rot = rot + rect_base_coords
boxes[:2] = rot.T
boxes = np.swapaxes(boxes, 1, 0)
return boxes
def intersection_area_training(boxes1, boxes2):
'''
Computes the intersection areas (with the formula of ARiou180) of two sets of 2D rectangular boxes.
Used to compute similarity during training.
Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
returns an `(m,n)` matrix with the intersection areas for all possible
combinations of the boxes in `boxes1` and `boxes2`.
The boxes in boxes2 are rotated to have the same angle as the boxes in boxes1 to compute the intersection area of ARiou180.
We apply a rotation to the centers of boxes2 by the angle of the boxes in boxes1, and then we consider both angles to be zero.
This way we can use numpy to calculate the intersection area and increase the speed.
We do all the transpose operations in order to save time during batch generation
Arguments:
boxes1 (array): Either a 1D Numpy array of shape `(5, )` containing the coordinates for one box in the
format centroids or a 2D Numpy array of shape `(m, 5)` containing the coordinates for `m` boxes.
boxes2 (array): Either a 1D Numpy array of shape `(5, )` containing the coordinates for one box in the
format centroids or a 2D Numpy array of shape `(n, 5)` containing the coordinates for `n` boxes.
Returns:
A 2D Numpy array of dtype float containing values with the intersection areas of the boxes in `boxes1` and `boxes2`.
'''
# Make sure the boxes have the right shapes.
if boxes1.ndim > 2: raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}.".format(boxes1.ndim))
if boxes2.ndim > 2: raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}.".format(boxes2.ndim))
if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0)
if not (boxes1.shape[1] == boxes2.shape[1] == 5):
raise ValueError(
"All boxes must consist of 5 coordinates, but the boxes in `boxes1` and `boxes2` have {} and {} coordinates, respectively.".format(
boxes1.shape[1], boxes2.shape[1]))
m = boxes1.shape[0] # The number of boxes in `boxes1`
n = boxes2.shape[0] # The number of boxes in `boxes2`
inter_areas = np.zeros((m, n))
xmin = 0
xmax = 1
ymin = 2
ymax = 3
# iterate over boxes1
for i, b1 in enumerate(boxes1):
# rotate boxes2 with the angle of box b
rotated_boxes = rotate_box(boxes2, b1)
# convert coordinates to minmax, we consider that the angles of both boxes are zero
rotated_boxes = rotated_boxes.T
rotated_boxes = convert_coordinates_axis_aligned(rotated_boxes[:4].T, 0, 'centroids2minmax')
b1 = convert_coordinates_axis_aligned(b1[:4], 0, 'centroids2minmax')
rotated_boxes = rotated_boxes.T
# get the greater xmin and ymin values.
min_xy = np.maximum(rotated_boxes[[xmin, ymin]].T, b1[[xmin, ymin]])
# get the smaller xmax and ymax values.
max_xy = np.minimum(rotated_boxes[[xmax, ymax]].T, b1[[xmax, ymax]])
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy)
side_lengths = side_lengths.T
inter_areas[i, :] = (side_lengths[0] * side_lengths[1]).T
return inter_areas
class Vector:
'''
Class representing a point
'''
def __init__(self, x, y):
self.x = x
self.y = y
def __add__(self, v):
return Vector(self.x + v.x, self.y + v.y)
def __sub__(self, v):
return Vector(self.x - v.x, self.y - v.y)
def cross(self, v):
return self.x * v.y - self.y * v.x
class Line:
'''
Class representing an edge of a bounding box
'''
# ax + by + c = 0
def __init__(self, v1, v2):
self.a = v2.y - v1.y
self.b = v1.x - v2.x
self.c = v2.cross(v1)
def __call__(self, p):
'''
Computes ax + by + c for a new point p
        Determines on which side of the line the point is.
Any point p with line(p) <= 0 is on the "inside" (or on the boundary),
any point p with line(p) > 0 is on the "outside".
'''
return self.a * p.x + self.b * p.y + self.c
def intersection(self, other):
'''
Get intersection point between this line and another line
'''
w = self.a * other.b - self.b * other.a
return Vector(
(self.b * other.c - self.c * other.b) / w,
(self.c * other.a - self.a * other.c) / w
)
def rectangle_vertices(cx, cy, w, h, r):
'''
Compute the angles of a bounding box and returns objects of the class Vector
'''
angle = r
dx = w / 2
dy = h / 2
dxcos = dx * np.cos(angle)
dxsin = dx * np.sin(angle)
dycos = dy * np.cos(angle)
dysin = dy * np.sin(angle)
return (
Vector(cx, cy) + Vector(-dxcos - -dysin, -dxsin + -dycos),
Vector(cx, cy) + Vector(dxcos - -dysin, dxsin + -dycos),
Vector(cx, cy) + Vector(dxcos - dysin, dxsin + dycos),
Vector(cx, cy) + Vector(-dxcos - dysin, -dxsin + dycos)
)
def intersection_area_(r1, r2):
'''
Computes the real intersection area of two rotated bounding boxes
Used during decoding in intersection_area_decoding.
Arguments:
r1 (array): a 1D Numpy array of shape `(5, )` containing the coordinates for one box in the format (cx, cy, w, h, angle)
r2 (array): a 1D Numpy array of shape `(5, )` containing the coordinates for one box in the format (cx, cy, w, h, angle)
Returns:
a float representing the intersection area of r1 and r2
'''
# First convert r1 and r2 into a sequence of vertices
rect1 = rectangle_vertices(*r1)
rect2 = rectangle_vertices(*r2)
# Use the vertices of the first rectangle as
# starting vertices of the intersection polygon.
intersection = rect1
# Loop over the edges of the second rectangle
for p, q in zip(rect2, rect2[1:] + rect2[:1]):
if len(intersection) <= 2:
break # No intersection
line = Line(p, q)
# Any point p with line(p) <= 0 is on the "inside" (or on the boundary),
# Any point p with line(p) > 0 is on the "outside".
# Loop over the edges of the intersection polygon,
# and determine which part is inside and which is outside.
new_intersection = []
line_values = [line(t) for t in intersection]
for s, t, s_value, t_value in zip(
intersection, intersection[1:] + intersection[:1],
line_values, line_values[1:] + line_values[:1]):
if s_value <= 0:
new_intersection.append(s)
if s_value * t_value < 0:
# Points are on opposite sides.
# Add the intersection of the lines to new_intersection.
intersection_point = line.intersection(Line(s, t))
new_intersection.append(intersection_point)
intersection = new_intersection
# Calculate area
if len(intersection) <= 2:
return 0
# return intersection area
return 0.5 * sum(p.x * q.y - p.y * q.x for p, q in
zip(intersection, intersection[1:] + intersection[:1]))
def intersection_area_decoding(boxes1, boxes2):
'''
Computes the intersection areas of two sets of 2D rectangular boxes.
The function is used for decoding raw predictions with non-maximum suppression (NMS)
Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
returns an `(m,n)` matrix with the intersection areas for all possible
combinations of the boxes in `boxes1` and `boxes2`.
Arguments:
boxes1 (array): Either a 1D Numpy array of shape `(5, )` containing the coordinates for one box in the
format centroids or a 2D Numpy array of shape `(m, 5)` containing the coordinates for `m` boxes.
boxes2 (array): Either a 1D Numpy array of shape `(5, )` containing the coordinates for one box in the
format centroids or a 2D Numpy array of shape `(n, 5)` containing the coordinates for `n` boxes.
Returns:
A 2D Numpy array of dtype float containing values with the intersection areas of the boxes in `boxes1` and `boxes2`.
'''
# Make sure the boxes have the right shapes.
if boxes1.ndim > 2: raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}.".format(boxes1.ndim))
if boxes2.ndim > 2: raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}.".format(boxes2.ndim))
if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0)
if not (boxes1.shape[1] == boxes2.shape[1] == 5):
raise ValueError(
"All boxes must consist of 5 coordinates, but the boxes in `boxes1` and `boxes2` have {} and {} coordinates, respectively.".format(
boxes1.shape[1], boxes2.shape[1]))
m = boxes1.shape[0] # The number of boxes in `boxes1`
n = boxes2.shape[0] # The number of boxes in `boxes2`
inter_areas = np.zeros((m, n))
for i, b1 in enumerate(boxes1):
for j, b2 in enumerate(boxes2):
inter_areas[i, j] = intersection_area_(b1, b2)
return inter_areas
def sum_area_(boxes1, boxes2):
'''
Computes the sum of areas of two sets of axis-aligned 2D rectangular boxes.
Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
returns an `(m,n)` matrix with the sum of the areas for all possible
combinations of the boxes in `boxes1` and `boxes2`.
Arguments:
boxes1 (array): Either a 1D Numpy array of shape `(5, )` containing the coordinates for one box in the
format centroids or a 2D Numpy array of shape `(m, 5)` containing the coordinates for `m` boxes.
boxes2 (array): Either a 1D Numpy array of shape `(5, )` containing the coordinates for one box in the
format centroids or a 2D Numpy array of shape `(n, 5)` containing the coordinates for `n` boxes.
Returns:
A 2D Numpy array of dtype float containing values with the sum of the areas of the boxes in `boxes1` and `boxes2`.
'''
# Make sure the boxes have the right shapes.
if boxes1.ndim > 2: raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}.".format(boxes1.ndim))
if boxes2.ndim > 2: raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}.".format(boxes2.ndim))
if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0)
if not (boxes1.shape[1] == boxes2.shape[1] == 5):
raise ValueError(
"All boxes must consist of 5 coordinates, but the boxes in `boxes1` and `boxes2` have {} and {} coordinates, respectively.".format(
boxes1.shape[1], boxes2.shape[1]))
m = boxes1.shape[0] # The number of boxes in `boxes1`
n = boxes2.shape[0] # The number of boxes in `boxes2`
areas1 = boxes1[:, 2] * boxes1[:, 3] # w*h
areas2 = boxes2[:, 2] * boxes2[:, 3] # w*h
s1 = np.tile(np.expand_dims(areas1, axis=1), reps=(1, n))
s2 = np.tile(np.expand_dims(areas2, axis=0), reps=(m, 1))
return s1 + s2
def ARiou180(boxes1, boxes2):
'''
Computes the modified version of intersection-over-union similarity, ARIou180, of two sets of rotated 2D rectangular boxes.
Used only for training.
Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
returns an `(m,n)` matrix with the ARIoU180s for all possible combinations of the boxes in `boxes1` and `boxes2`.
Arguments:
boxes1 (array): Either a 1D Numpy array of shape `(5, )` containing the coordinates for one box in the
format centroids or a 2D Numpy array of shape `(m, 5)` containing the coordinates for `m` boxes.
boxes2 (array): Either a 1D Numpy array of shape `(5, )` containing the coordinates for one box in the
format centroids or a 2D Numpy array of shape `(n, 5)` containing the coordinates for `n` boxes.
Returns:
A 2D Numpy array of dtype float containing values in [0,1], the ARiou180 similarity of the boxes in `boxes1` and
`boxes2`.
0 means there is no overlap between two given boxes, 1 means their coordinates are identical.
'''
# Make sure the boxes have the right shapes.
if boxes1.ndim > 2: raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}.".format(boxes1.ndim))
if boxes2.ndim > 2: raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}.".format(boxes2.ndim))
if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0)
if not (boxes1.shape[1] == boxes2.shape[1] == 5):
raise ValueError(
"All boxes must consist of 5 coordinates, but the boxes in `boxes1` and `boxes2` have {} and {} coordinates, respectively.".format(
boxes1.shape[1], boxes2.shape[1]))
m = boxes1.shape[0] # The number of boxes in `boxes1`
n = boxes2.shape[0] # The number of boxes in `boxes2`
# Compute the cosine of the difference in angles for all possible combinations of the boxes in `boxes1` and `boxes2`
b1 = np.tile(np.expand_dims(boxes1[:, 4], axis=1), reps=(1, n))
b2 = np.tile(np.expand_dims(boxes2[:, 4], axis=0), reps=(m, 1))
cos_matrix = np.abs(np.cos(b1 - b2))
# Compute the intersection areas
intersection_areas = intersection_area_training(boxes1, boxes2)
# Compute the union areas.
sum_areas = sum_area_(boxes1, boxes2)
union_areas = sum_areas - intersection_areas
return intersection_areas * cos_matrix / union_areas
def iou(boxes1, boxes2):
'''
Computes the intersection-over-union similarity (also known as Jaccard similarity)
of two sets of rotated 2D rectangular boxes. Used only for decoding raw predictions with non-maximum suppression (NMS).
Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
returns an `(m,n)` matrix with the IoUs for all possible combinations of the boxes in `boxes1` and `boxes2`.
Arguments:
boxes1 (array): Either a 1D Numpy array of shape `(5, )` containing the coordinates for one box in the
format centroids or a 2D Numpy array of shape `(m, 5)` containing the coordinates for `m` boxes.
boxes2 (array): Either a 1D Numpy array of shape `(5, )` containing the coordinates for one box in the
format centroids or a 2D Numpy array of shape `(n, 5)` containing the coordinates for `n` boxes.
Returns:
A 2D Numpy array of dtype float containing values in [0,1], the Jaccard similarity of the boxes in `boxes1` and `
boxes2`.
0 means there is no overlap between two given boxes, 1 means their coordinates are identical.
'''
# Make sure the boxes have the right shapes.
if boxes1.ndim > 2: raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}.".format(boxes1.ndim))
if boxes2.ndim > 2: raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}.".format(boxes2.ndim))
if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0)
if not (boxes1.shape[1] == boxes2.shape[1] == 5):
raise ValueError(
"All boxes must consist of 5 coordinates, but the boxes in `boxes1` and `boxes2` have {} and {} coordinates, respectively.".format(
boxes1.shape[1], boxes2.shape[1]))
# Compute intersection areas
intersection_areas = intersection_area_decoding(boxes1, boxes2)
# Compute the union areas.
sum_areas = sum_area_(boxes1, boxes2)
union_areas = sum_areas - intersection_areas
return intersection_areas / union_areas
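# Sanity-check sketch (not part of the original module), using made-up boxes in the
# (cx, cy, w, h, angle) format: identical boxes should give an IoU of 1 and a
# far-away box an IoU of 0.
if __name__ == '__main__':
    box = np.array([[10.0, 10.0, 4.0, 2.0, np.pi / 6]])
    others = np.array([[10.0, 10.0, 4.0, 2.0, np.pi / 6],
                       [50.0, 50.0, 4.0, 2.0, 0.0]])
    print(iou(box, others))  # expected approximately [[1., 0.]]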
|
StarcoderdataPython
|
281084
|
from toontown.hood import HoodAI
from toontown.safezone import DistributedTrolleyAI
from toontown.safezone import DistributedMMPianoAI
from toontown.toonbase import ToontownGlobals
from toontown.ai import DistributedEffectMgrAI
class MMHoodAI(HoodAI.HoodAI):
def __init__(self, air):
HoodAI.HoodAI.__init__(self, air,
ToontownGlobals.MinniesMelodyland,
ToontownGlobals.MinniesMelodyland)
self.trolley = None
self.piano = None
self.startup()
def startup(self):
HoodAI.HoodAI.startup(self)
if simbase.config.GetBool('want-minigames', True):
self.createTrolley()
self.piano = DistributedMMPianoAI.DistributedMMPianoAI(self.air)
self.piano.generateWithRequired(self.zoneId)
self.trickOrTreatMgr = DistributedEffectMgrAI.DistributedEffectMgrAI(self.air, ToontownGlobals.HALLOWEEN, 12)
self.trickOrTreatMgr.generateWithRequired(4835) # Ursatz for Really Kool Katz, Tenor Terrace
self.winterCarolingMgr = DistributedEffectMgrAI.DistributedEffectMgrAI(self.air, ToontownGlobals.CHRISTMAS, 14)
self.winterCarolingMgr.generateWithRequired(4614) # Shave and a Haircut for a Song, Alto Avenue
def createTrolley(self):
self.trolley = DistributedTrolleyAI.DistributedTrolleyAI(self.air)
self.trolley.generateWithRequired(self.zoneId)
self.trolley.start()
|
StarcoderdataPython
|
5058321
|
from abc import ABCMeta, abstractmethod
from logging import getLogger
from typing import Callable, Dict, List
import numpy as np
logger = getLogger(__name__)
class RuntimeModuleBase(metaclass=ABCMeta):
"""Base class of runtime module.
RuntimeModule wraps the runtime of the model framework.
"""
@abstractmethod
def __init__(self, model: str, **kwargs):
pass
@abstractmethod
def run(self):
"""run inference"""
pass
@abstractmethod
def set_input(self, idx, value, **kwargs):
pass
@abstractmethod
def get_output(self, idx) -> np.ndarray:
pass
@abstractmethod
def get_input_details(self) -> List[dict]:
"""Get model input details. Dict format depends on the type of the runtime.
Returns:
List[dict]: List of the model input info.
"""
pass
@abstractmethod
def get_output_details(self) -> List[dict]:
"""Get model output details. Dict format depends on the type of the runtime.
Returns:
List[dict]: List of the model output info.
"""
pass
@abstractmethod
def benchmark(self, warmup: int = 1, repeat: int = 10, number: int = 1) -> Dict:
"""Request to run benchmark.
Args:
warmup (int, optional): [description]. Defaults to 1.
repeat (int, optional): [description]. Defaults to 10.
number (int, optional): [description]. Defaults to 1.
Returns:
            Dict: benchmark result. Result dict has ['mean', 'std', 'max', 'min'] as key. Value is time in milliseconds.
"""
pass
class RuntimeModuleFactory:
registry = {}
@classmethod
def register(cls, name: str) -> Callable:
def inner_wrapper(wrapped_class: RuntimeModuleBase) -> RuntimeModuleBase:
if name in cls.registry:
logger.warning(f"RuntimeModule for {name} already exists. Will replace it")
cls.registry[name] = wrapped_class
return wrapped_class
return inner_wrapper
@classmethod
def get(cls, name: str, **kwargs) -> RuntimeModuleBase:
if name not in cls.registry:
raise Exception(f"RuntimeModule {name} not exists in the registry")
runtime_class = cls.registry[name]
runtime = runtime_class(**kwargs)
return runtime
@classmethod
def list(cls) -> List[str]:
return list(cls.registry.keys())
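# Registration sketch (not part of the original module): DummyRuntimeModule and the
# name "dummy" are made up here to show the factory flow end to end.
@RuntimeModuleFactory.register("dummy")
class DummyRuntimeModule(RuntimeModuleBase):
    def __init__(self, model: str = "", **kwargs):
        self._value = np.zeros(1)
    def run(self):
        pass  # nothing to execute for this in-memory dummy
    def set_input(self, idx, value, **kwargs):
        self._value = np.asarray(value)
    def get_output(self, idx) -> np.ndarray:
        return self._value
    def get_input_details(self) -> List[dict]:
        return [{"name": "input0", "shape": self._value.shape}]
    def get_output_details(self) -> List[dict]:
        return [{"name": "output0", "shape": self._value.shape}]
    def benchmark(self, warmup: int = 1, repeat: int = 10, number: int = 1) -> Dict:
        return {"mean": 0.0, "std": 0.0, "max": 0.0, "min": 0.0}
# Example:
#   module = RuntimeModuleFactory.get("dummy")
#   module.set_input(0, [1.0, 2.0]); module.get_output(0)  # -> array([1., 2.])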
|
StarcoderdataPython
|