max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
S2.Surface_Normal/lib/helper.py | leoshine/Spherical_Regression | 133 | 12797551 | <filename>S2.Surface_Normal/lib/helper.py
# coding: utf8
"""
@Author : <NAME>
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from easydict import EasyDict as edict
from collections import OrderedDict as odict
from itertools import product
def eval_cls(Preds, GTs):
acc = torch.mean((Preds==GTs).float())
return acc.item()
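# Illustrative check of eval_cls (hypothetical tensors, not part of the original module):
#   preds = torch.tensor([0, 1, 2, 2]); gts = torch.tensor([0, 1, 1, 2])
#   eval_cls(preds, gts)   # -> 0.75, i.e. 3 of 4 predictions match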
class Cross_Entropy_Loss_Handler:
def __init__(self):
self.cross_entropy_loss = nn.CrossEntropyLoss().cuda()
# interface function
def compute_loss(self, tgts, Pred, GT):
""" tgts: list of target names
GT : dict of ground truth for each target BxHxW
Pred: dict of prediction for each target BxHxWx4
"""
mask = GT['mask']
Loss = edict()
for tgt in tgts:
gt = GT[tgt][mask].view(-1) # as (BxK,)
pr = Pred[tgt][mask].view(gt.size(0),-1) # Pred[tgt][mask] (BxK, 4)
Loss[tgt] = self.cross_entropy_loss(pr, gt).double()
return Loss
class Neg_Dot_Loss_Handler:
def __init__(self):
pass
def compute_loss(self, tgts, Pred, GT):
Loss = edict()
for tgt in tgts:
""" Bug fixed on 22 Aug 2018
torch.dot can only be applied to 1-dim tensor
Don't know why there's no error. """
# Loss[tgt] = torch.mean( -torch.dot(GT[tgt],Pred[tgt]) ) # In fact here only does -GT[tgt]*Pred[tgt]
Loss[tgt] = torch.mean( -torch.sum(GT[tgt]*Pred[tgt], dim=1))
return Loss
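# Small sketch of the behaviour noted in the bug comment above (tensors are illustrative):
#   a = torch.ones(2, 3); b = torch.ones(2, 3)
#   torch.sum(a * b, dim=1)              # per-sample dot products -> tensor([3., 3.])
#   torch.dot(a.view(-1), b.view(-1))    # torch.dot itself only accepts 1-D tensors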
class Cos_Proximity_Loss_Handler:
def __init__(self):
self.cos_sim = nn.CosineSimilarity(dim=1).cuda()
def compute_loss(self, tgts, Pred, GT):
""" tgts: list of target names. In this case has to be tgts=['norm']
GT : dict of ground truth for each target BxHxWx3
Pred: dict of prediction for each target BxHxWx3
"""
mask = GT['mask']
Loss = edict()
Errs = edict()
for tgt in tgts:
cos_sim = self.cos_sim(Pred[tgt][mask], GT[tgt][mask])
Loss[tgt] = torch.mean( 1 - cos_sim ) # use 1-cos(theta) so the loss is positive.
Errs[tgt] = torch.acos(cos_sim.clamp(-1,1))*180./np.pi # .clip(-1,1)
return Loss, Errs
class Smooth_L1_Loss_Handler:
def __init__(self):
self.smooth_l1_loss = nn.SmoothL1Loss().cuda()
def compute_loss(self, tgts, Pred, GT):
""" tgts: list of target names e.g. tgts=['a', 'e', 't']
GT : dict of ground truth for each target
Pred: dict of prediction for each target
"""
Loss = edict()
for tgt in tgts:
Loss[tgt] = self.smooth_l1_loss(Pred[tgt], GT[tgt]) # [warning] pred first, gt second
return Loss
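# Hedged usage sketch for the handlers above (names, shapes and device are assumptions only):
#   handler = Smooth_L1_Loss_Handler()
#   Pred = {'a': torch.randn(8, 1).cuda()}; GT = {'a': torch.randn(8, 1).cuda()}
#   Loss = handler.compute_loss(['a'], Pred, GT)   # edict mapping 'a' -> scalar loss tensor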
| 2.34375 | 2 |
nevergrad/functions/corefuncs.py | akhti/nevergrad | 1 | 12797552 | <reponame>akhti/nevergrad<filename>nevergrad/functions/corefuncs.py<gh_stars>1-10
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
from typing import Dict, Any, Tuple, List, Callable
import numpy as np
from .utils import PostponedObject
from ..instrumentation import discretization
from ..common.decorators import Registry
registry = Registry[Callable[[np.ndarray], float]]()
def _onemax(x: List[int]) -> float:
"""onemax(x) is the most classical case of discrete functions, adapted to minimization.
It is originally designed for lists of bits. It just counts the number of 1s,
and returns len(x) - number of ones.
It also works in the continuous case, but in that case it discretizes the
input domain by mapping ]0.5, 1.5] --> 1 and 0 everywhere else.
"""
return len(x) - sum(1 if int(round(w)) == 1 else 0 for w in x)
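# Worked examples (illustrative): _onemax([1, 1, 0, 1]) == 4 - 3 == 1, and in the
# continuous case _onemax([0.9, 1.4, 0.2]) == 3 - 2 == 1 after rounding.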
def _leadingones(x: List[int]) -> float:
"""leadingones is the second most classical discrete function, adapted for minimization.
Returns len(x) - number of initial 1. I.e.
leadingones([0 1 1 1]) = 4,
leadingones([1 1 1 1]) = 0,
leadingones([1 0 0 0]) = 3.
"""
for i, x_ in enumerate(list(x)):
if int(round(x_)) != 1:
return len(x) - i
return 0
def _jump(x: List[int]) -> float: # TODO: docstring?
"""There exists variants of jump functions; we are in minimization.
The principle of a jump function is that local descent does not succeed.
Jumps are necessary.
"""
n = len(x)
m = n // 4
o = n - _onemax(x)
if o == n or o <= n - m:
return n - m - o
return o # Deceptive part.
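# Worked example (illustrative) with n = 8, so m = 2: an all-ones input gives o = 8 and the
# minimum value 8 - 2 - 8 = -2; o = 6 gives 0; o = 7 falls in the deceptive gap and returns 7.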
def _styblinksitang(x: np.ndarray, noise: float) -> float:
"""Classical function for testing noisy optimization."""
x = np.asarray(x)
val = np.sum(np.power(x, 4) - 16 * np.power(x, 2) + 5 * x)
# return a positive value for maximization
return float(39.16599 * len(x) + 1 * 0.5 * val + noise * np.random.normal(size=val.shape))
@registry.register
def delayedsphere(x: np.ndarray) -> float:
'''For asynchronous experiments, we induce delays.'''
time.sleep(abs(1./x[0]) / 100000. if x[0] != 0. else 0.)
return float(np.sum(x**2))
class DelayedSphere(PostponedObject):
def __call__(self, x: np.ndarray) -> float:
return float(np.sum(x**2))
def get_postponing_delay(self, args: Tuple[Any, ...], kwargs: Dict[str, Any], value: float) -> float:
x = args[0]
return float(abs(1./x[0]) / 1000.) if x[0] != 0. else 0.
registry.register(DelayedSphere())
@registry.register
def sphere(x: np.ndarray) -> float:
"""The most classical continuous optimization testbed.
If you do not solve that one then you have a bug."""
return float(np.sum(x**2))
@registry.register
def sphere1(x: np.ndarray) -> float:
"""Translated sphere function."""
return float(np.sum((x - 1.)**2))
@registry.register
def sphere2(x: np.ndarray) -> float:
"""A bit more translated sphere function."""
return float(np.sum((x - 2.)**2))
@registry.register
def sphere4(x: np.ndarray) -> float:
"""Even more translated sphere function."""
return float(np.sum((x - 4.)**2))
@registry.register
def maxdeceptive(x: np.ndarray) -> float:
dec = 3 * x**2 - (2 / (3**(x - 2)**2 + .1))
return float(np.max(dec))
@registry.register
def sumdeceptive(x: np.ndarray) -> float:
dec = 3 * x**2 - (2 / (3**(x - 2)**2 + .1))
return float(np.sum(dec))
@registry.register
def altcigar(x: np.ndarray) -> float:
"""Similar to cigar, but variables in inverse order.
E.g. for pointing out algorithms not invariant to the order of variables."""
return float(x[-1]**2 + 1000000. * np.sum(x[:-1]**2))
@registry.register
def cigar(x: np.ndarray) -> float:
"""Classical example of ill conditioned function.
The other classical example is ellipsoid.
"""
return float(x[0]**2 + 1000000. * np.sum(x[1:]**2))
@registry.register
def altellipsoid(y: np.ndarray) -> float:
"""Similar to Ellipsoid, but variables in inverse order.
E.g. for pointing out algorithms not invariant to the order of variables."""
x = y[::-1]
return sum((10**(6 * (i - 1) / float(len(x) - 1))) * (x[i]**2) for i in range(len(x)))
@registry.register
def ellipsoid(x: np.ndarray) -> float:
"""Classical example of ill conditioned function.
The other classical example is cigar.
"""
return sum((10**(6 * (i - 1) / float(len(x) - 1))) * (x[i]**2) for i in range(len(x)))
@registry.register
def rastrigin(x: np.ndarray) -> float:
"""Classical multimodal function."""
cosi = float(np.sum(np.cos(2 * np.pi * x)))
return float(10 * (len(x) - cosi) + sphere(x))
@registry.register
def hm(x: np.ndarray) -> float:
"""New multimodal function (proposed for Nevergrad)."""
return float(np.sum((x**2) * (1.1 + np.cos(1. / x))))
@registry.register
def rosenbrock(x: np.ndarray) -> float:
return sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
@registry.register
def griewank(x: np.ndarray) -> float:
"""Multimodal function, often used in Bayesian optimization."""
part1 = np.sum(x**2)
part2 = np.prod(np.cos(x / np.sqrt(1 + np.arange(len(x)))))
return 1 + (float(part1)/4000.0) - float(part2)
@registry.register
def deceptiveillcond(x: np.ndarray) -> float:
"""An extreme ill conditioned functions. Most algorithms fail on this.
The condition number increases to infinity as we get closer to the optimum."""
assert len(x) >= 2
return float(max(np.abs(np.arctan(x[1]/x[0])),
np.sqrt(x[0]**2. + x[1]**2.),
1. if x[0] > 0 else 0.) if x[0] != 0. else float("inf"))
@registry.register
def deceptivepath(x: np.ndarray) -> float:
"""A function which needs following a long path. Most algorithms fail on this.
The path becomes thiner as we get closer to the optimum."""
assert len(x) >= 2
distance = np.sqrt(x[0]**2 + x[1]**2)
if distance == 0.:
return 0.
angle = np.arctan(x[0] / x[1]) if x[1] != 0. else np.pi / 2.
invdistance = (1. / distance) if distance > 0. else 0.
if np.abs(np.cos(invdistance) - angle) > 0.1:
return 1.
return float(distance)
@registry.register
def deceptivemultimodal(x: np.ndarray) -> float:
"""Infinitely many local optima, as we get closer to the optimum."""
assert len(x) >= 2
distance = np.sqrt(x[0]**2 + x[1]**2)
if distance == 0.:
return 0.
angle = np.arctan(x[0] / x[1]) if x[1] != 0. else np.pi / 2.
invdistance = int(1. / distance) if distance > 0. else 0.
if np.abs(np.cos(invdistance) - angle) > 0.1:
return 1.
return float(distance)
@registry.register
def lunacek(x: np.ndarray) -> float:
"""Multimodal function.
Based on https://www.cs.unm.edu/~neal.holts/dga/benchmarkFunction/lunacek.html."""
problemDimensions = len(x)
s = 1.0 - (1.0 / (2.0 * np.sqrt(problemDimensions + 20.0) - 8.2))
mu1 = 2.5
mu2 = - np.sqrt(abs((mu1**2 - 1.0) / s))
firstSum = 0.0
secondSum = 0.0
thirdSum = 0.0
for i in range(problemDimensions):
firstSum += (x[i]-mu1)**2
secondSum += (x[i]-mu2)**2
thirdSum += 1.0 - np.cos(2*np.pi*(x[i]-mu1))
return min(firstSum, 1.0*problemDimensions + secondSum)+10*thirdSum
# following functions using discretization should not be used with translation/rotation
@registry.register_with_info(no_transfrom=True)
def hardonemax(y: np.ndarray) -> float:
"""Onemax, with a discretization in 2 by threshold 0 (>0 or <0)."""
return _onemax(discretization.threshold_discretization(y))
@registry.register_with_info(no_transfrom=True)
def hardjump(y: np.ndarray) -> float:
"""Hardjump, with a discretization in 2 by threshold 0 (>0 or <0)."""
return _jump(discretization.threshold_discretization(y))
@registry.register_with_info(no_transfrom=True)
def hardleadingones(y: np.ndarray) -> float:
"""Leading ones, with a discretization in 2 by threshold 0 (>0 or <0)."""
return _leadingones(discretization.threshold_discretization(y))
@registry.register_with_info(no_transfrom=True)
def hardonemax5(y: np.ndarray) -> float:
"""Hardonemax, with a discretization by 5 with 4 thresholds (quantiles of Gaussian)."""
return _onemax(discretization.threshold_discretization(y, 5))
@registry.register_with_info(no_transfrom=True)
def hardjump5(y: np.ndarray) -> float:
"""Jump, with a discretization by 5 with 4 thresholds (quantiles of Gaussian)."""
return _jump(discretization.threshold_discretization(y, 5))
@registry.register_with_info(no_transfrom=True)
def hardleadingones5(y: np.ndarray) -> float:
"""Leadingones, with a discretization by 5 with 4 thresholds (quantiles of Gaussian)."""
return _leadingones(discretization.threshold_discretization(y, 5))
@registry.register_with_info(no_transfrom=True)
def onemax(y: np.ndarray) -> float:
"""Softmax discretization of onemax (This multiplies the dimension by 2)."""
return _onemax(discretization.softmax_discretization(y))
@registry.register_with_info(no_transfrom=True)
def jump(y: np.ndarray) -> float:
"""Softmax discretization of jump (This multiplies the dimension by 2)."""
return _jump(discretization.softmax_discretization(y))
@registry.register_with_info(no_transfrom=True)
def leadingones(y: np.ndarray) -> float:
"""Softmax discretization of leadingones (This multiplies the dimension by 2)."""
return _leadingones(discretization.softmax_discretization(y))
@registry.register_with_info(no_transfrom=True)
def onemax5(y: np.ndarray) -> float:
"""Softmax discretization of onemax with 5 possibles values.
This multiplies the dimension by 5."""
return _onemax(discretization.softmax_discretization(y, 5))
@registry.register_with_info(no_transfrom=True)
def jump5(y: np.ndarray) -> float:
"""Softmax discretization of jump with 5 possibles values.
This multiplies the dimension by 5."""
return _jump(discretization.softmax_discretization(y, 5))
@registry.register_with_info(no_transfrom=True)
def leadingones5(y: np.ndarray) -> float:
"""Softmax discretization of leadingones with 5 possibles values.
This multiplies the dimension by 5."""
return _leadingones(discretization.softmax_discretization(y, 5))
@registry.register_with_info(no_transfrom=True)
def genzcornerpeak(y: np.ndarray) -> float:
"""One of the Genz functions, originally used in integration,
tested in optim because why not."""
value = float(1 + np.mean(np.tanh(y)))
if value == 0:
return float("inf")
return value**(-len(y) - 1)
@registry.register_with_info(no_transfrom=True)
def minusgenzcornerpeak(y: np.ndarray) -> float:
"""One of the Genz functions, originally used in integration,
tested in optim because why not."""
return -float(genzcornerpeak(y))
@registry.register
def genzgaussianpeakintegral(x: np.ndarray) -> float:
"""One of the Genz functions, originally used in integration,
tested in optim because why not."""
return float(np.exp(-np.sum(x**2 / 4.)))
@registry.register
def minusgenzgaussianpeakintegral(x: np.ndarray) -> float:
"""One of the Genz functions, originally used in integration,
tested in optim because why not."""
return -float(np.exp(-sum(x**2 / 4.)))
@registry.register
def slope(x: np.ndarray) -> float:
return sum(x)
@registry.register
def linear(x: np.ndarray) -> float:
return float(np.tanh(x[0]))
@registry.register
def st0(x: np.ndarray) -> float:
"""Styblinksitang function with 0 noise."""
return _styblinksitang(x, 0)
@registry.register
def st1(x: np.ndarray) -> float:
"""Styblinksitang function with noise 1."""
return _styblinksitang(x, 1)
@registry.register
def st10(x: np.ndarray) -> float:
"""Styblinksitang function with noise 10."""
return _styblinksitang(x, 10)
@registry.register
def st100(x: np.ndarray) -> float:
"""Styblinksitang function with noise 100."""
return _styblinksitang(x, 100)
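# Hedged sanity checks for a couple of the registered functions above (inputs are illustrative):
#   import numpy as np
#   sphere(np.array([1.0, 2.0]))   # -> 5.0
#   rastrigin(np.zeros(4))         # -> 0.0 at the optimum
#   cigar(np.array([1.0, 1.0]))    # -> 1 + 1e6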
| 3.09375 | 3 |
tests/data/src/requires_simple/setup.py | rsumnerz/pip | 2 | 12797553 | <filename>tests/data/src/requires_simple/setup.py
from setuptools import find_packages, setup
setup(name='requires_simple',
version='0.1',
install_requires=['simple==1.0']
)
| 1.148438 | 1 |
main.py | flkapes/best-buy-bot | 0 | 12797554 | import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from random import randint
from time import sleep
from discord import Webhook, RequestsWebhookAdapter
import json
# Loads config file
json = json.load(open('config.json', 'r'))
webhook = Webhook.from_url(
json['discord_webook'],
adapter=RequestsWebhookAdapter()) # Creates webhook using discord url
driver = webdriver.Firefox(
executable_path=json['executable_path']) # Creates WebDriver instance
url = "https://www.bestbuy.com"
timeout = 3 # Timeout for element loaded checks
purchased = open('purchased.txt', 'r').read()
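# The keys this script reads from config.json (a sketch only; every value below is a placeholder,
# and the key names are simply the ones referenced later in this file):
#   {"discord_webook": "<webhook url>", "executable_path": "<path to geckodriver>",
#    "url": "<product path appended to bestbuy.com>", "zip_code": "...", "email": "...",
#    "password": "...", "first_name": "...", "last_name": "...", "address": "...",
#    "city": "...", "state": "...", "phone": "...", "cc_number": "...", "month": "...",
#    "year": "...", "cvv": "...", "bot_usage_case": "guest or sign-in"}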
def navigate_to_bb():
"""
* Navigates to the URL supplied, by default this is BestBuy.com
"""
driver.get(url)
print("navigated to bestbuy")
def navigate_to_product():
"""
* Navigates to the URL supplied + the product URL
"""
driver.get(url + json['url'])
def check_if_in_stock():
"""
This function tries to find the Add To Cart button; if it does not find it, the item is
currently out of stock and Selenium raises a NoSuchElementException.
:return: Returns True for in stock and False for not in stock
:rtype: bool
"""
try:
not_sold_out = driver.find_element_by_css_selector(
'button.btn-primary:nth-child(1)')
except NoSuchElementException:
return False
return True
def add_to_cart():
"""
This function finds the Add to Cart button, and then adds the product to cart
:rtype: object
"""
try:
element_present = EC.presence_of_element_located(
(By.CSS_SELECTOR, 'button.btn-primary:nth-child(1)'))
WebDriverWait(driver, timeout).until(element_present)
except TimeoutException:
print("Timed out waiting for page to load")
add_to_cart_button = driver.find_element_by_css_selector(
"button.btn-primary:nth-child(1)")
add_to_cart_button.click()
print("added to cart")
def navigate_to_cart():
"""
This function navigates to the BestBuy cart page
"""
driver.get(url + "/cart")
print("navigated to cart")
return driver.title
def change_zip_code_and_select_shipping():
"""
This function first selects the ZipCode element on the cart page, then types the correct
zip code for shipping, and then clicks update location.
:rtype: object
"""
try:
element_present = EC.presence_of_element_located(
(By.CSS_SELECTOR, '.change-zipcode-link'))
WebDriverWait(driver, 10).until(element_present)
except TimeoutException:
print("Timed out waiting for page to load")
zip_code_click = driver.find_element_by_css_selector(
".change-zipcode-link")
zip_code_click.send_keys(Keys.ENTER)
print("clicked on zip code")
zip_code_change = driver.find_element_by_css_selector(
"#location")
zip_code_change.send_keys(json['zip_code'])
update = driver.find_element_by_css_selector(
'#item-availability-links > button:nth-child(3)')
update.click()
print("changed zip code")
def click_checkout_key():
"""
This function clicks the checkout button on the BestBuy cart page
:rtype: object
"""
checkout_button = driver.find_element_by_css_selector(
".btn-lg")
checkout_button.click()
print("checkout started")
def select_guest_checkout():
"""
This function selects the Checkout as Guest option on the page following the BestBuy cart
:rtype: object
"""
try:
element_present = EC.presence_of_element_located(
(By.CSS_SELECTOR, '.cia-guest-content__continue'))
WebDriverWait(driver, 9).until(element_present)
except TimeoutException:
print("Timed out waiting for page to load")
guest = driver.find_element_by_css_selector('.cia-guest-content__continue')
guest.click()
def sign_in_and_click_button():
"""
This function types the supplied email and password and then clicks the Sign In button.
:rtype: object
"""
try:
element_present = EC.presence_of_element_located(
(By.CSS_SELECTOR,
'.cia-form__controls__submit'))
WebDriverWait(driver, timeout).until(element_present)
except TimeoutException:
print("Timed out waiting for page to load")
email = driver.find_element_by_id("fld-e")
email.send_keys(json['email'])
print("email typed")
password = driver.find_element_by_id("fld-p1")
password.send_keys(json['password'])
print("password typed")
button = driver.find_element_by_css_selector(
'.cia-form__controls__submit')
button.click()
print("signed in")
def check_if_verify():
"""
This function checks if the account has been flagged for manual user verification
:rtype: object
"""
try:
verify = driver.find_element_by_css_selector(
'h1.cia-section-title').text
if "Verify Your Account" in verify:
return False
else:
return True
except NoSuchElementException:
return False
# return True
def check_if_shipping_info_needed():
"""
This function checks to see if the bot needs to input the shipping information if the user has been
signed in using the previous functions
:rtype: object
"""
try:
element_present = EC.presence_of_element_located(
(By.ID, 'consolidatedAddresses.ui_address_2.firstName'))
WebDriverWait(driver, 3).until(element_present)
except BaseException:
return False
return True
def input_shipping_information():
"""
This function inputs the shipping information that the user provides if they have been logged in with
previous functions
:rtype: object
"""
try:
element_present = EC.presence_of_element_located(
(By.ID, 'consolidatedAddresses.ui_address_2.firstName'))
WebDriverWait(driver, timeout).until(element_present)
except TimeoutException:
print("Timed out waiting for page to load")
fname = driver.find_element_by_id(
"consolidatedAddresses.ui_address_2.firstName")
fname.send_keys(json['first_name'])
print("fname typed")
lname = driver.find_element_by_id(
"consolidatedAddresses.ui_address_2.lastName")
lname.send_keys(json["last_name"])
print("lname typed")
suggestions = driver.find_element_by_css_selector(".autocomplete__toggle")
if "Hide Suggestions" in suggestions.text:
suggestions.click()
print("suggestions removed")
address = driver.find_element_by_id(
"consolidatedAddresses.ui_address_2.street")
address.send_keys(json['address'])
print("street address typed")
city = driver.find_element_by_id("consolidatedAddresses.ui_address_2.city")
city.send_keys(json['city'])
print("city typed")
select = Select(driver.find_element_by_id(
'consolidatedAddresses.ui_address_2.state'))
select.select_by_visible_text(json['state'])
print("state selected")
zip_code = driver.find_element_by_id(
'consolidatedAddresses.ui_address_2.zipcode')
zip_code.send_keys(json['zip_code'])
print("zip code address section typed")
def input_shipping_info_guest():
"""
This function inputs the shipping information that the user provides if they have selected to checkout
as a guest
:rtype: object
"""
fname = driver.find_element_by_xpath(
"/html[1]/body[1]/div[1]/div[2]/div[1]/div[2]/div[1]/div[1]/main[1]/div[2]/div[2]/form[1]/section[1]/div[1]/div[1]/div[1]/div[1]/section[1]/div[2]/div[1]/section[1]/section[1]/div[1]/label[1]/div[1]/input[1]")
for i in range(len(json['first_name'])):
fname.send_keys(json['first_name'][i])
print(json['first_name'] + " typed")
lname = driver.find_element_by_xpath(
"/html[1]/body[1]/div[1]/div[2]/div[1]/div[2]/div[1]/div[1]/main[1]/div[2]/div[2]/form[1]/section[1]/div[1]/div[1]/div[1]/div[1]/section[1]/div[2]/div[1]/section[1]/section[1]/div[2]/label[1]/div[1]/input[1]")
for i in range(len(json['last_name'])):
lname.send_keys(json["last_name"][i])
print("lname typed")
suggestions = driver.find_element_by_css_selector(".autocomplete__toggle")
if "Hide Suggestions" in suggestions.text:
suggestions.click()
print("suggestions removed")
address = driver.find_element_by_xpath(
"/html[1]/body[1]/div[1]/div[2]/div[1]/div[2]/div[1]/div[1]/main[1]/div[2]/div[2]/form[1]/section[1]/div[1]/div[1]/div[1]/div[1]/section[1]/div[2]/div[1]/section[1]/section[1]/div[3]/label[1]/div[2]/div[1]/div[1]/input[1]")
for i in range(len(json['address'])):
address.send_keys(json['address'][i])
print("street address typed")
city = driver.find_element_by_xpath(
"/html[1]/body[1]/div[1]/div[2]/div[1]/div[2]/div[1]/div[1]/main[1]/div[2]/div[2]/form[1]/section[1]/div[1]/div[1]/div[1]/div[1]/section[1]/div[2]/div[1]/section[1]/section[1]/div[5]/div[1]/div[1]/label[1]/div[1]/input[1]")
for i in range(len(json['city'])):
city.send_keys(json['city'][i])
print("city typed")
select = Select(driver.find_element_by_xpath(
'/html[1]/body[1]/div[1]/div[2]/div[1]/div[2]/div[1]/div[1]/main[1]/div[2]/div[2]/form[1]/section[1]/div[1]/div[1]/div[1]/div[1]/section[1]/div[2]/div[1]/section[1]/section[1]/div[5]/div[1]/div[2]/label[1]/div[1]/div[1]/select[1]'))
select.select_by_visible_text(json['state'])
print("state selected")
zip_code = driver.find_element_by_xpath(
'/html[1]/body[1]/div[1]/div[2]/div[1]/div[2]/div[1]/div[1]/main[1]/div[2]/div[2]/form[1]/section[1]/div[1]/div[1]/div[1]/div[1]/section[1]/div[2]/div[1]/section[1]/section[1]/div[6]/div[1]/div[1]/label[1]/div[1]/input[1]')
for i in range(len(json['zip_code'])):
zip_code.send_keys(json['zip_code'][i])
print("zip code address section typed")
def input_phone_and_email():
"""
This function inputs the phone number and email that the user has provided if they are checking out
as a guest
:rtype: object
"""
email = driver.find_element_by_id('user.emailAddress')
email.send_keys(json['email'])
phone = driver.find_element_by_id('user.phone')
phone.send_keys(json['phone'])
def check_if_payment_info_on_page():
"""
This function checks if the bot must enter payment information on the current page
:rtype: object
"""
try:
cvv = driver.find_element_by_id('credit-card-cvv')
except NoSuchElementException:
return False
return True
def click_continue_to_payment_info():
"""
This function clicks the continue to payment information if the previous function returns False
:rtype: object
"""
button = driver.find_element_by_css_selector(
'.btn-lg')
button.click()
def input_payment_info():
"""
This function inputs the CVV if the user has been logged in during a previous function and has a card saved
:rtype: object
"""
cvv = driver.find_element_by_id('credit-card-cvv')
cvv.send_keys(json['cvv'])
print("CVV added")
def input_payment_info_guest():
"""
This function inputs the payment information of the user if they have selected Guest checkout
:rtype: object
"""
try:
element_present = EC.presence_of_element_located(
(By.ID, 'optimized-cc-card-number'))
WebDriverWait(driver, timeout).until(element_present)
except TimeoutException:
print("Timed out waiting for page to load")
cc_number = driver.find_element_by_id(
'optimized-cc-card-number')
cc_number.send_keys(json['cc_number'])
select = Select(driver.find_element_by_name(
'expiration-month'))
select.select_by_visible_text(json['month'])
print("month selected")
select = Select(driver.find_element_by_name(
'expiration-year'))
select.select_by_visible_text(json['year'])
print("year selected")
cvv = driver.find_element_by_css_selector('#credit-card-cvv')
cvv.send_keys(json['cvv'])
print("CVV typed")
def place_order():
"""
This function places the order by clicking the final button
:rtype: object
"""
button = driver.find_element_by_css_selector(
'.btn-lg')
button.click()
def main(guest_or_sign_in):
time_start = 0
time_end = 0
if purchased.strip() == "0":
in_stock = 0
while in_stock == 0:
navigate_to_product()
driver.implicitly_wait(0.3)
y = check_if_in_stock()
if not y:
in_stock = 0
randinteger = randint(1, 5)
print(
"Sleeping for " +
str(randinteger) +
" seconds due to product not being in stock")
sleep(randinteger)
else:
#print("Stock found - running script")
#webhook.send("@everyone Stock Found")
#webhook.send(url + json['url'])
time_start = time.time()
add_to_cart()
in_stock = 1
navigate_to_cart()
change_zip_code_and_select_shipping()
click_checkout_key()
if guest_or_sign_in == "sign-in":
sign_in_and_click_button()
if not check_if_verify():
quit(0)
if check_if_shipping_info_needed() is True:
input_shipping_information()
if check_if_payment_info_on_page() is False:
click_continue_to_payment_info()
input_payment_info()
# place_order()
time_end = time.time()
time_diff = time_end - time_start
webhook.send(
"@everyone Purchased, Time elapsed: " +
str(time_diff) +
" Seconds")
json2 = open('purchased.txt', 'w')
json2.write('1')
json2.close()
else:
input_payment_info()
# place_order
time_end = time.time()
time_diff = time_end - time_start
webhook.send(
"@everyone Purchased, Time elapsed: " +
str(time_diff) +
" Seconds")
json2 = open('purchased.txt', 'w')
json2.write('1')
json2.close()
else:
if check_if_payment_info_on_page() is False:
click_continue_to_payment_info()
input_payment_info()
# place_order()
time_end = time.time()
time_diff = time_end - time_start
webhook.send(
"@everyone Purchased, Time elapsed: " +
str(time_diff) +
" Seconds")
json2 = open('purchased.txt', 'w')
json2.write('1')
json2.close()
else:
input_payment_info()
# place_order
time_end = time.time()
time_diff = time_end - time_start
webhook.send(
"@everyone Purchased, Time elapsed: " +
str(time_diff) +
" Seconds")
json2 = open('purchased.txt', 'w')
json2.write('1')
json2.close()
elif guest_or_sign_in == "guest":
select_guest_checkout()
# driver.refresh()
input_shipping_info_guest()
input_phone_and_email()
click_continue_to_payment_info()
input_payment_info_guest()
# place_order()
time_end = time.time()
time_diff = time_end - time_start
webhook.send(
"@everyone Purchased, Time elapsed: " +
str(time_diff) +
" Seconds")
json2 = open('purchased.txt', 'w')
json2.write('1')
json2.close()
else:
webhook.send(
"@everyone Not purchased as item has already been bought. "
"To reset this please open purchased.txt and replace the 0 with a 1")
quit(0)
main(guest_or_sign_in=json['bot_usage_case'])
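# purchased.txt acts as a one-shot sentinel: main() only attempts a purchase while the file
# contains "0", and it writes "1" after buying. A minimal reset sketch (path assumed relative):
#   with open('purchased.txt', 'w') as f:
#       f.write('0')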
| 2.828125 | 3 |
api/serializers.py | vault-the/babybuddy | 0 | 12797555 | <reponame>vault-the/babybuddy<filename>api/serializers.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework import serializers
from django.contrib.auth.models import User
from core.models import (Child, DiaperChange, Feeding, Note, Sleep, Timer,
TummyTime)
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username')
class ChildSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Child
fields = ('first_name', 'last_name', 'birth_date', 'slug')
lookup_field = 'slug'
class DiaperChangeSerializer(serializers.HyperlinkedModelSerializer):
child = ChildSerializer()
class Meta:
model = DiaperChange
fields = ('child', 'time', 'wet', 'solid', 'color')
class FeedingSerializer(serializers.HyperlinkedModelSerializer):
child = ChildSerializer()
class Meta:
model = Feeding
fields = ('child', 'start', 'end', 'duration', 'type', 'method',
'amount')
class NoteSerializer(serializers.HyperlinkedModelSerializer):
child = ChildSerializer()
class Meta:
model = Note
fields = ('child', 'note', 'time')
class SleepSerializer(serializers.HyperlinkedModelSerializer):
child = ChildSerializer()
class Meta:
model = Sleep
fields = ('child', 'start', 'end', 'duration')
class TimerSerializer(serializers.HyperlinkedModelSerializer):
user = UserSerializer()
class Meta:
model = Timer
fields = ('name', 'start', 'end', 'duration', 'active', 'user')
class TummyTimeSerializer(serializers.HyperlinkedModelSerializer):
child = ChildSerializer()
class Meta:
model = TummyTime
fields = ('child', 'start', 'end', 'duration', 'milestone')
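# Hedged usage sketch (field values are illustrative and the instance is not saved to the DB):
#   child = Child(first_name='Ada', last_name='L.', birth_date='2020-01-01', slug='ada-l')
#   ChildSerializer(child).data   # -> {'first_name': 'Ada', 'last_name': 'L.', ...}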
| 2.109375 | 2 |
api/drinks/models/__init__.py | gthole/drink-stash | 7 | 12797556 | <filename>api/drinks/models/__init__.py
from .activities import Activity
from .books import Book, BookUser
from .recipes import Quantity, Recipe
from .lists import UserList, UserListRecipe
from .users import UserIngredient, Profile
from .comments import Comment
from .ingredients import Ingredient
from .uom import Uom
from .tags import Tag
| 1.34375 | 1 |
utility.py | IamVicky90/insurance-prediction-pyspark | 0 | 12797557 | from asyncore import read
import os
import shutil
import yaml
import json
from app_logger import logger
from datetime import datetime
import uuid
def create_directory(path: str, is_recreate: bool = False)->None:
"""Utility to create the dirctory
Args:
path (str): Give the full path with directory name
is_recreate (bool, optional): If True then it will first delete and then ceate the directory . Defaults to False.
"""
if is_recreate:
try:
shutil.rmtree(path)
except Exception:
pass
os.makedirs(path,exist_ok=True) # It will not throw an error if the folder already exists
def read_params(config_path: str ='config/params.yaml')->dict:
"""Responsible for reading the yaml file
Args:
config_path (str): Path of the Yaml file . Defaults to 'config/params.yaml'
Returns:
dict: Return the details of the yaml file
"""
with open(config_path, 'r') as f:
return yaml.safe_load(f)
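# The helpers below assume config/params.yaml contains at least these keys (values are placeholders):
#   base: {author: <name>, project_id: <id>}
#   database_logs:
#     training_logs: {database_name: <training logs db>}
#     prediction_logs: {database_name: <prediction logs db>}
#   data_schemas: {training_schema: <path to schema_training.json>,
#                  prediction_schema: <path to schema_prediction.json>}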
def get_log_object_for_training(collection_name: str, execution_id : str=None, executed_by: str=None, project_id :str=None, is_log_enabled : bool=True):
"""It will give the Log Object for training
Args:
collection_name (str): Name of the collection in which the log will be stored
execution_id (str, optional): Execution id. Defaults to None.
executed_by (str, optional): Executed by. Defaults to None.
project_id (str, optional): Id of the project. Defaults to None.
is_log_enabled (bool, optional): If set to True, only then will it write the logs. Defaults to True.
Returns:
Logger: Logger Object
"""
params=read_params()
if execution_id==None:
execution_id=uuid.uuid4().hex
if executed_by==None:
executed_by=params['base']['author']
if project_id==None:
project_id = params['base']['project_id']
logger_obj = logger.Logger(execution_id=execution_id, executed_by=executed_by, project_id=project_id,
databasename=params['database_logs']['training_logs']['database_name'], collection_name=collection_name, is_log_enabled=is_log_enabled)
return logger_obj
def get_log_object_for_prediction(collection_name: str, execution_id : str=None, executed_by: str=None, project_id :str=None, is_log_enabled : bool=True):
"""It will give the Log Object for prediction
Args:
collection_name (str): Name of the collection in which the log will be stored
execution_id (str, optional): Execution id. Defaults to None.
executed_by (str, optional): Executed by. Defaults to None.
project_id (str, optional): Id of the project. Defaults to None.
is_log_enabled (bool, optional): If set to True, only then will it write the logs. Defaults to True.
Returns:
Logger: Logger Object
"""
params=read_params()
if execution_id==None:
execution_id=uuid.uuid4().hex
if executed_by==None:
executed_by=params['base']['author']
if project_id==None:
project_id = params['base']['project_id']
logger_obj = logger.Logger(execution_id=execution_id, executed_by=executed_by, project_id=project_id,
databasename=params['database_logs']['prediction_logs']['database_name'], collection_name=collection_name, is_log_enabled=is_log_enabled)
return logger_obj
def read_prediction_schema():
"""Responsible for reading the schema from schema_prediction.json
"""
params=read_params()
path=params['data_schemas']['prediction_schema']
with open(path) as f:
schema=json.load(f)
LengthOfDateStampInFile = schema['LengthOfDateStampInFile']
LengthOfTimeStampInFile = schema['LengthOfTimeStampInFile']
NumberofColumns = schema['NumberofColumns']
ColName = schema['ColName']
return LengthOfDateStampInFile,LengthOfTimeStampInFile,NumberofColumns,ColName
def read_training_schema():
"""Responsible for reading the schema from schema_training.json
"""
params=read_params()
path = params['data_schemas']['training_schema']
with open(path) as f:
schema=json.load(f)
LengthOfDateStampInFile = schema['LengthOfDateStampInFile']
LengthOfTimeStampInFile = schema['LengthOfTimeStampInFile']
NumberofColumns = schema['NumberofColumns']
ColName = schema['ColName']
return LengthOfDateStampInFile,LengthOfTimeStampInFile,NumberofColumns,ColName
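# Both schema files are assumed to look roughly like this (field names taken from the readers
# above; the values shown are placeholders):
#   {"LengthOfDateStampInFile": 8, "LengthOfTimeStampInFile": 6,
#    "NumberofColumns": 12, "ColName": {"<column name>": "<dtype>", ...}}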
def get_date():
"""Returns the current date.
"""
return datetime.now().date().strftime('%d-%m-%y')
def get_time():
"""Returns the current time
"""
return datetime.now().time().strftime('%H-%M-%S')
| 2.734375 | 3 |
SMPyBandits/Policies/Experimentals/UCBjulia.py | balbok0/SMPyBandits | 309 | 12797558 | <reponame>balbok0/SMPyBandits
# -*- coding: utf-8 -*-
""" The UCB policy for bounded bandits, with UCB indexes computed with Julia.
Reference: [Lai & Robbins, 1985].
.. warning::
Using a Julia function *from* Python will not speed up anything, as there is a lot of overhead in the "bridge" protocol used by pyjulia.
The idea of using naively a tiny Julia function to speed up computations is basically useless.
A naive benchmark showed that in this approach, :class:`UCBjulia` (used within Python) is about 125 times slower (!) than :class:`UCB`.
.. warning:: This is only experimental, and purely useless. See https://github.com/SMPyBandits/SMPyBandits/issues/98
"""
from __future__ import division, print_function # Python 2 compatibility
__author__ = "<NAME>"
__version__ = "0.9"
# WARNING: this is a HUGE hack to fix a mystery bug on importing this policy
from sys import path
from os.path import dirname
path.insert(0, '/'.join(dirname(__file__).split('/')[:-1]))
try:
from .IndexPolicy import IndexPolicy
except ImportError:
from IndexPolicy import IndexPolicy
class UCBjulia(IndexPolicy):
""" The UCB policy for bounded bandits, with UCB indexes computed with Julia.
Reference: [Lai & Robbins, 1985].
.. warning:: This is only experimental, and purely useless. See https://github.com/SMPyBandits/SMPyBandits/issues/98
"""
def __init__(self, nbArms, lower=0., amplitude=1.):
""" Will fail directly if the bridge with julia is unavailable or buggy."""
super(UCBjulia, self).__init__(nbArms, lower=lower, amplitude=amplitude)
self.t = 0
# Importing the julia module and creating the bridge
try:
import julia
except ImportError as e:
print("Error: unable to load the 'julia' Python module. Install with 'pip install julia', or see https://github.com/JuliaPy/pyjulia/") # DEBUG
raise e
_j = julia.Julia()
try:
self._index_function = _j.evalfile("Policies/UCBjulia.jl")
except RuntimeError:
try:
self._index_function = _j.evalfile("UCBjulia.jl")
except RuntimeError:
raise ValueError("Error: Unable to load 'UCBjulia.jl' julia file.") # WARNING
try:
self._index_function([1], [1], 1, 1)
except (RuntimeError, ValueError):
raise ValueError("Error: the index function loaded from 'UCBjulia.jl' is bugged or unavailable.") # WARNING
def computeIndex(self, arm):
r""" Compute the current index, at time t and after :math:`N_k(t)` pulls of arm k:
.. math:: I_k(t) = \frac{X_k(t)}{N_k(t)} + \sqrt{\frac{2 \log(t)}{N_k(t)}}.
"""
# WARNING: the 'arm + 1' part comes from the difference between 0-based indexes
# for Python and the 1-based indexes in Julia. The rest works pretty well!
return self._index_function(self.rewards, self.pulls, self.t, arm + 1)
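# For reference, the index from computeIndex's docstring written directly in Python/NumPy
# (a sketch of the formula only, not of the Julia implementation):
#   import numpy as np
#   def ucb_index(rewards, pulls, t, arm):
#       return rewards[arm] / pulls[arm] + np.sqrt(2 * np.log(t) / pulls[arm])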
| 1.796875 | 2 |
tests/test_pmd.py | believeinlain/asynch-cv | 1 | 12797559 | """Test of pmd_consumer functionality, with a selection of data."""
from os.path import join, expanduser
from async_cv.play_file import play_file
from async_cv.event_processing.pmd_consumer import pmd_consumer
data_root = 'OneDrive\\Documents\\NIWC\\NeuroComp\\boat_tests\\'
annot_root = 'OneDrive\\Documents\\NIWC\\NeuroComp\\boat_tests\\'
files = {
'june_12': {
'boat_tests': {
2: 'Davis346red-2020-06-12T12-11-45-0700-0_Test_2.aedat4',
3: 'Davis346red-2020-06-12T12-15-01-0700-0_Test_3.aedat4',
5: 'Davis346red-2020-06-12T12-24-03-0700-0_Test_5.aedat4',
6: 'Davis346red-2020-06-12T12-25-39-0700-0_Test_6.aedat4'
},
'annotations': {
2: 'Davis346red-2020-06-12T12-11-45-0700-0_Test_2.xml',
3: 'Davis346red-2020-06-12T12-15-01-0700-0_Test_3.xml',
5: 'Davis346red-2020-06-12T12-24-03-0700-0_Test_5.xml',
6: 'Davis346red-2020-06-12T12-25-39-0700-0_Test_6.xml'
},
'data_format': '.aedat4'
},
'june_26': {
'boat_tests': {
# 2: 'Davis346red-2020-06-26T12-26-42-0700-00000195-0_Test_2.aedat4',
3: 'Davis346red-2020-06-26T12-27-39-0700-00000195-0_Test_3.aedat4',
# 4: 'Davis346red-2020-06-26T12-28-38-0700-00000195-0_Test_4.aedat4',
6: 'Davis346red-2020-06-26T12-30-20-0700-00000195-0_Test_6.aedat4',
9: 'Davis346red-2020-06-26T12-32-12-0700-00000195-0_Test_9.aedat4',
21: 'Davis346red-2020-06-26T13-22-40-0700-00000195-0_Test_21.aedat4'
},
'annotations': {
# 2: 'Davis346red-2020-06-26T12-26-42-0700-00000195-0_Test_2.xml',
3: 'Davis346red-2020-06-26T12-27-39-0700-00000195-0_Test_3.xml',
# 4: 'Davis346red-2020-06-26T12-28-38-0700-00000195-0_Test_4.xml',
6: 'Davis346red-2020-06-26T12-30-20-0700-00000195-0_Test_6.xml',
9: 'Davis346red-2020-06-26T12-32-12-0700-00000195-0_Test_9.xml',
21: 'Davis346red-2020-06-26T13-22-40-0700-00000195-0_Test_21.xml'
},
'data_format': '.aedat4'
},
'april_12': {
'boat_tests': {
0: '25mm-1000us-speedboat-2021_04_12_15_09_24.aedat4',
1: '25mm-1200us-drifting-boat-2021_04_12_15_33_47.aedat4',
2: '75mm-1500us-drifting-boat-2021_04_12_15_35_24.aedat4',
3: '75mm-2000us-boat2-2021_04_12_15_21_16.aedat4',
4: '75mm-2000us-boat3-2021_04_12_15_30_50.aedat4',
5: '75mm-2000us-filter-boat-2021_04_12_15_16_43.aedat4',
6: '75mm-2000us-on-off-filter-boat-2021_04_12_15_17_24.aedat4',
# 7: '75mm-2000us-speedboat-2021_04_12_15_26_01.aedat4'
},
'annotations': {
0: '25mm-1000us-speedboat-2021_04_12_15_09_24-2021_06_03_18_58_28-cvat+for+video+1.1.xml',
1: '25mm-1200us-drifting-boat-2021_04_12_15_33_47-2021_06_03_21_30_33-cvat+for+video+1.1.xml',
2: '75mm-1500us-drifting-boat-2021_04_12_15_35_24-2021_06_03_21_50_58-cvat+for+video+1.1.xml',
3: '75mm-2000us-boat2-2021_04_12_15_21_16-2021_06_03_22_21_59-cvat+for+video+1.1.xml',
4: '75mm-2000us-boat3-2021_04_12_15_30_50-2021_06_03_22_55_50-cvat+for+video+1.1.xml',
5: '75mm-2000us-filter-boat-2021_04_12_15_16_43-2021_06_03_23_20_19-cvat+for+video+1.1.xml',
6: '75mm-2000us-on-off-filter-boat-2021_04_12_15_17_24-2021_06_03_23_26_34-cvat+for+video+1.1.xml',
# 7: '75mm-2000us-speedboat-2021_04_12_15_26_01-2021_06_07_15_08_31-cvat+for+video+1.1.xml'
}
},
# 'april_29': {
# 1: 'out_2021-04-29_17-56-14.raw',
# 2: 'out_2021-04-29_17-57-47.raw',
# 3: 'out_2021-04-29_18-02-48.raw',
# 4: 'out_2021-04-29_18-04-41.raw',
# 5: 'out_2021-04-29_18-06-47.raw',
# 6: 'out_2021-04-29_18-10-59.raw',
# 7: 'out_2021-04-29_18-17-21.raw',
# 8: 'out_2021-04-29_18-20-10.raw'
# },
}
def run_one(group, test, setting=''):
run_name = setting+f'{group}_run_{test:02d}'
data_path = join(expanduser('~\\'), data_root, join(
group, files[group]['boat_tests'][test]))
annot_path = join(expanduser('~\\'), annot_root, join(
group, files[group]['annotations'][test]))
play_file(data_path, 33, pmd_consumer,
run_name=run_name,
video_out=True,
targets=['vessel', 'boat', 'RHIB'],
annot_file=annot_path,
show_metrics=False,
parameters=parameters
)
def run_group(group, setting=''):
for test in files[group]['boat_tests'].keys():
run_one(group, test, setting)
def run_all(setting=''):
for group in files:
run_group(group, setting)
for factor in range(0, 1010, 10):
# Define PMD parameters
parameters = {
'x_div': 4, # number of horizontal divisions
'y_div': 4, # number of vertical divisions
'us_per_event': 50, # processing time allotted to each event handler to process events
'temporal_filter': 100_000,
# number of events to remember for each (x, y) position
'event_buffer_depth': 8,
'tf': 200_000, # how far back in time to consider events for filtering
'tc': 200_000, # how far back in time to consider events for clustering
'n': 4, # minimum number of correlated events required to allow a particular event through the filter
'max_cluster_size': 30, # maximum taxicab dist from center of cluster to each event
# microseconds periodicity to flush expired (>tc) events from buffer
'buffer_flush_period': 20_000,
'num_analyzers': 32,
'sample_period': 100_000, # microseconds between each centroid position sample
'long_duration': 3_000_000, #5_000_000,
'short_duration': 2_000_000, #3_000_000,
'detection_tau': -0.002,
'ratio_threshold': 0,
'dot_ratio_threshold': 1.0,
'ratio_stability_factor': 1.0,
'dot_ratio_stability_factor': factor,
}
run_group('june_12', f'{factor:03}/')
run_group('june_26', f'{factor:03}/')
run_group('april_12', f'{factor:03}/')
# run_all()
# run_one('june_12', 6)
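# Illustrative single-run invocation (the factor value and setting label are arbitrary choices):
#   parameters['dot_ratio_stability_factor'] = 100
#   run_one('june_12', 2, setting='single/')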
| 1.828125 | 2 |
examples/live_sowemail_example.py | SoWeMail/sowerest-python | 0 | 12797560 | <gh_stars>0
import os
import sowerest
host = "http://api.sowemail.com:9000"
api_key = os.environ.get('SOWEMAIL_API_KEY')
request_headers = {
"Authorization": 'Bearer {}'.format(api_key)
}
version = 1
client = sowerest.Client(host=host,
request_headers=request_headers,
version=version)
# Send email
data = {
"personalizations": [
{
"to": [
{
"email": "<EMAIL>"
}
]
}
],
"from": {
"email": "<EMAIL>"
},
"subject": "Hello from SoWeMail",
"content": [
{
"type": "text/plain",
"value": "Simple email sending example using python's sowerest library"
}
]
}
response = client.mail.send.post(request_body=data)
print(response.status_code)
print(response.headers)
print(response.body)
| 2.5625 | 3 |
DCNN-CIFAR10.py | bansalshubh91/Deep-CNN---CIFAR10 | 0 | 12797561 | <reponame>bansalshubh91/Deep-CNN---CIFAR10<gh_stars>0
# coding: utf-8
# In[ ]:
# # coding: utf-8
# # In[1]:
import numpy as np
import h5py
import time
import copy
from random import randint
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
# In[2]:
batch_size = 50
# Download and construct CIFAR-10 dataset.
train_dataset = torchvision.datasets.CIFAR10(root='./data/',
train=True,
transform=transforms.Compose([transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ColorJitter(brightness=0.4),
transforms.ToTensor()]),
download=False)
# Data loader (this provides queues and threads in a very simple way).
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
# When iteration starts, queue and thread start to load data from files.
data_iter = iter(train_loader)
# Mini-batch images and labels.
images, labels = data_iter.next()
# In[3]:
# In[4]:
#number of hidden units
H = 500
#Model architecture
class CIFAR10Model(nn.Module):
def __init__(self):
super(CIFAR10Model, self).__init__()
# input is 3x32x32
#These variables store the model parameters.
self.conv1 = nn.Conv2d(3, 64, kernel_size=4, stride=1, padding=2 )
self.conv1_bn = nn.BatchNorm2d(64)
self.conv2 = nn.Conv2d(64, 64, kernel_size=4,stride=1, padding=2 )
self.conv2_drop = nn.Dropout2d()
self.conv3 = nn.Conv2d(64, 64, kernel_size=4,stride=1, padding=2 )
self.conv3_bn = nn.BatchNorm2d(64)
self.conv4 = nn.Conv2d(64, 64, kernel_size=4,stride=1, padding=2 )
self.conv4_drop = nn.Dropout2d()
self.conv5 = nn.Conv2d(64, 64, kernel_size=4,stride=1, padding=2 )
self.conv5_bn = nn.BatchNorm2d(64)
self.conv6 = nn.Conv2d(64, 64, kernel_size=3,stride=1, padding=0 )
self.conv6_drop = nn.Dropout2d()
self.conv7 = nn.Conv2d(64, 64, kernel_size=3,stride=1, padding=0 )
self.conv7_bn = nn.BatchNorm2d(64)
self.conv8 = nn.Conv2d(64, 64, kernel_size=3,stride=1, padding=0 )
self.conv8_bn = nn.BatchNorm2d(64)
self.conv8_drop = nn.Dropout2d()
self.conv9 = nn.Conv2d(64, 64, kernel_size=4,stride=1, padding=2 )
self.conv9_bn = nn.BatchNorm2d(64)
self.conv9_drop = nn.Dropout2d()
self.fc1 = nn.Linear(64 * 5 * 5, H)
self.fc2 = nn.Linear(H, H)
self.fc3 = nn.Linear(H, 10)
def forward(self, x):
#Here is where the network is specified.
x = F.relu(self.conv1_bn(self.conv1(x)))
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, kernel_size=2,stride=2)
x = self.conv2_drop(x)
x = F.relu(self.conv3_bn(self.conv3(x)))
x = F.relu(self.conv4(x))
x = F.max_pool2d(x, kernel_size=2,stride=2)
# x = self.conv4_drop(x)
x = F.relu(self.conv5_bn(self.conv5(x)))
x = F.relu(self.conv6(x))
# x = self.conv6_drop(x)
x = F.relu(self.conv7_bn(self.conv7(x)))
x = F.relu(self.conv8_bn(self.conv8(x)))
# x = self.conv8_drop(x)
x = F.relu(self.conv9_bn(self.conv9(x)))
# x = self.conv9_drop(x)
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return F.log_softmax(x, dim=1)
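# Spatial-size bookkeeping for a 32x32 input, which is why fc1 expects 64*5*5 features:
# 32 -conv1-> 33 -conv2-> 34 -pool-> 17 -conv3-> 18 -conv4-> 19 -pool-> 9
#    -conv5-> 10 -conv6-> 8 -conv7-> 6 -conv8-> 4 -conv9-> 5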
model = CIFAR10Model()
model.cuda()
# In[5]:
#Stochastic gradient descent optimizer
optimizer = optim.RMSprop(model.parameters(), lr=0.0001)
num_epochs = 1
model.train()
train_loss = []
# In[6]:
#Train Model
for epoch in range(num_epochs):
train_accu = []
for images, labels in train_loader:
data, target = Variable(images).cuda(), Variable(labels).cuda()
#PyTorch "accumulates gradients", so we need to set the stored gradients to zero when there’s a new batch of data.
optimizer.zero_grad()
#Forward propagation of the model, i.e. calculate the hidden units and the output.
output = model(data)
#The objective function is the negative log-likelihood function.
loss = F.nll_loss(output, target)
#This calculates the gradients (via backpropagation)
loss.backward()
train_loss.append(loss.data[0])
#The parameters for the model are updated using stochastic gradient descent.
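#The loop below caps RMSprop's internal 'step' counter before calling optimizer.step();
#this is presumably a workaround for a numerical issue with large step counts in older
#PyTorch optimizer implementations.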
for group in optimizer.param_groups:
for p in group['params']:
state = optimizer.state[p]
if('step' in state and state['step']>=1024):
state['step'] = 1000
optimizer.step()
#Calculate accuracy on the training set.
prediction = output.data.max(1)[1] # first column has actual prob.
accuracy = ( float( prediction.eq(target.data).sum() ) /float(batch_size))*100.0
train_accu.append(accuracy)
accuracy_epoch = np.mean(train_accu)
print(epoch, accuracy_epoch)
# # Save and load the entire model.
# torch.save(model, 'model.ckpt')
# model = torch.load('model.ckpt')
# In[ ]:
# Download and construct CIFAR-10 dataset.
test_dataset = torchvision.datasets.CIFAR10(root='./data/',
train=False,
transform=transforms.ToTensor(),
download=False)
# Data loader (this provides queues and threads in a very simple way).
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# When iteration starts, queue and thread start to load data from files.
data_iter = iter(test_loader)
# Mini-batch images and labels.
images, labels = data_iter.next()
# # In[ ]:
#Calculate accuracy of trained model on the Test Set
model.eval()
test_accu = []
for images, labels in test_loader:
data, target = Variable(images).cuda(), Variable(labels).cuda()
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
prediction = output.data.max(1)[1] # first column has actual prob.
accuracy = ( float( prediction.eq(target.data).sum() ) /float(batch_size))*100.0
test_accu.append(accuracy)
accuracy_test = np.mean(test_accu)
print(accuracy_test)
# # In[51]:
# #Calculate accuracy of trained model on the Test Set
# # model.eval()
output = torch.zeros((50,50,10)).cuda() # keep the running averages on the GPU, matching model(data).data
prediction = torch.zeros((50,1))
accuracy = torch.zeros((50,1))
test_accu = []
for images, labels in test_loader:
data, target = Variable(images).cuda(), Variable(labels).cuda()
# optimizer.zero_grad()
output[0,:,:] = model(data).data
for i in range(1,50):
output[i,:,:] = output[i-1,:,:] + model(data).data
for i in range(50):
output[i,:,:] = output[i,:,:] / (i+1)
# prediction[i] = output[i,:,:].data.max(1)[1] # first column has actual prob
prediction = torch.max(output[i,:,:], 1)[1] # class indices after averaging (i+1) forward passes
accuracy[i] = ( float( prediction.eq(target.data).sum() ) /float(batch_size))*100.0
test_accu.append(accuracy)
test_accu = np.asarray(test_accu).reshape((10000//50,50))
accuracy_test = np.mean(test_accu, axis = 0)
print(accuracy_test)
| 2.859375 | 3 |
alipay/aop/api/domain/CollectReceiptOpenApiDTO.py | antopen/alipay-sdk-python-all | 213 | 12797562 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
class CollectReceiptOpenApiDTO(object):
def __init__(self):
self._bsn_no = None
self._bsn_ref_no = None
self._business_scene = None
self._channel = None
self._channel_log_no = None
self._channel_memo = None
self._collect_amt = None
self._collect_date = None
self._collect_status = None
self._collected_amt = None
self._creator = None
self._freeze_amt = None
self._fund_log_id = None
self._gl_exchange_rate = None
self._gmt_create = None
self._gmt_modified = None
self._payee_account_name = None
self._payee_account_no = None
self._payee_inst_id = None
self._payee_ip_role_id = None
self._payer_account_name = None
self._payer_account_no = None
self._payer_bank_branch_name = None
self._payer_inst_id = None
self._payer_ip_role_id = None
self._receipt_no = None
self._ref_trans_no = None
self._ref_trans_no_type = None
self._source = None
self._status = None
self._tnt_inst_id = None
self._used_amt = None
self._writeoff_relative_id = None
@property
def bsn_no(self):
return self._bsn_no
@bsn_no.setter
def bsn_no(self, value):
self._bsn_no = value
@property
def bsn_ref_no(self):
return self._bsn_ref_no
@bsn_ref_no.setter
def bsn_ref_no(self, value):
self._bsn_ref_no = value
@property
def business_scene(self):
return self._business_scene
@business_scene.setter
def business_scene(self, value):
self._business_scene = value
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, value):
self._channel = value
@property
def channel_log_no(self):
return self._channel_log_no
@channel_log_no.setter
def channel_log_no(self, value):
self._channel_log_no = value
@property
def channel_memo(self):
return self._channel_memo
@channel_memo.setter
def channel_memo(self, value):
self._channel_memo = value
@property
def collect_amt(self):
return self._collect_amt
@collect_amt.setter
def collect_amt(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._collect_amt = value
else:
self._collect_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def collect_date(self):
return self._collect_date
@collect_date.setter
def collect_date(self, value):
self._collect_date = value
@property
def collect_status(self):
return self._collect_status
@collect_status.setter
def collect_status(self, value):
self._collect_status = value
@property
def collected_amt(self):
return self._collected_amt
@collected_amt.setter
def collected_amt(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._collected_amt = value
else:
self._collected_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def creator(self):
return self._creator
@creator.setter
def creator(self, value):
self._creator = value
@property
def freeze_amt(self):
return self._freeze_amt
@freeze_amt.setter
def freeze_amt(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._freeze_amt = value
else:
self._freeze_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def fund_log_id(self):
return self._fund_log_id
@fund_log_id.setter
def fund_log_id(self, value):
self._fund_log_id = value
@property
def gl_exchange_rate(self):
return self._gl_exchange_rate
@gl_exchange_rate.setter
def gl_exchange_rate(self, value):
self._gl_exchange_rate = value
@property
def gmt_create(self):
return self._gmt_create
@gmt_create.setter
def gmt_create(self, value):
self._gmt_create = value
@property
def gmt_modified(self):
return self._gmt_modified
@gmt_modified.setter
def gmt_modified(self, value):
self._gmt_modified = value
@property
def payee_account_name(self):
return self._payee_account_name
@payee_account_name.setter
def payee_account_name(self, value):
self._payee_account_name = value
@property
def payee_account_no(self):
return self._payee_account_no
@payee_account_no.setter
def payee_account_no(self, value):
self._payee_account_no = value
@property
def payee_inst_id(self):
return self._payee_inst_id
@payee_inst_id.setter
def payee_inst_id(self, value):
self._payee_inst_id = value
@property
def payee_ip_role_id(self):
return self._payee_ip_role_id
@payee_ip_role_id.setter
def payee_ip_role_id(self, value):
self._payee_ip_role_id = value
@property
def payer_account_name(self):
return self._payer_account_name
@payer_account_name.setter
def payer_account_name(self, value):
self._payer_account_name = value
@property
def payer_account_no(self):
return self._payer_account_no
@payer_account_no.setter
def payer_account_no(self, value):
self._payer_account_no = value
@property
def payer_bank_branch_name(self):
return self._payer_bank_branch_name
@payer_bank_branch_name.setter
def payer_bank_branch_name(self, value):
self._payer_bank_branch_name = value
@property
def payer_inst_id(self):
return self._payer_inst_id
@payer_inst_id.setter
def payer_inst_id(self, value):
self._payer_inst_id = value
@property
def payer_ip_role_id(self):
return self._payer_ip_role_id
@payer_ip_role_id.setter
def payer_ip_role_id(self, value):
self._payer_ip_role_id = value
@property
def receipt_no(self):
return self._receipt_no
@receipt_no.setter
def receipt_no(self, value):
self._receipt_no = value
@property
def ref_trans_no(self):
return self._ref_trans_no
@ref_trans_no.setter
def ref_trans_no(self, value):
self._ref_trans_no = value
@property
def ref_trans_no_type(self):
return self._ref_trans_no_type
@ref_trans_no_type.setter
def ref_trans_no_type(self, value):
self._ref_trans_no_type = value
@property
def source(self):
return self._source
@source.setter
def source(self, value):
self._source = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def tnt_inst_id(self):
return self._tnt_inst_id
@tnt_inst_id.setter
def tnt_inst_id(self, value):
self._tnt_inst_id = value
@property
def used_amt(self):
return self._used_amt
@used_amt.setter
def used_amt(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._used_amt = value
else:
self._used_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def writeoff_relative_id(self):
return self._writeoff_relative_id
@writeoff_relative_id.setter
def writeoff_relative_id(self, value):
self._writeoff_relative_id = value
def to_alipay_dict(self):
params = dict()
if self.bsn_no:
if hasattr(self.bsn_no, 'to_alipay_dict'):
params['bsn_no'] = self.bsn_no.to_alipay_dict()
else:
params['bsn_no'] = self.bsn_no
if self.bsn_ref_no:
if hasattr(self.bsn_ref_no, 'to_alipay_dict'):
params['bsn_ref_no'] = self.bsn_ref_no.to_alipay_dict()
else:
params['bsn_ref_no'] = self.bsn_ref_no
if self.business_scene:
if hasattr(self.business_scene, 'to_alipay_dict'):
params['business_scene'] = self.business_scene.to_alipay_dict()
else:
params['business_scene'] = self.business_scene
if self.channel:
if hasattr(self.channel, 'to_alipay_dict'):
params['channel'] = self.channel.to_alipay_dict()
else:
params['channel'] = self.channel
if self.channel_log_no:
if hasattr(self.channel_log_no, 'to_alipay_dict'):
params['channel_log_no'] = self.channel_log_no.to_alipay_dict()
else:
params['channel_log_no'] = self.channel_log_no
if self.channel_memo:
if hasattr(self.channel_memo, 'to_alipay_dict'):
params['channel_memo'] = self.channel_memo.to_alipay_dict()
else:
params['channel_memo'] = self.channel_memo
if self.collect_amt:
if hasattr(self.collect_amt, 'to_alipay_dict'):
params['collect_amt'] = self.collect_amt.to_alipay_dict()
else:
params['collect_amt'] = self.collect_amt
if self.collect_date:
if hasattr(self.collect_date, 'to_alipay_dict'):
params['collect_date'] = self.collect_date.to_alipay_dict()
else:
params['collect_date'] = self.collect_date
if self.collect_status:
if hasattr(self.collect_status, 'to_alipay_dict'):
params['collect_status'] = self.collect_status.to_alipay_dict()
else:
params['collect_status'] = self.collect_status
if self.collected_amt:
if hasattr(self.collected_amt, 'to_alipay_dict'):
params['collected_amt'] = self.collected_amt.to_alipay_dict()
else:
params['collected_amt'] = self.collected_amt
if self.creator:
if hasattr(self.creator, 'to_alipay_dict'):
params['creator'] = self.creator.to_alipay_dict()
else:
params['creator'] = self.creator
if self.freeze_amt:
if hasattr(self.freeze_amt, 'to_alipay_dict'):
params['freeze_amt'] = self.freeze_amt.to_alipay_dict()
else:
params['freeze_amt'] = self.freeze_amt
if self.fund_log_id:
if hasattr(self.fund_log_id, 'to_alipay_dict'):
params['fund_log_id'] = self.fund_log_id.to_alipay_dict()
else:
params['fund_log_id'] = self.fund_log_id
if self.gl_exchange_rate:
if hasattr(self.gl_exchange_rate, 'to_alipay_dict'):
params['gl_exchange_rate'] = self.gl_exchange_rate.to_alipay_dict()
else:
params['gl_exchange_rate'] = self.gl_exchange_rate
if self.gmt_create:
if hasattr(self.gmt_create, 'to_alipay_dict'):
params['gmt_create'] = self.gmt_create.to_alipay_dict()
else:
params['gmt_create'] = self.gmt_create
if self.gmt_modified:
if hasattr(self.gmt_modified, 'to_alipay_dict'):
params['gmt_modified'] = self.gmt_modified.to_alipay_dict()
else:
params['gmt_modified'] = self.gmt_modified
if self.payee_account_name:
if hasattr(self.payee_account_name, 'to_alipay_dict'):
params['payee_account_name'] = self.payee_account_name.to_alipay_dict()
else:
params['payee_account_name'] = self.payee_account_name
if self.payee_account_no:
if hasattr(self.payee_account_no, 'to_alipay_dict'):
params['payee_account_no'] = self.payee_account_no.to_alipay_dict()
else:
params['payee_account_no'] = self.payee_account_no
if self.payee_inst_id:
if hasattr(self.payee_inst_id, 'to_alipay_dict'):
params['payee_inst_id'] = self.payee_inst_id.to_alipay_dict()
else:
params['payee_inst_id'] = self.payee_inst_id
if self.payee_ip_role_id:
if hasattr(self.payee_ip_role_id, 'to_alipay_dict'):
params['payee_ip_role_id'] = self.payee_ip_role_id.to_alipay_dict()
else:
params['payee_ip_role_id'] = self.payee_ip_role_id
if self.payer_account_name:
if hasattr(self.payer_account_name, 'to_alipay_dict'):
params['payer_account_name'] = self.payer_account_name.to_alipay_dict()
else:
params['payer_account_name'] = self.payer_account_name
if self.payer_account_no:
if hasattr(self.payer_account_no, 'to_alipay_dict'):
params['payer_account_no'] = self.payer_account_no.to_alipay_dict()
else:
params['payer_account_no'] = self.payer_account_no
if self.payer_bank_branch_name:
if hasattr(self.payer_bank_branch_name, 'to_alipay_dict'):
params['payer_bank_branch_name'] = self.payer_bank_branch_name.to_alipay_dict()
else:
params['payer_bank_branch_name'] = self.payer_bank_branch_name
if self.payer_inst_id:
if hasattr(self.payer_inst_id, 'to_alipay_dict'):
params['payer_inst_id'] = self.payer_inst_id.to_alipay_dict()
else:
params['payer_inst_id'] = self.payer_inst_id
if self.payer_ip_role_id:
if hasattr(self.payer_ip_role_id, 'to_alipay_dict'):
params['payer_ip_role_id'] = self.payer_ip_role_id.to_alipay_dict()
else:
params['payer_ip_role_id'] = self.payer_ip_role_id
if self.receipt_no:
if hasattr(self.receipt_no, 'to_alipay_dict'):
params['receipt_no'] = self.receipt_no.to_alipay_dict()
else:
params['receipt_no'] = self.receipt_no
if self.ref_trans_no:
if hasattr(self.ref_trans_no, 'to_alipay_dict'):
params['ref_trans_no'] = self.ref_trans_no.to_alipay_dict()
else:
params['ref_trans_no'] = self.ref_trans_no
if self.ref_trans_no_type:
if hasattr(self.ref_trans_no_type, 'to_alipay_dict'):
params['ref_trans_no_type'] = self.ref_trans_no_type.to_alipay_dict()
else:
params['ref_trans_no_type'] = self.ref_trans_no_type
if self.source:
if hasattr(self.source, 'to_alipay_dict'):
params['source'] = self.source.to_alipay_dict()
else:
params['source'] = self.source
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
if self.tnt_inst_id:
if hasattr(self.tnt_inst_id, 'to_alipay_dict'):
params['tnt_inst_id'] = self.tnt_inst_id.to_alipay_dict()
else:
params['tnt_inst_id'] = self.tnt_inst_id
if self.used_amt:
if hasattr(self.used_amt, 'to_alipay_dict'):
params['used_amt'] = self.used_amt.to_alipay_dict()
else:
params['used_amt'] = self.used_amt
if self.writeoff_relative_id:
if hasattr(self.writeoff_relative_id, 'to_alipay_dict'):
params['writeoff_relative_id'] = self.writeoff_relative_id.to_alipay_dict()
else:
params['writeoff_relative_id'] = self.writeoff_relative_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = CollectReceiptOpenApiDTO()
if 'bsn_no' in d:
o.bsn_no = d['bsn_no']
if 'bsn_ref_no' in d:
o.bsn_ref_no = d['bsn_ref_no']
if 'business_scene' in d:
o.business_scene = d['business_scene']
if 'channel' in d:
o.channel = d['channel']
if 'channel_log_no' in d:
o.channel_log_no = d['channel_log_no']
if 'channel_memo' in d:
o.channel_memo = d['channel_memo']
if 'collect_amt' in d:
o.collect_amt = d['collect_amt']
if 'collect_date' in d:
o.collect_date = d['collect_date']
if 'collect_status' in d:
o.collect_status = d['collect_status']
if 'collected_amt' in d:
o.collected_amt = d['collected_amt']
if 'creator' in d:
o.creator = d['creator']
if 'freeze_amt' in d:
o.freeze_amt = d['freeze_amt']
if 'fund_log_id' in d:
o.fund_log_id = d['fund_log_id']
if 'gl_exchange_rate' in d:
o.gl_exchange_rate = d['gl_exchange_rate']
if 'gmt_create' in d:
o.gmt_create = d['gmt_create']
if 'gmt_modified' in d:
o.gmt_modified = d['gmt_modified']
if 'payee_account_name' in d:
o.payee_account_name = d['payee_account_name']
if 'payee_account_no' in d:
o.payee_account_no = d['payee_account_no']
if 'payee_inst_id' in d:
o.payee_inst_id = d['payee_inst_id']
if 'payee_ip_role_id' in d:
o.payee_ip_role_id = d['payee_ip_role_id']
if 'payer_account_name' in d:
o.payer_account_name = d['payer_account_name']
if 'payer_account_no' in d:
o.payer_account_no = d['payer_account_no']
if 'payer_bank_branch_name' in d:
o.payer_bank_branch_name = d['payer_bank_branch_name']
if 'payer_inst_id' in d:
o.payer_inst_id = d['payer_inst_id']
if 'payer_ip_role_id' in d:
o.payer_ip_role_id = d['payer_ip_role_id']
if 'receipt_no' in d:
o.receipt_no = d['receipt_no']
if 'ref_trans_no' in d:
o.ref_trans_no = d['ref_trans_no']
if 'ref_trans_no_type' in d:
o.ref_trans_no_type = d['ref_trans_no_type']
if 'source' in d:
o.source = d['source']
if 'status' in d:
o.status = d['status']
if 'tnt_inst_id' in d:
o.tnt_inst_id = d['tnt_inst_id']
if 'used_amt' in d:
o.used_amt = d['used_amt']
if 'writeoff_relative_id' in d:
o.writeoff_relative_id = d['writeoff_relative_id']
return o
| 1.617188 | 2 |
Web/EnterthePolygon/exif/front/migrations/0003_auto_20190318_2200.py | HackUCF/SunshineCTF-2019-Public | 9 | 12797563 | # Generated by Django 2.1.4 on 2019-03-18 22:00
from django.db import migrations, models
import front.models
class Migration(migrations.Migration):
dependencies = [
('front', '0002_images_filename'),
]
operations = [
migrations.AlterField(
model_name='images',
name='ifile',
field=models.ImageField(unique=True, upload_to=front.models.upld_dir),
),
]
| 1.617188 | 2 |
util/summary_func.py | demetoir/ALLGANS | 11 | 12797564 | """tensorflow summary util"""
import tensorflow as tf
def mean_summary(var):
"""mean scalar summary
:type var: tensorflow.Variable
:param var: variable to add summary
"""
with tf.name_scope(var.name.split(":")[0]):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
def stddev_summary(var):
"""stddev scalar summary
:type var: tensorflow.Variable
:param var: variable to add summary
"""
with tf.name_scope(var.name.split(":")[0]):
mean = tf.reduce_mean(var)
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar("stddev", stddev)
def histogram_summary(var):
"""histogram summary
:type var: tensorflow.Variable
:param var: variable to add summary
"""
with tf.name_scope(var.name.split(":")[0]):
tf.summary.histogram('histogram', var)
def max_summary(var):
"""max scalar summary
:type var: tensorflow.Variable
:param var: variable to add summary
"""
with tf.name_scope(var.name.split(":")[0]):
tf.summary.scalar("max", tf.reduce_max(var))
def min_summary(var):
"""min summary
:type var: tensorflow.Variable
:param var: variable to add summary
"""
with tf.name_scope(var.name.split(":")[0]):
tf.summary.scalar("min", tf.reduce_min(var))
def summary_loss(var):
"""loss summary
loss's scalar and histogram summary
:type var: tensorflow.Variable
:param var: variable to summary
"""
with tf.name_scope(var.name.split(":")[0]):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
tf.summary.histogram('histogram', var)
def summary_image(var, max_outputs=0):
"""image summary
:type var: tensorflow.Variable
:type max_outputs: int
:param var: variable to summary
:param max_outputs: max output to summary image
"""
with tf.name_scope(var.name.split(":")[0]):
tf.summary.image("image", var, max_outputs=max_outputs)
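# Example usage (illustrative sketch only; the variable below is an assumption,
# not part of this module):
#   w = tf.Variable(tf.zeros([10]), name="w")
#   mean_summary(w)
#   stddev_summary(w)
#   histogram_summary(w)
#   merged = tf.summary.merge_all()  # TF1-style graph mode, as used above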
| 3.078125 | 3 |
date-and-time/src/datetime-methods.py | giserh/book-python | 1 | 12797565 | from datetime import datetime
armstrong = datetime(1969, 7, 21, 14, 56, 15)
armstrong.date() # datetime.date(1969, 7, 21)
armstrong.time() # datetime.time(14, 56, 15)
armstrong.weekday() # 0 - Monday (weekday() counts from Monday=0, unlike the US convention where the week starts with Sunday)
| 3.078125 | 3 |
phantasy/library/parser/polarity.py | phantasy-project/phantasy | 0 | 12797566 | # -*- coding: utf-8 -*-
"""Read polarity.
"""
import csv
def readfile(filepath, fmt='csv'):
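    # Expects a comma-separated file whose first row is a header; returns a
    # dict mapping the first column (element name) to the integer polarity in
    # the second column, skipping rows whose first field starts with '#'.
    # The ``fmt`` argument is currently unused.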
with open(filepath, 'r') as f:
data = csv.reader(f, delimiter=',', skipinitialspace=True)
next(data)
r = {i[0]: int(i[1]) for i in data if not i[0].startswith("#")}
return r
| 3.609375 | 4 |
elrosproject/jedis/admin.py | prophet322/elrostest | 0 | 12797567 | from django.contrib import admin
from .models import Planet, Jedi, Tests, Questions
# Register your models here.
admin.site.register(Planet)
admin.site.register(Jedi)
class TestsInline(admin.StackedInline):
model = Questions
extra = 0
@admin.register(Tests)
class QuestionsAdmin(admin.ModelAdmin):
inlines = [
TestsInline,
]
| 1.8125 | 2 |
Super Ugly Number.py | quake0day/oj | 0 | 12797568 | class Solution(object):
def nthSuperUglyNumber(self, n, primes):
"""
:type n: int
:type primes: List[int]
:rtype: int
"""
res = [1]
hashmap = {val:0 for val in primes}
m = [float('inf')] * len(primes)
while len(res) < n:
newm = [res[hashmap[p]] * p for p in primes]
mn = min(newm)
hashmap[primes[newm.index(mn)]] += 1
if mn not in res:
res.append(mn)
else:
continue
return res[-1]
a = Solution()
print(a.nthSuperUglyNumber(12, [2, 7, 13, 19])) | 3.359375 | 3 |
pandasCSVread.py | apalom/Clustering_NHTS_Driving_Patterns | 1 | 12797569 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 18 22:57:53 2018
@author: <NAME>
"""
# import libraries
import pandas as pd
import timeit
# initialize values
start_time = timeit.default_timer()
# NHTS2009 Data Location for Alex's Laptop
#df0 = pd.read_csv(r'C:\Users\avi_b\Box\CS6140 Project\Data\CSV\DAYV2PUB.CSV', header=0)
# NHTS2009 Data Location for Alex's Lab Computer
df0 = pd.read_csv(r'C:\Users\Alex\Documents\NHTS_2017\trippub.CSV', header=0)
#df0 = pd.read_csv(r'C:\Users\<NAME>\Documents\NHTS_2017\trippub.CSV', header=0)
# filter dataframe zero (raw NHTS2009) to columns listed in filter
df1 = df0.filter(['TDCASEID','TRAVDAY','STRTTIME','DWELTIME','ENDTIME','TRIPPURP',
'WHYFROM','WHYTO','WHYTRP1S','WHYTRP90','WHODROVE',
'CENSUS_D','CENSUS_R','DRIVER','AWAYHOME','FRSTHM','TDTRPNUM',
'TDWKND','TRPACCMP','TRPHHACC','TRVLCMIN','TRVL_MIN','TRWAITTM',
'VEHTYPE','VEHYEAR','VMT_MILE','HHFAMINC','HHSIZE','HHSTATE','HOMEOWN',
'NUMADLT','NUMONTRIP','PRMACT','PAYPROF','PROXY','PRMACT','R_AGE','R_SEX'], axis=1)
# function call to attribute why descriptions with why codes
from funcWhyID import funcWhyID
[df1, whyID, whyIDsum] = funcWhyID(df1, whyID, whyIDsum)
whyIDsumList = set(df1['whyDescSmry'])
# build out dataframe table
colNames0 = list(df0) # shows all column headers
colNames1 = list(df1) # shows all column headers
firstNrows0 = df0.head(25) # shows first n rows
firstNrows1 = df1.head(25) # shows first n rows
lastNrows0 = df0.tail(5) # shows last n rows
lastNrows1 = df1.tail(5) # shows last n rows
df0['TRIPPURP'].describe()
# print data shapes (rows x columns)
print('Dataframe Raw Shape:', df0.shape)
print('Dataframe Filtered Shape:', df1.shape)
elapsed = timeit.default_timer() - start_time
# timeit statement
print('Execution time: {0:.4f} sec'.format(elapsed))
# %% plotting section
# plots histogram
#plotHistSmry = df1['WHYTRP1S'].hist(bins=25)
#plotPieSmry = plt.pie(df1['WHYTRP1S'])
#plotPieSmry = plt.pie(df1['WHYTRP1S'], labels=whyIDsumList, autopct='%1.0f%%)
#plt.plot("whyDescSmry",type="bar")
#df1["WHYFROM"].plot(kind="bar")
#first5rows1['whyDescSmry'].hist() | 2.75 | 3 |
api/bittrex.py | arafuse/CryptoWatcher | 1 | 12797570 | <reponame>arafuse/CryptoWatcher<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Bittrex API module.
"""
__author__ = '<NAME> <$(echo nqnz.enshfr#tznvy.pbz | tr a-z# n-za-m@)>'
__version__ = "0.2.0"
__all__ = ['Client']
import hmac
import json
import time
import asyncio
import hashlib
import traceback
from datetime import datetime, timezone
from typing import Any, Dict, List, Sequence, Tuple
import api
import utils
import common
import configuration
import aiohttp
config = configuration.config
"""
Global configuration.
"""
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
API_URL = 'https://bittrex.com/api/{}?{}'
API_METHODS = {
'getMarketSummaries': {
'path': 'v2.0/pub/markets/getMarketSummaries',
'params': '',
'auth': False
},
'getMarketSummariesV1': {
'path': 'v1.1/public/getMarketSummaries',
'params': '',
'auth': False
},
'getTicks': {
'path': 'v2.0/pub/market/getTicks',
'params': 'marketName={}&tickInterval={}',
'auth': False
},
'getLatestTick': {
'path': 'v2.0/pub/market/getLatestTick',
'params': 'marketName={}&tickInterval={}',
'auth': False
},
'getTicker': {
'path': 'v1.1/public/getticker',
'params': 'market={}',
'auth': False
},
'buyLimit': {
'path': 'v1.1/market/buylimit',
'params': 'market={}&quantity={}&rate={}',
'auth': True
},
'sellLimit': {
'path': 'v1.1/market/selllimit',
'params': 'market={}&quantity={}&rate={}',
'auth': True
},
'cancelOrder': {
'path': 'v1.1/market/cancel',
'params': 'uuid={}',
'auth': True
},
'getOrder': {
'path': 'v1.1/account/getorder',
'params': 'uuid={}',
'auth': True
},
'getBalance': {
'path': 'v1.1/account/getbalance',
'params': 'currency={}',
'auth': True
},
}
class Client(api.Client):
"""
Client for interacting with the Bittrex API.
"""
def __init__(self, session: aiohttp.ClientSession, log=utils.logging.DummyLogger()):
self.session = session
"""
Object HTTP client session.
"""
self.log = utils.logging.ChildLogger(parent=log, scope=self)
"""
Object logger.
"""
self.lock = asyncio.Lock()
"""
Lock used for syncing access to API data.
"""
self.cache = {
'balance': {}
}
"""
Response cache.
"""
self.tick_interval_str: str
"""
String representation of the configured tick interval.
"""
if config['tick_interval_secs'] == 60:
self.tick_interval_str = 'oneMin'
elif config['tick_interval_secs'] == 300:
self.tick_interval_str = 'fiveMin'
else:
raise ValueError("Unsupported tick interval: {}".format(config['tick_interval_secs']))
async def call(self, method: str, params: Sequence[Any]=None):
"""
Call a Bittrex API method.
        Implements retry and exponential backoff for HTTP-level error conditions.
Arguments:
method: Name of the API method to call.
params: Values of query parameters to pass to the method.
Returns:
(tuple): A tuple containing:
data (str): The raw HTTP response body (may be None).
status (int): The HTTP response status code. A value of 0 indicates a connection or transport failure.
"""
retry = False
attempt = 0
status = 0
data = None
url, headers = await self._get_request_data(method, params)
while attempt < config['http_max_retries']:
try:
async with self.session.get(url, headers=headers) as response:
status = response.status
if status >= 200 and status <= 399:
data = await response.text()
break
if (status >= 500 and status <= 599 and status != 504) or (status in [0, 408, 429]):
retry_reason = 'status {}'.format(status)
retry = True
else:
self.log.error('Got non-retryable status {}.', status)
data = await response.text()
break
except (aiohttp.ClientConnectionError, aiohttp.ClientPayloadError, asyncio.TimeoutError) as e:
retry_reason = '{}: {}'.format(type(e).__name__, e)
retry = True
if retry:
attempt += 1
await common.backoff(attempt, "Bittrex call {}".format(method), retry_reason)
retry = False
return (data, status)
@staticmethod
async def _get_request_data(method: str, params: Sequence[Any]=None):
"""
Get the request URL and headers for a given API method and parameter list.
Forms the full URL with query string and calculates any needed HMAC signature to be passed in headers.
Arguments:
method: Name of the API method to call.
params: Values of query parameters to pass to the method.
Returns:
(tuple): A tuple containing:
(str): Full URL for the request.
(dict): Dictionary of headers for the request, or None if no headers are required.
"""
query = API_METHODS[method]['params'].format(*params or [])
if API_METHODS[method]['auth']:
nonce = int(time.time() * 1000)
api_key = config['bittrex_api_key']
api_secret = config['bittrex_api_secret']
query = 'apikey={}&nonce={}&'.format(api_key, nonce) + query
url = API_URL.format(API_METHODS[method]['path'], query)
signature = hmac.new(api_secret.encode(), url.encode(), hashlib.sha512).hexdigest()
headers = {'apisign': signature}
else:
url = API_URL.format(API_METHODS[method]['path'], query)
headers = None
return (url, headers)
async def call_json(self, method: str, params: list=None):
"""
Call a Bittrex API method and parse JSON response.
Implements retry and exponential backoff for higher-level API error conditions on a 200 response, specifically
empty response body, malformed response body (invalid JSON), or missing 'success' value.
Arguments:
method: Name of the API method to call.
params: Values of query parameters to pass to the method.
Returns:
(tuple): A tuple containing:
data (object): On success, a dict containing the parsed JSON response.
On a non-200 response, the raw response body (may be None).
On a response with a missing response body, None.
status (int): The HTTP response status code. A value of 0 indicates a connection or transport failure.
"""
retry = False
attempt = 0
status = 0
data = None
while attempt < config['http_max_retries']:
raw_data, status = await self.call(method, params)
if status != 200:
return (raw_data, status)
if raw_data is None:
retry_reason = "'None' on successful response"
retry = True
if not retry:
try:
data = json.loads(raw_data)
_ = data['success']
return (data, status)
except json.JSONDecodeError:
retry_reason = 'invalid JSON response'
except KeyError:
retry_reason = "missing 'success' value"
retry = True
if retry:
attempt += 1
await common.backoff(attempt, "Bittrex call_json {}".format(method), retry_reason)
retry = False
return (data, status)
async def call_extract(self, extract: Sequence[str], method: str, params: Sequence[Any]=None,
retry_data=False, retry_fail=False, log=False):
"""
Call a Bittrex API method and extract data items from its JSON response.
Implements retry and exponential backoff for invalid data items. Caution must be taken to ensure that the
specified extract dict keys are correct to avoid repeating of non-idempotent operations (such as buying or
selling) so should always be tested with retry=False (the default) first.
Arguments:
extract: A list of strings representing the dictionary paths of the response data items to extract,
eg. ["['result'][0]['C']", "['result'][0]['T']"]
method: Name of the API method to call.
params: Values of query parameters to pass to the method.
retry_data: If True, will perform backoff and retry on empty or missing data items. Syntax errors in
extract paths will not be retried.
retry_fail: If True, will perform backoff and retry on explicit failure response from the API.
log: If True, will log the API JSON response. This is optional as some responses can be quite
large.
Returns:
(tuple): A tuple containing:
data (object): On a normal 200 response, a tuple containing the values for each extracted item. Any items
that failed to be extracted after exhausting all retries, or had syntax errors in extract
paths will be set to None.
On a non-200 response, the raw response body (may be None).
On a 200 response with a missing response body, None.
status (int): The HTTP response status code. A value of 0 indicates a connection or transport failure.
Raises:
SyntaxError, NameError: If one or more of the passed extract dict paths contains invalid syntax.
"""
        retry = False
        attempt = 0
        reason = None
        results = [None] * len(extract)  # fallback if every attempt fails before extraction can run
while attempt <= config['api_max_retries']:
data, status = await self.call_json(method, params)
if status != 200 or data is None:
self.log.error("Failed on API method '{}({})': status {}, data {}", method, params, status, data)
return (data, status)
if log:
self.log.debug("API method '{}({})' response:\n{}", method, params, json.dumps(data, indent=2))
if not data['success'] and retry_fail:
retry = True
try:
reason = data['message'] if data['message'] != '' else "success == false (blank message)"
except KeyError:
reason = "success == false (missing message)"
if not retry:
results, ex = await self._extract_items(extract, data)
retry, reason = await self._handle_extract_exception(ex, data, retry_data)
if retry:
attempt += 1
await common.backoff(attempt, "Bittrex call_extract {}".format(method), reason)
retry = False
else:
break
if reason is not None:
self.log.error("Giving up on: {}", reason)
return (tuple(results), status)
@staticmethod
async def _extract_items(extract: Sequence[str], data: Dict[str, Any]):
"""
Extract items from a dictionary of data.
Arguments:
extract: List of strings representing the dictionary paths of the response data items to extract,
eg. ["['result'][0]['C']", "['result'][0]['T']"]
data: Dictionary of data to extract items from.
Returns:
(tuple): A tuple containing:
list: Result of each extracted path, or None if a syntax or or extraction error occurred.
Exception: The last exception that occurred during extraction, or None if no exception occurred.
"""
ex = None
results = []
for item in extract:
try:
expr = 'lambda d: d' + item
expr_func = eval(expr) # pylint: disable=W0123
results.append(expr_func(data))
except (TypeError, IndexError, KeyError, SyntaxError, NameError) as e:
ex = e
results.append(None)
return (results, ex)
@staticmethod
async def _handle_extract_exception(ex: Exception, data: Dict[str, Any], retry_data: bool):
"""
Handle any exception produced from an extract operation.
Arguments:
ex: Exception returned from :meth:`_extract_items`.
data: Dictionary of data passed to :meth:`_extract_items`.
retry_data: True if missing data should be retried, false otherwise.
Returns:
(tuple): A tuple containing:
(bool): True if the exception warrants a retry, False if no error or and unretryable error occurred.
(str): Sentence fragment or formatted traceback describing the reason for retry or error, or None
if no issue occurred.
"""
if isinstance(ex, (TypeError, IndexError, KeyError)):
reason = await Client._get_extract_failure_reason(ex, data)
if retry_data and data['success']:
retry = True
else:
retry = False
elif isinstance(ex, (SyntaxError, NameError)):
reason = "{}: {}\n{}".format(type(ex).__name__, ex, ''.join(traceback.format_tb(ex.__traceback__)))
retry = False
elif ex is not None:
reason = await Client._get_extract_failure_reason(ex, data)
retry = False
else:
reason = None
retry = False
return (retry, reason)
@staticmethod
async def _get_extract_failure_reason(ex: Exception, data: Dict[str, Any]):
"""
Get the failure reason from the given extraction exception and API response message (if present).
Arguments:
data: Dict of the parsed API response.
ex: Exception thrown as a result of the extraction attempt.
"""
if 'message' in data and data['message'] and data['message'] != '':
api_message = data['message']
else:
api_message = 'empty or missing results'
return "{} ({}: {})".format(api_message, type(ex).__name__, ex)
async def get_market_summaries(self) -> List[Dict[str, Any]]:
"""
Get the market summaries from the Bittrex API.
Returns:
The market summaries dict.
"""
results, status, = await self.call_extract([
"['result']",
"['result'][0]['Market']['BaseCurrency']", # To retry on any missing fields
"['result'][0]['Market']['MinTradeSize']",
"['result'][0]['Market']['IsActive']",
"['result'][0]['Market']['Notice']",
"['result'][0]['Summary']['MarketName']",
"['result'][0]['Summary']['BaseVolume']",
"['result'][0]['Summary']['PrevDay']",
"['result'][0]['Summary']['Last']",
], 'getMarketSummaries', retry_data=True, retry_fail=True)
if status != 200 or results is None or results[0] is None:
self.log.error("Failed getting market summaries: status {}, results {}.", status, results)
return None
summaries = {}
for summary in results[0]:
pair = summary['Summary']['MarketName']
active = summary['Market']['IsActive']
notice = summary['Market']['Notice']
last = summary['Summary']['Last']
prev_day = summary['Summary']['PrevDay']
if not prev_day: prev_day = last
if notice:
self.log.info("{} NOTICE: {}", pair, notice)
if 'will be removed' in notice or 'will be delisted' in notice or 'scheduled for delisting' in notice:
self.log.info("{} marked as inactive due to pending removal.", pair)
active = False
summaries[pair] = {
'active': active,
'baseCurrency': summary['Market']['BaseCurrency'],
'minTradeQty': summary['Market']['MinTradeSize'],
'minTradeSize': 0.0,
'minTradeValue': 0.0,
'baseVolume': summary['Summary']['BaseVolume'],
'prevDay': prev_day,
'last': last,
}
return summaries
async def get_ticks(self, pair: str, length: int=None) -> List[Dict[str, Any]]:
"""
Get ticks (closing values and closing times) for a pair from the Bittrex API.
Arguments:
pair: The currency pair eg. 'BTC-ETH'.
length: Not supported by the API, will always return all ticks.
Returns:
A list of the raw tick data from the API, or None if an error occurred or no ticks are available.
"""
params = [pair, self.tick_interval_str]
results, status, = await self.call_extract([
"['result']",
"['result'][0]['C']", # To retry if not at least one element exists
"['result'][0]['T']"
], 'getTicks', params=params, retry_data=True, retry_fail=True)
if status != 200 or results is None or results[0] is None:
self.log.error("Failed getting ticks: params {}, status {}, results {}.", params, status, results)
return None
for tick in results[0]:
close_datetime = datetime.strptime(tick['T'], TIME_FORMAT)
tick['T'] = close_datetime.replace(tzinfo=timezone.utc).timestamp()
return results[0]
async def get_tick_range(self, pair: str, start_time: float, end_time: float) -> List[Dict[str, Any]]:
"""
Get a range of ticks (closing values and closing times) for a pair from the Bittrex API.
"""
raise NotImplementedError("Tick range not supported by the Bittrex API.")
async def get_last_values(self, pair: str) -> Tuple[float, float]:
"""
Get the last price and 24-hour volume for a currency pair from the API.
Arguments:
pair: Currency pair name eg. 'BTC-ETH'
Returns:
(tuple): A tuple containing:
float: The current close price, or None if an error occurred.
float: The current 24 hour volume, or None if an error occurred.
"""
market_summaries = await self._get_market_summaries_v1()
if market_summaries is None:
return None
return (market_summaries[pair]['Last'], market_summaries[pair]['BaseVolume'])
async def buy_limit(self, pair: str, quantity: float, value: float):
"""
"""
params = [pair, quantity, value]
results, status = await self.call_extract([
"['result']['uuid']",
], 'buyLimit', params=params, log=True)
if status != 200 or results is None or results[0] is None:
self.log.error("Failed executing buy order request: params {}, status {}, results {}.",
params, status, results)
return None
return results[0]
async def sell_limit(self, pair: str, quantity: float, value: float):
"""
"""
params = [pair, quantity, value]
results, status = await self.call_extract([
"['result']['uuid']",
], 'sellLimit', params=params, log=True, retry_data=True)
if status != 200 or results is None or results[0] is None:
self.log.error("Failed executing sell order request: params {}, status {}, results {}.",
params, status, results)
return None
return results[0]
async def get_order(self, pair: str, order_id: str):
"""
"""
params = [order_id]
results, status = await self.call_extract([
"['success']",
"['result']['IsOpen']",
"['result']['Quantity']",
"['result']['QuantityRemaining']",
"['result']['PricePerUnit']",
"['result']['CommissionPaid']",
], 'getOrder', params=params, log=True, retry_data=True)
if status != 200 or results is None or not results[0]:
self.log.error("Failed getting order: params{}, status {}, results {}.", params, status, results)
return None
return {
'open': results[1],
'quantity': results[2],
'remaining': results[3],
'value': results[4],
'fees': results[5],
}
async def cancel_order(self, pair: str, order_id: str):
"""
"""
params = [order_id]
results, status = await self.call_extract([
"['success']"
], 'cancelOrder', params=params, log=True, retry_data=True)
if status != 200 or results is None or results[0] is None:
self.log.error("Failed executing cancel order request: params {} status {}, results {}.",
params, status, results)
return None
return results[0]
async def get_balance(self, base: str):
"""
"""
params = [base]
results, status = await self.call_extract([
"['result']['Available']",
], 'getBalance', params=params, log=True, retry_data=True)
if status != 200 or results is None or results[0] is None:
self.log.error("Failed getting balance: params {}, status {}, results {}.",
params, status, results)
return None
balance = results[0]
self.cache['balance'][base] = {
'time': time.time(),
'data': balance
}
return balance
async def _get_market_summaries_v1(self):
"""
Get v1 market summaries from the API, cached for the current tick interval.
Converts the response list to a dict for faster lookups. This data is used for batching tick updates, since
the v1 API is kept current (unlike v2).
"""
await self.lock.acquire()
if 'marketSummariesV1' in self.cache:
if time.time() - self.cache['marketSummariesV1']['time'] < config['tick_interval_secs']:
self.log.debug("Returning cached data for marketSummariesV1.", verbosity=1)
self.lock.release()
return self.cache['marketSummariesV1']['data']
results, status = await self.call_extract([
"['result']",
"['result'][0]['Last']", # For retry of missing fields
"['result'][0]['BaseVolume']",
"['result'][0]['PrevDay']",
], 'getMarketSummariesV1', retry_data=True)
if status == 200 and results is not None and results[0] is not None:
market_summaries = {}
for result in results[0]:
market_summaries[result['MarketName']] = result
else:
self.log.error("Failed getting v1 market summaries: status {}, results {}.", status, results)
if 'marketSummariesV1' in self.cache:
self.cache['marketSummariesV1']['time'] = time.time()
self.lock.release()
return self.cache['marketSummariesV1']['data']
else:
self.lock.release()
return None
self.cache['marketSummariesV1'] = {
'time': time.time(),
'data': market_summaries
}
self.lock.release()
return market_summaries
| 2.03125 | 2 |
bvlapi/api/team/matches_by_guid.py | alanverresen/bvl-api | 1 | 12797571 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Contains function to call API for information about a team's games.
from bvlapi.api.call import call_api
from bvlapi.api.settings import API_BASE_URL
from bvlapi.common.exceptions import InvalidGuid
from bvlapi.guid.team import is_team_guid
def get_matches_by_guid(guid):
""" Calls API to retrieve information about a basketball team's season.
:param str guid: GUID of basketball team
:rtype: [dict]
:return: a list of dictionaries containing information about team's games
:raise ApiCallFailed: something went wrong while calling API
"""
if not is_team_guid(guid):
raise InvalidGuid("'{}' is not a valid team GUID.".format(guid))
url = API_BASE_URL + "TeamMatchesByGuid?teamGuid={}".format(guid)
return call_api(url)
| 3.015625 | 3 |
fastdb/__init__.py | kaankarakoc42/FastDB | 0 | 12797572 | from .fastdb import FastDB
from .fastclient import FastClient | 1.101563 | 1 |
transik/views.py | macie/transik | 0 | 12797573 | <gh_stars>0
from flask import flash, render_template, redirect, request, session, url_for
from transik import app
@app.route('/')
def dashboard():
projects = [
{'id': 'first', 'name': 'first project'}
]
return render_template('dashboard.html', session=session, projects=projects)
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
session['username'] = request.form['username']
return redirect(url_for('dashboard'))
return render_template('login.html')
@app.route('/logout')
def logout():
# remove the username from the session if it's there
session.pop('username', None)
return redirect(url_for('dashboard'))
@app.route('/project/<id>')
def project(id):
segments = [
{'key': 's1', 'src': 'src1', 'target': 'targ1'},
{'key': 's2', 'src': 'src2', 'target': 'targ2'},
]
return render_template('project.html', segments=segments)
@app.route('/import/<project_id>')
@app.route('/import/<project_id>/<lang>')
def import_segments(project_id, lang=None):
segments = [
{'key': 's1', 'src': 'src1', 'target': 'targ1'},
{'key': 's2', 'src': 'src2', 'target': 'targ2'},
]
return render_template('project.html', segments=segments)
@app.route('/export/<project_id>')
@app.route('/export/<project_id>/<lang>')
def export_segments(project_id, lang=None):
segments = [
{'key': 's1', 'src': 'src1', 'target': 'targ1'},
{'key': 's2', 'src': 'src2', 'target': 'targ2'},
]
return render_template('project.html', segments=segments)
| 2.21875 | 2 |
06-templates/templates_server.py | bzd111/aiohttp-all | 0 | 12797574 | import sqlite3
from pathlib import Path
from typing import Any, AsyncIterator, Dict
import aiohttp_jinja2
import aiosqlite
import jinja2
from aiohttp import web
router = web.RouteTableDef()
async def fetch_post(db: aiosqlite.Connection, post_id: int) -> Dict[str, Any]:
async with db.execute(
"select owner, editor, title, text from posts where id = ?", [post_id]
) as cursor:
row = await cursor.fetchone()
print(row)
if row is None:
raise RuntimeError(f"Post {post_id} does not exist")
return {
"id": post_id,
"owner": row["owner"],
"editor": row["editor"],
"title": row["title"],
"text": row["text"],
}
@router.get("/")
@aiohttp_jinja2.template("index.html")
async def index(request: web.Request) -> Dict[str, Any]:
ret = []
db = request.config_dict["DB"]
async with db.execute("select id, owner, editor, title from posts") as cursor:
async for row in cursor:
ret.append(
{
"id": row["id"],
"owner": row["owner"],
"editor": row["editor"],
"title": row["title"],
}
)
return {"posts": ret}
@router.get("/new")
@aiohttp_jinja2.template("new.html")
async def new_post(request: web.Request) -> Dict[str, Any]:
return {}
@router.post("/new")
@aiohttp_jinja2.template("edit.html")
async def new_post_apply(request: web.Request) -> Dict[str, Any]:
db = request.config_dict["DB"]
post = await request.post()
owner = "Anonymous"
await db.execute(
"insert into posts (owner, editor, title, text) values (?,?,?,?)",
[owner, owner, post["title"], post["text"]],
)
await db.commit()
raise web.HTTPSeeOther(location=f"/")
@router.get("/{post}")
@aiohttp_jinja2.template("view.html")
async def view_post(request: web.Request) -> Dict[str, Any]:
post_id = request.match_info["post"]
if post_id.endswith(".ico"):
raise web.HTTPSeeOther(location=f"/")
db = request.config_dict["DB"]
return {"post": await fetch_post(db, post_id)}
@router.get("/{post}/edit")
@aiohttp_jinja2.template("edit.html")
async def edit_post(request: web.Request) -> Dict[str, Any]:
post_id = request.match_info["post"]
db = request.config_dict["DB"]
return {"post": await fetch_post(db, post_id)}
@router.post("/{post}/edit")
async def edit_post_apply(request: web.Request) -> web.Response:
post_id = request.match_info["post"]
db = request.config_dict["DB"]
post = await request.post()
await db.execute(
"update posts set title=?, text=? where id =?",
[post["title"], post["text"], post_id],
)
await db.commit()
raise web.HTTPSeeOther(location=f"/{post_id}")
@router.get("/{post}/delete")
async def delete_post(request: web.Request) -> web.Response:
post_id = request.match_info["post"]
db = request.config_dict["DB"]
await db.execute("delete from posts where id=?", [post_id])
raise web.HTTPSeeOther(location=f"/")
def get_db_path() -> Path:
here = Path.cwd()
return here / "db.sqlite3"
async def init_db(app: web.Application) -> AsyncIterator[None]:
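    # Registered through app.cleanup_ctx below: the code before ``yield`` runs
    # on application startup, the code after it runs on shutdown.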
sqlite_db = get_db_path()
db = await aiosqlite.connect(sqlite_db)
db.row_factory = aiosqlite.Row
app["DB"] = db
yield
await db.close()
async def init_app() -> web.Application:
app = web.Application()
app.add_routes(router)
app.cleanup_ctx.append(init_db)
aiohttp_jinja2.setup(
app, loader=jinja2.FileSystemLoader(str(Path.cwd() / "templates"))
)
return app
def try_make_db() -> None:
sqlite_db = get_db_path()
if sqlite_db.exists():
return
with sqlite3.connect(sqlite_db) as conn:
cur = conn.cursor()
cur.execute(
"""CREATE TABLE posts (
id INTEGER PRIMARY KEY,
title TEXT,
text TEXT,
owner TEXT,
editor TEXT)
"""
)
conn.commit()
try_make_db()
web.run_app(init_app())
| 2.5625 | 3 |
ACO.py | mentesniker/Maxcut-solver | 0 | 12797575 | <reponame>mentesniker/Maxcut-solver
from random import random, uniform
from scipy.optimize import minimize
from math import e, sqrt,cos,pi
'''
Class Point.
A point is an object that has a position and a pheromone that leads
to the point.
'''
class Point():
'''
The constructor of the class.
Params:
- point: a coordinate.
- pheromone: the pheromone that leads to the point.
'''
def __init__(self, point, pheromone) -> None:
self.point = point
self.pheromone = pheromone
'''
Method to get the coordinates of the point.
Return:
- point: The coordinates of the point.
'''
def get_point(self):
return self.point
'''
Method to get the pheromone of the point.
Return:
- point: The pheromone of the point.
'''
def get_pheromone(self):
return self.pheromone
'''
Method to set the pheromone of the point.
Params:
- pheromone: The pheromone of the point.
'''
def set_pheromone(self, pheromone):
self.pheromone = pheromone
'''
Method to set a coordinate of the point.
Params:
- point: The coordinate of the point.
'''
def set_point(self, point):
self.point = point
'''
Method that returns the string representation of the point.
Return:
- string: the string representation of the point.
'''
def __str__(self):
return "point: " + str(self.point) + "," + "pheromone: " + str(self.pheromone)
'''
Class Ant.
An ant is an object that has a position, a memory and a limit for its memory.
An ant can move, forget/remember previously visited places and return its location.
'''
class Ant():
'''
The constructor of the class.
Params:
    - memory_limit: the maximum number of previously visited places that an
ant can remember.
'''
def __init__(self, memory_limit) -> None:
self.memory = list()
self.memory_limit = memory_limit
self.current_localization = list()
'''
Method to clear the ant location.
'''
def clear_location(self):
self.current_localization = list()
'''
Method to get the coordinates of the ant location.
Return:
- list: the list of coordinates of the ant position.
'''
def get_location(self):
output_list = list()
for point in self.current_localization:
output_list.append(point.get_point())
return output_list
'''
Method to update the position of the ant.
Params:
- new_location: a list that contains the coordinates of the
new location.
'''
def update_location(self, new_location):
for i in range(len(self.current_localization)):
self.current_localization[i].set_point(new_location[i])
'''
Method that adds a point to the list that contains
the location of the ant.
'''
def assign_point(self, point):
self.current_localization.append(point)
'''
Method that updates the pheromone of the current location
point of the ant.
Params:
- error: The error induced by the best solution in the colony.
'''
def update_pheromone(self, error):
for point in self.current_localization:
point.set_pheromone(point.get_pheromone() + (1/error))
'''
Method to save a new location in the ant memory.
Params:
- point: the point that will be saved in the ant memory
Return:
- True: if the point was added to the memory and False otherwise.
'''
def set_memory(self, point):
for p in self.memory:
if(point.get_point() == p.get_point()):
return False
self.memory.append(point)
if( len(self.memory) > self.memory_limit ):
del self.memory[0]
return True
'''
Method that returns the string representation of the bat.
Return:
- string: the string representation of the bat.
'''
def __str__(self):
memory = ""
for point in self.memory:
memory += " " + str(point) + " "
location = ""
for point in self.current_localization:
location += " " + str(point) + " "
return "memory: " + memory + " and " + "current location" + location
'''
Class PointsList.
A list that contains points.
'''
class PointsList():
'''
The constructor of the class.
Params:
- list_of_points: the list of points.
'''
def __init__(self, list_of_points) -> None:
self.points = list_of_points
'''
    Method that returns the point object that has the highest pheromone.
    Return:
    - Point: the point with the highest pheromone trail.
'''
def get_best_point(self):
best_point = Point(0,0)
for point in self.points:
if(point.get_pheromone() > best_point.get_pheromone()):
best_point = point
return best_point
'''
Method that returns the sum of the pheromones of the list of points.
Return:
    - float: the total of the pheromones.
'''
def get_total_pheromones(self):
total = 0
for point in self.points:
total += point.get_pheromone()
return total
'''
Method that returns the list of points.
Return:
- list: the the list of points.
'''
def get_list_points(self):
return self.points
'''
Method that evaporates the pheromones in the points.
'''
def evaporate_pheromone(self, p):
for point in self.points:
point.set_pheromone((1-p)*point.get_pheromone())
'''
Class ACO.
Class to run the ant colony optimization with respect of the
given function.
'''
class ACO():
'''
The constructor of the class.
Params:
    - num_params: the number of dimensions of the objective function.
- discrete_points: the number of discrete points to sample.
    - interval: an interval to draw numbers from.
- number_ants: The number of ants of the colony.
- q: A constant.
- evaporation_rate: A constant to control the evaporation of the pheromone.
- num_iterations (optional): The number of iterations of the algorithm.
'''
def __init__(self, num_params, discrete_points, interval, number_ants, q, evaporation_rate, num_iterations = 50) -> None:
def first_guess_linear(n):
            # unused alternative initialisation: [Point(uniform(interval[0], interval[1]), 1/2) for _ in range(discrete_points)]
theta = [Point(uniform(0, pi),1/2) for _ in range(0,int(n/2))] + [Point(uniform(0, 2*pi),1/2) for _ in range(0,int(n/2))]
return (theta)
self.number_params = num_params
self.num_iterations = num_iterations
self.discrete_points = discrete_points
self.points = list()
self.q = q
self.p = evaporation_rate
self.ants = [Ant(num_params) for _ in range(0, number_ants)]
for _ in range(0,self.number_params):
self.points.append(PointsList(first_guess_linear(discrete_points)))
'''
Method that returns the best ant and it's cost
with respect to the cost function.
Return:
- Ant: the best ant in the colony.
- float: the cost of the best ant.
'''
def get_best_ant(self, function):
best_ant = self.ants[0]
cost = function(best_ant.get_location())
for ant in self.ants:
ant_cost = (function(ant.get_location()))
if(ant_cost < cost):
cost = ant_cost
best_ant = ant
return best_ant, cost
'''
Method that does a local search around the current position
of an ant.
'''
def local_search(self, function):
for ant in self.ants:
res = minimize(function, ant.get_location(), method='COBYLA', options={"maxiter":5})
ant.update_location(res.x)
'''
Method that updates the pheromone of the ants in the colony.
'''
def update_pheromone(self, ant, cost):
ant.update_pheromone(cost)
for point_list in self.points:
point_list.evaporate_pheromone(self.p)
'''
    Method in which the ants in the colony decide to move to a location
    based on the pheromone trail or on a probabilistic decision.
'''
def probabilistic_construction(self):
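        # Pseudo-random proportional rule: with probability q an ant exploits
        # the point carrying the most pheromone in each dimension; otherwise
        # it explores, accepting a point based on its share of the total
        # pheromone in that dimension.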
for ant in self.ants:
ant.clear_location()
if(random() > 1 - self.q):
for point_list in self.points:
ant_asigned = ant.set_memory(point_list.get_best_point())
ant.assign_point(point_list.get_best_point())
else:
for point_list in self.points:
for point in point_list.get_list_points():
if(random() > (point.get_pheromone())/point_list.get_total_pheromones()):
ant_asigned = ant.set_memory(point)
if (ant_asigned):
ant.assign_point(point)
break
'''
    Method to run the ACO heuristic over the objective function.
Params:
- fx: the cost function.
Return:
    -list: a list with the best point found by the colony.
-float: the cost of the best point found by the colony.
'''
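    # Example usage (illustrative sketch only; the cost function and parameter
    # values are assumptions, not part of the original code):
    #   colony = ACO(num_params=4, discrete_points=20, interval=(0, 2 * pi),
    #                number_ants=10, q=0.8, evaporation_rate=0.1)
    #   best_location, iterations = colony.run(cost_function)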
def run(self,fx):
self.probabilistic_construction()
self.local_search(fx)
best_ant, best_cost = self.get_best_ant(fx)
best_location = best_ant.get_location()
self.update_pheromone(best_ant, best_cost)
for i in range(self.num_iterations):
self.probabilistic_construction()
self.local_search(fx)
ant, cost = self.get_best_ant(fx)
self.update_pheromone(ant, cost)
if(cost < best_cost):
best_location = ant.get_location()
best_ant = ant
best_cost = cost
return [best_location,self.num_iterations] | 3.765625 | 4 |
mmvmm/tap_device.py | marcsello/mmvmm | 0 | 12797576 | <filename>mmvmm/tap_device.py
#!/usr/bin/env python3
import subprocess
from threading import RLock
class TAPDevice(object):
"""
This class issues iproute2 commands to add and remove tap devices required for VM networking
"""
_allocated_device_ids = []
NAMING_SCHEME = "tap{id}"
_global_network_lock = RLock() # protects the _allocated_device_ids list, and the adding and removing of tap devices
def __init__(self, master: str):
self._active = True
with TAPDevice._global_network_lock:
self._devid = 0
while True:
if self._devid not in TAPDevice._allocated_device_ids:
break
else:
self._devid += 1
TAPDevice._allocated_device_ids.append(self._devid)
self._devname = TAPDevice.NAMING_SCHEME.format(id=self._devid)
self._masterdevname = None
subprocess.check_call(["ip", "tuntap", "add", "name", self._devname, "mode", "tap"])
subprocess.check_call(["ip", "link", "set", self._devname, "up"])
try:
self.update_master(master)
except subprocess.CalledProcessError:
self.free()
raise
def update_master(self, master: str): # This raises exception if master is not available
if not self._active:
raise RuntimeError("Device is no longer available")
with TAPDevice._global_network_lock:
subprocess.check_call(["ip", "link", "set", self._devname, "master", master])
self._masterdevname = master
@property
def device(self) -> str:
if not self._active:
raise RuntimeError("Device is no longer available")
return self._devname
@property
def master(self) -> str:
if not self._active:
raise RuntimeError("Device is no longer available")
return self._masterdevname
def free(self):
"""
Free up the tap device.
After calling this function, subsequent calls to the objects should not be made.
"""
if not self._active:
raise RuntimeError("Device is no longer available")
with TAPDevice._global_network_lock:
subprocess.check_call(["ip", "link", "set", self._devname, "down"])
subprocess.check_call(["ip", "tuntap", "del", "name", self._devname, "mode", "tap"])
TAPDevice._allocated_device_ids.remove(self._devid)
self._active = False
| 2.765625 | 3 |
utils/mesh_utils.py | amirhertz/maps | 2 | 12797577 | from custom_types import *
from constants import EPSILON
import igl
def scale_all(*values: T):
max_val = max([val.max().item() for val in values])
min_val = min([val.min().item() for val in values])
scale = max_val - min_val
values = [(val - min_val) / scale for val in values]
if len(values) == 1:
return values[0]
return values
def get_faces_normals(mesh: Union[T_Mesh, T]) -> T:
if type(mesh) is not T:
vs, faces = mesh
vs_faces = vs[faces]
else:
vs_faces = mesh
if vs_faces.shape[-1] == 2:
vs_faces = torch.cat(
(vs_faces, torch.zeros(*vs_faces.shape[:2], 1, dtype=vs_faces.dtype, device=vs_faces.device)), dim=2)
face_normals = torch.cross(vs_faces[:, 1, :] - vs_faces[:, 0, :], vs_faces[:, 2, :] - vs_faces[:, 1, :])
return face_normals
def compute_face_areas(mesh: Union[T_Mesh, T]) -> TS:
face_normals = get_faces_normals(mesh)
face_areas = torch.norm(face_normals, p=2, dim=1)
face_areas_ = face_areas.clone()
face_areas_[torch.eq(face_areas_, 0)] = 1
face_normals = face_normals / face_areas_[:, None]
face_areas = 0.5 * face_areas
return face_areas, face_normals
def check_sign_area(*meshes: T_Mesh) -> bool:
for mesh in meshes:
face_normals = get_faces_normals(mesh)
if not face_normals[:, 2].gt(0).all():
return False
return True
def to_numpy(*tensors: T) -> ARRAYS:
params = [param.detach().cpu().numpy() if type(param) is T else param for param in tensors]
return params
def create_mapper(mask: T) -> T:
mapper = torch.zeros(mask.shape[0], dtype=torch.int64, device=mask.device) - 1
mapper[mask] = torch.arange(mask.sum().item(), device=mask.device)
return mapper
def mesh_center(mesh: T_Mesh):
return mesh[0].mean(0)
def to_center(vs):
max_vals = vs.max(0)[0]
min_vals = vs.min(0)[0]
center = (max_vals + min_vals) / 2
vs -= center[None, :]
return vs
def to_unit_sphere(mesh: T_Mesh, in_place: bool = True, scale=1.) -> T_Mesh:
vs, faces = mesh
if not in_place:
vs = vs.clone()
vs = to_center(vs)
norm = vs.norm(2, dim=1).max()
vs *= scale * norm ** -1
return vs, faces
def scale_from_ref(mesh: T_Mesh, center: T, scale: float, in_place: bool = True) -> T_Mesh:
vs, faces = mesh
if not in_place:
vs = vs.clone()
vs -= center[None, :]
vs *= scale
return vs, faces
def to_unit_cube(*meshes: T_Mesh_T, scale=1, in_place: bool = True) -> Tuple[Union[T_Mesh_T, Tuple[T_Mesh_T, ...]], Tuple[T, float]]:
remove_me = 0
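    # Bare point tensors are temporarily paired with the ``remove_me`` marker
    # so they can share the scaling code path and be returned without faces.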
meshes = [(mesh, remove_me) if type(mesh) is T else mesh for mesh in meshes]
vs, faces = meshes[0]
max_vals = vs.max(0)[0]
min_vals = vs.min(0)[0]
max_range = (max_vals - min_vals).max() / 2
center = (max_vals + min_vals) / 2
meshes_ = []
scale = float(scale / max_range)
for mesh in meshes:
vs_, faces_ = scale_from_ref(mesh, center, scale)
meshes_.append(vs_ if faces_ is remove_me else (vs_, faces_))
if len(meshes_) == 1:
meshes_ = meshes_[0]
return meshes_, (center, scale)
def get_edges_ind(mesh: T_Mesh) -> T:
vs, faces = mesh
raw_edges = torch.cat([faces[:, [i, (i + 1) % 3]] for i in range(3)]).sort()
raw_edges = raw_edges[0].cpu().numpy()
edges = {(int(edge[0]), int(edge[1])) for edge in raw_edges}
edges = torch.tensor(list(edges), dtype=torch.int64, device=faces.device)
return edges
def edge_lengths(mesh: T_Mesh, edges_ind: TN = None) -> T:
vs, faces = mesh
if edges_ind is None:
edges_ind = get_edges_ind(mesh)
edges = vs[edges_ind]
return torch.norm(edges[:, 0] - edges[:, 1], 2, dim=1)
# in place
def to_unit_edge(*meshes: T_Mesh) -> Tuple[Union[T_Mesh, Tuple[T_Mesh, ...]], Tuple[T, float]]:
ref = meshes[0]
center = ref[0].mean(0)
ratio = edge_lengths(ref).mean().item()
for mesh in meshes:
vs, _ = mesh
vs -= center[None, :].to(vs.device)
vs /= ratio
if len(meshes) == 1:
meshes = meshes[0]
return meshes, (center, ratio)
def to(tensors, device: D) -> Union[T_Mesh, TS, T]:
out = []
for tensor in tensors:
if type(tensor) is T:
out.append(tensor.to(device, ))
        elif type(tensor) is tuple or type(tensor) is list:
out.append(to(list(tensor), device))
else:
out.append(tensor)
if len(tensors) == 1:
return out[0]
else:
return tuple(out)
def clone(*tensors: Union[T, TS]) -> Union[TS, T_Mesh]:
out = []
for t in tensors:
if type(t) is T:
out.append(t.clone())
else:
out.append(clone(*t))
return out
def get_box(w: float, h: float, d: float) -> T_Mesh:
vs = [[0, 0, 0], [w, 0, 0], [0, d, 0], [w, d, 0],
[0, 0, h], [w, 0, h], [0, d, h], [w, d, h]]
faces = [[0, 2, 1], [1, 2, 3], [4, 5, 6], [5, 7, 6],
[0, 1, 5], [0, 5, 4], [2, 6, 7], [3, 2, 7],
[1, 3, 5], [3, 7, 5], [0, 4, 2], [2, 4, 6]]
return torch.tensor(vs, dtype=torch.float32), torch.tensor(faces, dtype=torch.int64)
def normalize(t: T):
t = t / t.norm(2, dim=1)[:, None]
return t
def interpolate_vs(mesh: T_Mesh, faces_inds: T, weights: T) -> T:
vs = mesh[0][mesh[1][faces_inds]]
vs = vs * weights[:, :, None]
return vs.sum(1)
def sample_uvw(shape, device: D):
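    # Uniform barycentric sampling: draw (u, v) in the unit square and reflect
    # any sample with u + v > 1 back into the triangle, then set w = 1 - u - v.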
u, v = torch.rand(*shape, device=device), torch.rand(*shape, device=device)
mask = (u + v).gt(1)
u[mask], v[mask] = -u[mask] + 1, -v[mask] + 1
w = -u - v + 1
uvw = torch.stack([u, v, w], dim=len(shape))
return uvw
def get_sampled_fe(fe: T, mesh: T_Mesh, face_ids: T, uvw: TN) -> T:
# to_squeeze =
if fe.dim() == 1:
fe = fe.unsqueeze(1)
if uvw is None:
fe_iner = fe[face_ids]
else:
vs_ids = mesh[1][face_ids]
fe_unrolled = fe[vs_ids]
fe_iner = torch.einsum('sad,sa->sd', fe_unrolled, uvw)
# if to_squeeze:
# fe_iner = fe_iner.squeeze_(1)
return fe_iner
def sample_on_faces(mesh: T_Mesh, num_samples: int) -> TS:
vs, faces = mesh
uvw = sample_uvw([faces.shape[0], num_samples], vs.device)
samples = torch.einsum('fad,fna->fnd', vs[faces], uvw)
return samples, uvw
class SampleBy(Enum):
AREAS = 0
FACES = 1
HYB = 2
def sample_on_mesh(mesh: T_Mesh, num_samples: int, face_areas: TN = None,
sample_s: SampleBy = SampleBy.HYB) -> TNS:
vs, faces = mesh
if faces is None: # sample from pc
uvw = None
if vs.shape[0] < num_samples:
chosen_faces_inds = torch.arange(vs.shape[0])
else:
chosen_faces_inds = torch.argsort(torch.rand(vs.shape[0]))[:num_samples]
samples = vs[chosen_faces_inds]
else:
weighted_p = []
if sample_s == SampleBy.AREAS or sample_s == SampleBy.HYB:
if face_areas is None:
face_areas, _ = compute_face_areas(mesh)
face_areas[torch.isnan(face_areas)] = 0
weighted_p.append(face_areas / face_areas.sum())
if sample_s == SampleBy.FACES or sample_s == SampleBy.HYB:
weighted_p.append(torch.ones(mesh[1].shape[0], device=mesh[0].device))
chosen_faces_inds = [torch.multinomial(weights, num_samples // len(weighted_p), replacement=True) for weights in weighted_p]
if sample_s == SampleBy.HYB:
chosen_faces_inds = torch.cat(chosen_faces_inds, dim=0)
chosen_faces = faces[chosen_faces_inds]
uvw = sample_uvw([num_samples], vs.device)
samples = torch.einsum('sf,sfd->sd', uvw, vs[chosen_faces])
return samples, chosen_faces_inds, uvw
def get_samples(mesh: T_Mesh, num_samples: int, sample_s: SampleBy, *features: T) -> Union[T, TS]:
samples, face_ids, uvw = sample_on_mesh(mesh, num_samples, sample_s=sample_s)
if len(features) > 0:
samples = [samples] + [get_sampled_fe(fe, mesh, face_ids, uvw) for fe in features]
return samples, face_ids, uvw
def find_barycentric(vs: T, triangles: T) -> T:
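    # Barycentric coordinates via area ratios: each coordinate is the area of
    # the triangle obtained by replacing one vertex with the query point,
    # divided by the area of the original triangle.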
def compute_barycentric(ind):
triangles[:, ind] = vs
alpha = compute_face_areas(triangles)[0] / areas
triangles[:, ind] = recover[:, ind]
return alpha
device, dtype = vs.device, vs.dtype
vs = vs.to(device, dtype=torch.float64)
triangles = triangles.to(device, dtype=torch.float64)
areas, _ = compute_face_areas(triangles)
recover = triangles.clone()
barycentric = [compute_barycentric(i) for i in range(3)]
barycentric = torch.stack(barycentric, dim=1)
# assert barycentric.sum(1).max().item() <= 1 + EPSILON
return barycentric.to(device, dtype=dtype)
def from_barycentric(mesh: Union[T_Mesh, T], face_ids: T, weights: T) -> T:
if type(mesh) is not T:
triangles: T = mesh[0][mesh[1]]
else:
triangles: T = mesh
to_squeeze = weights.dim() == 1
if to_squeeze:
weights = weights.unsqueeze(0)
face_ids = face_ids.unsqueeze(0)
vs = torch.einsum('nad,na->nd', triangles[face_ids], weights)
if to_squeeze:
vs = vs.squeeze(0)
return vs
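# Sketch of the barycentric round trip (illustrative names): points lying on the
# selected faces should be reproduced up to numerical error.
def _example_barycentric_roundtrip(mesh: T_Mesh, points: T, face_ids: T) -> T:
    triangles = mesh[0][mesh[1]]
    weights = find_barycentric(points, triangles[face_ids])
    return from_barycentric(mesh, face_ids, weights)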
def check_circle_angles(mesh: T_Mesh, center_ind: int, select: T) -> bool:
vs, _ = mesh
all_vecs = vs[select] - vs[center_ind][None, :]
all_vecs = all_vecs / all_vecs.norm(2, 1)[:, None]
all_vecs = torch.cat([all_vecs, all_vecs[:1]], dim=0)
all_cos = torch.einsum('nd,nd->n', all_vecs[1:], all_vecs[:-1])
    all_angles = torch.acos(all_cos)
all_angles = all_angles.sum()
return (all_angles - 2 * np.pi).abs() < EPSILON
def vs_over_triangle(vs_mid: T, triangle: T, normals=None) -> T:
if vs_mid.dim() == 1:
vs_mid = vs_mid.unsqueeze(0)
triangle = triangle.unsqueeze(0)
if normals is None:
_, normals = compute_face_areas(triangle)
select = torch.arange(3)
d_vs = vs_mid[:, None, :] - triangle
d_f = triangle[:, select] - triangle[:, (select + 1) % 3]
all_cross = torch.cross(d_vs, d_f, dim=2)
all_dots = torch.einsum('nd,nad->na', normals, all_cross)
is_over = all_dots.ge(0).long().sum(1).eq(3)
return is_over
def igl_prepare(*dtypes):
def decoder(func):
def wrapper(*args, **kwargs):
mesh = args[0]
device, dtype = mesh[0].device, mesh[0].dtype
vs, faces = to_numpy(*mesh)
result = func((vs, faces), *args[1:], **kwargs)
return to_torch(result, device)
        to_torch = to_torch_single if len(dtypes) == 1 else to_torch_multi
        return wrapper
    def to_torch_single(result, device):
return torch.from_numpy(result).to(device, dtype=dtypes[0])
def to_torch_multi(result, device):
return [torch.from_numpy(r).to(device, dtype=dtype) for r, dtype in zip(result, dtypes)]
return decoder
@igl_prepare(torch.float32, torch.int64)
def decimate_igl(mesh, num_faces: int):
if mesh[1].shape[0] <= num_faces:
return mesh
vs, faces, _ = igl.remove_duplicates(*mesh, 1e-8)
return igl.decimate(vs, faces, num_faces)[1:3]
@igl_prepare(torch.float32)
def gaussian_curvature(mesh: T_Mesh) -> T:
gc = igl.gaussian_curvature(*mesh)
return gc
@igl_prepare(torch.float32)
def per_vertex_normals_igl(mesh: T_Mesh, weighting: int = 0) -> T:
normals = igl.per_vertex_normals(*mesh, weighting)
return normals
@igl_prepare(torch.float32, torch.int64)
def remove_duplicate_vertices(mesh: T_Mesh, epsilon=1e-7) -> T_Mesh:
vs, _, _, faces = igl.remove_duplicate_vertices(*mesh, epsilon)
return vs, faces
@igl_prepare(torch.float32)
def winding_number_igl(mesh: T_Mesh, query: T) -> T:
query = query.cpu().numpy()
return igl.fast_winding_number_for_meshes(*mesh, query)
@igl_prepare(torch.float32, torch.float32, torch.float32, torch.float32)
def principal_curvature(mesh: T_Mesh) -> TS:
out = igl.principal_curvature(*mesh)
min_dir, max_dir, min_val, max_val = out
return min_dir, max_dir, min_val, max_val
def get_inside_outside(points: T, mesh: T_Mesh) -> T:
device = points.device
points = points.numpy()
vs, faces = mesh[0].numpy(), mesh[1].numpy()
winding_numbers = igl.fast_winding_number_for_meshes(vs, faces, points)
winding_numbers = torch.from_numpy(winding_numbers)
inside_outside = winding_numbers.lt(.5).float() * 2 - 1
return inside_outside.to(device)
@igl_prepare(torch.float32)
def lscm(mesh: T_Mesh, boundary_indices: T, boundary_coordinates: T) -> T:
boundary_indices, boundary_coordinates = boundary_indices.numpy(), boundary_coordinates.numpy()
check, uv = igl.lscm(*mesh, boundary_indices, boundary_coordinates)
return uv
def interpulate_vs(mesh: T_Mesh, faces_inds: T, weights: T) -> T:
vs = mesh[0][mesh[1][faces_inds]]
vs = vs * weights[:, :, None]
return vs.sum(1)
| 1.867188 | 2 |
core/filebrowser/urls.py | yesw2000/panda-bigmon-core | 0 | 12797578 | <gh_stars>0
"""
filebrowser.urls
"""
from django.conf.urls import include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
### #FIXME admin.autodiscover()
import views as filebrowser_views
urlpatterns = [
url(r'^$', filebrowser_views.index, name='filebrowser'),
url(r'^api/$', filebrowser_views.api_single_pandaid, name='filebrowser-api-single-pandaid'),
url(r'^delete/$', filebrowser_views.delete_files, name='filebrowser-delete'),
]
| 1.398438 | 1 |
disvae/models/losses.py | BartMelman/disentangling-vae | 0 | 12797579 | <filename>disvae/models/losses.py
"""
Module containing all vae losses.
"""
import abc
import math
import torch
from torch.nn import functional as F
from torch import optim
from .discriminator import Discriminator
from disvae.utils.math import log_density_normal, log_importance_weight_matrix
# TO-DO: clean data_size and device
def get_loss_f(name, kwargs_parse={}):
"""Return the correct loss function given the argparse arguments."""
kwargs_all = dict(rec_dist=kwargs_parse["rec_dist"], steps_anneal=kwargs_parse["reg_anneal"])
if name == "betaH":
return BetaHLoss(beta=kwargs_parse["betaH_B"], **kwargs_all)
elif name == "VAE":
return BetaHLoss(beta=1, **kwargs_all)
elif name == "betaB":
return BetaBLoss(C_init=kwargs_parse["betaB_initC"],
C_fin=kwargs_parse["betaB_finC"],
C_n_interp=kwargs_parse["betaB_stepsC"],
gamma=kwargs_parse["betaB_G"],
**kwargs_all)
elif name == "factor":
return FactorKLoss(kwargs_parse["device"],
kwargs_parse["data_size"],
gamma=kwargs_parse["factor_G"],
is_mutual_info=not kwargs_parse["no_mutual_info"],
is_mss=not kwargs_parse["no_mss"],
**kwargs_all)
elif name == "batchTC":
return BatchTCLoss(kwargs_parse["device"],
kwargs_parse["data_size"],
alpha=kwargs_parse["batchTC_A"],
beta=kwargs_parse["batchTC_B"],
gamma=kwargs_parse["batchTC_G"],
is_mss=not kwargs_parse["no_mss"],
**kwargs_all)
else:
raise ValueError("Uknown loss : {}".format(name))
class BaseLoss(abc.ABC):
"""
Base class for losses.
Parameters
----------
record_loss_every: int, optional
        Every how many steps to record the loss.
    rec_dist: {"bernoulli", "gaussian", "laplace"}, optional
        Reconstruction distribution of the likelihood on each pixel.
        Implicitly defines the reconstruction loss. Bernoulli corresponds to a
        binary cross entropy (bce), Gaussian corresponds to MSE, Laplace
        corresponds to L1.
    steps_anneal: int, optional
        Number of annealing steps over which the regularisation is gradually added.
"""
def __init__(self, record_loss_every=50, rec_dist="bernoulli", steps_anneal=0):
self.n_train_steps = 0
self.record_loss_every = record_loss_every
self.rec_dist = rec_dist
self.steps_anneal = steps_anneal
@abc.abstractmethod
def __call__(self, data, recon_data, latent_dist, is_train, storer):
"""
Calculates loss for a batch of data.
Parameters
----------
data : torch.Tensor
Input data (e.g. batch of images). Shape : (batch_size, n_chan,
height, width).
recon_data : torch.Tensor
Reconstructed data. Shape : (batch_size, n_chan, height, width).
latent_dist : tuple of torch.tensor
sufficient statistics of the latent dimension. E.g. for gaussian
(mean, log_var) each of shape : (batch_size, latent_dim).
storer : dict
Dictionary in which to store important variables for vizualisation.
"""
def _pre_call(self, is_train, storer):
if is_train:
self.n_train_steps += 1
if not is_train or self.n_train_steps % self.record_loss_every == 1:
storer = storer
else:
storer = None
return storer
class BetaHLoss(BaseLoss):
"""
Compute the Beta-VAE loss as in [1]
Parameters
----------
beta : float, optional
Weight of the kl divergence.
References:
[1] Higgins, Irina, et al. "beta-vae: Learning basic visual concepts with
a constrained variational framework." (2016).
kwargs:
        Additional arguments for `BaseLoss`, e.g. `rec_dist`.
"""
def __init__(self, beta=4, **kwargs):
super().__init__(**kwargs)
self.beta = beta
def __call__(self, data, recon_data, latent_dist, is_train, storer):
storer = self._pre_call(is_train, storer)
rec_loss = _reconstruction_loss(data, recon_data,
storer=storer, distribution=self.rec_dist)
kl_loss = _kl_normal_loss(*latent_dist, storer)
anneal_rec = (linear_annealing(0, 1, self.n_train_steps, self.steps_anneal)
if is_train else 1)
loss = rec_loss + anneal_rec * (self.beta * kl_loss)
if storer is not None:
storer['loss'].append(loss.item())
return loss
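# Sketch of a single loss evaluation with BetaHLoss (names are illustrative;
# `model(data)` is assumed to return (reconstruction, latent_dist, latent_sample)
# as in the FactorKLoss code below, and `storer` is a dict of lists or None):
def _example_betaH_step(model, data, storer=None):
    recon_data, latent_dist, _ = model(data)
    loss_f = BetaHLoss(beta=4)
    return loss_f(data, recon_data, latent_dist, is_train=True, storer=storer)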
class BetaBLoss(BaseLoss):
"""
Compute the Beta-VAE loss as in [1]
Parameters
----------
C_init : float, optional
Starting annealed capacity C.
C_fin : float, optional
Final annealed capacity C.
C_n_interp : float, optional
Number of training iterations for interpolating C.
gamma : float, optional
Weight of the KL divergence term.
kwargs:
        Additional arguments for `BaseLoss`, e.g. `rec_dist`.
References
----------
[1] Burgess, <NAME>., et al. "Understanding disentangling in
$\beta$-VAE." arXiv preprint arXiv:1804.03599 (2018).
"""
def __init__(self, C_init=0., C_fin=5., C_n_interp=25000, gamma=30., **kwargs):
super().__init__(**kwargs)
self.gamma = gamma
self.C_init = C_init
self.C_fin = C_fin
self.C_n_interp = C_n_interp
def __call__(self, data, recon_data, latent_dist, is_train, storer):
storer = self._pre_call(is_train, storer)
rec_loss = _reconstruction_loss(data, recon_data,
storer=storer, distribution=self.rec_dist)
kl_loss = _kl_normal_loss(*latent_dist, storer)
C = (linear_annealing(self.C_init, self.C_fin, self.n_train_steps, self.C_n_interp)
if is_train else self.C_fin)
loss = rec_loss + self.gamma * (kl_loss - C).abs()
batch_size = data.size(0)
if storer is not None:
storer['loss'].append(loss.item())
return loss
class FactorKLoss(BaseLoss):
"""
Compute the Factor-VAE loss as per Algorithm 2 of [1]
Parameters
----------
device : torch.device
beta : float, optional
Weight of the TC loss term. `gamma` in the paper.
is_mutual_info : bool
True : includes the mutual information term in the loss
False : removes mutual information
discriminator : disvae.discriminator.Discriminator
optimizer_d : torch.optim
kwargs:
        Additional arguments for `BaseLoss`, e.g. `rec_dist`.
References
----------
[1] Kim, Hyunjik, and <NAME>. "Disentangling by factorising."
arXiv preprint arXiv:1802.05983 (2018).
"""
def __init__(self, device, data_size, gamma=40., is_mutual_info=True, is_mss=False,
disc_kwargs=dict(neg_slope=0.2, latent_dim=10, hidden_units=1000),
optim_kwargs=dict(lr=5e-4, betas=(0.5, 0.9)),
**kwargs):
super().__init__(**kwargs)
self.gamma = gamma
self.data_size = data_size
self.device = device
self.is_mutual_info = is_mutual_info
self.is_mss = is_mss
self.discriminator = Discriminator(**disc_kwargs).to(self.device)
self.optimizer_d = optim.Adam(self.discriminator.parameters(), **optim_kwargs)
def __call__(self, data, model, optimizer, storer):
storer = self._pre_call(model.training, storer)
# factor-vae split data into two batches. In the paper they sample 2 batches
batch_size = data.size(dim=0)
half_batch_size = batch_size // 2
data = data.split(half_batch_size)
data1 = data[0]
data2 = data[1]
# Factor VAE Loss
recon_batch, latent_dist, latent_sample1 = model(data1)
rec_loss = _reconstruction_loss(data1, recon_batch,
storer=storer, distribution=self.rec_dist)
# TODO: remove this kl_loss term once viz is sorted
# https://github.com/YannDubs/disentangling-vae/pull/25#issuecomment-473535863
kl_loss = _kl_normal_loss(*latent_dist, storer)
d_z = self.discriminator(latent_sample1)
# clamping to 0 because TC cannot be negative : TEST
tc_loss = (F.logsigmoid(d_z) - F.logsigmoid(1 - d_z)).clamp(0).mean()
anneal_rec = (linear_annealing(0, 1, self.n_train_steps, self.steps_anneal)
if model.training else 1)
# TODO replace this code with the following commented out code after viz is fixed
# https://github.com/YannDubs/disentangling-vae/pull/25#issuecomment-473535863
if self.is_mutual_info:
# return vae loss
vae_loss = rec_loss + anneal_rec * (kl_loss + self.gamma * tc_loss)
else:
# return vae loss without mutual information term
# change latent dist to torch.tensor (could probably avoid this)
latent_dist = torch.stack((latent_dist[0], latent_dist[1]), dim=2)
# calculate log p(z)
prior_params = torch.zeros(half_batch_size, latent_dist.size(1), 2).to(self.device)
logpz = log_density_normal(latent_sample1, prior_params, half_batch_size,
return_matrix=False).view(half_batch_size, -1).sum(1)
if not self.is_mss:
# minibatch weighted sampling
_, logqz_prodmarginals = _minibatch_weighted_sampling(latent_dist, latent_sample1,
self.data_size)
else:
# minibatch stratified sampling
_, logqz_prodmarginals = _minibatch_stratified_sampling(latent_dist, latent_sample1,
self.data_size)
gamma = self.gamma + 1
dw_kl_loss = (logqz_prodmarginals - logpz).mean()
vae_loss = rec_loss + anneal_rec * (gamma * tc_loss + dw_kl_loss)
# if self.is_mutual_info:
# beta = self.beta
# kl_loss = _kl_normal_loss(*latent_dist, storer)
# else:
# # beta has to be increased by one for correct comparaison
# # as the TC term is included in `_kl_normal_loss`
# beta = self.beta + 1
# kl_loss = _dimwise_kl_loss(*latent_dist, storer)
#
# vae_loss = rec_loss + kl_loss + beta * tc_loss
if storer is not None:
storer['loss'].append(vae_loss.item())
storer['tc_loss'].append(tc_loss.item())
if not model.training:
# don't backprop if evaluating
return vae_loss
# Run VAE optimizer
optimizer.zero_grad()
vae_loss.backward(retain_graph=True)
optimizer.step()
# Discriminator Loss
# Get second sample of latent distribution
latent_sample2 = model.sample_latent(data2)
z_perm = _permute_dims(latent_sample2).detach()
d_z_perm = self.discriminator(z_perm)
# Calculate total correlation loss
d_tc_loss = - (0.5 * (F.logsigmoid(d_z) + F.logsigmoid(1 - d_z_perm))).mean()
# Run discriminator optimizer
self.optimizer_d.zero_grad()
d_tc_loss.backward()
self.optimizer_d.step()
if storer is not None:
storer['discrim_loss'].append(d_tc_loss.item())
return vae_loss
class BatchTCLoss(BaseLoss):
"""
Compute the decomposed KL loss with either minibatch weighted sampling or
minibatch stratified sampling according to [1]
Parameters
----------
data_size: int
Size of the dataset
alpha : float
Weight of the mutual information term.
beta : float
Weight of the total correlation term.
gamma : float
Weight of the dimension-wise KL term.
latent_dim: int
Dimension of the latent variable
is_mss : bool
Selects either minibatch stratified sampling (True) or minibatch
weighted sampling (False)
kwargs:
        Additional arguments for `BaseLoss`, e.g. `rec_dist`.
References
----------
[1] Chen, <NAME>, et al. "Isolating sources of disentanglement in variational
autoencoders." Advances in Neural Information Processing Systems. 2018.
"""
def __init__(self, device, data_size, alpha=1., beta=6., gamma=1., is_mss=False, **kwargs):
super().__init__(**kwargs)
# beta values: dsprites: 6, celeba: 15
self.device = device
self.dataset_size = data_size
self.beta = beta
self.alpha = alpha
self.gamma = gamma
self.is_mss = is_mss # minibatch stratified sampling
def __call__(self, data, recon_batch, latent_dist, is_train, storer, latent_sample=None):
storer = self._pre_call(is_train, storer)
batch_size = data.size(0)
# change latent dist to torch.tensor (could probably avoid this)
latent_dist = torch.stack((latent_dist[0], latent_dist[1]), dim=2)
# calculate log q(z|x) and _log q(z) matrix
logqz_condx = log_density_normal(latent_sample, latent_dist, batch_size,
return_matrix=False).sum(dim=1)
# calculate log p(z)
prior_params = torch.zeros(batch_size, latent_dist.size(1), 2).to(self.device)
logpz = log_density_normal(latent_sample, prior_params, batch_size,
return_matrix=False).view(batch_size, -1).sum(1)
if not self.is_mss:
# minibatch weighted sampling
logqz, logqz_prodmarginals = _minibatch_weighted_sampling(latent_dist, latent_sample,
self.dataset_size)
else:
# minibatch stratified sampling
logqz, logqz_prodmarginals = _minibatch_stratified_sampling(latent_dist, latent_sample,
self.dataset_size)
# rec loss, mutual information, total correlation and dim-wise kl
rec_loss = _reconstruction_loss(data, recon_batch,
storer=storer, distribution=self.rec_dist)
mi_loss = (logqz_condx - logqz).mean()
tc_loss = (logqz - logqz_prodmarginals).mean()
dw_kl_loss = (logqz_prodmarginals - logpz).mean()
anneal_rec = (linear_annealing(0, 1, self.n_train_steps, self.steps_anneal)
if is_train else 1)
# total loss
loss = rec_loss + anneal_rec * (self.alpha * mi_loss + self.beta * tc_loss + self.gamma * dw_kl_loss)
if storer is not None:
storer['loss'].append(loss.item())
storer['mi_loss'].append(mi_loss.item())
storer['tc_loss'].append(tc_loss.item())
storer['dw_kl_loss'].append(dw_kl_loss.item())
# TODO Remove this when visualisation fixed
tc_loss_vec = (logqz - logqz_prodmarginals)
for i in range(latent_dist.size(1)):
storer['kl_loss_' + str(i)].append(tc_loss_vec[i].item())
return loss
def _minibatch_weighted_sampling(latent_dist, latent_sample, data_size):
"""
Estimates log q(z) and the log (product of marginals of q(z_j)) with minibatch
weighted sampling.
Parameters
----------
latent_dist : torch.Tensor
Mean and logvar of the normal distribution. Shape (batch_size, latent_dim, 2)
latent_sample: torch.Tensor
sample from the latent dimension using the reparameterisation trick
shape : (batch_size, latent_dim).
data_size : int
Number of data in the training set
References :
[1] Chen, <NAME>, et al. "Isolating sources of disentanglement in variational
autoencoders." Advances in Neural Information Processing Systems. 2018.
"""
batch_size = latent_dist.size(0)
_logqz = log_density_normal(latent_sample, latent_dist,
batch_size, return_matrix=True)
logqz_prodmarginals = (torch.logsumexp(_logqz, dim=1, keepdim=False) -
math.log(batch_size * data_size)).sum(dim=1)
logqz = torch.logsumexp(_logqz.sum(2), dim=1, keepdim=False) \
- math.log(batch_size * data_size)
return logqz, logqz_prodmarginals
def _minibatch_stratified_sampling(latent_dist, latent_sample, data_size):
"""
Estimates log q(z) and the log (product of marginals of q(z_j)) with minibatch
stratified sampling.
Parameters
----------
latent_dist : torch.Tensor
Mean and logvar of the normal distribution. Shape (batch_size, latent_dim, 2)
latent_sample: torch.Tensor
sample from the latent dimension using the reparameterisation trick
shape : (batch_size, latent_dim).
data_size : int
Number of data in the training set
References :
[1] Chen, <NAME>, et al. "Isolating sources of disentanglement in variational
autoencoders." Advances in Neural Information Processing Systems. 2018.
"""
batch_size = latent_dist.size(0)
_logqz = log_density_normal(latent_sample, latent_dist,
batch_size, return_matrix=True)
logiw_matrix = log_importance_weight_matrix(batch_size, data_size).to(latent_dist.device)
logqz = torch.logsumexp(logiw_matrix + _logqz.sum(2), dim=1, keepdim=False)
logqz_prodmarginals = torch.logsumexp(logiw_matrix.view(batch_size, batch_size, 1) +
_logqz, dim=1, keepdim=False).sum(1)
return logqz, logqz_prodmarginals
def _reconstruction_loss(data, recon_data, distribution="bernoulli", storer=None):
"""
Calculates the per image reconstruction loss for a batch of data.
Parameters
----------
data : torch.Tensor
Input data (e.g. batch of images). Shape : (batch_size, n_chan,
height, width).
recon_data : torch.Tensor
Reconstructed data. Shape : (batch_size, n_chan, height, width).
distribution : {"bernoulli", "gaussian", "laplace"}
        Distribution of the likelihood on each pixel. Implicitly defines the
        loss. Bernoulli corresponds to a binary cross entropy (bce) loss and is the
        most commonly used. It has the issue that it doesn't penalize the same
        way (0.1,0.2) and (0.4,0.5), which might not be optimal. Gaussian
        distribution corresponds to MSE, and is sometimes used, but hard to train
        because it ends up focusing on only a few pixels that are very wrong. Laplace
        distribution corresponds to L1, which partially solves the issue of MSE.
storer : dict
Dictionary in which to store important variables for vizualisation.
Returns
-------
loss : torch.Tensor
Per image cross entropy (i.e. normalized per batch but not pixel and
channel)
"""
batch_size, n_chan, height, width = recon_data.size()
is_colored = n_chan == 3
if distribution == "bernoulli":
loss = F.binary_cross_entropy(recon_data, data, reduction="sum")
elif distribution == "gaussian":
# loss in [0,255] space but normalized by 255 to not be too big
loss = F.mse_loss(recon_data * 255, data * 255, reduction="sum") / 255
elif distribution == "laplace":
# loss in [0,255] space but normalized by 255 to not be too big but
# multiply by 255 and divide 255, is the same as not doing anything for L1
loss = F.l1_loss(recon_data, data, reduction="sum")
else:
raise ValueError("Unkown distribution: {}".format(distribution))
loss = loss / batch_size
if storer is not None:
storer['recon_loss'].append(loss.item())
return loss
def _kl_normal_loss(mean, logvar, storer=None):
"""
Calculates the KL divergence between a normal distribution
with diagonal covariance and a unit normal distribution.
Parameters
----------
mean : torch.Tensor
Mean of the normal distribution. Shape (batch_size, latent_dim) where
D is dimension of distribution.
logvar : torch.Tensor
Diagonal log variance of the normal distribution. Shape (batch_size,
latent_dim)
storer : dict
Dictionary in which to store important variables for vizualisation.
"""
latent_dim = mean.size(1)
# batch mean of kl for each latent dimension
latent_kl = 0.5 * (-1 - logvar + mean.pow(2) + logvar.exp()).mean(dim=0)
total_kl = latent_kl.sum()
if storer is not None:
storer['kl_loss'].append(total_kl.item())
for i in range(latent_dim):
storer['kl_loss_' + str(i)].append(latent_kl[i].item())
return total_kl
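# e.g. _kl_normal_loss(torch.zeros(8, 10), torch.zeros(8, 10)) evaluates to 0, since a
# zero-mean, unit-variance posterior matches the unit normal prior exactly.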
def _permute_dims(latent_sample):
"""
Implementation of Algorithm 1 in ref [1]. Randomly permutes the sample from
q(z) (latent_dist) across the batch for each of the latent dimensions (mean
and log_var).
Parameters
----------
latent_sample: torch.Tensor
sample from the latent dimension using the reparameterisation trick
shape : (batch_size, latent_dim).
References
----------
[1] <NAME>, and <NAME>. "Disentangling by factorising."
arXiv preprint arXiv:1802.05983 (2018).
"""
perm = torch.zeros_like(latent_sample)
batch_size, dim_z = perm.size()
for z in range(dim_z):
pi = torch.randperm(batch_size).to(latent_sample.device)
perm[:, z] = latent_sample[pi, z]
return perm
def linear_annealing(init, fin, step, annealing_steps):
"""Linear annealing of a parameter."""
if annealing_steps == 0:
return fin
assert fin > init
delta = fin - init
annealed = min(init + delta * step / annealing_steps, fin)
return annealed
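# e.g. linear_annealing(0, 1, step=50, annealing_steps=100) returns 0.5; once
# step >= annealing_steps the value stays clamped at `fin`.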
| 2.125 | 2 |
monitor/tests/test_views/test_twitter_user_views.py | arineto/twitter_monitor | 1 | 12797580 | from django.test import TestCase
from django.urls import reverse
from model_mommy import mommy
from monitor.models import TwitterUser
from monitor.tests.utils.http_client_mixin import HTTPClientMixin
import mock
class TestTwitterUserView(HTTPClientMixin, TestCase):
def setUp(self):
super(TestTwitterUserView, self).setUp()
self.url = reverse('monitor:users')
self.users = mommy.make('monitor.TwitterUser', _quantity=3)
def test_get(self):
response = self.client.get(self.url)
self.assertEqual(len(response.data), 3)
for count, user in enumerate(self.users):
self.assertEqual(response.data[count].get('id'), user.id)
def test_post(self):
self.assertEqual(TwitterUser.objects.count(), 3)
path = (
'monitor.api.serializers.twitter_user_serializers.'
'retrieve_tweets.delay'
)
with mock.patch(path, mock.Mock()) as retrieve_tweets:
response = self.client.post(self.url, {'username': 'test'})
retrieve_tweets.assert_called()
self.assertEqual(TwitterUser.objects.count(), 4)
self.assertEqual(response.data.get('username'), 'test')
class TestUsernameListView(HTTPClientMixin, TestCase):
def setUp(self):
super(TestUsernameListView, self).setUp()
self.users = mommy.make('monitor.TwitterUser', _quantity=3)
def test_get(self):
url = reverse('monitor:usernames')
response = self.client.get(url)
self.assertEqual(len(response.data), 3)
for count, user in enumerate(self.users):
self.assertEqual(
response.data[count].get('username'), user.username
)
| 2.3125 | 2 |
surf/surf4hourstool.py | izhujiang/igsnrr | 0 | 12797581 | <filename>surf/surf4hourstool.py
# !/usr/bin/python
# -*- coding: utf-8 -*-
# COPYRIGHT 2016 igsnrr
#
# MORE INFO ...
# email:
import os
import shutil
import time
from datetime import date
from datetime import timedelta, datetime
import pandas as pd
from ..base.toolbase import ToolBase
class Surf4HoursTool(ToolBase):
"""The tool is designed to convert surf files orgarnized by month into
files by station IDs and statisics for daily and monthly."""
def __init__(self):
ToolBase.__init__(self, "Surf4HoursTool",
"The Surf4Hours Tool convert surf files organized \
by day into files organized by station. and \
statisics for daily and monthly.")
self._version = "surf4hourstool.py 0.0.1"
def defineArgumentParser(self, parser):
# parser.add_argument("source", action="store",
# help="root dir for source files")
parser.add_argument("target", action="store",
help="root dir for all data")
def run(self, args):
# srcRoot = args.source
targetRoot = args.target
# print(srcRoot, "-->", targetRoot)
bystationDir = os.path.join(targetRoot, "bystation")
# self.batchConvert(srcRoot, bystationDir)
# 08-08, qixiang
subdir = "qx0808"
dailyDir = os.path.join(targetRoot, subdir, "daily0808")
monthlyDir = os.path.join(targetRoot, subdir, "monthly0808")
yearDir = os.path.join(targetRoot, subdir, "year0808")
print("statistics qx0808")
self.statisticsDaily(bystationDir, dailyDir, "0808")
self.statisticsMonthly(dailyDir, monthlyDir)
self.statisticsYears(monthlyDir, yearDir)
# 20-20, qixiang
subdir = "qx2020"
dailyDir = os.path.join(targetRoot, subdir, "daily2020")
monthlyDir = os.path.join(targetRoot, subdir, "monthly2020")
yearDir = os.path.join(targetRoot, subdir, "year2020")
print("statistics qx2020")
self.statisticsDaily(bystationDir, dailyDir, "2020")
self.statisticsMonthly(dailyDir, monthlyDir)
self.statisticsYears(monthlyDir, yearDir)
# 08-08, shuili
subdir = "sl0808"
dailyDir = os.path.join(targetRoot, subdir, "daily0808")
monthlyDir = os.path.join(targetRoot, subdir, "monthly0808")
yearDir = os.path.join(targetRoot, subdir, "year0808")
print("statistics sl0808")
self.statisticsDaily(bystationDir, dailyDir, "0832")
self.statisticsMonthly(dailyDir, monthlyDir)
self.statisticsYears(monthlyDir, yearDir)
# def batchConvert(self, srcPathRoot, targetPathRoot):
# self.clearDirectory(targetPathRoot)
# filelist = sorted(os.listdir(srcPathRoot))
# for item in filelist:
# srcPath = os.path.join(srcPathRoot, item)
# print(srcPath)
# self.convert(srcPath, targetPathRoot)
# self.insertHeader(targetPathRoot)
# def convert(self, srcPath, targetRoot):
# if not os.path.exists(srcPath):
# self._loggej.info("Failed: {0} does't existe".format(srcPath))
# filename = os.path.basename(srcPath)
# year = int(filename[:4])
# mon = int(filename[4:6])
# day = int(filename[6:8])
# recs = []
# with open(srcPath) as f:
# recs = f.readlines()
# recs = recs[1:]
# f.close()
# group = {}
# strfmt = ("{0:>8}{1:>6d}{2:0>2d}{3:0>2d}{4:0>2d}"
# "{1:>6d}{2:>4d}{3:>4d}{4:>4d}"
# "{5:>12.1f}{6:>12.1f}{7:>12.1f}\n")
# for rec in recs:
# items = rec.split(",")
# if items[0] not in group:
# group[items[0]] = []
# if items[7] == "999990":
# items[7] == "0"
# rec = strfmt.format(items[0], int(items[1]), int(items[2]),
# int(items[3]), int(items[4]),
# float(items[5]), float(items[6]),
# float(items[7]))
# group[items[0]].append(rec)
# for k, v in group.items():
# target = os.path.join(targetRoot, k)
# recs_w = [
# strfmt.format(k, year, mon, day, i,
# 999999, 999999, 999999)
# for i in range(24)]
# for line in v:
# items = line.split()
# # try:
# recs_w[int(items[5])] = line
# # except:
# # print("An exception occurred", line, items)
# with open(target, 'a') as fo:
# fo.writelines(recs_w)
# fo.close()
# def insertHeader(self, parentDir):
# header = ("{0:>8}{1:>12}{2:>6}{3:>4}{4:>4}{5:>4}"
# "{6:>12}{7:>12}{8:>12}\n").format(
# "SID", "DATETIME", "YEAR",
# "MON", "DAY", "HR",
# "PRES", "TEMP", "PREC")
# filelist = sorted(os.listdir(parentDir))
# print(filelist)
# for item in filelist:
# with open(os.path.join(parentDir, item), 'r+') as fo:
# recs = fo.readlines()
# sample = recs[0].split()
# sid = sample[0]
# year = int(sample[2])
# mon = int(sample[3])
# day = int(sample[4])
# today = date(year, mon, day)
# nextday = today
# fo.seek(0)
# fo.write(header)
# index = 0
# last_rec = len(recs) - 1
# while index < last_rec:
# if nextday == today:
# fo.writelines(recs[index: index+24])
# index = index + 24
# if index > last_rec:
# break
# sample = recs[index].split()
# sid = sample[0]
# year = int(sample[2])
# mon = int(sample[3])
# day = int(sample[4])
# nextday = date(year, mon, day)
# else:
# strfmt = (
# "{0:>8}{1:>6d}{2:0>2d}{3:0>2d}{4:0>2d}"
# "{1:>6d}{2:>4d}{3:>4d}{4:>4d}"
# "{5:>12.1f}{6:>12.1f}{7:>12.1f}\n")
# year = today.year
# mon = today.month
# day = today.day
# recs_empty = [
# strfmt.format(
# sid, year, mon, day, i,
# 999999, 999999, 999999)
# for i in range(24)]
# fo.writelines(recs_emt
# today = today + timedelta(days=1)
# fo.flush()
# fo.close()
def statisticsDaily(self, srcPathRoot, targetPathRoot, stat_win):
self.clearDirectory(targetPathRoot)
filelist = os.listdir(srcPathRoot)
for item in filelist:
srcPath = os.path.join(srcPathRoot, item)
targetPath = os.path.join(targetPathRoot, item)
self.stasticsDailySingleStatation(item, srcPath,
targetPath, stat_win)
def stasticsDailySingleStatation(self, sid, srcPath, targetPath, stat_win):
print("processing {0}".format(srcPath))
db = pd.read_table(srcPath, skip_blank_lines=True,
delim_whitespace=True, index_col="DATETIME")
result = []
# todo: do config the range of loop
y_series = db["YEAR"]
endDay = date(y_series.max()+1, 1, 1)
curDay = date(y_series.min(), 1, 1)
while(curDay < endDay):
if stat_win == "0808":
recs = self.queryData(db, curDay, 16)
elif stat_win == "2020":
recs = self.queryData(db, curDay, 4)
else:
recs = self.queryData(db, curDay, -8)
# if not recs.empty:
day_rec = self.calcDaily(sid, curDay, recs, stat_win)
if day_rec is not None:
result.append(day_rec)
curDay = curDay + timedelta(days=1)
if stat_win == "0808" or stat_win == "0832":
header = ("{:>8}{:>10}{:>6}{:>4}{:>4}{:>10}{:>10}{:>10}{:>10}"
"{:>10}{:>10}{:>10}{:>4}{:>10}{:>4}{:>10}{:>4}\n") \
.format("SID", "DATE", "YEAR", "MON", "DAY",
"AVG_PRES", "MAX_PRES", "MIN_PRES",
"AVG_TEMP", "MAX_TEMP", "MIN_TEMP",
"PREC24", "CNT", "PREC08_20", "C1", "PREC20_08", "C2")
else:
header = ("{:>8}{:>10}{:>6}{:>4}{:>4}{:>10}{:>10}{:>10}{:>10}"
"{:>10}{:>10}{:>10}{:>4}{:>10}{:>4}{:>10}{:>4}\n") \
.format("SID", "DATE", "YEAR", "MON", "DAY",
"AVG_PRES", "MAX_PRES", "MIN_PRES",
"AVG_TEMP", "MAX_TEMP", "MIN_TEMP",
"PREC24", "CNT", "PREC20_08", "C1", "PREC08_20", "C2")
with open(targetPath, 'w') as fo:
fo.write(header)
fo.writelines(result)
fo.close()
def queryData(self, db, dt, df_hours=4):
whf = datetime(dt.year, dt.month, dt.day, 0, 0, 0) \
- timedelta(hours=df_hours)
wht = whf + timedelta(hours=24)
cond = "{0} < DATETIME <= {1}".format(whf.strftime("%Y%m%d%H"),
wht.strftime("%Y%m%d%H"))
recs = db.query(cond)
return recs
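    # Worked example of the window above: for dt = date(2015, 6, 1) and df_hours = 16
    # (the "0808" window), whf is 2015-05-31 08:00 and wht is 2015-06-01 08:00,
    # i.e. the query is "2015053108 < DATETIME <= 2015060108".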
def calcDaily(self, sid, dt, hours24, stat_win):
"""
http://www.szmb.gov.cn/quf/2009/08/2017101815192310488.pdf
"""
if (len(hours24) > 24):
self._logger.error(("{1}-{2:0>2d}-{3:0>2d}, \
Station {0} has more than 24 records on").format(
sid, dt.year, dt.month, dt.day))
else:
# statistics pressure
# valid_pressure = hours24.query("1200 > PRES > 600")
# temporary change
valid_pressure = hours24.query(("1200> PRES > 600 \
& HR in [2, 8, 14, 20]"))
# print(valid_pressure)
if len(valid_pressure) == 24:
# ok for 24 hours
avg_pres = valid_pressure["PRES"].mean()
max_pres = valid_pressure["PRES"].max()
min_pres = valid_pressure["PRES"].min()
else:
valid_pressure = hours24.query(("1200> PRES > 600 \
& HR in [2, 8, 14, 20]"))
if len(valid_pressure) == 4:
avg_pres = valid_pressure["PRES"].mean()
max_pres = valid_pressure["PRES"].max()
min_pres = valid_pressure["PRES"].min()
else:
avg_pres = 999999
max_pres = 999999
min_pres = 999999
# self._logger.error(("{1}-{2:0>2d}-{3:0>2d}, "
# "Station {0} miss pressure at"
# "[02, 08, 14, 20]")
# .format(sid, dt.year,
# dt.month, dt.day))
# statistics temperature
# valid_temperature = hours24.query("60 > TEMP > -60")
valid_temperature = hours24.query("60> TEMP > -60 \
& HR in [2, 8, 14, 20]")
# print(valid_temperature)
if len(valid_temperature) == 24:
# ok for 24 hours
avg_temp = valid_temperature["TEMP"].mean()
max_temp = valid_temperature["TEMP"].max()
min_temp = valid_temperature["TEMP"].min()
else:
valid_temperature = hours24.query("60> TEMP > -60 \
& HR in [2, 8, 14, 20]")
if len(valid_temperature) == 4:
avg_temp = valid_temperature["TEMP"].mean()
max_temp = valid_temperature["TEMP"].max()
min_temp = valid_temperature["TEMP"].min()
else:
avg_temp = 999999
max_temp = 999999
min_temp = 999999
# self._logger.error(("{1}-{2:0>2d}-{3:0>2d}, "
# "Station {0} miss temperature at"
# "[02, 08, 14, 20]")
# .format(sid, dt.year,
# dt.month, dt.day))
# statistics precipation
valid_prec = hours24.query("200 > PREC >= 0")
prec24 = valid_prec["PREC"].sum()
prec24_cnt = len(valid_prec)
if prec24_cnt == 0:
prec24 = 999999
am_prec = valid_prec.query("HR <=8 | HR>20")
pm_prec = valid_prec.query("8 < HR <= 20")
prec12_am = am_prec["PREC"].sum()
prec12_am_cnt = len(am_prec)
if prec12_am_cnt == 0:
prec12_am = 999999
prec12_pm = pm_prec["PREC"].sum()
prec12_pm_cnt = len(pm_prec)
if prec12_pm_cnt == 0:
prec12_pm = 999999
if stat_win == "0808" or stat_win == "0832":
rec = ("{:>8}{:>10}{:>6}{:>4}{:>4}{:>10.1f}{:>10.1f}{:>10.1f}"
"{:>10.1f}{:>10.1f}{:>10.1f}"
"{:>10.1f}{:>4d}{:>10.1f}{:>4d}{:>10.1f}{:>4d}\n") \
.format(sid, dt.strftime("%Y%m%d"),
dt.year, dt.month, dt.day,
avg_pres, max_pres, min_pres,
avg_temp, max_temp, min_temp,
prec24, prec24_cnt, prec12_pm,
prec12_pm_cnt, prec12_am, prec12_am_cnt)
else:
rec = ("{:>8}{:>10}{:>6}{:>4}{:>4}{:>10.1f}{:>10.1f}{:>10.1f}"
"{:>10.1f}{:>10.1f}{:>10.1f}"
"{:>10.1f}{:>4d}{:>10.1f}{:>4d}{:>10.1f}{:>4d}\n") \
.format(sid, dt.strftime("%Y%m%d"),
dt.year, dt.month, dt.day,
avg_pres, max_pres, min_pres,
avg_temp, max_temp, min_temp,
prec24, prec24_cnt, prec12_am, prec12_am_cnt,
prec12_pm, prec12_pm_cnt)
return rec
def statisticsMonthly(self, srcPathRoot, targetPathRoot):
self.clearDirectory(targetPathRoot)
filelist = os.listdir(srcPathRoot)
for item in filelist:
srcPath = os.path.join(srcPathRoot, item)
targetPath = os.path.join(targetPathRoot, item)
self.stasticsMonthSingleStatation(item, srcPath, targetPath)
def stasticsMonthSingleStatation(self, sid, srcPath, targetPath):
db = pd.read_table(srcPath, skip_blank_lines=True,
delim_whitespace=True, index_col="DATE")
result = []
# todo: do config the range of loop
y_series = db["YEAR"]
year_begin = y_series.min()
year_end = y_series.max() + 1
for year in range(year_begin, year_end):
for mon in range(1, 13):
cond = "YEAR == {0} & MON == {1}".format(year, mon)
recs = db.query(cond)
if not recs.empty:
mon_rec = self.calcMonthly(sid, year, mon, recs)
result.append(mon_rec)
header = ("{:>8}{:>6}{:>4}{:>10}{:>10}{:>10}{:>10}"
"{:>10}{:>10}{:>10}{:>4}{:>12}{:>6}\n").format(
"SID", "YEAR", "MON",
"AVG_PRES", "MAX_PRES", "MIN_PRES",
"AVG_TEMP", "MAX_TEMP", "MIN_TEMP",
"PREC_MON", "CNT", "PREC24_MON", "CNT24")
with open(targetPath, 'w') as fo:
fo.write(header)
fo.writelines(result)
fo.close()
def calcMonthly(self, sid, year, mon, recs):
if len(recs) > 0:
# statistics pressure
valid_pressure = recs.query("1200 > AVG_PRES > 800")
# print(valid_pressure)
if len(valid_pressure) >= 24:
avg_pres = valid_pressure["AVG_PRES"].mean()
max_pres = valid_pressure["MAX_PRES"].max()
min_pres = valid_pressure["MIN_PRES"].min()
else:
avg_pres = 999999
max_pres = 999999
min_pres = 999999
# self._logger.error(("{1}-{2:0>2d}, "
# "Station {0} miss pressure.")
# .format(sid, year, mon))
# statistics temperature
valid_temperature = recs.query("60 > AVG_TEMP > -60")
# print(valid_temperature)
if len(valid_temperature) >= 24:
avg_temp = valid_temperature["AVG_TEMP"].mean()
max_temp = valid_temperature["MAX_TEMP"].max()
min_temp = valid_temperature["MIN_TEMP"].min()
else:
avg_temp = 999999
max_temp = 999999
min_temp = 999999
# self._logger.error(("{1}-{2:0>2d}, "
# "Station {0} miss temperature")
# .format(sid, year, mon,))
# statistics precipation
valid_prec = recs.query("500 > PREC24 >= 0")
prec_mon = valid_prec["PREC24"].sum()
prec_cnt = len(valid_prec)
if prec_cnt == 0:
prec_mon = 999999
valid_prec = recs.query("500 > PREC24 >= 0 & CNT == 24")
prec24_mon = valid_prec["PREC24"].sum()
prec24_cnt = len(valid_prec)
if prec24_cnt == 0:
prec24_mon = 999999
rec = ("{:>8}{:>6}{:>4}{:>10.1f}{:>10.1f}{:>10.1f}"
"{:>10.1f}{:>10.1f}{:>10.1f}{:>10.1f}{:>4d}"
"{:>12.1f}{:>6d}\n") \
.format(sid, year, mon, avg_pres, max_pres, min_pres,
avg_temp, max_temp, min_temp, prec_mon, prec_cnt,
prec24_mon, prec24_cnt)
return rec
def statisticsYears(self, srcPathRoot, targetPathRoot):
self.clearDirectory(targetPathRoot)
filelist = os.listdir(srcPathRoot)
for item in filelist:
srcPath = os.path.join(srcPathRoot, item)
targetPath = os.path.join(targetPathRoot, item)
self.stasticsYearSingleStatation(item, srcPath, targetPath)
def stasticsYearSingleStatation(self, sid, srcPath, targetPath):
db = pd.read_table(srcPath, skip_blank_lines=True,
delim_whitespace=True)
result = []
# todo: do config the range of loop
y_series = db["YEAR"]
year_begin = y_series.min()
year_end = y_series.max() + 1
# year_begin = 2015
# year_end = 2016
for year in range(year_begin, year_end):
cond = "YEAR == {0}".format(year)
recs = db.query(cond)
if not recs.empty:
mon_rec = self.calcYear(sid, year, recs)
result.append(mon_rec)
header = ("{:>8}{:>6}{:>10}{:>10}{:>10}{:>10}"
"{:>10}{:>10}{:>10}{:>4}{:>10}{:>6}\n").format(
"SID", "YEAR",
"AVG_PRES", "MAX_PRES", "MIN_PRES",
"AVG_TEMP", "MAX_TEMP", "MIN_TEMP",
"PREC_Y", "CNT", "PREC24_Y", "CNT24")
with open(targetPath, 'w') as fo:
fo.write(header)
fo.writelines(result)
fo.close()
def calcYear(self, sid, year, recs):
if len(recs) > 0:
# statistics pressure
valid_pressure = recs.query("1200 > AVG_PRES > 800")
if len(valid_pressure) >= 10:
avg_pres = valid_pressure["AVG_PRES"].mean()
max_pres = valid_pressure["MAX_PRES"].max()
min_pres = valid_pressure["MIN_PRES"].min()
else:
avg_pres = 999999
max_pres = 999999
min_pres = 999999
# self._logger.error(("{1}, Station {0} miss pressure.")
# .format(sid, year))
# statistics temperature
valid_temperature = recs.query("60 > AVG_TEMP > -60")
if len(valid_temperature) >= 10:
avg_temp = valid_temperature["AVG_TEMP"].mean()
max_temp = valid_temperature["MAX_TEMP"].max()
min_temp = valid_temperature["MIN_TEMP"].min()
else:
avg_temp = 999999
max_temp = 999999
min_temp = 999999
# self._logger.error(("{1}, Station {0} miss temperature")
# .format(sid, year))
# statistics precipation
valid_prec = recs.query("5000 > PREC_MON >= 0")
prec_year = valid_prec["PREC_MON"].sum()
prec_cnt = len(valid_prec)
if prec_cnt == 0:
prec_year = 999999
valid_prec = recs.query("5000 > PREC24_MON >= 0")
prec24_year = valid_prec["PREC24_MON"].sum()
prec24_cnt = len(valid_prec)
            if prec24_cnt == 0:
prec24_year = 999999
rec = ("{:>8}{:>6}{:>10.1f}{:>10.1f}{:>10.1f}"
"{:>10.1f}{:>10.1f}{:>10.1f}{:>10.1f}"
"{:>4d}{:>10.1f}{:>6d}\n") \
.format(sid, year, avg_pres, max_pres, min_pres,
avg_temp, max_temp, min_temp, prec_year, prec_cnt,
prec24_year, prec24_cnt)
return rec
def clearDirectory(self, targetRoot):
if os.path.exists(targetRoot) and len(os.listdir(targetRoot)) > 0:
print("\nThe dir of {0} is not empty and will been overrided."
.format(targetRoot))
shutil.rmtree(targetRoot, True)
time.sleep(1)
if not os.path.exists(targetRoot):
os.makedirs(targetRoot)
if __name__ == "__main__":
# testing code
# import sys
# print(sys.argv)
tool = Surf4HoursTool()
import argparse
from ..base.logger import Logger
parser = argparse.ArgumentParser(prog="python -m surf4hourstool",
description="Surf4HoursTool Usage Guide",
prefix_chars="-+")
parser.add_argument("--version",
action="version", version="%(prog)s 0.0.1")
tool.defineArgumentParser(parser)
args = parser.parse_args()
print(args)
logger = Logger("./log/d2s.log")
tool.attachLogger(logger)
targetRoot = args.target
tool.run(args)
else:
print("loading day2stationtool module")
| 2.765625 | 3 |
docs/code/surrogates/pce/plot_pce_sphere.py | SURGroup/UncertaintyQuantification | 0 | 12797582 | """
Sphere function (2 random inputs, scalar output)
======================================================================
In this example, PCE is used to generate a surrogate model for a given set of 2D data.
.. math:: f(x) = x_1^2 + x_2^2
**Description:** Dimensions: 2
**Input Domain:** This function is evaluated on the hypercube :math:`x_i \in [-5.12, 5.12]` for all :math:`i = 1,2`.
**Global minimum:** :math:`f(x^*)=0,` at :math:`x^* = (0,0)`.
**Reference:** <NAME>., & <NAME>. (1978). The global optimization problem: an introduction. Towards global optimization, 2, 1-15.
"""
# %% md
#
# Import necessary libraries.
# %%
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from UQpy.surrogates import *
from UQpy.distributions import Uniform, JointIndependent
# %% md
#
# Define the function.
# %%
def function(x,y):
return x**2 + y**2
# %% md
#
# Create a distribution object, generate samples and evaluate the function at the samples.
# %%
np.random.seed(1)
dist_1 = Uniform(loc=-5.12, scale=10.24)
dist_2 = Uniform(loc=-5.12, scale=10.24)
marg = [dist_1, dist_2]
joint = JointIndependent(marginals=marg)
n_samples = 100
x = joint.rvs(n_samples)
y = function(x[:,0], x[:,1])
# %% md
#
# Visualize the 2D function.
# %%
xmin, xmax = -6,6
ymin, ymax = -6,6
X1 = np.linspace(xmin, xmax, 50)
X2 = np.linspace(ymin, ymax, 50)
X1_, X2_ = np.meshgrid(X1, X2) # grid of points
f = function(X1_, X2_)
fig = plt.figure(figsize=(10,6))
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X1_, X2_, f, rstride=1, cstride=1, cmap='gnuplot2', linewidth=0, antialiased=False)
ax.set_title('True function')
ax.set_xlabel('$x_1$', fontsize=15)
ax.set_ylabel('$x_2$', fontsize=15)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.view_init(20, 140)
fig.colorbar(surf, shrink=0.5, aspect=7)
plt.show()
# %% md
#
# Visualize training data.
# %%
fig = plt.figure(figsize=(10,6))
ax = fig.gca(projection='3d')
ax.scatter(x[:,0], x[:,1], y, s=20, c='r')
ax.set_title('Training data')
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.view_init(20,140)
ax.set_xlabel('$x_1$', fontsize=15)
ax.set_ylabel('$x_2$', fontsize=15)
plt.show()
# %% md
#
# Create an object from the PCE class. Compute PCE coefficients using least squares regression.
# %%
max_degree = 3
polynomial_basis = TotalDegreeBasis(joint, max_degree)
least_squares = LeastSquareRegression()
pce = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares)
pce.fit(x,y)
# %% md
#
# Compute PCE coefficients using LASSO.
# %%
polynomial_basis = TotalDegreeBasis(joint, max_degree)
lasso = LassoRegression()
pce2 = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=lasso)
pce2.fit(x,y)
# %% md
#
# Compute PCE coefficients with Ridge regression.
# %%
polynomial_basis = TotalDegreeBasis(joint, max_degree)
ridge = RidgeRegression()
pce3 = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=ridge)
pce3.fit(x,y)
# %% md
#
# PCE surrogate is used to predict the behavior of the function at new samples.
# %%
n_test_samples = 10000
x_test = joint.rvs(n_test_samples)
y_test = pce.predict(x_test)
# %% md
#
# Plot PCE prediction.
# %%
fig = plt.figure(figsize=(10,6))
ax = fig.gca(projection='3d')
ax.scatter(x_test[:,0], x_test[:,1], y_test, s=1)
ax.set_title('PCE predictor')
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.view_init(20,140)
ax.set_xlim(-6,6)
ax.set_ylim(-6,6)
ax.set_xlabel('$x_1$', fontsize=15)
ax.set_ylabel('$x_2$', fontsize=15)
plt.show()
# %% md
# Error Estimation
# -----------------
# Construct a validation dataset and get the validation error.
# %%
# validation sample
n_samples = 150
x_val = joint.rvs(n_samples)
y_val = function(x_val[:,0], x_val[:,1])
# PCE predictions
y_pce = pce.predict(x_val).flatten()
y_pce2 = pce2.predict(x_val).flatten()
y_pce3 = pce3.predict(x_val).flatten()
# mean relative validation errors
error = np.sum(np.abs((y_val - y_pce)/y_val))/n_samples
error2 = np.sum(np.abs((y_val - y_pce2)/y_val))/n_samples
error3 = np.sum(np.abs((y_val - y_pce3)/y_val))/n_samples
print('Mean rel. error, LSTSQ:', error)
print('Mean rel. error, LASSO:', error2)
print('Mean rel. error, Ridge:', error3)
# %% md
# Moment Estimation
# -----------------
# Returns mean and variance of the PCE surrogate.
# %%
n_mc = 1000000
x_mc = joint.rvs(n_mc)
y_mc = function(x_mc[:,0], x_mc[:,1])
mean_mc = np.mean(y_mc)
var_mc = np.var(y_mc)
print('Moments from least squares regression :', pce.get_moments())
print('Moments from LASSO regression :', pce2.get_moments())
print('Moments from Ridge regression :', pce3.get_moments())
print('Moments from Monte Carlo integration: ', mean_mc, var_mc) | 3.453125 | 3 |
Greedy/2847.py | esdx245/algorithms | 0 | 12797583 | n = int(input())
lista= []
for i in range(n):
lista.append(int(input()))
tempmax = lista[-1]
count = 0
for i in range(n-2,-1,-1):
if lista[i] >= tempmax:
temp = lista[i] -tempmax +1
count += temp
lista[i] -= temp
tempmax = lista[i]
print(count) | 3.046875 | 3 |
linear/sym1.py | shirai708/qiita | 1 | 12797584 | <gh_stars>1-10
import matplotlib.pyplot as plt
vq = []
vp = []
h = 0.05
q = 1.0
p = 0.0
for i in range(1000):
p = p - h * q
q = q + h * p
vp.append(p)
vq.append(q)
plt.plot(vq, vp)
plt.savefig("sym1.png")
| 2.71875 | 3 |
python/tvm/topi/cuda/stft.py | shengxinhu/tvm | 4,640 | 12797585 | <gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks, unused-argument
"""STFT operator"""
from math import pi
import tvm
from tvm import te, tir
from ..utils import ceil_div
def _get_max_threads(batch_row):
max_threads = tvm.target.Target.current(allow_none=False).max_num_threads
return tir.min(batch_row, max_threads)
def stft(
data,
n_fft,
hop_length,
win_length,
window,
normalized,
onesided,
output_shape,
):
"""
The STFT computes the Fourier transform of short overlapping windows of the input.
This gives frequency components of the signal as they change over time.
Parameters
----------
data : relay.Expr
Either a 1-D tensor or a 2-D batch tensor.
n_fft : int
The size of Fourier transform
hop_length : int
The distance between neighboring sliding window frames
win_length : int
The size of window frame and STFT filter
window : relay.Expr
A 1-D tensor window frame
normalized : bool
Whether to return the normalized STFT results
onesided : bool
Whether to return onesided result or fill with conjugate symmetry
Returns
-------
output : relay.Expr
Tensor containing the STFT result
Examples
--------
.. code-block:: python
data = [1, 2, 3, 4, 5, 6]
window = [4, 3, 2]
[n_fft, hop_length, win_length, normalized, onesided] = [3, 3, 3, False, True]
relay.stft(data, n_fft, hop_length, win_length, window, normalized, onesided)
-> [[[15.0000, 0.0000], [34.0000, 0.0000]], [[ 4.5000, 0.8660], [ 1.0000, -1.7321]]]
"""
def gen_ir(
data_ptr,
n_fft,
hop_length,
win_length,
window_ptr,
normalized,
onesided,
output_ptr,
):
ib = tir.ir_builder.create()
data = ib.buffer_ptr(data_ptr)
window = ib.buffer_ptr(window_ptr)
output = ib.buffer_ptr(output_ptr)
max_threads = _get_max_threads(output_ptr.shape[0] * output_ptr.shape[1])
output_size = output_ptr.shape[0] * output_ptr.shape[1] * output_ptr.shape[2]
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(output_size, max_threads)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
with ib.if_scope(tid < output_size):
matrix_size = output_ptr.shape[1] * output_ptr.shape[2]
batch = tir.floordiv(tid, matrix_size)
row = tir.floordiv(tir.indexmod(tid, matrix_size), output_ptr.shape[2])
col = tir.indexmod(tir.indexmod(tid, matrix_size), output_ptr.shape[2])
output[batch, row, col, 0] = tir.Cast(data_ptr.dtype, 0)
output[batch, row, col, 1] = tir.Cast(data_ptr.dtype, 0)
with ib.for_range(0, win_length) as wlen:
output[batch, row, col, 0] += (
window[wlen]
* data[batch, col * hop_length + wlen]
* tir.cos(2 * pi * row * wlen / win_length)
)
output[batch, row, col, 1] -= (
window[wlen]
* data[batch, col * hop_length + wlen]
* tir.sin(2 * pi * row * wlen / win_length)
)
with ib.if_scope(normalized):
output[batch, row, col, 0] /= tir.sqrt(tir.const(n_fft, "float32"))
output[batch, row, col, 1] /= tir.sqrt(tir.const(n_fft, "float32"))
return ib.get()
output_buf = tir.decl_buffer(output_shape, data.dtype, "output_buf")
return te.extern(
output_shape,
[data, window],
lambda ins, outs: gen_ir(
ins[0], n_fft, hop_length, win_length, ins[1], normalized, onesided, outs[0]
),
dtype=[data.dtype],
out_buffers=[output_buf],
name="stft_cuda",
tag="stft_cuda",
)
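# Usage sketch mirroring the docstring example (the "cuda" target and tensor names
# are assumptions; with n_fft=3, hop_length=3 and a length-6 signal the output
# shape is (batch, n_fft // 2 + 1, n_frames, 2) = (1, 2, 2, 2)):
#   with tvm.target.Target("cuda"):
#       data = te.placeholder((1, 6), name="data", dtype="float32")
#       window = te.placeholder((3,), name="window", dtype="float32")
#       out = stft(data, 3, 3, 3, window, False, True, (1, 2, 2, 2))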
| 1.84375 | 2 |
src/Gon/stock_info_spyder.py | somewheve/Listed-company-news-crawl-and-text-analysis | 1 | 12797586 | <filename>src/Gon/stock_info_spyder.py
"""
https://waditu.com/document/2
"""
import __init__
from spyder import Spyder
from Kite import config
import tushare as ts
ts.set_token(config.TUSHARE_TOKEN)
import akshare as ak
class StockInfoSpyder(Spyder):
def __init__(self):
super(StockInfoSpyder, self).__init__()
self.db = self.db_obj.create_db(config.TUSHARE_DATABASE_NAME)
self.col_basic_info = self.db_obj.create_col(self.db, config.COLLECTION_NAME_STOCK_BASIC_INFO)
def get_stock_code_info(self):
stock_info_a_code_name_df = ak.stock_info_a_code_name()
for _id in range(stock_info_a_code_name_df.shape[0]):
_dict = stock_info_a_code_name_df.iloc[_id].to_dict()
            self.col_basic_info.insert_one(_dict)
def get_historical_news(self):
pass
if __name__ == "__main__":
| 2.515625 | 3 |
on/ottawa/ed/definition.py | ajah/represent-canada-data | 0 | 12797587 | from datetime import date
import boundaries
boundaries.register('Ottawa wards',
domain='Ottawa, ON',
last_updated=date(2010, 8, 27),
name_func=boundaries.dashed_attr('WARD_EN'),
id_func=boundaries.attr('WARD_NUM'),
authority='City of Ottawa',
source_url='http://ottawa.ca/online_services/opendata/info/wards2010_en.html',
licence_url='http://ottawa.ca/online_services/opendata/terms_en.html',
data_url='http://ottawa.ca/online_services/opendata/data/wards2010.zip',
notes='Convert the features to 2D with: ogr2ogr -f "ESRI Shapefile" -overwrite . Wards_2010.shp -nlt POLYGON',
encoding='iso-8859-1',
)
| 2.015625 | 2 |
title_cleaner_test.py | susannahsoon/oldperth | 302 | 12797588 | from nose.tools import *
import title_cleaner
TRUTH = [
(True, 'Manhattan: 1st Ave. - 34th St. E.'),
(True, 'Queens: Hoyt Avenue - 24th Street'),
(False, "Queens: Flushing Meadow Park - New York World's Fair of 1939-40 - [Industrial exhibits.]"),
(False, 'Fifth Avenue - 90th Street, southeast corner'),
(False, 'Recreation and hobbies - Miscellaneous - Children.'),
(True, 'Manhattan: 59th Street - 6th Avenue'),
(True, 'Queens: Queens Boulevard - Junction Boulevard'),
(True, 'Manhattan: 50th Street (West) - 5th Avenue'),
(True, 'Manhattan: 5th Avenue - 78th Street'),
(True, 'Manhattan: 5th Avenue - 33rd Street'),
(True, 'Queens: Queens Boulevard - 62nd Avenue'),
(False, 'Manhattan: Battery Park.'),
(False, 'Manhattan: Central Park - The Sailboat Pool'),
(True, 'Queens: Colonial Avenue - 62nd Drive'),
(True, 'Queens: Woodhaven Blvd - Fleet Street'),
(True, 'Richmond: New Dorp Lane - Cedar Grove Avenue')
]
def test_clean_title():
for correct, title in TRUTH:
assert correct == title_cleaner.is_pure_location(title), '%s %s' % (correct, title)
| 2.515625 | 3 |
src/AoC_2015/d16_finding_Sue_with_list_comprehension/finding_sue.py | derailed-dash/Advent-of-Code | 9 | 12797589 | <reponame>derailed-dash/Advent-of-Code<filename>src/AoC_2015/d16_finding_Sue_with_list_comprehension/finding_sue.py
"""
Author: Darren
Date: 26/02/2021
Solving https://adventofcode.com/2015/day/16
500 Sues. Each with different known attributes, and potentially other forgetten attributes.
Examine list of k:v pairs determined from item received from Sue,
using the My First Crime Scene Analysis Machine (MFCSAM).
The MFCSAM produces properties, which we store as a dict.
We also have a list of k:v attributes that we can remember from 500 Sues.
But where we don't know a value, the key is absent.
Solution:
Part 1:
Iterate through our k:V from the MFCSAM.
For each Sue:
If the k is not present, this Sue is a candidate.
If k is present and the value matches, this Sue is a candidate.
Part 2:
Cats and trees readings indicates that there are greater than that many
Pomeranians and goldfish readings indicate that there are fewer than that many
"""
import os
import time
SCRIPT_DIR = os.path.dirname(__file__)
INPUT_FILE = "input/input.txt"
SAMPLE_INPUT_FILE = "input/sample_input.txt"
CATS = 'cats'
TREES = 'trees'
POMS = 'pomeranians'
FISH = 'goldfish'
known_attribs = {
'children': 3,
CATS: 7,
'samoyeds': 2,
POMS: 3,
'akitas': 0,
'vizslas': 0,
FISH: 5,
TREES: 3,
'cars': 2,
'perfumes': 1
}
def main():
# input_file = os.path.join(SCRIPT_DIR, SAMPLE_INPUT_FILE)
input_file = os.path.join(SCRIPT_DIR, INPUT_FILE)
with open(input_file, mode="rt") as f:
data = f.read().splitlines()
sue_list = process_input(data)
# Part 1
sue_candidates = sue_list.copy()
# we need to find any Sue where k:v is an exact match
# but also consider any Sue where the k is not present as we don't know the v
for known_attrib, known_attrib_value in known_attribs.items():
sues_missing_attrib = [sue for sue in sue_candidates if known_attrib not in sue[1]]
sues_matching_attrib = [sue for sue in sue_candidates if known_attrib in sue[1]
and known_attrib_value == sue[1][known_attrib]]
sue_candidates = sues_matching_attrib + sues_missing_attrib
result = [sue[0] for sue in sue_candidates]
print(f"Part 1: Aunt Sue candidates matching MFCSAM attributes: {result}")
# Part 2
sue_candidates = sue_list.copy()
for known_attrib, known_attrib_value in known_attribs.items():
sues_missing_attrib = [sue for sue in sue_candidates if known_attrib not in sue[1]]
sues_matching_attrib = []
if known_attrib in [CATS, TREES]:
sues_matching_attrib = [sue for sue in sue_candidates if known_attrib in sue[1]
and known_attrib_value < sue[1][known_attrib]]
elif known_attrib in [POMS, FISH]:
sues_matching_attrib = [sue for sue in sue_candidates if known_attrib in sue[1]
and known_attrib_value > sue[1][known_attrib]]
else:
sues_matching_attrib = [sue for sue in sue_candidates if known_attrib in sue[1]
and known_attrib_value == sue[1][known_attrib]]
sue_candidates = sues_matching_attrib + sues_missing_attrib
result = [sue[0] for sue in sue_candidates]
print(f"Part 2: Aunt Sue candidates matching MFCSAM attributes: {result}")
def process_input(data):
# Input looks like:
# Sue 1: cars: 9, akitas: 3, goldfish: 0
# Return list. Each item is [i, {k:v, k:v...}]
sue_list = []
line: str
for line in data:
name, attribs = line[4:].split(":", 1)
properties = [x.strip().split(":") for x in attribs.split(",")]
props_dict = {prop[0]: int(prop[1]) for prop in properties}
sue_list.append([int(name), props_dict])
return sue_list
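# e.g. process_input(["Sue 1: cars: 9, akitas: 3, goldfish: 0"])
# returns [[1, {'cars': 9, 'akitas': 3, 'goldfish': 0}]]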
if __name__ == "__main__":
t1 = time.perf_counter()
main()
t2 = time.perf_counter()
print(f"Execution time: {t2 - t1:0.4f} seconds")
| 2.890625 | 3 |
Tutorial code/Week 8_Functions-UserDefined.py | Yixin1103/inf1340-programmingfordatascience-fa21 | 8 | 12797590 | <gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
# In[2]:
def square_root( sq_rand: float ) -> float:
ret_val = sq_rand
diff = sq_rand - ret_val*ret_val
while abs(diff) > 0.000001:
        # Newton's (Babylonian) update: average ret_val with sq_rand / ret_val
        ret_val = (ret_val + sq_rand / ret_val) / 2.0
        diff = sq_rand - ret_val*ret_val
return ret_val
# In[3]:
square_root(144)
# In[4]:
def display_welcome():
"""
    Prints the string 'Welcome to MinMax!'
    >>> display_welcome()
    Welcome to MinMax!
"""
print("Welcome to MinMax!")
#This function is to set values for Universal Price Codes (UPC) per product to scan
def get_barcode(item_scanned):
"""
(str) -> str
Returns the type of item that has been scanned based on the UPC of the item that has been scanned i.e. the "item_scanned" parameter
>>>get_barcode('111111')
'SINGLES'
>>>get_barcode('666666')
'SMALL'
>>>get_barcode('242424')
'LARGE'
"""
if item_scanned == '111111':
return 'SINGLES'
elif item_scanned == '666666':
return 'SMALL'
elif item_scanned == '242424':
return 'LARGE'
elif item_scanned == '0':
return 'done'
else: print ("Oops! You entered an unrecognized Universal Price Code (UPC). Please enter an appropriate UPC for your item")
#This function calculates the subtotal as a running total, which updates each time an item is scanned
def calculate_subtotal(product_scanned):
"""
(str) -> int
Returns the subtotal of the customer's purchase before tax by using the price of the item that has been scanned
(i.e. the parameter "product_scanned")which is determined by using the UPC_SINGLE, UPC_SMALL and UPC_LARGE variables
>>> calculate_subtotal('111111')
1
>>> calculate_subtotal('666666')
5
>>> calculate_subtotal('242424')
19
"""
subtotal_before_tax = 0
PRICE_SINGLE = 1
PRICE_SMALL = 5
PRICE_LARGE = 19
UPC_SINGLE = '111111'
UPC_SMALL = '666666'
UPC_LARGE = '242424'
if product_scanned == UPC_SINGLE:
subtotal_before_tax +=PRICE_SINGLE
elif product_scanned == UPC_SMALL:
subtotal_before_tax +=PRICE_SMALL
elif product_scanned == UPC_LARGE:
subtotal_before_tax += PRICE_LARGE
return subtotal_before_tax
#This function gets how much the customer gives i.e. input("enter your stuff")
def get_amount_tendered():
"""
Returns either the end of this program or the value of the amount tendered by the customer by using an input prompt.
If the cashier hits '0', the program is ended due to cancellation. If the customer provides any other value, this is
captured as the amount tendered by the customer i.e. the 'amount_tendered' variable
>>> get_amount_tendered(30)
30
>>> get_amount_tendered(40)
40
>>>get_amount_tendered(50)
50
>>>get_amount_tendered(0)
Thanks for shopping at MinMax! You have cancelled your order. If you'd like to try again, please repeat the process and scan your items again.
"""
amount_tendered = input("Using the total displayed, please pay the complete amount owed via cash only. If you'd like to cancel this purchase, just hit 0 again.")
if amount_tendered == 0:
return "end"
else:
return amount_tendered
#This function displays the change given to the customer
def display_change(total_bill,amount_tendered):
"""
(float,float) -> float
Returns the difference as the variable "difference" in value between total_bill and amount_tendered, thus indicating
how much change is owed to the customer, or still owed to the MinMax store. The variable "difference" is formatted
to return as a float with two decimal points, including zeroes (i.e. 10.50 instead of 10.5).
"difference" is then rounded to the nearest 5 cents using the following nickel rounding scheme standard rules in Canada:
0.01 to 0.02 will round down to 0.00. 0. 03 to 0.04 will round up to 0.05. 0.06 to 0.07 will round down to 0.05.
0.08 to 0.09 will round up to 0.10
>>> display_change(10.0,7.97)
2.05
>>> display_change(10.5,2.0)
8.50
>>> display_change(10.7,1.4)
9.30
"""
    difference = abs(total_bill-amount_tendered)
    # round to the nearest 5 cents (nickel rounding scheme described in the docstring)
    difference = round(0.05 * round(difference / 0.05), 2)
    return (format(difference, '.2f'))
#This function calculates the total cost as a running total
def calculate_total_bill(subtotal):
"""
(float) -> float
subtotal is passed through as an input
HST_RATE variable in this function is multiplied by inputted variable
Function returns the resulting variable "total", rounded and formatted to 2 decimal points.
Variable "total" is then rounded to the nearest 5 cents using the following nickel rounding scheme standard rules in Canada:
0.01 to 0.02 will round down to 0.00. 0. 03 to 0.04 will round up to 0.05. 0.06 to 0.07 will round down to 0.05.
0.08 to 0.09 will round up to 0.10
>>> calculate_total_bill(3.0)
3.40
>>> calculate_total_bill(6.67)
7.55
>>> calculate_total_bill(2.05)
2.30
"""
HST_RATE = 1.13
total_bill = subtotal *HST_RATE
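    # Worked example of the nickel rounding applied below: subtotal 3.0 -> 3.0 * 1.13 = 3.39;
    # 3.39 / 0.05 = 67.8 rounds to 68, and 68 * 0.05 = 3.40 (matching the doctest above).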
return format(round(0.05 * round(float(total_bill)/0.05), 2), '.2f')
#This function displays the final total bill
def display_totalbill():
# Returns a series of print functions for the total bill which includes: subtotal before tax, HST added to the bill, Total price before rounding to the nearest nickel, total price after rounding, payment from the customer , any change owed to the customer and a farewell greeting
#All values returned are displayed with two decimal points in the format of $0.00
print("\nHere is your bill! \nSubtotal: $", format(subtotal_before_tax, '.2f'))
print("HST: $", format(0.13 * subtotal_before_tax, '.2f'))
print("Total price before rounding: $", format(subtotal_before_tax * 1.13, '.2f'))
print("Total price after rounding: $", format(total_bill, '.2f'))
print("Payment: $", format(amount_tendered, '.2f'))
print("----------------\nChange: $", display_change(amount_tendered, total_bill))
print("\nThank you for shopping with MinMax!")
import sys
#The main function starts here
if __name__ == "__main__":
#Sets the values of subtotal_before_tax and item to be used in the upcoming loops
subtotal_before_tax = 0
amount_tendered = 0
item = True
#This displays the welcome sign to the MinMax Store
display_welcome()
#This while loop represents the scanning input, the cashier will continue to scan items until he is done (i.e. hits 0)
while get_barcode(item)!= 'done':
item = input("Scan your items that you would like to purchase here, hit 0 when you're ready to finish up! ")
subtotal_before_tax += calculate_subtotal(item)
# As the loop continues, the customer's subtotal so far will continue to accumulate and show on the screen for them to view
print("Your subtotal so far is: $", format(subtotal_before_tax,'.2f'))
#Once the loop is over and 0 has been pressed (because 0 means done), the total price ('total_bill') after HST is shown to the customer
#'total_bill' is rounded to the nearest 5 cents using the nickel rounding scheme mentioned already
total_bill = float(calculate_total_bill(subtotal_before_tax))
print("\nAfter taxes, you owe: $",format(round(0.05 * round((total_bill) / 0.05), 2), '.2f'))
#Sets the value for the amount of change either owed by the customer or given to the customer
amount_of_change = round(0.05*round(float(amount_tendered - total_bill)/0.05),2)
#This while loop represents the payment for the customer, it repeats until the full amount of the bill is paid then thanks the customer and provides a final receipt
#All values returned are displayed with two decimal points in the format of $0.00
while amount_of_change < 0:
amount_tendered = float(get_amount_tendered())
#If customer enters 0, the the order is cancelled. The customer can repeat the process again by re-running the program.
if amount_tendered == 0:
sys.exit("Thanks for shopping at MinMax! You have cancelled your order. If you'd like to try again, please repeat the process and scan your items again.")
amount_of_change = round(0.05*round(float(amount_tendered - total_bill)/0.05),2)
#If the amount tendered by the customer is less than the cost of the total bill, the customer is prompted to try again to pay full amount
if amount_of_change < 0:
print("Sorry about that! You are short by: $",format(abs(amount_of_change),'.2f'),"Please try again and enter the full amount of $",total_bill)
#If the customer pays the full amount owed on the total bill, the receipt of the total bill is displayed and the program ends
elif amount_of_change == 0:
print ("You've entered the full amount owed!")
display_totalbill()
#If the customer pays more than the full amount owed on the total bill, the receipt of the total bil is displayed, change is given to the customer and the program ends.
elif amount_of_change > 0:
print ("\nHere is your change!: $",display_change(amount_tendered,total_bill))
display_totalbill()
# In[ ]:
| 4.09375 | 4 |
PocketAlgorithm_v1.py | appielife/Machine-Learning--Perceptron-Pocket-Linear-and-Logical-Regresstion-Algorithm | 0 | 12797591 | <reponame>appielife/Machine-Learning--Perceptron-Pocket-Linear-and-Logical-Regresstion-Algorithm<filename>PocketAlgorithm_v1.py
'''
Author:
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
'''
import numpy as np
import matplotlib.pyplot as plt
import copy
class PocketAlgorithm:
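    """Pocket version of the perceptron: performs standard perceptron weight updates
    while keeping a copy of the best weights (fewest misclassified points) seen so far."""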
def __init__(self, datapoints, no_of_inputs, iteration=7000, learning_rate=0.0001):
self.iteration = iteration
self.learning_rate = learning_rate
self.weights = np.random.normal(0, 0.1, no_of_inputs + 1)
self.datapoints = datapoints
self.plotResult = list()
self.bestResult = float("inf")
self.bestWeights = np.array
def predict(self, inputs):
summation = np.dot(inputs, self.weights[1:]) + self.weights[0]
if summation > 0:
activation = 1
else:
activation = -1
return activation
def train(self):
training_inputs, labels = self.datapoints[:,:-2], self.datapoints[:,-1:]
misclassified = 1
iteration = 0
while iteration < self.iteration:
misclassified = 0
iteration += 1
for inputs, label in zip(training_inputs, labels):
prediction = self.predict(inputs)
error_rate = 1 if label == 1 else -1
if (label == 1 and prediction == -1) or (label == -1 and prediction == 1):
misclassified += 1
self.weights[1:] += self.learning_rate * error_rate * inputs
self.weights[0] += self.learning_rate * error_rate
self.plotResult.append(misclassified)
if misclassified < self.bestResult:
self.bestResult = misclassified
self.bestWeights = copy.deepcopy(self.weights)
if iteration % 500 == 0:
print("Iteration {}, misclassified points {}, Evaluation {}%".format(iteration, misclassified, self.evaluate()))
print("")
print("======== Result ========= ")
print("Iteration {}, misclassified points {}".format(iteration, misclassified))
print("Evaluation {}%".format(self.evaluate()))
def evaluate(self):
correct = 0
training_inputs, labels = self.datapoints[:,:-2], self.datapoints[:,-1:]
for inputs, label in zip(training_inputs, labels):
prediction = self.predict(inputs)
if (label == 1 and prediction == 1) or (label == -1 and prediction == -1):
correct += 1
_acc = correct / float(len(training_inputs)) * 100.0
return _acc
def plot(self):
total_data = len(self.datapoints)
with np.printoptions(precision=7, suppress=True):
print("Minimum Misclassified Points/Best Result: ", self.bestResult)
print("Weight After Final iteration: ", self.weights.transpose())
print("Best Weights of Pocket: ", self.bestWeights.transpose())
print("Best Accuracy of Pocket: {}%".format(((total_data - self.bestResult) / float(total_data)) * 100))
plt.plot(np.arange(0, self.iteration), self.plotResult)
plt.xlabel("Iterations")
plt.ylabel("Misclassified Points")
plt.axis([0, self.iteration, 800, 1200])
plt.show()
def getInputData(filename):
data = np.genfromtxt(filename, delimiter=',')
return data
if __name__ == '__main__':
data_points = np.array(getInputData('classification.txt'))
no_of_inputs = 3
pck = PocketAlgorithm(data_points, no_of_inputs)
pck.train()
pck.evaluate()
pck.plot()
| 3.109375 | 3 |
cogs/events.py | est73/cog-example | 0 | 12797592 | from discord.ext import commands
class Events(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print('Logged in as')
print(self.bot.user.name)
print(self.bot.user.id)
print('------')
def setup(bot):
bot.add_cog(Events(bot))
| 2.59375 | 3 |
itdagene/app/company/migrations/0017_auto_20160315_1953.py | itdagene-ntnu/itdagene | 9 | 12797593 | <reponame>itdagene-ntnu/itdagene
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("company", "0016_auto_20160315_1950")]
operations = [
migrations.AlterField(
model_name="company",
name="payment_email",
field=models.EmailField(
max_length=75, verbose_name="payment email", blank=True
),
preserve_default=True,
)
]
| 1.3125 | 1 |
rfpy/scripts/rfpy_hk.py | wsja/RfPy | 21 | 12797594 | <reponame>wsja/RfPy<filename>rfpy/scripts/rfpy_hk.py
#!/usr/bin/env python
# Copyright 2019 <NAME>
#
# This file is part of RfPy.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Import modules and functions
import numpy as np
import pickle
import stdb
from obspy.clients.fdsn import Client
from obspy.core import Stream, UTCDateTime
from rfpy import binning, plotting, HkStack
from pathlib import Path
from argparse import ArgumentParser
from os.path import exists as exist
from numpy import nan
def get_hk_arguments(argv=None):
"""
Get Options from :class:`~optparse.OptionParser` objects.
This function is used for data processing on-the-fly (requires web connection)
"""
parser = ArgumentParser(
usage="%(prog)s [arguments] <station database>",
description="Script used to process receiver function data " +
"for H-k stacking.")
# General Settings
parser.add_argument(
"indb",
help="Station Database to process from.",
type=str)
parser.add_argument(
"--keys",
action="store",
type=str,
dest="stkeys",
default="",
help="Specify a comma separated list of station keys for " +
"which to perform the analysis. These must be " +
"contained within the station database. Partial keys will " +
"be used to match against those in the dictionary. For " +
"instance, providing IU will match with all stations in " +
"the IU network [Default processes all stations in the database]")
parser.add_argument(
"-v", "-V", "--verbose",
action="store_true",
dest="verb",
default=False,
help="Specify to increase verbosity.")
parser.add_argument(
"-O", "--overwrite",
action="store_true",
dest="ovr",
default=False,
help="Force the overwriting of pre-existing data. " +
"[Default False]")
parser.add_argument(
"-L", "--long-name",
action="store_true",
dest="lkey",
default=False,
help="Force folder names to use long-key form (NET.STN.CHN). " +
"Default behaviour uses short key form (NET.STN) for the folder " +
"names, regardless of the key type of the database."
)
# Event Selection Criteria
TimeGroup = parser.add_argument_group(
title="Time Settings",
description="Settings associated with refining " +
"the times to include in searching for receiver function data")
TimeGroup.add_argument(
"--start",
action="store",
type=str,
dest="startT",
default="",
help="Specify a UTCDateTime compatible string representing " +
"the start time for the search. This will override any " +
"station start times. [Default start date of station]")
TimeGroup.add_argument(
"--end",
action="store",
type=str,
dest="endT",
default="",
help="Specify a UTCDateTime compatible string representing " +
"the end time for the search. This will override any " +
"station end times [Default end date of station]")
PreGroup = parser.add_argument_group(
title='Pre-processing Settings',
description="Options for pre-processing of receiver function " +
"data prior to H-k stacking")
PreGroup.add_argument(
"--binlim",
action="store",
type=float,
dest="binlim",
default=1,
        help="Specify the minimum number of RFs in each bin. [Default 1]")
PreGroup.add_argument(
"--bp",
action="store",
type=str,
dest="bp",
default=None,
help="Specify the corner frequencies for the bandpass filter. " +
"[Default 0.05,0.5]")
PreGroup.add_argument(
"--nbaz",
action="store",
dest="nbaz",
type=int,
default=36,
help="Specify integer number of back-azimuth bins to consider. " +
"[Default 36]")
PreGroup.add_argument(
"--nslow",
action="store",
dest="nslow",
type=int,
default=40,
help="Specify integer number of slowness bins to consider. " +
"[Default 40]")
PreGroup.add_argument(
"--snr",
action="store",
type=float,
dest="snr",
default=-9999.,
help="Specify the SNR threshold for extracting receiver functions. " +
"[Default None]")
PreGroup.add_argument(
"--snrh",
action="store",
type=float,
dest="snrh",
default=-9999,
help="Specify the horizontal component SNR threshold for " +
"extracting receiver functions. [Default None]")
PreGroup.add_argument(
"--cc",
action="store",
type=float,
dest="cc",
default=-1.,
help="Specify the CC threshold for extracting receiver functions. " +
"[Default None]")
PreGroup.add_argument(
"--no-outlier",
action="store_true",
dest="no_outl",
default=False,
help="Set this option to delete outliers based on the MAD " +
"on the variance. [Default False]")
PreGroup.add_argument(
"--slowbound",
action="store",
dest="slowbound",
type=str,
default=None,
        help="Specify a list of two floats with minimum and maximum " +
"bounds on slowness (s/km). [Default [0.04, 0.08]]")
PreGroup.add_argument(
"--bazbound",
action="store",
dest="bazbound",
type=str,
default=None,
        help="Specify a list of two floats with minimum and maximum " +
"bounds on back azimuth (degrees). [Default [0, 360]]")
PreGroup.add_argument(
"--pws",
action="store_true",
dest="pws",
default=False,
help="Set this option to use phase-weighted stacking during binning " +
" [Default False]")
PreGroup.add_argument(
"--phase",
action="store",
type=str,
dest="phase",
default='allP',
help="Specify the phase name to plot. " +
"Options are 'P', 'PP', 'allP', 'S', 'SKS' or 'allS'. " +
"[Default 'allP']")
PreGroup.add_argument(
"--copy",
action="store_true",
dest="copy",
default=False,
help="Set this option to use a copy of the radial component " +
"filtered at different corners for the Pps and Pss phases. " +
"[Default False]")
PreGroup.add_argument(
"--bp-copy",
action="store",
dest="bp_copy",
type=str,
default=None,
        help="Specify a list of two floats with minimum and maximum " +
"frequency for the copies stream (Hz). [Default [0.05, 0.35]]")
HKGroup = parser.add_argument_group(
title='Settings for H-k Stacking',
description="Specify parameters of H-k search, including" +
"bounds on search, weights, type of stacking, etc.")
HKGroup.add_argument(
"--hbound",
action="store",
type=str,
dest="hbound",
default=None,
        help="Specify a list of two floats with minimum and maximum " +
"bounds on Moho depth (H, in km). [Default [20., 50.]]")
HKGroup.add_argument(
"--dh",
action="store",
type=float,
dest="dh",
default=0.5,
help="Specify search interval for H (km). [Default 0.5]")
HKGroup.add_argument(
"--kbound",
action="store",
type=str,
dest="kbound",
default=None,
        help="Specify a list of two floats with minimum and maximum " +
"bounds on Vp/Vs (k). [Default [1.56, 2.1]]")
HKGroup.add_argument(
"--dk",
action="store",
type=float,
dest="dk",
default=0.02,
help="Specify search interval for k. [Default 0.02]")
HKGroup.add_argument(
"--weights",
action="store",
type=str,
dest="weights",
default=None,
        help="Specify a list of three floats for the Ps, Pps and Pss " +
"weights in final stack. [Default [0.5, 2., -1.]]")
HKGroup.add_argument(
"--type",
action="store",
type=str,
dest="typ",
default="sum",
help="Specify type of final stacking. Options are: 'sum' for " +
"a weighted average (using weights), or 'product' for the product " +
"of positive values in stacks. [Default 'sum']")
HKGroup.add_argument(
"--save",
action="store_true",
dest="save",
default=False,
help="Set this option to save the HkStack object to file. " +
"[Default doesn't save]")
# Constants Settings
ModelGroup = parser.add_argument_group(
title='Model Settings',
description="Miscellaneous default values and settings")
ModelGroup.add_argument(
"--vp",
action="store",
type=float,
dest="vp",
default=6.0,
help="Specify mean crustal Vp (km/s). [Default 6.0]")
ModelGroup.add_argument(
"--strike",
action="store",
type=float,
dest="strike",
default=None,
help="Specify the strike of dipping Moho. [Default None]")
ModelGroup.add_argument(
"--dip",
action="store",
type=float,
dest="dip",
default=None,
help="Specify the dip of dipping Moho. [Default None]")
PlotGroup = parser.add_argument_group(
title='Settings for plotting results',
description="Specify parameters for plotting the H-k stacks.")
PlotGroup.add_argument(
"--plot",
action="store_true",
dest="plot",
default=False,
help="Set this option to produce a plot of the stacks [Default " +
"does not produce plot]")
PlotGroup.add_argument(
"--save-plot",
action="store_true",
dest="save_plot",
default=False,
help="Set this option to save the plot [Default doesn't save]")
PlotGroup.add_argument(
"--title",
action="store",
type=str,
dest="title",
default="",
help="Specify plot title [Default has no title]")
PlotGroup.add_argument(
"--format",
action="store",
type=str,
dest="form",
default="png",
help="Specify format of figure. Can be any one of the valid" +
"matplotlib formats: 'png', 'jpg', 'eps', 'pdf'. [Default 'png']")
args = parser.parse_args(argv)
# Check inputs
if not exist(args.indb):
parser.error("Input file " + args.indb + " does not exist")
# create station key list
if len(args.stkeys) > 0:
args.stkeys = args.stkeys.split(',')
# construct start time
if len(args.startT) > 0:
try:
args.startT = UTCDateTime(args.startT)
except:
parser.error(
"Cannot construct UTCDateTime from start time: " +
args.startT)
else:
args.startT = None
# construct end time
if len(args.endT) > 0:
try:
args.endT = UTCDateTime(args.endT)
except:
parser.error(
"Cannot construct UTCDateTime from end time: " +
args.endT)
else:
args.endT = None
if args.strike is None and args.dip is None:
args.calc_dip = False
args.nbaz = None
elif args.strike is None or args.dip is None:
parser.error("Specify both strike and dip for this type " +
"of analysis")
else:
args.calc_dip = True
if args.bp is None:
args.bp = [0.05, 0.5]
else:
args.bp = [float(val) for val in args.bp.split(',')]
args.bp = sorted(args.bp)
if (len(args.bp)) != 2:
parser.error(
"Error: --bp should contain 2 " +
"comma-separated floats")
## JMG ##
if args.slowbound is None:
args.slowbound = [0.04, 0.08]
else:
args.slowbound = [float(val) for val in args.slowbound.split(',')]
args.slowbound = sorted(args.slowbound)
if (len(args.slowbound)) != 2:
parser.error(
"Error: --slowbound should contain 2 " +
"comma-separated floats")
if args.bazbound is None:
args.bazbound = [0.0, 360.0]
else:
args.bazbound = [float(val) for val in args.bazbound.split(',')]
args.bazbound = sorted(args.bazbound)
if (len(args.bazbound)) != 2:
parser.error(
"Error: --bazbound should contain 2 " +
"comma-separated floats")
## JMG ##
if args.phase not in ['P', 'PP', 'allP', 'S', 'SKS', 'allS']:
parser.error(
"Error: choose between 'P', 'PP', 'allP', 'S', 'SKS' and 'allS'.")
if args.phase == 'allP':
args.listphase = ['P', 'PP']
elif args.phase == 'allS':
args.listphase = ['S', 'SKS']
else:
args.listphase = [args.phase]
if args.typ not in ['sum', 'product']:
parser.error(
"Error: choose between 'sum' and 'product'")
if args.copy:
if args.bp_copy is None:
args.bp_copy = [0.05, 0.35]
else:
args.bp_copy = [float(val)
for val in args.bp_copy.split(',')]
args.bp_copy = sorted(args.bp_copy)
if (len(args.bp_copy)) != 2:
parser.error(
"Error: --bp_copy should contain 2 " +
"comma-separated floats")
if args.hbound is None:
args.hbound = [20., 50.]
else:
args.hbound = [float(val) for val in args.hbound.split(',')]
args.hbound = sorted(args.hbound)
if (len(args.hbound)) != 2:
parser.error(
"Error: --hbound should contain 2 " +
"comma-separated floats")
if args.kbound is None:
args.kbound = [1.56, 2.1]
else:
args.kbound = [float(val) for val in args.kbound.split(',')]
args.kbound = sorted(args.kbound)
if (len(args.kbound)) != 2:
parser.error(
"Error: --kbound should contain 2 " +
"comma-separated floats")
if args.weights is None:
args.weights = [0.5, 2.0, -1.0]
else:
args.weights = [float(val) for val in args.weights.split(',')]
if (len(args.weights)) != 3:
parser.error(
"Error: --weights should contain 3 " +
"comma-separated floats")
return args
def main():
print()
print("#########################################")
print("# __ _ _ #")
print("# _ __ / _|_ __ _ _ | |__ | | __ #")
print("# | '__| |_| '_ \| | | | | '_ \| |/ / #")
print("# | | | _| |_) | |_| | | | | | < #")
print("# |_| |_| | .__/ \__, |___|_| |_|_|\_\ #")
print("# |_| |___/_____| #")
print("# #")
print("#########################################")
print()
# Run Input Parser
args = get_hk_arguments()
# Load Database
db, stkeys = stdb.io.load_db(fname=args.indb, keys=args.stkeys)
# Track processed folders
procfold = []
# Loop over station keys
for stkey in list(stkeys):
# Extract station information from dictionary
sta = db[stkey]
# Construct Folder Name
stfld = stkey
if not args.lkey:
stfld = stkey.split('.')[0]+"."+stkey.split('.')[1]
# Define path to see if it exists
if args.phase in ['P', 'PP', 'allP']:
datapath = Path('P_DATA') / stfld
elif args.phase in ['S', 'SKS', 'allS']:
datapath = Path('S_DATA') / stfld
if not datapath.is_dir():
print('Path to ' + str(datapath) + ' doesn`t exist - continuing')
continue
# Define save path
if args.save:
savepath = Path('HK_DATA') / stfld
if not savepath.is_dir():
print('Path to '+str(savepath)+' doesn`t exist - creating it')
savepath.mkdir(parents=True)
# Get search start time
if args.startT is None:
tstart = sta.startdate
else:
tstart = args.startT
# Get search end time
if args.endT is None:
tend = sta.enddate
else:
tend = args.endT
if tstart > sta.enddate or tend < sta.startdate:
continue
# Temporary print locations
tlocs = sta.location
if len(tlocs) == 0:
tlocs = ['']
for il in range(0, len(tlocs)):
if len(tlocs[il]) == 0:
tlocs[il] = "--"
sta.location = tlocs
# Update Display
print(" ")
print(" ")
print("|===============================================|")
print("|===============================================|")
print("| {0:>8s} |".format(
sta.station))
print("|===============================================|")
print("|===============================================|")
print("| Station: {0:>2s}.{1:5s} |".format(
sta.network, sta.station))
print("| Channel: {0:2s}; Locations: {1:15s} |".format(
sta.channel, ",".join(tlocs)))
print("| Lon: {0:7.2f}; Lat: {1:6.2f} |".format(
sta.longitude, sta.latitude))
print("| Start time: {0:19s} |".format(
sta.startdate.strftime("%Y-%m-%d %H:%M:%S")))
print("| End time: {0:19s} |".format(
sta.enddate.strftime("%Y-%m-%d %H:%M:%S")))
print("|-----------------------------------------------|")
# Check for folder already processed
if stfld in procfold:
print(' {0} already processed...skipping '.format(stfld))
continue
rfRstream = Stream()
datafiles = [x for x in datapath.iterdir() if x.is_dir()]
for folder in datafiles:
# Skip hidden folders
if folder.name.startswith('.'):
continue
date = folder.name.split('_')[0]
year = date[0:4]
month = date[4:6]
day = date[6:8]
dateUTC = UTCDateTime(year+'-'+month+'-'+day)
if dateUTC > tstart and dateUTC < tend:
# Load meta data
metafile = folder / "Meta_Data.pkl"
if not metafile.is_file():
continue
meta = pickle.load(open(metafile, 'rb'))
# Skip data not in list of phases
if meta.phase not in args.listphase:
continue
# QC Thresholding
if meta.snrh < args.snrh:
continue
if meta.snr < args.snr:
continue
if meta.cc < args.cc:
continue
''' # Check bounds on data
# if meta.slow < args.slowbound[0] and meta.slow > args.slowbound[1]:
# continue
# if meta.baz < args.bazbound[0] and meta.baz > args.bazbound[1]:
# continue
'''
# If everything passed, load the RF data
filename = folder / "RF_Data.pkl"
if filename.is_file():
file = open(filename, "rb")
rfdata = pickle.load(file)
rfRstream.append(rfdata[1])
file.close()
if rfdata[0].stats.npts != 1451:
print(folder)
if len(rfRstream) == 0:
continue
if args.no_outl:
t1 = 0.
t2 = 30.
varR = []
for i in range(len(rfRstream)):
taxis = rfRstream[i].stats.taxis
tselect = (taxis > t1) & (taxis < t2)
varR.append(np.var(rfRstream[i].data[tselect]))
varR = np.array(varR)
# Remove outliers wrt variance within time range
medvarR = np.median(varR)
madvarR = 1.4826*np.median(np.abs(varR-medvarR))
robustR = np.abs((varR-medvarR)/madvarR)
outliersR = np.arange(len(rfRstream))[robustR > 2.5]
for i in outliersR[::-1]:
rfRstream.remove(rfRstream[i])
print('')
print("Number of radial RF data: " + str(len(rfRstream)))
print('')
# Try binning if specified
if args.calc_dip:
rf_tmp = binning.bin_baz_slow(rfRstream,
nbaz=args.nbaz+1,
nslow=args.nslow+1,
pws=args.pws)
rfRstream = rf_tmp[0]
else:
rf_tmp = binning.bin(rfRstream,
typ='slow',
nbin=args.nslow+1,
pws=args.pws)
rfRstream = rf_tmp[0]
# Get a copy of the radial component and filter
if args.copy:
rfRstream_copy = rfRstream.copy()
rfRstream_copy.filter('bandpass', freqmin=args.bp_copy[0],
freqmax=args.bp_copy[1], corners=2,
zerophase=True)
# Check bin counts:
for tr in rfRstream:
if (tr.stats.nbin < args.binlim):
rfRstream.remove(tr)
# Continue if stream is too short
if len(rfRstream) < 5:
continue
if args.save_plot and not Path('HK_PLOTS').is_dir():
Path('HK_PLOTS').mkdir(parents=True)
print('')
print("Number of radial RF bins: " + str(len(rfRstream)))
print('')
# Filter original stream
rfRstream.filter('bandpass', freqmin=args.bp[0],
freqmax=args.bp[1], corners=2,
zerophase=True)
# Initialize the HkStack object
try:
hkstack = HkStack(rfRstream, rfV2=rfRstream_copy,
strike=args.strike, dip=args.dip, vp=args.vp)
except:
hkstack = HkStack(rfRstream,
strike=args.strike, dip=args.dip, vp=args.vp)
# Update attributes
hkstack.hbound = args.hbound
hkstack.kbound = args.kbound
hkstack.dh = args.dh
hkstack.dk = args.dk
hkstack.weights = args.weights
# Stack with or without dip
if args.calc_dip:
hkstack.stack_dip()
else:
hkstack.stack()
# Average stacks
hkstack.average(typ=args.typ)
if args.plot:
hkstack.plot(args.save_plot, args.title, args.form)
if args.save:
filename = savepath / (hkstack.rfV1[0].stats.station +
".hkstack."+args.typ+".pkl")
hkstack.save(file=filename)
# Update processed folders
procfold.append(stfld)
if __name__ == "__main__":
# Run main program
main()
| 1.945313 | 2 |
Two_snake_version.py | Hacker-Davinci/Snake_Game_My_Hero_Academia_Version | 1 | 12797595 | <filename>Two_snake_version.py
import pygame
from pygame.locals import *
import time
import random
SIZE = 40
BACKGROUND_COLOR = (110, 110, 5)
WHITE_COLOR = (255, 255, 255)
class Apple:
def __init__(self, parent_screen):
self.image = pygame.image.load("resources/The_apple_everyone_want.jpg").convert()
self.parent_screen = parent_screen
self.x = random.randint(1, 24) * SIZE
self.y = random.randint(1, 19) * SIZE
def draw(self):
self.parent_screen.blit(self.image, (self.x, self.y))
pygame.display.flip()
def move(self):
self.x = random.randint(1, 24) * SIZE
self.y = random.randint(1, 19) * SIZE
class Snake:
def __init__(self, parent_screen):
self.parent_screen = parent_screen
self.image = pygame.image.load("resources/block.jpg").convert()
self.direction = 'down'
self.length = 1
self.x = [SIZE] # SIZE == 40 [SIZE]
self.y = [SIZE]
self.dead = False
def move_left(self):
self.direction = 'left'
def move_right(self):
self.direction = 'right'
def move_up(self):
self.direction = 'up'
def move_down(self):
self.direction = 'down'
def walk(self):
# update body
for i in range(self.length - 1, 0, -1):
self.x[i] = self.x[i - 1]
self.y[i] = self.y[i - 1]
# update head
if self.direction == 'left':
self.x[0] -= SIZE
elif self.direction == 'right':
self.x[0] += SIZE
elif self.direction == 'up':
self.y[0] -= SIZE
elif self.direction == 'down':
self.y[0] += SIZE
self.draw()
def draw(self):
for i in range(self.length):
self.parent_screen.blit(self.image, (self.x[i], self.y[i]))
print(self.x[i], self.y[i])
pygame.display.update()
def increase_length(self):
self.length += 1
self.x.append(-1)
self.y.append(-1)
class Game:
def __init__(self):
pygame.init()
pygame.display.set_caption("Codebasics Snake And Apple Game")
pygame.mixer.init()
self.play_background_music()
self.surface = pygame.display.set_mode((1000, 800))
self.snake1 = Snake(self.surface) # for the yellow one.
self.snake2 = Snake(self.surface) # for the blue one.
# for initialize the two snakes's pictures.
self.snake1.image = pygame.image.load("resources/deku.jpg").convert()
self.snake2.image = pygame.image.load("resources/bakugou.jpg").convert()
# for initialize the two snakes' positions
self.snake1.x = [920]
self.snake1.y = [40]
self.snake2.x = [40]
self.snake2.y = [40]
self.snake1.draw()
self.snake2.draw()
self.snake1.dead = False
self.snake2.dead = False
# todo for the two apples
self.apple1 = Apple(self.surface)
self.apple1.image = pygame.image.load("resources/The_apple_everyone_want.jpg").convert()
self.apple1.draw()
self.apple2 = Apple(self.surface)
self.apple2.image = pygame.image.load("resources/gold_apple.jpg").convert()
self.apple2.draw()
def play_background_music(self): # todo spend times read docs
pygame.mixer.music.load('resources/My_Hero_Academy_OP.mp3')
pygame.mixer.music.play(-1, 0)
def play_sound(self, sound_name):
if sound_name == "crash":
            sound = pygame.mixer.Sound("resources/crash.mp3")
pygame.mixer.Sound.play(sound)
elif sound_name == "ding":
            sound = pygame.mixer.Sound("resources/ding.mp3")
pygame.mixer.Sound.play(sound)
# pygame.mixer.Sound.play(pygame.mixer.Sound("resouces/ding.mp3"))
print("todo recover")
def reset(self):
self.snake1 = Snake(self.surface)
self.snake2 = Snake(self.surface)
self.snake1.dead = False
self.snake2.dead = False
self.snake1.x = [920]
self.snake1.y = [40]
self.snake2.x = [40]
self.snake2.y = [40]
self.snake1.image = pygame.image.load("resources/deku.jpg").convert()
self.snake2.image = pygame.image.load("resources/bakugou.jpg").convert()
self.apple1 = Apple(self.surface)
self.apple1.image = pygame.image.load("resources/The_apple_everyone_want.jpg").convert()
self.apple2 = Apple(self.surface)
self.apple2.image = pygame.image.load("resources/gold_apple.jpg").convert()
def is_collision(self, x1, y1, x2, y2):
if x1 >= x2 and x1 < x2 + SIZE:
if y1 >= y2 and y1 < y2 + SIZE:
return True
return False
def collide_boundaries(self, x, y):
if x > 1000 or x < 0:
print("is collide")
return True
if y > 800 or y < 0:
print("is collide")
return True
print("not collide")
return False
def render_background(self):
bg = pygame.image.load("resources/background_hero.jpg")
self.surface.blit(bg, (0, 0))
def play(self):
self.render_background()
# for the two snake
# if self.snake2.dead == True:
# print("go expection")
if self.snake1.dead == True and self.snake2.dead == True:
print("default true ?? ")
            raise Exception("Collision Occurred")
if self.snake1.dead == False:
self.snake1.walk()
print("DEKU ALIVE")
if self.snake2.dead == False:
self.snake2.walk()
print("BAKUGOU ALIVE")
# self.snake2.walk()
# TODO: Make the apple become two
self.apple1.draw()
self.apple2.draw()
self.display_score()
pygame.display.flip() # for the screen update
# snake colliding with apple
if self.is_collision(self.snake1.x[0], self.snake1.y[0], self.apple1.x, self.apple1.y):
self.play_sound("ding")
self.snake1.increase_length()
self.apple1.move()
self.apple1.draw()
if self.is_collision(self.snake2.x[0], self.snake2.y[0], self.apple1.x, self.apple1.y):
self.play_sound("ding")
self.snake2.increase_length()
self.apple1.move()
self.apple1.draw()
if self.is_collision(self.snake1.x[0], self.snake1.y[0], self.apple2.x, self.apple2.y):
self.play_sound("ding")
self.snake1.increase_length()
self.apple2.move()
self.apple2.draw()
if self.is_collision(self.snake2.x[0], self.snake2.y[0], self.apple2.x, self.apple2.y):
self.play_sound("ding")
self.snake2.increase_length()
self.apple2.move()
self.apple2.draw()
# todo make these sankes over the window
# snake colliding with itself
if self.snake1.dead == False:
for i in range(3, self.snake1.length):
if self.is_collision(self.snake1.x[0], self.snake1.y[0], self.snake1.x[i], self.snake1.y[i]):
self.play_sound("crash")
self.snake1.dead = True
if self.snake2.dead == False:
for i in range(3, self.snake2.length):
if self.is_collision(self.snake2.x[0], self.snake2.y[0], self.snake2.x[i], self.snake2.y[i]):
self.play_sound("crash")
self.snake2.dead = True
# TODO if snake1's head eats snake2 it becomes bigger
if self.snake1.dead == False and self.snake2.dead == False:
for i in range(self.snake2.length):
if self.is_collision(self.snake1.x[0], self.snake1.y[0], self.snake2.x[i], self.snake2.y[i]):
self.play_sound("crash")
self.snake2.dead = True
# TODO if snake2's head eats snake1 it becomes bigger
if self.snake1.dead == False and self.snake2.dead == False:
for i in range(self.snake1.length):
if self.is_collision(self.snake2.x[0], self.snake2.y[0], self.snake1.x[i], self.snake1.y[i]):
self.play_sound("crash")
self.snake1.dead = True
# TODO if both's head eats it becomes bigger
# snake colliding with the wall boundaries
# collides with boundaries.
if self.snake1.dead == False:
if self.collide_boundaries(self.snake1.x[0], self.snake1.y[0]):
self.play_sound("crash")
self.snake1.dead = True
for i in range(self.snake1.length):
self.snake1.x[i] = -10000000000 - 1000 * i
self.snake1.y[i] = -10000000000 - 1000 * i
if self.snake2.dead == False:
if self.collide_boundaries(self.snake2.x[0], self.snake2.y[0]):
self.play_sound("crash")
self.snake2.dead = True
for i in range(self.snake2.length):
self.snake2.x[i] = -10000000000 - 1000 * i
self.snake2.y[i] = -10000000000 - 1000 * i
def display_score(self):
font = pygame.font.SysFont('arial', 30)
score = font.render(f"Score: {self.snake1.length + self.snake2.length}", True, (200, 200, 200))
self.surface.blit(score, (850, 10))
def show_game_over(self):
self.render_background()
font = pygame.font.SysFont("arial", 30)
line1 = font.render(f"Game is over! Your score is {self.snake1.length + self.snake2.length}", True, WHITE_COLOR)
self.surface.blit(line1, (200, 300)) # blit(source, postion)
line2 = font.render("To play again press Enter. To exit press Escape!", True, WHITE_COLOR)
self.surface.blit(line2, (200, 350))
pygame.mixer.music.pause()
pygame.display.flip()
def run(self):
running = True
pause = False
while running:
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
running = False
if event.key == K_RETURN:
pygame.mixer.music.unpause()
pause = False
if not pause:
# for the first snake
if self.snake1.dead == False:
if event.key == K_LEFT:
self.snake1.move_left()
if event.key == K_RIGHT:
self.snake1.move_right()
if event.key == K_UP:
self.snake1.move_up()
if event.key == K_DOWN:
self.snake1.move_down()
# for the second snake
if self.snake2.dead == False:
if event.key == K_a:
self.snake2.move_left()
if event.key == K_d:
self.snake2.move_right()
if event.key == K_w:
self.snake2.move_up()
if event.key == K_s:
self.snake2.move_down()
elif event.type == QUIT:
running = False
try:
if not pause:
self.play()
except Exception as e:
print(e)
print("raise exception")
self.show_game_over()
pause = True
self.reset()
# self.snake.walk()
time.sleep(.1)
if __name__ == "__main__":
game = Game()
game.run()
| 3.359375 | 3 |
fiery_llama/matched_filters.py | bkimmig/fiery_llama | 0 | 12797596 | <gh_stars>0
import numpy as np
import pandas as pd
import re
import latbin
def lineify(df, n, column):
return cubeify(df, [n], [column])
def squareify(df, nx, ny, xcol, ycol):
return cubeify(df, [nx, ny], [xcol, ycol])
def cubeify(df, n, columns, target="weights"):
"""bins up a dataframe into a densely sampled cube
n: list
the dimensions of the cube
columns: list
the column names to bin on
target: column name
the column to sum up for each bin
returns
cube: ndarray
the "cubeified" data
"""
cube_df = pd.DataFrame()
cube_df[target] = df[target]
for i, col in enumerate(columns):
cdx = (np.max(df[col]) - np.min(df[col]))/(n[i] - 1)
cube_df[col] = np.around(df[col]/cdx).astype(int)
cube_df[col] -= np.min(cube_df[col])
gsum = cube_df.groupby(columns)[target].sum()
out_cube = np.zeros(n)
for ind in gsum.index:
        out_cube[ind] = gsum.loc[ind]
return out_cube
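# Usage sketch (the column names here are hypothetical, not part of this module):
# image = squareify(df, 50, 50, "x", "y") sums df["weights"] onto a 50x50 grid
# spanning the range of the "x" and "y" columns.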
def compress_cloud(df, bin_size=1., npts_out=250):
"""compress a large number of points into a small representative sample
via multidimensional histogramming and averaging.
"""
Aparam = latbin.ALattice(len(df.columns), scale=bin_size)
pts = Aparam.bin(df)
centers = pts.mean()
n_in = pts.size()
cut_idx = min(len(centers), npts_out)
thresh = np.sort(n_in)[-cut_idx]
mask = (n_in >= thresh)
centers['weights'] = n_in/np.sum(n_in[mask])
centers = centers[mask]
centers = centers.reset_index()
colnames = []
for col in centers.columns:
if re.match('q_', col) is None:
colnames.append(col)
colnames = np.array(colnames)
centers = centers[colnames].copy()
return centers
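# Sketch of an assumed workflow (not part of the original module): a large model grid
# can be reduced to a weighted template before filtering, e.g.
# template = compress_cloud(big_df, bin_size=1., npts_out=250).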
class PointFilter(object):
"""PointFilter handles efficiently calculating distances to
a set of points in many dimensions.
"""
def __init__(
self,
point_cloud,
filtered_columns,
sigma_vec,
copy=True,
):
"""
point_cloud: pandas DataFrame
the points in this filter
filtered_columns: list
the column names to filter on
sigma_vec: ndarray
the distance scale to use along each dimension
copy: bool
if False a copy of the input dataframe will not be made.
"""
if copy:
point_cloud = point_cloud.copy()
self.point_cloud = point_cloud
        if "weights" not in self.point_cloud.columns:
self.point_cloud["weights"] = np.repeat(
1.0/len(point_cloud),
len(point_cloud))
self.filtered_columns = filtered_columns
self.sigma_vec = sigma_vec
def get_weights(self, point_cloud):
pdata = point_cloud[self.filtered_columns]
filter_pts = self.point_cloud[self.filtered_columns]
sim_matrix = latbin.matching.sparse_distance_matrix(
pdata/self.sigma_vec,
filter_pts/self.sigma_vec,
)
weights = sim_matrix * self.point_cloud["weights"].values
return weights
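# Minimal usage sketch (names and dimensions are assumptions, not part of this module):
#   pf = PointFilter(template_df, filtered_columns=["c1", "c2"],
#                    sigma_vec=np.array([0.1, 0.1]))
#   weights = pf.get_weights(data_df)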
| 2.921875 | 3 |
experiments/system_model_v3/recommended_params.py | trangnv/geb-simulations-h20 | 7 | 12797597 | import datetime
import os
from models.system_model_v3.model.params.init import params
from models.system_model_v3.model.state_variables.init import state_variables
from models.constants import RAY
from experiments.system_model_v3.configure import configure_experiment
from experiments.system_model_v3.run import run_experiment
from experiments.utils import save_to_HDF5, batch, merge_parameter_sweep
from radcad.core import generate_parameter_sweep
SIMULATION_TIMESTEPS = 8758 #len(eth_price_df) - 1
MONTE_CARLO_RUNS = 1
sweeps = {
'controller_enabled': [True,False],
}
# Configure sweep and update parameters
params_update, experiment_metrics = configure_experiment(sweeps, timesteps=SIMULATION_TIMESTEPS, runs=MONTE_CARLO_RUNS)
params.update(params_update)
# Override parameters
params_override = {
'liquidity_demand_enabled': [False],
}
params.update(params_override)
# Experiment details
now = datetime.datetime.now()
dir_path = os.path.dirname(os.path.realpath(__file__))
experiment_folder = __file__.split('.py')[0]
results_id = now.isoformat()
if __name__ == '__main__':
run_experiment(results_id, experiment_folder, experiment_metrics, timesteps=SIMULATION_TIMESTEPS, runs=MONTE_CARLO_RUNS, params=params, initial_state=state_variables, save_file=True, save_logs=True)
| 1.703125 | 2 |
day5/part2.py | theonewolf/aoc2021 | 0 | 12797598 | #!/usr/bin/env python3
GRID_SIZE = 1000
if __name__ == '__main__':
data = open('input').read().splitlines()
grid = [[0 for i in range(GRID_SIZE)] for j in range (GRID_SIZE)]
for row in data:
pointA, pointB = row.split(' -> ')
pointA = [int(i) for i in pointA.split(',')]
pointB = [int(i) for i in pointB.split(',')]
# only horizontal and vertical for now
# horizontal lines
if pointA[0] == pointB[0]:
if pointA[1] > pointB[1]:
# swap points to always draw in ascending order
pointA, pointB = pointB, pointA
for i in range(pointA[1], pointB[1] + 1):
grid[i][pointA[0]] += 1
# vertical lines
elif pointA[1] == pointB[1]:
# swap points to always draw in ascending order
if pointA[0] > pointB[0]:
pointA, pointB = pointB, pointA
for i in range(pointA[0], pointB[0] + 1):
grid[pointA[1]][i] += 1
# diagonal lines
else:
if pointA[0] > pointB[0] or pointA[1] > pointB[1]:
pointA, pointB = pointB, pointA
target = pointA
# 0 = x, 1 = y
grid[target[1]][target[0]] += 1
while target[0] != pointB[0] and target[1] != pointB[1]:
target[0] += 1 if pointA[0] < pointB[0] else -1
target[1] += 1 if pointA[1] < pointB[1] else -1
# 0 = x, 1 = y
grid[target[1]][target[0]] += 1
count = 0
for row in grid:
for col in row:
if col >= 2:
count += 1
print(f'{count}')
| 3.625 | 4 |
main.py | shemerofir/skaffold-auto-docs | 0 | 12797599 | from get_config import get_config
from get_ansible import get_ansible
from get_helm import get_helm
from get_skaffold import get_skaffold
from get_docker import get_docker
from get_minikube import get_minikube
from get_regs import get_registries
from edit_hosts import edit_hosts
from c_registry import c_registry
if __name__ == '__main__':
edit_hosts()
get_docker() #
c_registry() #
get_ansible() #
#get_helm() #
get_minikube() #
get_skaffold() #
get_config()
get_registries()
edit_hosts()
| 1.3125 | 1 |
tests/test_counter.py | benkrikler/fast-carpenter-github-test | 12 | 12797600 | import numpy as np
import pytest
from fast_carpenter.selection.filters import Counter
@pytest.fixture
def weight_names():
return [
"EventWeight",
# "MuonWeight", "ElectronWeight", "JetWeight",
]
@pytest.fixture
def counter(weight_names):
return Counter(weight_names)
def test_init(weight_names, full_wrapped_tree):
c = Counter(weight_names)
assert c._weight_names == weight_names
assert c.counts == (0, 0.0)
assert c._w_counts == (0.0)
def test_increment_mc(counter, full_wrapped_tree):
counter.increment(full_wrapped_tree, is_mc=True)
n_events = len(full_wrapped_tree)
expected_weighted_sum = 229.94895935058594
# expected value is taken from numpy sum, but awkward sum is used
# the difference is small and due to optimization
# see https://github.com/scikit-hep/awkward-1.0/issues/1241
assert counter._w_counts == pytest.approx(np.array([expected_weighted_sum]), 1e-4)
assert counter.counts == (n_events, pytest.approx(expected_weighted_sum, 1e-4))
def test_increment_data(counter, full_wrapped_tree):
counter.increment(full_wrapped_tree, is_mc=False)
n_events = len(full_wrapped_tree)
assert counter._w_counts == (n_events)
assert counter.counts == (n_events, n_events)
def test_add(counter, full_wrapped_tree):
counter.increment(full_wrapped_tree, is_mc=True)
counter.add(counter)
n_events = len(full_wrapped_tree)
expected_weighted_sum = 229.94895935058594
# expected value is taken from numpy sum, but awkward sum is used
# the difference is small and due to optimization
# see https://github.com/scikit-hep/awkward-1.0/issues/1241
assert counter._w_counts == pytest.approx((expected_weighted_sum * 2,), 2e-4)
assert counter.counts == (n_events * 2, pytest.approx(expected_weighted_sum * 2, 2e-4))
def test_increment_without_weights(full_wrapped_tree):
counter = Counter([])
counter.increment(full_wrapped_tree, is_mc=True)
n_events = len(full_wrapped_tree)
with pytest.raises(IndexError):
assert counter._w_counts[0] == n_events
assert counter.counts == (n_events, )
| 2.328125 | 2 |
server/apps/vartype/tests/__init__.py | iotile/iotile_cloud | 0 | 12797601 | <reponame>iotile/iotile_cloud
from .test_api_token import *
from .tests import *
| 1.070313 | 1 |
app/root.py | SRAlexander/PyAPI | 1 | 12797602 | # coding:utf-8
import sys
from flask import Flask, jsonify
from flask_cors import CORS
from flask_migrate import Migrate
from flask_restplus import Api
from flasgger import Swagger
from alchemy.common.base import db
from marshmallow import Schema, fields, ValidationError, pre_load
from controllers import tests_controller
from container import Container
def create_app(testConfig=None, sqlConnectionString=None):
# container and dependency injection configuration setup on controller level
container = Container()
container.wire(modules=[tests_controller])
# create and configure the app
app = Flask(__name__, instance_relative_config=True)
api = Api(app)
swagger = Swagger(app)
    # set up environment variables from the passed-in configuration file in the instance folder
if testConfig is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile('config_dev.py', silent=False)
else:
# load the test config if passed in
app.config.from_pyfile(testConfig, silent=False)
if sqlConnectionString is not None:
app.config['SQLALCHEMY_DATABASE_URI']=sqlConnectionString
# import tables here to be referenced in the alembic migration scripts
from alchemy.tables.test_defintion_table import TestDefinition
db.init_app(app)
migrate = Migrate(app, db, render_as_batch=True)
# Register blueprints
routes = {
'tests': {'route': tests_controller.testsControllerBlueprint, 'url_prefix': '/tests/'},
}
for route in routes:
blueprint = routes[route]
app.register_blueprint(blueprint['route'], url_prefix = blueprint['url_prefix'])
CORS(app, resources={r"/*": {"origins": "*"}})
app.config['CORS_HEADERS'] = 'Content-Type'
return app
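# Typical local entry point (assumed, not part of this module):
#   app = create_app()
#   app.run(debug=True)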
| 2.078125 | 2 |
vega/correlation_func.py | andreicuceu/lyafit | 0 | 12797603 | <gh_stars>0
import numpy as np
from scipy.integrate import quad
from scipy.interpolate import interp1d
from . import utils
class CorrelationFunction:
"""Correlation function computation and handling.
# ! Slow operations should be kept in init as that is only called once
# ! Compute is called many times and should be fast
Extensions should have their separate method of the form
'compute_extension' that can be called from outside
"""
def __init__(self, config, fiducial, coords_grid, scale_params,
tracer1, tracer2, bb_config=None, metal_corr=False):
"""
Parameters
----------
config : ConfigParser
model section of config file
fiducial : dict
fiducial config
coords_grid : dict
Dictionary with coordinate grid - r, mu, z
scale_params : ScaleParameters
ScaleParameters object
tracer1 : dict
Config of tracer 1
tracer2 : dict
Config of tracer 2
bb_config : list, optional
list with configs of broadband terms, by default None
metal_corr : bool, optional
Whether this is a metal correlation, by default False
"""
self._config = config
self._r = coords_grid['r']
self._mu = coords_grid['mu']
self._z = coords_grid['z']
self._multipole = config.getint('single_multipole', -1)
self._tracer1 = tracer1
self._tracer2 = tracer2
self._z_eff = fiducial['z_eff']
self._rel_z_evol = (1. + self._z) / (1 + self._z_eff)
self._scale_params = scale_params
self._metal_corr = metal_corr
# Check if we need delta rp (Only for the cross)
self._delta_rp_name = None
if tracer1['type'] == 'discrete' and tracer2['type'] != 'discrete':
self._delta_rp_name = 'drp_' + tracer1['name']
elif tracer2['type'] == 'discrete' and tracer1['type'] != 'discrete':
self._delta_rp_name = 'drp_' + tracer2['name']
# Precompute growth
self._z_fid = fiducial['z_fiducial']
self._Omega_m = fiducial.get('Omega_m', None)
self._Omega_de = fiducial.get('Omega_de', None)
if not config.getboolean('old_growth_func', False):
self.xi_growth = self.compute_growth(self._z, self._z_fid, self._Omega_m,
self._Omega_de)
else:
self.xi_growth = self.compute_growth_old(self._z, self._z_fid, self._Omega_m,
self._Omega_de)
# Initialize the broadband
self.has_bb = False
if bb_config is not None:
self._init_broadband(bb_config)
self.has_bb = True
# Check for QSO radiation modeling and check if it is QSOxLYA
# Does this work for the QSO auto as well?
self.radiation_flag = False
if 'radiation effects' in self._config:
self.radiation_flag = self._config.getboolean('radiation effects')
if self.radiation_flag:
names = [self._tracer1['name'], self._tracer2['name']]
if not ('QSO' in names and 'LYA' in names):
raise ValueError('You asked for QSO radiation effects, but it'
' can only be applied to the cross (QSOxLya)')
# Check for relativistic effects and standard asymmetry
self.relativistic_flag = False
if 'relativistic correction' in self._config:
self.relativistic_flag = self._config.getboolean('relativistic correction')
self.asymmetry_flag = False
if 'standard asymmetry' in self._config:
self.asymmetry_flag = self._config.getboolean('standard asymmetry')
if self.relativistic_flag or self.asymmetry_flag:
types = [self._tracer1['type'], self._tracer2['type']]
if ('continuous' not in types) or (types[0] == types[1]):
raise ValueError('You asked for relativistic effects or standard asymmetry,'
' but they only work for the cross')
def compute(self, pk, pk_lin, PktoXi_obj, params):
"""Compute correlation function for input P(k).
Parameters
----------
pk : ND Array
Input power spectrum
pk_lin : 1D Array
Linear isotropic power spectrum
PktoXi_obj : vega.PktoXi
An instance of the transform object used to turn Pk into Xi
params : dict
Computation parameters
Returns
-------
1D Array
Output correlation function
"""
# Compute the core
xi = self.compute_core(pk, PktoXi_obj, params)
# Add bias evolution
xi *= self.compute_bias_evol(params)
# Add growth
xi *= self.xi_growth
# Add QSO radiation modeling for cross
if self.radiation_flag and not params['peak']:
xi += self.compute_qso_radiation(params)
# Add relativistic effects
if self.relativistic_flag:
xi += self.compute_xi_relativistic(pk_lin, PktoXi_obj, params)
# Add standard asymmetry
if self.asymmetry_flag:
xi += self.compute_xi_asymmetry(pk_lin, PktoXi_obj, params)
return xi
def compute_core(self, pk, PktoXi_obj, params):
"""Compute the core of the correlation function.
This does the Hankel transform of the input P(k),
sums the necessary multipoles and rescales the coordinates
Parameters
----------
pk : ND Array
Input power spectrum
PktoXi_obj : vega.PktoXi
An instance of the transform object used to turn Pk into Xi
params : dict
Computation parameters
Returns
-------
1D Array
Output correlation function
"""
# Check for delta rp
delta_rp = 0.
if self._delta_rp_name is not None:
delta_rp = params.get(self._delta_rp_name, 0.)
# Get rescaled Xi coordinates
ap, at = self._scale_params.get_ap_at(params, metal_corr=self._metal_corr)
rescaled_r, rescaled_mu = self._rescale_coords(self._r, self._mu, ap, at, delta_rp)
# Compute correlation function
xi = PktoXi_obj.compute(rescaled_r, rescaled_mu, pk, self._multipole)
return xi
@staticmethod
def _rescale_coords(r, mu, ap, at, delta_rp=0.):
"""Rescale Xi coordinates using ap/at.
Parameters
----------
r : ND array
Array of radius coords of Xi
mu : ND array
Array of mu = rp/r coords of Xi
ap : float
Alpha parallel
at : float
Alpha transverse
delta_rp : float, optional
Delta radius_parallel - nuisance correction for wrong redshift,
used for discrete tracers, by default 0.
Returns
-------
ND Array
Rescaled radii
ND Array
Rescaled mu
"""
mask = r != 0
rp = r[mask] * mu[mask] + delta_rp
rt = r[mask] * np.sqrt(1 - mu[mask]**2)
rescaled_rp = ap * rp
rescaled_rt = at * rt
rescaled_r = np.zeros(len(r))
rescaled_mu = np.zeros(len(mu))
rescaled_r[mask] = np.sqrt(rescaled_rp**2 + rescaled_rt**2)
rescaled_mu[mask] = rescaled_rp / rescaled_r[mask]
return rescaled_r, rescaled_mu
def compute_bias_evol(self, params):
"""Compute bias evolution for the correlation function.
Parameters
----------
params : dict
Computation parameters
Returns
-------
ND Array
Bias evolution for tracer
"""
# Compute the bias evolution
bias_evol = self._get_tracer_evol(params, self._tracer1['name'])
bias_evol *= self._get_tracer_evol(params, self._tracer2['name'])
return bias_evol
def _get_tracer_evol(self, params, tracer_name):
"""Compute tracer bias evolution.
Parameters
----------
params : dict
Computation parameters
tracer_name : string
Name of tracer
Returns
-------
ND Array
Bias evolution for tracer
"""
handle_name = 'z evol {}'.format(tracer_name)
if handle_name in self._config:
evol_model = self._config.get(handle_name, 'standard')
else:
evol_model = self._config.get('z evol', 'standard')
# Compute the bias evolution using the right model
if 'croom' in evol_model:
bias_evol = self._bias_evol_croom(params, tracer_name)
else:
bias_evol = self._bias_evol_std(params, tracer_name)
return bias_evol
def _bias_evol_std(self, params, tracer_name):
"""Bias evolution standard model.
Parameters
----------
params : dict
Computation parameters
tracer_name : string
Tracer name
Returns
-------
ND Array
Bias evolution for tracer
"""
p0 = params['alpha_{}'.format(tracer_name)]
bias_z = self._rel_z_evol**p0
return bias_z
def _bias_evol_croom(self, params, tracer_name):
"""Bias evolution Croom model for QSO, see Croom et al. 2005.
Parameters
----------
params : dict
Computation parameters
tracer_name : string
Tracer name
Returns
-------
ND Array
Bias evolution for tracer
"""
assert tracer_name == "QSO"
p0 = params["croom_par0"]
p1 = params["croom_par1"]
bias_z = (p0 + p1*(1. + self._z)**2) / (p0 + p1 * (1 + self._z_eff)**2)
return bias_z
def compute_growth(self, z_grid=None, z_fid=None,
Omega_m=None, Omega_de=None):
"""Compute growth factor.
Implements eq. 7.77 from <NAME>'s Modern Cosmology book.
Returns
-------
ND Array
Growth factor
"""
# Check the defaults
if z_grid is None:
z_grid = self._z
if z_fid is None:
z_fid = self._z_fid
if Omega_m is None:
Omega_m = self._Omega_m
if Omega_de is None:
Omega_de = self._Omega_de
# Check if we have dark energy
if Omega_de is None:
growth = (1 + z_fid) / (1. + z_grid)
return growth**2
# Compute the growth at each redshift on the grid
growth = utils.growth_function(z_grid, Omega_m, Omega_de)
# Scale to the fiducial redshift
growth /= utils.growth_function(z_fid, Omega_m, Omega_de)
return growth**2
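# Worked example (illustrative, not from the original source): for Omega_m=0.31
# and Omega_de=0.69, compute_growth(z_grid=2.3, z_fid=2.33) returns
# [D(2.3)/D(2.33)]**2, slightly above 1 because the growth factor D decreases
# with redshift; with Omega_de=None it falls back to the matter-dominated
# scaling ((1 + z_fid) / (1 + z_grid))**2.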
def compute_growth_old(self, z_grid=None, z_fid=None, Omega_m=None, Omega_de=None):
def hubble(z, Omega_m, Omega_de):
return np.sqrt(Omega_m*(1+z)**3 + Omega_de + (1-Omega_m-Omega_de)*(1+z)**2)
def dD1(a, Omega_m, Omega_de):
z = 1/a-1
return 1./(a*hubble(z, Omega_m, Omega_de))**3
# Calculate D1 in 100 values of z between 0 and zmax, then interpolate
nbins = 100
zmax = 5.
z = zmax * np.arange(nbins, dtype=float) / (nbins-1)
D1 = np.zeros(nbins, dtype=float)
pars = (Omega_m, Omega_de)
for i in range(nbins):
a = 1/(1+z[i])
D1[i] = 5/2.*Omega_m*hubble(z[i], *pars)*quad(dD1, 0, a, args=pars)[0]
D1 = interp1d(z, D1)
growth = D1(z_grid) / D1(z_fid)
return growth**2
def _init_broadband(self, bb_config):
"""Initialize the broadband terms.
Parameters
----------
bb_config : list
list with configs of broadband terms
"""
self.bb_terms = {}
self.bb_terms['pre-add'] = []
self.bb_terms['post-add'] = []
self.bb_terms['pre-mul'] = []
self.bb_terms['post-mul'] = []
# First pick up the normal broadband terms
normal_broadbands = [el for el in bb_config
if el['func'] != 'broadband_sky']
for index, config in enumerate(normal_broadbands):
# Create the name for the parameters of this term
name = 'BB-{}-{} {} {} {}'.format(config['cf_name'], index,
config['type'], config['pre'],
config['rp_rt'])
# Create the broadband term dictionary
bb = {}
bb['name'] = name
bb['func'] = config['func']
bb['rp_rt'] = config['rp_rt']
bb['r_config'] = config['r_config']
bb['mu_config'] = config['mu_config']
self.bb_terms[config['pre'] + "-" + config['type']].append(bb)
# Next pick up the sky broadband terms
sky_broadbands = [el for el in bb_config
if el['func'] == 'broadband_sky']
for index, config in enumerate(sky_broadbands):
assert config['rp_rt'] == 'rp,rt'
# Create the name for the parameters of this term
name = 'BB-{}-{}-{}'.format(config['cf_name'],
index + len(normal_broadbands),
config['func'])
# Create the broadband term dictionary
bb = {}
bb['name'] = name
bb['func'] = config['func']
bb['bin_size_rp'] = config['bin_size_rp']
self.bb_terms[config['pre'] + "-" + config['type']].append(bb)
def compute_broadband(self, params, pos_type):
"""Compute the broadband terms for
one position (pre-distortion/post-distortion)
and one type (multiplicative/additive).
Parameters
----------
params : dict
Computation parameters
pos_type : string
String with position and type, must be one of:
'pre-mul' or 'pre-add' or 'post-mul' or 'post-add'
Returns
-------
1d Array
Output broadband
"""
assert pos_type in ['pre-mul', 'pre-add', 'post-mul', 'post-add']
corr = None
# Loop over the right pos/type configuration
for bb_term in self.bb_terms[pos_type]:
# Check if it's sky or normal broadband
if bb_term['func'] != 'broadband_sky':
# Initialize the broadband and check
# if we need to add or multiply
if corr is None:
corr = self.broadband(bb_term, params)
if 'mul' in pos_type:
corr = 1 + corr
elif 'mul' in pos_type:
corr *= 1 + self.broadband(bb_term, params)
else:
corr += self.broadband(bb_term, params)
else:
# Initialize the broadband and check
# if we need to add or multiply
if corr is None:
corr = self.broadband_sky(bb_term, params)
if 'mul' in pos_type:
corr = 1 + corr
elif 'mul' in pos_type:
corr *= 1 + self.broadband_sky(bb_term, params)
else:
corr += self.broadband_sky(bb_term, params)
# Give defaults if corr is still None
if corr is None:
if 'mul' in pos_type:
corr = 1.
else:
corr = 0.
return corr
def broadband_sky(self, bb_term, params):
"""Compute sky broadband term.
Calculates a Gaussian broadband in rp,rt for the sky residuals.
Parameters
----------
bb_term : dict
broadband term config
params : dict
Computation parameters
Returns
-------
1d Array
Output broadband
"""
rp = self._r * self._mu
rt = self._r * np.sqrt(1 - self._mu**2)
scale = params[bb_term['name'] + '-scale-sky']
sigma = params[bb_term['name'] + '-sigma-sky']
corr = scale / (sigma * np.sqrt(2. * np.pi))
corr *= np.exp(-0.5 * (rt / sigma)**2)
w = (rp >= 0.) & (rp < bb_term['bin_size_rp'])
corr[~w] = 0.
return corr
def broadband(self, bb_term, params):
"""Compute broadband term.
Calculates a power-law broadband in r and mu or rp,rt.
Parameters
----------
bb_term : dict
broadband term config
params : dict
Computation parameters
Returns
-------
1d Array
Output broadband
"""
r1 = self._r / 100.
r2 = self._mu
if bb_term['rp_rt'] == 'rp,rt':
r1 = self._r / 100. * self._mu
r2 = self._r / 100. * np.sqrt(1 - self._mu**2)
r_min, r_max, dr = bb_term['r_config']
mu_min, mu_max, dmu = bb_term['mu_config']
r1_powers = np.arange(r_min, r_max + 1, dr)
r2_powers = np.arange(mu_min, mu_max + 1, dmu)
bb_params = []
for i in r1_powers:
for j in r2_powers:
bb_params.append(params['{} ({},{})'.format(
bb_term['name'], i, j)])
bb_params = np.array(bb_params).reshape(-1, r2_powers.size)
corr = (bb_params[:, :, None] * r1**r1_powers[:, None, None] *
r2**r2_powers[None, :, None]).sum(axis=(0, 1))
return corr
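# Naming sketch for the broadband parameters (hypothetical 'lyaxlya' name, for
# illustration only): a term created by _init_broadband as
# 'BB-lyaxlya-0 add pre rp,rt' with r_config=(0, 2, 1) and mu_config=(0, 1, 1)
# is expected to find keys 'BB-lyaxlya-0 add pre rp,rt (0,0)' through '(2,1)'
# in params, one amplitude per (r1 power, r2 power) pair summed above.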
def compute_qso_radiation(self, params):
"""Model the contribution of QSO radiation to the cross
(the transverse proximity effect)
Parameters
----------
params : dict
Computation parameters
Returns
-------
1D
Xi QSO radiation model
"""
assert 'QSO' in [self._tracer1['name'], self._tracer2['name']]
assert self._tracer1['name'] != self._tracer2['name']
# Compute the shifted r and mu grids
delta_rp = params.get(self._delta_rp_name, 0.)
rp = self._r * self._mu + delta_rp
rt = self._r * np.sqrt(1 - self._mu**2)
r_shift = np.sqrt(rp**2 + rt**2)
mu_shift = rp / r_shift
# Get the QSO radiation model parameters
strength = params['qso_rad_strength']
asymmetry = params['qso_rad_asymmetry']
lifetime = params['qso_rad_lifetime']
decrease = params['qso_rad_decrease']
# Compute the QSO radiation model
xi_rad = strength / (r_shift**2) * (1 - asymmetry * (1 - mu_shift**2))
xi_rad *= np.exp(-r_shift * ((1 + mu_shift) / lifetime + 1 / decrease))
return xi_rad
def compute_xi_relativistic(self, pk, PktoXi_obj, params):
"""Calculate the cross-correlation contribution from
relativistic effects (Bonvin et al. 2014).
Parameters
----------
pk : ND Array
Input power spectrum
PktoXi_obj : vega.PktoXi
An instance of the transform object used to turn Pk into Xi
params : dict
Computation parameters
Returns
-------
1D Array
Output xi relativistic
"""
assert 'continuous' in [self._tracer1['type'], self._tracer2['type']]
assert self._tracer1['type'] != self._tracer2['type']
# Get rescaled Xi coordinates
delta_rp = params.get(self._delta_rp_name, 0.)
ap, at = self._scale_params.get_ap_at(params, metal_corr=self._metal_corr)
rescaled_r, rescaled_mu = self._rescale_coords(self._r, self._mu, ap, at, delta_rp)
# Compute the correlation function
xi_rel = PktoXi_obj.pk_to_xi_relativistic(rescaled_r, rescaled_mu, pk, params)
return xi_rel
def compute_xi_asymmetry(self, pk, PktoXi_obj, params):
"""Calculate the cross-correlation contribution from
standard asymmetry (Bonvin et al. 2014).
Parameters
----------
pk : ND Array
Input power spectrum
PktoXi_obj : vega.PktoXi
An instance of the transform object used to turn Pk into Xi
params : dict
Computation parameters
Returns
-------
1D Array
Output xi asymmetry
"""
assert 'continuous' in [self._tracer1['type'], self._tracer2['type']]
assert self._tracer1['type'] != self._tracer2['type']
# Get rescaled Xi coordinates
delta_rp = params.get(self._delta_rp_name, 0.)
ap, at = self._scale_params.get_ap_at(params, metal_corr=self._metal_corr)
rescaled_r, rescaled_mu = self._rescale_coords(self._r, self._mu, ap, at, delta_rp)
# Compute the correlation function
xi_asy = PktoXi_obj.pk_to_xi_asymmetry(rescaled_r, rescaled_mu, pk, params)
return xi_asy
| 2.515625 | 3 |
example_racktests/2_seed.py | eyal-stratoscale/pyracktest | 0 | 12797604 | from strato.racktest.infra.suite import *
from example_seeds import addition
import time
SIGNALLED_CALLABLE_CODE = """
import signal
import time
signalReceived = None
def signalHandler(sigNum, _):
global signalReceived
signalReceived = sigNum
signal.signal(signal.SIGUSR2, signalHandler)
while not signalReceived:
time.sleep(1)
"""
class Test:
HOSTS = dict(it=dict(rootfs="rootfs-basic"))
def run(self):
TS_ASSERT_EQUALS(host.it.seed.runCallable(
addition.addition, 1, second=2, takeSitePackages=True)[0], 3)
TS_ASSERT_EQUALS(host.it.seed.runCode(
"from example_seeds import addition\nresult = addition.addition(2, second=3)",
takeSitePackages=True)[0], 5)
forked = host.it.seed.forkCode(
"import time\ntime.sleep(3)\n"
"print 'OUTPUT LINE'\n"
"from example_seeds import addition\nresult = addition.addition(2, second=3)",
takeSitePackages=True)
TS_ASSERT(forked.poll() is None)
TS_ASSERT(forked.poll() is None)
TS_ASSERT_PREDICATE_TIMEOUT(forked.poll, TS_timeout=4)
TS_ASSERT(forked.poll())
TS_ASSERT_EQUALS(forked.result(), 5)
TS_ASSERT('OUTPUT LINE' in forked.output())
forked = host.it.seed.forkCode(
"import time\nwhile True: time.sleep(2)", takeSitePackages=True)
TS_ASSERT(forked.poll() is None)
TS_ASSERT(forked.poll() is None)
forked.kill()
for i in xrange(10):
if forked.poll() is None:
time.sleep(1)
else:
break
TS_ASSERT_EQUALS(forked.poll(), False)
forked = host.it.seed.forkCode(
"import time\nwhile True: time.sleep(2)", takeSitePackages=True)
TS_ASSERT(forked.poll() is None)
TS_ASSERT(forked.poll() is None)
forked.kill('TERM')
for i in xrange(10):
if forked.poll() is None:
time.sleep(1)
else:
break
TS_ASSERT_EQUALS(forked.poll(), False)
forked = host.it.seed.forkCode(SIGNALLED_CALLABLE_CODE, takeSitePackages=True)
TS_ASSERT(forked.poll() is None)
TS_ASSERT(forked.poll() is None)
forked.kill('USR2')
for i in xrange(10):
if forked.poll() is None:
time.sleep(1)
else:
break
TS_ASSERT_EQUALS(forked.poll(), True)
| 2.265625 | 2 |
Jira/jiraAPI.py | Yash-Jain289/Anton-Virtual-Assistant | 2 | 12797605 | # Reference : https://docs.atlassian.com/software/jira/docs/api/REST/8.5.3
# Reference : https://developer.atlassian.com/cloud/jira/platform/rest/v2/
# https://id.atlassian.com/manage/api-tokens - create the api token
# https://developer.atlassian.com/cloud/jira/platform/basic-auth-for-rest-apis/ - doing it
from configparser import ConfigParser
from constants import BASE_URL
import requests
import base64
class JiraAPI:
headers={}
base_url=BASE_URL
@staticmethod
def get_from_config(item):
config = ConfigParser()
config.read('../secret.ini')
try:
return config.get('Jira',item)
except:
return None
def __init__(self):
"""
Get the username and password from the secrets.ini file
"""
email = self.get_from_config("email")
api_token = self.get_from_config("api_token")
required_string = f"{email}:{api_token}"
encoded = base64.b64encode(
required_string.encode("utf-8")).decode("utf-8")
self.headers = {
'Authorization': f"Basic {encoded}",
'Content-Type': "application/json"
}
def get(self, route, params=None):
"""
Get the API Response
"""
print(f"{self.base_url}{route}")
response = None
if params is None:
response = requests.get(
f"{self.base_url}{route}",
headers=self.headers,
)
else:
response = requests.get(
f"{self.base_url}{route}",
headers=self.headers,
params=params
)
# Return the response to get the required data
try:
return response.json()
except:
return None
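# Usage sketch (hypothetical issue key and values, not part of the class):
# >>> api = JiraAPI()                 # credentials read from ../secret.ini
# >>> api.get_all_projects()          # GET <BASE_URL>rest/api/2/project/search
# >>> api.get_issue("PROJ-123")       # GET <BASE_URL>rest/api/2/issue/PROJ-123
# "PROJ-123" stands in for any issue key on your Jira instance.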
# Application roles
def get_application_roles_all(self):
"""
Returns all application roles.
"""
route = "rest/api/2/applicationrole"
return self.get(route=route) or {}
def get_application_roles(self,key):
"""
Returns an application roles.
:key: - The key of the application role.
"""
route = f"rest/api/2/applicationrole/{key}"
return self.get(route=route) or {}
# Audit Records
def get_audit_records(self,startat=None,maxresults=None):
"""
Returns a list of audit records.
:startat: - The number of records to skip before returning the first result.
:maxresults: - The maximum number of results to return.
"""
params={}
if(startat):
params["startat"] = startat
if(maxresults):
params["maxresults"] = maxresults
route = "rest/api/2/auditing/record"
return self.get(route=route,params=params) or {}
# Avatars
def get_system_avatars_by_type(self,avtype):
"""
Returns a list of system avatar details by owner type, where the owner
types are issue type, project, or user.
:avtype: - avatar type
"""
route = f"rest/api/2/avatar/{avtype}/system"
return self.get(route=route) or {}
def get_avatars(self,avtype,entityid):
"""
Returns the system and custom avatars for a project or issue type.
:avtype: - avatar type
:entityid: - The ID of the item the avatar is associated with.
"""
route = f"rest/api/2/universal_avatar/type/{avtype}/owner/{entityid}"
return self.get(route=route) or {}
# Dashboard
def get_all_dashboards(self,startat=None,maxresults=None):
params={}
if(startat):
params["startAt"] = startat
if(maxresults):
params["maxResults"] = maxresults
route = "rest/api/2/dashboard"
return self.get(route=route,params=params) or {}
def search_for_dashboards(self,name=None,accid=None,groupname=None):
params={}
if(name):
params["dashboardName"] = name
if(accid):
params["accountId"] = accid
if(groupname):
params["groupname"] = groupname
route = "rest/api/2/dashboard/search"
return self.get(route=route,params=params) or {}
def get_dashboard_item_property_keys(self,dashboardId,itemId):
route = f"rest/api/2/dashboard/{dashboardId}/items/{itemId}/properties"
return self.get(route=route) or {}
def get_dashboard_item_property(self,dashboardId,itemId,propertyKey):
route = f"rest/api/2/dashboard/{dashboardId}/items/{itemId}/properties/{propertyKey}"
return self.get(route=route) or {}
def get_dashboard(self,dId):
route = f"rest/api/2/dashboard/{dId}"
return self.get(route=route) or {}
# Filter
def get_filter(self,fId):
route = f"rest/api/2/filter/{fId}"
return self.get(route=route) or {}
def get_my_filters(self):
route = "rest/api/2/filter/my"
return self.get(route=route) or {}
# Groups
def get_users_from_group(self,groupname,includeInactiveUsers=None,startAt=None,maxResults=None):
params={}
params["groupname"] = groupname
if(includeInactiveUsers):
params["includeInactiveUsers"] = includeInactiveUsers
if(startAt):
params["startAt"] = startAt
if(maxResults):
params["maxResults"] = maxResults
route = "rest/api/2/group/member"
return self.get(route=route,params=params) or {}
# Issues --partial
def get_issue(self,issueIdOrKey):
route = f"rest/api/2/issue/{issueIdOrKey}"
return self.get(route=route) or {}
def get_changelogs(self,issueIdOrKey,startAt=None,maxResults=None):
params={}
if(startAt):
params["startAt"] = startAt
if(maxResults):
params["maxResults"] = maxResults
route = f"rest/api/2/issue/{issueIdOrKey}/changelog"
return self.get(route=route,params=params) or {}
def get_transitions(self,issueIdOrKey,transitionId=None):
params={}
if(transitionId):
params["transitionId"] = transitionId
route = f"rest/api/2/issue/{issueIdOrKey}/changelog"
return self.get(route=route,params=params) or {}
def get_comments(self,issueIdOrKey,startAt=None,maxResults=None):
params={}
if(startAt):
params["startAt"] = startAt
if(maxResults):
params["maxResults"] = maxResults
route = f"rest/api/2/issue/{issueIdOrKey}/comments"
return self.get(route=route,params=params) or {}
def get_comment(self,issueIdOrKey,cId):
route = f"rest/api/2/issue/{issueIdOrKey}/comment/{cId}"
return self.get(route=route) or {}
# Permissions
def get_my_permissions(self):
"""
Provide permission information for the current user.
"""
route = "rest/api/2/mypermissions"
return self.get(route=route) or {}
def get_permissions_all(self):
"""
Provide permission information for the current user.
"""
route = "rest/api/2/permissions"
return self.get(route=route) or {}
def get_property(self,key=None,permissionLevel=None):
"""
Returns an application property.
:key: OPT
:permissionLevel: OPT
"""
params={}
if(key):
params["key"] = key
if(permissionLevel):
params["permissionLevel"] = permissionLevel
route = "rest/api/2/application-properties"
return self.get(route=route,params=params)
# Projects -- partial
def get_project(self,projectIdOrKey):
route = f"rest/api/2/project/{projectIdOrKey}"
return self.get(route=route) or {}
def get_all_projects(self,startAt=None,maxResults=None):
params={}
if(startAt):
params["startAt"] = startAt
if(maxResults):
params["maxResults"] = maxResults
route = f"rest/api/2/project/search"
return self.get(route=route,params=params) or {}
# User
def get_user(self,accountId=None):
params={}
if(accountId):
params["accountId"] = accountId
route = f"rest/api/2/project/search"
return self.get(route=route,params=params) or {}
| 2.890625 | 3 |
KiZip/core/KiZip.py | gregdavill/KiZip | 18 | 12797606 | import os
import json
import re
import sys
from datetime import datetime
import logging
import wx
import zipfile
import shutil
import pcbnew
from .config import Config
from ..dialog import SettingsDialog
from ..errors import ParsingException
from .parser import Parser
class Logger(object):
def __init__(self, cli=False):
self.cli = cli
self.logger = logging.getLogger('KiZip')
self.logger.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)-15s %(levelname)s %(message)s")
ch.setFormatter(formatter)
self.logger.addHandler(ch)
def info(self, *args):
if self.cli:
self.logger.info(*args)
def error(self, msg):
if self.cli:
self.logger.error(msg)
else:
wx.MessageBox(msg)
def warn(self, msg):
if self.cli:
self.logger.warn(msg)
else:
wx.LogWarning(msg)
log = None # type: Logger or None
def process_substitutions(output_name_format, pcb_file_name, metadata):
# type: (str, str, dict)->str
name = output_name_format.replace('%f', os.path.splitext(pcb_file_name)[0])
name = name.replace('%p', metadata['title'])
name = name.replace('%c', metadata['company'])
name = name.replace('%r', metadata['revision'])
name = name.replace('%d', metadata['date'].replace(':', '-'))
now = datetime.now()
name = name.replace('%D', now.strftime('%Y-%m-%d'))
name = name.replace('%T', now.strftime('%H-%M-%S'))
# sanitize the name to avoid characters illegal in file systems
name = name.replace('\\', '/')
name = re.sub(r'[?%*:|"<>]', '_', name)
return name + '.zip'
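# Example of the substitution (illustrative names and date only): with
# output_name_format='%f_gerbers_%D', pcb_file_name='board.kicad_pcb' and the
# title-block metadata, process_substitutions() returns something like
# 'board_gerbers_2024-01-31.zip', where the date part comes from datetime.now().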
class KiZipPlugin(pcbnew.ActionPlugin, object):
def __init__(self):
super(KiZipPlugin, self).__init__()
self.name = "Generate Gerber Package"
self.category = "Read PCB"
self.pcbnew_icon_support = hasattr(self, "show_toolbar_button")
self.show_toolbar_button = True
icon_dir = os.path.dirname(os.path.dirname(__file__))
self.icon_file_name = os.path.join(icon_dir, 'icon.png')
self.description = "Generate Gerber Package"
def defaults(self):
pass
def Run(self):
from ..version import version
from ..errors import ParsingException
self.version = version
board = pcbnew.GetBoard()
pcb_file_name = board.GetFileName()
config = Config(self.version, os.path.dirname(pcb_file_name))
logger = Logger()
if not pcb_file_name:
logger.error('Please save the board file before generating gerbers')
return
parser = Parser(pcb_file_name, config, logger, board)
try:
run_with_dialog(parser, config, logger)
except ParsingException as e:
logger.error(str(e))
def main(parser, config, logger):
# type: (Parser, Config, Logger) -> None
global log
log = logger
pcb_file_name = os.path.basename(parser.file_name)
pcb_file_dir = os.path.dirname(parser.file_name)
pcbdata = parser.parse()
file_list = parser.plot()
logger.info(file_list)
if os.path.isabs(config.output_dest_dir):
output_file_dir = config.output_dest_dir
else:
output_file_dir = os.path.join(pcb_file_dir, config.output_dest_dir)
output_file_name = process_substitutions(
config.output_name_format, pcb_file_name, pcbdata['metadata'])
output_file_name = os.path.join(output_file_dir, output_file_name)
os.makedirs(output_file_dir, exist_ok=True)
#zip up all files
with zipfile.ZipFile(output_file_name, "w", zipfile.ZIP_DEFLATED) as zf:
for filename in file_list:
zf.write(filename=os.path.abspath(filename), arcname=os.path.basename(filename))
def run_with_dialog(parser, config, logger):
# type: (Parser, Config, Logger) -> None
def save_config(dialog_panel):
config.set_from_dialog(dialog_panel)
config.save()
config.load_from_ini()
dlg = SettingsDialog(
config_save_func=save_config,
file_name_format_hint=config.FILE_NAME_FORMAT_HINT,
version=config.version
)
try:
config.transfer_to_dialog(dlg.panel)
if dlg.ShowModal() == wx.ID_OK:
config.set_from_dialog(dlg.panel)
main(parser, config, logger)
finally:
dlg.Destroy() | 2.03125 | 2 |
train_PFSeg.py | Dootmaan/PFSeg | 7 | 12797607 | import torch as pt
import numpy as np
from model.PFSeg import PFSeg3D
from medpy.metric.binary import jc,hd95
from dataset.GuidedBraTSDataset3D import GuidedBraTSDataset3D
# from loss.FALoss3D import FALoss3D
import cv2
from loss.TaskFusionLoss import TaskFusionLoss
from loss.DiceLoss import BinaryDiceLoss
from config import config
import argparse
from tqdm import tqdm
# from tensorboardX import SummaryWriter
crop_size=config.crop_size
size=crop_size[2]
img_size=config.input_img_size
parser = argparse.ArgumentParser(description='Patch-free 3D Medical Image Segmentation.')
parser.add_argument('-dataset_path',type=str,default='/newdata/why/BraTS20',help='path to dataset')
parser.add_argument('-model_save_to',type=str,default='.',help='path to output')
parser.add_argument('-bs', type=int, default=1, help='input batch size')
parser.add_argument('-epoch', type=int, default=100, help='number of epochs')
parser.add_argument('-lr', type=float, default=0.0001, help='learning rate')
parser.add_argument('-w_sr', type=float, default=0.5, help='w_sr of the lossfunc')
parser.add_argument('-w_tf', type=float, default=0.5, help='w_tf of the lossfunc')
parser.add_argument('-load_pretrained',type=str,default='',help='load a pretrained model')
parser.add_argument('-v', help="increase output verbosity", action="store_true")
args = parser.parse_args()
dataset_path=args.dataset_path
lr=args.lr
epoch=args.epoch
batch_size=args.bs
model_path=args.model_save_to
w_sr=args.w_sr
w_tf=args.w_tf
pretrained_model=args.load_pretrained
print(args)
model=PFSeg3D(in_channels=1,out_channels=1).cuda()
if pt.cuda.device_count()>1:
if batch_size<pt.cuda.device_count():
batch_size=pt.cuda.device_count()
print('Batch size has to be larger than GPU#. Set to {:d} instead.'.format(batch_size))
model=pt.nn.DataParallel(model)
if not pretrained_model=='':
model.load_state_dict(pt.load(pretrained_model,map_location = 'cpu'))
trainset=GuidedBraTSDataset3D(dataset_path,mode='train')
valset=GuidedBraTSDataset3D(dataset_path,mode='val')
testset=GuidedBraTSDataset3D(dataset_path,mode='test')
train_dataset=pt.utils.data.DataLoader(trainset,batch_size=batch_size,shuffle=True,drop_last=True)
val_dataset=pt.utils.data.DataLoader(valset,batch_size=1,shuffle=True,drop_last=True)
test_dataset=pt.utils.data.DataLoader(testset,batch_size=1,shuffle=True,drop_last=True)
lossfunc_sr=pt.nn.MSELoss()
lossfunc_seg=pt.nn.BCELoss()
lossfunc_dice=BinaryDiceLoss()
lossfunc_pf=TaskFusionLoss()
optimizer = pt.optim.Adam(model.parameters(), lr=lr)
# # scheduler = pt.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99)
scheduler=pt.optim.lr_scheduler.ReduceLROnPlateau(optimizer,mode='max',patience=20)
def ValModel():
model.eval()
dice_sum=0
hd_sum=0
jc_sum=0
weight_map=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
for a in range(0,img_size[0]-crop_size[0]+1,crop_size[0]//2): # overlap0.5
for b in range(0,img_size[1]-crop_size[1]+1,crop_size[1]//2):
for c in range(0,img_size[2]-crop_size[2]+1,crop_size[2]//2):
weight_map[:,:,(2*a):(2*(a+crop_size[0])),(2*b):(2*(b+crop_size[1])),(2*c):(2*(c+crop_size[2]))]+=1
weight_map=1./weight_map
for i,data in enumerate(val_dataset):
output_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
label_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
(inputs,labels,_,guidance,mask)=data
labels3D = pt.autograd.Variable(labels).type(pt.FloatTensor).cuda().unsqueeze(1)
guidance = pt.autograd.Variable(guidance).type(pt.FloatTensor).cuda().unsqueeze(1)
mask = pt.autograd.Variable(mask).type(pt.FloatTensor).cuda().unsqueeze(1)
for a in range(0,img_size[0]-crop_size[0]+1,crop_size[0]//2): # overlap0.5
for b in range(0,img_size[1]-crop_size[1]+1,crop_size[1]//2):
for c in range(0,img_size[2]-crop_size[2]+1,crop_size[2]//2):
inputs3D = pt.autograd.Variable(inputs[:,a:(a+crop_size[0]),b:(b+crop_size[1]),c:(c+crop_size[2])]).type(pt.FloatTensor).cuda().unsqueeze(1)
with pt.no_grad():
outputs3D,_ = model(inputs3D,guidance)
outputs3D=np.array(outputs3D.cpu().data.numpy())
output_list[:,:,(2*a):(2*(a+crop_size[0])),(2*b):(2*(b+crop_size[1])),(2*c):(2*(c+crop_size[2]))]+=outputs3D
label_list=np.array(labels3D.cpu().data.numpy())
output_list=np.array(output_list)*weight_map
output_list[output_list<0.5]=0
output_list[output_list>=0.5]=1
pr_sum = output_list.sum()
gt_sum = label_list.sum()
pr_gt_sum = np.sum(output_list[label_list == 1])
dice = 2 * pr_gt_sum / (pr_sum + gt_sum)
dice_sum += dice
if args.v:
final_img=np.zeros(shape=(2*img_size[1],2*2*img_size[2]))
final_img[:,:2*img_size[2]]=output_list[0,0,64,:,:]*255
final_img[:,2*img_size[2]:]=label_list[0,0,64,:,:]*255
cv2.imwrite('ValPhase_BraTS.png',final_img)
print("dice:",dice)
hausdorff=hd95(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
jaccard=jc(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
hd_sum+=hausdorff
jc_sum+=jaccard
print("Finished. Total dice: ",dice_sum/len(val_dataset),'\n')
print("Finished. Avg Jaccard: ",jc_sum/len(val_dataset))
print("Finished. Avg hausdorff: ",hd_sum/len(val_dataset))
return dice_sum/len(val_dataset)
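# Note on the sliding-window weighting above (explanatory comment, not original
# code): every upscaled voxel accumulates one prediction per overlapping crop,
# and weight_map counts those overlaps, so multiplying the summed output by
# 1/weight_map averages the overlapping predictions. A voxel covered by four
# half-overlapping patches, for instance, ends up with weight 0.25 on the sum
# of its four predictions.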
def TestModel():
model.eval()
dice_sum=0
hd_sum=0
jc_sum=0
weight_map=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
for a in range(0,img_size[0]-crop_size[0]+1,crop_size[0]//2): # overlap0.5
for b in range(0,img_size[1]-crop_size[1]+1,crop_size[1]//2):
for c in range(0,img_size[2]-crop_size[2]+1,crop_size[2]//2):
weight_map[:,:,(2*a):(2*(a+crop_size[0])),(2*b):(2*(b+crop_size[1])),(2*c):(2*(c+crop_size[2]))]+=1
weight_map=1./weight_map
for i,data in enumerate(test_dataset):
output_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
label_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
(inputs,labels,_,guidance,mask)=data
labels3D = pt.autograd.Variable(labels).type(pt.FloatTensor).cuda().unsqueeze(1)
guidance = pt.autograd.Variable(guidance).type(pt.FloatTensor).cuda().unsqueeze(1)
mask = pt.autograd.Variable(mask).type(pt.FloatTensor).cuda().unsqueeze(1)
for a in range(0,img_size[0]-crop_size[0]+1,crop_size[0]//2): # overlap0.5
for b in range(0,img_size[1]-crop_size[1]+1,crop_size[1]//2):
for c in range(0,img_size[2]-crop_size[2]+1,crop_size[2]//2):
inputs3D = pt.autograd.Variable(inputs[:,a:(a+crop_size[0]),b:(b+crop_size[1]),c:(c+crop_size[2])]).type(pt.FloatTensor).cuda().unsqueeze(1)
with pt.no_grad():
outputs3D,_ = model(inputs3D,guidance)
outputs3D=np.array(outputs3D.cpu().data.numpy())
output_list[:,:,(2*a):(2*(a+crop_size[0])),(2*b):(2*(b+crop_size[1])),(2*c):(2*(c+crop_size[2]))]+=outputs3D
label_list=np.array(labels3D.cpu().data.numpy())
output_list=np.array(output_list)*weight_map
output_list[output_list<0.5]=0
output_list[output_list>=0.5]=1
final_img=np.zeros(shape=(2*img_size[1],2*2*img_size[2]))
final_img[:,:2*img_size[2]]=output_list[0,0,64,:,:]*255
final_img[:,2*img_size[2]:]=label_list[0,0,64,:,:]*255
cv2.imwrite('TestPhase_BraTS.png',final_img)
pr_sum = output_list.sum()
gt_sum = label_list.sum()
pr_gt_sum = np.sum(output_list[label_list == 1])
dice = 2 * pr_gt_sum / (pr_sum + gt_sum)
dice_sum += dice
hausdorff=hd95(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
jaccard=jc(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
hd_sum+=hausdorff
jc_sum+=jaccard
print("Finished. Test Total dice: ",dice_sum/len(test_dataset),'\n')
print("Finished. Test Avg Jaccard: ",jc_sum/len(test_dataset))
print("Finished. Test Avg hausdorff: ",hd_sum/len(test_dataset))
return dice_sum/len(test_dataset)
best_dice=0
for x in range(epoch):
model.train()
loss_sum=0
print('\n==>Epoch',x,': lr=',optimizer.param_groups[0]['lr'],'==>\n')
iterator=tqdm(train_dataset, ncols=100)
for data in iterator:
(inputs,labels_seg,labels_sr,guidance,mask)=data
optimizer.zero_grad()
inputs = pt.autograd.Variable(inputs).type(pt.FloatTensor).cuda().unsqueeze(1)
guidance = pt.autograd.Variable(guidance).type(pt.FloatTensor).cuda().unsqueeze(1)
mask = pt.autograd.Variable(mask).type(pt.FloatTensor).cuda().unsqueeze(1)
labels_seg = pt.autograd.Variable(labels_seg).type(pt.FloatTensor).cuda().unsqueeze(1)
labels_sr = pt.autograd.Variable(labels_sr).type(pt.FloatTensor).cuda().unsqueeze(1)
outputs_seg,outputs_sr = model(inputs,guidance)
loss_seg = lossfunc_seg(outputs_seg, labels_seg)
loss_sr = lossfunc_sr(outputs_sr, labels_sr)
loss_pf = lossfunc_pf(outputs_seg,outputs_sr,labels_seg*labels_sr)
loss_guide=lossfunc_sr(mask*outputs_sr,mask*labels_sr)
loss=lossfunc_dice(outputs_seg,labels_seg)+loss_seg+w_sr*(loss_sr+loss_guide)+w_tf*loss_pf
loss.backward()
optimizer.step()
loss_sum+=loss.item()
if args.v:
final_img=np.zeros(shape=(2*size,2*size*5))
iterator.set_postfix(loss=loss.item(),loss_seg=loss_seg.item(),loss_sr=loss_sr.item())
final_img[:,0:(2*size)]=outputs_seg.cpu().data.numpy()[0,0,size//2,:,:]*255
final_img[:,(2*size):(4*size)]=outputs_sr.cpu().data.numpy()[0,0,size//2,:,:]*255
final_img[:,(4*size):(6*size)]=labels_seg.cpu().data.numpy()[0,0,size//2,:,:]*255
final_img[:,(6*size):(8*size)]=labels_sr.cpu().data.numpy()[0,0,size//2,:,:]*255
final_img[:,(8*size):]=cv2.resize(inputs.cpu().data.numpy()[0,0,size//4,:,:],((2*size),(2*size)))*255
cv2.imwrite('combine.png',final_img)
print('==>End of epoch',x,'==>\n')
print('===VAL===>')
dice=ValModel()
scheduler.step(dice)
if dice>best_dice:
best_dice=dice
print('New best dice! Model saved to',model_path+'/PFSeg_3D_BraTS_patch-free_bs'+str(batch_size)+'_best.pt')
pt.save(model.state_dict(), model_path+'/PFSeg_3D_BraTS_patch-free_bs'+str(batch_size)+'_best.pt')
print('===TEST===>')
TestModel()
print('\nBest Dice:',best_dice) | 1.75 | 2 |
pterasoftware/__init__.py | camUrban/PteraSoftware | 68 | 12797608 | <gh_stars>10-100
# ToDo: Update this module's documentation.
"""This package contains all the source code for the Ptera Software.
This package contains the following subpackages:
None
This package contains the following directories:
airfoils: This folder contains a collection of airfoils whose coordinates are
stored in DAT files.
This package contains the following modules:
__init__.py: This module is this package's initialization script.
aerodynamics.py: This module contains vortex class definitions.
functions.py: This module contains functions used by other modules in the
pterasoftware package.
geometry.py: This module contains useful functions that relate to geometry,
and the class definitions for different types of geometries.
meshing.py: This module contains useful functions for creating meshes.
output.py: This module contains useful functions for visualizing solutions to
problems.
movement.py: This module contains the class definitions for the problem's movement.
current_operating_point.py: This module contains the class definition for the
problem's operating point.
problems.py: This module contains the class definitions for different types of
problems.
steady_horseshoe_vortex_lattice_method.py: This module contains the class
definition of this package's steady horseshoe vortex lattice solver.
steady_ring_vortex_lattice_method.py: This module contains the class definition
of this package's steady ring vortex lattice solver.
unsteady_ring_vortex_lattice_method.py: This module contains the class definition
of this package's unsteady ring vortex lattice solver.
"""
import pterasoftware.aerodynamics
import pterasoftware.airfoils
import pterasoftware.geometry
import pterasoftware.meshing
import pterasoftware.movement
import pterasoftware.operating_point
import pterasoftware.output
import pterasoftware.problems
import pterasoftware.steady_horseshoe_vortex_lattice_method
import pterasoftware.steady_ring_vortex_lattice_method
import pterasoftware.unsteady_ring_vortex_lattice_method
| 2.109375 | 2 |
task_manager/tasks/filters.py | kunatastic/kunatastic-task-manager | 0 | 12797609 | <reponame>kunatastic/kunatastic-task-manager
from django_filters.filters import ChoiceFilter, DateFilter
from tasks.models import STATUS_CHOICES, Task, History
from django_filters.rest_framework import FilterSet
from django.db.models import Q
STATUS_CHOICES_CUSTOM = (
("COMPLETED","COMPLETED"),
("NOT_COMPLETED","NOT_COMPLETED")
)
class HistoryFilter(FilterSet):
created_date = DateFilter(method='custom_date_filter')
class Meta:
model = History
fields = ['status_current', 'status_previous', 'created_date']
def custom_date_filter(self, queryset, name, value):
return queryset.filter(
updated_date__year=value.year,
updated_date__month=value.month,
updated_date__day=value.day,
)
class TaskFilter(FilterSet):
status = ChoiceFilter(method='completed_custom_filter', choices=STATUS_CHOICES_CUSTOM)
class Meta:
model = Task
fields = ['status']
def completed_custom_filter(self, queryset, name, value):
if value == "COMPLETED":
return queryset.filter(status="COMPLETED")
elif value == "NOT_COMPLETED":
return queryset.filter(~Q(status="COMPLETED"))
| 2.296875 | 2 |
django_ember_toolkit/management/commands/_base.py | ForSpareParts/django_ember_toolkit | 0 | 12797610 | from os.path import abspath, join
import subprocess
from django.apps import apps as django_apps
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.template.loader import render_to_string
from termcolor import colored
SEPARATOR = '---------------------------------------------------------------'
# settings with a default of None are required
DEFAULT_SETTINGS = {
'EMBER_APP_NAME': None,
'API_PATH': None,
'EMBER_APP_PATH': 'client',
'MODELS_TO_SYNC': None
}
class EmberCommand(BaseCommand):
@classmethod
def get_setting(cls, key):
'''Get a setting from the user's project by key, falling back on the default
if there's no setting available.'''
return settings.EMBER_TOOLKIT.get(key, DEFAULT_SETTINGS[key])
@classmethod
def get_full_ember_path(cls):
'''Return the full, absolute path to the project's Ember app.'''
return abspath(join(
settings.BASE_DIR,
cls.get_setting('EMBER_APP_PATH')))
def notify(self, some_text):
self.stdout.write(SEPARATOR)
self.stdout.write(some_text)
self.stdout.write(SEPARATOR)
@classmethod
def assert_required_settings(cls, *args):
'''Raise a useful error if any of args are not configured in
settings.EMBER_TOOLKIT'''
if not hasattr(settings, 'EMBER_TOOLKIT'):
raise CommandError('You must define an EMBER_TOOLKIT dict in settings')
missing_settings = []
for key in args:
if cls.get_setting(key) is None:
missing_settings.append(key)
if missing_settings:
raise CommandError(
'settings.EMBER_TOOLKIT is missing the following keys: ' +
', '.join(missing_settings))
def run_ember_command(self, cmd_name, *args, **kwargs):
'''Run the named ember in the project's FULL_EMBER_PATH. Any args and kwargs
will be converted into positional and named arguments respectively
(booleans are assumed to be "boolean named arguments")
e.g.: run_ember_command('generate', 'route', 'foobar', pod=True)
becomes: ember generate route foobar --pod
'''
command = ['ember', cmd_name] + list(args)
for key, value in kwargs.items():
# in the unlikely case we pass None or False, just omit the kwarg
if value:
command.append('--' + key)
if value is not True:
command.append("'{}'".format(value))
self.notify('Running {}...'.format(colored(' '.join(command), 'green')))
subprocess.check_call(command, cwd=self.get_full_ember_path())
@classmethod
def write_initial_config(cls):
'''Generate an Ember config file with support for backend
"autoconfiguration" at the given path.'''
config_source = render_to_string(
'django_ember_toolkit/environment.js',
{'app_name': cls.get_setting('EMBER_APP_NAME')})
config_path = join(cls.get_full_ember_path(), 'config/environment.js')
with open(config_path, 'w') as config_file:
config_file.write(config_source)
@classmethod
def get_sync_model_set(cls):
'''Return a set containing the actual Model class objects that are
specified by MODELS_TO_SYNC.'''
model_name_set = set(cls.get_setting('MODELS_TO_SYNC'))
model_set = set()
for app_config in django_apps.get_app_configs():
for Model in app_config.get_models():
key = Model._meta.app_label + '.' + Model.__name__
app_star = Model._meta.app_label + '.*'
if key in model_name_set or app_star in model_name_set:
model_set.add(Model)
return model_set
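# Configuration sketch (hypothetical app and model names): in settings.py,
# EMBER_TOOLKIT = {
#     'EMBER_APP_NAME': 'frontend',
#     'API_PATH': '/api/',
#     'MODELS_TO_SYNC': ['blog.Post', 'accounts.*'],
# }
# where 'accounts.*' matches every model of the accounts app via the app_star
# check in get_sync_model_set() above.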
| 2.203125 | 2 |
django/website/wagtail_vue/apps/pages/models.py | hyshka/wagtail-vue-talk | 26 | 12797611 | <reponame>hyshka/wagtail-vue-talk
# -*- coding: utf-8 -*-
"""Page models."""
from django.db import models
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.core.models import Page
from wagtail.api import APIField
from wagtail.images.api.fields import ImageRenditionField
from wagtail.core.fields import StreamField
from .streamfields import ContentBlock, ImageGalleryBlock, CallToActionBlock
class HomePage(Page):
"""A home page class."""
template = "cms/pages/home_page.html"
subpage_types = ['pages.FlexPage']
banner_subtitle = models.CharField(
max_length=50, blank=True, null=True, help_text="An optional banner subtitle"
)
banner_image = models.ForeignKey(
"wagtailimages.Image",
null=True,
blank=False,
on_delete=models.SET_NULL,
related_name="+",
help_text="An optional banner image",
)
content = StreamField([
('ContentBlock', ContentBlock()),
('ImageGalleryBlock', ImageGalleryBlock()),
('CallToActionBlock', CallToActionBlock()),
], null=True, blank=True)
content_panels = [
FieldPanel("title", classname="full title"),
ImageChooserPanel("banner_image"),
FieldPanel("banner_subtitle"),
StreamFieldPanel('content'),
]
api_fields = [
APIField("title"),
APIField("banner_subtitle"),
APIField("banner_image"),
APIField("banner_image_thumbnail", serializer=ImageRenditionField("fill-100x100", source="banner_image")),
APIField("content"),
]
class Meta:
"""Meta information."""
verbose_name = "Home Page"
verbose_name_plural = "Home Pages"
class FlexPage(Page):
"""A Flexible page class. Used for generic pages that don't have a true purpose."""
template = "cms/pages/flex_page.html"
subpage_types = []
content = StreamField([
('ContentBlock', ContentBlock()),
('ImageGalleryBlock', ImageGalleryBlock()),
('CallToActionBlock', CallToActionBlock()),
], null=True, blank=True)
content_panels = [
FieldPanel("title", classname="full title"),
StreamFieldPanel('content'),
]
api_fields = [
APIField("title"),
APIField("content"),
]
class Meta:
"""Meta information."""
verbose_name = "Flex Page"
verbose_name_plural = "Flex Pages"
| 1.945313 | 2 |
exercise12/lib/ProtocolUtils.py | jdiegoh3/distributed_computing | 0 | 12797612 | <gh_stars>0
from xmlrpc import client
from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.server import SimpleXMLRPCRequestHandler
import threading
import socketserver
class MessageHandler(object):
body = None
def __init__(self, message):
self.body = message.decode("utf-8")
def message_loads(self):
if self.body:
result = self.body.split("|")
return result
class MessageBuilder(object):
operand1 = None
operand2 = None
operation = None
def __init__(self, num1=None, num2=None, op=None):
self.operand1 = float(num1) if num1 is not None else None
self.operand2 = float(num2) if num2 is not None else None
self.operation = op
def get_operands(self):
try:
self.operand1 = float(self.operand1)
self.operand2 = float(self.operand2)
except ValueError:
print("Not be numbers")
return self.operand1, self.operand2
def message_builder(self):
if self.operand1 and self.operand2 and self.operation:
result = str(self.operand1) + "|" + str(self.operation) + "|" + str(self.operand2)
return result
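# Round-trip sketch of the "operand|operation|operand" wire format
# (illustrative operands):
# >>> MessageBuilder(2, 3, '+').message_builder()
# '2.0|+|3.0'
# >>> MessageHandler(b'2.0|+|3.0').message_loads()
# ['2.0', '+', '3.0']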
class SimpleThreadedXMLRPCServer(socketserver.ThreadingMixIn, SimpleXMLRPCServer):
pass
class ServerThread(threading.Thread):
def __init__(self, address, port):
threading.Thread.__init__(self)
self.local_server = SimpleThreadedXMLRPCServer((address, port))
def register_class_functions(self, class_instance):
self.local_server.register_instance(class_instance)
def register_function(self, function):
self.local_server.register_function(function)
def run(self):
self.local_server.serve_forever()
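# Wiring sketch (hypothetical host/port, kept in comments so nothing runs at
# import time):
# >>> def add(a, b): return a + b
# >>> server = ServerThread('localhost', 8000)
# >>> server.register_function(add)
# >>> server.start()
# >>> ClientThread('http://localhost:8000').get_client().add(1, 2)
# 3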
class ClientThread(threading.Thread):
def __init__(self, address):
threading.Thread.__init__(self)
self.local_client = client.ServerProxy(address)
def get_client(self):
return self.local_client
def call_function(self, arg1, arg2):
return self.local_client.function(arg1, arg2)
def run(self):
pass | 2.9375 | 3 |
src/documentation_builder/test/sourcehandler.py | jouvin/release | 0 | 12797613 | <reponame>jouvin/release
"""Test class for sourcehandler."""
import os
import sys
import shutil
from tempfile import mkdtemp
from unittest import TestCase, main, TestLoader
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../lib'))) # noqa
from quattordocbuild import sourcehandler
class SourcehandlerTest(TestCase):
"""Test class for sourcehandler."""
def setUp(self):
"""Set up temp dir for tests."""
self.tmpdir = mkdtemp()
def tearDown(self):
"""Remove temp dir."""
shutil.rmtree(self.tmpdir)
def test_maven_clean_compile(self):
"""Test maven_clean_compile."""
repoloc = os.path.join(self.tmpdir, "test")
os.makedirs(repoloc)
# test if it fails on a empty dir
self.assertNotEqual(sourcehandler.maven_clean_compile(repoloc), 0)
# test if it can run a basic pom.xml and return 0
file = open(os.path.join(repoloc, "pom.xml"), "w")
file.write('<project><modelVersion>4.0.0</modelVersion><groupId>test</groupId>')
file.write('<artifactId>test</artifactId><version>1</version></project>')
file.close()
self.assertEqual(sourcehandler.maven_clean_compile(repoloc), 0)
def test_is_wanted_file(self):
"""Test is_wanted_file function."""
# Test valid extensions
for extension in ['pod', 'pm', 'pl', 'pan']:
testfile = "test.%s" % extension
self.assertTrue(sourcehandler.is_wanted_file('', testfile))
# Test invalid extensions
for extension in ['tpl', 'txt', 'xml']:
testfile = "test.%s" % extension
self.assertFalse(sourcehandler.is_wanted_file('', testfile))
# Test valid shebang
testfilename = "test"
file = open(os.path.join(self.tmpdir, testfilename), "w")
file.write('#!/usr/bin/perl\n')
file.close()
self.assertTrue(sourcehandler.is_wanted_file(self.tmpdir, testfilename))
# Test invalid shebang
file = open(os.path.join(self.tmpdir, testfilename), "w")
file.write('#!/usr/bin/python\n')
file.close()
self.assertFalse(sourcehandler.is_wanted_file(self.tmpdir, testfilename))
def test_is_wanted_dir(self):
"""Test is_wanted_dir function."""
# Test we get False if target is not in the given path
self.assertFalse(sourcehandler.is_wanted_dir('/bogusdir/test', []))
# Test for False on empty fileslist
self.assertFalse(sourcehandler.is_wanted_dir('/tmp/target/test/', []))
# Test for wrong subdir
self.assertFalse(sourcehandler.is_wanted_dir('/tmp/target/test/', ['schema.pan']))
# Test for a correct path
self.assertTrue(sourcehandler.is_wanted_dir('/tmp/target/doc/pod', ['schema.pan']))
def test_handle_duplicates(self):
"""Test handle_duplicates function."""
testperlfile = 'test/lib/perl/test.pm'
testpodfile = 'test/doc/pod/test.pod'
# Add a correct item to an empty list
self.assertEquals(sourcehandler.handle_duplicates('test.pm', testperlfile, []),
['test/lib/perl/test.pm'])
# Add a pod file when a pm file is in the list, get a list with only the pod file back
self.assertEquals(sourcehandler.handle_duplicates('test.pod', testpodfile, [testperlfile]), [testpodfile])
# Add a pm file when a pod file is in the list, get a list with only the pod file back
self.assertEquals(sourcehandler.handle_duplicates('test.pm', testperlfile, [testpodfile]), [testpodfile])
def test_list_source_files(self):
"""Test list_source_files function."""
# Test a bogus dir
self.assertEquals(sourcehandler.list_source_files(self.tmpdir), [])
# Test a correct dir
testfile = 'test.pod'
fulltestdir = os.path.join(self.tmpdir, 'target/doc/pod')
os.makedirs(fulltestdir)
file = open(os.path.join(fulltestdir, testfile), 'w')
file.write("test\n")
file.close()
self.assertEquals(sourcehandler.list_source_files(fulltestdir), [os.path.join(fulltestdir, testfile)])
def test_get_source_files(self):
"""Test get_source_files function."""
self.assertEquals(sourcehandler.get_source_files(self.tmpdir, False), [])
self.assertFalse(sourcehandler.get_source_files(self.tmpdir, True))
def suite(self):
"""Return all the testcases in this module."""
return TestLoader().loadTestsFromTestCase(SourcehandlerTest)
if __name__ == '__main__':
main()
| 2.421875 | 2 |
azaka/commands/condition.py | mooncell07/Azaka | 12 | 12797614 | <filename>azaka/commands/condition.py<gh_stars>10-100
from __future__ import annotations
import typing as t
from .proxy import _ConditionProxy
from ..objects import UlistLabels
if t.TYPE_CHECKING:
from ..interface import T
__all__ = (
"VNCondition",
"BaseCondition",
"ReleaseCondition",
"ProducerCondition",
"CharacterCondition",
"StaffCondition",
"QuoteCondition",
"UserCondition",
"UlistLabelsCondition",
"UlistCondition",
"_condition_selector",
)
class Operator:
"""
An object for storing operators for XCondition attributes to check condition support.
Warning:
This object is not meant to be created by users.
"""
__slots__ = ("symbols",)
def __init__(self, *symbols: str) -> None:
"""
Operator constructor.
Args:
*symbols (str): The symbols of the operator.
Attributes:
symbols (t.Tuple[str]): The symbols of the operator.
"""
self.symbols = symbols
@classmethod
def fill_some(cls, *symbols: str) -> Operator:
"""
A factory method for creating an Operator object with some symbols.
Args:
*symbols (str): The additional symbols of the operator.
Returns:
Operator: The created Operator object.
Info:
This method fills the `=` and `!=` symbols.
"""
return cls("=", "!=", *symbols)
@classmethod
def fill_all(cls, *symbols: str) -> Operator:
"""
A factory method for creating an Operator object with all symbols.
Args:
*symbols (str): The additional symbols of the operator.
Returns:
Operator: The created Operator object.
Info:
This method fills the `=`, `!=`, `>`, `<`, `>=`, `<=` symbols.
"""
return cls("=", "!=", ">", ">=", "<", "<=", *symbols)
class BaseCondition:
"""
A base class storing the comman condition attributes.
Tip:
`ALL` below means all operators (`==`, `!=`, `>`, `<`, `>=`, `<=`) are supported.
`SOME` means only operators (`==`, `!=`) are supported.
`SOME + X` means `SOME` and `X` operators are supported.
For example:
`|BaseCondition.ID| ALL |` supports (`==`, `!=`, `>`, `<`, `>=`, `<=`) operators.
`|BaseCondition.ID_ARRAY| SOME |` supports only (`==`, `!=`) operators.
`|UserCondition.USERNAME| SOME + (%)|` supports (`==`, `!=`, `%`) operators.
If there is neither `ALL` nor `SOME` in the condition but an operator is specified, then that means
only that operator is supported.
I hope you understand the above. :)
Tip:
`Field Value Type` means the type of value against which the field should be conditioned.
Tip:
All `X_ARRAY` fields must be conditioned against an Iterable of values and
these fields yield an iterable of objects which match the values from the API.
| Field | Field Value Type | Operations Supported | Description |
|----------|-----------------------------------|----------------------|--------------------------------|
| ID | [int][] | ALL | Filter using an `ID` |
| ID_ARRAY | A [typing.Iterable][] of [int][]s | SOME | Filter using an array of `ID`s.|
""" # noqa: E501
ID: t.Final[_ConditionProxy] = _ConditionProxy("id", operator=Operator.fill_all())
ID_ARRAY: t.Final[_ConditionProxy] = _ConditionProxy(
"id", operator=Operator.fill_some()
)
__slots__ = ()
class VNCondition(BaseCondition):
"""
A class storing all the attributes `VN` type supports as condition.
Hint:
Check the `BaseCondition` class for more information.
| Attribute | Field Value Type | Operations Supported | Description |
|-----------------|-----------------------------------|----------------------|-------------------------------------------------------------------------------------------------------|
| TITLE | [str][] | *SOME + (%)* | Filter using the TITLE Field. |
| PLATFORMS | [None][] or [str][] | *SOME* | Filter using the PLATFORMS field. |
| PLATFORMS_ARRAY | A [typing.Iterable][] of [str][]s | *SOME* | Filter using an array of PLATFORMS. |
| RELEASED | [None][] | *SOME* | Filter using a `None` value for `RELEASED`. |
| RELEASED_DATE | date | *ALL* | Filter using the release date of the VN. |
| LANGUAGES | [None][] or [str][] | *SOME* | Filter using the language, the VN is available in. |
| LANGUAGES_ARRAY | A [typing.Iterable][] of [str][]s | *SOME* | Filter using the array of languages, the VN is available in. |
| FIRST_CHAR | [None][] or [str][] | *SOME* | Filter using the first character of the VN or None to match all the vn not starting with an alphabet. |
| ORIG_LANG | [str][] | *SOME* | Filter using the original language of the VN. |
| ORIG_LANG_ARRAY | A [typing.Iterable][] of [str][]s | *SOME* | Filter using an array of the original languages of the VN. |
| SEARCH | [str][] | *(%)* | Search for the VN using it's title and releases. |
| TAGS | [int][] | *SOME* | Find VNs by tag. |
| TAGS_ARRAY | A [typing.Iterable][] of [int][]s | *SOME* | Find VNs using an array of tags.
""" # noqa: E501
TITLE: t.Final[_ConditionProxy] = _ConditionProxy(
"title", operator=Operator.fill_some("~")
)
PLATFORMS: t.Final[_ConditionProxy] = _ConditionProxy(
"platforms", operator=Operator.fill_some()
)
PLATFORMS_ARRAY: t.Final[_ConditionProxy] = PLATFORMS
RELEASED: t.Final[_ConditionProxy] = _ConditionProxy(
"released", operator=Operator.fill_some()
)
RELEASED_DATE: t.Final[_ConditionProxy] = _ConditionProxy(
"released", operator=Operator.fill_all()
)
LANGUAGES: t.Final[_ConditionProxy] = _ConditionProxy(
"languages", operator=Operator.fill_some()
)
LANGUAGES_ARRAY: t.Final[_ConditionProxy] = LANGUAGES
FIRSTCHAR: t.Final[_ConditionProxy] = _ConditionProxy(
"firstchar", operator=Operator.fill_some()
)
ORIG_LANG: t.Final[_ConditionProxy] = _ConditionProxy(
"orig_lang", operator=Operator.fill_some()
)
ORIG_LANG_ARRAY: t.Final[_ConditionProxy] = ORIG_LANG
SEARCH: t.Final[_ConditionProxy] = _ConditionProxy("search", operator=Operator("~"))
TAGS: t.Final[_ConditionProxy] = _ConditionProxy("tags", Operator.fill_some())
TAGS_ARRAY: t.Final[_ConditionProxy] = TAGS
__slots__ = ()
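# Usage sketch (hedged: assumes _ConditionProxy maps Python's == and % operators
# onto the '=' and '~' filters listed in the tables; the VN id and title are
# illustrative values only):
# >>> VNCondition.ID == 17          # exact-ID filter
# >>> VNCondition.TITLE % "fate"    # fuzzy title search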
class ReleaseCondition(BaseCondition):
"""
A class storing all the attributes `Release` type supports as condition.
Hint:
Check the `BaseCondition` class for more information.
| Attribute | Field Value Type | Operations Supported | Description |
|-----------------|-----------------------------------|----------------------|----------------------------------------------------------------------------------------------|
| VN | [int][] | *ALL* | Find releases linked to the given visual novel ID. |
| VN_ARRAY | A [typing.Iterable][] of [int][]s | *SOME* | Find all the releases linked to the given visual novel IDs in the array. |
| PRODUCER | [int][] | *(==)* | Find releases linked to the given producer ID. |
| TITLE | [str][] | *SOME + (%)* | Find the release using the title. |
| ORIGINAL | [None][] or [str][] | *SOME + (%)* | Find the release using the original/official title. (`%` operation not supported for `None`) |
| RELEASED | [None][] | *SOME* | Filter using a `None` value for `RELEASED`. |
| RELEASED_DATE | [datetime.date][] | *ALL* | Filter using the release date of the VN. |
| PATCH | [bool][] | *(==)* | Check if the release is a patch. |
| FREEWARE | [bool][] | *(==)* | Check if the release is a freeware. |
| DOUJIN | [bool][] | *(==)* | Check if the release is a doujin. |
| TYPE | [str][] | *SOME* | Filter using the type of release. |
| GTIN | [int][] | *SOME* | Filter using the JAN/UPC/EAN code. |
| CATALOG | [str][] | *SOME* | Filter using the Catalog number. |
| LANGUAGES | [str][] | *SOME* | Filter using the language, the release is available in. |
| LANGUAGES_ARRAY | A [typing.Iterable][] of [str][]s | *SOME* | Filter using the array of languages, the release is available in. |
| PLATFORMS | [str][] | *SOME* | Filter using an array of PLATFORMS. |
| PLATFORMS_ARRAY | A [typing.Iterable][] of [str][]s | *SOME* | Filter using an array of PLATFORMS. |
""" # noqa: E501
VN: t.Final[_ConditionProxy] = _ConditionProxy("vn", operator=Operator.fill_some())
VN_ARRAY: t.Final[_ConditionProxy] = VN
PRODUCER: t.Final[_ConditionProxy] = _ConditionProxy(
"producer", operator=Operator("=")
)
TITLE: t.Final[_ConditionProxy] = _ConditionProxy(
"title", operator=Operator.fill_some("~")
)
ORIGINAL: t.Final[_ConditionProxy] = _ConditionProxy(
"original", operator=Operator.fill_some("~")
)
RELEASED: t.Final[_ConditionProxy] = _ConditionProxy(
"date", operator=Operator.fill_some()
)
RELEASED_DATE: t.Final[_ConditionProxy] = _ConditionProxy(
"date", operator=Operator.fill_all()
)
PATCH: t.Final[_ConditionProxy] = _ConditionProxy("patch", operator=Operator("="))
FREEWARE: t.Final[_ConditionProxy] = _ConditionProxy(
"freeware", operator=Operator("=")
)
DOUJIN: t.Final[_ConditionProxy] = _ConditionProxy("doujin", operator=Operator("="))
TYPE: t.Final[_ConditionProxy] = _ConditionProxy(
"type", operator=Operator.fill_some()
)
GTIN: t.Final[_ConditionProxy] = _ConditionProxy(
"gtin", operator=Operator.fill_some()
)
CATALOG: t.Final[_ConditionProxy] = _ConditionProxy(
"catalog", operator=Operator.fill_some()
)
LANGUAGES: t.Final[_ConditionProxy] = _ConditionProxy(
"languages", operator=Operator.fill_some()
)
LANGUAGES_ARRAY: t.Final[_ConditionProxy] = LANGUAGES
PLATFORMS: t.Final[_ConditionProxy] = _ConditionProxy(
"platforms", operator=Operator.fill_some()
)
PLATFORMS_ARRAY: t.Final[_ConditionProxy] = PLATFORMS
__slots__ = ()
class ProducerCondition(BaseCondition):
"""
A class storing all the attributes `Producer` type supports as condition.
Hint:
Check the `BaseCondition` class for more information.
| Attribute | Field Value Type | Operations Supported | Description |
|-----------------|-----------------------------------|----------------------|-------------------------------------------------------------------------------|
| NAME | [str][] | *SOME + (%)* | Find using name of producer. |
| ORIGINAL | [None][] or [str][] | *SOME + (%)* | Find using original/official name of the producer. Can't use `%` with `None`. |
| TYPE | [str][] | *SOME* | Filter using type of producer. |
| LANGUAGE | [str][] | *SOME* | Filter using language of producer. |
| LANGUAGES_ARRAY | A [typing.Iterable][] of [str][]s | *SOME* | Filter using an array of languages of producer. |
| SEARCH | [str][] | *(%)* | Performs a search on the name, original and aliases fields. |
""" # noqa: E501
NAME: t.Final[_ConditionProxy] = _ConditionProxy(
"name", operator=Operator.fill_some("~")
)
ORIGINAL: t.Final[_ConditionProxy] = _ConditionProxy(
"original", operator=Operator.fill_some("~")
)
TYPE: t.Final[_ConditionProxy] = _ConditionProxy(
"type", operator=Operator.fill_some()
)
LANGUAGE: t.Final[_ConditionProxy] = _ConditionProxy(
"language", operator=Operator.fill_some()
)
LANGUAGES_ARRAY: t.Final[_ConditionProxy] = LANGUAGE
SEARCH: t.Final[_ConditionProxy] = _ConditionProxy("search", operator=Operator("~"))
__slots__ = ()
class CharacterCondition(BaseCondition):
"""
A class storing all the attributes `Character` type supports as condition.
Hint:
Check the `BaseCondition` class for more information.
| Attribute | Field Value Type | Operations Supported | Description |
|--------------|-----------------------------------|----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| NAME | [str] | *SOME + (%)* | Find using name of character. |
| ORIGINAL | [None][] or [str][] | *SOME + (%)* | Find using original/official name of the character. Can't use `%` with `None`. |
| SEARCH | [str][] | *(%)* | Performs a search on the name, original and aliases fields. |
| VN | [int][] | *(==)* | Find characters linked to the given visual novel ID. |
| VN_ARRAY | A [typing.Iterable][] of [int][]s | *(==)* | Find characters linked to the given visual novel ID array. |
| TRAITS | [int][] | *SOME* | Find characters by trait. |
| TRAITS_ARRAY | A [typing.Iterable][] of [int][]s | *SOME* | The `=` filter will return chars that are linked to any (not all) of the given traits, the `!=` filter will return chars that are not linked to any of the given traits. |
""" # noqa: E501
NAME: t.Final[_ConditionProxy] = _ConditionProxy(
"name", operator=Operator.fill_some("~")
)
ORIGINAL: t.Final[_ConditionProxy] = _ConditionProxy(
"original", operator=Operator.fill_some("~")
)
SEARCH: t.Final[_ConditionProxy] = _ConditionProxy("search", operator=Operator("~"))
VN: t.Final[_ConditionProxy] = _ConditionProxy("vn", operator=Operator("="))
VN_ARRAY: t.Final[_ConditionProxy] = VN
TRAITS: t.Final[_ConditionProxy] = _ConditionProxy(
"traits", operator=Operator.fill_some()
)
TRAITS_ARRAY: t.Final[_ConditionProxy] = TRAITS
__slots__ = ()
class StaffCondition(BaseCondition):
"""
A class storing all the attributes `Staff` type supports as condition.
Hint:
Check the `BaseCondition` class for more information.
| Attribute | Field Value Type | Operations Supported | Description |
|-----------|-----------------------------------|----------------------|-------------------------------------------------------------|
| AID | [int][] | *(==)* | Find staff by alias ID. |
| AID_ARRAY | A [typing.Iterable][] of [int][]s | *(==)* | Find staff by an array of alias IDs. |
| SEARCH | [str][] | *(%)* | Performs a search on the name, original and aliases fields. |
""" # noqa: E501
AID: t.Final[_ConditionProxy] = _ConditionProxy("aid", operator=Operator("="))
AID_ARRAY: t.Final[_ConditionProxy] = AID
SEARCH: t.Final[_ConditionProxy] = _ConditionProxy("search", operator=Operator("~"))
__slots__ = ()
class QuoteCondition(BaseCondition):
"""
A class storing all the attributes `Quote` type supports as condition.
Hint:
Check the `BaseCondition` class for more information.
Info:
This one only supports `ID` and `ID_ARRAY` filters of `BaseCondition`.
""" # noqa: E501
__slots__ = ()
class UserCondition(BaseCondition):
"""
A class storing all the attributes `User` type supports as condition.
Hint:
Check the `BaseCondition` class for more information.
| Attribute | Field Value Type | Operations Supported | Description |
|----------------|-----------------------------------|----------------------|----------------------------------------|
| USERNAME | [str][] | *SOME + (%)* | Find user by their username. |
| USERNAME_ARRAY | A [typing.Iterable][] of [str][]s | *(==)* | Find user using an array of usernames. |
""" # noqa: E501
USERNAME: t.Final[_ConditionProxy] = _ConditionProxy(
"username", operator=Operator.fill_some("~")
)
USERNAME_ARRAY: t.Final[_ConditionProxy] = _ConditionProxy(
"username", operator=Operator("=")
)
__slots__ = ()
class UlistLabelsCondition:
"""
A class storing all the attributes `UlistLabels` type supports as condition.
Info:
This class doesn't inherit from `BaseCondition` and doesn't have `ID` and `ID_ARRAY` filters.
| Attribute | Field Value Type | Operations Supported | Description |
|-----------|------------------|----------------------|------------------------------------------------------------------------------------------|
| UID | [int][] | *(==)* | Find using user ID. The special value '0' is recognized as the currently logged in user. |
""" # noqa: E501
UID: t.Final[_ConditionProxy] = _ConditionProxy("uid", operator=Operator("="))
class UlistCondition(UlistLabelsCondition):
"""
A class storing all the attributes `Ulist` type supports as condition.
Hint:
Check the `UlistLabelsCondition` class for more information.
| Attribute | Field Value Type | Operations Supported | Description |
|-----------|-----------------------------------|----------------------|------------------------------------------|
| VN | [int][] | *ALL* | Find by visual novel ID. |
| VN_ARRAY | A [typing.Iterable][] of [int][]s | *SOME* | Find using an array of visual novel IDs. |
| LABEL | [int][] | *(==)* | Label assigned to the VN. |
""" # noqa: E501
VN: t.Final[_ConditionProxy] = _ConditionProxy("vn", operator=Operator.fill_all())
VN_ARRAY: t.Final[_ConditionProxy] = _ConditionProxy(
"vn", operator=Operator.fill_some()
)
LABEL: t.Final[_ConditionProxy] = _ConditionProxy("label", operator=Operator("="))
__slots__ = ()
def _condition_selector(
type: t.Type[T],
):
condition_map = {
"vn": VNCondition,
"release": ReleaseCondition,
"producer": ProducerCondition,
"character": CharacterCondition,
"staff": StaffCondition,
"quote": QuoteCondition,
"user": UserCondition,
"ulist-labels": UlistLabelsCondition,
"ulist": UlistCondition,
}
cls = condition_map[
type.__name__.lower() if type != UlistLabels else "ulist-labels"
]
return cls
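# Illustrative sketch (not part of the original module): the selector maps an API type to
# its condition class, whose proxy attributes are then used to build filters. Only
# UlistLabels is referenced above; other type names are assumptions from the mapping keys.
# cls = _condition_selector(UlistLabels)  # -> UlistLabelsCondition
# proxy = cls.UID                         # condition proxy for the user ID field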
| 2.5 | 2 |
src/encoder.py | Codingboy/edoc | 0 | 12797615 | <gh_stars>0
from random import randint
import unittest
from typing import Dict, Tuple, List
class Encoder:
def __init__(self, pw: str):
password = bytearray()
for c in pw:
password.append(ord(c))
index = 0
while len(password) < 4096:
password.append(ord(pw[index%len(pw)]))
index += 1
self.spBox = SPBox(password)
self.buffer = None
self.seeded = False
def encode(self, plain: bytearray):
returnvalue = bytearray()
if self.buffer is not None:
plain = self.buffer+plain
self.buffer = None
if (not self.seeded):
ba = self.spBox.getSeed()
returnvalue.extend(ba)
self.seeded = True
while len(plain) >= 256:
ba = bytearray()
for i in range(256):
ba.append(plain.pop(0))
encoded = self.spBox.encode(ba)
returnvalue.extend(encoded)
if len(plain) > 0:
self.buffer = plain
return returnvalue
def close(self):
if self.buffer is None: self.buffer = bytearray()
while len(self.buffer) < 256:
self.buffer.append(randint(0, 255))
return self.encode(bytearray())
class Decoder:
def __init__(self, pw: str):
password = bytearray()
for c in pw:
password.append(ord(c))
index = 0
while len(password) < 4096:
password.append(ord(pw[index%len(pw)]))
index += 1
self.spBox = SPBox(password)
self.buffer = None
self.seeded = False
def decode(self, encoded: bytearray):
returnvalue = bytearray()
if self.buffer is not None:
encoded = self.buffer+encoded
self.buffer = None
while len(encoded) >= 256:
ba = bytearray()
for i in range(256):
ba.append(encoded.pop(0))
if (self.seeded):
decoded = self.spBox.decode(ba)
returnvalue.extend(decoded)
else:
self.spBox.setSeed(ba)
self.seeded = True
if len(encoded) > 0:
self.buffer = encoded
return returnvalue
def close(self):
return bytearray()
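# Illustrative round trip (a sketch, not part of the original file): encode() prepends a
# random 256-byte seed block and buffers partial blocks, and close() pads the remainder,
# so a fresh Decoder with the same password recovers the plaintext plus that padding.
# enc = Encoder("secret")
# blob = enc.encode(bytearray(b"hello world")) + enc.close()  # 256-byte seed + one 256-byte block
# dec = Decoder("secret")
# plain = dec.decode(blob)  # 256 bytes starting with b"hello world"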
class SBox:
"""
SBox is a substitution cipher.
Attributes:
encodeMap: lookuptable used to encode data
decodeMap: lookuptable used to decode data
Parameters:
pw: password
| **Pre:**
| len(pw) == 256
| **Post:**
| len(self.encodeMap) == 256
| self.encodeMap[i] >= 0
| self.encodeMap[i] < 256
| len(self.decodeMap) == 256
| self.decodeMap[i] >= 0
| self.decodeMap[i] < 256
"""
def __init__(self, pw: bytearray):
self.encodeMap: List[int] = [-1]*256
self.decodeMap: List[int] = [-1]*256
index = 0
for i in range(256):
emptyCounter = 0
maxEmpty = 256-i
targetEmpty = 1+(pw[i]%maxEmpty)
while (emptyCounter < targetEmpty):
if (self.encodeMap[index] == -1):
emptyCounter += 1
if (emptyCounter < targetEmpty):
index = (index+1)%256
self.encodeMap[index] = i
for i in range(256):
self.decodeMap[self.encodeMap[i]] = i
def encode(self, plain: int) -> int:
"""
Encodes a single plain number.
Parameters:
plain: plain number
Returns:
encoded number
| **Pre:**
| plain >= 0
| plain < 256
| **Post:**
| return >= 0
| return < 256
"""
return self.encodeMap[plain]
def decode(self, encoded: int) -> int:
"""
Decodes a single encoded number.
Parameters:
encoded: encoded number
Returns:
decoded number
| **Pre:**
| encoded >= 0
| encoded < 256
| **Post:**
| return >= 0
| return < 256
"""
return self.decodeMap[encoded]
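# Note: encodeMap and decodeMap are inverse permutations of 0..255 derived from the password,
# so sbox.decode(sbox.encode(b)) == b for every byte value b.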
class PBox:
"""
PBox is a transposition cipher.
Attributes:
encodeMap: lookuptable used to encode data
decodeMap: lookuptable used to decode data
Parameters:
pw: password
| **Pre:**
| len(pw) == 2048
| **Post:**
| len(self.encodeMap) == 2048
| self.encodeMap[i] >= 0
| self.encodeMap[i] < 2048
| len(self.decodeMap) == 2048
| self.decodeMap[i] >= 0
| self.decodeMap[i] < 2048
"""
def __init__(self, pw: bytearray):
self.encodeMap: List[int] = [-1]*(256*8)
self.decodeMap: List[int] = [-1]*(256*8)
index = 0
for i in range(256*8):
emptyCounter = 0
maxEmpty = 256*8-i
targetEmpty = 1+(pw[i]%maxEmpty)
while (emptyCounter < targetEmpty):
if (self.encodeMap[index] == -1):
emptyCounter += 1
if (emptyCounter < targetEmpty):
index = (index+1)%(256*8)
self.encodeMap[index] = i
for i in range(256*8):
self.decodeMap[self.encodeMap[i]] = i
def encode(self, plain: bytearray, seed: int) -> bytearray:
"""
Encodes a block of plain numbers.
Parameters:
plain: block of plain numbers
seed: seed
Returns:
block of encoded numbers
| **Pre:**
| len(plain) == 256
| seed >= 0
| seed < 256
| **Post:**
| len(return) == 256
| return[i] >= 0
| return[i] < 256
"""
encoded = bytearray(256)
for i in range(256):
indexVar = i*8+seed
for b in range(8):
if ((plain[i]) & (1<<b)):
index = self.encodeMap[(b+indexVar)%2048]
index8 = int(index/8)
encoded[index8] = encoded[index8]+(1<<(index%8))
return encoded
def decode(self, encoded: bytearray, seed: int) -> bytearray:
"""
Decodes a block of encoded numbers.
Parameters:
encoded: block of encoded numbers
seed: seed
Returns:
block of decoded numbers
| **Pre:**
| len(encoded) == 256
| seed >= 0
| seed < 256
| **Post:**
| len(return) == 256
| return[i] >= 0
| return[i] < 256
"""
decoded = bytearray(256)
for i in range(256):
indexVar = i*8
for b in range(8):
if ((encoded[i]) & (1<<b)):
index = self.decodeMap[indexVar+b]-seed
if (index < 0):
index += 2048
index8 = int(index/8)
decoded[index8] = decoded[index8]+(1<<(index%8))
return decoded
class SPBox:
"""
SPBox is a substitution-permutation network.
Attributes:
sBoxes: list of SBoxes used for substitution
seed: seed
pBox: PBox used for permutation
Parameters:
pw: password
seed: seed
| **Pre:**
| len(pw) == 4096
| len(seed) == 256
| seed[i] >= 1
| **Post:**
| len(self.sBoxes) == 8
| len(self.seed) == 256
| self.seed[i] >= 1
"""
def __init__(self, pw: bytearray, seed: bytearray = None):
self.sBoxes: List[SBox] = [None]*8
if (seed is None):
seed = bytearray(256)
for i in range(256):
seed[i] = randint(1, 255)
self.seed: bytearray = seed
for s in range(8):
spw = bytearray(256)
for i in range(256):
spw[i] = pw[s*256+i]
self.sBoxes[s] = SBox(spw)
ppw = bytearray(2048)
for i in range(2048):
ppw[i] = pw[8*256+i]
self.pBox: PBox = PBox(ppw)
def encodeRound(self, plain: bytearray, round: int, pSeed: int) -> bytearray:
"""
Encodes a block of plain numbers.
Parameters:
plain: block of plain numbers
round: iteration of encode
pSeed: seed for PBox
Returns:
block of encoded numbers
| **Pre:**
| len(plain) == 256
| round >= 0
| round < 8
| pSeed >= 0
| pSeed < 256
| **Post:**
| len(return) == 256
"""
encoded = bytearray(256)
for i in range(256):
seedAtI = self.seed[i]
encoded[i] = plain[i] ^ self.sBoxes[round].encodeMap[i] ^ seedAtI
for j in range(8):
if ((seedAtI & (1<<j)) != 0):
encoded[i] = self.sBoxes[j].encodeMap[
encoded[i]] # replacement for SBox.encode() to improve performance
encoded = self.pBox.encode(encoded, pSeed)
return encoded
def decodeRound(self, encoded: bytearray, round: int, pSeed: int) -> bytearray:
"""
Decodes a block of encoded numbers.
Parameters:
encoded: block of encoded numbers
round: iteration of decode
pSeed: seed for PBox
Returns:
block of decoded numbers
| **Pre:**
| len(encoded) == 256
| round >= 0
| round < 8
| pSeed >= 0
| pSeed < 256
| **Post:**
| len(return) == 256
"""
decoded = self.pBox.decode(encoded, pSeed)
for i in range(256):
seedAtI = self.seed[i]
for invertedJ in range(8):
j = 8-1-invertedJ
if ((seedAtI & (1<<j)) != 0):
decoded[i] = self.sBoxes[j].decodeMap[
decoded[i]] # replacement for SBox.decode() to improve performance
decoded[i] = decoded[i] ^ self.sBoxes[round].encodeMap[i] ^ seedAtI
return decoded
def encode(self, plain: bytearray) -> bytearray:
"""
Encodes a block of plain numbers.
Parameters:
plain: block of plain numbers
Returns:
block of encoded numbers
| **Pre:**
| len(plain) == 256
| **Post:**
| len(return) == 256
| **Modifies:**
| self.seed[i]
"""
pSeed = 0
for i in range(256):
pSeed = (pSeed+self.seed[i])%256
encoded = self.encodeRound(plain, 0, pSeed)
for i in range(7):
encoded = self.encodeRound(encoded, i+1, pSeed)
for i in range(256):
self.seed[i] = plain[i] ^ self.seed[i]
if (self.seed[i] == 0):
self.seed[i] = 1
return encoded
def decode(self, encoded: bytearray) -> bytearray:
"""
Decodes a block of encoded numbers.
Parameters:
encoded: block of encoded numbers
Returns:
block of decoded numbers
| **Pre:**
| len(encoded) == 256
| encoded[i] >= 0
| encoded[i] < 256
| **Post:**
| len(return) == 256
| return[i] >= 0
| return[i] < 256
| **Modifies:**
| self.seed[i]
"""
pSeed = 0
for i in range(256):
pSeed = (pSeed+self.seed[i])%256
decoded = self.decodeRound(encoded, 7, pSeed)
for invertedI in range(7):
i = 6-invertedI
decoded = self.decodeRound(decoded, i, pSeed)
for i in range(256):
self.seed[i] = decoded[i] ^ self.seed[i]
if (self.seed[i] == 0):
self.seed[i] = 1
return decoded
def getSeed(self) -> bytearray:
"""
Gets the seed.
Returns:
block of seed numbers
| **Post:**
| len(return) == 256
| return[i] >= 1
"""
seed = bytearray(256)
for i in range(256):
seed[i] = self.seed[i]
return seed
def setSeed(self, seed: bytearray):
"""
Sets the seed.
Parameters:
seed: block of seed numbers
| **Pre:**
| len(seed) == 256
| seed[i] >= 1
| **Modifies:**
| self.seed[i]
"""
for i in range(256):
self.seed[i] = seed[i]
# TODO change general parameter policy: all parameters may be edited by functions, no deepcopy needed
#TODO change to bytearray
class SBoxUnitTest(unittest.TestCase):
def setUp(self):
self.pw = bytearray()
for i in range(256):
self.pw.append(randint(0, 255))
self.sBox = SBox(self.pw)
def tearDown(self):
self.pw = None
self.sBox = None
def test_simple(self):
decodedMatches = 0
encodedMatches = 0
for i in range(256):
plain = i
encoded = self.sBox.encode(plain)
decoded = self.sBox.decode(encoded)
if (plain == encoded):
encodedMatches += 1
if (plain == decoded):
decodedMatches += 1
self.assertTrue(encodedMatches < 256/10)
self.assertTrue(decodedMatches == 256)
class PBoxUnitTest(unittest.TestCase):
def setUp(self):
self.pw = bytearray()
for i in range(2048):
self.pw.append(randint(0, 255))
self.pBox = PBox(self.pw)
def tearDown(self):
self.pw = None
self.pBox = None
def test_simple(self):
plain = bytearray()
for i in range(256):
plain.append(randint(0, 255))
for seed in range(256):
encoded = self.pBox.encode(plain, seed)
decoded = self.pBox.decode(encoded, seed)
decodedMatches = 0
encodedMatches = 0
for i in range(256):
if (plain[i] == encoded[i]):
encodedMatches += 1
if (plain[i] == decoded[i]):
decodedMatches += 1
self.assertTrue(encodedMatches < 256/10)
self.assertTrue(decodedMatches == 256)
class SPBoxUnitTest(unittest.TestCase):
def setUp(self):
self.pw = bytearray()
for i in range(4096):
self.pw.append(randint(0, 255))
self.spBox = SPBox(self.pw)
def tearDown(self):
self.pw = None
self.spBox = None
def test_simple(self):
plain = bytearray()
for i in range(256):
plain.append(randint(0, 255))
length = len(plain)
seed = self.spBox.getSeed()
for i in range(256):
self.assertTrue(self.spBox.seed[i] != 0)
encoded = self.spBox.encode(plain)
for i in range(256):
self.assertTrue(self.spBox.seed[i] != 0)
seed2 = self.spBox.getSeed()
self.spBox.setSeed(seed)
decoded = self.spBox.decode(encoded)
decodedMatches = 0
seedMatches = 0
for i in range(256):
if (seed[i] == seed2[i]):
seedMatches += 1
for i in range(length):
if (plain[i] == decoded[i]):
decodedMatches += 1
self.assertTrue(decodedMatches == length) # TODO encodeMatches
self.assertTrue(seedMatches < 256/10)
# TODO encode 2nd batch#plain is edited | 2.796875 | 3 |
web-component/python/admin_api/api/__init__.py | AbhiGupta03/SDK | 0 | 12797616 | <filename>web-component/python/admin_api/api/__init__.py<gh_stars>0
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from admin_api.api.auto_generate_app_token_api import AutoGenerateAppTokenApi
from admin_api.api.card_api import CardApi
from admin_api.api.client_api import ClientApi | 1.265625 | 1 |
problems/__init__.py | rampasek/attention-learn-to-route | 0 | 12797617 | <reponame>rampasek/attention-learn-to-route
from problems.tsp.problem_tsp import TSP, TSPEdge
from problems.vrp.problem_vrp import CVRP, SDVRP
from problems.op.problem_op import OP
from problems.pctsp.problem_pctsp import PCTSPDet, PCTSPStoch | 1.21875 | 1 |
sebs/__init__.py | opal-mimuw/serverless-benchmarks | 35 | 12797618 | from .sebs import SeBS # noqa
from .aws import * # noqa
from .azure import * # noqa
from .cache import Cache # noqa
from .benchmark import Benchmark # noqa
# from .experiments import * # noqa
| 0.976563 | 1 |
Array/10 Move all negative numbers to beginning and positive to end.py | ikaushikpal/DS-450-python | 3 | 12797619 | def rearrange(arr, n):
j = 0
for i in range(0, n):
if (arr[i] < 0):
arr[i], arr[j] = arr[j], arr[i]
j += 1
print(arr)
if __name__ == "__main__":
arr = [-1, 2, -3, 4, 5, 6, -7, 8, 9]
n = len(arr)
rearrange(arr, n)
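# Expected output for this input: [-1, -3, -7, 4, 5, 6, 2, 8, 9]
# (negatives are moved to the front; the relative order of the positives is not preserved).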
| 3.59375 | 4 |
ml-agents/mlagents/envs/__init__.py | icaro56/ml-agents | 134 | 12797620 | from .environment import *
from .brain import *
from .exception import *
| 1.070313 | 1 |
src/parse_tree.py | yxtay/data-structures-algorithms | 1 | 12797621 | import operator
OPERATORS = {"+": operator.add, "-": operator.sub, "*": operator.mul, "/": operator.truediv}
LEFT_PAREN = "("
RIGHT_PAREN = ")"
def build_parse_tree(expression):
tree = {}
stack = [tree]
node = tree
for token in expression:
if token == LEFT_PAREN:
node["left"] = {}
stack.append(node)
node = node["left"]
elif token == RIGHT_PAREN:
node = stack.pop()
elif token in OPERATORS:
node["val"] = token
node["right"] = {}
stack.append(node)
node = node["right"]
else:
node["val"] = int(token)
parent = stack.pop()
node = parent
return tree
def evaluate(tree):
try:
operate = OPERATORS[tree["val"]]
return operate(evaluate(tree["left"]), evaluate(tree["right"]))
except KeyError:
# no left or no right, so is a leaf - our base case
return tree["val"]
def construct_expression(parse_tree):
if parse_tree is None:
return ""
left = construct_expression(parse_tree.get("left"))
right = construct_expression(parse_tree.get("right"))
val = parse_tree["val"]
if left and right:
return "({}{}{})".format(left, val, right)
return val
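# Illustrative usage (not part of the original file). Tokens are consumed one character at a
# time, so this sketch sticks to single-digit operands:
# tree = build_parse_tree("(3+(4*5))")
# evaluate(tree)              # -> 23
# construct_expression(tree)  # -> "(3+(4*5))"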
| 3.5 | 4 |
MakeMytripChallenge/script/edalevel1.py | divayjindal95/DataScience | 0 | 12797622 | <reponame>divayjindal95/DataScience
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
train_data = pd.read_csv("../data/train.csv")
train_data_len=len(train_data)
test_data=pd.read_csv("../data/test.csv")
test_data_len=len(test_data)
data = pd.concat([train_data, test_data])
## pd.read_csv("../data/test.csv")
#print train_data.info()
'''
15 cols : A to O for training
Many vars have null value
Non null : C , H , I , J , K , L , M , O , P
with null : A , B , D , E , F, G , N
6 cols with float or int
9 are with string ( seems like binary are there in them)
'''
#print train_data.describe()
#print train_data.head(5)
'''
id A B C D E F G H I J K L M N O P
0 1 b 18.42 10.415 y p aa v 0.125 t f 0 f g 120 375 1
1 2 a 21.75 11.750 u g c v 0.250 f f 0 t g 180 0 1
2 3 b 30.17 1.085 y p c v 0.040 f f 0 f g 170 179 1
3 4 b 22.67 2.540 y p c h 2.585 t f 0 f g 0 0 0
4 5 a 36.00 1.000 u g c v 2.000 t t 11 f g 0 456 0
'''
#print train_data.corr()
'''
the kind of corr seen states the vars are not linearly correlated
'''
print train_data[(train_data.A!='a') & (train_data.A!='b')]
from sklearn.preprocessing import LabelEncoder
le=LabelEncoder()
train_data.A=le.fit_transform(train_data.A)
print le.classes_
#print train_data.D.describe()
#print train_data.D.value_counts()
le=LabelEncoder()
train_data.D = le.fit_transform(train_data.D)
print le.classes_
#print train_data.corr()
#print train_data.E.value_counts()
#print train_data.corr()
le=LabelEncoder()
train_data.E = le.fit_transform(train_data.E)
print train_data.F.value_counts()
print test_data.F.value_counts()
# print train_data.G.value_counts()
# print train_data.I.value_counts()
# print train_data.J.value_counts()
# print train_data.L.value_counts()
# print train_data.M.value_counts()
| 3.09375 | 3 |
ntee/utils/abstract_db.py | studio-ousia/ntee | 87 | 12797623 | # -*- coding: utf-8 -*-
import click
import gzip
import os
import rdflib
import re
import urllib
from collections import defaultdict
from contextlib import closing
from functools import partial
from multiprocessing.pool import Pool
from shelve import DbfilenameShelf
from tokenizer import RegexpTokenizer
class AbstractDB(DbfilenameShelf):
def __init__(self, *args, **kwargs):
DbfilenameShelf.__init__(self, *args, **kwargs)
@staticmethod
def build(in_dir, out_file, pool_size):
with closing(AbstractDB(out_file, protocol=-1)) as db:
target_files = [f for f in sorted(os.listdir(in_dir)) if f.endswith('ttl.gz')]
with closing(Pool(pool_size)) as pool:
f = partial(_process_file, in_dir=in_dir)
for ret in pool.imap(f, target_files):
for (key, obj) in ret:
db[key] = obj
def count_valid_words(self, vocab, max_text_len):
tokenizer = RegexpTokenizer()
keys = self.keys()
words = frozenset(list(vocab.words()))
word_count = 0
with click.progressbar(keys) as bar:
for key in bar:
c = 0
for token in tokenizer.tokenize(self[key]['text']):
if token.text.lower() in words:
c += 1
word_count += min(c, max_text_len)
return word_count
def _process_file(file_name, in_dir):
abs_matcher = re.compile(ur'^http://dbpedia\.org/resource/(.*)/abstract#offset_(\d+)_(\d+)$')
dbp_matcher = re.compile(ur'^http://dbpedia\.org/resource/(.*)$')
click.echo('Processing %s' % file_name)
g = rdflib.Graph()
with gzip.GzipFile(os.path.join(in_dir, file_name)) as f:
g.load(f, format='turtle')
texts = {}
mentions = defaultdict(dict)
mention_titles = defaultdict(dict)
for (s, p, o) in g:
s = unicode(s)
p = unicode(p)
o = unicode(o)
abs_match_obj = abs_matcher.match(s)
title = urllib.unquote(urllib.unquote(abs_match_obj.group(1).encode('utf-8')))
title = title.decode('utf-8').replace(u'_', u' ')
if p == u'http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#isString':
texts[title] = o
elif p == u'http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#anchorOf':
span = (int(abs_match_obj.group(2)), int(abs_match_obj.group(3)))
mentions[title][s] = (o, span)
elif p == u'http://www.w3.org/2005/11/its/rdf#taIdentRef':
match_obj = dbp_matcher.match(o)
if match_obj:
link_title = urllib.unquote(match_obj.group(1).encode('utf-8'))
link_title = link_title.decode('utf-8').replace(u'_', u' ')
mention_titles[title][s] = link_title
ret = []
for (title, text) in texts.iteritems():
links = []
for (key, link_title) in mention_titles[title].items():
(name, span) = mentions[title][key]
links.append((name, link_title, span))
ret.append((title.encode('utf-8'),
dict(title=title, text=text, links=links)))
return ret
| 2.296875 | 2 |
mapfartapi/web.py | aaronr/mapfart | 3 | 12797624 | from flask import render_template
def index():
return render_template('index.html')
def documentation():
return render_template('documentation.html')
def api_landing():
return render_template('api_landing.html')
| 1.929688 | 2 |
migrations/versions/07d04be1e961_add_a_bio_and_profile_pic_path_to_users_.py | WaruiAlfred/sixty-seconds-impression | 0 | 12797625 | <reponame>WaruiAlfred/sixty-seconds-impression
"""add a bio and profile pic path to users table
Revision ID: 0<PASSWORD>
Revises: <PASSWORD>
Create Date: 2021-09-19 11:46:47.937931
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('biodata', sa.String(length=255), nullable=True))
op.add_column('users', sa.Column('profile_pic_path', sa.String(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'profile_pic_path')
op.drop_column('users', 'biodata')
# ### end Alembic commands ###
| 1.421875 | 1 |
setup.py | lopatinay/psycopg2_error_handler | 0 | 12797626 | <reponame>lopatinay/psycopg2_error_handler
from setuptools import setup
setup(
name="psycopg2_error_handler",
install_requires=[
"psycopg-binary >= 3.0",
],
packages=["psycopg2_error"],
version='0.0.4',
description='Psycopg2 Error Handler',
author='<NAME>',
license='MIT',
)
| 1.226563 | 1 |
subirarchivos/models.py | Ax-Angel/NuevoGeco | 4 | 12797627 | from django.conf import settings
from django.db import models
from .validators import validate_file_extension
# Create your models here.
class NormalProject(models.Model):
name = models.CharField(max_length=100, null=False, unique=True)
owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='owner_normalproject', on_delete=models.CASCADE)
project_members = models.ManyToManyField(settings.AUTH_USER_MODEL)
public_status = models.BooleanField(default=0)
collab_status = models.BooleanField(default=0)
timestamp = models.DateTimeField(auto_now_add=True)
def is_public(self):
return bool(self.public_status)
def is_collab(self):
return bool(self.collab_status)
def get_project_members(self):
return self.project_members
def get_owner(self):
return self.owner
def set_status_public(self, status):
self.public_status=status
def set_status_collab(self, status):
self.collab_status=status
def __str__(self):
return str(self.name)
class Document(models.Model):
file = models.FileField(blank=False, null=False, upload_to='mediafiles/', validators=[validate_file_extension])
name = models.CharField(max_length=100, null=False, unique=True)
owner = models.ForeignKey(settings.AUTH_USER_MODEL,related_name='owner_document', on_delete=models.CASCADE)
project = models.ForeignKey(NormalProject, related_name='project_document', on_delete=models.CASCADE)
tagged_doc = models.CharField(max_length=100)
timestamp = models.DateTimeField(auto_now_add=True)
def set_tagged_doc(self, file_url):
self.tagged_doc = file_url
def set_file(self, file_url):
self.file = file_url
class NormalMetadata(models.Model):
name = models.CharField(max_length=100, null=False, unique=True)
project = models.ForeignKey(NormalProject, related_name='project_normalMetadata', on_delete=models.CASCADE)
class DocumentNormalMetadataRelation(models.Model):
metadata = models.ForeignKey(NormalMetadata, related_name='metadata', on_delete=models.CASCADE)
document = models.ForeignKey(Document, related_name='document', on_delete=models.CASCADE)
data = models.CharField(max_length=100, blank=True, null=True)
class ParallelRelation(models.Model):
doc_one = models.ManyToManyField(Document, related_name='doc_one')
doc_two = models.ManyToManyField(Document, related_name='doc_two')
class ParallelProject(models.Model):
name = models.CharField(max_length=100, null=False, unique=True)
owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='owner_parallelproject', on_delete=models.CASCADE)
project_members = models.ManyToManyField(settings.AUTH_USER_MODEL)
relations = models.ManyToManyField(ParallelRelation)
public_status = models.BooleanField(default=0)
timestamp = models.DateTimeField(auto_now_add=True)
def is_public(self):
return bool(self.public_status)
def get_project_members(self):
return self.project_members
def __str__(self):
return str(self.name)
class ParallelMetadata(models.Model):
name = models.CharField(max_length=100, null=False)
project = models.ForeignKey(ParallelProject, related_name='project_parallelmetadata', on_delete=models.CASCADE)
class DocumentParallelMetadaRelation(models.Model):  # TODO: pending
metadata = models.ForeignKey(ParallelMetadata, related_name="parallelmetadata", on_delete=models.CASCADE)
relation = models.ForeignKey(ParallelRelation, related_name="relation", on_delete=models.CASCADE)
data = models.CharField(max_length=100, blank=True, null=True)
| 2.078125 | 2 |
model/synthesizer/ctabgan_synthesizer.py | Zepp3/CTAB-GAN | 0 | 12797628 | import numpy as np
import pandas as pd
import torch
import torch.utils.data
import torch.optim as optim
from torch.optim import Adam
from torch.nn import functional as F
from torch.nn import (Dropout, LeakyReLU, Linear, Module, ReLU, Sequential,
Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss,SmoothL1Loss)
from model.synthesizer.transformer import ImageTransformer,DataTransformer
from tqdm import tqdm
class Classifier(Module):
def __init__(self,input_dim, dis_dims,st_ed):
super(Classifier,self).__init__()
dim = input_dim-(st_ed[1]-st_ed[0])
seq = []
self.str_end = st_ed
for item in list(dis_dims):
seq += [
Linear(dim, item),
LeakyReLU(0.2),
Dropout(0.5)
]
dim = item
if (st_ed[1]-st_ed[0])==1:
seq += [Linear(dim, 1)]
elif (st_ed[1]-st_ed[0])==2:
seq += [Linear(dim, 1),Sigmoid()]
else:
seq += [Linear(dim,(st_ed[1]-st_ed[0]))]
self.seq = Sequential(*seq)
def forward(self, input):
label=None
if (self.str_end[1]-self.str_end[0])==1:
label = input[:, self.str_end[0]:self.str_end[1]]
else:
label = torch.argmax(input[:, self.str_end[0]:self.str_end[1]], axis=-1)
new_imp = torch.cat((input[:,:self.str_end[0]],input[:,self.str_end[1]:]),1)
if ((self.str_end[1]-self.str_end[0])==2) | ((self.str_end[1]-self.str_end[0])==1):
return self.seq(new_imp).view(-1), label
else:
return self.seq(new_imp), label
def apply_activate(data, output_info):
data_t = []
st = 0
for item in output_info:
if item[1] == 'tanh':
ed = st + item[0]
data_t.append(torch.tanh(data[:, st:ed]))
st = ed
elif item[1] == 'softmax':
ed = st + item[0]
data_t.append(F.gumbel_softmax(data[:, st:ed], tau=0.2))
st = ed
return torch.cat(data_t, dim=1)
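# get_st_ed locates the (start, end) column span of the target variable inside the
# transformed, one-hot expanded representation produced by the data transformer.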
def get_st_ed(target_col_index,output_info):
st = 0
c= 0
tc= 0
for item in output_info:
if c==target_col_index:
break
if item[1]=='tanh':
st += item[0]
elif item[1] == 'softmax':
st += item[0]
c+=1
tc+=1
ed= st+output_info[tc][0]
return (st,ed)
def random_choice_prob_index_sampling(probs,col_idx):
option_list = []
for i in col_idx:
pp = probs[i]
option_list.append(np.random.choice(np.arange(len(probs[i])), p=pp))
return np.array(option_list).reshape(col_idx.shape)
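# random_choice_prob_index draws one category per row of a probability matrix by comparing a
# uniform sample against the cumulative probabilities (vectorised inverse-CDF sampling).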
def random_choice_prob_index(a, axis=1):
r = np.expand_dims(np.random.rand(a.shape[1 - axis]), axis=axis)
return (a.cumsum(axis=axis) > r).argmax(axis=axis)
def maximum_interval(output_info):
max_interval = 0
for item in output_info:
max_interval = max(max_interval, item[0])
return max_interval
class Cond(object):
def __init__(self, data, output_info):
self.model = []
st = 0
counter = 0
for item in output_info:
if item[1] == 'tanh':
st += item[0]
continue
elif item[1] == 'softmax':
ed = st + item[0]
counter += 1
self.model.append(np.argmax(data[:, st:ed], axis=-1))
st = ed
self.interval = []
self.n_col = 0
self.n_opt = 0
st = 0
self.p = np.zeros((counter, maximum_interval(output_info)))
self.p_sampling = []
for item in output_info:
if item[1] == 'tanh':
st += item[0]
continue
elif item[1] == 'softmax':
ed = st + item[0]
tmp = np.sum(data[:, st:ed], axis=0)
tmp_sampling = np.sum(data[:, st:ed], axis=0)
tmp = np.log(tmp + 1)
tmp = tmp / np.sum(tmp)
tmp_sampling = tmp_sampling / np.sum(tmp_sampling)
self.p_sampling.append(tmp_sampling)
self.p[self.n_col, :item[0]] = tmp
self.interval.append((self.n_opt, item[0]))
self.n_opt += item[0]
self.n_col += 1
st = ed
self.interval = np.asarray(self.interval)
def sample_train(self, batch):
if self.n_col == 0:
return None
batch = batch
idx = np.random.choice(np.arange(self.n_col), batch)
vec = np.zeros((batch, self.n_opt), dtype='float32')
mask = np.zeros((batch, self.n_col), dtype='float32')
mask[np.arange(batch), idx] = 1
opt1prime = random_choice_prob_index(self.p[idx])
for i in np.arange(batch):
vec[i, self.interval[idx[i], 0] + opt1prime[i]] = 1
return vec, mask, idx, opt1prime
def sample(self, batch):
if self.n_col == 0:
return None
batch = batch
idx = np.random.choice(np.arange(self.n_col), batch)
vec = np.zeros((batch, self.n_opt), dtype='float32')
opt1prime = random_choice_prob_index_sampling(self.p_sampling,idx)
for i in np.arange(batch):
vec[i, self.interval[idx[i], 0] + opt1prime[i]] = 1
return vec
def cond_loss(data, output_info, c, m):
loss = []
st = 0
st_c = 0
for item in output_info:
if item[1] == 'tanh':
st += item[0]
continue
elif item[1] == 'softmax':
ed = st + item[0]
ed_c = st_c + item[0]
tmp = F.cross_entropy(
data[:, st:ed],
torch.argmax(c[:, st_c:ed_c], dim=1),
reduction='none')
loss.append(tmp)
st = ed
st_c = ed_c
loss = torch.stack(loss, dim=1)
return (loss * m).sum() / data.size()[0]
class Sampler(object):
def __init__(self, data, output_info):
super(Sampler, self).__init__()
self.data = data
self.model = []
self.n = len(data)
st = 0
for item in output_info:
if item[1] == 'tanh':
st += item[0]
continue
elif item[1] == 'softmax':
ed = st + item[0]
tmp = []
for j in range(item[0]):
tmp.append(np.nonzero(data[:, st + j])[0])
self.model.append(tmp)
st = ed
def sample(self, n, col, opt):
if col is None:
idx = np.random.choice(np.arange(self.n), n)
return self.data[idx]
idx = []
for c, o in zip(col, opt):
idx.append(np.random.choice(self.model[c][o]))
return self.data[idx]
class Discriminator(Module):
def __init__(self, side, layers):
super(Discriminator, self).__init__()
self.side = side
info = len(layers)-2
self.seq = Sequential(*layers)
self.seq_info = Sequential(*layers[:info])
def forward(self, input):
return (self.seq(input)), self.seq_info(input)
class Generator(Module):
def __init__(self, side, layers):
super(Generator, self).__init__()
self.side = side
self.seq = Sequential(*layers)
def forward(self, input_):
return self.seq(input_)
def determine_layers_disc(side, num_channels):
assert side >= 4 and side <= 32
layer_dims = [(1, side), (num_channels, side // 2)]
while layer_dims[-1][1] > 3 and len(layer_dims) < 4:
layer_dims.append((layer_dims[-1][0] * 2, layer_dims[-1][1] // 2))
layers_D = []
for prev, curr in zip(layer_dims, layer_dims[1:]):
layers_D += [
Conv2d(prev[0], curr[0], 4, 2, 1, bias=False),
BatchNorm2d(curr[0]),
LeakyReLU(0.2, inplace=True)
]
print()
layers_D += [
Conv2d(layer_dims[-1][0], 1, layer_dims[-1][1], 1, 0),
Sigmoid()
]
return layers_D
def determine_layers_gen(side, random_dim, num_channels):
assert side >= 4 and side <= 32
layer_dims = [(1, side), (num_channels, side // 2)]
while layer_dims[-1][1] > 3 and len(layer_dims) < 4:
layer_dims.append((layer_dims[-1][0] * 2, layer_dims[-1][1] // 2))
layers_G = [
ConvTranspose2d(
random_dim, layer_dims[-1][0], layer_dims[-1][1], 1, 0, output_padding=0, bias=False)
]
for prev, curr in zip(reversed(layer_dims), reversed(layer_dims[:-1])):
layers_G += [
BatchNorm2d(prev[0]),
ReLU(True),
ConvTranspose2d(prev[0], curr[0], 4, 2, 1, output_padding=0, bias=True)
]
return layers_G
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0)
class CTABGANSynthesizer:
def __init__(self,
class_dim=(256, 256, 256, 256),
random_dim=100,
num_channels=64,
l2scale=1e-5,
batch_size=500,
epochs=1):
self.random_dim = random_dim
self.class_dim = class_dim
self.num_channels = num_channels
self.dside = None
self.gside = None
self.l2scale = l2scale
self.batch_size = batch_size
self.epochs = epochs
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def fit(self, train_data=pd.DataFrame, categorical=[], mixed={}, type={}):
problem_type = None
target_index=None
if type:
problem_type = list(type.keys())[0]
if problem_type:
target_index = train_data.columns.get_loc(type[problem_type])
self.transformer = DataTransformer(train_data=train_data, categorical_list=categorical, mixed_dict=mixed)
self.transformer.fit()
train_data = self.transformer.transform(train_data.values)
data_sampler = Sampler(train_data, self.transformer.output_info)
data_dim = self.transformer.output_dim
self.cond_generator = Cond(train_data, self.transformer.output_info)
sides = [4, 8, 16, 24, 32]
col_size_d = data_dim + self.cond_generator.n_opt
for i in sides:
if i * i >= col_size_d:
self.dside = i
break
sides = [4, 8, 16, 24, 32]
col_size_g = data_dim
for i in sides:
if i * i >= col_size_g:
self.gside = i
break
layers_G = determine_layers_gen(self.gside, self.random_dim+self.cond_generator.n_opt, self.num_channels)
layers_D = determine_layers_disc(self.dside, self.num_channels)
self.generator = Generator(self.gside, layers_G).to(self.device)
discriminator = Discriminator(self.dside, layers_D).to(self.device)
optimizer_params = dict(lr=2e-4, betas=(0.5, 0.9), eps=1e-3, weight_decay=self.l2scale)
optimizerG = Adam(self.generator.parameters(), **optimizer_params)
optimizerD = Adam(discriminator.parameters(), **optimizer_params)
st_ed = None
classifier=None
optimizerC= None
if target_index != None:
st_ed= get_st_ed(target_index,self.transformer.output_info)
classifier = Classifier(data_dim,self.class_dim,st_ed).to(self.device)
optimizerC = optim.Adam(classifier.parameters(),**optimizer_params)
self.generator.apply(weights_init)
discriminator.apply(weights_init)
self.Gtransformer = ImageTransformer(self.gside)
self.Dtransformer = ImageTransformer(self.dside)
steps_per_epoch = max(1, len(train_data) // self.batch_size)
for i in tqdm(range(self.epochs)):
for _ in range(steps_per_epoch):
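# Discriminator step: sample noise plus a training-by-sampling condition vector, build real
# and fake batches conditioned on it, and update D with the standard GAN log loss below.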
noisez = torch.randn(self.batch_size, self.random_dim, device=self.device)
condvec = self.cond_generator.sample_train(self.batch_size)
c, m, col, opt = condvec
c = torch.from_numpy(c).to(self.device)
m = torch.from_numpy(m).to(self.device)
noisez = torch.cat([noisez, c], dim=1)
noisez = noisez.view(self.batch_size,self.random_dim+self.cond_generator.n_opt,1,1)
perm = np.arange(self.batch_size)
np.random.shuffle(perm)
real = data_sampler.sample(self.batch_size, col[perm], opt[perm])
c_perm = c[perm]
real = torch.from_numpy(real.astype('float32')).to(self.device)
fake = self.generator(noisez)
faket = self.Gtransformer.inverse_transform(fake)
fakeact = apply_activate(faket, self.transformer.output_info)
fake_cat = torch.cat([fakeact, c], dim=1)
real_cat = torch.cat([real, c_perm], dim=1)
real_cat_d = self.Dtransformer.transform(real_cat)
fake_cat_d = self.Dtransformer.transform(fake_cat)
optimizerD.zero_grad()
y_real,_ = discriminator(real_cat_d)
y_fake,_ = discriminator(fake_cat_d)
loss_d = (-(torch.log(y_real + 1e-4).mean()) - (torch.log(1. - y_fake + 1e-4).mean()))
loss_d.backward()
optimizerD.step()
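# Generator step: fresh noise and condition vector; the loss combines the adversarial term,
# the conditional cross-entropy, and an information loss matching per-feature means and stds.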
noisez = torch.randn(self.batch_size, self.random_dim, device=self.device)
condvec = self.cond_generator.sample_train(self.batch_size)
c, m, col, opt = condvec
c = torch.from_numpy(c).to(self.device)
m = torch.from_numpy(m).to(self.device)
noisez = torch.cat([noisez, c], dim=1)
noisez = noisez.view(self.batch_size,self.random_dim+self.cond_generator.n_opt,1,1)
optimizerG.zero_grad()
fake = self.generator(noisez)
faket = self.Gtransformer.inverse_transform(fake)
fakeact = apply_activate(faket, self.transformer.output_info)
fake_cat = torch.cat([fakeact, c], dim=1)
fake_cat = self.Dtransformer.transform(fake_cat)
y_fake,info_fake = discriminator(fake_cat)
cross_entropy = cond_loss(faket, self.transformer.output_info, c, m)
_,info_real = discriminator(real_cat_d)
g = -(torch.log(y_fake + 1e-4).mean()) + cross_entropy
g.backward(retain_graph=True)
loss_mean = torch.norm(torch.mean(info_fake.view(self.batch_size,-1), dim=0) - torch.mean(info_real.view(self.batch_size,-1), dim=0), 1)
loss_std = torch.norm(torch.std(info_fake.view(self.batch_size,-1), dim=0) - torch.std(info_real.view(self.batch_size,-1), dim=0), 1)
loss_info = loss_mean + loss_std
loss_info.backward()
optimizerG.step()
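# Optional auxiliary classifier step on the target column (only when a problem type is
# supplied): the classifier is fitted on real rows and the generator is pushed to produce
# rows the classifier labels consistently.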
if problem_type:
fake = self.generator(noisez)
faket = self.Gtransformer.inverse_transform(fake)
fakeact = apply_activate(faket, self.transformer.output_info)
real_pre, real_label = classifier(real)
fake_pre, fake_label = classifier(fakeact)
c_loss = CrossEntropyLoss()
if (st_ed[1] - st_ed[0])==1:
c_loss= SmoothL1Loss()
real_label = real_label.type_as(real_pre)
fake_label = fake_label.type_as(fake_pre)
real_label = torch.reshape(real_label,real_pre.size())
fake_label = torch.reshape(fake_label,fake_pre.size())
elif (st_ed[1] - st_ed[0])==2:
c_loss = BCELoss()
real_label = real_label.type_as(real_pre)
fake_label = fake_label.type_as(fake_pre)
loss_cc = c_loss(real_pre, real_label)
loss_cg = c_loss(fake_pre, fake_label)
optimizerG.zero_grad()
loss_cg.backward()
optimizerG.step()
optimizerC.zero_grad()
loss_cc.backward()
optimizerC.step()
def sample(self, n):
self.generator.eval()
output_info = self.transformer.output_info
steps = n // self.batch_size + 1
data = []
for i in range(steps):
noisez = torch.randn(self.batch_size, self.random_dim, device=self.device)
condvec = self.cond_generator.sample(self.batch_size)
c = condvec
c = torch.from_numpy(c).to(self.device)
noisez = torch.cat([noisez, c], dim=1)
noisez = noisez.view(self.batch_size,self.random_dim+self.cond_generator.n_opt,1,1)
fake = self.generator(noisez)
faket = self.Gtransformer.inverse_transform(fake)
fakeact = apply_activate(faket,output_info)
data.append(fakeact.detach().cpu().numpy())
data = np.concatenate(data, axis=0)
result = self.transformer.inverse_transform(data)
return result[0:n]
| 2.265625 | 2 |
problem_solving/python/algorithms/bit_manipulation/sum_vs_xor.py | kcc3/hackerrank-solutions | 0 | 12797629 | def sum_xor(n):
"""Hackerrank Problem: https://www.hackerrank.com/challenges/sum-vs-xor/problem
Given an integer n, find each x such that:
0 <= x <= n
n + x = n ^ x
Solve:
We count the number of zeros that are in the binary representation of the integer, because for sum and xor to be
equal, it occurs when there are 0s in the digit where an XOR would return a 1, and an addition would return a 1
as well. For example: for the integer "10", which is binary "1010", if you add or XOR 1, you would end up with
"1011" for both, because of the least significant 0 being flipped to a 1. We then return the total combinations
of these values, which is 2^(number of zeros)
Args:
n (int): Integer to check
Returns:
int: The total number of integers that satisfy the sum = xor problem
"""
if n == 0:
return 1
bin = "{0:b}".format(n)
zeros = bin.count("0")
return pow(2, zeros)
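# Worked example: n = 10 is 0b1010, which contains two 0 bits, so 2**2 = 4 values of x
# satisfy n + x == n ^ x, namely x in {0, 1, 4, 5} (e.g. 10 + 5 == 10 ^ 5 == 15).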
if __name__ == "__main__":
assert sum_xor(5) == 2
assert sum_xor(10) == 4
assert sum_xor(0) == 1
| 4.21875 | 4 |
backend/api/urls.py | ChansongJo/PsychoLingExperiment | 0 | 12797630 | <gh_stars>0
from django.urls import path, include
from backend.api.views import bulkUploadFromFile
urlpatterns = [
path('bulk_upload/', bulkUploadFromFile)
]
| 1.523438 | 2 |
jobs/r_programming/script.py | ivan-brko/GamayunConfigurationSample | 0 | 12797631 | import requests
import json
from bs4 import BeautifulSoup
from gamayun.gamayun_utils import report_result_with_maps_only
from gamayun.gamayun_utils import report_error
from gamayun.gamayun_utils import run_gamayun_script_logic
def parse_single_entry(entry):
# test if this entry contains comment (if it doesn't it is an ad so we skip it)
if entry.find("a", class_="comments") is not None:
result = dict()
result["title"] = entry.find("a", class_="title").text
result["link"] = entry.find("a", class_="title")["href"]
result["comments_link"] = entry.find("a", class_="comments")["href"]
return result
else:
return None
def job_logic():
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux i686; rv:81.0) Gecko/20100101 Firefox/81.0'}
page = requests.get(url = "https://old.reddit.com/r/programming/", headers = headers)
soup = BeautifulSoup(page.content, 'html.parser')
result = [x for x in [parse_single_entry(entry) for entry in soup.find_all("div", class_ = "top-matter")] if x is not None]
report_result_with_maps_only(result)
run_gamayun_script_logic(job_logic)
| 2.859375 | 3 |
tests/funcionales/test_formularios.py | cacao-accounting/cacao-accounting-mockup | 2 | 12797632 | <reponame>cacao-accounting/cacao-accounting-mockup<gh_stars>1-10
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contributors:
# - <NAME>
# pylint: disable=redefined-outer-name
import pytest
from cacao_accounting import create_app as app_factory
from cacao_accounting.database import database
from cacao_accounting.datos import base_data, dev_data
@pytest.fixture(scope="module", autouse=True)
def app():
from cacao_accounting.config import SQLITE
app = app_factory(
{
"SECRET_KEY": "<KEY>",
"SQLALCHEMY_DATABASE_URI": "sqlite://",
"SQLALCHEMY_TRACK_MODIFICATIONS": False,
"TESTING": True,
"WTF_CSRF_ENABLED": False,
"DEBUG": True,
"DESKTOPMODE": False,
}
)
with app.app_context():
database.drop_all()
database.create_all()
base_data()
dev_data()
app.app_context().push()
yield app
@pytest.fixture
def elimina_variable_entorno(app):
import os
if os.environ.get("CACAO_TEST"):
os.environ.pop("CACAO_TEST")
app.config["ENV"] = "production"
else:
pass
@pytest.fixture
def client(app):
return app.test_client()
@pytest.fixture
def runner(app):
return app.test_cli_runner()
class AuthActions:
def __init__(self, client):
self._client = client
def login(self):
return self._client.post("/login", data={"usuario": "cacao", "acceso": "cacao"})
def logout(self):
return self._client.get("/salir")
@pytest.fixture
def auth(client):
return AuthActions(client)
def test_formulario_nueva_entidad(client, auth):
from cacao_accounting.database import Entidad
auth.login()
response = client.get("/accounts/entity/new")
assert b"Crear Nueva Entidad." in response.data
entidad = Entidad.query.filter_by(entidad="Test Form").first()
assert entidad is None
post = client.post(
"/accounts/entity/new",
data={
"nombre_comercial": "Test Form",
"razon_social": "Test Form",
"id_fiscal": "Test Form",
"id": "Test Form",
"moneda": "NIO",
"tipo_entidad": "Asociación",
"correo_electronico": "<EMAIL>",
"web": "https://cacao.io",
"telefono1": "+505 8771 0980",
"telefono2": "+505 8661 2108",
"fax": "+505 2273 0754",
},
follow_redirects=True,
)
entidad = Entidad.query.filter_by(entidad="Test Form").first()
assert entidad is not None
assert entidad.moneda == "NIO"
assert entidad.entidad == "Test Form"
def test_formulario_editar_entidad(client, auth):
from cacao_accounting.database import Entidad
auth.login()
get = client.get("/accounts/entity/edit/dulce")
assert b"Editar Entidad." in get.data
post = client.post(
"/accounts/entity/edit/dulce",
data={
"id_fiscal": "J08100000078",
"nombre_comercial": "<NAME>",
"razon_social": "Dulces Mundo Sabor Sociedad Anonima",
"telefono1": "+506 8771 0980",
"telefono2": "+506 8667 2108",
"correo_electronico": "<EMAIL>",
"fax": "+506 7242 2789",
"web": "candy.org",
},
follow_redirects=True,
)
assert b"<NAME>" in post.data
assert b"J08100000078" in post.data
assert b"Dulces Mundo Sabor Sociedad Anonima" in post.data
assert b"+506 8771 0980" in post.data
assert b"+506 8667 2108" in post.data
assert b"<EMAIL>" in post.data
assert b"+506 7242 2789" in post.data
assert b"candy.org" in post.data
assert b"dulce" in post.data
def test_formulario_nueva_unidad(client, auth):
from cacao_accounting.database import Unidad
auth.login()
response = client.get("/accounts/unit/new")
assert b"Crear Nueva Unidad de Negocios." in response.data
unidad = Unidad.query.filter_by(unidad="Test Form").first()
assert unidad is None
post = client.post(
"/accounts/unit/new",
data={
"id": "test",
"nombre": "Test Form",
"entidad": "cacao",
"correo_electronico": "<EMAIL>",
"web": "https://cacao.io",
"telefono1": "+505 8771 0980",
"telefono2": "+505 8661 2108",
"fax": "+505 2273 0754",
},
)
unidad = Unidad.query.filter_by(unidad="test").first()
assert unidad is not None
assert unidad.entidad == "cacao"
assert unidad.unidad == "test"
| 2.25 | 2 |
graphlets/utils.py | KirillShmilovich/graphlets | 3 | 12797633 | """
util.py
Some utility functions
"""
import os
import numpy as np
from sklearn.neighbors import BallTree, radius_neighbors_graph
import networkx as nx
__all__ = ["ORCA_PATH", "pbc", "orbits", "weights", "compute_graph"]
ORCA_PATH = os.path.abspath(os.path.abspath(__file__) + "../../../orca/orca.exe")
def pbc(x0, x1, dims):
delta = np.abs(x0 - x1)
delta = np.where(delta > 0.5 * dims, delta - dims, delta)
return np.sqrt((delta**2).sum(axis=-1))
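# pbc computes Euclidean distances under the minimum-image convention: coordinate
# differences larger than half the box length in dims are wrapped across the periodic box.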
orbits = np.array([
1, 2, 2, 2, 3, 4, 3, 3, 4, 3, 4, 4, 4, 4, 3, 4, 6, 5, 4, 5, 6, 6, 4, 4, 4, 5, 7, 4, 6, 6, 7, 4, 6, 6, 6, 5, 6, 7,
7, 5, 7, 6, 7, 6, 5, 5, 6, 8, 7, 6, 6, 8, 6, 9, 5, 6, 4, 6, 6, 7, 8, 6, 6, 8, 7, 6, 7, 7, 8, 5, 6, 6, 4
],
dtype=np.float)
weights = 1. - np.log(orbits) / np.log(73.)
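# Orbit weights down-weight each of the 73 graphlet orbits by the log of the counts in
# `orbits` above (presumably the per-orbit dependency counts used in graphlet-correlation
# style weighting), so orbits with more dependencies contribute less.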
def compute_graph(X, r_cut, **kwargs):
if kwargs["dims"] is not None:
BT = BallTree(X, metric=kwargs["metric"], dims=kwargs["dims"])
else:
BT = BallTree(X, metric=kwargs["metric"])
rng_con = radius_neighbors_graph(BT, r_cut, n_jobs=1, mode='connectivity')
A = np.matrix(rng_con.toarray())
G = nx.from_numpy_matrix(A)
return G
| 2.734375 | 3 |
plugins/provider_video_ustvgo/lib/translations.py | cookieisland/cabernet | 0 | 12797634 | <reponame>cookieisland/cabernet
# pylama:ignore=E203,E221
"""
MIT License
Copyright (C) 2021 ROCKY4546
https://github.com/rocky4546
This file is part of Cabernet
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
"""
from lib.tvheadend.epg_category import groups
from lib.tvheadend.epg_category import tvh_genres
ustvgo_channels = '<KEY>'
ustvgo_png = '<KEY>'
ustvgo_stream = 'gfpMXf5BjIUCXRpNtNHFzfkQtxdMkqSGgfW='
ustvgo_epg = '<KEY>'
ustvgo_program = '<KEY>'
ustvgo_groups = {
}
ustvgo_genres = {
"Action & Adventure": [ tvh_genres['ADVENTURE'] ],
"Business": [ tvh_genres['NEWS'] ],
"Comedy": [ tvh_genres['COMEDY'] ],
"Documentary": [ tvh_genres['DOCUMENTARY'] ],
"Drama": [ tvh_genres['MOVIE'] ],
"Educational": [ tvh_genres['EDUCATIONAL'] ],
"Events & Specials": [ tvh_genres['SPORT_SPECIAL'] ],
"Family": [ tvh_genres['KIDS'] ],
"Fantasy": [ tvh_genres['SF'] ],
"Food & Cooking": [ tvh_genres['COOKING'] ],
"Game Show": [ tvh_genres['GAME'] ],
"Health & Lifestyle": [ tvh_genres['FITNESS'] ],
"Horror": [ tvh_genres['SF'] ],
"Kids": [ tvh_genres['KIDS'] ],
"Music": [ tvh_genres['MUSIC'] ],
"None": None,
"Other": None,
"Pro Sports": [ tvh_genres['SPORT'] ],
"Reality": [ tvh_genres['GAME'] ],
"Science": [ tvh_genres['SCIENCE'] ],
"Science Fiction": [ tvh_genres['SF'] ],
"Sports": [ tvh_genres['SPORT'] ],
"Suspense": [ tvh_genres['SF'] ],
"Talk & Interview": [ tvh_genres['TALK_SHOW'] ],
"Tech & Gaming": [ tvh_genres['TECHNOLOGY'] ],
"Travel": [ tvh_genres['TRAVEL'] ],
"Variety Shows": [ tvh_genres['VARIETY'] ]
}
| 1.492188 | 1 |
test/autocomplete.py | mrHola21/Eterm | 12 | 12797635 | <reponame>mrHola21/Eterm<gh_stars>10-100
import getpass
import os_test
import readline
class MyCompleter:
def __init__(self, options):
self.options = sorted(options)
def complete(self, text, state):
if state == 0:
if text:
self.matches = [s for s in self.options
if s and s.startswith(text)]
else:
self.matches = self.options[:]
try:
return self.matches[state]
except IndexError:
return None
completer = MyCompleter([file for file in os_test.listdir(f'/home/{getpass.getuser()}') if not file.startswith('.')])
readline.set_completer(completer.complete)
readline.parse_and_bind('tab: complete')
input("Input: ")
| 2.65625 | 3 |
logic.py | serkkz/PandaTools | 1 | 12797636 | from panda3d.core import Point3, TransformState, LQuaternion
from panda3d.core import Camera, PerspectiveLens, OrthographicLens, CS_default, CS_zup_right, CS_yup_right, CS_zup_left, CS_yup_left, CS_invalid
from panda3d.core import GeomVertexArrayFormat, Geom, GeomVertexFormat, GeomVertexData, GeomVertexWriter, Triangulator3, GeomTriangles
from panda3d.core import GeomNode, PandaNode, NodePath, ModelRoot
from panda3d.core import BamFile, BamWriter, Filename, Notify
from panda3d.core import CollisionPolygon, CollisionNode
import os
import bpy
import bmesh
from mathutils.geometry import distance_point_to_plane
ostream = Notify.out()
list_object_support = {'MESH': False, 'PERSP': False, 'ORTHO': False, 'CAMERA':True}
def show_message_box(message = "", title = "Message Box", icon = 'INFO'):
def draw(self, context):
self.layout.label(text = message)
bpy.context.window_manager.popup_menu(draw, title = title, icon = icon)
def checkcreate_dirs(path_project_save):
# Check whether the directory exists; create it if it does not.
if not os.path.exists(path_project_save):
try:
os.makedirs(path_project_save)
except OSError as error:
#print(error)
pass
def bam_writer_file(path_save, obj):
file = BamFile()
file.openWrite(Filename.fromOsSpecific(path_save + '.bam'))
writer: BamWriter = file.getWriter()
writer.writeObject(obj)
writer.flush()
file.close()
def conversion_transform(obj):
pos = Point3(*obj.matrix_world.translation)
quat = LQuaternion(*obj.matrix_world.to_quaternion())
scale = Point3(*obj.matrix_world.to_scale())
transform = TransformState.make_pos_quat_scale(pos, quat, scale)
return transform
def get_format(obj):
color = False
texcoord = False
# Create a new array.
geom_vertex_format = GeomVertexArrayFormat()
# Create a column for the vertices.
geom_vertex_format.add_column("vertex", 3, Geom.NT_float32, Geom.C_point)
geom_vertex_format.add_column("normal", 3, Geom.NT_float32, Geom.C_normal)
# Check whether the object has vertex colors.
if obj.data.vertex_colors.active:
color = True
# Create a color column with the default name.
geom_vertex_format.add_column("color", 4, Geom.NT_uint8, Geom.C_color)
# Also create additional columns.
for col in obj.data.vertex_colors:
# If the name does not match the active one.
if not col.name == obj.data.vertex_colors.active.name:
geom_vertex_format.add_column('color.{}'.format(col.name), 4, Geom.NT_uint8, Geom.C_color)
# Check whether the object has active texture coordinates.
if obj.data.uv_layers.active:
texcoord = True
# Create a texture-coordinate column with the default name.
geom_vertex_format.add_column("texcoord", 2, Geom.NT_float32, Geom.C_texcoord)
# Also create additional columns.
for uv in obj.data.uv_layers:
# If the name does not match the active one.
if not uv.name == obj.data.uv_layers.active.name:
geom_vertex_format.add_column('texcoord.{}'.format(uv.name), 2, Geom.NT_float32, Geom.C_texcoord)
# Create the format.
my_format = GeomVertexFormat()
my_format.addArray(geom_vertex_format)
# Register the format.
end_format = GeomVertexFormat.registerFormat(my_format)
return end_format, color, texcoord
def geom_create(obj):
geom_vertex_format = get_format(obj)
color = geom_vertex_format[1]
texcoord = geom_vertex_format[2]
vdata = GeomVertexData(obj.data.name, geom_vertex_format[0], Geom.UHStatic)
vdata.set_num_rows(len(obj.data.vertices))
vertex_position = GeomVertexWriter(vdata, 'vertex')
normal_vertex = GeomVertexWriter(vdata, 'normal')
# If vertex colors are used.
if color:
color_vertex_list = {'color': GeomVertexWriter(vdata, 'color')}
# Also create additional layers.
for col in obj.data.vertex_colors:
# If the name does not match the active one.
if not col.name == obj.data.vertex_colors.active.name:
color_vertex_list[col.name] = GeomVertexWriter(vdata, 'color.{}'.format(col.name))
# If texture coordinates are used.
if texcoord:
texcoord_vertex_list = {'texcoord': GeomVertexWriter(vdata, 'texcoord')}
# Also create additional layers.
for uv in obj.data.uv_layers:
# If the name does not match the active one.
if not uv.name == obj.data.uv_layers.active.name:
texcoord_vertex_list[uv.name] = GeomVertexWriter(vdata, 'texcoord.{}'.format(uv.name))
# Write out the triangle ordering.
prim = GeomTriangles(Geom.UHStatic)
prim.makeIndexed()
prim.setIndexType(Geom.NT_uint32)
mesh = obj.data
mesh.calc_loop_triangles()
# Indices of already-processed vertices are stored here.
list_vertext = {}
# Iterate over the triangles.
for triangle in mesh.loop_triangles:
# Process the first vertex.
if not triangle.loops[0] in list_vertext:
vertex_position.set_row(triangle.loops[0])
normal_vertex.set_row(triangle.loops[0])
vertex_position.add_data3(obj.data.vertices[triangle.vertices[0]].co[0], obj.data.vertices[triangle.vertices[0]].co[1], obj.data.vertices[triangle.vertices[0]].co[2])
if triangle.use_smooth:
normal_vertex.add_data3(obj.data.vertices[triangle.vertices[0]].normal[0], obj.data.vertices[triangle.vertices[0]].normal[1], obj.data.vertices[triangle.vertices[0]].normal[2])
else:
normal_vertex.add_data3(triangle.normal[0], triangle.normal[1], triangle.normal[2])
if texcoord:
for name in texcoord_vertex_list:
texcoord_vertex_list[name].set_row(triangle.loops[0])
if name == 'texcoord':
texcoord_vertex_list[name].addData2(obj.data.uv_layers.active.data[triangle.loops[0]].uv[0], obj.data.uv_layers.active.data[triangle.loops[0]].uv[1])
else:
texcoord_vertex_list[name].addData2(obj.data.uv_layers[name].data[triangle.loops[0]].uv[0], obj.data.uv_layers[name].data[triangle.loops[0]].uv[1])
if color:
for name in color_vertex_list:
color_vertex_list[name].set_row(triangle.loops[0])
if name == 'color':
color_vertex_list[name].addData4(obj.data.vertex_colors.active.data[triangle.loops[0]].color[0],
obj.data.vertex_colors.active.data[triangle.loops[0]].color[1],
obj.data.vertex_colors.active.data[triangle.loops[0]].color[2],
obj.data.vertex_colors.active.data[triangle.loops[0]].color[3])
else:
color_vertex_list[name].addData4(obj.data.vertex_colors[name].data[triangle.loops[0]].color[0],
obj.data.vertex_colors[name].data[triangle.loops[0]].color[1],
obj.data.vertex_colors[name].data[triangle.loops[0]].color[2],
obj.data.vertex_colors[name].data[triangle.loops[0]].color[3])
list_vertext[triangle.loops[0]] = None
        # Process the second vertex.
if not triangle.loops[1] in list_vertext:
vertex_position.set_row(triangle.loops[1])
normal_vertex.set_row(triangle.loops[1])
vertex_position.add_data3(obj.data.vertices[triangle.vertices[1]].co[0], obj.data.vertices[triangle.vertices[1]].co[1], obj.data.vertices[triangle.vertices[1]].co[2])
if triangle.use_smooth:
normal_vertex.add_data3(obj.data.vertices[triangle.vertices[1]].normal[0], obj.data.vertices[triangle.vertices[1]].normal[1], obj.data.vertices[triangle.vertices[1]].normal[2])
else:
normal_vertex.add_data3(triangle.normal[0], triangle.normal[1], triangle.normal[2])
if texcoord:
for name in texcoord_vertex_list:
texcoord_vertex_list[name].set_row(triangle.loops[1])
if name == 'texcoord':
texcoord_vertex_list[name].addData2(obj.data.uv_layers.active.data[triangle.loops[1]].uv[0], obj.data.uv_layers.active.data[triangle.loops[1]].uv[1])
else:
texcoord_vertex_list[name].addData2(obj.data.uv_layers[name].data[triangle.loops[1]].uv[0], obj.data.uv_layers[name].data[triangle.loops[1]].uv[1])
if color:
for name in color_vertex_list:
color_vertex_list[name].set_row(triangle.loops[1])
if name == 'color':
color_vertex_list[name].addData4(obj.data.vertex_colors.active.data[triangle.loops[1]].color[0],
obj.data.vertex_colors.active.data[triangle.loops[1]].color[1],
obj.data.vertex_colors.active.data[triangle.loops[1]].color[2],
obj.data.vertex_colors.active.data[triangle.loops[1]].color[3])
else:
color_vertex_list[name].addData4(obj.data.vertex_colors[name].data[triangle.loops[1]].color[0],
obj.data.vertex_colors[name].data[triangle.loops[1]].color[1],
obj.data.vertex_colors[name].data[triangle.loops[1]].color[2],
obj.data.vertex_colors[name].data[triangle.loops[1]].color[3])
list_vertext[triangle.loops[1]] = None
        # Process the third vertex.
if not triangle.loops[2] in list_vertext:
vertex_position.set_row(triangle.loops[2])
normal_vertex.set_row(triangle.loops[2])
vertex_position.add_data3(obj.data.vertices[triangle.vertices[2]].co[0], obj.data.vertices[triangle.vertices[2]].co[1], obj.data.vertices[triangle.vertices[2]].co[2])
if triangle.use_smooth:
normal_vertex.add_data3(obj.data.vertices[triangle.vertices[2]].normal[0], obj.data.vertices[triangle.vertices[2]].normal[1], obj.data.vertices[triangle.vertices[2]].normal[2])
else:
normal_vertex.add_data3(triangle.normal[0], triangle.normal[1], triangle.normal[2])
if texcoord:
for name in texcoord_vertex_list:
texcoord_vertex_list[name].set_row(triangle.loops[2])
if name == 'texcoord':
texcoord_vertex_list[name].addData2(obj.data.uv_layers.active.data[triangle.loops[2]].uv[0], obj.data.uv_layers.active.data[triangle.loops[2]].uv[1])
else:
texcoord_vertex_list[name].addData2(obj.data.uv_layers[name].data[triangle.loops[2]].uv[0], obj.data.uv_layers[name].data[triangle.loops[2]].uv[1])
if color:
for name in color_vertex_list:
color_vertex_list[name].set_row(triangle.loops[2])
if name == 'color':
color_vertex_list[name].addData4(obj.data.vertex_colors.active.data[triangle.loops[2]].color[0],
obj.data.vertex_colors.active.data[triangle.loops[2]].color[1],
obj.data.vertex_colors.active.data[triangle.loops[2]].color[2],
obj.data.vertex_colors.active.data[triangle.loops[2]].color[3])
else:
color_vertex_list[name].addData4(obj.data.vertex_colors[name].data[triangle.loops[2]].color[0],
obj.data.vertex_colors[name].data[triangle.loops[2]].color[1],
obj.data.vertex_colors[name].data[triangle.loops[2]].color[2],
obj.data.vertex_colors[name].data[triangle.loops[2]].color[3])
list_vertext[triangle.loops[2]] = None
        # Add the vertices to the primitive.
prim.addVertices(triangle.loops[0], triangle.loops[1], triangle.loops[2])
prim.closePrimitive()
geom = Geom(vdata)
geom.addPrimitive(prim)
return geom
def select_not_quad(obj):
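    """Select all polygons with five or more vertices and switch to face-select Edit Mode."""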
not_quad = []
for poly in obj.data.polygons:
if len(poly.vertices) >= 5:
not_quad.append(poly)
for i in obj.data.vertices:
i.select=False
for i in obj.data.edges:
i.select=False
for i in obj.data.polygons:
i.select = False
for poly in not_quad:
poly.select = True
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_mode(type="FACE")
def check_coplanar(obj, poly):
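    """Return True when all vertices of the polygon lie in a single plane."""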
status = False
    # A polygon with three vertices is coplanar by definition.
if len(poly.vertices) == 3:
status = True
elif len(poly.vertices) >= 3:
v1 = obj.data.vertices[poly.vertices[1]].co - obj.data.vertices[poly.vertices[0]].co
v2 = obj.data.vertices[poly.vertices[2]].co - obj.data.vertices[poly.vertices[0]].co
        # A quad (or larger polygon) is coplanar only if every extra vertex
        # lies in the plane spanned by the first three.
        status = True
        for index in poly.vertices[3:]:
            if abs(distance_point_to_plane(obj.data.vertices[index].co, obj.data.vertices[poly.vertices[0]].co, v1.cross(v2))) >= 1e-6:
                status = False
                break
return status
def select_not_coplanar(obj):
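    """Select all non-coplanar polygons of the object and switch to face-select Edit Mode."""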
not_coplanar = []
for poly in obj.data.polygons:
if not check_coplanar(obj, poly):
not_coplanar.append(poly)
for i in obj.data.vertices:
i.select=False
for i in obj.data.edges:
i.select=False
for i in obj.data.polygons:
i.select = False
for poly in not_coplanar:
poly.select = True
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_mode(type="FACE")
def triangle_poly(poly, obj):
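    """Triangulate a polygon and return a dict mapping triangle index to its three vertex coordinates."""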
trangle = {}
triangulator3 = Triangulator3()
index_tr = 0
for index in poly.vertices:
triangulator3.add_polygon_vertex(index_tr)
triangulator3.add_vertex(*obj.data.vertices[index].co)
index_tr += 1
triangulator3.triangulate()
for i in range(triangulator3.getNumTriangles()):
v0 = triangulator3.get_vertex(triangulator3.get_triangle_v0(i))
v1 = triangulator3.get_vertex(triangulator3.get_triangle_v1(i))
v2 = triangulator3.get_vertex(triangulator3.get_triangle_v2(i))
trangle[i] = ((v0[0], v0[1], v0[2]), (v1[0], v1[1], v1[2]), (v2[0], v2[1], v2[2]))
return trangle
def add_polygons_to_dict(dict_named, poly, obj):
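    """Group the polygon under its material name in the given dictionary."""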
    # If there is no such key in the dictionary.
if not obj.data.materials[poly.material_index].name in dict_named:
        # Add the key with a new list.
dict_named[obj.data.materials[poly.material_index].name] = [poly]
else:
        # If the key already exists, append to its list.
dict_named[obj.data.materials[poly.material_index].name].append(poly)
def colnode_add_dict(collision_node_dict, quad, name):
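    """Add a collision solid to the CollisionNode stored under name, creating the node if needed."""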
if name in collision_node_dict:
collision_node_dict[name].add_solid(quad)
else:
collision_node = CollisionNode(name)
collision_node.add_solid(quad)
collision_node_dict[name] = collision_node
def collision_polygon_create(obj, scene):
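    """Build collision geometry from the object's polygons.

    Polygons are grouped by material name; coplanar quads are used directly,
    everything else is triangulated first.  From/into collide masks are taken
    from the material (or object) hatcher properties.
    """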
named_triangles = {}
named_coplanar = {}
named_not_coplanar = {}
named_not_quad = {}
triangles = []
coplanar = []
not_coplanar = []
not_quad = []
    # Iterate over the object's polygons.
for poly in obj.data.polygons:
        # If the list of materials is not empty.
if len(obj.data.materials) >= 1:
            # If the material slot has a name, sort the polygons into dictionaries keyed by that name.
if hasattr(obj.data.materials[poly.material_index], 'name'):
                # A triangle needs no coplanarity check.
if len(poly.vertices) == 3:
for index in poly.vertices[2:]:
add_polygons_to_dict(named_triangles, poly, obj)
                # A quad has to be checked for coplanarity.
elif len(poly.vertices) == 4:
                    # If the polygon is coplanar.
if check_coplanar(obj, poly):
add_polygons_to_dict(named_coplanar, poly, obj)
else:
add_polygons_to_dict(named_not_coplanar, poly, obj)
                # A polygon with more than four vertices has to be split into triangles.
elif len(poly.vertices) >= 4:
add_polygons_to_dict(named_not_quad, poly, obj)
            # If there is no material name, sort the polygons into plain lists.
else:
                # A triangle needs no coplanarity check.
if len(poly.vertices) == 3:
for index in poly.vertices[2:]:
triangles.append(poly)
                # A quad has to be checked for coplanarity.
elif len(poly.vertices) == 4:
if check_coplanar(obj, poly):
coplanar.append(poly)
else:
not_coplanar.append(poly)
                # A polygon with more than four vertices has to be split into triangles.
elif len(poly.vertices) >= 4:
not_quad.append(poly)
else:
            # A triangle needs no coplanarity check.
if len(poly.vertices) == 3:
for index in poly.vertices[2:]:
triangles.append(poly)
            # A quad has to be checked for coplanarity.
elif len(poly.vertices) == 4:
if check_coplanar(obj, poly):
coplanar.append(poly)
else:
not_coplanar.append(poly)
            # A polygon with more than four vertices has to be split into triangles.
elif len(poly.vertices) >= 4:
not_quad.append(poly)
########################
########################
group = NodePath(obj.name)
collision_node_dict = {}
vertext_quad = []
    # Create collision polygons from the triangles.
for name in named_triangles:
for poly in named_triangles[name]:
for index in poly.vertices:
vertext_quad.append(Point3(*obj.data.vertices[index].co))
colnode_add_dict(collision_node_dict, CollisionPolygon(vertext_quad[0], vertext_quad[1], vertext_quad[2]), name)
vertext_quad = []
    # Create collision polygons from the coplanar quads.
for name in named_coplanar:
for poly in named_coplanar[name]:
for index in poly.vertices:
vertext_quad.append(Point3(*obj.data.vertices[index].co))
colnode_add_dict(collision_node_dict, CollisionPolygon(vertext_quad[0], vertext_quad[1], vertext_quad[2], vertext_quad[3]), name)
vertext_quad = []
    # Create collision polygons from the non-coplanar quads.
for name in named_not_coplanar:
        # Non-coplanar polygons have to be split into triangles.
for poly in named_not_coplanar[name]:
for vertext in triangle_poly(poly, obj).values():
colnode_add_dict(collision_node_dict, CollisionPolygon(vertext[0], vertext[1], vertext[2]), name)
    # Create collision polygons from the n-gons.
for name in named_not_quad:
        # N-gons have to be split into triangles.
for poly in named_not_quad[name]:
for vertext in triangle_poly(poly, obj).values():
colnode_add_dict(collision_node_dict, CollisionPolygon(vertext[0], vertext[1], vertext[2]), name)
for collision_node in collision_node_dict.values():
from_mask = '{}{}{}{}{}{}{}{}'.format(obj.data.materials[collision_node.name].hatcher.from_mask_1.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.from_mask_2.decode('utf-8'),
obj.data.materials[collision_node.name].hatcher.from_mask_3.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.from_mask_4.decode('utf-8'),
obj.data.materials[collision_node.name].hatcher.from_mask_5.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.from_mask_6.decode('utf-8'),
obj.data.materials[collision_node.name].hatcher.from_mask_7.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.from_mask_8.decode('utf-8'))
collision_node.setFromCollideMask(int(from_mask, 2))
into_mask = '{}{}{}{}{}{}{}{}'.format(obj.data.materials[collision_node.name].hatcher.into_mask_1.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.into_mask_2.decode('utf-8'),
obj.data.materials[collision_node.name].hatcher.into_mask_3.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.into_mask_4.decode('utf-8'),
obj.data.materials[collision_node.name].hatcher.into_mask_5.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.into_mask_6.decode('utf-8'),
obj.data.materials[collision_node.name].hatcher.into_mask_7.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.into_mask_8.decode('utf-8'))
collision_node.setIntoCollideMask(int(into_mask, 2))
node_path = NodePath(collision_node)
node_path.reparentTo(group)
if obj.data.materials[collision_node.name].hatcher.visibility_collision_polygons:
node_path.show()
collision_node = CollisionNode(obj.name)
    # Create collision polygons from the triangles.
for poly in triangles:
for index in poly.vertices:
vertext_quad.append(Point3(*obj.data.vertices[index].co))
quad = CollisionPolygon(vertext_quad[0], vertext_quad[1], vertext_quad[2])
collision_node.add_solid(quad)
vertext_quad = []
    # Create collision polygons from the coplanar quads.
for poly in coplanar:
for index in poly.vertices:
vertext_quad.append(Point3(*obj.data.vertices[index].co))
quad = CollisionPolygon(vertext_quad[0], vertext_quad[1], vertext_quad[2], vertext_quad[3])
collision_node.add_solid(quad)
vertext_quad = []
    # Non-coplanar polygons have to be split into triangles.
for poly in not_coplanar:
for vertext in triangle_poly(poly, obj).values():
quad = CollisionPolygon(vertext[0], vertext[1], vertext[2])
collision_node.add_solid(quad)
    # Polygons with more than four sides have to be split into triangles.
for poly in not_quad:
for vertext in triangle_poly(poly, obj).values():
quad = CollisionPolygon(vertext[0], vertext[1], vertext[2])
collision_node.add_solid(quad)
from_mask = '{}{}{}{}{}{}{}{}'.format(obj.hatcher.from_mask_1.decode('utf-8'), obj.hatcher.from_mask_2.decode('utf-8'), obj.hatcher.from_mask_3.decode('utf-8'), obj.hatcher.from_mask_4.decode('utf-8'),
obj.hatcher.from_mask_5.decode('utf-8'), obj.hatcher.from_mask_6.decode('utf-8'), obj.hatcher.from_mask_7.decode('utf-8'), obj.hatcher.from_mask_8.decode('utf-8'))
collision_node.setFromCollideMask(int(from_mask, 2))
into_mask = '{}{}{}{}{}{}{}{}'.format(obj.hatcher.into_mask_1.decode('utf-8'), obj.hatcher.into_mask_2.decode('utf-8'), obj.hatcher.into_mask_3.decode('utf-8'), obj.hatcher.into_mask_4.decode('utf-8'),
obj.hatcher.into_mask_5.decode('utf-8'), obj.hatcher.into_mask_6.decode('utf-8'), obj.hatcher.into_mask_7.decode('utf-8'), obj.hatcher.into_mask_8.decode('utf-8'))
collision_node.setIntoCollideMask(int(into_mask, 2))
    # If the collision node contains any solids.
if collision_node.getNumSolids() >= 1:
node_path = NodePath(collision_node)
node_path.reparentTo(group)
        # If the flag to show collision polygons is set.
if obj.hatcher.visibility_collision_polygons:
node_path.show()
return group.node().getChild(0)
def geom_node_create(obj, scene):
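    """Wrap the object's Geom into a GeomNode named after the mesh."""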
geom = geom_create(obj)
geom_node = GeomNode(obj.data.name)
geom_node.addGeom(geom)
return geom_node
def camera_create(obj, scene):
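    """Create a Panda3D Camera whose lens mirrors the Blender camera settings."""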
frame_size = obj.data.view_frame(scene = scene)
if obj.data.type == 'PERSP':
lens = PerspectiveLens()
if obj.data.type == 'ORTHO':
lens = OrthographicLens()
lens.set_film_size(abs(frame_size[0][0]) + abs(frame_size[1][0]), abs(frame_size[0][1]) + abs(frame_size[1][1]))
lens.set_focal_length(abs(frame_size[0][2]))
lens.set_near_far(obj.data.clip_start, obj.data.clip_end)
if obj.hatcher.coordinate_system == "CS_default":
lens.set_coordinate_system(CS_default)
if obj.hatcher.coordinate_system == "CS_zup_right":
lens.set_coordinate_system(CS_zup_right)
if obj.hatcher.coordinate_system == "CS_yup_right":
lens.set_coordinate_system(CS_yup_right)
if obj.hatcher.coordinate_system == "CS_zup_left":
lens.set_coordinate_system(CS_zup_left)
if obj.hatcher.coordinate_system == "CS_yup_left":
lens.set_coordinate_system(CS_yup_left)
if obj.hatcher.coordinate_system == "CS_invalid":
lens.set_coordinate_system(CS_invalid)
camera = Camera(obj.data.name)
camera.active = obj.hatcher.camera_active
bit = '{}{}{}{}{}{}{}{}'.format(obj.hatcher.draw_mask_1.decode('utf-8'), obj.hatcher.draw_mask_2.decode('utf-8'), obj.hatcher.draw_mask_3.decode('utf-8'), obj.hatcher.draw_mask_4.decode('utf-8'),
obj.hatcher.draw_mask_5.decode('utf-8'), obj.hatcher.draw_mask_6.decode('utf-8'), obj.hatcher.draw_mask_7.decode('utf-8'), obj.hatcher.draw_mask_8.decode('utf-8'))
camera.camera_mask = int(bit, 2)
camera.set_lens(lens)
return camera
def build_hierarchy(obj, scene):
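    """Recursively convert the object and its children into a Panda3D node tree."""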
    # Root node used to build the hierarchy.
root = NodePath("root")
    # Recurse to visit every child object.
def recurse(obj, parent):
        # Holds the function needed to export this type of object.
create_object = None
        # If the object is a mesh.
if obj.type == "MESH":
if obj.hatcher.type_mesh == "Render":
create_object = geom_node_create
if obj.hatcher.type_mesh == "Collision":
create_object = collision_polygon_create
        # If the object is a light source.
if obj.type == "LIGHT":
create_object = "LIGHT"
        # If the object is a camera.
if obj.type == "CAMERA":
if obj.data.type != 'PANO':
create_object = camera_create
        # If the object has no parent.
if not parent:
npp = NodePath(create_object(obj, scene))
#npp.setName(obj.name)
#npp.show()
npp.reparentTo(root)
npp.set_transform(root, conversion_transform(obj))
else:
            # If the object has a parent.
np = NodePath(create_object(obj, scene))
#np.setName(obj.name)
#np.show()
            # Check whether the parent object is already in the hierarchy.
result = root.find('**/{}'.format(parent.name))
if result:
np.reparentTo(result)
np.set_transform(root, conversion_transform(obj))
else:
np.reparentTo(root)
np.set_transform(root, conversion_transform(obj))
        # Walk through the children.
for child in obj.children:
recurse(child, obj)
recurse(obj, obj.parent)
return root.node().getChild(0)
import os
from datetime import datetime
class ExportObject(bpy.types.Operator):
bl_idname = "ui.export_object"
bl_label = "Generator_object"
def execute(self, context):
start_time = datetime.now()
context.view_layer.update()
        # Iterate over the selected objects.
for obj in context.selected_objects:
            # Join the project path and the model's relative directory.
path_project_save = os.path.join(context.scene.hatcher.ful_path_project, obj.hatcher.rel_path_object)
            # Create the directory if it does not exist yet.
checkcreate_dirs(path_project_save)
            # Join the directory path and the file name.
path_save = os.path.join(path_project_save, obj.name)
node = build_hierarchy(obj, context.scene)
root = ModelRoot('{}.bam'.format(obj.name))
root.add_child(node)
bam_writer_file(path_save, root)
show_message_box('Export object: {} completed, time: {}'.format(obj.name, datetime.now() - start_time), "Message")
return {'FINISHED'}
class ExportScene(bpy.types.Operator):
bl_idname = "ui.export_scene"
bl_label = "Generator_scene"
def execute(self, context):
start_time = datetime.now()
context.view_layer.update()
        # Join the project path and the scene's relative directory.
path_project_save = os.path.join(context.scene.hatcher.ful_path_project, context.scene.hatcher.rel_path_scene)
        # Create the directory if it does not exist yet.
checkcreate_dirs(path_project_save)
        # Create a root node to merge everything into.
root = ModelRoot('{}.bam'.format(context.scene.name))
        # Walk through every object in the scene.
for obj in context.scene.objects:
            # Only objects without a parent are of interest.
if not obj.parent:
                # Check whether this object type is supported.
if obj.type in list_object_support:
                    # If this type has a subtype to check.
if list_object_support[obj.type]:
if not obj.data.type == 'PANO':
node = build_hierarchy(obj, context.scene)
root.add_child(node)
else:
node = build_hierarchy(obj, context.scene)
root.add_child(node)
        # Join the directory path and the scene name.
path_save = os.path.join(path_project_save, context.scene.name)
bam_writer_file(path_save, root)
show_message_box('Export scene, completed, time: {}'.format(datetime.now() - start_time), "Message")
return {'FINISHED'}
class ExportSelected(bpy.types.Operator):
bl_idname = "ui.export_selected"
bl_label = "Generator_selected"
def execute(self, context):
start_time = datetime.now()
context.view_layer.update()
        # Join the project path and the relative output directory.
path_project_save = os.path.join(context.scene.hatcher.ful_path_project, context.scene.hatcher.rel_path_other)
        # Create the directory if it does not exist yet.
checkcreate_dirs(path_project_save)
        # If the file name field is filled in, merge everything into a single file.
if not context.scene.hatcher.file_name_selected == '':
            # Create a root node to merge everything into.
root = ModelRoot('{}.bam'.format(context.scene.hatcher.file_name_selected))
            # Iterate over the selected objects.
for obj in context.selected_objects:
                # Check whether this object type is supported.
if obj.type in list_object_support:
                    # If this type has a subtype to check.
if list_object_support[obj.type]:
if not obj.data.type == 'PANO':
node = build_hierarchy(obj, context.scene)
root.add_child(node)
else:
node = build_hierarchy(obj, context.scene)
root.add_child(node)
            # Join the directory path and the file name.
path_save = os.path.join(path_project_save, context.scene.hatcher.file_name_selected)
bam_writer_file(path_save, root)
        # Otherwise export each object to its own file.
else:
            # Iterate over the selected objects.
for obj in context.selected_objects:
                # Check whether this object type is supported.
if obj.type in list_object_support:
                    # If this type has a subtype to check.
if list_object_support[obj.type]:
if not obj.data.type == 'PANO':
node = build_hierarchy(obj, context.scene)
                            # Join the directory path and the file name.
path_save = os.path.join(path_project_save, obj.name)
bam_writer_file(path_save, node)
else:
node = build_hierarchy(obj, context.scene)
                            # Join the directory path and the file name.
path_save = os.path.join(path_project_save, obj.name)
bam_writer_file(path_save, node)
show_message_box('Export selected, completed, time: {}'.format(datetime.now() - start_time), "Message")
return {'FINISHED'}
class CheckingCoplanarity(bpy.types.Operator):
bl_idname = "ui.check_coplanarity"
bl_label = "Checking_coplanarity"
def execute(self, context):
select_not_coplanar(context.object)
return {'FINISHED'}
class CheckingQuad(bpy.types.Operator):
bl_idname = "ui.check_quad"
bl_label = "Checking_quad"
def execute(self, context):
select_not_quad(context.object)
return {'FINISHED'}
| 1.9375 | 2 |
vulnerable_people_form/form_pages/postcode_eligibility.py | StephenGill/govuk-shielded-vulnerable-people-service | 0 | 12797637 | <filename>vulnerable_people_form/form_pages/postcode_eligibility.py<gh_stars>0
from flask import redirect, session
from vulnerable_people_form.form_pages.shared.answers_enums import ApplyingOnOwnBehalfAnswers
from .blueprint import form
from .shared.render import render_template_with_title
from .shared.routing import route_to_next_form_page
from .shared.session import get_errors_from_session, request_form, get_answer_from_form
from .shared.validation import validate_postcode
@form.route("/postcode-eligibility", methods=["GET"])
def get_postcode_eligibility():
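    """Render the postcode eligibility page, choosing the previous path from the applying-on-own-behalf answer."""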
applying_on_own_behalf_answer = get_answer_from_form(["applying_on_own_behalf"])
if applying_on_own_behalf_answer == ApplyingOnOwnBehalfAnswers.YES.value:
prev_path = "/nhs-login"
elif applying_on_own_behalf_answer == ApplyingOnOwnBehalfAnswers.NO.value:
prev_path = "/applying-on-own-behalf"
else:
raise ValueError("Unexpected ApplyingOnOwnBehalfAnswers value encountered: " + applying_on_own_behalf_answer)
return render_template_with_title(
"postcode-eligibility.html",
previous_path=prev_path,
values={"postcode": session.get("postcode", "")},
**get_errors_from_session("postcode"),
)
@form.route("/postcode-eligibility", methods=["POST"])
def post_postcode_verification():
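    """Store and validate the submitted postcode, then route to the next form page."""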
session["postcode"] = request_form().get("postcode")
if not validate_postcode(session["postcode"], "postcode"):
return redirect("/postcode-eligibility")
session["error_items"] = {}
return route_to_next_form_page()
| 2.59375 | 3 |
mmdet/models/roi_heads/bbox_heads/__init__.py | liuyanyi/mmdetection | 0 | 12797638 | from .bbox_head import BBoxHead
from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,
Shared4Conv1FCBBoxHead)
from .dii_head import DIIHead
from .double_bbox_head import DoubleConvFCBBoxHead
from .sabl_head import SABLHead
from .scnet_bbox_head import SCNetBBoxHead
from .rotated import (BBoxHeadRbbox, Shared2FCBBoxHeadRbbox,
Shared4Conv1FCBBoxHeadRbbox, ConvFCBBoxHeadRbbox,
MHBBoxHeadRbbox)
__all__ = [
'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead',
'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead',
'SCNetBBoxHead', 'BBoxHeadRbbox', 'ConvFCBBoxHeadRbbox', 'Shared2FCBBoxHeadRbbox',
'Shared4Conv1FCBBoxHeadRbbox', 'MHBBoxHeadRbbox'
]
| 1.179688 | 1 |
hvodash/data.py | wtollett-usgs/dashboards | 0 | 12797639 | # -*- coding: utf-8 -*-
import pandas as pd
import plotly.graph_objs as go
import requests
from base64 import b64encode as be
from dash_html_components import Th, Tr, Td, A
from datetime import datetime, timedelta
from flask import request
from folium import Map
from operator import itemgetter
from os.path import join, dirname, realpath
from random import randint
from requests.auth import HTTPBasicAuth
from .maputils import create_dcircle_marker, create_tcircle_marker
from .utils import (
api_request_to_json,
json_to_dataframe,
starttime_str_to_seconds,
)
TMP = join(dirname(realpath(__file__)), '../tmp/')
LCL = join(dirname(realpath(__file__)), '../images/')
def get_rsam(ch, st):
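    """Fetch RSAM records for a channel and return a Plotly figure dict."""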
j = api_request_to_json(f'rsam?channel={ch}&starttime={st}')
data = []
d = pd.DataFrame(j['records'][ch])
if not d.empty:
d.set_index('date', inplace=True)
data = [go.Scatter(
x=d.index,
y=d.rsam,
mode='markers',
marker=dict(size=4)
)]
return {
'data': data,
'layout': {
'margin': {
't': 30
},
'xaxis': {
'range': [d.index.min(), d.index.max()]
},
'yaxis': {
'range': [d.rsam.min() - 20, 2 * d.rsam.mean()]
}
}
}
def get_tilt(ch, st):
j = api_request_to_json(f'tilt?channel={ch}&starttime={st}')
d = pd.DataFrame(j['records'][ch])
traces = []
if not d.empty:
d.set_index('date', inplace=True)
traces.append({
'x': d.index,
'y': d['radial'],
'name': f"radial {j['used_azimuth']:.1f}"
})
traces.append({
'x': d.index,
'y': d['tangential'],
'name': f"tangential {j['tangential_azimuth']:.1f}"
})
return {
'data': traces,
'layout': {
'margin': {
't': 30
}
}
}
def get_rtnet(ch, st):
j = api_request_to_json(f'rtnet?channel={ch}&starttime={st}')
d = pd.DataFrame(j['records'][ch])
traces = []
if not d.empty:
d.set_index('date', inplace=True)
traces.append({
'x': d.index,
'y': d.east,
'name': 'East',
'mode': 'markers',
'marker': dict(
size=4
)
})
traces.append({
'x': d.index,
'y': d.north,
'name': 'North',
'mode': 'markers',
'marker': dict(
size=4
)
})
traces.append({
'x': d.index,
'y': d.up,
'name': 'Up',
'mode': 'markers',
'marker': dict(
size=4
)
})
return {
'data': traces,
'layout': {
'margin': {
't': 30
}
}
}
def get_and_store_hypos(geo, st, current_data):
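    """Return hypocenter data as JSON, querying the API only when the cached data is stale or too short."""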
if is_data_needed(st, current_data):
return get_hypos(geo, st).to_json()
else:
return current_data
def is_data_needed(st, data):
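    """Return True when the cached hypocenter data is missing, stale, or covers less than the requested window."""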
if not data:
return True
now = datetime.now()
olddata = pd.read_json(data)
mindate = olddata.date.min()
maxdate = olddata.date.max()
td = now - mindate
# Requested more than is currently stored?
seconds = starttime_str_to_seconds(st)
if seconds > (td.days * 86400 + td.seconds):
return True
# Data is old
td = now - maxdate
if (td.seconds / 60) > 10:
return True
return False
def get_hypos(geo, st):
j = api_request_to_json(f'hypocenter?geo={geo}&starttime={st}')
d = pd.DataFrame(j['records'])
if not d.empty:
d['date'] = d['date'].str.slice(stop=-2)
d['date'] = pd.to_datetime(d['date'])
d.reset_index(drop=True, inplace=True)
return d
def get_hypos_map(st, kind, data, region):
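    """Render the hypocenters on a folium map for the given region and return the map HTML."""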
filename = f'{TMP}hypos{randint(0,9999):04d}.html'
d = json_to_dataframe(st, data)
m = None
if region == 'kism':
m = Map(location=[19.41, -155.27], min_zoom=12, max_zoom=15,
zoom_start=13, tiles='Stamen Terrain')
elif region == 'lerz':
m = Map(location=[19.43, -154.88], min_zoom=11, max_zoom=15,
zoom_start=11, tiles='Stamen Terrain')
if kind == 'T':
mid = d.date.min()
mad = d.date.max()
        d.apply(create_tcircle_marker, args=(m, mid, mad), axis=1)
elif kind == 'A':
d.apply(create_dcircle_marker, args=(m,), axis=1)
m.save(filename)
return open(filename, 'r').read()
def get_hypos_legend(kind):
encoded_img = None
if kind == 'A':
encoded_img = be(open(f'{LCL}dlegend.png', 'rb').read())
elif kind == 'T':
encoded_img = be(open(f'{LCL}tlegend.png', 'rb').read())
return f"data:image/jpg;base64,{encoded_img.decode('utf8')}"
def get_hypos_table(st, data):
d = json_to_dataframe(st, data)
if not d.empty:
d.sort_values('date', inplace=True)
return d.to_dict('records')
def get_hypo_counts(st, data):
d = json_to_dataframe(st, data)
data = []
if not d.empty:
d.sort_values('date', inplace=True)
d['moment'] = d.prefMag.apply(lambda x:
pow(10.0, 16.0 + ((3.0 * x)/2.0)))
d['cmoment'] = d.moment.cumsum()
bins = d.groupby(pd.Grouper(freq='60min', key='date')).count()
data = [go.Bar(
{
'x': bins.index,
'y': bins.depth,
'name': 'Count'
}), go.Scatter(
{
'x': d.date,
'y': d.cmoment,
'name': 'Moment',
'yaxis': 'y2'
})]
return {
'data': data,
'layout': {
'margin': {
't': 30
},
'showlegend': False,
'yaxis': {
'title': 'Earthquakes per Hour'
},
'yaxis2': {
'title': 'Cumulative Moment (dyn-cm)',
'showgrid': False,
'overlaying': 'y',
'side': 'right'
}
}
}
def get_spectrogram(src):
now = datetime.utcnow()
d = now.timetuple().tm_yday
tm = now - timedelta(minutes=now.minute % 10, seconds=now.second,
microseconds=now.microsecond)
if 'ipensive' in src:
t = '%d%s%s-%s%s' % (now.year, str(now.month).zfill(2),
str(now.day).zfill(2), str(tm.hour).zfill(2),
str(tm.minute).zfill(2))
else:
t = '%d%s-%s%s' % (now.year, str(d).zfill(3), str(tm.hour).zfill(2),
str(tm.minute).zfill(2))
return src.format(now.year, d, t)
def get_helicorder(ch):
url = f'a=plot&o=png&tz=Pacific/Honolulu&w=900&h=636&n=1&x.0=75&y.0=20' \
f'&w.0=750&h.0=576&mh.0=900&chCnt.0=1' \
f'&src.0=hvo_seismic_winston_helicorders&st.0=-28800000&et.0=N' \
f'&chNames.0={ch}&dataTypes.0=275.000000&tc.0=15&barMult.0=3' \
f'&sc.0=T&plotSeparately.0=false'
encoded_img = be(open(get_valve_plot(url), 'rb').read())
return f"data:image/jpg;base64,{encoded_img.decode('utf8')}"
def get_tiltv(region):
chs = ''
if region == 'kism':
chs = '18,20'
elif region == 'merz':
chs = '15,16'
url = f'a=plot&o=png&tz=Pacific/Honolulu&w=900&h=1740&n=1&x.0=75&y.0=20' \
f'&w.0=750&h.0=240&mh.0=900&chCnt.0=7&src.0=hvo_def_tilt' \
f'&st.0=-28800000&et.0=N&lg.0=true&ch.0={chs}' \
f'&dataTypes.0=NaN&plotType.0=tv&rk.0=1&ds.0=None&dsInt.0=&sdt.0=' \
f'&az.0=n&azval.0=&linetype.0=l&ysLMin.0=&ysLMax.0=&ysRMin.0=' \
f'&ysRMax.0=&despike_period.0=&filter_arg1.0=&filter_arg2.0=' \
f'&despike.0=F&detrend.0=F&dmo_fl.0=0&filter_arg3.0=' \
f'&dmo_arithmetic.0=None&dmo_arithmetic_value.0=&dmo_db.0=0' \
f'&debias_period.0=&radial.0=T&tangential.0=T&xTilt.0=F&yTilt.0=F' \
f'&magnitude.0=F&azimuth.0=F&holeTemp.0=F&boxTemp.0=F&instVolt.0=F' \
f'&rainfall.0=F&vs.0=&plotSeparately.0=false'
encoded_img = be(open(get_valve_plot(url), 'rb').read())
return f"data:image/jpg;base64,{encoded_img.decode('utf8')}"
def get_valve_plot(itm):
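    """Download a Valve3 plot with the caller's basic-auth credentials and return the local file path."""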
filename = f'{TMP}valve{randint(0,9999):04d}.jpg'
url = f'https://hvovalve.wr.usgs.gov/valve3/valve3.jsp?{itm}'
u = request.authorization.username
p = request.authorization.password
r = requests.get(url, auth=HTTPBasicAuth(u, p))
with open(filename, 'wb') as f:
f.write(r.content)
return filename
def get_ash3d_img():
url = ('https://volcanoes.usgs.gov/vsc/captures/ash3d/'
'332010_1008443_D_deposit.gif')
return url
def get_logs(max_rows=20):
p = api_request_to_json('logs')['posts']
headers = ['Post', 'Author', 'Date']
d = sorted(p, key=itemgetter('date'), reverse=True)
link = 'https://hvointernal.wr.usgs.gov/hvo_logs/read?id={}'
return [[Tr([Th(col) for col in headers])] +
[Tr([
Td(A(href=link.format(d[i]['id']),
children='%s' % d[i]['subject'],
target='_blank')),
Td(children='%s' % d[i]['user']),
Td(children='%s' % d[i]['date'])
]) for i in range(0, max_rows)]]
def get_so2emissions(ch, st):
j = api_request_to_json(f'so2emissions?channel={ch}&starttime={st}')
data = []
d = pd.DataFrame(j['records'][ch])
if not d.empty:
d.set_index('date', inplace=True)
data = [go.Scatter(
x=d.index,
y=d.so2,
mode='markers',
marker=dict(size=10)
)]
return {
'data': data,
'layout': {
'margin': {
't': 30
}
}
}
def get_nps_so2(ch, st):
j = api_request_to_json(f'npsadvisory?channel={ch}&starttime={st}')
data = []
d = pd.DataFrame(j['records'][ch])
if not d.empty:
d.set_index('date', inplace=True)
data = [go.Scatter(
x=d.index,
y=d.avgso2,
mode='markers',
marker=dict(size=6)
)]
return {
'data': data,
'layout': {
'margin': {
't': 30
},
'yaxis': {
'exponentformat': 'none'
}
}
}
def get_nps_wind(ch, st):
url = (f'npsadvisory?channel={ch}&starttime={st}&series=windspeed,winddir')
j = api_request_to_json(url)
data = []
d = pd.DataFrame(j['records'][ch])
if not d.empty:
d.set_index('date', inplace=True)
data = [go.Scatter(
x=d.index,
y=d.windspeed,
name='Wind Speed',
mode='markers',
marker=dict(size=6)
), go.Scatter(
x=d.index,
y=d.winddir,
name='Wind Dir',
yaxis='y2',
mode='markers',
marker=dict(size=6)
)]
return {
'data': data,
'layout': {
'margin': {
't': 30
},
'yaxis': {
'title': 'Windspeed (m/s)'
},
'yaxis2': {
'title': 'Wind Direction (deg)',
'showgrid': False,
'overlaying': 'y',
'side': 'right'
}
}
}
| 2.171875 | 2 |
clue2020-data-lab/predict_sequence_label.py | dedeguo/knowledge_graph_construction | 0 | 12797640 | #!/usr/bin/python
# coding:utf8
"""
@author: <NAME>
@time: 2019-12-07 20:51
"""
import os
import re
import json
import tensorflow as tf
import tokenization
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
vocab_file = "./vocab.txt"
tokenizer_ = tokenization.FullTokenizer(vocab_file=vocab_file)
label2id = json.loads(open("./label2id.json").read())
id2label = [k for k, v in label2id.items()]
def process_one_example_p(tokenizer, text, max_seq_len=128):
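    """Tokenize one text and return (input_ids, input_mask, segment_ids) padded to max_seq_len."""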
textlist = list(text)
tokens = []
# labels = []
for i, word in enumerate(textlist):
token = tokenizer.tokenize(word)
# print(token)
tokens.extend(token)
if len(tokens) >= max_seq_len - 1:
tokens = tokens[0:(max_seq_len - 2)]
# labels = labels[0:(max_seq_len - 2)]
ntokens = []
segment_ids = []
label_ids = []
ntokens.append("[CLS]") # 句子开始设置CLS 标志
segment_ids.append(0)
for i, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
# label_ids.append(label2id[labels[i]])
ntokens.append("[SEP]")
segment_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(ntokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_len:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(0)
ntokens.append("**NULL**")
assert len(input_ids) == max_seq_len
assert len(input_mask) == max_seq_len
assert len(segment_ids) == max_seq_len
feature = (input_ids, input_mask, segment_ids)
return feature
def load_model(model_folder):
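    """Restore the latest checkpoint from model_folder and return the TensorFlow session."""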
# We retrieve our checkpoint fullpath
try:
checkpoint = tf.train.get_checkpoint_state(model_folder)
input_checkpoint = checkpoint.model_checkpoint_path
print("[INFO] input_checkpoint:", input_checkpoint)
except Exception as e:
input_checkpoint = model_folder
print("[INFO] Model folder", model_folder, repr(e))
# We clear devices to allow TensorFlow to control on which device it will load operations
clear_devices = True
tf.reset_default_graph()
# We import the meta graph and retrieve a Saver
saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)
# We start a session and restore the graph weights
sess_ = tf.Session()
saver.restore(sess_, input_checkpoint)
# opts = sess_.graph.get_operations()
# for v in opts:
# print(v.name)
return sess_
model_path = "./ner_bert_base/"
sess = load_model(model_path)
input_ids = sess.graph.get_tensor_by_name("input_ids:0")
input_mask = sess.graph.get_tensor_by_name("input_mask:0") # is_training
segment_ids = sess.graph.get_tensor_by_name("segment_ids:0") # fc/dense/Relu cnn_block/Reshape
keep_prob = sess.graph.get_tensor_by_name("keep_prob:0")
p = sess.graph.get_tensor_by_name("loss/ReverseSequence_1:0")
def predict(text):
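    """Run NER on a single text and return {label: {entity_text: [[start, end]]}}."""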
data = [text]
    # Process the texts as one batch, truncating each to at most 62 characters (max_seq_len=64 minus [CLS]/[SEP]).
features = []
for i in data:
feature = process_one_example_p(tokenizer_, i, max_seq_len=64)
features.append(feature)
feed = {input_ids: [feature[0] for feature in features],
input_mask: [feature[1] for feature in features],
segment_ids: [feature[2] for feature in features],
keep_prob: 1.0
}
[probs] = sess.run([p], feed)
result = []
for index, prob in enumerate(probs):
for v in prob[1:len(data[index]) + 1]:
result.append(id2label[int(v)])
print(result)
labels = {}
start = None
index = 0
for w, t in zip("".join(data), result):
if re.search("^[BS]", t):
if start is not None:
label = result[index - 1][2:]
if labels.get(label):
te_ = text[start:index]
# print(te_, labels)
labels[label][te_] = [[start, index - 1]]
else:
te_ = text[start:index]
# print(te_, labels)
labels[label] = {te_: [[start, index - 1]]}
start = index
# print(start)
if re.search("^O", t):
if start is not None:
# print(start)
label = result[index - 1][2:]
if labels.get(label):
te_ = text[start:index]
# print(te_, labels)
labels[label][te_] = [[start, index - 1]]
else:
te_ = text[start:index]
# print(te_, labels)
labels[label] = {te_: [[start, index - 1]]}
# else:
# print(start, labels)
start = None
index += 1
if start is not None:
# print(start)
label = result[start][2:]
if labels.get(label):
te_ = text[start:index]
# print(te_, labels)
labels[label][te_] = [[start, index - 1]]
else:
te_ = text[start:index]
# print(te_, labels)
labels[label] = {te_: [[start, index - 1]]}
# print(labels)
return labels
def submit(path):
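    """Predict labels for every line of a JSON-lines file and write the results to ner_predict.json."""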
data = []
for line in open(path):
if not line.strip():
continue
_ = json.loads(line.strip())
res = predict(_["text"])
data.append(json.dumps({"label": res}, ensure_ascii=False))
open("ner_predict.json", "w").write("\n".join(data))
if __name__ == "__main__":
text_ = "梅塔利斯在乌克兰联赛、杯赛及联盟杯中保持9场不败,状态相当出色;"
res_ = predict(text_)
print(res_)
submit("data/thuctc_valid.json")
| 2.515625 | 3 |
lib/cros_test_unittest.py | khromiumos/chromiumos-chromite | 0 | 12797641 | <filename>lib/cros_test_unittest.py
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for CrOSTest."""
from __future__ import print_function
import os
import sys
import mock
import pytest # pylint: disable=import-error
from chromite.lib import constants
from chromite.lib import cros_test
from chromite.lib import cros_test_lib
from chromite.lib import osutils
from chromite.lib import partial_mock
from chromite.scripts import cros_set_lsb_release
from chromite.utils import outcap
pytestmark = cros_test_lib.pytestmark_inside_only
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
# pylint: disable=protected-access
class CrOSTesterBase(cros_test_lib.RunCommandTempDirTestCase):
"""Base class for setup and creating a temp file path."""
def createTester(self, opts=None):
"""Builds a CrOSTest suitable for testing.
Args:
opts: Cmd-line args to cros_test used to build a CrOSTest.
Returns:
An instance of cros_test.CrOSTest.
"""
opts = cros_test.ParseCommandLine(opts if opts else [])
opts.enable_kvm = True
# We check if /dev/kvm is writeable to use sudo.
with mock.patch.object(os, 'access', return_value=True):
tester = cros_test.CrOSTest(opts)
tester._device.use_sudo = False
tester._device.board = 'amd64-generic'
tester._device.image_path = self.TempFilePath(
'chromiumos_qemu_image.bin')
osutils.Touch(tester._device.image_path)
version_str = ('QEMU emulator version 2.6.0, Copyright (c) '
'2003-2008 <NAME>')
self.rc.AddCmdResult(partial_mock.In('--version'), output=version_str)
return tester
def setUp(self):
"""Common set up method for all tests."""
self._tester = self.createTester()
def TempFilePath(self, file_path):
"""Creates a temporary file path lasting for the duration of a test."""
return os.path.join(self.tempdir, file_path)
class CrOSTester(CrOSTesterBase):
"""Tests miscellaneous utility methods"""
def testStartVM(self):
"""Verify that a new VM is started before running tests."""
self._tester.start_vm = True
self._tester.Run()
# Check if new VM got launched.
self.assertCommandContains([self._tester._device.qemu_path, '-enable-kvm'])
# Check if new VM is responsive.
self.assertCommandContains(
['ssh', '-p', '9222', 'root@localhost', '--', 'true'])
def testStartVMCustomPort(self):
"""Verify that a custom SSH port is supported for tests."""
self._tester = self.createTester(opts=['--ssh-port=12345'])
self._tester.start_vm = True
self._tester.Run()
# Check that we use the custom port when talking to the VM.
self.assertCommandContains(
['ssh', '-p', '12345', 'root@localhost', '--', 'true'])
def testFlash(self):
"""Tests flash command."""
# Verify that specifying the board gets the latest canary.
self._tester.flash = True
self._tester.public_image = True
self._tester._device.board = 'octopus'
self._tester._device.remote._lsb_release = {
cros_set_lsb_release.LSB_KEY_VERSION: '12900.0.0',
}
self._tester.Run()
self.assertCommandContains([
os.path.join(constants.CHROMITE_BIN_DIR, 'cros'),
'flash', 'ssh://localhost:9222',
'xbuddy://remote/octopus-full/latest',])
# Specify an xbuddy link.
self._tester.xbuddy = 'xbuddy://remote/octopus/R82-12901.0.0'
self._tester.Run()
self.assertCommandContains([
os.path.join(constants.CHROMITE_BIN_DIR, 'cros'),
'flash', 'ssh://localhost:9222',
'xbuddy://remote/octopus/R82-12901.0.0'])
def testFlashSkip(self):
"""Tests flash command is skipped when not needed."""
self._tester.flash = True
self._tester._device.board = 'octopus'
self._tester._device.remote._lsb_release = {
cros_set_lsb_release.LSB_KEY_VERSION: '12901.0.0',
}
self._tester.xbuddy = 'xbuddy://remote/octopus/R82-12901.0.0'
self._tester.Run()
self.assertCommandContains(
[os.path.join(constants.CHROMITE_BIN_DIR, 'cros'),
'flash', 'localhost', 'xbuddy://remote/octopus/R82-12901.0.0'],
expected=False)
def testDeployChrome(self):
"""Tests basic deploy chrome command."""
self._tester.deploy = True
self._tester.build_dir = self.TempFilePath('out_amd64-generic/Release')
self._tester.Run()
self.assertCommandContains(['deploy_chrome', '--force', '--build-dir',
self._tester.build_dir, '--process-timeout',
'180', '--device',
self._tester._device.device + ':9222',
'--board', 'amd64-generic',
'--cache-dir', self._tester.cache_dir])
def testDeployChromeWithArgs(self):
"""Tests deploy chrome command with additional arguments."""
self._tester.deploy = True
self._tester.build_dir = self.TempFilePath('out_amd64-generic/Release')
self._tester.nostrip = True
self._tester.mount = True
self._tester.Run()
self.assertCommandContains(['--nostrip', '--mount'])
def testFetchResults(self):
"""Verify that results files/directories are copied from the DUT."""
self._tester.results_src = ['/tmp/results/cmd_results',
'/tmp/results/filename.txt',
'/tmp/results/test_results']
self._tester.results_dest_dir = self.TempFilePath('results_dir')
osutils.SafeMakedirs(self._tester.results_dest_dir)
self._tester.Run()
for filename in self._tester.results_src:
self.assertCommandContains(['scp', 'root@localhost:%s' % filename,
self._tester.results_dest_dir])
def testFileList(self):
"""Verify that FileList returns the correct files."""
# Ensure FileList returns files when files_from is None.
files = ['/tmp/filename1', '/tmp/filename2']
self.assertEqual(files, cros_test.FileList(files, None))
# Ensure FileList returns files when files_from does not exist.
files_from = self.TempFilePath('file_list')
self.assertEqual(files, cros_test.FileList(files, files_from))
# Ensure FileList uses 'files_from' and ignores 'files'.
file_list = ['/tmp/file1', '/tmp/file2', '/tmp/file3']
osutils.WriteFile(files_from, '\n'.join(file_list))
self.assertEqual(file_list, cros_test.FileList(files, files_from))
class CrOSTesterMiscTests(CrOSTesterBase):
"""Tests miscellaneous test cases."""
@mock.patch('chromite.lib.vm.VM.IsRunning', return_value=True)
def testBasic(self, isrunning_mock):
"""Tests basic functionality."""
self._tester.Run()
isrunning_mock.assert_called()
# Run vm_sanity.
self.assertCommandContains([
'ssh', '-p', '9222', 'root@localhost', '--',
'/usr/local/autotest/bin/vm_sanity.py'
])
def testCatapult(self):
"""Verify catapult test command."""
self._tester.catapult_tests = ['testAddResults']
self._tester.Run()
self.assertCommandContains([
'python', '/usr/local/telemetry/src/third_party/catapult/'
'telemetry/bin/run_tests', '--browser=system', 'testAddResults'
])
def testCatapultAsGuest(self):
"""Verify that we use the correct browser in guest mode."""
self._tester.catapult_tests = ['testAddResults']
self._tester.guest = True
self._tester.Run()
self.assertCommandContains([
'python', '/usr/local/telemetry/src/third_party/catapult/'
'telemetry/bin/run_tests', '--browser=system-guest', 'testAddResults'
])
def testRunDeviceCmd(self):
"""Verify a run device cmd call."""
self._tester.remote_cmd = True
self._tester.files = [self.TempFilePath('crypto_unittests')]
osutils.Touch(self._tester.files[0], mode=0o700)
self._tester.as_chronos = True
self._tester.args = ['crypto_unittests',
'--test-launcher-print-test-stdio=always']
self._tester.Run()
# Ensure target directory is created on the DUT.
self.assertCommandContains(['mkdir', '-p', '/usr/local/cros_test'])
# Ensure test ssh keys are authorized with chronos.
self.assertCommandContains(['cp', '-r', '/root/.ssh/',
'/home/chronos/user/'])
# Ensure chronos has ownership of the directory.
self.assertCommandContains(['chown', '-R', 'chronos:',
'/usr/local/cros_test'])
# Ensure command runs in the target directory.
self.assertCommandContains('cd /usr/local/cros_test && crypto_unittests '
'--test-launcher-print-test-stdio=always')
# Ensure target directory is removed at the end of the test.
self.assertCommandContains(['rm', '-rf', '/usr/local/cros_test'])
def testRunDeviceCmdWithSetCwd(self):
"""Verify a run device command call when giving a cwd."""
self._tester.remote_cmd = True
self._tester.cwd = '/usr/local/autotest'
self._tester.args = ['./bin/vm_sanity.py']
self._tester.Run()
# Ensure command runs in the autotest directory.
self.assertCommandContains('cd /usr/local/autotest && ./bin/vm_sanity.py')
def testRunDeviceCmdWithoutSrcFiles(self):
"""Verify running a remote command when src files are not specified.
The remote command should not change the working directory or create a temp
directory on the target.
"""
self._tester.remote_cmd = True
self._tester.args = ['/usr/local/autotest/bin/vm_sanity.py']
self._tester.Run()
self.assertCommandContains(['ssh', '-p', '9222',
'/usr/local/autotest/bin/vm_sanity.py'])
self.assertCommandContains(['mkdir', '-p'], expected=False)
self.assertCommandContains(['cd %s && /usr/local/autotest/bin/'
'vm_sanity.py' % self._tester.cwd],
expected=False)
self.assertCommandContains(['rm', '-rf'], expected=False)
def testHostCmd(self):
"""Verify running a host command."""
self._tester.host_cmd = True
self._tester.build_dir = '/some/chromium/dir'
self._tester.args = ['tast', 'run', 'localhost:9222', 'ui.ChromeLogin']
self._tester.Run()
# Ensure command is run with an env var for the build dir, and ensure an
# exception is not raised if it fails.
self.assertCommandCalled(
['tast', 'run', 'localhost:9222', 'ui.ChromeLogin'],
check=False, dryrun=False,
extra_env={'CHROMIUM_OUTPUT_DIR': '/some/chromium/dir'})
# Ensure that --host-cmd does not invoke ssh since it runs on the host.
self.assertCommandContains(['ssh', 'tast'], expected=False)
@pytest.mark.usefixtures('testcase_caplog')
class CrOSTesterAutotest(CrOSTesterBase):
"""Tests autotest test cases."""
def testBasicAutotest(self):
"""Tests a simple autotest call."""
self._tester.autotest = ['accessibility_Sanity']
self._tester.Run()
# Check VM got launched.
self.assertCommandContains([self._tester._device.qemu_path, '-enable-kvm'])
# Checks that autotest is running.
self.assertCommandContains([
'test_that', '--no-quickmerge', '--ssh_options',
'-F /dev/null -i /dev/null',
'localhost:9222', 'accessibility_Sanity'])
def testAutotestWithArgs(self):
"""Tests an autotest call with attributes."""
self._tester.autotest = ['accessibility_Sanity']
self._tester.results_dir = 'test_results'
self._tester._device.private_key = '.ssh/testing_rsa'
self._tester._device.log_level = 'debug'
self._tester._device.should_start_vm = False
self._tester._device.ssh_port = None
self._tester._device.device = '172.16.17.32'
self._tester.test_that_args = ['--test_that-args',
'--allow-chrome-crashes']
cwd = os.path.join('/mnt/host/source',
os.path.relpath(os.getcwd(), constants.SOURCE_ROOT))
test_results_dir = os.path.join(cwd, 'test_results')
testing_rsa_dir = os.path.join(cwd, '.ssh/testing_rsa')
self._tester._RunAutotest()
self.assertCommandCalled(
['test_that', '--board', 'amd64-generic', '--results_dir',
test_results_dir, '--ssh_private_key', testing_rsa_dir, '--debug',
'--allow-chrome-crashes', '--no-quickmerge', '--ssh_options',
'-F /dev/null -i /dev/null', '172.16.17.32', 'accessibility_Sanity'],
dryrun=False, enter_chroot=True)
@mock.patch('chromite.lib.cros_build_lib.IsInsideChroot', return_value=True)
def testInsideChrootAutotest(self, _check_inside_chroot_mock):
"""Tests running an autotest from within the chroot."""
# Checks that mock version has been called.
# TODO(crbug/1065172): Invalid assertion that had previously been mocked.
# check_inside_chroot_mock.assert_called()
self._tester.autotest = ['accessibility_Sanity']
self._tester.results_dir = '/mnt/host/source/test_results'
self._tester._device.private_key = '/mnt/host/source/.ssh/testing_rsa'
self._tester._RunAutotest()
self.assertCommandContains([
'--results_dir', '/mnt/host/source/test_results',
'--ssh_private_key', '/mnt/host/source/.ssh/testing_rsa'])
@mock.patch('chromite.lib.cros_build_lib.IsInsideChroot', return_value=False)
def testOutsideChrootAutotest(self, _check_inside_chroot_mock):
"""Tests running an autotest from outside the chroot."""
# Checks that mock version has been called.
# TODO(crbug/1065172): Invalid assertion that had previously been mocked.
# check_inside_chroot_mock.assert_called()
self._tester.autotest = ['accessibility_Sanity']
    # Capture the run command. This is necessary because the mock doesn't
# capture the cros_sdk wrapper.
self._tester._RunAutotest()
# Check that we enter the chroot before running test_that.
self.assertIn(('cros_sdk -- test_that --board amd64-generic --no-quickmerge'
" --ssh_options '-F /dev/null -i /dev/null' localhost:9222"
' accessibility_Sanity'), self.caplog.text)
class CrOSTesterTast(CrOSTesterBase):
"""Tests tast test cases."""
def testSingleBaseTastTest(self):
"""Verify running a single tast test."""
self._tester.tast = ['ui.ChromeLogin']
self._tester.Run()
self.assertCommandContains(['tast', 'run', '-build=false',
'-waituntilready', '-extrauseflags=tast_vm',
'localhost:9222', 'ui.ChromeLogin'])
def testExpressionBaseTastTest(self):
"""Verify running a set of tast tests with an expression."""
self._tester.tast = [
'(("dep:chrome" || "dep:android") && !flaky && !disabled)'
]
self._tester.Run()
self.assertCommandContains([
'tast', 'run', '-build=false', '-waituntilready',
'-extrauseflags=tast_vm', 'localhost:9222',
'(("dep:chrome" || "dep:android") && !flaky && !disabled)'
])
@mock.patch('chromite.lib.cros_build_lib.IsInsideChroot')
def testTastTestWithOtherArgs(self, check_inside_chroot_mock):
"""Verify running a single tast test with various arguments."""
self._tester.tast = ['ui.ChromeLogin']
self._tester.test_timeout = 100
self._tester._device.log_level = 'debug'
self._tester._device.should_start_vm = False
self._tester._device.ssh_port = None
self._tester._device.device = '172.16.17.32'
self._tester.results_dir = '/tmp/results'
self._tester.Run()
check_inside_chroot_mock.assert_called()
self.assertCommandContains(['tast', '-verbose', 'run', '-build=false',
'-waituntilready', '-timeout=100',
'-resultsdir', '/tmp/results', '172.16.17.32',
'ui.ChromeLogin'])
def testTastTestSDK(self):
"""Verify running tast tests from the SimpleChrome SDK."""
self._tester.tast = ['ui.ChromeLogin']
self._tester._device.private_key = '/tmp/.ssh/testing_rsa'
tast_cache_dir = cros_test_lib.FakeSDKCache(
self._tester.cache_dir).CreateCacheReference(
self._tester._device.board, 'chromeos-base')
tast_bin_dir = os.path.join(tast_cache_dir, 'tast-cmd/usr/bin')
osutils.SafeMakedirs(tast_bin_dir)
self._tester.Run()
self.assertCommandContains([
os.path.join(tast_bin_dir,
'tast'), 'run', '-build=false', '-waituntilready',
'-remoterunner=%s' % os.path.join(tast_bin_dir, 'remote_test_runner'),
'-remotebundledir=%s' % os.path.join(tast_cache_dir,
'tast-remote-tests-cros/usr',
'libexec/tast/bundles/remote'),
'-remotedatadir=%s' % os.path.join(
tast_cache_dir, 'tast-remote-tests-cros/usr', 'share/tast/data'),
'-ephemeraldevserver=true', '-keyfile', '/tmp/.ssh/testing_rsa',
'-extrauseflags=tast_vm', 'localhost:9222', 'ui.ChromeLogin'
])
class CrOSTesterChromeTest(CrOSTesterBase):
"""Tests chrome test test cases."""
def SetUpChromeTest(self, test_exe, test_label, test_args=None):
"""Sets configurations necessary for running a chrome test.
Args:
test_exe: The name of the chrome test.
test_label: The label of the chrome test.
test_args: A list of arguments of the particular chrome test.
"""
self._tester.args = [test_exe] + test_args if test_args else [test_exe]
self._tester.chrome_test = True
self._tester.build_dir = self.TempFilePath('out_amd64-generic/Release')
osutils.SafeMakedirs(self._tester.build_dir)
isolate_map = self.TempFilePath('testing/buildbot/gn_isolate_map.pyl')
# Add info about the specified chrome test to the isolate map.
osutils.WriteFile(isolate_map,
"""{
"%s": {
"label": "%s",
"type": "console_test_launcher",
}
}""" % (test_exe, test_label), makedirs=True)
self._tester.build = True
self._tester.deploy = True
self._tester.chrome_test_target = test_exe
self._tester.chrome_test_deploy_target_dir = '/usr/local/chrome_test'
# test_label looks like //crypto:crypto_unittests.
# label_root extracts 'crypto' from the test_label in this instance.
label_root = test_label.split(':')[0].lstrip('/')
# A few files used by the chrome test.
runtime_deps = [
'./%s' % test_exe,
'gen.runtime/%s/%s/%s.runtime_deps' % (label_root, test_exe, test_exe),
'../../third_party/chromite']
# Creates the test_exe to be an executable.
osutils.Touch(os.path.join(self._tester.build_dir, runtime_deps[0]),
mode=0o700)
for dep in runtime_deps[1:]:
osutils.Touch(os.path.join(self._tester.build_dir, dep), makedirs=True)
# Mocks the output by providing necessary runtime files.
self.rc.AddCmdResult(
partial_mock.InOrder(['gn', 'desc', test_label]),
output='\n'.join(runtime_deps))
def CheckChromeTestCommands(self, test_exe, test_label, build_dir,
test_args=None):
"""Checks to see that chrome test commands ran properly.
Args:
test_exe: The name of the chrome test.
test_label: The label of the chrome test.
build_dir: The directory where chrome is built.
test_args: Chrome test arguments.
"""
# Ensure chrome is being built.
self.assertCommandContains(['autoninja', '-C', build_dir, test_exe])
# Ensure that the runtime dependencies are checked for.
self.assertCommandContains(['gn', 'desc', build_dir, test_label,
'runtime_deps'])
# Ensure UI is stopped so the test can grab the GPU if needed.
self.assertCommandContains(['ssh', '-p', '9222', 'root@localhost', '--',
'stop ui'])
# Ensure a user activity ping is sent to the device.
self.assertCommandContains(['ssh', '-p', '9222', 'root@localhost', '--',
'dbus-send', '--system', '--type=method_call',
'--dest=org.chromium.PowerManager',
'/org/chromium/PowerManager',
'org.chromium.PowerManager.HandleUserActivity',
'int32:0'])
args = ' '.join(test_args) if test_args else ''
# Ensure the chrome test is run.
self.assertCommandContains(['ssh', '-p', '9222', 'root@localhost', '--',
'cd /usr/local/chrome_test && su chronos -c -- '
'"out_amd64-generic/Release/%s %s"'
% (test_exe, args)])
def testChromeTestRsync(self):
"""Verify build/deploy and chrome test commands using rsync to copy."""
test_exe = 'crypto_unittests'
test_label = '//crypto:' + test_exe
self.SetUpChromeTest(test_exe, test_label)
self._tester.Run()
self.CheckChromeTestCommands(test_exe, test_label, self._tester.build_dir)
# Ensure files are being copied over to the device using rsync.
self.assertCommandContains(['rsync', '%s/' % self._tester.staging_dir,
'[root@localhost]:/usr/local/chrome_test'])
@mock.patch('chromite.lib.remote_access.RemoteDevice.HasRsync',
return_value=False)
def testChromeTestSCP(self, rsync_mock):
"""Verify build/deploy and chrome test commands using scp to copy."""
test_exe = 'crypto_unittests'
test_label = '//crypto:' + test_exe
self.SetUpChromeTest(test_exe, test_label)
self._tester.Run()
self.CheckChromeTestCommands(test_exe, test_label, self._tester.build_dir)
# Ensure files are being copied over to the device using scp.
self.assertCommandContains(['scp', '%s/' % self._tester.staging_dir,
'root@localhost:/usr/local/chrome_test'])
rsync_mock.assert_called()
def testChromeTestExeArg(self):
"""Verify build/deploy and chrome test commands when a test arg is given."""
test_exe = 'crypto_unittests'
test_label = '//crypto:' + test_exe
test_args = ['--test-launcher-print-test-stdio=auto']
self.SetUpChromeTest(test_exe, test_label, test_args)
self._tester.Run()
self.CheckChromeTestCommands(test_exe, test_label, self._tester.build_dir,
test_args)
class CrOSTesterParser(CrOSTesterBase):
"""Tests parser test cases."""
def CheckParserError(self, args, error_msg):
"""Checks that parser error is raised.
Args:
args: List of commandline arguments.
error_msg: Error message to check for.
"""
# Recreate args as a list if it is given as a string.
if isinstance(args, str):
args = [args]
# Putting outcap.OutputCapturer() before assertRaises(SystemExit)
# swallows SystemExit exception check.
with self.assertRaises(SystemExit):
with outcap.OutputCapturer() as output:
cros_test.ParseCommandLine(args)
self.assertIn(error_msg, output.GetStderr())
def testParserErrorChromeTest(self):
"""Verify we get a parser error for --chrome-test when no args are given."""
self.CheckParserError('--chrome-test', '--chrome-test')
def testParserSetsBuildDir(self):
"""Verify that the build directory is set when not specified."""
test_dir = self.TempFilePath('out_amd64-generic/Release/crypto_unittests')
# Retrieves the build directory from the parsed options.
build_dir = cros_test.ParseCommandLine(
['--chrome-test', '--', test_dir]).build_dir
self.assertEqual(build_dir, os.path.dirname(test_dir))
def testParserErrorBuild(self):
"""Verify parser errors for building/deploying Chrome."""
# Parser error if no build directory is specified.
self.CheckParserError('--build', '--build-dir')
# Parser error if build directory is not an existing directory.
self.CheckParserError(['--deploy', '--build-dir', '/not/a/directory'],
'not a directory')
def testParserErrorResultsSrc(self):
"""Verify parser errors for results src/dest directories."""
# Parser error if --results-src is not absolute.
self.CheckParserError(['--results-src', 'tmp/results'], 'absolute')
# Parser error if no results destination dir is given.
self.CheckParserError(['--results-src', '/tmp/results'], 'with results-src')
# Parser error if no results source is given.
self.CheckParserError(['--results-dest-dir', '/tmp/dest_dir'],
'with results-dest-dir')
# Parser error if results destination dir is a file.
filename = '/tmp/dest_dir_file'
osutils.Touch(filename)
self.CheckParserError(['--results-src', '/tmp/results',
'--results-dest-dir', filename], 'existing file')
def testParserErrorCommands(self):
"""Verify we get parser errors when using certain commands."""
# Parser error if no test command is provided.
self.CheckParserError('--remote-cmd', 'specify test command')
# Parser error if using chronos without a test command.
self.CheckParserError('--as-chronos', 'as-chronos')
# Parser error if there are args, but no command.
self.CheckParserError('--some_test some_command',
'--remote-cmd or --host-cmd or --chrome-test')
# Parser error when additional args don't start with --.
self.CheckParserError(['--host-cmd', 'tast', 'run'], 'must start with')
def testParserErrorCWD(self):
"""Verify we get parser errors when specifying the cwd."""
# Parser error if the cwd refers to a parent path.
self.CheckParserError(['--cwd', '../new_cwd'], 'cwd cannot start with ..')
# Parser error if the cwd is not an absolute path.
self.CheckParserError(['--cwd', 'tmp/cwd'], 'cwd must be an absolute path')
def testParserErrorFiles(self):
"""Verify we get parser errors with --files."""
# Parser error when both --files and --files-from are specified.
self.CheckParserError(['--files', 'file_list', '--files-from', 'file'],
'--files and --files-from')
# Parser error when --files-from does not exist.
self.CheckParserError(['--files-from', '/fake/file'], 'is not a file')
# Parser error when a file in --files has an absolute path.
self.CheckParserError(['--files', '/etc/lsb-release'],
'should be a relative path')
# Parser error when a file has a bad path.
self.CheckParserError(['--files', '../some_file'], 'cannot start with ..')
# Parser error when a non-existent file is passed to --files.
self.CheckParserError(['--files', 'fake/file'], 'does not exist')
| 2.046875 | 2 |
utils/utils.py | alphaccw/StereoNet | 0 | 12797642 | import os
import torch
def GERF_loss(GT, pred, args):
mask = (GT < args.maxdisp) & (GT >= 0)
# print(mask.size(), GT.size(), pred.size())
count = len(torch.nonzero(mask))
# print(count)
if count == 0:
count = 1
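    # Smooth robust (Charbonnier / pseudo-Huber style) penalty averaged over the valid
    # pixels selected by `mask`: sqrt(diff^2 + 4)/2 - 1 is ~diff^2/8 for small errors
    # and ~|diff|/2 for large ones.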
return torch.sum(torch.sqrt(torch.pow(GT[mask] - pred[mask], 2) + 4) /2 - 1) / count | 2.3125 | 2 |
data/DataManager.py | sash-a/CoDeepNEAT | 28 | 12797643 | import inspect
import os
def get_datasets_folder():
return os.path.join(get_data_folder(), "Datasets")
def get_data_folder():
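    # Resolve the directory containing this module at runtime, independent of the
    # current working directory.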
return os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
| 2.25 | 2 |
libv1/collate.py | hhaAndroid/miniloader | 12 | 12797644 | <reponame>hhaAndroid/miniloader<filename>libv1/collate.py
# -*- coding: utf-8 -*-
# ======================================================
# @Time    : 20-12-26 4:42 PM
# @Author : <NAME>
# @Email :
# @File : collate.py
# @Comment:
# ======================================================
import torch
def default_collate(batch):
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, torch.Tensor):
return torch.stack(batch, 0)
elif elem_type.__module__ == 'numpy':
return default_collate([torch.as_tensor(b) for b in batch])
else:
raise NotImplementedError
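# Minimal usage sketch (assumes a batch of equally shaped samples):
#
#   batch = [torch.randn(3, 32, 32) for _ in range(4)]
#   images = default_collate(batch)   # -> tensor of shape (4, 3, 32, 32)
#
# numpy arrays are first wrapped with torch.as_tensor and then stacked the same way.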
| 2.15625 | 2 |
AOC2018/day02.py | hbldh/AdventOfCode2016 | 0 | 12797645 | from collections import Counter
import difflib
def _checksum(r):
counter = Counter(r)
return int(any([x == 2 for x in counter.values()])), int(any([x == 3 for x in counter.values()]))
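# Example: _checksum("bababc") == (1, 1), since 'a' appears exactly twice and 'b' exactly three times.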
def _solve_1(rows):
d = [_checksum(row) for row in rows]
return sum([x[0] for x in d]) * sum([x[1] for x in d])
def _solve_2(rows):
for i, r in enumerate(rows):
for r2 in rows[i:]:
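            # ratio() is 2*M / (len(r) + len(r2)); for equal-length IDs, ratio * len(r)
            # recovers the number of matching characters, so `diffs` counts differing positions.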
diffs = len(r) - int(round(difflib.SequenceMatcher(a=r, b=r2).ratio() * len(r)))
if diffs == 1:
return "".join([a for a, b in zip(r, r2) if a == b])
return 0
def solve(data):
rows = data.splitlines()
return _solve_1(rows), _solve_2(rows)
if __name__ == '__main__':
from AOC2018 import run_solver
run_solver(solve, __file__)
| 3.25 | 3 |
sioinstagram/io/io_aiohttp.py | pohmelie/sioinstagram | 3 | 12797646 | import asyncio
import functools
import contextlib
import aiohttp
from ..protocol import Protocol
from ..exceptions import InstagramError
__all__ = (
"AioHTTPInstagramApi",
)
class AioHTTPInstagramApi:
def __init__(self, username, password, state=None, delay=5, proxy=None, loop=None, lock=None):
if proxy is None:
self._conn = None
else:
self._conn = aiohttp.ProxyConnector(proxy=proxy)
self.proto = Protocol(username, password, state)
self.delay = delay
self.loop = loop or asyncio.get_event_loop()
self.lock = lock or asyncio.Lock(loop=self.loop)
self.last_request_time = 0
@property
def state(self):
return self.proto.state
def __getattr__(self, name):
method = getattr(self.proto, name)
@functools.wraps(method)
def wrapper(*args, **kwargs):
return self._run(method(*args, **kwargs))
return wrapper
async def _request(self, request):
kw = request._asdict()
async with aiohttp.ClientSession(cookies=kw.pop("cookies")) as session:
async with session.request(**kw) as response:
if not await response.read():
raise InstagramError(response)
return Protocol.Response(
cookies={c.key: c.value for c in session.cookie_jar},
json=await response.json(),
status_code=response.status,
)
async def _run(self, generator):
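        # Drive the Protocol generator: it yields request descriptions, each one is executed
        # over aiohttp (throttled to at most one request per `delay` seconds), and the response
        # is sent back into the generator until it is exhausted.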
with (await self.lock):
response = None
with contextlib.suppress(StopIteration):
while True:
request = generator.send(response)
now = self.loop.time()
timeout = max(0, self.delay - (now - self.last_request_time))
await asyncio.sleep(timeout, loop=self.loop)
self.last_request_time = self.loop.time()
response = await self._request(request)
return response.json
| 2.53125 | 3 |
common/utils.py | kosyfrances/crq_bot | 5 | 12797647 | import jinja2
def render(filename, context=None, error=None, path='templates'):
    # Use None instead of a mutable default dict so context data does not leak between calls.
    if context is None:
        context = {}
    if error:
        # Error should be a string
        if isinstance(error, str):
            context['error'] = error
        else:
            raise TypeError('Error message must be a string')
return jinja2.Environment(
loader=jinja2.FileSystemLoader(path)
).get_template(filename).render(context)
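# Minimal usage sketch (assumes a Jinja2 template at templates/hello.txt):
#
#   text = render('hello.txt', {'name': 'world'})
#   text = render('hello.txt', error='Something went wrong')  # template may reference {{ error }}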
| 2.6875 | 3 |
src/compas_occ/geometry/curves/_curve.py | jf---/compas_occ | 1 | 12797648 | from compas.data import Data
class Curve(Data):
"""Base class for all curves in this package."""
| 2.0625 | 2 |
p2db/p2db/ui.py | ne75/p2llvm-lib | 0 | 12797649 | <gh_stars>0
from prompt_toolkit import Application
from prompt_toolkit.layout.containers import VSplit, HSplit, Window
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.layout import FormattedTextControl, WindowAlign
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.widgets import Frame, TextArea, Box
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.data_structures import Point
from prompt_toolkit.formatted_text import ANSI
from prompt_toolkit.layout.screen import Char
from colorama import Fore, Style
import threading
import logging
import time
import re
from . import p2tools
from . import p2db_server
log = logging.getLogger('main')
Char.display_mappings['\t'] = '\t'
class UI:
kb = KeyBindings()
help_text = '''
p2db
----
help : Print this dialog
step [Ctrl+S] : Step by one instruction. Call instructions are stepped over. Modifier instructions (augd/s, setq) will be skipped.
stepin [Ctrl+T] : Step into a function call
stepout [Ctrl+O] : Step out of the current function call
break <addr> : Set breakpoint at 'addr' and continue. 'addr' should be in hex
getreg <reg> : Get the value in 'reg'. 'reg' can be an address or register name. Address should be in hex
getbyte <addr> : Get the byte at hub address 'addr'. Address should be in hex
getlong <addr> : Get the long at hub address 'addr'. Address should be in hex
pins : Update pin status data
cog <n> : Set the active cog to n
cogaddr <addr> : Set the cog execution address (for native cogs)
continue : (unimplemented) Continue execution. Cog will be disconnected until it interrupts itself
reset : (unimplemented) Reload the current program
quit [Ctrl+Q] : Quit
'''
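    # Typical interaction (addresses below are hypothetical): "cog 0" selects a cog,
    # "break 4a0" sets a breakpoint at 0x4a0 and continues, then "step"/"stepin"/"stepout"
    # walk the disassembly shown in the Source pane; "pins" refreshes the pin display.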
pc_cursor_string = Fore.CYAN + " ---> " + Fore.RESET
pc_cursor_size = 10
instance = None
def __init__(self, server: p2db_server.P2DBServer, objdata):
assert(not UI.instance)
UI.instance = self
self.server = server
self.obj_data = objdata
self.app = Application(full_screen=True)
self.current_func = ''
self.dirty = True
self.render_lock = threading.Lock()
# dict of commands and handler function for each
self.commands = {
"step": self.on_step,
"stepin": self.on_stepin,
"stepout": self.on_stepout,
"break": self.on_break,
"getreg": self.on_getreg,
"getbyte": self.on_getbyte,
"getlong": self.on_getlong,
"continue": self.on_continue,
"pins": self.on_pins,
"cog": self.on_cog,
"cogaddr": self.on_cogaddr,
"reset": self.on_reset,
"quit": self.on_quit,
"help": self.on_help
}
# log stuff
def log_cursor_pos():
y = self.log.text.value.count('\n')
return Point(0, y)
self.log = FormattedTextControl(ANSI(""), get_cursor_position=log_cursor_pos)
self.log_area = Window(self.log)
# prompt stuff
cmd_completer = WordCompleter(list(self.commands.keys()))
self.prompt = TextArea(
height=1,
prompt="p2db > ",
multiline=False,
wrap_lines=False,
complete_while_typing=True,
completer=cmd_completer,
accept_handler = self.accept,
focus_on_click=True,
)
# status window stuff
self.status = FormattedTextControl(ANSI(''))
self.connection = FormattedTextControl(ANSI(''))
self.pins = FormattedTextControl(ANSI(''))
status_split = HSplit([
VSplit([
Window(self.status),
Box(Window(self.connection, align=WindowAlign.RIGHT), 3, padding_top=0)
]),
Frame(Box(Window(self.pins, width=95, height=5), padding=3, padding_bottom=0, padding_top=1), "Pins")
])
# instruction window stuff
def inst_cursor_pos():
y = max(0, min(self.pc_line, self.instructions.text.value.count('\n')))
return Point(0, y)
self.pc_line = 0
self.instructions = FormattedTextControl(ANSI(''), focusable=True, get_cursor_position=inst_cursor_pos)
self.function_header = FormattedTextControl(ANSI(''))
instruction_split = HSplit([
Box(Window(self.function_header, height=1), 1, padding_top=0),
Box(Window(self.instructions, height=40), 1)
])
# Frames for each section
self.cog_status_window = Frame(Box(status_split, 1), "Status")
self.instruction_window = Frame(Box(instruction_split, 1), "Source")
self.log_window = Frame(Box(self.log_area, padding=1, padding_bottom=0))
self.prompt_window = Frame(self.prompt)
body = VSplit([
self.cog_status_window,
self.instruction_window,
])
root_container = HSplit([
body,
self.log_window,
self.prompt_window
])
layout = Layout(root_container, self.prompt)
self.app = Application(layout=layout, key_bindings=self.kb, full_screen=True, before_render=self.prerender, after_render=self.postrender)
self.app.layout.focus(self.prompt_window)
@kb.add('c-c')
@kb.add('c-q')
def exit_(event):
event.app.exit()
def on_help(self, args):
ui_instance = UI.instance
ui_instance.update_log(ui_instance.help_text + "\n")
@kb.add('c-s')
def on_step(self, args=[]):
ui_instance = UI.instance
r = ui_instance.server.step()
if r:
ui_instance.update_log(r + "\n", Fore.RED)
@kb.add('c-t')
def on_stepin(self, args=[]):
ui_instance = UI.instance
r = ui_instance.server.stepin()
if r:
ui_instance.update_log(r + "\n", Fore.RED)
@kb.add('c-o')
def on_stepout(self, args=[]):
ui_instance = UI.instance
r = ui_instance.server.stepout()
if r:
ui_instance.update_log(r + "\n", Fore.RED)
def on_break(self, args):
ui_instance = UI.instance
r = ui_instance.server.breakpoint(args[0])
if r:
ui_instance.update_log(r + "\n", Fore.RED)
def on_getreg(self, args):
ui_instance = UI.instance
if len(args) != 1:
ui_instance.update_log("Error: expected 1 argument\n", Fore.RED)
return
r = ui_instance.server.get_reg(args[0])
if (r[0]):
ui_instance.update_log(r[0], Fore.RED)
else:
try:
addr = int(args[0], 16)
ui_instance.update_log("reg {:#02x} -> {:#02x}".format(addr, r[1]) + "\n")
except ValueError:
ui_instance.update_log("{} -> {:#02x}".format(args[0], r[1]) + "\n")
def on_getbyte(self, args):
ui_instance = UI.instance
if len(args) != 1:
ui_instance.update_log("Error: expected 1 argument\n", Fore.RED)
return
r = ui_instance.server.get_byte(args[0])
if (r[0]):
ui_instance.update_log(r[0] + "\n", Fore.RED)
else:
ui_instance.update_log("byte @ {:#02x} -> {:#02x}".format(int(args[0], 16), r[1]) + "\n")
def on_getlong(self, args):
ui_instance = UI.instance
if len(args) != 1:
ui_instance.update_log("Error: expected 1 argument\n", Fore.RED)
return
r = ui_instance.server.get_long(args[0])
if (r[0]):
ui_instance.update_log(r[0] + "\n", Fore.RED)
else:
ui_instance.update_log("long @ {:#02x} -> {:#02x}".format(int(args[0], 16), r[1]) + "\n")
def on_cog(self, args):
ui_instance = UI.instance
if len(args) != 1:
ui_instance.update_log("Error: expected 1 argument\n", Fore.RED)
return
try:
cog_num = int(args[0])
except ValueError:
ui_instance.update_log("Error: expected numeric argument\n", Fore.RED)
return
ui_instance.server.set_cog(cog_num)
def on_cogaddr(self, args):
ui_instance = UI.instance
if len(args) != 1:
ui_instance.update_log("Error: expected 1 argument\n", Fore.RED)
return
try:
addr = int(args[0], 16)
except ValueError:
ui_instance.update_log("Error: expected numeric argument\n", Fore.RED)
return
ui_instance.server.cog_states[ui_instance.server.current_cog].status.set_cog_addr(addr)
@kb.add('c-p')
def on_pins(self, args=[]):
ui_instance = UI.instance
ui_instance.server.update_pins()
def on_continue(self, args=[]):
ui_instance = UI.instance
ui_instance.server.continue_exec()
def on_reset(self, args):
ui_instance = UI.instance
ui_instance.update_log('reset unimplemented\n')
def on_quit(self, args):
ui_instance = UI.instance
ui_instance.app.exit()
def accept(self, buff):
cmd = self.prompt.text
args = cmd.split(' ')
if args[0] in self.commands:
self.commands[args[0]](args[1:])
else:
self.update_log("Unknown command: " + args[0] + "\n", Fore.RED)
self.dirty = True
@kb.add('c-i')
def shift_focus(e):
e.app.layout.focus_next()
def update_log(self, new_text, color=""):
self.log.text = ANSI(self.log.text.value + color + new_text + Fore.RESET)
def get_section_str(self, sec, ptr):
'''
        Return a string for all instructions in the given section, placing the cursor string at ptr.
'''
data_str = ''
section_addr = sec['section_addr']
for i in range(section_addr, section_addr + 4*(len(sec) - 1), 4):
inst = " {:x}: {} {}\n".format(i, sec[i][0], sec[i][1])
if 'call' in sec[i][1]:
# if 'calla' in sec[i][1]:
# pat = r'^(.*?) #\\([0-9]+)(.*?)' # pattern to get the address of a call instruction
# r = re.search(pat, sec[i][1])
# call_addr = int(r.group(2)) if r else 0
# call_dest = p2tools.get_section(self.obj_data, call_addr)
# if call_addr != 0:
# # if call address is 0x200-0x400, convert it to where the LUT function is stored in HUB ram
# if call_addr >= 0x200 and call_addr < 0x400:
# call_addr = 4*(call_addr - 0x200) + 0x200
# call_dest = p2tools.get_section(self.obj_data, call_addr)
# else:
# call_dest = ''
inst = " {:x}: {}{}{}{}\n".format(i,
sec[i][0],
Fore.LIGHTGREEN_EX,
sec[i][1],
Fore.RESET)
elif 'jmp' in sec[i][1] or 'tj' in sec[i][1] or 'dj' in sec[i][1]:
inst = " {:x}: {}{}{}{}\n".format(i,
sec[i][0],
Fore.CYAN,
sec[i][1],
Fore.RESET)
else:
inst = " {:x}: {}{}\n".format(i, sec[i][0], sec[i][1])
if i == ptr:
data_str += Style.BRIGHT + self.pc_cursor_string + inst + Style.RESET_ALL
self.pc_line = int((i - section_addr)/4)
else:
data_str += ' '*self.pc_cursor_size + inst + Style.RESET_ALL
return data_str
def prerender(self, app):
self.render_lock.acquire()
def postrender(self, app):
self.render_lock.release()
def data_updater(self):
do_redraw = False
while(1):
if (self.server.stat_dirty or self.dirty):
self.render_lock.acquire()
self.server.stat_dirty = False
do_redraw = True
stat = self.server.get_status()
if (stat):
# draw the status dictionary
stat_dict = vars(self.server.get_status())
stat_lines = []
for k in stat_dict:
if k.startswith('_'):
pass
elif k == 'pc':
stat_lines.append("{: >30} : {: <#8x}".format(k, stat_dict[k]))
else:
stat_lines.append("{: >30} : {!s: <8}".format(k, stat_dict[k]))
stat_text = '\n'.join(stat_lines)
self.status.text = stat_text
                    # draw each cog's connection status
conn_str = ''
for i in range(8):
fmt = ''
marker = ''
if self.server.cog_states[i].get_state() == p2db_server.CogState.IDLE:
fmt = Fore.GREEN
elif self.server.cog_states[i].get_state() == p2db_server.CogState.EXECUTING:
fmt = Fore.YELLOW
else:
fmt = Fore.RED
if i == self.server.current_cog:
fmt += Style.BRIGHT
marker = '*'
conn_str += fmt + '{: >10}'.format('{} Cog {}\n'.format(marker, i)) + Style.RESET_ALL + Fore.RESET
self.connection.text = ANSI(conn_str)
# draw the pin states
porta_str = ''
portb_str = ''
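                    # Colour encodes pin direction (red = DIR bit set, blue = input); the letter
                    # encodes level (H/L from INA/INB), or X before any pin data has been received.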
for i in range(32):
bit = ''
if (self.server.dira >> i) & 1:
color = Fore.RED + Style.BRIGHT
else:
color = Fore.LIGHTBLUE_EX + Style.BRIGHT
if (self.server.ina >> i) & 1:
bit = 'H'
else:
bit = 'L'
if not self.server.have_pin_data:
bit = 'X'
color = Fore.LIGHTBLACK_EX
porta_str += color + "{0: <3}".format(bit)
if (self.server.dirb >> i) & 1:
color = Fore.RED + Style.BRIGHT
else:
color = Fore.LIGHTBLUE_EX + Style.BRIGHT
if (self.server.inb >> i) & 1:
bit = 'H'
else:
bit = 'L'
if not self.server.have_pin_data:
bit = 'X'
color = Fore.LIGHTBLACK_EX
portb_str += color + "{0: <3}".format(bit)
pin_str = porta_str + '\n\n\n' + portb_str + Fore.RESET + Style.RESET_ALL
self.pins.text = ANSI(pin_str)
                    # update the disassembly window
# get the function the current PC is in
pc = stat.get_mem_pc()
cog_mode = stat.exec_mode == "cogex"
func_name = ''
for sec in self.obj_data:
if pc in self.obj_data[sec]:
section = self.obj_data[sec]
func_name = sec
if cog_mode and stat.exec_mode != 'lutex' and stat._cog_exec_base_addr == -1:
self.function_header.text = ANSI(Fore.YELLOW + "Cog Execution Mode. Set base address with 'cogaddr' to see disassembly" + Fore.RESET)
self.instructions.text = ANSI("")
else:
s = self.get_section_str(section, pc)
self.instructions.text = ANSI(s)
self.function_header.text = ANSI(func_name)
else:
self.status.text = "*** No connection to cog"
self.function_header.text = ANSI("*** No connection to cog")
# get the log data
while not self.server.log_queue.empty():
c = self.server.log_queue.get()
if c != '\r':
self.update_log(c, Fore.LIGHTGREEN_EX)
if do_redraw:
self.render_lock.release()
self.app.invalidate()
do_redraw = False
time.sleep(0.02)
def run(self):
t = threading.Thread(target=self.data_updater, daemon=True)
t.start()
self.app.run(); | 2.390625 | 2 |
archive/original_main.py | FDKevin0/Micro-Expression-with-Deep-Learning | 249 | 12797650 | <filename>archive/original_main.py
import numpy as np
import sys
import math
import operator
import csv
import glob,os
import xlrd
import cv2
import pandas as pd
from sklearn.svm import SVC
from collections import Counter
from sklearn.metrics import confusion_matrix
import scipy.io as sio
from keras.models import Sequential
from keras.layers import LSTM, Dense, TimeDistributed
from keras.utils import np_utils
from keras import metrics
from keras import backend as K
from labelling import collectinglabel
from reordering import readinput
from evaluationmatrix import fpr
workplace='/media/ice/OS/Datasets/CASME2_TIM/'
dB="CASME2_TIM"
rootpath = '/media/ice/OS/Datasets/CASME2_TIM/CASME2_TIM/'
if dB == "CASME2_raw":
inputDir='/media/ice/OS/Datasets/CASME2-RAW/'
resizedFlag=1;
elif dB== "CASME2_large":
inputDir='/media/ice/OS/Datasets/CASME 2/'
wb=xlrd.open_workbook('/media/ice/OS/Datasets/CASME 2/CASME2_label_Ver_2.xls');
ws=wb.sheet_by_index(0)
colm=ws.col_slice(colx=0,start_rowx=1,end_rowx=None)
iD=[str(x.value) for x in colm]
colm=ws.col_slice(colx=1,start_rowx=1,end_rowx=None)
vidName=[str(x.value) for x in colm]
colm=ws.col_slice(colx=6,start_rowx=1,end_rowx=None)
expression=[str(x.value) for x in colm]
table=np.transpose(np.array([np.array(iD),np.array(vidName),np.array(expression)],dtype=str))
subjects=26
samples=246
n_exp=5
resizedFlag=1;
r=68; w=56
VidPerSubject = [9,13,7,5,19,5,9,3,13,13,10,12,8,4,3,4,34,3,15,11,2,2,12,7,7,16]
IgnoredSamples=['sub09/EP13_02','sub09/EP02_02f','sub10/EP13_01','sub17/EP15_01',
'sub17/EP15_03','sub19/EP19_04','sub24/EP10_03','sub24/EP07_01',
'sub24/EP07_04f','sub24/EP02_07','sub26/EP15_01']
listOfIgnoredSamples=[]
for s in range(len(IgnoredSamples)):
if s==0:
listOfIgnoredSamples=[inputDir+IgnoredSamples[s]]
else:
listOfIgnoredSamples.append(inputDir+IgnoredSamples[s])
elif dB== "CASME2_TIM":
    inputDir='/media/ice/OS/Datasets/CASME2_TIM/CASME2_TIM/' #replace with cropped for testing
wb=xlrd.open_workbook('/media/ice/OS/Datasets/CASME2_label_Ver_2.xls');
ws=wb.sheet_by_index(0)
colm=ws.col_slice(colx=0,start_rowx=1,end_rowx=None)
iD=[str(x.value) for x in colm]
colm=ws.col_slice(colx=1,start_rowx=1,end_rowx=None)
vidName=[str(x.value) for x in colm]
colm=ws.col_slice(colx=6,start_rowx=1,end_rowx=None)
expression=[str(x.value) for x in colm]
table=np.transpose(np.array([np.array(iD),np.array(vidName),np.array(expression)],dtype=str))
# print(type(table))
r=50; w=50
resizedFlag=1;
subjects=26
samples=246
n_exp=5
VidPerSubject = [9,13,7,5,19,5,9,3,13,13,10,12,8,4,3,4,34,3,15,11,2,2,12,7,7,16]
IgnoredSamples=['sub09/EP13_02/','sub09/EP02_02f/','sub10/EP13_01/','sub17/EP15_01/',
'sub17/EP15_03/','sub19/EP19_04/','sub24/EP10_03/','sub24/EP07_01/',
'sub24/EP07_04f/','sub24/EP02_07/','sub26/EP15_01/']
listOfIgnoredSamples=[]
for s in range(len(IgnoredSamples)):
if s==0:
listOfIgnoredSamples=[inputDir+IgnoredSamples[s]]
else:
listOfIgnoredSamples.append(inputDir+IgnoredSamples[s])
elif dB == "SMIC":
inputDir="/srv/oyh/DataBase/SMIC/HS_naming_modified/"
wb=xlrd.open_workbook('/srv/oyh/DataBase/SMIC_label.xlsx');
ws=wb.sheet_by_index(0)
colm=ws.col_slice(colx=1,start_rowx=1,end_rowx=None)
vidName=[str(x.value) for x in colm]
colm=ws.col_slice(colx=2,start_rowx=1,end_rowx=None)
expression=[int(x.value) for x in colm]
table=np.transpose(np.array([np.array(vidName),np.array(expression)],dtype=str))
samples=164; #6 samples are excluded
subjects=16;
n_exp=3;
r= 170;w=140;
VidPerSubject = [6,6,39,19,2,4,13,4,7,9,10,10,4,7,2,22];
listOfIgnoredSamples=[];
resizedFlag=1;
else:
print("NOT in the selection.")
######### Reading in the input images ########
SubperdB=[]
for sub in sorted([infile for infile in os.listdir(inputDir)]):
VidperSub=[]
for vid in sorted([inrfile for inrfile in os.listdir(inputDir+sub)]):
path=inputDir + sub + '/'+ vid + '/'
if path in listOfIgnoredSamples:
continue
# print(dB)
# print(path)
imgList=readinput(path,dB)
numFrame=len(imgList)
if resizedFlag ==1:
col=w
row=r
else:
img=cv2.imread(imgList[0])
[row,col,_l]=img.shape
        ## read the label for each input video
collectinglabel(table, sub[3:], vid, workplace+'Classification/', dB)
for var in range(numFrame):
img=cv2.imread(imgList[var])
[_,_,dim]=img.shape
if dim ==3:
img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
if resizedFlag ==1:
#in resize function, [col,row]
img=cv2.resize(img,(col,row))
if var==0:
FrameperVid=img.flatten()
else:
FrameperVid=np.vstack((FrameperVid,img.flatten()))
VidperSub.append(FrameperVid)
SubperdB.append(VidperSub)
##### Setting up the LSTM model ########
data_dim=r*w # 2500
print(data_dim)
timesteps=10
# LSTM1 = LSTM(2500, return_sequences=True, input_shape=(timesteps, data_dim))
model=Sequential()
# model.add(TimeDistributed(Dense(data_dim), input_shape=(timesteps, data_dim)))
model.add(LSTM(2500, return_sequences=True, input_shape=(timesteps, data_dim)))
model.add(LSTM(500,return_sequences=False))
##model.add(LSTM(500,return_sequences=True))
##model.add(LSTM(50,return_sequences=False))
model.add(Dense(50,activation='sigmoid'))
model.add(Dense(5,activation='sigmoid'))
model.compile(loss='categorical_crossentropy',optimizer='Adam',metrics=[metrics.categorical_accuracy])
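# Each sample is a sequence of `timesteps` flattened frames (data_dim = r*w pixels per frame);
# two stacked LSTMs feed small dense layers that classify into the n_exp = 5 expression classes.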
#### generate the label based on subjects #########
label=np.loadtxt(workplace+'Classification/'+ dB +'_label.txt')
labelperSub=[]
counter = 0
for sub in range(subjects):
numVid=VidPerSubject[sub]
labelperSub.append(label[counter:counter+numVid])
counter = counter + numVid
##print(np.shape(labelperSub[1]))
##print(labelperSub[1])
######## Separating the input files into LOSO CV ########
tot_mat=np.zeros((n_exp,n_exp))
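# Leave-one-subject-out cross-validation: each iteration holds out one subject's videos for
# testing, trains on the remaining subjects, and accumulates the per-fold confusion matrix
# into tot_mat for the final micro-accuracy / F1 computation.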
for sub in range(subjects):
Train_X=[]
Train_Y=[]
Test_X=SubperdB[sub]
Test_X=np.array(Test_X)
Test_Y=labelperSub[sub]
Test_Yy=np_utils.to_categorical(Test_Y,5)
print(Test_Y)
## print(np.shape(Test_Y))
if sub==0:
for i in range(1,subjects):
Train_X.append(SubperdB[i])
Train_Y.append(labelperSub[i])
elif sub==subjects-1:
for i in range(subjects-1):
Train_X.append(SubperdB[i])
Train_Y.append(labelperSub[i])
else:
for i in range(subjects):
if sub == i:
continue
else:
Train_X.append(SubperdB[i])
Train_Y.append(labelperSub[i])
# print(Train_X)
# Train_X=np.hstack(Train_X)
# print(Train_X.shape)
Train_X=np.vstack(Train_X) # changed to hstack from vstack
# print(Train_X.shape)
# Train_X = Train_X.shape[1:]
# print(Train_X.shape)
# Train_X = np.expand_dims(Train_X, axis=2)
# Train_X = np.reshape(Train_X, Train_X.shape + (1, 1,) )
# Train_X = np.reshape( Train_X, Train_X.shape )
# Train_X = np.reshape(2500, 16077)
print(Train_X.shape)
Train_Y=np.hstack(Train_Y)
Train_Y=np_utils.to_categorical(Train_Y,5)
print (np.shape(Train_Y))
print (np.shape(Train_X))
print (np.shape(Test_Y))
print (np.shape(Test_X))
model.fit(Train_X, Train_Y, validation_split=0.05, epochs=1, batch_size=20)
model.summary()
predict=model.predict_classes(Test_X)
## predict[predict>= 0.5] = 1
## predict[predict<0.5] = 0
print (predict)
print (Test_Y)
#compute the ConfusionMat
ct=confusion_matrix(Test_Y,predict)
#check the order of the CT
order=np.unique(np.concatenate((predict,Test_Y)))
#create an array to hold the CT for each CV
mat=np.zeros((n_exp,n_exp))
#put the order accordingly, in order to form the overall ConfusionMat
for m in range(len(order)):
for n in range(len(order)):
mat[int(order[m]),int(order[n])]=ct[m,n]
tot_mat=mat+tot_mat
# write each CT of each CV into .txt file
if not os.path.exists(workplace+'Classification/'+'Result/'+dB+'/'):
os.mkdir(workplace+'Classification/'+ 'Result/'+dB+'/')
with open(workplace+'Classification/'+ 'Result/'+dB+'/sub_CT.txt','a') as csvfile:
thewriter=csv.writer(csvfile, delimiter=' ')
thewriter.writerow('Sub ' + str(sub+1))
thewriter=csv.writer(csvfile,dialect=csv.excel_tab)
for row in ct:
thewriter.writerow(row)
thewriter.writerow(order)
thewriter.writerow('\n')
if sub==subjects-1:
# compute the accuracy, F1, P and R from the overall CT
microAcc=np.trace(tot_mat)/np.sum(tot_mat)
[f1,p,r]=fpr(tot_mat,n_exp)
# save into a .txt file
with open(workplace+'Classification/'+ 'Result/'+dB+'/final_CT.txt','w') as csvfile:
thewriter=csv.writer(csvfile,dialect=csv.excel_tab)
for row in tot_mat:
thewriter.writerow(row)
thewriter=csv.writer(csvfile, delimiter=' ')
thewriter.writerow('micro:' + str(microAcc))
thewriter.writerow('F1:' + str(f1))
thewriter.writerow('Precision:' + str(p))
thewriter.writerow('Recall:' + str(r))
| 2.0625 | 2 |