ext (stringclasses, 9 values) | sha (stringlengths, 40-40) | content (stringlengths, 3-1.04M) |
---|---|---|
py | b4166dec2e53e898c9e7183794186be0f1008bc2 | default_app_config = 'src.apps.common.apps.CommonConfig'
|
py | b4166e51aaae3cda11ede98f5af48e8a13ee1648 | # -*- coding: utf-8 -*-
"""
Register apps
"""
from __future__ import unicode_literals
from django.apps import AppConfig
class PollsConfig(AppConfig):
"""
Configuration of polls app
"""
name = 'polls'
|
py | b4166e5d22448dabbd3a8efda2d3f3d813693d51 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import time
import re
def extractTradingPartners(basePath):
if basePath is None:
directory = 'Data Sources/Trading Partners (1)/'
else:
#basePath = 'C:/Users/Krist/University College London/Digital Visualisation/Final Project/'
directory = basePath+'Data Sources/Trading Partners (1)/'
files = os.listdir(directory)
for i,file in enumerate(files):
# We are only interested in the CSV files, not the source text or a potential zip folder containing raw data.
if '.csv' in file.lower():
if i == 0:
# Read the file using the latin-1 encoding instead of the default,
# because the indicator specifications contain characters that the default codec cannot decode.
# As part of reading, NaN values are converted to zeros.
tradingPartners = pd.read_csv(directory+file,encoding = 'latin-1').fillna(value=0)
# Extract the rows that contain information on the top 5 trading partners.
indicatorOfInterest = [indicator for indicator in tradingPartners.Indicator.unique() if 'top 5' in indicator.lower()]
# Locating the relevant indices
indices = [i for i,indicator in enumerate(tradingPartners.Indicator) if indicator in indicatorOfInterest]
# Constructing the dataframe with the observations of interest
tradingPartners = tradingPartners.loc[indices]
tradingPartners = tradingPartners.reset_index(drop=True)
else:
# Read the file using the latin-1 encoding instead of the default,
# because the indicator specifications contain characters that the default codec cannot decode.
# As part of reading, NaN values are converted to zeros.
temp = pd.read_csv(directory+file,encoding = 'latin-1').fillna(value=0)
# Locating the relevant indices
indices = [i for i,indicator in enumerate(temp.Indicator) if indicator in indicatorOfInterest]
# Constructing the dataframe with the observations of interest
temp = temp.loc[indices]
temp = temp.reset_index(drop=True)
# Concatenate the new dataframe with the existing one
tradingPartners = pd.concat([tradingPartners,temp],axis = 0)
# Ensuring everything is lowercased, to ease the extraction later.
# Firstly the columns;
columns = []
for column in tradingPartners.columns:
#print(column)
try:
column = column.lower()
except AttributeError:
pass
columns.append(column)
tradingPartners.columns = columns
# Secondly the columns which can be expected to be categorical/strings
reporter,partner,products,indicatorType,indicator = [],[],[],[],[]
for report,part,prod,indiType,indi in zip(tradingPartners['reporter'],tradingPartners['partner'],tradingPartners['product categories'],
tradingPartners['indicator type'],tradingPartners['indicator']):
# Reporter
try:
report = report.lower()
except AttributeError:
pass
# Partner
try:
part = part.lower()
except AttributeError:
pass
# Product categories
try:
prod = prod.lower()
except AttributeError:
pass
# Indicator type
try:
indiType = indiType.lower()
except AttributeError:
pass
# Indicator
try:
indi = indi.lower()
except AttributeError:
pass
reporter.append(report)
partner.append(part)
products.append(prod)
indicatorType.append(indiType)
indicator.append(indi)
tradingPartners['reporter'] = reporter
tradingPartners['partner'] = partner
tradingPartners['product category'] = products
tradingPartners['indicator type'] = indicatorType
tradingPartners['indicator'] = indicator
# Writing the CSV.
tradingPartners.to_csv('TradingPartners.csv')
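# --- Hypothetical usage sketch (not part of the original file) ---
# A minimal, assumed example of calling the extractor: passing None uses the
# relative 'Data Sources/Trading Partners (1)/' directory, while a base path
# (the Windows path below is only an illustration) is prepended otherwise.
# Either way the result is written to 'TradingPartners.csv'.
#
#   extractTradingPartners(None)
#   extractTradingPartners('C:/Users/example/project/')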
|
py | b4166e6e8467e778c68158c7485c2f367826facf | import time
from typing import Union, Any
from flask import flash
from flask.sessions import SessionMixin
from flask_login import login_user
from flask_dance.consumer import oauth_authorized
import spotipy as sp
from sqlalchemy.exc import NoResultFound
from app import db, spotify_bp
from app.oauth.models import User, OAuth
@oauth_authorized.connect_via(spotify_bp)
def spotify_logged_in(blueprint, token: dict):
"""Spotify logged in behavior."""
if not token:
flash("Failed to log in with Spotify.", category="error")
return False
resp = blueprint.session.get("/me")
if not resp.ok:
msg = "Failed to fetch user info from Spotify."
flash(msg, category="error")
return False
spotify_info = resp.json()
spotify_user_id = str(spotify_info["id"])
# Find this OAuth token in the database, or create it
query = OAuth.query.filter_by(
provider=blueprint.name,
provider_user_id=spotify_user_id,
)
try:
oauth = query.one()
except NoResultFound:
oauth = OAuth(
provider=blueprint.name,
provider_user_id=spotify_user_id,
token=token,
)
if oauth.user:
# If this OAuth token already has an associated local account,
# log in that local user account.
# Note that if we just created this OAuth token, then it can't
# have an associated local account yet.
login_user(oauth.user)
flash("Successfully signed in with Spotify.")
else:
# If this OAuth token doesn't have an associated local account,
# create a new local user account for this user. We can log
# in that account as well, while we're at it.
user = User(
# Remember that `email` can be None, if the user declines
# to publish their email address on Spotify!
uid=spotify_info["id"],
name=spotify_info["display_name"],
email=spotify_info.get("email"),
)
# Associate the new local user account with the OAuth token
oauth.user = user
# Save and commit our database models
db.session.add_all([user, oauth])
db.session.commit()
# Log in the new local user account
login_user(user)
flash("Successfully signed in with Spotify.")
# Since we're manually creating the OAuth model in the database,
# we should return False so that Flask-Dance knows that
# it doesn't have to do it. If we don't return False, the OAuth token
# could be saved twice, or Flask-Dance could throw an error when
# trying to incorrectly save it for us.
return False
def get_token(session_: SessionMixin, scope:Union[str,list[str]]) -> tuple[dict[str, Any], bool]:
"""Check if token is currently valid and returns new token if there is no token or it is expired."""
token_valid = False
token_info = session_.get("token_info", {})
# Checking if the session already has a token stored
if not session_.get("token_info", False):
token_valid = False
return token_info, token_valid
# Checking if token has expired
now = int(time.time())
is_token_expired = session_.get("token_info").get("expires_at") - now < 60
# Refreshing token if it has expired
if is_token_expired:
# Don't reuse a SpotifyOAuth object because they store token info and you could leak user tokens if you reuse a SpotifyOAuth object
sp_oauth = sp.oauth2.SpotifyOAuth(scope=scope)
token_info = sp_oauth.refresh_access_token(
session_.get("token_info").get("refresh_token")
)
token_valid = True
return token_info, token_valid
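# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal, assumed example of how a Flask view might use get_token() to build
# an authenticated Spotipy client. The route, the scope string, and the
# "spotify.login" endpoint name are assumptions for illustration only.
#
#   from flask import session, redirect, url_for
#
#   @app.route("/playlists")
#   def playlists():
#       token_info, token_valid = get_token(session, "playlist-read-private")
#       if not token_valid:
#           return redirect(url_for("spotify.login"))
#       client = sp.Spotify(auth=token_info.get("access_token"))
#       return client.current_user_playlists()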
|
py | b4166ee6b01572704e7a0468031fcfa24fed789c | class Movie:
def __init__(self, name, year, duration):
self.name = name.title()
self.year = year
self.duration = duration
self.likes = 0
def insert_likes(self):
self.likes += 1
class Series:
def __init__(self, name, year, seasons):
self.name = name.title()
self.year = year
self.seasons = seasons
self.likes = 0
def insert_likes(self):
self.likes += 1
objeto01 = Movie("O homem aranha", 2022, 120)
objeto01.insert_likes()
objeto01.insert_likes()
print('Movie name: {}. Year: {}. Running time in minutes: {} min.'.format(objeto01.name, objeto01.year,
objeto01.duration))
objeto02 = Series("La casa del papel", 2018, 5)
objeto02.insert_likes()
print('Series name: {}. Starting year: {}. Number of seasons: {}.'.format(objeto02.name, objeto02.year,
objeto02.seasons))
|
py | b4167028f661c591ffde965be952bdb85c4f69c5 | import pickle
import unittest
import pytest
from softlearning.environments.utils import get_environment
from softlearning.samplers.remote_sampler import RemoteSampler
from softlearning.replay_pools.simple_replay_pool import SimpleReplayPool
from softlearning import policies
@pytest.mark.skip(reason="RemoteSampler is currently broken.")
class RemoteSamplerTest(unittest.TestCase):
def setUp(self):
self.env = get_environment('gym', 'Swimmer', 'v3', {})
self.policy = policies.ContinuousUniformPolicy(
action_range=(
self.env.action_space.low,
self.env.action_space.high,
),
input_shapes=self.env.observation_shape,
output_shape=self.env.action_shape,
observation_keys=self.env.observation_keys)
self.pool = SimpleReplayPool(max_size=100, environment=self.env)
self.remote_sampler = RemoteSampler(max_path_length=10)
def test_initialization(self):
self.assertEqual(self.pool.size, 0)
self.remote_sampler.initialize(self.env, self.policy, self.pool)
self.remote_sampler.sample(timeout=10)
self.assertEqual(self.pool.size, 10)
def test_serialize_deserialize(self):
self.assertEqual(self.pool.size, 0)
self.remote_sampler.initialize(self.env, self.policy, self.pool)
self.remote_sampler.sample()
deserialized = pickle.loads(pickle.dumps(self.remote_sampler))
deserialized.initialize(self.env, self.policy, self.pool)
self.assertEqual(self.pool.size, 10)
self.remote_sampler.sample(timeout=10)
self.assertEqual(self.pool.size, 20)
deserialized = pickle.loads(pickle.dumps(self.remote_sampler))
deserialized.initialize(self.env, self.policy, self.pool)
self.assertTrue(isinstance(
deserialized.environment, type(self.remote_sampler.environment)))
self.assertEqual(
self.remote_sampler._n_episodes, deserialized._n_episodes)
self.assertEqual(
self.remote_sampler._max_path_return,
deserialized._max_path_return)
self.assertEqual(
self.remote_sampler._last_path_return,
deserialized._last_path_return)
self.assertEqual(
len(self.remote_sampler._last_n_paths),
len(deserialized._last_n_paths))
self.remote_sampler.sample(timeout=10)
deserialized.sample(timeout=10)
self.assertEqual(
self.remote_sampler._n_episodes, deserialized._n_episodes)
self.assertNotEqual(
self.remote_sampler._last_path_return,
deserialized._last_path_return)
self.assertEqual(
len(self.remote_sampler._last_n_paths),
len(deserialized._last_n_paths))
if __name__ == '__main__':
unittest.main()
|
py | b41670fc003b0fdb63a20f1cfda7f586181ca9c3 | import numpy as np
import torch
def track(tracker,orig_img,inps,boxes,hm,cropped_boxes,im_name,scores):
hm = hm.cpu().data.numpy()
online_targets = tracker.update(orig_img,inps,boxes,hm,cropped_boxes,im_name,scores,_debug=False)
new_boxes,new_scores,new_ids,new_hm,new_crop = [],[],[],[],[]
for t in online_targets:
tlbr = t.tlbr
tid = t.track_id
thm = t.pose
tcrop = t.crop_box
tscore = t.detscore
new_boxes.append(tlbr)
new_crop.append(tcrop)
new_hm.append(thm)
new_ids.append(tid)
new_scores.append(tscore)
new_hm = torch.Tensor(new_hm).cuda()
return new_boxes,new_scores,new_ids,new_hm,new_crop
|
py | b4167108477b527beb4740d3cd705d6ac9423b6b | import requests
from rich.console import Console
from rich.table import Table
import random
import socket
import nmap
class Module:
def __init__(self, logger):
self.logger = logger
self.name = "servicededection"
self.description = "Service dedection."
self.author = "Onur Atakan ULUSOY"
self.runauto = True
self.options = {
"target": {"value": None, "required": True},
}
def info(self):
console = Console()
table = Table()
table.add_column("Name")
table.add_column("Description")
table.add_column("Author")
table.add_row(self.name, self.description, self.author)
console.print(table)
def run(self):
for key in self.options:
if (
self.options[key]["value"] is None
and self.options[key]["required"] is True
):
self.logger.error(f"Required key {str(key)} is not set for {self.name}")
return
target = self.options["target"]["value"]
console = Console()
table = Table()
table.add_column("SERVICE")
table.add_column("VERSION")
table.add_column("HOST")
table.add_column("PROTOCOL")
table.add_column("PORT")
table.add_column("STATE")
try:
nm = nmap.PortScanner()
nm_scan = nm.scan(target, arguments="-sV")
open_service = False
for host in nm.all_hosts():
for proto in nm[host].all_protocols():
lport = list(nm[host][proto].keys())
lport.sort()
for port in lport:
if not str(nm[host][proto][port]["product"]) == "":
open_service = True
version = str(nm[host][proto][port]["product"]) + " "
version += str(nm[host][proto][port]["version"]) + " "
version += str(nm[host][proto][port]["extrainfo"]) + " "
table.add_row(str(nm[host][proto][port]["name"]), version, str(host), str(proto), str(port), str(nm[host][proto][port]['state']))
if open_service:
print(f"\033[32m[+]\033[0m Founded some service on {target}")
console.print(table)
else:
print(f"[-] Any service founded on {target}")
except Exception as e:
self.logger.exception(e)
print(f"[-] Any service founded on {target}") |
py | b41672205458b747bf1080e5d8a31e152d988fb8 | import logging, yaml, importlib
from PyQt5 import QtCore
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class FloatArea(QMdiArea):
global label
_modules = []
_widgets = []
global cfg
def __init__(self, parent=None):
super(FloatArea, self).__init__(parent)
with open("config.yml", 'r') as ymlfile:
self.cfg = yaml.safe_load(ymlfile)
self.maximumSize()
self.setupBackground()
self.initWidgets()
def setupBackground(self):
label = QLabel("")
if 'background' in self.cfg['general']:
if self.cfg['general']['background'][0] == '#':
label.setStyleSheet("QLabel"
"{ background-color: " +
self.cfg['general']['background'] + "; "
"border:1px solid rgb(0,0,0); }")
else:
label.setStyleSheet("QLabel"
"{ border-image: url("+
self.cfg['general']['background']+"); "
"border:1px solid rgb(0,0,0); }")
self._blub = self.addSubWindow(label, Qt.FramelessWindowHint | Qt.WindowDoesNotAcceptFocus)
self._blub.setFocusPolicy(Qt.NoFocus)
def resizeEvent(self, QResizeEvent):
logging.info('Area.resizeEvent central widget size w ' + str(self.width()) + " h " + str(self.height()))
self._blub.resize(self.width(),self.height())
def importWidget(self, index, key):
logging.info(self.cfg['widgets'][key]['class'])
#Import modules
self._modules.append(importlib.import_module('widgets.'+key))
logging.info("module " + str(self._modules[index]))
#Create and store instance
self._widgets.append(getattr(self._modules[index], self.cfg['widgets'][key]['class'])(self.cfg['widgets'][key]))
#Add widget to main window
logging.info("widgets " + str(self._widgets[index]))
_subwindow = self.addSubWindow(self._widgets[index])
#Set subwindow containing widget transparent
_subwindow.setWindowFlags(QtCore.Qt.FramelessWindowHint)
_subwindow.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self._widgets[index].configure()
def initWidgets(self):
for index, key in enumerate(self.cfg['widgets']):
logging.info("index " + str(index) + ' key ' + key)
self.importWidget(index, key)
|
py | b41672be8907f4b67ebea67ad2cf29ecd1e71a6f | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGgplotify(RPackage):
"""ggplotify: Convert Plot to 'grob' or 'ggplot' Object"""
homepage = "https://github.com/GuangchuangYu/ggplotify"
url = "https://cloud.r-project.org/src/contrib/ggplotify_0.0.3.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/ggplotify"
version('0.0.3', sha256='7e7953a2933aa7127a0bac54375e3e0219a0744cfc3249c3d7b76065f7a51892')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-ggplot2', type=('build', 'run'))
depends_on('r-gridgraphics', type=('build', 'run'))
depends_on('r-rvcheck', type=('build', 'run'))
|
py | b416762d460941550504e38002fe2bb04f788295 | from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.test import TestCase
from oidc_provider.lib.utils.token import (
create_id_token,
encode_id_token,
)
from oidc_provider import settings
from oidc_provider.tests.app.utils import (
create_fake_client,
create_fake_user,
)
import mock
class EndSessionTestCase(TestCase):
"""
See: http://openid.net/specs/openid-connect-session-1_0.html#RPLogout
"""
def setUp(self):
call_command('creatersakey')
self.user = create_fake_user()
self.oidc_client = create_fake_client('id_token')
self.LOGOUT_URL = 'http://example.com/logged-out/'
self.oidc_client.post_logout_redirect_uris = [self.LOGOUT_URL]
self.oidc_client.save()
self.url = reverse('oidc_provider:end-session')
def test_redirects(self):
query_params = {
'post_logout_redirect_uri': self.LOGOUT_URL,
}
response = self.client.get(self.url, query_params)
# With no id_token the OP MUST NOT redirect to the requested redirect_uri.
self.assertRedirects(response, settings.get('OIDC_LOGIN_URL'), fetch_redirect_response=False)
id_token_dic = create_id_token(user=self.user, aud=self.oidc_client.client_id)
id_token = encode_id_token(id_token_dic, self.oidc_client)
query_params['id_token_hint'] = id_token
response = self.client.get(self.url, query_params)
self.assertRedirects(response, self.LOGOUT_URL, fetch_redirect_response=False)
@mock.patch(settings.get('OIDC_AFTER_END_SESSION_HOOK'))
def test_call_post_end_session_hook(self, hook_function):
self.client.get(self.url)
self.assertTrue(hook_function.called, 'OIDC_AFTER_END_SESSION_HOOK should be called')
self.assertTrue(hook_function.call_count == 1, 'OIDC_AFTER_END_SESSION_HOOK should be called once but was {}'.format(hook_function.call_count))
|
py | b41676de07092a81e7b608dfcbd9704cd2f215fa | #!/usr/bin/env python
#encoding: utf8
import sys, rospy
from pimouse_ros.msg import LightSensorValues
def get_freq():
f = rospy.get_param('lightsensors_freq', 10)
try:
if f <= 0.0:
raise Exception()
except:
rospy.logerr("value error: lightsensors_freq")
sys.exit(1)
return f
if __name__ == '__main__':
devfile = '/dev/rtlightsensor0'
rospy.init_node('lightsensors')
pub = rospy.Publisher('lightsensors', LightSensorValues, queue_size=1)
freq = get_freq()
rate = rospy.Rate(freq)
while not rospy.is_shutdown():
try:
with open(devfile, 'r') as f:
data = f.readline().split()
data = [ int(e) for e in data ]
d = LightSensorValues()
d.right_forward = data[0]
d.right_side = data[1]
d.left_side = data[2]
d.left_forward = data[3]
d.sum_all = sum(data)
d.sum_forward = data[0] + data[3]
pub.publish(d)
except IOError:
rospy.logerr("cannot write to " + devfile)
f = get_freq()
if f != freq:
freq = f
rate = rospy.Rate(freq)
rate.sleep()
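# --- Hypothetical consumer sketch (not part of the original node) ---
# A minimal, assumed example of a separate node subscribing to the
# 'lightsensors' topic published above, using the standard rospy API.
#
#   def callback(msg):
#       rospy.loginfo("forward sum: %d", msg.sum_forward)
#
#   rospy.init_node('lightsensor_listener')
#   rospy.Subscriber('lightsensors', LightSensorValues, callback)
#   rospy.spin()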
|
py | b41678b542527aabeb47de9f9711d18bce36a3c9 | #!/usr/bin/env python3
import os, sys
import numpy as np
from operator import itemgetter
from pprint import pprint
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'))
from utils.timeit import timeit
def print_field(f):
X,Y = f.shape
for y in range(Y):
for x in range(X):
print(f[x,y] if f[x,y] > 0 else '.', end='')
print('')
@timeit
def parse_lines(lines):
return [tuple(map(int, line.split(','))) for line in lines]
history = None
@timeit
def make_field(points):
global history
min_x = min(min(points)[0], 0)
max_x = max(points)[0]
min_y = min(min(points, key=itemgetter(1))[1], 0)
max_y = max(points, key=itemgetter(1))[1]
field = np.zeros((max_x + 1, max_y + 1), dtype=int)
history = np.zeros((max_x + 1, max_y + 1), dtype=bool)
for i in range(len(points)):
point = points[i]
field[point[0],point[1]] = i+1
return field
@timeit
def infect_full(field, points):
global history
def infect_single_step(field):
def check_cell(field, x, y, p):
global history
if field[x,y] == p + 1 or field[x,y] == -1: # No touch!
return
elif field[x,y] == 0: # claim it!
field[x,y] = p + 1
elif field[x,y] > 0 and not history[x,y]: # clash!
field[x,y] = -1
max_point = np.max(field) # Number of viruses
X,Y = field.shape
for p in range(max_point):
claimed = np.where(field == p+1)
for s in range(len(claimed[0])):
spot = claimed[0][s], claimed[1][s]
if spot[0] > 0: # Check West
check_cell(field, spot[0]-1, spot[1], p)
if spot[0] < X-1: # Check East
check_cell(field, spot[0]+1, spot[1], p)
if spot[1] > 0: # Check North
check_cell(field, spot[0], spot[1]-1, p)
if spot[1] < Y-1: # Check South
check_cell(field, spot[0], spot[1]+1, p)
while len(np.where(field == 0)[0]) != 0:
infect_single_step(field)
history[field != 0] = True
# print_field(field)
# print()
return field
@timeit
def day6_part6(input):
points = parse_lines(input)
field = make_field(points)
infect_full(field, points)
print_field(field)
return None
if __name__ == '__main__':
# Argument parsing
if len(sys.argv) <= 1:
sys.exit('No entries given. Answer: 0')
if len(sys.argv) > 2:
sys.exit('Too many arguments.')
# Load file
print('Loading file: {}'.format(sys.argv[1]))
with open(sys.argv[1], 'r') as f:
lines = f.readlines()
print(f'Answer: {day6_part6(lines)}') |
py | b416790577edbe6194ea9ddd4a8fede3327ca9e0 | import logging
import unicodedata
from datetime import datetime, timedelta
from discord import Message
from sqlalchemy import desc
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import Session
from sqlalchemy_utils import ScalarListException
from cogs.commands.admin import MiniKarmaMode
from config import CONFIG
from karma.parser import parse_message_content
from karma.transaction import (
KarmaTransaction,
apply_blacklist,
filter_transactions,
make_transactions,
)
from models import Karma, KarmaChange, MiniKarmaChannel
from utils import filter_out_none, get_database_user, get_name_string
def is_in_cooldown(last_change, timeout):
timeout_time = datetime.utcnow() - timedelta(seconds=timeout)
return last_change.created_at > timeout_time
def process_karma(message: Message, message_id: int, db_session: Session, timeout: int):
reply = ""
# Parse the message for karma modifications
karma_items = parse_message_content(message.content)
transactions = make_transactions(karma_items, message)
transactions = filter_transactions(transactions)
transactions = apply_blacklist(transactions, db_session)
# If no karma'd items, just return
if not transactions:
return reply
# TODO: Protect from byte-limit length chars
# Get karma-ing user
user = get_database_user(message.author)
# Get whether the channel is on mini karma or not
channel = (
db_session.query(MiniKarmaChannel)
.filter(MiniKarmaChannel.channel == message.channel.id)
.one_or_none()
)
if channel is None:
karma_mode = MiniKarmaMode.Normal
else:
karma_mode = MiniKarmaMode.Mini
def own_karma_error(topic):
if karma_mode == MiniKarmaMode.Normal:
return f' • Could not change "{topic}" because you cannot change your own karma! :angry:'
else:
return f'could not change "**{topic}**" (own name)'
def internal_error(topic):
if karma_mode == MiniKarmaMode.Normal:
return f' • Could not create "{topic}" due to an internal error.'
else:
return f'could not change "**{topic}**" (internal error)'
def cooldown_error(topic, td):
# Tell the user that the item is on cooldown
if td.seconds < 60:
seconds_plural = f"second{'s' if td.seconds != 1 else ''}"
duration = f"{td.seconds} {seconds_plural}"
else:
mins = td.seconds // 60
mins_plural = f"minute{'s' if mins != 1 else ''}"
duration = f"{mins} {mins_plural}"
if karma_mode == MiniKarmaMode.Normal:
return f' • Could not change "{topic}" since it is still on cooldown (last altered {duration} ago).\n'
else:
return (
f'could not change "**{topic}**" (cooldown, last edit {duration} ago)'
)
def success_item(tr: KarmaTransaction):
# Give some sass if someone is trying to downvote the bot
if (
tr.karma_item.topic.casefold() == "apollo"
and tr.karma_item.operation.value < 0
):
apollo_response = ":wink:"
else:
apollo_response = ""
op = str(tr.karma_item.operation)
# Build the karma item string
if tr.karma_item.reason:
if karma_mode == MiniKarmaMode.Normal:
if tr.self_karma:
return f" • **{truncated_name}** (new score is {karma_change.score}) and your reason has been recorded. *Fool!* that's less karma to you. :smiling_imp:"
else:
return f" • **{truncated_name}** (new score is {karma_change.score}) and your reason has been recorded. {apollo_response}"
else:
return f"**{truncated_name}**{op} (now {karma_change.score}, reason recorded)"
else:
if karma_mode == MiniKarmaMode.Normal:
if tr.self_karma:
return f" • **{truncated_name}** (new score is {karma_change.score}). *Fool!* that's less karma to you. :smiling_imp:"
else:
return f" • **{truncated_name}** (new score is {karma_change.score}). {apollo_response}"
else:
return f"**{truncated_name}**{op} (now {karma_change.score})"
# Start preparing the reply string
if len(transactions) > 1:
transaction_plural = "s"
else:
transaction_plural = ""
items = []
errors = []
# Iterate over the transactions to write them to the database
for transaction in transactions:
# Truncate the topic safely so that 2000-char karma topics can be used
truncated_name = (
(transaction.karma_item.topic[:300] + ".. (truncated to 300 chars)")
if len(transaction.karma_item.topic) > 300
else transaction.karma_item.topic
)
# Catch any self-karma transactions early
if transaction.self_karma and transaction.karma_item.operation.value > -1:
errors.append(own_karma_error(truncated_name))
continue
def topic_transformations():
def query(t):
return db_session.query(Karma).filter(Karma.name.ilike(t)).one_or_none()
topic = transaction.karma_item.topic.casefold()
yield query(topic)
yield query(topic.replace(" ", "_"))
yield query(topic.replace("_", " "))
topic = unicodedata.normalize(CONFIG.UNICODE_NORMALISATION_FORM, topic)
yield query(topic)
yield query(topic.replace(" ", "_"))
yield query(topic.replace("_", " "))
topic = "".join(c for c in topic if not unicodedata.combining(c))
yield query(topic)
yield query(topic.replace(" ", "_"))
yield query(topic.replace("_", " "))
# Get the karma item from the database if it exists
karma_item = next(filter_out_none(topic_transformations()), None)
# Update or create the karma item
if not karma_item:
karma_item = Karma(name=transaction.karma_item.topic)
db_session.add(karma_item)
try:
db_session.commit()
except (ScalarListException, SQLAlchemyError) as e:
db_session.rollback()
logging.exception(e)
errors.append(internal_error(truncated_name))
continue
# Get the last change (or none if there was none)
last_change = (
db_session.query(KarmaChange)
.filter(KarmaChange.karma_id == karma_item.id)
.order_by(desc(KarmaChange.created_at))
.first()
)
if not last_change:
# If the bot is being downvoted then the karma can only go up
if transaction.karma_item.topic.casefold() == "apollo":
new_score = abs(transaction.karma_item.operation.value)
else:
new_score = transaction.karma_item.operation.value
karma_change = KarmaChange(
karma_id=karma_item.id,
user_id=user.id,
message_id=message_id,
reason=transaction.karma_item.reason,
change=new_score,
score=new_score,
created_at=datetime.utcnow(),
)
db_session.add(karma_change)
try:
db_session.commit()
except (ScalarListException, SQLAlchemyError) as e:
db_session.rollback()
logging.exception(e)
errors.append(internal_error(truncated_name))
continue
else:
time_delta = datetime.utcnow() - last_change.created_at
if is_in_cooldown(last_change, timeout):
errors.append(cooldown_error(truncated_name, time_delta))
continue
# If the bot is being downvoted then the karma can only go up
if transaction.karma_item.topic.casefold() == "apollo":
new_score = last_change.score + abs(
transaction.karma_item.operation.value
)
else:
new_score = last_change.score + transaction.karma_item.operation.value
karma_change = KarmaChange(
karma_id=karma_item.id,
user_id=user.id,
message_id=message_id,
reason=transaction.karma_item.reason,
score=new_score,
change=(new_score - last_change.score),
created_at=datetime.utcnow(),
)
db_session.add(karma_change)
try:
db_session.commit()
except (ScalarListException, SQLAlchemyError) as e:
db_session.rollback()
logging.exception(e)
errors.append(internal_error(truncated_name))
continue
# Update karma counts
if transaction.karma_item.operation.value == 0:
karma_item.neutrals = karma_item.neutrals + 1
elif transaction.karma_item.operation.value == 1:
karma_item.pluses = karma_item.pluses + 1
elif transaction.karma_item.operation.value == -1:
# Make sure the changed operation is updated
if transaction.karma_item.topic.casefold() == "apollo":
karma_item.pluses = karma_item.pluses + 1
else:
karma_item.minuses = karma_item.minuses + 1
items.append(success_item(transaction))
# Get the name, either from discord or irc
author_display = get_name_string(message)
# Construct the reply string in totality
# If you have error(s) and no items processed successfully
if karma_mode == MiniKarmaMode.Normal:
item_str = "\n".join(items)
error_str = "\n".join(errors)
if not item_str and error_str:
reply = f"Sorry {author_display}, I couldn't karma the requested item{transaction_plural} because of the following problem{transaction_plural}:\n\n{error_str}"
# If you have items processed successfully but some errors too
elif item_str and error_str:
reply = f"Thanks {author_display}, I have made changes to the following item(s) karma:\n\n{item_str}\n\nThere were some issues with the following item(s), too:\n\n{error_str}"
# If all items were processed successfully
else:
reply = f"Thanks {author_display}, I have made changes to the following karma item{transaction_plural}:\n\n{item_str}"
else:
item_str = " ".join(items)
error_str = " ".join(errors)
reply = " ".join(filter(None, ["Changes:", item_str, error_str]))
# Commit any changes (in case of any DB inconsistencies)
try:
db_session.commit()
except (ScalarListException, SQLAlchemyError) as e:
logging.exception(e)
db_session.rollback()
return reply.rstrip()
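# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal, assumed example of calling process_karma() from a discord.py
# on_message handler; `bot`, `db_session`, and the 60-second timeout are
# illustrative assumptions.
#
#   @bot.event
#   async def on_message(message):
#       reply = process_karma(message, message.id, db_session, timeout=60)
#       if reply:
#           await message.channel.send(reply)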
|
py | b4167916d9d481971d5adced5a6089dd43180034 | # Copyright 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import unittest
from ..remote_annotation_db import RemoteAnnotationDB
class TestRemoteAnnotationDB(unittest.TestCase):
""" Test the in-memory annotation database connected to a remote database.
"""
@classmethod
def setUpClass(cls):
cls.db = RemoteAnnotationDB.from_library_config()
def test_load_package(self):
""" Test loading annotations for a single package.
"""
query = { "package": "pandas" }
docs = list(self.db.filter(query))
self.assertEqual(docs, [])
self.assertTrue(self.db.load_package("pandas"))
docs = list(self.db.filter(query))
self.assertGreater(len(docs), 0)
ids = [ doc['id'] for doc in docs ]
self.assertTrue('series' in ids)
self.assertTrue('data-frame' in ids)
# Don't load twice!
self.assertFalse(self.db.load_package("pandas"))
def test_load_unannotated_package(self):
""" Test that no requests are made for unannotated packages.
"""
self.assertFalse(self.db.load_package("XXX"))
|
py | b4167a9d5981ff3c0622d2416914f13681def8d3 | # -*- coding: utf-8 -*-
# Copyright 2012-2020 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Mario Lassnig <[email protected]>, 2012-2017
# - Vincent Garonne <[email protected]>, 2012-2015
# - Angelos Molfetas <[email protected]>, 2012
# - Martin Barisits <[email protected]>, 2017
# - Hannes Hansen <[email protected]>, 2019
# - Andrew Lister <[email protected]>, 2019
# - Patrick Austin <[email protected]>, 2020
# - Benedikt Ziemons <[email protected]>, 2020
"""
Test the Identity abstraction layer
"""
import unittest
from rucio.common.config import config_get, config_get_bool
from rucio.common.types import InternalAccount
from rucio.common.utils import generate_uuid as uuid
from rucio.core.account import add_account, del_account
from rucio.core.identity import add_identity, del_identity, add_account_identity, del_account_identity, list_identities
from rucio.db.sqla.constants import AccountType, IdentityType
from rucio.tests.common import account_name_generator, headers, hdrdict, auth
class TestIdentity(unittest.TestCase):
"""
Test the Identity abstraction layer
"""
def setUp(self):
""" Setup the Test Case """
if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
self.vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
else:
self.vo = {}
self.account = InternalAccount(account_name_generator(), **self.vo)
add_account(self.account, AccountType.USER, '[email protected]')
def tearDown(self):
""" Tear down the Test Case """
del_account(self.account)
def test_userpass(self):
""" IDENTITY (CORE): Test adding and removing username/password authentication """
add_identity(self.account.external, IdentityType.USERPASS, email='[email protected]', password='secret')
add_account_identity('ddmlab_%s' % self.account, IdentityType.USERPASS, self.account, email='[email protected]', password='secret')
add_identity('/ch/cern/rucio/ddmlab_%s' % self.account, IdentityType.X509, email='[email protected]')
add_account_identity('/ch/cern/rucio/ddmlab_%s' % self.account, IdentityType.X509, self.account, email='[email protected]')
add_identity('ddmlab_%s' % self.account, IdentityType.GSS, email='[email protected]')
add_account_identity('ddmlab_%s' % self.account, IdentityType.GSS, self.account, email='[email protected]')
list_identities()
del_account_identity('ddmlab_%s' % self.account, IdentityType.USERPASS, self.account)
del_account_identity('/ch/cern/rucio/ddmlab_%s' % self.account, IdentityType.X509, self.account)
del_account_identity('ddmlab_%s' % self.account, IdentityType.GSS, self.account)
del_identity('ddmlab_%s' % self.account, IdentityType.USERPASS)
def test_ssh(self):
""" IDENTITY (CORE): Test adding and removing SSH public key authentication """
add_identity(self.account.external, IdentityType.SSH, email='[email protected]')
add_account_identity('my_public_key', IdentityType.SSH, self.account, email='[email protected]')
list_identities()
del_account_identity('my_public_key', IdentityType.SSH, self.account)
del_identity(self.account.external, IdentityType.SSH)
def test_userpass(rest_client, auth_token):
""" ACCOUNT (REST): send a POST to add an identity to an account."""
username = uuid()
# normal addition
headers_dict = {'X-Rucio-Username': username, 'X-Rucio-Password': 'secret', 'X-Rucio-Email': 'email'}
response = rest_client.put('/identities/root/userpass', headers=headers(auth(auth_token), hdrdict(headers_dict)))
assert response.status_code == 201
|
py | b4167ab0241632db245f27f5c8d207898cd03570 | #=========================================================================
# BehavioralRTLIRTypeCheckL3Pass.py
#=========================================================================
# Author : Peitian Pan
# Date : March 30, 2019
"""Provide L3 behavioral RTLIR type check pass."""
from pymtl3.passes.rtlir.errors import PyMTLTypeError
from pymtl3.passes.rtlir.rtype import RTLIRDataType as rdt
from pymtl3.passes.rtlir.rtype import RTLIRType as rt
from .BehavioralRTLIRTypeCheckL2Pass import (
BehavioralRTLIRTypeCheckL2Pass,
BehavioralRTLIRTypeCheckVisitorL2,
BehavioralRTLIRTypeEnforcerL2,
)
class BehavioralRTLIRTypeCheckL3Pass( BehavioralRTLIRTypeCheckL2Pass ):
def get_visitor_class( s ):
return BehavioralRTLIRTypeCheckVisitorL3
class BehavioralRTLIRTypeCheckVisitorL3( BehavioralRTLIRTypeCheckVisitorL2 ):
def __init__( s, component, freevars, accessed, tmpvars, rtlir_getter ):
super().__init__( component, freevars, accessed, tmpvars, rtlir_getter )
s.type_expect[ 'Attribute' ] = (
( 'value', (rt.Component, rt.Signal),
'the base of an attribute must be one of: component, signal!' ),
)
def get_enforce_visitor( s ):
return BehavioralRTLIRTypeEnforcerL3
def _visit_Assign_single_target( s, node, target, i ):
try:
rhs_type = node.value.Type.get_dtype()
lhs_type = target.Type.get_dtype()
except AttributeError:
rhs_type = None
lhs_type = None
l_is_struct = isinstance( lhs_type, rdt.Struct )
r_is_struct = isinstance( rhs_type, rdt.Struct )
# At L3 we check if either LHS or RHS is of BitStruct type.
if l_is_struct or r_is_struct:
if l_is_struct and r_is_struct:
# Both sides are of struct type. Type check only if both sides
# are of the _same_ struct type.
if lhs_type.get_name() != rhs_type.get_name():
raise PyMTLTypeError( s.blk, node.ast,
f'LHS and RHS of assignment should have the same type (LHS target#{i+1} of {lhs_type} vs {rhs_type})!' )
else:
# There should be one side of struct type and the other of vector type.
struct_type, vector_type = lhs_type, rhs_type
if r_is_struct:
struct_type, vector_type = rhs_type, lhs_type
if not isinstance( vector_type, rdt.Vector ):
raise PyMTLTypeError( s.blk, node.ast,
f'LHS and RHS of assignment should have agreeable types (LHS target#{i+1} of {lhs_type} vs {rhs_type})!' )
# Type check only if both sides have the same bitwidth.
is_rhs_reinterpretable = not node.value._is_explicit
struct_nbits, vector_nbits = struct_type.get_length(), vector_type.get_length()
# If RHS is an int literal try to enforce the correct bitwidth.
if not r_is_struct and is_rhs_reinterpretable and struct_nbits != vector_nbits:
s.enforcer.enter( s.blk, rt.NetWire(rdt.Vector(struct_nbits)), node.value )
if l_is_struct:
vector_nbits = node.value.Type.get_dtype().get_length()
if struct_nbits != vector_nbits:
if l_is_struct:
lnbits, rnbits = struct_nbits, vector_nbits
else:
lnbits, rnbits = vector_nbits, struct_nbits
raise PyMTLTypeError( s.blk, node.ast,
f'LHS and RHS of assignment should have the same bitwidth (LHS target#{i+1} of {lhs_type} ({lnbits} bits) vs {rhs_type} ({rnbits} bits))!' )
else:
super()._visit_Assign_single_target( node, target, i )
def visit_Attribute( s, node ):
if isinstance( node.value.Type, rt.Signal ):
dtype = node.value.Type.get_dtype()
if not isinstance( dtype, rdt.Struct ):
raise PyMTLTypeError( s.blk, node.ast,
'attribute base should be a struct signal!'
)
if not dtype.has_property( node.attr ):
raise PyMTLTypeError( s.blk, node.ast,
f'{dtype.get_name()} does not have field {node.attr}!' )
dtype = dtype.get_property( node.attr )
if isinstance( node.value.Type, rt.Port ):
rtype = rt.Port( node.value.Type.get_direction(), dtype )
elif isinstance( node.value.Type, rt.Wire ):
rtype = rt.Wire( dtype )
elif isinstance( node.value.Type, rt.Const ):
obj = node.value.Type.get_object()
if obj is None:
rtype = rt.Const( dtype )
else:
try:
rtype = rt.Const( dtype, getattr( obj, node.attr ) )
except AttributeError:
rtype = rt.Const( dtype )
else:
raise PyMTLTypeError( s.blk, node.ast,
f'unrecognized signal type {node.value.Type}!' )
node.Type = rtype
dtype = node.Type.get_dtype()
# Only allow to be re-interpreted if this is a constant vector attribute
if isinstance( node.Type, rt.Const ) and isinstance( dtype, rdt.Vector ):
node._is_explicit = dtype.is_explicit()
else:
node._is_explicit = True
else:
super().visit_Attribute( node )
def visit_StructInst( s, node ):
cls = node.struct
dtype = rdt.get_rtlir_dtype( cls() )
all_properties = dtype.get_all_properties()
if len( all_properties ) != len( node.values ):
raise PyMTLTypeError( s.blk, node.ast,
f"BitStruct {cls.__name__} has {len(all_properties)} fields but only {len(node.values)} arguments are given!" )
all_types = zip( node.values, list(all_properties.items()) )
for idx, ( value, ( name, field ) ) in enumerate( all_types ):
s.visit( value )
# Expect each argument to be a signal
if not isinstance( value.Type, rt.Signal ):
raise PyMTLTypeError( s.blk, node.ast,
f"argument #{idx+1} has type {value.Type} but not a signal!" )
v_dtype = value.Type.get_dtype()
is_field_reinterpretable = not value._is_explicit
# Expect each argument to have data type which corresponds to the field
if v_dtype != field:
if is_field_reinterpretable:
target_nbits = field.get_length()
s.enforcer.enter( s.blk, rt.NetWire(rdt.Vector(target_nbits)), value )
else:
raise PyMTLTypeError( s.blk, node.ast,
f"Expected argument#{idx+1} ( field {name} ) to have type {field}, but got {v_dtype}." )
node.Type = rt.Const( dtype )
node._is_explicit = True
#-------------------------------------------------------------------------
# Enforce types for all terms whose types are inferred (implicit)
#-------------------------------------------------------------------------
class BehavioralRTLIRTypeEnforcerL3( BehavioralRTLIRTypeEnforcerL2 ):
pass
|
py | b4167b47fb7f7fc9dd92df040f2a471ddde6dc12 | import contextlib
import types
from datetime import date, datetime, time
from functools import wraps
import dateutil.parser
from six import string_types, text_type
from slumber.exceptions import SlumberBaseException
from closeio.exceptions import CloseIOError, RateLimitError
@contextlib.contextmanager
def convert_errors():
try:
yield
except CloseIOError:
raise
except SlumberBaseException as e:
if hasattr(e, 'response'):
try:
error_info = e.response.json()
if e.response.status_code == 429:
raise RateLimitError(**error_info['error'])
error_message = error_info['error']
except (ValueError, KeyError):
error_message = e.response.text
request = e.response.request
request_data = 'url: {}\nbody: {}'.format(
request.url,
request.body,
)
else:
error_message = text_type(e)
request_data = ''
raise CloseIOError(error_message, e, request_data)
except Exception as e:
raise CloseIOError(text_type(e), e, '')
def handle_errors(func):
@wraps(func)
def wrapped(*args, **kwargs):
with convert_errors():
return func(*args, **kwargs)
return wrapped
class Item(dict):
def __init__(self, *args, **kwargs):
super(Item, self).__init__(*args, **kwargs)
self.__dict__ = self
def parse(value): # NoQA
try:
return Item({
key: parse(value)
for key, value in value.items()
})
except AttributeError:
pass
if isinstance(value, types.GeneratorType):
return (
parse(item)
for item in value
)
if not isinstance(value, string_types):
try:
return [
parse(item)
for item in value
]
except TypeError:
pass
try:
parsed = dateutil.parser.parse(value)
if parsed.isoformat() == value:
return parsed
if parsed.date().isoformat() == value:
return parsed.date()
if parsed.time().isoformat() == value:
return parsed.time()
except (TypeError, AttributeError, ValueError, OverflowError):
pass
return value
def convert(value):
try:
return {
key: convert(value)
for key, value in value.items()
}
except AttributeError:
pass
if not isinstance(value, string_types):
try:
return [
convert(item)
for item in value
]
except TypeError:
pass
if isinstance(value, (datetime, date, time)):
return value.isoformat()
return value
def parse_response(func):
@wraps(func)
def wrapped(*args, **kwargs):
return parse(func(*args, **kwargs))
return wrapped
def paginate(func, *args, **kwargs):
skip = 0
limit = 100
while True:
kwargs['_skip'] = skip
kwargs['_limit'] = limit
with convert_errors():
response = func(*args, **kwargs)
if not isinstance(response, dict):
raise CloseIOError(
'close.io response is not a dict, '
'so most likely could not be parsed. \n'
'body "{}"'.format(response)
)
for item in response['data']:
yield item
if not response['has_more']:
break
else:
skip += limit
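# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal, assumed example of iterating all leads through paginate(), given a
# slumber-style resource whose .get() accepts the injected _skip/_limit kwargs.
#
#   for lead in paginate(api.lead.get):
#       print(lead['id'])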
def paginate_via_cursor(func, *args, **kwargs):
cursor = ''
limit = 50
while True:
kwargs['_cursor'] = cursor
kwargs['_limit'] = limit
with convert_errors():
response = func(*args, **kwargs)
if not isinstance(response, dict):
raise CloseIOError(
'close.io response is not a dict, '
'so most likely could not be parsed. \n'
'body "{}"'.format(response)
)
for item in response['data']:
yield item
cursor = response['cursor_next']
if not cursor:
break
class DummyCookieJar(object):
def __init__(self, policy=None):
pass
def set_policy(self, policy):
pass
def add_cookie_header(self, request):
pass
def make_cookies(self, response, request):
pass
def set_cookie_if_ok(self, cookie, request):
pass
def set_cookie(self, cookie):
pass
def extract_cookies(self, response, request):
pass
def clear(self, domain=None, path=None, name=None):
pass
def clear_session_cookies(self):
pass
def clear_expired_cookies(self):
pass
def __iter__(self):
if False:
yield None
def __len__(self):
return 0
def __repr__(self):
return "<%s[%s]>" % (self.__class__, "")
def __str__(self):
return "<%s[%s]>" % (self.__class__, "")
|
py | b4167b92eab95c30041d172203366a88a5fae119 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php
from test_framework.mininode import *
from test_framework.test_framework import DogecoinTestFramework
from test_framework.util import *
import time
'''
Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
self.block_receive_map = {}
def add_connection(self, conn):
self.connection = conn
self.peer_disconnected = False
def on_inv(self, conn, message):
pass
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
def on_block(self, conn, message):
message.block.calc_sha256()
try:
self.block_receive_map[message.block.sha256] += 1
except KeyError as e:
self.block_receive_map[message.block.sha256] = 1
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
def veracked():
return self.verack_received
return wait_until(veracked, timeout=10)
def wait_for_disconnect(self):
def disconnected():
return self.peer_disconnected
return wait_until(disconnected, timeout=10)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
def on_close(self, conn):
self.peer_disconnected = True
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.connection.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout=timeout)
self.ping_counter += 1
return success
class MaxUploadTest(DogecoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache = []
def setup_network(self):
# Start a node with maxuploadtarget of 800 MB (/24h)
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=800", "-blockmaxsize=999000"]))
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# test_nodes[0] will only request old blocks
# test_nodes[1] will only request new blocks
# test_nodes[2] will test resetting the counters
test_nodes = []
connections = []
for i in range(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
# Test logic begins here
# Now mine a big block
mine_large_block(self.nodes[0], self.utxo_cache)
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
mine_large_block(self.nodes[0], self.utxo_cache)
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
big_new_block = int(big_new_block, 16)
# test_nodes[0] will test what happens if we just keep requesting
# the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 800*1024*1024
daily_buffer = 144 * 4000000
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
# 576MB will be reserved for relaying new blocks, so expect this to
# succeed for ~235 tries.
for i in range(success_count):
test_nodes[0].send_message(getdata_request)
test_nodes[0].sync_with_ping()
assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in range(3):
test_nodes[0].send_message(getdata_request)
test_nodes[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
print("Peer 0 disconnected after downloading old block too many times")
# Requesting the current block on test_nodes[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 800 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in range(800):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
print("Peer 1 able to repeatedly download new block")
# But if test_nodes[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
print("Peer 1 disconnected after trying to download old block")
print("Advancing system time on node to clear counters...")
# If we advance the time by 24 hours, then the counters should reset,
# and test_nodes[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
test_nodes[2].sync_with_ping()
test_nodes[2].send_message(getdata_request)
test_nodes[2].sync_with_ping()
assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)
print("Peer 2 able to download old block")
[c.disconnect_node() for c in connections]
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
print("Restarting nodes with -whitelist=127.0.0.1")
stop_node(self.nodes[0], 0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
#recreate/reconnect 3 test nodes
test_nodes = []
connections = []
for i in range(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in range(20):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist
print("Peer 1 still connected after trying to download old block (whitelisted)")
[c.disconnect_node() for c in connections]
if __name__ == '__main__':
MaxUploadTest().main()
|
py | b4167c5cd298c472c3976f6f0bb1cdd4b595125a | #!/usr/bin/env python
#
# Copyright 2016 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Cloud API Client.
BaseCloudApiClient does basic setup for a cloud API.
"""
import logging
import socket
import ssl
import six
from six.moves import http_client
# pylint: disable=import-error
import httplib2
from apiclient import errors as gerrors
from apiclient.discovery import build
from oauth2client import client
from acloud import errors
from acloud.internal.lib import utils
logger = logging.getLogger(__name__)
class BaseCloudApiClient(object):
"""A class that does basic setup for a cloud API."""
# To be overridden by subclasses.
API_NAME = ""
API_VERSION = "v1"
SCOPE = ""
# Defaults for retry.
RETRY_COUNT = 5
RETRY_BACKOFF_FACTOR = 1.5
RETRY_SLEEP_MULTIPLIER = 2
RETRY_HTTP_CODES = [
# 403 is to retry the "Rate Limit Exceeded" error.
# We could retry on a finer-grained error message later if necessary.
403,
500, # Internal Server Error
502, # Bad Gateway
503, # Service Unavailable
]
RETRIABLE_ERRORS = (http_client.HTTPException, httplib2.HttpLib2Error,
socket.error, ssl.SSLError)
RETRIABLE_AUTH_ERRORS = (client.AccessTokenRefreshError, )
def __init__(self, oauth2_credentials):
"""Initialize.
Args:
oauth2_credentials: An oauth2client.OAuth2Credentials instance.
"""
self._service = self.InitResourceHandle(oauth2_credentials)
@classmethod
def InitResourceHandle(cls, oauth2_credentials):
"""Authenticate and initialize a Resource object.
Authenticate http and create a Resource object with methods
for interacting with the service.
Args:
oauth2_credentials: An oauth2client.OAuth2Credentials instance.
Returns:
An apiclient.discovery.Resource object
"""
http_auth = oauth2_credentials.authorize(httplib2.Http())
return utils.RetryExceptionType(
exception_types=cls.RETRIABLE_AUTH_ERRORS,
max_retries=cls.RETRY_COUNT,
functor=build,
sleep_multiplier=cls.RETRY_SLEEP_MULTIPLIER,
retry_backoff_factor=cls.RETRY_BACKOFF_FACTOR,
serviceName=cls.API_NAME,
version=cls.API_VERSION,
# This is a workaround for a known issue in some versions
# of the API client.
# https://github.com/google/google-api-python-client/issues/435
cache_discovery=False,
http=http_auth)
@staticmethod
def _ShouldRetry(exception, retry_http_codes,
other_retriable_errors):
"""Check if exception is retriable.
Args:
exception: An instance of Exception.
retry_http_codes: a list of integers, retriable HTTP codes of
HttpError
other_retriable_errors: a tuple of error types to retry other than
HttpError.
Returns:
Boolean, True if retriable, False otherwise.
"""
if isinstance(exception, other_retriable_errors):
return True
if isinstance(exception, errors.HttpError):
if exception.code in retry_http_codes:
return True
logger.debug("_ShouldRetry: Exception code %s not in %s: %s",
exception.code, retry_http_codes, str(exception))
logger.debug("_ShouldRetry: Exception %s is not one of %s: %s",
type(exception),
list(other_retriable_errors) + [errors.HttpError],
str(exception))
return False
@staticmethod
def _TranslateError(exception):
"""Translate the exception to a desired type.
Args:
exception: An instance of Exception.
Returns:
gerrors.HttpError will be translated to errors.HttpError.
If the error code is errors.HTTP_NOT_FOUND_CODE, it will
be translated to errors.ResourceNotFoundError.
Unrecognized error type will not be translated and will
be returned as is.
"""
if isinstance(exception, gerrors.HttpError):
exception = errors.HttpError.CreateFromHttpError(exception)
if exception.code == errors.HTTP_NOT_FOUND_CODE:
exception = errors.ResourceNotFoundError(
exception.code, str(exception))
return exception
def ExecuteOnce(self, api):
"""Execute an api and parse the errors.
Args:
api: An apiclient.http.HttpRequest, representing the api to execute.
Returns:
Execution result of the api.
Raises:
errors.ResourceNotFoundError: For 404 error.
errors.HttpError: For other types of http error.
"""
try:
return api.execute()
except gerrors.HttpError as e:
raise self._TranslateError(e)
def Execute(self,
api,
retry_http_codes=None,
max_retry=None,
sleep=None,
backoff_factor=None,
other_retriable_errors=None):
"""Execute an api with retry.
Call ExecuteOnce and retry on http error with given codes.
Args:
api: An apiclient.http.HttpRequest, representing the api to execute:
retry_http_codes: A list of http codes to retry.
max_retry: See utils.Retry.
sleep: See utils.Retry.
backoff_factor: See utils.Retry.
other_retriable_errors: A tuple of error types that should be retried
other than errors.HttpError.
Returns:
Execution result of the api.
Raises:
See ExecuteOnce.
"""
retry_http_codes = (self.RETRY_HTTP_CODES
if retry_http_codes is None else retry_http_codes)
max_retry = (self.RETRY_COUNT if max_retry is None else max_retry)
sleep = (self.RETRY_SLEEP_MULTIPLIER if sleep is None else sleep)
backoff_factor = (self.RETRY_BACKOFF_FACTOR
if backoff_factor is None else backoff_factor)
other_retriable_errors = (self.RETRIABLE_ERRORS
if other_retriable_errors is None else
other_retriable_errors)
def _Handler(exc):
"""Check if |exc| is a retriable exception.
Args:
exc: An exception.
Returns:
                True if exc is one of |other_retriable_errors|, or an
                errors.HttpError whose code is in |retry_http_codes|;
                False otherwise.
"""
if self._ShouldRetry(exc, retry_http_codes,
other_retriable_errors):
logger.debug("Will retry error: %s", str(exc))
return True
return False
return utils.Retry(
_Handler,
max_retries=max_retry,
functor=self.ExecuteOnce,
sleep_multiplier=sleep,
retry_backoff_factor=backoff_factor,
api=api)
def BatchExecuteOnce(self, requests):
"""Execute requests in a batch.
Args:
requests: A dictionary where key is request id and value
is an http request.
Returns:
results, a dictionary in the following format
{request_id: (response, exception)}
request_ids are those from requests; response
is the http response for the request or None on error;
exception is an instance of DriverError or None if no error.
"""
results = {}
def _CallBack(request_id, response, exception):
results[request_id] = (response, self._TranslateError(exception))
batch = self._service.new_batch_http_request()
for request_id, request in six.iteritems(requests):
batch.add(
request=request, callback=_CallBack, request_id=request_id)
batch.execute()
return results
def BatchExecute(self,
requests,
retry_http_codes=None,
max_retry=None,
sleep=None,
backoff_factor=None,
other_retriable_errors=None):
"""Batch execute multiple requests with retry.
Call BatchExecuteOnce and retry on http error with given codes.
Args:
requests: A dictionary where key is request id picked by caller,
and value is a apiclient.http.HttpRequest.
retry_http_codes: A list of http codes to retry.
max_retry: See utils.Retry.
sleep: See utils.Retry.
backoff_factor: See utils.Retry.
other_retriable_errors: A tuple of error types that should be retried
other than errors.HttpError.
Returns:
results, a dictionary in the following format
{request_id: (response, exception)}
request_ids are those from requests; response
is the http response for the request or None on error;
exception is an instance of DriverError or None if no error.
"""
executor = utils.BatchHttpRequestExecutor(
self.BatchExecuteOnce,
requests=requests,
retry_http_codes=retry_http_codes or self.RETRY_HTTP_CODES,
max_retry=max_retry or self.RETRY_COUNT,
sleep=sleep or self.RETRY_SLEEP_MULTIPLIER,
backoff_factor=backoff_factor or self.RETRY_BACKOFF_FACTOR,
other_retriable_errors=other_retriable_errors
or self.RETRIABLE_ERRORS)
executor.Execute()
return executor.GetResults()
def ListWithMultiPages(self, api_resource, *args, **kwargs):
"""Call an api that list a type of resource.
Multiple google services support listing a type of
resource (e.g list gce instances, list storage objects).
The querying pattern is similar --
Step 1: execute the api and get a response object like,
{
"items": [..list of resource..],
# The continuation token that can be used
# to get the next page.
"nextPageToken": "A String",
}
Step 2: execute the api again with the nextPageToken to
retrieve more pages and get a response object.
        Step 3: Repeat Step 2 until there are no more pages.
This method encapsulates the generic logic of
calling such listing api.
Args:
api_resource: An apiclient.discovery.Resource object
used to create an http request for the listing api.
*args: Arguments used to create the http request.
**kwargs: Keyword based arguments to create the http
request.
Returns:
A list of items.
"""
items = []
next_page_token = None
while True:
api = api_resource(pageToken=next_page_token, *args, **kwargs)
response = self.Execute(api)
items.extend(response.get("items", []))
next_page_token = response.get("nextPageToken")
if not next_page_token:
break
return items
@property
def service(self):
"""Return self._service as a property."""
return self._service
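# --- Illustrative sketch (not part of the original module) ---
# A minimal subclass showing how Execute and ListWithMultiPages are meant to be
# used; the "compute" API name, scope URL and instances() resource path are
# assumptions for illustration, not something this module defines.
class ExampleComputeClient(BaseCloudApiClient):
    API_NAME = "compute"  # assumed API name
    API_VERSION = "v1"
    SCOPE = "https://www.googleapis.com/auth/cloud-platform"  # assumed scope
    def GetInstance(self, project, zone, instance_name):
        """Fetch one instance, retrying transient HTTP errors via Execute."""
        api = self.service.instances().get(
            project=project, zone=zone, instance=instance_name)
        return self.Execute(api)
    def ListInstances(self, project, zone):
        """Collect instances across all result pages."""
        return self.ListWithMultiPages(
            self.service.instances().list, project=project, zone=zone)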
|
py | b4167c87c885adf22f02a4d2305618d13ef285b7 | # Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""message"""
import importlib.util
import json
import json.decoder as jd
import logging
import traceback
import os
from pathlib import Path
import akg.tvm
from akg.utils import kernel_exec as utils
from akg.utils import validation_check as vc_util
from akg import composite
from . import cce
from . import gpu
from . import op_build
from akg.global_configs import get_dump_ir_flag
from akg.global_configs import get_dump_code_flag
@vc_util.check_input_type(dict, dict)
def _compilewithjson_to_module(kernel_info, attrs):
"""compile with json."""
supported_processors = ['cuda', 'aicore', 'cpu']
processor = 'cuda'
if 'process' in kernel_info:
processor = kernel_info['process']
if processor not in supported_processors:
logging.error("supported processors: %s, current processor: %s", supported_processors, processor)
return False
if processor == 'cuda' and 'compute_capability' in kernel_info:
attrs['compute_capability'] = kernel_info['compute_capability']
if 'composite' in kernel_info and kernel_info['composite'] is True:
try:
composite.build(kernel_info, attrs)
return True
except Exception:
logging.error(traceback.format_exc())
return False
op_name = kernel_info['name']
op_func = None
# get custom ops implementation first.
if 'impl_path' in kernel_info and kernel_info['impl_path'] is not None:
impl_path = os.path.realpath(kernel_info['impl_path'])
if os.path.isfile(impl_path):
custom_mod_name = Path(impl_path).resolve().stem
mod_spec = importlib.util.spec_from_file_location(
custom_mod_name, impl_path)
custom_mod = importlib.util.module_from_spec(mod_spec)
mod_spec.loader.exec_module(custom_mod)
op_func = getattr(custom_mod, op_name, None)
# get built-in ops.
if op_func is None:
if processor == 'cuda':
op_func = getattr(gpu, op_name, None)
if op_func is not None:
input_shapes = []
input_types = []
for input_desc in kernel_info['input_desc']:
input_shapes.append(input_desc[0]['shape'])
input_types.append(input_desc[0]['data_type'])
op_attrs = []
if kernel_info['attr']:
for ext_arg in kernel_info['attr']:
op_attrs.append(ext_arg['value'])
dump_ir = os.getenv(get_dump_ir_flag()) == "on"
dump_code = os.getenv(get_dump_code_flag()) == "on"
utils.op_build(op_func, input_shapes, input_types, op_attrs, kernel_info['op'], dump_ir=dump_ir,
dump_code=dump_code)
return True
else:
op_func = getattr(cce, op_name, None)
if op_func is None:
logging.error(
"this op not support by akg, please check op name %s", str(op_name))
return False
args = {}
tsr = []
for input_desc in kernel_info['input_desc']:
if len(input_desc) == 1:
tensor_shape = input_desc[0]['shape']
tensor_shape = (1,) if not tensor_shape else tensor_shape
vc_util.shape_dtype_max_size_check(
tensor_shape, input_desc[0]['data_type'])
args[input_desc[0]['name']] = akg.tvm.placeholder(
shape=tensor_shape, name=input_desc[0]['tensor_name'], dtype=input_desc[0]['data_type'])
tsr.append(args[input_desc[0]['name']])
else:
tmp_input = []
for tmp_desc in input_desc:
tensor_shape = tmp_desc['shape']
tensor_shape = (1,) if not tensor_shape else tensor_shape
vc_util.shape_dtype_max_size_check(
tensor_shape, tmp_desc['data_type'])
tmp_input.append(akg.tvm.placeholder(
shape=tensor_shape, name=tmp_desc['tensor_name'], dtype=tmp_desc['data_type']))
args[input_desc[0]['name']] = tmp_input
tsr = tsr + tmp_input
if kernel_info['attr']:
for ext_arg in kernel_info['attr']:
args[ext_arg['name']] = ext_arg['value']
output = op_func(**args)
schedule_func = None
if isinstance(output, (list, tuple)):
from inspect import isfunction
tmp_outputs = []
for elem in output:
if isfunction(elem):
schedule_func = elem
elif isinstance(elem, dict):
for key, value in elem.items():
if key not in attrs or not attrs[key]:
attrs[key] = value
else:
tmp_outputs.append(elem)
output = tmp_outputs
else:
output = [output]
tsr = tsr + [i for i in output if utils.TensorUtils.is_output_value(i)]
build_res = op_build([op_name], output, tsr, schedule_func, processor, kernel_info['op'], attrs)
if not build_res:
return False
return True
def compilewithjson(json_str, attrs=None):
if attrs is None:
attrs = {}
try:
kernel_info = json.loads(json_str)
if isinstance(attrs, str):
attrs = json.loads(attrs)
except jd.JSONDecodeError:
logging.error(traceback.format_exc())
return False
return _compilewithjson_to_module(kernel_info, attrs)
def compilewithjsonname(json_file, attrs=None):
with open(json_file, 'r') as f:
return compilewithjson(f.read().strip(), attrs)
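# --- Illustrative sketch (not part of the original module) ---
# A minimal kernel description carrying the fields this module reads
# (name/op/process/input_desc/attr); the operator name "tensor_add" and the
# shapes below are assumptions for illustration only.
_EXAMPLE_KERNEL_JSON = """
{
  "name": "tensor_add",
  "op": "tensor_add_fp16_16_16",
  "process": "cuda",
  "composite": false,
  "input_desc": [
    [{"name": "x", "tensor_name": "input_0", "shape": [16, 16], "data_type": "float16"}],
    [{"name": "y", "tensor_name": "input_1", "shape": [16, 16], "data_type": "float16"}]
  ],
  "attr": []
}
"""
# Usage (assuming a "tensor_add" op is exposed by the gpu package):
#     compilewithjson(_EXAMPLE_KERNEL_JSON)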
|
py | b4167ce5a07d6dcc20f27a187c44e69516cc094c | from __future__ import absolute_import, division, print_function
import copy
import inspect
from builtins import (str)
from cfn_model.model.References import References
from cfn_model.model.Parameter import Parameter
def lineno():
"""Returns the current line number in our program."""
return str(' - CfnModel - caller: '+str(inspect.stack()[1][3])+' - line number: '+str(inspect.currentframe().f_back.f_lineno))
class CfnModel:
"""
Cloudformation model
"""
def __init__(self, debug=False):
"""
Initialize
:param debug:
"""
# attr_accessor :raw_model
self.parameters={}
self.resources = {}
self.raw_model = None
self.debug = debug
if self.debug:
print('CfnModel - init'+lineno())
def copy(self):
"""
copy the model
:return: copy of model
"""
if self.debug:
print('CfnModel - copy'+lineno())
return copy.copy(self.raw_model)
def security_groups(self):
"""
Get security groups
:return:
"""
if self.debug:
print("\n\n################################################################")
print('CfnModel - security_groups - getting security group resources'+lineno())
print("####################################################################\n")
return self.resources_by_type('AWS::EC2::SecurityGroup')
def iam_users(self):
"""
Get iam users
:return:
"""
if self.debug:
print("\n\n################################################################")
print('CfnModel - iam_users - getting iam users resources'+lineno())
print("####################################################################\n")
return self.resources_by_type('AWS::IAM::User')
def standalone_ingress(self):
"""
Get standalone ingress resources
:return:
"""
if self.debug:
print("\n\n################################################################")
print('CfnModel - standalone_ingress - getting security group ingress resources'+lineno())
print("####################################################################\n")
security_group_ingresses = []
resources = self.resources_by_type('AWS::EC2::SecurityGroupIngress')
for resource in resources:
if self.debug:
print("\n\n#############################################")
print('Stand alone ingress'+lineno())
print(str(resource) + lineno())
print("################################################\n")
if 'Properties' in resource.cfn_model:
if self.debug:
print('properties in cfn_model: '+lineno())
if 'GroupId' in resource.cfn_model['Properties']:
if self.debug:
print('groupid in properties: '+lineno())
if References.is_security_group_id_external(str(resource.cfn_model['Properties']['GroupId']) ,debug=self.debug):
security_group_ingresses.append(resource)
if self.debug:
print("\n############################################")
print('These are the standalone security_group_ingresses: '+str(security_group_ingresses)+lineno())
print("##############################################\n")
return security_group_ingresses
def standalone_egress(self):
"""
Get standalone egress resources
:return:
"""
if self.debug:
print("\n\n################################################################")
print('CfnModel - standalone_egress - getting security group egress resources'+lineno())
print("####################################################################\n")
security_group_egresses = []
resources = self.resources_by_type('AWS::EC2::SecurityGroupEgress')
for resource in resources:
if self.debug:
print(str(resource) + lineno())
if 'Properties' in resource.cfn_model:
if self.debug:
print('Properties in cfn_model '+lineno())
if 'GroupId' in resource.cfn_model['Properties']:
if self.debug:
print('GroupId in properties'+lineno())
if References.is_security_group_id_external(
resource.cfn_model['Properties']['GroupId'],
debug=self.debug):
security_group_egresses.append(resource)
if 'groupId' in resource.cfn_model['Properties']:
if self.debug:
print('groupId in properties'+lineno())
if References.is_security_group_id_external(
resource.cfn_model['Properties']['groupId'],
debug=self.debug):
security_group_egresses.append(resource)
if self.debug:
print('security_group_egresses: '+str(security_group_egresses)+lineno())
return security_group_egresses
def resources_by_type(self, resource_type):
"""
Get cfn resources by type
:param resource_type:
:return:
"""
if self.debug:
print('CfnModel - resource_by_type'+lineno())
print("\n\n####################################")
print('#### Looking for resource_type: '+str(resource_type)+' in raw_model'+lineno())
print("####################################\n\n")
resources = []
if self.debug:
print(str(self.resources)+lineno())
# Iterating through the resources in the raw_model
for resource in self.resources:
if self.debug:
print('resource: '+str(resource)+lineno())
print('resource object: '+str(self.resources[resource])+lineno())
print('type: '+str(self.resources[resource].resource_type)+lineno())
print('vars: '+str(vars(self.resources[resource]))+lineno())
print('resource type is: '+str(self.resources[resource].resource_type)+lineno())
if str(self.resources[resource].resource_type) == str(resource_type):
if self.debug:
print(' ### FOUND MATCHING RESOURCE TYPE '+lineno())
resources.append(self.resources[resource])
if self.debug:
print('CfnModel - resources_by_type - returning resource'+lineno())
if len(resources)<1:
if self.debug:
print('### Could not find matching type for: '+str(resource_type)+lineno())
else:
if self.debug:
print("\n\n########################################")
print('### found '+str(len(resources))+' '+str(resource_type)+' resources'+lineno())
print("########################################\n\n")
return resources
def find_security_group_by_group_id(self, security_group_reference):
"""
Get security group by security group id
:param security_group_reference:
:return:
"""
if self.debug:
print('CfnModel - find_security_group_by_group_id'+lineno())
print('security_group_reference: '+str(security_group_reference)+lineno())
security_group_id = References.resolve_security_group_id(security_group_reference)
if not security_group_id:
# # leave it alone since external ref or something we don't grok
return security_group_reference
else:
security_groups = self.security_groups()
for sg in security_groups:
if self.debug:
print('sg: '+str(sg)+lineno())
print('vars: '+str(vars(sg))+lineno())
                if sg.logical_resource_id == security_group_id:
return sg
# leave it alone since external ref or something we don't grok
return security_group_reference
def transform_hash_into_parameters(self, cfn_hash):
"""
Transform hash into parameters
:param cfn_hash:
:return:
"""
if self.debug:
print('CfnParser - transform_hash_into_parameters'+lineno())
print('cfn_hash: '+str(cfn_hash)+lineno())
if 'Parameters' in cfn_hash:
for param in cfn_hash['Parameters']:
if self.debug:
print(param+lineno())
print(str(cfn_hash['Parameters'][param])+lineno())
parameter = Parameter(debug=self.debug)
parameter.id = param
parameter.type = cfn_hash['Parameters'][param]['Type']
parameter.instance_variables.append(param+'='+cfn_hash['Parameters'][param]['Type'].lower().replace('-','_'))
self.parameters[param] = parameter
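# --- Illustrative sketch (not part of the original module) ---
# transform_hash_into_parameters only needs a parsed template dict, so it can
# be exercised directly; the template fragment below is an assumption for
# illustration. Resource queries such as security_groups() additionally rely
# on a parser having populated self.resources.
if __name__ == '__main__':
    _example_template = {
        'Parameters': {
            'InstanceTypeParameter': {'Type': 'String'}
        }
    }
    _model = CfnModel(debug=False)
    _model.transform_hash_into_parameters(_example_template)
    print(sorted(_model.parameters.keys()))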
|
py | b4167d3ab023a2284a3401e3bf4f5a8cac059a41 | from tkinter import *
class MyWindow:
def __init__(self, win):
self.lbl1=Label(win, text='Enter your fullname:', fg='Red')
self.t1=Entry()
self.t1.place(x=150, y=70)
self.t1.place(width=150, height=20)
self.t2 = Entry()
self.t2.place(x=200, y=100)
self.lbl1.place(x=30, y=70)
self.btn1 = Button(win,text='Click to Display your fullname', fg='Red', font="Times 9")
self.btn1.place(x=30, y=100)
self.t2.place(width=150, height=20)
window = Tk()
mywin = MyWindow(window)
window.title('Midterm Exam')
window.geometry("400x300+10+10")
window.mainloop() |
py | b4167dda7be66a6b03fe974d8ae4fda8ea679a2f | from abc import ABCMeta, abstractmethod
class QueryBuilderAbstract(metaclass=ABCMeta):
_sql = ""
_param_dict = {}
_select_sql = ""
_from_sql = ""
_join_sql = ""
_where_sql = ""
_group_sql = ""
_order_sql = ""
_limit_sql = ""
@abstractmethod
def select(self, **kwargs):
pass
@abstractmethod
def from_table(self, cls, on_expression):
pass
@abstractmethod
def left_join(self, cls, on_expression):
pass
@abstractmethod
def right_join(self, cls, on_expression):
pass
@abstractmethod
def inner_join(self, cls, on_expression):
pass
@abstractmethod
def where(self, *args):
pass
@abstractmethod
def order_by(self, *args):
pass
@abstractmethod
def group_by(self, **args):
pass
@abstractmethod
def limit(self, top_count):
pass
@abstractmethod
def first(self, cls=None):
pass
@abstractmethod
def find(self, cls=None):
pass
@abstractmethod
def find_page(self, page_index, page_size, cls=None):
pass
@abstractmethod
def find_count(self):
pass
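# --- Illustrative sketch (not part of the original module) ---
# A concrete builder must override every abstract method before it can be
# instantiated; the two SQL-flavoured overrides below are assumptions for
# illustration, and the remaining methods are only listed in the comment.
class _SqlQueryBuilderSketch(QueryBuilderAbstract):
    def select(self, **kwargs):
        # kwargs maps result aliases to column expressions, e.g. name="u.name"
        self._select_sql = "SELECT " + ", ".join(
            "{} AS {}".format(expr, alias) for alias, expr in kwargs.items())
        return self
    def where(self, *args):
        # args are raw predicate strings joined with AND
        self._where_sql = " WHERE " + " AND ".join(args)
        return self
    # from_table, left_join, right_join, inner_join, order_by, group_by,
    # limit, first, find, find_page and find_count would follow the same
    # pattern and are omitted here.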
|
py | b4167eab9794ea3eb2e267c00f7d8f8e9eb45973 | import random
import string
import datetime
def isunique_1(s):
# O(n^2)
i = 0
duplicated = False
while i < len(s):
j = i + 1
while j < len(s):
if s[i] == s[j]:
duplicated = True
j+=1
if duplicated:
return False
else:
i+=1
return True
def isunique_2(s):
# O(n)
dic = {}
for i in s:
if i in dic:
return False
else:
dic[i] = 1
return True
def isunique_3(s):
# O(n)
l = []
for i in s:
if i in l:
return False
else:
l.append(i)
return True
def isunique_4(string):
# O(n)
# https://github.com/careercup/CtCI-6th-Edition-Python/blob/master/Chapter1/1_Is%20Unique/IsUnique.py
# Assuming character set is ASCII (128 characters)
if len(string) > 128:
return False
char_set = [False for _ in range(128)]
for char in string:
val = ord(char)
if char_set[val]:
# Char already found in string
return False
char_set[val] = True
return True
class Gen:
def __init__(self):
self.size = random.randint(100,200)
self.string = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase) for _ in range(self.size))
class UniqueTest:
def __init__(self):
random.seed(10)
self.strings = [Gen().string for i in range(20)]
def test_isunique1(self):
start = datetime.datetime.now()
res = []
print(self.strings)
for i in self.strings:
res.append(isunique_1(i))
print(res)
time = datetime.datetime.now() - start
print(time)
def test_isunique2(self):
start = datetime.datetime.now()
res = []
print(self.strings)
for i in self.strings:
res.append(isunique_2(i))
print(res)
time = datetime.datetime.now() - start
print(time)
def test_isunique3(self):
start = datetime.datetime.now()
res = []
print(self.strings)
for i in self.strings:
res.append(isunique_3(i))
print(res)
time = datetime.datetime.now() - start
print(time)
def test_isunique4(self):
start = datetime.datetime.now()
res = []
print(self.strings)
for i in self.strings:
res.append(isunique_4(i))
print(res)
time = datetime.datetime.now() - start
print(time)
def main():
UniqueTest().test_isunique1()
UniqueTest().test_isunique2()
UniqueTest().test_isunique3()
UniqueTest().test_isunique4()
if __name__ == "__main__":
main() |
py | b4167ec9d1499646f8f7c5ade5d3c4da52db2810 | """
Gamma and related functions
"""
from __future__ import print_function, absolute_import
from six.moves import range
from six import integer_types
from sage.symbolic.function import GinacFunction, BuiltinFunction
from sage.libs.pynac.pynac import (register_symbol, symbol_table,
py_factorial_py, I)
from sage.structure.element import coercion_model
from sage.structure.all import parent as s_parent
from sage.symbolic.expression import Expression
from sage.rings.all import Integer, Rational, RealField, ZZ, ComplexField
from sage.functions.exp_integral import Ei
from sage.libs.mpmath import utils as mpmath_utils
from sage.arith.all import binomial as arith_binomial
from .log import exp
from .other import sqrt
from sage.symbolic.constants import pi
class Function_gamma(GinacFunction):
def __init__(self):
r"""
The Gamma function. This is defined by
.. MATH::
\Gamma(z) = \int_0^\infty t^{z-1}e^{-t} dt
for complex input `z` with real part greater than zero, and by
analytic continuation on the rest of the complex plane (except
for negative integers, which are poles).
It is computed by various libraries within Sage, depending on
the input type.
EXAMPLES::
sage: from sage.functions.gamma import gamma1
sage: gamma1(CDF(0.5,14))
-4.0537030780372815e-10 - 5.773299834553605e-10*I
sage: gamma1(CDF(I))
-0.15494982830181067 - 0.49801566811835607*I
Recall that `\Gamma(n)` is `n-1` factorial::
sage: gamma1(11) == factorial(10)
True
sage: gamma1(6)
120
sage: gamma1(1/2)
sqrt(pi)
sage: gamma1(-1)
Infinity
sage: gamma1(I)
gamma(I)
sage: gamma1(x/2)(x=5)
3/4*sqrt(pi)
sage: gamma1(float(6)) # For ARM: rel tol 3e-16
120.0
sage: gamma(6.)
120.000000000000
sage: gamma1(x)
gamma(x)
::
sage: gamma1(pi)
gamma(pi)
sage: gamma1(i)
gamma(I)
sage: gamma1(i).n()
-0.154949828301811 - 0.498015668118356*I
sage: gamma1(int(5))
24
::
sage: conjugate(gamma(x))
gamma(conjugate(x))
::
sage: plot(gamma1(x),(x,1,5))
Graphics object consisting of 1 graphics primitive
To prevent automatic evaluation use the ``hold`` argument::
sage: gamma1(1/2,hold=True)
gamma(1/2)
To then evaluate again, we currently must use Maxima via
:meth:`sage.symbolic.expression.Expression.simplify`::
sage: gamma1(1/2,hold=True).simplify()
sqrt(pi)
TESTS:
sage: gamma(x)._sympy_()
gamma(x)
We verify that we can convert this function to Maxima and
convert back to Sage::
sage: z = var('z')
sage: maxima(gamma1(z)).sage()
gamma(z)
sage: latex(gamma1(z))
\Gamma\left(z\right)
Test that :trac:`5556` is fixed::
sage: gamma1(3/4)
gamma(3/4)
sage: gamma1(3/4).n(100)
1.2254167024651776451290983034
Check that negative integer input works::
sage: (-1).gamma()
Infinity
sage: (-1.).gamma()
NaN
sage: CC(-1).gamma()
Infinity
sage: RDF(-1).gamma()
NaN
sage: CDF(-1).gamma()
Infinity
Check if :trac:`8297` is fixed::
sage: latex(gamma(1/4))
\Gamma\left(\frac{1}{4}\right)
Test pickling::
sage: loads(dumps(gamma(x)))
gamma(x)
Check that the implementations roughly agrees (note there might be
difference of several ulp on more complicated entries)::
sage: import mpmath
sage: float(gamma(10.)) == gamma(10.r) == float(gamma(mpmath.mpf(10)))
True
sage: float(gamma(8.5)) == gamma(8.5r) == float(gamma(mpmath.mpf(8.5)))
True
Check that ``QQbar`` half integers work with the ``pi`` formula::
sage: gamma(QQbar(1/2))
sqrt(pi)
sage: gamma(QQbar(-9/2))
-32/945*sqrt(pi)
.. SEEALSO::
:meth:`gamma`
"""
GinacFunction.__init__(self, 'gamma', latex_name=r"\Gamma",
ginac_name='gamma',
conversions={'mathematica':'Gamma',
'maple':'GAMMA',
'sympy':'gamma',
'fricas':'Gamma',
'giac':'Gamma'})
gamma1 = Function_gamma()
class Function_log_gamma(GinacFunction):
def __init__(self):
r"""
The principal branch of the log gamma function. Note that for
`x < 0`, ``log(gamma(x))`` is not, in general, equal to
``log_gamma(x)``.
It is computed by the ``log_gamma`` function for the number type,
or by ``lgamma`` in Ginac, failing that.
Gamma is defined for complex input `z` with real part greater
than zero, and by analytic continuation on the rest of the
complex plane (except for negative integers, which are poles).
EXAMPLES:
Numerical evaluation happens when appropriate, to the
appropriate accuracy (see :trac:`10072`)::
sage: log_gamma(6)
log(120)
sage: log_gamma(6.)
4.78749174278205
sage: log_gamma(6).n()
4.78749174278205
sage: log_gamma(RealField(100)(6))
4.7874917427820459942477009345
sage: log_gamma(2.4 + I)
-0.0308566579348816 + 0.693427705955790*I
sage: log_gamma(-3.1)
0.400311696703985 - 12.5663706143592*I
sage: log_gamma(-1.1) == log(gamma(-1.1))
False
Symbolic input works (see :trac:`10075`)::
sage: log_gamma(3*x)
log_gamma(3*x)
sage: log_gamma(3 + I)
log_gamma(I + 3)
sage: log_gamma(3 + I + x)
log_gamma(x + I + 3)
Check that :trac:`12521` is fixed::
sage: log_gamma(-2.1)
1.53171380819509 - 9.42477796076938*I
sage: log_gamma(CC(-2.1))
1.53171380819509 - 9.42477796076938*I
sage: log_gamma(-21/10).n()
1.53171380819509 - 9.42477796076938*I
sage: exp(log_gamma(-1.3) + log_gamma(-0.4) -
....: log_gamma(-1.3 - 0.4)).real_part() # beta(-1.3, -0.4)
-4.92909641669610
In order to prevent evaluation, use the ``hold`` argument;
to evaluate a held expression, use the ``n()`` numerical
evaluation method::
sage: log_gamma(SR(5), hold=True)
log_gamma(5)
sage: log_gamma(SR(5), hold=True).n()
3.17805383034795
TESTS::
sage: log_gamma(-2.1 + I)
-1.90373724496982 - 7.18482377077183*I
sage: log_gamma(pari(6))
4.78749174278205
sage: log_gamma(x)._sympy_()
loggamma(x)
sage: log_gamma(CC(6))
4.78749174278205
sage: log_gamma(CC(-2.5))
-0.0562437164976741 - 9.42477796076938*I
sage: log_gamma(RDF(-2.5))
-0.056243716497674054 - 9.42477796076938*I
sage: log_gamma(CDF(-2.5))
-0.056243716497674054 - 9.42477796076938*I
sage: log_gamma(float(-2.5))
(-0.056243716497674054-9.42477796076938j)
sage: log_gamma(complex(-2.5))
(-0.056243716497674054-9.42477796076938j)
``conjugate(log_gamma(x)) == log_gamma(conjugate(x))`` unless on the
branch cut, which runs along the negative real axis.::
sage: conjugate(log_gamma(x))
conjugate(log_gamma(x))
sage: var('y', domain='positive')
y
sage: conjugate(log_gamma(y))
log_gamma(y)
sage: conjugate(log_gamma(y + I))
conjugate(log_gamma(y + I))
sage: log_gamma(-2)
+Infinity
sage: conjugate(log_gamma(-2))
+Infinity
"""
GinacFunction.__init__(self, "log_gamma", latex_name=r'\log\Gamma',
conversions=dict(mathematica='LogGamma',
maxima='log_gamma',
sympy='loggamma',
fricas='logGamma'))
log_gamma = Function_log_gamma()
class Function_gamma_inc(BuiltinFunction):
def __init__(self):
r"""
The upper incomplete gamma function.
It is defined by the integral
.. MATH::
\Gamma(a,z)=\int_z^\infty t^{a-1}e^{-t}\,\mathrm{d}t
EXAMPLES::
sage: gamma_inc(CDF(0,1), 3)
0.0032085749933691158 + 0.012406185811871568*I
sage: gamma_inc(RDF(1), 3)
0.049787068367863944
sage: gamma_inc(3,2)
gamma(3, 2)
sage: gamma_inc(x,0)
gamma(x)
sage: latex(gamma_inc(3,2))
\Gamma\left(3, 2\right)
sage: loads(dumps((gamma_inc(3,2))))
gamma(3, 2)
sage: i = ComplexField(30).0; gamma_inc(2, 1 + i)
0.70709210 - 0.42035364*I
sage: gamma_inc(2., 5)
0.0404276819945128
sage: x,y=var('x,y')
sage: gamma_inc(x,y).diff(x)
diff(gamma(x, y), x)
sage: (gamma_inc(x,x+1).diff(x)).simplify()
-(x + 1)^(x - 1)*e^(-x - 1) + D[0](gamma)(x, x + 1)
TESTS:
Check that :trac:`21407` is fixed::
sage: gamma(-1,5)._sympy_()
expint(2, 5)/5
sage: gamma(-3/2,5)._sympy_()
-6*sqrt(5)*exp(-5)/25 + 4*sqrt(pi)*erfc(sqrt(5))/3
.. SEEALSO::
:meth:`gamma`
"""
BuiltinFunction.__init__(self, "gamma", nargs=2, latex_name=r"\Gamma",
conversions={'maxima':'gamma_incomplete', 'mathematica':'Gamma',
'maple':'GAMMA', 'sympy':'uppergamma', 'giac':'ugamma'})
def _eval_(self, x, y):
"""
EXAMPLES::
sage: gamma_inc(2.,0)
1.00000000000000
sage: gamma_inc(2,0)
1
sage: gamma_inc(1/2,2)
-sqrt(pi)*(erf(sqrt(2)) - 1)
sage: gamma_inc(1/2,1)
-sqrt(pi)*(erf(1) - 1)
sage: gamma_inc(1/2,0)
sqrt(pi)
sage: gamma_inc(x,0)
gamma(x)
sage: gamma_inc(1,2)
e^(-2)
sage: gamma_inc(0,2)
-Ei(-2)
"""
if y == 0:
return gamma(x)
if x == 1:
return exp(-y)
if x == 0:
return -Ei(-y)
if x == Rational(1)/2: #only for x>0
from sage.functions.error import erf
return sqrt(pi)*(1-erf(sqrt(y)))
return None
def _evalf_(self, x, y, parent=None, algorithm='pari'):
"""
EXAMPLES::
sage: gamma_inc(0,2)
-Ei(-2)
sage: gamma_inc(0,2.)
0.0489005107080611
sage: gamma_inc(0,2).n(algorithm='pari')
0.0489005107080611
sage: gamma_inc(0,2).n(200)
0.048900510708061119567239835228...
sage: gamma_inc(3,2).n()
1.35335283236613
TESTS:
Check that :trac:`7099` is fixed::
sage: R = RealField(1024)
sage: gamma(R(9), R(10^-3)) # rel tol 1e-308
40319.99999999999999999999999999988898884344822911869926361916294165058203634104838326009191542490601781777105678829520585311300510347676330951251563007679436243294653538925717144381702105700908686088851362675381239820118402497959018315224423868693918493033078310647199219674433536605771315869983788442389633
sage: numerical_approx(gamma(9, 10^(-3)) - gamma(9), digits=40) # abs tol 1e-36
-1.110111598370794007949063502542063148294e-28
Check that :trac:`17328` is fixed::
sage: gamma_inc(float(-1), float(-1))
(-0.8231640121031085+3.141592653589793j)
sage: gamma_inc(RR(-1), RR(-1))
-0.823164012103109 + 3.14159265358979*I
sage: gamma_inc(-1, float(-log(3))) - gamma_inc(-1, float(-log(2))) # abs tol 1e-15
(1.2730972164471142+0j)
Check that :trac:`17130` is fixed::
sage: r = gamma_inc(float(0), float(1)); r
0.21938393439552029
sage: type(r)
<... 'float'>
"""
R = parent or s_parent(x)
# C is the complex version of R
# prec is the precision of R
if R is float:
prec = 53
C = complex
else:
try:
prec = R.precision()
except AttributeError:
prec = 53
try:
C = R.complex_field()
except AttributeError:
C = R
if algorithm == 'pari':
v = ComplexField(prec)(x).gamma_inc(y)
else:
import mpmath
v = ComplexField(prec)(mpmath_utils.call(mpmath.gammainc, x, y, parent=R))
if v.is_real():
return R(v)
else:
return C(v)
# synonym.
gamma_inc = Function_gamma_inc()
class Function_gamma_inc_lower(BuiltinFunction):
def __init__(self):
r"""
The lower incomplete gamma function.
It is defined by the integral
.. MATH::
            \gamma(a,z)=\int_0^z t^{a-1}e^{-t}\,\mathrm{d}t
EXAMPLES::
sage: gamma_inc_lower(CDF(0,1), 3)
-0.1581584032951798 - 0.5104218539302277*I
sage: gamma_inc_lower(RDF(1), 3)
0.950212931632136
sage: gamma_inc_lower(3, 2, hold=True)
gamma_inc_lower(3, 2)
sage: gamma_inc_lower(3, 2)
-10*e^(-2) + 2
sage: gamma_inc_lower(x, 0)
0
sage: latex(gamma_inc_lower(x, x))
\gamma\left(x, x\right)
sage: loads(dumps((gamma_inc_lower(x, x))))
gamma_inc_lower(x, x)
sage: i = ComplexField(30).0; gamma_inc_lower(2, 1 + i)
0.29290790 + 0.42035364*I
sage: gamma_inc_lower(2., 5)
0.959572318005487
Interfaces to other software::
sage: gamma_inc_lower(x,x)._sympy_()
lowergamma(x, x)
sage: maxima(gamma_inc_lower(x,x))
gamma_greek(_SAGE_VAR_x,_SAGE_VAR_x)
.. SEEALSO::
:class:`Function_gamma_inc`
"""
BuiltinFunction.__init__(self, "gamma_inc_lower", nargs=2, latex_name=r"\gamma",
conversions={'maxima':'gamma_greek', 'mathematica':'Gamma',
'maple':'GAMMA', 'sympy':'lowergamma', 'giac':'igamma'})
def _eval_(self, x, y):
"""
EXAMPLES::
sage: gamma_inc_lower(2.,0)
0.000000000000000
sage: gamma_inc_lower(2,0)
0
sage: gamma_inc_lower(1/2,2)
sqrt(pi)*erf(sqrt(2))
sage: gamma_inc_lower(1/2,1)
sqrt(pi)*erf(1)
sage: gamma_inc_lower(1/2,0)
0
sage: gamma_inc_lower(x,0)
0
sage: gamma_inc_lower(1,2)
-e^(-2) + 1
sage: gamma_inc_lower(0,2)
+Infinity
sage: gamma_inc_lower(2,377/79)
-456/79*e^(-377/79) + 1
sage: gamma_inc_lower(3,x)
-x^2*e^(-x) - 2*x*e^(-x) - 2*e^(-x) + 2
sage: gamma_inc_lower(9/2,37/7)
105/16*sqrt(pi)*erf(1/7*sqrt(259)) - 836473/19208*sqrt(259)*e^(-37/7)
"""
if y == 0:
return 0
if x == 0:
from sage.rings.infinity import Infinity
return Infinity
elif x == 1:
return 1-exp(-y)
elif (2*x).is_integer():
return self(x,y,hold=True)._sympy_()
else:
return None
def _evalf_(self, x, y, parent=None, algorithm='mpmath'):
"""
EXAMPLES::
sage: gamma_inc_lower(3,2.)
0.646647167633873
sage: gamma_inc_lower(3,2).n(200)
0.646647167633873081060005050275155...
sage: gamma_inc_lower(0,2.)
+infinity
"""
R = parent or s_parent(x)
# C is the complex version of R
# prec is the precision of R
if R is float:
prec = 53
C = complex
else:
try:
prec = R.precision()
except AttributeError:
prec = 53
try:
C = R.complex_field()
except AttributeError:
C = R
if algorithm == 'pari':
try:
v = ComplexField(prec)(x).gamma() - ComplexField(prec)(x).gamma_inc(y)
            except AttributeError:
                from sage.rings.complex_number import is_ComplexNumber
                if not (is_ComplexNumber(x)):
if is_ComplexNumber(y):
C = y.parent()
else:
C = ComplexField()
x = C(x)
v = ComplexField(prec)(x).gamma() - ComplexField(prec)(x).gamma_inc(y)
else:
import mpmath
v = ComplexField(prec)(mpmath_utils.call(mpmath.gammainc, x, 0, y, parent=R))
if v.is_real():
return R(v)
else:
return C(v)
def _derivative_(self, x, y, diff_param=None):
"""
EXAMPLES::
sage: x,y = var('x,y')
sage: gamma_inc_lower(x,y).diff(y)
y^(x - 1)*e^(-y)
sage: gamma_inc_lower(x,y).diff(x)
Traceback (most recent call last):
...
NotImplementedError: cannot differentiate gamma_inc_lower in the first parameter
"""
if diff_param == 0:
raise NotImplementedError("cannot differentiate gamma_inc_lower in the"
" first parameter")
else:
return exp(-y)*y**(x - 1)
# synonym.
gamma_inc_lower = Function_gamma_inc_lower()
def gamma(a, *args, **kwds):
r"""
Gamma and upper incomplete gamma functions in one symbol.
Recall that `\Gamma(n)` is `n-1` factorial::
sage: gamma(11) == factorial(10)
True
sage: gamma(6)
120
sage: gamma(1/2)
sqrt(pi)
sage: gamma(-4/3)
gamma(-4/3)
sage: gamma(-1)
Infinity
sage: gamma(0)
Infinity
::
sage: gamma_inc(3,2)
gamma(3, 2)
sage: gamma_inc(x,0)
gamma(x)
::
sage: gamma(5, hold=True)
gamma(5)
sage: gamma(x, 0, hold=True)
gamma(x, 0)
::
sage: gamma(CDF(I))
-0.15494982830181067 - 0.49801566811835607*I
sage: gamma(CDF(0.5,14))
-4.0537030780372815e-10 - 5.773299834553605e-10*I
Use ``numerical_approx`` to get higher precision from
symbolic expressions::
sage: gamma(pi).n(100)
2.2880377953400324179595889091
sage: gamma(3/4).n(100)
1.2254167024651776451290983034
The precision for the result is also deduced from the precision of the
input. Convert the input to a higher precision explicitly if a result
with higher precision is desired.::
sage: t = gamma(RealField(100)(2.5)); t
1.3293403881791370204736256125
sage: t.prec()
100
The gamma function only works with input that can be coerced to the
Symbolic Ring::
sage: Q.<i> = NumberField(x^2+1)
sage: gamma(i)
Traceback (most recent call last):
...
TypeError: cannot coerce arguments: no canonical coercion from Number Field in i with defining polynomial x^2 + 1 to Symbolic Ring
.. SEEALSO::
:class:`Function_gamma`
"""
if not args:
return gamma1(a, **kwds)
if len(args) > 1:
raise TypeError("Symbolic function gamma takes at most 2 arguments (%s given)"%(len(args)+1))
return gamma_inc(a,args[0],**kwds)
def incomplete_gamma(*args, **kwds):
"""
Deprecated name for :class:`Function_gamma_inc`.
TESTS::
sage: incomplete_gamma(1,1)
doctest:...: DeprecationWarning: Please use gamma_inc().
See http://trac.sagemath.org/16697 for details.
e^(-1)
"""
from sage.misc.superseded import deprecation
deprecation(16697, 'Please use gamma_inc().')
return gamma_inc(*args, **kwds)
# We have to add the wrapper function manually to the symbol_table when we have
# two functions with different number of arguments and the same name
symbol_table['functions']['gamma'] = gamma
class Function_psi1(GinacFunction):
def __init__(self):
r"""
The digamma function, `\psi(x)`, is the logarithmic derivative of the
gamma function.
.. MATH::
\psi(x) = \frac{d}{dx} \log(\Gamma(x)) = \frac{\Gamma'(x)}{\Gamma(x)}
EXAMPLES::
sage: from sage.functions.gamma import psi1
sage: psi1(x)
psi(x)
sage: psi1(x).derivative(x)
psi(1, x)
::
sage: psi1(3)
-euler_gamma + 3/2
::
sage: psi(.5)
-1.96351002602142
sage: psi(RealField(100)(.5))
-1.9635100260214234794409763330
TESTS::
sage: latex(psi1(x))
\psi\left(x\right)
sage: loads(dumps(psi1(x)+1))
psi(x) + 1
sage: t = psi1(x); t
psi(x)
sage: t.subs(x=.2)
-5.28903989659219
sage: psi(x)._sympy_()
polygamma(0, x)
"""
GinacFunction.__init__(self, "psi", nargs=1, latex_name='\psi',
conversions=dict(mathematica='PolyGamma',
maxima='psi[0]',
sympy='digamma'))
class Function_psi2(GinacFunction):
def __init__(self):
r"""
        Derivatives of the digamma function `\psi(x)`.
EXAMPLES::
sage: from sage.functions.gamma import psi2
sage: psi2(2, x)
psi(2, x)
sage: psi2(2, x).derivative(x)
psi(3, x)
sage: n = var('n')
sage: psi2(n, x).derivative(x)
psi(n + 1, x)
::
sage: psi2(0, x)
psi(x)
sage: psi2(-1, x)
log(gamma(x))
sage: psi2(3, 1)
1/15*pi^4
::
sage: psi2(2, .5).n()
-16.8287966442343
sage: psi2(2, .5).n(100)
-16.828796644234319995596334261
TESTS::
sage: psi2(n, x).derivative(n)
Traceback (most recent call last):
...
RuntimeError: cannot diff psi(n,x) with respect to n
sage: latex(psi2(2,x))
\psi\left(2, x\right)
sage: loads(dumps(psi2(2,x)+1))
psi(2, x) + 1
sage: psi(2, x)._sympy_()
polygamma(2, x)
"""
GinacFunction.__init__(self, "psi", nargs=2, latex_name='\psi',
conversions=dict(mathematica='PolyGamma',
sympy='polygamma',
giac='Psi'))
def _maxima_init_evaled_(self, *args):
"""
EXAMPLES:
These are indirect doctests for this function.::
sage: from sage.functions.gamma import psi2
sage: psi2(2, x)._maxima_()
psi[2](_SAGE_VAR_x)
sage: psi2(4, x)._maxima_()
psi[4](_SAGE_VAR_x)
"""
args_maxima = []
for a in args:
if isinstance(a, str):
args_maxima.append(a)
elif hasattr(a, '_maxima_init_'):
args_maxima.append(a._maxima_init_())
else:
args_maxima.append(str(a))
n, x = args_maxima
return "psi[%s](%s)"%(n, x)
psi1 = Function_psi1()
psi2 = Function_psi2()
def psi(x, *args, **kwds):
r"""
The digamma function, `\psi(x)`, is the logarithmic derivative of the
gamma function.
.. MATH::
\psi(x) = \frac{d}{dx} \log(\Gamma(x)) = \frac{\Gamma'(x)}{\Gamma(x)}
We represent the `n`-th derivative of the digamma function with
`\psi(n, x)` or `psi(n, x)`.
EXAMPLES::
sage: psi(x)
psi(x)
sage: psi(.5)
-1.96351002602142
sage: psi(3)
-euler_gamma + 3/2
sage: psi(1, 5)
1/6*pi^2 - 205/144
sage: psi(1, x)
psi(1, x)
sage: psi(1, x).derivative(x)
psi(2, x)
::
sage: psi(3, hold=True)
psi(3)
sage: psi(1, 5, hold=True)
psi(1, 5)
TESTS::
sage: psi(2, x, 3)
Traceback (most recent call last):
...
TypeError: Symbolic function psi takes at most 2 arguments (3 given)
"""
if not args:
return psi1(x, **kwds)
if len(args) > 1:
raise TypeError("Symbolic function psi takes at most 2 arguments (%s given)"%(len(args)+1))
return psi2(x,args[0],**kwds)
# We have to add the wrapper function manually to the symbol_table when we have
# two functions with different number of arguments and the same name
symbol_table['functions']['psi'] = psi
def _swap_psi(a, b): return psi(b, a)
register_symbol(_swap_psi, {'giac':'Psi'})
class Function_beta(GinacFunction):
def __init__(self):
r"""
Return the beta function. This is defined by
.. MATH::
\operatorname{B}(p,q) = \int_0^1 t^{p-1}(1-t)^{q-1} dt
for complex or symbolic input `p` and `q`.
Note that the order of inputs does not matter:
`\operatorname{B}(p,q)=\operatorname{B}(q,p)`.
GiNaC is used to compute `\operatorname{B}(p,q)`. However, complex inputs
are not yet handled in general. When GiNaC raises an error on
such inputs, we raise a NotImplementedError.
If either input is 1, GiNaC returns the reciprocal of the
other. In other cases, GiNaC uses one of the following
formulas:
.. MATH::
\operatorname{B}(p,q) = \frac{\Gamma(p)\Gamma(q)}{\Gamma(p+q)}
or
.. MATH::
\operatorname{B}(p,q) = (-1)^q \operatorname{B}(1-p-q, q).
For numerical inputs, GiNaC uses the formula
.. MATH::
\operatorname{B}(p,q) = \exp[\log\Gamma(p)+\log\Gamma(q)-\log\Gamma(p+q)]
INPUT:
- ``p`` - number or symbolic expression
- ``q`` - number or symbolic expression
OUTPUT: number or symbolic expression (if input is symbolic)
EXAMPLES::
sage: beta(3,2)
1/12
sage: beta(3,1)
1/3
sage: beta(1/2,1/2)
beta(1/2, 1/2)
sage: beta(-1,1)
-1
sage: beta(-1/2,-1/2)
0
sage: ex = beta(x/2,3)
sage: set(ex.operands()) == set([1/2*x, 3])
True
sage: beta(.5,.5)
3.14159265358979
sage: beta(1,2.0+I)
0.400000000000000 - 0.200000000000000*I
sage: ex = beta(3,x+I)
sage: set(ex.operands()) == set([x+I, 3])
True
The result is symbolic if exact input is given::
sage: ex = beta(2,1+5*I); ex
beta(...
sage: set(ex.operands()) == set([1+5*I, 2])
True
sage: beta(2, 2.)
0.166666666666667
sage: beta(I, 2.)
-0.500000000000000 - 0.500000000000000*I
sage: beta(2., 2)
0.166666666666667
sage: beta(2., I)
-0.500000000000000 - 0.500000000000000*I
sage: beta(x, x)._sympy_()
beta(x, x)
Test pickling::
sage: loads(dumps(beta))
beta
Check that :trac:`15196` is fixed::
sage: beta(-1.3,-0.4)
-4.92909641669610
"""
GinacFunction.__init__(self, 'beta', nargs=2,
latex_name=r"\operatorname{B}",
conversions=dict(maxima='beta',
mathematica='Beta',
sympy='beta',
fricas='Beta',
giac='Beta'))
beta = Function_beta()
|
py | b4167ecd28581a0c081b5a726254a6b85d90b23f | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
import ctypes
import os
import sys
# ============= local library imports ==========================
TOUPCAM_EVENT_EXPOSURE = 1 # exposure time changed
TOUPCAM_EVENT_TEMPTINT = 2 # white balance changed
TOUPCAM_EVENT_CHROME = 3 # reversed, do not use it
TOUPCAM_EVENT_IMAGE = 4 # live image arrived, use Toupcam_PullImage to get this image
TOUPCAM_EVENT_STILLIMAGE = 5 # snap (still) frame arrived, use Toupcam_PullStillImage to get this frame
TOUPCAM_EVENT_ERROR = 80 # something error happens
TOUPCAM_EVENT_DISCONNECTED = 81 # camera disconnected
root = os.path.dirname(__file__)
if sys.platform == 'darwin':
lib = ctypes.cdll.LoadLibrary(os.path.join(root, 'osx', 'libtoupcam.dylib'))
else:
directory = 'x64' if sys.maxsize > 2 ** 32 else 'x86'
# ext = 'lib' if sys.platform.startswith('linux') else 'dll'
if sys.platform.startswith('linux'):
name = 'libtoupcam.so'
lib = ctypes.cdll.LoadLibrary(os.path.join(root, directory, name))
else:
name = 'toupcam.dll'
lib = ctypes.windll.LoadLibrary(os.path.join(root, directory, name))
class HToupCam(ctypes.Structure):
_fields_ = [('unused', ctypes.c_int)]
def success(r):
"""
return true if r==0
:param r:
:return:
"""
return r == 0
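# --- Illustrative sketch (not part of the original module) ---
# success() maps the SDK's integer return codes to a boolean. A hypothetical
# call site (the exact Toupcam_PullImage argument list is an assumption and is
# not defined by this module) would look like:
#     hr = lib.Toupcam_PullImage(handle, buf, 24, None, None)
#     if not success(hr):
#         raise IOError('PullImage failed with code {}'.format(hr))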
# ============= EOF =============================================
|
py | b4167ef92c297fac8813a025c39fad050ede1f40 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import exceptions
from telemetry.testing import tab_test_case
import py_utils
class InspectorRuntimeTest(tab_test_case.TabTestCase):
def testRuntimeEvaluateSimple(self):
res = self._tab.EvaluateJavaScript('1+1')
assert res == 2
def testRuntimeEvaluateThatFails(self):
with self.assertRaises(exceptions.EvaluateException) as ex_context:
self._tab.EvaluateJavaScript('var x = 1;\nfsdfsdfsf')
exception_message = str(ex_context.exception)
self.assertIn('ReferenceError: fsdfsdfsf is not defined', exception_message)
def testRuntimeEvaluateOfSomethingThatCantJSONize(self):
def Test():
self._tab.EvaluateJavaScript("""
var cur = {};
var root = {next: cur};
for (var i = 0; i < 1000; i++) {
next = {};
cur.next = next;
cur = next;
}
root;""")
self.assertRaises(exceptions.EvaluateException, Test)
def testRuntimeExecuteOfSomethingThatCantJSONize(self):
self._tab.ExecuteJavaScript('window')
def testPromise(self):
promiseCommand = """
var promise1 = Promise.resolve(123);
promise1.then(function(value) {
return(value);
// expected output: 123
});"""
withPromise = self._tab.EvaluateJavaScript(promiseCommand, promise=True)
withoutPromise = self._tab.EvaluateJavaScript(promiseCommand, promise=False)
self.assertEqual(withPromise, 123)
self.assertEqual(withoutPromise, {})
noPromiseCommand = """
function test(){
return 456;}
test();"""
# A non promise function should work like normal, even when promise is True.
noPromise = self._tab.EvaluateJavaScript(noPromiseCommand, promise=True)
self.assertEqual(noPromise, 456)
def testIFrame(self):
self.Navigate('host.html')
# Access host page.
test_defined_js = "typeof(testVar) != 'undefined'"
self._tab.WaitForJavaScriptCondition(test_defined_js, timeout=10)
py_utils.WaitFor(
lambda: len(self._tab.EnableAllContexts()) >= 4, timeout=10)
self.assertEquals(self._tab.EvaluateJavaScript('testVar'), 'host')
def TestVarReady(context_id):
"""Returns True if the context and testVar are both ready."""
try:
return self._tab.EvaluateJavaScript(
test_defined_js, context_id=context_id)
except exceptions.EvaluateException:
# This happens when the context is not ready.
return False
def TestVar(context_id):
"""Waits for testVar and the context to be ready, then returns the value
of testVar."""
py_utils.WaitFor(lambda: TestVarReady(context_id), timeout=10)
return self._tab.EvaluateJavaScript('testVar', context_id=context_id)
all_contexts_list = list(self._tab.EnableAllContexts())
all_contexts_list.sort()
# Access parent page using EvaluateJavaScriptInContext.
host_context = all_contexts_list[0]
self.assertEquals(TestVar(host_context), 'host')
# Access the iframes without guarantees on which order they loaded.
iframe1 = TestVar(context_id=all_contexts_list[1])
iframe2 = TestVar(context_id=all_contexts_list[2])
iframe3 = TestVar(context_id=all_contexts_list[3])
self.assertEqual(set([iframe1, iframe2, iframe3]),
set(['iframe1', 'iframe2', 'iframe3']))
# Accessing a non-existent iframe throws an exception.
self.assertRaises(
exceptions.EvaluateException,
lambda: self._tab.EvaluateJavaScript(
'1+1', context_id=all_contexts_list[-1] + 1))
|
py | b4167f0ebf4ed3d4964c109acd1cd226c0f2fb1e | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import http.client as http_client
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from .service import DEFAULT_EXECUTEABLE_PATH, Service
class WebDriver(RemoteWebDriver):
"""
Controls the WPEWebKitDriver and allows you to drive the browser.
"""
def __init__(self, executable_path=DEFAULT_EXECUTEABLE_PATH,
port=0, options=None,
desired_capabilities=DesiredCapabilities.WPEWEBKIT,
service_log_path=None):
"""
Creates a new instance of the WPEWebKit driver.
Starts the service and then creates new instance of WPEWebKit Driver.
:Args:
- executable_path : path to the executable. If the default is used it assumes the executable is in the $PATH.
- port : port you would like the service to run, if left as 0, a free port will be found.
- options : an instance of WPEWebKitOptions
- desired_capabilities : Dictionary object with desired capabilities
- service_log_path : Path to write service stdout and stderr output.
"""
if options:
capabilities = options.to_capabilities()
capabilities.update(desired_capabilities)
desired_capabilities = capabilities
self.service = Service(executable_path, port=port, log_path=service_log_path)
self.service.start()
RemoteWebDriver.__init__(
self,
command_executor=self.service.service_url,
desired_capabilities=desired_capabilities)
self._is_remote = False
def quit(self):
"""
Closes the browser and shuts down the WPEWebKitDriver executable
that is started when starting the WPEWebKitDriver
"""
try:
RemoteWebDriver.quit(self)
except http_client.BadStatusLine:
pass
finally:
self.service.stop()
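# --- Illustrative sketch (not part of the original module) ---
# Typical usage mirrors the other Selenium drivers; the URL below is a
# placeholder and WPEWebKitDriver must be installed and reachable on $PATH.
#     driver = WebDriver()
#     try:
#         driver.get('https://example.org')
#         print(driver.title)
#     finally:
#         driver.quit()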
|
py | b4167f13bd01a1b0e3db3494f276605f29ff4a46 | '''
Name: motionDetection
Author: Infineon Technologies AG
Copyright: 2021 Infineon Technologies AG
Description: This example demonstrates how to detect a moving object while the shield is
connected to Raspberry Pi using polling method. Press CTRL+C to end this example.
Connection details:
-----------------------------------------------------
Pin on shield Connected to pin on Raspberry Pi 4B
-----------------------------------------------------
TD WiringPi 15 (header 8)
PD WiringPi 16 (header 10)
GND GND (e.g. header 6)
Vin 3.3V (e.g. header 1)
-----------------------------------------------------
Decoding on-board LED output of BGT60LTR11AIP shield:
- Red LED indicates the output of direction of motion once target is detected (PD)
---------------------------------------------
LED State Output explanation
---------------------------------------------
Red ON Departing target
OFF Approaching target
---------------------------------------------
- Green LED indicates the output of target in motion detection (TD)
---------------------------------------------
LED State Output explanation
---------------------------------------------
Green ON Moving target detected
OFF No target detected
---------------------------------------------
SPDX-License-Identifier: MIT
'''
import bgt60_py as bgt60
from time import sleep
# Define GPIO pins that will be connected to the shield
TD = 15
PD = 16
# Create radarShield object with following arguments:
# TD : Target Detect Pin
# PD : Phase Detect Pin
radarShield = bgt60.Bgt60Rpi(TD, PD)
# Configures the GPIO pins to input mode
init_status = radarShield.init()
# Check if the initialization was successful
if radarShield.OK != init_status:
print("Initialization failed!")
else:
print("Intialization successful!")
# Initialize the variable to radarShield.NO_MOTION to be able to record new events
motion = radarShield.NO_MOTION
while True:
# The getMotion() API does two things:
## 1. Returns the success or failure to detect moving object as a message of type Error_t.
## Any value other than OK indicates failure
## 2. Sets recent event in "motion" variable. Events can be: NO_MOTION or MOTION
err = radarShield.getMotion(motion)
# Check if API execution is successful
if err == radarShield.OK:
# Variable "motion" is set to radarShield.MOTION when moving target is detected
if motion == radarShield.MOTION:
print("Target in motion detected!")
# Variable "motion" is set to radarShield.NO_MOTION when moving target is not present
elif motion == radarShield.NO_MOTION:
print("No target in motion detected.")
# API execution returned error
else:
print("Error occured!")
# Wait 0.5 second
sleep(0.5)
|
py | b4167f1ee76befee5e08296ed55d9ceb66e8be28 | ''' Byteorder utilities for system - numpy byteorder encoding
Converts a variety of string codes for little endian, big endian,
native byte order and swapped byte order to explicit numpy endian
codes - one of '<' (little endian) or '>' (big endian)
'''
from __future__ import division, print_function, absolute_import
import sys
sys_is_le = sys.byteorder == 'little'
native_code = sys_is_le and '<' or '>'
swapped_code = sys_is_le and '>' or '<'
aliases = {'little': ('little', '<', 'l', 'le'),
'big': ('big', '>', 'b', 'be'),
'native': ('native', '='),
           'swapped': ('swapped', 's')}
def to_numpy_code(code):
"""
Convert various order codings to numpy format.
Parameters
----------
code : str
The code to convert. It is converted to lower case before parsing.
Legal values are:
'little', 'big', 'l', 'b', 'le', 'be', '<', '>', 'native', '=',
'swapped', 's'.
Returns
-------
out_code : {'<', '>'}
Here '<' is the numpy dtype code for little endian,
and '>' is the code for big endian.
Examples
--------
>>> import sys
>>> sys_is_le == (sys.byteorder == 'little')
True
>>> to_numpy_code('big')
'>'
>>> to_numpy_code('little')
'<'
>>> nc = to_numpy_code('native')
>>> nc == '<' if sys_is_le else nc == '>'
True
>>> sc = to_numpy_code('swapped')
>>> sc == '>' if sys_is_le else sc == '<'
True
"""
    if code is None:
        return native_code
    code = code.lower()
if code in aliases['little']:
return '<'
elif code in aliases['big']:
return '>'
elif code in aliases['native']:
return native_code
elif code in aliases['swapped']:
return swapped_code
else:
raise ValueError(
'We cannot handle byte order %s' % code)
|
py | b4167fa45d18203264eb9d5d355501d14022948f | import json
import os
from pprint import pprint
from . import CASES, EVENTS, FAILED_EVENTS
HERE = os.path.dirname(os.path.abspath(__file__))
PARENT = os.path.dirname(HERE)
DATA_DIR = os.path.join(PARENT, 'pipelinejob')
def get_jobs():
for filename, uuid, valid in CASES:
data = json.load(open(os.path.join(DATA_DIR, filename), 'r'))
test_struct = {'data': data, 'uuid': uuid, 'valid': valid}
yield test_struct
def get_events():
for filename, uuid, valid in EVENTS:
data = json.load(open(os.path.join(DATA_DIR, filename), 'r'))
test_struct = {'data': data, 'uuid': uuid, 'valid': valid}
yield test_struct
def get_events_wrong_uuid():
for filename, uuid, valid in FAILED_EVENTS:
data = json.load(open(os.path.join(DATA_DIR, filename), 'r'))
test_struct = {'data': data, 'uuid': uuid, 'valid': valid}
yield test_struct
|
py | b41680a2ad5b0cf65abc787a0b41410ba1b9b520 | """Download sticker packs from Telegram
"""
import argparse
import os
from pathlib import Path
from sys import exit as sysexit
from .downloader import StickerDownloader
def cli():
"""cli entry point"""
parser = argparse.ArgumentParser("Welcome to TStickers, providing all of your sticker needs")
parser.add_argument(
"-t",
"--token",
help="Pass in a bot token inline",
)
parser.add_argument(
"-p",
"--pack",
action="append",
nargs="+",
help="Pass in a pack url inline",
)
parser.add_argument(
"--frameskip",
default=1,
type=int,
help="Set frameskip. default=1",
)
parser.add_argument(
"--scale",
default=1,
type=float,
help="Set scale. default=1.0",
)
args = parser.parse_args()
# Get the token
token = args.token
if args.token is None:
token = ""
for candidate in [Path(os.getcwd() + "/env.txt"), Path(os.getcwd() + "/env")]:
if candidate.exists():
token = candidate.read_text(encoding="utf-8").strip()
if not token:
print(
'!! Generate a bot token and paste in a file called "env". Send a '
+ "message to @BotFather to get started"
)
sysexit(1)
# Get the packs
    packs = sum(args.pack, []) if args.pack else []
while True:
name = input("Enter sticker_set url (leave blank to stop): ").strip()
if name == "":
break
packs.append(name)
packs = [name.split("/")[-1] for name in packs]
downloader = StickerDownloader(token)
for pack in packs:
print("=" * 60)
stickerPack = downloader.getPack(pack)
if stickerPack is None:
continue
print("-" * 60)
_ = downloader.downloadPack(stickerPack)
print("-" * 60)
downloader.convertPack(pack, args.frameskip, args.scale)
if __name__ == "__main__":
cli()
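# --- Illustrative sketch (not part of the original module) ---
# Example invocation (the module name, bot token and pack URL are placeholders):
#     python -m tstickers -t <BOT_TOKEN> -p https://t.me/addstickers/SomePack --frameskip 2 --scale 0.5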
|
py | b41680d98b2de2b90080000def2bce2ccce2b2dd | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY') or 'seeker'
    GOOGLE_APPLICATION_CREDENTIALS = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS")
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False |
py | b41682e34338622c8bd6ee30920a04b74712e42e | import datetime
import hashlib
from typing import List
from unittest.mock import patch
import pytz
from constance.test import override_config
from django.conf import settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.utils import timezone
from freezegun import freeze_time
from posthog.email import EmailMessage, _send_email
from posthog.models import Event, MessagingRecord, Organization, Person, Team, User
from posthog.tasks.email import send_weekly_email_reports
from posthog.test.base import BaseTest
class TestEmail(BaseTest):
def create_person(self, team: Team, base_distinct_id: str = "") -> Person:
person = Person.objects.create(team=team)
person.add_distinct_id(base_distinct_id)
return person
@freeze_time("2020-09-21")
def setUp(self):
super().setUp()
self.organization = Organization.objects.create()
self.team = Team.objects.create(organization=self.organization, name="The Bakery")
self.user = User.objects.create(email="[email protected]")
self.user2 = User.objects.create(email="[email protected]")
self.user_red_herring = User.objects.create(email="[email protected]")
self.organization.members.add(self.user)
self.organization.members.add(self.user2)
self.organization.members.add(self.user_red_herring)
MessagingRecord.objects.get_or_create(
raw_email="[email protected]",
campaign_key=f"weekly_report_for_team_{self.team.pk}_on_2020-09-14",
defaults={"sent_at": timezone.now()},
) # This user should not get the emails
last_week = datetime.datetime(2020, 9, 17, 3, 22, tzinfo=pytz.UTC)
two_weeks_ago = datetime.datetime(2020, 9, 8, 19, 54, tzinfo=pytz.UTC)
self.persons: List = [self.create_person(self.team, str(i)) for i in range(0, 7)]
# Resurrected
self.persons[0].created_at = timezone.now() - datetime.timedelta(weeks=3)
self.persons[0].save()
self.persons[1].created_at = timezone.now() - datetime.timedelta(weeks=4)
self.persons[1].save()
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=0)
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=1)
# Retained
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=2)
Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=2)
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=3)
Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=3)
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=4)
Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=4)
# New
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=5)
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=5)
# Churned
Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=6)
def test_cant_send_emails_if_not_properly_configured(self) -> None:
with override_config(EMAIL_HOST=None):
with self.assertRaises(ImproperlyConfigured) as e:
EmailMessage("test_campaign", "Subject", "template")
self.assertEqual(
str(e.exception), "Email is not enabled in this instance.",
)
with override_config(EMAIL_ENABLED=False):
with self.assertRaises(ImproperlyConfigured) as e:
EmailMessage("test_campaign", "Subject", "template")
self.assertEqual(
str(e.exception), "Email is not enabled in this instance.",
)
@override_config(EMAIL_HOST="localhost")
def test_cant_send_same_campaign_twice(self) -> None:
sent_at = timezone.now()
record, _ = MessagingRecord.objects.get_or_create(raw_email="[email protected]", campaign_key="campaign_1")
record.sent_at = sent_at
record.save()
with self.settings(CELERY_TASK_ALWAYS_EAGER=True):
_send_email(
campaign_key="campaign_1",
to=[{"raw_email": "[email protected]", "recipient": "Test Posthog <[email protected]>"}],
subject="Test email",
headers={},
)
self.assertEqual(len(mail.outbox), 0)
record.refresh_from_db()
self.assertEqual(record.sent_at, sent_at)
@freeze_time("2020-09-21")
@override_config(EMAIL_HOST="localhost")
def test_weekly_email_report(self) -> None:
record_count: int = MessagingRecord.objects.count()
expected_recipients: List[str] = ["[email protected]", "[email protected]"]
with self.settings(CELERY_TASK_ALWAYS_EAGER=True, SITE_URL="http://localhost:9999"):
send_weekly_email_reports()
self.assertSetEqual({",".join(outmail.to) for outmail in mail.outbox}, set(expected_recipients))
self.assertEqual(
mail.outbox[0].subject, "PostHog weekly report for Sep 14, 2020 to Sep 20",
)
self.assertEqual(
mail.outbox[0].body, "",
) # no plain-text version support yet
html_message = mail.outbox[0].alternatives[0][0] # type: ignore
self.validate_basic_html(
html_message,
"http://localhost:9999",
preheader="Your PostHog weekly report is ready! Your team had 6 active users last week! 🎉",
)
# Ensure records are properly saved to prevent duplicate emails
self.assertEqual(MessagingRecord.objects.count(), record_count + 2)
for email in expected_recipients:
email_hash = hashlib.sha256(f"{settings.SECRET_KEY}_{email}".encode()).hexdigest()
record = MessagingRecord.objects.get(
email_hash=email_hash, campaign_key=f"weekly_report_for_team_{self.team.pk}_on_2020-09-14",
)
self.assertTrue((timezone.now() - record.sent_at).total_seconds() < 5)
@patch("posthog.tasks.email.EmailMessage")
@override_config(EMAIL_HOST="localhost")
@freeze_time("2020-09-21")
def test_weekly_email_report_content(self, mock_email_message):
with self.settings(CELERY_TASK_ALWAYS_EAGER=True):
send_weekly_email_reports()
self.assertEqual(
mock_email_message.call_args[1]["campaign_key"], f"weekly_report_for_team_{self.team.pk}_on_2020-09-14",
) # Campaign key
self.assertEqual(
mock_email_message.call_args[1]["subject"], "PostHog weekly report for Sep 14, 2020 to Sep 20",
) # Email subject
self.assertEqual(mock_email_message.call_args[1]["template_name"], "weekly_report")
template_context = mock_email_message.call_args[1]["template_context"]
self.assertEqual(template_context["team"], "The Bakery")
self.assertEqual(
template_context["period_start"], datetime.datetime(2020, 9, 14, tzinfo=pytz.UTC),
)
self.assertEqual(
template_context["period_end"], datetime.datetime(2020, 9, 20, 23, 59, 59, 999999, tzinfo=pytz.UTC),
)
self.assertEqual(
template_context["active_users"], 6,
)
self.assertEqual(
template_context["active_users_delta"], 0.5,
)
self.assertEqual(
round(template_context["user_distribution"]["new"], 2), 0.17,
)
self.assertEqual(
template_context["user_distribution"]["retained"], 0.5,
)
self.assertEqual(
round(template_context["user_distribution"]["resurrected"], 2), 0.33,
)
self.assertEqual(
template_context["churned_users"], {"abs": 1, "ratio": 0.25, "delta": None},
)
|
py | b4168323ea989e0bd413d5965d394132c4ce1baf | #!/usr/bin/env python
"""
weekly_summary_report.py [<log_file>]
Generates weekly summary report and prints it through stdout.
If <log_file> is specified:
[1] a copy of the report is written to <log_file>, and
[2] <log_file> will be "enrcp-ed" to
*srv2:/diska/tape-inventory/WEEKLY_SUMMARY, and
[3] the content of <log_file> will be mailed to $ENSTORE_MAIL
"""
import pg
import time
import option
import configuration_client
import sys
import os
# mailing address, if it is needed
mail_address = os.environ.get("ENSTORE_MAIL", "[email protected]")
# get a configuration client
intf = option.Interface()
csc = configuration_client.ConfigurationClient((intf.config_host, intf.config_port))
# get db hosts, ports, and names for enstore db and accounting db
database = csc.get('database')
accounting_server = csc.get('accounting_server')
# use volume_clerk['host'] to determine the system
volume_clerk = csc.get('volume_clerk')
# get inventory rcp directory
inventory = csc.get('inventory')
# connections to the databases
enstoredb = pg.DB(host=database['db_host'], port=database['db_port'], user=database['dbuser'],dbname=database['dbname'])
accdb = pg.DB(host=accounting_server['dbhost'], port=accounting_server.get('dbport', 5432), user=accounting_server['dbuser'],dbname=accounting_server['dbname'])
def eprint(ff, s):
if ff:
print >> ff, s
print s
if len(sys.argv) > 1:
f = open(sys.argv[1], 'w')
else:
f = None
if volume_clerk['host'][:3] == "d0e":
system = "D0"
else:
system = volume_clerk['host'][:3].upper()
# add [email protected] to CDF and D0 report
# add [email protected] to all reports
if system == 'D0' or system == 'CDF':
mail_address = mail_address + ' [email protected]'
if system == 'CDF':
mail_address = mail_address + ' [email protected]'
mail_address = mail_address + ' [email protected]'
eprint(f, "This report is generated at %s for %s system"%(
time.ctime(time.time()), system))
# calculate the reporting period
t = time.localtime(time.time())
t1 = time.mktime((t[0], t[1], t[2], 0, 0, 0, 0, 0, 0))
t2 = t1 - 60*60*24*7
eprint(f, "Reporting period: %s -- %s\n\n"%(
time.ctime(t2), time.ctime(t1-1)))
eprint(f, "=======================")
eprint(f, "Transfer in last 7 days")
eprint(f, "=======================")
eprint(f, accdb.query("select * from data_transfer_last_7days() order by storage_group;"))
eprint(f, "=============================")
eprint(f, "Tapes recycled in last 7 days")
eprint(f, "=============================")
eprint(f, enstoredb.query("select * from tapes_recycled_last_7days() order by media_type;"))
eprint(f, "=============================")
eprint(f, "Bytes recycled in last 7 days")
eprint(f, "=============================")
eprint(f, enstoredb.query("select * from bytes_deleted_last_7days() order by storage_group;"))
eprint(f, "================")
eprint(f, "Remaining blanks")
eprint(f, "================")
eprint(f, enstoredb.query("select * from remaining_blanks order by media_type;"))
eprint(f, "===========================")
eprint(f, "Blanks drawn in last 7 days")
eprint(f, "===========================")
eprint(f, accdb.query("select * from blanks_drawn_last_7days() order by media_type;"))
# if there is a file, copy it to *srv2:/diska/tape-inventory
if f:
f.close()
# copy it to *srv2
cmd = "enrcp %s %s"%(
sys.argv[1],
os.path.join(inventory['inventory_rcp_dir'], "WEEKLY_SUMMARY"))
print cmd
try:
os.system(cmd)
except:
print "Error: Can not", cmd
sys.exit(1)
# mail it out to $ENSTORE_MAIL
cmd = 'mail -s "Weekly Summary Report for %s System" %s < %s'%(
system, mail_address, sys.argv[1])
print cmd
try:
os.system(cmd)
except:
print "Error: Can not", cmd
sys.exit(1)
|
py | b41684520f2cecf85ecb517bad11eae72f9e3e73 | # -*- coding: utf-8 -*-
from werkzeug.routing import BaseConverter
class RegexConverter(BaseConverter):
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
class Reggie(object):
""" Enables Flask Regex Routes """
def __init__(self, app=None):
self.app = app
if self.app:
self.init_app(self.app)
def init_app(self, app):
""" Configures the Regex Converter """
app.url_map.converters['regex'] = RegexConverter
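# Usage sketch (hedged; the app and route below are illustrative, not part of this file):
#
#   from flask import Flask
#
#   app = Flask(__name__)
#   Reggie(app)
#
#   @app.route('/user/<regex("[a-z]{3,8}"):username>/')
#   def user(username):
#       return 'Hello %s' % username
#
# The converter simply hands items[0] to werkzeug as the rule's regex, so any
# valid Python regex can be used inside the route.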
|
py | b41684541ae067b4ddf6669a92e94380e7435f72 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Interactive shell based on Django:
#
# Copyright (c) 2005, the Lawrence Journal-World
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
CLI interface for nova management.
"""
from __future__ import print_function
import argparse
import decorator
import os
import sys
import netaddr
from oslo.config import cfg
from oslo import messaging
import six
from nova.api.ec2 import ec2utils
from nova import availability_zones
from nova.compute import flavors
from nova import config
from nova import context
from nova import db
from nova.db import migration
from nova import exception
from nova import objects
from nova.openstack.common import cliutils
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import quota
from nova import rpc
from nova import servicegroup
from nova import utils
from nova import version
CONF = cfg.CONF
CONF.import_opt('network_manager', 'nova.service')
CONF.import_opt('service_down_time', 'nova.service')
CONF.import_opt('flat_network_bridge', 'nova.network.manager')
CONF.import_opt('num_networks', 'nova.network.manager')
CONF.import_opt('multi_host', 'nova.network.manager')
CONF.import_opt('network_size', 'nova.network.manager')
CONF.import_opt('vlan_start', 'nova.network.manager')
CONF.import_opt('vpn_start', 'nova.network.manager')
CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
CONF.import_opt('public_interface', 'nova.network.linux_net')
QUOTAS = quota.QUOTAS
# Decorators for actions
def args(*args, **kwargs):
def _decorator(func):
func.__dict__.setdefault('args', []).insert(0, (args, kwargs))
return func
return _decorator
def param2id(object_id):
"""Helper function to convert various volume id types to internal id.
args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
"""
if '-' in object_id:
return ec2utils.ec2_vol_id_to_uuid(object_id)
else:
return object_id
class VpnCommands(object):
"""Class for managing VPNs."""
@args('--project', dest='project_id', metavar='<Project name>',
help='Project name')
@args('--ip', metavar='<IP Address>', help='IP Address')
@args('--port', metavar='<Port>', help='Port')
def change(self, project_id, ip, port):
"""Change the ip and port for a vpn.
this will update all networks associated with a project
not sure if that's the desired behavior or not, patches accepted
"""
# TODO(tr3buchet): perhaps this shouldn't update all networks
# associated with a project in the future
admin_context = context.get_admin_context()
networks = db.project_get_networks(admin_context, project_id)
for network in networks:
db.network_update(admin_context,
network['id'],
{'vpn_public_address': ip,
'vpn_public_port': int(port)})
class ShellCommands(object):
def bpython(self):
"""Runs a bpython shell.
Falls back to Ipython/python shell if unavailable
"""
self.run('bpython')
def ipython(self):
"""Runs an Ipython shell.
Falls back to Python shell if unavailable
"""
self.run('ipython')
def python(self):
"""Runs a python shell.
Falls back to Python shell if unavailable
"""
self.run('python')
@args('--shell', metavar='<bpython|ipython|python >',
help='Python shell')
def run(self, shell=None):
"""Runs a Python interactive interpreter."""
if not shell:
shell = 'bpython'
if shell == 'bpython':
try:
import bpython
bpython.embed()
except ImportError:
shell = 'ipython'
if shell == 'ipython':
try:
import IPython
# Explicitly pass an empty list as arguments, because
# otherwise IPython would use sys.argv from this script.
shell = IPython.Shell.IPShell(argv=[])
shell.mainloop()
except ImportError:
shell = 'python'
if shell == 'python':
import code
try:
# Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try',
# because we already know 'readline' was imported successfully.
readline.parse_and_bind("tab:complete")
code.interact()
@args('--path', metavar='<path>', help='Script path')
def script(self, path):
"""Runs the script from the specified path with flags set properly.
arguments: path
"""
exec(compile(open(path).read(), path, 'exec'), locals(), globals())
def _db_error(caught_exception):
print(caught_exception)
print(_("The above error may show that the database has not "
"been created.\nPlease create a database using "
"'nova-manage db sync' before running this command."))
exit(1)
class ProjectCommands(object):
"""Class for managing projects."""
@args('--project', dest='project_id', metavar='<Project name>',
help='Project name')
@args('--user', dest='user_id', metavar='<User name>',
help='User name')
@args('--key', metavar='<key>', help='Key')
@args('--value', metavar='<value>', help='Value')
def quota(self, project_id, user_id=None, key=None, value=None):
"""Create, update or display quotas for project/user
If no quota key is provided, the quota will be displayed.
If a valid quota key is provided and it does not exist,
it will be created. Otherwise, it will be updated.
"""
ctxt = context.get_admin_context()
if user_id:
quota = QUOTAS.get_user_quotas(ctxt, project_id, user_id)
else:
user_id = None
quota = QUOTAS.get_project_quotas(ctxt, project_id)
# if key is None, that means we need to show the quotas instead
# of updating them
if key:
settable_quotas = QUOTAS.get_settable_quotas(ctxt,
project_id,
user_id=user_id)
if key in quota:
minimum = settable_quotas[key]['minimum']
maximum = settable_quotas[key]['maximum']
if value.lower() == 'unlimited':
value = -1
if int(value) < -1:
print(_('Quota limit must be -1 or greater.'))
return(2)
if ((int(value) < minimum) and
(maximum != -1 or (maximum == -1 and int(value) != -1))):
print(_('Quota limit must be greater than %s.') % minimum)
return(2)
if maximum != -1 and int(value) > maximum:
print(_('Quota limit must be less than %s.') % maximum)
return(2)
try:
db.quota_create(ctxt, project_id, key, value,
user_id=user_id)
except exception.QuotaExists:
db.quota_update(ctxt, project_id, key, value,
user_id=user_id)
else:
print(_('%(key)s is not a valid quota key. Valid options are: '
'%(options)s.') % {'key': key,
'options': ', '.join(quota)})
return(2)
print_format = "%-36s %-10s %-10s %-10s"
print(print_format % (
_('Quota'),
_('Limit'),
_('In Use'),
_('Reserved')))
# Retrieve the quota after update
if user_id:
quota = QUOTAS.get_user_quotas(ctxt, project_id, user_id)
else:
quota = QUOTAS.get_project_quotas(ctxt, project_id)
for key, value in quota.iteritems():
if value['limit'] < 0 or value['limit'] is None:
value['limit'] = 'unlimited'
print(print_format % (key, value['limit'], value['in_use'],
value['reserved']))
@args('--project', dest='project_id', metavar='<Project name>',
help='Project name')
def scrub(self, project_id):
"""Deletes data associated with project."""
admin_context = context.get_admin_context()
networks = db.project_get_networks(admin_context, project_id)
for network in networks:
db.network_disassociate(admin_context, network['id'])
groups = db.security_group_get_by_project(admin_context, project_id)
for group in groups:
db.security_group_destroy(admin_context, group['id'])
AccountCommands = ProjectCommands
class FixedIpCommands(object):
"""Class for managing fixed ip."""
@args('--host', metavar='<host>', help='Host')
def list(self, host=None):
"""Lists all fixed ips (optionally by host)."""
ctxt = context.get_admin_context()
try:
if host is None:
fixed_ips = db.fixed_ip_get_all(ctxt)
else:
fixed_ips = db.fixed_ip_get_by_host(ctxt, host)
except exception.NotFound as ex:
print(_("error: %s") % ex)
return(2)
instances = db.instance_get_all(context.get_admin_context())
instances_by_uuid = {}
for instance in instances:
instances_by_uuid[instance['uuid']] = instance
print("%-18s\t%-15s\t%-15s\t%s" % (_('network'),
_('IP address'),
_('hostname'),
_('host')))
all_networks = {}
try:
# use network_get_all to retrieve all existing networks
# this is to ensure that IPs associated with deleted networks
# will not throw exceptions.
for network in db.network_get_all(context.get_admin_context()):
all_networks[network.id] = network
except exception.NoNetworksFound:
# do not have any networks, so even if there are IPs, these
# IPs should have been deleted ones, so return.
print(_('No fixed IP found.'))
return
has_ip = False
for fixed_ip in fixed_ips:
hostname = None
host = None
network = all_networks.get(fixed_ip['network_id'])
if network:
has_ip = True
if fixed_ip.get('instance_uuid'):
instance = instances_by_uuid.get(fixed_ip['instance_uuid'])
if instance:
hostname = instance['hostname']
host = instance['host']
else:
print(_('WARNING: fixed ip %s allocated to missing'
' instance') % str(fixed_ip['address']))
print("%-18s\t%-15s\t%-15s\t%s" % (
network['cidr'],
fixed_ip['address'],
hostname, host))
if not has_ip:
print(_('No fixed IP found.'))
@args('--address', metavar='<ip address>', help='IP address')
def reserve(self, address):
"""Mark fixed ip as reserved
arguments: address
"""
return self._set_reserved(address, True)
@args('--address', metavar='<ip address>', help='IP address')
def unreserve(self, address):
"""Mark fixed ip as free to use
arguments: address
"""
return self._set_reserved(address, False)
def _set_reserved(self, address, reserved):
ctxt = context.get_admin_context()
try:
fixed_ip = db.fixed_ip_get_by_address(ctxt, address)
if fixed_ip is None:
raise exception.NotFound('Could not find address')
db.fixed_ip_update(ctxt, fixed_ip['address'],
{'reserved': reserved})
except exception.NotFound as ex:
print(_("error: %s") % ex)
return(2)
class FloatingIpCommands(object):
"""Class for managing floating ip."""
@staticmethod
def address_to_hosts(addresses):
"""Iterate over hosts within an address range.
If an explicit range specifier is missing, the parameter is
interpreted as a specific individual address.
"""
try:
return [netaddr.IPAddress(addresses)]
except ValueError:
net = netaddr.IPNetwork(addresses)
if net.size < 4:
reason = _("/%s should be specified as single address(es) "
"not in cidr format") % net.prefixlen
raise exception.InvalidInput(reason=reason)
elif net.size >= 1000000:
# NOTE(dripton): If we generate a million IPs and put them in
# the database, the system will slow to a crawl and/or run
# out of memory and crash. This is clearly a misconfiguration.
reason = _("Too many IP addresses will be generated. Please "
"increase /%s to reduce the number generated."
) % net.prefixlen
raise exception.InvalidInput(reason=reason)
else:
return net.iter_hosts()
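    # Illustrative behaviour of address_to_hosts (hedged sketch, not a doctest):
    #   address_to_hosts('10.0.0.5')     -> [IPAddress('10.0.0.5')]
    #   address_to_hosts('10.0.0.0/30')  -> iterator over 10.0.0.1 and 10.0.0.2
    #   address_to_hosts('10.0.0.0/8')   -> raises InvalidInput (would generate too many IPs)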
@args('--ip_range', metavar='<range>', help='IP range')
@args('--pool', metavar='<pool>', help='Optional pool')
@args('--interface', metavar='<interface>', help='Optional interface')
def create(self, ip_range, pool=None, interface=None):
"""Creates floating ips for zone by range."""
admin_context = context.get_admin_context()
if not pool:
pool = CONF.default_floating_pool
if not interface:
interface = CONF.public_interface
ips = ({'address': str(address), 'pool': pool, 'interface': interface}
for address in self.address_to_hosts(ip_range))
try:
db.floating_ip_bulk_create(admin_context, ips)
except exception.FloatingIpExists as exc:
# NOTE(simplylizz): Maybe logging would be better here
# instead of printing, but logging isn't used here and I
# don't know why.
print('error: %s' % exc)
return(1)
@args('--ip_range', metavar='<range>', help='IP range')
def delete(self, ip_range):
"""Deletes floating ips by range."""
admin_context = context.get_admin_context()
ips = ({'address': str(address)}
for address in self.address_to_hosts(ip_range))
db.floating_ip_bulk_destroy(admin_context, ips)
@args('--host', metavar='<host>', help='Host')
def list(self, host=None):
"""Lists all floating ips (optionally by host).
Note: if host is given, only active floating IPs are returned
"""
ctxt = context.get_admin_context()
try:
if host is None:
floating_ips = db.floating_ip_get_all(ctxt)
else:
floating_ips = db.floating_ip_get_all_by_host(ctxt, host)
except exception.NoFloatingIpsDefined:
print(_("No floating IP addresses have been defined."))
return
for floating_ip in floating_ips:
instance_uuid = None
if floating_ip['fixed_ip_id']:
fixed_ip = db.fixed_ip_get(ctxt, floating_ip['fixed_ip_id'])
instance_uuid = fixed_ip['instance_uuid']
print("%s\t%s\t%s\t%s\t%s" % (floating_ip['project_id'],
floating_ip['address'],
instance_uuid,
floating_ip['pool'],
floating_ip['interface']))
@decorator.decorator
def validate_network_plugin(f, *args, **kwargs):
"""Decorator to validate the network plugin."""
if utils.is_neutron():
print(_("ERROR: Network commands are not supported when using the "
"Neutron API. Use python-neutronclient instead."))
return(2)
return f(*args, **kwargs)
class NetworkCommands(object):
"""Class for managing networks."""
@validate_network_plugin
@args('--label', metavar='<label>', help='Label for network (ex: public)')
@args('--fixed_range_v4', dest='cidr', metavar='<x.x.x.x/yy>',
help='IPv4 subnet (ex: 10.0.0.0/8)')
@args('--num_networks', metavar='<number>',
help='Number of networks to create')
@args('--network_size', metavar='<number>',
help='Number of IPs per network')
@args('--vlan', dest='vlan_start', metavar='<vlan id>', help='vlan id')
@args('--vpn', dest='vpn_start', help='vpn start')
@args('--fixed_range_v6', dest='cidr_v6',
          help='IPv6 subnet (ex: fe80::/64)')
@args('--gateway', help='gateway')
@args('--gateway_v6', help='ipv6 gateway')
@args('--bridge', metavar='<bridge>',
help='VIFs on this network are connected to this bridge')
@args('--bridge_interface', metavar='<bridge interface>',
help='the bridge is connected to this interface')
@args('--multi_host', metavar="<'T'|'F'>",
help='Multi host')
@args('--dns1', metavar="<DNS Address>", help='First DNS')
@args('--dns2', metavar="<DNS Address>", help='Second DNS')
@args('--uuid', metavar="<network uuid>", help='Network UUID')
@args('--fixed_cidr', metavar='<x.x.x.x/yy>',
help='IPv4 subnet for fixed IPS (ex: 10.20.0.0/16)')
@args('--project_id', metavar="<project id>",
help='Project id')
@args('--priority', metavar="<number>", help='Network interface priority')
def create(self, label=None, cidr=None, num_networks=None,
network_size=None, multi_host=None, vlan_start=None,
vpn_start=None, cidr_v6=None, gateway=None,
gateway_v6=None, bridge=None, bridge_interface=None,
dns1=None, dns2=None, project_id=None, priority=None,
uuid=None, fixed_cidr=None):
"""Creates fixed ips for host by range."""
kwargs = dict(((k, v) for k, v in locals().iteritems()
if v and k != "self"))
if multi_host is not None:
kwargs['multi_host'] = multi_host == 'T'
net_manager = importutils.import_object(CONF.network_manager)
net_manager.create_networks(context.get_admin_context(), **kwargs)
@validate_network_plugin
def list(self):
"""List all created networks."""
_fmt = "%-5s\t%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s"
print(_fmt % (_('id'),
_('IPv4'),
_('IPv6'),
_('start address'),
_('DNS1'),
_('DNS2'),
_('VlanID'),
_('project'),
_("uuid")))
try:
# Since network_get_all can throw exception.NoNetworksFound
# for this command to show a nice result, this exception
# should be caught and handled as such.
networks = db.network_get_all(context.get_admin_context())
except exception.NoNetworksFound:
print(_('No networks found'))
else:
for network in networks:
print(_fmt % (network.id,
network.cidr,
network.cidr_v6,
network.dhcp_start,
network.dns1,
network.dns2,
network.vlan,
network.project_id,
network.uuid))
@validate_network_plugin
@args('--fixed_range', metavar='<x.x.x.x/yy>', help='Network to delete')
@args('--uuid', metavar='<uuid>', help='UUID of network to delete')
def delete(self, fixed_range=None, uuid=None):
"""Deletes a network."""
if fixed_range is None and uuid is None:
raise Exception(_("Please specify either fixed_range or uuid"))
net_manager = importutils.import_object(CONF.network_manager)
if "NeutronManager" in CONF.network_manager:
if uuid is None:
raise Exception(_("UUID is required to delete "
"Neutron Networks"))
if fixed_range:
raise Exception(_("Deleting by fixed_range is not supported "
"with the NeutronManager"))
# delete the network
net_manager.delete_network(context.get_admin_context(),
fixed_range, uuid)
@validate_network_plugin
@args('--fixed_range', metavar='<x.x.x.x/yy>', help='Network to modify')
@args('--project', metavar='<project name>',
help='Project name to associate')
@args('--host', metavar='<host>', help='Host to associate')
@args('--disassociate-project', action="store_true", dest='dis_project',
default=False, help='Disassociate Network from Project')
@args('--disassociate-host', action="store_true", dest='dis_host',
default=False, help='Disassociate Host from Project')
def modify(self, fixed_range, project=None, host=None,
dis_project=None, dis_host=None):
"""Associate/Disassociate Network with Project and/or Host
arguments: network project host
leave any field blank to ignore it
"""
admin_context = context.get_admin_context()
network = db.network_get_by_cidr(admin_context, fixed_range)
net = {}
#User can choose the following actions each for project and host.
#1) Associate (set not None value given by project/host parameter)
#2) Disassociate (set None by disassociate parameter)
#3) Keep unchanged (project/host key is not added to 'net')
if dis_project:
net['project_id'] = None
if dis_host:
net['host'] = None
        # The --disassociate-X are boolean options, but if the user
        # mistakenly provides a value, it will be used as a positional argument
        # and be erroneously interpreted as some other parameter (e.g.
# a project instead of host value). The safest thing to do is error-out
# with a message indicating that there is probably a problem with
# how the disassociate modifications are being used.
if dis_project or dis_host:
if project or host:
error_msg = "ERROR: Unexpected arguments provided. Please " \
"use separate commands."
print(error_msg)
return(1)
db.network_update(admin_context, network['id'], net)
return
if project:
net['project_id'] = project
if host:
net['host'] = host
db.network_update(admin_context, network['id'], net)
class VmCommands(object):
"""Class for mangaging VM instances."""
@args('--host', metavar='<host>', help='Host')
def list(self, host=None):
"""Show a list of all instances."""
print(("%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s"
" %-10s %-10s %-10s %-5s" % (_('instance'),
_('node'),
_('type'),
_('state'),
_('launched'),
_('image'),
_('kernel'),
_('ramdisk'),
_('project'),
_('user'),
_('zone'),
_('index'))))
if host is None:
instances = db.instance_get_all(context.get_admin_context())
else:
instances = db.instance_get_all_by_host(
context.get_admin_context(), host)
for instance in instances:
instance_type = flavors.extract_flavor(instance)
print(("%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s"
" %-10s %-10s %-10s %-5d" % (instance['display_name'],
instance['host'],
instance_type['name'],
instance['vm_state'],
instance['launched_at'],
instance['image_ref'],
instance['kernel_id'],
instance['ramdisk_id'],
instance['project_id'],
instance['user_id'],
instance['availability_zone'],
instance['launch_index'])))
class ServiceCommands(object):
"""Enable and disable running services."""
@args('--host', metavar='<host>', help='Host')
@args('--service', metavar='<service>', help='Nova service')
def list(self, host=None, service=None):
"""Show a list of all running services. Filter by host & service
name
"""
servicegroup_api = servicegroup.API()
ctxt = context.get_admin_context()
services = db.service_get_all(ctxt)
services = availability_zones.set_availability_zones(ctxt, services)
if host:
services = [s for s in services if s['host'] == host]
if service:
services = [s for s in services if s['binary'] == service]
print_format = "%-16s %-36s %-16s %-10s %-5s %-10s"
print(print_format % (
_('Binary'),
_('Host'),
_('Zone'),
_('Status'),
_('State'),
_('Updated_At')))
for svc in services:
alive = servicegroup_api.service_is_up(svc)
art = (alive and ":-)") or "XXX"
active = 'enabled'
if svc['disabled']:
active = 'disabled'
print(print_format % (svc['binary'], svc['host'],
svc['availability_zone'], active, art,
svc['updated_at']))
@args('--host', metavar='<host>', help='Host')
@args('--service', metavar='<service>', help='Nova service')
def enable(self, host, service):
"""Enable scheduling for a service."""
ctxt = context.get_admin_context()
try:
svc = db.service_get_by_args(ctxt, host, service)
db.service_update(ctxt, svc['id'], {'disabled': False})
except exception.NotFound as ex:
print(_("error: %s") % ex)
return(2)
print((_("Service %(service)s on host %(host)s enabled.") %
{'service': service, 'host': host}))
@args('--host', metavar='<host>', help='Host')
@args('--service', metavar='<service>', help='Nova service')
def disable(self, host, service):
"""Disable scheduling for a service."""
ctxt = context.get_admin_context()
try:
svc = db.service_get_by_args(ctxt, host, service)
db.service_update(ctxt, svc['id'], {'disabled': True})
except exception.NotFound as ex:
print(_("error: %s") % ex)
return(2)
print((_("Service %(service)s on host %(host)s disabled.") %
{'service': service, 'host': host}))
def _show_host_resources(self, context, host):
"""Shows the physical/usage resource given by hosts.
:param context: security context
:param host: hostname
:returns:
example format is below::
{'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
'vcpus_used': 12, 'memory_mb_used': 10240,
'local_gb_used': 64}
"""
# Getting compute node info and related instances info
service_ref = db.service_get_by_compute_host(context, host)
instance_refs = db.instance_get_all_by_host(context,
service_ref['host'])
# Getting total available/used resource
compute_ref = service_ref['compute_node'][0]
resource = {'vcpus': compute_ref['vcpus'],
'memory_mb': compute_ref['memory_mb'],
'local_gb': compute_ref['local_gb'],
'vcpus_used': compute_ref['vcpus_used'],
'memory_mb_used': compute_ref['memory_mb_used'],
'local_gb_used': compute_ref['local_gb_used']}
usage = dict()
if not instance_refs:
return {'resource': resource, 'usage': usage}
# Getting usage resource per project
project_ids = [i['project_id'] for i in instance_refs]
project_ids = list(set(project_ids))
for project_id in project_ids:
vcpus = [i['vcpus'] for i in instance_refs
if i['project_id'] == project_id]
mem = [i['memory_mb'] for i in instance_refs
if i['project_id'] == project_id]
root = [i['root_gb'] for i in instance_refs
if i['project_id'] == project_id]
ephemeral = [i['ephemeral_gb'] for i in instance_refs
if i['project_id'] == project_id]
usage[project_id] = {'vcpus': sum(vcpus),
'memory_mb': sum(mem),
'root_gb': sum(root),
'ephemeral_gb': sum(ephemeral)}
return {'resource': resource, 'usage': usage}
@args('--host', metavar='<host>', help='Host')
def describe_resource(self, host):
"""Describes cpu/memory/hdd info for host.
:param host: hostname.
"""
try:
result = self._show_host_resources(context.get_admin_context(),
host=host)
except exception.NovaException as ex:
print(_("error: %s") % ex)
return 2
if not isinstance(result, dict):
print(_('An unexpected error has occurred.'))
print(_('[Result]'), result)
else:
# Printing a total and used_now
# (NOTE)The host name width 16 characters
print('%(a)-25s%(b)16s%(c)8s%(d)8s%(e)8s' % {"a": _('HOST'),
"b": _('PROJECT'),
"c": _('cpu'),
"d": _('mem(mb)'),
"e": _('hdd')})
print(('%(a)-16s(total)%(b)26s%(c)8s%(d)8s' %
{"a": host,
"b": result['resource']['vcpus'],
"c": result['resource']['memory_mb'],
"d": result['resource']['local_gb']}))
print(('%(a)-16s(used_now)%(b)23s%(c)8s%(d)8s' %
{"a": host,
"b": result['resource']['vcpus_used'],
"c": result['resource']['memory_mb_used'],
"d": result['resource']['local_gb_used']}))
# Printing a used_max
cpu_sum = 0
mem_sum = 0
hdd_sum = 0
for p_id, val in result['usage'].items():
cpu_sum += val['vcpus']
mem_sum += val['memory_mb']
hdd_sum += val['root_gb']
hdd_sum += val['ephemeral_gb']
print('%(a)-16s(used_max)%(b)23s%(c)8s%(d)8s' % {"a": host,
"b": cpu_sum,
"c": mem_sum,
"d": hdd_sum})
for p_id, val in result['usage'].items():
print('%(a)-25s%(b)16s%(c)8s%(d)8s%(e)8s' % {
"a": host,
"b": p_id,
"c": val['vcpus'],
"d": val['memory_mb'],
"e": val['root_gb'] + val['ephemeral_gb']})
class HostCommands(object):
"""List hosts."""
def list(self, zone=None):
"""Show a list of all physical hosts. Filter by zone.
args: [zone]
"""
print("%-25s\t%-15s" % (_('host'),
_('zone')))
ctxt = context.get_admin_context()
services = db.service_get_all(ctxt)
services = availability_zones.set_availability_zones(ctxt, services)
if zone:
services = [s for s in services if s['availability_zone'] == zone]
hosts = []
for srv in services:
if not [h for h in hosts if h['host'] == srv['host']]:
hosts.append(srv)
for h in hosts:
print("%-25s\t%-15s" % (h['host'], h['availability_zone']))
class DbCommands(object):
"""Class for managing the database."""
def __init__(self):
pass
@args('--version', metavar='<version>', help='Database version')
def sync(self, version=None):
"""Sync the database up to the most recent version."""
return migration.db_sync(version)
def version(self):
"""Print the current database version."""
print(migration.db_version())
@args('--max_rows', metavar='<number>',
help='Maximum number of deleted rows to archive')
def archive_deleted_rows(self, max_rows):
"""Move up to max_rows deleted rows from production tables to shadow
tables.
"""
if max_rows is not None:
max_rows = int(max_rows)
if max_rows < 0:
print(_("Must supply a positive value for max_rows"))
return(1)
admin_context = context.get_admin_context()
db.archive_deleted_rows(admin_context, max_rows)
class FlavorCommands(object):
"""Class for managing flavors.
Note instance type is a deprecated synonym for flavor.
"""
description = ('DEPRECATED: Use the nova flavor-* commands from '
'python-novaclient instead. The flavor subcommand will be '
'removed in the 2015.1 release')
def _print_flavors(self, val):
is_public = ('private', 'public')[val["is_public"] == 1]
print(("%s: Memory: %sMB, VCPUS: %s, Root: %sGB, Ephemeral: %sGb, "
"FlavorID: %s, Swap: %sMB, RXTX Factor: %s, %s, ExtraSpecs %s") % (
val["name"], val["memory_mb"], val["vcpus"], val["root_gb"],
val["ephemeral_gb"], val["flavorid"], val["swap"],
val["rxtx_factor"], is_public, val["extra_specs"]))
@args('--name', metavar='<name>',
help='Name of flavor')
@args('--memory', metavar='<memory size>', help='Memory size')
@args('--cpu', dest='vcpus', metavar='<num cores>', help='Number cpus')
@args('--root_gb', metavar='<root_gb>', help='Root disk size')
@args('--ephemeral_gb', metavar='<ephemeral_gb>',
help='Ephemeral disk size')
@args('--flavor', dest='flavorid', metavar='<flavor id>',
help='Flavor ID')
@args('--swap', metavar='<swap>', help='Swap')
@args('--rxtx_factor', metavar='<rxtx_factor>', help='rxtx_factor')
@args('--is_public', metavar='<is_public>',
help='Make flavor accessible to the public')
def create(self, name, memory, vcpus, root_gb, ephemeral_gb=0,
flavorid=None, swap=0, rxtx_factor=1.0, is_public=True):
"""Creates flavors."""
try:
flavors.create(name, memory, vcpus, root_gb,
ephemeral_gb=ephemeral_gb, flavorid=flavorid,
swap=swap, rxtx_factor=rxtx_factor,
is_public=is_public)
except exception.InvalidInput as e:
print(_("Must supply valid parameters to create flavor"))
print(e)
return 1
except exception.FlavorExists:
print(_("Flavor exists."))
print(_("Please ensure flavor name and flavorid are "
"unique."))
print(_("Currently defined flavor names and flavorids:"))
print()
self.list()
return 2
except Exception:
print(_("Unknown error"))
return 3
else:
print(_("%s created") % name)
@args('--name', metavar='<name>', help='Name of flavor')
def delete(self, name):
"""Marks flavors as deleted."""
try:
flavors.destroy(name)
except exception.FlavorNotFound:
print(_("Valid flavor name is required"))
return 1
except db_exc.DBError as e:
print(_("DB Error: %s") % e)
return(2)
except Exception:
return(3)
else:
print(_("%s deleted") % name)
@args('--name', metavar='<name>', help='Name of flavor')
def list(self, name=None):
"""Lists all active or specific flavors."""
try:
if name is None:
inst_types = flavors.get_all_flavors()
else:
inst_types = flavors.get_flavor_by_name(name)
except db_exc.DBError as e:
_db_error(e)
if isinstance(inst_types.values()[0], dict):
for k, v in inst_types.iteritems():
self._print_flavors(v)
else:
self._print_flavors(inst_types)
@args('--name', metavar='<name>', help='Name of flavor')
@args('--key', metavar='<key>', help='The key of the key/value pair')
@args('--value', metavar='<value>', help='The value of the key/value pair')
def set_key(self, name, key, value=None):
"""Add key/value pair to specified flavor's extra_specs."""
try:
try:
inst_type = flavors.get_flavor_by_name(name)
except exception.FlavorNotFoundByName as e:
print(e)
return(2)
ctxt = context.get_admin_context()
ext_spec = {key: value}
db.flavor_extra_specs_update_or_create(
ctxt,
inst_type["flavorid"],
ext_spec)
print((_("Key %(key)s set to %(value)s on instance "
"type %(name)s") %
{'key': key, 'value': value, 'name': name}))
except db_exc.DBError as e:
_db_error(e)
@args('--name', metavar='<name>', help='Name of flavor')
@args('--key', metavar='<key>', help='The key to be deleted')
def unset_key(self, name, key):
"""Delete the specified extra spec for flavor."""
try:
try:
inst_type = flavors.get_flavor_by_name(name)
except exception.FlavorNotFoundByName as e:
print(e)
return(2)
ctxt = context.get_admin_context()
db.flavor_extra_specs_delete(
ctxt,
inst_type["flavorid"],
key)
print((_("Key %(key)s on flavor %(name)s unset") %
{'key': key, 'name': name}))
except db_exc.DBError as e:
_db_error(e)
class AgentBuildCommands(object):
"""Class for managing agent builds."""
@args('--os', metavar='<os>', help='os')
@args('--architecture', dest='architecture',
metavar='<architecture>', help='architecture')
@args('--version', metavar='<version>', help='version')
@args('--url', metavar='<url>', help='url')
@args('--md5hash', metavar='<md5hash>', help='md5hash')
@args('--hypervisor', metavar='<hypervisor>',
help='hypervisor(default: xen)')
def create(self, os, architecture, version, url, md5hash,
hypervisor='xen'):
"""Creates a new agent build."""
ctxt = context.get_admin_context()
db.agent_build_create(ctxt, {'hypervisor': hypervisor,
'os': os,
'architecture': architecture,
'version': version,
'url': url,
'md5hash': md5hash})
@args('--os', metavar='<os>', help='os')
@args('--architecture', dest='architecture',
metavar='<architecture>', help='architecture')
@args('--hypervisor', metavar='<hypervisor>',
help='hypervisor(default: xen)')
def delete(self, os, architecture, hypervisor='xen'):
"""Deletes an existing agent build."""
ctxt = context.get_admin_context()
agent_build_ref = db.agent_build_get_by_triple(ctxt,
hypervisor, os, architecture)
db.agent_build_destroy(ctxt, agent_build_ref['id'])
@args('--hypervisor', metavar='<hypervisor>',
help='hypervisor(default: None)')
def list(self, hypervisor=None):
"""Lists all agent builds.
arguments: <none>
"""
fmt = "%-10s %-8s %12s %s"
ctxt = context.get_admin_context()
by_hypervisor = {}
for agent_build in db.agent_build_get_all(ctxt):
buildlist = by_hypervisor.get(agent_build.hypervisor)
if not buildlist:
buildlist = by_hypervisor[agent_build.hypervisor] = []
buildlist.append(agent_build)
for key, buildlist in by_hypervisor.iteritems():
if hypervisor and key != hypervisor:
continue
print(_('Hypervisor: %s') % key)
print(fmt % ('-' * 10, '-' * 8, '-' * 12, '-' * 32))
for agent_build in buildlist:
print(fmt % (agent_build.os, agent_build.architecture,
agent_build.version, agent_build.md5hash))
print(' %s' % agent_build.url)
print()
@args('--os', metavar='<os>', help='os')
@args('--architecture', dest='architecture',
metavar='<architecture>', help='architecture')
@args('--version', metavar='<version>', help='version')
@args('--url', metavar='<url>', help='url')
@args('--md5hash', metavar='<md5hash>', help='md5hash')
@args('--hypervisor', metavar='<hypervisor>',
help='hypervisor(default: xen)')
def modify(self, os, architecture, version, url, md5hash,
hypervisor='xen'):
"""Update an existing agent build."""
ctxt = context.get_admin_context()
agent_build_ref = db.agent_build_get_by_triple(ctxt,
hypervisor, os, architecture)
db.agent_build_update(ctxt, agent_build_ref['id'],
{'version': version,
'url': url,
'md5hash': md5hash})
class GetLogCommands(object):
"""Get logging information."""
def errors(self):
"""Get all of the errors from the log files."""
error_found = 0
if CONF.log_dir:
logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')]
for file in logs:
log_file = os.path.join(CONF.log_dir, file)
lines = [line.strip() for line in open(log_file, "r")]
lines.reverse()
print_name = 0
for index, line in enumerate(lines):
if line.find(" ERROR ") > 0:
error_found += 1
if print_name == 0:
print(log_file + ":-")
print_name = 1
linenum = len(lines) - index
print((_('Line %(linenum)d : %(line)s') %
{'linenum': linenum, 'line': line}))
if error_found == 0:
print(_('No errors in logfiles!'))
@args('--num_entries', metavar='<number of entries>',
help='number of entries(default: 10)')
def syslog(self, num_entries=10):
"""Get <num_entries> of the nova syslog events."""
entries = int(num_entries)
count = 0
log_file = ''
if os.path.exists('/var/log/syslog'):
log_file = '/var/log/syslog'
elif os.path.exists('/var/log/messages'):
log_file = '/var/log/messages'
else:
print(_('Unable to find system log file!'))
return(1)
lines = [line.strip() for line in open(log_file, "r")]
lines.reverse()
print(_('Last %s nova syslog entries:-') % (entries))
for line in lines:
if line.find("nova") > 0:
count += 1
print("%s" % (line))
if count == entries:
break
if count == 0:
print(_('No nova entries in syslog!'))
class CellCommands(object):
"""Commands for managing cells."""
@args('--name', metavar='<name>', help='Name for the new cell')
@args('--cell_type', metavar='<parent|child>',
help='Whether the cell is a parent or child')
@args('--username', metavar='<username>',
help='Username for the message broker in this cell')
@args('--password', metavar='<password>',
help='Password for the message broker in this cell')
@args('--hostname', metavar='<hostname>',
help='Address of the message broker in this cell')
@args('--port', metavar='<number>',
help='Port number of the message broker in this cell')
@args('--virtual_host', metavar='<virtual_host>',
help='The virtual host of the message broker in this cell')
@args('--woffset', metavar='<float>')
@args('--wscale', metavar='<float>')
def create(self, name, cell_type='child', username=None, password=None,
hostname=None, port=None, virtual_host=None,
woffset=None, wscale=None):
if cell_type not in ['parent', 'child']:
print("Error: cell type must be 'parent' or 'child'")
return(2)
# Set up the transport URL
transport_host = messaging.TransportHost(hostname=hostname,
port=int(port),
username=username,
password=password)
transport_url = rpc.get_transport_url()
transport_url.hosts.append(transport_host)
transport_url.virtual_host = virtual_host
is_parent = cell_type == 'parent'
values = {'name': name,
'is_parent': is_parent,
'transport_url': str(transport_url),
'weight_offset': float(woffset),
'weight_scale': float(wscale)}
ctxt = context.get_admin_context()
db.cell_create(ctxt, values)
@args('--cell_name', metavar='<cell_name>',
help='Name of the cell to delete')
def delete(self, cell_name):
ctxt = context.get_admin_context()
db.cell_delete(ctxt, cell_name)
def list(self):
ctxt = context.get_admin_context()
cells = db.cell_get_all(ctxt)
fmt = "%3s %-10s %-6s %-10s %-15s %-5s %-10s"
print(fmt % ('Id', 'Name', 'Type', 'Username', 'Hostname',
'Port', 'VHost'))
print(fmt % ('-' * 3, '-' * 10, '-' * 6, '-' * 10, '-' * 15,
'-' * 5, '-' * 10))
for cell in cells:
url = rpc.get_transport_url(cell.transport_url)
host = url.hosts[0] if url.hosts else messaging.TransportHost()
print(fmt % (cell.id, cell.name,
'parent' if cell.is_parent else 'child',
host.username, host.hostname,
host.port, url.virtual_host))
print(fmt % ('-' * 3, '-' * 10, '-' * 6, '-' * 10, '-' * 15,
'-' * 5, '-' * 10))
CATEGORIES = {
'account': AccountCommands,
'agent': AgentBuildCommands,
'cell': CellCommands,
'db': DbCommands,
'fixed': FixedIpCommands,
'flavor': FlavorCommands,
'floating': FloatingIpCommands,
'host': HostCommands,
'logs': GetLogCommands,
'network': NetworkCommands,
'project': ProjectCommands,
'service': ServiceCommands,
'shell': ShellCommands,
'vm': VmCommands,
'vpn': VpnCommands,
}
def methods_of(obj):
"""Get all callable methods of an object that don't start with underscore
returns a list of tuples of the form (method_name, method)
"""
result = []
for i in dir(obj):
if callable(getattr(obj, i)) and not i.startswith('_'):
result.append((i, getattr(obj, i)))
return result
def add_command_parsers(subparsers):
parser = subparsers.add_parser('version')
parser = subparsers.add_parser('bash-completion')
parser.add_argument('query_category', nargs='?')
for category in CATEGORIES:
command_object = CATEGORIES[category]()
desc = getattr(command_object, 'description', None)
parser = subparsers.add_parser(category, description=desc)
parser.set_defaults(command_object=command_object)
category_subparsers = parser.add_subparsers(dest='action')
for (action, action_fn) in methods_of(command_object):
parser = category_subparsers.add_parser(action, description=desc)
action_kwargs = []
for args, kwargs in getattr(action_fn, 'args', []):
# FIXME(markmc): hack to assume dest is the arg name without
# the leading hyphens if no dest is supplied
kwargs.setdefault('dest', args[0][2:])
if kwargs['dest'].startswith('action_kwarg_'):
action_kwargs.append(
kwargs['dest'][len('action_kwarg_'):])
else:
action_kwargs.append(kwargs['dest'])
kwargs['dest'] = 'action_kwarg_' + kwargs['dest']
parser.add_argument(*args, **kwargs)
parser.set_defaults(action_fn=action_fn)
parser.set_defaults(action_kwargs=action_kwargs)
parser.add_argument('action_args', nargs='*',
help=argparse.SUPPRESS)
category_opt = cfg.SubCommandOpt('category',
title='Command categories',
help='Available categories',
handler=add_command_parsers)
def main():
"""Parse options and call the appropriate class/method."""
CONF.register_cli_opt(category_opt)
try:
config.parse_args(sys.argv)
logging.setup("nova")
except cfg.ConfigFilesNotFoundError:
cfgfile = CONF.config_file[-1] if CONF.config_file else None
if cfgfile and not os.access(cfgfile, os.R_OK):
st = os.stat(cfgfile)
print(_("Could not read %s. Re-running with sudo") % cfgfile)
try:
os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
except Exception:
print(_('sudo failed, continuing as if nothing happened'))
print(_('Please re-run nova-manage as root.'))
return(2)
objects.register_all()
if CONF.category.name == "version":
print(version.version_string_with_package())
return(0)
if CONF.category.name == "bash-completion":
if not CONF.category.query_category:
print(" ".join(CATEGORIES.keys()))
elif CONF.category.query_category in CATEGORIES:
fn = CATEGORIES[CONF.category.query_category]
command_object = fn()
actions = methods_of(command_object)
print(" ".join([k for (k, v) in actions]))
return(0)
fn = CONF.category.action_fn
fn_args = [arg.decode('utf-8') for arg in CONF.category.action_args]
fn_kwargs = {}
for k in CONF.category.action_kwargs:
v = getattr(CONF.category, 'action_kwarg_' + k)
if v is None:
continue
if isinstance(v, six.string_types):
v = v.decode('utf-8')
fn_kwargs[k] = v
# call the action with the remaining arguments
# check arguments
try:
cliutils.validate_args(fn, *fn_args, **fn_kwargs)
except cliutils.MissingArgs as e:
# NOTE(mikal): this isn't the most helpful error message ever. It is
# long, and tells you a lot of things you probably don't want to know
# if you just got a single arg wrong.
print(fn.__doc__)
CONF.print_help()
print(e)
return(1)
try:
ret = fn(*fn_args, **fn_kwargs)
rpc.cleanup()
return(ret)
except Exception:
print(_("Command failed, please check log for more info"))
raise
|
py | b416847954d859cc83756337fd604349046cd8b6 | # noinspection PyShadowingBuiltins,PyUnusedLocal
def compute(x, y):
    if type(x) not in [int] or type(y) not in [int]:
        raise TypeError("The arguments must be integers")
    if (x < 0) or (x > 100) or (y < 0) or (y > 100):
        raise ValueError("The arguments must be in range 0-100")
    return x + y
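# Quick usage sketch (hedged; simply illustrates the contract enforced above):
#
#   compute(2, 3)     # -> 5
#   compute(2.0, 3)   # raises TypeError
#   compute(101, 3)   # raises ValueError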
|
py | b41684fb70e1b0f21bd201f8608e7ee09c20c44d | import bmw
import numpy as np
problem = bmw.Problem.parse(filepath='../../data/3-refined')
dat = np.load('../2-prod/test-0.npz')
constellation = dat['constellation']
constellation_type_indices = dat['constellation_type_indices']
metric = np.array([10 * problem.test_groups[index] - problem.test_set.counts[index] for index in range(len(problem.test_groups))])
# test_indices = np.argsort(problem.test_groups)
test_indices = np.argsort(metric)
for tindex2, tindex1 in enumerate(test_indices):
expression = problem.test_set.expressions[tindex1]
count = problem.test_set.counts[tindex1]
passes = [index for index, state in enumerate(constellation) if expression.evaluate(state)]
print('%3d : %3d %1d : %3d %3d' % (
tindex2,
tindex1,
problem.test_groups[tindex1],
len(passes),
count,
))
test_array = np.zeros((10, 400))
test_array[...] = -1
car_array = np.zeros((10, 400))
car_array[...] = -1
time_index = 0
slot_index = 0
counter = 0
while len(test_indices):
if counter > 3000:
break
counter += 1
for test_index in test_indices:
expression = problem.test_set.expressions[test_index]
count = problem.test_set.counts[test_index]
car_candidates = [index for index, state in enumerate(constellation) if expression.evaluate(state)]
np.random.shuffle(car_candidates)
# TODO: Try a different test if we can't fill
found = False
for car_index in car_candidates:
if car_index in car_array[:slot_index, time_index]: continue
car_array[slot_index, time_index] = car_index
test_array[slot_index, time_index] = test_index
slot_index += 1
if slot_index == test_array.shape[0]:
slot_index = 0
time_index += 1
found = True
break
if not found:
slot_index = 0
time_index += 1
else:
# test_indices = test_indices[1:]
print(count, np.sum(test_array == test_index))
if np.sum(test_array == test_index) >= count:
test_indices = test_indices[1:]
break
print(car_array[:, :20])
s = ''
for col_index, col in enumerate(car_array.T):
# print(row)
if np.max(col) < 0.0: break
v = ''.join([' ' if _ == -1 else 'O' for _ in col])
print('%3d: %s' % (col_index, v))
# s += ''.join([' ' if _ == -1 else 'O' for _ in row])
# s += '\n'
# print(s)
|
py | b416858219846e6404fa26755b8edadc89f90bac | # Natural Language Toolkit: NLTK's very own tokenizer.
#
# Copyright (C) 2001-2020 NLTK Project
# Author:
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT
import re
from nltk.tokenize.api import TokenizerI
class MacIntyreContractions:
"""
List of contractions adapted from Robert MacIntyre's tokenizer.
"""
CONTRACTIONS2 = [
r"(?i)\b(can)(?#X)(not)\b",
r"(?i)\b(d)(?#X)('ye)\b",
r"(?i)\b(gim)(?#X)(me)\b",
r"(?i)\b(gon)(?#X)(na)\b",
r"(?i)\b(got)(?#X)(ta)\b",
r"(?i)\b(lem)(?#X)(me)\b",
r"(?i)\b(more)(?#X)('n)\b",
r"(?i)\b(wan)(?#X)(na)\s",
]
CONTRACTIONS3 = [r"(?i) ('t)(?#X)(is)\b", r"(?i) ('t)(?#X)(was)\b"]
CONTRACTIONS4 = [r"(?i)\b(whad)(dd)(ya)\b", r"(?i)\b(wha)(t)(cha)\b"]
class NLTKWordTokenizer(TokenizerI):
"""
The NLTK tokenizer that has improved upon the TreebankWordTokenizer.
The tokenizer is "destructive" such that the regexes applied will munge the
input string to a state beyond re-construction. It is possible to apply
`TreebankWordDetokenizer.detokenize` to the tokenized outputs of
`NLTKDestructiveWordTokenizer.tokenize` but there's no guarantees to
revert to the original string.
"""
# Starting quotes.
STARTING_QUOTES = [
(re.compile(u"([«“‘„]|[`]+)", re.U), r" \1 "),
(re.compile(r"^\""), r"``"),
(re.compile(r"(``)"), r" \1 "),
(re.compile(r"([ \(\[{<])(\"|\'{2})"), r"\1 `` "),
(re.compile(r"(?i)(\')(?!re|ve|ll|m|t|s|d|n)(\w)\b", re.U), r"\1 \2"),
]
# Ending quotes.
ENDING_QUOTES = [
(re.compile(u"([»”’])", re.U), r" \1 "),
(re.compile(r'"'), " '' "),
(re.compile(r"(\S)(\'\')"), r"\1 \2 "),
(re.compile(r"([^' ])('[sS]|'[mM]|'[dD]|') "), r"\1 \2 "),
(re.compile(r"([^' ])('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1 \2 "),
]
# For improvements for starting/closing quotes from TreebankWordTokenizer,
# see discussion on https://github.com/nltk/nltk/pull/1437
# Adding to TreebankWordTokenizer, nltk.word_tokenize now splits on
    # - chevron quotes u'\xab' and u'\xbb' .
# - unicode quotes u'\u2018', u'\u2019', u'\u201c' and u'\u201d'
# See https://github.com/nltk/nltk/issues/1995#issuecomment-376741608
# Also, behavior of splitting on clitics now follows Stanford CoreNLP
# - clitics covered (?!re|ve|ll|m|t|s|d)(\w)\b
# Punctuation.
PUNCTUATION = [
(re.compile(r'([^\.])(\.)([\]\)}>"\'' u"»”’ " r"]*)\s*$", re.U), r"\1 \2 \3 "),
(re.compile(r"([:,])([^\d])"), r" \1 \2"),
(re.compile(r"([:,])$"), r" \1 "),
(re.compile(r"\.{2,}", re.U), r" \g<0> "), # See https://github.com/nltk/nltk/pull/2322
(re.compile(r"[;@#$%&]"), r" \g<0> "),
(
re.compile(r'([^\.])(\.)([\]\)}>"\']*)\s*$'),
r"\1 \2\3 ",
), # Handles the final period.
(re.compile(r"[?!]"), r" \g<0> "),
(re.compile(r"([^'])' "), r"\1 ' "),
(re.compile(r"[*]", re.U), r" \g<0> "), # See https://github.com/nltk/nltk/pull/2322
]
# Pads parentheses
PARENS_BRACKETS = (re.compile(r"[\]\[\(\)\{\}\<\>]"), r" \g<0> ")
# Optionally: Convert parentheses, brackets and converts them to PTB symbols.
CONVERT_PARENTHESES = [
(re.compile(r"\("), "-LRB-"),
(re.compile(r"\)"), "-RRB-"),
(re.compile(r"\["), "-LSB-"),
(re.compile(r"\]"), "-RSB-"),
(re.compile(r"\{"), "-LCB-"),
(re.compile(r"\}"), "-RCB-"),
]
DOUBLE_DASHES = (re.compile(r"--"), r" -- ")
# List of contractions adapted from Robert MacIntyre's tokenizer.
_contractions = MacIntyreContractions()
CONTRACTIONS2 = list(map(re.compile, _contractions.CONTRACTIONS2))
CONTRACTIONS3 = list(map(re.compile, _contractions.CONTRACTIONS3))
def tokenize(self, text, convert_parentheses=False, return_str=False):
for regexp, substitution in self.STARTING_QUOTES:
text = regexp.sub(substitution, text)
for regexp, substitution in self.PUNCTUATION:
text = regexp.sub(substitution, text)
# Handles parentheses.
regexp, substitution = self.PARENS_BRACKETS
text = regexp.sub(substitution, text)
# Optionally convert parentheses
if convert_parentheses:
for regexp, substitution in self.CONVERT_PARENTHESES:
text = regexp.sub(substitution, text)
# Handles double dash.
regexp, substitution = self.DOUBLE_DASHES
text = regexp.sub(substitution, text)
# add extra space to make things easier
text = " " + text + " "
for regexp, substitution in self.ENDING_QUOTES:
text = regexp.sub(substitution, text)
for regexp in self.CONTRACTIONS2:
text = regexp.sub(r" \1 \2 ", text)
for regexp in self.CONTRACTIONS3:
text = regexp.sub(r" \1 \2 ", text)
# We are not using CONTRACTIONS4 since
# they are also commented out in the SED scripts
# for regexp in self._contractions.CONTRACTIONS4:
# text = regexp.sub(r' \1 \2 \3 ', text)
return text if return_str else text.split()
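# Illustrative usage sketch (added; not part of the original NLTK source):
# exercises the destructive tokenization described in the class docstring.
# The sample sentence is an arbitrary choice.
if __name__ == "__main__":
    _tok = NLTKWordTokenizer()
    _sample = "Gimme a test (with parentheses), can't you?"
    print(_tok.tokenize(_sample))
    print(_tok.tokenize(_sample, convert_parentheses=True))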
|
py | b416864457ad442de1f5276a7713161a088dcf87 | import os
import json
import numpy as np
import pytest
from requests import HTTPError
from world_trade_data import get_indicator, get_tariff_reported, get_tariff_estimated
from world_trade_data.data import _wits_data_to_df
def test_get_indicator():
df = get_indicator(reporter='usa',
year='2000',
partner='wld',
product='fuels',
indicator='AHS-SMPL-AVRG',
datasource='tradestats-tariff')
assert len(df.index)
def test_get_indicator2():
df = get_indicator(reporter='usa',
year='2017',
partner='wld',
product='all',
indicator='MPRT-TRD-VL',
datasource='tradestats-trade')
assert len(df.index)
def test_get_tariff_reported():
df = get_tariff_reported(reporter='840', partner='all', product='970600')
assert len(df.index) == 1
assert df.Value.dtype == np.float64
def test_get_tariff_estimated():
df = get_tariff_estimated(reporter='840', partner='000', product='970600')
assert len(df.index) == 1
assert df.Value.dtype == np.float64
def test_tariff_data_to_df():
current_path = os.path.dirname(__file__)
sample_file = os.path.join(current_path, 'data', 'sample_tariff_data.json')
with open(sample_file) as fp:
data = json.load(fp)
df = _wits_data_to_df(data, 'Rate')
assert len(df.index) > 1
assert len(df.columns) > 1
def test_trade_data_to_df():
current_path = os.path.dirname(__file__)
sample_file = os.path.join(current_path, 'data', 'sample_trade_data.json')
with open(sample_file) as fp:
data = json.load(fp)
df = _wits_data_to_df(data)
assert len(df.index) > 1
assert len(df.columns) > 1
def test_warning_on_request_all_reporter_partner(caplog):
with pytest.raises(HTTPError, match='Request Entity Too Large'):
get_indicator(reporter='all',
partner='all',
year='2017',
product='fuels',
indicator='MPRT-TRD-VL',
datasource='tradestats-trade')
assert 'Limitation on Data Request' in caplog.text
def test_warning_on_allx3(caplog):
with pytest.raises(HTTPError, match='Request Entity Too Large'):
get_indicator(reporter='usa',
partner='all',
year='all',
product='all',
indicator='MPRT-TRD-VL',
datasource='tradestats-trade')
assert 'Limitation on Data Request' in caplog.text
|
py | b41687c8d8f3f104724ed9c3e335835b20c50a96 | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['Lag1Trend'] , ['BestCycle'] , ['AR'] ); |
py | b41688539e1224a50984b88d2dbcdb52e4a94eb5 | """
Definition of models.
"""
from django.db import models
class Student(models.Model):
name = models.CharField(max_length = 128)
branch = models.CharField(max_length = 128)
semester = models.CharField(max_length = 1)
def __str__(self):
return self.name
class Subject(models.Model):
students = models.ForeignKey(Student, on_delete = models.CASCADE)
sub_name = models.CharField(max_length = 128)
class Lecture(models.Model):
subject = models.ForeignKey(Subject, on_delete = models.CASCADE)
date = models.CharField(max_length = 20)
class Attendance(models.Model):
lecture = models.ForeignKey(Lecture, on_delete = models.CASCADE)
    student = models.ForeignKey(Student, on_delete = models.CASCADE)
attendance = models.CharField(max_length = 1)
|
py | b416885583a7c36c79c66c336e39882abd4fb6bd | """
Copyright 2019 Jeff Klesky
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import sys
import time
from flentsqm.controller import DownPassController
from flentsqm.naming import protect_for_filename
from flentsqm.runs import RunCollector
from flentsqm.router import Router
try:
device = sys.argv[1]
except IndexError:
print(f"Usage: {sys.argv[0]} 'Device Name'")
exit(1)
tunnels = ["WireGuard"]
tests = ["tcp_8down"]
router = Router()
nowstr = time.strftime("%Y-%m-%d_%H%M")
basedir = protect_for_filename(f"{device}_{nowstr}")
try:
os.mkdir(basedir)
except FileExistsError as e:
print(f"Output directory '{basedir}' already exists. Wait a minute and try again.")
exit(1)
class DownScan(DownPassController):
def always_fail(self, run):
return self.current_target < 10
def __init__(self, run_collector: RunCollector, router: Router, device, test, tunnel, destdir, logname,
factor=0.7, start_at=380):
super().__init__(run_collector, router, device, test, tunnel, destdir, logname, factor, start_at)
self.run_successful_test = self.always_fail
self.target_failure_requires = 1
self.target_success_requires = 1
for tunnel in tunnels:
destdir = os.path.join(basedir,
protect_for_filename(tunnel or "None"))
try:
os.mkdir(destdir)
except FileExistsError as e:
pass
for test in tests:
rc_sqm = RunCollector(device=device, tunnel=tunnel, test=test)
down_pass = DownScan(run_collector=rc_sqm,
router=router, device=device, test=test, tunnel=tunnel,
destdir=destdir, logname=f"{destdir}/{test}_sqm.log")
down_pass.start()
|
py | b4168883cf99b501bb831650d0683b3047da0387 | import paho.mqtt.client as mqtt
import json
import mqttsqlite.settings.private_settings as Settings
MANAGEMENT_PASSWORD = Settings.QUERY_PASSWORD
MQTT_HOST = Settings.MQTT_HOST
MQTT_PORT = Settings.MQTT_PORT
ROOT_TOPIC = Settings.ROOT_TOPIC
desired_topic = 'salon/humedad'
payload = {}
payload['client'] = 'simple_example'
payload['topic'] = desired_topic
payload['options'] = 20
payload['password'] = MANAGEMENT_PASSWORD
def on_connect(client, userdata, flags, rc):
client_topic = ROOT_TOPIC + 'log/query/minutes'
client.subscribe(ROOT_TOPIC + 'response')
client.publish(client_topic, json.dumps(payload))
def on_message(client, userdata, msg):
received_data = json.loads(msg.payload)
if 'client' in received_data:
if received_data['client'] == payload['client']:
print('Received Message from Logger: ')
print(received_data)
client.disconnect()
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(MQTT_HOST, MQTT_PORT, 60)
client.loop_forever()
|
py | b4168948de79b0c3cecc76c23b49f765ea93f2ab | #!/usr/bin/env python
# Author: L. Tattrie
# Purpose: svgwrite examples
# Created: 2012/5/31
# Copyright (C) 2012, L. Tattrie
# License: LGPL
# Python version 2.7
import sys
import svgwrite
#
# http://www.w3.org/TR/SVG/coords.html#Units
# The supported length unit identifiers are: em, ex, px, pt, pc, cm, mm, in, and percentages.
#
PROGNAME = sys.argv[0].rstrip('.py')
def create_svg(name):
SVG_SIZE = 900
FONT_SIZE = 20
title1 = name + ': Example of units of length'
title2 = name + ': Example of class Unit and import from svgwrite cm, mm'
sample = (
('px', ' one user unit which may or may not be pixels. This is the default if no units are specified.'),
('pt', ' 1.25px.'),
('mm', ' 3.543307px.'),
('ex', " the current font's height of the character x."),
('%', ' percent of the size of the viewport.'),
('pc', ' 15px.'),
('em', " the current font's height."),
('cm', ' 35.43307px.'),
('in', ' 90px.')
)
dwg = svgwrite.Drawing(name, (SVG_SIZE, SVG_SIZE))
# background will be white.
dwg.add(dwg.rect(insert=(0, 0), size=('100%', '100%'), fill='white'))
# give the name of the example and a title.
y = FONT_SIZE + 5
x = 5
linespace = FONT_SIZE * 2
group = dwg.add(dwg.g(font_family="serif", font_size=FONT_SIZE, fill='black'))
group.add(dwg.text(title1, insert=(x, y)))
for i, sample_item in enumerate(sample):
y += linespace
unit = sample_item[0]
group.add(dwg.rect(insert=(0, y), size=('1' + unit, '3px'), fill='red'))
group.add(dwg.text("size='1%s': %s" % sample_item, insert=('2in', y+3)))
# Show the use of class Unit
y += linespace
textlines = (
title2,
'The Unit class overrides the right hand multiply',
'2*3*cm returns the string "6cm"'
)
for txt in textlines:
group.add(dwg.text(txt, insert=(x, y)))
y += linespace
dwg.save()
if __name__ == '__main__':
create_svg(PROGNAME + '.svg')
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 textwidth=99
|
py | b41689659ed3704465912f7be815d8c03251743c | import tornado.web
class BaseStaticFileHandler(tornado.web.StaticFileHandler):
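    # Descriptive note (added): this handler disables HTTP caching for static
    # files. compute_etag() returning None suppresses the ETag header, and
    # get_cache_time() returning 0 keeps responses from being cached client-side.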
def compute_etag(self):
return None
def get_cache_time(self, path, modified, mime_type):
return 0
|
py | b41689ce4b973dd93985346f114e9b04e5d5e5bb | from typing import List, Union
import pandas as pd
from pd_utils.optimize.typing import PdDTypeQuadTuple, StrDict
def optimized_df(df: pd.DataFrame) -> pd.DataFrame:
columns = [col for col in df.columns]
type_dfs: PdDTypeQuadTuple = _optimized_dtype_dfs(df)
return pd.concat(type_dfs, axis=1)[columns]
def _optimized_dtype_dfs(df: pd.DataFrame) -> PdDTypeQuadTuple:
obj_df = df.select_dtypes(include=["object"])
if not obj_df.empty:
obj_df = obj_df.astype("category")
int_df = df.select_dtypes(include=["int"])
if not int_df.empty:
int_df = int_df.apply(pd.to_numeric, downcast="unsigned")
float_df = df.select_dtypes(include=["float"])
if not float_df.empty:
float_df = float_df.apply(pd.to_numeric, downcast="float")
type_dfs = (obj_df, int_df, float_df)
optimized_columns: List[Union[str, float, int]] = []
for type_df in type_dfs:
optimized_columns += [col for col in type_df.columns]
# Excluded dtype should just be 'datetime', which does not need conversion
excluded_columns = [col for col in df.columns if col not in optimized_columns]
return type_dfs + (df[excluded_columns],)
def df_types_dict(df: pd.DataFrame, remove_dates=True) -> StrDict:
df_types_dict = _df_types_dict(df)
if remove_dates:
return {
col_name: dtype
for col_name, dtype in df_types_dict.items()
if "date" not in dtype
}
else:
return df_types_dict
def _df_types_dict(df: pd.DataFrame) -> StrDict:
return df.dtypes.apply(lambda x: str(x)).to_dict()
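# Illustrative usage sketch (added; not part of the original module): builds a
# small mixed-dtype frame and shows the dtype downcasting done by optimized_df
# together with the dtype map returned by df_types_dict.
if __name__ == "__main__":
    _df = pd.DataFrame({
        "label": ["a", "b", "a", "c"],      # object -> category
        "count": [1, 2, 3, 4],              # int64 -> smallest unsigned int
        "value": [0.1, 0.2, 0.3, 0.4],      # float64 -> float32
    })
    _opt = optimized_df(_df)
    print(_opt.dtypes)
    print(df_types_dict(_opt))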
|
py | b4168d23bbb7136b2ec7be87624fdd54e206c8ae | # -*- coding: utf-8 -*-
'''
@Time : 2020/05/06 21:05
@Author : Tianxiaomo
@File : Cfg.py
@Notice :
@Modification :
@Author :
@Time :
@Detail :
'''
import os
from easydict import EasyDict
_BASE_DIR = os.path.dirname(os.path.abspath(__file__))
Cfg = EasyDict()
Cfg.use_darknet_cfg = False
Cfg.cfgfile = os.path.join(_BASE_DIR, 'cfg', 'yolov4.cfg')
Cfg.batch = 16
Cfg.subdivisions = 16
Cfg.width = 608
Cfg.height = 608
Cfg.channels = 3
Cfg.momentum = 0.949
Cfg.decay = 0.0005
Cfg.angle = 0
Cfg.saturation = 1.5
Cfg.exposure = 1.5
Cfg.hue = .1
Cfg.learning_rate = 0.00261
Cfg.burn_in = 1000
Cfg.max_batches = 500500
Cfg.steps = [400000, 450000]
Cfg.policy = Cfg.steps
Cfg.scales = .1, .1
Cfg.cutmix = 0
Cfg.mosaic = 1
Cfg.letter_box = 0
Cfg.jitter = 0.2
Cfg.classes = 80
Cfg.track = 0
Cfg.w = Cfg.width
Cfg.h = Cfg.height
Cfg.flip = 1
Cfg.blur = 0
Cfg.gaussian = 0
Cfg.boxes = 10 # box num
Cfg.TRAIN_EPOCHS = 300
Cfg.train_label = os.path.join(_BASE_DIR, 'data', 'train.txt')
Cfg.val_label = os.path.join(_BASE_DIR, 'val.txt')
Cfg.TRAIN_OPTIMIZER = 'adam'
'''
image_path1 x1,y1,x2,y2,id x1,y1,x2,y2,id x1,y1,x2,y2,id ...
image_path2 x1,y1,x2,y2,id x1,y1,x2,y2,id x1,y1,x2,y2,id ...
...
'''
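# Illustrative example of one annotation line in the format described above
# (path and box coordinates are made up):
#   images/000001.jpg 48,240,195,371,11 8,12,352,498,14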
if Cfg.mosaic and Cfg.cutmix:
Cfg.mixup = 4
elif Cfg.cutmix:
Cfg.mixup = 2
elif Cfg.mosaic:
Cfg.mixup = 3
Cfg.checkpoints = os.path.join(_BASE_DIR, 'checkpoints')
Cfg.TRAIN_TENSORBOARD_DIR = os.path.join(_BASE_DIR, 'log')
Cfg.iou_type = 'iou' # 'giou', 'diou', 'ciou'
Cfg.keep_checkpoint_max = 10
|
py | b4168d849037ada2522eb66de8af5f0402f4b3b4 | '''
Given an array, rotate the array to the right by k steps, where k is non-negative.
Example 1:
Input: [1,2,3,4,5,6,7] and k = 3
Output: [5,6,7,1,2,3,4]
Explanation:
rotate 1 steps to the right: [7,1,2,3,4,5,6]
rotate 2 steps to the right: [6,7,1,2,3,4,5]
rotate 3 steps to the right: [5,6,7,1,2,3,4]
Example 2:
Input: [-1,-100,3,99] and k = 2
Output: [3,99,-1,-100]
Explanation:
rotate 1 steps to the right: [99,-1,-100,3]
rotate 2 steps to the right: [3,99,-1,-100]
Note:
Try to come up as many solutions as you can, there are at least 3 different ways to solve this problem.
Could you do it in-place with O(1) extra space?
'''
class Solution(object):
def rotate(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: void Do not return anything, modify nums in-place instead.
"""
if not nums:
return nums
k %= len(nums)
res = [x for x in nums]
tmp = nums[-1]
for i in reversed(xrange(len(nums))):
pre = (i + len(nums) - k) % len(nums)
res[i] = nums[pre]
for i in xrange(len(nums)):
nums[i] = res[i]
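# Sketch (added for illustration; not part of the original solution) of the
# O(1) extra-space variant hinted at in the note above: reverse the whole
# array, then reverse the first k elements and the remaining ones.
class SolutionInPlace(object):
    def rotate(self, nums, k):
        def reverse(lo, hi):
            while lo < hi:
                nums[lo], nums[hi] = nums[hi], nums[lo]
                lo += 1
                hi -= 1
        if not nums:
            return
        k %= len(nums)
        reverse(0, len(nums) - 1)
        reverse(0, k - 1)
        reverse(k, len(nums) - 1)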
|
py | b4168e50f0bbf1ed6a689ba623f74c00dda83a74 | #Esta funcion verifica si el pseudonimo ingresado para un nuevo usuario es valido.
def validar_pseudonimo(pseudo, datos_usuarios):
if pseudo in datos_usuarios:
print("\n* ¡El nombre de usuario ingresado ya se encuentra en uso. Por favor seleccione otro! *\n")
return True
else:
prueba = pseudo.replace("_", "")
if prueba.islower() and prueba.isalnum():
return False
else:
print("\n* ¡Hay caracteres invalidos en el pseudonimo. Vuelva a intentarlo! *\n")
return True
# This function counts how many uppercase letters, lowercase letters and digits the password contains
def contar_caracteres_contraseña(contraseña):
mayusculas, minusculas, digitos = 0, 0, 0
for caracter in contraseña:
if caracter.isdigit():
digitos += 1
elif caracter.isalpha():
if caracter.islower():
minusculas += 1
elif caracter.isupper():
mayusculas += 1
return mayusculas, minusculas, digitos
# This function checks whether the entered password is valid
def validar_contraseña(contraseña):
if len(contraseña) >= 5:
caracteres = contar_caracteres_contraseña(contraseña)
if (0 not in caracteres):
return False
print("\n* ¡La contraseña ingresada no cumple los requisitos! *")
return True
# This function validates whether the login credentials are correct.
def validar_datos_de_ingreso(usuario, clave, datos_usuarios):
if usuario not in datos_usuarios:
print("\n* Usuario invalido, vuelva a ingresar los datos *")
return True
elif datos_usuarios[usuario]["Contraseña"] != clave:
print("\n* Clave invalida, vuelva a ingresar los datos *")
return True
else:
return False
# This function validates whether the option chosen by the user in the main menu is valid
def validar_menu_principal(eleccion):
if(eleccion not in [1, 2, 3, 4, 5]):
print("╔═══════════════════════════════════════════════╗")
print("║*****Opcion invalida. Vuelva a intentarlo******║")
print("╚═══════════════════════════════════════════════╝")
return True
return False |
py | b4168e719049944807d2a2001ca4aa71f4c82304 | class ComputeInstance:
def __init__(self, driver):
self.driver = driver
def list_instances(self):
""" List OpenStack Instances and associated info """
# Get all nodes info associated with this OpenStack account
nodes = self.driver.list_nodes()
print "All OpenStack instances:"
# Loop through nodes and extract instance information
for node in nodes:
print '\tInstance Name:', node.name, ' Instance Id:', node.id, ' State:', node.extra['vm_state'],\
'IP Address:', node.public_ips[0] if node.public_ips else None, 'Launch time:', node.extra['created']
print
def create_instance(self, ami_id, key_name, instance_name, instance_size):
""" Creates an instance """
print "Trying to create new instance..."
sizes = self.driver.list_sizes()
images = self.driver.list_images()
size = [s for s in sizes if s.id == instance_size][0]
image = [i for i in images if i.id == ami_id][0]
node = self.driver.create_node(name=instance_name, image=image, size=size, ex_keyname=key_name)
print "New instance id", node.id, "called", node.name, "was created.\n"
def stop_instance(self, instance_id):
""" Stops a running instance"""
nodes = self.driver.list_nodes()
node = [n for n in nodes if n.id == instance_id][0]
if node.extra['status'] == u'running':
self.driver.ex_stop_node(node)
print "Instance id", instance_id, "now is stopped"
elif node.extra['status'] == u'stopped':
print "Instance " + instance_id + " is already stopped."
else:
print "Instance " + instance_id + " cannot be stopped, state: " + node.extra['status']
print
def terminate_instance(self, instance_id):
""" Terminate an instance"""
nodes = self.driver.list_nodes()
node = [n for n in nodes if n.id == instance_id][0]
if node.extra['status'] == u'terminated':
print "Instance " + instance_id + " is already terminated."
else:
print "Terminating instance " + instance_id + "..."
self.driver.destroy_node(node)
print
def terminate_all_instances(self):
""" Terminate all instances """
nodes = self.driver.list_nodes()
for node in nodes:
if node.extra['status'] != u'terminated':
self.driver.destroy_node(node)
print "All instances were terminated."
|
py | b4168f6c69fda4b7a4f2bd7951f8e811e581ebb6 | #! /usr/bin/env python3
import argparse
import sys
from typing import Dict
from arcadeutils import FileBytes
from naomi import NaomiRom, NaomiRomRegionEnum
def main() -> int:
# Create the argument parser
parser = argparse.ArgumentParser(
description="Utility for printing information about a ROM file.",
)
parser.add_argument(
'bin',
metavar='BIN',
type=str,
help='The binary file we should generate info for.',
)
# Grab what we're doing
args = parser.parse_args()
# Grab the rom, parse it
with open(args.bin, "rb") as fp:
data = FileBytes(fp)
# Create a text LUT
region_lut: Dict[NaomiRomRegionEnum, str] = {
NaomiRomRegionEnum.REGION_JAPAN: "Japan",
NaomiRomRegionEnum.REGION_USA: "USA",
NaomiRomRegionEnum.REGION_EXPORT: "Export",
NaomiRomRegionEnum.REGION_KOREA: "Korea",
NaomiRomRegionEnum.REGION_AUSTRALIA: "Australia",
}
# First, assume its a Naomi ROM
naomi = NaomiRom(data)
if naomi.valid:
print("NAOMI ROM")
print("=========")
print(f"Publisher: {naomi.publisher}")
print(f"Japan Title: {naomi.names[NaomiRomRegionEnum.REGION_JAPAN]}")
print(f"USA Title: {naomi.names[NaomiRomRegionEnum.REGION_USA]}")
print(f"Export Title: {naomi.names[NaomiRomRegionEnum.REGION_EXPORT]}")
print(f"Korea Title: {naomi.names[NaomiRomRegionEnum.REGION_KOREA]}")
print(f"Australia Title: {naomi.names[NaomiRomRegionEnum.REGION_AUSTRALIA]}")
print(f"Publish Date: {naomi.date}")
print(f"Serial Number: {naomi.serial.decode('ascii')}")
print(f"ROM Size: {len(data)} bytes")
print("")
print("Supported Configurations")
print("------------------------")
print(f"Regions: {', '.join(region_lut[r] for r in naomi.regions)}")
print(f"Players: {', '.join(str(p) for p in naomi.players)}")
print(f"Monitor: {', '.join(str(f) + 'khz' for f in naomi.frequencies)}")
print(f"Orientation: {', '.join(o for o in naomi.orientations)}")
print(f"Service Type: {naomi.servicetype}")
print("")
print("Main Executable Sections")
print("------------------------")
for section in naomi.main_executable.sections:
print(f"ROM Offset: {hex(section.offset)}")
print(f"Memory Offset: {hex(section.load_address)}")
print(f"Section Length: {section.length} bytes")
print("")
print(f"Entrypoint: {hex(naomi.main_executable.entrypoint)}")
print("")
print("Test Executable Sections")
print("------------------------")
for section in naomi.test_executable.sections:
print(f"ROM Offset: {hex(section.offset)}")
print(f"Memory Offset: {hex(section.load_address)}")
print(f"Section Length: {section.length} bytes")
print("")
print(f"Entrypoint: {hex(naomi.test_executable.entrypoint)}")
print("")
print("Per-Region EEPROM Defaults")
print("--------------------------")
for region, default in naomi.defaults.items():
print(f"{region_lut[region]}")
if not default.apply_settings:
print("Override: disabled")
else:
print("Override: enabled")
print(f"Force vertical: {'yes' if default.force_vertical else 'no'}")
print(f"Force silent: {'yes' if default.force_silent else 'no'}")
print(f"Chute type: {default.chute}")
if default.coin_setting < 27:
setting = f"#{default.coin_setting}"
elif default.coin_setting == 27:
setting = "free play"
elif default.coin_setting == 28:
setting = "manual assignment"
print(f"Coin setting: {setting}")
if default.coin_setting == 28:
print(f"Coin 1 rate: {default.coin_1_rate}")
print(f"Coin 2 rate: {default.coin_2_rate}")
print(f"Credit rate: {default.credit_rate}")
print(f"Bonus: {default.bonus}")
for i, text in enumerate(default.sequences):
print(f"Sequence {i + 1}: {text}")
print("")
return 0
# Couldn't figure out ROM type
print("Couldn't determine ROM type!", file=sys.stderr)
return 1
if __name__ == "__main__":
sys.exit(main())
|
py | b4168f839761f2367e902e6a3695ec06984c3233 | # Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from oslo_utils import timeutils
import six
from barbican import i18n as u
from barbican.model import models
from barbican.tasks import common
from barbican.tasks import resources
from barbican.tests import utils
class BaseOrderTestCase(utils.BaseTestCase, utils.MockModelRepositoryMixin):
def setUp(self):
super(BaseOrderTestCase, self).setUp()
self.requestor = 'requestor1234'
self.order = models.Order()
self.order.id = "id1"
self.order.requestor = self.requestor
self.order.type = "key"
self.meta = {'name': 'name',
'payload_content_type':
'application/octet-stream',
'algorithm': 'AES',
'bit_length': 256,
'expiration': timeutils.utcnow(),
'mode': 'CBC'}
self.order.meta = self.meta
self.external_project_id = 'keystone1234'
self.project_id = 'projectid1234'
self.project = models.Project()
self.project.id = self.project_id
self.project.external_id = self.external_project_id
self.project_repo = mock.MagicMock()
self.project_repo.get.return_value = self.project
self.setup_project_repository_mock(self.project_repo)
self.order.status = models.States.PENDING
self.order.id = 'orderid1234'
self.order.project_id = self.project_id
self.order_repo = mock.MagicMock()
self.order_repo.get.return_value = self.order
self.setup_order_repository_mock(self.order_repo)
self.setup_order_plugin_meta_repository_mock()
self.setup_order_barbican_meta_repository_mock()
self.secret = models.Secret()
self.secret_repo = mock.MagicMock()
self.secret_repo.create_from.return_value = None
self.setup_secret_repository_mock(self.secret_repo)
self.datum_repo = mock.MagicMock()
self.datum_repo.create_from.return_value = None
self.setup_encrypted_datum_repository_mock(self.datum_repo)
self.setup_kek_datum_repository_mock()
self.setup_secret_meta_repository_mock()
self.container_repo = mock.MagicMock()
self.container_repo.create_from.return_value = None
self.setup_container_repository_mock(self.container_repo)
self.container_secret_repo = mock.MagicMock()
self.container_secret_repo.create_from.return_value = None
self.setup_container_secret_repository_mock(self.container_secret_repo)
self.container = models.Container()
class WhenUsingOrderTaskHelper(BaseOrderTestCase):
def setUp(self):
super(WhenUsingOrderTaskHelper, self).setUp()
self.result = common.FollowOnProcessingStatusDTO()
self.helper = resources._OrderTaskHelper()
def test_should_retrieve_entity(self):
order_model = self.helper.retrieve_entity(
self.order.id, self.external_project_id)
self.assertEqual(self.order.id, order_model.id)
self.order_repo.get.assert_called_once_with(
entity_id=self.order.id,
external_project_id=self.external_project_id)
def test_should_handle_error(self):
self.helper.handle_error(self.order, 'status_code', 'reason',
ValueError())
self.assertEqual(models.States.ERROR, self.order.status)
self.assertEqual('status_code', self.order.error_status_code)
self.assertEqual('reason', self.order.error_reason)
self.order_repo.save.assert_called_once_with(self.order)
def test_should_handle_success_no_result(self):
self.helper.handle_success(self.order, None)
self.assertEqual(models.States.ACTIVE, self.order.status)
self.assertIsNone(self.order.sub_status)
self.assertIsNone(self.order.sub_status_message)
self.order_repo.save.assert_called_once_with(self.order)
def test_should_handle_success_result_no_follow_on_needed(self):
self.helper.handle_success(self.order, self.result)
self.assertEqual(models.States.ACTIVE, self.order.status)
self.assertEqual('Unknown', self.order.sub_status)
self.assertEqual('Unknown', self.order.sub_status_message)
self.order_repo.save.assert_called_once_with(self.order)
def test_should_handle_success_result_follow_on_needed(self):
self.result.retry_task = common.RetryTasks.INVOKE_SAME_TASK
self.result.status = 'status'
self.result.status_message = 'status_message'
self.helper.handle_success(self.order, self.result)
self.assertEqual(models.States.PENDING, self.order.status)
self.assertEqual('status', self.order.sub_status)
self.assertEqual('status_message', self.order.sub_status_message)
self.order_repo.save.assert_called_once_with(self.order)
def test_should_handle_success_result_large_statuses_clipped(self):
sub_status = 'z' * (models.SUB_STATUS_LENGTH + 1)
sub_status_message = 'z' * (models.SUB_STATUS_MESSAGE_LENGTH + 1)
self.result.status = sub_status
self.result.status_message = sub_status_message
self.helper.handle_success(self.order, self.result)
self.assertEqual(sub_status[:-1], self.order.sub_status)
self.assertEqual(
sub_status_message[:-1], self.order.sub_status_message)
self.order_repo.save.assert_called_once_with(self.order)
class WhenBeginningKeyTypeOrder(BaseOrderTestCase):
def setUp(self):
super(WhenBeginningKeyTypeOrder, self).setUp()
self.resource = resources.BeginTypeOrder()
@mock.patch('barbican.plugin.resources.generate_secret')
def test_should_process_key_order(self, mock_generate_secret):
mock_generate_secret.return_value = self.secret
self.resource.process(self.order.id, self.external_project_id)
self.order_repo.get.assert_called_once_with(
entity_id=self.order.id,
external_project_id=self.external_project_id)
self.assertEqual(self.order.status, models.States.ACTIVE)
secret_info = self.order.to_dict_fields()['meta']
mock_generate_secret.assert_called_once_with(
secret_info,
secret_info.get('payload_content_type',
'application/octet-stream'),
self.project
)
def test_should_fail_during_retrieval(self):
# Force an error during the order retrieval phase.
self.order_repo.get = mock.MagicMock(return_value=None,
side_effect=ValueError())
self.assertRaises(
ValueError,
self.resource.process,
self.order.id,
self.external_project_id,
)
# Order state doesn't change because can't retrieve it to change it.
self.assertEqual(models.States.PENDING, self.order.status)
def test_should_fail_during_processing(self):
# Force an error during the processing handler phase.
self.project_repo.get = mock.MagicMock(return_value=None,
side_effect=ValueError())
self.assertRaises(
ValueError,
self.resource.process,
self.order.id,
self.external_project_id,
)
self.assertEqual(models.States.ERROR, self.order.status)
self.assertEqual(500, self.order.error_status_code)
self.assertEqual(u._('Process TypeOrder failure seen - please contact '
'site administrator.'), self.order.error_reason)
@mock.patch('barbican.plugin.resources.generate_secret')
def test_should_fail_during_success_report_fail(self,
mock_generate_secret):
mock_generate_secret.return_value = self.secret
# Force an error during the processing handler phase.
self.order_repo.save = mock.MagicMock(return_value=None,
side_effect=ValueError())
self.assertRaises(
ValueError,
self.resource.process,
self.order.id,
self.external_project_id,
)
def test_should_fail_during_error_report_fail(self):
# Force an error during the error-report handling after
# error in processing handler phase.
# Force an error during the processing handler phase.
self.project_repo.get = mock.MagicMock(return_value=None,
side_effect=TypeError())
# Force exception in the error-reporting phase.
self.order_repo.save = mock.MagicMock(return_value=None,
side_effect=ValueError())
# Should see the original exception (TypeError) instead of the
# secondary one (ValueError).
self.assertRaises(
TypeError,
self.resource.process,
self.order.id,
self.external_project_id,
)
self.project_repo.get.assert_called_once_with(self.project_id)
self.order_repo.save.assert_called_once_with(self.order)
class WhenBeginningCertificateTypeOrder(BaseOrderTestCase):
def setUp(self):
super(WhenBeginningCertificateTypeOrder, self).setUp()
self.order.type = models.OrderType.CERTIFICATE
self.resource = resources.BeginTypeOrder()
@mock.patch(
'barbican.tasks.certificate_resources.issue_certificate_request')
def test_should_process_order_no_container(
self, mock_issue_cert_request):
mock_issue_cert_request.return_value = None
result = self.resource.process_and_suppress_exceptions(
self.order.id, self.external_project_id)
self.order_repo.get.assert_called_once_with(
entity_id=self.order.id,
external_project_id=self.external_project_id)
self.assertEqual(self.order.status, models.States.ACTIVE)
mock_issue_cert_request.assert_called_once_with(
self.order,
self.project,
mock.ANY
)
self.assertIsNone(self.order.container_id)
self.assertIsInstance(result, common.FollowOnProcessingStatusDTO)
@mock.patch(
'barbican.tasks.certificate_resources.issue_certificate_request')
def test_should_process_order_with_container(
self, mock_issue_cert_request):
mock_issue_cert_request.return_value = self.container
result = self.resource.process(
self.order.id, self.external_project_id)
self.order_repo.get.assert_called_once_with(
entity_id=self.order.id,
external_project_id=self.external_project_id)
self.assertEqual(self.order.status, models.States.ACTIVE)
mock_issue_cert_request.assert_called_once_with(
self.order,
self.project,
mock.ANY
)
self.assertEqual(self.container.id, self.order.container_id)
self.assertIsInstance(result, common.FollowOnProcessingStatusDTO)
class WhenUpdatingOrder(BaseOrderTestCase):
def setUp(self):
super(WhenUpdatingOrder, self).setUp()
self.updated_meta = 'updated'
self.resource = resources.UpdateOrder()
@mock.patch(
'barbican.tasks.certificate_resources.modify_certificate_request')
def test_should_update_certificate_order(self, mock_modify_cert_request):
self.order.type = models.OrderType.CERTIFICATE
self.resource.process_and_suppress_exceptions(
self.order.id, self.external_project_id, self.updated_meta)
self.assertEqual(self.order.status, models.States.ACTIVE)
mock_modify_cert_request.assert_called_once_with(
self.order,
self.updated_meta
)
@mock.patch(
'barbican.tasks.certificate_resources.modify_certificate_request')
def test_should_fail_during_processing(self, mock_mod_cert):
mock_mod_cert.side_effect = ValueError('Abort!')
self.order.type = models.OrderType.CERTIFICATE
exception = self.assertRaises(
ValueError,
self.resource.process,
            self.order.id,
self.external_project_id,
self.meta
)
self.assertEqual('Abort!', six.text_type(exception))
mock_mod_cert.assert_called_once_with(self.order, self.meta)
self.assertEqual(models.States.ERROR, self.order.status)
self.assertEqual(500, self.order.error_status_code)
self.assertEqual(u._('Update Order failure seen - please contact '
'site administrator.'), self.order.error_reason)
class WhenBeginningAsymmetricTypeOrder(BaseOrderTestCase):
def setUp(self):
super(WhenBeginningAsymmetricTypeOrder, self).setUp()
self.order.type = "asymmetric"
self.resource = resources.BeginTypeOrder()
@mock.patch('barbican.plugin.resources.generate_asymmetric_secret')
def test_should_process_asymmetric_order(self,
mock_generate_asymmetric_secret):
mock_generate_asymmetric_secret.return_value = self.container
self.resource.process(self.order.id, self.external_project_id)
self.order_repo.get.assert_called_once_with(
entity_id=self.order.id,
external_project_id=self.external_project_id)
self.assertEqual(self.order.status, models.States.ACTIVE)
secret_info = self.order.to_dict_fields()['meta']
mock_generate_asymmetric_secret.assert_called_once_with(
secret_info,
secret_info.get('payload_content_type',
'application/octet-stream'),
self.project
)
def test_should_fail_during_retrieval(self):
# Force an error during the order retrieval phase.
self.order_repo.get = mock.MagicMock(return_value=None,
side_effect=ValueError())
self.assertRaises(
ValueError,
self.resource.process,
self.order.id,
self.external_project_id,
)
# Order state doesn't change because can't retrieve it to change it.
self.assertEqual(models.States.PENDING, self.order.status)
def test_should_fail_during_processing(self):
# Force an error during the processing handler phase.
self.project_repo.get = mock.MagicMock(return_value=None,
side_effect=ValueError())
self.assertRaises(
ValueError,
self.resource.process,
self.order.id,
self.external_project_id,
)
self.assertEqual(models.States.ERROR, self.order.status)
self.assertEqual(500, self.order.error_status_code)
self.assertEqual(u._('Process TypeOrder failure seen - please contact '
'site administrator.'), self.order.error_reason)
@mock.patch('barbican.plugin.resources.generate_asymmetric_secret')
def test_should_fail_during_success_report_fail(self,
mock_generate_asym_secret):
mock_generate_asym_secret.return_value = self.container
# Force an error during the processing handler phase.
self.order_repo.save = mock.MagicMock(return_value=None,
side_effect=ValueError())
self.assertRaises(
ValueError,
self.resource.process,
self.order.id,
self.external_project_id,
)
def test_should_fail_during_error_report_fail(self):
# Force an error during the error-report handling after
# error in processing handler phase.
# Force an error during the processing handler phase.
self.project_repo.get = mock.MagicMock(return_value=None,
side_effect=TypeError())
# Force exception in the error-reporting phase.
self.order_repo.save = mock.MagicMock(return_value=None,
side_effect=ValueError())
# Should see the original exception (TypeError) instead of the
# secondary one (ValueError).
self.assertRaises(
TypeError,
self.resource.process,
self.order.id,
self.external_project_id,
)
self.project_repo.get.assert_called_once_with(self.project_id)
self.order_repo.save.assert_called_once_with(self.order)
class WhenCheckingCertificateStatus(BaseOrderTestCase):
def setUp(self):
super(WhenCheckingCertificateStatus, self).setUp()
self.order.type = models.OrderType.CERTIFICATE
self.resource = resources.CheckCertificateStatusOrder()
@mock.patch(
'barbican.tasks.certificate_resources.check_certificate_request')
def test_should_process_order_no_container(
self, mock_check_cert_request):
mock_check_cert_request.return_value = None
result = self.resource.process_and_suppress_exceptions(
self.order.id, self.external_project_id)
self.order_repo.get.assert_called_once_with(
entity_id=self.order.id,
external_project_id=self.external_project_id)
self.assertEqual(self.order.status, models.States.ACTIVE)
mock_check_cert_request.assert_called_once_with(
self.order,
self.project,
mock.ANY
)
self.assertIsNone(self.order.container_id)
self.assertIsInstance(result, common.FollowOnProcessingStatusDTO)
@mock.patch(
'barbican.tasks.certificate_resources.check_certificate_request')
def test_should_process_order_with_container(
self, mock_check_cert_request):
mock_check_cert_request.return_value = self.container
self.resource.process(self.order.id, self.external_project_id)
self.order_repo.get.assert_called_once_with(
entity_id=self.order.id,
external_project_id=self.external_project_id)
self.assertEqual(self.order.status, models.States.ACTIVE)
mock_check_cert_request.assert_called_once_with(
self.order,
self.project,
mock.ANY
)
self.assertEqual(self.container.id, self.order.container_id)
def test_should_fail_with_bogus_order_type(self):
self.order.type = 'bogus-type'
self.assertRaises(
NotImplementedError,
self.resource.process,
self.order.id,
self.external_project_id,
)
# Order state should be set to ERROR.
self.assertEqual(models.States.ERROR, self.order.status)
self.assertEqual(
six.u('Check Certificate Order Status failure seen - '
'please contact site administrator.'),
self.order.error_reason)
self.assertEqual(500, self.order.error_status_code)
|
py | b416913b40772690860771698fbed777cdbd23da | from typing import List
class _Base:
def __init__(self, type_id, x, y, owner, color=-1):
self.type_id = type_id
self.variation: int = 0
self.x: float = x
self.y: float = y
self.z: float = 0.0
self.rotation: float = 0.0
self.x_scale: float = 1.0
self.y_scale: float = 1.0
self.z_scale: float = 1.0
# byte: flags*
self.owner: int = owner
# byte: unknown (0)
# byte: unknown (0)
self.hp: int = -1
self.mp: int = 0
self.item_table_pointer: int = -1
self.dropped_item_sets: List = []
self.gold_amount: int = 12500
self.target_acquisition: float = -1 # -1 = normal, -2 = camp
self.hero_level: int = 1
self.strength: int = 0
self.agility: int = 0
self.intelligence: int = 0
self.items: List = []
self.modified_abilities: List = []
self.color = color
self.way_gate: int = -1
self.creation_number: int = 0
"""
int: random unit/item flag "r" (for uDNR units and iDNR items)
0 = Any neutral passive building/item, in this case we have
byte[3]: level of the random unit/item,-1 = any (this is actually interpreted as a 24-bit number)
byte: item class of the random item, 0 = any, 1 = permanent ... (this is 0 for units)
r is also 0 for non random units/items so we have these 4 bytes anyway (even if the id wasn't uDNR or iDNR)
1 = random unit from random group (defined in the w3i), in this case we have
int: unit group number (which group from the global table)
int: position number (which column of this group)
the column should of course have the item flag set (in the w3i) if this is a random item
2 = random unit from custom table, in this case we have
int: number "n" of different available units
then we have n times a random unit structure
"""
Unit = _Base
Item = _Base
|
py | b4169186f2f6f1b8ec5bddd37448a38e82255067 | import records
db = records.Database('sqlite:///:memory:')
db.query('CREATE TABLE foo (a integer)')
def test_failing_transaction():
tx = db.transaction()
try:
db.query('INSERT INTO foo VALUES (42)')
db.query('INSERT INTO foo VALUES (43)')
raise ValueError()
tx.commit()
db.query('INSERT INTO foo VALUES (44)')
except:
tx.rollback()
finally:
assert db.query('SELECT count(*) AS n FROM foo')[0].n == 0
def test_passing_transaction():
tx = db.transaction()
try:
db.query('INSERT INTO foo VALUES (42)')
db.query('INSERT INTO foo VALUES (43)')
tx.commit()
except:
tx.rollback()
finally:
assert db.query('SELECT count(*) AS n FROM foo')[0].n == 2
|
py | b41691d1bb59c3df5379f543cb0dfa0f04afe429 | import requests
def download_file_from_google_drive(id):
destination = 'data/'+id
if id == 'allNoisyData.csv':
URL = "https://drive.google.com/uc?id=1BHe7DX0Jz8xbwNtRr5B_wlzcDCXnCW62"
elif id == 'allNoisyData2.csv':
URL = "https://drive.google.com/uc?id=1BNRQGeGGZud6gJOgwaUn9pqj3TLZ-98_"
else:
URL = "https://drive.google.com/u/1/uc?id=16Zjvc0iqcWAznTCIceG_i3CBoGMuJfYt&export=download"
session = requests.Session()
response = session.get(URL, params={'id': id}, stream=True)
token = get_confirm_token(response)
if token:
params = {'id': id, 'confirm': token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
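# Illustrative call (added example): fetches one of the dataset files handled
# above into the local data/ directory, which is assumed to exist.
if __name__ == "__main__":
    download_file_from_google_drive('allNoisyData.csv')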
|
py | b41691da8ff41fba4e4bfef6ec9da2b58bcaf572 | import _plotly_utils.basevalidators
class CmidValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="cmid", parent_name="scattercarpet.marker.line", **kwargs
):
super(CmidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
**kwargs
)
|
py | b41692b3c863e8b747f81d6e8051ff24d1a636d4 | #!python
'''
Stocastic Sparsy Perceptron
'''
import csv
import numpy as np
import matplotlib.pyplot as plt
import math
import sys
# ============ FILE load and write stuff ===========================
def load_csv(filename):
dataset = list()
with open(filename, 'r') as file:
        csv_reader = csv.reader(file)
for row in csv_reader:
if not row:
continue
dataset.append(row)
return dataset
def read_asc_data(filename):
f= open(filename,'r')
tmp_str=f.readline()
tmp_arr=tmp_str[:-1].split(' ')
N=int(tmp_arr[0]);n_row=int(tmp_arr[1]);n_col=int(tmp_arr[2])
data=np.zeros([N,n_row*n_col+1])
for n in range(N):
tmp_str=f.readline()
tmp_arr=tmp_str[:-1].split(' ')
for i in range(n_row*n_col+1):
data[n][i]=int(tmp_arr[i])
f.close()
return N,n_row,n_col,data
def plot_data(row,col,n_row,n_col,data):
fig=plt.figure(figsize=(row,col))
for n in range(1, row*col +1):
img=np.reshape(data[n-1][:-1],(n_row,n_col))
fig.add_subplot(row, col, n)
plt.imshow(img,interpolation='none',cmap='binary')
plt.show()
def plot_tagged_data(row,col,n_row,n_col,X,Y,ew):
fig=plt.figure(figsize=(row,col))
for n in range(row*col):
img=np.reshape(X[n],(n_row,n_col))
fig.add_subplot(row, col, n+1)
#if(Y[n]>0):#exact case
yn,a=predictor(X[n],ew)
if(yn>0):
plt.imshow(img,interpolation='none',cmap='RdPu')
else:
plt.imshow(img,interpolation='none',cmap='cool')
plt.show()
def plot_error(err):
plt.plot(range(len(err)), err, marker='o')
plt.xlabel('Iterations')
plt.ylabel('Number of misclassifications')
plt.ylim([0,1])
plt.show()
return
def confusion(Xeval,Yeval,N,ew):
C=np.zeros([2,2])
for n in range(N):
y,r=predictor(Xeval[n],ew)
if(y<0. and Yeval[n]<0.): C[0,0]=C[0,0]+1
if(y>0. and Yeval[n]>0.): C[1,1]=C[1,1]+1
if(y<0. and Yeval[n]>0.): C[1,0]=C[1,0]+1
if(y>0. and Yeval[n]<0.): C[0,1]=C[0,1]+1
return C
#============== Perceptron Stuff ==================
def normalization(ew):
return(ew/np.linalg.norm(ew,2))
def predictor(x,ew):
r=ew[0]
r=r+np.dot(x,ew[1:])
sgn=np.sign(r)
return sgn,r
def cost(X,Y,N,ew):
En=0
for n in range(N):
ypred,a=predictor(X[n],ew)
En=En+np.abs(0.5*(ypred-Y[n]))
En=En/N
return En
def update(x,y,eta,ew,s):
ypred,a=predictor(x,ew)
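    # Damping factor (descriptive comment added): the correction shrinks as the
    # activation magnitude |a| grows relative to eta, so confidently classified
    # samples move the weights less.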
cor=1/(1+math.fabs(a)/eta)
r=eta*0.5*(y-ypred)*cor
ew[0]=ew[0]+r
ew[1:]=ew[1:]+r*x
ew = np.multiply(ew, s) # apply the mask (multiplication component by component)
return ew
def run_stocastic(X,Y,N,subloop,eta,MAX_ITER,ew,s,err):
epsi=0
it=0
while(err[-1]>epsi):
for j in range(subloop):
n=int(np.random.rand()*N)
ew=update(X[n],Y[n],eta,ew,s)
ew=normalization(ew)
EN=cost(X,Y,N,ew)
err.append(EN)
print('iter %d, cost=%f, eta=%e \r' % (it,EN,eta),end='')
it=it+subloop
if(it>MAX_ITER): break
#ew=normalization(ew);
return ew, err
# =========== MAIN CODE ===============
N,n_row,n_col,data=read_asc_data('./Data/line600.txt')
#N,n_row,n_col,data=read_asc_data('./Data/line1500.txt')
#N,n_row,n_col,data=read_asc_data('./Data/rectangle600.txt')
#N,n_row,n_col,data=read_asc_data('./Data/square_circle.txt')
#N,n_row,n_col,data=read_asc_data('./Data/XOR.txt')
#N,n_row,n_col,data=read_asc_data('./Data/AND.txt')
print('find %d images of %d X %d pixels' % (N,n_row,n_col))
#plot_data(10,10,n_row,n_col,data)
Nt=int(N*0.8);I=n_row*n_col; #split training vs test sets
Xt=data[:Nt,:-1];Yt=data[:Nt,-1]
np.place(Yt, Yt!=1, [-1])
ew=np.ones([I+1]);ew=normalization(ew); #initialisation
K = 14 # maximum number of active nodes
MC = 10 # number of Monte-Carlo iterations
s_best = np.zeros([I+1]) # best mask
err_best = 1e+20 # best error
for t in range(MC):
s = np.zeros([I+1]) # mask vector
s[:K] = 1; np.random.shuffle(s) # activate K nodes
s[0] = 1 # activate bias
print('mask:', s) # show mask
err=[];err.append(cost(Xt,Yt,Nt,ew))
print('cost=%f ' % (err[-1]))
# ======= RUNNING ===========================
eta=0.5;nbiter=5000;subloop=50
ew,err=run_stocastic(Xt,Yt,Nt,subloop,eta,nbiter,ew,s,err)
print('cost=%f ' % (err[-1]));eta=0.4*eta
ew,err=run_stocastic(Xt,Yt,Nt,subloop,eta,nbiter,ew,s,err)
print('cost=%f ' % (err[-1]));eta=0.3*eta
ew,err=run_stocastic(Xt,Yt,Nt,subloop,eta,nbiter,ew,s,err)
print('cost=%f ' % (err[-1]));eta=0.3*eta
ew,err=run_stocastic(Xt,Yt,Nt,subloop,eta,nbiter,ew,s,err)
print('cost=%f ' % (err[-1]));eta=0.5*eta
ew,err=run_stocastic(Xt,Yt,Nt,subloop,eta,nbiter,ew,s,err)
print('cost=%f ' % (err[-1]));eta=0.5*eta
print('experience %i, in-samples error=%f' % (t, err[-1]))
if (err[-1] < err_best):
err_best = err[-1]
s_best = s
err_history = err
ew_best = ew
# ============ OUTPUT =====================
#--- In-samples error ---
print()
print('=========')
print('in-samples error=%f' % (err_best))
C =confusion(Xt,Yt,Nt,ew_best)
print(C)
#--- Out-samples error ---
Ne=N-Nt
Xe=data[Nt:N,:-1];Ye=data[Nt:N,-1]
np.place(Ye, Ye!=1, [-1])
print('--------')
print('out-samples error=%f' % (cost(Xe,Ye,Ne,ew_best)))
C =confusion(Xe,Ye,Ne,ew_best)
print(C)
plot_tagged_data(10,10,n_row,n_col,Xe,Ye,ew_best)
print('bye') |
py | b4169370d0d634fa8d8944bdd2c054734cf1b65f | """
Update claims-based hospitalization indicator.
Author: Maria Jahja
Created: 2020-09-27
"""
# standard packages
import logging
from multiprocessing import Pool, cpu_count
# third party
import numpy as np
import pandas as pd
from delphi_utils import GeoMapper
# first party
from delphi_utils import Weekday
from .config import Config, GeoConstants
from .load_data import load_data
from .indicator import ClaimsHospIndicator
class ClaimsHospIndicatorUpdater:
"""Updater class for claims-based hospitalization indicator."""
# pylint: disable=too-many-instance-attributes, too-many-arguments
# all variables are used
def __init__(self, startdate, enddate, dropdate, geo, parallel, weekday,
write_se, signal_name):
"""
Initialize updater for the claims-based hospitalization indicator.
Args:
startdate: first indicator date (YYYY-mm-dd)
enddate: last indicator date (YYYY-mm-dd)
dropdate: data drop date (YYYY-mm-dd)
geo: geographic resolution, one of ["county", "state", "msa", "hrr", "hhs", "nation"]
parallel: boolean to run the indicator update in parallel
weekday: boolean to adjust for weekday effects
write_se: boolean to write out standard errors, if true, use an obfuscated name
signal_name: string signal name
"""
self.startdate, self.enddate, self.dropdate = [pd.to_datetime(t) for t in
(startdate, enddate, dropdate)]
self.geo, self.parallel, self.weekday, self.write_se, self.signal_name = \
geo.lower(), parallel, weekday, write_se, signal_name
# init in shift_dates, declared here for pylint
self.burnindate, self.fit_dates, self.burn_in_dates, self.output_dates = \
[None] * 4
assert (
self.startdate > (Config.FIRST_DATA_DATE + Config.BURN_IN_PERIOD)
), f"not enough data to produce estimates starting {self.startdate}"
assert self.startdate < self.enddate, "start date >= end date"
assert self.enddate <= self.dropdate, "end date > drop date"
assert (
geo in ['county', 'state', 'msa', 'hrr', 'hhs', 'nation']
), f"{geo} is invalid, pick one of 'county', 'state', 'msa', 'hrr', 'hhs', 'nation'"
def shift_dates(self):
"""
Shift estimates forward to account for time lag.
Explanation:
We will shift estimates one day forward to account for a 1 day lag. For example,
we want to produce estimates for the time range May 2 to May 20, inclusive.
Given a drop on May 20, we have data up until May 19. We then train on data from
Jan 1 until May 19, storing only the values on May 1 to May 19. we then shift
the dates forward by 1, giving us values on May 2 to May 20. We shift the
startdate back by one day in order to get the proper estimate at May 1.
"""
drange = lambda s, e: pd.date_range(start=s, periods=(e - s).days, freq='D')
self.startdate = self.startdate - Config.DAY_SHIFT
self.burnindate = self.startdate - Config.BURN_IN_PERIOD
self.fit_dates = drange(Config.FIRST_DATA_DATE, self.dropdate)
self.burn_in_dates = drange(self.burnindate, self.dropdate)
self.output_dates = drange(self.startdate, self.enddate)
def geo_reindex(self, data):
"""
Reindex dataframe based on desired output geography.
Args:
data: dataframe, the output of load_data::load_data()
Returns:
reindexed dataframe
"""
geo_map = GeoMapper()
if self.geo == "county":
data_frame = geo_map.fips_to_megacounty(data,
Config.MIN_DEN,
Config.MAX_BACKWARDS_PAD_LENGTH,
thr_col="den",
mega_col=self.geo)
elif self.geo == "state":
data_frame = geo_map.replace_geocode(data,
from_code="fips",
new_col=self.geo,
new_code="state_id")
data_frame[self.geo] = data_frame[self.geo]
elif self.geo in ["msa", "hhs", "nation"]:
data_frame = geo_map.replace_geocode(data,
from_code="fips",
new_code=self.geo)
elif self.geo == "hrr":
data_frame = data # data is already adjusted in aggregation step above
else:
logging.error(
"%s is invalid, pick one of 'county', 'state', 'msa', 'hrr', 'hhs', nation'",
self.geo)
return False
unique_geo_ids = pd.unique(data_frame[self.geo])
data_frame.set_index([self.geo, "timestamp"], inplace=True)
# for each location, fill in all missing dates with 0 values
multiindex = pd.MultiIndex.from_product((unique_geo_ids, self.fit_dates),
names=[self.geo, Config.DATE_COL])
assert (
len(multiindex) <= (GeoConstants.MAX_GEO[self.geo] * len(self.fit_dates))
), "more loc-date pairs than maximum number of geographies x number of dates"
# fill dataframe with missing dates using 0
data_frame = data_frame.reindex(multiindex, fill_value=0)
data_frame.fillna(0, inplace=True)
return data_frame
def update_indicator(self, input_filepath, outpath, logger):
"""
Generate and output indicator values.
Args:
input_filepath: path to the aggregated claims data
outpath: output path for the csv results
"""
self.shift_dates()
final_output_inds = \
(self.burn_in_dates >= self.startdate) & (self.burn_in_dates <= self.enddate)
# load data
base_geo = Config.HRR_COL if self.geo == Config.HRR_COL else Config.FIPS_COL
data = load_data(input_filepath, self.dropdate, base_geo)
data_frame = self.geo_reindex(data)
# handle if we need to adjust by weekday
wd_params = Weekday.get_params(
data_frame,
"den",
["num"],
Config.DATE_COL,
[1, 1e5],
logger,
) if self.weekday else None
# run fitting code (maybe in parallel)
rates = {}
std_errs = {}
valid_inds = {}
if not self.parallel:
for geo_id, sub_data in data_frame.groupby(level=0):
sub_data.reset_index(inplace=True)
if self.weekday:
sub_data = Weekday.calc_adjustment(
wd_params, sub_data, ["num"], Config.DATE_COL)
sub_data.set_index(Config.DATE_COL, inplace=True)
res = ClaimsHospIndicator.fit(sub_data, self.burnindate, geo_id)
res = pd.DataFrame(res)
rates[geo_id] = np.array(res.loc[final_output_inds, "rate"])
std_errs[geo_id] = np.array(res.loc[final_output_inds, "se"])
valid_inds[geo_id] = np.array(res.loc[final_output_inds, "incl"])
else:
n_cpu = min(Config.MAX_CPU_POOL, cpu_count())
logging.debug("starting pool with %d workers", n_cpu)
with Pool(n_cpu) as pool:
pool_results = []
for geo_id, sub_data in data_frame.groupby(level=0, as_index=False):
sub_data.reset_index(inplace=True)
if self.weekday:
sub_data = Weekday.calc_adjustment(
wd_params, sub_data, ["num"], Config.DATE_COL)
sub_data.set_index(Config.DATE_COL, inplace=True)
pool_results.append(
pool.apply_async(
ClaimsHospIndicator.fit,
args=(sub_data, self.burnindate, geo_id,),
)
)
pool_results = [proc.get() for proc in pool_results]
for res in pool_results:
geo_id = res["geo_id"]
res = pd.DataFrame(res)
rates[geo_id] = np.array(res.loc[final_output_inds, "rate"])
std_errs[geo_id] = np.array(res.loc[final_output_inds, "se"])
valid_inds[geo_id] = np.array(res.loc[final_output_inds, "incl"])
# write out results
unique_geo_ids = list(rates.keys())
output_dict = {
"rates": rates,
"se": std_errs,
"dates": self.output_dates,
"geo_ids": unique_geo_ids,
"geo_level": self.geo,
"include": valid_inds,
}
self.write_to_csv(output_dict, outpath)
logging.debug("wrote files to %s", outpath)
def write_to_csv(self, output_dict, output_path="./receiving"):
"""
Write values to csv.
Args:
output_dict: dictionary containing values, se, unique dates, and unique geo_id
output_path: outfile path to write the csv
"""
if self.write_se:
logging.info("========= WARNING: WRITING SEs TO %s =========",
self.signal_name)
geo_level = output_dict["geo_level"]
dates = output_dict["dates"]
geo_ids = output_dict["geo_ids"]
all_rates = output_dict["rates"]
all_se = output_dict["se"]
all_include = output_dict["include"]
out_n = 0
for i, date in enumerate(dates):
filename = "%s/%s_%s_%s.csv" % (
output_path,
(date + Config.DAY_SHIFT).strftime("%Y%m%d"),
geo_level,
self.signal_name,
)
with open(filename, "w") as outfile:
outfile.write("geo_id,val,se,direction,sample_size\n")
for geo_id in geo_ids:
val = all_rates[geo_id][i]
se = all_se[geo_id][i]
if all_include[geo_id][i]:
assert not np.isnan(val), "value for included value is nan"
assert not np.isnan(se), "se for included rate is nan"
if val > 90:
logging.warning("value suspicious, %s: %d", geo_id, val)
assert se < 5, f"se suspicious, {geo_id}: {se}"
if self.write_se:
assert val > 0 and se > 0, "p=0, std_err=0 invalid"
outfile.write(
"%s,%f,%s,%s,%s\n" % (geo_id, val, se, "NA", "NA"))
else:
# for privacy reasons we will not report the standard error
outfile.write(
"%s,%f,%s,%s,%s\n" % (geo_id, val, "NA", "NA", "NA"))
out_n += 1
logging.debug("wrote %d rows for %d %s", out_n, len(geo_ids), geo_level)
|
py | b41694b8711bf7359b35acdf1728e5462ae992ce | import random
import socket
import time
from _thread import *
import threading
from datetime import datetime
import json
clients_lock = threading.Lock()
connected = 0
xStep = 1.5
clients = {}
# this is the receiving message loop
def connectionLoop(sock):
global xStep
global connected
while True:
data, addr = sock.recvfrom(1024)
data = str(data)
if addr in clients:
# received a heartbeat
if 'heartbeat' in data:
#updates heartbeat data to make sure client is still alive
clients[addr]['lastBeat'] = datetime.now()
if 'cube_position' in data:
#print(data)
positionMessage = data[2:-1]
#print(positionMessage)
positionData = json.JSONDecoder().decode(positionMessage)
clients[addr]['position'] = positionData['position']
else:
# new client - receives a connect
if 'connect' in data:
# This deals with new connections
clients[addr] = {}
clients[addr]['lastBeat'] = datetime.now()
clients[addr]['color'] = {"R": random.random(), "G": random.random(), "B": random.random()}
# Calculate the position of the newly acquired member
finalCount = connected + 1
xCoord = (finalCount//2) * xStep
#print(xCoord)
if (int(finalCount) % 2 == 1):
xCoord = -1 * xCoord
clients[addr]['position'] = {"x": xCoord,"y":0.0, "z":0.0}
# Sends information about the newly connected client to everyone except the newly connected client itself
message = {"cmd": 0,"players":[{"id":str(addr), "color": clients[addr]['color'], "position": clients[addr]['position']}]}
m = json.dumps(message)
for c in clients:
if c != addr :
#print('NEW messsage: ')
#print(m)
#print('**************************************')
sock.sendto(bytes(m,'utf8'), (c[0],c[1]))
#print("Now going to the other message")
# Sends information of all connected clients to the newly connected client
Others = {"cmd": 2, "players": []}
for c in clients:
player = {}
player['id'] = str(c)
player['color'] = clients[c]['color']
player['position'] = clients[c]['position']
Others['players'].append(player)
oth=json.dumps(Others)
sock.sendto(bytes(oth,'utf8'), (addr[0], addr[1]))
connected += 1
# Every second, check whether clients are still alive based on their last heartbeat and drop the stale ones
def cleanClients(sock):
while True:
needToSend = False
deleteMessage = {"cmd": 3,"players":[]}
for c in list(clients.keys()):
if (datetime.now() - clients[c]['lastBeat']).total_seconds() > 5:
needToSend = True
print('Dropped Client: ', c)
player = {}
player['id'] = str(c)
player['color'] = clients[c]['color']
deleteMessage['players'].append(player)
clients_lock.acquire()
del clients[c]
clients_lock.release()
if needToSend :
print('Sending DELETE message')
dm = json.dumps(deleteMessage)
for f in clients:
sock.sendto(bytes(dm,'utf8'), (f[0],f[1]))
time.sleep(1)
# Periodically broadcasts the current game state (who else is still connected and where) to all clients
def gameLoop(sock):
while True:
# This sends the UPDATE (1) message to the client
GameState = {"cmd": 1, "players": []}
clients_lock.acquire()
#print (clients)
for c in clients:
player = {}
#Change color
#clients[c]['color'] = {"R": random.random(), "G": random.random(), "B": random.random()}
player['id'] = str(c)
player['color'] = clients[c]['color']
player['position'] = clients[c]['position']
GameState['players'].append(player)
s=json.dumps(GameState)
#print(s)
for c in clients:
sock.sendto(bytes(s,'utf8'), (c[0],c[1]))
clients_lock.release()
#time.sleep(1/3)
#time.sleep(1/10)
#time.sleep(1/30)
time.sleep(1/60)
def main():
port = 12345
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('', port))
start_new_thread(gameLoop, (s,))
start_new_thread(connectionLoop, (s,))
start_new_thread(cleanClients,(s,))
while True:
time.sleep(1)
if __name__ == '__main__':
print('Starting operations')
main()
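# Hedged example (not part of the original server): a minimal client sketch that
# exercises the protocol implemented above. It sends a 'connect' message, then
# periodic 'heartbeat' messages, and prints whatever JSON updates the server
# broadcasts (messages with "cmd" 0/1/2/3 and a "players" list). The message
# strings and the port come from the server code; host and loop count are
# placeholders.
def example_client(host='127.0.0.1', port=12345, beats=5):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(1.0)
    sock.sendto(bytes('connect', 'utf8'), (host, port))
    for _ in range(beats):
        sock.sendto(bytes('heartbeat', 'utf8'), (host, port))
        try:
            data, _addr = sock.recvfrom(4096)
            print(data.decode('utf8'))
        except socket.timeout:
            pass
        time.sleep(1)
    sock.close()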
|
py | b416965c1c1fb6e1456e0afcae681393c3e7e713 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest.mock import patch
from airflow import configuration, models
from airflow.providers.salesforce.hooks.tableau import TableauHook
from airflow.utils import db
class TestTableauHook(unittest.TestCase):
def setUp(self):
configuration.conf.load_test_config()
db.merge_conn(
models.Connection(
conn_id='tableau_test_password',
conn_type='tableau',
host='tableau',
login='user',
password='password',
extra='{"site_id": "my_site"}',
)
)
db.merge_conn(
models.Connection(
conn_id='tableau_test_token',
conn_type='tableau',
host='tableau',
extra='{"token_name": "my_token", "personal_access_token": "my_personal_access_token"}',
)
)
@patch('airflow.providers.salesforce.hooks.tableau.TableauAuth')
@patch('airflow.providers.salesforce.hooks.tableau.Server')
def test_get_conn_auth_via_password_and_site_in_connection(self, mock_server, mock_tableau_auth):
with TableauHook(tableau_conn_id='tableau_test_password') as tableau_hook:
mock_server.assert_called_once_with(tableau_hook.conn.host, use_server_version=True)
mock_tableau_auth.assert_called_once_with(
username=tableau_hook.conn.login,
password=tableau_hook.conn.password,
site_id=tableau_hook.conn.extra_dejson['site_id'],
)
mock_server.return_value.auth.sign_in.assert_called_once_with(mock_tableau_auth.return_value)
mock_server.return_value.auth.sign_out.assert_called_once_with()
@patch('airflow.providers.salesforce.hooks.tableau.PersonalAccessTokenAuth')
@patch('airflow.providers.salesforce.hooks.tableau.Server')
def test_get_conn_auth_via_token_and_site_in_init(self, mock_server, mock_tableau_auth):
with TableauHook(site_id='test', tableau_conn_id='tableau_test_token') as tableau_hook:
mock_server.assert_called_once_with(tableau_hook.conn.host, use_server_version=True)
mock_tableau_auth.assert_called_once_with(
token_name=tableau_hook.conn.extra_dejson['token_name'],
personal_access_token=tableau_hook.conn.extra_dejson['personal_access_token'],
site_id=tableau_hook.site_id,
)
mock_server.return_value.auth.sign_in_with_personal_access_token.assert_called_once_with(
mock_tableau_auth.return_value
)
mock_server.return_value.auth.sign_out.assert_called_once_with()
@patch('airflow.providers.salesforce.hooks.tableau.TableauAuth')
@patch('airflow.providers.salesforce.hooks.tableau.Server')
@patch('airflow.providers.salesforce.hooks.tableau.Pager', return_value=[1, 2, 3])
def test_get_all(self, mock_pager, mock_server, mock_tableau_auth): # pylint: disable=unused-argument
with TableauHook(tableau_conn_id='tableau_test_password') as tableau_hook:
jobs = tableau_hook.get_all(resource_name='jobs')
self.assertEqual(jobs, mock_pager.return_value)
mock_pager.assert_called_once_with(mock_server.return_value.jobs.get)
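# Hedged usage sketch (not part of the test suite): outside of the mocks above, the
# hook is used the same way the tests exercise it, as a context manager that signs in
# on entry and signs out on exit. The connection id below reuses the test fixture's;
# a real deployment would point at its own Airflow connection.
def example_list_jobs(conn_id='tableau_test_password'):
    with TableauHook(tableau_conn_id=conn_id) as tableau_hook:
        # get_all pages through every resource of the given type, as in test_get_all.
        return list(tableau_hook.get_all(resource_name='jobs'))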
|
py | b4169776b7dd74a2c11dade0b977c456b4ff681c | #!/usr/bin/env python
# https://www.brendanlong.com/the-structure-of-an-mpeg-dash-mpd.html
import os.path
import time
import logging
import argparse
import requests
import xml.etree.ElementTree
import copy
from termcolor import colored
logging.VERBOSE = (logging.INFO + logging.DEBUG) // 2
logger = logging.getLogger("dash-proxy")
ns = {"mpd": "urn:mpeg:dash:schema:mpd:2011"}
class Formatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
super(Formatter, self).__init__(fmt, datefmt)
def format(self, record):
color = None
if record.levelno == logging.ERROR:
color = "red"
if record.levelno == logging.INFO:
color = "green"
if record.levelno == logging.WARNING:
color = "yellow"
if color:
return colored(record.msg, color)
else:
return record.msg
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = Formatter()
ch.setFormatter(formatter)
logger.addHandler(ch)
def baseUrl(url):
idx = url.rfind("/")
if idx >= 0:
return url[: idx + 1]
else:
return url
class RepAddr(object):
def __init__(self, period_idx, adaptation_set_idx, representation_idx):
self.period_idx = period_idx
self.adaptation_set_idx = adaptation_set_idx
self.representation_idx = representation_idx
def __str__(self):
return "Representation (period=%d adaptation-set=%d representation=%d)" % (
self.period_idx,
self.adaptation_set_idx,
self.representation_idx,
)
class MpdLocator(object):
def __init__(self, mpd):
self.mpd = mpd
def base_url(self, rep_addr):
return self.representation(rep_addr).find("mpd:BaseURL", ns)
def representation(self, rep_addr):
return self.adaptation_set(rep_addr).findall("mpd:Representation", ns)[
rep_addr.representation_idx
]
def segment_template(self, rep_addr):
rep_st = self.representation(rep_addr).find("mpd:SegmentTemplate", ns)
if rep_st is not None:
return rep_st
else:
return self.adaptation_set(rep_addr).find("mpd:SegmentTemplate", ns)
def segment_timeline(self, rep_addr):
return self.segment_template(rep_addr).find("mpd:SegmentTimeline", ns)
def adaptation_set(self, rep_addr):
return self.mpd.findall("mpd:Period", ns)[rep_addr.period_idx].findall(
"mpd:AdaptationSet", ns
)[rep_addr.adaptation_set_idx]
class HasLogger(object):
def verbose(self, msg):
self.logger.log(logging.VERBOSE, msg)
def info(self, msg):
self.logger.log(logging.INFO, msg)
def debug(self, msg):
self.logger.log(logging.DEBUG, msg)
def warning(self, msg):
self.logger.log(logging.WARNING, msg)
def error(self, msg):
self.logger.log(logging.ERROR, msg)
class DashProxy(HasLogger):
retry_interval = 10
def __init__(self, mpd, output_dir, download, save_mpds=False):
self.logger = logger
self.mpd = mpd
self.output_dir = output_dir
self.download = download
self.save_mpds = save_mpds
self.i_refresh = 0
self.downloaders = {}
def run(self):
logger.log(
logging.INFO,
"Running dash proxy for stream %s. Output goes in %s"
% (self.mpd, self.output_dir),
)
self.refresh_mpd()
def refresh_mpd(self, after=0):
self.i_refresh += 1
if after > 0:
time.sleep(after)
r = requests.get(self.mpd)
if r.status_code < 200 or r.status_code >= 300:
logger.log(
logging.WARNING,
"Cannot GET the MPD. Server returned %s. Retrying after %ds"
% (r.status_code, self.retry_interval),
)
self.refresh_mpd(after=self.retry_interval)
return
xml.etree.ElementTree.register_namespace("", ns["mpd"])
mpd = xml.etree.ElementTree.fromstring(r.text)
self.handle_mpd(mpd)
def get_base_url(self, mpd):
base_url = baseUrl(self.mpd)
location = mpd.find("mpd:Location", ns)
if location is not None:
base_url = baseUrl(location.text)
baseUrlNode = mpd.find("mpd:BaseUrl", ns)
if baseUrlNode:
if baseUrlNode.text.startswith("http://") or baseUrlNode.text.startswith(
"https://"
):
base_url = baseUrl(baseUrlNode.text)
else:
base_url = base_url + baseUrlNode.text
return base_url
def handle_mpd(self, mpd):
original_mpd = copy.deepcopy(mpd)
periods = mpd.findall("mpd:Period", ns)
logger.log(logging.INFO, "mpd=%s" % (periods,))
logger.log(
logging.VERBOSE, "Found %d periods choosing the 1st one" % (len(periods),)
)
period = periods[0]
for as_idx, adaptation_set in enumerate(
period.findall("mpd:AdaptationSet", ns)
):
for rep_idx, representation in enumerate(
adaptation_set.findall("mpd:Representation", ns)
):
self.verbose(
"Found representation with id %s"
% (representation.attrib.get("id", "UKN"),)
)
rep_addr = RepAddr(0, as_idx, rep_idx)
self.ensure_downloader(mpd, rep_addr)
self.write_output_mpd(original_mpd)
minimum_update_period = mpd.attrib.get("minimumUpdatePeriod", "")
if minimum_update_period:
# TODO parse minimum_update_period
self.refresh_mpd(after=10)
else:
self.info("VOD MPD. Nothing more to do. Stopping...")
def ensure_downloader(self, mpd, rep_addr):
if rep_addr in self.downloaders:
self.verbose("A downloader for %s already started" % (rep_addr,))
else:
self.info("Starting a downloader for %s" % (rep_addr,))
downloader = DashDownloader(self, rep_addr)
self.downloaders[rep_addr] = downloader
downloader.handle_mpd(mpd, self.get_base_url(mpd))
def write_output_mpd(self, mpd):
self.info("Writing the update MPD file")
content = xml.etree.ElementTree.tostring(mpd, encoding="utf-8").decode("utf-8")
dest = os.path.join(self.output_dir, "manifest.mpd")
with open(dest, "wt") as f:
f.write(content)
if self.save_mpds:
dest = os.path.join(
self.output_dir, "manifest.{}.mpd".format(self.i_refresh)
)
with open(dest, "wt") as f:
f.write(content)
class DashDownloader(HasLogger):
def __init__(self, proxy, rep_addr):
self.logger = logger
self.proxy = proxy
self.rep_addr = rep_addr
self.mpd_base_url = ""
self.initialization_downloaded = False
def download_single(self, rep_addr):
dest = self.mpd.base_url(self.rep_addr).text
dest_url = self.full_url(dest)
self.info("download_single requesting %s from %s" % (dest, dest_url))
r = requests.get(dest_url)
if r.status_code >= 200 and r.status_code < 300:
self.write(dest, r.content)
else:
self.error(
"cannot download %s server returned %d" % (dest_url, r.status_code)
)
def handle_mpd(self, mpd, base_url):
self.mpd_base_url = base_url
self.mpd = MpdLocator(mpd)
rep = self.mpd.representation(self.rep_addr)
segment_template = self.mpd.segment_template(self.rep_addr)
if segment_template is None:
self.download_single(self.rep_addr)
return
segment_timeline = self.mpd.segment_timeline(self.rep_addr)
initialization_template = segment_template.attrib.get("initialization", "")
if initialization_template and not self.initialization_downloaded:
self.initialization_downloaded = True
self.download_template(initialization_template, rep)
segments = copy.deepcopy(segment_timeline.findall("mpd:S", ns))
for segment in segment_timeline.findall("mpd:S", ns):
segment_timeline.remove(segment)
idx = 0
for segment in segments:
duration = int(segment.attrib.get("d", "0"))
repeat = int(segment.attrib.get("r", "0"))
for _ in range(0, repeat + 1):
elem = xml.etree.ElementTree.Element(
"{urn:mpeg:dash:schema:mpd:2011}S",
attrib={"d": duration, "i": idx + 1},
)
segment_timeline.insert(idx, elem)
self.verbose("appending a new elem")
idx = idx + 1
media_template = segment_template.attrib.get("media", "")
next_time = 0
for segment in segment_timeline.findall("mpd:S", ns):
current_time = int(segment.attrib.get("t", "-1"))
if current_time == -1:
segment.attrib["t"] = next_time
else:
next_time = current_time
next_time += int(segment.attrib.get("d", "0"))
self.download_template(media_template, rep, segment)
def download_template(self, template, representation=None, segment=None):
dest = self.render_template(template, representation, segment)
dest_url = self.full_url(dest)
self.info("requesting %s from %s" % (dest, dest_url))
r = requests.get(dest_url)
if r.status_code >= 200 and r.status_code < 300:
self.write(dest, r.content)
else:
self.error(
"cannot download %s server returned %d" % (dest_url, r.status_code)
)
def render_template(self, template, representation=None, segment=None):
template = template.replace("$RepresentationID$", "{representation_id}")
template = template.replace("$Time$", "{time}")
template = template.replace(
"$Number%05d$", "{number}"
) # TODO printf format width: %0[width]d (ISO/IEC 23009-1:2014(E))
template = template.replace("$Number$", "{number}")
args = {}
if representation is not None:
args["representation_id"] = representation.attrib.get("id", "")
if segment is not None:
args["time"] = segment.attrib.get("t", "")
args["number"] = segment.attrib.get("i", "")
template = template.format(**args)
return template
def full_url(self, dest):
return self.mpd_base_url + dest # TODO remove hardcoded addr
def write(self, dest, content):
dest = dest.split("?")[0]
dest = os.path.join(self.proxy.output_dir, dest)
dest_dir = os.path.dirname(dest)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
f = open(dest, "wb")
f.write(content)
f.close()
def run(args):
logger.setLevel(logging.VERBOSE if args.v else logging.INFO)
proxy = DashProxy(
mpd=args.mpd,
output_dir=args.o,
download=args.d,
save_mpds=args.save_individual_mpds,
)
return proxy.run()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("mpd")
parser.add_argument("-v", action="store_true")
parser.add_argument("-d", action="store_true")
parser.add_argument("-o", default=".")
parser.add_argument("--save-individual-mpds", action="store_true")
args = parser.parse_args()
run(args)
if __name__ == "__main__":
main()
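# Hedged usage sketch (not part of the original script). The CLI defined in main()
# above takes a positional MPD URL, -v for verbose logging, -d to download, -o for
# the output directory, and --save-individual-mpds to keep every refreshed manifest;
# the URL below is a placeholder:
#
#   python <this script> http://example.com/stream/manifest.mpd -o ./out -v
#
# The segment-name substitution done by DashDownloader.render_template() can also be
# checked in isolation; the template and values here are made up for illustration.
def _example_render_template():
    template = "chunk-$RepresentationID$-$Number$.m4s"
    template = template.replace("$RepresentationID$", "{representation_id}")
    template = template.replace("$Number$", "{number}")
    # Yields 'chunk-video_1-42.m4s'
    return template.format(representation_id="video_1", number=42)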
|
py | b41697c966deea57dd9bfb3b4cc288ed57bec300 | # Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Installation script for Oppia python backend libraries."""
from __future__ import absolute_import
from __future__ import unicode_literals
import collections
import json
import os
import re
import shutil
import subprocess
import sys
import python_utils
from scripts import common
import utils
import pkg_resources
OPPIA_REQUIRED_PIP_VERSION = '20.3.4'
GIT_DIRECT_URL_REQUIREMENT_PATTERN = (
# NOTE: Direct URLs to GitHub must specify a specific commit hash in their
# definition. This helps stabilize the implementation we depend upon.
re.compile(r'^(git\+git://github\.com/.*?@[0-9a-f]{40})#egg=([^\s]*)'))
def normalize_python_library_name(library_name):
"""Returns a normalized version of the python library name.
Normalization of a library name means converting the library name to
lowercase, and removing any "[...]" suffixes that occur. The reason we do
this is because of 2 potential confusions when comparing library names that
will cause this script to find incorrect mismatches.
1. Python library name strings are case-insensitive, which means that
libraries are considered equivalent even if the casing of the library
names is different.
2. There are certain python libraries with a default version and multiple
variants. These variants have names like `library[sub-library]` and
signify that it is a version of the library with special support for
the sub-library. These variants can be considered equivalent to an
individual developer and project because at any point in time, only one
of these variants is allowed to be installed/used in a project.
Here are some examples of ambiguities that this function resolves:
- 'googleappenginemapreduce' is listed in the 'requirements.txt' file as
all lowercase. However, the installed directories have names starting with
the string 'GoogleAppEngineMapReduce'. This causes confusion when
searching for mismatches because python treats the two library names as
different even though they are equivalent.
- If the name 'google-api-core[grpc]' is listed in the 'requirements.txt'
file, this means that a variant of the 'google-api-core' package that
supports grpc is required. However, the import names, the package
directory names, and the metadata directory names of the installed package
do not actually contain the sub-library identifier. This causes
incorrect mismatches to be found because the script treats the installed
package's library name, 'library', differently from the 'requirements.txt'
listed library name, 'library[sub-library]'
Args:
library_name: str. The library name to be normalized.
Returns:
str. A normalized library name.
"""
# Remove the special support package designation (e.g [grpc]) in the
# brackets when parsing the requirements file to resolve confusion 2 in the
# docstring.
# NOTE: This does not cause ambiguities because there is no viable scenario
# where both the library and a variant of the library exist in the
# directory. Both the default version and the variant are imported in the
# same way (e.g import google.api.core) and if pip allowed a scenario where
# both versions were installed, then there would be ambiguities in the
# imports. For this reason, it is safe to disambiguate the names by removing
# the suffix. We have also implemented the backend tests,
# test_uniqueness_of_lib_names_in_requirements_file and
# test_uniqueness_of_lib_names_in_compiled_requirements_file, in
# scripts/install_backend_python_libs_test.py to ensure that all
# library names in the requirements files are distinct when normalized.
library_name = re.sub(r'\[[^\[^\]]+\]', '', library_name)
return library_name.lower()
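# Illustrative sketch (added for clarity, not called anywhere in this script): the
# normalization above collapses both casing differences and "[extra]" variants onto
# a single canonical key, which is what the rest of this module compares on. The
# example names come from the docstring.
def _example_normalize_python_library_name():
    assert normalize_python_library_name('Flask') == 'flask'
    assert normalize_python_library_name('google-api-core[grpc]') == 'google-api-core'
    assert normalize_python_library_name(
        'GoogleAppEngineMapReduce') == 'googleappenginemapreduce'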
def normalize_directory_name(directory_name):
"""Returns a normalized (lowercase) version of the directory name.
Python library name strings are case insensitive which means that
libraries are equivalent even if the casing of the library names are
different. When python libraries are installed, the generated metadata
directories also use the python library names as part of the directory name.
This function normalizes directory names so that metadata directories with
different case won't be treated as different in code. For example,
`GoogleAppEnginePipeline-1.9.22.1.dist-info` and
`googleappenginepipeline-1.9.22.1.dist-info` are equivalent, although their
names are not the same. To make sure these two directory names are
considered equal, we use this method to enforce that all directory names are
lowercase.
Args:
directory_name: str. The directory name to be normalized.
Returns:
str. A normalized directory name string that is all lowercase.
"""
return directory_name.lower()
def _get_requirements_file_contents():
"""Returns a dictionary containing all of the required normalized library
names with their corresponding version strings listed in the
'requirements.txt' file.
Returns:
dict(str, str). Dictionary with the normalized name of the library as
the key and the version string of that library as the value.
"""
requirements_contents = collections.defaultdict()
with python_utils.open_file(
common.COMPILED_REQUIREMENTS_FILE_PATH, 'r') as f:
trimmed_lines = (line.strip() for line in f.readlines())
for line_num, line in enumerate(trimmed_lines, start=1):
if not line or line.startswith('#'):
continue
elif line.startswith('git'):
match = GIT_DIRECT_URL_REQUIREMENT_PATTERN.match(line)
if not match:
raise Exception(
'%r on line %d of %s does not match '
'GIT_DIRECT_URL_REQUIREMENT_PATTERN=%r' % (
line, line_num,
common.COMPILED_REQUIREMENTS_FILE_PATH,
GIT_DIRECT_URL_REQUIREMENT_PATTERN.pattern))
library_name, version_string = match.group(2, 1)
else:
library_name, version_string = line.split(' ')[0].split('==')
# Libraries with different case are considered equivalent libraries:
# e.g 'Flask' is the same library as 'flask'. Therefore, we
# normalize all library names in order to compare libraries without
# ambiguities.
normalized_library_name = (
normalize_python_library_name(library_name))
requirements_contents[normalized_library_name] = version_string
return requirements_contents
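# Hedged sketch (not used by the script): the two kinds of requirement lines handled
# above and the (library, version) pairs they produce. The example strings are made
# up but follow the formats the parser expects.
def _example_requirements_line_parsing():
    pinned = 'Flask==1.1.1 --hash=sha256:abcdef'
    library_name, version_string = pinned.split(' ')[0].split('==')
    assert (normalize_python_library_name(library_name), version_string) == (
        'flask', '1.1.1')
    direct = 'git+git://github.com/example/repo@' + '0' * 40 + '#egg=example-lib'
    match = GIT_DIRECT_URL_REQUIREMENT_PATTERN.match(direct)
    # group(2) is the library name, group(1) the pinned git URL.
    assert match.group(2) == 'example-lib'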
def _get_third_party_python_libs_directory_contents():
"""Returns a dictionary containing all of the normalized libraries name
strings with their corresponding version strings installed in the
'third_party/python_libs' directory.
Returns:
dict(str, str). Dictionary with the normalized name of the library
installed as the key and the version string of that library as the
value.
"""
direct_url_packages, standard_packages = utils.partition(
pkg_resources.find_distributions(common.THIRD_PARTY_PYTHON_LIBS_DIR),
predicate=lambda dist: dist.has_metadata('direct_url.json'))
installed_packages = {
pkg.project_name: pkg.version for pkg in standard_packages
}
for pkg in direct_url_packages:
metadata = json.loads(pkg.get_metadata('direct_url.json'))
version_string = '%s+%s@%s' % (
metadata['vcs_info']['vcs'], metadata['url'],
metadata['vcs_info']['commit_id'])
installed_packages[pkg.project_name] = version_string
# Libraries with different case are considered equivalent libraries:
# e.g 'Flask' is the same library as 'flask'. Therefore, we
# normalize all library names in order to compare libraries without
# ambiguities.
directory_contents = {
normalize_python_library_name(library_name): version_string
for library_name, version_string in installed_packages.items()
}
return directory_contents
def _remove_metadata(library_name, version_string):
"""Removes the residual metadata files pertaining to a specific library that
was reinstalled with a new version. The reason we need this function is
because `pip install --upgrade` upgrades libraries to a new version but
does not remove the metadata that was installed with the previous version.
These metadata files confuse the pkg_resources function that extracts all of
the information about the currently installed python libraries and causes
this installation script to behave incorrectly.
Args:
library_name: str. Name of the library to remove the metadata for.
version_string: str. Stringified version of the library to remove the
metadata for.
"""
possible_normalized_directory_names = (
_get_possible_normalized_metadata_directory_names(
library_name, version_string))
normalized_directory_names = [
normalize_directory_name(name)
for name in os.listdir(common.THIRD_PARTY_PYTHON_LIBS_DIR)
if os.path.isdir(
os.path.join(common.THIRD_PARTY_PYTHON_LIBS_DIR, name))
]
for normalized_directory_name in normalized_directory_names:
# Python metadata directory names contain a python library name that
# does not have uniform case. However, python libraries are equivalent
# regardless of their case. Therefore, in order to check if a python
# library's metadata exists in a directory, we need to normalize the
# directory name. Otherwise, we would need to check every permutation of
# the casing for metadata directories generated with the naming
# convention: <library_name>-<library-version>.
if normalized_directory_name in possible_normalized_directory_names:
path_to_delete = os.path.join(
common.THIRD_PARTY_PYTHON_LIBS_DIR, normalized_directory_name)
shutil.rmtree(path_to_delete)
def _rectify_third_party_directory(mismatches):
"""Rectifies the 'third_party/python_libs' directory state to reflect the
current 'requirements.txt' file requirements. It takes a list of mismatches
and corrects those mismatches by installing or uninstalling packages.
Args:
mismatches: dict(str, tuple(str|None, str|None)). Dictionary
with the normalized library names as keys and a tuple as values. The
1st element of the tuple is the version string of the library
required by the requirements.txt file while the 2nd element is the
version string of the library currently installed in the
'third_party/python_libs' directory. If the library doesn't exist,
the corresponding tuple element will be None. For example, this
dictionary signifies that 'requirements.txt' requires flask with
version 1.0.1 while the 'third_party/python_libs' directory contains
flask 1.1.1:
{
flask: ('1.0.1', '1.1.1')
}
"""
# Handling 5 or more mismatches requires 5 or more individual `pip install`
# commands, which is slower than just reinstalling all of the libraries
# using `pip install -r requirements.txt`.
if len(mismatches) >= 5:
if os.path.isdir(common.THIRD_PARTY_PYTHON_LIBS_DIR):
shutil.rmtree(common.THIRD_PARTY_PYTHON_LIBS_DIR)
_reinstall_all_dependencies()
return
# The library is installed in the directory but is not listed in
# requirements. We don't have functionality to remove a library cleanly, and
# if we ignore the library, this might cause issues when pushing the branch
# to develop as there might be possible hidden use cases of a deleted
# library that the developer did not catch. The only way to enforce the
# removal of a library is to clean out the folder and reinstall everything
# from scratch.
if any(required is None for required, _ in mismatches.values()):
if os.path.isdir(common.THIRD_PARTY_PYTHON_LIBS_DIR):
shutil.rmtree(common.THIRD_PARTY_PYTHON_LIBS_DIR)
_reinstall_all_dependencies()
return
git_mismatches, pip_mismatches = (
utils.partition(mismatches.items(), predicate=_is_git_url_mismatch))
for normalized_library_name, versions in git_mismatches:
requirements_version, directory_version = versions
# The library listed in 'requirements.txt' is not in the
# 'third_party/python_libs' directory.
if not directory_version or requirements_version != directory_version:
_install_direct_url(normalized_library_name, requirements_version)
for normalized_library_name, versions in pip_mismatches:
requirements_version = (
pkg_resources.parse_version(versions[0]) if versions[0] else None)
directory_version = (
pkg_resources.parse_version(versions[1]) if versions[1] else None)
# The library listed in 'requirements.txt' is not in the
# 'third_party/python_libs' directory.
if not directory_version:
_install_library(
normalized_library_name,
python_utils.convert_to_bytes(requirements_version))
# The currently installed library version is not equal to the required
# 'requirements.txt' version.
elif requirements_version != directory_version:
_install_library(
normalized_library_name,
python_utils.convert_to_bytes(requirements_version))
_remove_metadata(
normalized_library_name,
python_utils.convert_to_bytes(directory_version))
def _is_git_url_mismatch(mismatch_item):
"""Returns whether the given mismatch item is for a GitHub URL."""
_, (required, _) = mismatch_item
return required.startswith('git')
def _install_direct_url(library_name, direct_url):
"""Installs a direct URL to GitHub into the third_party/python_libs folder.
Args:
library_name: str. Name of the library to install.
direct_url: str. Full definition of the URL to install. Must match
GIT_DIRECT_URL_REQUIREMENT_PATTERN.
"""
pip_install(
'%s#egg=%s' % (direct_url, library_name),
common.THIRD_PARTY_PYTHON_LIBS_DIR,
upgrade=True,
no_dependencies=True)
def _get_pip_versioned_package_string(library_name, version_string):
"""Returns the standard 'library==version' string for the given values.
Args:
library_name: str. The normalized name of the library.
version_string: str. The version of the package as a string.
Returns:
str. The standard versioned library package name.
"""
return '%s==%s' % (library_name, version_string)
def _install_library(library_name, version_string):
"""Installs a library with a certain version to the
'third_party/python_libs' folder.
Args:
library_name: str. Name of the library to install.
version_string: str. Stringified version of the library to install.
"""
pip_install(
_get_pip_versioned_package_string(library_name, version_string),
common.THIRD_PARTY_PYTHON_LIBS_DIR,
upgrade=True,
no_dependencies=True
)
def _reinstall_all_dependencies():
"""Reinstalls all of the libraries detailed in the compiled
'requirements.txt' file to the 'third_party/python_libs' folder.
"""
_pip_install_requirements(
common.THIRD_PARTY_PYTHON_LIBS_DIR,
common.COMPILED_REQUIREMENTS_FILE_PATH
)
def _get_possible_normalized_metadata_directory_names(
library_name, version_string):
"""Returns possible normalized metadata directory names for python libraries
installed using pip (following the guidelines of PEP-427 and PEP-376).
This ensures that our _remove_metadata() function works as intended. More
details about the guidelines concerning the metadata folders can be found
here:
https://www.python.org/dev/peps/pep-0427/#file-contents
https://www.python.org/dev/peps/pep-0376/#how-distributions-are-installed.
Args:
library_name: str. Name of the library.
version_string: str. Stringified version of the library.
Returns:
set(str). Set containing the possible normalized directory name strings
of metadata folders.
"""
# Some metadata folders replace the hyphens in the library name with
# underscores.
# TODO(#11474): The '-py2.7' suffix might be used in some metadata directory
# names, this will need to be changed after the Python 3 migration.
return {
normalize_directory_name(
'%s-%s.dist-info' % (library_name, version_string)),
normalize_directory_name(
'%s-%s.dist-info' % (
library_name.replace('-', '_'), version_string)),
normalize_directory_name(
'%s-%s.egg-info' % (library_name, version_string)),
normalize_directory_name(
'%s-%s.egg-info' % (
library_name.replace('-', '_'), version_string)),
normalize_directory_name(
'%s-%s-py2.7.egg-info' % (library_name, version_string)),
normalize_directory_name(
'%s-%s-py2.7.egg-info' % (
library_name.replace('-', '_'), version_string)),
}
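# Hedged example (illustration only): for a hypothetical library "Funky-Lib" at
# version 1.2.3, the helper above yields the lowercase dist-info / egg-info
# directory names that _remove_metadata() searches for.
def _example_metadata_directory_names():
    names = _get_possible_normalized_metadata_directory_names('Funky-Lib', '1.2.3')
    assert 'funky-lib-1.2.3.dist-info' in names
    assert 'funky_lib-1.2.3.egg-info' in names
    assert 'funky-lib-1.2.3-py2.7.egg-info' in names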
def verify_pip_is_installed():
"""Verify that pip is installed.
Raises:
ImportError. Error importing pip.
"""
python_utils.PRINT('Checking if pip is installed on the local machine')
try:
import pip
except ImportError as e:
common.print_each_string_after_two_new_lines([
'Pip is required to install Oppia dependencies, but pip wasn\'t '
'found on your local machine.',
'Please see \'Installing Oppia\' on the Oppia developers\' wiki '
'page:'])
if common.is_mac_os():
python_utils.PRINT(
'https://github.com/oppia/oppia/wiki/Installing-Oppia-%28Mac-'
'OS%29')
elif common.is_linux_os():
python_utils.PRINT(
'https://github.com/oppia/oppia/wiki/Installing-Oppia-%28Linux'
'%29')
else:
python_utils.PRINT(
'https://github.com/oppia/oppia/wiki/Installing-Oppia-%28'
'Windows%29')
raise ImportError('Error importing pip: %s' % e)
else:
if pip.__version__ != OPPIA_REQUIRED_PIP_VERSION:
common.print_each_string_after_two_new_lines([
'Oppia requires pip==%s, but you have pip==%s installed.' % (
OPPIA_REQUIRED_PIP_VERSION, pip.__version__),
'Upgrading pip on your behalf...',
])
_run_pip_command(
['install', 'pip==%s' % OPPIA_REQUIRED_PIP_VERSION])
def _run_pip_command(cmd_parts):
"""Run pip command with some flags and configs. If it fails try to rerun it
with additional flags and else raise an exception.
Args:
cmd_parts: list(str). List of cmd parts to be run with pip.
Raises:
Exception. Error installing package.
"""
# The call to python -m is used to ensure that Python and Pip versions are
# compatible.
command = [sys.executable, '-m', 'pip'] + cmd_parts
process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode == 0:
python_utils.PRINT(stdout)
elif 'can\'t combine user with prefix' in stderr:
python_utils.PRINT('Trying by setting --user and --prefix flags.')
subprocess.check_call(
command + ['--user', '--prefix=', '--system'])
else:
python_utils.PRINT(stderr)
python_utils.PRINT(
'Refer to https://github.com/oppia/oppia/wiki/Troubleshooting')
raise Exception('Error installing package')
def pip_install_to_system(package, version):
"""Installs third party libraries with pip to the user's system.
Note: These libraries are installed to the user's default system-wide
'site-packages' folder, not to a local Oppia third-party directory. This is
ONLY required in very specific cases where the development server scripts
require default libraries. (When running another python script using
the shell, the call stack that is instantiated for that python script cannot
be edited by us; therefore, we have no control over which system paths the
script visits when it looks for libraries and can only install those
necessary libraries to the default system path.)
In general, please DO NOT use this method when installing packages required
for oppia. Use pip_install instead.
Args:
package: str. The package name.
version: str. The package version.
"""
verify_pip_is_installed()
_run_pip_command(
['install', _get_pip_versioned_package_string(package, version)])
def pip_install(
versioned_package, install_path, upgrade=False, no_dependencies=False):
"""Installs third party libraries with pip to a specific path.
Args:
versioned_package: str. A 'lib==version' formatted string.
install_path: str. The installation path for the package.
upgrade: bool. Whether to call pip with the --upgrade flag.
no_dependencies: bool. Whether to call pip with the --no-dependencies flag.
"""
verify_pip_is_installed()
additional_pip_args = []
if upgrade:
additional_pip_args.append('--upgrade')
if no_dependencies:
additional_pip_args.append('--no-dependencies')
_run_pip_command([
'install', versioned_package, '--target', install_path
] + additional_pip_args)
def _pip_install_requirements(install_path, requirements_path):
"""Installs third party libraries from requirements files with pip.
Args:
install_path: str. The installation path for the packages.
requirements_path: str. The path to the requirements file.
"""
verify_pip_is_installed()
_run_pip_command([
'install', '--target', install_path, '--no-dependencies',
'-r', requirements_path, '--upgrade'
])
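# Hedged sketch (illustration only): the full command that _pip_install_requirements
# ends up running, since _run_pip_command prepends "<python> -m pip" to the parts it
# is given. The paths here are placeholders.
def _example_pip_command_parts(install_path='third_party/python_libs',
                               requirements_path='requirements.txt'):
    return [sys.executable, '-m', 'pip', 'install', '--target', install_path,
            '--no-dependencies', '-r', requirements_path, '--upgrade']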
def get_mismatches():
"""Returns a dictionary containing mismatches between the 'requirements.txt'
file and the 'third_party/python_libs' directory. Mismatches are defined as
the following inconsistencies:
1. A library exists in the requirements file but is not installed in the
'third_party/python_libs' directory.
2. A library is installed in the 'third_party/python_libs'
directory but it is not listed in the requirements file.
3. The library version installed is not as recent as the library version
listed in the requirements file.
4. The library version installed is more recent than the library version
listed in the requirements file.
Returns:
dict(str, tuple(str|None, str|None)). Dictionary with the
library names as keys and tuples as values. The 1st element of the
tuple is the version string of the library required by the
requirements.txt file while the 2nd element is the version string of
the library currently in the 'third_party/python_libs' directory. If
the library doesn't exist, the corresponding tuple element will be None.
For example, the following dictionary signifies that 'requirements.txt'
requires flask with version 1.0.1 while the 'third_party/python_libs'
directory contains flask 1.1.1 (or mismatch 4 above):
{
flask: ('1.0.1', '1.1.1')
}
"""
requirements_contents = _get_requirements_file_contents()
directory_contents = _get_third_party_python_libs_directory_contents()
mismatches = {}
for normalized_library_name in requirements_contents:
# Library exists in the directory and the requirements file.
if normalized_library_name in directory_contents:
# Library matches but version doesn't match.
if (directory_contents[normalized_library_name] !=
requirements_contents[normalized_library_name]):
mismatches[normalized_library_name] = (
requirements_contents[normalized_library_name],
directory_contents[normalized_library_name])
# Library exists in the requirements file but not in the directory.
else:
mismatches[normalized_library_name] = (
requirements_contents[normalized_library_name], None)
for normalized_library_name in directory_contents:
# Library exists in the directory but is not in the requirements file.
if normalized_library_name not in requirements_contents:
mismatches[normalized_library_name] = (
None, directory_contents[normalized_library_name])
return mismatches
def validate_metadata_directories():
"""Validates that each library installed in the 'third_party/python_libs'
has a corresponding metadata directory following the correct naming
conventions detailed in PEP-427, PEP-376, and common Python guidelines.
Raises:
Exception. An installed library's metadata does not exist in the
'third_party/python_libs' directory in the format which we expect
(following the PEP-427 and PEP-376 python guidelines).
"""
directory_contents = _get_third_party_python_libs_directory_contents()
# Each python metadata directory name contains a python library name that
# does not have uniform case. This is because we cannot guarantee the casing
# of the directory names generated and there are no options that we can
# provide to `pip install` to actually guarantee that a certain casing
# format is used to create the directory names. The only official guidelines
# for naming directories is that it must start with the string:
# '<library_name>-<library-version>' but no casing guidelines are specified.
# Therefore, in order to efficiently check if a python library's metadata
# exists in a directory, we need to normalize the directory name. Otherwise,
# we would need to check every permutation of the casing.
normalized_directory_names = {
normalize_directory_name(name)
for name in os.listdir(common.THIRD_PARTY_PYTHON_LIBS_DIR)
if os.path.isdir(os.path.join(common.THIRD_PARTY_PYTHON_LIBS_DIR, name))
}
for normalized_library_name, version_string in directory_contents.items():
# Direct URL libraries are guaranteed to have metadata directories,
# because that's how _get_third_party_python_libs_directory_contents
# obtains the version_string being checked here.
if version_string.startswith('git+'):
continue
# Possible names of the metadata directory installed when <library_name>
# is installed.
possible_normalized_directory_names = (
_get_possible_normalized_metadata_directory_names(
normalized_library_name, version_string))
# If any of the possible metadata directory names show up in the
# directory, that is confirmation that <library_name> was installed
# correctly with the correct metadata.
if not any(
normalized_directory_name in normalized_directory_names
for normalized_directory_name in
possible_normalized_directory_names):
raise Exception(
'The python library %s was installed without the correct '
'metadata folders which may indicate that the convention for '
'naming the metadata folders have changed. Please go to '
'`scripts/install_backend_python_libs` and modify our '
'assumptions in the '
'_get_possible_normalized_metadata_directory_names'
' function for what metadata directory names can be.' %
normalized_library_name)
def main():
"""Compares the state of the current 'third_party/python_libs' directory to
the libraries listed in the 'requirements.txt' file. If there are
mismatches, regenerate the 'requirements.txt' file and correct the
mismatches.
"""
verify_pip_is_installed()
python_utils.PRINT('Regenerating "requirements.txt" file...')
# Calls the script to regenerate requirements. The reason we cannot call the
# regenerate requirements functionality inline is because the python script
# that regenerates the file is a command-line interface (CLI). Once the CLI
# finishes execution, it forces itself and any python scripts in the current
# callstack to exit.
# Therefore, in order to allow continued execution after the requirements
# file is generated, we must call it as a separate process.
subprocess.check_call(
['python', '-m', 'scripts.regenerate_requirements'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
# Adds a note to the beginning of the 'requirements.txt' file to make sure
# developers understand that they should not append or change this
# autogenerated file.
with python_utils.open_file(
common.COMPILED_REQUIREMENTS_FILE_PATH, 'r+') as f:
content = f.read()
f.seek(0, 0)
f.write(
'# Developers: Please do not modify this auto-generated file. If\n'
'# you want to add, remove, upgrade, or downgrade libraries,\n'
'# please change the `requirements.in` file, and then follow\n'
'# the instructions there to regenerate this file.\n' + content)
mismatches = get_mismatches()
if mismatches:
_rectify_third_party_directory(mismatches)
validate_metadata_directories()
else:
python_utils.PRINT(
'All third-party Python libraries are already installed correctly.')
# The 'no coverage' pragma is used as this line is un-testable. This is because
# it will only be called when install_third_party_libs.py is used as a script.
if __name__ == '__main__': # pragma: no cover
main()
|
py | b4169995e996b17dda3dc09f2e5961d77e045d87 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Command-line client for managing jobs with the Aurora scheduler.
"""
from __future__ import print_function
import json
import os
import pprint
import subprocess
import sys
import time
from datetime import datetime
from tempfile import NamedTemporaryFile
from twitter.common import app, log
from twitter.common.python.pex import PexInfo
from apache.aurora.client.api.disambiguator import LiveJobDisambiguator
from apache.aurora.client.api.job_monitor import JobMonitor
from apache.aurora.client.api.quota_check import print_quota
from apache.aurora.client.api.updater_util import UpdaterConfig
from apache.aurora.client.base import (
check_and_log_response,
deprecation_warning,
die,
handle_open,
requires,
synthesize_url
)
from apache.aurora.client.config import get_config, GlobalHookRegistry
from apache.aurora.client.factory import make_client, make_client_factory
from apache.aurora.client.options import (
BATCH_OPTION,
CLUSTER_CONFIG_OPTION,
CLUSTER_INVOKE_OPTION,
CLUSTER_NAME_OPTION,
DISABLE_HOOKS_OPTION,
ENV_CONFIG_OPTION,
ENVIRONMENT_BIND_OPTION,
FROM_JOBKEY_OPTION,
HEALTH_CHECK_INTERVAL_SECONDS_OPTION,
JSON_OPTION,
MAX_FAILURES_OPTION,
OPEN_BROWSER_OPTION,
SHARDS_OPTION,
WAIT_UNTIL_OPTION
)
from apache.aurora.common.aurora_job_key import AuroraJobKey
from gen.apache.aurora.api.constants import ACTIVE_STATES, AURORA_EXECUTOR_NAME, CURRENT_API_VERSION
from gen.apache.aurora.api.ttypes import ExecutorConfig, ResponseCode, ScheduleStatus
class CoreCommandHook(object):
"""Limited version of the command hooks framework ported to clientv1 commands.
Core command hooks can only be created by invoking "CoreCommandHook.register_hook"
in a module compiled into the aurora client executable.
Core command hooks are currently only supported for the following commands:
create, kill, killall, restart, start_cron, update, cancel_update
"""
def execute(self, cmd, options, *args, **kwargs):
"""
:param cmd: the command being invoked
:param options: the options object created by processing command line options
:param args: all other positional arguments taken by the command.
:param kwargs: all other keyword arguments taken by the command.
This is invoked by each core client command before the command is executed, *after* options
are parsed. If this returns non-zero, the command execution will be aborted and the return
code of this method will be used as the exit code. To make a hook work with a specific
command, hook implementors should check the "cmd" parameter.
"""
pass
ALL_HOOKS = []
@property
def name(self):
pass
@classmethod
def register_hook(cls, hook):
cls.ALL_HOOKS.append(hook)
@classmethod
def clear_hooks(cls):
cls.ALL_HOOKS = []
@classmethod
def run_hooks(cls, cmd, options, *args, **kwargs):
for hook in cls.ALL_HOOKS:
result = hook.execute(cmd, options, *args, **kwargs)
if result != 0:
print("Command execution aborted by hook %s" % hook.name)
exit(result)
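# Hedged example (not part of the original module): a minimal hook following the
# contract described in the docstrings above. It would be registered with
# CoreCommandHook.register_hook(ExampleBlockKillallHook()) from a module compiled
# into the client; returning non-zero from execute() aborts the command.
class ExampleBlockKillallHook(CoreCommandHook):
    @property
    def name(self):
        return "example_block_killall"

    def execute(self, cmd, options, *args, **kwargs):
        if cmd == "killall":
            print("killall is blocked by %s" % self.name)
            return 1
        return 0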
def get_job_config(job_spec, config_file, options):
try:
job_key = AuroraJobKey.from_path(job_spec)
select_cluster = job_key.cluster
select_env = job_key.env
select_role = job_key.role
jobname = job_key.name
except AuroraJobKey.Error:
deprecation_warning('Please refer to your job in CLUSTER/ROLE/ENV/NAME format.')
select_cluster = options.cluster if options.cluster else None
select_env = options.env
select_role = None
jobname = job_spec
try:
json_option = options.json
except AttributeError:
json_option = False
try:
bindings = options.bindings
except AttributeError:
bindings = ()
return get_config(
jobname,
config_file,
json_option,
bindings,
select_cluster=select_cluster,
select_role=select_role,
select_env=select_env)
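# Hedged sketch (not part of the original client): get_job_config() above first tries
# to read the job spec as a full CLUSTER/ROLE/ENV/NAME key; this shows that happy
# path with a made-up key.
def _example_job_key_parsing():
    job_key = AuroraJobKey.from_path('devcluster/www-data/prod/hello_world')
    return job_key.cluster, job_key.role, job_key.env, job_key.name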
def wait_kill_tasks(scheduler, job_key, instances=None):
monitor = JobMonitor(scheduler, job_key)
if not monitor.wait_until(monitor.terminal, instances=instances, with_timeout=True):
die('Tasks were not killed in time.')
@app.command
def version(args):
"""usage: version
Prints information about the version of the aurora client being run.
"""
try:
pex_info = PexInfo.from_pex(sys.argv[0])
print("Aurora client build info:")
print("\tsha: %s" % pex_info.build_properties['sha'])
print("\tdate: %s" % pex_info.build_properties['date'])
except (IOError, OSError):
print("Aurora client build info not available")
print("Aurora API version: %s" % CURRENT_API_VERSION)
def maybe_disable_hooks(options):
"""Checks the hooks disable option, and disables the hooks if required.
This could be done with a callback in the option, but this is better for the way that
we test clientv1.
"""
if options.disable_all_hooks_reason is not None:
GlobalHookRegistry.disable_hooks()
log.info('Client hooks disabled; reason given by user: %s' % options.disable_all_hooks_reason)
@app.command
@app.command_option(ENVIRONMENT_BIND_OPTION)
@app.command_option(OPEN_BROWSER_OPTION)
@app.command_option(CLUSTER_CONFIG_OPTION)
@app.command_option(ENV_CONFIG_OPTION)
@app.command_option(JSON_OPTION)
@app.command_option(WAIT_UNTIL_OPTION)
@app.command_option(DISABLE_HOOKS_OPTION)
@requires.exactly('cluster/role/env/job', 'config')
def create(job_spec, config_file):
"""usage: create cluster/role/env/job config
Creates a job based on a configuration file.
"""
options = app.get_options()
CoreCommandHook.run_hooks("create", options, job_spec, config_file)
maybe_disable_hooks(options)
try:
config = get_job_config(job_spec, config_file, options)
except ValueError as v:
print("Error: %s" % v)
sys.exit(1)
api = make_client(config.cluster())
resp = api.create_job(config)
check_and_log_response(resp)
handle_open(api.scheduler_proxy.scheduler_client().url, config.role(), config.environment(),
config.name())
if options.wait_until == 'RUNNING':
JobMonitor(api.scheduler_proxy, config.job_key()).wait_until(JobMonitor.running_or_finished)
elif options.wait_until == 'FINISHED':
JobMonitor(api.scheduler_proxy, config.job_key()).wait_until(JobMonitor.terminal)
@app.command
@app.command_option(ENVIRONMENT_BIND_OPTION)
@app.command_option(CLUSTER_CONFIG_OPTION)
@app.command_option(ENV_CONFIG_OPTION)
@app.command_option(JSON_OPTION)
@app.command_option(FROM_JOBKEY_OPTION)
@requires.exactly('cluster/role/env/job', 'config')
def diff(job_spec, config_file):
"""usage: diff cluster/role/env/job config
Compares a job configuration against a running job.
By default the diff will be displayed using 'diff', though you may choose an alternate
diff program by specifying the DIFF_VIEWER environment variable."""
options = app.get_options()
config = get_job_config(job_spec, config_file, options)
if options.rename_from:
cluster, role, env, name = options.rename_from
else:
cluster = config.cluster()
role = config.role()
env = config.environment()
name = config.name()
api = make_client(cluster)
resp = api.query(api.build_query(role, name, statuses=ACTIVE_STATES, env=env))
if resp.responseCode != ResponseCode.OK:
die('Request failed, server responded with "%s"' % resp.messageDEPRECATED)
remote_tasks = [t.assignedTask.task for t in resp.result.scheduleStatusResult.tasks]
resp = api.populate_job_config(config)
if resp.responseCode != ResponseCode.OK:
die('Request failed, server responded with "%s"' % resp.messageDEPRECATED)
local_tasks = resp.result.populateJobResult.populated
pp = pprint.PrettyPrinter(indent=2)
def pretty_print_task(task):
# The raw configuration is not interesting - we only care about what gets parsed.
task.configuration = None
task.executorConfig = ExecutorConfig(
name=AURORA_EXECUTOR_NAME,
data=json.loads(task.executorConfig.data))
return pp.pformat(vars(task))
def pretty_print_tasks(tasks):
return ',\n'.join([pretty_print_task(t) for t in tasks])
def dump_tasks(tasks, out_file):
out_file.write(pretty_print_tasks(tasks))
out_file.write('\n')
out_file.flush()
diff_program = os.environ.get('DIFF_VIEWER', 'diff')
with NamedTemporaryFile() as local:
dump_tasks(local_tasks, local)
with NamedTemporaryFile() as remote:
dump_tasks(remote_tasks, remote)
result = subprocess.call([diff_program, remote.name, local.name])
# Unlike most commands, diff doesn't return zero on success; it returns
# 1 when a successful diff is non-empty.
if result != 0 and result != 1:
return result
else:
return 0
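# Hedged usage note (illustration only): the diff command above shells out to the
# program named by the DIFF_VIEWER environment variable, falling back to plain
# "diff", so a side-by-side viewer can be substituted per invocation (for example
# by exporting DIFF_VIEWER=vimdiff before running the client). The lookup it
# performs is simply:
def _example_diff_program():
    return os.environ.get('DIFF_VIEWER', 'diff')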
@app.command(name='open')
def do_open(args, _):
"""usage: open cluster[/role[/env/job]]
Opens the scheduler page for a cluster, role or job in the default web browser.
"""
cluster_name = role = env = job = None
if len(args) == 0:
print('Open command requires a jobkey parameter.')
exit(1)
args = args[0].split("/")
if len(args) > 0:
cluster_name = args[0]
if len(args) > 1:
role = args[1]
if len(args) > 2:
env = args[2]
if len(args) > 3:
job = args[3]
else:
# TODO(ksweeney): Remove this after MESOS-2945 is completed.
die('env scheduler pages are not yet implemented, please specify job')
if not cluster_name:
die('cluster is required')
api = make_client(cluster_name)
import webbrowser
webbrowser.open_new_tab(
synthesize_url(api.scheduler_proxy.scheduler_client().url, role, env, job))
@app.command
@app.command_option('--local', dest='local', default=False, action='store_true',
help='Inspect the configuration as would be created by the "spawn" command.')
@app.command_option('--raw', dest='raw', default=False, action='store_true',
help='Show the raw configuration.')
@app.command_option(ENVIRONMENT_BIND_OPTION)
@app.command_option(CLUSTER_CONFIG_OPTION)
@app.command_option(ENV_CONFIG_OPTION)
@app.command_option(JSON_OPTION)
@requires.exactly('cluster/role/env/job', 'config')
def inspect(job_spec, config_file):
"""usage: inspect cluster/role/env/job config
Verifies that a job can be parsed from a configuration file, and displays
the parsed configuration.
"""
options = app.get_options()
config = get_job_config(job_spec, config_file, options)
if options.raw:
print('Parsed job config: %s' % config.job())
return
job = config.raw()
job_thrift = config.job()
print('Job level information')
print(' name: %s' % job.name())
print(' role: %s' % job.role())
print(' contact: %s' % job.contact())
print(' cluster: %s' % job.cluster())
print(' instances: %s' % job.instances())
if job.has_cron_schedule():
print(' cron:')
print(' schedule: %s' % job.cron_schedule())
print(' policy: %s' % job.cron_collision_policy())
if job.has_constraints():
print(' constraints:')
for constraint, value in job.constraints().get().items():
print(' %s: %s' % (constraint, value))
print(' service: %s' % job_thrift.taskConfig.isService)
print(' production: %s' % bool(job.production().get()))
print()
task = job.task()
print('Task level information')
print(' name: %s' % task.name())
if len(task.constraints().get()) > 0:
print(' constraints:')
for constraint in task.constraints():
print(' %s' % (' < '.join(st.get() for st in constraint.order())))
print()
processes = task.processes()
for process in processes:
print('Process %s:' % process.name())
if process.daemon().get():
print(' daemon')
if process.ephemeral().get():
print(' ephemeral')
if process.final().get():
print(' final')
print(' cmdline:')
for line in process.cmdline().get().splitlines():
print(' ' + line)
print()
@app.command
@app.command_option(CLUSTER_INVOKE_OPTION)
@app.command_option(OPEN_BROWSER_OPTION)
@app.command_option(DISABLE_HOOKS_OPTION)
def start_cron(args, options):
"""usage: start_cron cluster/role/env/job
Invokes a cron job immediately, out of its normal cron cycle.
This does not affect the cron cycle in any way.
"""
CoreCommandHook.run_hooks("start_cron", options, *args)
maybe_disable_hooks(options)
api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
args, options, make_client_factory())
config = get_job_config(job_key.to_path(), config_file, options) if config_file else None
resp = api.start_cronjob(job_key, config=config)
check_and_log_response(resp)
handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role, job_key.env, job_key.name)
@app.command
@app.command_option(
'--pretty',
dest='pretty',
default=False,
action='store_true',
help='Show job information in prettyprinted format')
@app.command_option(
'--show-cron',
'-c',
dest='show_cron_schedule',
default=False,
action='store_true',
help='Also show the registered cron schedule for each listed job')
@requires.exactly('cluster/role')
def list_jobs(cluster_and_role):
"""usage: list_jobs [--show-cron] cluster/role/env/job
Shows all jobs that match the job-spec known by the scheduler.
If --show-cron is specified, then also shows the registered cron schedule.
"""
def show_job_simple(job):
if options.show_cron_schedule:
print(('{0}/{1.key.role}/{1.key.environment}/{1.key.name}' +
'\t\'{1.cronSchedule}\'\t{1.cronCollisionPolicy}').format(cluster, job))
else:
print('{0}/{1.key.role}/{1.key.environment}/{1.key.name}'.format(cluster, job))
def show_job_pretty(job):
print("Job %s/%s/%s/%s:" %
(cluster, job.key.role, job.key.environment, job.key.name))
print('\tcron schedule: %s' % job.cronSchedule)
print('\tcron policy: %s' % job.cronCollisionPolicy)
options = app.get_options()
if options.show_cron_schedule and options.pretty:
print_fn = show_job_pretty
else:
print_fn = show_job_simple
# Take the cluster_and_role parameter, and split it into its two components.
if cluster_and_role.count('/') != 1:
die('list_jobs parameter must be in cluster/role format')
cluster, role = cluster_and_role.split('/')
api = make_client(cluster)
resp = api.get_jobs(role)
check_and_log_response(resp)
for job in resp.result.getJobsResult.configs:
print_fn(job)
@app.command
@app.command_option(CLUSTER_INVOKE_OPTION)
@app.command_option(OPEN_BROWSER_OPTION)
@app.command_option(SHARDS_OPTION)
@app.command_option(DISABLE_HOOKS_OPTION)
@app.command_option(BATCH_OPTION)
@app.command_option(MAX_FAILURES_OPTION)
def kill(args, options):
"""usage: kill --shards=shardspec cluster/role/env/job
Kills a group of tasks in a running job, blocking until all specified tasks have terminated.
"""
CoreCommandHook.run_hooks("kill", options, *args)
maybe_disable_hooks(options)
if options.shards is None:
print('Shards option is required for kill; use killall to kill all shards', file=sys.stderr)
exit(1)
api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
args, options, make_client_factory())
options = app.get_options()
config = get_job_config(job_key.to_path(), config_file, options) if config_file else None
if options.batch_size is not None:
kill_in_batches(api, job_key, options.shards, options.batch_size, options.max_failures_option)
else:
resp = api.kill_job(job_key, options.shards, config=config)
check_and_log_response(resp)
handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role, job_key.env, job_key.name)
wait_kill_tasks(api.scheduler_proxy, job_key, options.shards)
def kill_in_batches(api, job_key, instances_arg, batch_size, max_failures):
""" Common behavior shared by kill and killAll for killing instances in
a sequence of batches.
"""
def make_batches(instances, batch_size):
result = []
while (len(instances) > 0):
batch = []
for i in range(min(batch_size, len(instances))):
batch.append(instances.pop())
result.append(batch)
return result
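  # Illustrative sketch (assumed values): with instances {0, 1, 2, 3, 4} and batch_size=2,
  # make_batches pops from the set and yields something like [[4, 3], [2, 1], [0]];
  # the exact grouping depends on set iteration order.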
resp = api.check_status(job_key)
if resp.responseCode is not ResponseCode.OK:
log.error("Job %s could not be found" % job_key)
exit(1)
tasks = resp.result.scheduleStatusResult.tasks or None
if batch_size is not None and batch_size > 0 and tasks is not None:
instance_ids = set(instance.assignedTask.instanceId for instance in tasks)
instances_to_kill = instance_ids & set(instances_arg or instance_ids)
errors = 0
for batch in make_batches(instances_to_kill, batch_size):
resp = api.kill_job(job_key, batch)
if resp.responseCode is not ResponseCode.OK:
log.info("Kill of shards %s failed with error %s" % (batch, resp.messageDEPRECATED))
print('ERROR IN KILL_JOB')
errors += 1
if errors > max_failures:
log.error("Exceeded maximum number of errors while killing instances")
exit(1)
if errors > 0:
print("Warning: errors occurred during batch kill")
exit(1)
else:
if tasks is None or len(tasks) == 0:
log.error('No tasks to kill found for job %s' % job_key)
return 1
@app.command
@app.command_option(CLUSTER_INVOKE_OPTION)
@app.command_option(OPEN_BROWSER_OPTION)
@app.command_option(DISABLE_HOOKS_OPTION)
@app.command_option(BATCH_OPTION)
@app.command_option(MAX_FAILURES_OPTION)
def killall(args, options):
"""usage: killall cluster/role/env/job
Kills all tasks in a running job, blocking until all specified tasks have been terminated.
"""
CoreCommandHook.run_hooks("killall", options, *args)
maybe_disable_hooks(options)
job_key = AuroraJobKey.from_path(args[0])
config_file = args[1] if len(args) > 1 else None # the config for hooks
config = get_job_config(job_key.to_path(), config_file, options) if config_file else None
api = make_client(job_key.cluster)
if options.batch_size is not None:
kill_in_batches(api, job_key, None, options.batch_size, options.max_failures_option)
else:
resp = api.kill_job(job_key, None, config=config)
check_and_log_response(resp)
handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role, job_key.env, job_key.name)
wait_kill_tasks(api.scheduler_proxy, job_key)
@app.command
@app.command_option(CLUSTER_INVOKE_OPTION)
def status(args, options):
"""usage: status cluster/role/env/job
Fetches and prints information about the active tasks in a job.
"""
def is_active(task):
return task.status in ACTIVE_STATES
def print_task(scheduled_task):
assigned_task = scheduled_task.assignedTask
taskInfo = assigned_task.task
taskString = ''
if taskInfo:
taskString += '''cpus: %s, ram: %s MB, disk: %s MB''' % (taskInfo.numCpus,
taskInfo.ramMb,
taskInfo.diskMb)
if assigned_task.assignedPorts:
taskString += '\n\tports: %s' % assigned_task.assignedPorts
taskString += '\n\tfailure count: %s (max %s)' % (scheduled_task.failureCount,
taskInfo.maxTaskFailures)
taskString += '\n\tevents:'
for event in scheduled_task.taskEvents:
taskString += '\n\t\t %s %s: %s' % (datetime.fromtimestamp(event.timestamp / 1000),
ScheduleStatus._VALUES_TO_NAMES[event.status],
event.message)
taskString += '\n\tmetadata:'
if assigned_task.task.metadata is not None:
for md in assigned_task.task.metadata:
taskString += ('\n\t\t%s: %s' % (md.key, md.value))
return taskString
def print_tasks(tasks):
for task in tasks:
taskString = print_task(task)
log.info('role: %s, env: %s, name: %s, shard: %s, status: %s on %s\n%s' %
(task.assignedTask.task.owner.role,
task.assignedTask.task.environment,
task.assignedTask.task.jobName,
task.assignedTask.instanceId,
ScheduleStatus._VALUES_TO_NAMES[task.status],
task.assignedTask.slaveHost,
taskString))
api, job_key, _ = LiveJobDisambiguator.disambiguate_args_or_die(
args, options, make_client_factory())
resp = api.check_status(job_key)
check_and_log_response(resp)
tasks = resp.result.scheduleStatusResult.tasks
if tasks:
active_tasks = filter(is_active, tasks)
log.info('Active Tasks (%s)' % len(active_tasks))
print_tasks(active_tasks)
inactive_tasks = filter(lambda x: not is_active(x), tasks)
log.info('Inactive Tasks (%s)' % len(inactive_tasks))
print_tasks(inactive_tasks)
else:
log.info('No tasks found.')
@app.command
@app.command_option(SHARDS_OPTION)
@app.command_option(ENVIRONMENT_BIND_OPTION)
@app.command_option(CLUSTER_CONFIG_OPTION)
@app.command_option(ENV_CONFIG_OPTION)
@app.command_option(JSON_OPTION)
@app.command_option(HEALTH_CHECK_INTERVAL_SECONDS_OPTION)
@app.command_option(DISABLE_HOOKS_OPTION)
@app.command_option(
'--force',
dest='force',
default=True, # TODO(maximk): Temporary bandaid for MESOS-4310 until a better fix is available.
action='store_true',
help='Turn off warning message that the update looks large enough to be disruptive.')
@requires.exactly('cluster/role/env/job', 'config')
def update(job_spec, config_file):
"""usage: update cluster/role/env/job config
Performs a rolling upgrade on a running job, using the update configuration
within the config file as a control for update velocity and failure tolerance.
Updates are fully controlled client-side, so aborting an update halts the
update and leaves the job in a 'locked' state on the scheduler.
Subsequent update attempts will fail until the update is 'unlocked' using the
'cancel_update' command.
The updater only takes action on shards in a job that have changed, meaning
that changing a single shard will only induce a restart on the changed shard.
You may want to consider using the 'diff' subcommand before updating,
to preview what changes will take effect.
"""
def warn_if_dangerous_change(api, job_spec, config):
# Get the current job status, so that we can check if there's anything
# dangerous about this update.
resp = api.query_no_configs(api.build_query(config.role(), config.name(),
statuses=ACTIVE_STATES, env=config.environment()))
if resp.responseCode != ResponseCode.OK:
die('Could not get job status from server for comparison: %s' % resp.messageDEPRECATED)
remote_tasks = [t.assignedTask.task for t in resp.result.scheduleStatusResult.tasks]
resp = api.populate_job_config(config)
if resp.responseCode != ResponseCode.OK:
die('Server could not populate job config for comparison: %s' % resp.messageDEPRECATED)
local_task_count = len(resp.result.populateJobResult.populated)
remote_task_count = len(remote_tasks)
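    # Heuristic: flag the update as potentially disruptive when the new config changes the
    # task count by more than 4x in either direction, or defines no tasks at all.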
    if (local_task_count >= 4 * remote_task_count or local_task_count <= remote_task_count / 4
        or local_task_count == 0):
print('Warning: this update is a large change. Press ^c within 5 seconds to abort')
time.sleep(5)
options = app.get_options()
CoreCommandHook.run_hooks("update", options, job_spec, config_file)
maybe_disable_hooks(options)
config = get_job_config(job_spec, config_file, options)
api = make_client(config.cluster())
if not options.force:
warn_if_dangerous_change(api, job_spec, config)
resp = api.update_job(config, options.health_check_interval_seconds, options.shards)
check_and_log_response(resp)
@app.command
@app.command_option(CLUSTER_INVOKE_OPTION)
@app.command_option(HEALTH_CHECK_INTERVAL_SECONDS_OPTION)
@app.command_option(OPEN_BROWSER_OPTION)
@app.command_option(SHARDS_OPTION)
@app.command_option(
'--batch_size',
dest='batch_size',
type=int,
default=1,
help='Number of shards to be restarted in one iteration.')
@app.command_option(
'--max_per_shard_failures',
dest='max_per_shard_failures',
type=int,
default=0,
help='Maximum number of restarts per shard during restart. Increments total failure count when '
'this limit is exceeded.')
@app.command_option(
'--max_total_failures',
dest='max_total_failures',
type=int,
default=0,
help='Maximum number of shard failures to be tolerated in total during restart.')
@app.command_option(
'--restart_threshold',
dest='restart_threshold',
type=int,
default=60,
help='Maximum number of seconds before a shard must move into the RUNNING state before '
'considered a failure.')
@app.command_option(
'--watch_secs',
dest='watch_secs',
type=int,
default=30,
help='Minimum number of seconds a shard must remain in RUNNING state before considered a '
'success.')
@app.command_option(DISABLE_HOOKS_OPTION)
def restart(args, options):
"""usage: restart cluster/role/env/job
[--shards=SHARDS]
[--batch_size=INT]
[--updater_health_check_interval_seconds=SECONDS]
[--max_per_shard_failures=INT]
[--max_total_failures=INT]
[--restart_threshold=INT]
[--watch_secs=SECONDS]
Performs a rolling restart of shards within a job.
Restarts are fully controlled client-side, so aborting halts the restart.
"""
CoreCommandHook.run_hooks("restart", options, *args)
if options.max_total_failures < 0:
print("max_total_failures option must be >0, but you specified %s" % options.max_total_failures,
file=sys.stderr)
exit(1)
maybe_disable_hooks(options)
api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
args, options, make_client_factory())
config = get_job_config(job_key.to_path(), config_file, options) if config_file else None
updater_config = UpdaterConfig(
options.batch_size,
options.restart_threshold,
options.watch_secs,
options.max_per_shard_failures,
options.max_total_failures)
resp = api.restart(job_key, options.shards, updater_config,
options.health_check_interval_seconds, config=config)
check_and_log_response(resp)
handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role, job_key.env, job_key.name)
@app.command
@app.command_option(CLUSTER_INVOKE_OPTION)
def cancel_update(args, options):
"""usage: cancel_update cluster/role/env/job
Unlocks a job for updates.
A job may be locked if a client's update session terminated abnormally,
or if another user is actively updating the job. This command should only
be used when the user is confident that they are not conflicting with another user.
"""
CoreCommandHook.run_hooks("cancel_update", options, *args)
api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
args, options, make_client_factory())
config = get_job_config(job_key.to_path(), config_file, options) if config_file else None
resp = api.cancel_update(job_key, config=config)
check_and_log_response(resp)
@app.command
@app.command_option(CLUSTER_NAME_OPTION)
@requires.exactly('role')
def get_quota(role):
"""usage: get_quota --cluster=CLUSTER role
Prints the production quota that has been allocated to a user.
"""
options = app.get_options()
resp = make_client(options.cluster).get_quota(role)
quota_result = resp.result.getQuotaResult
print_quota(quota_result.quota, 'Total allocated quota', role)
if resp.result.getQuotaResult.prodConsumption:
print_quota(quota_result.prodConsumption,
'Resources consumed by production jobs',
role)
if resp.result.getQuotaResult.nonProdConsumption:
print_quota(quota_result.nonProdConsumption,
'Resources consumed by non-production jobs',
role)
|
py | b4169a7e5ce438a69f7e6714c04bea0b8d9a4b07 | import cv2
filepath = "OpenCV/meinv.png"
img = cv2.imread(filepath)
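# Note: cv2.imread returns None instead of raising when the file cannot be read,
# so a missing image only surfaces as an error at cv2.imshow below.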
cv2.namedWindow('Image')
cv2.imshow('Image', img)
cv2.waitKey(0)
cv2.destroyAllWindows() |
py | b4169b3b2763e5fe39340dd4a09b9ef378402983 | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.ACE/Serif_16/udhr_Latn.ACE_Serif_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
py | b4169d3dad9585e04667863b320a8089af377da0 | """
This file offers the methods to automatically retrieve the graph socfb-Bucknell39.
The graph is automatically retrieved from the NetworkRepository repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-06 11:48:42.276381
The undirected graph socfb-Bucknell39 has 3826 nodes and 158864 unweighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.02171 and has 2 connected components, where the component with most
nodes has 3824 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 72, the mean node degree is 83.04, and
the node degree mode is 49. The top 5 most central nodes are 2874 (degree
506), 572 (degree 429), 534 (degree 417), 2025 (degree 405) and 865 (degree
399).
References
---------------------
Please cite the following if you use the data:
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
@article{traud2012social,
title={Social structure of {F}acebook networks},
author={Traud, Amanda L and Mucha, Peter J and Porter, Mason A},
journal={Phys. A},
month={Aug},
number={16},
pages={4165--4180},
volume={391},
year={2012}
}
@article{Traud:2011fs,
title={Comparing Community Structure to Characteristics in Online Collegiate Social Networks},
author={Traud, Amanda L and Kelsic, Eric D and Mucha, Peter J and Porter, Mason A},
journal={SIAM Rev.},
number={3},
pages={526--543},
volume={53},
year={2011}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.networkrepository import SocfbBucknell39
# Then load the graph
graph = SocfbBucknell39()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def SocfbBucknell39(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/networkrepository",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the socfb-Bucknell39 graph.
The graph is automatically retrieved from the NetworkRepository repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of socfb-Bucknell39 graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-06 11:48:42.276381
The undirected graph socfb-Bucknell39 has 3826 nodes and 158864 unweighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.02171 and has 2 connected components, where the component with most
nodes has 3824 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 72, the mean node degree is 83.04, and
the node degree mode is 49. The top 5 most central nodes are 2874 (degree
506), 572 (degree 429), 534 (degree 417), 2025 (degree 405) and 865 (degree
399).
References
---------------------
Please cite the following if you use the data:
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
@article{traud2012social,
title={Social structure of {F}acebook networks},
author={Traud, Amanda L and Mucha, Peter J and Porter, Mason A},
journal={Phys. A},
month={Aug},
number={16},
pages={4165--4180},
volume={391},
year={2012}
}
@article{Traud:2011fs,
title={Comparing Community Structure to Characteristics in Online Collegiate Social Networks},
author={Traud, Amanda L and Kelsic, Eric D and Mucha, Peter J and Porter, Mason A},
journal={SIAM Rev.},
number={3},
pages={526--543},
volume={53},
year={2011}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.networkrepository import SocfbBucknell39
# Then load the graph
graph = SocfbBucknell39()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
            # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
            # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="SocfbBucknell39",
dataset="networkrepository",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
|
py | b4169d46c1d0650032972d4d6bfb939c95c36d13 | """
This submodule contains various functionality related to Steam Guard.
:class:`SteamAuthenticator` provides methods for genereating codes
and enabling 2FA on a Steam account. Operations managing the authenticator
on an account require an instance of either :class:`.MobileWebAuth` or
:class:`.SteamClient`. The instance needs to be logged in.
Adding an authenticator
.. code:: python
wa = MobileWebAuth('steamuser')
wa.cli_login()
sa = SteamAuthenticator(backend=wa)
sa.add() # SMS code will be send to the account's phone number
sa.secrets # dict with authenticator secrets (SAVE THEM!!)
# save the secrets, for example to a file
json.dump(sa.secrets, open('./mysecrets.json', 'w'))
# HINT: You can stop here and add authenticator on your phone.
# The secrets will be the same, and you will be able to
# both use your phone and SteamAuthenticator.
sa.finalize('SMS CODE') # activate the authenticator
sa.get_code() # generate 2FA code for login
sa.remove() # removes the authenticator from the account
.. warning::
Before you finalize the authenticator, make sure to save your secrets.
Otherwise you will lose access to the account.
Once authenticator is enabled all you need is the secrets to generate codes.
.. code:: python
secrets = json.load(open('./mysecrets.json'))
sa = SteamAuthenticator(secrets)
sa.get_code()
You can obtain the authenticator secrets from an Android device using
:func:`extract_secrets_from_android_rooted`. See the function docstring for
details on what is required for it to work.
"""
import json
import subprocess
import struct
import requests
from base64 import b64decode, b64encode
from binascii import hexlify
from time import time
from steam import webapi
from steam.enums import ETwoFactorTokenType
from steam.steamid import SteamID
from steam.core.crypto import hmac_sha1, sha1_hash
from steam.enums.common import EResult
from steam.webauth import MobileWebAuth
from steam.utils.proto import proto_to_dict
class SteamAuthenticator(object):
"""Add/Remove authenticator from an account. Generate 2FA and confirmation codes."""
_finalize_attempts = 5
backend = None #: instance of :class:`.MobileWebAuth` or :class:`.SteamClient`
steam_time_offset = None #: offset from steam server time
align_time_every = 0 #: how often to align time with Steam (``0`` never, otherwise interval in seconds)
_offset_last_check = 0
secrets = None #: :class:`dict` with authenticator secrets
def __init__(self, secrets=None, backend=None):
"""
:param secret: a dict of authenticator secrets
:type secret: dict
:param backend: logged on session for steam user
:type backend: :class:`.MobileWebAuth`, :class:`.SteamClient`
"""
self.secrets = secrets or {}
self.backend = backend
def __getattr__(self, key):
if key not in self.secrets:
raise AttributeError("No %s attribute" % repr(key))
return self.secrets[key]
def get_time(self):
"""
:return: Steam aligned timestamp
:rtype: int
"""
if (self.steam_time_offset is None
or (self.align_time_every and (time() - self._offset_last_check) > self.align_time_every)
):
self.steam_time_offset = get_time_offset()
if self.steam_time_offset is not None:
self._offset_last_check = time()
return int(time() + (self.steam_time_offset or 0))
def get_code(self, timestamp=None):
"""
:param timestamp: time to use for code generation
:type timestamp: int
:return: two factor code
:rtype: str
"""
return generate_twofactor_code_for_time(b64decode(self.shared_secret),
self.get_time() if timestamp is None else timestamp)
def get_confirmation_key(self, tag='', timestamp=None):
"""
:param tag: see :func:`generate_confirmation_key` for this value
:type tag: str
:param timestamp: time to use for code generation
:type timestamp: int
:return: trade confirmation key
:rtype: str
"""
return generate_confirmation_key(b64decode(self.identity_secret), tag,
self.get_time() if timestamp is None else timestamp)
def _send_request(self, action, params):
backend = self.backend
if isinstance(backend, MobileWebAuth):
if not backend.logged_on:
raise SteamAuthenticatorError("MobileWebAuth instance not logged in")
params['access_token'] = backend.oauth_token
params['http_timeout'] = 10
try:
resp = webapi.post('ITwoFactorService', action, 1, params=params)
except requests.exceptions.RequestException as exp:
raise SteamAuthenticatorError("Error adding via WebAPI: %s" % str(exp))
resp = resp['response']
else:
if not backend.logged_on:
raise SteamAuthenticatorError("SteamClient instance not logged in")
resp = backend.send_um_and_wait("TwoFactor.%s#1" % action,
params, timeout=10)
if resp is None:
raise SteamAuthenticatorError("Failed. Request timeout")
if resp.header.eresult != EResult.OK:
raise SteamAuthenticatorError("Failed: %s (%s)" % (resp.header.error_message,
repr(resp.header.eresult)))
resp = proto_to_dict(resp.body)
if action == 'AddAuthenticator':
for key in ['shared_secret', 'identity_secret', 'secret_1']:
resp[key] = b64encode(resp[key]).decode('ascii')
return resp
def add(self):
"""Add authenticator to an account.
The account's phone number will receive a SMS code required for :meth:`finalize`.
:raises: :class:`SteamAuthenticatorError`
"""
if not self.has_phone_number():
raise SteamAuthenticatorError("Account doesn't have a verified phone number")
resp = self._send_request('AddAuthenticator', {
'steamid': self.backend.steam_id,
'authenticator_time': int(time()),
'authenticator_type': int(ETwoFactorTokenType.ValveMobileApp),
'device_identifier': generate_device_id(self.backend.steam_id),
'sms_phone_id': '1',
})
if resp['status'] != EResult.OK:
raise SteamAuthenticatorError("Failed to add authenticator. Error: %s" % repr(EResult(resp['status'])))
self.secrets = resp
self.steam_time_offset = int(resp['server_time']) - time()
def finalize(self, activation_code):
"""Finalize authenticator with received SMS code
:param activation_code: SMS code
:type activation_code: str
:raises: :class:`SteamAuthenticatorError`
"""
resp = self._send_request('FinalizeAddAuthenticator', {
'steamid': self.backend.steam_id,
'authenticator_time': int(time()),
'authenticator_code': self.get_code(),
'activation_code': activation_code,
})
if resp['status'] != EResult.TwoFactorActivationCodeMismatch and resp.get('want_more', False) and self._finalize_attempts:
self.steam_time_offset += 30
self._finalize_attempts -= 1
self.finalize(activation_code)
return
elif not resp['success']:
self._finalize_attempts = 5
raise SteamAuthenticatorError("Failed to finalize authenticator. Error: %s" % repr(EResult(resp['status'])))
self.steam_time_offset = int(resp['server_time']) - time()
def remove(self, revocation_code=None):
"""Remove authenticator
:param revocation_code: revocation code for account (e.g. R12345)
:type revocation_code: str
.. note::
After removing authenticator Steam Guard will be set to email codes
.. warning::
Doesn't work via :class:`.SteamClient`. Disabled by Valve
:raises: :class:`SteamAuthenticatorError`
"""
if not self.secrets:
raise SteamAuthenticatorError("No authenticator secrets available?")
if not isinstance(self.backend, MobileWebAuth):
raise SteamAuthenticatorError("Only available via MobileWebAuth")
resp = self._send_request('RemoveAuthenticator', {
'steamid': self.backend.steam_id,
'revocation_code': revocation_code if revocation_code else self.revocation_code,
'steamguard_scheme': 1,
})
if not resp['success']:
raise SteamAuthenticatorError("Failed to remove authenticator. (attempts remaining: %s)" % (
resp['revocation_attempts_remaining'],
))
self.secrets.clear()
def status(self):
"""Fetch authenticator status for the account
:raises: :class:`SteamAuthenticatorError`
:return: dict with status parameters
:rtype: dict
"""
return self._send_request('QueryStatus', {'steamid': self.backend.steam_id})
def create_emergency_codes(self, code=None):
"""Generate emergency codes
:param code: SMS code
:type code: str
:raises: :class:`SteamAuthenticatorError`
:return: list of codes
:rtype: list
.. note::
A confirmation code is required to generate emergency codes and this method needs
to be called twice as shown below.
.. code:: python
sa.create_emergency_codes() # request a SMS code
sa.create_emergency_codes(code='12345') # creates emergency codes
"""
if code:
return self._send_request('createemergencycodes', {'code': code}).get('codes', [])
else:
self._send_request('createemergencycodes', {})
return None
def destroy_emergency_codes(self):
"""Destroy all emergency codes
:raises: :class:`SteamAuthenticatorError`
"""
self._send_request('DestroyEmergencyCodes', {'steamid': self.backend.steam_id})
def _get_web_session(self):
"""
:return: authenticated web session
:rtype: :class:`requests.Session`
:raises: :class:`RuntimeError` when session is unavailable
"""
if isinstance(self.backend, MobileWebAuth):
return self.backend.session
else:
if self.backend.logged_on:
sess = self.backend.get_web_session()
if sess is None:
raise RuntimeError("Failed to get a web session. Try again in a few minutes")
else:
return sess
else:
raise RuntimeError("SteamClient instance is not connected")
def add_phone_number(self, phone_number):
"""Add phone number to account
Steps:
1. Call :meth:`add_phone_number()` then check ``email_confirmation`` key in the response
i. On ``True``, user needs to click link in email, then step 2
ii. On ``False``, SMS code is sent, go to step 3
2. Confirm email via :meth:`confirm_email()`, SMS code is sent
3. Finalize phone number with SMS code :meth:`confirm_phone_number(sms_code)`
:param phone_number: phone number with country code
:type phone_number: :class:`str`
:return: see example below
:rtype: :class:`dict`
.. code:: python
{'success': True,
'email_confirmation': True,
'error_text': '',
'fatal': False}
"""
sess = self._get_web_session()
try:
resp = sess.post('https://steamcommunity.com/steamguard/phoneajax',
data={
'op': 'add_phone_number',
'arg': phone_number,
'checkfortos': 0,
'skipvoip': 0,
'sessionid': sess.cookies.get('sessionid', domain='steamcommunity.com'),
},
timeout=15).json()
except:
return {'success': False}
return resp
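    # Illustrative flow (assumes `sa` is a SteamAuthenticator with a logged-in backend;
    # the phone number and SMS code below are placeholders):
    #   resp = sa.add_phone_number('+1 555 123 4567')
    #   if resp.get('email_confirmation'):
    #       sa.confirm_email()              # after clicking the emailed confirmation link
    #   sa.confirm_phone_number('12345')    # SMS code received on the phone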
def confirm_email(self):
"""Confirm email confirmation. See :meth:`add_phone_number()`
.. note::
If ``email_confirmation`` is ``True``, then user hasn't clicked the link yet.
:return: see example below
:rtype: :class:`dict`
.. code:: python
{'success': True,
'email_confirmation': True,
'error_text': '',
'fatal': False}
"""
sess = self._get_web_session()
try:
resp = sess.post('https://steamcommunity.com/steamguard/phoneajax',
data={
'op': 'email_confirmation',
'arg': '',
'checkfortos': 1,
'skipvoip': 1,
'sessionid': sess.cookies.get('sessionid', domain='steamcommunity.com'),
},
timeout=15).json()
except:
return {'fatal': True, 'success': False}
return resp
def confirm_phone_number(self, sms_code):
"""Confirm phone number with the recieved SMS code. See :meth:`add_phone_number()`
:param sms_code: sms code
:type sms_code: :class:`str`
:return: see example below
:rtype: :class:`dict`
.. code:: python
{'success': True,
'error_text': '',
'fatal': False}
"""
sess = self._get_web_session()
try:
resp = sess.post('https://steamcommunity.com/steamguard/phoneajax',
data={
'op': 'check_sms_code',
'arg': sms_code,
'checkfortos': 1,
'skipvoip': 1,
'sessionid': sess.cookies.get('sessionid', domain='steamcommunity.com'),
},
timeout=15).json()
except:
return {'success': False}
return resp
def has_phone_number(self):
"""Check whether the account has a verified phone number
:return: see example below
:rtype: :class:`dict`
.. code:: python
{'success': True,
'has_phone': True,
'error_text': '',
'fatal': False}
"""
sess = self._get_web_session()
try:
resp = sess.post('https://steamcommunity.com/steamguard/phoneajax',
data={
'op': 'has_phone',
'arg': '0',
'checkfortos': 0,
'skipvoip': 1,
'sessionid': sess.cookies.get('sessionid', domain='steamcommunity.com'),
},
timeout=15).json()
except:
return {'success': False}
return resp
def validate_phone_number(self, phone_number):
"""Test whether phone number is valid for Steam
:param phone_number: phone number with country code
:type phone_number: :class:`str`
:return: see example below
:rtype: :class:`dict`
.. code:: python
{'is_fixed': False,
'is_valid': False,
'is_voip': True,
'number': '+1 123-555-1111',
'success': True}
"""
sess = self._get_web_session()
try:
resp = sess.post('https://store.steampowered.com/phone/validate',
data={
'phoneNumber': phone_number,
'sessionID': sess.cookies.get('sessionid', domain='store.steampowered.com'),
},
allow_redirects=False,
timeout=15).json()
except:
resp = {'success': False}
return resp
class SteamAuthenticatorError(Exception):
pass
def generate_twofactor_code(shared_secret):
"""Generate Steam 2FA code for login with current time
:param shared_secret: authenticator shared shared_secret
:type shared_secret: bytes
:return: steam two factor code
:rtype: str
"""
return generate_twofactor_code_for_time(shared_secret, time() + (get_time_offset() or 0))
def generate_twofactor_code_for_time(shared_secret, timestamp):
"""Generate Steam 2FA code for timestamp
:param shared_secret: authenticator shared secret
:type shared_secret: bytes
:param timestamp: timestamp to use, if left out uses current time
:type timestamp: int
:return: steam two factor code
:rtype: str
"""
hmac = hmac_sha1(bytes(shared_secret),
struct.pack('>Q', int(timestamp)//30)) # this will NOT stop working in 2038
start = ord(hmac[19:20]) & 0xF
codeint = struct.unpack('>I', hmac[start:start+4])[0] & 0x7fffffff
charset = '23456789BCDFGHJKMNPQRTVWXY'
code = ''
for _ in range(5):
codeint, i = divmod(codeint, len(charset))
code += charset[i]
return code
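# Example with assumed inputs: generate_twofactor_code_for_time(b64decode(shared_secret), 1500000000)
# returns a 5-character code drawn from the alphabet above, e.g. something like 'N2K7V'.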
def generate_confirmation_key(identity_secret, tag, timestamp):
"""Generate confirmation key for trades. Can only be used once.
:param identity_secret: authenticator identity secret
:type identity_secret: bytes
:param tag: tag identifies what the request, see list below
:type tag: str
:param timestamp: timestamp to use for generating key
:type timestamp: int
:return: confirmation key
:rtype: bytes
Tag choices:
* ``conf`` to load the confirmations page
* ``details`` to load details about a trade
* ``allow`` to confirm a trade
* ``cancel`` to cancel a trade
"""
data = struct.pack('>Q', int(timestamp)) + tag.encode('ascii') # this will NOT stop working in 2038
return hmac_sha1(bytes(identity_secret), data)
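# Illustrative usage (variable names assumed): to load the confirmations page, compute
#   key = generate_confirmation_key(b64decode(identity_secret), 'conf', int(time()))
# and send the base64-encoded key together with the same timestamp in the request.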
def get_time_offset():
"""Get time offset from steam server time via WebAPI
:return: time offset (``None`` when Steam WebAPI fails to respond)
:rtype: :class:`int`, :class:`None`
"""
try:
resp = webapi.post('ITwoFactorService', 'QueryTime', 1, params={'http_timeout': 10})
except:
return None
ts = int(time())
return int(resp.get('response', {}).get('server_time', ts)) - ts
def generate_device_id(steamid):
"""Generate Android device id
:param steamid: Steam ID
:type steamid: :class:`.SteamID`, :class:`int`
:return: android device id
:rtype: str
"""
h = hexlify(sha1_hash(str(steamid).encode('ascii'))).decode('ascii')
return "android:%s-%s-%s-%s-%s" % (h[:8], h[8:12], h[12:16], h[16:20], h[20:32])
def extract_secrets_from_android_rooted(adb_path='adb'):
"""Extract Steam Authenticator secrets from a rooted Android device
Prerequisite for this to work:
- rooted android device
- `adb binary <https://developer.android.com/studio/command-line/adb.html>`_
- device in debug mode, connected and paired
.. note::
If you know how to make this work, without requiring the device to be rooted,
        please open an issue on github. Thanks
:param adb_path: path to adb binary
:type adb_path: str
:raises: When there is any problem
:return: all secrets from the device, steamid as key
:rtype: dict
"""
data = subprocess.check_output([
adb_path, 'shell', 'su', '-c',
"'cat /data/data/com.valvesoftware.android.steam.community/files/Steamguard*'"
])
# When adb daemon is not running, `adb` will print a couple of lines before our data.
# The data doesn't have new lines and its always on the last line.
data = data.decode('utf-8').split('\n')[-1]
if data[0] != "{":
raise RuntimeError("Got invalid data: %s" % repr(data))
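    # Multiple Steamguard-* files arrive concatenated with no separator between the JSON
    # objects, so split on the "}{" boundaries and parse each object individually.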
return {int(x['steamid']): x
for x in map(json.loads, data.replace("}{", '}|||||{').split('|||||'))}
|
py | b4169e379148d1546a5fdbe3ee9e9dfd41507e2d | import time
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from unet import Unet
MODEL_NAME = f"model-{int(time.time())}"
learning_rate = 0.001
epochs = 4
validation_percentage = 0.1
u_net = Unet()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
u_net.to(device)
optimizer = optim.Adam(u_net.parameters(), lr=learning_rate)
loss_func = nn.MSELoss()
def filter_img(img00, img01):
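    # Post-processing sketch: brighten the input, subtract the network output, sharpen,
    # blur, run Canny edge detection, morphologically close the result, invert it, and
    # AND it with the original input image.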
kernel = np.ones((4, 4), np.uint8)
subtract = cv2.subtract((img00 + 15), img01)
kernel2 = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
img03 = cv2.filter2D(subtract, -1, kernel2)
img03 = cv2.GaussianBlur(img03, (5, 5), 0)
img03 = cv2.Canny(img03, 85, 255)
img03 = cv2.morphologyEx(img03, cv2.MORPH_CLOSE, kernel, iterations=1)
img03 = cv2.bitwise_not(img03)
img03 = img03 & img00
return img03
def train_net(training_path):
dataset = np.load(training_path)
print(dataset.shape)
print(MODEL_NAME)
x = torch.Tensor([i[0] for i in dataset], device=device).view(-1, 64, 64)
y = torch.Tensor([i[1] for i in dataset], device=device).view(-1, 64, 64)
val_len = int(len(dataset) * validation_percentage)
train_x = x[:-val_len]
train_y = y[:-val_len]
val_x = x[-val_len:]
val_y = y[-val_len:]
batch_size = 10
end = False
outputs = []
accuracy = 0
with open("model.log", "a") as f:
for epoch in range(epochs):
for i in tqdm(range(0, len(train_x), batch_size)):
batch_x = train_x[i:i + batch_size].view(-1, 1, 64, 64)
batch_y = train_y[i:i + batch_size].view(-1, 1, 64, 64)
u_net.zero_grad()
output = u_net.forward(batch_x)
loss = loss_func(output, batch_y)
loss.backward()
optimizer.step()
if i % batch_size == 0:
f.write(
f"{MODEL_NAME},{round(i, 3)},{round(float(accuracy), 2)},{round(float(loss), 2)},{epoch}\n")
with torch.no_grad():
total = 0
for k in range(0, len(val_x)):
out = u_net(val_x[k].view(-1, 1, 64, 64))
outputs.append(out)
img1 = np.uint8(out[0].view(64, 64) * 255)
img2 = np.uint8(y[k].view(64, 64) * 255)
ref = np.uint32(0)
dif = np.uint32(0)
for i in range(img1.shape[0]):
for j in range(img1.shape[1]):
ref += img2[i, j]
a1 = np.int16(img1[i, j])
a2 = np.int16(img2[i, j])
dif += abs(a1 - a2)
accuracy += round(1 - (dif / ref), 3)
total += 1
accuracy = accuracy / total
# print("Dokladnosc: ", accuracy)
if accuracy > 0.97:
# print("Uzyskano wystarczającą dokładoność")
torch.save(u_net, 'u_net.pt')
end = True
if end:
break
if end:
break
torch.save(u_net, 'u_net.pt')
img00 = np.uint8(val_x[0].view(64, 64) * 255)
img01 = np.uint8(outputs[0].view(64, 64) * 255)
img02 = np.uint8(val_y[0].view(64, 64) * 255)
img10 = np.uint8(val_x[1].view(64, 64) * 255)
img11 = np.uint8(outputs[1].view(64, 64) * 255)
img12 = np.uint8(val_y[1].view(64, 64) * 255)
img20 = np.uint8(val_x[2].view(64, 64) * 255)
img21 = np.uint8(outputs[2].view(64, 64) * 255)
img22 = np.uint8(val_y[2].view(64, 64) * 255)
img30 = np.uint8(val_x[3].view(64, 64) * 255)
img31 = np.uint8(outputs[3].view(64, 64) * 255)
img32 = np.uint8(val_y[3].view(64, 64) * 255)
fig, axs = plt.subplots(4, 3)
fig.suptitle(f'Wyniki dla {dataset.shape[0]} próbek i {epochs} epok', fontsize=15)
axs[0, 0].imshow(img00, cmap='gray')
axs[0, 0].set_title('Wylosowane punkty')
axs[0, 1].imshow(img01, cmap='gray')
axs[0, 1].set_title('Odpowiedź sieci neuronowej')
axs[0, 2].imshow(img02, cmap='gray')
axs[0, 2].set_title('Ścieżka znaleziona dzięki RRT*')
axs[1, 0].imshow(img10, cmap='gray')
axs[1, 1].imshow(img11, cmap='gray')
axs[1, 2].imshow(img12, cmap='gray')
axs[2, 0].imshow(img20, cmap='gray')
axs[2, 1].imshow(img21, cmap='gray')
axs[2, 2].imshow(img22, cmap='gray')
axs[3, 0].imshow(img30, cmap='gray')
axs[3, 1].imshow(img31, cmap='gray')
axs[3, 2].imshow(img32, cmap='gray')
# plt.savefig('nn_out.png')
plt.show()
def test_net(dateset_path, unet_path):
dataset = np.load(dateset_path)
u_net = torch.load(unet_path)
u_net.to(device)
x = torch.Tensor([i[0] for i in dataset], device=device).view(-1, 64, 64)
y = torch.Tensor([i[1] for i in dataset], device=device).view(-1, 64, 64)
outputs = []
accuracy = 0
total = 0
for k in tqdm(range(0, len(x))):
with torch.no_grad():
out = u_net(x[k].view(-1, 1, 64, 64))
outputs.append(out)
img1 = np.uint8(out[0].view(64, 64) * 255)
img2 = np.uint8(y[k].view(64, 64) * 255)
ref = np.uint32(0)
dif = np.uint32(0)
for i in range(img1.shape[0]):
for j in range(img1.shape[1]):
ref += img2[i, j]
a1 = np.int16(img1[i, j])
a2 = np.int16(img2[i, j])
dif += abs(a1 - a2)
accuracy += round(1 - (dif / ref), 3)
total += 1
accuracy = accuracy / total
print("Dokladnosc: ", accuracy)
img00 = np.uint8(x[0].view(64, 64) * 255)
img01 = np.uint8(outputs[0].view(64, 64) * 255)
img02 = np.uint8(y[0].view(64, 64) * 255)
img03 = filter_img(img00, img01)
img10 = np.uint8(x[1].view(64, 64) * 255)
img11 = np.uint8(outputs[1].view(64, 64) * 255)
img12 = np.uint8(y[1].view(64, 64) * 255)
img13 = filter_img(img10, img11)
img20 = np.uint8(x[2].view(64, 64) * 255)
img21 = np.uint8(outputs[2].view(64, 64) * 255)
img22 = np.uint8(y[2].view(64, 64) * 255)
img23 = filter_img(img20, img21)
img30 = np.uint8(x[3].view(64, 64) * 255)
img31 = np.uint8(outputs[3].view(64, 64) * 255)
img32 = np.uint8(y[3].view(64, 64) * 255)
img33 = filter_img(img30, img31)
fig, axs = plt.subplots(4, 4)
fig.suptitle(f'Wyniki testów dla {dataset.shape[0]} próbek i {epochs} epok', fontsize=15)
axs[0, 0].imshow(img00, cmap='gray')
axs[0, 0].set_title('Wylosowane punkty')
axs[0, 1].imshow(img01, cmap='gray')
axs[0, 1].set_title('Odpowiedź sieci neuronowej')
axs[0, 2].imshow(img02, cmap='gray')
axs[0, 2].set_title('Ścieżka znaleziona dzięki RRT*')
axs[0, 3].imshow(img03, cmap='gray')
axs[0, 3].set_title('Przefiltrowana ścieżka')
axs[1, 0].imshow(img10, cmap='gray')
axs[1, 1].imshow(img11, cmap='gray')
axs[1, 2].imshow(img12, cmap='gray')
axs[1, 3].imshow(img13, cmap='gray')
axs[2, 0].imshow(img20, cmap='gray')
axs[2, 1].imshow(img21, cmap='gray')
axs[2, 2].imshow(img22, cmap='gray')
axs[2, 3].imshow(img23, cmap='gray')
axs[3, 0].imshow(img30, cmap='gray')
axs[3, 1].imshow(img31, cmap='gray')
axs[3, 2].imshow(img32, cmap='gray')
axs[3, 3].imshow(img33, cmap='gray')
# plt.savefig('nn_out.png')
plt.show()
if __name__ == "__main__":
train_net('training_data.npy')
# test_net('testing_data_3.npy', 'u_net.pt')
|
py | b4169e73b83d3d8e23f4c239354cf7cbc30844af | import unittest
import json
from bitmovin import Bitmovin, Response, H265CodecConfiguration, H265Profile, H265Level, BAdapt, MaxCTUSize, \
TUIntraDepth, TUInterDepth, MotionSearch, ChromaLocation, ColorSpace, ColorPrimaries, ColorRange, ColorTransfer, \
InputColorSpace, InputColorRange, ColorConfig
from bitmovin.errors import BitmovinApiError
from tests.bitmovin import BitmovinTestCase
class H265CodecConfigurationTests(BitmovinTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
def setUp(self):
super().setUp()
self.bitmovin = Bitmovin(self.api_key)
self.assertIsNotNone(self.bitmovin)
self.assertTrue(isinstance(self.bitmovin, Bitmovin))
def tearDown(self):
super().tearDown()
def test_create_h265_codec_configuration(self):
sample_codec_configuration = self._get_sample_h265_codec_configuration()
codec_configuration_resource_response = self.bitmovin.codecConfigurations.H265.create(
sample_codec_configuration)
self.assertIsNotNone(codec_configuration_resource_response)
self.assertIsNotNone(codec_configuration_resource_response.resource)
self.assertIsNotNone(codec_configuration_resource_response.resource.id)
self._compare_h265_codec_configurations(sample_codec_configuration,
codec_configuration_resource_response.resource)
def test_retrieve_h265_codec_configuration(self):
sample_codec_configuration = self._get_sample_h265_codec_configuration()
created_codec_configuration_response = self.bitmovin.codecConfigurations.H265.create(sample_codec_configuration)
self.assertIsNotNone(created_codec_configuration_response)
self.assertIsNotNone(created_codec_configuration_response.resource)
self.assertIsNotNone(created_codec_configuration_response.resource.id)
self._compare_h265_codec_configurations(sample_codec_configuration,
created_codec_configuration_response.resource)
retrieved_codec_configuration_response = self.bitmovin.codecConfigurations.H265.retrieve(
created_codec_configuration_response.resource.id)
self.assertIsNotNone(retrieved_codec_configuration_response)
self.assertIsNotNone(retrieved_codec_configuration_response.resource)
self._compare_h265_codec_configurations(created_codec_configuration_response.resource,
retrieved_codec_configuration_response.resource)
def test_delete_h265_codec_configuration(self):
sample_codec_configuration = self._get_sample_h265_codec_configuration()
created_codec_configuration_response = self.bitmovin.codecConfigurations.H265.create(sample_codec_configuration)
self.assertIsNotNone(created_codec_configuration_response)
self.assertIsNotNone(created_codec_configuration_response.resource)
self.assertIsNotNone(created_codec_configuration_response.resource.id)
self._compare_h265_codec_configurations(sample_codec_configuration,
created_codec_configuration_response.resource)
deleted_minimal_resource = self.bitmovin.codecConfigurations.H265.delete(
created_codec_configuration_response.resource.id)
self.assertIsNotNone(deleted_minimal_resource)
self.assertIsNotNone(deleted_minimal_resource.resource)
self.assertIsNotNone(deleted_minimal_resource.resource.id)
try:
self.bitmovin.codecConfigurations.H265.retrieve(created_codec_configuration_response.resource.id)
self.fail(
'Previous statement should have thrown an exception. ' +
'Retrieving codec_configuration after deleting it shouldn\'t be possible.'
)
except BitmovinApiError:
pass
def test_list_h265_codec_configurations(self):
sample_codec_configuration = self._get_sample_h265_codec_configuration()
created_codec_configuration_response = self.bitmovin.codecConfigurations.H265.create(sample_codec_configuration)
self.assertIsNotNone(created_codec_configuration_response)
self.assertIsNotNone(created_codec_configuration_response.resource)
self.assertIsNotNone(created_codec_configuration_response.resource.id)
self._compare_h265_codec_configurations(sample_codec_configuration,
created_codec_configuration_response.resource)
codec_configurations = self.bitmovin.codecConfigurations.H265.list()
self.assertIsNotNone(codec_configurations)
self.assertIsNotNone(codec_configurations.resource)
self.assertIsNotNone(codec_configurations.response)
self.assertIsInstance(codec_configurations.resource, list)
self.assertIsInstance(codec_configurations.response, Response)
self.assertGreater(codec_configurations.resource.__sizeof__(), 1)
def test_retrieve_h265_codec_configuration_custom_data(self):
sample_codec_configuration = self._get_sample_h265_codec_configuration()
sample_codec_configuration.customData = '<pre>my custom data</pre>'
created_codec_configuration_response = self.bitmovin.codecConfigurations.H265.create(sample_codec_configuration)
self.assertIsNotNone(created_codec_configuration_response)
self.assertIsNotNone(created_codec_configuration_response.resource)
self.assertIsNotNone(created_codec_configuration_response.resource.id)
self._compare_h265_codec_configurations(sample_codec_configuration,
created_codec_configuration_response.resource)
custom_data_response = self.bitmovin.codecConfigurations.H265.retrieve_custom_data(
created_codec_configuration_response.resource.id)
custom_data = custom_data_response.resource
self.assertEqual(sample_codec_configuration.customData, json.loads(custom_data.customData))
def test_create_h265_codec_config_with_bitrate_and_crf(self):
sample_codec_configuration = self._get_sample_h265_codec_configuration()
self.assertIsNotNone(sample_codec_configuration.bitrate)
sample_codec_configuration.crf = 51.0
self.assertEqual(sample_codec_configuration.crf, 51.0)
try:
self.bitmovin.codecConfigurations.H265.create(sample_codec_configuration)
except BitmovinApiError as error:
self.assertEqual(error.response.status, 'ERROR')
self.assertEqual(error.response.data.code, 1001)
def test_create_h265_codec_configuration_with_crf(self):
sample_codec_configuration = self._get_sample_h265_codec_configuration()
sample_codec_configuration.bitrate = None
sample_codec_configuration.crf = 51.0
codec_configuration_resource_response = self.bitmovin.codecConfigurations.H265.create(
sample_codec_configuration)
self.assertIsNotNone(codec_configuration_resource_response)
self.assertIsNotNone(codec_configuration_resource_response.resource)
self.assertIsNotNone(codec_configuration_resource_response.resource.id)
self._compare_h265_codec_configurations(sample_codec_configuration,
codec_configuration_resource_response.resource)
def _compare_h265_codec_configurations(self, first: H265CodecConfiguration, second: H265CodecConfiguration):
"""
:param first: H265CodecConfiguration
:param second: H265CodecConfiguration
:return: bool
"""
self.assertEqual(first.name, second.name)
self.assertEqual(first.description, second.description)
self.assertEqual(first.bitrate, second.bitrate)
self.assertEqual(first.rate, second.rate)
self.assertEqual(first.width, second.width)
self.assertEqual(first.height, second.height)
self.assertEqual(first.profile, second.profile)
self.assertEqual(first.bframes, second.bframes)
self.assertEqual(first.refFrames, second.refFrames)
self.assertEqual(first.qp, second.qp)
self.assertEqual(first.maxBitrate, second.maxBitrate)
self.assertEqual(first.minBitrate, second.minBitrate)
self.assertEqual(first.bufsize, second.bufsize)
self.assertEqual(first.minGop, second.minGop)
self.assertEqual(first.maxGop, second.maxGop)
self.assertEqual(first.level, second.level)
self.assertEqual(first.rcLookahead, second.rcLookahead)
self.assertEqual(first.bAdapt, second.bAdapt)
self.assertEqual(first.maxCTUSize, second.maxCTUSize)
self.assertEqual(first.tuIntraDepth, second.tuIntraDepth)
self.assertEqual(first.tuInterDepth, second.tuInterDepth)
self.assertEqual(first.motionSearch, second.motionSearch)
self.assertEqual(first.subMe, second.subMe)
self.assertEqual(first.motionSearchRange, second.motionSearchRange)
self.assertEqual(first.weightPredictionOnBSlice, second.weightPredictionOnBSlice)
self.assertEqual(first.weightPredictionOnPSlice, second.weightPredictionOnPSlice)
self.assertEqual(first.sao, second.sao)
self.assertEqual(first.crf, second.crf)
self.assertEqual(first.maxKeyframeInterval, second.maxKeyframeInterval)
self.assertEqual(first.minKeyframeInterval, second.minKeyframeInterval)
self.assertEqual(first.sceneCutThreshold, second.sceneCutThreshold)
self.assertTrue(self._compare_color_configs(first.colorConfig, second.colorConfig))
return True
def _compare_color_configs(self, first: ColorConfig, second: ColorConfig):
"""
:param first: ColorConfig
:param second: ColorConfig
:return: bool
"""
self.assertEqual(first.inputColorSpace, second.inputColorSpace)
self.assertEqual(first.colorTransfer, second.colorTransfer)
self.assertEqual(first.colorRange, second.colorRange)
self.assertEqual(first.colorPrimaries, second.colorPrimaries)
self.assertEqual(first.colorSpace, second.colorSpace)
self.assertEqual(first.chromaLocation, second.chromaLocation)
self.assertEqual(first.copyChromaLocationFlag, second.copyChromaLocationFlag)
self.assertEqual(first.copyColorPrimariesFlag, second.copyColorPrimariesFlag)
self.assertEqual(first.copyColorRangeFlag, second.copyColorRangeFlag)
self.assertEqual(first.copyColorSpaceFlag, second.copyColorSpaceFlag)
self.assertEqual(first.copyColorTransferFlag, second.copyColorTransferFlag)
return True
def _get_sample_h265_codec_configuration(self):
h265_codec_configuration = H265CodecConfiguration(name='H265 Sample Codec Config',
description='Long description for H265 Codec Config',
bitrate=10000000,
rate=23.97,
profile=H265Profile.main,
width=1920,
height=1080,
bframes=3,
ref_frames=5,
qp=10,
max_bitrate=10000000,
min_bitrate=5000000,
level=H265Level.L5_1,
bufsize=10000000,
min_gop=None,
max_gop=None,
rc_lookahead=20,
b_adapt=BAdapt.FULL,
max_ctu_size=MaxCTUSize.S64,
tu_intra_depth=TUIntraDepth.D1,
tu_inter_depth=TUInterDepth.D1,
motion_search=MotionSearch.HEX,
sub_me=2,
motion_search_range=57,
weight_prediction_on_b_slice=False,
weight_prediction_on_p_slice=True,
sao=True,
crf=None,
pixel_format=None,
scene_cut_threshold=30,
max_keyframe_interval=5,
min_keyframe_interval=3,
color_config=ColorConfig(
copy_chroma_location_flag=True,
copy_color_space_flag=True,
copy_color_primaries_flag=True,
copy_color_range_flag=True,
copy_color_transfer_flag=True,
chroma_location=ChromaLocation.BOTTOM,
color_space=ColorSpace.BT2020_CL,
color_primaries=ColorPrimaries.BT709,
color_range=ColorRange.MPEG,
color_transfer=ColorTransfer.BT2020_10,
input_color_space=InputColorSpace.BT470BG,
input_color_range=InputColorRange.JPEG
))
self.assertIsNotNone(h265_codec_configuration.name)
self.assertIsNotNone(h265_codec_configuration.description)
self.assertIsNotNone(h265_codec_configuration.bitrate)
self.assertIsNotNone(h265_codec_configuration.rate)
self.assertIsNotNone(h265_codec_configuration.profile)
self.assertIsNotNone(h265_codec_configuration.width)
self.assertIsNotNone(h265_codec_configuration.height)
self.assertIsNotNone(h265_codec_configuration.bframes)
self.assertIsNotNone(h265_codec_configuration.refFrames)
self.assertIsNotNone(h265_codec_configuration.qp)
self.assertIsNotNone(h265_codec_configuration.maxBitrate)
self.assertIsNotNone(h265_codec_configuration.minBitrate)
self.assertIsNotNone(h265_codec_configuration.level)
self.assertIsNotNone(h265_codec_configuration.bufsize)
self.assertIsNotNone(h265_codec_configuration.rcLookahead)
self.assertIsNotNone(h265_codec_configuration.bAdapt)
self.assertIsNotNone(h265_codec_configuration.maxCTUSize)
self.assertIsNotNone(h265_codec_configuration.tuIntraDepth)
self.assertIsNotNone(h265_codec_configuration.tuInterDepth)
self.assertIsNotNone(h265_codec_configuration.motionSearch)
self.assertIsNotNone(h265_codec_configuration.subMe)
self.assertIsNotNone(h265_codec_configuration.motionSearchRange)
self.assertIsNotNone(h265_codec_configuration.weightPredictionOnPSlice)
self.assertIsNotNone(h265_codec_configuration.weightPredictionOnBSlice)
self.assertIsNotNone(h265_codec_configuration.sao)
return h265_codec_configuration
if __name__ == '__main__':
unittest.main()
|
py | b4169ecc969243d6a435fa3224e8aa1898dd7276 | from __future__ import unicode_literals
import datetime
from business_rules.actions import BaseActions, rule_action
from business_rules.fields import FIELD_NUMERIC
from business_rules.variables import (
BaseVariables,
numeric_rule_variable,
string_rule_variable,
)
from django.utils import timezone
from django_business_rules.business_rule import BusinessRule
from test_app.models import ProductOrder
class ProductVariables(BaseVariables):
def __init__(self, product):
self.product = product
@numeric_rule_variable
def current_inventory(self):
return self.product.current_inventory
@numeric_rule_variable(label="Days until expiration")
def expiration_days(self):
last_order = self.product.orders[-1]
expiration_days = (last_order.expiration_date - datetime.date.today()).days
return expiration_days
@string_rule_variable()
def current_month(self):
return timezone.now().strftime("%B")
class ProductActions(BaseActions):
def __init__(self, product):
self.product = product
@rule_action(params={"sale_percentage": FIELD_NUMERIC})
def put_on_sale(self, sale_percentage):
self.product.price *= 1.0 - sale_percentage
self.product.save()
@rule_action(params={"number_to_order": FIELD_NUMERIC})
def order_more(self, number_to_order):
ProductOrder.objects.create(
product=self.product,
quantity=number_to_order,
expiration_date=timezone.now() + timezone.timedelta(weeks=4),
)
class ProductBusinessRule(BusinessRule):
name = "Product rules"
variables = ProductVariables
actions = ProductActions
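# Illustrative rule payload (values assumed) that this rule set could evaluate:
#   {'conditions': {'all': [{'name': 'expiration_days', 'operator': 'less_than', 'value': 5}]},
#    'actions': [{'name': 'put_on_sale', 'params': {'sale_percentage': 0.25}}]}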
|
py | b416a0a60cd362f1557ef2312f103f7c4e6da092 |
##########################################
###### This file was autogenerated. ######
######### DO NOT EDIT this file. #########
##########################################
### file to edit: dev_nb/imflash217__04_lsuv.ipynb ####
from exp.nb_03_batchnorm import *
def get_batch(dl, run):
run.xb, run.yb = next(iter(dl))
for cb in run.cbs:
cb.set_runner(run)
run("begin_batch")
return run.xb, run.yb
def find_modules(module, cond):
if cond(module):
return [module]
return sum([find_modules(m, cond) for m in module.children()], [])
def is_lin_layer(l):
lin_layers = (torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d, torch.nn.Linear, torch.nn.ReLU)
return isinstance(l, lin_layers)
def lsuv_module(module, xb):
    # LSUV init: repeatedly run a batch through the model and adjust this
    # module's bias/weights until the hooked activations have roughly zero
    # mean and unit standard deviation.
    # Note: `learner` is expected to be defined globally by the caller
    # (as in the notebook this file was generated from).
    h = Hook(module, append_stat)
    while learner.model(xb) is not None and abs(h.mean) > 1e-3:
        module.bias -= h.mean
    while learner.model(xb) is not None and abs(h.std-1) > 1e-3:
        module.weight.data /= h.std
    h.remove()
return h.mean, h.std |
py | b416a1f500826841f362ef3641eb87198a2b7375 | #!/usr/bin/python3
# -*- encoding: UTF-8 -*-
import shlex
import subprocess
import bot.dummy
import oth.roark.exceptions
class process(bot.dummy.dummy):
def __init__(self):
self.tracked_process = dict()
def request(self, path, query):
response = None
try:
if len(path) == 1 and query['command'] == 'POST' and \
'command_line' in query:
command_line = shlex.split(query['command_line'])
p = subprocess.Popen(command_line,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=('shell' in query))
self.tracked_process[p.pid] = p
response = {'pid': p.pid}
elif len(path) == 1 and query['command'] == 'GET':
response = {'process': dict(
zip(
self.tracked_process.keys(),
(' '.join(val.args) for val in self.tracked_process.values())
)
)}
elif len(path) == 2 and query['command'] == 'GET':
pid = int(path[1])
(outs, errs) = self.tracked_process[pid].communicate(timeout=1)
(outs, errs) = (outs.decode('utf-8'), errs.decode('utf-8'))
response = {'pid': pid,
'stdout': outs,
'stderr': errs,
'returncode': self.tracked_process[pid].returncode}
elif len(path) == 2 and query['command'] == 'PUT':
pid = int(path[1])
if 'signal' in query:
self.tracked_process[pid].send_signal(query['signal'])
(outs, errs) = self.tracked_process[pid].communicate(input=query.get('input', None),
timeout=1)
response = {'pid': pid,
'stdout': outs,
'stderr': errs,
'returncode': self.tracked_process[pid].returncode}
elif len(path) == 2 and query['command'] == 'DELETE':
pid = int(path[1])
self.tracked_process[pid].terminate()
response = {'pid': pid,
'acknowledge': True}
else:
raise oth.roark.exceptions.CommandException("Invalid command.", None)
except KeyError as e:
raise oth.roark.exceptions.CommandException("PID {pid} not registered.".format(pid=path[1]), str(e))
except subprocess.TimeoutExpired as e:
raise oth.roark.exceptions.CommandException("PID {pid} hasn't responded in 1 second.".format(pid=path[1]), str(e))
else:
return response
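# --- Illustrative usage sketch (not part of the original module) ---
# Rough shape of the path/query arguments this handler appears to expect; the
# transport that builds them lives elsewhere in the bot framework, so the
# concrete values below are assumptions.
#     handler = process()
#     handler.request(['process'], {'command': 'POST', 'command_line': 'echo hello'})  # spawn
#     handler.request(['process'], {'command': 'GET'})                                 # list tracked pids
#     handler.request(['process', '42'], {'command': 'GET'})                           # poll pid 42
#     handler.request(['process', '42'], {'command': 'DELETE'})                        # terminate pid 42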
|
py | b416a274797f6c4f8620a2f3507a1207a6242906 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-31 09:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('first_app', '0006_auto_20160331_0821'),
]
operations = [
migrations.CreateModel(
name='InventoryCharacter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('char', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='first_app.Character')),
('content', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='first_app.Loot')),
],
),
]
|
py | b416a2860b982a60c3609d9851f2417b55cfb021 | import json
import sys
import decimal
from collaborative_filtering import CollaborativeFiltering
# define the recommendation algorithm and the hyperparameters
recommender = CollaborativeFiltering('movie_ratings')
hyperparameters = {'rank': 8, 'iterations': 15, 'lambda': .1}
user_id = 0
if len(sys.argv) != 2:
print('Actual argument count is {0}'.format(len(sys.argv)))
raise ValueError('Must specify user ratings as an argument')
# load the user's ratings into tuples, noting that 0 is the
# sentinel value for the current user
given_ratings = [(user_id, int(rating['movie_id']), decimal.Decimal(str(rating['rating'])))
for rating
in json.loads(sys.argv[1])]
recommendations = recommender.recommend(given_ratings,
user_id,
hyperparameters)
# print a json formatted version of the results to the console
print(str(json.dumps(recommendations)).replace("u'", "").replace("'", ""))
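# --- Illustrative usage sketch (not part of the original script) ---
# The script expects a single JSON argument with the user's ratings, e.g.
#     python recommend.py '[{"movie_id": 1, "rating": 4.5}, {"movie_id": 2, "rating": 3.0}]'
# The script file name and the exact shape of recommend()'s return value are
# assumptions; they depend on the local CollaborativeFiltering implementation.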
|
py | b416a2da399ff504839b88096356185ccc262f49 | from mathchem import *
def batch_process(infile, file_format, outfile, function, hydrogens=False) :
""" Read file molecule-by-molecule and apply a <function> to each molecule
Good for large files containing thousands of molecules
"""
f_out = open(outfile, 'w')
f_in = open(infile, 'r')
if file_format == 'g6' or file_format == 's6':
for line in f_in:
#line = f_in.readline()
m = Mol(line)
f_out.write(str(function(m))+"\n")
elif file_format =='sdf':
while True:
m = _read_sdf_molecule(f_in, hydrogens)
if m == False: break
f_out.write(str(function(m))+"\n")
elif file_format =='mol2':
while True:
m = _read_mol2_molecule(f_in, hydrogens)
if m == False: break
f_out.write(str(function(m))+"\n")
    # TODO: read the directory, because a .mol file contains only one molecule
elif file_format =='mol':
m = read_from_mol(f_in, hydrogens)
if m != False:
f_out.write(str(function(m))+"\n")
f_in.close()
f_out.close()
return
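# --- Illustrative usage sketch (not part of the original module) ---
# Batch-computing one descriptor per molecule from a large SDF file; the
# descriptor method name (wiener_index) is assumed to exist on Mol.
#     batch_process('molecules.sdf', 'sdf', 'wiener.txt',
#                   lambda m: m.wiener_index(), hydrogens=False)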
#
# Functions that read all the file and return list of Mol instances
#
def read_from_sdf(fname, hydrogens = False):
"""
Read the whole .sdf file and return list of Mol instances
"""
f_in = open(fname, 'r')
mols = []
while True:
m = _read_sdf_molecule(f_in, hydrogens)
if m == False: break
mols.append(m)
f_in.close()
return mols
def read_from_mol(fname, hydrogens = False):
"""
Read the whole .mol file and return Mol instance
"""
f_in = open(fname, 'r')
m = _read_sdf_molecule(f_in, hydrogens)
f_in.close()
return m
def read_from_mol2(fname, hydrogens = False):
"""
Read the whole .mol2 file and return list of Mol instances
"""
f_in = open(fname, 'r')
mols = []
while True:
m = _read_mol2_molecule(f_in, hydrogens)
if m == False: break
mols.append(m)
f_in.close()
return mols
def read_from_g6(fname):
"""
    Read the whole .g6 file (graph6 format) and return list of Mol instances
"""
f_in = open(fname, 'r')
mols = []
for line in f_in:
mols.append(Mol(line))
f_in.close()
return mols
def read_from_s6(fname):
"""
Read the whole .s6 file (sparse6 format) and return list of Mol instances
"""
f_in = open(fname, 'r')
mols = []
for line in f_in:
mols.append(Mol(line))
f_in.close()
return mols
def read_from_planar_code(fname):
"""
    Read the whole file (planar code format) and return list of Mol instances
"""
f_in = open(fname, 'rb')
mols = []
# read header >>planar_code<<
f_in.read(15)
# TODO: check for correct header
byte = f_in.read(1)
# read byte by byte
while byte != "":
n = ord(byte)
m = Mol()
A = [[0 for col in range(n)] for row in range(n)]
E = [] # here we will collect edges
k = 1 # current vertex
while byte != "":
byte = f_in.read(1)
b = ord(byte)
if b == 0: # go to the next vertex
k += 1
if k == n+1: # go to the next graph
break
            elif A[k-1][b-1] == 0: # if we haven't already added the edge
E.append((k-1,b-1))
A[k-1][b-1] = 1
A[b-1][k-1] = 1
m._set_Order(n)
m._set_A(A)
m._set_Edges(E)
mols.append(m)
byte = f_in.read(1)
f_in.close()
return mols
#
# NCI functions
#
def read_from_NCI_by_NSC(num, hydrogens = False):
url = 'http://cactus.nci.nih.gov/ncidb2.2/nci2.2.tcl?op1=nsc&data1='+num+'&output=sdf&nomsg=1&maxhits=1000000'
return _read_from_NCI(url, hydrogens)
def read_from_NCI_by_name(name, hydrogens = False, fields = []):
    import urllib
    fields_string = ''
for f in fields: fields_string = fields_string + '&fields=' + urllib.quote_plus(f)
url = 'http://cactus.nci.nih.gov/ncidb2.2/nci2.2.tcl?op1=name&data1='+name+'&method1=substring&output=sdf&nomsg=1&maxhits=1000000'+ fields_string
return _read_from_NCI(url, hydrogens)
def read_from_NCI_by_CAS(num, hydrogens = False):
url = 'http://cactus.nci.nih.gov/ncidb2.2/nci2.2.tcl?op1=cas&data1='+num+'&output=sdf&nomsg=1&maxhits=1000000'
return _read_from_NCI(url, hydrogens)
# helpers
def spectrum(matrix):
r""" Calculates spectrum of the matrix"""
from numpy import linalg as la
s = la.eigvalsh(matrix).tolist()
s.sort(reverse=True)
return s
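# Illustrative example (not part of the original module): the adjacency matrix
# of a single edge has eigenvalues +1 and -1, returned in descending order.
#     spectrum([[0, 1], [1, 0]])   # -> [1.0, -1.0]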
def all_adriatic():
""" Generate all possible parameters sets for adriatic indices"""
r = []
for p in [0,1]:
for i in [1,2,3]:
for j in range(1,9):
if i == 3:
for a in [0.5, 2]:
r.append((p,i,j,a))
elif i == 2 and j in range(1,6):
for a in [-1, -0.5, 0.5, 1, 2]:
r.append((p,i,j,a))
elif i == 2 or i == 1:
for a in [0.5, 1, 2]:
r.append((p,i,j,a))
return r
def adriatic_name(p,i,j,a):
""" Return the name for given parameters of Adriatic indices"""
#(j)
name1 = {1:'Randic type ',\
2:'sum ',\
3:'inverse sum ', \
4:'misbalance ', \
5:'inverse misbalance ', \
6:'min-max ', \
7:'max-min ', \
8:'symmetric division '}
# (i,a)
name2 = {(1, 0.5):'lor',\
(1,1):'lo', \
(1,2):'los', \
(2,-1):'in', \
(2, -0.5):'ir', \
(2, 0.5):'ro', \
(2,1):'', \
(2,2):'s', \
(3, 0.5):'ha', \
(3,2):'two'}
#(p)
name3 = {0:'deg', 1:'di'}
return(name1[j]+name2[(i,a)]+name3[p])
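# Illustrative example (not part of the original module):
#     adriatic_name(0, 1, 1, 0.5)   # -> 'Randic type lordeg'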
def spectral_moment( k, matrix):
""" Return k-th spectral moment of a matrix
parameters: matrix
"""
return np.sum(np.power(spectrum(matrix),k))
def spectral_radius(matrix):
s = spectrum(matrix)
return max(abs(s[0]), abs(s[len(s)-1]))
def energy(matrix):
""" Return energy of a matrix
parameters: matrix
"""
s = spectrum(matrix)
a = np.sum(s,dtype=np.float128)/len(s)
return np.float64(np.sum( map( lambda x: abs(x-a) , s), dtype=np.float128))
###
###
### Private functions
###
###
# make a request to the NCI and retrieve data in the form of an SDF file
def _read_from_NCI(url, hydrogens = False):
import urllib2, tempfile
try:
resp = urllib2.urlopen(url)
    except Exception as e:
print 'Can not open NCI online database.'
return False
if resp.code != 200:
print 'Server returned error code ', resp.code
return False
f = tempfile.TemporaryFile()
f.write(resp.read())
f.seek(0)
mols = []
while True:
m = _read_sdf_molecule(f, hydrogens)
if m == False: break
mols.append(m)
f.close()
return mols
# functions which parse a fragment of file and initialize Mol instance
# read a single molecule from file
def _read_sdf_molecule(file, hydrogens = False):
# read the header 3 lines
for i in range(3):
file.readline()
line = file.readline()
if line == '': return False
# this does not work for 123456 which must be 123 and 456
#(atoms, bonds) = [t(s) for t,s in zip((int,int),line.split())]
atoms = int(line[:3])
bonds = int(line[3:6])
order = atoms
v = [];
for i in range( atoms ):
line = file.readline()
symbol = line.split()[3]
if hydrogens == False and (symbol == 'H' or symbol == 'h'):
order = order - 1
else:
v.append(i+1);
    # fill the matrix A with zeros
A = [[0 for col in range(order)] for row in range(order)]
edges = []
for i in range( bonds ):
line = file.readline()
#(a1, a2) = [t(s) for t,s in zip((int,int),line.split())]
a1 = int(line[:3])
a2 = int(line[3:6])
if a1 in v and a2 in v:
# add edge here!
k = v.index(a1)
j = v.index(a2)
A[k][j] = 1
A[j][k] = 1
edges.append((k,j))
while line !='':
line = file.readline()
if line[:4] == "$$$$": break
m = Mol()
m._set_A(A)
m._set_Order(order)
m._set_Edges(edges)
return m
# read a single molecule from file
def _read_mol2_molecule(file, hydrogens = False):
# seek for MOLECULE tag
line = file.readline()
while line != '':
if line.strip() == '@<TRIPOS>MOLECULE': break
line = file.readline()
if line == '': return False
#skip molecule name
file.readline()
# read
line = file.readline()
atoms = int(line.split()[0])
# TODO: number of bonds may not be present
bonds = int(line.split()[1])
#print atoms, bonds
order = atoms
v = [];
# seek for ATOM tag
line = file.readline()
while line != '':
if line.strip() == '@<TRIPOS>ATOM': break
line = file.readline()
for i in range( atoms ):
line = file.readline()
arr = line.split()
id = int(arr[0])
symbol = arr[4]
if hydrogens == False and (symbol == 'H' or symbol == 'h'):
order = order - 1
else:
v.append(id);
    # fill the matrix A with zeros
A = [[0 for col in range(order)] for row in range(order)]
edges = []
#seek for bonds tag @<TRIPOS>BOND
line = file.readline()
while line !='':
if line.strip() == '@<TRIPOS>BOND': break
line = file.readline()
if line == '': return False
for i in range( bonds ):
line = file.readline()
(bid, a1, a2) = [t(s) for t,s in zip((int, int,int),line.split())]
if a1 in v and a2 in v:
# add edge here!
k = v.index(a1)
j = v.index(a2)
A[k][j] = 1
A[j][k] = 1
edges.append((k,j))
m = Mol()
m._set_A(A)
m._set_Order(order)
m._set_Edges(edges)
return m
|
py | b416a4d4f957c35b77b62c763e606b4bb7f0a5e9 | from typing import Iterable, List, Dict, Any, Union, Callable
import keras as K
import numpy as np
from apollo.blocks.base import OutputBlock, pad_list
class OutputBlockList:
def __init__(self, blocks: Iterable[OutputBlock]):
self.__blocks = blocks
self.__output_layers = dict()
self.loses = dict()
self.metrics = dict()
def output_layers(self) -> List[K.layers.Layer]:
return list(self.__output_layers.values())
def output_layer(self, name) -> K.layers.Layer:
return self.__output_layers[name]
def get_output_data(self, dataset: 'apollo.data.DataSet') -> Dict[str, Any]:
outputs = dict()
for block in self.__blocks:
for name in block.observations():
outputs[name] = dataset[name]
return outputs
def __call__(self, dataset: 'apollo.data.DataSet') -> 'OutputBlockList':
for block in self.__blocks:
block(dataset)
self.__output_layers.update(block.output_layers())
self.loses.update(block.losses())
self.metrics.update(block.metrics())
return self
class CRFSequenceOutput(OutputBlock):
def __init__(self, observation: str, max_sequence_length=112):
super(CRFSequenceOutput, self).__init__()
self.__output = observation
self.__label_size = 0
self.__max_sequence_length = max_sequence_length
def losses(self) -> Dict[str, Any]:
from keras_contrib.losses import crf_loss
return {self.__output: crf_loss}
def metrics(self) -> Dict[str, Any]:
from keras_contrib.metrics import crf_marginal_accuracy
return {self.__output: crf_marginal_accuracy}
def _create_output_layers(self) -> Dict[str, K.layers.Layer]:
from keras_contrib.layers import CRF
return {self.__output: CRF(self.__label_size, learn_mode="marginal", name=self.__output)}
def observations(self) -> Iterable[str]:
return [self.__output]
def __call__(self, data: 'apollo.data.DataSet') -> None:
name = self.__output
self.__label_size = data.dimension(name)
data[name] = np.array([K.utils.to_categorical(pad_list(x, self.__max_sequence_length, 0), self.__label_size)
for x in data[name]])
class CategoricalOutput(OutputBlock):
def __init__(self,
observation: str):
super(CategoricalOutput, self).__init__()
self.__output = observation
self.__label_size = 0
def losses(self) -> Dict[str, Any]:
if self.__label_size <= 2:
return {self.__output: "binary_crossentropy"}
return {self.__output: "categorical_crossentropy"}
def metrics(self) -> Dict[str, Any]:
return {self.__output: "accuracy"}
def _create_output_layers(self) -> Dict[str, K.layers.Layer]:
activation = "sigmoid" if self.__label_size <= 2 else "softmax"
return {self.__output: K.layers.Dense(self.__label_size, activation=activation, name=self.__output)}
def observations(self) -> Iterable[str]:
return [self.__output]
def __call__(self, data: 'apollo.data.DataSet') -> None:
name = self.__output
self.__label_size = data.dimension(name)
data[name] = K.utils.to_categorical(np.array(data[name]), num_classes=self.__label_size)
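# --- Illustrative usage sketch (not part of the original module) ---
# Wiring an output block list into a Keras model; `dataset` is assumed to be
# an apollo.data.DataSet exposing dimension()/__getitem__ as used above, and
# `inputs`/`features` already-built input/hidden tensors. Note `loses` is the
# attribute name as defined on OutputBlockList above.
#     blocks = OutputBlockList([CategoricalOutput("label")])
#     blocks(dataset)                       # fits label sizes and one-hot encodes targets
#     outputs = [layer(features) for layer in blocks.output_layers()]
#     model = K.models.Model(inputs=inputs, outputs=outputs)
#     model.compile(optimizer="adam", loss=blocks.loses, metrics=blocks.metrics)
#     model.fit(x=..., y=blocks.get_output_data(dataset))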
|
py | b416a6d288262233c228846844993d5faa8ee5f1 | import os
import re
import json
from Qt import QtGui, QtCore, QtWidgets
import nodz_utils as utils
defaultConfigPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'default_config.json')
class Nodz(QtWidgets.QGraphicsView):
"""
The main view for the node graph representation.
The node view implements a state pattern to control all the
different user interactions.
"""
signal_NodeCreated = QtCore.Signal(object)
signal_NodeDeleted = QtCore.Signal(object)
signal_NodeEdited = QtCore.Signal(object, object)
signal_NodeSelected = QtCore.Signal(object)
signal_AttrCreated = QtCore.Signal(object, object)
signal_AttrDeleted = QtCore.Signal(object, object)
signal_AttrEdited = QtCore.Signal(object, object, object)
signal_PlugConnected = QtCore.Signal(object, object, object, object)
signal_PlugDisconnected = QtCore.Signal(object, object, object, object)
signal_SocketConnected = QtCore.Signal(object, object, object, object)
signal_SocketDisconnected = QtCore.Signal(object, object, object, object)
signal_GraphSaved = QtCore.Signal()
signal_GraphLoaded = QtCore.Signal()
signal_GraphCleared = QtCore.Signal()
signal_GraphEvaluated = QtCore.Signal()
signal_KeyPressed = QtCore.Signal(object)
    signal_Dropped = QtCore.Signal(object)
def __init__(self, parent, configPath=defaultConfigPath):
"""
Initialize the graphics view.
"""
super(Nodz, self).__init__(parent)
# Load nodz configuration.
self.loadConfig(configPath)
# General data.
self.gridVisToggle = True
self.gridSnapToggle = False
self._nodeSnap = False
self.selectedNodes = None
# Connections data.
self.drawingConnection = False
self.currentHoveredNode = None
self.sourceSlot = None
# Display options.
self.currentState = 'DEFAULT'
self.pressedKeys = list()
def wheelEvent(self, event):
"""
Zoom in the view with the mouse wheel.
"""
self.currentState = 'ZOOM_VIEW'
self.setTransformationAnchor(QtWidgets.QGraphicsView.AnchorUnderMouse)
inFactor = 1.15
outFactor = 1 / inFactor
if event.delta() > 0:
zoomFactor = inFactor
else:
zoomFactor = outFactor
self.scale(zoomFactor, zoomFactor)
self.currentState = 'DEFAULT'
def mousePressEvent(self, event):
"""
Initialize tablet zoom, drag canvas and the selection.
"""
# Tablet zoom
if (event.button() == QtCore.Qt.RightButton and
event.modifiers() == QtCore.Qt.AltModifier):
self.currentState = 'ZOOM_VIEW'
self.initMousePos = event.pos()
self.zoomInitialPos = event.pos()
self.initMouse = QtGui.QCursor.pos()
self.setInteractive(False)
# Drag view
elif (event.button() == QtCore.Qt.MiddleButton and
event.modifiers() == QtCore.Qt.AltModifier):
self.currentState = 'DRAG_VIEW'
self.prevPos = event.pos()
self.setCursor(QtCore.Qt.ClosedHandCursor)
self.setInteractive(False)
# Rubber band selection
elif (event.button() == QtCore.Qt.LeftButton and
event.modifiers() == QtCore.Qt.NoModifier and
self.scene().itemAt(self.mapToScene(event.pos()), QtGui.QTransform()) is None):
self.currentState = 'SELECTION'
self._initRubberband(event.pos())
self.setInteractive(False)
# Drag Item
elif (event.button() == QtCore.Qt.LeftButton and
event.modifiers() == QtCore.Qt.NoModifier and
self.scene().itemAt(self.mapToScene(event.pos()), QtGui.QTransform()) is not None):
self.currentState = 'DRAG_ITEM'
self.setInteractive(True)
# Add selection
elif (event.button() == QtCore.Qt.LeftButton and
QtCore.Qt.Key_Shift in self.pressedKeys and
QtCore.Qt.Key_Control in self.pressedKeys):
self.currentState = 'ADD_SELECTION'
self._initRubberband(event.pos())
self.setInteractive(False)
# Subtract selection
elif (event.button() == QtCore.Qt.LeftButton and
event.modifiers() == QtCore.Qt.ControlModifier):
self.currentState = 'SUBTRACT_SELECTION'
self._initRubberband(event.pos())
self.setInteractive(False)
# Toggle selection
elif (event.button() == QtCore.Qt.LeftButton and
event.modifiers() == QtCore.Qt.ShiftModifier):
self.currentState = 'TOGGLE_SELECTION'
self._initRubberband(event.pos())
self.setInteractive(False)
else:
self.currentState = 'DEFAULT'
super(Nodz, self).mousePressEvent(event)
def mouseMoveEvent(self, event):
"""
Update tablet zoom, canvas dragging and selection.
"""
# Zoom.
if self.currentState == 'ZOOM_VIEW':
offset = self.zoomInitialPos.x() - event.pos().x()
if offset > self.previousMouseOffset:
self.previousMouseOffset = offset
self.zoomDirection = -1
self.zoomIncr -= 1
elif offset == self.previousMouseOffset:
self.previousMouseOffset = offset
if self.zoomDirection == -1:
self.zoomDirection = -1
else:
self.zoomDirection = 1
else:
self.previousMouseOffset = offset
self.zoomDirection = 1
self.zoomIncr += 1
if self.zoomDirection == 1:
zoomFactor = 1.03
else:
zoomFactor = 1 / 1.03
# Perform zoom and re-center on initial click position.
pBefore = self.mapToScene(self.initMousePos)
self.setTransformationAnchor(QtWidgets.QGraphicsView.AnchorViewCenter)
self.scale(zoomFactor, zoomFactor)
pAfter = self.mapToScene(self.initMousePos)
diff = pAfter - pBefore
self.setTransformationAnchor(QtWidgets.QGraphicsView.NoAnchor)
self.translate(diff.x(), diff.y())
# Drag canvas.
elif self.currentState == 'DRAG_VIEW':
offset = self.prevPos - event.pos()
self.prevPos = event.pos()
self.verticalScrollBar().setValue(self.verticalScrollBar().value() + offset.y())
self.horizontalScrollBar().setValue(self.horizontalScrollBar().value() + offset.x())
        # Rubber band selection.
elif (self.currentState == 'SELECTION' or
self.currentState == 'ADD_SELECTION' or
self.currentState == 'SUBTRACT_SELECTION' or
self.currentState == 'TOGGLE_SELECTION'):
self.rubberband.setGeometry(QtCore.QRect(self.origin, event.pos()).normalized())
super(Nodz, self).mouseMoveEvent(event)
def mouseReleaseEvent(self, event):
"""
Apply tablet zoom, dragging and selection.
"""
# Zoom the View.
        if self.currentState == 'ZOOM_VIEW':
self.offset = 0
self.zoomDirection = 0
self.zoomIncr = 0
self.setInteractive(True)
# Drag View.
elif self.currentState == 'DRAG_VIEW':
self.setCursor(QtCore.Qt.ArrowCursor)
self.setInteractive(True)
# Selection.
elif self.currentState == 'SELECTION':
self.rubberband.setGeometry(QtCore.QRect(self.origin,
event.pos()).normalized())
painterPath = self._releaseRubberband()
self.setInteractive(True)
self.scene().setSelectionArea(painterPath)
# Add Selection.
elif self.currentState == 'ADD_SELECTION':
self.rubberband.setGeometry(QtCore.QRect(self.origin,
event.pos()).normalized())
painterPath = self._releaseRubberband()
self.setInteractive(True)
for item in self.scene().items(painterPath):
item.setSelected(True)
# Subtract Selection.
elif self.currentState == 'SUBTRACT_SELECTION':
self.rubberband.setGeometry(QtCore.QRect(self.origin,
event.pos()).normalized())
painterPath = self._releaseRubberband()
self.setInteractive(True)
for item in self.scene().items(painterPath):
item.setSelected(False)
# Toggle Selection
elif self.currentState == 'TOGGLE_SELECTION':
self.rubberband.setGeometry(QtCore.QRect(self.origin,
event.pos()).normalized())
painterPath = self._releaseRubberband()
self.setInteractive(True)
for item in self.scene().items(painterPath):
if item.isSelected():
item.setSelected(False)
else:
item.setSelected(True)
self.currentState = 'DEFAULT'
super(Nodz, self).mouseReleaseEvent(event)
def keyPressEvent(self, event):
"""
Save pressed key and apply shortcuts.
Shortcuts are:
DEL - Delete the selected nodes
F - Focus view on the selection
"""
if event.key() not in self.pressedKeys:
self.pressedKeys.append(event.key())
if event.key() == QtCore.Qt.Key_Delete:
self._deleteSelectedNodes()
if event.key() == QtCore.Qt.Key_F:
self._focus()
if event.key() == QtCore.Qt.Key_S:
self._nodeSnap = True
# Emit signal.
self.signal_KeyPressed.emit(event.key())
def keyReleaseEvent(self, event):
"""
Clear the key from the pressed key list.
"""
if event.key() == QtCore.Qt.Key_S:
self._nodeSnap = False
if event.key() in self.pressedKeys:
self.pressedKeys.remove(event.key())
def _initRubberband(self, position):
"""
Initialize the rubber band at the given position.
"""
self.rubberBandStart = position
self.origin = position
self.rubberband.setGeometry(QtCore.QRect(self.origin, QtCore.QSize()))
self.rubberband.show()
def _releaseRubberband(self):
"""
Hide the rubber band and return the path.
"""
painterPath = QtGui.QPainterPath()
rect = self.mapToScene(self.rubberband.geometry())
painterPath.addPolygon(rect)
self.rubberband.hide()
return painterPath
def _focus(self):
"""
Center on selected nodes or all of them if no active selection.
"""
if self.scene().selectedItems():
itemsArea = self._getSelectionBoundingbox()
self.fitInView(itemsArea, QtCore.Qt.KeepAspectRatio)
else:
itemsArea = self.scene().itemsBoundingRect()
self.fitInView(itemsArea, QtCore.Qt.KeepAspectRatio)
def _getSelectionBoundingbox(self):
"""
Return the bounding box of the selection.
"""
bbx_min = None
bbx_max = None
bby_min = None
bby_max = None
bbw = 0
bbh = 0
for item in self.scene().selectedItems():
pos = item.scenePos()
x = pos.x()
y = pos.y()
w = x + item.boundingRect().width()
h = y + item.boundingRect().height()
# bbx min
if bbx_min is None:
bbx_min = x
elif x < bbx_min:
bbx_min = x
# end if
# bbx max
if bbx_max is None:
bbx_max = w
elif w > bbx_max:
bbx_max = w
# end if
# bby min
if bby_min is None:
bby_min = y
elif y < bby_min:
bby_min = y
# end if
# bby max
if bby_max is None:
bby_max = h
elif h > bby_max:
bby_max = h
# end if
# end if
bbw = bbx_max - bbx_min
bbh = bby_max - bby_min
return QtCore.QRectF(QtCore.QRect(bbx_min, bby_min, bbw, bbh))
def _deleteSelectedNodes(self):
"""
Delete selected nodes.
"""
selected_nodes = list()
for node in self.scene().selectedItems():
selected_nodes.append(node.name)
node._remove()
# Emit signal.
self.signal_NodeDeleted.emit(selected_nodes)
def _returnSelection(self):
"""
Wrapper to return selected items.
"""
selected_nodes = list()
if self.scene().selectedItems():
for node in self.scene().selectedItems():
selected_nodes.append(node.name)
# Emit signal.
self.signal_NodeSelected.emit(selected_nodes)
##################################################################
# API
##################################################################
def loadConfig(self, filePath):
"""
Set a specific configuration for this instance of Nodz.
:type filePath: str.
:param filePath: The path to the config file that you want to
use.
"""
self.config = utils._loadConfig(filePath)
def initialize(self):
"""
Setup the view's behavior.
"""
# Setup view.
config = self.config
self.setRenderHint(QtGui.QPainter.Antialiasing, config['antialiasing'])
self.setRenderHint(QtGui.QPainter.TextAntialiasing, config['antialiasing'])
self.setRenderHint(QtGui.QPainter.HighQualityAntialiasing, config['antialiasing_boost'])
self.setRenderHint(QtGui.QPainter.SmoothPixmapTransform, config['smooth_pixmap'])
self.setRenderHint(QtGui.QPainter.NonCosmeticDefaultPen, True)
self.setViewportUpdateMode(QtWidgets.QGraphicsView.FullViewportUpdate)
self.setTransformationAnchor(QtWidgets.QGraphicsView.AnchorUnderMouse)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.rubberband = QtWidgets.QRubberBand(QtWidgets.QRubberBand.Rectangle, self)
# Setup scene.
scene = NodeScene(self)
sceneWidth = config['scene_width']
sceneHeight = config['scene_height']
scene.setSceneRect(0, 0, sceneWidth, sceneHeight)
self.setScene(scene)
# Tablet zoom.
self.previousMouseOffset = 0
self.zoomDirection = 0
self.zoomIncr = 0
# Connect signals.
self.scene().selectionChanged.connect(self._returnSelection)
# NODES
def createNode(self, name='default', preset='node_default', position=None, alternate=True):
"""
Create a new node with a given name, position and color.
:type name: str.
:param name: The name of the node. The name has to be unique
as it is used as a key to store the node object.
:type preset: str.
:param preset: The name of graphical preset in the config file.
:type position: QtCore.QPoint.
:param position: The position of the node once created. If None,
it will be created at the center of the scene.
:type alternate: bool.
:param alternate: The attribute color alternate state, if True,
every 2 attribute the color will be slightly
darker.
:return : The created node
"""
# Check for name clashes
if name in self.scene().nodes.keys():
print 'A node with the same name already exists : {0}'.format(name)
print 'Node creation aborted !'
return
else:
nodeItem = NodeItem(name=name, alternate=alternate, preset=preset,
config=self.config)
# Store node in scene.
self.scene().nodes[name] = nodeItem
if not position:
# Get the center of the view.
position = self.mapToScene(self.viewport().rect().center())
# Set node position.
self.scene().addItem(nodeItem)
nodeItem.setPos(position - nodeItem.nodeCenter)
# Emit signal.
self.signal_NodeCreated.emit(name)
return nodeItem
def deleteNode(self, node):
"""
Delete the specified node from the view.
:type node: class.
:param node: The node instance that you want to delete.
"""
if not node in self.scene().nodes.values():
print 'Node object does not exist !'
print 'Node deletion aborted !'
return
if node in self.scene().nodes.values():
nodeName = node.name
node._remove()
# Emit signal.
self.signal_NodeDeleted.emit([nodeName])
def editNode(self, node, newName=None):
"""
Rename an existing node.
:type node: class.
        :param node: The node instance that you want to rename.
:type newName: str.
:param newName: The new name for the given node.
"""
if not node in self.scene().nodes.values():
print 'Node object does not exist !'
print 'Node edition aborted !'
return
oldName = node.name
if newName != None:
# Check for name clashes
if newName in self.scene().nodes.keys():
print 'A node with the same name already exists : {0}'.format(newName)
print 'Node edition aborted !'
return
else:
node.name = newName
# Replace node data.
self.scene().nodes[newName] = self.scene().nodes[oldName]
self.scene().nodes.pop(oldName)
# Store new node name in the connections
if node.sockets:
for socket in node.sockets.values():
for connection in socket.connections:
connection.socketNode = newName
if node.plugs:
for plug in node.plugs.values():
for connection in plug.connections:
connection.plugNode = newName
node.update()
# Emit signal.
self.signal_NodeEdited.emit(oldName, newName)
# ATTRS
def createAttribute(self, node, name='default', index=-1, preset='attr_default', plug=True, socket=True, dataType=None):
"""
Create a new attribute with a given name.
:type node: class.
        :param node: The node instance on which you want to create the attribute.
:type name: str.
:param name: The name of the attribute. The name has to be
unique as it is used as a key to store the node
object.
:type index: int.
:param index: The index of the attribute in the node.
:type preset: str.
:param preset: The name of graphical preset in the config file.
:type plug: bool.
:param plug: Whether or not this attribute can emit connections.
:type socket: bool.
:param socket: Whether or not this attribute can receive
connections.
:type dataType: type.
:param dataType: Type of the data represented by this attribute
in order to highlight attributes of the same
type while performing a connection.
"""
if not node in self.scene().nodes.values():
print 'Node object does not exist !'
print 'Attribute creation aborted !'
return
if name in node.attrs:
print 'An attribute with the same name already exists : {0}'.format(name)
print 'Attribute creation aborted !'
return
node._createAttribute(name=name, index=index, preset=preset, plug=plug, socket=socket, dataType=dataType)
# Emit signal.
self.signal_AttrCreated.emit(node.name, index)
def deleteAttribute(self, node, index):
"""
Delete the specified attribute.
:type node: class.
        :param node: The node instance from which you want to delete the attribute.
:type index: int.
:param index: The index of the attribute in the node.
"""
if not node in self.scene().nodes.values():
print 'Node object does not exist !'
print 'Attribute deletion aborted !'
return
node._deleteAttribute(index)
# Emit signal.
self.signal_AttrDeleted.emit(node.name, index)
def editAttribute(self, node, index, newName=None, newIndex=None):
"""
Edit the specified attribute.
:type node: class.
        :param node: The node instance whose attribute you want to edit.
:type index: int.
:param index: The index of the attribute in the node.
:type newName: str.
:param newName: The new name for the given attribute.
:type newIndex: int.
:param newIndex: The index for the given attribute.
"""
if not node in self.scene().nodes.values():
print 'Node object does not exist !'
print 'Attribute creation aborted !'
return
if newName != None:
if newName in node.attrs:
print 'An attribute with the same name already exists : {0}'.format(newName)
print 'Attribute edition aborted !'
return
else:
oldName = node.attrs[index]
# Rename in the slot item(s).
if node.attrsData[oldName]['plug']:
node.plugs[oldName].attribute = newName
node.plugs[newName] = node.plugs[oldName]
node.plugs.pop(oldName)
for connection in node.plugs[newName].connections:
connection.plugAttr = newName
if node.attrsData[oldName]['socket']:
node.sockets[oldName].attribute = newName
node.sockets[newName] = node.sockets[oldName]
node.sockets.pop(oldName)
for connection in node.sockets[newName].connections:
connection.socketAttr = newName
# Replace attribute data.
node.attrsData[oldName]['name'] = newName
node.attrsData[newName] = node.attrsData[oldName]
node.attrsData.pop(oldName)
node.attrs[index] = newName
if isinstance(newIndex, int):
attrName = node.attrs[index]
utils._swapListIndices(node.attrs, index, newIndex)
# Refresh connections.
for plug in node.plugs.values():
plug.update()
if plug.connections:
for connection in plug.connections:
if isinstance(connection.source, PlugItem):
connection.source = plug
connection.source_point = plug.center()
else:
connection.target = plug
connection.target_point = plug.center()
if newName:
connection.plugAttr = newName
connection.updatePath()
for socket in node.sockets.values():
socket.update()
if socket.connections:
for connection in socket.connections:
if isinstance(connection.source, SocketItem):
connection.source = socket
connection.source_point = socket.center()
else:
connection.target = socket
connection.target_point = socket.center()
if newName:
connection.socketAttr = newName
connection.updatePath()
self.scene().update()
node.update()
# Emit signal.
if newIndex:
self.signal_AttrEdited.emit(node.name, index, newIndex)
else:
self.signal_AttrEdited.emit(node.name, index, index)
# GRAPH
def saveGraph(self, filePath='path'):
"""
Get all the current graph infos and store them in a .json file
at the given location.
:type filePath: str.
:param filePath: The path where you want to save your graph at.
"""
data = dict()
# Store nodes data.
data['NODES'] = dict()
nodes = self.scene().nodes.keys()
for node in nodes:
nodeInst = self.scene().nodes[node]
preset = nodeInst.nodePreset
nodeAlternate = nodeInst.alternate
data['NODES'][node] = {'preset': preset,
'position': [nodeInst.pos().x(), nodeInst.pos().y()],
'alternate': nodeAlternate,
'attributes': []}
attrs = nodeInst.attrs
for attr in attrs:
attrData = nodeInst.attrsData[attr]
# serialize dataType if needed.
if isinstance(attrData['dataType'], type):
attrData['dataType'] = str(attrData['dataType'])
data['NODES'][node]['attributes'].append(attrData)
# Store connections data.
data['CONNECTIONS'] = self.evaluateGraph()
# Save data.
try:
utils._saveData(filePath=filePath, data=data)
except:
print 'Invalid path : {0}'.format(filePath)
print 'Save aborted !'
return False
# Emit signal.
self.signal_GraphSaved.emit()
def loadGraph(self, filePath='path'):
"""
Get all the stored info from the .json file at the given location
and recreate the graph as saved.
:type filePath: str.
:param filePath: The path where you want to load your graph from.
"""
# Load data.
if os.path.exists(filePath):
data = utils._loadData(filePath=filePath)
else:
print 'Invalid path : {0}'.format(filePath)
print 'Load aborted !'
return False
# Apply nodes data.
nodesData = data['NODES']
nodesName = nodesData.keys()
for name in nodesName:
preset = nodesData[name]['preset']
position = nodesData[name]['position']
position = QtCore.QPointF(position[0], position[1])
alternate = nodesData[name]['alternate']
node = self.createNode(name=name,
preset=preset,
position=position,
alternate=alternate)
# Apply attributes data.
attrsData = nodesData[name]['attributes']
for attrData in attrsData:
index = attrsData.index(attrData)
name = attrData['name']
plug = attrData['plug']
socket = attrData['socket']
preset = attrData['preset']
dataType = attrData['dataType']
# un-serialize data type if needed
if (isinstance(dataType, unicode) and dataType.find('<') == 0):
dataType = eval(str(dataType.split('\'')[1]))
self.createAttribute(node=node,
name=name,
index=index,
preset=preset,
plug=plug,
socket=socket,
dataType=dataType)
# Apply connections data.
connectionsData = data['CONNECTIONS']
for connection in connectionsData:
source = connection[0]
sourceNode = source.split('.')[0]
sourceAttr = source.split('.')[1]
target = connection[1]
targetNode = target.split('.')[0]
targetAttr = target.split('.')[1]
self.createConnection(sourceNode, sourceAttr,
targetNode, targetAttr)
self.scene().update()
# Emit signal.
self.signal_GraphLoaded.emit()
def createConnection(self, sourceNode, sourceAttr, targetNode, targetAttr):
"""
Create a manual connection.
:type sourceNode: str.
:param sourceNode: Node that emits the connection.
:type sourceAttr: str.
:param sourceAttr: Attribute that emits the connection.
:type targetNode: str.
:param targetNode: Node that receives the connection.
:type targetAttr: str.
:param targetAttr: Attribute that receives the connection.
"""
plug = self.scene().nodes[sourceNode].plugs[sourceAttr]
socket = self.scene().nodes[targetNode].sockets[targetAttr]
connection = ConnectionItem(plug.center(), socket.center(), plug, socket)
connection.plugNode = plug.parentItem().name
connection.plugAttr = plug.attribute
connection.socketNode = socket.parentItem().name
connection.socketAttr = socket.attribute
plug.connect(socket, connection)
socket.connect(plug, connection)
connection.updatePath()
self.scene().addItem(connection)
return connection
def evaluateGraph(self):
"""
Create a list of connection tuples.
[("sourceNode.attribute", "TargetNode.attribute"), ...]
"""
scene = self.scene()
data = list()
for item in scene.items():
if isinstance(item, ConnectionItem):
connection = item
data.append(connection._outputConnectionData())
# Emit Signal
self.signal_GraphEvaluated.emit()
return data
def clearGraph(self):
"""
Clear the graph.
"""
self.scene().clear()
self.scene().nodes = dict()
# Emit signal.
self.signal_GraphCleared.emit()
##################################################################
# END API
##################################################################
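    # --- Illustrative usage sketch (not part of the original module) ---
    # Driving the API above from a Qt application; widget parenting and the
    # event loop are the caller's responsibility.
    #     app = QtWidgets.QApplication([])
    #     nodz = Nodz(None)
    #     nodz.initialize()
    #     nodz.show()
    #     node_a = nodz.createNode(name='nodeA')
    #     node_b = nodz.createNode(name='nodeB')
    #     nodz.createAttribute(node=node_a, name='out', plug=True, socket=False, dataType=str)
    #     nodz.createAttribute(node=node_b, name='in', plug=False, socket=True, dataType=str)
    #     nodz.createConnection('nodeA', 'out', 'nodeB', 'in')
    #     app.exec_()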
class NodeScene(QtWidgets.QGraphicsScene):
"""
The scene displaying all the nodes.
"""
def __init__(self, parent):
"""
Initialize the class.
"""
super(NodeScene, self).__init__(parent)
# General.
self.gridSize = parent.config['grid_size']
# Nodes storage.
self.nodes = dict()
def dragEnterEvent(self, event):
"""
Make the dragging of nodes into the scene possible.
"""
event.setDropAction(QtCore.Qt.MoveAction)
event.accept()
def dragMoveEvent(self, event):
"""
Make the dragging of nodes into the scene possible.
"""
event.setDropAction(QtCore.Qt.MoveAction)
event.accept()
def dropEvent(self, event):
"""
Create a node from the dropped item.
"""
# Emit signal.
        # The signal lives on the Nodz view, which is this scene's parent.
        self.parent().signal_Dropped.emit(event.scenePos())
event.accept()
def drawBackground(self, painter, rect):
"""
Draw a grid in the background.
"""
if self.views()[0].gridVisToggle:
leftLine = rect.left() - rect.left() % self.gridSize
topLine = rect.top() - rect.top() % self.gridSize
lines = list()
i = int(leftLine)
while i < int(rect.right()):
lines.append(QtCore.QLineF(i, rect.top(), i, rect.bottom()))
i += self.gridSize
u = int(topLine)
while u < int(rect.bottom()):
lines.append(QtCore.QLineF(rect.left(), u, rect.right(), u))
u += self.gridSize
self.pen = QtGui.QPen()
config = self.parent().config
self.pen.setColor(utils._convertDataToColor(config['grid_color']))
self.pen.setWidth(0)
painter.setPen(self.pen)
painter.drawLines(lines)
def updateScene(self):
"""
Update the connections position.
"""
for connection in [i for i in self.items() if isinstance(i, ConnectionItem)]:
connection.target_point = connection.target.center()
connection.source_point = connection.source.center()
connection.updatePath()
class NodeItem(QtWidgets.QGraphicsItem):
"""
A graphic representation of a node containing attributes.
"""
def __init__(self, name, alternate, preset, config):
"""
Initialize the class.
:type name: str.
:param name: The name of the node. The name has to be unique
as it is used as a key to store the node object.
:type alternate: bool.
:param alternate: The attribute color alternate state, if True,
every 2 attribute the color will be slightly
darker.
:type preset: str.
:param preset: The name of graphical preset in the config file.
"""
super(NodeItem, self).__init__()
self.setZValue(1)
# Storage
self.name = name
self.alternate = alternate
self.nodePreset = preset
self.attrPreset = None
# Attributes storage.
self.attrs = list()
self.attrsData = dict()
self.attrCount = 0
self.currentDataType = None
self.plugs = dict()
self.sockets = dict()
# Methods.
self._createStyle(config)
@property
def height(self):
"""
Increment the final height of the node every time an attribute
is created.
"""
if self.attrCount > 0:
return (self.baseHeight +
self.attrHeight * self.attrCount +
self.border +
0.5 * self.radius)
else:
return self.baseHeight
@property
def pen(self):
"""
Return the pen based on the selection state of the node.
"""
if self.isSelected():
return self._penSel
else:
return self._pen
def _createStyle(self, config):
"""
Read the node style from the configuration file.
"""
self.setAcceptHoverEvents(True)
self.setFlag(QtWidgets.QGraphicsItem.ItemIsMovable)
self.setFlag(QtWidgets.QGraphicsItem.ItemIsSelectable)
# Dimensions.
self.baseWidth = config['node_width']
self.baseHeight = config['node_height']
self.attrHeight = config['node_attr_height']
self.border = config['node_border']
self.radius = config['node_radius']
self.nodeCenter = QtCore.QPointF()
self.nodeCenter.setX(self.baseWidth / 2.0)
self.nodeCenter.setY(self.height / 2.0)
self._brush = QtGui.QBrush()
self._brush.setStyle(QtCore.Qt.SolidPattern)
self._brush.setColor(utils._convertDataToColor(config[self.nodePreset]['bg']))
self._pen = QtGui.QPen()
self._pen.setStyle(QtCore.Qt.SolidLine)
self._pen.setWidth(self.border)
self._pen.setColor(utils._convertDataToColor(config[self.nodePreset]['border']))
self._penSel = QtGui.QPen()
self._penSel.setStyle(QtCore.Qt.SolidLine)
self._penSel.setWidth(self.border)
self._penSel.setColor(utils._convertDataToColor(config[self.nodePreset]['border_sel']))
self._textPen = QtGui.QPen()
self._textPen.setStyle(QtCore.Qt.SolidLine)
self._textPen.setColor(utils._convertDataToColor(config[self.nodePreset]['text']))
self._nodeTextFont = QtGui.QFont(config['node_font'], config['node_font_size'], QtGui.QFont.Bold)
self._attrTextFont = QtGui.QFont(config['attr_font'], config['attr_font_size'], QtGui.QFont.Normal)
self._attrBrush = QtGui.QBrush()
self._attrBrush.setStyle(QtCore.Qt.SolidPattern)
self._attrBrushAlt = QtGui.QBrush()
self._attrBrushAlt.setStyle(QtCore.Qt.SolidPattern)
self._attrPen = QtGui.QPen()
self._attrPen.setStyle(QtCore.Qt.SolidLine)
def _createAttribute(self, name, index, preset, plug, socket, dataType):
"""
Create an attribute by expanding the node, adding a label and
connection items.
:type name: str.
:param name: The name of the attribute. The name has to be
unique as it is used as a key to store the node
object.
:type index: int.
:param index: The index of the attribute in the node.
:type preset: str.
:param preset: The name of graphical preset in the config file.
:type plug: bool.
:param plug: Whether or not this attribute can emit connections.
:type socket: bool.
:param socket: Whether or not this attribute can receive
connections.
:type dataType: type.
:param dataType: Type of the data represented by this attribute
in order to highlight attributes of the same
type while performing a connection.
"""
if name in self.attrs:
print 'An attribute with the same name already exists on this node : {0}'.format(name)
print 'Attribute creation aborted !'
return
self.attrPreset = preset
# Create a plug connection item.
if plug:
plugInst = PlugItem(parent=self,
attribute=name,
index=self.attrCount,
preset=preset,
dataType=dataType)
self.plugs[name] = plugInst
# Create a socket connection item.
if socket:
socketInst = SocketItem(parent=self,
attribute=name,
index=self.attrCount,
preset=preset,
dataType=dataType)
self.sockets[name] = socketInst
self.attrCount += 1
# Add the attribute based on its index.
if index == -1 or index > self.attrCount:
self.attrs.append(name)
else:
self.attrs.insert(index, name)
# Store attr data.
self.attrsData[name] = {'name': name,
'socket': socket,
'plug': plug,
'preset': preset,
'dataType': dataType}
# Update node height.
self.update()
def _deleteAttribute(self, index):
"""
Remove an attribute by reducing the node, removing the label
and the connection items.
:type index: int.
:param index: The index of the attribute in the node.
"""
name = self.attrs[index]
# Remove socket and its connections.
if name in self.sockets.keys():
for connection in self.sockets[name].connections:
connection._remove()
self.scene().removeItem(self.sockets[name])
self.sockets.pop(name)
# Remove plug and its connections.
if name in self.plugs.keys():
for connection in self.plugs[name].connections:
connection._remove()
self.scene().removeItem(self.plugs[name])
self.plugs.pop(name)
# Reduce node height.
if self.attrCount > 0:
self.attrCount -= 1
# Remove attribute from node.
if name in self.attrs:
self.attrs.remove(name)
self.update()
def _remove(self):
"""
Remove this node instance from the scene.
Make sure that all the connections to this node are also removed
in the process
"""
self.scene().nodes.pop(self.name)
# Remove all sockets connections.
for socket in self.sockets.values():
while len(socket.connections)>0:
socket.connections[0]._remove()
# Remove all plugs connections.
for plug in self.plugs.values():
while len(plug.connections)>0:
plug.connections[0]._remove()
# Remove node.
scene = self.scene()
scene.removeItem(self)
scene.update()
def boundingRect(self):
"""
The bounding rect based on the width and height variables.
"""
rect = QtCore.QRect(0, 0, self.baseWidth, self.height)
rect = QtCore.QRectF(rect)
return rect
def shape(self):
"""
The shape of the item.
"""
path = QtGui.QPainterPath()
path.addRect(self.boundingRect())
return path
def paint(self, painter, option, widget):
"""
Paint the node and attributes.
"""
# Node base.
painter.setBrush(self._brush)
painter.setPen(self.pen)
painter.drawRoundedRect(0, 0,
self.baseWidth,
self.height,
self.radius,
self.radius)
# Node label.
painter.setPen(self._textPen)
painter.setFont(self._nodeTextFont)
metrics = QtGui.QFontMetrics(painter.font())
text_width = metrics.boundingRect(self.name).width() + 14
text_height = metrics.boundingRect(self.name).height() + 14
margin = (text_width - self.baseWidth) * 0.5
textRect = QtCore.QRect(-margin,
-text_height,
text_width,
text_height)
painter.drawText(textRect,
QtCore.Qt.AlignCenter,
self.name)
# Attributes.
offset = 0
for attr in self.attrs:
nodzInst = self.scene().views()[0]
config = nodzInst.config
# Attribute rect.
rect = QtCore.QRect(self.border / 2,
self.baseHeight - self.radius + offset,
self.baseWidth - self.border,
self.attrHeight)
attrData = self.attrsData[attr]
name = attr
preset = attrData['preset']
# Attribute base.
self._attrBrush.setColor(utils._convertDataToColor(config[preset]['bg']))
if self.alternate:
self._attrBrushAlt.setColor(utils._convertDataToColor(config[preset]['bg'], True, config['alternate_value']))
self._attrPen.setColor(utils._convertDataToColor([0, 0, 0, 0]))
painter.setPen(self._attrPen)
painter.setBrush(self._attrBrush)
if (offset / self.attrHeight) % 2:
painter.setBrush(self._attrBrushAlt)
painter.drawRect(rect)
# Attribute label.
painter.setPen(utils._convertDataToColor(config[preset]['text']))
painter.setFont(self._attrTextFont)
# Search non-connectable attributes.
if nodzInst.drawingConnection:
if self == nodzInst.currentHoveredNode:
if (attrData['dataType'] != nodzInst.sourceSlot.dataType or
(nodzInst.sourceSlot.slotType == 'plug' and attrData['socket'] == False or
nodzInst.sourceSlot.slotType == 'socket' and attrData['plug'] == False)):
# Set non-connectable attributes color.
painter.setPen(utils._convertDataToColor(config['non_connectable_color']))
painter.drawText(QtCore.QRect(0.5 + 10,
self.baseHeight-self.radius+(self.attrHeight/4)+offset,
self.baseWidth - 20,
self.attrHeight),
0.5, name)
offset += self.attrHeight
def mousePressEvent(self, event):
"""
Keep the selected node on top of the others.
"""
nodes = self.scene().nodes
for node in nodes.values():
node.setZValue(1)
for item in self.scene().items():
if isinstance(item, ConnectionItem):
item.setZValue(1)
self.setZValue(2)
super(NodeItem, self).mousePressEvent(event)
def mouseMoveEvent(self, event):
"""
        Move the node, snapping its position to the grid when grid snapping is enabled.
"""
if self.scene().views()[0].gridVisToggle:
if self.scene().views()[0].gridSnapToggle or self.scene().views()[0]._nodeSnap:
gridSize = self.scene().gridSize
currentPos = self.mapToScene(event.pos().x() - self.baseWidth / 2,
event.pos().y() - self.height / 2)
snap_x = (round(currentPos.x() / gridSize) * gridSize) - gridSize/4
snap_y = (round(currentPos.y() / gridSize) * gridSize) - gridSize/4
snap_pos = QtCore.QPointF(snap_x, snap_y)
self.setPos(snap_pos)
self.scene().updateScene()
else:
self.scene().updateScene()
super(NodeItem, self).mouseMoveEvent(event)
def hoverLeaveEvent(self, event):
"""
        Reset the z-value of all connections when the cursor leaves the node.
"""
nodzInst = self.scene().views()[0]
for item in nodzInst.scene().items():
if isinstance(item, ConnectionItem):
item.setZValue(0)
super(NodeItem, self).hoverLeaveEvent(event)
class SlotItem(QtWidgets.QGraphicsItem):
"""
The base class for graphics item representing attributes hook.
"""
def __init__(self, parent, attribute, preset, index, dataType):
"""
Initialize the class.
:param parent: The parent item of the slot.
:type parent: QtWidgets.QGraphicsItem instance.
:param attribute: The attribute associated to the slot.
:type attribute: String.
        :type index: int.
        :param index: The index of the attribute in the node.
:type preset: str.
:param preset: The name of graphical preset in the config file.
:param dataType: The data type associated to the attribute.
:type dataType: Type.
"""
super(SlotItem, self).__init__(parent)
# Status.
self.setAcceptHoverEvents(True)
# Storage.
self.slotType = None
self.attribute = attribute
self.preset = preset
self.index = index
self.dataType = dataType
# Style.
self.brush = QtGui.QBrush()
self.brush.setStyle(QtCore.Qt.SolidPattern)
self.pen = QtGui.QPen()
self.pen.setStyle(QtCore.Qt.SolidLine)
# Connections storage.
self.connected_slots = list()
self.newConnection = None
self.connections = list()
def mousePressEvent(self, event):
"""
Start the connection process.
"""
if event.button() == QtCore.Qt.LeftButton:
self.newConnection = ConnectionItem(self.center(),
self.mapToScene(event.pos()),
self,
None)
self.connections.append(self.newConnection)
self.scene().addItem(self.newConnection)
nodzInst = self.scene().views()[0]
nodzInst.drawingConnection = True
nodzInst.sourceSlot = self
nodzInst.currentDataType = self.dataType
else:
super(SlotItem, self).mousePressEvent(event)
def mouseMoveEvent(self, event):
"""
Update the new connection's end point position.
"""
nodzInst = self.scene().views()[0]
config = nodzInst.config
if nodzInst.drawingConnection:
mbb = utils._createPointerBoundingBox(pointerPos=event.scenePos().toPoint(),
bbSize=config['mouse_bounding_box'])
# Get nodes in pointer's bounding box.
targets = self.scene().items(mbb)
if any(isinstance(target, NodeItem) for target in targets):
if self.parentItem() not in targets:
for target in targets:
if isinstance(target, NodeItem):
nodzInst.currentHoveredNode = target
else:
nodzInst.currentHoveredNode = None
# Set connection's end point.
self.newConnection.target_point = self.mapToScene(event.pos())
self.newConnection.updatePath()
else:
super(SlotItem, self).mouseMoveEvent(event)
def mouseReleaseEvent(self, event):
"""
Apply the connection if target_slot is valid.
"""
nodzInst = self.scene().views()[0]
if event.button() == QtCore.Qt.LeftButton:
nodzInst.drawingConnection = False
nodzInst.currentDataType = None
target = self.scene().itemAt(event.scenePos().toPoint(), QtGui.QTransform())
if not isinstance(target, SlotItem):
self.newConnection._remove()
super(SlotItem, self).mouseReleaseEvent(event)
return
if target.accepts(self):
self.newConnection.target = target
self.newConnection.source = self
self.newConnection.target_point = target.center()
self.newConnection.source_point = self.center()
# Perform the ConnectionItem.
self.connect(target, self.newConnection)
target.connect(self, self.newConnection)
self.newConnection.updatePath()
else:
self.newConnection._remove()
else:
super(SlotItem, self).mouseReleaseEvent(event)
nodzInst.currentHoveredNode = None
def shape(self):
"""
The shape of the Slot is a circle.
"""
path = QtGui.QPainterPath()
path.addRect(self.boundingRect())
return path
def paint(self, painter, option, widget):
"""
Paint the Slot.
"""
painter.setBrush(self.brush)
painter.setPen(self.pen)
nodzInst = self.scene().views()[0]
config = nodzInst.config
if nodzInst.drawingConnection:
if self.parentItem() == nodzInst.currentHoveredNode:
painter.setBrush(utils._convertDataToColor(config['non_connectable_color']))
if (self.slotType == nodzInst.sourceSlot.slotType or (self.slotType != nodzInst.sourceSlot.slotType and self.dataType != nodzInst.sourceSlot.dataType)):
painter.setBrush(utils._convertDataToColor(config['non_connectable_color']))
else:
_penValid = QtGui.QPen()
_penValid.setStyle(QtCore.Qt.SolidLine)
_penValid.setWidth(2)
_penValid.setColor(QtGui.QColor(255, 255, 255, 255))
painter.setPen(_penValid)
painter.setBrush(self.brush)
painter.drawEllipse(self.boundingRect())
def center(self):
"""
Return The center of the Slot.
"""
rect = self.boundingRect()
center = QtCore.QPointF(rect.x() + rect.width() * 0.5,
rect.y() + rect.height() * 0.5)
return self.mapToScene(center)
class PlugItem(SlotItem):
"""
A graphics item representing an attribute out hook.
"""
def __init__(self, parent, attribute, index, preset, dataType):
"""
Initialize the class.
:param parent: The parent item of the slot.
:type parent: QtWidgets.QGraphicsItem instance.
:param attribute: The attribute associated to the slot.
:type attribute: String.
        :type index: int.
        :param index: The index of the attribute in the node.
:type preset: str.
:param preset: The name of graphical preset in the config file.
:param dataType: The data type associated to the attribute.
:type dataType: Type.
"""
super(PlugItem, self).__init__(parent, attribute, preset, index, dataType)
# Storage.
        self.attribute = attribute
self.preset = preset
self.slotType = 'plug'
# Methods.
self._createStyle(parent)
def _createStyle(self, parent):
"""
Read the attribute style from the configuration file.
"""
config = parent.scene().views()[0].config
self.brush = QtGui.QBrush()
self.brush.setStyle(QtCore.Qt.SolidPattern)
self.brush.setColor(utils._convertDataToColor(config[self.preset]['plug']))
def boundingRect(self):
"""
The bounding rect based on the width and height variables.
"""
width = height = self.parentItem().attrHeight / 2.0
nodzInst = self.scene().views()[0]
config = nodzInst.config
x = self.parentItem().baseWidth - (width / 2.0)
y = (self.parentItem().baseHeight - config['node_radius'] +
self.parentItem().attrHeight / 4 +
self.parentItem().attrs.index(self.attribute) * self.parentItem().attrHeight)
rect = QtCore.QRectF(QtCore.QRect(x, y, width, height))
return rect
def accepts(self, socket_item):
"""
Only accepts socket items that belong to other nodes.
"""
if isinstance(socket_item, SocketItem):
if self.parentItem() != socket_item.parentItem():
if socket_item.dataType == self.dataType:
if socket_item in self.connected_slots:
return False
else:
return True
else:
return False
else:
return False
def connect(self, socket_item, connection):
"""
Connect to the given socket_item.
"""
# Populate connection.
connection.socketItem = socket_item
connection.plugNode = self.parentItem().name
connection.plugAttr = self.attribute
# Add socket to connected slots.
if socket_item in self.connected_slots:
self.connected_slots.remove(socket_item)
self.connected_slots.append(socket_item)
# Add connection.
if connection not in self.connections:
self.connections.append(connection)
# Emit signal.
nodzInst = self.scene().views()[0]
nodzInst.signal_PlugConnected.emit(connection.plugNode, connection.plugAttr, connection.socketNode, connection.socketAttr)
def disconnect(self, connection):
"""
Disconnect the given connection from this plug item.
"""
# Emit signal.
nodzInst = self.scene().views()[0]
nodzInst.signal_PlugDisconnected.emit(connection.plugNode, connection.plugAttr, connection.socketNode, connection.socketAttr)
# Remove connected socket from plug
if connection.socketItem in self.connected_slots:
self.connected_slots.remove(connection.socketItem)
# Remove connection
self.connections.remove(connection)
class SocketItem(SlotItem):
"""
A graphics item representing an attribute in hook.
"""
def __init__(self, parent, attribute, index, preset, dataType):
"""
Initialize the socket.
:param parent: The parent item of the slot.
:type parent: QtWidgets.QGraphicsItem instance.
:param attribute: The attribute associated to the slot.
:type attribute: String.
        :type index: int.
        :param index: The index of the attribute in the node.
:type preset: str.
:param preset: The name of graphical preset in the config file.
:param dataType: The data type associated to the attribute.
:type dataType: Type.
"""
super(SocketItem, self).__init__(parent, attribute, preset, index, dataType)
# Storage.
self.attribute = attribute
self.preset = preset
self.slotType = 'socket'
# Methods.
self._createStyle(parent)
def _createStyle(self, parent):
"""
Read the attribute style from the configuration file.
"""
config = parent.scene().views()[0].config
self.brush = QtGui.QBrush()
self.brush.setStyle(QtCore.Qt.SolidPattern)
self.brush.setColor(utils._convertDataToColor(config[self.preset]['socket']))
def boundingRect(self):
"""
The bounding rect based on the width and height variables.
"""
width = height = self.parentItem().attrHeight / 2.0
nodzInst = self.scene().views()[0]
config = nodzInst.config
x = - width / 2.0
y = (self.parentItem().baseHeight - config['node_radius'] +
(self.parentItem().attrHeight/4) +
self.parentItem().attrs.index(self.attribute) * self.parentItem().attrHeight )
rect = QtCore.QRectF(QtCore.QRect(x, y, width, height))
return rect
def accepts(self, plug_item):
"""
Only accepts plug items that belong to other nodes.
"""
if isinstance(plug_item, PlugItem):
if (self.parentItem() != plug_item.parentItem() and
len(self.connected_slots) <= 1):
if plug_item.dataType == self.dataType:
if plug_item in self.connected_slots:
return False
else:
return True
else:
return False
else:
return False
def connect(self, plug_item, connection):
"""
Connect to the given plug item.
"""
if len(self.connected_slots) > 0:
# Already connected.
self.connections[0]._remove()
self.connected_slots = list()
# Populate connection.
connection.plugItem = plug_item
connection.socketNode = self.parentItem().name
connection.socketAttr = self.attribute
# Add plug to connected slots.
self.connected_slots.append(plug_item)
# Add connection.
if connection not in self.connections:
self.connections.append(connection)
# Emit signal.
nodzInst = self.scene().views()[0]
nodzInst.signal_SocketConnected.emit(connection.plugNode, connection.plugAttr, connection.socketNode, connection.socketAttr)
def disconnect(self, connection):
"""
Disconnect the given connection from this socket item.
"""
# Emit signal.
nodzInst = self.scene().views()[0]
nodzInst.signal_SocketDisconnected.emit(connection.plugNode, connection.plugAttr, connection.socketNode, connection.socketAttr)
# Remove connected plugs
if connection.plugItem in self.connected_slots:
self.connected_slots.remove(connection.plugItem)
# Remove connections
self.connections.remove(connection)
class ConnectionItem(QtWidgets.QGraphicsPathItem):
"""
A graphics path representing a connection between two attributes.
"""
def __init__(self, source_point, target_point, source, target):
"""
Initialize the class.
:param source_point: Source position of the connection.
:type source_point: QPoint.
:param target_point: Target position of the connection.
:type target_point: QPoint.
:param source: Source item (plug or socket).
:type source: class.
:param target: Target item (plug or socket).
:type target: class.
"""
super(ConnectionItem, self).__init__()
self.setZValue(1)
# Storage.
self.socketNode = None
self.socketAttr = None
self.plugNode = None
self.plugAttr = None
self.source_point = source_point
self.target_point = target_point
self.source = source
self.target = target
self.plugItem = None
self.socketItem = None
self.movable_point = None
self.data = tuple()
# Methods.
self._createStyle()
def _createStyle(self):
"""
Read the connection style from the configuration file.
"""
config = self.source.scene().views()[0].config
self.setAcceptHoverEvents(True)
self.setZValue(-1)
self._pen = QtGui.QPen(utils._convertDataToColor(config['connection_color']))
self._pen.setWidth(config['connection_width'])
def _outputConnectionData(self):
"""
Return the connection data as a ('plugNode.plugAttr', 'socketNode.socketAttr') tuple.
"""
return ("{0}.{1}".format(self.plugNode, self.plugAttr),
"{0}.{1}".format(self.socketNode, self.socketAttr))
def mousePressEvent(self, event):
"""
Snap the Connection to the mouse.
"""
nodzInst = self.scene().views()[0]
for item in nodzInst.scene().items():
if isinstance(item, ConnectionItem):
item.setZValue(0)
nodzInst.drawingConnection = True
d_to_target = (event.pos() - self.target_point).manhattanLength()
d_to_source = (event.pos() - self.source_point).manhattanLength()
if d_to_target < d_to_source:
self.target_point = event.pos()
self.movable_point = 'target_point'
self.target.disconnect(self)
self.target = None
nodzInst.sourceSlot = self.source
else:
self.source_point = event.pos()
self.movable_point = 'source_point'
self.source.disconnect(self)
self.source = None
nodzInst.sourceSlot = self.target
self.updatePath()
def mouseMoveEvent(self, event):
"""
Move the Connection with the mouse.
"""
nodzInst = self.scene().views()[0]
config = nodzInst.config
mbb = utils._createPointerBoundingBox(pointerPos=event.scenePos().toPoint(),
bbSize=config['mouse_bounding_box'])
# Get nodes in pointer's bounding box.
targets = self.scene().items(mbb)
if any(isinstance(target, NodeItem) for target in targets):
if nodzInst.sourceSlot.parentItem() not in targets:
for target in targets:
if isinstance(target, NodeItem):
nodzInst.currentHoveredNode = target
else:
nodzInst.currentHoveredNode = None
if self.movable_point == 'target_point':
self.target_point = event.pos()
else:
self.source_point = event.pos()
self.updatePath()
def mouseReleaseEvent(self, event):
"""
Create a Connection if possible, otherwise delete it.
"""
nodzInst = self.scene().views()[0]
nodzInst.drawingConnection = False
slot = self.scene().itemAt(event.scenePos().toPoint(), QtGui.QTransform())
if not isinstance(slot, SlotItem):
self._remove()
self.updatePath()
super(ConnectionItem, self).mouseReleaseEvent(event)
return
if self.movable_point == 'target_point':
if slot.accepts(self.source):
# Plug reconnection.
self.target = slot
self.target_point = slot.center()
plug = self.source
socket = self.target
# Reconnect.
socket.connect(plug, self)
self.updatePath()
else:
self._remove()
else:
if slot.accepts(self.target):
# Socket Reconnection
self.source = slot
self.source_point = slot.center()
socket = self.target
plug = self.source
# Reconnect.
plug.connect(socket, self)
self.updatePath()
else:
self._remove()
def _remove(self):
"""
Remove this Connection from the scene.
"""
if self.source is not None:
self.source.disconnect(self)
if self.target is not None:
self.target.disconnect(self)
scene = self.scene()
scene.removeItem(self)
scene.update()
def updatePath(self):
"""
Update the path.
"""
self.setPen(self._pen)
path = QtGui.QPainterPath()
path.moveTo(self.source_point)
dx = (self.target_point.x() - self.source_point.x()) * 0.5
dy = self.target_point.y() - self.source_point.y()
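# The two control points share the horizontal midpoint between the endpoints:
# ctrl1 stays level with the source point (dy * 0) and ctrl2 level with the
# target point (dy * 1), producing an S-shaped cubic Bezier between the slots.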
ctrl1 = QtCore.QPointF(self.source_point.x() + dx, self.source_point.y() + dy * 0)
ctrl2 = QtCore.QPointF(self.source_point.x() + dx, self.source_point.y() + dy * 1)
path.cubicTo(ctrl1, ctrl2, self.target_point)
self.setPath(path)
|
py | b416a7bb9cca1ea969bb8420f2167d029b702e27 | """Evaluation and skill scores for ensemble forecasts."""
import numpy as np
from .interface import get_method
def ensemble_skill(X_f, X_o, metric, **kwargs):
"""Compute mean ensemble skill for a given skill metric.
Parameters
----------
X_f : array-like
Array of shape (l,m,n) containing the forecast fields of shape (m,n)
from l ensemble members.
X_o : array_like
Array of shape (m,n) containing the observed field corresponding to
the forecast.
metric : str
The deterministic skill metric to be used (list available in
:func:`~pysteps.verification.interface.get_method`)
Other Parameters
----------------
thr : float
Intensity threshold for categorical scores.
scale : int
The spatial scale to verify in px. In practice it represents the size of
the moving window that is used to compute the fraction of pixels above
the threshold for the FSS.
Returns
-------
out : float
The mean skill of all ensemble members, which is used as the definition of
ensemble skill (as in Zacharov and Rezcova 2009 with the FSS).
References
----------
:cite:`ZR2009`
"""
if len(X_f.shape) != 3:
raise ValueError("the number of dimensions of X_f must be equal to 3, but %i dimensions were passed"
% len(X_f.shape))
if X_f.shape[1:] != X_o.shape:
raise ValueError("the shape of X_f does not match the shape of X_o (%d,%d)!=(%d,%d)"
% (X_f.shape[1], X_f.shape[2], X_o.shape[0], X_o.shape[1]))
thr = kwargs.get("thr", None)
scale = kwargs.get("scale", None)
compute_skill = get_method(metric, type="deterministic")
l = X_f.shape[0]
skill = []
for member in range(l):
skill_ = compute_skill(X_f[member, :, :], X_o, thr=thr, scale=scale)
skill.append(skill_)
return np.mean(skill)
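# Usage sketch (hypothetical values): for an ensemble forecast X_f of shape
# (n_members, m, n) and an observation X_o of shape (m, n), the mean FSS-based
# ensemble skill could be obtained with
#   skill = ensemble_skill(X_f, X_o, "FSS", thr=1.0, scale=10)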
def ensemble_spread(X_f, metric, **kwargs):
"""Compute mean ensemble spread for a given skill metric.
Parameters
----------
X_f : array-like
Array of shape (l,m,n) containing the forecast fields of shape (m,n)
from l ensemble members.
metric : str
The skill metric to be used (list available in :func:`~pysteps.verification.interface.get_method`).
Other Parameters
----------------
thr : float
Intensity threshold for categorical scores.
scale : int
The spatial scale to verify in px. In practice it represents the size of
the moving window that is used to compute the fraction of pixels above
the threshold for the FSS.
Returns
-------
out : float
The mean skill computed between all possible pairs of ensemble members,
which can be used as the definition of mean ensemble spread (as in Zacharov
and Rezcova 2009 with the FSS).
References
----------
:cite:`ZR2009`
"""
if len(X_f.shape) != 3:
raise ValueError("the number of dimensions of X_f must be equal to 3, but %i dimensions were passed"
% len(X_f.shape))
if X_f.shape[0] < 2:
raise ValueError("the number of members in X_f must be greater than 1, but %i members were passed"
% X_f.shape[0])
thr = kwargs.get("thr", None)
scale = kwargs.get("scale", None)
compute_skill = get_method(metric, type="deterministic")
l = X_f.shape[0]
skill = []
for member in range(l):
for othermember in range(member + 1, l):
skill_ = compute_skill(X_f[member, :, :], X_f[othermember, :, :], thr=thr, scale=scale)
skill.append(skill_)
return np.mean(skill)
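# Note: the spread above averages the pairwise skill over all l*(l-1)/2 unordered
# pairs of members, so its cost grows quadratically with the ensemble size.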
def rankhist_init(num_ens_members, X_min):
"""Initialize a rank histogram object.
Parameters
----------
num_ens_members : int
Number ensemble members in the forecasts to accumulate into the rank
histogram.
X_min : float
Threshold for minimum intensity. Forecast-observation pairs, where all
ensemble members and verifying observations are below X_min, are not
counted in the rank histogram.
Returns
-------
out : dict
The rank histogram object.
"""
rankhist = {}
rankhist["num_ens_members"] = num_ens_members
rankhist["n"] = np.zeros(num_ens_members+1, dtype=int)
rankhist["X_min"] = X_min
return rankhist
def rankhist_accum(rankhist, X_f, X_o):
"""Accumulate forecast-observation pairs to the given rank histogram.
Parameters
----------
rankhist : dict
The rank histogram object created with rankhist_init.
X_f : array-like
Array of shape (k,m,n,...) containing the values from an ensemble
forecast of k members with shape (m,n,...).
X_o : array_like
Array of shape (m,n,...) containing the observed values corresponding
to the forecast.
"""
if X_f.shape[0] != rankhist["num_ens_members"]:
raise ValueError("the number of ensemble members in X_f does not match the number of members in the rank histogram (%d!=%d)" % (X_f.shape[1], rankhist["num_ens_members"]))
X_min = rankhist["X_min"]
X_f = np.vstack([X_f[i, :].flatten() for i in range(X_f.shape[0])]).T
X_o = X_o.flatten()
mask = np.logical_and(np.isfinite(X_o), np.all(np.isfinite(X_f), axis=1))
X_f = X_f[mask, :].copy()
X_o = X_o[mask]
mask_nz = np.logical_or(X_o >= X_min, np.all(X_f >= X_min, axis=1))
X_f.sort(axis=1)
bin_idx = [np.digitize([v], f)[0] for v,f in zip(X_o[mask_nz], X_f[mask_nz, :])]
# handle ties, where the verifying observation lies between ensemble
# members having the same value
# ignore the cases where the verifying observations and all ensemble
# members are below the threshold X_min
for i in np.where(~mask_nz)[0]:
if np.any(X_f[i, :] >= X_min):
i_eq = np.where(X_f[i, :] < X_min)[0]
if len(i_eq) > 1 and X_o[i] < X_min:
bin_idx.append(np.random.randint(low=np.min(i_eq),
high=np.max(i_eq)+1))
for bi in bin_idx:
rankhist["n"][bi] += 1
def rankhist_compute(rankhist, normalize=True):
"""Return the rank histogram counts and optionally normalize the histogram.
Parameters
----------
rankhist : dict
A rank histogram object created with rankhist_init.
normalize : bool
If True, normalize the rank histogram so that the bin counts sum to one.
Returns
-------
out : array_like
The counts for the n+1 bins in the rank histogram, where n is the number
of ensemble members.
"""
if normalize:
return 1.0*rankhist["n"] / sum(rankhist["n"])
else:
return rankhist["n"]
|
py | b416a889f62975bf98e5a3b7de73946ce71bc2ba | import sys
import typing
from typing import TYPE_CHECKING, Optional
from weakref import WeakSet
import magicgui as mgui
from .components.viewer_model import ViewerModel
from .utils import _magicgui, config
if TYPE_CHECKING:
# helpful for IDE support
from ._qt.qt_main_window import Window
@mgui.register_type(bind=_magicgui.proxy_viewer_ancestor)
class Viewer(ViewerModel):
"""Napari ndarray viewer.
Parameters
----------
title : string, optional
The title of the viewer window. by default 'napari'.
ndisplay : {2, 3}, optional
Number of displayed dimensions. by default 2.
order : tuple of int, optional
Order in which dimensions are displayed where the last two or last
three dimensions correspond to row x column or plane x row x column if
ndisplay is 2 or 3. by default None
axis_labels : list of str, optional
Dimension names. by default they are labeled with sequential numbers
show : bool, optional
Whether to show the viewer after instantiation. by default True.
"""
_window: 'Window' = None # type: ignore
if sys.version_info < (3, 9):
_instances: typing.ClassVar[WeakSet] = WeakSet()
else:
_instances: typing.ClassVar[WeakSet['Viewer']] = WeakSet()
def __init__(
self,
*,
title='napari',
ndisplay=2,
order=(),
axis_labels=(),
show=True,
):
super().__init__(
title=title,
ndisplay=ndisplay,
order=order,
axis_labels=axis_labels,
)
# having this import here makes all of Qt imported lazily, upon
# instantiating the first Viewer.
from .window import Window
self._window = Window(self, show=show)
self._instances.add(self)
# Expose the private window publicly. This is needed to keep the window off the pydantic model
@property
def window(self) -> 'Window':
return self._window
def update_console(self, variables):
"""Update console's namespace with desired variables.
Parameters
----------
variables : dict, str or list/tuple of str
The variables to inject into the console's namespace. If a dict, a
simple update is done. If a str, the string is assumed to have
variable names separated by spaces. A list/tuple of str can also
be used to give the variable names. If just the variable names are
given (list/tuple/str) then the variable values are looked up in the
caller's frame.
"""
if self.window._qt_viewer._console is None:
return
else:
self.window._qt_viewer.console.push(variables)
def screenshot(
self,
path=None,
*,
size=None,
scale=None,
canvas_only=True,
flash: bool = True,
):
"""Take currently displayed screen and convert to an image array.
Parameters
----------
path : str
Filename for saving screenshot image.
size : tuple (int, int)
Size (resolution) of the screenshot. By default, the currently displayed size.
Only used if `canvas_only` is True.
scale : float
Scale factor used to increase resolution of canvas for the screenshot. By default, the currently displayed resolution.
Only used if `canvas_only` is True.
canvas_only : bool
If True, screenshot shows only the image display canvas, and
if False include the napari viewer frame in the screenshot,
By default, True.
flash : bool
Flag to indicate whether flash animation should be shown after
the screenshot was captured.
By default, True.
Returns
-------
image : array
Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the
upper-left corner of the rendered region.
"""
return self.window.screenshot(
path=path,
size=size,
scale=scale,
flash=flash,
canvas_only=canvas_only,
)
def show(self, *, block=False):
"""Resize, show, and raise the viewer window."""
self.window.show(block=block)
def close(self):
"""Close the viewer window."""
# Remove all the layers from the viewer
self.layers.clear()
# Close the main window
self.window.close()
if config.async_loading:
from .components.experimental.chunk import chunk_loader
# TODO_ASYNC: Find a cleaner way to do this? This fixes some
# tests. We are telling the ChunkLoader that this layer is
# going away:
# https://github.com/napari/napari/issues/1500
for layer in self.layers:
chunk_loader.on_layer_deleted(layer)
self._instances.discard(self)
@classmethod
def close_all(cls) -> int:
"""
Class method. Close all existing viewer instances.
This is mostly exposed to avoid leaking viewers when running tests,
as having many unclosed viewers can adversely affect performance.
It returns the number of viewers closed.
Returns
-------
int :
number of viewers closed.
"""
# copy to not iterate while changing.
viewers = [v for v in cls._instances]
ret = len(viewers)
for viewer in viewers:
viewer.close()
return ret
def current_viewer() -> Optional[Viewer]:
"""Return the currently active napari viewer."""
try:
from napari._qt.qt_main_window import _QtMainWindow
return _QtMainWindow.current_viewer()
except ImportError:
return None
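# Minimal usage sketch (assumes napari's Qt backend is installed; the layer call
# below is a hypothetical placeholder):
#   viewer = Viewer(title='example', show=True)
#   viewer.add_image(...)             # add hypothetical data as a layer
#   viewer.close()
#   Viewer.close_all()                # e.g. in test teardown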
|
py | b416a8a871f8a712421cda520f9f74ca428ce97c | # -*- coding: utf-8 -*-
"""
Integration tests for the PyOWM library
These are "live" executions, that of course need the OWM web API to be up
and running
"""
import unittest
import os
from datetime import datetime
from pyowm.constants import DEFAULT_API_KEY
from pyowm.webapi25.configuration25 import parsers
from pyowm.webapi25.owm25 import OWM25
from pyowm.exceptions import api_call_error, unauthorized_error
class IntegrationTestsWebAPI25(unittest.TestCase):
__owm = OWM25(parsers, os.getenv('OWM_API_KEY', DEFAULT_API_KEY))
def test_is_API_online(self):
self.assertTrue(self.__owm.is_API_online())
def test_weather_at_place(self):
"""
Test feature: get currently observed weather at specific location
"""
o1 = self.__owm.weather_at_place('London,uk')
o2 = self.__owm.weather_at_place('Kiev')
self.assertTrue(o1 is not None)
self.assertTrue(o1.get_reception_time() is not None)
loc = o1.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = o1.get_weather()
self.assertTrue(weat is not None)
self.assertTrue(o2 is not None)
self.assertTrue(o2.get_reception_time() is not None)
loc = o2.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = o2.get_weather()
self.assertTrue(weat is not None)
def test_weather_at_coords(self):
"""
Test feature: get currently observed weather at specific coordinates
"""
o1 = self.__owm.weather_at_coords(41.896144, 12.484589) # Rome
o2 = self.__owm.weather_at_coords(-33.936524, 18.503723) # Cape Town
self.assertTrue(o1)
self.assertTrue(o1.get_reception_time())
loc = o1.get_location()
self.assertTrue(loc)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = o1.get_weather()
self.assertTrue(weat)
self.assertTrue(o2)
self.assertTrue(o2.get_reception_time())
loc = o2.get_location()
self.assertTrue(loc)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = o2.get_weather()
self.assertTrue(weat)
def test_weather_at_zipcode(self):
"""
Test feature: get currently observed weather at specific postcode
"""
o1 = self.__owm.weather_at_zip_code("94040", "US")
self.assertTrue(o1)
self.assertTrue(o1.get_reception_time())
loc = o1.get_location()
self.assertTrue(loc)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = o1.get_weather()
self.assertTrue(weat)
def test_weather_at_id(self):
o1 = self.__owm.weather_at_id(5128581) # New York
o2 = self.__owm.weather_at_id(703448) # Kiev'
o3 = self.__owm.weather_at_id(99999999) # Shall be None
self.assertTrue(o1 is not None)
self.assertTrue(o1.get_reception_time() is not None)
loc = o1.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = o1.get_weather()
self.assertTrue(weat is not None)
self.assertTrue(o2 is not None)
self.assertTrue(o2.get_reception_time() is not None)
loc = o2.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = o2.get_weather()
self.assertTrue(weat is not None)
self.assertFalse(o3 is not None)
def test_weather_at_ids(self):
# New York, Kiev
observations = self.__owm.weather_at_ids([5128581,703448])
o1 = observations[0]
o2 = observations[1]
self.assertTrue(o1 is not None)
self.assertTrue(o1.get_reception_time() is not None)
loc = o1.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = o1.get_weather()
self.assertTrue(weat is not None)
self.assertTrue(o2 is not None)
self.assertTrue(o2.get_reception_time() is not None)
loc = o2.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = o2.get_weather()
self.assertTrue(weat is not None)
def test_weather_at_places(self):
"""
Test feature: find currently observed weather for locations matching
the specified text search pattern
"""
# Test using searchtype=accurate
o1 = self.__owm.weather_at_places("London", "accurate")
o2 = self.__owm.weather_at_places("Paris", "accurate", 2)
self.assertTrue(isinstance(o1, list))
for item in o1:
self.assertTrue(item)
self.assertTrue(item.get_reception_time())
loc = item.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = item.get_weather()
self.assertTrue(weat is not None)
self.assertTrue(isinstance(o2, list))
self.assertFalse(len(o2) > 2)
for item in o2:
self.assertTrue(item)
self.assertTrue(item.get_reception_time())
loc = item.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = item.get_weather()
self.assertTrue(weat is not None)
# Test using searchtype=like
o3 = self.__owm.weather_at_places("London", "like")
o4 = self.__owm.weather_at_places("Paris", "like", 2)
self.assertTrue(isinstance(o3, list))
for item in o3:
self.assertTrue(item)
self.assertTrue(item.get_reception_time())
loc = item.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = item.get_weather()
self.assertTrue(weat is not None)
self.assertTrue(isinstance(o4, list))
self.assertFalse(len(o4) > 2)
for item in o4:
self.assertTrue(item)
self.assertTrue(item.get_reception_time())
loc = item.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = item.get_weather()
self.assertTrue(weat is not None)
def test_weather_around_coords(self):
"""
Test feature: find currently observed weather for locations that are
nearby the specified coordinates
"""
o2 = self.__owm.weather_around_coords(57.0, -2.15) # Scotland
self.assertTrue(isinstance(o2, list))
for item in o2:
self.assertTrue(item is not None)
self.assertTrue(item.get_reception_time() is not None)
loc = item.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = item.get_weather()
self.assertTrue(weat is not None)
o1 = self.__owm.weather_around_coords(57.0, -2.15, 2) # Scotland
self.assertTrue(isinstance(o1, list))
for item in o1:
self.assertTrue(item is not None)
self.assertTrue(item.get_reception_time() is not None)
loc = item.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = item.get_weather()
self.assertTrue(weat is not None)
def test_three_hours_forecast(self):
"""
Test feature: get 3 hours forecast for a specific location
"""
fc1 = self.__owm.three_hours_forecast("London,uk")
fc2 = self.__owm.three_hours_forecast('Kiev')
self.assertTrue(fc1)
f1 = fc1.get_forecast()
self.assertTrue(f1 is not None)
self.assertTrue(f1.get_reception_time() is not None)
loc = f1.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
for weather in f1:
self.assertTrue(weather is not None)
self.assertTrue(fc2 is not None)
f2 = fc2.get_forecast()
self.assertTrue(f2 is not None)
self.assertTrue(f2.get_reception_time() is not None)
loc = f2.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
for weather in f2:
self.assertTrue(weather is not None)
def test_three_hours_forecast_at_coords(self):
"""
Test feature: get 3 hours forecast at a specific geographic coordinate
"""
# London,uk
fc1 = self.__owm.three_hours_forecast_at_coords(51.5073509, -0.1277583)
# Kiev
fc2 = self.__owm.three_hours_forecast_at_coords(50.4501, 30.5234)
self.assertTrue(fc1)
f1 = fc1.get_forecast()
self.assertTrue(f1 is not None)
self.assertTrue(f1.get_reception_time() is not None)
loc = f1.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
for weather in f1:
self.assertTrue(weather is not None)
self.assertTrue(fc2 is not None)
f2 = fc2.get_forecast()
self.assertTrue(f2 is not None)
self.assertTrue(f2.get_reception_time() is not None)
loc = f2.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
for weather in f2:
self.assertTrue(weather is not None)
with self.assertRaises(ValueError):
self.__owm.three_hours_forecast_at_coords(199, 199)
def test_three_hours_forecast_at_id(self):
"""
Test feature: get 3 hours forecast for city ID
"""
# London,uk
fc1 = self.__owm.three_hours_forecast_at_id(2643743)
# Kiev
fc2 = self.__owm.three_hours_forecast_at_id(703448)
# Shall be None
fc3 = self.__owm.three_hours_forecast_at_id(99999999)
self.assertTrue(fc1)
f1 = fc1.get_forecast()
self.assertTrue(f1 is not None)
self.assertTrue(f1.get_reception_time() is not None)
loc = f1.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
for weather in f1:
self.assertTrue(weather is not None)
self.assertTrue(fc2 is not None)
f2 = fc2.get_forecast()
self.assertTrue(f2 is not None)
self.assertTrue(f2.get_reception_time() is not None)
loc = f2.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
for weather in f2:
self.assertTrue(weather is not None)
self.assertEqual(fc3, None)
def test_daily_forecast(self):
"""
Test feature: get daily forecast for a specific location
"""
fc1 = self.__owm.daily_forecast("London,uk")
fc2 = self.__owm.daily_forecast('Kiev')
self.assertTrue(fc1)
f1 = fc1.get_forecast()
self.assertTrue(f1 is not None)
self.assertTrue(f1.get_reception_time() is not None)
loc = f1.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
for weather in f1:
self.assertTrue(weather is not None)
self.assertTrue(fc2 is not None)
f2 = fc2.get_forecast()
self.assertTrue(f2 is not None)
self.assertTrue(f2.get_reception_time() is not None)
loc = f2.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
for weather in f2:
self.assertTrue(weather is not None)
def test_daily_forecast_at_coords(self):
"""
Test feature: get daily forecast at a specific geographic coordinate
"""
fc1 = self.__owm.daily_forecast_at_coords(51.5073509, -0.1277583) # London,uk
self.assertTrue(fc1)
f1 = fc1.get_forecast()
self.assertTrue(f1 is not None)
self.assertTrue(f1.get_reception_time() is not None)
loc = f1.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
for weather in f1:
self.assertTrue(weather is not None)
with self.assertRaises(ValueError):
self.__owm.daily_forecast_at_coords(199, 199)
def test_daily_forecast_at_id(self):
"""
Test feature: get daily forecast for a specific city ID
"""
# London,uk
fc1 = self.__owm.daily_forecast_at_id(2643743)
# Kiev
fc2 = self.__owm.daily_forecast_at_id(703448)
try:
fc3 = self.__owm.daily_forecast_at_id(99999999)
raise AssertionError("APICallError was expected here")
except api_call_error.APICallError:
pass # Ok!
self.assertTrue(fc1)
f1 = fc1.get_forecast()
self.assertTrue(f1 is not None)
self.assertTrue(f1.get_reception_time() is not None)
loc = f1.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
for weather in f1:
self.assertTrue(weather is not None)
self.assertTrue(fc2 is not None)
f2 = fc2.get_forecast()
self.assertTrue(f2 is not None)
self.assertTrue(f2.get_reception_time() is not None)
loc = f2.get_location()
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
for weather in f2:
self.assertTrue(weather is not None)
def test_weather_history_at_place(self):
"""
Test feature: get weather history for a specific location
"""
start_iso = "2013-09-06 09:20:00+00"
start_unix = 1378459200
start_date = datetime(2013, 9, 6, 9, 20, 0)
end_iso = "2013-09-06 20:26:40+00"
end_unix = 1378499200
end_date = datetime(2013, 9, 6, 20, 26, 40)
try:
l1 = self.__owm.weather_history_at_place("London,UK")
if l1 is not None:
for weather in l1:
self.assertTrue(weather is not None)
l2 = self.__owm.weather_history_at_place('Kiev', start_unix, end_unix)
if l2 is not None:
for weather in l2:
self.assertTrue(weather is not None)
l3 = self.__owm.weather_history_at_place('Rome', start_iso, end_iso)
if l3 is not None:
for weather in l3:
self.assertTrue(weather is not None)
l4 = self.__owm.weather_history_at_place('Berlin', start_date, end_date)
if l4 is not None:
for weather in l4:
self.assertTrue(weather is not None)
l5 = self.__owm.weather_history_at_place('QmFoPIlbf') # Shall be None
self.assertTrue(l5 is None)
except unauthorized_error.UnauthorizedError:
pass # it's a paid-level API feature
def test_weather_history_at_coords(self):
try:
l1 = self.__owm.weather_history_at_coords(51.5073509, -0.1277583)
if l1 is not None:
for weather in l1:
self.assertTrue(weather is not None)
except unauthorized_error.UnauthorizedError:
pass # it's a paid-level API feature
def test_weather_history_at_id(self):
"""
Test feature: get weather history for a specific city ID
"""
try:
start_iso = "2013-09-06 09:20:00+00"
start_unix = 1378459200
start_date = datetime(2013, 9, 6, 9, 20, 0)
end_iso = "2013-09-06 20:26:40+00"
end_unix = 1378499200
end_date = datetime(2013, 9, 6, 20, 26, 40)
l1 = self.__owm.weather_history_at_id(2756723) # Dongen
if l1 is not None:
for weather in l1:
self.assertTrue(weather is not None)
l2 = self.__owm.weather_history_at_id(2756723, start_unix, end_unix)
if l2 is not None:
for weather in l2:
self.assertTrue(weather is not None)
l3 = self.__owm.weather_history_at_id(2756723, start_iso, end_iso)
if l3 is not None:
for weather in l3:
self.assertTrue(weather is not None)
l4 = self.__owm.weather_history_at_id(2756723, start_date, end_date)
if l4 is not None:
for weather in l4:
self.assertTrue(weather is not None)
except unauthorized_error.UnauthorizedError:
pass # it's a paid-level API feature
def test_station_at_coords(self):
"""
Test feature: get a list of meteostations nearest to a geographical
point
"""
s1 = self.__owm.station_at_coords(51.5073509, -0.1277583, 2)
self.assertEqual(2, len(s1))
for station in s1:
self.assertTrue(station is not None)
self.assertTrue(
all(v is not None for v in station.__dict__.values()))
with self.assertRaises(ValueError):
self.__owm.station_at_coords(51.5073509, 220)
with self.assertRaises(ValueError):
self.__owm.station_at_coords(220, -0.1277583)
with self.assertRaises(ValueError):
self.__owm.station_at_coords(51.5073509, -0.1277583, -3)
with self.assertRaises(AssertionError):
self.__owm.station_at_coords(51.5073509, -0.1277582, 'foo')
def test_station_tick_history(self):
"""
Test feature: get station tick weather history for a specific
meteostation
"""
try:
h1 = self.__owm.station_tick_history(39276)
if h1 is not None:
sh1 = h1.get_station_history()
self.assertTrue(sh1 is not None)
data1 = sh1.get_measurements()
self.assertTrue(data1 is not None)
self.assertFalse(0, len(data1))
h2 = self.__owm.station_tick_history(39276, limit=2)
self.assertTrue(h2 is not None)
sh2 = h2.get_station_history()
self.assertTrue(sh2 is not None)
data2 = sh2.get_measurements()
self.assertTrue(data2 is not None)
self.assertFalse(len(data2) > 2)
h3 = self.__owm.station_tick_history(987654) # Shall be None
self.assertFalse(h3 is not None)
except unauthorized_error.UnauthorizedError:
pass # it's a paid-level API feature
def test_station_hour_history(self):
"""
Test feature: get station hour weather history for a specific
meteostation
"""
try:
h1 = self.__owm.station_hour_history(123)
if h1 is not None:
sh1 = h1.get_station_history()
self.assertTrue(sh1 is not None)
data1 = sh1.get_measurements()
self.assertTrue(data1 is not None)
self.assertFalse(0, len(data1))
h2 = self.__owm.station_hour_history(987654) # Shall be None
self.assertFalse(h2 is not None)
except unauthorized_error.UnauthorizedError:
pass # it's a paid-level API feature
def test_station_day_history(self):
"""
Test feature: get station hour weather history for a specific
meteostation
"""
try:
h1 = self.__owm.station_day_history(123)
if h1 is not None:
sh1 = h1.get_station_history()
self.assertTrue(sh1 is not None)
data1 = sh1.get_measurements()
self.assertTrue(data1 is not None)
self.assertFalse(0, len(data1))
h2 = self.__owm.station_day_history(123, limit=3)
self.assertTrue(h2 is not None)
sh2 = h2.get_station_history()
self.assertTrue(sh2 is not None)
data2 = sh2.get_measurements()
self.assertTrue(data2 is not None)
h3 = self.__owm.station_day_history(987654) # Shall be None
self.assertFalse(h3 is not None)
except unauthorized_error.UnauthorizedError:
pass # it's a paid-level API feature
def test_weather_at_station(self):
"""
Test feature: get current weather measurement for a specific
meteostation
"""
o = self.__owm.weather_at_station(1000)
self.assertTrue(o is not None)
self.assertTrue(o.get_reception_time() is not None)
weat = o.get_weather()
self.assertTrue(weat is not None)
def test_weather_at_stations_in_bbox(self):
"""
Test feature: get current weather observations from meteostations
inside of a bounding box determined by geo-coordinates.
"""
o = self.__owm.weather_at_stations_in_bbox(49.07,8.87,61.26,65.21)
self.assertTrue(isinstance(o, list))
for item in o:
self.assertTrue(item is not None)
self.assertTrue(item.get_reception_time() is not None)
loc = item.get_location()
self.assertTrue(loc is not None)
weat = item.get_weather()
self.assertTrue(weat is not None)
def test_uvindex_around_coords(self):
"""
Test feature: get UV index around geo-coordinates.
"""
u = self.__owm.uvindex_around_coords(45,9)
self.assertIsNotNone(u)
self.assertIsNotNone(u.get_value())
self.assertIsNotNone(u.get_reception_time())
self.assertIsNotNone(u.get_location())
def test_coindex_around_coords(self):
"""
Test feature: get CO index around geo-coordinates.
"""
u = self.__owm.coindex_around_coords(45, 9)
self.assertIsNotNone(u)
self.assertIsNotNone(u.get_co_samples())
self.assertIsNotNone(u.get_reception_time())
self.assertIsNotNone(u.get_reference_time())
self.assertIsNotNone(u.get_interval())
self.assertIsNotNone(u.get_location())
def test_ozone_around_coords(self):
"""
Test feature: get ozone around geo-coordinates.
"""
u = self.__owm.ozone_around_coords(45, 9)
self.assertIsNotNone(u)
self.assertIsNotNone(u.get_du_value())
self.assertIsNotNone(u.get_reception_time())
self.assertIsNotNone(u.get_reference_time())
self.assertIsNotNone(u.get_interval())
self.assertIsNotNone(u.get_location())
def test_no2index_around_coords(self):
"""
Test feature: get NO2 index around geo-coordinates.
"""
u = self.__owm.no2index_around_coords(45, 9)
self.assertIsNotNone(u)
self.assertIsNotNone(u.get_no2_samples())
self.assertIsNotNone(u.get_reception_time())
self.assertIsNotNone(u.get_reference_time())
self.assertIsNotNone(u.get_interval())
self.assertIsNotNone(u.get_location())
def test_so2index_around_coords(self):
"""
Test feature: get SO2 index around geo-coordinates.
"""
u = self.__owm.so2index_around_coords(45, 9)
self.assertIsNotNone(u)
self.assertIsNotNone(u.get_so2_samples())
self.assertIsNotNone(u.get_reception_time())
self.assertIsNotNone(u.get_reference_time())
self.assertIsNotNone(u.get_interval())
self.assertIsNotNone(u.get_location())
if __name__ == "__main__":
unittest.main()
|
py | b416a8c1f7a7b2dd9946c6213d43cf89f3fd27be | # -*- coding: utf-8 -*-
"""
sphinx.util.osutil
~~~~~~~~~~~~~~~~~~
Operating system-related utility functions for Sphinx.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import re
import sys
import time
import errno
import locale
import shutil
from os import path
import contextlib
from six import PY2, text_type
# Errnos that we need.
EEXIST = getattr(errno, 'EEXIST', 0)
ENOENT = getattr(errno, 'ENOENT', 0)
EPIPE = getattr(errno, 'EPIPE', 0)
EINVAL = getattr(errno, 'EINVAL', 0)
# SEP separates path elements in the canonical file names
#
# Define SEP as a manifest constant, not so much because we expect it to change
# in the future as to avoid the suspicion that a stray "/" in the code is a
# hangover from more *nix-oriented origins.
SEP = "/"
def os_path(canonicalpath):
return canonicalpath.replace(SEP, path.sep)
def canon_path(nativepath):
"""Return path in OS-independent form"""
return nativepath.replace(path.sep, SEP)
def relative_uri(base, to):
"""Return a relative URL from ``base`` to ``to``."""
if to.startswith(SEP):
return to
b2 = base.split(SEP)
t2 = to.split(SEP)
# remove common segments (except the last segment)
for x, y in zip(b2[:-1], t2[:-1]):
if x != y:
break
b2.pop(0)
t2.pop(0)
if b2 == t2:
# Special case: relative_uri('f/index.html','f/index.html')
# returns '', not 'index.html'
return ''
if len(b2) == 1 and t2 == ['']:
# Special case: relative_uri('f/index.html','f/') should
# return './', not ''
return '.' + SEP
return ('..' + SEP) * (len(b2)-1) + SEP.join(t2)
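# For example, relative_uri('a/b/index.html', 'a/c/x.html') returns '../c/x.html',
# while the two special cases above return '' and './' respectively.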
def ensuredir(path):
"""Ensure that a path exists."""
try:
os.makedirs(path)
except OSError as err:
# 0 for Jython/Win32
if err.errno not in [0, EEXIST]:
raise
# This function is the same as os.walk in Python 2.6, 2.7, 3.2 and 3.3, except for a
# customization that checks for UnicodeError.
# That customization is what prevents replacing this function with os.walk.
def walk(top, topdown=True, followlinks=False):
"""Backport of os.walk from 2.6, where the *followlinks* argument was
added.
"""
names = os.listdir(top)
dirs, nondirs = [], []
for name in names:
try:
fullpath = path.join(top, name)
except UnicodeError:
print('%s:: ERROR: non-ASCII filename not supported on this '
'filesystem encoding %r, skipped.' % (name, fs_encoding),
file=sys.stderr)
continue
if path.isdir(fullpath):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
fullpath = path.join(top, name)
if followlinks or not path.islink(fullpath):
for x in walk(fullpath, topdown, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
def mtimes_of_files(dirnames, suffix):
for dirname in dirnames:
for root, dirs, files in os.walk(dirname):
for sfile in files:
if sfile.endswith(suffix):
try:
yield path.getmtime(path.join(root, sfile))
except EnvironmentError:
pass
def movefile(source, dest):
"""Move a file, removing the destination if it exists."""
if os.path.exists(dest):
try:
os.unlink(dest)
except OSError:
pass
os.rename(source, dest)
def copytimes(source, dest):
"""Copy a file's modification times."""
st = os.stat(source)
if hasattr(os, 'utime'):
os.utime(dest, (st.st_atime, st.st_mtime))
def copyfile(source, dest):
"""Copy a file and its modification times, if possible."""
shutil.copyfile(source, dest)
try:
# don't do full copystat because the source may be read-only
copytimes(source, dest)
except OSError:
pass
no_fn_re = re.compile(r'[^a-zA-Z0-9_-]')
def make_filename(string):
return no_fn_re.sub('', string) or 'sphinx'
def ustrftime(format, *args):
# [DEPRECATED] strftime for unicode strings
# It will be removed at Sphinx-1.5
if not args:
# If time is not specified, try to use $SOURCE_DATE_EPOCH variable
# See https://wiki.debian.org/ReproducibleBuilds/TimestampsProposal
source_date_epoch = os.getenv('SOURCE_DATE_EPOCH')
if source_date_epoch is not None:
time_struct = time.gmtime(float(source_date_epoch))
args = [time_struct]
if PY2:
# if a locale is set, the time strings are encoded in the encoding
# given by LC_TIME; if that is available, use it
enc = locale.getlocale(locale.LC_TIME)[1] or 'utf-8'
return time.strftime(text_type(format).encode(enc), *args).decode(enc)
else: # Py3
# On Windows, time.strftime() and Unicode characters will raise UnicodeEncodeError.
# http://bugs.python.org/issue8304
try:
return time.strftime(format, *args)
except UnicodeEncodeError:
r = time.strftime(format.encode('unicode-escape').decode(), *args)
return r.encode().decode('unicode-escape')
def safe_relpath(path, start=None):
try:
return os.path.relpath(path, start)
except ValueError:
return path
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
def abspath(pathdir):
pathdir = path.abspath(pathdir)
if isinstance(pathdir, bytes):
pathdir = pathdir.decode(fs_encoding)
return pathdir
def getcwd():
if hasattr(os, 'getcwdu'):
return os.getcwdu()
return os.getcwd()
@contextlib.contextmanager
def cd(target_dir):
cwd = getcwd()
try:
os.chdir(target_dir)
yield
finally:
os.chdir(cwd)
def rmtree(path):
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
|
py | b416aaeb9651b44b3e5425c5a7cd5f937ffa4acc | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('collect_mail', '0004_auto_20151102_1520'),
]
operations = [
# migrations.AlterField(
# model_name='statistics',
# name='customer',
# field=models.ForeignKey(related_name='collect_statistics', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='core.Customer', null=True),
# ),
]
|
py | b416ab7a6692fe4aa66adc5eaee0d01e3b7aede1 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from .get_function import *
from .get_service_account import *
from .trail import *
from ._inputs import *
from . import outputs
|
py | b416acc0e9d2253033600cc5cac2db19c7422845 | # !/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Created on Fri Nov 1 16:38:58 2019
@author: SHI YI
"""
class Solution:
def isValid(self, s: str) -> bool:
left = "({["
right = ")}]"
stack = [] # empty list, used as stack
for c in s:
if c in left: # push an opening bracket onto the stack
stack.append(c)
elif c in right: # a closing bracket must match the most recently opened one
if len(stack) == 0:
return False
elif right.index(c) != left.index(stack.pop()):
return False
return len(stack) == 0
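# The stack-based check runs in O(n) time with O(n) extra space for a string of length n.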
s = "("
solve = Solution()
if solve.isValid(s):
print("True")
else:
print("False")
|
py | b416ad2aa8a9706c8c42bda2a7fa1d90c2567173 | # A middleware for serving static files that is more selective than
# pump.middleware.file.
from pump.middleware.file import *
# Wrap the app so that if the request URL falls under any of the URLs in
# static_urls (the URL must start with one of the static_urls), it looks in
# public_dir for a file corresponding to the request URL. If no such file is
# found, the request is handled normally.
#
# Note that the paths in static_urls should include the leading '/'.
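# Example (hypothetical paths): serve anything under /css or /js from ./public and
# fall through to the wrapped app for every other URL:
#   app = wrap_static(app, "public", ["/css", "/js"])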
def wrap_static(app, public_dir, static_urls):
app_with_file = wrap_file(app, public_dir)
def wrapped_app(req):
if any([req["uri"].startswith(s) for s in static_urls]):
return app_with_file(req)
else:
return app(req)
return wrapped_app |
py | b416aeb7881ed22086beeed0ce9757584c91fe1b | #Answer = 40886
#cost = 0.176s
import time
start = time.time()
limit = 100
pr = 100
summary = 0
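# root_dig(x) returns the digit sum of the first `pr` (100) significant digits of
# sqrt(x), or 0 when x is a perfect square: it binary-searches the integer square
# root of x * 10**200 and sums the leading digits of that integer.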
def root_dig(x):
global pr
temp = x ** (1/2)
res = 0
if temp == int(temp):
return res
else:
a = pow(10, 100)
eq = x * a ** 2
upp = (int(temp) + 1) * a
low = int(temp) * a
mid = (upp + low) // 2
while mid != upp and mid != low:
upp_loss = abs(upp ** 2 - eq)
low_loss = abs(low ** 2 - eq)
mid_loss = abs(mid ** 2 - eq)
if upp_loss > low_loss:
upp = mid
else:
low = mid
mid = (upp + low) // 2
if upp ** 2 > eq:
right = low
else:
right = upp
s = str(right)
for i in s[:pr]:
res += int(i)
return res
for i in range(2, limit + 1):
summary += root_dig(i)
print(summary)
end = time.time()
print(end - start) |
py | b416aec367d40e92b59c7ab1eccce4ff525846cc | # -*- coding: utf-8 -*-
"""Top-level package for ingest-graph-validator."""
__author__ = """Javier Ferrer"""
__email__ = "[email protected]"
__version__ = "0.5.4"
|
py | b416aed7dec90f058a050973c67a8e12e5509d91 | import json
from django.test import TestCase
from rest_framework.test import APIClient
from rest_framework import status
from django.urls import reverse
from rest_framework.test import APIRequestFactory
from rest_framework.test import force_authenticate
from authors.apps.articles.models import Article
from authors.apps.authentication.models import User
from authors.apps.authentication.verification import SendEmail
from authors.apps.authentication.views import Activate
class ViewTestCase(TestCase):
"""Test suite for the api views."""
def setUp(self):
"""Define the test client and other test variables."""
"""User data for getting token"""
self.test_user = {
"user": {
"username": "janey",
"email": "[email protected]",
"password": "SecretSecret254"
}
}
self.test_user2 = {
"user": {
"username": "james",
"email": "[email protected]",
"password": "SecretSecret254"
}
}
"""Articles data for testing rate feature"""
self.article = {
"article": {
"title": "lolitas",
"body": "lolitas",
"description": "lolitas quantum physics",
}
}
"""Correct rate data passing all validations"""
self.rate = {
"rate": {
"rating": 3
}
}
"""Rate data that is beyond the range 1 - 5"""
self.rate_beyond_range = {
"rate": {
"rating": 6
}
}
"""Rate data that is not numeric"""
self.string_rate = {
"rate": {
"rating": "d"
}
}
"""An empty value for rate"""
self.empty_rate = {
"rate": {
"rating": " "
}
}
"""Initialize client"""
self.factory = APIRequestFactory()
self.client = APIClient()
def create_user_unverified(self):
"""
Creates a user without verifying them
"""
self.client.post(
'/api/users/',
self.test_user,
format='json'
)
def login_unverified_user(self):
"""
Logs in and unverified user
"""
response = self.client.post(
'/api/users/login/',
self.test_user,
format='json'
)
json_response = json.loads(response.content)
return json_response.get('user').get('token')
def create_and_verify_user(self, test_user):
"""
Check user has verified account from email
"""
user_obj = User.objects.create_user(username=test_user.get('user').get(
'username'), email=test_user.get('user').get('email'), password=test_user.get('user').get('password'))
request = self.factory.get(reverse("authentication:register"))
token, uid = SendEmail().send_verification_email(user_obj.email, request)
request = self.factory.get(
reverse("authentication:activate", args=[uid, token]))
force_authenticate(request, user_obj, token=user_obj.token)
view = Activate.as_view()
view(request, uidb64=uid, token=token)
user = User.objects.last()
return user.is_verified
def login_verified_user(self, test_user):
"""
Logs in created and verified user to get token
"""
if self.create_and_verify_user(test_user) is True:
response = self.client.post(
'/api/users/login/',
test_user,
format='json'
)
json_response = json.loads(response.content)
return json_response.get('user').get('token')
def create_article(self, token, article):
"""
Creates an article for testing
"""
return self.client.post(
'/api/articles/',
self.article,
HTTP_AUTHORIZATION='Token ' + token,
format='json'
)
def rating(self, token):
"""
Method for rating an aricle
"""
return self.client.post(
'/api/articles/lolitas/rate/',
self.rate,
HTTP_AUTHORIZATION='Token ' + token,
format='json'
)
def test_rating_inexisting_article(self):
"""Test user rating an inexisting article"""
token = self.login_verified_user(self.test_user2)
self.create_article(token, self.article)
response = self.client.post('/api/articles/lola/rate/',
self.rate,
HTTP_AUTHORIZATION='Token ' + token,
format='json'
)
self.assertEquals(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertIn("An article with this slug does not exist",
response.content.decode())
def test_empty_rating(self):
"""Test user tries to give an empty rating"""
token = self.login_verified_user(self.test_user2)
self.create_article(token, self.article)
response = self.client.post(
'/api/articles/lolitas/rate/',
self.empty_rate,
HTTP_AUTHORIZATION='Token ' + token,
format='json'
)
self.assertIn("A valid integer is required.",
response.content.decode())
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_rate_not_an_integer(self):
"""Test user cannot give an alphabetic input as rate"""
token = self.login_verified_user(self.test_user2)
self.create_article(token, self.article)
response = self.client.post(
'/api/articles/lolitas/rate/',
self.string_rate,
HTTP_AUTHORIZATION='Token ' + token,
format='json'
)
self.assertIn("A valid integer is required.",
response.content.decode())
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_rate_beyond_range(self):
"""Test user cannot give a rate beyond the range of 1-5"""
token = self.login_verified_user(self.test_user2)
self.create_article(token, self.article)
response = self.client.post(
'/api/articles/lolitas/rate/',
self.rate_beyond_range,
HTTP_AUTHORIZATION='Token ' + token,
format='json'
)
self.assertIn("Rate must be a value between 1 and 5",
response.content.decode())
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_valid_rate(self):
"""Test user can rate an aticle successfully"""
token = self.login_verified_user(self.test_user2)
self.create_article(token, self.article)
response = self.client.post(
'/api/articles/lolitas/rate/',
self.rate,
HTTP_AUTHORIZATION='Token ' + token,
format='json'
)
self.assertEquals(response.status_code, status.HTTP_201_CREATED)
def test_rating_more_than_five_times(self):
"""Test that a user cannot rate an article for more than 5 times"""
token = self.login_verified_user(self.test_user2)
self.create_article(token, self.article)
self.rating(token)
self.rating(token)
self.rating(token)
self.rating(token)
self.rating(token)
self.rating(token)
response = self.rating(token)
self.assertIn("You are not allowed to rate this article more than 5 times.",
response.content.decode())
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
|
py | b416aef12c80477adfdf09fa60541912a3cb2249 | # Copyright 2012 OpenStack Foundation # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filters.
"""
import inspect
import sys
from patron import filters
from patron import loadables
from patron import test
class Filter1(filters.BaseFilter):
"""Test Filter class #1."""
pass
class Filter2(filters.BaseFilter):
"""Test Filter class #2."""
pass
class FiltersTestCase(test.NoDBTestCase):
def test_filter_all(self):
filter_obj_list = ['obj1', 'obj2', 'obj3']
filter_properties = 'fake_filter_properties'
base_filter = filters.BaseFilter()
self.mox.StubOutWithMock(base_filter, '_filter_one')
base_filter._filter_one('obj1', filter_properties).AndReturn(True)
base_filter._filter_one('obj2', filter_properties).AndReturn(False)
base_filter._filter_one('obj3', filter_properties).AndReturn(True)
self.mox.ReplayAll()
result = base_filter.filter_all(filter_obj_list, filter_properties)
self.assertTrue(inspect.isgenerator(result))
self.assertEqual(['obj1', 'obj3'], list(result))
def test_filter_all_recursive_yields(self):
# Test filter_all() allows generators from previous filter_all()s.
# filter_all() yields results. We want to make sure that we can
# call filter_all() with generators returned from previous calls
# to filter_all().
filter_obj_list = ['obj1', 'obj2', 'obj3']
filter_properties = 'fake_filter_properties'
base_filter = filters.BaseFilter()
self.mox.StubOutWithMock(base_filter, '_filter_one')
total_iterations = 200
# The order that _filter_one is going to get called gets
# confusing because we will be recursively yielding things..
# We are going to simulate the first call to filter_all()
# returning False for 'obj2'. So, 'obj1' will get yielded
# 'total_iterations' number of times before the first filter_all()
# call gets to processing 'obj2'. We then return 'False' for it.
# After that, 'obj3' gets yielded 'total_iterations' number of
# times.
for x in xrange(total_iterations):
base_filter._filter_one('obj1', filter_properties).AndReturn(True)
base_filter._filter_one('obj2', filter_properties).AndReturn(False)
for x in xrange(total_iterations):
base_filter._filter_one('obj3', filter_properties).AndReturn(True)
self.mox.ReplayAll()
objs = iter(filter_obj_list)
for x in xrange(total_iterations):
# Pass in generators returned from previous calls.
objs = base_filter.filter_all(objs, filter_properties)
self.assertTrue(inspect.isgenerator(objs))
self.assertEqual(['obj1', 'obj3'], list(objs))
def test_get_filtered_objects(self):
filter_objs_initial = ['initial', 'filter1', 'objects1']
filter_objs_second = ['second', 'filter2', 'objects2']
filter_objs_last = ['last', 'filter3', 'objects3']
filter_properties = 'fake_filter_properties'
def _fake_base_loader_init(*args, **kwargs):
pass
self.stubs.Set(loadables.BaseLoader, '__init__',
_fake_base_loader_init)
filt1_mock = self.mox.CreateMock(Filter1)
filt2_mock = self.mox.CreateMock(Filter2)
self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
use_mock_anything=True)
self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
self.mox.StubOutWithMock(filt1_mock, 'filter_all')
self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
use_mock_anything=True)
self.mox.StubOutWithMock(filt2_mock, 'run_filter_for_index')
self.mox.StubOutWithMock(filt2_mock, 'filter_all')
filt1_mock.run_filter_for_index(0).AndReturn(True)
filt1_mock.filter_all(filter_objs_initial,
filter_properties).AndReturn(filter_objs_second)
filt2_mock.run_filter_for_index(0).AndReturn(True)
filt2_mock.filter_all(filter_objs_second,
filter_properties).AndReturn(filter_objs_last)
self.mox.ReplayAll()
filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
filter_mocks = [filt1_mock, filt2_mock]
result = filter_handler.get_filtered_objects(filter_mocks,
filter_objs_initial,
filter_properties)
self.assertEqual(filter_objs_last, result)
def test_get_filtered_objects_for_index(self):
"""Test that we don't call a filter when its
run_filter_for_index() method returns false
"""
filter_objs_initial = ['initial', 'filter1', 'objects1']
filter_objs_second = ['second', 'filter2', 'objects2']
filter_properties = 'fake_filter_properties'
def _fake_base_loader_init(*args, **kwargs):
pass
self.stubs.Set(loadables.BaseLoader, '__init__',
_fake_base_loader_init)
filt1_mock = self.mox.CreateMock(Filter1)
filt2_mock = self.mox.CreateMock(Filter2)
self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
use_mock_anything=True)
self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
self.mox.StubOutWithMock(filt1_mock, 'filter_all')
self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
use_mock_anything=True)
self.mox.StubOutWithMock(filt2_mock, 'run_filter_for_index')
self.mox.StubOutWithMock(filt2_mock, 'filter_all')
filt1_mock.run_filter_for_index(0).AndReturn(True)
filt1_mock.filter_all(filter_objs_initial,
filter_properties).AndReturn(filter_objs_second)
# return false so filter_all will not be called
filt2_mock.run_filter_for_index(0).AndReturn(False)
self.mox.ReplayAll()
filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
filter_mocks = [filt1_mock, filt2_mock]
filter_handler.get_filtered_objects(filter_mocks,
filter_objs_initial,
filter_properties)
def test_get_filtered_objects_none_response(self):
filter_objs_initial = ['initial', 'filter1', 'objects1']
filter_properties = 'fake_filter_properties'
def _fake_base_loader_init(*args, **kwargs):
pass
self.stubs.Set(loadables.BaseLoader, '__init__',
_fake_base_loader_init)
filt1_mock = self.mox.CreateMock(Filter1)
filt2_mock = self.mox.CreateMock(Filter2)
self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
use_mock_anything=True)
self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
self.mox.StubOutWithMock(filt1_mock, 'filter_all')
# Shouldn't be called.
self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
use_mock_anything=True)
self.mox.StubOutWithMock(filt2_mock, 'filter_all')
filt1_mock.run_filter_for_index(0).AndReturn(True)
filt1_mock.filter_all(filter_objs_initial,
filter_properties).AndReturn(None)
self.mox.ReplayAll()
filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
filter_mocks = [filt1_mock, filt2_mock]
result = filter_handler.get_filtered_objects(filter_mocks,
filter_objs_initial,
filter_properties)
self.assertIsNone(result)
|
py | b416b0714267f19190553bfd08f12c36feaf3086 | import math
import time
import torch
import numpy as np
from .dataselectionstrategy import DataSelectionStrategy
from ..helpers import OrthogonalMP_REG_Parallel, OrthogonalMP_REG
from torch.utils.data import Subset, DataLoader
class OMPGradMatchStrategy(DataSelectionStrategy):
"""
Implementation of OMPGradMatch Strategy from the paper :footcite:`sivasubramanian2020gradmatch` for supervised learning frameworks.
OMPGradMatch strategy tries to solve the optimization problem given below:
.. math::
\\min_{\\mathbf{w}, S: |S| \\leq k} \\Vert \\sum_{i \\in S} w_i \\nabla_{\\theta}L_T^i(\\theta) - \\nabla_{\\theta}L(\\theta)\\Vert
    In the above equation, :math:`\\mathbf{w}` denotes the weight vector that contains the weights for each data instance, :math:`\\mathcal{U}` denotes the training set where :math:`(x^i, y^i)` denotes the :math:`i^{th}` training data point and label respectively,
:math:`L_T` denotes the training loss, :math:`L` denotes either training loss or validation loss depending on the parameter valid,
:math:`S` denotes the data subset selected at each round, and :math:`k` is the budget for the subset.
The above optimization problem is solved using the Orthogonal Matching Pursuit(OMP) algorithm.
Parameters
----------
trainloader: class
Loading the training data using pytorch DataLoader
valloader: class
Loading the validation data using pytorch DataLoader
model: class
Model architecture used for training
    loss: class
The type of loss criterion
eta: float
Learning rate. Step size for the one step gradient update
device: str
The device being utilized - cpu | cuda
num_classes: int
The number of target classes in the dataset
linear_layer: bool
Apply linear transformation to the data
selection_type: str
Type of selection -
        - 'PerClass': PerClass method is where the OMP algorithm is applied on each class's data points separately.
        - 'PerBatch': PerBatch method is where the OMP algorithm is applied on each minibatch of data points.
        - 'PerClassPerGradient': PerClassPerGradient method is the same as PerClass, but uses only the gradient corresponding to the classification layer of that class.
valid : bool, optional
        If valid==True, we use the validation dataset gradient sum in OMP; otherwise we use the training dataset (default: False)
lam : float
Regularization constant of OMP solver
eps : float
Epsilon parameter to which the above optimization problem is solved using OMP algorithm
"""
def __init__(self, trainloader, valloader, model, loss,
eta, device, num_classes, linear_layer, selection_type, valid=True, lam=0, eps=1e-4, r=1):
"""
Constructor method
"""
super().__init__(trainloader, valloader, model, num_classes, linear_layer, loss, device)
self.eta = eta # step size for the one step gradient update
self.device = device
self.init_out = list()
self.init_l1 = list()
self.selection_type = selection_type
self.valid = valid
self.lam = lam
self.eps = eps
def ompwrapper(self, X, Y, bud):
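        # Solve the OMP problem with the CPU (numpy) solver or the GPU-parallel torch
        # solver, and return the selected column indices together with their weights.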
if self.device == "cpu":
reg = OrthogonalMP_REG(X.cpu().numpy(), Y.cpu().numpy(), nnz=bud, positive=True, lam=0)
ind = np.nonzero(reg)[0]
else:
reg = OrthogonalMP_REG_Parallel(X, Y, nnz=bud,
positive=True, lam=self.lam,
tol=self.eps, device=self.device)
ind = torch.nonzero(reg).view(-1)
return ind.tolist(), reg[ind].tolist()
def select(self, budget, model_params):
"""
Apply OMP Algorithm for data selection
Parameters
----------
budget: int
The number of data points to be selected
        model_params: OrderedDict
            Python dictionary object containing the model's parameters
Returns
----------
        idxs: list
            List containing indices of the best datapoints
        gammas: list
            List containing the weight of each selected instance
"""
omp_start_time = time.time()
self.update_model(model_params)
if self.selection_type == 'PerClass':
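            # Run OMP separately on each class's gradients, giving every class a
            # budget proportional to its share of the training set.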
self.get_labels(valid=self.valid)
idxs = []
gammas = []
for i in range(self.num_classes):
trn_subset_idx = torch.where(self.trn_lbls == i)[0].tolist()
trn_data_sub = Subset(self.trainloader.dataset, trn_subset_idx)
self.pctrainloader = DataLoader(trn_data_sub, batch_size=self.trainloader.batch_size,
shuffle=False, pin_memory=True)
if self.valid:
val_subset_idx = torch.where(self.val_lbls == i)[0].tolist()
val_data_sub = Subset(self.valloader.dataset, val_subset_idx)
self.pcvalloader = DataLoader(val_data_sub, batch_size=self.trainloader.batch_size,
shuffle=False, pin_memory=True)
self.compute_gradients(self.valid, batch=False, perClass=True)
trn_gradients = self.grads_per_elem
if self.valid:
sum_val_grad = torch.sum(self.val_grads_per_elem, dim=0)
else:
sum_val_grad = torch.sum(trn_gradients, dim=0)
idxs_temp, gammas_temp = self.ompwrapper(torch.transpose(trn_gradients, 0, 1),
sum_val_grad, math.ceil(budget * len(trn_subset_idx) / self.N_trn))
idxs.extend(list(np.array(trn_subset_idx)[idxs_temp]))
gammas.extend(gammas_temp)
elif self.selection_type == 'PerBatch':
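            # Treat every minibatch as a single selection unit: OMP picks whole
            # batches, whose element indices are then expanded with equal weights.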
self.compute_gradients(self.valid, batch=True, perClass=False)
idxs = []
gammas = []
trn_gradients = self.grads_per_elem
if self.valid:
sum_val_grad = torch.sum(self.val_grads_per_elem, dim=0)
else:
sum_val_grad = torch.sum(trn_gradients, dim=0)
idxs_temp, gammas_temp = self.ompwrapper(torch.transpose(trn_gradients, 0, 1),
sum_val_grad, math.ceil(budget/self.trainloader.batch_size))
batch_wise_indices = list(self.trainloader.batch_sampler)
for i in range(len(idxs_temp)):
tmp = batch_wise_indices[idxs_temp[i]]
idxs.extend(tmp)
gammas.extend(list(gammas_temp[i] * np.ones(len(tmp))))
elif self.selection_type == 'PerClassPerGradient':
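            # Per-class selection as above, but restricted to the last-layer gradient
            # block of that class (its output column plus the matching embedding slice).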
self.get_labels(valid=self.valid)
idxs = []
gammas = []
embDim = self.model.get_embedding_dim()
for i in range(self.num_classes):
trn_subset_idx = torch.where(self.trn_lbls == i)[0].tolist()
trn_data_sub = Subset(self.trainloader.dataset, trn_subset_idx)
self.pctrainloader = DataLoader(trn_data_sub, batch_size=self.trainloader.batch_size,
shuffle=False, pin_memory=True)
if self.valid:
val_subset_idx = torch.where(self.val_lbls == i)[0].tolist()
val_data_sub = Subset(self.valloader.dataset, val_subset_idx)
self.pcvalloader = DataLoader(val_data_sub, batch_size=self.trainloader.batch_size,
shuffle=False, pin_memory=True)
self.compute_gradients(self.valid, batch=False, perClass=True)
trn_gradients = self.grads_per_elem
tmp_gradients = trn_gradients[:, i].view(-1, 1)
tmp1_gradients = trn_gradients[:,
self.num_classes + (embDim * i): self.num_classes + (embDim * (i + 1))]
trn_gradients = torch.cat((tmp_gradients, tmp1_gradients), dim=1)
if self.valid:
val_gradients = self.val_grads_per_elem
tmp_gradients = val_gradients[:, i].view(-1, 1)
tmp1_gradients = val_gradients[:,
self.num_classes + (embDim * i): self.num_classes + (embDim * (i + 1))]
val_gradients = torch.cat((tmp_gradients, tmp1_gradients), dim=1)
sum_val_grad = torch.sum(val_gradients, dim=0)
else:
sum_val_grad = torch.sum(trn_gradients, dim=0)
idxs_temp, gammas_temp = self.ompwrapper(torch.transpose(trn_gradients, 0, 1),
sum_val_grad, math.ceil(budget * len(trn_subset_idx) / self.N_trn))
idxs.extend(list(np.array(trn_subset_idx)[idxs_temp]))
gammas.extend(gammas_temp)
omp_end_time = time.time()
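        # If OMP selected fewer points than the budget, pad with randomly chosen
        # unselected indices and give them unit weight.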
diff = budget - len(idxs)
if diff > 0:
remainList = set(np.arange(self.N_trn)).difference(set(idxs))
new_idxs = np.random.choice(list(remainList), size=diff, replace=False)
idxs.extend(new_idxs)
gammas.extend([1 for _ in range(diff)])
idxs = np.array(idxs)
gammas = np.array(gammas)
if self.selection_type in ["PerClass", "PerClassPerGradient"]:
rand_indices = np.random.permutation(len(idxs))
idxs = list(np.array(idxs)[rand_indices])
gammas = list(np.array(gammas)[rand_indices])
print("OMP algorithm Subset Selection time is: ", omp_end_time - omp_start_time)
return idxs, gammas |
py | b416b0df1365ca79d9862d506f9c373f799bfd0e | import uuid
import datetime as dt
class Event:
# Event.py
# Python implementation of the Class Event
# represents a TAK event: this class is instantiated with a standard set of
# values.
# Generated by Enterprise Architect
# Created on: 11-Feb-2020 11:08:07 AM
# Original author: Corvo
#
# event as an XML
#<?xml version="1.0" encoding="UTF-8" standalone="yes"?><event version="2.0" uid="Linux-ABC.server-ping" type="b-t-f" time="2020-02-14T20:32:31.444Z" start="2020-02-14T20:32:31.444Z" stale="2020-02-15T20:32:31.444Z" how="h-g-i-g-o">
#default constructor
def __init__(self, linkType=None, linkuid = None, linkproduction_time=None, linkrelation=None, linktype=None, linkparent_callsign=None, eventType = 'default', eventisPing = 0 ,eventtype = "a-f-G-I" , eventhow = 'm-g' ,eventisGeochat = 0 ,eventDATETIME_FMT = "%Y-%m-%dT%H:%M:%SZ", eventuid = "UIDString", eventversion = '2.0', eventconnType=None, pointlat="00.00000000", pointlon='00.00000000', pointle = "9999999.0", pointce = "9999999.0", pointhae = "00.00000000", chatType = None, chatsenderCallsign = None, chatchatroom = None, chatgroupOwner = None,chatid = None, chatparent = None ,chatgrpid = None ,chatgrpuid0 = None, chatgrpuid1 = None):
from Model.detail import Detail
from Model.point import Point
case = {
'default': self.defaultFunc,
'timeout': self.timeoutFunc
}
DATETIME_FMT = eventDATETIME_FMT
        # flag to determine if this event is a geo chat; if so, a prefix will be
        # added to the uid
# starting time when an event should be considered valid
start = "%Y-%m-%dT%H:%M:%SZ"
# basic event
# Gives a hint about how the coordinates were generated
# Schema version of this event instance (e.g. 2.0)
# time stamp: when the event was generated
time = "%Y-%m-%dT%H:%M:%SZ"
        # Hierarchically organized hint about event type (default is 'a-f-G-I'
        # for infrastructure)
# ending time when an event should no longer be considered valid
stale = "%Y-%m-%dT%H:%M:%SZ"
# Globally unique name for this information on this event can have
# additional information attached.
# e.g. -ping means that this request is a ping
# flag to determine if this event is a Ping, in this case append to the UID
#calls detail and point
self.point = Point(lat=pointlat, lon=pointlon, le=pointle, ce=pointce, hae=pointhae)
self.detail = Detail(connType=eventconnType,linkuid=linkuid ,linkType=linkType, uid = eventuid, linkproduction_time=linkproduction_time, linkrelation=linkrelation, linktype=linktype, linkparent_callsign=linkparent_callsign, chatType = chatType, chatsenderCallsign = chatsenderCallsign, chatchatroom = chatchatroom, chatgroupOwner = chatgroupOwner, chatid = chatid, chatparent = chatparent, chatgrpuid0 = chatgrpuid0, chatgrpuid1 = chatgrpuid1)
case[eventType](DATETIME_FMT,version = eventversion, uid = eventuid, how = eventhow, type = eventtype,isGeochat = eventisGeochat, isPing = eventisPing)
def defaultFunc(self, DATETIME_FMT, version, uid, type, how, isGeochat, isPing):
self.how = how
timer = dt.datetime
now = timer.utcnow()
zulu = now.strftime(DATETIME_FMT)
stale_part = dt.datetime.strptime(zulu, DATETIME_FMT) + dt.timedelta(minutes = 1)
stale_part = stale_part.strftime(DATETIME_FMT)
self.setstale(str(stale_part))
self.setstart(zulu)
self.settime(zulu)
self.type = type
self.setuid(isGeochat = isGeochat, isPing=isPing)
self.version = version
def timeoutFunc(self, DATETIME_FMT, version, uid, type, how, isGeochat, isPing):
self.how = how
timer = dt.datetime
now = timer.utcnow()
zulu = now.strftime(DATETIME_FMT)
stale_part = dt.datetime.strptime(zulu, DATETIME_FMT) - dt.timedelta(minutes = 1)
stale_part = stale_part.strftime(DATETIME_FMT)
self.setstale(str(stale_part))
self.setstart(zulu)
self.settime(zulu)
self.type = type
self.setuid(isGeochat = isGeochat, isPing=isPing)
self.version = version
#Start getter
def getstart(self):
        return self.start
# Start setter
def setstart(self, Start=0):
self.start = Start
# m_point setter
def setpoint(self, m_point=0):
self.point = m_point
# how getter
def gethow(self):
return self.how
# how setter
def sethow(self, how=0):
self.how = how
# uid getter
def getuid(self):
return self.uid
# uid setter
def setuid(self, isGeochat, isPing):
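        # Generate a fresh UUID for this event; GeoChat events get the "GeoChat."
        # prefix and a chat event type, ping events get the "-ping" suffix and a ping type.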
GEOCHATPREFIX = "GeoChat."
PINGSUFFIX = "-ping"
a = uuid.uuid1()
self.uid = str(a)
if isGeochat == 1:
            self.uid = GEOCHATPREFIX + self.uid
self.settype('h-g-i-g-o')
elif isPing == 1:
self.uid = self.uid + PINGSUFFIX
self.settype('t-x-c-t')
# version getter
def getversion(self):
return self.version
# version setter
def setversion(self, version):
self.version = version
# time getter
def gettime(self):
return self.time
# time setter
def settime(self, time=0):
self.time = time
# stale getter
def getstale(self):
return self.stale
# stale setter
def setstale(self, stale=0):
self.stale = stale
# type getter
def gettype(self):
return self.type
# type setter
def settype(self, type=0):
self.type = type |
py | b416b1f98d3d1b997571910204481a08250e3c30 | """Start Home Assistant."""
import argparse
import os
import platform
import subprocess
import sys
import threading
from typing import List, Dict, Any, TYPE_CHECKING
from homeassistant import monkey_patch
from homeassistant.const import __version__, REQUIRED_PYTHON_VER, RESTART_EXIT_CODE
if TYPE_CHECKING:
from homeassistant import core
def set_loop() -> None:
"""Attempt to use uvloop."""
import asyncio
from asyncio.events import BaseDefaultEventLoopPolicy
policy = None
if sys.platform == "win32":
if hasattr(asyncio, "WindowsProactorEventLoopPolicy"):
# pylint: disable=no-member
policy = asyncio.WindowsProactorEventLoopPolicy()
else:
class ProactorPolicy(BaseDefaultEventLoopPolicy):
"""Event loop policy to create proactor loops."""
_loop_factory = asyncio.ProactorEventLoop
policy = ProactorPolicy()
else:
try:
import uvloop
except ImportError:
pass
else:
policy = uvloop.EventLoopPolicy()
if policy is not None:
asyncio.set_event_loop_policy(policy)
def validate_python() -> None:
"""Validate that the right Python version is running."""
if sys.version_info[:3] < REQUIRED_PYTHON_VER:
print(
"Home Assistant requires at least Python {}.{}.{}".format(
*REQUIRED_PYTHON_VER
)
)
sys.exit(1)
def ensure_config_path(config_dir: str) -> None:
"""Validate the configuration directory."""
import homeassistant.config as config_util
lib_dir = os.path.join(config_dir, "deps")
# Test if configuration directory exists
if not os.path.isdir(config_dir):
if config_dir != config_util.get_default_config_dir():
print(
(
"Fatal Error: Specified configuration directory does "
"not exist {} "
).format(config_dir)
)
sys.exit(1)
try:
os.mkdir(config_dir)
except OSError:
print(
(
"Fatal Error: Unable to create default configuration "
"directory {} "
).format(config_dir)
)
sys.exit(1)
# Test if library directory exists
if not os.path.isdir(lib_dir):
try:
os.mkdir(lib_dir)
except OSError:
print(
("Fatal Error: Unable to create library " "directory {} ").format(
lib_dir
)
)
sys.exit(1)
async def ensure_config_file(hass: "core.HomeAssistant", config_dir: str) -> str:
"""Ensure configuration file exists."""
import homeassistant.config as config_util
config_path = await config_util.async_ensure_config_exists(hass, config_dir)
if config_path is None:
print("Error getting configuration path")
sys.exit(1)
return config_path
def get_arguments() -> argparse.Namespace:
"""Get parsed passed in arguments."""
import homeassistant.config as config_util
parser = argparse.ArgumentParser(
description="Home Assistant: Observe, Control, Automate."
)
parser.add_argument("--version", action="version", version=__version__)
parser.add_argument(
"-c",
"--config",
metavar="path_to_config_dir",
default=config_util.get_default_config_dir(),
help="Directory that contains the Home Assistant configuration",
)
parser.add_argument(
"--demo-mode", action="store_true", help="Start Home Assistant in demo mode"
)
parser.add_argument(
"--debug", action="store_true", help="Start Home Assistant in debug mode"
)
parser.add_argument(
"--open-ui", action="store_true", help="Open the webinterface in a browser"
)
parser.add_argument(
"--skip-pip",
action="store_true",
help="Skips pip install of required packages on startup",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Enable verbose logging to file."
)
parser.add_argument(
"--pid-file",
metavar="path_to_pid_file",
default=None,
help="Path to PID file useful for running as daemon",
)
parser.add_argument(
"--log-rotate-days",
type=int,
default=None,
help="Enables daily log rotation and keeps up to the specified days",
)
parser.add_argument(
"--log-file",
type=str,
default=None,
help="Log file to write to. If not set, CONFIG/home-assistant.log " "is used",
)
parser.add_argument(
"--log-no-color", action="store_true", help="Disable color logs"
)
parser.add_argument(
"--runner",
action="store_true",
help=f"On restart exit with code {RESTART_EXIT_CODE}",
)
parser.add_argument(
"--script", nargs=argparse.REMAINDER, help="Run one of the embedded scripts"
)
if os.name == "posix":
parser.add_argument(
"--daemon", action="store_true", help="Run Home Assistant as daemon"
)
arguments = parser.parse_args()
if os.name != "posix" or arguments.debug or arguments.runner:
setattr(arguments, "daemon", False)
return arguments
def daemonize() -> None:
"""Move current process to daemon process."""
# Create first fork
pid = os.fork()
if pid > 0:
sys.exit(0)
# Decouple fork
os.setsid()
# Create second fork
pid = os.fork()
if pid > 0:
sys.exit(0)
# redirect standard file descriptors to devnull
infd = open(os.devnull, "r")
outfd = open(os.devnull, "a+")
sys.stdout.flush()
sys.stderr.flush()
os.dup2(infd.fileno(), sys.stdin.fileno())
os.dup2(outfd.fileno(), sys.stdout.fileno())
os.dup2(outfd.fileno(), sys.stderr.fileno())
def check_pid(pid_file: str) -> None:
"""Check that Home Assistant is not already running."""
# Check pid file
try:
with open(pid_file, "r") as file:
pid = int(file.readline())
except OSError:
# PID File does not exist
return
# If we just restarted, we just found our own pidfile.
if pid == os.getpid():
return
try:
os.kill(pid, 0)
except OSError:
# PID does not exist
return
print("Fatal Error: HomeAssistant is already running.")
sys.exit(1)
def write_pid(pid_file: str) -> None:
"""Create a PID File."""
pid = os.getpid()
try:
with open(pid_file, "w") as file:
file.write(str(pid))
except OSError:
print(f"Fatal Error: Unable to write pid file {pid_file}")
sys.exit(1)
def closefds_osx(min_fd: int, max_fd: int) -> None:
"""Make sure file descriptors get closed when we restart.
We cannot call close on guarded fds, and we cannot easily test which fds
are guarded. But we can set the close-on-exec flag on everything we want to
get rid of.
"""
from fcntl import fcntl, F_GETFD, F_SETFD, FD_CLOEXEC
for _fd in range(min_fd, max_fd):
try:
val = fcntl(_fd, F_GETFD)
if not val & FD_CLOEXEC:
fcntl(_fd, F_SETFD, val | FD_CLOEXEC)
except OSError:
pass
def cmdline() -> List[str]:
"""Collect path and arguments to re-execute the current hass instance."""
if os.path.basename(sys.argv[0]) == "__main__.py":
modulepath = os.path.dirname(sys.argv[0])
os.environ["PYTHONPATH"] = os.path.dirname(modulepath)
return [sys.executable] + [arg for arg in sys.argv if arg != "--daemon"]
return [arg for arg in sys.argv if arg != "--daemon"]
async def setup_and_run_hass(config_dir: str, args: argparse.Namespace) -> int:
"""Set up HASS and run."""
# pylint: disable=redefined-outer-name
from homeassistant import bootstrap, core
hass = core.HomeAssistant()
if args.demo_mode:
config: Dict[str, Any] = {"frontend": {}, "demo": {}}
        await bootstrap.async_from_config_dict(
config,
hass,
config_dir=config_dir,
verbose=args.verbose,
skip_pip=args.skip_pip,
log_rotate_days=args.log_rotate_days,
log_file=args.log_file,
log_no_color=args.log_no_color,
)
else:
config_file = await ensure_config_file(hass, config_dir)
print("Config directory:", config_dir)
await bootstrap.async_from_config_file(
config_file,
hass,
verbose=args.verbose,
skip_pip=args.skip_pip,
log_rotate_days=args.log_rotate_days,
log_file=args.log_file,
log_no_color=args.log_no_color,
)
if args.open_ui and hass.config.api is not None:
import webbrowser
hass.add_job(webbrowser.open, hass.config.api.base_url)
return await hass.async_run()
def try_to_restart() -> None:
"""Attempt to clean up state and start a new Home Assistant instance."""
# Things should be mostly shut down already at this point, now just try
# to clean up things that may have been left behind.
sys.stderr.write("Home Assistant attempting to restart.\n")
# Count remaining threads, ideally there should only be one non-daemonized
# thread left (which is us). Nothing we really do with it, but it might be
# useful when debugging shutdown/restart issues.
try:
nthreads = sum(
thread.is_alive() and not thread.daemon for thread in threading.enumerate()
)
if nthreads > 1:
sys.stderr.write(f"Found {nthreads} non-daemonic threads.\n")
# Somehow we sometimes seem to trigger an assertion in the python threading
# module. It seems we find threads that have no associated OS level thread
# which are not marked as stopped at the python level.
except AssertionError:
sys.stderr.write("Failed to count non-daemonic threads.\n")
# Try to not leave behind open filedescriptors with the emphasis on try.
try:
max_fd = os.sysconf("SC_OPEN_MAX")
except ValueError:
max_fd = 256
if platform.system() == "Darwin":
closefds_osx(3, max_fd)
else:
os.closerange(3, max_fd)
# Now launch into a new instance of Home Assistant. If this fails we
# fall through and exit with error 100 (RESTART_EXIT_CODE) in which case
# systemd will restart us when RestartForceExitStatus=100 is set in the
# systemd.service file.
sys.stderr.write("Restarting Home Assistant\n")
args = cmdline()
os.execv(args[0], args)
def main() -> int:
"""Start Home Assistant."""
validate_python()
monkey_patch_needed = sys.version_info[:3] < (3, 6, 3)
if monkey_patch_needed and os.environ.get("HASS_NO_MONKEY") != "1":
monkey_patch.disable_c_asyncio()
monkey_patch.patch_weakref_tasks()
set_loop()
# Run a simple daemon runner process on Windows to handle restarts
if os.name == "nt" and "--runner" not in sys.argv:
nt_args = cmdline() + ["--runner"]
while True:
try:
subprocess.check_call(nt_args)
sys.exit(0)
except KeyboardInterrupt:
sys.exit(0)
except subprocess.CalledProcessError as exc:
if exc.returncode != RESTART_EXIT_CODE:
sys.exit(exc.returncode)
args = get_arguments()
if args.script is not None:
from homeassistant import scripts
return scripts.run(args.script)
config_dir = os.path.join(os.getcwd(), args.config)
ensure_config_path(config_dir)
# Daemon functions
if args.pid_file:
check_pid(args.pid_file)
if args.daemon:
daemonize()
if args.pid_file:
write_pid(args.pid_file)
from homeassistant.util.async_ import asyncio_run
exit_code = asyncio_run(setup_and_run_hass(config_dir, args))
if exit_code == RESTART_EXIT_CODE and not args.runner:
try_to_restart()
return exit_code # type: ignore
if __name__ == "__main__":
sys.exit(main())
|
py | b416b22984e609747db7e069a1850ea512cd1396 | """This modules creates a sac model in PyTorch."""
import copy
from dowel import logger, tabular
import numpy as np
import torch
import torch.nn.functional as F
from metarl.np.algos.off_policy_rl_algorithm import OffPolicyRLAlgorithm
from metarl.torch.utils import np_to_torch, torch_to_np
from collections import deque
from metarl import log_performance
class SAC(OffPolicyRLAlgorithm):
""" A SAC Model in Torch.
Soft Actor Critic (SAC) is an algorithm which optimizes a stochastic
policy in an off-policy way, forming a bridge between stochastic policy
optimization and DDPG-style approaches.
A central feature of SAC is entropy regularization. The policy is trained
to maximize a trade-off between expected return and entropy, a measure of
randomness in the policy. This has a close connection to the
exploration-exploitation trade-off: increasing entropy results in more
exploration, which can accelerate learning later on. It can also prevent
the policy from prematurely converging to a bad local optimum.
"""
def __init__(self,
env_spec,
policy,
qf1,
qf2,
replay_buffer,
gradient_steps_per_itr,
single_step=True,
alpha=None,
target_entropy=None,
initial_log_entropy=0.,
use_automatic_entropy_tuning=True,
discount=0.99,
max_path_length=None,
buffer_batch_size=64,
min_buffer_size=int(1e4),
target_update_tau=5e-3,
policy_lr=3e-4,
qf_lr=3e-4,
reward_scale=1.0,
optimizer=torch.optim.Adam,
smooth_return=True,
input_include_goal=False):
self.policy = policy
self.qf1 = qf1
self.qf2 = qf2
self.replay_buffer = replay_buffer
self.tau = target_update_tau
self.policy_lr = policy_lr
self.qf_lr = qf_lr
self.initial_log_entropy = initial_log_entropy
self.gradient_steps = gradient_steps_per_itr
self.evaluate = False
self.input_include_goal = input_include_goal
super().__init__(env_spec=env_spec,
policy=policy,
qf=qf1,
n_train_steps=self.gradient_steps,
max_path_length=max_path_length,
buffer_batch_size=buffer_batch_size,
min_buffer_size=min_buffer_size,
replay_buffer=replay_buffer,
use_target=True,
discount=discount,
smooth_return=smooth_return)
self.reward_scale = reward_scale
# use 2 target q networks
self.target_qf1 = copy.deepcopy(self.qf1)
self.target_qf2 = copy.deepcopy(self.qf2)
self.policy_optimizer = optimizer(self.policy.parameters(),
lr=self.policy_lr)
self.qf1_optimizer = optimizer(self.qf1.parameters(), lr=self.qf_lr)
self.qf2_optimizer = optimizer(self.qf2.parameters(), lr=self.qf_lr)
# automatic entropy coefficient tuning
self.use_automatic_entropy_tuning = use_automatic_entropy_tuning
if self.use_automatic_entropy_tuning and not alpha:
if target_entropy:
self.target_entropy = target_entropy
else:
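                # Default heuristic from the SAC paper: target entropy = -dim(action space).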
self.target_entropy = -np.prod(
self.env_spec.action_space.shape).item()
self.log_alpha = torch.tensor([self.initial_log_entropy], dtype=torch.float, requires_grad=True)
self.alpha_optimizer = optimizer([self.log_alpha], lr=self.policy_lr)
else:
self.alpha = alpha
self.episode_rewards = deque(maxlen=30)
self.success_history = []
def train(self, runner):
"""Obtain samplers and start actual training for each epoch.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
for _ in runner.step_epochs():
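            # Until the replay buffer holds min_buffer_size transitions, request enough
            # samples to warm it up; afterwards fall back to the sampler's default batch.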
if self.replay_buffer.n_transitions_stored < self.min_buffer_size:
batch_size = self.min_buffer_size
else:
batch_size = None
runner.step_path = runner.obtain_samples(runner.step_itr, batch_size)
for sample in runner.step_path:
self.replay_buffer.store(obs=sample.observation,
act=sample.action,
rew=sample.reward,
next_obs=sample.next_observation,
done=sample.terminal)
self.episode_rewards.append(sum([sample.reward for sample in runner.step_path]))
for _ in range(self.gradient_steps):
last_return, policy_loss, qf1_loss, qf2_loss = self.train_once(runner.step_itr,
runner.step_path)
log_performance(
runner.step_itr,
self._obtain_evaluation_samples(runner.get_env_copy(), num_trajs=10),
discount=self.discount)
self.log_statistics(policy_loss, qf1_loss, qf2_loss)
tabular.record('TotalEnvSteps', runner.total_env_steps)
runner.step_itr += 1
return last_return
def train_once(self, itr, paths):
"""
"""
if self.replay_buffer.n_transitions_stored >= self.min_buffer_size: # noqa: E501
samples = self.replay_buffer.sample(self.buffer_batch_size)
policy_loss, qf1_loss, qf2_loss = self.optimize_policy(itr, samples)
self.update_targets()
return 0, policy_loss, qf1_loss, qf2_loss
def temperature_objective(self, log_pi):
"""
implemented inside optimize_policy
"""
alpha_loss = 0
if self.use_automatic_entropy_tuning:
alpha_loss = (-(self.log_alpha) * (log_pi.detach() + self.target_entropy)).mean()
return alpha_loss
def actor_objective(self, obs, log_pi, new_actions):
alpha = self.log_alpha.detach().exp()
min_q_new_actions = torch.min(self.qf1(torch.Tensor(obs), torch.Tensor(new_actions)),
self.qf2(torch.Tensor(obs), torch.Tensor(new_actions)))
policy_objective = ((alpha * log_pi) - min_q_new_actions.flatten()).mean()
return policy_objective
def critic_objective(self, samples):
'''
QF Loss
'''
obs = samples["observation"]
actions = samples["action"]
rewards = samples["reward"]
terminals = samples["terminal"]
next_obs = samples["next_observation"]
alpha = self.log_alpha.detach().exp()
q1_pred = self.qf1(torch.Tensor(obs), torch.Tensor(actions))
q2_pred = self.qf2(torch.Tensor(obs), torch.Tensor(actions))
new_next_actions_dist = self.policy(torch.Tensor(next_obs))
new_next_actions_pre_tanh, new_next_actions = new_next_actions_dist.rsample_with_pre_tanh_value()
new_log_pi = new_next_actions_dist.log_prob(value=new_next_actions, pre_tanh_value=new_next_actions_pre_tanh)
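        # Soft Bellman target: r + gamma * (1 - done) * (min_i Q_target_i(s', a') - alpha * log pi(a'|s')).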
target_q_values = torch.min(
self.target_qf1(torch.Tensor(next_obs), new_next_actions),
self.target_qf2(torch.Tensor(next_obs), new_next_actions)
).flatten() - (alpha * new_log_pi)
with torch.no_grad():
q_target = self.reward_scale * torch.Tensor(rewards) + (1. - torch.Tensor(terminals)) * self.discount * target_q_values
qf1_loss = F.mse_loss(q1_pred.flatten(), q_target)
qf2_loss = F.mse_loss(q2_pred.flatten(), q_target)
return qf1_loss, qf2_loss
def update_targets(self):
"""Update parameters in the target q-functions."""
target_qfs = [self.target_qf1, self.target_qf2]
qfs = [self.qf1, self.qf2]
for target_qf, qf in zip(target_qfs, qfs):
for t_param, param in zip(target_qf.parameters(),
qf.parameters()):
t_param.data.copy_(t_param.data * (1.0 - self.tau) +
param.data * self.tau)
def optimize_policy(self, itr, samples):
""" Optimize the policy based on the policy objective from the sac paper.
Args:
itr (int) - current training iteration
samples() - samples recovered from the replay buffer
Returns:
None
"""
obs = samples["observation"]
qf1_loss, qf2_loss = self.critic_objective(samples)
self.qf1_optimizer.zero_grad()
qf1_loss.backward()
self.qf1_optimizer.step()
self.qf2_optimizer.zero_grad()
qf2_loss.backward()
self.qf2_optimizer.step()
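        # Actor update: minimize E[alpha * log pi(a|s) - min_i Q_i(s, a)] using
        # reparameterized (rsample) actions so gradients flow through the policy.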
action_dists = self.policy(torch.Tensor(obs))
new_actions_pre_tanh, new_actions = action_dists.rsample_with_pre_tanh_value()
log_pi = action_dists.log_prob(value=new_actions, pre_tanh_value=new_actions_pre_tanh)
policy_loss = self.actor_objective(obs, log_pi, new_actions)
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
if self.use_automatic_entropy_tuning:
            alpha_loss = self.temperature_objective(log_pi)
self.alpha_optimizer.zero_grad()
alpha_loss.backward()
self.alpha_optimizer.step()
return policy_loss, qf1_loss, qf2_loss
def log_statistics(self, policy_loss, qf1_loss, qf2_loss):
tabular.record("alpha", torch.exp(self.log_alpha.detach()).item())
tabular.record("policy_loss", policy_loss.item())
tabular.record("qf_loss/{}".format("qf1_loss"), float(qf1_loss))
tabular.record("qf_loss/{}".format("qf2_loss"), float(qf2_loss))
tabular.record("buffer_size", self.replay_buffer.n_transitions_stored)
tabular.record('local/average_return', np.mean(self.episode_rewards))
|
py | b416b27f62044480d9dabddbe5381b5cb5bb6f0f | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyMethylcode(PythonPackage):
"""MethylCoder is a single program that takes of bisulfite-treated
reads and outputs per-base methylation data. """
homepage = "https://github.com/brentp/methylcode"
url = "https://github.com/brentp/methylcode/archive/master.zip"
version('1.0.0', 'd0ba07c1ab2c74adddd1b23f8e5823e7')
    depends_on('python@2.6:2.7.999')  # lower bound assumed; original version spec was mangled by e-mail obfuscation
depends_on('py-six')
depends_on('py-setuptools')
depends_on('py-numpy')
depends_on('py-pyparsing')
depends_on('py-pyfasta')
depends_on('py-bsddb3')
depends_on('bowtie')
|
py | b416b31be4e3951abb817aa90ad5ed3dfda048ac | import unittest
from dekespo_ai_sdk.core.dimensions import Dim2D
from dekespo_ai_sdk.core.graph import Graph
from dekespo_ai_sdk.core.shapes import Shape2DType
from dekespo_ai_sdk.core.raw_data_handler import RawDataHandler
from dekespo_ai_sdk.core.neighbour import NeighbourData, NeighbourType
from tests.templates.rectangle_world import (
example_small_random,
example_unreachable_positions,
)
# TODO: Add non-blocking (empty) graph
# TODO: Separate test_graph_data_with_blocking into small unittests with setup
class GraphTest(unittest.TestCase):
def test_graph_data_with_blocking(self):
raw_data_handler = RawDataHandler(example_small_random())
graph = Graph(raw_data_handler, Shape2DType.RECTANGLE)
self.assertTrue(
str(graph),
"""
Shape Type: Type.RECTANGLE
Raw data:
0 |1 |0 |1
1 |0 |0 |0
0 |0 |0 |0
""",
)
self.assertTrue(graph.raw_data_handler, example_small_random())
self.assertTupleEqual(graph.blocking_values, ())
self.assertTupleEqual(graph.blocking_positions, ())
new_blocking_values = set([1])
graph.update_blocking_data(new_blocking_values)
self.assertTrue(graph.blocking_values, set([1]))
self.assertTrue(
graph.blocking_positions, [Dim2D(1, 0), Dim2D(3, 0), Dim2D(1, 1)]
)
self.assertTrue(graph.graph_shape.is_inside_boundaries(Dim2D(1, 1)))
self.assertFalse(graph.graph_shape.is_inside_boundaries(Dim2D(-1, 1)))
pos = Dim2D(1, 1)
neighbour_data = NeighbourData(NeighbourType.DIAMOND, 2)
available_poses_dic = graph.get_available_neighbours(pos, neighbour_data)
self.assertEqual(len(available_poses_dic), 7)
self.assertIn(Dim2D(0, 0), available_poses_dic)
self.assertEqual(available_poses_dic[Dim2D(0, 0)], 2)
self.assertIn(Dim2D(0, 2), available_poses_dic)
self.assertEqual(available_poses_dic[Dim2D(0, 2)], 2)
self.assertIn(Dim2D(1, 2), available_poses_dic)
self.assertEqual(available_poses_dic[Dim2D(1, 2)], 1)
self.assertIn(Dim2D(2, 0), available_poses_dic)
self.assertEqual(available_poses_dic[Dim2D(2, 0)], 2)
self.assertIn(Dim2D(2, 1), available_poses_dic)
self.assertEqual(available_poses_dic[Dim2D(2, 1)], 1)
self.assertIn(Dim2D(2, 2), available_poses_dic)
self.assertEqual(available_poses_dic[Dim2D(2, 2)], 2)
self.assertIn(Dim2D(3, 1), available_poses_dic)
self.assertEqual(available_poses_dic[Dim2D(3, 1)], 2)
pos1 = Dim2D(0, 2)
neighbour_data = NeighbourData(NeighbourType.CROSS, 1)
available_poses_dic = graph.get_available_neighbours(pos1, neighbour_data)
self.assertEqual(len(available_poses_dic), 1)
self.assertEqual(tuple(available_poses_dic.keys())[0], Dim2D(1, 2))
pos2 = Dim2D(2, 1)
neighbour_data = NeighbourData(NeighbourType.SQUARE)
available_poses_dic = graph.get_available_neighbours(pos2, neighbour_data)
self.assertEqual(len(available_poses_dic), 6)
self.assertIn(Dim2D(2, 0), available_poses_dic)
self.assertEqual(available_poses_dic[Dim2D(2, 0)], 1)
self.assertIn(Dim2D(2, 2), available_poses_dic)
self.assertEqual(available_poses_dic[Dim2D(2, 2)], 1)
self.assertIn(Dim2D(1, 1), available_poses_dic)
self.assertEqual(available_poses_dic[Dim2D(1, 1)], 1)
self.assertIn(Dim2D(1, 2), available_poses_dic)
self.assertEqual(available_poses_dic[Dim2D(1, 2)], 2)
self.assertIn(Dim2D(3, 1), available_poses_dic)
self.assertEqual(available_poses_dic[Dim2D(3, 1)], 1)
def test_unreachable_graph_data(self):
raw_data_handler = RawDataHandler(example_unreachable_positions())
blocking_set = set([1])
unreachable_positions_set = set([Dim2D(3, 2)])
graph = Graph(
raw_data_handler,
Shape2DType.RECTANGLE,
blocking_set,
unreachable_positions_set,
)
self.assertTrue(graph.blocking_values, tuple(blocking_set))
self.assertTrue(
graph.blocking_positions, [Dim2D(2, 1), Dim2D(3, 1), Dim2D(2, 2)]
)
self.assertTrue(graph.unreachable_positions, tuple(unreachable_positions_set))
pos = Dim2D(1, 2)
neighbour_data = NeighbourData(NeighbourType.CROSS, 2)
available_poses_dic = graph.get_available_neighbours(pos, neighbour_data)
self.assertEqual(len(available_poses_dic), 3)
self.assertIn(Dim2D(1, 0), available_poses_dic)
self.assertEqual(available_poses_dic[Dim2D(1, 0)], 2)
self.assertIn(Dim2D(1, 1), available_poses_dic)
self.assertEqual(available_poses_dic[Dim2D(1, 1)], 1)
self.assertIn(Dim2D(0, 2), available_poses_dic)
self.assertEqual(available_poses_dic[Dim2D(0, 2)], 1)
if __name__ == "__main__":
unittest.main()
|
py | b416b3719f3972c5c3c5986050a92c8623a0fbec | #!/usr/bin/python3
import os
import time
import requests
import psutil
from subprocess import check_output
dns_suffix = check_output(['sudo', 'printenv', 'DNS_SUFFIX'], universal_newlines=True).strip()
url = f"https://buildthewarrior{dns_suffix}/complete"
def check_cpu_usage():
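    # Sample CPU utilization over one second and report completion when it is at least 40%.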
current_cpu_percent = psutil.cpu_percent(interval=1)
if current_cpu_percent >= 40.0:
return True
else:
return False
def publish(question):
token = os.environ.get(f'WORKOUTKEY{question}')
workout_id = os.environ.get('WORKOUTID')
workout = {
"workout_id": workout_id,
"token": token,
}
    response = requests.post(url, json=workout)
last_reboot = psutil.boot_time()
current_time = time.time()
if current_time - last_reboot > 300:
if check_cpu_usage():
publish(question=0)
print("Workout Complete")
else:
print("Incomplete")
|
py | b416b4468e6b5782d5a2f1cf4eb32dc6df06e834 | # Parallel test script for initializing problem with preexisting array
# Standard modules
import sys
# Other modules
import logging
import numpy as np
import h5py
# Athena modules
import scripts.utils.athena as athena
sys.path.insert(0, '../../vis/python')
import athena_read # noqa
athena_read.check_nan_flag = True
logger = logging.getLogger('athena' + __name__[7:]) # set logger name based on module
# Parameters
filename_input = 'initial_data.hdf5'
filename_output = 'from_array.cons.00000.athdf'
dataset_cons = 'cons'
dataset_b1 = 'b1'
dataset_b2 = 'b2'
dataset_b3 = 'b3'
nb1 = 4
nx1 = 4
nx2 = 6
nx3 = 4
gamma = 5.0/3.0
num_ranks = 3
# Prepare Athena++
def prepare(**kwargs):
logger.debug('Running test ' + __name__)
# Configure and compile code
athena.configure('b',
'mpi',
'hdf5', 'h5double',
prob='from_array',
**kwargs)
athena.make()
# Calculate initial field values
b1 = np.empty((nx3, nx2, nb1 * nx1 + 1))
b1[...] = np.arange(nx2)[None, :, None] - np.arange(nx3)[:, None, None]
b1_input = np.empty((nb1, nx3, nx2, nx1 + 1))
b2_input = np.zeros((nb1, nx3, nx2 + 1, nx1))
b3_input = np.zeros((nb1, nx3 + 1, nx2, nx1))
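    # Slice the global face-centered B1 field into per-meshblock arrays (nx1 + 1 faces each).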
for n in range(nb1):
b1_input[n, ...] = b1[:, :, n*nx1:(n+1)*nx1+1]
# (second-order accurate assumption)
b1v = 0.5 * (b1_input[:, :, :, :-1] + b1_input[:, :, :, 1:])
# Calculate initial conserved values
num_cells = nb1 * nx1 * nx2 * nx3
density = np.reshape(np.arange(1, num_cells+1), (1, nb1, nx3, nx2, nx1))
momentum = np.zeros((3, nb1, nx3, nx2, nx1))
energy = np.ones((1, nb1, nx3, nx2, nx1)) / (gamma - 1.0) + 0.5 * b1v[None, ...] ** 2
cons_input = np.vstack((density, momentum, energy))
# Write file to be loaded
with h5py.File('bin/{0}'.format(filename_input), 'w') as f:
f.create_dataset(dataset_cons, data=cons_input)
f.create_dataset(dataset_b1, data=b1_input)
f.create_dataset(dataset_b2, data=b2_input)
f.create_dataset(dataset_b3, data=b3_input)
# Run Athena++
def run(**kwargs):
arguments = ['time/tlim=0',
'time/ncycle_out=0',
'mesh/nx1={0}'.format(nb1 * nx1),
'mesh/nx2={0}'.format(nx2),
'mesh/nx3={0}'.format(nx3),
'meshblock/nx1={0}'.format(nx1),
'meshblock/nx2={0}'.format(nx2),
'meshblock/nx3={0}'.format(nx3),
'problem/input_filename={0}'.format(filename_input)]
athena.mpirun(kwargs['mpirun_cmd'], kwargs['mpirun_opts'], num_ranks,
'mhd/athinput.from_array', arguments)
# Analyze outputs
def analyze():
analyze_status = True
# Read input data
with h5py.File('bin/{0}'.format(filename_input), 'r') as f:
cons_input = f[dataset_cons][:]
b1_input = f[dataset_b1][:]
b2_input = f[dataset_b2][:]
b3_input = f[dataset_b3][:]
# Calculate cell-centered field inputs from face-centered values
# (second-order accurate assumption)
b1v = 0.5 * (b1_input[:, :, :, :-1] + b1_input[:, :, :, 1:])
b2v = 0.5 * (b2_input[:, :, :-1, :] + b2_input[:, :, 1:, :])
b3v = 0.5 * (b3_input[:, :-1, :, :] + b3_input[:, 1:, :, :])
# Read output data
with h5py.File('bin/{0}'.format(filename_output), 'r') as f:
num_vars = f.attrs['NumVariables']
dataset_names = f.attrs['DatasetNames'].astype('U')
output_vars = f.attrs['VariableNames'].astype('U')
cons_output = f['cons'][:]
field_output = f['B'][:]
# Order conserved output data to match inputs
index_cons = np.where(dataset_names == 'cons')[0][0]
num_vars_cons = num_vars[index_cons]
num_vars_pre_cons = np.sum(num_vars[:index_cons])
output_vars_cons = output_vars[num_vars_pre_cons:num_vars_pre_cons+num_vars_cons]
dens = cons_output[np.where(output_vars_cons == 'dens')[0], ...]
mom1 = cons_output[np.where(output_vars_cons == 'mom1')[0], ...]
mom2 = cons_output[np.where(output_vars_cons == 'mom2')[0], ...]
mom3 = cons_output[np.where(output_vars_cons == 'mom3')[0], ...]
etot = cons_output[np.where(output_vars_cons == 'Etot')[0], ...]
cons_output = np.vstack((dens, mom1, mom2, mom3, etot))
# Order field output data to match inputs
index_field = np.where(dataset_names == 'B')[0][0]
num_vars_field = num_vars[index_field]
num_vars_pre_field = np.sum(num_vars[:index_field])
output_vars_field = output_vars[num_vars_pre_field:num_vars_pre_field+num_vars_field]
b1_output = field_output[np.where(output_vars_field == 'Bcc1')[0][0], ...]
b2_output = field_output[np.where(output_vars_field == 'Bcc2')[0][0], ...]
b3_output = field_output[np.where(output_vars_field == 'Bcc3')[0][0], ...]
# Check that outputs match inputs
if not np.all(cons_output == cons_input):
analyze_status = False
if not np.all(b1_output == b1v):
analyze_status = False
if not np.all(b2_output == b2v):
analyze_status = False
if not np.all(b3_output == b3v):
analyze_status = False
return analyze_status
|